// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 1994  Linus Torvalds
 *
 *  Cyrix stuff, June 1998 by:
 *	- Rafael R. Reilova (moved everything from head.S),
 *	  <rreilova@ececs.uc.edu>
 *	- Channing Corn (tests & fixes),
 *	- Andrew D. Balsa (code cleanup).
 */
#include <linux/init.h>
#include <linux/utsname.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched/smt.h>
#include <linux/pgtable.h>

#include <asm/spec-ctrl.h>
#include <asm/cmdline.h>
#include <asm/bugs.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/fpu/internal.h>
#include <asm/msr.h>
#include <asm/vmx.h>
#include <asm/paravirt.h>
#include <asm/alternative.h>
#include <asm/set_memory.h>
#include <asm/intel-family.h>
#include <asm/e820/api.h>
#include <asm/hypervisor.h>
#include <asm/tlbflush.h>

#include "cpu.h"
static void __init spectre_v1_select_mitigation(void);
static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void);
static void __init mds_select_mitigation(void);
static void __init mds_print_mitigation(void);
static void __init taa_select_mitigation(void);
static void __init srbds_select_mitigation(void);
static void __init l1d_flush_select_mitigation(void);
/* The base value of the SPEC_CTRL MSR that always has to be preserved. */
u64 x86_spec_ctrl_base;
EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
static DEFINE_MUTEX(spec_ctrl_mutex);
/*
 * The vendor and possibly platform specific bits which can be modified in
 * x86_spec_ctrl_base.
 */
static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;
/*
 * AMD specific MSR info for Speculative Store Bypass control.
 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
 */
u64 __ro_after_init x86_amd_ls_cfg_base;
u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
/* Control conditional STIBP in switch_to() */
DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
/* Control conditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
/* Control unconditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

/* Control MDS CPU buffer clear before returning to user space */
DEFINE_STATIC_KEY_FALSE(mds_user_clear);
EXPORT_SYMBOL_GPL(mds_user_clear);
/* Control MDS CPU buffer clear before idling (halt, mwait) */
DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
EXPORT_SYMBOL_GPL(mds_idle_clear);
/*
 * Controls whether l1d flush based mitigations are enabled,
 * based on hw features and admin setting via boot parameter,
 * defaulting to disabled.
 */
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
void __init check_bugs(void)
{
        identify_boot_cpu();

        /*
         * identify_boot_cpu() initialized SMT support information, let the
         * core code know.
         */
        cpu_smt_check_topology();

        if (!IS_ENABLED(CONFIG_SMP)) {
                pr_info("CPU: ");
                print_cpu_info(&boot_cpu_data);
        }

        /*
         * Read the SPEC_CTRL MSR to account for reserved bits which may
         * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
         * init code as it is not enumerated and depends on the family.
         */
        if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
                rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

        /* Allow STIBP in MSR_SPEC_CTRL if supported */
        if (boot_cpu_has(X86_FEATURE_STIBP))
                x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;

        /* Select the proper CPU mitigations before patching alternatives: */
        spectre_v1_select_mitigation();
        spectre_v2_select_mitigation();
        ssb_select_mitigation();
        l1tf_select_mitigation();
        mds_select_mitigation();
        taa_select_mitigation();
        srbds_select_mitigation();
        l1d_flush_select_mitigation();

        /*
         * As the MDS and TAA mitigations are inter-related, defer printing
         * the MDS mitigation until after TAA mitigation selection is done.
         */
        mds_print_mitigation();

        arch_smt_update();

#ifdef CONFIG_X86_32
        /*
         * Check whether we are able to run this kernel safely on SMP.
         *
         * - i386 is no longer supported.
         * - In order to run on anything without a TSC, we need to be
         *   compiled for a i486.
         */
        if (boot_cpu_data.x86 < 4)
                panic("Kernel requires i486+ for 'invlpg' and other features");

        init_utsname()->machine[1] =
                '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
        alternative_instructions();

        fpu__init_check_bugs();
#else /* CONFIG_X86_64 */
        alternative_instructions();

        /*
         * Make sure the first 2MB area is not mapped by huge pages.
         * There are typically fixed size MTRRs in there and overlapping
         * MTRRs into large pages causes slow downs.
         *
         * Right now we don't do that with gbpages because there seems
         * very little benefit for that case.
         */
        if (!direct_gbpages)
                set_memory_4k((unsigned long)__va(0), 1);
#endif
}
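/*
 * Illustrative examples (not exhaustive) of how the selection above is
 * steered from the kernel command line: "mitigations=off" makes
 * cpu_mitigations_off() true and short-circuits most of the select
 * functions, while e.g. "mds=full,nosmt" both enables the VERW based
 * buffer clearing and disables SMT on affected parts.
 */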
void
x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
{
        u64 msrval, guestval, hostval = x86_spec_ctrl_base;
        struct thread_info *ti = current_thread_info();

        /* Is MSR_SPEC_CTRL implemented ? */
        if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
                /*
                 * Restrict guest_spec_ctrl to supported values. Clear the
                 * modifiable bits in the host base value and or the
                 * modifiable bits from the guest value.
                 */
                guestval = hostval & ~x86_spec_ctrl_mask;
                guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;

                /* SSBD controlled in MSR_SPEC_CTRL */
                if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
                    static_cpu_has(X86_FEATURE_AMD_SSBD))
                        hostval |= ssbd_tif_to_spec_ctrl(ti->flags);

                /* Conditional STIBP enabled? */
                if (static_branch_unlikely(&switch_to_cond_stibp))
                        hostval |= stibp_tif_to_spec_ctrl(ti->flags);

                if (hostval != guestval) {
                        msrval = setguest ? guestval : hostval;
                        wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
                }
        }

        /*
         * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
         * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
         */
        if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
            !static_cpu_has(X86_FEATURE_VIRT_SSBD))
                return;

        /*
         * If the host has SSBD mitigation enabled, force it in the host's
         * virtual MSR value. If it's not permanently enabled, evaluate
         * current's TIF_SSBD thread flag.
         */
        if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
                hostval = SPEC_CTRL_SSBD;
        else
                hostval = ssbd_tif_to_spec_ctrl(ti->flags);

        /* Sanitize the guest value */
        guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;

        if (hostval != guestval) {
                unsigned long tif;

                tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
                                 ssbd_spec_ctrl_to_tif(hostval);

                speculation_ctrl_update(tif);
        }
}
EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
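/*
 * Sketch of the intended call pattern from a hypervisor (KVM/SVM style;
 * the guest_spec_ctrl/guest_virt_spec_ctrl values are assumed to be
 * tracked by the caller, they are not defined here):
 *
 *	x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, true);
 *	... run the guest ...
 *	x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, false);
 */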
static void x86_amd_ssb_disable(void)
{
        u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;

        if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
                wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
        else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
                wrmsrl(MSR_AMD64_LS_CFG, msrval);
}
#undef pr_fmt
#define pr_fmt(fmt)	"MDS: " fmt

/* Default mitigation for MDS-affected CPUs */
static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL;
static bool mds_nosmt __ro_after_init = false;

static const char * const mds_strings[] = {
        [MDS_MITIGATION_OFF]	= "Vulnerable",
        [MDS_MITIGATION_FULL]	= "Mitigation: Clear CPU buffers",
        [MDS_MITIGATION_VMWERV]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
};
static void __init mds_select_mitigation(void)
{
        if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) {
                mds_mitigation = MDS_MITIGATION_OFF;
                return;
        }

        if (mds_mitigation == MDS_MITIGATION_FULL) {
                if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
                        mds_mitigation = MDS_MITIGATION_VMWERV;

                static_branch_enable(&mds_user_clear);

                if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
                    (mds_nosmt || cpu_mitigations_auto_nosmt()))
                        cpu_smt_disable(false);
        }
}
static void __init mds_print_mitigation(void)
{
        if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off())
                return;

        pr_info("%s\n", mds_strings[mds_mitigation]);
}
static int __init mds_cmdline(char *str)
{
        if (!boot_cpu_has_bug(X86_BUG_MDS))
                return 0;

        if (!str)
                return -EINVAL;

        if (!strcmp(str, "off"))
                mds_mitigation = MDS_MITIGATION_OFF;
        else if (!strcmp(str, "full"))
                mds_mitigation = MDS_MITIGATION_FULL;
        else if (!strcmp(str, "full,nosmt")) {
                mds_mitigation = MDS_MITIGATION_FULL;
                mds_nosmt = true;
        }

        return 0;
}
early_param("mds", mds_cmdline);
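/*
 * Example: booting with "mds=full,nosmt" selects MDS_MITIGATION_FULL
 * and, on CPUs not marked X86_BUG_MSBDS_ONLY, also disables SMT via
 * mds_select_mitigation() above.
 */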
#undef pr_fmt
#define pr_fmt(fmt)	"TAA: " fmt

enum taa_mitigations {
        TAA_MITIGATION_OFF,
        TAA_MITIGATION_UCODE_NEEDED,
        TAA_MITIGATION_VERW,
        TAA_MITIGATION_TSX_DISABLED,
};

/* Default mitigation for TAA-affected CPUs */
static enum taa_mitigations taa_mitigation __ro_after_init = TAA_MITIGATION_VERW;
static bool taa_nosmt __ro_after_init;

static const char * const taa_strings[] = {
        [TAA_MITIGATION_OFF]		= "Vulnerable",
        [TAA_MITIGATION_UCODE_NEEDED]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
        [TAA_MITIGATION_VERW]		= "Mitigation: Clear CPU buffers",
        [TAA_MITIGATION_TSX_DISABLED]	= "Mitigation: TSX disabled",
};
static void __init taa_select_mitigation(void)
{
        u64 ia32_cap;

        if (!boot_cpu_has_bug(X86_BUG_TAA)) {
                taa_mitigation = TAA_MITIGATION_OFF;
                return;
        }

        /* TSX previously disabled by tsx=off */
        if (!boot_cpu_has(X86_FEATURE_RTM)) {
                taa_mitigation = TAA_MITIGATION_TSX_DISABLED;
                goto out;
        }

        if (cpu_mitigations_off()) {
                taa_mitigation = TAA_MITIGATION_OFF;
                return;
        }

        /*
         * TAA mitigation via VERW is turned off if both
         * tsx_async_abort=off and mds=off are specified.
         */
        if (taa_mitigation == TAA_MITIGATION_OFF &&
            mds_mitigation == MDS_MITIGATION_OFF)
                goto out;

        if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
                taa_mitigation = TAA_MITIGATION_VERW;
        else
                taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;

        /*
         * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1.
         * A microcode update fixes this behavior to clear CPU buffers. It also
         * adds support for MSR_IA32_TSX_CTRL which is enumerated by the
         * ARCH_CAP_TSX_CTRL_MSR bit.
         *
         * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
         * update is required.
         */
        ia32_cap = x86_read_arch_cap_msr();
        if ( (ia32_cap & ARCH_CAP_MDS_NO) &&
            !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR))
                taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;

        /*
         * TSX is enabled, select alternate mitigation for TAA which is
         * the same as MDS. Enable MDS static branch to clear CPU buffers.
         *
         * For guests that can't determine whether the correct microcode is
         * present on host, enable the mitigation for UCODE_NEEDED as well.
         */
        static_branch_enable(&mds_user_clear);

        if (taa_nosmt || cpu_mitigations_auto_nosmt())
                cpu_smt_disable(false);

        /*
         * Update MDS mitigation, if necessary, as the mds_user_clear is
         * now enabled for TAA mitigation.
         */
        if (mds_mitigation == MDS_MITIGATION_OFF &&
            boot_cpu_has_bug(X86_BUG_MDS)) {
                mds_mitigation = MDS_MITIGATION_FULL;
                mds_select_mitigation();
        }
out:
        pr_info("%s\n", taa_strings[taa_mitigation]);
}
static int __init tsx_async_abort_parse_cmdline(char *str)
{
        if (!boot_cpu_has_bug(X86_BUG_TAA))
                return 0;

        if (!str)
                return -EINVAL;

        if (!strcmp(str, "off")) {
                taa_mitigation = TAA_MITIGATION_OFF;
        } else if (!strcmp(str, "full")) {
                taa_mitigation = TAA_MITIGATION_VERW;
        } else if (!strcmp(str, "full,nosmt")) {
                taa_mitigation = TAA_MITIGATION_VERW;
                taa_nosmt = true;
        }

        return 0;
}
early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);
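/*
 * Note the interplay documented in taa_select_mitigation(): the VERW
 * based clearing is only left off when *both* "tsx_async_abort=off"
 * and "mds=off" are given, since the same buffer clearing serves both
 * mitigations.
 */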
#undef pr_fmt
#define pr_fmt(fmt)	"SRBDS: " fmt

enum srbds_mitigations {
        SRBDS_MITIGATION_OFF,
        SRBDS_MITIGATION_UCODE_NEEDED,
        SRBDS_MITIGATION_FULL,
        SRBDS_MITIGATION_TSX_OFF,
        SRBDS_MITIGATION_HYPERVISOR,
};

static enum srbds_mitigations srbds_mitigation __ro_after_init = SRBDS_MITIGATION_FULL;

static const char * const srbds_strings[] = {
        [SRBDS_MITIGATION_OFF]		= "Vulnerable",
        [SRBDS_MITIGATION_UCODE_NEEDED]	= "Vulnerable: No microcode",
        [SRBDS_MITIGATION_FULL]		= "Mitigation: Microcode",
        [SRBDS_MITIGATION_TSX_OFF]	= "Mitigation: TSX disabled",
        [SRBDS_MITIGATION_HYPERVISOR]	= "Unknown: Dependent on hypervisor status",
};

static bool srbds_off;
void update_srbds_msr(void)
{
        u64 mcu_ctrl;

        if (!boot_cpu_has_bug(X86_BUG_SRBDS))
                return;

        if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
                return;

        if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED)
                return;

        rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);

        switch (srbds_mitigation) {
        case SRBDS_MITIGATION_OFF:
        case SRBDS_MITIGATION_TSX_OFF:
                mcu_ctrl |= RNGDS_MITG_DIS;
                break;
        case SRBDS_MITIGATION_FULL:
                mcu_ctrl &= ~RNGDS_MITG_DIS;
                break;
        default:
                break;
        }

        wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
}
static void __init srbds_select_mitigation(void)
{
        u64 ia32_cap;

        if (!boot_cpu_has_bug(X86_BUG_SRBDS))
                return;

        /*
         * Check to see if this is one of the MDS_NO systems supporting
         * TSX that are only exposed to SRBDS when TSX is enabled.
         */
        ia32_cap = x86_read_arch_cap_msr();
        if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM))
                srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
        else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
                srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR;
        else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
                srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED;
        else if (cpu_mitigations_off() || srbds_off)
                srbds_mitigation = SRBDS_MITIGATION_OFF;

        update_srbds_msr();
        pr_info("%s\n", srbds_strings[srbds_mitigation]);
}
static int __init srbds_parse_cmdline(char *str)
{
        if (!str)
                return -EINVAL;

        if (!boot_cpu_has_bug(X86_BUG_SRBDS))
                return 0;

        srbds_off = !strcmp(str, "off");
        return 0;
}
early_param("srbds", srbds_parse_cmdline);
#undef pr_fmt
#define pr_fmt(fmt)	"L1D Flush : " fmt

enum l1d_flush_mitigations {
        L1D_FLUSH_OFF = 0,
        L1D_FLUSH_ON,
};

static enum l1d_flush_mitigations l1d_flush_mitigation __initdata = L1D_FLUSH_OFF;

static void __init l1d_flush_select_mitigation(void)
{
        if (!l1d_flush_mitigation || !boot_cpu_has(X86_FEATURE_FLUSH_L1D))
                return;

        static_branch_enable(&switch_mm_cond_l1d_flush);
        pr_info("Conditional flush on switch_mm() enabled\n");
}
static int __init l1d_flush_parse_cmdline(char *str)
{
        if (!strcmp(str, "on"))
                l1d_flush_mitigation = L1D_FLUSH_ON;

        return 0;
}
early_param("l1d_flush", l1d_flush_parse_cmdline);
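/*
 * When enabled, tasks opt in individually via the speculation prctl.
 * Userspace sketch (assuming <sys/prctl.h>; illustrative only):
 *
 *	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_L1D_FLUSH,
 *	      PR_SPEC_ENABLE, 0, 0);
 */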
#undef pr_fmt
#define pr_fmt(fmt)	"Spectre V1 : " fmt

enum spectre_v1_mitigation {
        SPECTRE_V1_MITIGATION_NONE,
        SPECTRE_V1_MITIGATION_AUTO,
};

static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init =
        SPECTRE_V1_MITIGATION_AUTO;

static const char * const spectre_v1_strings[] = {
        [SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers",
        [SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization",
};
/*
 * Does SMAP provide full mitigation against speculative kernel access to
 * userspace?
 */
static bool smap_works_speculatively(void)
{
        if (!boot_cpu_has(X86_FEATURE_SMAP))
                return false;

        /*
         * On CPUs which are vulnerable to Meltdown, SMAP does not
         * prevent speculative access to user data in the L1 cache.
         * Consider SMAP to be non-functional as a mitigation on these
         * CPUs.
         */
        if (boot_cpu_has(X86_BUG_CPU_MELTDOWN))
                return false;

        return true;
}
static void __init spectre_v1_select_mitigation(void)
{
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off()) {
                spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
                return;
        }

        if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
                /*
                 * With Spectre v1, a user can speculatively control either
                 * path of a conditional swapgs with a user-controlled GS
                 * value. The mitigation is to add lfences to both code paths.
                 *
                 * If FSGSBASE is enabled, the user can put a kernel address in
                 * GS, in which case SMAP provides no protection.
                 *
                 * If FSGSBASE is disabled, the user can only put a user space
                 * address in GS. That makes an attack harder, but still
                 * possible if there's no SMAP protection.
                 */
                if (boot_cpu_has(X86_FEATURE_FSGSBASE) ||
                    !smap_works_speculatively()) {
                        /*
                         * Mitigation can be provided from SWAPGS itself or
                         * PTI as the CR3 write in the Meltdown mitigation
                         * is serializing.
                         *
                         * If neither is there, mitigate with an LFENCE to
                         * stop speculation through swapgs.
                         */
                        if (boot_cpu_has_bug(X86_BUG_SWAPGS) &&
                            !boot_cpu_has(X86_FEATURE_PTI))
                                setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);

                        /*
                         * Enable lfences in the kernel entry (non-swapgs)
                         * paths, to prevent user entry from speculatively
                         * skipping swapgs.
                         */
                        setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
                }
        }

        pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]);
}
static int __init nospectre_v1_cmdline(char *str)
{
        spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
        return 0;
}
early_param("nospectre_v1", nospectre_v1_cmdline);
#undef pr_fmt
#define pr_fmt(fmt)	"Spectre V2 : " fmt

static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
        SPECTRE_V2_NONE;

static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
        SPECTRE_V2_USER_NONE;
static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
        SPECTRE_V2_USER_NONE;
#ifdef CONFIG_RETPOLINE
static bool spectre_v2_bad_module;

bool retpoline_module_ok(bool has_retpoline)
{
        if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
                return true;

        pr_err("System may be vulnerable to spectre v2\n");
        spectre_v2_bad_module = true;
        return false;
}

static inline const char *spectre_v2_module_string(void)
{
        return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
}
#else
static inline const char *spectre_v2_module_string(void) { return ""; }
#endif
static inline bool match_option(const char *arg, int arglen, const char *opt)
{
        int len = strlen(opt);

        return len == arglen && !strncmp(arg, opt, len);
}
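/*
 * Worked example: for "spectre_v2=retpoline,generic" on the command
 * line, cmdline_find_option() copies "retpoline,generic" into the arg
 * buffer and returns its length, so match_option(arg, ret,
 * "retpoline,generic") is the entry that matches in the parsers below.
 */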
/* The kernel command line selection for spectre v2 */
enum spectre_v2_mitigation_cmd {
        SPECTRE_V2_CMD_NONE,
        SPECTRE_V2_CMD_AUTO,
        SPECTRE_V2_CMD_FORCE,
        SPECTRE_V2_CMD_RETPOLINE,
        SPECTRE_V2_CMD_RETPOLINE_GENERIC,
        SPECTRE_V2_CMD_RETPOLINE_AMD,
};

enum spectre_v2_user_cmd {
        SPECTRE_V2_USER_CMD_NONE,
        SPECTRE_V2_USER_CMD_AUTO,
        SPECTRE_V2_USER_CMD_FORCE,
        SPECTRE_V2_USER_CMD_PRCTL,
        SPECTRE_V2_USER_CMD_PRCTL_IBPB,
        SPECTRE_V2_USER_CMD_SECCOMP,
        SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
};
static const char * const spectre_v2_user_strings[] = {
        [SPECTRE_V2_USER_NONE]			= "User space: Vulnerable",
        [SPECTRE_V2_USER_STRICT]		= "User space: Mitigation: STIBP protection",
        [SPECTRE_V2_USER_STRICT_PREFERRED]	= "User space: Mitigation: STIBP always-on protection",
        [SPECTRE_V2_USER_PRCTL]			= "User space: Mitigation: STIBP via prctl",
        [SPECTRE_V2_USER_SECCOMP]		= "User space: Mitigation: STIBP via seccomp and prctl",
};
static const struct {
        const char			*option;
        enum spectre_v2_user_cmd	cmd;
        bool				secure;
} v2_user_options[] __initconst = {
        { "auto",		SPECTRE_V2_USER_CMD_AUTO,		false },
        { "off",		SPECTRE_V2_USER_CMD_NONE,		false },
        { "on",			SPECTRE_V2_USER_CMD_FORCE,		true  },
        { "prctl",		SPECTRE_V2_USER_CMD_PRCTL,		false },
        { "prctl,ibpb",		SPECTRE_V2_USER_CMD_PRCTL_IBPB,		false },
        { "seccomp",		SPECTRE_V2_USER_CMD_SECCOMP,		false },
        { "seccomp,ibpb",	SPECTRE_V2_USER_CMD_SECCOMP_IBPB,	false },
};
static void __init spec_v2_user_print_cond(const char *reason, bool secure)
{
        if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
                pr_info("spectre_v2_user=%s forced on command line.\n", reason);
}
static enum spectre_v2_user_cmd __init
spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
{
        char arg[20];
        int ret, i;

        switch (v2_cmd) {
        case SPECTRE_V2_CMD_NONE:
                return SPECTRE_V2_USER_CMD_NONE;
        case SPECTRE_V2_CMD_FORCE:
                return SPECTRE_V2_USER_CMD_FORCE;
        default:
                break;
        }

        ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
                                  arg, sizeof(arg));
        if (ret < 0)
                return SPECTRE_V2_USER_CMD_AUTO;

        for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
                if (match_option(arg, ret, v2_user_options[i].option)) {
                        spec_v2_user_print_cond(v2_user_options[i].option,
                                                v2_user_options[i].secure);
                        return v2_user_options[i].cmd;
                }
        }

        pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg);
        return SPECTRE_V2_USER_CMD_AUTO;
}
static void __init
spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
{
        enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
        bool smt_possible = IS_ENABLED(CONFIG_SMP);
        enum spectre_v2_user_cmd cmd;

        if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
                return;

        if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
            cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
                smt_possible = false;

        cmd = spectre_v2_parse_user_cmdline(v2_cmd);
        switch (cmd) {
        case SPECTRE_V2_USER_CMD_NONE:
                goto set_mode;
        case SPECTRE_V2_USER_CMD_FORCE:
                mode = SPECTRE_V2_USER_STRICT;
                break;
        case SPECTRE_V2_USER_CMD_PRCTL:
        case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
                mode = SPECTRE_V2_USER_PRCTL;
                break;
        case SPECTRE_V2_USER_CMD_AUTO:
        case SPECTRE_V2_USER_CMD_SECCOMP:
        case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
                if (IS_ENABLED(CONFIG_SECCOMP))
                        mode = SPECTRE_V2_USER_SECCOMP;
                else
                        mode = SPECTRE_V2_USER_PRCTL;
                break;
        }

        /* Initialize Indirect Branch Prediction Barrier */
        if (boot_cpu_has(X86_FEATURE_IBPB)) {
                setup_force_cpu_cap(X86_FEATURE_USE_IBPB);

                spectre_v2_user_ibpb = mode;
                switch (cmd) {
                case SPECTRE_V2_USER_CMD_FORCE:
                case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
                case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
                        static_branch_enable(&switch_mm_always_ibpb);
                        spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
                        break;
                case SPECTRE_V2_USER_CMD_PRCTL:
                case SPECTRE_V2_USER_CMD_AUTO:
                case SPECTRE_V2_USER_CMD_SECCOMP:
                        static_branch_enable(&switch_mm_cond_ibpb);
                        break;
                default:
                        break;
                }

                pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
                        static_key_enabled(&switch_mm_always_ibpb) ?
                        "always-on" : "conditional");
        }

        /*
         * If no STIBP, enhanced IBRS is enabled or SMT impossible, STIBP is not
         * required.
         */
        if (!boot_cpu_has(X86_FEATURE_STIBP) ||
            !smt_possible ||
            spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
                return;

        /*
         * At this point, an STIBP mode other than "off" has been set.
         * If STIBP support is not being forced, check if STIBP always-on
         * is preferred.
         */
        if (mode != SPECTRE_V2_USER_STRICT &&
            boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
                mode = SPECTRE_V2_USER_STRICT_PREFERRED;

        spectre_v2_user_stibp = mode;

set_mode:
        pr_info("%s\n", spectre_v2_user_strings[mode]);
}
static const char * const spectre_v2_strings[] = {
        [SPECTRE_V2_NONE]			= "Vulnerable",
        [SPECTRE_V2_RETPOLINE_GENERIC]		= "Mitigation: Full generic retpoline",
        [SPECTRE_V2_RETPOLINE_AMD]		= "Mitigation: Full AMD retpoline",
        [SPECTRE_V2_IBRS_ENHANCED]		= "Mitigation: Enhanced IBRS",
};
static const struct {
        const char			*option;
        enum spectre_v2_mitigation_cmd	cmd;
        bool				secure;
} mitigation_options[] __initconst = {
        { "off",		SPECTRE_V2_CMD_NONE,		  false },
        { "on",			SPECTRE_V2_CMD_FORCE,		  true  },
        { "retpoline",		SPECTRE_V2_CMD_RETPOLINE,	  false },
        { "retpoline,amd",	SPECTRE_V2_CMD_RETPOLINE_AMD,	  false },
        { "retpoline,generic",	SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
        { "auto",		SPECTRE_V2_CMD_AUTO,		  false },
};
static void __init spec_v2_print_cond(const char *reason, bool secure)
{
        if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
                pr_info("%s selected on command line.\n", reason);
}
static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
{
        enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
        char arg[20];
        int ret, i;

        if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") ||
            cpu_mitigations_off())
                return SPECTRE_V2_CMD_NONE;

        ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
        if (ret < 0)
                return SPECTRE_V2_CMD_AUTO;

        for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
                if (!match_option(arg, ret, mitigation_options[i].option))
                        continue;
                cmd = mitigation_options[i].cmd;
                break;
        }

        if (i >= ARRAY_SIZE(mitigation_options)) {
                pr_err("unknown option (%s). Switching to AUTO select\n", arg);
                return SPECTRE_V2_CMD_AUTO;
        }

        if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
             cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
             cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
            !IS_ENABLED(CONFIG_RETPOLINE)) {
                pr_err("%s selected but not compiled in. Switching to AUTO select\n",
                       mitigation_options[i].option);
                return SPECTRE_V2_CMD_AUTO;
        }

        spec_v2_print_cond(mitigation_options[i].option,
                           mitigation_options[i].secure);
        return cmd;
}
static void __init spectre_v2_select_mitigation(void)
{
        enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
        enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;

        /*
         * If the CPU is not affected and the command line mode is NONE or AUTO
         * then nothing to do.
         */
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
            (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
                return;

        switch (cmd) {
        case SPECTRE_V2_CMD_NONE:
                return;

        case SPECTRE_V2_CMD_FORCE:
        case SPECTRE_V2_CMD_AUTO:
                if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
                        mode = SPECTRE_V2_IBRS_ENHANCED;
                        /* Force it so VMEXIT will restore correctly */
                        x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
                        wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
                        goto specv2_set_mode;
                }
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_auto;
                break;
        case SPECTRE_V2_CMD_RETPOLINE_AMD:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_amd;
                break;
        case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_generic;
                break;
        case SPECTRE_V2_CMD_RETPOLINE:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_auto;
                break;
        }
        pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!");
        return;

retpoline_auto:
        if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
            boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
        retpoline_amd:
                if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
                        pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
                        goto retpoline_generic;
                }
                mode = SPECTRE_V2_RETPOLINE_AMD;
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
        } else {
        retpoline_generic:
                mode = SPECTRE_V2_RETPOLINE_GENERIC;
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
        }

specv2_set_mode:
        spectre_v2_enabled = mode;
        pr_info("%s\n", spectre_v2_strings[mode]);

        /*
         * If spectre v2 protection has been enabled, unconditionally fill
         * RSB during a context switch; this protects against two independent
         * issues:
         *
         *	- RSB underflow (and switch to BTB) on Skylake+
         *	- SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
         */
        setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
        pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");

        /*
         * Retpoline means the kernel is safe because it has no indirect
         * branches. Enhanced IBRS protects firmware too, so enable restricted
         * speculation around firmware calls only when Enhanced IBRS isn't
         * supported.
         *
         * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
         * the user might select retpoline on the kernel command line and if
         * the CPU supports Enhanced IBRS, the kernel might unintentionally not
         * enable IBRS around firmware calls.
         */
        if (boot_cpu_has(X86_FEATURE_IBRS) && mode != SPECTRE_V2_IBRS_ENHANCED) {
                setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
                pr_info("Enabling Restricted Speculation for firmware calls\n");
        }

        /* Set up IBPB and STIBP depending on the general spectre V2 command */
        spectre_v2_user_select_mitigation(cmd);
}
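/*
 * The combined result is what e.g.
 * /sys/devices/system/cpu/vulnerabilities/spectre_v2 later reports,
 * along the lines of (illustrative output only):
 *
 *	Mitigation: Full generic retpoline, IBPB: conditional, IBRS_FW,
 *	STIBP: conditional, RSB filling
 */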
static void update_stibp_msr(void * __unused)
{
        wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
}
/* Update x86_spec_ctrl_base in case SMT state changed. */
static void update_stibp_strict(void)
{
        u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;

        if (sched_smt_active())
                mask |= SPEC_CTRL_STIBP;

        if (mask == x86_spec_ctrl_base)
                return;

        pr_info("Update user space SMT mitigation: STIBP %s\n",
                mask & SPEC_CTRL_STIBP ? "always-on" : "off");
        x86_spec_ctrl_base = mask;
        on_each_cpu(update_stibp_msr, NULL, 1);
}
/* Update the static key controlling the evaluation of TIF_SPEC_IB */
static void update_indir_branch_cond(void)
{
        if (sched_smt_active())
                static_branch_enable(&switch_to_cond_stibp);
        else
                static_branch_disable(&switch_to_cond_stibp);
}
#undef pr_fmt
#define pr_fmt(fmt) fmt

/* Update the static key controlling the MDS CPU buffer clear in idle */
static void update_mds_branch_idle(void)
{
        /*
         * Enable the idle clearing if SMT is active on CPUs which are
         * affected only by MSBDS and not any other MDS variant.
         *
         * The other variants cannot be mitigated when SMT is enabled, so
         * clearing the buffers on idle just to prevent the Store Buffer
         * repartitioning leak would be a window dressing exercise.
         */
        if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
                return;

        if (sched_smt_active())
                static_branch_enable(&mds_idle_clear);
        else
                static_branch_disable(&mds_idle_clear);
}
#define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
#define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
void cpu_bugs_smt_update(void)
{
        mutex_lock(&spec_ctrl_mutex);

        switch (spectre_v2_user_stibp) {
        case SPECTRE_V2_USER_NONE:
                break;
        case SPECTRE_V2_USER_STRICT:
        case SPECTRE_V2_USER_STRICT_PREFERRED:
                update_stibp_strict();
                break;
        case SPECTRE_V2_USER_PRCTL:
        case SPECTRE_V2_USER_SECCOMP:
                update_indir_branch_cond();
                break;
        }

        switch (mds_mitigation) {
        case MDS_MITIGATION_FULL:
        case MDS_MITIGATION_VMWERV:
                if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
                        pr_warn_once(MDS_MSG_SMT);
                update_mds_branch_idle();
                break;
        case MDS_MITIGATION_OFF:
                break;
        }

        switch (taa_mitigation) {
        case TAA_MITIGATION_VERW:
        case TAA_MITIGATION_UCODE_NEEDED:
                if (sched_smt_active())
                        pr_warn_once(TAA_MSG_SMT);
                break;
        case TAA_MITIGATION_TSX_DISABLED:
        case TAA_MITIGATION_OFF:
                break;
        }

        mutex_unlock(&spec_ctrl_mutex);
}
#undef pr_fmt
#define pr_fmt(fmt)	"Speculative Store Bypass: " fmt

static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;
/* The kernel command line selection */
enum ssb_mitigation_cmd {
        SPEC_STORE_BYPASS_CMD_NONE,
        SPEC_STORE_BYPASS_CMD_AUTO,
        SPEC_STORE_BYPASS_CMD_ON,
        SPEC_STORE_BYPASS_CMD_PRCTL,
        SPEC_STORE_BYPASS_CMD_SECCOMP,
};

static const char * const ssb_strings[] = {
        [SPEC_STORE_BYPASS_NONE]	= "Vulnerable",
        [SPEC_STORE_BYPASS_DISABLE]	= "Mitigation: Speculative Store Bypass disabled",
        [SPEC_STORE_BYPASS_PRCTL]	= "Mitigation: Speculative Store Bypass disabled via prctl",
        [SPEC_STORE_BYPASS_SECCOMP]	= "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
};
static const struct {
        const char		*option;
        enum ssb_mitigation_cmd	cmd;
} ssb_mitigation_options[] __initconst = {
        { "auto",	SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
        { "on",		SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
        { "off",	SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
        { "prctl",	SPEC_STORE_BYPASS_CMD_PRCTL },   /* Disable Speculative Store Bypass via prctl */
        { "seccomp",	SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
};
static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
{
        enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
        char arg[20];
        int ret, i;

        if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") ||
            cpu_mitigations_off()) {
                return SPEC_STORE_BYPASS_CMD_NONE;
        } else {
                ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
                                          arg, sizeof(arg));
                if (ret < 0)
                        return SPEC_STORE_BYPASS_CMD_AUTO;

                for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
                        if (!match_option(arg, ret, ssb_mitigation_options[i].option))
                                continue;

                        cmd = ssb_mitigation_options[i].cmd;
                        break;
                }

                if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
                        pr_err("unknown option (%s). Switching to AUTO select\n", arg);
                        return SPEC_STORE_BYPASS_CMD_AUTO;
                }
        }

        return cmd;
}
static enum ssb_mitigation __init __ssb_select_mitigation(void)
{
        enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
        enum ssb_mitigation_cmd cmd;

        if (!boot_cpu_has(X86_FEATURE_SSBD))
                return mode;

        cmd = ssb_parse_cmdline();
        if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
            (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
             cmd == SPEC_STORE_BYPASS_CMD_AUTO))
                return mode;

        switch (cmd) {
        case SPEC_STORE_BYPASS_CMD_AUTO:
        case SPEC_STORE_BYPASS_CMD_SECCOMP:
                /*
                 * Choose prctl+seccomp as the default mode if seccomp is
                 * enabled.
                 */
                if (IS_ENABLED(CONFIG_SECCOMP))
                        mode = SPEC_STORE_BYPASS_SECCOMP;
                else
                        mode = SPEC_STORE_BYPASS_PRCTL;
                break;
        case SPEC_STORE_BYPASS_CMD_ON:
                mode = SPEC_STORE_BYPASS_DISABLE;
                break;
        case SPEC_STORE_BYPASS_CMD_PRCTL:
                mode = SPEC_STORE_BYPASS_PRCTL;
                break;
        case SPEC_STORE_BYPASS_CMD_NONE:
                break;
        }

        /*
         * If SSBD is controlled by the SPEC_CTRL MSR, then set the proper
         * bit in the mask to allow guests to use the mitigation even in the
         * case where the host does not enable it.
         */
        if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
            static_cpu_has(X86_FEATURE_AMD_SSBD)) {
                x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
        }

        /*
         * We have three CPU feature flags that are in play here:
         *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
         *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
         *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
         */
        if (mode == SPEC_STORE_BYPASS_DISABLE) {
                setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
                /*
                 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
                 * use a completely different MSR and bit dependent on family.
                 */
                if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
                    !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
                        x86_amd_ssb_disable();
                } else {
                        x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
                        wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
                }
        }

        return mode;
}
static void ssb_select_mitigation(void)
{
        ssb_mode = __ssb_select_mitigation();

        if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
                pr_info("%s\n", ssb_strings[ssb_mode]);
}
#undef pr_fmt
#define pr_fmt(fmt)	"Speculation prctl: " fmt

static void task_update_spec_tif(struct task_struct *tsk)
{
        /* Force the update of the real TIF bits */
        set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);

        /*
         * Immediately update the speculation control MSRs for the current
         * task, but for a non-current task delay setting the CPU
         * mitigation until it is scheduled next.
         *
         * This can only happen for SECCOMP mitigation. For PRCTL it's
         * always the current task.
         */
        if (tsk == current)
                speculation_ctrl_update_current();
}
static int l1d_flush_prctl_set(struct task_struct *task, unsigned long ctrl)
{
        if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
                return -EPERM;

        switch (ctrl) {
        case PR_SPEC_ENABLE:
                set_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
                return 0;
        case PR_SPEC_DISABLE:
                clear_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
                return 0;
        default:
                return -ERANGE;
        }
}
static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
{
        if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
            ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
                return -ENXIO;

        switch (ctrl) {
        case PR_SPEC_ENABLE:
                /* If speculation is force disabled, enable is not allowed */
                if (task_spec_ssb_force_disable(task))
                        return -EPERM;
                task_clear_spec_ssb_disable(task);
                task_clear_spec_ssb_noexec(task);
                task_update_spec_tif(task);
                break;
        case PR_SPEC_DISABLE:
                task_set_spec_ssb_disable(task);
                task_clear_spec_ssb_noexec(task);
                task_update_spec_tif(task);
                break;
        case PR_SPEC_FORCE_DISABLE:
                task_set_spec_ssb_disable(task);
                task_set_spec_ssb_force_disable(task);
                task_clear_spec_ssb_noexec(task);
                task_update_spec_tif(task);
                break;
        case PR_SPEC_DISABLE_NOEXEC:
                if (task_spec_ssb_force_disable(task))
                        return -EPERM;
                task_set_spec_ssb_disable(task);
                task_set_spec_ssb_noexec(task);
                task_update_spec_tif(task);
                break;
        default:
                return -ERANGE;
        }
        return 0;
}
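/*
 * Userspace sketch for the cases above (assuming <sys/prctl.h>;
 * illustrative only):
 *
 *	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *	      PR_SPEC_DISABLE, 0, 0);
 */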
static bool is_spec_ib_user_controlled(void)
{
        return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
                spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
                spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
                spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP;
}
static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
{
        switch (ctrl) {
        case PR_SPEC_ENABLE:
                if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
                    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
                        return 0;

                /*
                 * With strict mode for both IBPB and STIBP, the instruction
                 * code paths avoid checking this task flag and instead,
                 * unconditionally run the instruction. However, STIBP and IBPB
                 * are independent and either can be set to conditionally
                 * enabled regardless of the mode of the other.
                 *
                 * If either is set to conditional, allow the task flag to be
                 * updated, unless it was force-disabled by a previous prctl
                 * call. Currently, this is possible on an AMD CPU which has the
                 * feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. In this case, if the
                 * kernel is booted with 'spectre_v2_user=seccomp', then
                 * spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP and
                 * spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED.
                 */
                if (!is_spec_ib_user_controlled() ||
                    task_spec_ib_force_disable(task))
                        return -EPERM;

                task_clear_spec_ib_disable(task);
                task_update_spec_tif(task);
                break;
        case PR_SPEC_DISABLE:
        case PR_SPEC_FORCE_DISABLE:
                /*
                 * Indirect branch speculation is always allowed when
                 * mitigation is force disabled.
                 */
                if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
                    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
                        return -EPERM;

                if (!is_spec_ib_user_controlled())
                        return 0;

                task_set_spec_ib_disable(task);
                if (ctrl == PR_SPEC_FORCE_DISABLE)
                        task_set_spec_ib_force_disable(task);
                task_update_spec_tif(task);
                break;
        default:
                return -ERANGE;
        }
        return 0;
}
int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
                             unsigned long ctrl)
{
        switch (which) {
        case PR_SPEC_STORE_BYPASS:
                return ssb_prctl_set(task, ctrl);
        case PR_SPEC_INDIRECT_BRANCH:
                return ib_prctl_set(task, ctrl);
        case PR_SPEC_L1D_FLUSH:
                return l1d_flush_prctl_set(task, ctrl);
        default:
                return -ENODEV;
        }
}
#ifdef CONFIG_SECCOMP
void arch_seccomp_spec_mitigate(struct task_struct *task)
{
        if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
                ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
        if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
            spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP)
                ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
}
#endif
static int l1d_flush_prctl_get(struct task_struct *task)
{
        if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
                return PR_SPEC_FORCE_DISABLE;

        if (test_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH))
                return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
        else
                return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
}
static int ssb_prctl_get(struct task_struct *task)
{
        switch (ssb_mode) {
        case SPEC_STORE_BYPASS_DISABLE:
                return PR_SPEC_DISABLE;
        case SPEC_STORE_BYPASS_SECCOMP:
        case SPEC_STORE_BYPASS_PRCTL:
                if (task_spec_ssb_force_disable(task))
                        return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
                if (task_spec_ssb_noexec(task))
                        return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;
                if (task_spec_ssb_disable(task))
                        return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
                return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
        default:
                if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
                        return PR_SPEC_ENABLE;
                return PR_SPEC_NOT_AFFECTED;
        }
}
static int ib_prctl_get(struct task_struct *task)
{
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
                return PR_SPEC_NOT_AFFECTED;

        if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
            spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
                return PR_SPEC_ENABLE;
        else if (is_spec_ib_user_controlled()) {
                if (task_spec_ib_force_disable(task))
                        return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
                if (task_spec_ib_disable(task))
                        return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
                return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
        } else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
            spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
            spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
                return PR_SPEC_DISABLE;
        else
                return PR_SPEC_NOT_AFFECTED;
}
int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
        switch (which) {
        case PR_SPEC_STORE_BYPASS:
                return ssb_prctl_get(task);
        case PR_SPEC_INDIRECT_BRANCH:
                return ib_prctl_get(task);
        case PR_SPEC_L1D_FLUSH:
                return l1d_flush_prctl_get(task);
        default:
                return -ENODEV;
        }
}
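/*
 * The matching read side from userspace (illustrative):
 *
 *	int ret = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *			0, 0, 0);
 *
 * which returns a mask such as PR_SPEC_PRCTL | PR_SPEC_ENABLE.
 */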
void x86_spec_ctrl_setup_ap(void)
{
        if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
                wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

        if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
                x86_amd_ssb_disable();
}

bool itlb_multihit_kvm_mitigation;
EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation);
#undef pr_fmt
#define pr_fmt(fmt)	"L1TF: " fmt

/* Default mitigation for L1TF-affected CPUs */
enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
#if IS_ENABLED(CONFIG_KVM_INTEL)
EXPORT_SYMBOL_GPL(l1tf_mitigation);
#endif
enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
/*
 * These CPUs all support 44bits physical address space internally in the
 * cache but CPUID can report a smaller number of physical address bits.
 *
 * The L1TF mitigation uses the top most address bit for the inversion of
 * non present PTEs. When the installed memory reaches into the top most
 * address bit due to memory holes, which has been observed on machines
 * which report 36bits physical address bits and have 32G RAM installed,
 * then the mitigation range check in l1tf_select_mitigation() triggers.
 * This is a false positive because the mitigation is still possible due to
 * the fact that the cache uses 44bit internally. Use the cache bits
 * instead of the reported physical bits and adjust them on the affected
 * machines to 44bit if the reported bits are less than 44.
 */
static void override_cache_bits(struct cpuinfo_x86 *c)
{
        if (c->x86 != 6)
                return;

        switch (c->x86_model) {
        case INTEL_FAM6_NEHALEM:
        case INTEL_FAM6_WESTMERE:
        case INTEL_FAM6_SANDYBRIDGE:
        case INTEL_FAM6_IVYBRIDGE:
        case INTEL_FAM6_HASWELL:
        case INTEL_FAM6_HASWELL_L:
        case INTEL_FAM6_HASWELL_G:
        case INTEL_FAM6_BROADWELL:
        case INTEL_FAM6_BROADWELL_G:
        case INTEL_FAM6_SKYLAKE_L:
        case INTEL_FAM6_SKYLAKE:
        case INTEL_FAM6_KABYLAKE_L:
        case INTEL_FAM6_KABYLAKE:
                if (c->x86_cache_bits < 44)
                        c->x86_cache_bits = 44;
                break;
        }
}
static void __init l1tf_select_mitigation(void)
{
        u64 half_pa;

        if (!boot_cpu_has_bug(X86_BUG_L1TF))
                return;

        if (cpu_mitigations_off())
                l1tf_mitigation = L1TF_MITIGATION_OFF;
        else if (cpu_mitigations_auto_nosmt())
                l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;

        override_cache_bits(&boot_cpu_data);

        switch (l1tf_mitigation) {
        case L1TF_MITIGATION_OFF:
        case L1TF_MITIGATION_FLUSH_NOWARN:
        case L1TF_MITIGATION_FLUSH:
                break;
        case L1TF_MITIGATION_FLUSH_NOSMT:
        case L1TF_MITIGATION_FULL:
                cpu_smt_disable(false);
                break;
        case L1TF_MITIGATION_FULL_FORCE:
                cpu_smt_disable(true);
                break;
        }

#if CONFIG_PGTABLE_LEVELS == 2
        pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
        return;
#endif

        half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
        if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
            e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
                pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
                pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
                        half_pa);
                pr_info("However, doing so will make a part of your RAM unusable.\n");
                pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n");
                return;
        }

        setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
}
static int __init l1tf_cmdline(char *str)
{
        if (!boot_cpu_has_bug(X86_BUG_L1TF))
                return 0;

        if (!str)
                return -EINVAL;

        if (!strcmp(str, "off"))
                l1tf_mitigation = L1TF_MITIGATION_OFF;
        else if (!strcmp(str, "flush,nowarn"))
                l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
        else if (!strcmp(str, "flush"))
                l1tf_mitigation = L1TF_MITIGATION_FLUSH;
        else if (!strcmp(str, "flush,nosmt"))
                l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
        else if (!strcmp(str, "full"))
                l1tf_mitigation = L1TF_MITIGATION_FULL;
        else if (!strcmp(str, "full,force"))
                l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;

        return 0;
}
early_param("l1tf", l1tf_cmdline);
#undef pr_fmt
#define pr_fmt(fmt) fmt

#ifdef CONFIG_SYSFS

#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"

#if IS_ENABLED(CONFIG_KVM_INTEL)
static const char * const l1tf_vmx_states[] = {
        [VMENTER_L1D_FLUSH_AUTO]		= "auto",
        [VMENTER_L1D_FLUSH_NEVER]		= "vulnerable",
        [VMENTER_L1D_FLUSH_COND]		= "conditional cache flushes",
        [VMENTER_L1D_FLUSH_ALWAYS]		= "cache flushes",
        [VMENTER_L1D_FLUSH_EPT_DISABLED]	= "EPT disabled",
        [VMENTER_L1D_FLUSH_NOT_REQUIRED]	= "flush not necessary"
};
static ssize_t l1tf_show_state(char *buf)
{
        if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
                return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);

        if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
            (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
             sched_smt_active())) {
                return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
                               l1tf_vmx_states[l1tf_vmx_mitigation]);
        }

        return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
                       l1tf_vmx_states[l1tf_vmx_mitigation],
                       sched_smt_active() ? "vulnerable" : "disabled");
}
static ssize_t itlb_multihit_show_state(char *buf)
{
        if (!boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) ||
            !boot_cpu_has(X86_FEATURE_VMX))
                return sprintf(buf, "KVM: Mitigation: VMX unsupported\n");
        else if (!(cr4_read_shadow() & X86_CR4_VMXE))
                return sprintf(buf, "KVM: Mitigation: VMX disabled\n");
        else if (itlb_multihit_kvm_mitigation)
                return sprintf(buf, "KVM: Mitigation: Split huge pages\n");
        else
                return sprintf(buf, "KVM: Vulnerable\n");
}
#else
static ssize_t l1tf_show_state(char *buf)
{
        return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
}

static ssize_t itlb_multihit_show_state(char *buf)
{
        return sprintf(buf, "Processor vulnerable\n");
}
#endif
static ssize_t mds_show_state(char *buf)
{
        if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
                return sprintf(buf, "%s; SMT Host state unknown\n",
                               mds_strings[mds_mitigation]);
        }

        if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) {
                return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
                               (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
                                sched_smt_active() ? "mitigated" : "disabled"));
        }

        return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
                       sched_smt_active() ? "vulnerable" : "disabled");
}
static ssize_t tsx_async_abort_show_state(char *buf)
{
        if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) ||
            (taa_mitigation == TAA_MITIGATION_OFF))
                return sprintf(buf, "%s\n", taa_strings[taa_mitigation]);

        if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
                return sprintf(buf, "%s; SMT Host state unknown\n",
                               taa_strings[taa_mitigation]);
        }

        return sprintf(buf, "%s; SMT %s\n", taa_strings[taa_mitigation],
                       sched_smt_active() ? "vulnerable" : "disabled");
}
static char *stibp_state(void)
{
        if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
                return "";

        switch (spectre_v2_user_stibp) {
        case SPECTRE_V2_USER_NONE:
                return ", STIBP: disabled";
        case SPECTRE_V2_USER_STRICT:
                return ", STIBP: forced";
        case SPECTRE_V2_USER_STRICT_PREFERRED:
                return ", STIBP: always-on";
        case SPECTRE_V2_USER_PRCTL:
        case SPECTRE_V2_USER_SECCOMP:
                if (static_key_enabled(&switch_to_cond_stibp))
                        return ", STIBP: conditional";
        }
        return "";
}
static char *ibpb_state(void)
{
        if (boot_cpu_has(X86_FEATURE_IBPB)) {
                if (static_key_enabled(&switch_mm_always_ibpb))
                        return ", IBPB: always-on";
                if (static_key_enabled(&switch_mm_cond_ibpb))
                        return ", IBPB: conditional";
                return ", IBPB: disabled";
        }
        return "";
}
static ssize_t srbds_show_state(char *buf)
{
        return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]);
}
static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
                               char *buf, unsigned int bug)
{
        if (!boot_cpu_has_bug(bug))
                return sprintf(buf, "Not affected\n");

        switch (bug) {
        case X86_BUG_CPU_MELTDOWN:
                if (boot_cpu_has(X86_FEATURE_PTI))
                        return sprintf(buf, "Mitigation: PTI\n");

                if (hypervisor_is_type(X86_HYPER_XEN_PV))
                        return sprintf(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");

                break;

        case X86_BUG_SPECTRE_V1:
                return sprintf(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);

        case X86_BUG_SPECTRE_V2:
                return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
                               ibpb_state(),
                               boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
                               stibp_state(),
                               boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
                               spectre_v2_module_string());

        case X86_BUG_SPEC_STORE_BYPASS:
                return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);

        case X86_BUG_L1TF:
                if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
                        return l1tf_show_state(buf);
                break;

        case X86_BUG_MDS:
                return mds_show_state(buf);

        case X86_BUG_TAA:
                return tsx_async_abort_show_state(buf);

        case X86_BUG_ITLB_MULTIHIT:
                return itlb_multihit_show_state(buf);

        case X86_BUG_SRBDS:
                return srbds_show_state(buf);

        default:
                break;
        }

        return sprintf(buf, "Vulnerable\n");
}
ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
}

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
}

ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
}

ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
}

ssize_t cpu_show_tsx_async_abort(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_TAA);
}

ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT);
}

ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS);
}
#endif
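/*
 * These hooks back the sysfs vulnerability files; reading them is the
 * usual way to inspect the state selected above, e.g. (illustrative):
 *
 *	$ cat /sys/devices/system/cpu/vulnerabilities/mds
 *	Mitigation: Clear CPU buffers; SMT vulnerable
 */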