// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 1994  Linus Torvalds
 *
 *  Cyrix stuff, June 1998 by:
 *	- Rafael R. Reilova (moved everything from head.S),
 *	  <rreilova@ececs.uc.edu>
 *	- Channing Corn (tests & fixes),
 *	- Andrew D. Balsa (code cleanup).
 */
#include <linux/init.h>
#include <linux/utsname.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched/smt.h>
#include <linux/pgtable.h>
#include <linux/bpf.h>

#include <asm/spec-ctrl.h>
#include <asm/cmdline.h>
#include <asm/bugs.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/fpu/internal.h>
#include <asm/msr.h>
#include <asm/vmx.h>
#include <asm/paravirt.h>
#include <asm/alternative.h>
#include <asm/set_memory.h>
#include <asm/intel-family.h>
#include <asm/e820/api.h>
#include <asm/hypervisor.h>
#include <asm/tlbflush.h>

#include "cpu.h"
static void __init spectre_v1_select_mitigation(void);
static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void);
static void __init mds_select_mitigation(void);
static void __init mds_print_mitigation(void);
static void __init taa_select_mitigation(void);
static void __init srbds_select_mitigation(void);
static void __init l1d_flush_select_mitigation(void);

/* The base value of the SPEC_CTRL MSR that always has to be preserved. */
u64 x86_spec_ctrl_base;
EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
static DEFINE_MUTEX(spec_ctrl_mutex);
/*
 * The vendor and possibly platform specific bits which can be modified in
 * x86_spec_ctrl_base.
 */
static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;

/*
 * AMD specific MSR info for Speculative Store Bypass control.
 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
 */
u64 __ro_after_init x86_amd_ls_cfg_base;
u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;

/* Control conditional STIBP in switch_to() */
DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
/* Control conditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
/* Control unconditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

/* Control MDS CPU buffer clear before returning to user space */
DEFINE_STATIC_KEY_FALSE(mds_user_clear);
EXPORT_SYMBOL_GPL(mds_user_clear);
/* Control MDS CPU buffer clear before idling (halt, mwait) */
DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
EXPORT_SYMBOL_GPL(mds_idle_clear);

/*
 * Controls whether l1d flush based mitigations are enabled,
 * based on hw features and admin setting via boot parameter
 * corresponding to the kernel commandline l1d_flush.
 */
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
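
/*
 * Usage sketch (illustrative only, not part of this file): the static
 * keys above patch the hot paths at runtime, so the disabled case costs
 * a NOP instead of a load and test. A consumer of mds_user_clear looks
 * roughly like the hypothetical helper below; the real gate lives in
 * asm/nospec-branch.h.
 *
 *	static inline void example_user_clear_cpu_buffers(void)
 *	{
 *		if (static_branch_likely(&mds_user_clear))
 *			mds_clear_cpu_buffers();
 *	}
 */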
void __init check_bugs(void)
{
	identify_boot_cpu();

	/*
	 * identify_boot_cpu() initialized SMT support information, let the
	 * core code know.
	 */
	cpu_smt_check_topology();

	if (!IS_ENABLED(CONFIG_SMP)) {
		pr_info("CPU: ");
		print_cpu_info(&boot_cpu_data);
	}

	/*
	 * Read the SPEC_CTRL MSR to account for reserved bits which may
	 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
	 * init code as it is not enumerated and depends on the family.
	 */
	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

	/* Allow STIBP in MSR_SPEC_CTRL if supported */
	if (boot_cpu_has(X86_FEATURE_STIBP))
		x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;

	/* Select the proper CPU mitigations before patching alternatives: */
	spectre_v1_select_mitigation();
	spectre_v2_select_mitigation();
	ssb_select_mitigation();
	l1tf_select_mitigation();
	mds_select_mitigation();
	taa_select_mitigation();
	srbds_select_mitigation();
	l1d_flush_select_mitigation();

	/*
	 * As MDS and TAA mitigations are inter-related, defer printing the
	 * MDS mitigation until after TAA mitigation selection is done.
	 */
	mds_print_mitigation();

	arch_smt_update();

#ifdef CONFIG_X86_32
	/*
	 * Check whether we are able to run this kernel safely on SMP.
	 *
	 * - i386 is no longer supported.
	 * - In order to run on anything without a TSC, we need to be
	 *   compiled for an i486.
	 */
	if (boot_cpu_data.x86 < 4)
		panic("Kernel requires i486+ for 'invlpg' and other features");

	init_utsname()->machine[1] =
		'0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
	alternative_instructions();

	fpu__init_check_bugs();
#else /* CONFIG_X86_64 */
	alternative_instructions();

	/*
	 * Make sure the first 2MB area is not mapped by huge pages.
	 * There are typically fixed size MTRRs in there and overlapping
	 * MTRRs into large pages causes slowdowns.
	 *
	 * Right now we don't do that with gbpages because there seems
	 * very little benefit for that case.
	 */
	if (!direct_gbpages)
		set_memory_4k((unsigned long)__va(0), 1);
#endif
}
void
x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
{
	u64 msrval, guestval, hostval = x86_spec_ctrl_base;
	struct thread_info *ti = current_thread_info();

	/* Is MSR_SPEC_CTRL implemented ? */
	if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
		/*
		 * Restrict guest_spec_ctrl to supported values. Clear the
		 * modifiable bits in the host base value and or the
		 * modifiable bits from the guest value.
		 */
		guestval = hostval & ~x86_spec_ctrl_mask;
		guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;

		/* SSBD controlled in MSR_SPEC_CTRL */
		if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
		    static_cpu_has(X86_FEATURE_AMD_SSBD))
			hostval |= ssbd_tif_to_spec_ctrl(ti->flags);

		/* Conditional STIBP enabled? */
		if (static_branch_unlikely(&switch_to_cond_stibp))
			hostval |= stibp_tif_to_spec_ctrl(ti->flags);

		if (hostval != guestval) {
			msrval = setguest ? guestval : hostval;
			wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
		}
	}

	/*
	 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
	 * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
	 */
	if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
	    !static_cpu_has(X86_FEATURE_VIRT_SSBD))
		return;

	/*
	 * If the host has SSBD mitigation enabled, force it in the host's
	 * virtual MSR value. If it's not permanently enabled, evaluate
	 * current's TIF_SSBD thread flag.
	 */
	if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
		hostval = SPEC_CTRL_SSBD;
	else
		hostval = ssbd_tif_to_spec_ctrl(ti->flags);

	/* Sanitize the guest value */
	guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;

	if (hostval != guestval) {
		unsigned long tif;

		tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
				 ssbd_spec_ctrl_to_tif(hostval);

		speculation_ctrl_update(tif);
	}
}
EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
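
/*
 * Usage sketch (illustrative; the exact call sites live in KVM, e.g.
 * under arch/x86/kvm/svm/): a hypervisor brackets guest entry and exit
 * so that the guest's and the host's SPEC_CTRL policies are both
 * honoured:
 *
 *	x86_virt_spec_ctrl(svm->spec_ctrl, svm->virt_spec_ctrl, true);
 *	// ... enter and run the guest ...
 *	x86_virt_spec_ctrl(svm->spec_ctrl, svm->virt_spec_ctrl, false);
 */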
static void x86_amd_ssb_disable(void)
{
	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;

	if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
		wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
	else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
		wrmsrl(MSR_AMD64_LS_CFG, msrval);
}

#undef pr_fmt
#define pr_fmt(fmt)	"MDS: " fmt

/* Default mitigation for MDS-affected CPUs */
static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL;
static bool mds_nosmt __ro_after_init = false;

static const char * const mds_strings[] = {
	[MDS_MITIGATION_OFF]	= "Vulnerable",
	[MDS_MITIGATION_FULL]	= "Mitigation: Clear CPU buffers",
	[MDS_MITIGATION_VMWERV]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
};
static void __init mds_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) {
		mds_mitigation = MDS_MITIGATION_OFF;
		return;
	}

	if (mds_mitigation == MDS_MITIGATION_FULL) {
		if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
			mds_mitigation = MDS_MITIGATION_VMWERV;

		static_branch_enable(&mds_user_clear);

		if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
		    (mds_nosmt || cpu_mitigations_auto_nosmt()))
			cpu_smt_disable(false);
	}
}

static void __init mds_print_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off())
		return;

	pr_info("%s\n", mds_strings[mds_mitigation]);
}

static int __init mds_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_MDS))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off"))
		mds_mitigation = MDS_MITIGATION_OFF;
	else if (!strcmp(str, "full"))
		mds_mitigation = MDS_MITIGATION_FULL;
	else if (!strcmp(str, "full,nosmt")) {
		mds_mitigation = MDS_MITIGATION_FULL;
		mds_nosmt = true;
	}

	return 0;
}
early_param("mds", mds_cmdline);
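
/*
 * Boot command line examples (illustrative): the handler above is wired
 * up via early_param(), so the MDS mode is chosen at boot time, e.g.:
 *
 *	mds=off          - leave the CPU vulnerable
 *	mds=full         - clear CPU buffers on kernel exit (the default)
 *	mds=full,nosmt   - additionally disable SMT
 */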
#undef pr_fmt
#define pr_fmt(fmt)	"TAA: " fmt

enum taa_mitigations {
	TAA_MITIGATION_OFF,
	TAA_MITIGATION_UCODE_NEEDED,
	TAA_MITIGATION_VERW,
	TAA_MITIGATION_TSX_DISABLED,
};

/* Default mitigation for TAA-affected CPUs */
static enum taa_mitigations taa_mitigation __ro_after_init = TAA_MITIGATION_VERW;
static bool taa_nosmt __ro_after_init;

static const char * const taa_strings[] = {
	[TAA_MITIGATION_OFF]		= "Vulnerable",
	[TAA_MITIGATION_UCODE_NEEDED]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
	[TAA_MITIGATION_VERW]		= "Mitigation: Clear CPU buffers",
	[TAA_MITIGATION_TSX_DISABLED]	= "Mitigation: TSX disabled",
};
static void __init taa_select_mitigation(void)
{
	u64 ia32_cap;

	if (!boot_cpu_has_bug(X86_BUG_TAA)) {
		taa_mitigation = TAA_MITIGATION_OFF;
		return;
	}

	/* TSX previously disabled by tsx=off */
	if (!boot_cpu_has(X86_FEATURE_RTM)) {
		taa_mitigation = TAA_MITIGATION_TSX_DISABLED;
		goto out;
	}

	if (cpu_mitigations_off()) {
		taa_mitigation = TAA_MITIGATION_OFF;
		return;
	}

	/*
	 * TAA mitigation via VERW is turned off if both
	 * tsx_async_abort=off and mds=off are specified.
	 */
	if (taa_mitigation == TAA_MITIGATION_OFF &&
	    mds_mitigation == MDS_MITIGATION_OFF)
		goto out;

	if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
		taa_mitigation = TAA_MITIGATION_VERW;
	else
		taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;

	/*
	 * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1.
	 * A microcode update fixes this behavior to clear CPU buffers. It also
	 * adds support for MSR_IA32_TSX_CTRL which is enumerated by the
	 * ARCH_CAP_TSX_CTRL_MSR bit.
	 *
	 * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
	 * update is required.
	 */
	ia32_cap = x86_read_arch_cap_msr();
	if ( (ia32_cap & ARCH_CAP_MDS_NO) &&
	    !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR))
		taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;

	/*
	 * TSX is enabled, select alternate mitigation for TAA which is
	 * the same as MDS. Enable MDS static branch to clear CPU buffers.
	 *
	 * For guests that can't determine whether the correct microcode is
	 * present on host, enable the mitigation for UCODE_NEEDED as well.
	 */
	static_branch_enable(&mds_user_clear);

	if (taa_nosmt || cpu_mitigations_auto_nosmt())
		cpu_smt_disable(false);

	/*
	 * Update MDS mitigation, if necessary, as the mds_user_clear is
	 * now enabled for TAA mitigation.
	 */
	if (mds_mitigation == MDS_MITIGATION_OFF &&
	    boot_cpu_has_bug(X86_BUG_MDS)) {
		mds_mitigation = MDS_MITIGATION_FULL;
		mds_select_mitigation();
	}
out:
	pr_info("%s\n", taa_strings[taa_mitigation]);
}
static int __init tsx_async_abort_parse_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_TAA))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off")) {
		taa_mitigation = TAA_MITIGATION_OFF;
	} else if (!strcmp(str, "full")) {
		taa_mitigation = TAA_MITIGATION_VERW;
	} else if (!strcmp(str, "full,nosmt")) {
		taa_mitigation = TAA_MITIGATION_VERW;
		taa_nosmt = true;
	}

	return 0;
}
early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);
#undef pr_fmt
#define pr_fmt(fmt)	"SRBDS: " fmt

enum srbds_mitigations {
	SRBDS_MITIGATION_OFF,
	SRBDS_MITIGATION_UCODE_NEEDED,
	SRBDS_MITIGATION_FULL,
	SRBDS_MITIGATION_TSX_OFF,
	SRBDS_MITIGATION_HYPERVISOR,
};

static enum srbds_mitigations srbds_mitigation __ro_after_init = SRBDS_MITIGATION_FULL;

static const char * const srbds_strings[] = {
	[SRBDS_MITIGATION_OFF]		= "Vulnerable",
	[SRBDS_MITIGATION_UCODE_NEEDED]	= "Vulnerable: No microcode",
	[SRBDS_MITIGATION_FULL]		= "Mitigation: Microcode",
	[SRBDS_MITIGATION_TSX_OFF]	= "Mitigation: TSX disabled",
	[SRBDS_MITIGATION_HYPERVISOR]	= "Unknown: Dependent on hypervisor status",
};

static bool srbds_off;
void update_srbds_msr(void)
{
	u64 mcu_ctrl;

	if (!boot_cpu_has_bug(X86_BUG_SRBDS))
		return;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return;

	if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED)
		return;

	rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);

	switch (srbds_mitigation) {
	case SRBDS_MITIGATION_OFF:
	case SRBDS_MITIGATION_TSX_OFF:
		mcu_ctrl |= RNGDS_MITG_DIS;
		break;
	case SRBDS_MITIGATION_FULL:
		mcu_ctrl &= ~RNGDS_MITG_DIS;
		break;
	default:
		break;
	}

	wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
}
static void __init srbds_select_mitigation(void)
{
	u64 ia32_cap;

	if (!boot_cpu_has_bug(X86_BUG_SRBDS))
		return;

	/*
	 * Check to see if this is one of the MDS_NO systems supporting
	 * TSX that are only exposed to SRBDS when TSX is enabled.
	 */
	ia32_cap = x86_read_arch_cap_msr();
	if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM))
		srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
	else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR;
	else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
		srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED;
	else if (cpu_mitigations_off() || srbds_off)
		srbds_mitigation = SRBDS_MITIGATION_OFF;

	update_srbds_msr();
	pr_info("%s\n", srbds_strings[srbds_mitigation]);
}
static int __init srbds_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!boot_cpu_has_bug(X86_BUG_SRBDS))
		return 0;

	srbds_off = !strcmp(str, "off");
	return 0;
}
early_param("srbds", srbds_parse_cmdline);
#undef pr_fmt
#define pr_fmt(fmt)	"L1D Flush : " fmt

enum l1d_flush_mitigations {
	L1D_FLUSH_OFF = 0,
	L1D_FLUSH_ON,
};

static enum l1d_flush_mitigations l1d_flush_mitigation __initdata = L1D_FLUSH_OFF;

static void __init l1d_flush_select_mitigation(void)
{
	if (!l1d_flush_mitigation || !boot_cpu_has(X86_FEATURE_FLUSH_L1D))
		return;

	static_branch_enable(&switch_mm_cond_l1d_flush);
	pr_info("Conditional flush on switch_mm() enabled\n");
}

static int __init l1d_flush_parse_cmdline(char *str)
{
	if (!strcmp(str, "on"))
		l1d_flush_mitigation = L1D_FLUSH_ON;

	return 0;
}
early_param("l1d_flush", l1d_flush_parse_cmdline);
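
/*
 * Example (illustrative): l1d_flush=on only arms the static key; a task
 * still has to opt in via prctl() before switch_mm() will flush the L1D
 * for it:
 *
 *	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_L1D_FLUSH,
 *	      PR_SPEC_ENABLE, 0, 0);
 */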
#undef pr_fmt
#define pr_fmt(fmt)	"Spectre V1 : " fmt

enum spectre_v1_mitigation {
	SPECTRE_V1_MITIGATION_NONE,
	SPECTRE_V1_MITIGATION_AUTO,
};

static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init =
	SPECTRE_V1_MITIGATION_AUTO;

static const char * const spectre_v1_strings[] = {
	[SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers",
	[SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization",
};

/*
 * Does SMAP provide full mitigation against speculative kernel access to
 * user pages?
 */
static bool smap_works_speculatively(void)
{
	if (!boot_cpu_has(X86_FEATURE_SMAP))
		return false;

	/*
	 * On CPUs which are vulnerable to Meltdown, SMAP does not
	 * prevent speculative access to user data in the L1 cache.
	 * Consider SMAP to be non-functional as a mitigation on these
	 * CPUs.
	 */
	if (boot_cpu_has(X86_BUG_CPU_MELTDOWN))
		return false;

	return true;
}
static void __init spectre_v1_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off()) {
		spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
		return;
	}

	if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
		/*
		 * With Spectre v1, a user can speculatively control either
		 * path of a conditional swapgs with a user-controlled GS
		 * value. The mitigation is to add lfences to both code paths.
		 *
		 * If FSGSBASE is enabled, the user can put a kernel address in
		 * GS, in which case SMAP provides no protection.
		 *
		 * If FSGSBASE is disabled, the user can only put a user space
		 * address in GS. That makes an attack harder, but still
		 * possible if there's no SMAP protection.
		 */
		if (boot_cpu_has(X86_FEATURE_FSGSBASE) ||
		    !smap_works_speculatively()) {
			/*
			 * Mitigation can be provided from SWAPGS itself or
			 * PTI as the CR3 write in the Meltdown mitigation
			 * is serializing.
			 *
			 * If neither is there, mitigate with an LFENCE to
			 * stop speculation through swapgs.
			 */
			if (boot_cpu_has_bug(X86_BUG_SWAPGS) &&
			    !boot_cpu_has(X86_FEATURE_PTI))
				setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);

			/*
			 * Enable lfences in the kernel entry (non-swapgs)
			 * paths, to prevent user entry from speculatively
			 * skipping swapgs.
			 */
			setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
		}
	}

	pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]);
}

static int __init nospectre_v1_cmdline(char *str)
{
	spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
	return 0;
}
early_param("nospectre_v1", nospectre_v1_cmdline);
#undef pr_fmt
#define pr_fmt(fmt)	"Spectre V2 : " fmt

static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
	SPECTRE_V2_NONE;

static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
	SPECTRE_V2_USER_NONE;
static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
	SPECTRE_V2_USER_NONE;

#ifdef CONFIG_RETPOLINE
static bool spectre_v2_bad_module;

bool retpoline_module_ok(bool has_retpoline)
{
	if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
		return true;

	pr_err("System may be vulnerable to spectre v2\n");
	spectre_v2_bad_module = true;
	return false;
}

static inline const char *spectre_v2_module_string(void)
{
	return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
}
#else
static inline const char *spectre_v2_module_string(void) { return ""; }
#endif

#define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n"
#define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n"
#define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n"
#ifdef CONFIG_BPF_SYSCALL
void unpriv_ebpf_notify(int new_state)
{
	if (new_state)
		return;

	/* Unprivileged eBPF is enabled */
	switch (spectre_v2_enabled) {
	case SPECTRE_V2_EIBRS:
		pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
		break;
	case SPECTRE_V2_EIBRS_LFENCE:
		if (sched_smt_active())
			pr_err(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
		break;
	default:
		break;
	}
}
#endif

static inline bool match_option(const char *arg, int arglen, const char *opt)
{
	int len = strlen(opt);

	return len == arglen && !strncmp(arg, opt, len);
}
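
/*
 * Usage sketch (illustrative): match_option() compares a command line
 * token of known length, as returned by cmdline_find_option(), against a
 * candidate option string:
 *
 *	ret = cmdline_find_option(boot_command_line, "spectre_v2",
 *				  arg, sizeof(arg));
 *	if (ret >= 0 && match_option(arg, ret, "retpoline"))
 *		// "spectre_v2=retpoline" was given
 */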
/* The kernel command line selection for spectre v2 */
enum spectre_v2_mitigation_cmd {
	SPECTRE_V2_CMD_NONE,
	SPECTRE_V2_CMD_AUTO,
	SPECTRE_V2_CMD_FORCE,
	SPECTRE_V2_CMD_RETPOLINE,
	SPECTRE_V2_CMD_RETPOLINE_GENERIC,
	SPECTRE_V2_CMD_RETPOLINE_LFENCE,
	SPECTRE_V2_CMD_EIBRS,
	SPECTRE_V2_CMD_EIBRS_RETPOLINE,
	SPECTRE_V2_CMD_EIBRS_LFENCE,
};

enum spectre_v2_user_cmd {
	SPECTRE_V2_USER_CMD_NONE,
	SPECTRE_V2_USER_CMD_AUTO,
	SPECTRE_V2_USER_CMD_FORCE,
	SPECTRE_V2_USER_CMD_PRCTL,
	SPECTRE_V2_USER_CMD_PRCTL_IBPB,
	SPECTRE_V2_USER_CMD_SECCOMP,
	SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
};

static const char * const spectre_v2_user_strings[] = {
	[SPECTRE_V2_USER_NONE]			= "User space: Vulnerable",
	[SPECTRE_V2_USER_STRICT]		= "User space: Mitigation: STIBP protection",
	[SPECTRE_V2_USER_STRICT_PREFERRED]	= "User space: Mitigation: STIBP always-on protection",
	[SPECTRE_V2_USER_PRCTL]			= "User space: Mitigation: STIBP via prctl",
	[SPECTRE_V2_USER_SECCOMP]		= "User space: Mitigation: STIBP via seccomp and prctl",
};

static const struct {
	const char			*option;
	enum spectre_v2_user_cmd	cmd;
	bool				secure;
} v2_user_options[] __initconst = {
	{ "auto",		SPECTRE_V2_USER_CMD_AUTO,		false },
	{ "off",		SPECTRE_V2_USER_CMD_NONE,		false },
	{ "on",			SPECTRE_V2_USER_CMD_FORCE,		true  },
	{ "prctl",		SPECTRE_V2_USER_CMD_PRCTL,		false },
	{ "prctl,ibpb",		SPECTRE_V2_USER_CMD_PRCTL_IBPB,		false },
	{ "seccomp",		SPECTRE_V2_USER_CMD_SECCOMP,		false },
	{ "seccomp,ibpb",	SPECTRE_V2_USER_CMD_SECCOMP_IBPB,	false },
};
static void __init spec_v2_user_print_cond(const char *reason, bool secure)
{
	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
		pr_info("spectre_v2_user=%s forced on command line.\n", reason);
}

static enum spectre_v2_user_cmd __init
spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
{
	char arg[20];
	int ret, i;

	switch (v2_cmd) {
	case SPECTRE_V2_CMD_NONE:
		return SPECTRE_V2_USER_CMD_NONE;
	case SPECTRE_V2_CMD_FORCE:
		return SPECTRE_V2_USER_CMD_FORCE;
	default:
		break;
	}

	ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
				  arg, sizeof(arg));
	if (ret < 0)
		return SPECTRE_V2_USER_CMD_AUTO;

	for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
		if (match_option(arg, ret, v2_user_options[i].option)) {
			spec_v2_user_print_cond(v2_user_options[i].option,
						v2_user_options[i].secure);
			return v2_user_options[i].cmd;
		}
	}

	pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg);
	return SPECTRE_V2_USER_CMD_AUTO;
}

static inline bool spectre_v2_in_eibrs_mode(enum spectre_v2_mitigation mode)
{
	return (mode == SPECTRE_V2_EIBRS ||
		mode == SPECTRE_V2_EIBRS_RETPOLINE ||
		mode == SPECTRE_V2_EIBRS_LFENCE);
}
static void __init
spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
{
	enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
	bool smt_possible = IS_ENABLED(CONFIG_SMP);
	enum spectre_v2_user_cmd cmd;

	if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
		return;

	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
	    cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
		smt_possible = false;

	cmd = spectre_v2_parse_user_cmdline(v2_cmd);
	switch (cmd) {
	case SPECTRE_V2_USER_CMD_NONE:
		goto set_mode;
	case SPECTRE_V2_USER_CMD_FORCE:
		mode = SPECTRE_V2_USER_STRICT;
		break;
	case SPECTRE_V2_USER_CMD_PRCTL:
	case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
		mode = SPECTRE_V2_USER_PRCTL;
		break;
	case SPECTRE_V2_USER_CMD_AUTO:
	case SPECTRE_V2_USER_CMD_SECCOMP:
	case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
		if (IS_ENABLED(CONFIG_SECCOMP))
			mode = SPECTRE_V2_USER_SECCOMP;
		else
			mode = SPECTRE_V2_USER_PRCTL;
		break;
	}

	/* Initialize Indirect Branch Prediction Barrier */
	if (boot_cpu_has(X86_FEATURE_IBPB)) {
		setup_force_cpu_cap(X86_FEATURE_USE_IBPB);

		spectre_v2_user_ibpb = mode;
		switch (cmd) {
		case SPECTRE_V2_USER_CMD_FORCE:
		case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
		case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
			static_branch_enable(&switch_mm_always_ibpb);
			spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
			break;
		case SPECTRE_V2_USER_CMD_PRCTL:
		case SPECTRE_V2_USER_CMD_AUTO:
		case SPECTRE_V2_USER_CMD_SECCOMP:
			static_branch_enable(&switch_mm_cond_ibpb);
			break;
		default:
			break;
		}

		pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
			static_key_enabled(&switch_mm_always_ibpb) ?
			"always-on" : "conditional");
	}

	/*
	 * If no STIBP, enhanced IBRS is enabled or SMT impossible, STIBP is not
	 * required.
	 */
	if (!boot_cpu_has(X86_FEATURE_STIBP) ||
	    !smt_possible ||
	    spectre_v2_in_eibrs_mode(spectre_v2_enabled))
		return;

	/*
	 * At this point, an STIBP mode other than "off" has been set.
	 * If STIBP support is not being forced, check if STIBP always-on
	 * is preferred.
	 */
	if (mode != SPECTRE_V2_USER_STRICT &&
	    boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
		mode = SPECTRE_V2_USER_STRICT_PREFERRED;

set_mode:
	spectre_v2_user_stibp = mode;
	/* Only print the STIBP mode when SMT possible */
	if (smt_possible)
		pr_info("%s\n", spectre_v2_user_strings[mode]);
}
static const char * const spectre_v2_strings[] = {
	[SPECTRE_V2_NONE]			= "Vulnerable",
	[SPECTRE_V2_RETPOLINE]			= "Mitigation: Retpolines",
	[SPECTRE_V2_LFENCE]			= "Mitigation: LFENCE",
	[SPECTRE_V2_EIBRS]			= "Mitigation: Enhanced IBRS",
	[SPECTRE_V2_EIBRS_LFENCE]		= "Mitigation: Enhanced IBRS + LFENCE",
	[SPECTRE_V2_EIBRS_RETPOLINE]		= "Mitigation: Enhanced IBRS + Retpolines",
};

static const struct {
	const char *option;
	enum spectre_v2_mitigation_cmd cmd;
	bool secure;
} mitigation_options[] __initconst = {
	{ "off",		SPECTRE_V2_CMD_NONE,		  false },
	{ "on",			SPECTRE_V2_CMD_FORCE,		  true  },
	{ "retpoline",		SPECTRE_V2_CMD_RETPOLINE,	  false },
	{ "retpoline,amd",	SPECTRE_V2_CMD_RETPOLINE_LFENCE,  false },
	{ "retpoline,lfence",	SPECTRE_V2_CMD_RETPOLINE_LFENCE,  false },
	{ "retpoline,generic",	SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
	{ "eibrs",		SPECTRE_V2_CMD_EIBRS,		  false },
	{ "eibrs,lfence",	SPECTRE_V2_CMD_EIBRS_LFENCE,	  false },
	{ "eibrs,retpoline",	SPECTRE_V2_CMD_EIBRS_RETPOLINE,	  false },
	{ "auto",		SPECTRE_V2_CMD_AUTO,		  false },
};

static void __init spec_v2_print_cond(const char *reason, bool secure)
{
	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
		pr_info("%s selected on command line.\n", reason);
}
static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
{
	enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
	char arg[20];
	int ret, i;

	if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") ||
	    cpu_mitigations_off())
		return SPECTRE_V2_CMD_NONE;

	ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
	if (ret < 0)
		return SPECTRE_V2_CMD_AUTO;

	for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
		if (!match_option(arg, ret, mitigation_options[i].option))
			continue;
		cmd = mitigation_options[i].cmd;
		break;
	}

	if (i >= ARRAY_SIZE(mitigation_options)) {
		pr_err("unknown option (%s). Switching to AUTO select\n", arg);
		return SPECTRE_V2_CMD_AUTO;
	}

	if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
	     cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
	     cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC ||
	     cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
	     cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
	    !IS_ENABLED(CONFIG_RETPOLINE)) {
		pr_err("%s selected but not compiled in. Switching to AUTO select\n",
		       mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	if ((cmd == SPECTRE_V2_CMD_EIBRS ||
	     cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
	     cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
	    !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
		pr_err("%s selected but CPU doesn't have eIBRS. Switching to AUTO select\n",
		       mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	if ((cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
	     cmd == SPECTRE_V2_CMD_EIBRS_LFENCE) &&
	    !boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
		pr_err("%s selected, but CPU doesn't have a serializing LFENCE. Switching to AUTO select\n",
		       mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	spec_v2_print_cond(mitigation_options[i].option,
			   mitigation_options[i].secure);
	return cmd;
}

static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
{
	if (!IS_ENABLED(CONFIG_RETPOLINE)) {
		pr_err("Kernel not compiled with retpoline; no mitigation available!");
		return SPECTRE_V2_NONE;
	}

	return SPECTRE_V2_RETPOLINE;
}
static void __init spectre_v2_select_mitigation(void)
{
	enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
	enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;

	/*
	 * If the CPU is not affected and the command line mode is NONE or AUTO
	 * then nothing to do.
	 */
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
	    (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
		return;

	switch (cmd) {
	case SPECTRE_V2_CMD_NONE:
		return;

	case SPECTRE_V2_CMD_FORCE:
	case SPECTRE_V2_CMD_AUTO:
		if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
			mode = SPECTRE_V2_EIBRS;
			break;
		}

		mode = spectre_v2_select_retpoline();
		break;

	case SPECTRE_V2_CMD_RETPOLINE_LFENCE:
		pr_err(SPECTRE_V2_LFENCE_MSG);
		mode = SPECTRE_V2_LFENCE;
		break;

	case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
		mode = SPECTRE_V2_RETPOLINE;
		break;

	case SPECTRE_V2_CMD_RETPOLINE:
		mode = spectre_v2_select_retpoline();
		break;

	case SPECTRE_V2_CMD_EIBRS:
		mode = SPECTRE_V2_EIBRS;
		break;

	case SPECTRE_V2_CMD_EIBRS_LFENCE:
		mode = SPECTRE_V2_EIBRS_LFENCE;
		break;

	case SPECTRE_V2_CMD_EIBRS_RETPOLINE:
		mode = SPECTRE_V2_EIBRS_RETPOLINE;
		break;
	}

	if (mode == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
		pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);

	if (spectre_v2_in_eibrs_mode(mode)) {
		/* Force it so VMEXIT will restore correctly */
		x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
		wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
	}

	switch (mode) {
	case SPECTRE_V2_NONE:
	case SPECTRE_V2_EIBRS:
		break;

	case SPECTRE_V2_LFENCE:
	case SPECTRE_V2_EIBRS_LFENCE:
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE);
		fallthrough;

	case SPECTRE_V2_RETPOLINE:
	case SPECTRE_V2_EIBRS_RETPOLINE:
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
		break;
	}

	spectre_v2_enabled = mode;
	pr_info("%s\n", spectre_v2_strings[mode]);

	/*
	 * If spectre v2 protection has been enabled, unconditionally fill
	 * RSB during a context switch; this protects against two independent
	 * issues:
	 *
	 *	- RSB underflow (and switch to BTB) on Skylake+
	 *	- SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
	 */
	setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
	pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");

	/*
	 * Retpoline means the kernel is safe because it has no indirect
	 * branches. Enhanced IBRS protects firmware too, so, enable restricted
	 * speculation around firmware calls only when Enhanced IBRS isn't
	 * supported.
	 *
	 * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
	 * the user might select retpoline on the kernel command line and if
	 * the CPU supports Enhanced IBRS, kernel might unintentionally not
	 * enable IBRS around firmware calls.
	 */
	if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_eibrs_mode(mode)) {
		setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
		pr_info("Enabling Restricted Speculation for firmware calls\n");
	}

	/* Set up IBPB and STIBP depending on the general spectre V2 command */
	spectre_v2_user_select_mitigation(cmd);
}
static void update_stibp_msr(void * __unused)
{
	wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
}

/* Update x86_spec_ctrl_base in case SMT state changed. */
static void update_stibp_strict(void)
{
	u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;

	if (sched_smt_active())
		mask |= SPEC_CTRL_STIBP;

	if (mask == x86_spec_ctrl_base)
		return;

	pr_info("Update user space SMT mitigation: STIBP %s\n",
		mask & SPEC_CTRL_STIBP ? "always-on" : "off");
	x86_spec_ctrl_base = mask;
	on_each_cpu(update_stibp_msr, NULL, 1);
}

/* Update the static key controlling the evaluation of TIF_SPEC_IB */
static void update_indir_branch_cond(void)
{
	if (sched_smt_active())
		static_branch_enable(&switch_to_cond_stibp);
	else
		static_branch_disable(&switch_to_cond_stibp);
}

#undef pr_fmt
#define pr_fmt(fmt) fmt

/* Update the static key controlling the MDS CPU buffer clear in idle */
static void update_mds_branch_idle(void)
{
	/*
	 * Enable the idle clearing if SMT is active on CPUs which are
	 * affected only by MSBDS and not any other MDS variant.
	 *
	 * The other variants cannot be mitigated when SMT is enabled, so
	 * clearing the buffers on idle just to prevent the Store Buffer
	 * repartitioning leak would be a window dressing exercise.
	 */
	if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
		return;

	if (sched_smt_active())
		static_branch_enable(&mds_idle_clear);
	else
		static_branch_disable(&mds_idle_clear);
}

#define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
#define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
void cpu_bugs_smt_update(void)
{
	mutex_lock(&spec_ctrl_mutex);

	if (sched_smt_active() && unprivileged_ebpf_enabled() &&
	    spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
		pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);

	switch (spectre_v2_user_stibp) {
	case SPECTRE_V2_USER_NONE:
		break;
	case SPECTRE_V2_USER_STRICT:
	case SPECTRE_V2_USER_STRICT_PREFERRED:
		update_stibp_strict();
		break;
	case SPECTRE_V2_USER_PRCTL:
	case SPECTRE_V2_USER_SECCOMP:
		update_indir_branch_cond();
		break;
	}

	switch (mds_mitigation) {
	case MDS_MITIGATION_FULL:
	case MDS_MITIGATION_VMWERV:
		if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
			pr_warn_once(MDS_MSG_SMT);
		update_mds_branch_idle();
		break;
	case MDS_MITIGATION_OFF:
		break;
	}

	switch (taa_mitigation) {
	case TAA_MITIGATION_VERW:
	case TAA_MITIGATION_UCODE_NEEDED:
		if (sched_smt_active())
			pr_warn_once(TAA_MSG_SMT);
		break;
	case TAA_MITIGATION_TSX_DISABLED:
	case TAA_MITIGATION_OFF:
		break;
	}

	mutex_unlock(&spec_ctrl_mutex);
}
#undef pr_fmt
#define pr_fmt(fmt)	"Speculative Store Bypass: " fmt

static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;

/* The kernel command line selection */
enum ssb_mitigation_cmd {
	SPEC_STORE_BYPASS_CMD_NONE,
	SPEC_STORE_BYPASS_CMD_AUTO,
	SPEC_STORE_BYPASS_CMD_ON,
	SPEC_STORE_BYPASS_CMD_PRCTL,
	SPEC_STORE_BYPASS_CMD_SECCOMP,
};

static const char * const ssb_strings[] = {
	[SPEC_STORE_BYPASS_NONE]	= "Vulnerable",
	[SPEC_STORE_BYPASS_DISABLE]	= "Mitigation: Speculative Store Bypass disabled",
	[SPEC_STORE_BYPASS_PRCTL]	= "Mitigation: Speculative Store Bypass disabled via prctl",
	[SPEC_STORE_BYPASS_SECCOMP]	= "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
};

static const struct {
	const char *option;
	enum ssb_mitigation_cmd cmd;
} ssb_mitigation_options[] __initconst = {
	{ "auto",	SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
	{ "on",		SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
	{ "off",	SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
	{ "prctl",	SPEC_STORE_BYPASS_CMD_PRCTL },   /* Disable Speculative Store Bypass via prctl */
	{ "seccomp",	SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
};
static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
{
	enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
	char arg[20];
	int ret, i;

	if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") ||
	    cpu_mitigations_off()) {
		return SPEC_STORE_BYPASS_CMD_NONE;
	} else {
		ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
					  arg, sizeof(arg));
		if (ret < 0)
			return SPEC_STORE_BYPASS_CMD_AUTO;

		for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
			if (!match_option(arg, ret, ssb_mitigation_options[i].option))
				continue;

			cmd = ssb_mitigation_options[i].cmd;
			break;
		}

		if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
			pr_err("unknown option (%s). Switching to AUTO select\n", arg);
			return SPEC_STORE_BYPASS_CMD_AUTO;
		}
	}

	return cmd;
}

static enum ssb_mitigation __init __ssb_select_mitigation(void)
{
	enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
	enum ssb_mitigation_cmd cmd;

	if (!boot_cpu_has(X86_FEATURE_SSBD))
		return mode;

	cmd = ssb_parse_cmdline();
	if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
	    (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
	     cmd == SPEC_STORE_BYPASS_CMD_AUTO))
		return mode;

	switch (cmd) {
	case SPEC_STORE_BYPASS_CMD_AUTO:
	case SPEC_STORE_BYPASS_CMD_SECCOMP:
		/*
		 * Choose prctl+seccomp as the default mode if seccomp is
		 * enabled.
		 */
		if (IS_ENABLED(CONFIG_SECCOMP))
			mode = SPEC_STORE_BYPASS_SECCOMP;
		else
			mode = SPEC_STORE_BYPASS_PRCTL;
		break;
	case SPEC_STORE_BYPASS_CMD_ON:
		mode = SPEC_STORE_BYPASS_DISABLE;
		break;
	case SPEC_STORE_BYPASS_CMD_PRCTL:
		mode = SPEC_STORE_BYPASS_PRCTL;
		break;
	case SPEC_STORE_BYPASS_CMD_NONE:
		break;
	}

	/*
	 * If SSBD is controlled by the SPEC_CTRL MSR, then set the proper
	 * bit in the mask to allow guests to use the mitigation even in the
	 * case where the host does not enable it.
	 */
	if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
	    static_cpu_has(X86_FEATURE_AMD_SSBD)) {
		x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
	}

	/*
	 * We have three CPU feature flags that are in play here:
	 *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
	 *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
	 *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
	 */
	if (mode == SPEC_STORE_BYPASS_DISABLE) {
		setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
		/*
		 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
		 * use a completely different MSR and bit dependent on family.
		 */
		if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
		    !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
			x86_amd_ssb_disable();
		} else {
			x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
			wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
		}
	}

	return mode;
}
static void ssb_select_mitigation(void)
{
	ssb_mode = __ssb_select_mitigation();

	if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
		pr_info("%s\n", ssb_strings[ssb_mode]);
}

#undef pr_fmt
#define pr_fmt(fmt)	"Speculation prctl: " fmt

static void task_update_spec_tif(struct task_struct *tsk)
{
	/* Force the update of the real TIF bits */
	set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);

	/*
	 * Immediately update the speculation control MSRs for the current
	 * task, but for a non-current task delay setting the CPU
	 * mitigation until it is scheduled next.
	 *
	 * This can only happen for SECCOMP mitigation. For PRCTL it's
	 * always the current task.
	 */
	if (tsk == current)
		speculation_ctrl_update_current();
}

static int l1d_flush_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
		return -EPERM;

	switch (ctrl) {
	case PR_SPEC_ENABLE:
		set_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
		return 0;
	case PR_SPEC_DISABLE:
		clear_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
		return 0;
	default:
		return -ERANGE;
	}
}
static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
	    ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
		return -ENXIO;

	switch (ctrl) {
	case PR_SPEC_ENABLE:
		/* If speculation is force disabled, enable is not allowed */
		if (task_spec_ssb_force_disable(task))
			return -EPERM;
		task_clear_spec_ssb_disable(task);
		task_clear_spec_ssb_noexec(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_DISABLE:
		task_set_spec_ssb_disable(task);
		task_clear_spec_ssb_noexec(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_FORCE_DISABLE:
		task_set_spec_ssb_disable(task);
		task_set_spec_ssb_force_disable(task);
		task_clear_spec_ssb_noexec(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_DISABLE_NOEXEC:
		if (task_spec_ssb_force_disable(task))
			return -EPERM;
		task_set_spec_ssb_disable(task);
		task_set_spec_ssb_noexec(task);
		task_update_spec_tif(task);
		break;
	default:
		return -ERANGE;
	}
	return 0;
}
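
/*
 * Userspace view (illustrative): the SSB prctl above is reached through
 * PR_SET_SPECULATION_CTRL, e.g. a sandbox disabling speculative store
 * bypass for itself before running untrusted code:
 *
 *	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *	      PR_SPEC_DISABLE, 0, 0);
 */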
static bool is_spec_ib_user_controlled(void)
{
	return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
		spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
		spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
		spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP;
}

static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	switch (ctrl) {
	case PR_SPEC_ENABLE:
		if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
		    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
			return 0;

		/*
		 * With strict mode for both IBPB and STIBP, the instruction
		 * code paths avoid checking this task flag and instead,
		 * unconditionally run the instruction. However, STIBP and IBPB
		 * are independent and either can be set to conditionally
		 * enabled regardless of the mode of the other.
		 *
		 * If either is set to conditional, allow the task flag to be
		 * updated, unless it was force-disabled by a previous prctl
		 * call. Currently, this is possible on an AMD CPU which has the
		 * feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. In this case, if the
		 * kernel is booted with 'spectre_v2_user=seccomp', then
		 * spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP and
		 * spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED.
		 */
		if (!is_spec_ib_user_controlled() ||
		    task_spec_ib_force_disable(task))
			return -EPERM;

		task_clear_spec_ib_disable(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_DISABLE:
	case PR_SPEC_FORCE_DISABLE:
		/*
		 * Indirect branch speculation is always allowed when
		 * mitigation is force disabled.
		 */
		if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
		    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
			return -EPERM;

		if (!is_spec_ib_user_controlled())
			return 0;

		task_set_spec_ib_disable(task);
		if (ctrl == PR_SPEC_FORCE_DISABLE)
			task_set_spec_ib_force_disable(task);
		task_update_spec_tif(task);
		break;
	default:
		return -ERANGE;
	}
	return 0;
}
int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
			     unsigned long ctrl)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssb_prctl_set(task, ctrl);
	case PR_SPEC_INDIRECT_BRANCH:
		return ib_prctl_set(task, ctrl);
	case PR_SPEC_L1D_FLUSH:
		return l1d_flush_prctl_set(task, ctrl);
	default:
		return -ENODEV;
	}
}

#ifdef CONFIG_SECCOMP
void arch_seccomp_spec_mitigate(struct task_struct *task)
{
	if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
		ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
	if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
	    spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP)
		ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
}
#endif
static int l1d_flush_prctl_get(struct task_struct *task)
{
	if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
		return PR_SPEC_FORCE_DISABLE;

	if (test_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH))
		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
	else
		return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
}

static int ssb_prctl_get(struct task_struct *task)
{
	switch (ssb_mode) {
	case SPEC_STORE_BYPASS_DISABLE:
		return PR_SPEC_DISABLE;
	case SPEC_STORE_BYPASS_SECCOMP:
	case SPEC_STORE_BYPASS_PRCTL:
		if (task_spec_ssb_force_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
		if (task_spec_ssb_noexec(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;
		if (task_spec_ssb_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
	default:
		if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
			return PR_SPEC_ENABLE;
		return PR_SPEC_NOT_AFFECTED;
	}
}
static int ib_prctl_get(struct task_struct *task)
{
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
		return PR_SPEC_NOT_AFFECTED;

	if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
	    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
		return PR_SPEC_ENABLE;
	else if (is_spec_ib_user_controlled()) {
		if (task_spec_ib_force_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
		if (task_spec_ib_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
	} else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
	    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
	    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
		return PR_SPEC_DISABLE;
	else
		return PR_SPEC_NOT_AFFECTED;
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssb_prctl_get(task);
	case PR_SPEC_INDIRECT_BRANCH:
		return ib_prctl_get(task);
	case PR_SPEC_L1D_FLUSH:
		return l1d_flush_prctl_get(task);
	default:
		return -ENODEV;
	}
}
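
/*
 * Userspace view (illustrative): the matching query side. A return value
 * of PR_SPEC_PRCTL | PR_SPEC_ENABLE means the control is available and
 * the mitigation is currently off for the task:
 *
 *	int state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *			  0, 0, 0);
 */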
void x86_spec_ctrl_setup_ap(void)
{
	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
		wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
		x86_amd_ssb_disable();
}

bool itlb_multihit_kvm_mitigation;
EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation);

#undef pr_fmt
#define pr_fmt(fmt)	"L1TF: " fmt

/* Default mitigation for L1TF-affected CPUs */
enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
#if IS_ENABLED(CONFIG_KVM_INTEL)
EXPORT_SYMBOL_GPL(l1tf_mitigation);
#endif
enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);

/*
 * These CPUs all support 44bits physical address space internally in the
 * cache but CPUID can report a smaller number of physical address bits.
 *
 * The L1TF mitigation uses the top most address bit for the inversion of
 * non present PTEs. When the installed memory reaches into the top most
 * address bit due to memory holes, which has been observed on machines
 * which report 36bits physical address bits and have 32G RAM installed,
 * then the mitigation range check in l1tf_select_mitigation() triggers.
 * This is a false positive because the mitigation is still possible due to
 * the fact that the cache uses 44bit internally. Use the cache bits
 * instead of the reported physical bits and adjust them on the affected
 * machines to 44bit if the reported bits are less than 44.
 */
static void override_cache_bits(struct cpuinfo_x86 *c)
{
	if (c->x86 != 6)
		return;

	switch (c->x86_model) {
	case INTEL_FAM6_NEHALEM:
	case INTEL_FAM6_WESTMERE:
	case INTEL_FAM6_SANDYBRIDGE:
	case INTEL_FAM6_IVYBRIDGE:
	case INTEL_FAM6_HASWELL:
	case INTEL_FAM6_HASWELL_L:
	case INTEL_FAM6_HASWELL_G:
	case INTEL_FAM6_BROADWELL:
	case INTEL_FAM6_BROADWELL_G:
	case INTEL_FAM6_SKYLAKE_L:
	case INTEL_FAM6_SKYLAKE:
	case INTEL_FAM6_KABYLAKE_L:
	case INTEL_FAM6_KABYLAKE:
		if (c->x86_cache_bits < 44)
			c->x86_cache_bits = 44;
		break;
	}
}
static void __init l1tf_select_mitigation(void)
{
	u64 half_pa;

	if (!boot_cpu_has_bug(X86_BUG_L1TF))
		return;

	if (cpu_mitigations_off())
		l1tf_mitigation = L1TF_MITIGATION_OFF;
	else if (cpu_mitigations_auto_nosmt())
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;

	override_cache_bits(&boot_cpu_data);

	switch (l1tf_mitigation) {
	case L1TF_MITIGATION_OFF:
	case L1TF_MITIGATION_FLUSH_NOWARN:
	case L1TF_MITIGATION_FLUSH:
		break;
	case L1TF_MITIGATION_FLUSH_NOSMT:
	case L1TF_MITIGATION_FULL:
		cpu_smt_disable(false);
		break;
	case L1TF_MITIGATION_FULL_FORCE:
		cpu_smt_disable(true);
		break;
	}

#if CONFIG_PGTABLE_LEVELS == 2
	pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
	return;
#endif

	half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
	if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
			e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
		pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
		pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
				half_pa);
		pr_info("However, doing so will make a part of your RAM unusable.\n");
		pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n");
		return;
	}

	setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
}

static int __init l1tf_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_L1TF))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off"))
		l1tf_mitigation = L1TF_MITIGATION_OFF;
	else if (!strcmp(str, "flush,nowarn"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
	else if (!strcmp(str, "flush"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH;
	else if (!strcmp(str, "flush,nosmt"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
	else if (!strcmp(str, "full"))
		l1tf_mitigation = L1TF_MITIGATION_FULL;
	else if (!strcmp(str, "full,force"))
		l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;

	return 0;
}
early_param("l1tf", l1tf_cmdline);
#undef pr_fmt
#define pr_fmt(fmt) fmt

#ifdef CONFIG_SYSFS

#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"

#if IS_ENABLED(CONFIG_KVM_INTEL)
static const char * const l1tf_vmx_states[] = {
	[VMENTER_L1D_FLUSH_AUTO]		= "auto",
	[VMENTER_L1D_FLUSH_NEVER]		= "vulnerable",
	[VMENTER_L1D_FLUSH_COND]		= "conditional cache flushes",
	[VMENTER_L1D_FLUSH_ALWAYS]		= "cache flushes",
	[VMENTER_L1D_FLUSH_EPT_DISABLED]	= "EPT disabled",
	[VMENTER_L1D_FLUSH_NOT_REQUIRED]	= "flush not necessary"
};

static ssize_t l1tf_show_state(char *buf)
{
	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
		return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);

	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
	    (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
	     sched_smt_active())) {
		return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
			       l1tf_vmx_states[l1tf_vmx_mitigation]);
	}

	return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
		       l1tf_vmx_states[l1tf_vmx_mitigation],
		       sched_smt_active() ? "vulnerable" : "disabled");
}
static ssize_t itlb_multihit_show_state(char *buf)
{
	if (!boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) ||
	    !boot_cpu_has(X86_FEATURE_VMX))
		return sprintf(buf, "KVM: Mitigation: VMX unsupported\n");
	else if (!(cr4_read_shadow() & X86_CR4_VMXE))
		return sprintf(buf, "KVM: Mitigation: VMX disabled\n");
	else if (itlb_multihit_kvm_mitigation)
		return sprintf(buf, "KVM: Mitigation: Split huge pages\n");
	else
		return sprintf(buf, "KVM: Vulnerable\n");
}
#else
static ssize_t l1tf_show_state(char *buf)
{
	return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
}

static ssize_t itlb_multihit_show_state(char *buf)
{
	return sprintf(buf, "Processor vulnerable\n");
}
#endif
static ssize_t mds_show_state(char *buf)
{
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
		return sprintf(buf, "%s; SMT Host state unknown\n",
			       mds_strings[mds_mitigation]);
	}

	if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) {
		return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
			       (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
			        sched_smt_active() ? "mitigated" : "disabled"));
	}

	return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
		       sched_smt_active() ? "vulnerable" : "disabled");
}

static ssize_t tsx_async_abort_show_state(char *buf)
{
	if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) ||
	    (taa_mitigation == TAA_MITIGATION_OFF))
		return sprintf(buf, "%s\n", taa_strings[taa_mitigation]);

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
		return sprintf(buf, "%s; SMT Host state unknown\n",
			       taa_strings[taa_mitigation]);
	}

	return sprintf(buf, "%s; SMT %s\n", taa_strings[taa_mitigation],
		       sched_smt_active() ? "vulnerable" : "disabled");
}
static char *stibp_state(void)
{
	if (spectre_v2_in_eibrs_mode(spectre_v2_enabled))
		return "";

	switch (spectre_v2_user_stibp) {
	case SPECTRE_V2_USER_NONE:
		return ", STIBP: disabled";
	case SPECTRE_V2_USER_STRICT:
		return ", STIBP: forced";
	case SPECTRE_V2_USER_STRICT_PREFERRED:
		return ", STIBP: always-on";
	case SPECTRE_V2_USER_PRCTL:
	case SPECTRE_V2_USER_SECCOMP:
		if (static_key_enabled(&switch_to_cond_stibp))
			return ", STIBP: conditional";
	}
	return "";
}

static char *ibpb_state(void)
{
	if (boot_cpu_has(X86_FEATURE_IBPB)) {
		if (static_key_enabled(&switch_mm_always_ibpb))
			return ", IBPB: always-on";
		if (static_key_enabled(&switch_mm_cond_ibpb))
			return ", IBPB: conditional";
		return ", IBPB: disabled";
	}
	return "";
}

static ssize_t spectre_v2_show_state(char *buf)
{
	if (spectre_v2_enabled == SPECTRE_V2_LFENCE)
		return sprintf(buf, "Vulnerable: LFENCE\n");

	if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
		return sprintf(buf, "Vulnerable: eIBRS with unprivileged eBPF\n");

	if (sched_smt_active() && unprivileged_ebpf_enabled() &&
	    spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
		return sprintf(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");

	return sprintf(buf, "%s%s%s%s%s%s\n",
		       spectre_v2_strings[spectre_v2_enabled],
		       ibpb_state(),
		       boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
		       stibp_state(),
		       boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
		       spectre_v2_module_string());
}
static ssize_t srbds_show_state(char *buf)
{
	return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]);
}

static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
			       char *buf, unsigned int bug)
{
	if (!boot_cpu_has_bug(bug))
		return sprintf(buf, "Not affected\n");

	switch (bug) {
	case X86_BUG_CPU_MELTDOWN:
		if (boot_cpu_has(X86_FEATURE_PTI))
			return sprintf(buf, "Mitigation: PTI\n");

		if (hypervisor_is_type(X86_HYPER_XEN_PV))
			return sprintf(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");

		break;

	case X86_BUG_SPECTRE_V1:
		return sprintf(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);

	case X86_BUG_SPECTRE_V2:
		return spectre_v2_show_state(buf);

	case X86_BUG_SPEC_STORE_BYPASS:
		return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);

	case X86_BUG_L1TF:
		if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
			return l1tf_show_state(buf);
		break;

	case X86_BUG_MDS:
		return mds_show_state(buf);

	case X86_BUG_TAA:
		return tsx_async_abort_show_state(buf);

	case X86_BUG_ITLB_MULTIHIT:
		return itlb_multihit_show_state(buf);

	case X86_BUG_SRBDS:
		return srbds_show_state(buf);

	default:
		break;
	}

	return sprintf(buf, "Vulnerable\n");
}
ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
}

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
}

ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
}

ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
}

ssize_t cpu_show_tsx_async_abort(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_TAA);
}

ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT);
}

ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS);
}
#endif
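
/*
 * The cpu_show_*() hooks above back the sysfs vulnerability files, so the
 * runtime mitigation state can be inspected with, e.g. (illustrative
 * sample output, it varies by CPU and configuration):
 *
 *	$ grep . /sys/devices/system/cpu/vulnerabilities/*
 *	/sys/devices/system/cpu/vulnerabilities/meltdown:Mitigation: PTI
 *	...
 */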