// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Cyrix stuff, June 1998 by:
 *	- Rafael R. Reilova (moved everything from head.S),
 *	  <rreilova@ececs.uc.edu>
 *	- Channing Corn (tests & fixes),
 *	- Andrew D. Balsa (code cleanup).
 */
#include <linux/init.h>
#include <linux/utsname.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched/smt.h>
#include <linux/pgtable.h>
#include <linux/bpf.h>

#include <asm/spec-ctrl.h>
#include <asm/cmdline.h>
#include <asm/bugs.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/fpu/api.h>
#include <asm/msr.h>
#include <asm/vmx.h>
#include <asm/paravirt.h>
#include <asm/alternative.h>
#include <asm/set_memory.h>
#include <asm/intel-family.h>
#include <asm/e820/api.h>
#include <asm/hypervisor.h>
#include <asm/tlbflush.h>

#include "cpu.h"
static void __init spectre_v1_select_mitigation(void);
static void __init spectre_v2_select_mitigation(void);
static void __init retbleed_select_mitigation(void);
static void __init spectre_v2_user_select_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void);
static void __init mds_select_mitigation(void);
static void __init md_clear_update_mitigation(void);
static void __init md_clear_select_mitigation(void);
static void __init taa_select_mitigation(void);
static void __init mmio_select_mitigation(void);
static void __init srbds_select_mitigation(void);
static void __init l1d_flush_select_mitigation(void);
/* The base value of the SPEC_CTRL MSR without task-specific bits set */
u64 x86_spec_ctrl_base;
EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);

/* The current value of the SPEC_CTRL MSR with task-specific bits set */
DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
EXPORT_SYMBOL_GPL(x86_spec_ctrl_current);

static DEFINE_MUTEX(spec_ctrl_mutex);
/*
 * Keep track of the SPEC_CTRL MSR value for the current task, which may differ
 * from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update().
 */
void write_spec_ctrl_current(u64 val, bool force)
{
	if (this_cpu_read(x86_spec_ctrl_current) == val)
		return;

	this_cpu_write(x86_spec_ctrl_current, val);

	/*
	 * When KERNEL_IBRS this MSR is written on return-to-user, unless
	 * forced the update can be delayed until that time.
	 */
	if (force || !cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS))
		wrmsrl(MSR_IA32_SPEC_CTRL, val);
}
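
/*
 * Illustrative usage sketch (not part of the original file): on a context
 * switch, __speculation_ctrl_update() computes the SPEC_CTRL value for the
 * incoming task and calls write_spec_ctrl_current(val, false).  The per-CPU
 * cache above turns redundant updates into a cheap compare, and with
 * KERNEL_IBRS the expensive wrmsrl() is deferred to the return-to-user path
 * unless the caller forces it.
 */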

u64 spec_ctrl_current(void)
{
	return this_cpu_read(x86_spec_ctrl_current);
}
EXPORT_SYMBOL_GPL(spec_ctrl_current);

/*
 * The vendor and possibly platform specific bits which can be modified in
 * x86_spec_ctrl_base.
 */
static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;

/*
 * AMD specific MSR info for Speculative Store Bypass control.
 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
 */
u64 __ro_after_init x86_amd_ls_cfg_base;
u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;

/* Control conditional STIBP in switch_to() */
DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
/* Control conditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
/* Control unconditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

/* Control MDS CPU buffer clear before returning to user space */
DEFINE_STATIC_KEY_FALSE(mds_user_clear);
EXPORT_SYMBOL_GPL(mds_user_clear);
/* Control MDS CPU buffer clear before idling (halt, mwait) */
DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
EXPORT_SYMBOL_GPL(mds_idle_clear);

/*
 * Controls whether l1d flush based mitigations are enabled,
 * based on hw features and admin setting via boot parameter
 * defaulting to 0.
 */
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);

/* Controls CPU Fill buffer clear before KVM guest MMIO accesses */
DEFINE_STATIC_KEY_FALSE(mmio_stale_data_clear);
EXPORT_SYMBOL_GPL(mmio_stale_data_clear);

void __init check_bugs(void)
{
	identify_boot_cpu();

	/*
	 * identify_boot_cpu() initialized SMT support information, let the
	 * core code know.
	 */
	cpu_smt_check_topology();

	if (!IS_ENABLED(CONFIG_SMP)) {
		pr_info("CPU: ");
		print_cpu_info(&boot_cpu_data);
	}

	/*
	 * Read the SPEC_CTRL MSR to account for reserved bits which may
	 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
	 * init code as it is not enumerated and depends on the family.
	 */
	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

	/* Allow STIBP in MSR_SPEC_CTRL if supported */
	if (boot_cpu_has(X86_FEATURE_STIBP))
		x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;

	/* Select the proper CPU mitigations before patching alternatives: */
	spectre_v1_select_mitigation();
	spectre_v2_select_mitigation();
	/*
	 * retbleed_select_mitigation() relies on the state set by
	 * spectre_v2_select_mitigation(); specifically it wants to know about
	 * spectre_v2=ibrs.
	 */
	retbleed_select_mitigation();
	/*
	 * spectre_v2_user_select_mitigation() relies on the state set by
	 * retbleed_select_mitigation(); specifically the STIBP selection is
	 * forced for UNRET.
	 */
	spectre_v2_user_select_mitigation();
	ssb_select_mitigation();
	l1tf_select_mitigation();
	md_clear_select_mitigation();
	srbds_select_mitigation();
	l1d_flush_select_mitigation();

	arch_smt_update();

#ifdef CONFIG_X86_32
	/*
	 * Check whether we are able to run this kernel safely on SMP.
	 *
	 * - i386 is no longer supported.
	 * - In order to run on anything without a TSC, we need to be
	 *   compiled for a i486.
	 */
	if (boot_cpu_data.x86 < 4)
		panic("Kernel requires i486+ for 'invlpg' and other features");

	init_utsname()->machine[1] =
		'0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
	alternative_instructions();

	fpu__init_check_bugs();
#else /* CONFIG_X86_64 */
	alternative_instructions();

	/*
	 * Make sure the first 2MB area is not mapped by huge pages
	 * There are typically fixed size MTRRs in there and overlapping
	 * MTRRs into large pages causes slow downs.
	 *
	 * Right now we don't do that with gbpages because there seems
	 * very little benefit for that case.
	 */
	if (!direct_gbpages)
		set_memory_4k((unsigned long)__va(0), 1);
#endif
}

void
x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
{
	u64 msrval, guestval, hostval = x86_spec_ctrl_base;
	struct thread_info *ti = current_thread_info();

	/* Is MSR_SPEC_CTRL implemented ? */
	if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
		/*
		 * Restrict guest_spec_ctrl to supported values. Clear the
		 * modifiable bits in the host base value and or the
		 * modifiable bits from the guest value.
		 */
		guestval = hostval & ~x86_spec_ctrl_mask;
		guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;

		/* SSBD controlled in MSR_SPEC_CTRL */
		if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
		    static_cpu_has(X86_FEATURE_AMD_SSBD))
			hostval |= ssbd_tif_to_spec_ctrl(ti->flags);

		/* Conditional STIBP enabled? */
		if (static_branch_unlikely(&switch_to_cond_stibp))
			hostval |= stibp_tif_to_spec_ctrl(ti->flags);

		if (hostval != guestval) {
			msrval = setguest ? guestval : hostval;
			wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
		}
	}

	/*
	 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
	 * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
	 */
	if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
	    !static_cpu_has(X86_FEATURE_VIRT_SSBD))
		return;

	/*
	 * If the host has SSBD mitigation enabled, force it in the host's
	 * virtual MSR value. If it's not permanently enabled, evaluate
	 * current's TIF_SSBD thread flag.
	 */
	if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
		hostval = SPEC_CTRL_SSBD;
	else
		hostval = ssbd_tif_to_spec_ctrl(ti->flags);

	/* Sanitize the guest value */
	guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;

	if (hostval != guestval) {
		unsigned long tif;

		tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
				 ssbd_spec_ctrl_to_tif(hostval);

		speculation_ctrl_update(tif);
	}
}
EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
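
/*
 * Worked example (illustrative, not from the original source): with
 * x86_spec_ctrl_base = SPEC_CTRL_IBRS and
 * x86_spec_ctrl_mask = SPEC_CTRL_IBRS | SPEC_CTRL_STIBP, a guest requesting
 * guest_spec_ctrl = SPEC_CTRL_STIBP yields
 * guestval = (hostval & ~mask) | (SPEC_CTRL_STIBP & mask) = SPEC_CTRL_STIBP,
 * i.e. the guest can only flip bits the host declared modifiable.
 */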

static void x86_amd_ssb_disable(void)
{
	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;

	if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
		wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
	else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
		wrmsrl(MSR_AMD64_LS_CFG, msrval);
}
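
/*
 * Note (added for clarity): VIRT_SPEC_CTRL is checked first because it is
 * the architected, vendor-abstracted interface a hypervisor can expose,
 * whereas the LS_CFG bit layout is family dependent (see
 * x86_amd_ls_cfg_ssbd_mask above).
 */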

#undef pr_fmt
#define pr_fmt(fmt)	"MDS: " fmt

/* Default mitigation for MDS-affected CPUs */
static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL;
static bool mds_nosmt __ro_after_init = false;

static const char * const mds_strings[] = {
	[MDS_MITIGATION_OFF]	= "Vulnerable",
	[MDS_MITIGATION_FULL]	= "Mitigation: Clear CPU buffers",
	[MDS_MITIGATION_VMWERV]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
};

static void __init mds_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) {
		mds_mitigation = MDS_MITIGATION_OFF;
		return;
	}

	if (mds_mitigation == MDS_MITIGATION_FULL) {
		if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
			mds_mitigation = MDS_MITIGATION_VMWERV;

		static_branch_enable(&mds_user_clear);

		if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
		    (mds_nosmt || cpu_mitigations_auto_nosmt()))
			cpu_smt_disable(false);
	}
}

static int __init mds_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_MDS))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off"))
		mds_mitigation = MDS_MITIGATION_OFF;
	else if (!strcmp(str, "full"))
		mds_mitigation = MDS_MITIGATION_FULL;
	else if (!strcmp(str, "full,nosmt")) {
		mds_mitigation = MDS_MITIGATION_FULL;
		mds_nosmt = true;
	}

	return 0;
}
early_param("mds", mds_cmdline);
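
/*
 * Example (illustrative): booting with "mds=full,nosmt" selects
 * MDS_MITIGATION_FULL and additionally disables SMT, unless the CPU is only
 * affected by MSBDS (see mds_select_mitigation() above).
 */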

#undef pr_fmt
#define pr_fmt(fmt)	"TAA: " fmt

enum taa_mitigations {
	TAA_MITIGATION_OFF,
	TAA_MITIGATION_UCODE_NEEDED,
	TAA_MITIGATION_VERW,
	TAA_MITIGATION_TSX_DISABLED,
};

/* Default mitigation for TAA-affected CPUs */
static enum taa_mitigations taa_mitigation __ro_after_init = TAA_MITIGATION_VERW;
static bool taa_nosmt __ro_after_init;

static const char * const taa_strings[] = {
	[TAA_MITIGATION_OFF]		= "Vulnerable",
	[TAA_MITIGATION_UCODE_NEEDED]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
	[TAA_MITIGATION_VERW]		= "Mitigation: Clear CPU buffers",
	[TAA_MITIGATION_TSX_DISABLED]	= "Mitigation: TSX disabled",
};

static void __init taa_select_mitigation(void)
{
	u64 ia32_cap;

	if (!boot_cpu_has_bug(X86_BUG_TAA)) {
		taa_mitigation = TAA_MITIGATION_OFF;
		return;
	}

	/* TSX previously disabled by tsx=off */
	if (!boot_cpu_has(X86_FEATURE_RTM)) {
		taa_mitigation = TAA_MITIGATION_TSX_DISABLED;
		return;
	}

	if (cpu_mitigations_off()) {
		taa_mitigation = TAA_MITIGATION_OFF;
		return;
	}

	/*
	 * TAA mitigation via VERW is turned off if both
	 * tsx_async_abort=off and mds=off are specified.
	 */
	if (taa_mitigation == TAA_MITIGATION_OFF &&
	    mds_mitigation == MDS_MITIGATION_OFF)
		return;

	if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
		taa_mitigation = TAA_MITIGATION_VERW;
	else
		taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;

	/*
	 * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1.
	 * A microcode update fixes this behavior to clear CPU buffers. It also
	 * adds support for MSR_IA32_TSX_CTRL which is enumerated by the
	 * ARCH_CAP_TSX_CTRL_MSR bit.
	 *
	 * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
	 * update is required.
	 */
	ia32_cap = x86_read_arch_cap_msr();
	if ((ia32_cap & ARCH_CAP_MDS_NO) &&
	    !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR))
		taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;

	/*
	 * TSX is enabled, select alternate mitigation for TAA which is
	 * the same as MDS. Enable MDS static branch to clear CPU buffers.
	 *
	 * For guests that can't determine whether the correct microcode is
	 * present on host, enable the mitigation for UCODE_NEEDED as well.
	 */
	static_branch_enable(&mds_user_clear);

	if (taa_nosmt || cpu_mitigations_auto_nosmt())
		cpu_smt_disable(false);
}

static int __init tsx_async_abort_parse_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_TAA))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off")) {
		taa_mitigation = TAA_MITIGATION_OFF;
	} else if (!strcmp(str, "full")) {
		taa_mitigation = TAA_MITIGATION_VERW;
	} else if (!strcmp(str, "full,nosmt")) {
		taa_mitigation = TAA_MITIGATION_VERW;
		taa_nosmt = true;
	}

	return 0;
}
early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);
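
/*
 * Example (illustrative): "tsx_async_abort=full,nosmt" keeps TSX enabled,
 * selects the VERW based buffer clearing and disables SMT.  "off" only takes
 * full effect when mds=off is also given, as enforced in
 * taa_select_mitigation() above.
 */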

#undef pr_fmt
#define pr_fmt(fmt)	"MMIO Stale Data: " fmt

enum mmio_mitigations {
	MMIO_MITIGATION_OFF,
	MMIO_MITIGATION_UCODE_NEEDED,
	MMIO_MITIGATION_VERW,
};

/* Default mitigation for Processor MMIO Stale Data vulnerabilities */
static enum mmio_mitigations mmio_mitigation __ro_after_init = MMIO_MITIGATION_VERW;
static bool mmio_nosmt __ro_after_init = false;

static const char * const mmio_strings[] = {
	[MMIO_MITIGATION_OFF]		= "Vulnerable",
	[MMIO_MITIGATION_UCODE_NEEDED]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
	[MMIO_MITIGATION_VERW]		= "Mitigation: Clear CPU buffers",
};

static void __init mmio_select_mitigation(void)
{
	u64 ia32_cap;

	if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
	    cpu_mitigations_off()) {
		mmio_mitigation = MMIO_MITIGATION_OFF;
		return;
	}

	if (mmio_mitigation == MMIO_MITIGATION_OFF)
		return;

	ia32_cap = x86_read_arch_cap_msr();

	/*
	 * Enable CPU buffer clear mitigation for host and VMM, if also affected
	 * by MDS or TAA. Otherwise, enable mitigation for VMM only.
	 */
	if (boot_cpu_has_bug(X86_BUG_MDS) || (boot_cpu_has_bug(X86_BUG_TAA) &&
					      boot_cpu_has(X86_FEATURE_RTM)))
		static_branch_enable(&mds_user_clear);
	else
		static_branch_enable(&mmio_stale_data_clear);

	/*
	 * If Processor-MMIO-Stale-Data bug is present and Fill Buffer data can
	 * be propagated to uncore buffers, clearing the Fill buffers on idle
	 * is required irrespective of SMT state.
	 */
	if (!(ia32_cap & ARCH_CAP_FBSDP_NO))
		static_branch_enable(&mds_idle_clear);

	/*
	 * Check if the system has the right microcode.
	 *
	 * CPU Fill buffer clear mitigation is enumerated by either an explicit
	 * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
	 * affected systems.
	 */
	if ((ia32_cap & ARCH_CAP_FB_CLEAR) ||
	    (boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
	     boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
	     !(ia32_cap & ARCH_CAP_MDS_NO)))
		mmio_mitigation = MMIO_MITIGATION_VERW;
	else
		mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;

	if (mmio_nosmt || cpu_mitigations_auto_nosmt())
		cpu_smt_disable(false);
}
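
/*
 * Illustrative decision matrix for the microcode check above: FB_CLEAR
 * enumerated -> VERW works; no FB_CLEAR but MD_CLEAR + L1D_FLUSH on an
 * MDS-affected (!MDS_NO) part -> VERW works via the older MDS microcode;
 * anything else -> UCODE_NEEDED, and the state is reported as vulnerable.
 */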

static int __init mmio_stale_data_parse_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off")) {
		mmio_mitigation = MMIO_MITIGATION_OFF;
	} else if (!strcmp(str, "full")) {
		mmio_mitigation = MMIO_MITIGATION_VERW;
	} else if (!strcmp(str, "full,nosmt")) {
		mmio_mitigation = MMIO_MITIGATION_VERW;
		mmio_nosmt = true;
	}

	return 0;
}
early_param("mmio_stale_data", mmio_stale_data_parse_cmdline);

#undef pr_fmt
#define pr_fmt(fmt)	"" fmt

static void __init md_clear_update_mitigation(void)
{
	if (cpu_mitigations_off())
		return;

	if (!static_key_enabled(&mds_user_clear))
		goto out;

	/*
	 * mds_user_clear is now enabled. Update MDS, TAA and MMIO Stale Data
	 * mitigation, if necessary.
	 */
	if (mds_mitigation == MDS_MITIGATION_OFF &&
	    boot_cpu_has_bug(X86_BUG_MDS)) {
		mds_mitigation = MDS_MITIGATION_FULL;
		mds_select_mitigation();
	}
	if (taa_mitigation == TAA_MITIGATION_OFF &&
	    boot_cpu_has_bug(X86_BUG_TAA)) {
		taa_mitigation = TAA_MITIGATION_VERW;
		taa_select_mitigation();
	}
	if (mmio_mitigation == MMIO_MITIGATION_OFF &&
	    boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) {
		mmio_mitigation = MMIO_MITIGATION_VERW;
		mmio_select_mitigation();
	}
out:
	if (boot_cpu_has_bug(X86_BUG_MDS))
		pr_info("MDS: %s\n", mds_strings[mds_mitigation]);
	if (boot_cpu_has_bug(X86_BUG_TAA))
		pr_info("TAA: %s\n", taa_strings[taa_mitigation]);
	if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
		pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]);
}

static void __init md_clear_select_mitigation(void)
{
	mds_select_mitigation();
	taa_select_mitigation();
	mmio_select_mitigation();

	/*
	 * As MDS, TAA and MMIO Stale Data mitigations are inter-related, update
	 * and print their mitigation after MDS, TAA and MMIO Stale Data
	 * mitigation selection is done.
	 */
	md_clear_update_mitigation();
}
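
/*
 * Example (illustrative): booting "mds=off tsx_async_abort=full" on a CPU
 * affected by both bugs still enables mds_user_clear via TAA, so
 * md_clear_update_mitigation() upgrades the MDS state from OFF to FULL to
 * keep the reported state consistent with the actual behavior.
 */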

#undef pr_fmt
#define pr_fmt(fmt)	"SRBDS: " fmt

enum srbds_mitigations {
	SRBDS_MITIGATION_OFF,
	SRBDS_MITIGATION_UCODE_NEEDED,
	SRBDS_MITIGATION_FULL,
	SRBDS_MITIGATION_TSX_OFF,
	SRBDS_MITIGATION_HYPERVISOR,
};

static enum srbds_mitigations srbds_mitigation __ro_after_init = SRBDS_MITIGATION_FULL;

static const char * const srbds_strings[] = {
	[SRBDS_MITIGATION_OFF]		= "Vulnerable",
	[SRBDS_MITIGATION_UCODE_NEEDED]	= "Vulnerable: No microcode",
	[SRBDS_MITIGATION_FULL]		= "Mitigation: Microcode",
	[SRBDS_MITIGATION_TSX_OFF]	= "Mitigation: TSX disabled",
	[SRBDS_MITIGATION_HYPERVISOR]	= "Unknown: Dependent on hypervisor status",
};

static bool srbds_off;

void update_srbds_msr(void)
{
	u64 mcu_ctrl;

	if (!boot_cpu_has_bug(X86_BUG_SRBDS))
		return;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return;

	if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED)
		return;

	rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);

	switch (srbds_mitigation) {
	case SRBDS_MITIGATION_OFF:
	case SRBDS_MITIGATION_TSX_OFF:
		mcu_ctrl |= RNGDS_MITG_DIS;
		break;
	case SRBDS_MITIGATION_FULL:
		mcu_ctrl &= ~RNGDS_MITG_DIS;
		break;
	default:
		break;
	}

	wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
}
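
/*
 * Note on the MSR semantics (per the code above): a set RNGDS_MITG_DIS bit
 * *disables* the microcode-side RDRAND/RDSEED serialization, so it is set
 * for the OFF and TSX_OFF states and cleared for FULL.
 */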

static void __init srbds_select_mitigation(void)
{
	u64 ia32_cap;

	if (!boot_cpu_has_bug(X86_BUG_SRBDS))
		return;

	/*
	 * Check to see if this is one of the MDS_NO systems supporting TSX that
	 * are only exposed to SRBDS when TSX is enabled or when CPU is affected
	 * by Processor MMIO Stale Data vulnerability.
	 */
	ia32_cap = x86_read_arch_cap_msr();
	if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
	    !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
		srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
	else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR;
	else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
		srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED;
	else if (cpu_mitigations_off() || srbds_off)
		srbds_mitigation = SRBDS_MITIGATION_OFF;

	update_srbds_msr();
	pr_info("%s\n", srbds_strings[srbds_mitigation]);
}

static int __init srbds_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!boot_cpu_has_bug(X86_BUG_SRBDS))
		return 0;

	srbds_off = !strcmp(str, "off");
	return 0;
}
early_param("srbds", srbds_parse_cmdline);

#undef pr_fmt
#define pr_fmt(fmt)	"L1D Flush : " fmt

enum l1d_flush_mitigations {
	L1D_FLUSH_OFF = 0,
	L1D_FLUSH_ON,
};

static enum l1d_flush_mitigations l1d_flush_mitigation __initdata = L1D_FLUSH_OFF;

static void __init l1d_flush_select_mitigation(void)
{
	if (!l1d_flush_mitigation || !boot_cpu_has(X86_FEATURE_FLUSH_L1D))
		return;

	static_branch_enable(&switch_mm_cond_l1d_flush);
	pr_info("Conditional flush on switch_mm() enabled\n");
}

static int __init l1d_flush_parse_cmdline(char *str)
{
	if (!strcmp(str, "on"))
		l1d_flush_mitigation = L1D_FLUSH_ON;

	return 0;
}
early_param("l1d_flush", l1d_flush_parse_cmdline);
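
/*
 * Usage note (illustrative): "l1d_flush=on" only arms the static key when
 * the CPU enumerates X86_FEATURE_FLUSH_L1D; a task must still opt in via
 * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_L1D_FLUSH, PR_SPEC_ENABLE, 0, 0),
 * handled in l1d_flush_prctl_set() below.
 */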

#undef pr_fmt
#define pr_fmt(fmt)	"Spectre V1 : " fmt

enum spectre_v1_mitigation {
	SPECTRE_V1_MITIGATION_NONE,
	SPECTRE_V1_MITIGATION_AUTO,
};

static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init =
	SPECTRE_V1_MITIGATION_AUTO;

static const char * const spectre_v1_strings[] = {
	[SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers",
	[SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization",
};

/*
 * Does SMAP provide full mitigation against speculative kernel access to
 * userspace?
 */
static bool smap_works_speculatively(void)
{
	if (!boot_cpu_has(X86_FEATURE_SMAP))
		return false;

	/*
	 * On CPUs which are vulnerable to Meltdown, SMAP does not
	 * prevent speculative access to user data in the L1 cache.
	 * Consider SMAP to be non-functional as a mitigation on these
	 * CPUs.
	 */
	if (boot_cpu_has(X86_BUG_CPU_MELTDOWN))
		return false;

	return true;
}

static void __init spectre_v1_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off()) {
		spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
		return;
	}

	if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
		/*
		 * With Spectre v1, a user can speculatively control either
		 * path of a conditional swapgs with a user-controlled GS
		 * value.  The mitigation is to add lfences to both code paths.
		 *
		 * If FSGSBASE is enabled, the user can put a kernel address in
		 * GS, in which case SMAP provides no protection.
		 *
		 * If FSGSBASE is disabled, the user can only put a user space
		 * address in GS.  That makes an attack harder, but still
		 * possible if there's no SMAP protection.
		 */
		if (boot_cpu_has(X86_FEATURE_FSGSBASE) ||
		    !smap_works_speculatively()) {
			/*
			 * Mitigation can be provided from SWAPGS itself or
			 * PTI as the CR3 write in the Meltdown mitigation
			 * is serializing.
			 *
			 * If neither is there, mitigate with an LFENCE to
			 * stop speculation through swapgs.
			 */
			if (boot_cpu_has_bug(X86_BUG_SWAPGS) &&
			    !boot_cpu_has(X86_FEATURE_PTI))
				setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);

			/*
			 * Enable lfences in the kernel entry (non-swapgs)
			 * paths, to prevent user entry from speculatively
			 * skipping swapgs.
			 */
			setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
		}
	}

	pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]);
}

static int __init nospectre_v1_cmdline(char *str)
{
	spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
	return 0;
}
early_param("nospectre_v1", nospectre_v1_cmdline);

static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
	SPECTRE_V2_NONE;

#undef pr_fmt
#define pr_fmt(fmt)	"RETBleed: " fmt

enum retbleed_mitigation {
	RETBLEED_MITIGATION_NONE,
	RETBLEED_MITIGATION_UNRET,
	RETBLEED_MITIGATION_IBRS,
	RETBLEED_MITIGATION_EIBRS,
};

enum retbleed_mitigation_cmd {
	RETBLEED_CMD_OFF,
	RETBLEED_CMD_AUTO,
	RETBLEED_CMD_UNRET,
};

const char * const retbleed_strings[] = {
	[RETBLEED_MITIGATION_NONE]	= "Vulnerable",
	[RETBLEED_MITIGATION_UNRET]	= "Mitigation: untrained return thunk",
	[RETBLEED_MITIGATION_IBRS]	= "Mitigation: IBRS",
	[RETBLEED_MITIGATION_EIBRS]	= "Mitigation: Enhanced IBRS",
};

static enum retbleed_mitigation retbleed_mitigation __ro_after_init =
	RETBLEED_MITIGATION_NONE;
static enum retbleed_mitigation_cmd retbleed_cmd __ro_after_init =
	RETBLEED_CMD_AUTO;

static int __ro_after_init retbleed_nosmt = false;

static int __init retbleed_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	while (str) {
		char *next = strchr(str, ',');

		if (next) {
			*next = 0;
			next++;
		}

		if (!strcmp(str, "off")) {
			retbleed_cmd = RETBLEED_CMD_OFF;
		} else if (!strcmp(str, "auto")) {
			retbleed_cmd = RETBLEED_CMD_AUTO;
		} else if (!strcmp(str, "unret")) {
			retbleed_cmd = RETBLEED_CMD_UNRET;
		} else if (!strcmp(str, "nosmt")) {
			retbleed_nosmt = true;
		} else {
			pr_err("Ignoring unknown retbleed option (%s).", str);
		}

		str = next;
	}

	return 0;
}
early_param("retbleed", retbleed_parse_cmdline);
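
/*
 * Example (illustrative): "retbleed=unret,nosmt" is parsed option by option
 * at the commas above, forcing the return thunk mitigation and requesting
 * SMT disable on CPUs without STIBP (see retbleed_select_mitigation()).
 */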

#define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n"
#define RETBLEED_COMPILER_MSG "WARNING: kernel not compiled with RETPOLINE or -mfunction-return capable compiler!\n"
#define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n"

static void __init retbleed_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_RETBLEED) || cpu_mitigations_off())
		return;

	switch (retbleed_cmd) {
	case RETBLEED_CMD_OFF:
		return;

	case RETBLEED_CMD_UNRET:
		retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
		break;

	case RETBLEED_CMD_AUTO:
	default:
		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
		    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
			retbleed_mitigation = RETBLEED_MITIGATION_UNRET;

		/*
		 * The Intel mitigation (IBRS) was already selected in
		 * spectre_v2_select_mitigation().
		 */

		break;
	}

	switch (retbleed_mitigation) {
	case RETBLEED_MITIGATION_UNRET:

		if (!IS_ENABLED(CONFIG_RETPOLINE) ||
		    !IS_ENABLED(CONFIG_CC_HAS_RETURN_THUNK)) {
			pr_err(RETBLEED_COMPILER_MSG);
			retbleed_mitigation = RETBLEED_MITIGATION_NONE;
			break;
		}

		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
		setup_force_cpu_cap(X86_FEATURE_UNRET);

		if (!boot_cpu_has(X86_FEATURE_STIBP) &&
		    (retbleed_nosmt || cpu_mitigations_auto_nosmt()))
			cpu_smt_disable(false);

		if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
		    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
			pr_err(RETBLEED_UNTRAIN_MSG);
		break;

	default:
		break;
	}

	/*
	 * Let IBRS trump all on Intel without affecting the effects of the
	 * retbleed= cmdline option.
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
		switch (spectre_v2_enabled) {
		case SPECTRE_V2_IBRS:
			retbleed_mitigation = RETBLEED_MITIGATION_IBRS;
			break;
		case SPECTRE_V2_EIBRS:
		case SPECTRE_V2_EIBRS_RETPOLINE:
		case SPECTRE_V2_EIBRS_LFENCE:
			retbleed_mitigation = RETBLEED_MITIGATION_EIBRS;
			break;
		default:
			pr_err(RETBLEED_INTEL_MSG);
		}
	}

	pr_info("%s\n", retbleed_strings[retbleed_mitigation]);
}

#undef pr_fmt
#define pr_fmt(fmt)	"Spectre V2 : " fmt

static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
	SPECTRE_V2_USER_NONE;
static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
	SPECTRE_V2_USER_NONE;

#ifdef CONFIG_RETPOLINE
static bool spectre_v2_bad_module;

bool retpoline_module_ok(bool has_retpoline)
{
	if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
		return true;

	pr_err("System may be vulnerable to spectre v2\n");
	spectre_v2_bad_module = true;
	return false;
}

static inline const char *spectre_v2_module_string(void)
{
	return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
}
#else
static inline const char *spectre_v2_module_string(void) { return ""; }
#endif

#define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n"
#define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n"
#define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n"

#ifdef CONFIG_BPF_SYSCALL
void unpriv_ebpf_notify(int new_state)
{
	if (new_state)
		return;

	/* Unprivileged eBPF is enabled */
	switch (spectre_v2_enabled) {
	case SPECTRE_V2_EIBRS:
		pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
		break;
	case SPECTRE_V2_EIBRS_LFENCE:
		if (sched_smt_active())
			pr_err(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
		break;
	default:
		break;
	}
}
#endif

static inline bool match_option(const char *arg, int arglen, const char *opt)
{
	int len = strlen(opt);

	return len == arglen && !strncmp(arg, opt, len);
}
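
/*
 * Illustrative: match_option("prctl,ibpb", 10, "prctl") is false because the
 * length check requires an exact token, while match_option("prctl", 5,
 * "prctl") is true; arglen is the length returned by cmdline_find_option().
 */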

/* The kernel command line selection for spectre v2 */
enum spectre_v2_mitigation_cmd {
	SPECTRE_V2_CMD_NONE,
	SPECTRE_V2_CMD_AUTO,
	SPECTRE_V2_CMD_FORCE,
	SPECTRE_V2_CMD_RETPOLINE,
	SPECTRE_V2_CMD_RETPOLINE_GENERIC,
	SPECTRE_V2_CMD_RETPOLINE_LFENCE,
	SPECTRE_V2_CMD_EIBRS,
	SPECTRE_V2_CMD_EIBRS_RETPOLINE,
	SPECTRE_V2_CMD_EIBRS_LFENCE,
	SPECTRE_V2_CMD_IBRS,
};

enum spectre_v2_user_cmd {
	SPECTRE_V2_USER_CMD_NONE,
	SPECTRE_V2_USER_CMD_AUTO,
	SPECTRE_V2_USER_CMD_FORCE,
	SPECTRE_V2_USER_CMD_PRCTL,
	SPECTRE_V2_USER_CMD_PRCTL_IBPB,
	SPECTRE_V2_USER_CMD_SECCOMP,
	SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
};

static const char * const spectre_v2_user_strings[] = {
	[SPECTRE_V2_USER_NONE]			= "User space: Vulnerable",
	[SPECTRE_V2_USER_STRICT]		= "User space: Mitigation: STIBP protection",
	[SPECTRE_V2_USER_STRICT_PREFERRED]	= "User space: Mitigation: STIBP always-on protection",
	[SPECTRE_V2_USER_PRCTL]			= "User space: Mitigation: STIBP via prctl",
	[SPECTRE_V2_USER_SECCOMP]		= "User space: Mitigation: STIBP via seccomp and prctl",
};

static const struct {
	const char			*option;
	enum spectre_v2_user_cmd	cmd;
	bool				secure;
} v2_user_options[] __initconst = {
	{ "auto",		SPECTRE_V2_USER_CMD_AUTO,		false },
	{ "off",		SPECTRE_V2_USER_CMD_NONE,		false },
	{ "on",			SPECTRE_V2_USER_CMD_FORCE,		true  },
	{ "prctl",		SPECTRE_V2_USER_CMD_PRCTL,		false },
	{ "prctl,ibpb",		SPECTRE_V2_USER_CMD_PRCTL_IBPB,		false },
	{ "seccomp",		SPECTRE_V2_USER_CMD_SECCOMP,		false },
	{ "seccomp,ibpb",	SPECTRE_V2_USER_CMD_SECCOMP_IBPB,	false },
};

static void __init spec_v2_user_print_cond(const char *reason, bool secure)
{
	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
		pr_info("spectre_v2_user=%s forced on command line.\n", reason);
}

static __ro_after_init enum spectre_v2_mitigation_cmd spectre_v2_cmd;

static enum spectre_v2_user_cmd __init
spectre_v2_parse_user_cmdline(void)
{
	char arg[20];
	int ret, i;

	switch (spectre_v2_cmd) {
	case SPECTRE_V2_CMD_NONE:
		return SPECTRE_V2_USER_CMD_NONE;
	case SPECTRE_V2_CMD_FORCE:
		return SPECTRE_V2_USER_CMD_FORCE;
	default:
		break;
	}

	ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
				  arg, sizeof(arg));
	if (ret < 0)
		return SPECTRE_V2_USER_CMD_AUTO;

	for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
		if (match_option(arg, ret, v2_user_options[i].option)) {
			spec_v2_user_print_cond(v2_user_options[i].option,
						v2_user_options[i].secure);
			return v2_user_options[i].cmd;
		}
	}

	pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg);
	return SPECTRE_V2_USER_CMD_AUTO;
}

static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode)
{
	return mode == SPECTRE_V2_IBRS ||
	       mode == SPECTRE_V2_EIBRS ||
	       mode == SPECTRE_V2_EIBRS_RETPOLINE ||
	       mode == SPECTRE_V2_EIBRS_LFENCE;
}

static void __init
spectre_v2_user_select_mitigation(void)
{
	enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
	bool smt_possible = IS_ENABLED(CONFIG_SMP);
	enum spectre_v2_user_cmd cmd;

	if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
		return;

	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
	    cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
		smt_possible = false;

	cmd = spectre_v2_parse_user_cmdline();
	switch (cmd) {
	case SPECTRE_V2_USER_CMD_NONE:
		goto set_mode;
	case SPECTRE_V2_USER_CMD_FORCE:
		mode = SPECTRE_V2_USER_STRICT;
		break;
	case SPECTRE_V2_USER_CMD_PRCTL:
	case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
		mode = SPECTRE_V2_USER_PRCTL;
		break;
	case SPECTRE_V2_USER_CMD_AUTO:
	case SPECTRE_V2_USER_CMD_SECCOMP:
	case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
		if (IS_ENABLED(CONFIG_SECCOMP))
			mode = SPECTRE_V2_USER_SECCOMP;
		else
			mode = SPECTRE_V2_USER_PRCTL;
		break;
	}

	/* Initialize Indirect Branch Prediction Barrier */
	if (boot_cpu_has(X86_FEATURE_IBPB)) {
		setup_force_cpu_cap(X86_FEATURE_USE_IBPB);

		spectre_v2_user_ibpb = mode;
		switch (cmd) {
		case SPECTRE_V2_USER_CMD_FORCE:
		case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
		case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
			static_branch_enable(&switch_mm_always_ibpb);
			spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
			break;
		case SPECTRE_V2_USER_CMD_PRCTL:
		case SPECTRE_V2_USER_CMD_AUTO:
		case SPECTRE_V2_USER_CMD_SECCOMP:
			static_branch_enable(&switch_mm_cond_ibpb);
			break;
		default:
			break;
		}

		pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
			static_key_enabled(&switch_mm_always_ibpb) ?
			"always-on" : "conditional");
	}

	/*
	 * If no STIBP, IBRS or enhanced IBRS is enabled, or SMT impossible,
	 * STIBP is not required.
	 */
	if (!boot_cpu_has(X86_FEATURE_STIBP) ||
	    !smt_possible ||
	    spectre_v2_in_ibrs_mode(spectre_v2_enabled))
		return;

	/*
	 * At this point, an STIBP mode other than "off" has been set.
	 * If STIBP support is not being forced, check if STIBP always-on
	 * is preferred.
	 */
	if (mode != SPECTRE_V2_USER_STRICT &&
	    boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
		mode = SPECTRE_V2_USER_STRICT_PREFERRED;

	if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET) {
		if (mode != SPECTRE_V2_USER_STRICT &&
		    mode != SPECTRE_V2_USER_STRICT_PREFERRED)
			pr_info("Selecting STIBP always-on mode to complement retbleed mitigation\n");
		mode = SPECTRE_V2_USER_STRICT_PREFERRED;
	}

	spectre_v2_user_stibp = mode;

set_mode:
	pr_info("%s\n", spectre_v2_user_strings[mode]);
}

static const char * const spectre_v2_strings[] = {
	[SPECTRE_V2_NONE]			= "Vulnerable",
	[SPECTRE_V2_RETPOLINE]			= "Mitigation: Retpolines",
	[SPECTRE_V2_LFENCE]			= "Mitigation: LFENCE",
	[SPECTRE_V2_EIBRS]			= "Mitigation: Enhanced IBRS",
	[SPECTRE_V2_EIBRS_LFENCE]		= "Mitigation: Enhanced IBRS + LFENCE",
	[SPECTRE_V2_EIBRS_RETPOLINE]		= "Mitigation: Enhanced IBRS + Retpolines",
	[SPECTRE_V2_IBRS]			= "Mitigation: IBRS",
};

static const struct {
	const char			*option;
	enum spectre_v2_mitigation_cmd	cmd;
	bool				secure;
} mitigation_options[] __initconst = {
	{ "off",		SPECTRE_V2_CMD_NONE,		  false },
	{ "on",			SPECTRE_V2_CMD_FORCE,		  true  },
	{ "retpoline",		SPECTRE_V2_CMD_RETPOLINE,	  false },
	{ "retpoline,amd",	SPECTRE_V2_CMD_RETPOLINE_LFENCE,  false },
	{ "retpoline,lfence",	SPECTRE_V2_CMD_RETPOLINE_LFENCE,  false },
	{ "retpoline,generic",	SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
	{ "eibrs",		SPECTRE_V2_CMD_EIBRS,		  false },
	{ "eibrs,lfence",	SPECTRE_V2_CMD_EIBRS_LFENCE,	  false },
	{ "eibrs,retpoline",	SPECTRE_V2_CMD_EIBRS_RETPOLINE,	  false },
	{ "auto",		SPECTRE_V2_CMD_AUTO,		  false },
	{ "ibrs",		SPECTRE_V2_CMD_IBRS,		  false },
};

static void __init spec_v2_print_cond(const char *reason, bool secure)
{
	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
		pr_info("%s selected on command line.\n", reason);
}

static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
{
	enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
	char arg[20];
	int ret, i;

	if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") ||
	    cpu_mitigations_off())
		return SPECTRE_V2_CMD_NONE;

	ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
	if (ret < 0)
		return SPECTRE_V2_CMD_AUTO;

	for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
		if (!match_option(arg, ret, mitigation_options[i].option))
			continue;
		cmd = mitigation_options[i].cmd;
		break;
	}

	if (i >= ARRAY_SIZE(mitigation_options)) {
		pr_err("unknown option (%s). Switching to AUTO select\n", arg);
		return SPECTRE_V2_CMD_AUTO;
	}

	if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
	     cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
	     cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC ||
	     cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
	     cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
	    !IS_ENABLED(CONFIG_RETPOLINE)) {
		pr_err("%s selected but not compiled in. Switching to AUTO select\n",
		       mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	if ((cmd == SPECTRE_V2_CMD_EIBRS ||
	     cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
	     cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
	    !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
		pr_err("%s selected but CPU doesn't have eIBRS. Switching to AUTO select\n",
		       mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	if ((cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
	     cmd == SPECTRE_V2_CMD_EIBRS_LFENCE) &&
	    !boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
		pr_err("%s selected, but CPU doesn't have a serializing LFENCE. Switching to AUTO select\n",
		       mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
		pr_err("%s selected but not Intel CPU. Switching to AUTO select\n",
		       mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	if (cmd == SPECTRE_V2_CMD_IBRS && !boot_cpu_has(X86_FEATURE_IBRS)) {
		pr_err("%s selected but CPU doesn't have IBRS. Switching to AUTO select\n",
		       mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_has(X86_FEATURE_XENPV)) {
		pr_err("%s selected but running as XenPV guest. Switching to AUTO select\n",
		       mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	spec_v2_print_cond(mitigation_options[i].option,
			   mitigation_options[i].secure);
	return cmd;
}
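
/*
 * Example (illustrative): "spectre_v2=retpoline,lfence" maps to
 * SPECTRE_V2_CMD_RETPOLINE_LFENCE via mitigation_options[] and is demoted to
 * AUTO when the kernel lacks CONFIG_RETPOLINE or the CPU lacks a serializing
 * LFENCE, per the checks above.
 */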

static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
{
	if (!IS_ENABLED(CONFIG_RETPOLINE)) {
		pr_err("Kernel not compiled with retpoline; no mitigation available!");
		return SPECTRE_V2_NONE;
	}

	return SPECTRE_V2_RETPOLINE;
}

static void __init spectre_v2_select_mitigation(void)
{
	enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
	enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;

	/*
	 * If the CPU is not affected and the command line mode is NONE or AUTO
	 * then nothing to do.
	 */
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
	    (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
		return;

	switch (cmd) {
	case SPECTRE_V2_CMD_NONE:
		return;

	case SPECTRE_V2_CMD_FORCE:
	case SPECTRE_V2_CMD_AUTO:
		if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
			mode = SPECTRE_V2_EIBRS;
			break;
		}

		if (boot_cpu_has_bug(X86_BUG_RETBLEED) &&
		    retbleed_cmd != RETBLEED_CMD_OFF &&
		    boot_cpu_has(X86_FEATURE_IBRS) &&
		    boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
			mode = SPECTRE_V2_IBRS;
			break;
		}

		mode = spectre_v2_select_retpoline();
		break;

	case SPECTRE_V2_CMD_RETPOLINE_LFENCE:
		pr_err(SPECTRE_V2_LFENCE_MSG);
		mode = SPECTRE_V2_LFENCE;
		break;

	case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
		mode = SPECTRE_V2_RETPOLINE;
		break;

	case SPECTRE_V2_CMD_RETPOLINE:
		mode = spectre_v2_select_retpoline();
		break;

	case SPECTRE_V2_CMD_IBRS:
		mode = SPECTRE_V2_IBRS;
		break;

	case SPECTRE_V2_CMD_EIBRS:
		mode = SPECTRE_V2_EIBRS;
		break;

	case SPECTRE_V2_CMD_EIBRS_LFENCE:
		mode = SPECTRE_V2_EIBRS_LFENCE;
		break;

	case SPECTRE_V2_CMD_EIBRS_RETPOLINE:
		mode = SPECTRE_V2_EIBRS_RETPOLINE;
		break;
	}

	if (mode == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
		pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);

	if (spectre_v2_in_ibrs_mode(mode)) {
		/* Force it so VMEXIT will restore correctly */
		x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
		write_spec_ctrl_current(x86_spec_ctrl_base, true);
	}

	switch (mode) {
	case SPECTRE_V2_NONE:
	case SPECTRE_V2_EIBRS:
		break;

	case SPECTRE_V2_IBRS:
		setup_force_cpu_cap(X86_FEATURE_KERNEL_IBRS);
		break;

	case SPECTRE_V2_LFENCE:
	case SPECTRE_V2_EIBRS_LFENCE:
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE);
		fallthrough;

	case SPECTRE_V2_RETPOLINE:
	case SPECTRE_V2_EIBRS_RETPOLINE:
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
		break;
	}

	spectre_v2_enabled = mode;
	pr_info("%s\n", spectre_v2_strings[mode]);

	/*
	 * If spectre v2 protection has been enabled, unconditionally fill
	 * RSB during a context switch; this protects against two independent
	 * issues:
	 *
	 *	- RSB underflow (and switch to BTB) on Skylake+
	 *	- SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
	 */
	setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
	pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");

	/*
	 * Retpoline protects the kernel, but doesn't protect firmware.  IBRS
	 * and Enhanced IBRS protect firmware too, so enable IBRS around
	 * firmware calls only when IBRS / Enhanced IBRS aren't otherwise
	 * enabled.
	 *
	 * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
	 * the user might select retpoline on the kernel command line and if
	 * the CPU supports Enhanced IBRS, kernel might unintentionally not
	 * enable IBRS around firmware calls.
	 */
	if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(mode)) {
		setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
		pr_info("Enabling Restricted Speculation for firmware calls\n");
	}

	/* Set up IBPB and STIBP depending on the general spectre V2 command */
	spectre_v2_cmd = cmd;
}

static void update_stibp_msr(void * __unused)
{
	write_spec_ctrl_current(x86_spec_ctrl_base, true);
}

/* Update x86_spec_ctrl_base in case SMT state changed. */
static void update_stibp_strict(void)
{
	u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;

	if (sched_smt_active())
		mask |= SPEC_CTRL_STIBP;

	if (mask == x86_spec_ctrl_base)
		return;

	pr_info("Update user space SMT mitigation: STIBP %s\n",
		mask & SPEC_CTRL_STIBP ? "always-on" : "off");
	x86_spec_ctrl_base = mask;
	on_each_cpu(update_stibp_msr, NULL, 1);
}
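
/*
 * Worked example (illustrative): in STIBP strict mode, onlining the first
 * SMT sibling flips mask from the base value to base | SPEC_CTRL_STIBP; the
 * early return above makes repeated SMT notifications idempotent before the
 * MSR is rewritten on every CPU.
 */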

/* Update the static key controlling the evaluation of TIF_SPEC_IB */
static void update_indir_branch_cond(void)
{
	if (sched_smt_active())
		static_branch_enable(&switch_to_cond_stibp);
	else
		static_branch_disable(&switch_to_cond_stibp);
}

#undef pr_fmt
#define pr_fmt(fmt) fmt

/* Update the static key controlling the MDS CPU buffer clear in idle */
static void update_mds_branch_idle(void)
{
	u64 ia32_cap = x86_read_arch_cap_msr();

	/*
	 * Enable the idle clearing if SMT is active on CPUs which are
	 * affected only by MSBDS and not any other MDS variant.
	 *
	 * The other variants cannot be mitigated when SMT is enabled, so
	 * clearing the buffers on idle just to prevent the Store Buffer
	 * repartitioning leak would be a window dressing exercise.
	 */
	if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
		return;

	if (sched_smt_active()) {
		static_branch_enable(&mds_idle_clear);
	} else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
		   (ia32_cap & ARCH_CAP_FBSDP_NO)) {
		static_branch_disable(&mds_idle_clear);
	}
}

#define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
#define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
#define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n"

void cpu_bugs_smt_update(void)
{
	mutex_lock(&spec_ctrl_mutex);

	if (sched_smt_active() && unprivileged_ebpf_enabled() &&
	    spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
		pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);

	switch (spectre_v2_user_stibp) {
	case SPECTRE_V2_USER_NONE:
		break;
	case SPECTRE_V2_USER_STRICT:
	case SPECTRE_V2_USER_STRICT_PREFERRED:
		update_stibp_strict();
		break;
	case SPECTRE_V2_USER_PRCTL:
	case SPECTRE_V2_USER_SECCOMP:
		update_indir_branch_cond();
		break;
	}

	switch (mds_mitigation) {
	case MDS_MITIGATION_FULL:
	case MDS_MITIGATION_VMWERV:
		if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
			pr_warn_once(MDS_MSG_SMT);
		update_mds_branch_idle();
		break;
	case MDS_MITIGATION_OFF:
		break;
	}

	switch (taa_mitigation) {
	case TAA_MITIGATION_VERW:
	case TAA_MITIGATION_UCODE_NEEDED:
		if (sched_smt_active())
			pr_warn_once(TAA_MSG_SMT);
		break;
	case TAA_MITIGATION_TSX_DISABLED:
	case TAA_MITIGATION_OFF:
		break;
	}

	switch (mmio_mitigation) {
	case MMIO_MITIGATION_VERW:
	case MMIO_MITIGATION_UCODE_NEEDED:
		if (sched_smt_active())
			pr_warn_once(MMIO_MSG_SMT);
		break;
	case MMIO_MITIGATION_OFF:
		break;
	}

	mutex_unlock(&spec_ctrl_mutex);
}

#undef pr_fmt
#define pr_fmt(fmt)	"Speculative Store Bypass: " fmt

static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;

/* The kernel command line selection */
enum ssb_mitigation_cmd {
	SPEC_STORE_BYPASS_CMD_NONE,
	SPEC_STORE_BYPASS_CMD_AUTO,
	SPEC_STORE_BYPASS_CMD_ON,
	SPEC_STORE_BYPASS_CMD_PRCTL,
	SPEC_STORE_BYPASS_CMD_SECCOMP,
};

static const char * const ssb_strings[] = {
	[SPEC_STORE_BYPASS_NONE]	= "Vulnerable",
	[SPEC_STORE_BYPASS_DISABLE]	= "Mitigation: Speculative Store Bypass disabled",
	[SPEC_STORE_BYPASS_PRCTL]	= "Mitigation: Speculative Store Bypass disabled via prctl",
	[SPEC_STORE_BYPASS_SECCOMP]	= "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
};

static const struct {
	const char			*option;
	enum ssb_mitigation_cmd		cmd;
} ssb_mitigation_options[] __initconst = {
	{ "auto",	SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
	{ "on",		SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
	{ "off",	SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
	{ "prctl",	SPEC_STORE_BYPASS_CMD_PRCTL },   /* Disable Speculative Store Bypass via prctl */
	{ "seccomp",	SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
};

static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
{
	enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
	char arg[20];
	int ret, i;

	if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") ||
	    cpu_mitigations_off()) {
		return SPEC_STORE_BYPASS_CMD_NONE;
	} else {
		ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
					  arg, sizeof(arg));
		if (ret < 0)
			return SPEC_STORE_BYPASS_CMD_AUTO;

		for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
			if (!match_option(arg, ret, ssb_mitigation_options[i].option))
				continue;

			cmd = ssb_mitigation_options[i].cmd;
			break;
		}

		if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
			pr_err("unknown option (%s). Switching to AUTO select\n", arg);
			return SPEC_STORE_BYPASS_CMD_AUTO;
		}
	}

	return cmd;
}
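
/*
 * Example (illustrative): "spec_store_bypass_disable=seccomp" yields
 * SPEC_STORE_BYPASS_CMD_SECCOMP, while "nospec_store_bypass_disable" on the
 * command line short-circuits to CMD_NONE before the option table is
 * consulted.
 */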

static enum ssb_mitigation __init __ssb_select_mitigation(void)
{
	enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
	enum ssb_mitigation_cmd cmd;

	if (!boot_cpu_has(X86_FEATURE_SSBD))
		return mode;

	cmd = ssb_parse_cmdline();
	if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
	    (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
	     cmd == SPEC_STORE_BYPASS_CMD_AUTO))
		return mode;

	switch (cmd) {
	case SPEC_STORE_BYPASS_CMD_AUTO:
	case SPEC_STORE_BYPASS_CMD_SECCOMP:
		/*
		 * Choose prctl+seccomp as the default mode if seccomp is
		 * enabled.
		 */
		if (IS_ENABLED(CONFIG_SECCOMP))
			mode = SPEC_STORE_BYPASS_SECCOMP;
		else
			mode = SPEC_STORE_BYPASS_PRCTL;
		break;
	case SPEC_STORE_BYPASS_CMD_ON:
		mode = SPEC_STORE_BYPASS_DISABLE;
		break;
	case SPEC_STORE_BYPASS_CMD_PRCTL:
		mode = SPEC_STORE_BYPASS_PRCTL;
		break;
	case SPEC_STORE_BYPASS_CMD_NONE:
		break;
	}

	/*
	 * If SSBD is controlled by the SPEC_CTRL MSR, then set the proper
	 * bit in the mask to allow guests to use the mitigation even in the
	 * case where the host does not enable it.
	 */
	if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
	    static_cpu_has(X86_FEATURE_AMD_SSBD)) {
		x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
	}

	/*
	 * We have three CPU feature flags that are in play here:
	 *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
	 *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
	 *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
	 */
	if (mode == SPEC_STORE_BYPASS_DISABLE) {
		setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
		/*
		 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
		 * use a completely different MSR and bit dependent on family.
		 */
		if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
		    !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
			x86_amd_ssb_disable();
		} else {
			x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
			write_spec_ctrl_current(x86_spec_ctrl_base, true);
		}
	}

	return mode;
}

static void ssb_select_mitigation(void)
{
	ssb_mode = __ssb_select_mitigation();

	if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
		pr_info("%s\n", ssb_strings[ssb_mode]);
}

#undef pr_fmt
#define pr_fmt(fmt)	"Speculation prctl: " fmt

static void task_update_spec_tif(struct task_struct *tsk)
{
	/* Force the update of the real TIF bits */
	set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);

	/*
	 * Immediately update the speculation control MSRs for the current
	 * task, but for a non-current task delay setting the CPU
	 * mitigation until it is scheduled next.
	 *
	 * This can only happen for SECCOMP mitigation. For PRCTL it's
	 * always the current task.
	 */
	if (tsk == current)
		speculation_ctrl_update_current();
}

static int l1d_flush_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
		return -EPERM;

	switch (ctrl) {
	case PR_SPEC_ENABLE:
		set_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
		return 0;
	case PR_SPEC_DISABLE:
		clear_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
		return 0;
	default:
		return -ERANGE;
	}
}

static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
	    ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
		return -ENXIO;

	switch (ctrl) {
	case PR_SPEC_ENABLE:
		/* If speculation is force disabled, enable is not allowed */
		if (task_spec_ssb_force_disable(task))
			return -EPERM;
		task_clear_spec_ssb_disable(task);
		task_clear_spec_ssb_noexec(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_DISABLE:
		task_set_spec_ssb_disable(task);
		task_clear_spec_ssb_noexec(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_FORCE_DISABLE:
		task_set_spec_ssb_disable(task);
		task_set_spec_ssb_force_disable(task);
		task_clear_spec_ssb_noexec(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_DISABLE_NOEXEC:
		if (task_spec_ssb_force_disable(task))
			return -EPERM;
		task_set_spec_ssb_disable(task);
		task_set_spec_ssb_noexec(task);
		task_update_spec_tif(task);
		break;
	default:
		return -ERANGE;
	}
	return 0;
}
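
/*
 * Userspace usage sketch (illustrative, not part of this file):
 *
 *	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *	      PR_SPEC_DISABLE, 0, 0);
 *
 * reaches this function via arch_prctl_spec_ctrl_set() and sets the task's
 * SSB-disable flag; PR_SPEC_FORCE_DISABLE additionally makes any later
 * PR_SPEC_ENABLE fail with -EPERM, as checked above.
 */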

static bool is_spec_ib_user_controlled(void)
{
	return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
	       spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
	       spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
	       spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP;
}

static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	switch (ctrl) {
	case PR_SPEC_ENABLE:
		if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
		    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
			return 0;

		/*
		 * With strict mode for both IBPB and STIBP, the instruction
		 * code paths avoid checking this task flag and instead,
		 * unconditionally run the instruction. However, STIBP and IBPB
		 * are independent and either can be set to conditionally
		 * enabled regardless of the mode of the other.
		 *
		 * If either is set to conditional, allow the task flag to be
		 * updated, unless it was force-disabled by a previous prctl
		 * call. Currently, this is possible on an AMD CPU which has the
		 * feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. In this case, if the
		 * kernel is booted with 'spectre_v2_user=seccomp', then
		 * spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP and
		 * spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED.
		 */
		if (!is_spec_ib_user_controlled() ||
		    task_spec_ib_force_disable(task))
			return -EPERM;

		task_clear_spec_ib_disable(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_DISABLE:
	case PR_SPEC_FORCE_DISABLE:
		/*
		 * Indirect branch speculation is always allowed when
		 * mitigation is force disabled.
		 */
		if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
		    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
			return -EPERM;

		if (!is_spec_ib_user_controlled())
			return 0;

		task_set_spec_ib_disable(task);
		if (ctrl == PR_SPEC_FORCE_DISABLE)
			task_set_spec_ib_force_disable(task);
		task_update_spec_tif(task);
		break;
	default:
		return -ERANGE;
	}
	return 0;
}

int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
			     unsigned long ctrl)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssb_prctl_set(task, ctrl);
	case PR_SPEC_INDIRECT_BRANCH:
		return ib_prctl_set(task, ctrl);
	case PR_SPEC_L1D_FLUSH:
		return l1d_flush_prctl_set(task, ctrl);
	default:
		return -ENODEV;
	}
}

#ifdef CONFIG_SECCOMP
void arch_seccomp_spec_mitigate(struct task_struct *task)
{
	if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
		ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
	if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
	    spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP)
		ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
}
#endif

static int l1d_flush_prctl_get(struct task_struct *task)
{
	if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
		return PR_SPEC_FORCE_DISABLE;

	if (test_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH))
		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
	return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
}

static int ssb_prctl_get(struct task_struct *task)
{
	switch (ssb_mode) {
	case SPEC_STORE_BYPASS_DISABLE:
		return PR_SPEC_DISABLE;
	case SPEC_STORE_BYPASS_SECCOMP:
	case SPEC_STORE_BYPASS_PRCTL:
		if (task_spec_ssb_force_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
		if (task_spec_ssb_noexec(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;
		if (task_spec_ssb_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
	default:
		if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
			return PR_SPEC_ENABLE;
		return PR_SPEC_NOT_AFFECTED;
	}
}

static int ib_prctl_get(struct task_struct *task)
{
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
		return PR_SPEC_NOT_AFFECTED;

	if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
	    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
		return PR_SPEC_ENABLE;
	else if (is_spec_ib_user_controlled()) {
		if (task_spec_ib_force_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
		if (task_spec_ib_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
	} else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
	    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
	    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
		return PR_SPEC_DISABLE;
	else
		return PR_SPEC_NOT_AFFECTED;
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssb_prctl_get(task);
	case PR_SPEC_INDIRECT_BRANCH:
		return ib_prctl_get(task);
	case PR_SPEC_L1D_FLUSH:
		return l1d_flush_prctl_get(task);
	default:
		return -ENODEV;
	}
}

void x86_spec_ctrl_setup_ap(void)
{
	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
		write_spec_ctrl_current(x86_spec_ctrl_base, true);

	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
		x86_amd_ssb_disable();
}

bool itlb_multihit_kvm_mitigation;
EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation);

#undef pr_fmt
#define pr_fmt(fmt)	"L1TF: " fmt

/* Default mitigation for L1TF-affected CPUs */
enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
#if IS_ENABLED(CONFIG_KVM_INTEL)
EXPORT_SYMBOL_GPL(l1tf_mitigation);
#endif
enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);

/*
 * These CPUs all support 44bits physical address space internally in the
 * cache but CPUID can report a smaller number of physical address bits.
 *
 * The L1TF mitigation uses the top most address bit for the inversion of
 * non present PTEs. When the installed memory reaches into the top most
 * address bit due to memory holes, which has been observed on machines
 * which report 36bits physical address bits and have 32G RAM installed,
 * then the mitigation range check in l1tf_select_mitigation() triggers.
 * This is a false positive because the mitigation is still possible due to
 * the fact that the cache uses 44bit internally. Use the cache bits
 * instead of the reported physical bits and adjust them on the affected
 * machines to 44bit if the reported bits are less than 44.
 */
static void override_cache_bits(struct cpuinfo_x86 *c)
{
	if (c->x86 != 6)
		return;

	switch (c->x86_model) {
	case INTEL_FAM6_NEHALEM:
	case INTEL_FAM6_WESTMERE:
	case INTEL_FAM6_SANDYBRIDGE:
	case INTEL_FAM6_IVYBRIDGE:
	case INTEL_FAM6_HASWELL:
	case INTEL_FAM6_HASWELL_L:
	case INTEL_FAM6_HASWELL_G:
	case INTEL_FAM6_BROADWELL:
	case INTEL_FAM6_BROADWELL_G:
	case INTEL_FAM6_SKYLAKE_L:
	case INTEL_FAM6_SKYLAKE:
	case INTEL_FAM6_KABYLAKE_L:
	case INTEL_FAM6_KABYLAKE:
		if (c->x86_cache_bits < 44)
			c->x86_cache_bits = 44;
		break;
	}
}
static void __init l1tf_select_mitigation(void)
{
	u64 half_pa;

	if (!boot_cpu_has_bug(X86_BUG_L1TF))
		return;

	if (cpu_mitigations_off())
		l1tf_mitigation = L1TF_MITIGATION_OFF;
	else if (cpu_mitigations_auto_nosmt())
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;

	override_cache_bits(&boot_cpu_data);

	switch (l1tf_mitigation) {
	case L1TF_MITIGATION_OFF:
	case L1TF_MITIGATION_FLUSH_NOWARN:
	case L1TF_MITIGATION_FLUSH:
		break;
	case L1TF_MITIGATION_FLUSH_NOSMT:
	case L1TF_MITIGATION_FULL:
		cpu_smt_disable(false);
		break;
	case L1TF_MITIGATION_FULL_FORCE:
		cpu_smt_disable(true);
		break;
	}

#if CONFIG_PGTABLE_LEVELS == 2
	pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
	return;
#endif

	half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
	if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
	    e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
		pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
		pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
			half_pa);
		pr_info("However, doing so will make a part of your RAM unusable.\n");
		pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n");
		return;
	}

	setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
}
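
/*
 * Sketch of the range check above (illustrative): half_pa is half of
 * the cacheable physical address space. With x86_cache_bits == 46,
 * l1tf_pfn_limit() is the PFN of 2^45, so half_pa = 2^45 = 32T; any
 * E820_TYPE_RAM mapped at or above that means an inverted non-present
 * PTE could still reference real memory, hence the warning path instead
 * of forcing X86_FEATURE_L1TF_PTEINV.
 */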
static int __init l1tf_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_L1TF))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off"))
		l1tf_mitigation = L1TF_MITIGATION_OFF;
	else if (!strcmp(str, "flush,nowarn"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
	else if (!strcmp(str, "flush"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH;
	else if (!strcmp(str, "flush,nosmt"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
	else if (!strcmp(str, "full"))
		l1tf_mitigation = L1TF_MITIGATION_FULL;
	else if (!strcmp(str, "full,force"))
		l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;

	return 0;
}
early_param("l1tf", l1tf_cmdline);
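
/*
 * Usage (illustrative): the parser above is wired to the "l1tf" boot
 * parameter, e.g.
 *
 *	l1tf=flush,nosmt
 *
 * on the kernel command line. Unrecognized values fall through and
 * leave the current setting (default L1TF_MITIGATION_FLUSH) unchanged.
 */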
#undef pr_fmt
#define pr_fmt(fmt) fmt

#ifdef CONFIG_SYSFS

#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"

#if IS_ENABLED(CONFIG_KVM_INTEL)
static const char * const l1tf_vmx_states[] = {
	[VMENTER_L1D_FLUSH_AUTO]		= "auto",
	[VMENTER_L1D_FLUSH_NEVER]		= "vulnerable",
	[VMENTER_L1D_FLUSH_COND]		= "conditional cache flushes",
	[VMENTER_L1D_FLUSH_ALWAYS]		= "cache flushes",
	[VMENTER_L1D_FLUSH_EPT_DISABLED]	= "EPT disabled",
	[VMENTER_L1D_FLUSH_NOT_REQUIRED]	= "flush not necessary"
};
static ssize_t l1tf_show_state(char *buf)
{
	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
		return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);

	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
	    (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
	     sched_smt_active())) {
		return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
			       l1tf_vmx_states[l1tf_vmx_mitigation]);
	}

	return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
		       l1tf_vmx_states[l1tf_vmx_mitigation],
		       sched_smt_active() ? "vulnerable" : "disabled");
}
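
/*
 * Example of the sysfs output assembled above (illustrative):
 *
 *	$ cat /sys/devices/system/cpu/vulnerabilities/l1tf
 *	Mitigation: PTE Inversion; VMX: conditional cache flushes, SMT vulnerable
 */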
static ssize_t itlb_multihit_show_state(char *buf)
{
	if (!boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) ||
	    !boot_cpu_has(X86_FEATURE_VMX))
		return sprintf(buf, "KVM: Mitigation: VMX unsupported\n");
	else if (!(cr4_read_shadow() & X86_CR4_VMXE))
		return sprintf(buf, "KVM: Mitigation: VMX disabled\n");
	else if (itlb_multihit_kvm_mitigation)
		return sprintf(buf, "KVM: Mitigation: Split huge pages\n");
	else
		return sprintf(buf, "KVM: Vulnerable\n");
}
#else
static ssize_t l1tf_show_state(char *buf)
{
	return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
}

static ssize_t itlb_multihit_show_state(char *buf)
{
	return sprintf(buf, "Processor vulnerable\n");
}
#endif
static ssize_t mds_show_state(char *buf)
{
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
		return sprintf(buf, "%s; SMT Host state unknown\n",
			       mds_strings[mds_mitigation]);
	}

	if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) {
		return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
			       (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
			        sched_smt_active() ? "mitigated" : "disabled"));
	}

	return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
		       sched_smt_active() ? "vulnerable" : "disabled");
}
static ssize_t tsx_async_abort_show_state(char *buf)
{
	if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) ||
	    (taa_mitigation == TAA_MITIGATION_OFF))
		return sprintf(buf, "%s\n", taa_strings[taa_mitigation]);

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
		return sprintf(buf, "%s; SMT Host state unknown\n",
			       taa_strings[taa_mitigation]);
	}

	return sprintf(buf, "%s; SMT %s\n", taa_strings[taa_mitigation],
		       sched_smt_active() ? "vulnerable" : "disabled");
}
static ssize_t mmio_stale_data_show_state(char *buf)
{
	if (mmio_mitigation == MMIO_MITIGATION_OFF)
		return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]);

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
		return sysfs_emit(buf, "%s; SMT Host state unknown\n",
				  mmio_strings[mmio_mitigation]);
	}

	return sysfs_emit(buf, "%s; SMT %s\n", mmio_strings[mmio_mitigation],
			  sched_smt_active() ? "vulnerable" : "disabled");
}
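
/*
 * Note: mmio_stale_data_show_state() uses sysfs_emit() rather than the
 * bare sprintf() of the older helpers above; sysfs_emit() bounds the
 * write to the PAGE_SIZE sysfs buffer and is the preferred interface
 * for new show() callbacks.
 */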
static char *stibp_state(void)
{
	if (spectre_v2_in_ibrs_mode(spectre_v2_enabled))
		return "";

	switch (spectre_v2_user_stibp) {
	case SPECTRE_V2_USER_NONE:
		return ", STIBP: disabled";
	case SPECTRE_V2_USER_STRICT:
		return ", STIBP: forced";
	case SPECTRE_V2_USER_STRICT_PREFERRED:
		return ", STIBP: always-on";
	case SPECTRE_V2_USER_PRCTL:
	case SPECTRE_V2_USER_SECCOMP:
		if (static_key_enabled(&switch_to_cond_stibp))
			return ", STIBP: conditional";
	}

	return "";
}
static char *ibpb_state(void)
{
	if (boot_cpu_has(X86_FEATURE_IBPB)) {
		if (static_key_enabled(&switch_mm_always_ibpb))
			return ", IBPB: always-on";
		if (static_key_enabled(&switch_mm_cond_ibpb))
			return ", IBPB: conditional";
		return ", IBPB: disabled";
	}

	return "";
}
static ssize_t spectre_v2_show_state(char *buf)
{
	if (spectre_v2_enabled == SPECTRE_V2_LFENCE)
		return sprintf(buf, "Vulnerable: LFENCE\n");

	if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
		return sprintf(buf, "Vulnerable: eIBRS with unprivileged eBPF\n");

	if (sched_smt_active() && unprivileged_ebpf_enabled() &&
	    spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
		return sprintf(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");

	return sprintf(buf, "%s%s%s%s%s%s\n",
		       spectre_v2_strings[spectre_v2_enabled],
		       ibpb_state(),
		       boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
		       stibp_state(),
		       boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
		       spectre_v2_module_string());
}
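
/*
 * Example of the assembled string (illustrative): on a retpoline kernel
 * with conditional IBPB/STIBP the final sprintf() above yields
 * something like
 *
 *	Mitigation: Retpolines, IBPB: conditional, IBRS_FW, STIBP: conditional, RSB filling
 */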
static ssize_t srbds_show_state(char *buf)
{
	return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]);
}
static ssize_t retbleed_show_state(char *buf)
{
	if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET) {
		if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
		    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
			return sprintf(buf, "Vulnerable: untrained return thunk on non-Zen uarch\n");

		return sprintf(buf, "%s; SMT %s\n",
			       retbleed_strings[retbleed_mitigation],
			       !sched_smt_active() ? "disabled" :
			       spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
			       spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ?
			       "enabled with STIBP protection" : "vulnerable");
	}

	return sprintf(buf, "%s\n", retbleed_strings[retbleed_mitigation]);
}
static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
			       char *buf, unsigned int bug)
{
	if (!boot_cpu_has_bug(bug))
		return sprintf(buf, "Not affected\n");

	switch (bug) {
	case X86_BUG_CPU_MELTDOWN:
		if (boot_cpu_has(X86_FEATURE_PTI))
			return sprintf(buf, "Mitigation: PTI\n");

		if (hypervisor_is_type(X86_HYPER_XEN_PV))
			return sprintf(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");

		break;

	case X86_BUG_SPECTRE_V1:
		return sprintf(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);

	case X86_BUG_SPECTRE_V2:
		return spectre_v2_show_state(buf);

	case X86_BUG_SPEC_STORE_BYPASS:
		return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);

	case X86_BUG_L1TF:
		if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
			return l1tf_show_state(buf);
		break;

	case X86_BUG_MDS:
		return mds_show_state(buf);

	case X86_BUG_TAA:
		return tsx_async_abort_show_state(buf);

	case X86_BUG_ITLB_MULTIHIT:
		return itlb_multihit_show_state(buf);

	case X86_BUG_SRBDS:
		return srbds_show_state(buf);

	case X86_BUG_MMIO_STALE_DATA:
		return mmio_stale_data_show_state(buf);

	case X86_BUG_RETBLEED:
		return retbleed_show_state(buf);

	default:
		break;
	}

	return sprintf(buf, "Vulnerable\n");
}
ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
}

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
}

ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
}

ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
}

ssize_t cpu_show_tsx_async_abort(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_TAA);
}

ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT);
}

ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS);
}

ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
}

ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED);
}
#endif