x86/speculation/mmio: Add mitigation for Processor MMIO Stale Data
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index ecfca3bbcd96866c8666accd32947656b10f7159..1176b383e4adaea5f4ed92dfc21f2580bf3cbfae 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
 #include <linux/prctl.h>
 #include <linux/sched/smt.h>
 #include <linux/pgtable.h>
+#include <linux/bpf.h>
 
 #include <asm/spec-ctrl.h>
 #include <asm/cmdline.h>
 #include <asm/bugs.h>
 #include <asm/processor.h>
 #include <asm/processor-flags.h>
-#include <asm/fpu/internal.h>
+#include <asm/fpu/api.h>
 #include <asm/msr.h>
 #include <asm/vmx.h>
 #include <asm/paravirt.h>
@@ -40,8 +41,9 @@ static void __init spectre_v2_select_mitigation(void);
 static void __init ssb_select_mitigation(void);
 static void __init l1tf_select_mitigation(void);
 static void __init mds_select_mitigation(void);
-static void __init mds_print_mitigation(void);
+static void __init md_clear_update_mitigation(void);
 static void __init taa_select_mitigation(void);
+static void __init mmio_select_mitigation(void);
 static void __init srbds_select_mitigation(void);
 static void __init l1d_flush_select_mitigation(void);
 
@@ -84,6 +86,10 @@ EXPORT_SYMBOL_GPL(mds_idle_clear);
  */
 DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
 
+/* Controls CPU Fill buffer clear before KVM guest MMIO accesses */
+DEFINE_STATIC_KEY_FALSE(mmio_stale_data_clear);
+EXPORT_SYMBOL_GPL(mmio_stale_data_clear);
+
 void __init check_bugs(void)
 {
        identify_boot_cpu();
@@ -118,14 +124,16 @@ void __init check_bugs(void)
        l1tf_select_mitigation();
        mds_select_mitigation();
        taa_select_mitigation();
+       mmio_select_mitigation();
        srbds_select_mitigation();
        l1d_flush_select_mitigation();
 
        /*
-        * As MDS and TAA mitigations are inter-related, print MDS
-        * mitigation until after TAA mitigation selection is done.
+        * As the MDS, TAA and MMIO Stale Data mitigations are inter-related,
+        * update and print them only after all three selections are done.
         */
-       mds_print_mitigation();
+       md_clear_update_mitigation();
 
        arch_smt_update();
 
@@ -266,14 +274,6 @@ static void __init mds_select_mitigation(void)
        }
 }
 
-static void __init mds_print_mitigation(void)
-{
-       if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off())
-               return;
-
-       pr_info("%s\n", mds_strings[mds_mitigation]);
-}
-
 static int __init mds_cmdline(char *str)
 {
        if (!boot_cpu_has_bug(X86_BUG_MDS))
@@ -328,7 +328,7 @@ static void __init taa_select_mitigation(void)
        /* TSX previously disabled by tsx=off */
        if (!boot_cpu_has(X86_FEATURE_RTM)) {
                taa_mitigation = TAA_MITIGATION_TSX_DISABLED;
-               goto out;
+               return;
        }
 
        if (cpu_mitigations_off()) {
@@ -342,7 +342,7 @@ static void __init taa_select_mitigation(void)
         */
        if (taa_mitigation == TAA_MITIGATION_OFF &&
            mds_mitigation == MDS_MITIGATION_OFF)
-               goto out;
+               return;
 
        if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
                taa_mitigation = TAA_MITIGATION_VERW;
@@ -374,18 +374,6 @@ static void __init taa_select_mitigation(void)
 
        if (taa_nosmt || cpu_mitigations_auto_nosmt())
                cpu_smt_disable(false);
-
-       /*
-        * Update MDS mitigation, if necessary, as the mds_user_clear is
-        * now enabled for TAA mitigation.
-        */
-       if (mds_mitigation == MDS_MITIGATION_OFF &&
-           boot_cpu_has_bug(X86_BUG_MDS)) {
-               mds_mitigation = MDS_MITIGATION_FULL;
-               mds_select_mitigation();
-       }
-out:
-       pr_info("%s\n", taa_strings[taa_mitigation]);
 }
 
 static int __init tsx_async_abort_parse_cmdline(char *str)
@@ -409,6 +397,129 @@ static int __init tsx_async_abort_parse_cmdline(char *str)
 }
 early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);
 
+#undef pr_fmt
+#define pr_fmt(fmt)    "MMIO Stale Data: " fmt
+
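+/* Mitigation states for the Processor MMIO Stale Data vulnerabilities. */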
+enum mmio_mitigations {
+       MMIO_MITIGATION_OFF,
+       MMIO_MITIGATION_UCODE_NEEDED,
+       MMIO_MITIGATION_VERW,
+};
+
+/* Default mitigation for Processor MMIO Stale Data vulnerabilities */
+static enum mmio_mitigations mmio_mitigation __ro_after_init = MMIO_MITIGATION_VERW;
+static bool mmio_nosmt __ro_after_init = false;
+
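+/* Human-readable mitigation status strings, printed during boot. */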
+static const char * const mmio_strings[] = {
+       [MMIO_MITIGATION_OFF]           = "Vulnerable",
+       [MMIO_MITIGATION_UCODE_NEEDED]  = "Vulnerable: Clear CPU buffers attempted, no microcode",
+       [MMIO_MITIGATION_VERW]          = "Mitigation: Clear CPU buffers",
+};
+
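+/* Select the MMIO Stale Data mitigation and the static key that enforces it. */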
+static void __init mmio_select_mitigation(void)
+{
+       u64 ia32_cap;
+
+       if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
+           cpu_mitigations_off()) {
+               mmio_mitigation = MMIO_MITIGATION_OFF;
+               return;
+       }
+
+       if (mmio_mitigation == MMIO_MITIGATION_OFF)
+               return;
+
+       ia32_cap = x86_read_arch_cap_msr();
+
+       /*
+        * Enable CPU buffer clear mitigation for host and VMM, if also affected
+        * by MDS or TAA. Otherwise, enable mitigation for VMM only.
+        */
+       if (boot_cpu_has_bug(X86_BUG_MDS) || (boot_cpu_has_bug(X86_BUG_TAA) &&
+                                             boot_cpu_has(X86_FEATURE_RTM)))
+               static_branch_enable(&mds_user_clear);
+       else
+               static_branch_enable(&mmio_stale_data_clear);
+
+       /*
+        * Check if the system has the right microcode.
+        *
+        * CPU Fill buffer clear mitigation is enumerated by either an explicit
+        * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
+        * affected systems.
+        */
+       if ((ia32_cap & ARCH_CAP_FB_CLEAR) ||
+           (boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
+            boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
+            !(ia32_cap & ARCH_CAP_MDS_NO)))
+               mmio_mitigation = MMIO_MITIGATION_VERW;
+       else
+               mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;
+
+       if (mmio_nosmt || cpu_mitigations_auto_nosmt())
+               cpu_smt_disable(false);
+}
+
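+/* Parse the "mmio_stale_data=" command line option: off, full or full,nosmt. */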
+static int __init mmio_stale_data_parse_cmdline(char *str)
+{
+       if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
+               return 0;
+
+       if (!str)
+               return -EINVAL;
+
+       if (!strcmp(str, "off")) {
+               mmio_mitigation = MMIO_MITIGATION_OFF;
+       } else if (!strcmp(str, "full")) {
+               mmio_mitigation = MMIO_MITIGATION_VERW;
+       } else if (!strcmp(str, "full,nosmt")) {
+               mmio_mitigation = MMIO_MITIGATION_VERW;
+               mmio_nosmt = true;
+       }
+
+       return 0;
+}
+early_param("mmio_stale_data", mmio_stale_data_parse_cmdline);
+
+#undef pr_fmt
+#define pr_fmt(fmt)     "" fmt
+
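+/*
+ * Once mds_user_clear has been enabled by any of the MDS, TAA or MMIO Stale
+ * Data mitigations, upgrade the others that were left off and print the
+ * final state of each bug that affects the CPU.
+ */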
+static void __init md_clear_update_mitigation(void)
+{
+       if (cpu_mitigations_off())
+               return;
+
+       if (!static_key_enabled(&mds_user_clear))
+               goto out;
+
+       /*
+        * mds_user_clear is now enabled. Update MDS, TAA and MMIO Stale Data
+        * mitigation, if necessary.
+        */
+       if (mds_mitigation == MDS_MITIGATION_OFF &&
+           boot_cpu_has_bug(X86_BUG_MDS)) {
+               mds_mitigation = MDS_MITIGATION_FULL;
+               mds_select_mitigation();
+       }
+       if (taa_mitigation == TAA_MITIGATION_OFF &&
+           boot_cpu_has_bug(X86_BUG_TAA)) {
+               taa_mitigation = TAA_MITIGATION_VERW;
+               taa_select_mitigation();
+       }
+       if (mmio_mitigation == MMIO_MITIGATION_OFF &&
+           boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) {
+               mmio_mitigation = MMIO_MITIGATION_VERW;
+               mmio_select_mitigation();
+       }
+out:
+       if (boot_cpu_has_bug(X86_BUG_MDS))
+               pr_info("MDS: %s\n", mds_strings[mds_mitigation]);
+       if (boot_cpu_has_bug(X86_BUG_TAA))
+               pr_info("TAA: %s\n", taa_strings[taa_mitigation]);
+       if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
+               pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]);
+}
+
 #undef pr_fmt
 #define pr_fmt(fmt)    "SRBDS: " fmt
 
@@ -650,6 +761,32 @@ static inline const char *spectre_v2_module_string(void)
 static inline const char *spectre_v2_module_string(void) { return ""; }
 #endif
 
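+/* Warnings for mitigation choices that may still leave data leaks possible. */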
+#define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n"
+#define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n"
+#define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n"
+
+#ifdef CONFIG_BPF_SYSCALL
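+/*
+ * Notified when the unprivileged eBPF state changes; a zero new_state means
+ * unprivileged eBPF has just been enabled.
+ */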
+void unpriv_ebpf_notify(int new_state)
+{
+       if (new_state)
+               return;
+
+       /* Unprivileged eBPF is enabled */
+
+       switch (spectre_v2_enabled) {
+       case SPECTRE_V2_EIBRS:
+               pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
+               break;
+       case SPECTRE_V2_EIBRS_LFENCE:
+               if (sched_smt_active())
+                       pr_err(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
+               break;
+       default:
+               break;
+       }
+}
+#endif
+
 static inline bool match_option(const char *arg, int arglen, const char *opt)
 {
        int len = strlen(opt);
@@ -664,7 +801,10 @@ enum spectre_v2_mitigation_cmd {
        SPECTRE_V2_CMD_FORCE,
        SPECTRE_V2_CMD_RETPOLINE,
        SPECTRE_V2_CMD_RETPOLINE_GENERIC,
-       SPECTRE_V2_CMD_RETPOLINE_AMD,
+       SPECTRE_V2_CMD_RETPOLINE_LFENCE,
+       SPECTRE_V2_CMD_EIBRS,
+       SPECTRE_V2_CMD_EIBRS_RETPOLINE,
+       SPECTRE_V2_CMD_EIBRS_LFENCE,
 };
 
 enum spectre_v2_user_cmd {
@@ -737,6 +877,13 @@ spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
        return SPECTRE_V2_USER_CMD_AUTO;
 }
 
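+/* True if @mode is one of the enhanced IBRS mitigation flavors. */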
+static inline bool spectre_v2_in_eibrs_mode(enum spectre_v2_mitigation mode)
+{
+       return (mode == SPECTRE_V2_EIBRS ||
+               mode == SPECTRE_V2_EIBRS_RETPOLINE ||
+               mode == SPECTRE_V2_EIBRS_LFENCE);
+}
+
 static void __init
 spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
 {
@@ -804,7 +951,7 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
         */
        if (!boot_cpu_has(X86_FEATURE_STIBP) ||
            !smt_possible ||
-           spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
+           spectre_v2_in_eibrs_mode(spectre_v2_enabled))
                return;
 
        /*
@@ -824,9 +971,11 @@ set_mode:
 
 static const char * const spectre_v2_strings[] = {
        [SPECTRE_V2_NONE]                       = "Vulnerable",
-       [SPECTRE_V2_RETPOLINE_GENERIC]          = "Mitigation: Full generic retpoline",
-       [SPECTRE_V2_RETPOLINE_AMD]              = "Mitigation: Full AMD retpoline",
-       [SPECTRE_V2_IBRS_ENHANCED]              = "Mitigation: Enhanced IBRS",
+       [SPECTRE_V2_RETPOLINE]                  = "Mitigation: Retpolines",
+       [SPECTRE_V2_LFENCE]                     = "Mitigation: LFENCE",
+       [SPECTRE_V2_EIBRS]                      = "Mitigation: Enhanced IBRS",
+       [SPECTRE_V2_EIBRS_LFENCE]               = "Mitigation: Enhanced IBRS + LFENCE",
+       [SPECTRE_V2_EIBRS_RETPOLINE]            = "Mitigation: Enhanced IBRS + Retpolines",
 };
 
 static const struct {
@@ -837,8 +986,12 @@ static const struct {
        { "off",                SPECTRE_V2_CMD_NONE,              false },
        { "on",                 SPECTRE_V2_CMD_FORCE,             true  },
        { "retpoline",          SPECTRE_V2_CMD_RETPOLINE,         false },
-       { "retpoline,amd",      SPECTRE_V2_CMD_RETPOLINE_AMD,     false },
+       { "retpoline,amd",      SPECTRE_V2_CMD_RETPOLINE_LFENCE,  false },
+       { "retpoline,lfence",   SPECTRE_V2_CMD_RETPOLINE_LFENCE,  false },
        { "retpoline,generic",  SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
+       { "eibrs",              SPECTRE_V2_CMD_EIBRS,             false },
+       { "eibrs,lfence",       SPECTRE_V2_CMD_EIBRS_LFENCE,      false },
+       { "eibrs,retpoline",    SPECTRE_V2_CMD_EIBRS_RETPOLINE,   false },
        { "auto",               SPECTRE_V2_CMD_AUTO,              false },
 };
 
@@ -875,17 +1028,30 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
        }
 
        if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
-            cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
-            cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
+            cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
+            cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC ||
+            cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
+            cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
            !IS_ENABLED(CONFIG_RETPOLINE)) {
-               pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option);
+               pr_err("%s selected but not compiled in. Switching to AUTO select\n",
+                      mitigation_options[i].option);
                return SPECTRE_V2_CMD_AUTO;
        }
 
-       if (cmd == SPECTRE_V2_CMD_RETPOLINE_AMD &&
-           boot_cpu_data.x86_vendor != X86_VENDOR_HYGON &&
-           boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
-               pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
+       if ((cmd == SPECTRE_V2_CMD_EIBRS ||
+            cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
+            cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
+           !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
+               pr_err("%s selected but CPU doesn't have eIBRS. Switching to AUTO select\n",
+                      mitigation_options[i].option);
+               return SPECTRE_V2_CMD_AUTO;
+       }
+
+       if ((cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
+            cmd == SPECTRE_V2_CMD_EIBRS_LFENCE) &&
+           !boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
+               pr_err("%s selected, but CPU doesn't have a serializing LFENCE. Switching to AUTO select\n",
+                      mitigation_options[i].option);
                return SPECTRE_V2_CMD_AUTO;
        }
 
@@ -894,6 +1060,16 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
        return cmd;
 }
 
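+/* Fall back to retpolines; degrade to no mitigation if not compiled in. */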
+static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
+{
+       if (!IS_ENABLED(CONFIG_RETPOLINE)) {
+               pr_err("Kernel not compiled with retpoline; no mitigation available!");
+               return SPECTRE_V2_NONE;
+       }
+
+       return SPECTRE_V2_RETPOLINE;
+}
+
 static void __init spectre_v2_select_mitigation(void)
 {
        enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
@@ -914,49 +1090,64 @@ static void __init spectre_v2_select_mitigation(void)
        case SPECTRE_V2_CMD_FORCE:
        case SPECTRE_V2_CMD_AUTO:
                if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
-                       mode = SPECTRE_V2_IBRS_ENHANCED;
-                       /* Force it so VMEXIT will restore correctly */
-                       x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
-                       wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
-                       goto specv2_set_mode;
+                       mode = SPECTRE_V2_EIBRS;
+                       break;
                }
-               if (IS_ENABLED(CONFIG_RETPOLINE))
-                       goto retpoline_auto;
+
+               mode = spectre_v2_select_retpoline();
                break;
-       case SPECTRE_V2_CMD_RETPOLINE_AMD:
-               if (IS_ENABLED(CONFIG_RETPOLINE))
-                       goto retpoline_amd;
+
+       case SPECTRE_V2_CMD_RETPOLINE_LFENCE:
+               pr_err(SPECTRE_V2_LFENCE_MSG);
+               mode = SPECTRE_V2_LFENCE;
                break;
+
        case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
-               if (IS_ENABLED(CONFIG_RETPOLINE))
-                       goto retpoline_generic;
+               mode = SPECTRE_V2_RETPOLINE;
                break;
+
        case SPECTRE_V2_CMD_RETPOLINE:
-               if (IS_ENABLED(CONFIG_RETPOLINE))
-                       goto retpoline_auto;
+               mode = spectre_v2_select_retpoline();
+               break;
+
+       case SPECTRE_V2_CMD_EIBRS:
+               mode = SPECTRE_V2_EIBRS;
+               break;
+
+       case SPECTRE_V2_CMD_EIBRS_LFENCE:
+               mode = SPECTRE_V2_EIBRS_LFENCE;
+               break;
+
+       case SPECTRE_V2_CMD_EIBRS_RETPOLINE:
+               mode = SPECTRE_V2_EIBRS_RETPOLINE;
                break;
        }
-       pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!");
-       return;
 
-retpoline_auto:
-       if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
-           boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
-       retpoline_amd:
-               if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
-                       pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
-                       goto retpoline_generic;
-               }
-               mode = SPECTRE_V2_RETPOLINE_AMD;
-               setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
-               setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
-       } else {
-       retpoline_generic:
-               mode = SPECTRE_V2_RETPOLINE_GENERIC;
+       if (mode == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
+               pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
+
+       if (spectre_v2_in_eibrs_mode(mode)) {
+               /* Force it so VMEXIT will restore correctly */
+               x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
+               wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+       }
+
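+       /* Force the CPU feature bits that implement the selected mode. */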
+       switch (mode) {
+       case SPECTRE_V2_NONE:
+       case SPECTRE_V2_EIBRS:
+               break;
+
+       case SPECTRE_V2_LFENCE:
+       case SPECTRE_V2_EIBRS_LFENCE:
+               setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE);
+               fallthrough;
+
+       case SPECTRE_V2_RETPOLINE:
+       case SPECTRE_V2_EIBRS_RETPOLINE:
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
+               break;
        }
 
-specv2_set_mode:
        spectre_v2_enabled = mode;
        pr_info("%s\n", spectre_v2_strings[mode]);
 
@@ -982,7 +1173,7 @@ specv2_set_mode:
         * the CPU supports Enhanced IBRS, the kernel might unintentionally not
         * enable IBRS around firmware calls.
         */
-       if (boot_cpu_has(X86_FEATURE_IBRS) && mode != SPECTRE_V2_IBRS_ENHANCED) {
+       if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_eibrs_mode(mode)) {
                setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
                pr_info("Enabling Restricted Speculation for firmware calls\n");
        }
@@ -1052,6 +1243,10 @@ void cpu_bugs_smt_update(void)
 {
        mutex_lock(&spec_ctrl_mutex);
 
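+       /* Warn once if SMT with unprivileged eBPF undermines eIBRS+LFENCE. */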
+       if (sched_smt_active() && unprivileged_ebpf_enabled() &&
+           spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
+               pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
+
        switch (spectre_v2_user_stibp) {
        case SPECTRE_V2_USER_NONE:
                break;
@@ -1691,7 +1886,7 @@ static ssize_t tsx_async_abort_show_state(char *buf)
 
 static char *stibp_state(void)
 {
-       if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
+       if (spectre_v2_in_eibrs_mode(spectre_v2_enabled))
                return "";
 
        switch (spectre_v2_user_stibp) {
@@ -1721,6 +1916,27 @@ static char *ibpb_state(void)
        return "";
 }
 
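+/* Format the sysfs spectre_v2 string, flagging known-vulnerable combinations. */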
+static ssize_t spectre_v2_show_state(char *buf)
+{
+       if (spectre_v2_enabled == SPECTRE_V2_LFENCE)
+               return sprintf(buf, "Vulnerable: LFENCE\n");
+
+       if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
+               return sprintf(buf, "Vulnerable: eIBRS with unprivileged eBPF\n");
+
+       if (sched_smt_active() && unprivileged_ebpf_enabled() &&
+           spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
+               return sprintf(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");
+
+       return sprintf(buf, "%s%s%s%s%s%s\n",
+                      spectre_v2_strings[spectre_v2_enabled],
+                      ibpb_state(),
+                      boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
+                      stibp_state(),
+                      boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
+                      spectre_v2_module_string());
+}
+
 static ssize_t srbds_show_state(char *buf)
 {
        return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]);
@@ -1746,12 +1962,7 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
                return sprintf(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);
 
        case X86_BUG_SPECTRE_V2:
-               return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
-                              ibpb_state(),
-                              boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
-                              stibp_state(),
-                              boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
-                              spectre_v2_module_string());
+               return spectre_v2_show_state(buf);
 
        case X86_BUG_SPEC_STORE_BYPASS:
                return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);