x86/speculation: Move arch_smt_update() call to after mitigation decisions
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 24e3ada7f9e5d5c51708737ef855f2eb6bb4c38b..086bb04cf15fef7939e6563d64754c1c6075042e 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
 #include <asm/set_memory.h>
 #include <asm/intel-family.h>
 #include <asm/e820/api.h>
+#include <asm/hypervisor.h>
 
 static void __init spectre_v2_select_mitigation(void);
 static void __init ssb_select_mitigation(void);
 static void __init l1tf_select_mitigation(void);
+static void __init mds_select_mitigation(void);
 
 /* The base value of the SPEC_CTRL MSR that always has to be preserved. */
 u64 x86_spec_ctrl_base;
@@ -53,8 +55,19 @@ static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;
 u64 __ro_after_init x86_amd_ls_cfg_base;
 u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
 
-/* Control conditional STIPB in switch_to() */
+/* Control conditional STIBP in switch_to() */
 DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
+/* Control conditional IBPB in switch_mm() */
+DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
+/* Control unconditional IBPB in switch_mm() */
+DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
+
+/* Control MDS CPU buffer clear before returning to user space */
+DEFINE_STATIC_KEY_FALSE(mds_user_clear);
+EXPORT_SYMBOL_GPL(mds_user_clear);
+/* Control MDS CPU buffer clear before idling (halt, mwait) */
+DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
+EXPORT_SYMBOL_GPL(mds_idle_clear);
 
 void __init check_bugs(void)
 {
@@ -94,6 +107,10 @@ void __init check_bugs(void)
 
        l1tf_select_mitigation();
 
+       mds_select_mitigation();
+
+       arch_smt_update();
+
 #ifdef CONFIG_X86_32
        /*
         * Check whether we are able to run this kernel safely on SMP.
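The two mds_* static keys exported in the hunk above gate VERW based buffer clearing on the kernel exit and idle paths. The consumer helpers live outside this file, in the same series' asm/nospec-branch.h changes; a rough sketch for context, not part of this diff:

	static inline void mds_clear_cpu_buffers(void)
	{
		static const u16 ds = __KERNEL_DS;

		/*
		 * With MD_CLEAR capable microcode, VERW with a memory
		 * operand flushes the CPU buffers as a side effect.
		 */
		asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
	}

	static inline void mds_user_clear_cpu_buffers(void)
	{
		if (static_branch_likely(&mds_user_clear))
			mds_clear_cpu_buffers();
	}
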
@@ -147,6 +164,10 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
                    static_cpu_has(X86_FEATURE_AMD_SSBD))
                        hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
 
+               /* Conditional STIBP enabled? */
+               if (static_branch_unlikely(&switch_to_cond_stibp))
+                       hostval |= stibp_tif_to_spec_ctrl(ti->flags);
+
                if (hostval != guestval) {
                        msrval = setguest ? guestval : hostval;
                        wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
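stibp_tif_to_spec_ctrl() follows the pattern of the existing ssbd_tif_to_spec_ctrl(): it translates the task's TIF_SPEC_IB bit into the SPEC_CTRL_STIBP MSR bit with a plain shift instead of a branch. Roughly, from the companion asm/spec-ctrl.h change (sketch):

	static inline u64 stibp_tif_to_spec_ctrl(u64 tifn)
	{
		BUILD_BUG_ON(TIF_SPEC_IB < SPEC_CTRL_STIBP_SHIFT);
		return (tifn & _TIF_SPEC_IB) >> (TIF_SPEC_IB - SPEC_CTRL_STIBP_SHIFT);
	}
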
@@ -195,6 +216,60 @@ static void x86_amd_ssb_disable(void)
                wrmsrl(MSR_AMD64_LS_CFG, msrval);
 }
 
+#undef pr_fmt
+#define pr_fmt(fmt)    "MDS: " fmt
+
+/* Default mitigation for MDS-affected CPUs */
+static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL;
+static bool mds_nosmt __ro_after_init = false;
+
+static const char * const mds_strings[] = {
+       [MDS_MITIGATION_OFF]    = "Vulnerable",
+       [MDS_MITIGATION_FULL]   = "Mitigation: Clear CPU buffers",
+       [MDS_MITIGATION_VMWERV] = "Vulnerable: Clear CPU buffers attempted, no microcode",
+};
+
+static void mds_select_mitigation(void)
+{
+       if (!boot_cpu_has_bug(X86_BUG_MDS)) {
+               mds_mitigation = MDS_MITIGATION_OFF;
+               return;
+       }
+
+       if (mds_mitigation == MDS_MITIGATION_FULL) {
+               if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
+                       mds_mitigation = MDS_MITIGATION_VMWERV;
+
+               static_branch_enable(&mds_user_clear);
+
+               if (mds_nosmt && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
+                       cpu_smt_disable(false);
+       }
+
+       pr_info("%s\n", mds_strings[mds_mitigation]);
+}
+
+static int __init mds_cmdline(char *str)
+{
+       if (!boot_cpu_has_bug(X86_BUG_MDS))
+               return 0;
+
+       if (!str)
+               return -EINVAL;
+
+       if (!strcmp(str, "off"))
+               mds_mitigation = MDS_MITIGATION_OFF;
+       else if (!strcmp(str, "full"))
+               mds_mitigation = MDS_MITIGATION_FULL;
+       else if (!strcmp(str, "full,nosmt")) {
+               mds_mitigation = MDS_MITIGATION_FULL;
+               mds_nosmt = true;
+       }
+
+       return 0;
+}
+early_param("mds", mds_cmdline);
+
 #undef pr_fmt
 #define pr_fmt(fmt)     "Spectre V2 : " fmt
 
@@ -204,7 +279,7 @@ static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
 static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init =
        SPECTRE_V2_USER_NONE;
 
-#ifdef RETPOLINE
+#ifdef CONFIG_RETPOLINE
 static bool spectre_v2_bad_module;
 
 bool retpoline_module_ok(bool has_retpoline)
@@ -246,11 +321,18 @@ enum spectre_v2_user_cmd {
        SPECTRE_V2_USER_CMD_NONE,
        SPECTRE_V2_USER_CMD_AUTO,
        SPECTRE_V2_USER_CMD_FORCE,
+       SPECTRE_V2_USER_CMD_PRCTL,
+       SPECTRE_V2_USER_CMD_PRCTL_IBPB,
+       SPECTRE_V2_USER_CMD_SECCOMP,
+       SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
 };
 
 static const char * const spectre_v2_user_strings[] = {
-       [SPECTRE_V2_USER_NONE]          = "User space: Vulnerable",
-       [SPECTRE_V2_USER_STRICT]        = "User space: Mitigation: STIBP protection",
+       [SPECTRE_V2_USER_NONE]                  = "User space: Vulnerable",
+       [SPECTRE_V2_USER_STRICT]                = "User space: Mitigation: STIBP protection",
+       [SPECTRE_V2_USER_STRICT_PREFERRED]      = "User space: Mitigation: STIBP always-on protection",
+       [SPECTRE_V2_USER_PRCTL]                 = "User space: Mitigation: STIBP via prctl",
+       [SPECTRE_V2_USER_SECCOMP]               = "User space: Mitigation: STIBP via seccomp and prctl",
 };
 
 static const struct {
@@ -258,9 +340,13 @@ static const struct {
        enum spectre_v2_user_cmd        cmd;
        bool                            secure;
 } v2_user_options[] __initdata = {
-       { "auto",       SPECTRE_V2_USER_CMD_AUTO,       false },
-       { "off",        SPECTRE_V2_USER_CMD_NONE,       false },
-       { "on",         SPECTRE_V2_USER_CMD_FORCE,      true  },
+       { "auto",               SPECTRE_V2_USER_CMD_AUTO,               false },
+       { "off",                SPECTRE_V2_USER_CMD_NONE,               false },
+       { "on",                 SPECTRE_V2_USER_CMD_FORCE,              true  },
+       { "prctl",              SPECTRE_V2_USER_CMD_PRCTL,              false },
+       { "prctl,ibpb",         SPECTRE_V2_USER_CMD_PRCTL_IBPB,         false },
+       { "seccomp",            SPECTRE_V2_USER_CMD_SECCOMP,            false },
+       { "seccomp,ibpb",       SPECTRE_V2_USER_CMD_SECCOMP_IBPB,       false },
 };
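The parser that walks this table, spectre_v2_parse_user_cmdline(), is not visible in this hunk; schematically it fetches the spectre_v2_user= argument and does a match_option() scan over v2_user_options[], falling back to AUTO on anything unrecognized (sketch, not the verbatim function):

	for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
		if (match_option(arg, ret, v2_user_options[i].option)) {
			spec_v2_user_print_cond(v2_user_options[i].option,
						v2_user_options[i].secure);
			return v2_user_options[i].cmd;
		}
	}
	return SPECTRE_V2_USER_CMD_AUTO;
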
 
 static void __init spec_v2_user_print_cond(const char *reason, bool secure)
@@ -306,6 +392,7 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
 {
        enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
        bool smt_possible = IS_ENABLED(CONFIG_SMP);
+       enum spectre_v2_user_cmd cmd;
 
        if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
                return;
@@ -314,25 +401,70 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
            cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
                smt_possible = false;
 
-       switch (spectre_v2_parse_user_cmdline(v2_cmd)) {
-       case SPECTRE_V2_USER_CMD_AUTO:
+       cmd = spectre_v2_parse_user_cmdline(v2_cmd);
+       switch (cmd) {
        case SPECTRE_V2_USER_CMD_NONE:
                goto set_mode;
        case SPECTRE_V2_USER_CMD_FORCE:
                mode = SPECTRE_V2_USER_STRICT;
                break;
+       case SPECTRE_V2_USER_CMD_PRCTL:
+       case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
+               mode = SPECTRE_V2_USER_PRCTL;
+               break;
+       case SPECTRE_V2_USER_CMD_AUTO:
+       case SPECTRE_V2_USER_CMD_SECCOMP:
+       case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
+               if (IS_ENABLED(CONFIG_SECCOMP))
+                       mode = SPECTRE_V2_USER_SECCOMP;
+               else
+                       mode = SPECTRE_V2_USER_PRCTL;
+               break;
        }
 
+       /*
+        * At this point, an STIBP mode other than "off" has been set.
+        * If STIBP support is not being forced, check if STIBP always-on
+        * is preferred.
+        */
+       if (mode != SPECTRE_V2_USER_STRICT &&
+           boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
+               mode = SPECTRE_V2_USER_STRICT_PREFERRED;
+
        /* Initialize Indirect Branch Prediction Barrier */
        if (boot_cpu_has(X86_FEATURE_IBPB)) {
                setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
-               pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n");
+
+               switch (cmd) {
+               case SPECTRE_V2_USER_CMD_FORCE:
+               case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
+               case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
+                       static_branch_enable(&switch_mm_always_ibpb);
+                       break;
+               case SPECTRE_V2_USER_CMD_PRCTL:
+               case SPECTRE_V2_USER_CMD_AUTO:
+               case SPECTRE_V2_USER_CMD_SECCOMP:
+                       static_branch_enable(&switch_mm_cond_ibpb);
+                       break;
+               default:
+                       break;
+               }
+
+               pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
+                       static_key_enabled(&switch_mm_always_ibpb) ?
+                       "always-on" : "conditional");
        }
 
-       /* If enhanced IBRS is enabled no STIPB required */
+       /* If enhanced IBRS is enabled no STIBP required */
        if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
                return;
 
+       /*
+        * If SMT is not possible or STIBP is not available clear the STIBP
+        * mode.
+        */
+       if (!smt_possible || !boot_cpu_has(X86_FEATURE_STIBP))
+               mode = SPECTRE_V2_USER_NONE;
 set_mode:
        spectre_v2_user = mode;
        /* Only print the STIBP mode when SMT possible */
@@ -504,45 +636,87 @@ specv2_set_mode:
 
        /* Set up IBPB and STIBP depending on the general spectre V2 command */
        spectre_v2_user_select_mitigation(cmd);
+}
 
-       /* Enable STIBP if appropriate */
-       arch_smt_update();
+static void update_stibp_msr(void * __unused)
+{
+       wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
 }
 
-static bool stibp_needed(void)
+/* Update x86_spec_ctrl_base in case SMT state changed. */
+static void update_stibp_strict(void)
 {
-       /* Enhanced IBRS makes using STIBP unnecessary. */
-       if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
-               return false;
+       u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;
+
+       if (sched_smt_active())
+               mask |= SPEC_CTRL_STIBP;
 
-       /* Check for strict user mitigation mode */
-       return spectre_v2_user == SPECTRE_V2_USER_STRICT;
+       if (mask == x86_spec_ctrl_base)
+               return;
+
+       pr_info("Update user space SMT mitigation: STIBP %s\n",
+               mask & SPEC_CTRL_STIBP ? "always-on" : "off");
+       x86_spec_ctrl_base = mask;
+       on_each_cpu(update_stibp_msr, NULL, 1);
 }
 
-static void update_stibp_msr(void *info)
+/* Update the static key controlling the evaluation of TIF_SPEC_IB */
+static void update_indir_branch_cond(void)
 {
-       wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+       if (sched_smt_active())
+               static_branch_enable(&switch_to_cond_stibp);
+       else
+               static_branch_disable(&switch_to_cond_stibp);
 }
 
-void arch_smt_update(void)
+/* Update the static key controlling the MDS CPU buffer clear in idle */
+static void update_mds_branch_idle(void)
 {
-       u64 mask;
+       /*
+        * Enable the idle clearing on CPUs which are affected only by
+        * MSBDS and not any other MDS variant. The other variants cannot
+        * be mitigated when SMT is enabled, so clearing the buffers on
+        * idle would be a window dressing exercise.
+        */
+       if (!boot_cpu_has(X86_BUG_MSBDS_ONLY))
+               return;
+
+       if (sched_smt_active())
+               static_branch_enable(&mds_idle_clear);
+       else
+               static_branch_disable(&mds_idle_clear);
+}
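
The mds_idle_clear key toggled here is checked on the idle entry paths elsewhere in the series; roughly, from the companion asm/nospec-branch.h and asm/irqflags.h changes (sketch, not part of this diff):

	static inline void mds_idle_clear_cpu_buffers(void)
	{
		if (static_branch_likely(&mds_idle_clear))
			mds_clear_cpu_buffers();
	}

	static inline __cpuidle void native_safe_halt(void)
	{
		mds_idle_clear_cpu_buffers();
		asm volatile("sti; hlt" : : : "memory");
	}
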
 
-       if (!stibp_needed())
+void arch_smt_update(void)
+{
+       /* Enhanced IBRS implies STIBP. No update required. */
+       if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
                return;
 
        mutex_lock(&spec_ctrl_mutex);
 
-       mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;
-       if (sched_smt_active())
-               mask |= SPEC_CTRL_STIBP;
+       switch (spectre_v2_user) {
+       case SPECTRE_V2_USER_NONE:
+               break;
+       case SPECTRE_V2_USER_STRICT:
+       case SPECTRE_V2_USER_STRICT_PREFERRED:
+               update_stibp_strict();
+               break;
+       case SPECTRE_V2_USER_PRCTL:
+       case SPECTRE_V2_USER_SECCOMP:
+               update_indir_branch_cond();
+               break;
+       }
 
-       if (mask != x86_spec_ctrl_base) {
-               pr_info("Spectre v2 cross-process SMT mitigation: %s STIBP\n",
-                       mask & SPEC_CTRL_STIBP ? "Enabling" : "Disabling");
-               x86_spec_ctrl_base = mask;
-               on_each_cpu(update_stibp_msr, NULL, 1);
+       switch (mds_mitigation) {
+       case MDS_MITIGATION_FULL:
+       case MDS_MITIGATION_VMWERV:
+               update_mds_branch_idle();
+               break;
+       case MDS_MITIGATION_OFF:
+               break;
        }
+
        mutex_unlock(&spec_ctrl_mutex);
 }
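
Besides the new explicit call from check_bugs() above, arch_smt_update() overrides a weak stub and is invoked by the SMT hotplug control code whenever the SMT state flips; approximately (sketch of the kernel/cpu.c side):

	/* kernel/cpu.c (sketch) */
	void __weak arch_smt_update(void) { }

	static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
	{
		...
		cpu_smt_control = ctrlval;
		arch_smt_update();
		...
	}
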
 
@@ -681,10 +855,25 @@ static void ssb_select_mitigation(void)
 #undef pr_fmt
 #define pr_fmt(fmt)     "Speculation prctl: " fmt
 
-static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
+static void task_update_spec_tif(struct task_struct *tsk)
 {
-       bool update;
+       /* Force the update of the real TIF bits */
+       set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);
 
+       /*
+        * Immediately update the speculation control MSRs for the current
+        * task, but for a non-current task delay setting the CPU
+        * mitigation until it is scheduled next.
+        *
+        * This can only happen for SECCOMP mitigation. For PRCTL it's
+        * always the current task.
+        */
+       if (tsk == current)
+               speculation_ctrl_update_current();
+}
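
TIF_SPEC_FORCE_UPDATE is picked up on the next context switch; the companion change in arch/x86/kernel/process.c folds the task's spec flags back into the real TIF bits, roughly along these lines (sketch):

	static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk)
	{
		if (test_and_clear_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE)) {
			if (task_spec_ssb_disable(tsk))
				set_tsk_thread_flag(tsk, TIF_SSBD);
			else
				clear_tsk_thread_flag(tsk, TIF_SSBD);

			if (task_spec_ib_disable(tsk))
				set_tsk_thread_flag(tsk, TIF_SPEC_IB);
			else
				clear_tsk_thread_flag(tsk, TIF_SPEC_IB);
		}
		/* Return the updated threadinfo flags */
		return task_thread_info(tsk)->flags;
	}
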
+
+static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
+{
        if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
            ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
                return -ENXIO;
@@ -695,28 +884,58 @@ static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
                if (task_spec_ssb_force_disable(task))
                        return -EPERM;
                task_clear_spec_ssb_disable(task);
-               update = test_and_clear_tsk_thread_flag(task, TIF_SSBD);
+               task_update_spec_tif(task);
                break;
        case PR_SPEC_DISABLE:
                task_set_spec_ssb_disable(task);
-               update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
+               task_update_spec_tif(task);
                break;
        case PR_SPEC_FORCE_DISABLE:
                task_set_spec_ssb_disable(task);
                task_set_spec_ssb_force_disable(task);
-               update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
+               task_update_spec_tif(task);
                break;
        default:
                return -ERANGE;
        }
+       return 0;
+}
 
-       /*
-        * If being set on non-current task, delay setting the CPU
-        * mitigation until it is next scheduled.
-        */
-       if (task == current && update)
-               speculation_ctrl_update_current();
-
+static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
+{
+       switch (ctrl) {
+       case PR_SPEC_ENABLE:
+               if (spectre_v2_user == SPECTRE_V2_USER_NONE)
+                       return 0;
+               /*
+                * Indirect branch speculation is always disabled in strict
+                * mode.
+                */
+               if (spectre_v2_user == SPECTRE_V2_USER_STRICT ||
+                   spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED)
+                       return -EPERM;
+               task_clear_spec_ib_disable(task);
+               task_update_spec_tif(task);
+               break;
+       case PR_SPEC_DISABLE:
+       case PR_SPEC_FORCE_DISABLE:
+               /*
+                * Indirect branch speculation is always allowed when
+                * mitigation is force disabled.
+                */
+               if (spectre_v2_user == SPECTRE_V2_USER_NONE)
+                       return -EPERM;
+               if (spectre_v2_user == SPECTRE_V2_USER_STRICT ||
+                   spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED)
+                       return 0;
+               task_set_spec_ib_disable(task);
+               if (ctrl == PR_SPEC_FORCE_DISABLE)
+                       task_set_spec_ib_force_disable(task);
+               task_update_spec_tif(task);
+               break;
+       default:
+               return -ERANGE;
+       }
        return 0;
 }
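
From user space the new control is reached through the existing speculation prctl pair. A minimal caller, assuming a libc that exposes the PR_SPEC_* constants via <linux/prctl.h>, could look like this:

	#include <stdio.h>
	#include <sys/prctl.h>
	#include <linux/prctl.h>

	int main(void)
	{
		/* Opt this task out of indirect branch speculation. */
		if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
			  PR_SPEC_DISABLE, 0, 0))
			perror("PR_SET_SPECULATION_CTRL");

		/* Read the state back; this maps to ib_prctl_get() below. */
		printf("state: 0x%x\n",
		       (int)prctl(PR_GET_SPECULATION_CTRL,
				  PR_SPEC_INDIRECT_BRANCH, 0, 0, 0));
		return 0;
	}

Note that in the strict modes PR_SPEC_ENABLE fails with -EPERM, while PR_SPEC_DISABLE is a no-op returning 0, matching ib_prctl_set() above.
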
 
@@ -726,6 +945,8 @@ int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
        switch (which) {
        case PR_SPEC_STORE_BYPASS:
                return ssb_prctl_set(task, ctrl);
+       case PR_SPEC_INDIRECT_BRANCH:
+               return ib_prctl_set(task, ctrl);
        default:
                return -ENODEV;
        }
@@ -736,6 +957,8 @@ void arch_seccomp_spec_mitigate(struct task_struct *task)
 {
        if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
                ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
+       if (spectre_v2_user == SPECTRE_V2_USER_SECCOMP)
+               ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
 }
 #endif
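
The seccomp hook above is driven from kernel/seccomp.c: unless a filter is installed with SECCOMP_FILTER_FLAG_SPEC_ALLOW, seccomp_assign_mode() calls arch_seccomp_spec_mitigate() for the task, which now force-disables both speculative store bypass and indirect branch speculation. Schematically:

	/* kernel/seccomp.c (sketch) */
	if ((flags & SECCOMP_FILTER_FLAG_SPEC_ALLOW) == 0)
		arch_seccomp_spec_mitigate(task);
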
 
@@ -758,11 +981,36 @@ static int ssb_prctl_get(struct task_struct *task)
        }
 }
 
+static int ib_prctl_get(struct task_struct *task)
+{
+       if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
+               return PR_SPEC_NOT_AFFECTED;
+
+       switch (spectre_v2_user) {
+       case SPECTRE_V2_USER_NONE:
+               return PR_SPEC_ENABLE;
+       case SPECTRE_V2_USER_PRCTL:
+       case SPECTRE_V2_USER_SECCOMP:
+               if (task_spec_ib_force_disable(task))
+                       return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
+               if (task_spec_ib_disable(task))
+                       return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
+               return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
+       case SPECTRE_V2_USER_STRICT:
+       case SPECTRE_V2_USER_STRICT_PREFERRED:
+               return PR_SPEC_DISABLE;
+       default:
+               return PR_SPEC_NOT_AFFECTED;
+       }
+}
+
 int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
 {
        switch (which) {
        case PR_SPEC_STORE_BYPASS:
                return ssb_prctl_get(task);
+       case PR_SPEC_INDIRECT_BRANCH:
+               return ib_prctl_get(task);
        default:
                return -ENODEV;
        }
@@ -859,6 +1107,10 @@ static void __init l1tf_select_mitigation(void)
        half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
        if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
                pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
+               pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
+                               half_pa);
+               pr_info("However, doing so will make a part of your RAM unusable.\n");
+               pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html might help you decide.\n");
                return;
        }
 
@@ -929,6 +1181,22 @@ static ssize_t l1tf_show_state(char *buf)
 }
 #endif
 
+static ssize_t mds_show_state(char *buf)
+{
+       if (!hypervisor_is_type(X86_HYPER_NATIVE)) {
+               return sprintf(buf, "%s; SMT Host state unknown\n",
+                              mds_strings[mds_mitigation]);
+       }
+
+       if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) {
+               return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
+                              sched_smt_active() ? "mitigated" : "disabled");
+       }
+
+       return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
+                      sched_smt_active() ? "vulnerable" : "disabled");
+}
+
 static char *stibp_state(void)
 {
        if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
@@ -939,16 +1207,26 @@ static char *stibp_state(void)
                return ", STIBP: disabled";
        case SPECTRE_V2_USER_STRICT:
                return ", STIBP: forced";
+       case SPECTRE_V2_USER_STRICT_PREFERRED:
+               return ", STIBP: always-on";
+       case SPECTRE_V2_USER_PRCTL:
+       case SPECTRE_V2_USER_SECCOMP:
+               if (static_key_enabled(&switch_to_cond_stibp))
+                       return ", STIBP: conditional";
        }
        return "";
 }
 
 static char *ibpb_state(void)
 {
-       if (boot_cpu_has(X86_FEATURE_USE_IBPB))
-               return ", IBPB";
-       else
-               return "";
+       if (boot_cpu_has(X86_FEATURE_IBPB)) {
+               if (static_key_enabled(&switch_mm_always_ibpb))
+                       return ", IBPB: always-on";
+               if (static_key_enabled(&switch_mm_cond_ibpb))
+                       return ", IBPB: conditional";
+               return ", IBPB: disabled";
+       }
+       return "";
 }
 
 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
@@ -982,6 +1260,10 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
                if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
                        return l1tf_show_state(buf);
                break;
+
+       case X86_BUG_MDS:
+               return mds_show_state(buf);
+
        default:
                break;
        }
@@ -1013,4 +1295,9 @@ ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *b
 {
        return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
 }
+
+ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
+}
 #endif
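
cpu_show_mds() pairs with a device attribute added in drivers/base/cpu.c by the same series, which is what makes /sys/devices/system/cpu/vulnerabilities/mds appear; roughly (sketch):

	/* drivers/base/cpu.c (sketch) */
	static DEVICE_ATTR(mds, 0444, cpu_show_mds, NULL);

	static struct attribute *cpu_root_vulnerabilities_attrs[] = {
		...
		&dev_attr_mds.attr,
		NULL
	};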