x86/cpufeatures: Disentangle SSBD enumeration
arch/x86/kernel/cpu/bugs.c
index ec06501ec783986bd44ed304640d4285d02a0f15..e2c48f2a04b00737490bf6b59ce1708369be46b3 100644
@@ -44,10 +44,10 @@ static u64 __ro_after_init x86_spec_ctrl_mask = ~SPEC_CTRL_IBRS;
 
 /*
  * AMD specific MSR info for Speculative Store Bypass control.
- * x86_amd_ls_cfg_rds_mask is initialized in identify_boot_cpu().
+ * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
  */
 u64 __ro_after_init x86_amd_ls_cfg_base;
-u64 __ro_after_init x86_amd_ls_cfg_rds_mask;
+u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
 
 void __init check_bugs(void)
 {
@@ -63,7 +63,7 @@ void __init check_bugs(void)
         * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
         * init code as it is not enumerated and depends on the family.
         */
-       if (ibrs_inuse)
+       if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
                rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
 
        /* Select the proper spectre mitigation before patching alternatives */
@@ -128,7 +128,8 @@ static const char *spectre_v2_strings[] = {
 #undef pr_fmt
 #define pr_fmt(fmt)     "Spectre V2 mitigation: " fmt
 
-static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;
+static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
+       SPECTRE_V2_NONE;
 
 void x86_spec_ctrl_set(u64 val)
 {
@@ -143,8 +144,8 @@ u64 x86_spec_ctrl_get_default(void)
 {
        u64 msrval = x86_spec_ctrl_base;
 
-       if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
-               msrval |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
+       if (static_cpu_has(X86_FEATURE_SPEC_CTRL))
+               msrval |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
        return msrval;
 }
 EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);
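
ssbd_tif_to_spec_ctrl() itself is not part of this diff; a minimal sketch of the TIF-to-MSR-bit conversion it performs, assuming TIF_SSBD sits above SPEC_CTRL_SSBD_SHIFT as in the upstream headers:

static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn)
{
        /* Move the per-task TIF_SSBD flag down to the SPEC_CTRL_SSBD MSR bit */
        return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
}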
@@ -153,11 +154,13 @@ void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl)
 {
        u64 host = x86_spec_ctrl_base;
 
-       if (!ibrs_inuse)
+       /* Is MSR_SPEC_CTRL implemented ? */
+       if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
                return;
 
-       if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
-               host |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
+       /* SSBD controlled in MSR_SPEC_CTRL */
+       if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
+               host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
 
        if (host != guest_spec_ctrl)
                wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);
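
x86_spec_ctrl_set_guest() is paired with x86_spec_ctrl_restore_host() (next hunk) around the actual VM entry; a hedged sketch of that call pattern in a hypervisor run loop, where the vcpu type and enter_guest() are illustrative rather than the real KVM code:

static void run_vcpu_once(struct my_vcpu *vcpu)
{
        /* Load the guest's SPEC_CTRL view before entering the guest */
        x86_spec_ctrl_set_guest(vcpu->spec_ctrl);

        enter_guest(vcpu);              /* hypothetical VM entry */

        /* Put the host value (including host SSBD state) back on exit */
        x86_spec_ctrl_restore_host(vcpu->spec_ctrl);
}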
@@ -168,22 +171,24 @@ void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl)
 {
        u64 host = x86_spec_ctrl_base;
 
-       if (!ibrs_inuse)
+       /* Is MSR_SPEC_CTRL implemented ? */
+       if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
                return;
 
-       if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
-               host |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
+       /* SSBD controlled in MSR_SPEC_CTRL */
+       if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
+               host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
 
        if (host != guest_spec_ctrl)
                wrmsrl(MSR_IA32_SPEC_CTRL, host);
 }
 EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host);
 
-static void x86_amd_rds_enable(void)
+static void x86_amd_ssb_disable(void)
 {
-       u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_rds_mask;
+       u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
 
-       if (boot_cpu_has(X86_FEATURE_AMD_RDS))
+       if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
                wrmsrl(MSR_AMD64_LS_CFG, msrval);
 }
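
x86_amd_ls_cfg_base and x86_amd_ls_cfg_ssbd_mask are filled in by the early AMD init code mentioned in the comment near the top of the file; a sketch of that setup, with the family-specific bit selection left behind a hypothetical helper:

static void __init amd_cache_ls_cfg(struct cpuinfo_x86 *c)
{
        /* amd_ls_cfg_ssbd_bit() is a hypothetical stand-in for the
         * per-family choice of the LS_CFG disable bit. */
        unsigned int bit = amd_ls_cfg_ssbd_bit(c);

        /* Cache the base value so later updates avoid a read-modify-write */
        if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
                x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
                setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
        }
}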
 
@@ -366,7 +371,7 @@ retpoline_auto:
 #undef pr_fmt
 #define pr_fmt(fmt)    "Speculative Store Bypass: " fmt
 
-static enum ssb_mitigation ssb_mode = SPEC_STORE_BYPASS_NONE;
+static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;
 
 /* The kernel command line selection */
 enum ssb_mitigation_cmd {
@@ -374,22 +379,25 @@ enum ssb_mitigation_cmd {
        SPEC_STORE_BYPASS_CMD_AUTO,
        SPEC_STORE_BYPASS_CMD_ON,
        SPEC_STORE_BYPASS_CMD_PRCTL,
+       SPEC_STORE_BYPASS_CMD_SECCOMP,
 };
 
 static const char *ssb_strings[] = {
        [SPEC_STORE_BYPASS_NONE]        = "Vulnerable",
        [SPEC_STORE_BYPASS_DISABLE]     = "Mitigation: Speculative Store Bypass disabled",
-       [SPEC_STORE_BYPASS_PRCTL]       = "Mitigation: Speculative Store Bypass disabled via prctl"
+       [SPEC_STORE_BYPASS_PRCTL]       = "Mitigation: Speculative Store Bypass disabled via prctl",
+       [SPEC_STORE_BYPASS_SECCOMP]     = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
 };
 
 static const struct {
        const char *option;
        enum ssb_mitigation_cmd cmd;
 } ssb_mitigation_options[] = {
-       { "auto",       SPEC_STORE_BYPASS_CMD_AUTO },  /* Platform decides */
-       { "on",         SPEC_STORE_BYPASS_CMD_ON },    /* Disable Speculative Store Bypass */
-       { "off",        SPEC_STORE_BYPASS_CMD_NONE },  /* Don't touch Speculative Store Bypass */
-       { "prctl",      SPEC_STORE_BYPASS_CMD_PRCTL }, /* Disable Speculative Store Bypass via prctl */
+       { "auto",       SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
+       { "on",         SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
+       { "off",        SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
+       { "prctl",      SPEC_STORE_BYPASS_CMD_PRCTL },   /* Disable Speculative Store Bypass via prctl */
+       { "seccomp",    SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
 };
 
 static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
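
The new "seccomp" entry is selected with spec_store_bypass_disable=seccomp on the kernel command line; a minimal sketch of matching the parsed argument against the table above (lookup_ssb_cmd() is hypothetical, the real parsing lives in ssb_parse_cmdline()):

static enum ssb_mitigation_cmd lookup_ssb_cmd(const char *arg, int len)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
                /* Exact, length-checked match against the option string */
                if (len == strlen(ssb_mitigation_options[i].option) &&
                    !strncmp(arg, ssb_mitigation_options[i].option, len))
                        return ssb_mitigation_options[i].cmd;
        }
        return SPEC_STORE_BYPASS_CMD_AUTO;
}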
@@ -423,12 +431,12 @@ static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
        return cmd;
 }
 
-static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void)
+static enum ssb_mitigation __init __ssb_select_mitigation(void)
 {
        enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
        enum ssb_mitigation_cmd cmd;
 
-       if (!boot_cpu_has(X86_FEATURE_RDS))
+       if (!boot_cpu_has(X86_FEATURE_SSBD))
                return mode;
 
        cmd = ssb_parse_cmdline();
@@ -439,8 +447,15 @@ static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void)
 
        switch (cmd) {
        case SPEC_STORE_BYPASS_CMD_AUTO:
-               /* Choose prctl as the default mode */
-               mode = SPEC_STORE_BYPASS_PRCTL;
+       case SPEC_STORE_BYPASS_CMD_SECCOMP:
+               /*
+                * Choose prctl+seccomp as the default mode if seccomp is
+                * enabled.
+                */
+               if (IS_ENABLED(CONFIG_SECCOMP))
+                       mode = SPEC_STORE_BYPASS_SECCOMP;
+               else
+                       mode = SPEC_STORE_BYPASS_PRCTL;
                break;
        case SPEC_STORE_BYPASS_CMD_ON:
                mode = SPEC_STORE_BYPASS_DISABLE;
@@ -455,7 +470,7 @@ static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void)
        /*
         * We have three CPU feature flags that are in play here:
         *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
-        *  - X86_FEATURE_RDS - CPU is able to turn off speculative store bypass
+        *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
         *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
         */
        if (mode == SPEC_STORE_BYPASS_DISABLE) {
@@ -466,12 +481,12 @@ static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void)
                 */
                switch (boot_cpu_data.x86_vendor) {
                case X86_VENDOR_INTEL:
-                       x86_spec_ctrl_base |= SPEC_CTRL_RDS;
-                       x86_spec_ctrl_mask &= ~SPEC_CTRL_RDS;
-                       x86_spec_ctrl_set(SPEC_CTRL_RDS);
+                       x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
+                       x86_spec_ctrl_mask &= ~SPEC_CTRL_SSBD;
+                       x86_spec_ctrl_set(SPEC_CTRL_SSBD);
                        break;
                case X86_VENDOR_AMD:
-                       x86_amd_rds_enable();
+                       x86_amd_ssb_disable();
                        break;
                }
        }
@@ -479,7 +494,7 @@ static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void)
        return mode;
 }
 
-static void ssb_select_mitigation()
+static void ssb_select_mitigation(void)
 {
        ssb_mode = __ssb_select_mitigation();
 
@@ -488,32 +503,76 @@ static void ssb_select_mitigation()
 }
 
 #undef pr_fmt
+#define pr_fmt(fmt)     "Speculation prctl: " fmt
 
-static int ssb_prctl_set(unsigned long ctrl)
+static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
 {
-       bool rds = !!test_tsk_thread_flag(current, TIF_RDS);
+       bool update;
 
-       if (ssb_mode != SPEC_STORE_BYPASS_PRCTL)
+       if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
+           ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
                return -ENXIO;
 
-       if (ctrl == PR_SPEC_ENABLE)
-               clear_tsk_thread_flag(current, TIF_RDS);
-       else
-               set_tsk_thread_flag(current, TIF_RDS);
+       switch (ctrl) {
+       case PR_SPEC_ENABLE:
+               /* If speculation is force disabled, enable is not allowed */
+               if (task_spec_ssb_force_disable(task))
+                       return -EPERM;
+               task_clear_spec_ssb_disable(task);
+               update = test_and_clear_tsk_thread_flag(task, TIF_SSBD);
+               break;
+       case PR_SPEC_DISABLE:
+               task_set_spec_ssb_disable(task);
+               update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
+               break;
+       case PR_SPEC_FORCE_DISABLE:
+               task_set_spec_ssb_disable(task);
+               task_set_spec_ssb_force_disable(task);
+               update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
+               break;
+       default:
+               return -ERANGE;
+       }
 
-       if (rds != !!test_tsk_thread_flag(current, TIF_RDS))
+       /*
+        * If being set on non-current task, delay setting the CPU
+        * mitigation until it is next scheduled.
+        */
+       if (task == current && update)
                speculative_store_bypass_update();
 
        return 0;
 }
 
-static int ssb_prctl_get(void)
+int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
+                            unsigned long ctrl)
+{
+       switch (which) {
+       case PR_SPEC_STORE_BYPASS:
+               return ssb_prctl_set(task, ctrl);
+       default:
+               return -ENODEV;
+       }
+}
+
+#ifdef CONFIG_SECCOMP
+void arch_seccomp_spec_mitigate(struct task_struct *task)
+{
+       if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
+               ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
+}
+#endif
+
+static int ssb_prctl_get(struct task_struct *task)
 {
        switch (ssb_mode) {
        case SPEC_STORE_BYPASS_DISABLE:
                return PR_SPEC_DISABLE;
+       case SPEC_STORE_BYPASS_SECCOMP:
        case SPEC_STORE_BYPASS_PRCTL:
-               if (test_tsk_thread_flag(current, TIF_RDS))
+               if (task_spec_ssb_force_disable(task))
+                       return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
+               if (task_spec_ssb_disable(task))
                        return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
                return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
        default:
@@ -523,24 +582,11 @@ static int ssb_prctl_get(void)
        }
 }
 
-int arch_prctl_spec_ctrl_set(unsigned long which, unsigned long ctrl)
-{
-       if (ctrl != PR_SPEC_ENABLE && ctrl != PR_SPEC_DISABLE)
-               return -ERANGE;
-
-       switch (which) {
-       case PR_SPEC_STORE_BYPASS:
-               return ssb_prctl_set(ctrl);
-       default:
-               return -ENODEV;
-       }
-}
-
-int arch_prctl_spec_ctrl_get(unsigned long which)
+int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
 {
        switch (which) {
        case PR_SPEC_STORE_BYPASS:
-               return ssb_prctl_get();
+               return ssb_prctl_get(task);
        default:
                return -ENODEV;
        }
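
With the per-task interface above, a task opts into the mitigation (in prctl or seccomp mode) via prctl(); a minimal userspace sketch, assuming PR_SET/GET_SPECULATION_CTRL and the PR_SPEC_* constants are available from sufficiently new uapi headers:

#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
        /* Force-disable speculative store bypass for this task; this maps
         * to ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE) above. */
        if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
                  PR_SPEC_FORCE_DISABLE, 0, 0))
                perror("PR_SET_SPECULATION_CTRL");

        /* Query the state; the returned bits come from ssb_prctl_get() */
        printf("ssb state: 0x%x\n",
               prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0));
        return 0;
}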
@@ -548,16 +594,16 @@ int arch_prctl_spec_ctrl_get(unsigned long which)
 
 void x86_spec_ctrl_setup_ap(void)
 {
-       if (ibrs_inuse)
+       if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
                x86_spec_ctrl_set(x86_spec_ctrl_base & ~x86_spec_ctrl_mask);
 
        if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
-               x86_amd_rds_enable();
+               x86_amd_ssb_disable();
 }
 
 #ifdef CONFIG_SYSFS
-ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
-                       char *buf, unsigned int bug)
+static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
+                              char *buf, unsigned int bug)
 {
        if (!boot_cpu_has_bug(bug))
                return sprintf(buf, "Not affected\n");