git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/commitdiff
x86/cpufeatures: Disentangle SSBD enumeration
author    Thomas Gleixner <tglx@linutronix.de>
          Thu, 10 May 2018 18:21:36 +0000 (20:21 +0200)
committer Stefan Bader <stefan.bader@canonical.com>
          Thu, 24 May 2018 07:59:19 +0000 (09:59 +0200)
The SSBD enumeration is, similarly to the other bits, magically shared
between Intel and AMD, though the mechanisms are different.

Make X86_FEATURE_SSBD synthetic and set it depending on the vendor-specific
features or family-dependent setup, as sketched below.
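
For illustration only, a minimal userspace sketch (not the kernel code) of
the idea behind the synthetic bit: one derived "SSBD supported" flag is
computed from whichever vendor-specific CPUID leaf enumerates it. The bit
positions are taken from the Intel SDM (CPUID.(EAX=7,ECX=0):EDX[31]) and the
AMD APM (CPUID 0x80000008 EBX[24]/EBX[25]); the helper name is made up for
the example, and the family-based LS_CFG path is only noted in a comment.

    /* sketch: derive one synthetic SSBD flag from vendor-specific CPUID bits */
    #include <cpuid.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool cpu_enumerates_ssbd(void)
    {
            unsigned int eax, ebx, ecx, edx;

            /* Intel (and newer AMD): CPUID.(EAX=7,ECX=0):EDX bit 31 = SSBD */
            if (__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx) &&
                (edx & (1u << 31)))
                    return true;

            /* AMD: CPUID 0x80000008 EBX bit 24 = SSBD, bit 25 = VIRT_SSBD */
            if (__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx) &&
                (ebx & ((1u << 24) | (1u << 25))))
                    return true;

            /* Older AMD families (0x15-0x17) have no CPUID bit and use the
             * non-architectural LS_CFG MSR instead; that family check lives
             * in bsp_init_amd() and is omitted here. */
            return false;
    }

    int main(void)
    {
            printf("SSBD enumerated: %s\n",
                   cpu_enumerates_ssbd() ? "yes" : "no");
            return 0;
    }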

Change the Intel bit to X86_FEATURE_SPEC_CTRL_SSBD to denote that SSBD is
controlled via MSR_SPEC_CTRL and fix up the usage sites.
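
To make the split concrete, a simplified kernel-style sketch of the two
control paths after this change; the header locations and the
x86_spec_ctrl_base / x86_amd_ls_cfg_* declarations are approximated, and the
SPEC_CTRL path is shown as a plain MSR write rather than the TIF-flag
plumbing the real code uses.

    /* sketch only: which MSR carries the SSBD control after this patch */
    #include <asm/msr.h>
    #include <asm/cpufeature.h>

    static void ssb_disable_sketch(void)
    {
            if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD)) {
                    /* architectural control: SSBD bit (bit 2) of MSR_IA32_SPEC_CTRL */
                    wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base | SPEC_CTRL_SSBD);
            } else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
                    /* legacy AMD control: family-dependent bit in LS_CFG,
                     * mask computed in bsp_init_amd() */
                    wrmsrl(MSR_AMD64_LS_CFG,
                           x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask);
            }
    }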

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
CVE-2018-3639 (x86)

(backported from commit 52817587e706686fcdb27f14c1b000c92f266c96)
Signed-off-by: Tyler Hicks <tyhicks@canonical.com>
Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
arch/x86/include/asm/cpufeatures.h
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/process.c

index c3b406b720cd757716b52512ff1c6a2c50de01bb..b258f4f9031a958bbb50f5e0d51e0b7378fef7a6 100644 (file)
 #define X86_FEATURE_ARCH_CAPABILITIES  ( 7*32+21) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
 #define X86_FEATURE_SSBD               ( 7*32+22) /* Speculative Store Bypass Disable */
 #define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* Disable Speculative Store Bypass. */
-#define X86_FEATURE_AMD_SSBD           ( 7*32+24) /* AMD SSBD implementation */
+#define X86_FEATURE_LS_CFG_SSBD                ( 7*32+24) /* AMD SSBD implementation via LS_CFG MSR */
 #define X86_FEATURE_IBPB               ( 7*32+25) /* Indirect Branch Prediction Barrier */
 #define X86_FEATURE_MSR_SPEC_CTRL      ( 7*32+26) /* "" MSR SPEC_CTRL is implemented */
+#define X86_FEATURE_SPEC_CTRL_SSBD     ( 7*32+27) /* "" Speculative Store Bypass Disable */
 
 /* Virtualization flags: Linux defined, word 8 */
 #define X86_FEATURE_TPR_SHADOW         ( 8*32+ 0) /* Intel TPR Shadow */
index dfa858f887e8364f972d2fca4e87149d7479826a..c736bcb129017fca77963f0f71bc4e20a74fc197 100644 (file)
@@ -560,8 +560,8 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
                 * avoid RMW. If that faults, do not enable SSBD.
                 */
                if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
+                       setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
                        setup_force_cpu_cap(X86_FEATURE_SSBD);
-                       setup_force_cpu_cap(X86_FEATURE_AMD_SSBD);
                        x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
                }
        }
@@ -903,10 +903,8 @@ static void init_amd(struct cpuinfo_x86 *c)
                }
        }
 
-       if (boot_cpu_has(X86_FEATURE_AMD_SSBD)) {
+       if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD))
                set_cpu_cap(c, X86_FEATURE_SSBD);
-               set_cpu_cap(c, X86_FEATURE_AMD_SSBD);
-       }
 }
 
 #ifdef CONFIG_X86_32
index 5010f8c769beb4a2762d1809247b4d7ec05b7472..e2c48f2a04b00737490bf6b59ce1708369be46b3 100644 (file)
@@ -158,8 +158,8 @@ void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl)
        if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
                return;
 
-       /* Intel controls SSB in MSR_SPEC_CTRL */
-       if (static_cpu_has(X86_FEATURE_SPEC_CTRL))
+       /* SSBD controlled in MSR_SPEC_CTRL */
+       if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
                host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
 
        if (host != guest_spec_ctrl)
@@ -175,8 +175,8 @@ void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl)
        if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
                return;
 
-       /* Intel controls SSB in MSR_SPEC_CTRL */
-       if (static_cpu_has(X86_FEATURE_SPEC_CTRL))
+       /* SSBD controlled in MSR_SPEC_CTRL */
+       if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
                host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
 
        if (host != guest_spec_ctrl)
@@ -188,7 +188,7 @@ static void x86_amd_ssb_disable(void)
 {
        u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
 
-       if (boot_cpu_has(X86_FEATURE_AMD_SSBD))
+       if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
                wrmsrl(MSR_AMD64_LS_CFG, msrval);
 }
 
index c40e00240cf14e5e8d94340f935541319f77c40d..d8abf4146c3298588b0ec3a3f22bcaec9bf31bec 100644 (file)
@@ -283,7 +283,7 @@ static __always_inline void __speculative_store_bypass_update(unsigned long tifn
 {
        u64 msr;
 
-       if (static_cpu_has(X86_FEATURE_AMD_SSBD)) {
+       if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
                msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);
                wrmsrl(MSR_AMD64_LS_CFG, msr);
        } else {