x86/cpufeatures: Disentangle MSR_SPEC_CTRL enumeration from IBRS
author		Thomas Gleixner <tglx@linutronix.de>
		Thu, 10 May 2018 17:13:18 +0000 (19:13 +0200)
committer	Stefan Bader <stefan.bader@canonical.com>
		Thu, 24 May 2018 07:59:19 +0000 (09:59 +0200)
The availability of the SPEC_CTRL MSR is enumerated by a CPUID bit on
Intel and implied by IBRS or STIBP support on AMD. That's confusing, and
if an AMD CPU does not support IBRS because the underlying problem has
been fixed, but still implements another valid bit in the SPEC_CTRL MSR,
the scheme falls apart.
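
For reference, the two enumeration schemes can be probed from userspace;
a minimal sketch using GCC's <cpuid.h> helpers (illustration only, not
part of the patch; bit positions per the Intel SDM and AMD APM):

	#include <stdio.h>
	#include <cpuid.h>

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx;

		/* Intel: CPUID.(EAX=7,ECX=0):EDX[26] enumerates IBRS/IBPB,
		 * i.e. the SPEC_CTRL MSR; EDX[27] enumerates STIBP. */
		if (__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
			printf("SPEC_CTRL: %u STIBP: %u\n",
			       (edx >> 26) & 1, (edx >> 27) & 1);

		/* AMD: CPUID 0x80000008:EBX[14] (IBRS) and EBX[15] (STIBP)
		 * merely imply that the SPEC_CTRL MSR exists. */
		if (__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx))
			printf("AMD IBRS: %u AMD STIBP: %u\n",
			       (ebx >> 14) & 1, (ebx >> 15) & 1);
		return 0;
	}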

Add a synthetic feature bit X86_FEATURE_MSR_SPEC_CTRL to denote the
availability on both Intel and AMD.
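
Condensed from the hunks below: each vendor init path sets the single
synthetic bit, and generic code tests only that bit (kernel-style
sketch, not literal patch context):

	/* vendor init, amd.c / intel.c, once the MSR is known to exist: */
	set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);

	/* vendor-neutral consumer, bugs.c: */
	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);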

While at it, replace the boot_cpu_has() checks with static_cpu_has() where
possible. This prevents late microcode loading from exposing SPEC_CTRL, but
late loading is already very limited as it does not reevaluate the
mitigation options and other bits and pieces. Using static_cpu_has() is
the simplest and least fragile solution.
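
For context: boot_cpu_has() reads the capability bitmap at runtime,
while static_cpu_has() compiles into alternatives-patched code that is
fixed up once during boot, so capability bits set afterwards (e.g. by
late microcode loading) are never observed. Sketch of the resulting
hot path (illustrative; the wrmsrl() call and val are placeholders):

	/*
	 * After boot-time alternatives patching this test becomes an
	 * unconditional jump or fallthrough: no bitmap load and no
	 * conditional branch at runtime.
	 */
	if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
		wrmsrl(MSR_IA32_SPEC_CTRL, val);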

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
CVE-2018-3639 (x86)

(backported from commit 7eb8956a7fec3c1f0abc2a5517dada99ccc8a961)
Signed-off-by: Tyler Hicks <tyhicks@canonical.com>
Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
arch/x86/include/asm/cpufeatures.h
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/cpu/intel.c

diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 18e3d1d6160ecca430bd49d82efdfa7e0c05f468..c3b406b720cd757716b52512ff1c6a2c50de01bb 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
 #define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* Disable Speculative Store Bypass. */
 #define X86_FEATURE_AMD_SSBD           ( 7*32+24) /* AMD SSBD implementation */
 #define X86_FEATURE_IBPB               ( 7*32+25) /* Indirect Branch Prediction Barrier */
+#define X86_FEATURE_MSR_SPEC_CTRL      ( 7*32+26) /* "" MSR SPEC_CTRL is implemented */
 
 /* Virtualization flags: Linux defined, word 8 */
 #define X86_FEATURE_TPR_SHADOW         ( 8*32+ 0) /* Intel TPR Shadow */
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 04bd64a7ae6a3137f185a0ca7fe3b8354d52ffb8..dfa858f887e8364f972d2fca4e87149d7479826a 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -872,6 +872,7 @@ static void init_amd(struct cpuinfo_x86 *c)
                        sysctl_ibrs_enabled = 1;
                if (ibpb_inuse)
                        sysctl_ibpb_enabled = 1;
+               set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
        } else if (cpu_has(c, X86_FEATURE_AMD_IBPB)) {
                pr_info_once("FEATURE SPEC_CTRL Not Present\n");
                pr_info_once("FEATURE IBPB Present\n");
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 765ada5ce042dbbbe8b21d50ee0b3c74a262569e..5010f8c769beb4a2762d1809247b4d7ec05b7472 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -63,7 +63,7 @@ void __init check_bugs(void)
         * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
         * init code as it is not enumerated and depends on the family.
         */
-       if (ibrs_inuse)
+       if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
                rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
 
        /* Select the proper spectre mitigation before patching alternatives */
@@ -144,7 +144,7 @@ u64 x86_spec_ctrl_get_default(void)
 {
        u64 msrval = x86_spec_ctrl_base;
 
-       if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+       if (static_cpu_has(X86_FEATURE_SPEC_CTRL))
                msrval |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
        return msrval;
 }
@@ -154,10 +154,12 @@ void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl)
 {
        u64 host = x86_spec_ctrl_base;
 
-       if (!ibrs_inuse)
+       /* Is MSR_SPEC_CTRL implemented ? */
+       if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
                return;
 
-       if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+       /* Intel controls SSB in MSR_SPEC_CTRL */
+       if (static_cpu_has(X86_FEATURE_SPEC_CTRL))
                host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
 
        if (host != guest_spec_ctrl)
@@ -169,10 +171,12 @@ void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl)
 {
        u64 host = x86_spec_ctrl_base;
 
-       if (!ibrs_inuse)
+       /* Is MSR_SPEC_CTRL implemented ? */
+       if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
                return;
 
-       if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+       /* Intel controls SSB in MSR_SPEC_CTRL */
+       if (static_cpu_has(X86_FEATURE_SPEC_CTRL))
                host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
 
        if (host != guest_spec_ctrl)
@@ -590,7 +594,7 @@ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
 
 void x86_spec_ctrl_setup_ap(void)
 {
-       if (ibrs_inuse)
+       if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
                x86_spec_ctrl_set(x86_spec_ctrl_base & ~x86_spec_ctrl_mask);
 
        if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 8d558e24783cc527ebe25d15e78f9601218ecc49..8fa91ad1610b47411ff567d03d62088bde4c8034 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -637,6 +637,7 @@ static void init_intel(struct cpuinfo_x86 *c)
                                sysctl_ibrs_enabled = 1;
                        if (ibpb_inuse)
                                sysctl_ibpb_enabled = 1;
+                       set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
                } else {
                        printk(KERN_INFO "FEATURE SPEC_CTRL Not Present\n");
                }