x86/speculation: Consolidate CPU whitelists
author     Thomas Gleixner <tglx@linutronix.de>
           Wed, 27 Feb 2019 09:10:23 +0000 (10:10 +0100)
committer  Stefan Bader <stefan.bader@canonical.com>
           Mon, 6 May 2019 16:58:11 +0000 (18:58 +0200)
The CPU vulnerability whitelists have some overlap and there are more
whitelists coming along.

Use the driver_data field in the x86_cpu_id struct to denote the
whitelisted vulnerabilities and combine all whitelists into one.
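
For illustration only, a minimal userspace sketch of that pattern: one table
whose per-entry flag word encodes which bugs a CPU is whitelisted against,
and a single bit-test helper replacing the per-bug tables. struct cpu_id,
match_cpu() and the vendor numbers below are simplified stand-ins for the
kernel's struct x86_cpu_id and x86_match_cpu(), not the real definitions.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NO_SPECULATION (1UL << 0)
    #define NO_MELTDOWN    (1UL << 1)

    #define MODEL_ANY 0xffff

    /* Simplified stand-in for struct x86_cpu_id; driver_data carries
     * the bitmask of whitelisted vulnerabilities. */
    struct cpu_id {
            uint16_t vendor, family, model;
            unsigned long driver_data;
    };

    static const struct cpu_id whitelist[] = {
            { .vendor = 1, .family = 5,    .model = MODEL_ANY,
              .driver_data = NO_SPECULATION },
            { .vendor = 2, .family = 0x0f, .model = MODEL_ANY,
              .driver_data = NO_MELTDOWN },
            { 0 }   /* terminator: no whitelist bits set */
    };

    /* Stand-in for x86_match_cpu(): first matching entry, or NULL. */
    static const struct cpu_id *match_cpu(uint16_t vendor, uint16_t family,
                                          uint16_t model)
    {
            for (const struct cpu_id *m = whitelist; m->driver_data; m++)
                    if (m->vendor == vendor && m->family == family &&
                        (m->model == MODEL_ANY || m->model == model))
                            return m;
            return NULL;
    }

    /* Mirrors the shape of the new cpu_matches() helper in this patch. */
    static bool cpu_matches(uint16_t vendor, uint16_t family, uint16_t model,
                            unsigned long which)
    {
            const struct cpu_id *m = match_cpu(vendor, family, model);

            return m && !!(m->driver_data & which);
    }

    int main(void)
    {
            printf("%d\n", cpu_matches(2, 0x0f, 3, NO_MELTDOWN));    /* 1 */
            printf("%d\n", cpu_matches(2, 0x0f, 3, NO_SPECULATION)); /* 0 */
            return 0;
    }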

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
CVE-2018-12126
CVE-2018-12127
CVE-2018-12130

(backported from commit c01bba64427c723870372e97a82ae843ca14e876)
[tyhicks: Backport to 4.18:
 - X86_VENDOR_HYGON does not exist because commit c9661c1e80b6 is
   missing]
Signed-off-by: Tyler Hicks <tyhicks@canonical.com>
Acked-by: Stefan Bader <stefan.bader@canonical.com>
Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 6bded63309e281e964d9847fa841b7a7add315c8..95b3f41d00d5094a43a9221ac41f03056b0b9d5a 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -935,60 +935,68 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
 #endif
 }
 
-static const __initconst struct x86_cpu_id cpu_no_speculation[] = {
-       { X86_VENDOR_INTEL,     6, INTEL_FAM6_ATOM_SALTWELL,    X86_FEATURE_ANY },
-       { X86_VENDOR_INTEL,     6, INTEL_FAM6_ATOM_SALTWELL_TABLET,     X86_FEATURE_ANY },
-       { X86_VENDOR_INTEL,     6, INTEL_FAM6_ATOM_BONNELL_MID, X86_FEATURE_ANY },
-       { X86_VENDOR_INTEL,     6, INTEL_FAM6_ATOM_SALTWELL_MID,        X86_FEATURE_ANY },
-       { X86_VENDOR_INTEL,     6, INTEL_FAM6_ATOM_BONNELL,     X86_FEATURE_ANY },
-       { X86_VENDOR_CENTAUR,   5 },
-       { X86_VENDOR_INTEL,     5 },
-       { X86_VENDOR_NSC,       5 },
-       { X86_VENDOR_ANY,       4 },
+#define NO_SPECULATION  BIT(0)
+#define NO_MELTDOWN     BIT(1)
+#define NO_SSB          BIT(2)
+#define NO_L1TF         BIT(3)
+
+#define VULNWL(_vendor, _family, _model, _whitelist)   \
+       { X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist }
+
+#define VULNWL_INTEL(model, whitelist)         \
+       VULNWL(INTEL, 6, INTEL_FAM6_##model, whitelist)
+
+#define VULNWL_AMD(family, whitelist)          \
+       VULNWL(AMD, family, X86_MODEL_ANY, whitelist)
+
+static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
+       VULNWL(ANY,     4, X86_MODEL_ANY,       NO_SPECULATION),
+       VULNWL(CENTAUR, 5, X86_MODEL_ANY,       NO_SPECULATION),
+       VULNWL(INTEL,   5, X86_MODEL_ANY,       NO_SPECULATION),
+       VULNWL(NSC,     5, X86_MODEL_ANY,       NO_SPECULATION),
+
+       VULNWL_INTEL(ATOM_SALTWELL,             NO_SPECULATION),
+       VULNWL_INTEL(ATOM_SALTWELL_TABLET,      NO_SPECULATION),
+       VULNWL_INTEL(ATOM_SALTWELL_MID,         NO_SPECULATION),
+       VULNWL_INTEL(ATOM_BONNELL,              NO_SPECULATION),
+       VULNWL_INTEL(ATOM_BONNELL_MID,          NO_SPECULATION),
+
+       VULNWL_INTEL(ATOM_SILVERMONT,           NO_SSB | NO_L1TF),
+       VULNWL_INTEL(ATOM_SILVERMONT_X,         NO_SSB | NO_L1TF),
+       VULNWL_INTEL(ATOM_SILVERMONT_MID,       NO_SSB | NO_L1TF),
+       VULNWL_INTEL(ATOM_AIRMONT,              NO_SSB | NO_L1TF),
+       VULNWL_INTEL(XEON_PHI_KNL,              NO_SSB | NO_L1TF),
+       VULNWL_INTEL(XEON_PHI_KNM,              NO_SSB | NO_L1TF),
+
+       VULNWL_INTEL(CORE_YONAH,                NO_SSB),
+
+       VULNWL_INTEL(ATOM_AIRMONT_MID,          NO_L1TF),
+       VULNWL_INTEL(ATOM_GOLDMONT,             NO_L1TF),
+       VULNWL_INTEL(ATOM_GOLDMONT_X,           NO_L1TF),
+       VULNWL_INTEL(ATOM_GOLDMONT_PLUS,        NO_L1TF),
+
+       VULNWL_AMD(0x0f,                NO_MELTDOWN | NO_SSB | NO_L1TF),
+       VULNWL_AMD(0x10,                NO_MELTDOWN | NO_SSB | NO_L1TF),
+       VULNWL_AMD(0x11,                NO_MELTDOWN | NO_SSB | NO_L1TF),
+       VULNWL_AMD(0x12,                NO_MELTDOWN | NO_SSB | NO_L1TF),
+
+       /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
+       VULNWL_AMD(X86_FAMILY_ANY,      NO_MELTDOWN | NO_L1TF),
        {}
 };
 
-static const __initconst struct x86_cpu_id cpu_no_meltdown[] = {
-       { X86_VENDOR_AMD },
-       {}
-};
-
-/* Only list CPUs which speculate but are non susceptible to SSB */
-static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_SILVERMONT      },
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_AIRMONT         },
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_SILVERMONT_X    },
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_SILVERMONT_MID  },
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_CORE_YONAH           },
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_XEON_PHI_KNL         },
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_XEON_PHI_KNM         },
-       { X86_VENDOR_AMD,       0x12,                                   },
-       { X86_VENDOR_AMD,       0x11,                                   },
-       { X86_VENDOR_AMD,       0x10,                                   },
-       { X86_VENDOR_AMD,       0xf,                                    },
-       {}
-};
+static bool __init cpu_matches(unsigned long which)
+{
+       const struct x86_cpu_id *m = x86_match_cpu(cpu_vuln_whitelist);
 
-static const __initconst struct x86_cpu_id cpu_no_l1tf[] = {
-       /* in addition to cpu_no_speculation */
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_SILVERMONT      },
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_SILVERMONT_X    },
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_AIRMONT         },
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_SILVERMONT_MID  },
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_AIRMONT_MID     },
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_GOLDMONT        },
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_GOLDMONT_X      },
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_GOLDMONT_PLUS   },
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_XEON_PHI_KNL         },
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_XEON_PHI_KNM         },
-       {}
-};
+       return m && !!(m->driver_data & which);
+}
 
 static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 {
        u64 ia32_cap = 0;
 
-       if (x86_match_cpu(cpu_no_speculation))
+       if (cpu_matches(NO_SPECULATION))
                return;
 
        setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
@@ -997,15 +1005,14 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
        if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
                rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
 
-       if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
-          !(ia32_cap & ARCH_CAP_SSB_NO) &&
+       if (!cpu_matches(NO_SSB) && !(ia32_cap & ARCH_CAP_SSB_NO) &&
           !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
                setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
 
        if (ia32_cap & ARCH_CAP_IBRS_ALL)
                setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
 
-       if (x86_match_cpu(cpu_no_meltdown))
+       if (cpu_matches(NO_MELTDOWN))
                return;
 
        /* Rogue Data Cache Load? No! */
@@ -1014,7 +1021,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 
        setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
 
-       if (x86_match_cpu(cpu_no_l1tf))
+       if (cpu_matches(NO_L1TF))
                return;
 
        setup_force_cpu_bug(X86_BUG_L1TF);
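
With the whitelists consolidated, each of the "more whitelists coming along"
reduces to one new driver_data bit plus table annotations. A hypothetical
sketch of such a follow-up (NO_NEWBUG and X86_BUG_NEWBUG are illustrative
names, not part of this patch):

    #define NO_NEWBUG       BIT(4)

    /* Annotate the unaffected CPUs in cpu_vuln_whitelist, e.g.: */
    VULNWL_INTEL(ATOM_GOLDMONT,     NO_L1TF | NO_NEWBUG),

    /* ... and cpu_set_bug_bits() needs only one extra test: */
    if (!cpu_matches(NO_NEWBUG))
            setup_force_cpu_bug(X86_BUG_NEWBUG);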