x86/speculation/swapgs: Exclude ATOMs from speculation through SWAPGS
author Thomas Gleixner <tglx@linutronix.de>
Wed, 17 Jul 2019 19:18:59 +0000 (21:18 +0200)
committer Kleber Sacilotto de Souza <kleber.souza@canonical.com>
Thu, 1 Aug 2019 10:24:25 +0000 (12:24 +0200)
Intel provided the following information:

 On all current Atom processors, instructions that use a segment register
 value (e.g. a load or store) will not speculatively execute before the
 last writer of that segment retires. Thus they will not use a
 speculatively written segment value.

That means on ATOMs there is no speculation through SWAPGS, so the SWAPGS
entry paths can be excluded from the extra LFENCE if PTI is disabled.

Create a separate bug flag for speculation through SWAPGS and mark all
out-of-order ATOMs and AMD/HYGON CPUs as not affected. The in-order ATOMs
are excluded from the whole mitigation mess anyway.
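
Condensed into one place, the change wires up as follows. This is a paraphrase of the two hunks further down (common.c sets the new bug bit, bugs.c consumes it), not a literal excerpt:

	/* arch/x86/kernel/cpu/common.c: CPUs not whitelisted as NO_SWAPGS are affected */
	if (!cpu_matches(NO_SWAPGS))
		setup_force_cpu_bug(X86_BUG_SWAPGS);

	/* arch/x86/kernel/cpu/bugs.c: only affected CPUs running without PTI
	 * need the extra LFENCE on the user entry path */
	if (boot_cpu_has_bug(X86_BUG_SWAPGS) && !boot_cpu_has(X86_FEATURE_PTI))
		setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);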

Reported-by: Andrew Cooper <andrew.cooper3@citrix.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Tyler Hicks <tyhicks@canonical.com>
Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
CVE-2019-1125

(backported from commit f36cf386e3fec258a341d446915862eded3e13d8)
[tyhicks: Dropped VULNWL_HYGON() change since this kernel version
 doesn't know about Hygon processors]
Signed-off-by: Tyler Hicks <tyhicks@canonical.com>
Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
arch/x86/include/asm/cpufeatures.h
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/cpu/common.c

index acf26383e9d964ff2839f863a24c37a60a423077..1c2f0b2b237a794aabe73e3eed317194ac21c167 100644
 #define X86_BUG_L1TF                   X86_BUG(18) /* CPU is affected by L1 Terminal Fault */
 #define X86_BUG_MDS                    X86_BUG(19) /* CPU is affected by Microarchitectural data sampling */
 #define X86_BUG_MSBDS_ONLY             X86_BUG(20) /* CPU is only affected by the  MSDBS variant of BUG_MDS */
+#define X86_BUG_SWAPGS                 X86_BUG(21) /* CPU is affected by speculation through SWAPGS */
 
 #endif /* _ASM_X86_CPUFEATURES_H */
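
For context on the new define: X86_BUG_* values index synthetic "bug" capability bits placed after the regular feature words, so the usual cpu-cap helpers apply to them. The helpers below are quoted from memory from cpufeatures.h/cpufeature.h of this kernel generation and are not part of this hunk:

	#define X86_BUG(x)			(NCAPINTS*32 + (x))	/* bug bits live past the feature words */
	#define cpu_has_bug(c, bit)		cpu_has(c, (bit))
	#define setup_force_cpu_bug(bit)	setup_force_cpu_cap(bit)
	#define boot_cpu_has_bug(bit)		cpu_has_bug(&boot_cpu_data, (bit))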
index 908ce7e3d64b9478d7104fb97ae8bbf9ee7cd607..3c84e3259197414116845183b7a901f6001fe02c 100644
@@ -282,18 +282,6 @@ static const char * const spectre_v1_strings[] = {
        [SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization",
 };
 
-static bool is_swapgs_serializing(void)
-{
-       /*
-        * Technically, swapgs isn't serializing on AMD (despite it previously
-        * being documented as such in the APM).  But according to AMD, %gs is
-        * updated non-speculatively, and the issuing of %gs-relative memory
-        * operands will be blocked until the %gs update completes, which is
-        * good enough for our purposes.
-        */
-       return boot_cpu_data.x86_vendor == X86_VENDOR_AMD;
-}
-
 /*
  * Does SMAP provide full mitigation against speculative kernel access to
  * userspace?
@@ -344,9 +332,11 @@ static void __init spectre_v1_select_mitigation(void)
                         * PTI as the CR3 write in the Meltdown mitigation
                         * is serializing.
                         *
-                        * If neither is there, mitigate with an LFENCE.
+                        * If neither is there, mitigate with an LFENCE to
+                        * stop speculation through swapgs.
                         */
-                       if (!is_swapgs_serializing() && !boot_cpu_has(X86_FEATURE_PTI))
+                       if (boot_cpu_has_bug(X86_BUG_SWAPGS) &&
+                           !boot_cpu_has(X86_FEATURE_PTI))
                                setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);
 
                        /*
index 358cc9355a4f95700b3109be9f803b74e06ac1b2..cf8722348883af9586a3dca0c673c396beb81904 100644
@@ -939,6 +939,7 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
 #define NO_L1TF                BIT(3)
 #define NO_MDS         BIT(4)
 #define MSBDS_ONLY     BIT(5)
+#define NO_SWAPGS      BIT(6)
 
 #define VULNWL(_vendor, _family, _model, _whitelist)   \
        { X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist }
@@ -962,29 +963,37 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
        VULNWL_INTEL(ATOM_BONNELL,              NO_SPECULATION),
        VULNWL_INTEL(ATOM_BONNELL_MID,          NO_SPECULATION),
 
-       VULNWL_INTEL(ATOM_SILVERMONT,           NO_SSB | NO_L1TF | MSBDS_ONLY),
-       VULNWL_INTEL(ATOM_SILVERMONT_X,         NO_SSB | NO_L1TF | MSBDS_ONLY),
-       VULNWL_INTEL(ATOM_SILVERMONT_MID,       NO_SSB | NO_L1TF | MSBDS_ONLY),
-       VULNWL_INTEL(ATOM_AIRMONT,              NO_SSB | NO_L1TF | MSBDS_ONLY),
-       VULNWL_INTEL(XEON_PHI_KNL,              NO_SSB | NO_L1TF | MSBDS_ONLY),
-       VULNWL_INTEL(XEON_PHI_KNM,              NO_SSB | NO_L1TF | MSBDS_ONLY),
+       VULNWL_INTEL(ATOM_SILVERMONT,           NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
+       VULNWL_INTEL(ATOM_SILVERMONT_X,         NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
+       VULNWL_INTEL(ATOM_SILVERMONT_MID,       NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
+       VULNWL_INTEL(ATOM_AIRMONT,              NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
+       VULNWL_INTEL(XEON_PHI_KNL,              NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
+       VULNWL_INTEL(XEON_PHI_KNM,              NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
 
        VULNWL_INTEL(CORE_YONAH,                NO_SSB),
 
-       VULNWL_INTEL(ATOM_AIRMONT_MID,          NO_L1TF | MSBDS_ONLY),
+       VULNWL_INTEL(ATOM_AIRMONT_MID,          NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
 
-       VULNWL_INTEL(ATOM_GOLDMONT,             NO_MDS | NO_L1TF),
-       VULNWL_INTEL(ATOM_GOLDMONT_X,           NO_MDS | NO_L1TF),
-       VULNWL_INTEL(ATOM_GOLDMONT_PLUS,        NO_MDS | NO_L1TF),
+       VULNWL_INTEL(ATOM_GOLDMONT,             NO_MDS | NO_L1TF | NO_SWAPGS),
+       VULNWL_INTEL(ATOM_GOLDMONT_X,           NO_MDS | NO_L1TF | NO_SWAPGS),
+       VULNWL_INTEL(ATOM_GOLDMONT_PLUS,        NO_MDS | NO_L1TF | NO_SWAPGS),
+
+       /*
+        * Technically, swapgs isn't serializing on AMD (despite it previously
+        * being documented as such in the APM).  But according to AMD, %gs is
+        * updated non-speculatively, and the issuing of %gs-relative memory
+        * operands will be blocked until the %gs update completes, which is
+        * good enough for our purposes.
+        */
 
        /* AMD Family 0xf - 0x12 */
-       VULNWL_AMD(0x0f,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
-       VULNWL_AMD(0x10,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
-       VULNWL_AMD(0x11,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
-       VULNWL_AMD(0x12,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
+       VULNWL_AMD(0x0f,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
+       VULNWL_AMD(0x10,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
+       VULNWL_AMD(0x11,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
+       VULNWL_AMD(0x12,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
 
        /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
-       VULNWL_AMD(X86_FAMILY_ANY,      NO_MELTDOWN | NO_L1TF | NO_MDS),
+       VULNWL_AMD(X86_FAMILY_ANY,      NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS),
        {}
 };
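
The whitelist above is consumed by cpu_matches(), which is unchanged by this patch; in kernels of this vintage it looks roughly like this (shown for reference only):

	static bool __init cpu_matches(unsigned long which)
	{
		const struct x86_cpu_id *m = x86_match_cpu(cpu_vuln_whitelist);

		return m && !!(m->driver_data & which);
	}

A CPU matching a VULNWL() entry is exempt from every bug whose bit is set in that entry's mask; anything not listed, including all out-of-order Intel cores outside the Atom lines above, gets X86_BUG_SWAPGS forced in cpu_set_bug_bits() below.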
 
@@ -1021,6 +1030,9 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
                        setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
        }
 
+       if (!cpu_matches(NO_SWAPGS))
+               setup_force_cpu_bug(X86_BUG_SWAPGS);
+
        if (cpu_matches(NO_MELTDOWN))
                return;