UBUNTU: Ubuntu-4.15.0-96.97
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index b7beb06d80aecd38ef698c019a782f826cfea1e6..cd85ace7f1cff38bdfa58cf31b1f9fcbfd10008e 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -658,32 +658,36 @@ static void cpu_detect_tlb(struct cpuinfo_x86 *c)
                tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]);
 }
 
-void detect_ht(struct cpuinfo_x86 *c)
+int detect_ht_early(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_SMP
        u32 eax, ebx, ecx, edx;
-       int index_msb, core_bits;
 
        if (!cpu_has(c, X86_FEATURE_HT))
-               return;
+               return -1;
 
        if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
-               return;
+               return -1;
 
        if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
-               return;
+               return -1;
 
        cpuid(1, &eax, &ebx, &ecx, &edx);
 
        smp_num_siblings = (ebx & 0xff0000) >> 16;
+       if (smp_num_siblings == 1)
+               pr_info_once("CPU0: Hyper-Threading is disabled\n");
+#endif
+       return 0;
+}
 
-       if (!smp_num_siblings)
-               smp_num_siblings = 1;
+void detect_ht(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_SMP
+       int index_msb, core_bits;
 
-       if (smp_num_siblings == 1) {
-               pr_info_once("CPU0: Hyper-Threading is disabled\n");
+       if (detect_ht_early(c) < 0)
                return;
-       }
 
        index_msb = get_count_order(smp_num_siblings);
        c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
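The detect_ht()/detect_ht_early() split above lets early boot code probe the sibling count before the full topology pass runs. As a minimal user-space sketch (not kernel code) of the CPUID fields involved — leaf 1, where EDX bit 28 is the HT feature flag and EBX[23:16] is the logical-processor count the patch stores in smp_num_siblings — assuming GCC/Clang's <cpuid.h> helpers:

    #include <stdio.h>
    #include <cpuid.h>              /* GCC/Clang x86 helper */

    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;

            if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
                    return 1;       /* CPUID leaf 1 unsupported */

            if (!(edx & (1u << 28))) {      /* EDX bit 28: HTT flag */
                    puts("HT feature flag not set");
                    return 0;
            }
            /* EBX[23:16]: the same field detect_ht_early() masks out */
            printf("sibling count field: %u\n", (ebx & 0xff0000) >> 16);
            return 0;
    }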
@@ -793,6 +797,36 @@ static void init_speculation_control(struct cpuinfo_x86 *c)
                set_cpu_cap(c, X86_FEATURE_STIBP);
                set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
        }
+
+       if (cpu_has(c, X86_FEATURE_AMD_SSBD)) {
+               set_cpu_cap(c, X86_FEATURE_SSBD);
+               set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
+               clear_cpu_cap(c, X86_FEATURE_VIRT_SSBD);
+       }
+}
+
+static void init_cqm(struct cpuinfo_x86 *c)
+{
+       if (!cpu_has(c, X86_FEATURE_CQM_LLC)) {
+               c->x86_cache_max_rmid  = -1;
+               c->x86_cache_occ_scale = -1;
+               return;
+       }
+
+       /* will be overridden if occupancy monitoring exists */
+       c->x86_cache_max_rmid = cpuid_ebx(0xf);
+
+       if (cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC) ||
+           cpu_has(c, X86_FEATURE_CQM_MBM_TOTAL) ||
+           cpu_has(c, X86_FEATURE_CQM_MBM_LOCAL)) {
+               u32 eax, ebx, ecx, edx;
+
+               /* QoS sub-leaf, EAX=0Fh, ECX=1 */
+               cpuid_count(0xf, 1, &eax, &ebx, &ecx, &edx);
+
+               c->x86_cache_max_rmid  = ecx;
+               c->x86_cache_occ_scale = ebx;
+       }
 }
 
 void get_cpu_cap(struct cpuinfo_x86 *c)
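init_cqm() above consolidates the CPUID leaf 0xF (Platform QoS Monitoring) probing that the next hunk removes from get_cpu_cap(). A hedged user-space sketch of the same sub-leaf layout, again assuming <cpuid.h>: sub-leaf 0 EBX reports the maximum RMID across all resource types, and sub-leaf 1 ECX/EBX the L3 maximum RMID and the occupancy scale factor.

    #include <stdio.h>
    #include <cpuid.h>

    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;

            /* sub-leaf 0: EBX = max RMID of all resource types,
             * EDX bit 1 = L3 cache monitoring supported */
            if (!__get_cpuid_count(0xf, 0, &eax, &ebx, &ecx, &edx))
                    return 1;       /* leaf 0xF not enumerated */
            printf("max RMID (all resources): %u\n", ebx);

            if (edx & (1u << 1)) {
                    /* sub-leaf 1: ECX = L3 max RMID, EBX = bytes per
                     * occupancy count (x86_cache_occ_scale in the patch) */
                    __get_cpuid_count(0xf, 1, &eax, &ebx, &ecx, &edx);
                    printf("L3 max RMID %u, occ scale %u bytes\n", ecx, ebx);
            }
            return 0;
    }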
@@ -826,33 +860,6 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
                c->x86_capability[CPUID_D_1_EAX] = eax;
        }
 
-       /* Additional Intel-defined flags: level 0x0000000F */
-       if (c->cpuid_level >= 0x0000000F) {
-
-               /* QoS sub-leaf, EAX=0Fh, ECX=0 */
-               cpuid_count(0x0000000F, 0, &eax, &ebx, &ecx, &edx);
-               c->x86_capability[CPUID_F_0_EDX] = edx;
-
-               if (cpu_has(c, X86_FEATURE_CQM_LLC)) {
-                       /* will be overridden if occupancy monitoring exists */
-                       c->x86_cache_max_rmid = ebx;
-
-                       /* QoS sub-leaf, EAX=0Fh, ECX=1 */
-                       cpuid_count(0x0000000F, 1, &eax, &ebx, &ecx, &edx);
-                       c->x86_capability[CPUID_F_1_EDX] = edx;
-
-                       if ((cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC)) ||
-                             ((cpu_has(c, X86_FEATURE_CQM_MBM_TOTAL)) ||
-                              (cpu_has(c, X86_FEATURE_CQM_MBM_LOCAL)))) {
-                               c->x86_cache_max_rmid = ecx;
-                               c->x86_cache_occ_scale = ebx;
-                       }
-               } else {
-                       c->x86_cache_max_rmid = -1;
-                       c->x86_cache_occ_scale = -1;
-               }
-       }
-
        /* AMD-defined flags: level 0x80000001 */
        eax = cpuid_eax(0x80000000);
        c->extended_cpuid_level = eax;
@@ -890,6 +897,7 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
 
        init_scattered_cpuid_features(c);
        init_speculation_control(c);
+       init_cqm(c);
 
        /*
         * Clear/Set all flags overridden by options, after probe.
@@ -925,81 +933,136 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
 #endif
 }
 
-static const __initconst struct x86_cpu_id cpu_no_speculation[] = {
-       { X86_VENDOR_INTEL,     6, INTEL_FAM6_ATOM_CEDARVIEW,   X86_FEATURE_ANY },
-       { X86_VENDOR_INTEL,     6, INTEL_FAM6_ATOM_CLOVERVIEW,  X86_FEATURE_ANY },
-       { X86_VENDOR_INTEL,     6, INTEL_FAM6_ATOM_LINCROFT,    X86_FEATURE_ANY },
-       { X86_VENDOR_INTEL,     6, INTEL_FAM6_ATOM_PENWELL,     X86_FEATURE_ANY },
-       { X86_VENDOR_INTEL,     6, INTEL_FAM6_ATOM_PINEVIEW,    X86_FEATURE_ANY },
-       { X86_VENDOR_CENTAUR,   5 },
-       { X86_VENDOR_INTEL,     5 },
-       { X86_VENDOR_NSC,       5 },
-       { X86_VENDOR_ANY,       4 },
-       {}
-};
+#define NO_SPECULATION         BIT(0)
+#define NO_MELTDOWN            BIT(1)
+#define NO_SSB                 BIT(2)
+#define NO_L1TF                BIT(3)
+#define NO_MDS                 BIT(4)
+#define MSBDS_ONLY             BIT(5)
+#define NO_SWAPGS              BIT(6)
+#define NO_ITLB_MULTIHIT       BIT(7)
 
-static const __initconst struct x86_cpu_id cpu_no_meltdown[] = {
-       { X86_VENDOR_AMD },
-       {}
-};
+#define VULNWL(_vendor, _family, _model, _whitelist)   \
+       { X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist }
 
-static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_PINEVIEW        },
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_LINCROFT        },
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_PENWELL         },
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_CLOVERVIEW      },
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_CEDARVIEW       },
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_SILVERMONT1     },
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_AIRMONT         },
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_SILVERMONT2     },
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_MERRIFIELD      },
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_CORE_YONAH           },
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_XEON_PHI_KNL         },
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_XEON_PHI_KNM         },
-       { X86_VENDOR_CENTAUR,   5,                                      },
-       { X86_VENDOR_INTEL,     5,                                      },
-       { X86_VENDOR_NSC,       5,                                      },
-       { X86_VENDOR_AMD,       0x12,                                   },
-       { X86_VENDOR_AMD,       0x11,                                   },
-       { X86_VENDOR_AMD,       0x10,                                   },
-       { X86_VENDOR_AMD,       0xf,                                    },
-       { X86_VENDOR_ANY,       4,                                      },
-       {}
-};
+#define VULNWL_INTEL(model, whitelist)         \
+       VULNWL(INTEL, 6, INTEL_FAM6_##model, whitelist)
 
-static const __initconst struct x86_cpu_id cpu_no_l1tf[] = {
-       /* in addition to cpu_no_speculation */
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_SILVERMONT1     },
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_SILVERMONT2     },
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_AIRMONT         },
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_MERRIFIELD      },
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_MOOREFIELD      },
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_GOLDMONT        },
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_DENVERTON       },
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_GEMINI_LAKE     },
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_XEON_PHI_KNL         },
-       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_XEON_PHI_KNM         },
+#define VULNWL_AMD(family, whitelist)          \
+       VULNWL(AMD, family, X86_MODEL_ANY, whitelist)
+
+static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
+       VULNWL(ANY,     4, X86_MODEL_ANY,       NO_SPECULATION),
+       VULNWL(CENTAUR, 5, X86_MODEL_ANY,       NO_SPECULATION),
+       VULNWL(INTEL,   5, X86_MODEL_ANY,       NO_SPECULATION),
+       VULNWL(NSC,     5, X86_MODEL_ANY,       NO_SPECULATION),
+
+       /* Intel Family 6 */
+       VULNWL_INTEL(ATOM_SALTWELL,             NO_SPECULATION | NO_ITLB_MULTIHIT),
+       VULNWL_INTEL(ATOM_SALTWELL_TABLET,      NO_SPECULATION | NO_ITLB_MULTIHIT),
+       VULNWL_INTEL(ATOM_SALTWELL_MID,         NO_SPECULATION | NO_ITLB_MULTIHIT),
+       VULNWL_INTEL(ATOM_BONNELL,              NO_SPECULATION | NO_ITLB_MULTIHIT),
+       VULNWL_INTEL(ATOM_BONNELL_MID,          NO_SPECULATION | NO_ITLB_MULTIHIT),
+
+       VULNWL_INTEL(ATOM_SILVERMONT,           NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+       VULNWL_INTEL(ATOM_SILVERMONT_X,         NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+       VULNWL_INTEL(ATOM_SILVERMONT_MID,       NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+       VULNWL_INTEL(ATOM_AIRMONT,              NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+       VULNWL_INTEL(XEON_PHI_KNL,              NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+       VULNWL_INTEL(XEON_PHI_KNM,              NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+
+       VULNWL_INTEL(CORE_YONAH,                NO_SSB),
+
+       VULNWL_INTEL(ATOM_AIRMONT_MID,          NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+
+       VULNWL_INTEL(ATOM_GOLDMONT,             NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
+       VULNWL_INTEL(ATOM_GOLDMONT_X,           NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
+       VULNWL_INTEL(ATOM_GOLDMONT_PLUS,        NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
+
+       /*
+        * Technically, swapgs isn't serializing on AMD (despite it previously
+        * being documented as such in the APM).  But according to AMD, %gs is
+        * updated non-speculatively, and the issuing of %gs-relative memory
+        * operands will be blocked until the %gs update completes, which is
+        * good enough for our purposes.
+        */
+
+       VULNWL_INTEL(ATOM_TREMONT_X,            NO_ITLB_MULTIHIT),
+
+       /* AMD Family 0xf - 0x12 */
+       VULNWL_AMD(0x0f,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
+       VULNWL_AMD(0x10,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
+       VULNWL_AMD(0x11,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
+       VULNWL_AMD(0x12,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
+
+       /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
+       VULNWL_AMD(X86_FAMILY_ANY,      NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
        {}
 };
 
-static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+static bool __init cpu_matches(unsigned long which)
+{
+       const struct x86_cpu_id *m = x86_match_cpu(cpu_vuln_whitelist);
+
+       return m && !!(m->driver_data & which);
+}
+
+u64 x86_read_arch_cap_msr(void)
 {
        u64 ia32_cap = 0;
 
-       if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
+       if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
                rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
 
-       if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
-          !(ia32_cap & ARCH_CAP_SSB_NO))
-               setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
+       return ia32_cap;
+}
+
+static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+{
+       u64 ia32_cap = x86_read_arch_cap_msr();
+
+       /* Set ITLB_MULTIHIT bug if cpu is not in the whitelist and not mitigated */
+       if (!cpu_matches(NO_ITLB_MULTIHIT) && !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO))
+               setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT);
 
-       if (x86_match_cpu(cpu_no_speculation))
+       if (cpu_matches(NO_SPECULATION))
                return;
 
        setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
        setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
 
-       if (x86_match_cpu(cpu_no_meltdown))
+       if (!cpu_matches(NO_SSB) && !(ia32_cap & ARCH_CAP_SSB_NO) &&
+          !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
+               setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
+
+       if (ia32_cap & ARCH_CAP_IBRS_ALL)
+               setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
+
+       if (!cpu_matches(NO_MDS) && !(ia32_cap & ARCH_CAP_MDS_NO)) {
+               setup_force_cpu_bug(X86_BUG_MDS);
+               if (cpu_matches(MSBDS_ONLY))
+                       setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
+       }
+
+       if (!cpu_matches(NO_SWAPGS))
+               setup_force_cpu_bug(X86_BUG_SWAPGS);
+
+       /*
+        * When the CPU is not mitigated for TAA (TAA_NO=0) set TAA bug when:
+        *      - TSX is supported or
+        *      - TSX_CTRL is present
+        *
+        * TSX_CTRL check is needed for cases when TSX could be disabled before
+        * the kernel boot e.g. kexec.
+        * TSX_CTRL check alone is not sufficient for cases when the microcode
+        * update is not present or running as guest that don't get TSX_CTRL.
+        */
+       if (!(ia32_cap & ARCH_CAP_TAA_NO) &&
+           (cpu_has(c, X86_FEATURE_RTM) ||
+            (ia32_cap & ARCH_CAP_TSX_CTRL_MSR)))
+               setup_force_cpu_bug(X86_BUG_TAA);
+
+       if (cpu_matches(NO_MELTDOWN))
                return;
 
        /* Rogue Data Cache Load? No! */
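The VULNWL table above folds the four removed per-issue match lists into one ordered array whose driver_data word carries the NO_*/MSBDS_ONLY bits, tested through cpu_matches(). A minimal sketch of the first-match-wins lookup idea (illustrative names and vendor numbers, not the kernel's x86_match_cpu()):

    #include <stdbool.h>
    #include <stdio.h>

    #define NO_MELTDOWN     (1ul << 1)
    #define NO_SSB          (1ul << 2)
    #define FAM_ANY         0xffffu
    #define MODEL_ANY       0xffffu

    struct vuln_entry {
            unsigned int vendor, family, model;
            unsigned long flags;    /* plays the driver_data role */
    };

    /* order matters: specific families before the FAM_ANY catch-all,
     * exactly as the comment in the patch says */
    static const struct vuln_entry whitelist[] = {
            { /* AMD */ 2, 0x0f, MODEL_ANY, NO_MELTDOWN | NO_SSB },
            { /* AMD */ 2, FAM_ANY, MODEL_ANY, NO_MELTDOWN },
    };

    static bool cpu_matches_sketch(unsigned int vendor, unsigned int family,
                                   unsigned int model, unsigned long which)
    {
            for (size_t i = 0; i < sizeof(whitelist) / sizeof(whitelist[0]); i++) {
                    const struct vuln_entry *e = &whitelist[i];

                    if (e->vendor != vendor)
                            continue;
                    if (e->family != FAM_ANY && e->family != family)
                            continue;
                    if (e->model != MODEL_ANY && e->model != model)
                            continue;
                    return (e->flags & which) != 0; /* first hit wins */
            }
            return false;   /* not whitelisted: assume affected */
    }

    int main(void)
    {
            /* family 0x0f hits its own row (NO_SSB set); family 0x17
             * falls through to the catch-all, which lacks NO_SSB */
            printf("%d\n", cpu_matches_sketch(2, 0x0f, 0, NO_SSB));      /* 1 */
            printf("%d\n", cpu_matches_sketch(2, 0x17, 0, NO_SSB));      /* 0 */
            printf("%d\n", cpu_matches_sketch(2, 0x17, 0, NO_MELTDOWN)); /* 1 */
            return 0;
    }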
@@ -1008,7 +1071,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 
        setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
 
-       if (x86_match_cpu(cpu_no_l1tf))
+       if (cpu_matches(NO_L1TF))
                return;
 
        setup_force_cpu_bug(X86_BUG_L1TF);
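One subtlety worth spelling out from the hunk above this one is the TAA condition: the bug is forced only when the CPU does not declare ARCH_CAP_TAA_NO, and TSX is either usable now (RTM) or could be re-enabled later (TSX_CTRL present). A tiny sketch of that decision, with illustrative boolean inputs:

    #include <stdbool.h>
    #include <stdio.h>

    /* inputs stand in for ARCH_CAP_TAA_NO, X86_FEATURE_RTM and
     * ARCH_CAP_TSX_CTRL_MSR from the patch */
    static bool taa_vulnerable(bool taa_no, bool rtm, bool tsx_ctrl)
    {
            return !taa_no && (rtm || tsx_ctrl);
    }

    int main(void)
    {
            /* TSX disabled before boot (e.g. kexec): RTM clear, TSX_CTRL set */
            printf("%d\n", taa_vulnerable(false, false, true));  /* 1: bug set */
            /* CPU enumerates TAA_NO: never flagged */
            printf("%d\n", taa_vulnerable(true, true, true));    /* 0 */
            /* no RTM and no TSX_CTRL (old microcode, or a guest): not flagged */
            printf("%d\n", taa_vulnerable(false, false, false)); /* 0 */
            return 0;
    }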
@@ -1039,11 +1102,15 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
        memset(&c->x86_capability, 0, sizeof c->x86_capability);
        c->extended_cpuid_level = 0;
 
+       if (!have_cpuid_p())
+               identify_cpu_without_cpuid(c);
+
        /* cyrix could have cpuid enabled via c_identify()*/
        if (have_cpuid_p()) {
                cpu_detect(c);
                get_cpu_vendor(c);
                get_cpu_cap(c);
+               c->x86_cache_bits = c->x86_phys_bits;
                setup_force_cpu_cap(X86_FEATURE_CPUID);
 
                if (this_cpu->c_early_init)
@@ -1055,7 +1122,6 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
                if (this_cpu->c_bsp_init)
                        this_cpu->c_bsp_init(c);
        } else {
-               identify_cpu_without_cpuid(c);
                setup_clear_cpu_cap(X86_FEATURE_CPUID);
        }
 
@@ -1171,6 +1237,8 @@ static void generic_identify(struct cpuinfo_x86 *c)
 
        get_cpu_cap(c);
 
+       c->x86_cache_bits = c->x86_phys_bits;
+
        if (c->cpuid_level >= 0x00000001) {
                c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
 #ifdef CONFIG_X86_32
@@ -1410,6 +1478,8 @@ void __init identify_boot_cpu(void)
        enable_sep_cpu();
 #endif
        cpu_detect_tlb(&boot_cpu_data);
+
+       tsx_init();
 }
 
 void identify_secondary_cpu(struct cpuinfo_x86 *c)
@@ -1784,11 +1854,12 @@ void cpu_init(void)
        enter_lazy_tlb(&init_mm, curr);
 
        /*
-        * Initialize the TSS.  Don't bother initializing sp0, as the initial
-        * task never enters user mode.
+        * Initialize the TSS.  sp0 points to the entry trampoline stack
+        * regardless of what task is running.
         */
        set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
        load_TR_desc();
+       load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1));
 
        load_mm_ldt(&init_mm);
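
The load_sp0() argument above relies on plain C pointer arithmetic: cpu_entry_stack(cpu) + 1 is one element past the end of the entry-stack struct, i.e. the top of a downward-growing stack. A standalone sketch of that idiom, with a stand-in struct whose size is illustrative:

    #include <stdio.h>

    struct entry_stack {            /* stand-in for the kernel's type */
            unsigned long words[64];
    };

    int main(void)
    {
            static struct entry_stack stack;

            /* &stack + 1 == (char *)&stack + sizeof(struct entry_stack) */
            printf("base  %p\ntop   %p\ndelta %zu bytes\n",
                   (void *)&stack, (void *)(&stack + 1),
                   sizeof(struct entry_stack));
            return 0;
    }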