x86/cpu: Sanitize FAM6_ATOM naming
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 39d7ea865207d102d2f03fc9ca779b2bdbb03c61..6bded63309e281e964d9847fa841b7a7add315c8 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -47,6 +47,8 @@
 #include <asm/pat.h>
 #include <asm/microcode.h>
 #include <asm/microcode_intel.h>
+#include <asm/intel-family.h>
+#include <asm/cpu_device_id.h>
 
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/uv/uv.h>
@@ -64,6 +66,13 @@ cpumask_var_t cpu_callin_mask;
 /* representing cpus for which sibling maps can be computed */
 cpumask_var_t cpu_sibling_setup_mask;
 
+/* Number of siblings per CPU package */
+int smp_num_siblings = 1;
+EXPORT_SYMBOL(smp_num_siblings);
+
+/* Last level cache ID of each logical CPU */
+DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id) = BAD_APICID;
+
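A minimal usage sketch for the per-CPU variable added above (not part of the commit; it assumes cpu_llc_id's DECLARE_PER_CPU() declaration is in scope, as it is via asm/smp.h in this tree, and that cpu is a valid CPU number):

#include <linux/percpu.h>
#include <asm/smp.h>

/* Sketch: read the last-level-cache ID the topology code published. */
static u16 llc_id_of(int cpu)
{
	return per_cpu(cpu_llc_id, cpu);
}

On the running CPU itself, this_cpu_read(cpu_llc_id) is the cheaper accessor.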
 /* correctly size the local cpu masks */
 void __init setup_cpu_local_masks(void)
 {
@@ -575,6 +584,19 @@ static void get_model_name(struct cpuinfo_x86 *c)
        *(s + 1) = '\0';
 }
 
+void detect_num_cpu_cores(struct cpuinfo_x86 *c)
+{
+       unsigned int eax, ebx, ecx, edx;
+
+       c->x86_max_cores = 1;
+       if (!IS_ENABLED(CONFIG_SMP) || c->cpuid_level < 4)
+               return;
+
+       cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
+       if (eax & 0x1f)
+               c->x86_max_cores = (eax >> 26) + 1;
+}
+
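The leaf-4 decode above can be reproduced from user space. A hedged sketch using GCC/clang's <cpuid.h> (the program itself is ours, not from the diff): EAX[4:0] is the cache-type field, zero meaning the sub-leaf is invalid, and EAX[31:26] holds the maximum number of addressable core IDs per package, minus one.

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* CPUID.(EAX=4, ECX=0): first deterministic cache parameters sub-leaf. */
	if (!__get_cpuid_count(4, 0, &eax, &ebx, &ecx, &edx))
		return 1;			/* leaf 4 not supported */

	if (eax & 0x1f)				/* cache type != null: valid */
		printf("max core IDs per package: %u\n", (eax >> 26) + 1);
	return 0;
}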
 void cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
 {
        unsigned int n, dummy, ebx, ecx, edx, l2size;
@@ -636,33 +658,36 @@ static void cpu_detect_tlb(struct cpuinfo_x86 *c)
                tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]);
 }
 
-void detect_ht(struct cpuinfo_x86 *c)
+int detect_ht_early(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_SMP
        u32 eax, ebx, ecx, edx;
-       int index_msb, core_bits;
-       static bool printed;
 
        if (!cpu_has(c, X86_FEATURE_HT))
-               return;
+               return -1;
 
        if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
-               goto out;
+               return -1;
 
        if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
-               return;
+               return -1;
 
        cpuid(1, &eax, &ebx, &ecx, &edx);
 
        smp_num_siblings = (ebx & 0xff0000) >> 16;
-
-       if (smp_num_siblings == 1) {
+       if (smp_num_siblings == 1)
                pr_info_once("CPU0: Hyper-Threading is disabled\n");
-               goto out;
-       }
+#endif
+       return 0;
+}
 
-       if (smp_num_siblings <= 1)
-               goto out;
+void detect_ht(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_SMP
+       int index_msb, core_bits;
+
+       if (detect_ht_early(c) < 0)
+               return;
 
        index_msb = get_count_order(smp_num_siblings);
        c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
@@ -675,15 +700,6 @@ void detect_ht(struct cpuinfo_x86 *c)
 
        c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
                                       ((1 << core_bits) - 1);
-
-out:
-       if (!printed && (c->x86_max_cores * smp_num_siblings) > 1) {
-               pr_info("CPU: Physical Processor ID: %d\n",
-                       c->phys_proc_id);
-               pr_info("CPU: Processor Core ID: %d\n",
-                       c->cpu_core_id);
-               printed = 1;
-       }
 #endif
 }
 
@@ -729,7 +745,7 @@ void cpu_detect(struct cpuinfo_x86 *c)
                cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
                c->x86          = x86_family(tfms);
                c->x86_model    = x86_model(tfms);
-               c->x86_mask     = x86_stepping(tfms);
+               c->x86_stepping = x86_stepping(tfms);
 
                if (cap0 & (1<<19)) {
                        c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
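For reference, a self-contained sketch of the decode behind x86_family(), x86_model() and x86_stepping() on the CPUID.1:EAX signature (the function names below are ours): the extended family field only applies when the base family is 0xf, and the extended model field from family 6 upwards.

/* Decode CPUID.1:EAX (tfms) into family/model/stepping. */
static unsigned int sig_family(unsigned int tfms)
{
	unsigned int fam = (tfms >> 8) & 0xf;

	if (fam == 0xf)
		fam += (tfms >> 20) & 0xff;		/* extended family */
	return fam;
}

static unsigned int sig_model(unsigned int tfms)
{
	unsigned int model = (tfms >> 4) & 0xf;

	if (sig_family(tfms) >= 0x6)
		model += ((tfms >> 16) & 0xf) << 4;	/* extended model */
	return model;
}

static unsigned int sig_stepping(unsigned int tfms)
{
	return tfms & 0xf;				/* low nibble */
}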
@@ -748,6 +764,47 @@ static void apply_forced_caps(struct cpuinfo_x86 *c)
        }
 }
 
+static void init_speculation_control(struct cpuinfo_x86 *c)
+{
+       /*
+        * The Intel SPEC_CTRL CPUID bit implies IBRS and IBPB support,
+        * and they also have a different bit for STIBP support. Also,
+        * a hypervisor might have set the individual AMD bits even on
+        * Intel CPUs, for finer-grained selection of what's available.
+        */
+       if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
+               set_cpu_cap(c, X86_FEATURE_IBRS);
+               set_cpu_cap(c, X86_FEATURE_IBPB);
+               set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
+       }
+
+       if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
+               set_cpu_cap(c, X86_FEATURE_STIBP);
+
+       if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD) ||
+           cpu_has(c, X86_FEATURE_VIRT_SSBD))
+               set_cpu_cap(c, X86_FEATURE_SSBD);
+
+       if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
+               set_cpu_cap(c, X86_FEATURE_IBRS);
+               set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
+       }
+
+       if (cpu_has(c, X86_FEATURE_AMD_IBPB))
+               set_cpu_cap(c, X86_FEATURE_IBPB);
+
+       if (cpu_has(c, X86_FEATURE_AMD_STIBP)) {
+               set_cpu_cap(c, X86_FEATURE_STIBP);
+               set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
+       }
+
+       if (cpu_has(c, X86_FEATURE_AMD_SSBD)) {
+               set_cpu_cap(c, X86_FEATURE_SSBD);
+               set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
+               clear_cpu_cap(c, X86_FEATURE_VIRT_SSBD);
+       }
+}
+
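Summed up, the mapping init_speculation_control() establishes (a reading aid for the code above, not new behavior):

/*
 * SPEC_CTRL                  -> IBRS, IBPB, MSR_SPEC_CTRL
 * INTEL_STIBP                -> STIBP
 * SPEC_CTRL_SSBD | VIRT_SSBD -> SSBD
 * AMD_IBRS                   -> IBRS, MSR_SPEC_CTRL
 * AMD_IBPB                   -> IBPB
 * AMD_STIBP                  -> STIBP, MSR_SPEC_CTRL
 * AMD_SSBD                   -> SSBD, MSR_SPEC_CTRL (VIRT_SSBD cleared)
 */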
 void get_cpu_cap(struct cpuinfo_x86 *c)
 {
        u32 eax, ebx, ecx, edx;
@@ -769,6 +826,7 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
                cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);
                c->x86_capability[CPUID_7_0_EBX] = ebx;
                c->x86_capability[CPUID_7_ECX] = ecx;
+               c->x86_capability[CPUID_7_EDX] = edx;
        }
 
        /* Extended state features: level 0x0000000d */
@@ -841,6 +899,7 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
                c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);
 
        init_scattered_cpuid_features(c);
+       init_speculation_control(c);
 
        /*
         * Clear/Set all flags overridden by options, after probe.
@@ -876,6 +935,91 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
 #endif
 }
 
+static const __initconst struct x86_cpu_id cpu_no_speculation[] = {
+       { X86_VENDOR_INTEL,     6, INTEL_FAM6_ATOM_SALTWELL,    X86_FEATURE_ANY },
+       { X86_VENDOR_INTEL,     6, INTEL_FAM6_ATOM_SALTWELL_TABLET,     X86_FEATURE_ANY },
+       { X86_VENDOR_INTEL,     6, INTEL_FAM6_ATOM_BONNELL_MID, X86_FEATURE_ANY },
+       { X86_VENDOR_INTEL,     6, INTEL_FAM6_ATOM_SALTWELL_MID,        X86_FEATURE_ANY },
+       { X86_VENDOR_INTEL,     6, INTEL_FAM6_ATOM_BONNELL,     X86_FEATURE_ANY },
+       { X86_VENDOR_CENTAUR,   5 },
+       { X86_VENDOR_INTEL,     5 },
+       { X86_VENDOR_NSC,       5 },
+       { X86_VENDOR_ANY,       4 },
+       {}
+};
+
+static const __initconst struct x86_cpu_id cpu_no_meltdown[] = {
+       { X86_VENDOR_AMD },
+       {}
+};
+
+/* Only list CPUs which speculate but are not susceptible to SSB */
+static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
+       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_SILVERMONT      },
+       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_AIRMONT         },
+       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_SILVERMONT_X    },
+       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_SILVERMONT_MID  },
+       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_CORE_YONAH           },
+       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_XEON_PHI_KNL         },
+       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_XEON_PHI_KNM         },
+       { X86_VENDOR_AMD,       0x12,                                   },
+       { X86_VENDOR_AMD,       0x11,                                   },
+       { X86_VENDOR_AMD,       0x10,                                   },
+       { X86_VENDOR_AMD,       0xf,                                    },
+       {}
+};
+
+static const __initconst struct x86_cpu_id cpu_no_l1tf[] = {
+       /* in addition to cpu_no_speculation */
+       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_SILVERMONT      },
+       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_SILVERMONT_X    },
+       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_AIRMONT         },
+       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_SILVERMONT_MID  },
+       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_AIRMONT_MID     },
+       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_GOLDMONT        },
+       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_GOLDMONT_X      },
+       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_GOLDMONT_PLUS   },
+       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_XEON_PHI_KNL         },
+       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_XEON_PHI_KNM         },
+       {}
+};
+
+static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+{
+       u64 ia32_cap = 0;
+
+       if (x86_match_cpu(cpu_no_speculation))
+               return;
+
+       setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
+       setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
+
+       if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
+               rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
+
+       if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
+          !(ia32_cap & ARCH_CAP_SSB_NO) &&
+          !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
+               setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
+
+       if (ia32_cap & ARCH_CAP_IBRS_ALL)
+               setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
+
+       if (x86_match_cpu(cpu_no_meltdown))
+               return;
+
+       /* Rogue Data Cache Load? No! */
+       if (ia32_cap & ARCH_CAP_RDCL_NO)
+               return;
+
+       setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
+
+       if (x86_match_cpu(cpu_no_l1tf))
+               return;
+
+       setup_force_cpu_bug(X86_BUG_L1TF);
+}
+
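The tables above lean on x86_match_cpu() semantics: an entry matches the boot CPU when every non-wildcard field agrees (a family or model of 0 and X86_FEATURE_ANY act as wildcards, X86_VENDOR_ANY matches any vendor), and the all-zero {} entry terminates the walk. A minimal sketch, with a hypothetical table name:

#include <linux/types.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>

static const struct x86_cpu_id goldmont_ids[] = {
	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT, X86_FEATURE_ANY },
	{}	/* all-zero terminator */
};

static bool boot_cpu_is_goldmont(void)
{
	/* x86_match_cpu() returns the matching entry, or NULL. */
	return x86_match_cpu(goldmont_ids) != NULL;
}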
 /*
  * Do minimum CPU detection early.
  * Fields really needed: vendor, cpuid_level, family, model, mask,
@@ -906,6 +1050,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
                cpu_detect(c);
                get_cpu_vendor(c);
                get_cpu_cap(c);
+               c->x86_cache_bits = c->x86_phys_bits;
                setup_force_cpu_cap(X86_FEATURE_CPUID);
 
                if (this_cpu->c_early_init)
@@ -923,8 +1068,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 
        setup_force_cpu_cap(X86_FEATURE_ALWAYS);
 
-       if (c->x86_vendor != X86_VENDOR_AMD)
-               setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
+       cpu_set_bug_bits(c);
 
        fpu__init_system(c);
 
@@ -1034,6 +1178,8 @@ static void generic_identify(struct cpuinfo_x86 *c)
 
        get_cpu_cap(c);
 
+       c->x86_cache_bits = c->x86_phys_bits;
+
        if (c->cpuid_level >= 0x00000001) {
                c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
 #ifdef CONFIG_X86_32
@@ -1121,9 +1267,9 @@ static void identify_cpu(struct cpuinfo_x86 *c)
        int i;
 
        c->loops_per_jiffy = loops_per_jiffy;
-       c->x86_cache_size = -1;
+       c->x86_cache_size = 0;
        c->x86_vendor = X86_VENDOR_UNKNOWN;
-       c->x86_model = c->x86_mask = 0; /* So far unknown... */
+       c->x86_model = c->x86_stepping = 0;     /* So far unknown... */
        c->x86_vendor_id[0] = '\0'; /* Unset */
        c->x86_model_id[0] = '\0';  /* Unset */
        c->x86_max_cores = 1;
@@ -1284,6 +1430,7 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c)
 #endif
        mtrr_ap_init();
        validate_apic_and_package_id(c);
+       x86_spec_ctrl_setup_ap();
 }
 
 static __init int setup_noclflush(char *arg)
@@ -1315,8 +1462,8 @@ void print_cpu_info(struct cpuinfo_x86 *c)
 
        pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model);
 
-       if (c->x86_mask || c->cpuid_level >= 0)
-               pr_cont(", stepping: 0x%x)\n", c->x86_mask);
+       if (c->x86_stepping || c->cpuid_level >= 0)
+               pr_cont(", stepping: 0x%x)\n", c->x86_stepping);
        else
                pr_cont(")\n");
 }
@@ -1646,11 +1793,12 @@ void cpu_init(void)
        enter_lazy_tlb(&init_mm, curr);
 
        /*
-        * Initialize the TSS.  Don't bother initializing sp0, as the initial
-        * task never enters user mode.
+        * Initialize the TSS.  sp0 points to the entry trampoline stack
+        * regardless of what task is running.
         */
        set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
        load_TR_desc();
+       load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1));
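/*
 * The "+ 1" above is struct-pointer arithmetic: cpu_entry_stack(cpu)
 * returns a struct pointer, so adding one yields the first byte past
 * the entry stack, i.e. its top, which is what sp0 must hold since
 * x86 stacks grow downwards.
 */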
 
        load_mm_ldt(&init_mm);
 
@@ -1686,3 +1834,33 @@ static int __init init_cpu_syscore(void)
        return 0;
 }
 core_initcall(init_cpu_syscore);
+
+/*
+ * The microcode loader calls this upon late microcode load to recheck features,
+ * only when microcode has been updated. Caller holds microcode_mutex and CPU
+ * hotplug lock.
+ */
+void microcode_check(void)
+{
+       struct cpuinfo_x86 info;
+
+       perf_check_microcode();
+
+       /* Reload CPUID max function as it might've changed. */
+       info.cpuid_level = cpuid_eax(0);
+
+       /*
+        * Copy all capability leafs to pick up the synthetic ones so that
+        * memcmp() below doesn't fail on that. The ones coming from CPUID will
+        * get overwritten in get_cpu_cap().
+        */
+       memcpy(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability));
+
+       get_cpu_cap(&info);
+
+       if (!memcmp(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability)))
+               return;
+
+       pr_warn("x86/CPU: CPU features have changed after loading microcode, but might not take effect.\n");
+       pr_warn("x86/CPU: Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
+}