*(s + 1) = '\0';
}
+void detect_num_cpu_cores(struct cpuinfo_x86 *c)
+{
+ unsigned int eax, ebx, ecx, edx;
+
+ c->x86_max_cores = 1;
+ if (!IS_ENABLED(CONFIG_SMP) || c->cpuid_level < 4)
+ return;
+
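+	/*
+	 * CPUID.(EAX=4, ECX=0): EAX[4:0] is the cache type of the first
+	 * cache level (0 means leaf 4 reports nothing useful), and
+	 * EAX[31:26] is the maximum number of addressable core IDs in
+	 * the physical package, minus one.
+	 */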
+ cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
+ if (eax & 0x1f)
+ c->x86_max_cores = (eax >> 26) + 1;
+}
+
void cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
{
unsigned int n, dummy, ebx, ecx, edx, l2size;
tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]);
}
-void detect_ht(struct cpuinfo_x86 *c)
+int detect_ht_early(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
u32 eax, ebx, ecx, edx;
- int index_msb, core_bits;
- static bool printed;
if (!cpu_has(c, X86_FEATURE_HT))
- return;
+ return -1;
if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
- goto out;
+ return -1;
if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
- return;
+ return -1;
cpuid(1, &eax, &ebx, &ecx, &edx);
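+	/* CPUID.1:EBX[23:16] is the number of addressable logical CPUs per package. */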
smp_num_siblings = (ebx & 0xff0000) >> 16;
-
- if (smp_num_siblings == 1) {
+ if (smp_num_siblings == 1)
pr_info_once("CPU0: Hyper-Threading is disabled\n");
- goto out;
- }
+#endif
+ return 0;
+}
- if (smp_num_siblings <= 1)
- goto out;
+void detect_ht(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_SMP
+ int index_msb, core_bits;
+
+ if (detect_ht_early(c) < 0)
+ return;
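+
+	/*
+	 * The initial APIC ID encodes thread, core and package: shift out
+	 * the sibling bits to obtain the package ID, then mask
+	 * log2(x86_max_cores) bits to obtain the core ID within it.
+	 */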
	index_msb = get_count_order(smp_num_siblings);

	c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);

	smp_num_siblings = smp_num_siblings / c->x86_max_cores;

	index_msb = get_count_order(smp_num_siblings);

	core_bits = get_count_order(c->x86_max_cores);

	c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
				       ((1 << core_bits) - 1);
-
-out:
- if (!printed && (c->x86_max_cores * smp_num_siblings) > 1) {
- pr_info("CPU: Physical Processor ID: %d\n",
- c->phys_proc_id);
- pr_info("CPU: Processor Core ID: %d\n",
- c->cpu_core_id);
- printed = 1;
- }
#endif
}
set_cpu_cap(c, X86_FEATURE_STIBP);
set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
}
+
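+	/*
+	 * With a native AMD_SSBD control in MSR_SPEC_CTRL there is no
+	 * need for the paravirtual VIRT_SPEC_CTRL interface, so prefer
+	 * the former and drop VIRT_SSBD.
+	 */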
+ if (cpu_has(c, X86_FEATURE_AMD_SSBD)) {
+ set_cpu_cap(c, X86_FEATURE_SSBD);
+ set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
+ clear_cpu_cap(c, X86_FEATURE_VIRT_SSBD);
+ }
}
void get_cpu_cap(struct cpuinfo_x86 *c)
#endif
}
-static const __initconst struct x86_cpu_id cpu_no_speculation[] = {
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CEDARVIEW, X86_FEATURE_ANY },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CLOVERVIEW, X86_FEATURE_ANY },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_LINCROFT, X86_FEATURE_ANY },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PENWELL, X86_FEATURE_ANY },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PINEVIEW, X86_FEATURE_ANY },
- { X86_VENDOR_CENTAUR, 5 },
- { X86_VENDOR_INTEL, 5 },
- { X86_VENDOR_NSC, 5 },
- { X86_VENDOR_ANY, 4 },
+#define NO_SPECULATION BIT(0)
+#define NO_MELTDOWN BIT(1)
+#define NO_SSB BIT(2)
+#define NO_L1TF BIT(3)
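+
+/* These NO_* bits are carried in x86_cpu_id.driver_data and tested by cpu_matches(). */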
+
+#define VULNWL(_vendor, _family, _model, _whitelist) \
+ { X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist }
+
+#define VULNWL_INTEL(model, whitelist) \
+ VULNWL(INTEL, 6, INTEL_FAM6_##model, whitelist)
+
+#define VULNWL_AMD(family, whitelist) \
+ VULNWL(AMD, family, X86_MODEL_ANY, whitelist)
+
+static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
+ VULNWL(ANY, 4, X86_MODEL_ANY, NO_SPECULATION),
+ VULNWL(CENTAUR, 5, X86_MODEL_ANY, NO_SPECULATION),
+ VULNWL(INTEL, 5, X86_MODEL_ANY, NO_SPECULATION),
+ VULNWL(NSC, 5, X86_MODEL_ANY, NO_SPECULATION),
+
+ VULNWL_INTEL(ATOM_SALTWELL, NO_SPECULATION),
+ VULNWL_INTEL(ATOM_SALTWELL_TABLET, NO_SPECULATION),
+ VULNWL_INTEL(ATOM_SALTWELL_MID, NO_SPECULATION),
+ VULNWL_INTEL(ATOM_BONNELL, NO_SPECULATION),
+ VULNWL_INTEL(ATOM_BONNELL_MID, NO_SPECULATION),
+
+ VULNWL_INTEL(ATOM_SILVERMONT, NO_SSB | NO_L1TF),
+ VULNWL_INTEL(ATOM_SILVERMONT_X, NO_SSB | NO_L1TF),
+ VULNWL_INTEL(ATOM_SILVERMONT_MID, NO_SSB | NO_L1TF),
+ VULNWL_INTEL(ATOM_AIRMONT, NO_SSB | NO_L1TF),
+ VULNWL_INTEL(XEON_PHI_KNL, NO_SSB | NO_L1TF),
+ VULNWL_INTEL(XEON_PHI_KNM, NO_SSB | NO_L1TF),
+
+ VULNWL_INTEL(CORE_YONAH, NO_SSB),
+
+ VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF),
+ VULNWL_INTEL(ATOM_GOLDMONT, NO_L1TF),
+ VULNWL_INTEL(ATOM_GOLDMONT_X, NO_L1TF),
+ VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_L1TF),
+
+ VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF),
+ VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF),
+ VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF),
+ VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF),
+
+ /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
+ VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF),
{}
};
-static const __initconst struct x86_cpu_id cpu_no_meltdown[] = {
- { X86_VENDOR_AMD },
- {}
-};
+static bool __init cpu_matches(unsigned long which)
+{
+ const struct x86_cpu_id *m = x86_match_cpu(cpu_vuln_whitelist);
-static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PINEVIEW },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_LINCROFT },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PENWELL },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CLOVERVIEW },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CEDARVIEW },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MERRIFIELD },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_CORE_YONAH },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM },
- { X86_VENDOR_CENTAUR, 5, },
- { X86_VENDOR_INTEL, 5, },
- { X86_VENDOR_NSC, 5, },
- { X86_VENDOR_AMD, 0x12, },
- { X86_VENDOR_AMD, 0x11, },
- { X86_VENDOR_AMD, 0x10, },
- { X86_VENDOR_AMD, 0xf, },
- { X86_VENDOR_ANY, 4, },
- {}
-};
+ return m && !!(m->driver_data & which);
+}
static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
{
u64 ia32_cap = 0;
+ if (cpu_matches(NO_SPECULATION))
+ return;
+
+ setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
+ setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
+
if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
- if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
- !(ia32_cap & ARCH_CAP_SSB_NO))
+ if (!cpu_matches(NO_SSB) && !(ia32_cap & ARCH_CAP_SSB_NO) &&
+ !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
- if (x86_match_cpu(cpu_no_speculation))
- return;
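+	/*
+	 * ARCH_CAP_IBRS_ALL advertises enhanced IBRS: IBRS set once at
+	 * boot stays in effect, which the Spectre v2 mitigation code can
+	 * select as the preferred mitigation.
+	 */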
+ if (ia32_cap & ARCH_CAP_IBRS_ALL)
+ setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
- setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
- setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
-
- if (x86_match_cpu(cpu_no_meltdown))
+ if (cpu_matches(NO_MELTDOWN))
return;
	/* Rogue Data Cache Load? No! */
	if (ia32_cap & ARCH_CAP_RDCL_NO)
		return;
setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
+
+ if (cpu_matches(NO_L1TF))
+ return;
+
+ setup_force_cpu_bug(X86_BUG_L1TF);
}
/*
cpu_detect(c);
get_cpu_vendor(c);
get_cpu_cap(c);
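+	/*
+	 * Default the cache address bits to the physical address width;
+	 * vendor code may widen this later (used by the L1TF mitigation).
+	 */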
+ c->x86_cache_bits = c->x86_phys_bits;
setup_force_cpu_cap(X86_FEATURE_CPUID);
if (this_cpu->c_early_init)
get_cpu_cap(c);
+ c->x86_cache_bits = c->x86_phys_bits;
+
if (c->cpuid_level >= 0x00000001) {
c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
#ifdef CONFIG_X86_32
enter_lazy_tlb(&init_mm, curr);
/*
- * Initialize the TSS. Don't bother initializing sp0, as the initial
- * task never enters user mode.
+ * Initialize the TSS. sp0 points to the entry trampoline stack
+ * regardless of what task is running.
*/
set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
load_TR_desc();
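+	/* cpu_entry_stack(cpu) + 1 is the top of this CPU's entry trampoline stack. */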
+ load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1));
load_mm_ldt(&init_mm);