#include <linux/kernel.h>

#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/thread_info.h>
#include <linux/init.h>
#include <linux/uaccess.h>

#include <asm/cpufeature.h>
#include <asm/pgtable.h>
#include <asm/msr.h>
#include <asm/intel-family.h>
#include <asm/microcode_intel.h>
#include <asm/hwcap2.h>
#include <asm/elf.h>

#include <linux/topology.h>

#include "cpu.h"

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#endif

/*
 * Just in case our CPU detection goes bad, or you have a weird system,
 * allow a way to override the automatic disabling of MPX.
 */
static int forcempx;

static int __init forcempx_setup(char *__unused)
{
	forcempx = 1;

	return 1;
}
__setup("intel-skd-046-workaround=disable", forcempx_setup);

void check_mpx_erratum(struct cpuinfo_x86 *c)
{
	if (forcempx)
		return;

	/*
	 * Turn off the MPX feature on CPUs where SMEP is not
	 * available or disabled.
	 *
	 * Works around Intel Erratum SKD046: "Branch Instructions
	 * May Initialize MPX Bound Registers Incorrectly".
	 *
	 * This might falsely disable MPX on systems without
	 * SMEP, like Atom processors without SMEP. But there
	 * is no such hardware known at the moment.
	 */
	if (cpu_has(c, X86_FEATURE_MPX) && !cpu_has(c, X86_FEATURE_SMEP)) {
		setup_clear_cpu_cap(X86_FEATURE_MPX);
		pr_warn("x86/mpx: Disabling MPX since SMEP not present\n");
	}
}
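
/*
 * Note on the two clearing helpers used in this file:
 * setup_clear_cpu_cap() clears a capability globally (it is recorded so
 * that later-booting CPUs never re-advertise it), while
 * clear_cpu_cap(c, ...) only clears it in the one cpuinfo_x86 passed
 * in. MPX has to stay off on every CPU, hence the setup_ variant above.
 */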

static bool ring3mwait_disabled __read_mostly;

static int __init ring3mwait_disable(char *__unused)
{
	ring3mwait_disabled = true;
	return 0;
}
__setup("ring3mwait=disable", ring3mwait_disable);

static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c)
{
	/*
	 * Ring 3 MONITOR/MWAIT feature cannot be detected without
	 * cpu model and family comparison.
	 */
	if (c->x86 != 6)
		return;
	switch (c->x86_model) {
	case INTEL_FAM6_XEON_PHI_KNL:
	case INTEL_FAM6_XEON_PHI_KNM:
		break;
	default:
		return;
	}

	if (ring3mwait_disabled)
		return;

	set_cpu_cap(c, X86_FEATURE_RING3MWAIT);
	this_cpu_or(msr_misc_features_shadow,
		    1UL << MSR_MISC_FEATURES_ENABLES_RING3MWAIT_BIT);

	if (c == &boot_cpu_data)
		ELF_HWCAP2 |= HWCAP2_RING3MWAIT;
}
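
/*
 * Note that this only accumulates the RING3MWAIT enable bit in the
 * per-CPU msr_misc_features_shadow; the actual MSR write happens later,
 * when init_intel_misc_features() flushes the shadow value to
 * MSR_MISC_FEATURES_ENABLES in a single wrmsrl().
 */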

static void early_init_intel(struct cpuinfo_x86 *c)
{
	u64 misc_enable;

	/* Unmask CPUID levels if masked: */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		if (msr_clear_bit(MSR_IA32_MISC_ENABLE,
				  MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) > 0) {
			c->cpuid_level = cpuid_eax(0);
			get_cpu_cap(c);
		}
	}
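
	/*
	 * msr_clear_bit() returns > 0 only if the bit was set and actually
	 * had to be cleared, so the cpuid_eax(0) re-read above runs just
	 * once, on CPUs where firmware had engaged the CPUID level limiter.
	 */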

	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);

	if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64))
		c->microcode = intel_get_microcode_revision();

	/*
	 * Atom erratum AAE44/AAF40/AAG38/AAH41:
	 *
	 * A race condition between speculative fetches and invalidating
	 * a large page. This is worked around in microcode, but we
	 * need the microcode to have already been loaded... so if it is
	 * not, recommend a BIOS update and disable large pages.
	 */
	if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2 &&
	    c->microcode < 0x20e) {
		pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n");
		clear_cpu_cap(c, X86_FEATURE_PSE);
	}

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#else
	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
	if (c->x86 == 15 && c->x86_cache_alignment == 64)
		c->x86_cache_alignment = 128;
#endif

	/* CPUID workaround for 0F33/0F34 CPU */
	if (c->x86 == 0xF && c->x86_model == 0x3 &&
	    (c->x86_mask == 0x3 || c->x86_mask == 0x4))
		c->x86_phys_bits = 36;

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states.
	 *
	 * It is also reliable across cores and sockets. (but not across
	 * cabinets - we turn it off in that case explicitly.)
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	}

	/* Penwell and Cloverview have the TSC which doesn't sleep on S3 */
	if (c->x86 == 6) {
		switch (c->x86_model) {
		case 0x27:	/* Penwell */
		case 0x35:	/* Cloverview */
		case 0x4a:	/* Merrifield */
			set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3);
			break;
		default:
			break;
		}
	}

	/*
	 * There is a known erratum on Pentium III and Core Solo
	 * and Core Duo CPUs.
	 * " Page with PAT set to WC while associated MTRR is UC
	 *   may consolidate to UC "
	 * Because of this erratum, it is better to stick with
	 * setting WC in MTRR rather than using PAT on these CPUs.
	 *
	 * Enable PAT WC only on P4, Core 2 or later CPUs.
	 */
	if (c->x86 == 6 && c->x86_model < 15)
		clear_cpu_cap(c, X86_FEATURE_PAT);

#ifdef CONFIG_KMEMCHECK
	/*
	 * P4s have a "fast strings" feature which causes single-
	 * stepping REP instructions to only generate a #DB on
	 * cache-line boundaries.
	 *
	 * Ingo Molnar reported a Pentium D (model 6) and a Xeon
	 * (model 2) with the same problem.
	 */
	if (c->x86 == 15)
		if (msr_clear_bit(MSR_IA32_MISC_ENABLE,
				  MSR_IA32_MISC_ENABLE_FAST_STRING_BIT) > 0)
			pr_info("kmemcheck: Disabling fast string operations\n");
#endif

	/*
	 * If fast string is not enabled in IA32_MISC_ENABLE for any reason,
	 * clear the fast string and enhanced fast string CPU capabilities.
	 */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
		if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) {
			pr_info("Disabled fast string operations\n");
			setup_clear_cpu_cap(X86_FEATURE_REP_GOOD);
			setup_clear_cpu_cap(X86_FEATURE_ERMS);
		}
	}

	/*
	 * Intel Quark Core DevMan_001.pdf section 6.4.11
	 * "The operating system also is required to invalidate (i.e., flush)
	 *  the TLB when any changes are made to any of the page table entries.
	 *  The operating system must reload CR3 to cause the TLB to be flushed"
	 *
	 * As a result, boot_cpu_has(X86_FEATURE_PGE) in arch/x86/include/asm/tlbflush.h
	 * should be false so that __flush_tlb_all() causes CR3 instead of CR4.PGE
	 * to be modified.
	 */
	if (c->x86 == 5 && c->x86_model == 9) {
		pr_info("Disabling PGE capability bit\n");
		setup_clear_cpu_cap(X86_FEATURE_PGE);
	}
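
	/*
	 * For background (rough sketch, not the kernel's exact helpers):
	 * with CR4.PGE clear, a plain CR3 reload such as
	 *
	 *	write_cr3(read_cr3());
	 *
	 * flushes the entire TLB, which is exactly what the Quark erratum
	 * text above requires. With PGE set, global mappings would survive
	 * the reload and a CR4.PGE toggle would be needed instead.
	 */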

	if (c->cpuid_level >= 0x00000001) {
		u32 eax, ebx, ecx, edx;

		cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
		/*
		 * If HTT (EDX[28]) is set EBX[16:23] contain the number of
		 * apicids which are reserved per package. Store the resulting
		 * shift value for the package management code.
		 */
		if (edx & (1U << 28))
			c->x86_coreid_bits = get_count_order((ebx >> 16) & 0xff);
	}
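
	/*
	 * Worked example with hypothetical values: if EBX[23:16] reports 16
	 * reserved APIC IDs per package, get_count_order(16) = 4, i.e. the
	 * package ID starts at bit 4 of the APIC ID.
	 */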

	check_mpx_erratum(c);
}

#ifdef CONFIG_X86_32
/*
 * Early probe support logic for ppro memory erratum #50
 *
 * This is called before we do cpu ident work
 */

int ppro_with_ram_bug(void)
{
	/* Uses data from early_cpu_detect now */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_mask < 8) {
		pr_info("Pentium Pro with Errata#50 detected. Taking evasive action.\n");
		return 1;
	}
	return 0;
}

static void intel_smp_check(struct cpuinfo_x86 *c)
{
	/* calling is from identify_secondary_cpu() ? */
	if (!c->cpu_index)
		return;

	/*
	 * Mask B, Pentium, but not Pentium MMX
	 */
	if (c->x86 == 5 &&
	    c->x86_mask >= 1 && c->x86_mask <= 4 &&
	    c->x86_model <= 3) {
		/*
		 * Remember we have B step Pentia with bugs
		 */
		WARN_ONCE(1, "WARNING: SMP operation may be unreliable "
			     "with B stepping processors.\n");
	}
}

static int forcepae;

static int __init forcepae_setup(char *__unused)
{
	forcepae = 1;
	return 1;
}
__setup("forcepae", forcepae_setup);

static void intel_workarounds(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_F00F_BUG
	/*
	 * All models of Pentium and Pentium with MMX technology CPUs
	 * have the F0 0F bug, which lets nonprivileged users lock up the
	 * system. Announce that the fault handler will be checking for it.
	 * The Quark is also family 5, but does not have the same bug.
	 */
	clear_cpu_bug(c, X86_BUG_F00F);
	if (c->x86 == 5 && c->x86_model < 9) {
		static int f00f_workaround_enabled;

		set_cpu_bug(c, X86_BUG_F00F);
		if (!f00f_workaround_enabled) {
			pr_notice("Intel Pentium with F0 0F bug - workaround enabled.\n");
			f00f_workaround_enabled = 1;
		}
	}
#endif

	/*
	 * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
	 * model 3 mask 3
	 */
	if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
		clear_cpu_cap(c, X86_FEATURE_SEP);
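
	/*
	 * The packed comparison above encodes (family, model, stepping) as
	 * 0xFMS: e.g. family 6, model 1, stepping 7 packs to 0x617, which
	 * is below the 0x633 (model 3, stepping 3) cutoff, so such a
	 * Pentium Pro gets its bogus SEP bit cleared.
	 */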

	/*
	 * PAE CPUID issue: many Pentium M report no PAE but may have a
	 * functionally usable PAE implementation.
	 * Forcefully enable PAE if kernel parameter "forcepae" is present.
	 */
	if (forcepae) {
		pr_warn("PAE forced!\n");
		set_cpu_cap(c, X86_FEATURE_PAE);
		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
	}

	/*
	 * P4 Xeon erratum 037 workaround.
	 * Hardware prefetcher may cause stale data to be loaded into the cache.
	 */
	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
		if (msr_set_bit(MSR_IA32_MISC_ENABLE,
				MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) {
			pr_info("CPU: C0 stepping P4 Xeon detected.\n");
			pr_info("CPU: Disabling hardware prefetching (Erratum 037)\n");
		}
	}

	/*
	 * See if we have a good local APIC by checking for buggy Pentia,
	 * i.e. all B steppings and the C2 stepping of P54C when using their
	 * integrated APIC (see 11AP erratum in "Pentium Processor
	 * Specification Update").
	 */
	if (boot_cpu_has(X86_FEATURE_APIC) && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
	    (c->x86_mask < 0x6 || c->x86_mask == 0xb))
		set_cpu_bug(c, X86_BUG_11AP);

#ifdef CONFIG_X86_INTEL_USERCOPY
	/*
	 * Set up the preferred alignment for movsl bulk memory moves
	 */
	switch (c->x86) {
	case 4:		/* 486: untested */
		break;
	case 5:		/* Old Pentia: untested */
		break;
	case 6:		/* PII/PIII only like movsl with 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	case 15:	/* P4 is OK down to 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	}
#endif

	intel_smp_check(c);
}
#else
static void intel_workarounds(struct cpuinfo_x86 *c)
{
}
#endif

static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	unsigned node;
	int cpu = smp_processor_id();

	/* Don't do the funky fallback heuristics the AMD version employs
	   for now. */
	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE || !node_online(node)) {
		/* reuse the value from init_cpu_to_node() */
		node = cpu_to_node(cpu);
	}
	numa_set_node(cpu, node);
#endif
}

/*
 * find out the number of processor cores on the die
 */
static int intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
	unsigned int eax, ebx, ecx, edx;

	if (!IS_ENABLED(CONFIG_SMP) || c->cpuid_level < 4)
		return 1;

	/* Intel has a non-standard dependency on %ecx for this CPUID level. */
	cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
	if (eax & 0x1f)
		return (eax >> 26) + 1;
	else
		return 1;
}
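
/*
 * CPUID.(EAX=4, ECX=0):EAX[31:26] holds the maximum number of
 * addressable core IDs per physical package minus one, hence the +1
 * above. E.g. a (hypothetical) field value of 7 means 8 cores on the
 * die.
 */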

static void detect_vmx_virtcap(struct cpuinfo_x86 *c)
{
	/* Intel VMX MSR indicated features */
#define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW	0x00200000
#define X86_VMX_FEATURE_PROC_CTLS_VNMI		0x00400000
#define X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS	0x80000000
#define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC	0x00000001
#define X86_VMX_FEATURE_PROC_CTLS2_EPT		0x00000002
#define X86_VMX_FEATURE_PROC_CTLS2_VPID		0x00000020

	u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2;

	clear_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
	clear_cpu_cap(c, X86_FEATURE_VNMI);
	clear_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
	clear_cpu_cap(c, X86_FEATURE_EPT);
	clear_cpu_cap(c, X86_FEATURE_VPID);

	rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high);
	msr_ctl = vmx_msr_high | vmx_msr_low;
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW)
		set_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_VNMI)
		set_cpu_cap(c, X86_FEATURE_VNMI);
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS) {
		rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
		      vmx_msr_low, vmx_msr_high);
		msr_ctl2 = vmx_msr_high | vmx_msr_low;
		if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) &&
		    (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW))
			set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT)
			set_cpu_cap(c, X86_FEATURE_EPT);
		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID)
			set_cpu_cap(c, X86_FEATURE_VPID);
	}
}
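
/*
 * Background on the MSR layout read above: in the VMX capability MSRs
 * the low dword reports controls that must be 1 (allowed-0 settings)
 * and the high dword reports controls that may be 1 (allowed-1
 * settings). OR-ing the two halves therefore yields every control this
 * CPU can run with enabled, which is what the feature bits advertise.
 */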

static void init_intel_energy_perf(struct cpuinfo_x86 *c)
{
	u64 epb;

	/*
	 * Initialize MSR_IA32_ENERGY_PERF_BIAS if not already initialized.
	 * (x86_energy_perf_policy(8) is available to change it at run-time.)
	 */
	if (!cpu_has(c, X86_FEATURE_EPB))
		return;

	rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
	if ((epb & 0xF) != ENERGY_PERF_BIAS_PERFORMANCE)
		return;

	pr_warn_once("ENERGY_PERF_BIAS: Set to 'normal', was 'performance'\n");
	pr_warn_once("ENERGY_PERF_BIAS: View and update with x86_energy_perf_policy(8)\n");
	epb = (epb & ~0xF) | ENERGY_PERF_BIAS_NORMAL;
	wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
}
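
/*
 * EPB is a 4-bit hint, 0..15: 0 is maximum performance,
 * ENERGY_PERF_BIAS_NORMAL (6) is the balanced default chosen here, and
 * 15 is maximum powersave. A firmware-left value of 0 would silently
 * disable all power saving, which is why it is rewritten above.
 */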

static void intel_bsp_resume(struct cpuinfo_x86 *c)
{
	/*
	 * MSR_IA32_ENERGY_PERF_BIAS is lost across suspend/resume,
	 * so reinitialize it properly like during bootup:
	 */
	init_intel_energy_perf(c);
}

static void init_cpuid_fault(struct cpuinfo_x86 *c)
{
	u64 msr;

	if (!rdmsrl_safe(MSR_PLATFORM_INFO, &msr)) {
		if (msr & MSR_PLATFORM_INFO_CPUID_FAULT)
			set_cpu_cap(c, X86_FEATURE_CPUID_FAULT);
	}
}

static void init_intel_misc_features(struct cpuinfo_x86 *c)
{
	u64 msr;

	if (rdmsrl_safe(MSR_MISC_FEATURES_ENABLES, &msr))
		return;

	/* Clear all MISC features */
	this_cpu_write(msr_misc_features_shadow, 0);

	/* Check features and update capabilities and shadow control bits */
	init_cpuid_fault(c);
	probe_xeon_phi_r3mwait(c);

	msr = this_cpu_read(msr_misc_features_shadow);
	wrmsrl(MSR_MISC_FEATURES_ENABLES, msr);
}

static void init_intel(struct cpuinfo_x86 *c)
{
	unsigned int l2 = 0;

	early_init_intel(c);

	intel_workarounds(c);

	/*
	 * Detect the extended topology information if available. This
	 * will reinitialise the initial_apicid which will be used
	 * in init_intel_cacheinfo()
	 */
	detect_extended_topology(c);

	if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
		/*
		 * let's use the legacy cpuid vector 0x1 and 0x4 for topology
		 * detection.
		 */
		c->x86_max_cores = intel_num_cpu_cores(c);
#ifdef CONFIG_X86_32
		detect_ht(c);
#endif
	}

	l2 = init_intel_cacheinfo(c);

	/* Detect legacy cache sizes if init_intel_cacheinfo did not */
	if (l2 == 0) {
		cpu_detect_cache_sizes(c);
		l2 = c->x86_cache_size;
	}

	if (c->cpuid_level > 9) {
		unsigned eax = cpuid_eax(10);

		/* Check for version and the number of counters */
		if ((eax & 0xff) && (((eax >> 8) & 0xff) > 1))
			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
	}

	if (cpu_has(c, X86_FEATURE_XMM2))
		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);

	if (boot_cpu_has(X86_FEATURE_DS)) {
		unsigned int l1;

		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
		if (!(l1 & (1 << 11)))
			set_cpu_cap(c, X86_FEATURE_BTS);
		if (!(l1 & (1 << 12)))
			set_cpu_cap(c, X86_FEATURE_PEBS);
	}

	if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_CLFLUSH) &&
	    (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
		set_cpu_bug(c, X86_BUG_CLFLUSH_MONITOR);

	if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_MWAIT) &&
	    c->x86_model == INTEL_FAM6_ATOM_GOLDMONT)
		set_cpu_bug(c, X86_BUG_MONITOR);

#ifdef CONFIG_X86_64
	if (c->x86 == 15)
		c->x86_cache_alignment = c->x86_clflush_size * 2;
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
#else
	/*
	 * Names for the Pentium II/Celeron processors
	 * detectable only by also checking the cache size.
	 * Dixon is NOT a Celeron.
	 */
	if (c->x86 == 6) {
		char *p = NULL;

		switch (c->x86_model) {
		case 5:
			if (l2 == 0)
				p = "Celeron (Covington)";
			else if (l2 == 256)
				p = "Mobile Pentium II (Dixon)";
			break;

		case 6:
			if (l2 == 128)
				p = "Celeron (Mendocino)";
			else if (c->x86_mask == 0 || c->x86_mask == 5)
				p = "Celeron-A";
			break;

		case 8:
			if (l2 == 128)
				p = "Celeron (Coppermine)";
			break;
		}

		if (p)
			strcpy(c->x86_model_id, p);
	}

	if (c->x86 == 15)
		set_cpu_cap(c, X86_FEATURE_P4);
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_P3);
#endif

	/* Work around errata */
	srat_detect_node(c);

	if (cpu_has(c, X86_FEATURE_VMX))
		detect_vmx_virtcap(c);

	init_intel_energy_perf(c);

	init_intel_misc_features(c);
}

#ifdef CONFIG_X86_32
static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/*
	 * Intel PIII Tualatin. This comes in two flavours.
	 * One has 256kb of cache, the other 512. We have no way
	 * to determine which, so we use a boottime override
	 * for the 512kb model, and assume 256 otherwise.
	 */
	if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
		size = 256;

	/*
	 * Intel Quark SoC X1000 contains a 4-way set associative
	 * 16K cache with a 16 byte cache line and 256 lines per tag
	 */
	if ((c->x86 == 5) && (c->x86_model == 9))
		size = 16;
	return size;
}
#endif

#define TLB_INST_4K	0x01
#define TLB_INST_4M	0x02
#define TLB_INST_2M_4M	0x03

#define TLB_INST_ALL	0x05
#define TLB_INST_1G	0x06

#define TLB_DATA_4K	0x11
#define TLB_DATA_4M	0x12
#define TLB_DATA_2M_4M	0x13
#define TLB_DATA_4K_4M	0x14

#define TLB_DATA_1G	0x16

#define TLB_DATA0_4K	0x21
#define TLB_DATA0_4M	0x22
#define TLB_DATA0_2M_4M	0x23

#define STLB_4K		0x41
#define STLB_4K_2M	0x42

static const struct _tlb_table intel_tlb_table[] = {
	{ 0x01, TLB_INST_4K,		32,	" TLB_INST 4 KByte pages, 4-way set associative" },
	{ 0x02, TLB_INST_4M,		2,	" TLB_INST 4 MByte pages, fully associative" },
	{ 0x03, TLB_DATA_4K,		64,	" TLB_DATA 4 KByte pages, 4-way set associative" },
	{ 0x04, TLB_DATA_4M,		8,	" TLB_DATA 4 MByte pages, 4-way set associative" },
	{ 0x05, TLB_DATA_4M,		32,	" TLB_DATA 4 MByte pages, 4-way set associative" },
	{ 0x0b, TLB_INST_4M,		4,	" TLB_INST 4 MByte pages, 4-way set associative" },
	{ 0x4f, TLB_INST_4K,		32,	" TLB_INST 4 KByte pages" },
	{ 0x50, TLB_INST_ALL,		64,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x51, TLB_INST_ALL,		128,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x52, TLB_INST_ALL,		256,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x55, TLB_INST_2M_4M,		7,	" TLB_INST 2-MByte or 4-MByte pages, fully associative" },
	{ 0x56, TLB_DATA0_4M,		16,	" TLB_DATA0 4 MByte pages, 4-way set associative" },
	{ 0x57, TLB_DATA0_4K,		16,	" TLB_DATA0 4 KByte pages, 4-way associative" },
	{ 0x59, TLB_DATA0_4K,		16,	" TLB_DATA0 4 KByte pages, fully associative" },
	{ 0x5a, TLB_DATA0_2M_4M,	32,	" TLB_DATA0 2-MByte or 4 MByte pages, 4-way set associative" },
	{ 0x5b, TLB_DATA_4K_4M,		64,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x5c, TLB_DATA_4K_4M,		128,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x5d, TLB_DATA_4K_4M,		256,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x61, TLB_INST_4K,		48,	" TLB_INST 4 KByte pages, fully associative" },
	{ 0x63, TLB_DATA_1G,		4,	" TLB_DATA 1 GByte pages, 4-way set associative" },
	{ 0x76, TLB_INST_2M_4M,		8,	" TLB_INST 2-MByte or 4-MByte pages, fully associative" },
	{ 0xb0, TLB_INST_4K,		128,	" TLB_INST 4 KByte pages, 4-way set associative" },
	{ 0xb1, TLB_INST_2M_4M,		4,	" TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" },
	{ 0xb2, TLB_INST_4K,		64,	" TLB_INST 4 KByte pages, 4-way set associative" },
	{ 0xb3, TLB_DATA_4K,		128,	" TLB_DATA 4 KByte pages, 4-way set associative" },
	{ 0xb4, TLB_DATA_4K,		256,	" TLB_DATA 4 KByte pages, 4-way associative" },
	{ 0xb5, TLB_INST_4K,		64,	" TLB_INST 4 KByte pages, 8-way set associative" },
	{ 0xb6, TLB_INST_4K,		128,	" TLB_INST 4 KByte pages, 8-way set associative" },
	{ 0xba, TLB_DATA_4K,		64,	" TLB_DATA 4 KByte pages, 4-way associative" },
	{ 0xc0, TLB_DATA_4K_4M,		8,	" TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" },
	{ 0xc1, STLB_4K_2M,		1024,	" STLB 4 KByte and 2 MByte pages, 8-way associative" },
	{ 0xc2, TLB_DATA_2M_4M,		16,	" DTLB 2 MByte/4MByte pages, 4-way associative" },
	{ 0xca, STLB_4K,		512,	" STLB 4 KByte pages, 4-way associative" },
	{ 0x00, 0, 0 }
};

static void intel_tlb_lookup(const unsigned char desc)
{
	unsigned char k;

	if (desc == 0)
		return;

	/* look up this descriptor in the table */
	for (k = 0; intel_tlb_table[k].descriptor != desc &&
		    intel_tlb_table[k].descriptor != 0; k++)
		;

	if (intel_tlb_table[k].tlb_type == 0)
		return;

	switch (intel_tlb_table[k].tlb_type) {
	case STLB_4K:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case STLB_4K_2M:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_ALL:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_4K:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_4M:
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_2M_4M:
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4K:
	case TLB_DATA0_4K:
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4M:
	case TLB_DATA0_4M:
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_2M_4M:
	case TLB_DATA0_2M_4M:
		if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4K_4M:
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_1G:
		if (tlb_lld_1g[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_1g[ENTRIES] = intel_tlb_table[k].entries;
		break;
	}
}

static void intel_detect_tlb(struct cpuinfo_x86 *c)
{
	int i, j, n;
	unsigned int regs[4];
	unsigned char *desc = (unsigned char *)regs;

	if (c->cpuid_level < 2)
		return;

	/* Number of times to iterate */
	n = cpuid_eax(2) & 0xFF;

	for (i = 0 ; i < n ; i++) {
		cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

		/* If bit 31 is set, this is an unknown format */
		for (j = 0 ; j < 3 ; j++)
			if (regs[j] & (1 << 31))
				regs[j] = 0;

		/* Byte 0 is level count, not a descriptor */
		for (j = 1 ; j < 16 ; j++)
			intel_tlb_lookup(desc[j]);
	}
}
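
/*
 * Worked example with hypothetical register values: if cpuid(2)
 * returned eax == 0x76036301, byte 0 (0x01) is the iteration count and
 * bytes 0x63, 0x03 and 0x76 are descriptors for intel_tlb_lookup();
 * per intel_tlb_table[] they would bump the data-TLB 1G and 4K entry
 * counts and the instruction-TLB 2M/4M counts respectively.
 */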

static const struct cpu_dev intel_cpu_dev = {
	.c_vendor	= "Intel",
	.c_ident	= { "GenuineIntel" },
#ifdef CONFIG_X86_32
	.legacy_models = {
		{ .family = 4, .model_names =
		  {
			  [0] = "486 DX-25/33",
		  }
		},
		{ .family = 5, .model_names =
		  {
			  [0] = "Pentium 60/66 A-step",
			  [1] = "Pentium 60/66",
			  [2] = "Pentium 75 - 200",
			  [3] = "OverDrive PODP5V83",
			  [7] = "Mobile Pentium 75 - 200",
			  [8] = "Mobile Pentium MMX",
			  [9] = "Quark SoC X1000",
		  }
		},
		{ .family = 6, .model_names =
		  {
			  [0] = "Pentium Pro A-step",
			  [3] = "Pentium II (Klamath)",
			  [4] = "Pentium II (Deschutes)",
			  [5] = "Pentium II (Deschutes)",
			  [6] = "Mobile Pentium II",
			  [7] = "Pentium III (Katmai)",
			  [8] = "Pentium III (Coppermine)",
			  [10] = "Pentium III (Cascades)",
			  [11] = "Pentium III (Tualatin)",
		  }
		},
		{ .family = 15, .model_names =
		  {
			  [0] = "Pentium 4 (Unknown)",
			  [1] = "Pentium 4 (Willamette)",
			  [2] = "Pentium 4 (Northwood)",
			  [4] = "Pentium 4 (Foster)",
			  [5] = "Pentium 4 (Foster)",
		  }
		},
	},
	.legacy_cache_size = intel_size_cache,
#endif
	.c_detect_tlb	= intel_detect_tlb,
	.c_early_init	= early_init_intel,
	.c_init		= init_intel,
	.c_bsp_resume	= intel_bsp_resume,
	.c_x86_vendor	= X86_VENDOR_INTEL,
};

cpu_dev_register(intel_cpu_dev);
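
/*
 * cpu_dev_register() (see cpu.h) places a pointer to intel_cpu_dev in
 * the .x86_cpu_dev.init linker section; the common CPU setup code scans
 * that section at boot and selects this driver when the CPUID vendor
 * string matches .c_ident's "GenuineIntel".
 */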