#include <linux/init.h>
#include <linux/kernel.h>

#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/thread_info.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/msr.h>
#include <asm/bugs.h>
#include <asm/cpu.h>

#ifdef CONFIG_X86_64
#include <linux/topology.h>
#include <asm/numa_64.h>
#endif

#include "cpu.h"

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#endif
static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
{
	/* Unmask CPUID levels if masked: */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		u64 misc_enable;

		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);

		if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) {
			misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
			wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
			/* Re-read the maximum level now that it is unmasked: */
			c->cpuid_level = cpuid_eax(0);
			get_cpu_cap(c);
		}
	}
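
	/*
	 * P4 (family 0xf) from model 3 on and Core (family 6) from model
	 * 0x0e on advertise a TSC that ticks at a constant rate across
	 * P-state changes, so mark them accordingly.
	 */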
	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);

	/*
	 * Atom erratum AAE44/AAF40/AAG38/AAH41:
	 *
	 * A race condition between speculative fetches and invalidating
	 * a large page.  This is worked around in microcode, but we
	 * need the microcode to have already been loaded... so if it is
	 * not, recommend a BIOS update and disable large pages.
	 */
	if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2) {
		u32 ucode, junk;

		wrmsr(MSR_IA32_UCODE_REV, 0, 0);
		sync_core();
		rdmsr(MSR_IA32_UCODE_REV, junk, ucode);

		if (ucode < 0x20e) {
			printk(KERN_WARNING "Atom PSE erratum detected, BIOS microcode update recommended\n");
			clear_cpu_cap(c, X86_FEATURE_PSE);
		}
	}

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#else
	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
	if (c->x86 == 15 && c->x86_cache_alignment == 64)
		c->x86_cache_alignment = 128;
#endif

	/* CPUID workaround for 0F33/0F34 CPU */
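	/*
	 * The CPUID on these steppings is understood to report a wrong
	 * physical address width, so it is pinned here to the 36 bits
	 * the parts actually implement.
	 */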
	if (c->x86 == 0xF && c->x86_model == 0x3
	    && (c->x86_mask == 0x3 || c->x86_mask == 0x4))
		c->x86_phys_bits = 36;

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states.
	 *
	 * It is also reliable across cores and sockets. (but not across
	 * cabinets - we turn it off in that case explicitly.)
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
		if (!check_tsc_unstable())
			sched_clock_stable = 1;
	}

	/*
	 * There is a known erratum on Pentium III and Core Solo
	 * and Core Duo CPUs.
	 * " Page with PAT set to WC while associated MTRR is UC
	 *   may consolidate to UC "
	 * Because of this erratum, it is better to stick with
	 * setting WC in MTRR rather than using PAT on these CPUs.
	 *
	 * Enable PAT WC only on P4, Core 2 or later CPUs.
	 */
	if (c->x86 == 6 && c->x86_model < 15)
		clear_cpu_cap(c, X86_FEATURE_PAT);

#ifdef CONFIG_KMEMCHECK
	/*
	 * P4s have a "fast strings" feature which causes single-
	 * stepping REP instructions to only generate a #DB on
	 * cache-line boundaries.
	 *
	 * Ingo Molnar reported a Pentium D (model 6) and a Xeon
	 * (model 2) with the same problem.
	 */
	if (c->x86 == 15) {
		u64 misc_enable;

		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);

		if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) {
			printk(KERN_INFO "kmemcheck: Disabling fast string operations\n");

			misc_enable &= ~MSR_IA32_MISC_ENABLE_FAST_STRING;
			wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
		}
	}
#endif
}

#ifdef CONFIG_X86_32
/*
 *	Early probe support logic for ppro memory erratum #50
 *
 *	This is called before we do cpu ident work
 */

int __cpuinit ppro_with_ram_bug(void)
{
	/* Uses data from early_cpu_detect now */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_mask < 8) {
		printk(KERN_INFO "Pentium Pro with Errata#50 detected. Taking evasive action.\n");
		return 1;
	}
	return 0;
}

#ifdef CONFIG_X86_F00F_BUG
static void __cpuinit trap_init_f00f_bug(void)
{
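	/*
	 * Map the IDT read-only at a fixmap address: the erroneous locked
	 * bus cycle that would otherwise wedge the CPU then takes a page
	 * fault on the IDT access instead, and the fault handler can
	 * deliver the invalid-opcode trap by hand.
	 */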
	__set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);

	/*
	 * Update the IDT descriptor and reload the IDT so that
	 * it uses the read-only mapped virtual address.
	 */
	idt_descr.address = fix_to_virt(FIX_F00F_IDT);
	load_idt(&idt_descr);
}
#endif

static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	/* calling is from identify_secondary_cpu() ? */
	if (!c->cpu_index)
		return;

	/*
	 * Mask B, Pentium, but not Pentium MMX
	 */
	if (c->x86 == 5 &&
	    c->x86_mask >= 1 && c->x86_mask <= 4 &&
	    c->x86_model <= 3) {
		/*
		 * Remember we have B step Pentia with bugs
		 */
		WARN_ONCE(1, "WARNING: SMP operation may be unreliable "
				    "with B stepping processors.\n");
	}
#endif
}

static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
{
	unsigned long lo, hi;

#ifdef CONFIG_X86_F00F_BUG
	/*
	 * All current models of Pentium and Pentium with MMX technology CPUs
	 * have the F0 0F bug, which lets nonprivileged users lock up the
	 * system.
	 * Note that the workaround only should be initialized once...
	 */
	c->f00f_bug = 0;
	if (!paravirt_enabled() && c->x86 == 5) {
		static int f00f_workaround_enabled;

		c->f00f_bug = 1;
		if (!f00f_workaround_enabled) {
			trap_init_f00f_bug();
			printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n");
			f00f_workaround_enabled = 1;
		}
	}
#endif
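
	/*
	 * The stepping checks below pack the CPU signature as
	 * (family << 8 | model << 4 | stepping), so that a full
	 * family/model/stepping triple compares as a single integer;
	 * 0x633 is family 6, model 3, stepping 3.
	 */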
	/*
	 * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
	 * model 3 mask 3
	 */
	if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
		clear_cpu_cap(c, X86_FEATURE_SEP);

	/*
	 * P4 Xeon errata 037 workaround.
	 * Hardware prefetcher may cause stale data to be loaded into the cache.
	 */
	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
		rdmsr(MSR_IA32_MISC_ENABLE, lo, hi);
		if ((lo & MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE) == 0) {
			printk(KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n");
			printk(KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n");
			lo |= MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE;
			wrmsr(MSR_IA32_MISC_ENABLE, lo, hi);
		}
	}

	/*
	 * See if we have a good local APIC by checking for buggy Pentia,
	 * i.e. all B steppings and the C2 stepping of P54C when using their
	 * integrated APIC (see 11AP erratum in "Pentium Processor
	 * Specification Update").
	 */
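	/* 0x520 is family 5, model 2: the P54C. */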
	if (cpu_has_apic && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
	    (c->x86_mask < 0x6 || c->x86_mask == 0xb))
		set_cpu_cap(c, X86_FEATURE_11AP);

#ifdef CONFIG_X86_INTEL_USERCOPY
	/*
	 * Set up the preferred alignment for movsl bulk memory moves
	 */
	switch (c->x86) {
	case 4:		/* 486: untested */
		break;
	case 5:		/* Old Pentia: untested */
		break;
	case 6:		/* PII/PIII only like movsl with 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	case 15:	/* P4 is OK down to 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	}
#endif

#ifdef CONFIG_X86_NUMAQ
	numaq_tsc_disable();
#endif

	intel_smp_check(c);
}
#else
static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
{
}
#endif

static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
{
#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
	unsigned node;
	int cpu = smp_processor_id();

	/* Don't do the funky fallback heuristics the AMD version employs
	   for now. */
	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE || !node_online(node)) {
		/* reuse the value from init_cpu_to_node() */
		node = cpu_to_node(cpu);
	}
	numa_set_node(cpu, node);
#endif
}

/*
 * find out the number of processor cores on the die
 */
static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
	unsigned int eax, ebx, ecx, edx;

	if (c->cpuid_level < 4)
		return 1;

	/* Intel has a non-standard dependency on %ecx for this CPUID level. */
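	/* EAX[31:26] of leaf 4 is the maximum core ID on the package, minus one. */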
	cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
	if (eax & 0x1f)	/* a non-zero cache type means leaf 4 is supported */
		return (eax >> 26) + 1;
	else
		return 1;
}

static void __cpuinit detect_vmx_virtcap(struct cpuinfo_x86 *c)
{
	/* Intel VMX MSR indicated features */
#define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW	0x00200000
#define X86_VMX_FEATURE_PROC_CTLS_VNMI		0x00400000
#define X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS	0x80000000
#define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC	0x00000001
#define X86_VMX_FEATURE_PROC_CTLS2_EPT		0x00000002
#define X86_VMX_FEATURE_PROC_CTLS2_VPID		0x00000020

	u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2;

	clear_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
	clear_cpu_cap(c, X86_FEATURE_VNMI);
	clear_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
	clear_cpu_cap(c, X86_FEATURE_EPT);
	clear_cpu_cap(c, X86_FEATURE_VPID);
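
	/*
	 * In the VMX capability MSRs the low dword reports controls that
	 * must be 1 and the high dword reports controls that may be 1;
	 * OR-ing the two gives every control the hardware can run with
	 * enabled.
	 */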
	rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high);
	msr_ctl = vmx_msr_high | vmx_msr_low;
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW)
		set_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_VNMI)
		set_cpu_cap(c, X86_FEATURE_VNMI);
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS) {
		rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
		      vmx_msr_low, vmx_msr_high);
		msr_ctl2 = vmx_msr_high | vmx_msr_low;
		if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) &&
		    (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW))
			set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT)
			set_cpu_cap(c, X86_FEATURE_EPT);
		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID)
			set_cpu_cap(c, X86_FEATURE_VPID);
	}
}

static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
	unsigned int l2 = 0;

	early_init_intel(c);

	intel_workarounds(c);

	/*
	 * Detect the extended topology information if available. This
	 * will reinitialise the initial_apicid which will be used
	 * in init_intel_cacheinfo()
	 */
	detect_extended_topology(c);

	l2 = init_intel_cacheinfo(c);
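
	/*
	 * CPUID leaf 0xA: EAX[7:0] is the architectural PMU version and
	 * EAX[15:8] the number of general-purpose counters per logical CPU.
	 */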
	if (c->cpuid_level > 9) {
		unsigned eax = cpuid_eax(10);
		/* Check for version and the number of counters */
		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
	}

	if (cpu_has_xmm2)
		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
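
	/*
	 * In IA32_MISC_ENABLE, bit 11 set means BTS is unavailable and
	 * bit 12 set means PEBS is unavailable, hence the inverted tests.
	 */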
	if (cpu_has_ds) {
		unsigned int l1;
		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
		if (!(l1 & (1<<11)))
			set_cpu_cap(c, X86_FEATURE_BTS);
		if (!(l1 & (1<<12)))
			set_cpu_cap(c, X86_FEATURE_PEBS);
	}

	if (c->x86 == 6 && c->x86_model == 29 && cpu_has_clflush)
		set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR);

#ifdef CONFIG_X86_64
	if (c->x86 == 15)
		c->x86_cache_alignment = c->x86_clflush_size * 2;
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
#else
	/*
	 * Names for the Pentium II/Celeron processors
	 * detectable only by also checking the cache size.
	 * Dixon is NOT a Celeron.
	 */
	if (c->x86 == 6) {
		char *p = NULL;

		switch (c->x86_model) {
		case 5:
			if (c->x86_mask == 0) {
				if (l2 == 0)
					p = "Celeron (Covington)";
				else if (l2 == 256)
					p = "Mobile Pentium II (Dixon)";
			}
			break;

		case 6:
			if (l2 == 128)
				p = "Celeron (Mendocino)";
			else if (c->x86_mask == 0 || c->x86_mask == 5)
				p = "Celeron-A";
			break;

		case 8:
			if (l2 == 128)
				p = "Celeron (Coppermine)";
			break;
		}

		if (p)
			strcpy(c->x86_model_id, p);
	}

	if (c->x86 == 15)
		set_cpu_cap(c, X86_FEATURE_P4);
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_P3);
#endif

	if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
		/*
		 * let's use the legacy cpuid vector 0x1 and 0x4 for topology
		 * detection.
		 */
		c->x86_max_cores = intel_num_cpu_cores(c);
#ifdef CONFIG_X86_32
		detect_ht(c);
#endif
	}

	/* Work around errata */
	srat_detect_node(c);

	if (cpu_has(c, X86_FEATURE_VMX))
		detect_vmx_virtcap(c);
}

#ifdef CONFIG_X86_32
static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/*
	 * Intel PIII Tualatin. This comes in two flavours.
	 * One has 256kb of cache, the other 512. We have no way
	 * to determine which, so we use a boottime override
	 * for the 512kb model, and assume 256 otherwise.
	 */
	if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
		size = 256;
	return size;
}
#endif

static const struct cpu_dev __cpuinitconst intel_cpu_dev = {
	.c_vendor	= "Intel",
	.c_ident	= { "GenuineIntel" },
#ifdef CONFIG_X86_32
	.c_models = {
		{ .vendor = X86_VENDOR_INTEL, .family = 4, .model_names =
		  {
			  [0] = "486 DX-25/33",
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 5, .model_names =
		  {
			  [0] = "Pentium 60/66 A-step",
			  [1] = "Pentium 60/66",
			  [2] = "Pentium 75 - 200",
			  [3] = "OverDrive PODP5V83",
			  [7] = "Mobile Pentium 75 - 200",
			  [8] = "Mobile Pentium MMX"
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 6, .model_names =
		  {
			  [0] = "Pentium Pro A-step",
			  [3] = "Pentium II (Klamath)",
			  [4] = "Pentium II (Deschutes)",
			  [5] = "Pentium II (Deschutes)",
			  [6] = "Mobile Pentium II",
			  [7] = "Pentium III (Katmai)",
			  [8] = "Pentium III (Coppermine)",
			  [10] = "Pentium III (Cascades)",
			  [11] = "Pentium III (Tualatin)",
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 15, .model_names =
		  {
			  [0] = "Pentium 4 (Unknown)",
			  [1] = "Pentium 4 (Willamette)",
			  [2] = "Pentium 4 (Northwood)",
			  [4] = "Pentium 4 (Foster)",
			  [5] = "Pentium 4 (Foster)",
		  }
		},
	},
	.c_size_cache	= intel_size_cache,
#endif
	.c_early_init	= early_init_intel,
	.c_init		= init_intel,
	.c_x86_vendor	= X86_VENDOR_INTEL,
};

cpu_dev_register(intel_cpu_dev);