#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/mm.h>

#include <linux/io.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/cpu.h>
#include <asm/pci-direct.h>

#ifdef CONFIG_X86_64
# include <asm/numa_64.h>
# include <asm/mmconfig.h>
# include <asm/cacheflush.h>
#endif

#include "cpu.h"
#ifdef CONFIG_X86_32
/*
 *	B step AMD K6 before B 9730xxxx have hardware bugs that can cause
 *	misexecution of code under Linux. Owners of such processors should
 *	contact AMD for precise details and a CPU swap.
 *
 *	See	http://www.multimania.com/poulot/k6bug.html
 *		http://www.amd.com/K6/k6docs/revgd.html
 *
 *	The following test is erm.. interesting. AMD neglected to up
 *	the chip setting when fixing the bug but they also tweaked some
 *	performance at the same time..
 */
extern void vide(void);
__asm__(".align 4\nvide: ret");
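/*
 * Note: vide() is intentionally a bare "ret"; init_amd_k6() below times a
 * large number of indirect calls to it to detect the K6 erratum.
 */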
static void __cpuinit init_amd_k5(struct cpuinfo_x86 *c)
{
/*
 * General Systems BIOSen alias the cpu frequency registers
 * of the Elan at 0x000df000. Unfortunately, one of the Linux
 * drivers subsequently pokes it, and changes the CPU speed.
 * Workaround : Remove the unneeded alias.
 */
#define CBAR		(0xfffc) /* Configuration Base Address (32-bit) */
#define CBAR_ENB	(0x80000000)
#define CBAR_KEY	(0X000000CB)
	if (c->x86_model == 9 || c->x86_model == 10) {
		if (inl(CBAR) & CBAR_ENB)
			outl(0 | CBAR_KEY, CBAR);
	}
}
static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
{
	u32 l, h;
	int mbytes = num_physpages >> (20-PAGE_SHIFT);
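	/*
	 * num_physpages counts PAGE_SIZE units, so shifting right by
	 * (20 - PAGE_SHIFT) converts pages to whole megabytes. Illustrative
	 * values: with PAGE_SHIFT == 12 (4 KB pages), 32768 pages >> 8
	 * gives mbytes == 128.
	 */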
	if (c->x86_model < 6) {
		/* Based on AMD doc 20734R - June 2000 */
		if (c->x86_model == 0) {
			clear_cpu_cap(c, X86_FEATURE_APIC);
			set_cpu_cap(c, X86_FEATURE_PGE);
		}
		return;
	}
	if (c->x86_model == 6 && c->x86_mask == 1) {
		const int K6_BUG_LOOP = 1000000;
		int n;
		void (*f_vide)(void);
		unsigned long d, d2;

		printk(KERN_INFO "AMD K6 stepping B detected - ");

		/*
		 * It looks like AMD fixed the 2.6.2 bug and improved indirect
		 * calls at the same time.
		 */
		n = K6_BUG_LOOP;
		f_vide = vide;
		rdtscl(d);
		while (n--)
			f_vide();
		rdtscl(d2);
		d = d2-d;

		if (d > 20*K6_BUG_LOOP)
			printk("system stability may be impaired when more than 32 MB are used.\n");
		else
			printk("probably OK (after B9730xxxx).\n");
		printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n");
	}
	/* K6 with old style WHCR */
	if (c->x86_model < 8 ||
	   (c->x86_model == 8 && c->x86_mask < 8)) {
		/* We can only write allocate on the low 508Mb */
		if (mbytes > 508)
			mbytes = 508;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0x0000FFFF) == 0) {
			unsigned long flags;
			l = (1<<0)|((mbytes/4)<<1);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
				mbytes);
		}
		return;
	}
	if ((c->x86_model == 8 && c->x86_mask > 7) ||
	     c->x86_model == 9 || c->x86_model == 13) {
		/* The more serious chips .. */

		if (mbytes > 4092)
			mbytes = 4092;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0xFFFF0000) == 0) {
			unsigned long flags;
			l = ((mbytes>>2)<<22)|(1<<16);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
				mbytes);
		}

		return;
	}
	if (c->x86_model == 10) {
		/* AMD Geode LX is model 10 */
		/* placeholder for any needed mods */
		return;
	}
}
static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	/* calling is from identify_secondary_cpu() ? */
	if (c->cpu_index == boot_cpu_id)
		return;

	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	/* Athlon 660/661 is valid. */
	if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
	    (c->x86_mask == 1)))
		goto valid_k7;

	/* Duron 670 is valid */
	if ((c->x86_model == 7) && (c->x86_mask == 0))
		goto valid_k7;

	/*
	 * Athlon 662, Duron 671, and Athlon >model 7 have the MP capability
	 * bit. It's worth noting that the A5 stepping (662) of some
	 * Athlon XP's have the MP bit set.
	 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
	 * more.
	 */
	if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
	    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
	     (c->x86_model > 7))
		if (cpu_has_mp)
			goto valid_k7;

	/* If we get here, not a certified SMP capable AMD system. */

	/*
	 * Don't taint if we are running SMP kernel on a single non-MP
	 * approved Athlon
	 */
	WARN_ONCE(1, "WARNING: This combination of AMD"
		" processors is not suitable for SMP.\n");
	if (!test_taint(TAINT_UNSAFE_SMP))
		add_taint(TAINT_UNSAFE_SMP);

valid_k7:
	;
#endif
}
static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
{
	u32 l, h;

	/*
	 * Bit 15 of Athlon specific MSR 15, needs to be 0
	 * to enable SSE on Palomino/Morgan/Barton CPU's.
	 * If the BIOS didn't enable it already, enable it here.
	 */
	if (c->x86_model >= 6 && c->x86_model <= 10) {
		if (!cpu_has(c, X86_FEATURE_XMM)) {
			printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
			rdmsr(MSR_K7_HWCR, l, h);
			l &= ~0x00008000;
			wrmsr(MSR_K7_HWCR, l, h);
			set_cpu_cap(c, X86_FEATURE_XMM);
		}
	}

	/*
	 * It's been determined by AMD that Athlons since model 8 stepping 1
	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
	 * As per AMD technical note 27212 0.2
	 */
	if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
		rdmsr(MSR_K7_CLK_CTL, l, h);
		if ((l & 0xfff00000) != 0x20000000) {
			printk(KERN_INFO "CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
				l, (l & 0x000fffff)|0x20000000);
			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
		}
	}

	set_cpu_cap(c, X86_FEATURE_K7);

	amd_k7_smp_check(c);
}
#endif
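/*
 * Worked example for the CLK_CTL reprogramming in init_amd_k7() above
 * (illustrative MSR value): if l reads 0x60031223, then
 * (l & 0x000fffff) == 0x00031223 and OR-ing in 0x20000000 gives
 * 0x20031223, i.e. 6003xxxx becomes 2003xxxx as the technical note
 * recommends.
 */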
#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
static int __cpuinit nearby_node(int apicid)
{
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif
#ifdef CONFIG_X86_HT
/*
 * Fixup core topology information for AMD multi-node processors.
 * Assumption 1: Number of cores in each internal node is the same.
 * Assumption 2: Mixed systems with both single-node and dual-node
 *               processors are not supported.
 */
static void __cpuinit amd_fixup_dcm(struct cpuinfo_x86 *c)
{
	u32 t, cpn;
	u8 n, n_id;
	int cpu = smp_processor_id();

	/* fixup topology information only once for a core */
	if (cpu_has(c, X86_FEATURE_AMD_DCM))
		return;

	/* check for multi-node processor on boot cpu */
	t = read_pci_config(0, 24, 3, 0xe8);
	if (!(t & (1 << 29)))
		return;

	set_cpu_cap(c, X86_FEATURE_AMD_DCM);

	/* cores per node: each internal node has half the number of cores */
	cpn = c->x86_max_cores >> 1;

	/* even-numbered NB_id of this dual-node processor */
	n = c->phys_proc_id << 1;

	/*
	 * determine internal node id and assign cores fifty-fifty to
	 * each node of the dual-node processor
	 */
	t = read_pci_config(0, 24 + n, 3, 0xe8);
	n = (t >> 30) & 0x3;
	if (n == 0) {
		if (c->cpu_core_id < cpn)
			n_id = 0;
		else
			n_id = 1;
	} else {
		if (c->cpu_core_id < cpn)
			n_id = 1;
		else
			n_id = 0;
	}

	/* compute entire NodeID, use llc_shared_map to store sibling info */
	per_cpu(cpu_llc_id, cpu) = (c->phys_proc_id << 1) + n_id;

	/* fixup core id to be in range from 0 to (cpn - 1) */
	c->cpu_core_id = c->cpu_core_id % cpn;
}
#endif
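/*
 * Worked example for amd_fixup_dcm() (illustrative, assuming a dual-node
 * package with x86_max_cores == 12): cpn == 6, so cores 0-5 end up on one
 * internal node and cores 6-11 on the other (which one depends on the
 * node id read back over PCI), and cpu_core_id is folded back into the
 * 0-5 range by the final modulo.
 */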
/*
 * On an AMD dual core setup the lower bits of the APIC id distinguish the
 * cores. Assumes number of cores is a power of two.
 */
static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
	unsigned bits;
	int cpu = smp_processor_id();

	bits = c->x86_coreid_bits;
	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
	/* Convert the initial APIC ID into the socket ID */
	c->phys_proc_id = c->initial_apicid >> bits;
	/* use socket ID also for last level cache */
	per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
	/* fixup topology information on multi-node processors */
	if ((c->x86 == 0x10) && (c->x86_model == 9))
		amd_fixup_dcm(c);
#endif
}
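/*
 * Worked example for amd_detect_cmp() (illustrative values): with
 * x86_coreid_bits == 2 and initial_apicid == 0x7, cpu_core_id ==
 * (0x7 & 0x3) == 3 and phys_proc_id == (0x7 >> 2) == 1, i.e. core 3 of
 * socket 1.
 */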
static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
{
#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
	int cpu = smp_processor_id();
	int node;
	unsigned apicid = cpu_has_apic ? hard_smp_processor_id() : c->apicid;

	node = per_cpu(cpu_llc_id, cpu);

	if (apicid_to_node[apicid] != NUMA_NO_NODE)
		node = apicid_to_node[apicid];
	if (!node_online(node)) {
		/*
		 * Two possibilities here:
		 *
		 * - The CPU is missing memory and no node was created. In
		 *   that case try picking one from a nearby CPU.
		 *
		 * - The APIC IDs differ from the HyperTransport node IDs
		 *   which the K8 northbridge parsing fills in. Assume they
		 *   are all increased by a constant offset, but in the same
		 *   order as the HT nodeids. If that doesn't result in a
		 *   usable node fall back to the path for the previous case.
		 */
		int ht_nodeid = c->initial_apicid;

		if (ht_nodeid >= 0 &&
		    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);

	printk(KERN_INFO "CPU %d/0x%x -> Node %d\n", cpu, apicid, node);
#endif
}
static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
	unsigned bits, ecx;

	/* Multi core CPU? */
	if (c->extended_cpuid_level < 0x80000008)
		return;

	ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;

	/* Otherwise recompute */
	if (!bits) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	c->x86_coreid_bits = bits;
#endif
}
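/*
 * Illustrative CPUID 0x80000008 ECX decoding (values assumed): for
 * ecx == 0x2003, (ecx & 0xff) + 1 == 4 cores and (ecx >> 12) & 0xF == 2
 * core id bits. Had the bit-shift field read 0, the fallback loop above
 * would also have computed bits == 2, the smallest shift with
 * (1 << bits) >= 4.
 */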
static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
{
	early_init_amd_mc(c);

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states.
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	}

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#else
	/* Set MTRR capability flag if appropriate */
	if (c->x86 == 5)
		if (c->x86_model == 13 || c->x86_model == 9 ||
		    (c->x86_model == 8 && c->x86_mask >= 8))
			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
#endif

#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
	/* check CPU config space for extended APIC ID */
	if (cpu_has_apic && c->x86 >= 0xf) {
		unsigned int val;

		val = read_pci_config(0, 24, 0, 0x68);
		if ((val & ((1 << 17) | (1 << 18))) == ((1 << 17) | (1 << 18)))
			set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
	}
#endif
}
static void __cpuinit init_amd(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned long long value;

	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	if (c->x86 == 0xf) {
		rdmsrl(MSR_K7_HWCR, value);
		value |= 1 << 6;
		wrmsrl(MSR_K7_HWCR, value);
	}
#endif

	early_init_amd(c);
	/*
	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
	 */
	clear_cpu_cap(c, 0*32+31);
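	/*
	 * For reference: capability bits are indexed as (word * 32 + bit),
	 * so 0*32+31 is bit 31 of the first cap word (CPUID 1 EDX) and
	 * 1*32+31 is bit 31 of the extended word (CPUID 0x80000001 EDX),
	 * which is the real 3DNow! bit.
	 */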
#ifdef CONFIG_X86_64
	/* On C+ stepping K8 rep microcode works well for copy/memset */
	if (c->x86 == 0xf) {
		u32 level;

		level = cpuid_eax(1);
		if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
			set_cpu_cap(c, X86_FEATURE_REP_GOOD);

		/*
		 * Some BIOSes incorrectly force this feature, but only K8
		 * revision D (model = 0x14) and later actually support it.
		 * (AMD Erratum #110, docId: 25759).
		 */
		if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
			u64 val;

			clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
			if (!rdmsrl_amd_safe(0xc001100d, &val)) {
				val &= ~(1ULL << 32);
				wrmsrl_amd_safe(0xc001100d, val);
			}
		}
	}
	if (c->x86 == 0x10 || c->x86 == 0x11)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
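	/*
	 * For reference on the level check above: CPUID 1 EAX packs the
	 * stepping in bits 3-0, model in bits 7-4 and family in bits 11-8,
	 * so e.g. eax == 0x0f4a (family 0xf, model 4, stepping 0xa) falls
	 * in [0x0f48, 0x0f50) and qualifies for REP_GOOD.
	 */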
#else

	/*
	 * FIXME: We should handle the K5 here. Set up the write
	 * range and also turn on MSR 83 bits 4 and 31 (write alloc,
	 * no bus pipeline)
	 */

	switch (c->x86) {
	case 4:
		init_amd_k5(c);
		break;
	case 5:
		init_amd_k6(c);
		break;
	case 6: /* An Athlon/Duron */
		init_amd_k7(c);
		break;
	}

	/* K6s report MCEs but don't actually have all the MSRs */
	if (c->x86 < 6)
		clear_cpu_cap(c, X86_FEATURE_MCE);
#endif

	/* Enable workaround for FXSAVE leak */
	if (c->x86 >= 6)
		set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);
	if (!c->x86_model_id[0]) {
		switch (c->x86) {
		case 0xf:
			/*
			 * Should distinguish models here, but this is only
			 * a fallback anyway.
			 */
			strcpy(c->x86_model_id, "Hammer");
			break;
		}
	}
	display_cacheinfo(c);

	/* Multi core CPU? */
	if (c->extended_cpuid_level >= 0x80000008) {
		amd_detect_cmp(c);
		srat_detect_node(c);
	}

	if (c->extended_cpuid_level >= 0x80000006) {
		if ((c->x86 >= 0x0f) && (cpuid_edx(0x80000006) & 0xf000))
			num_cache_leaves = 4;
		else
			num_cache_leaves = 3;
	}

	if (c->x86 >= 0xf && c->x86 <= 0x11)
		set_cpu_cap(c, X86_FEATURE_K8);

	if (cpu_has_xmm2) {
		/* MFENCE stops RDTSC speculation */
		set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
	}
#ifdef CONFIG_X86_64
	if (c->x86 == 0x10) {
		/* do this for boot cpu */
		if (c == &boot_cpu_data)
			check_enable_amd_mmconf_dmi();

		fam10h_check_enable_mmcfg();
	}

	if (c == &boot_cpu_data && c->x86 >= 0xf && c->x86 <= 0x11) {
		unsigned long long tseg;

		/*
		 * Split up direct mapping around the TSEG SMM area.
		 * Don't do it for gbpages because there seems very little
		 * benefit in doing so.
		 */
		if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
			printk(KERN_DEBUG "tseg: %010llx\n", tseg);
			if ((tseg>>PMD_SHIFT) <
				(max_low_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) ||
				((tseg>>PMD_SHIFT) <
				(max_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) &&
				(tseg>>PMD_SHIFT) >= (1ULL<<(32 - PMD_SHIFT))))
				set_memory_4k((unsigned long)__va(tseg), 1);
		}
	}
#endif
}
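/*
 * Illustrative arithmetic for the TSEG check in init_amd() above (values
 * assumed): with PMD_SHIFT == 21 (2 MB mappings) and tseg == 0xcff00000,
 * tseg >> PMD_SHIFT == 0x67f. If that 2 MB slot lies inside the mapped
 * range, set_memory_4k() re-maps the single page covering TSEG with 4 KB
 * entries so the SMM area does not share a large page with normal memory.
 */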
#ifdef CONFIG_X86_32
static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
					     unsigned int size)
{
	/* AMD errata T13 (order #21922) */
	if (c->x86 == 6) {
		if (c->x86_model == 3 && c->x86_mask == 0)	/* Duron Rev A0 */
			size = 64;
		if (c->x86_model == 4 &&
		    (c->x86_mask == 0 || c->x86_mask == 1))	/* Tbird rev A1/A2 */
			size = 256;
	}
	return size;
}
#endif
static const struct cpu_dev __cpuinitconst amd_cpu_dev = {
	.c_vendor	= "AMD",
	.c_ident	= { "AuthenticAMD" },
#ifdef CONFIG_X86_32
	.c_models = {
		{ .vendor = X86_VENDOR_AMD, .family = 4, .model_names =
		  {
			  [3] = "486 DX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB",
			  [14] = "Am5x86-WT",
			  [15] = "Am5x86-WB"
		  }
		},
	},
	.c_size_cache	= amd_size_cache,
#endif
	.c_early_init	= early_init_amd,
	.c_init		= init_amd,
	.c_x86_vendor	= X86_VENDOR_AMD,
};

cpu_dev_register(amd_cpu_dev);