#include <linux/export.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/mm.h>

#include <linux/io.h>
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/cpu.h>
#include <asm/pci-direct.h>

#ifdef CONFIG_X86_64
# include <asm/mmconfig.h>
# include <asm/cacheflush.h>
#endif

#include "cpu.h"

static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
	struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
	u32 gprs[8] = { 0 };
	int err;

	WARN_ONCE((c->x86 != 0xf), "%s should only be used on K8!\n", __func__);

	gprs[1] = msr;
	gprs[7] = 0x9c5a203a;

	err = rdmsr_safe_regs(gprs);

	*p = gprs[0] | ((u64)gprs[2] << 32);

	return err;
}

static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
{
	struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
	u32 gprs[8] = { 0 };

	WARN_ONCE((c->x86 != 0xf), "%s should only be used on K8!\n", __func__);

	gprs[0] = (u32)val;
	gprs[1] = msr;
	gprs[2] = val >> 32;
	gprs[7] = 0x9c5a203a;

	return wrmsr_safe_regs(gprs);
}
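
/*
 * A note on the two helpers above (a sketch of the calling convention,
 * assuming rdmsr_safe_regs()/wrmsr_safe_regs() take gprs[] in x86
 * register-number order, 0 = EAX, 1 = ECX, 2 = EDX, ..., 7 = EDI):
 * ECX carries the MSR number, EAX/EDX carry the low/high halves of the
 * value, and EDI holds 0x9c5a203a, the key K8 wants to see before it
 * grants access to certain MSRs. Typical use, mirroring the LAHF_LM
 * fixup in init_amd() below:
 *
 *	unsigned long long val;
 *
 *	if (!rdmsrl_amd_safe(0xc001100d, &val))		// 0 == success
 *		wrmsrl_amd_safe(0xc001100d, val & ~(1ULL << 32));
 */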
53 | ||
54 | #ifdef CONFIG_X86_32 | |
55 | /* | |
56 | * B step AMD K6 before B 9730xxxx have hardware bugs that can cause | |
57 | * misexecution of code under Linux. Owners of such processors should | |
58 | * contact AMD for precise details and a CPU swap. | |
59 | * | |
60 | * See http://www.multimania.com/poulot/k6bug.html | |
61 | * and section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6" | |
62 | * (Publication # 21266 Issue Date: August 1998) | |
63 | * | |
64 | * The following test is erm.. interesting. AMD neglected to up | |
65 | * the chip setting when fixing the bug but they also tweaked some | |
66 | * performance at the same time.. | |
67 | */ | |
68 | ||
69 | extern void vide(void); | |
70 | __asm__(".align 4\nvide: ret"); | |
71 | ||
72 | static void __cpuinit init_amd_k5(struct cpuinfo_x86 *c) | |
73 | { | |
74 | /* | |
75 | * General Systems BIOSen alias the cpu frequency registers | |
76 | * of the Elan at 0x000df000. Unfortuantly, one of the Linux | |
77 | * drivers subsequently pokes it, and changes the CPU speed. | |
78 | * Workaround : Remove the unneeded alias. | |
79 | */ | |
80 | #define CBAR (0xfffc) /* Configuration Base Address (32-bit) */ | |
81 | #define CBAR_ENB (0x80000000) | |
82 | #define CBAR_KEY (0X000000CB) | |
83 | if (c->x86_model == 9 || c->x86_model == 10) { | |
84 | if (inl(CBAR) & CBAR_ENB) | |
85 | outl(0 | CBAR_KEY, CBAR); | |
86 | } | |
87 | } | |
88 | ||
89 | ||
90 | static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c) | |
91 | { | |
92 | u32 l, h; | |
93 | int mbytes = num_physpages >> (20-PAGE_SHIFT); | |
94 | ||
95 | if (c->x86_model < 6) { | |
96 | /* Based on AMD doc 20734R - June 2000 */ | |
97 | if (c->x86_model == 0) { | |
98 | clear_cpu_cap(c, X86_FEATURE_APIC); | |
99 | set_cpu_cap(c, X86_FEATURE_PGE); | |
100 | } | |
101 | return; | |
102 | } | |
103 | ||
104 | if (c->x86_model == 6 && c->x86_mask == 1) { | |
105 | const int K6_BUG_LOOP = 1000000; | |
106 | int n; | |
107 | void (*f_vide)(void); | |
108 | unsigned long d, d2; | |
109 | ||
110 | printk(KERN_INFO "AMD K6 stepping B detected - "); | |
111 | ||
112 | /* | |
113 | * It looks like AMD fixed the 2.6.2 bug and improved indirect | |
114 | * calls at the same time. | |
115 | */ | |
116 | ||
117 | n = K6_BUG_LOOP; | |
118 | f_vide = vide; | |
119 | rdtscl(d); | |
120 | while (n--) | |
121 | f_vide(); | |
122 | rdtscl(d2); | |
123 | d = d2-d; | |
124 | ||
125 | if (d > 20*K6_BUG_LOOP) | |
126 | printk(KERN_CONT | |
127 | "system stability may be impaired when more than 32 MB are used.\n"); | |
128 | else | |
129 | printk(KERN_CONT "probably OK (after B9730xxxx).\n"); | |
130 | } | |
131 | ||
132 | /* K6 with old style WHCR */ | |
133 | if (c->x86_model < 8 || | |
134 | (c->x86_model == 8 && c->x86_mask < 8)) { | |
135 | /* We can only write allocate on the low 508Mb */ | |
136 | if (mbytes > 508) | |
137 | mbytes = 508; | |
138 | ||
139 | rdmsr(MSR_K6_WHCR, l, h); | |
140 | if ((l&0x0000FFFF) == 0) { | |
141 | unsigned long flags; | |
142 | l = (1<<0)|((mbytes/4)<<1); | |
143 | local_irq_save(flags); | |
144 | wbinvd(); | |
145 | wrmsr(MSR_K6_WHCR, l, h); | |
146 | local_irq_restore(flags); | |
147 | printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n", | |
148 | mbytes); | |
149 | } | |
150 | return; | |
151 | } | |
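
	/*
	 * Worked example for the old-style WHCR write above, derived
	 * purely from the arithmetic in the code (values hypothetical):
	 * at the full 508 MB limit, l = (1 << 0) | ((508 / 4) << 1) =
	 * 0x1 | 0xfe = 0xff, i.e. write allocation enabled with the
	 * limit encoded in 4 MB units.
	 */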
152 | ||
153 | if ((c->x86_model == 8 && c->x86_mask > 7) || | |
154 | c->x86_model == 9 || c->x86_model == 13) { | |
155 | /* The more serious chips .. */ | |
156 | ||
157 | if (mbytes > 4092) | |
158 | mbytes = 4092; | |
159 | ||
160 | rdmsr(MSR_K6_WHCR, l, h); | |
161 | if ((l&0xFFFF0000) == 0) { | |
162 | unsigned long flags; | |
163 | l = ((mbytes>>2)<<22)|(1<<16); | |
164 | local_irq_save(flags); | |
165 | wbinvd(); | |
166 | wrmsr(MSR_K6_WHCR, l, h); | |
167 | local_irq_restore(flags); | |
168 | printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n", | |
169 | mbytes); | |
170 | } | |
171 | ||
172 | return; | |
173 | } | |
174 | ||
175 | if (c->x86_model == 10) { | |
176 | /* AMD Geode LX is model 10 */ | |
177 | /* placeholder for any needed mods */ | |
178 | return; | |
179 | } | |
180 | } | |
181 | ||
182 | static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c) | |
183 | { | |
184 | /* calling is from identify_secondary_cpu() ? */ | |
185 | if (!c->cpu_index) | |
186 | return; | |
187 | ||
188 | /* | |
189 | * Certain Athlons might work (for various values of 'work') in SMP | |
190 | * but they are not certified as MP capable. | |
191 | */ | |
192 | /* Athlon 660/661 is valid. */ | |
193 | if ((c->x86_model == 6) && ((c->x86_mask == 0) || | |
194 | (c->x86_mask == 1))) | |
195 | goto valid_k7; | |
196 | ||
197 | /* Duron 670 is valid */ | |
198 | if ((c->x86_model == 7) && (c->x86_mask == 0)) | |
199 | goto valid_k7; | |
200 | ||
201 | /* | |
202 | * Athlon 662, Duron 671, and Athlon >model 7 have capability | |
203 | * bit. It's worth noting that the A5 stepping (662) of some | |
204 | * Athlon XP's have the MP bit set. | |
205 | * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for | |
206 | * more. | |
207 | */ | |
208 | if (((c->x86_model == 6) && (c->x86_mask >= 2)) || | |
209 | ((c->x86_model == 7) && (c->x86_mask >= 1)) || | |
210 | (c->x86_model > 7)) | |
211 | if (cpu_has_mp) | |
212 | goto valid_k7; | |
213 | ||
214 | /* If we get here, not a certified SMP capable AMD system. */ | |
215 | ||
216 | /* | |
217 | * Don't taint if we are running SMP kernel on a single non-MP | |
218 | * approved Athlon | |
219 | */ | |
220 | WARN_ONCE(1, "WARNING: This combination of AMD" | |
221 | " processors is not suitable for SMP.\n"); | |
222 | add_taint(TAINT_UNSAFE_SMP, LOCKDEP_NOW_UNRELIABLE); | |
223 | ||
224 | valid_k7: | |
225 | ; | |
226 | } | |
227 | ||
228 | static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c) | |
229 | { | |
230 | u32 l, h; | |
231 | ||
232 | /* | |
233 | * Bit 15 of Athlon specific MSR 15, needs to be 0 | |
234 | * to enable SSE on Palomino/Morgan/Barton CPU's. | |
235 | * If the BIOS didn't enable it already, enable it here. | |
236 | */ | |
237 | if (c->x86_model >= 6 && c->x86_model <= 10) { | |
238 | if (!cpu_has(c, X86_FEATURE_XMM)) { | |
239 | printk(KERN_INFO "Enabling disabled K7/SSE Support.\n"); | |
240 | rdmsr(MSR_K7_HWCR, l, h); | |
241 | l &= ~0x00008000; | |
242 | wrmsr(MSR_K7_HWCR, l, h); | |
243 | set_cpu_cap(c, X86_FEATURE_XMM); | |
244 | } | |
245 | } | |
246 | ||
247 | /* | |
248 | * It's been determined by AMD that Athlons since model 8 stepping 1 | |
249 | * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx | |
250 | * As per AMD technical note 27212 0.2 | |
251 | */ | |
252 | if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) { | |
253 | rdmsr(MSR_K7_CLK_CTL, l, h); | |
254 | if ((l & 0xfff00000) != 0x20000000) { | |
255 | printk(KERN_INFO | |
256 | "CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", | |
257 | l, ((l & 0x000fffff)|0x20000000)); | |
258 | wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h); | |
259 | } | |
260 | } | |
261 | ||
262 | set_cpu_cap(c, X86_FEATURE_K7); | |
263 | ||
264 | amd_k7_smp_check(c); | |
265 | } | |
266 | #endif | |
267 | ||
268 | #ifdef CONFIG_NUMA | |
269 | /* | |
270 | * To workaround broken NUMA config. Read the comment in | |
271 | * srat_detect_node(). | |
272 | */ | |
273 | static int __cpuinit nearby_node(int apicid) | |
274 | { | |
275 | int i, node; | |
276 | ||
277 | for (i = apicid - 1; i >= 0; i--) { | |
278 | node = __apicid_to_node[i]; | |
279 | if (node != NUMA_NO_NODE && node_online(node)) | |
280 | return node; | |
281 | } | |
282 | for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) { | |
283 | node = __apicid_to_node[i]; | |
284 | if (node != NUMA_NO_NODE && node_online(node)) | |
285 | return node; | |
286 | } | |
287 | return first_node(node_online_map); /* Shouldn't happen */ | |
288 | } | |
289 | #endif | |
290 | ||
291 | /* | |
292 | * Fixup core topology information for | |
293 | * (1) AMD multi-node processors | |
294 | * Assumption: Number of cores in each internal node is the same. | |
295 | * (2) AMD processors supporting compute units | |
296 | */ | |
297 | #ifdef CONFIG_X86_HT | |
298 | static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c) | |
299 | { | |
300 | u32 nodes, cores_per_cu = 1; | |
301 | u8 node_id; | |
302 | int cpu = smp_processor_id(); | |
303 | ||
304 | /* get information required for multi-node processors */ | |
305 | if (cpu_has_topoext) { | |
306 | u32 eax, ebx, ecx, edx; | |
307 | ||
308 | cpuid(0x8000001e, &eax, &ebx, &ecx, &edx); | |
309 | nodes = ((ecx >> 8) & 7) + 1; | |
310 | node_id = ecx & 7; | |
311 | ||
312 | /* get compute unit information */ | |
313 | smp_num_siblings = ((ebx >> 8) & 3) + 1; | |
314 | c->compute_unit_id = ebx & 0xff; | |
315 | cores_per_cu += ((ebx >> 8) & 3); | |
316 | } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) { | |
317 | u64 value; | |
318 | ||
319 | rdmsrl(MSR_FAM10H_NODE_ID, value); | |
320 | nodes = ((value >> 3) & 7) + 1; | |
321 | node_id = value & 7; | |
322 | } else | |
323 | return; | |
324 | ||
325 | /* fixup multi-node processor information */ | |
326 | if (nodes > 1) { | |
327 | u32 cores_per_node; | |
328 | u32 cus_per_node; | |
329 | ||
330 | set_cpu_cap(c, X86_FEATURE_AMD_DCM); | |
331 | cores_per_node = c->x86_max_cores / nodes; | |
332 | cus_per_node = cores_per_node / cores_per_cu; | |
333 | ||
334 | /* store NodeID, use llc_shared_map to store sibling info */ | |
335 | per_cpu(cpu_llc_id, cpu) = node_id; | |
336 | ||
337 | /* core id has to be in the [0 .. cores_per_node - 1] range */ | |
338 | c->cpu_core_id %= cores_per_node; | |
339 | c->compute_unit_id %= cus_per_node; | |
340 | } | |
341 | } | |
342 | #endif | |
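
/*
 * Illustrative decode of the cpuid(0x8000001e) path in amd_get_topology()
 * (register values hypothetical): with ecx = 0x102, nodes =
 * ((0x102 >> 8) & 7) + 1 = 2 and node_id = 0x102 & 7 = 2; with
 * ebx = 0x105, smp_num_siblings = ((0x105 >> 8) & 3) + 1 = 2,
 * compute_unit_id = 0x05 and cores_per_cu = 2, i.e. two cores per
 * compute unit on a two-node processor.
 */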
343 | ||
344 | /* | |
345 | * On a AMD dual core setup the lower bits of the APIC id distingush the cores. | |
346 | * Assumes number of cores is a power of two. | |
347 | */ | |
348 | static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c) | |
349 | { | |
350 | #ifdef CONFIG_X86_HT | |
351 | unsigned bits; | |
352 | int cpu = smp_processor_id(); | |
353 | ||
354 | bits = c->x86_coreid_bits; | |
355 | /* Low order bits define the core id (index of core in socket) */ | |
356 | c->cpu_core_id = c->initial_apicid & ((1 << bits)-1); | |
357 | /* Convert the initial APIC ID into the socket ID */ | |
358 | c->phys_proc_id = c->initial_apicid >> bits; | |
359 | /* use socket ID also for last level cache */ | |
360 | per_cpu(cpu_llc_id, cpu) = c->phys_proc_id; | |
361 | amd_get_topology(c); | |
362 | #endif | |
363 | } | |
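
/*
 * Worked example for amd_detect_cmp() (hypothetical values): with
 * x86_coreid_bits = 2 and initial_apicid = 0x5, cpu_core_id =
 * 0x5 & 0x3 = 1 and phys_proc_id = 0x5 >> 2 = 1, i.e. the second
 * core in the second socket.
 */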
364 | ||
365 | u16 amd_get_nb_id(int cpu) | |
366 | { | |
367 | u16 id = 0; | |
368 | #ifdef CONFIG_SMP | |
369 | id = per_cpu(cpu_llc_id, cpu); | |
370 | #endif | |
371 | return id; | |
372 | } | |
373 | EXPORT_SYMBOL_GPL(amd_get_nb_id); | |
374 | ||
375 | static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c) | |
376 | { | |
377 | #ifdef CONFIG_NUMA | |
378 | int cpu = smp_processor_id(); | |
379 | int node; | |
380 | unsigned apicid = c->apicid; | |
381 | ||
382 | node = numa_cpu_node(cpu); | |
383 | if (node == NUMA_NO_NODE) | |
384 | node = per_cpu(cpu_llc_id, cpu); | |
385 | ||
386 | /* | |
387 | * On multi-fabric platform (e.g. Numascale NumaChip) a | |
388 | * platform-specific handler needs to be called to fixup some | |
389 | * IDs of the CPU. | |
390 | */ | |
391 | if (x86_cpuinit.fixup_cpu_id) | |
392 | x86_cpuinit.fixup_cpu_id(c, node); | |
393 | ||
394 | if (!node_online(node)) { | |
395 | /* | |
396 | * Two possibilities here: | |
397 | * | |
398 | * - The CPU is missing memory and no node was created. In | |
399 | * that case try picking one from a nearby CPU. | |
400 | * | |
401 | * - The APIC IDs differ from the HyperTransport node IDs | |
402 | * which the K8 northbridge parsing fills in. Assume | |
403 | * they are all increased by a constant offset, but in | |
404 | * the same order as the HT nodeids. If that doesn't | |
405 | * result in a usable node fall back to the path for the | |
406 | * previous case. | |
407 | * | |
408 | * This workaround operates directly on the mapping between | |
409 | * APIC ID and NUMA node, assuming certain relationship | |
410 | * between APIC ID, HT node ID and NUMA topology. As going | |
411 | * through CPU mapping may alter the outcome, directly | |
412 | * access __apicid_to_node[]. | |
413 | */ | |
414 | int ht_nodeid = c->initial_apicid; | |
415 | ||
416 | if (ht_nodeid >= 0 && | |
417 | __apicid_to_node[ht_nodeid] != NUMA_NO_NODE) | |
418 | node = __apicid_to_node[ht_nodeid]; | |
419 | /* Pick a nearby node */ | |
420 | if (!node_online(node)) | |
421 | node = nearby_node(apicid); | |
422 | } | |
423 | numa_set_node(cpu, node); | |
424 | #endif | |
425 | } | |
426 | ||
427 | static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c) | |
428 | { | |
429 | #ifdef CONFIG_X86_HT | |
430 | unsigned bits, ecx; | |
431 | ||
432 | /* Multi core CPU? */ | |
433 | if (c->extended_cpuid_level < 0x80000008) | |
434 | return; | |
435 | ||
436 | ecx = cpuid_ecx(0x80000008); | |
437 | ||
438 | c->x86_max_cores = (ecx & 0xff) + 1; | |
439 | ||
440 | /* CPU telling us the core id bits shift? */ | |
441 | bits = (ecx >> 12) & 0xF; | |
442 | ||
443 | /* Otherwise recompute */ | |
444 | if (bits == 0) { | |
445 | while ((1 << bits) < c->x86_max_cores) | |
446 | bits++; | |
447 | } | |
448 | ||
449 | c->x86_coreid_bits = bits; | |
450 | #endif | |
451 | } | |
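
/*
 * Example of the recompute fallback above (hypothetical values): if CPUID
 * 0x80000008 reports ECX[7:0] = 5 (so x86_max_cores = 6) but leaves
 * ECX[15:12] zero, the loop settles on bits = 3, the smallest shift with
 * 1 << 3 = 8 >= 6 cores.
 */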
452 | ||
453 | static void __cpuinit bsp_init_amd(struct cpuinfo_x86 *c) | |
454 | { | |
455 | if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) { | |
456 | ||
457 | if (c->x86 > 0x10 || | |
458 | (c->x86 == 0x10 && c->x86_model >= 0x2)) { | |
459 | u64 val; | |
460 | ||
461 | rdmsrl(MSR_K7_HWCR, val); | |
462 | if (!(val & BIT(24))) | |
463 | printk(KERN_WARNING FW_BUG "TSC doesn't count " | |
464 | "with P0 frequency!\n"); | |
465 | } | |
466 | } | |
467 | ||
468 | if (c->x86 == 0x15) { | |
469 | unsigned long upperbit; | |
470 | u32 cpuid, assoc; | |
471 | ||
472 | cpuid = cpuid_edx(0x80000005); | |
473 | assoc = cpuid >> 16 & 0xff; | |
474 | upperbit = ((cpuid >> 24) << 10) / assoc; | |
475 | ||
476 | va_align.mask = (upperbit - 1) & PAGE_MASK; | |
477 | va_align.flags = ALIGN_VA_32 | ALIGN_VA_64; | |
478 | } | |
479 | } | |
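
/*
 * Sketch of the family 0x15 va_align computation above, assuming CPUID
 * 0x80000005 EDX reports L1I geometry (size in KB in bits 31-24,
 * associativity in bits 23-16), as on other AMD parts: for a 64 KB,
 * 2-way cache, upperbit = (64 << 10) / 2 = 32768 and va_align.mask =
 * 0x7fff & PAGE_MASK = 0x7000, i.e. mmap addresses get aligned to the
 * per-way stride to avoid cache-aliasing penalties within a compute unit.
 */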
480 | ||
481 | static void __cpuinit early_init_amd(struct cpuinfo_x86 *c) | |
482 | { | |
483 | early_init_amd_mc(c); | |
484 | ||
485 | /* | |
486 | * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate | |
487 | * with P/T states and does not stop in deep C-states | |
488 | */ | |
489 | if (c->x86_power & (1 << 8)) { | |
490 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); | |
491 | set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); | |
492 | if (!check_tsc_unstable()) | |
493 | sched_clock_stable = 1; | |
494 | } | |
495 | ||
496 | #ifdef CONFIG_X86_64 | |
497 | set_cpu_cap(c, X86_FEATURE_SYSCALL32); | |
498 | #else | |
499 | /* Set MTRR capability flag if appropriate */ | |
500 | if (c->x86 == 5) | |
501 | if (c->x86_model == 13 || c->x86_model == 9 || | |
502 | (c->x86_model == 8 && c->x86_mask >= 8)) | |
503 | set_cpu_cap(c, X86_FEATURE_K6_MTRR); | |
504 | #endif | |
505 | #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI) | |
506 | /* check CPU config space for extended APIC ID */ | |
507 | if (cpu_has_apic && c->x86 >= 0xf) { | |
508 | unsigned int val; | |
509 | val = read_pci_config(0, 24, 0, 0x68); | |
510 | if ((val & ((1 << 17) | (1 << 18))) == ((1 << 17) | (1 << 18))) | |
511 | set_cpu_cap(c, X86_FEATURE_EXTD_APICID); | |
512 | } | |
513 | #endif | |
514 | } | |
515 | ||
516 | static const int amd_erratum_383[]; | |
517 | static const int amd_erratum_400[]; | |
518 | static bool cpu_has_amd_erratum(const int *erratum); | |
519 | ||
520 | static void __cpuinit init_amd(struct cpuinfo_x86 *c) | |
521 | { | |
522 | u32 dummy; | |
523 | unsigned long long value; | |
524 | ||
525 | #ifdef CONFIG_SMP | |
526 | /* | |
527 | * Disable TLB flush filter by setting HWCR.FFDIS on K8 | |
528 | * bit 6 of msr C001_0015 | |
529 | * | |
530 | * Errata 63 for SH-B3 steppings | |
531 | * Errata 122 for all steppings (F+ have it disabled by default) | |
532 | */ | |
533 | if (c->x86 == 0xf) { | |
534 | rdmsrl(MSR_K7_HWCR, value); | |
535 | value |= 1 << 6; | |
536 | wrmsrl(MSR_K7_HWCR, value); | |
537 | } | |
538 | #endif | |
539 | ||
540 | early_init_amd(c); | |
541 | ||
542 | /* | |
543 | * Bit 31 in normal CPUID used for nonstandard 3DNow ID; | |
544 | * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway | |
545 | */ | |
546 | clear_cpu_cap(c, 0*32+31); | |
547 | ||
548 | #ifdef CONFIG_X86_64 | |
549 | /* On C+ stepping K8 rep microcode works well for copy/memset */ | |
550 | if (c->x86 == 0xf) { | |
551 | u32 level; | |
552 | ||
553 | level = cpuid_eax(1); | |
554 | if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58) | |
555 | set_cpu_cap(c, X86_FEATURE_REP_GOOD); | |
556 | ||
557 | /* | |
558 | * Some BIOSes incorrectly force this feature, but only K8 | |
559 | * revision D (model = 0x14) and later actually support it. | |
560 | * (AMD Erratum #110, docId: 25759). | |
561 | */ | |
562 | if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) { | |
563 | clear_cpu_cap(c, X86_FEATURE_LAHF_LM); | |
564 | if (!rdmsrl_amd_safe(0xc001100d, &value)) { | |
565 | value &= ~(1ULL << 32); | |
566 | wrmsrl_amd_safe(0xc001100d, value); | |
567 | } | |
568 | } | |
569 | ||
570 | } | |
571 | if (c->x86 >= 0x10) | |
572 | set_cpu_cap(c, X86_FEATURE_REP_GOOD); | |
573 | ||
574 | /* get apicid instead of initial apic id from cpuid */ | |
575 | c->apicid = hard_smp_processor_id(); | |
576 | #else | |
577 | ||
578 | /* | |
579 | * FIXME: We should handle the K5 here. Set up the write | |
580 | * range and also turn on MSR 83 bits 4 and 31 (write alloc, | |
581 | * no bus pipeline) | |
582 | */ | |
583 | ||
584 | switch (c->x86) { | |
585 | case 4: | |
586 | init_amd_k5(c); | |
587 | break; | |
588 | case 5: | |
589 | init_amd_k6(c); | |
590 | break; | |
591 | case 6: /* An Athlon/Duron */ | |
592 | init_amd_k7(c); | |
593 | break; | |
594 | } | |
595 | ||
596 | /* K6s reports MCEs but don't actually have all the MSRs */ | |
597 | if (c->x86 < 6) | |
598 | clear_cpu_cap(c, X86_FEATURE_MCE); | |
599 | #endif | |
600 | ||
601 | /* Enable workaround for FXSAVE leak */ | |
602 | if (c->x86 >= 6) | |
603 | set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK); | |
604 | ||
605 | if (!c->x86_model_id[0]) { | |
606 | switch (c->x86) { | |
607 | case 0xf: | |
608 | /* Should distinguish Models here, but this is only | |
609 | a fallback anyways. */ | |
610 | strcpy(c->x86_model_id, "Hammer"); | |
611 | break; | |
612 | } | |
613 | } | |
614 | ||
615 | /* re-enable TopologyExtensions if switched off by BIOS */ | |
616 | if ((c->x86 == 0x15) && | |
617 | (c->x86_model >= 0x10) && (c->x86_model <= 0x1f) && | |
618 | !cpu_has(c, X86_FEATURE_TOPOEXT)) { | |
619 | ||
620 | if (!rdmsrl_safe(0xc0011005, &value)) { | |
621 | value |= 1ULL << 54; | |
622 | wrmsrl_safe(0xc0011005, value); | |
623 | rdmsrl(0xc0011005, value); | |
624 | if (value & (1ULL << 54)) { | |
625 | set_cpu_cap(c, X86_FEATURE_TOPOEXT); | |
626 | printk(KERN_INFO FW_INFO "CPU: Re-enabling " | |
627 | "disabled Topology Extensions Support\n"); | |
628 | } | |
629 | } | |
630 | } | |
631 | ||
632 | /* | |
633 | * The way access filter has a performance penalty on some workloads. | |
634 | * Disable it on the affected CPUs. | |
635 | */ | |
636 | if ((c->x86 == 0x15) && | |
637 | (c->x86_model >= 0x02) && (c->x86_model < 0x20)) { | |
638 | ||
639 | if (!rdmsrl_safe(0xc0011021, &value) && !(value & 0x1E)) { | |
640 | value |= 0x1E; | |
641 | wrmsrl_safe(0xc0011021, value); | |
642 | } | |
643 | } | |
644 | ||
645 | cpu_detect_cache_sizes(c); | |
646 | ||
647 | /* Multi core CPU? */ | |
648 | if (c->extended_cpuid_level >= 0x80000008) { | |
649 | amd_detect_cmp(c); | |
650 | srat_detect_node(c); | |
651 | } | |
652 | ||
653 | #ifdef CONFIG_X86_32 | |
654 | detect_ht(c); | |
655 | #endif | |
656 | ||
657 | init_amd_cacheinfo(c); | |
658 | ||
659 | if (c->x86 >= 0xf) | |
660 | set_cpu_cap(c, X86_FEATURE_K8); | |
661 | ||
662 | if (cpu_has_xmm2) { | |
663 | /* MFENCE stops RDTSC speculation */ | |
664 | set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC); | |
665 | } | |
666 | ||
667 | #ifdef CONFIG_X86_64 | |
668 | if (c->x86 == 0x10) { | |
669 | /* do this for boot cpu */ | |
670 | if (c == &boot_cpu_data) | |
671 | check_enable_amd_mmconf_dmi(); | |
672 | ||
673 | fam10h_check_enable_mmcfg(); | |
674 | } | |
675 | ||
676 | if (c == &boot_cpu_data && c->x86 >= 0xf) { | |
677 | unsigned long long tseg; | |
678 | ||
679 | /* | |
680 | * Split up direct mapping around the TSEG SMM area. | |
681 | * Don't do it for gbpages because there seems very little | |
682 | * benefit in doing so. | |
683 | */ | |
684 | if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) { | |
685 | unsigned long pfn = tseg >> PAGE_SHIFT; | |
686 | ||
687 | printk(KERN_DEBUG "tseg: %010llx\n", tseg); | |
688 | if (pfn_range_is_mapped(pfn, pfn + 1)) | |
689 | set_memory_4k((unsigned long)__va(tseg), 1); | |
690 | } | |
691 | } | |
692 | #endif | |
693 | ||
694 | /* | |
695 | * Family 0x12 and above processors have APIC timer | |
696 | * running in deep C states. | |
697 | */ | |
698 | if (c->x86 > 0x11) | |
699 | set_cpu_cap(c, X86_FEATURE_ARAT); | |
700 | ||
701 | if (c->x86 == 0x10) { | |
702 | /* | |
703 | * Disable GART TLB Walk Errors on Fam10h. We do this here | |
704 | * because this is always needed when GART is enabled, even in a | |
705 | * kernel which has no MCE support built in. | |
706 | * BIOS should disable GartTlbWlk Errors themself. If | |
707 | * it doesn't do it here as suggested by the BKDG. | |
708 | * | |
709 | * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012 | |
710 | */ | |
711 | u64 mask; | |
712 | int err; | |
713 | ||
714 | err = rdmsrl_safe(MSR_AMD64_MCx_MASK(4), &mask); | |
715 | if (err == 0) { | |
716 | mask |= (1 << 10); | |
717 | wrmsrl_safe(MSR_AMD64_MCx_MASK(4), mask); | |
718 | } | |
719 | ||
720 | /* | |
721 | * On family 10h BIOS may not have properly enabled WC+ support, | |
722 | * causing it to be converted to CD memtype. This may result in | |
723 | * performance degradation for certain nested-paging guests. | |
724 | * Prevent this conversion by clearing bit 24 in | |
725 | * MSR_AMD64_BU_CFG2. | |
726 | * | |
727 | * NOTE: we want to use the _safe accessors so as not to #GP kvm | |
728 | * guests on older kvm hosts. | |
729 | */ | |
730 | ||
731 | rdmsrl_safe(MSR_AMD64_BU_CFG2, &value); | |
732 | value &= ~(1ULL << 24); | |
733 | wrmsrl_safe(MSR_AMD64_BU_CFG2, value); | |
734 | ||
735 | if (cpu_has_amd_erratum(amd_erratum_383)) | |
736 | set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH); | |
737 | } | |
738 | ||
739 | if (cpu_has_amd_erratum(amd_erratum_400)) | |
740 | set_cpu_bug(c, X86_BUG_AMD_APIC_C1E); | |
741 | ||
742 | rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy); | |
743 | } | |
744 | ||
745 | #ifdef CONFIG_X86_32 | |
746 | static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c, | |
747 | unsigned int size) | |
748 | { | |
749 | /* AMD errata T13 (order #21922) */ | |
750 | if ((c->x86 == 6)) { | |
751 | /* Duron Rev A0 */ | |
752 | if (c->x86_model == 3 && c->x86_mask == 0) | |
753 | size = 64; | |
754 | /* Tbird rev A1/A2 */ | |
755 | if (c->x86_model == 4 && | |
756 | (c->x86_mask == 0 || c->x86_mask == 1)) | |
757 | size = 256; | |
758 | } | |
759 | return size; | |
760 | } | |
761 | #endif | |
762 | ||
763 | static void __cpuinit cpu_set_tlb_flushall_shift(struct cpuinfo_x86 *c) | |
764 | { | |
765 | tlb_flushall_shift = 5; | |
766 | ||
767 | if (c->x86 <= 0x11) | |
768 | tlb_flushall_shift = 4; | |
769 | } | |
770 | ||
771 | static void __cpuinit cpu_detect_tlb_amd(struct cpuinfo_x86 *c) | |
772 | { | |
773 | u32 ebx, eax, ecx, edx; | |
774 | u16 mask = 0xfff; | |
775 | ||
776 | if (c->x86 < 0xf) | |
777 | return; | |
778 | ||
779 | if (c->extended_cpuid_level < 0x80000006) | |
780 | return; | |
781 | ||
782 | cpuid(0x80000006, &eax, &ebx, &ecx, &edx); | |
783 | ||
784 | tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask; | |
785 | tlb_lli_4k[ENTRIES] = ebx & mask; | |
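
	/*
	 * Illustrative decode (hypothetical value): with ebx = 0x04000200,
	 * the L2 dTLB holds (ebx >> 16) & 0xfff = 1024 4K entries and the
	 * L2 iTLB holds ebx & 0xfff = 512 4K entries.
	 */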
786 | ||
787 | /* | |
788 | * K8 doesn't have 2M/4M entries in the L2 TLB so read out the L1 TLB | |
789 | * characteristics from the CPUID function 0x80000005 instead. | |
790 | */ | |
791 | if (c->x86 == 0xf) { | |
792 | cpuid(0x80000005, &eax, &ebx, &ecx, &edx); | |
793 | mask = 0xff; | |
794 | } | |
795 | ||
796 | /* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */ | |
797 | if (!((eax >> 16) & mask)) { | |
798 | u32 a, b, c, d; | |
799 | ||
800 | cpuid(0x80000005, &a, &b, &c, &d); | |
801 | tlb_lld_2m[ENTRIES] = (a >> 16) & 0xff; | |
802 | } else { | |
803 | tlb_lld_2m[ENTRIES] = (eax >> 16) & mask; | |
804 | } | |
805 | ||
806 | /* a 4M entry uses two 2M entries */ | |
807 | tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1; | |
808 | ||
809 | /* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */ | |
810 | if (!(eax & mask)) { | |
811 | /* Erratum 658 */ | |
812 | if (c->x86 == 0x15 && c->x86_model <= 0x1f) { | |
813 | tlb_lli_2m[ENTRIES] = 1024; | |
814 | } else { | |
815 | cpuid(0x80000005, &eax, &ebx, &ecx, &edx); | |
816 | tlb_lli_2m[ENTRIES] = eax & 0xff; | |
817 | } | |
818 | } else | |
819 | tlb_lli_2m[ENTRIES] = eax & mask; | |
820 | ||
821 | tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1; | |
822 | ||
823 | cpu_set_tlb_flushall_shift(c); | |
824 | } | |
825 | ||
826 | static const struct cpu_dev __cpuinitconst amd_cpu_dev = { | |
827 | .c_vendor = "AMD", | |
828 | .c_ident = { "AuthenticAMD" }, | |
829 | #ifdef CONFIG_X86_32 | |
830 | .c_models = { | |
831 | { .vendor = X86_VENDOR_AMD, .family = 4, .model_names = | |
832 | { | |
833 | [3] = "486 DX/2", | |
834 | [7] = "486 DX/2-WB", | |
835 | [8] = "486 DX/4", | |
836 | [9] = "486 DX/4-WB", | |
837 | [14] = "Am5x86-WT", | |
838 | [15] = "Am5x86-WB" | |
839 | } | |
840 | }, | |
841 | }, | |
842 | .c_size_cache = amd_size_cache, | |
843 | #endif | |
844 | .c_early_init = early_init_amd, | |
845 | .c_detect_tlb = cpu_detect_tlb_amd, | |
846 | .c_bsp_init = bsp_init_amd, | |
847 | .c_init = init_amd, | |
848 | .c_x86_vendor = X86_VENDOR_AMD, | |
849 | }; | |
850 | ||
851 | cpu_dev_register(amd_cpu_dev); | |
852 | ||
853 | /* | |
854 | * AMD errata checking | |
855 | * | |
856 | * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or | |
857 | * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that | |
858 | * have an OSVW id assigned, which it takes as first argument. Both take a | |
859 | * variable number of family-specific model-stepping ranges created by | |
860 | * AMD_MODEL_RANGE(). | |
861 | * | |
862 | * Example: | |
863 | * | |
864 | * const int amd_erratum_319[] = | |
865 | * AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2), | |
866 | * AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0), | |
867 | * AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0)); | |
868 | */ | |
869 | ||
870 | #define AMD_LEGACY_ERRATUM(...) { -1, __VA_ARGS__, 0 } | |
871 | #define AMD_OSVW_ERRATUM(osvw_id, ...) { osvw_id, __VA_ARGS__, 0 } | |
872 | #define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \ | |
873 | ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end)) | |
874 | #define AMD_MODEL_RANGE_FAMILY(range) (((range) >> 24) & 0xff) | |
875 | #define AMD_MODEL_RANGE_START(range) (((range) >> 12) & 0xfff) | |
876 | #define AMD_MODEL_RANGE_END(range) ((range) & 0xfff) | |
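
/*
 * Worked decode of the example above: AMD_MODEL_RANGE(0x10, 0x2, 0x1,
 * 0x4, 0x2) packs to 0x10021042, so AMD_MODEL_RANGE_FAMILY() yields 0x10
 * and the (model << 4 | stepping) window runs from 0x021 to 0x042, which
 * is what cpu_has_amd_erratum() below compares against ms.
 */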
877 | ||
878 | static const int amd_erratum_400[] = | |
879 | AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf), | |
880 | AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf)); | |
881 | ||
882 | static const int amd_erratum_383[] = | |
883 | AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf)); | |
884 | ||
885 | static bool cpu_has_amd_erratum(const int *erratum) | |
886 | { | |
887 | struct cpuinfo_x86 *cpu = __this_cpu_ptr(&cpu_info); | |
888 | int osvw_id = *erratum++; | |
889 | u32 range; | |
890 | u32 ms; | |
891 | ||
892 | /* | |
893 | * If called early enough that current_cpu_data hasn't been initialized | |
894 | * yet, fall back to boot_cpu_data. | |
895 | */ | |
896 | if (cpu->x86 == 0) | |
897 | cpu = &boot_cpu_data; | |
898 | ||
899 | if (cpu->x86_vendor != X86_VENDOR_AMD) | |
900 | return false; | |
901 | ||
902 | if (osvw_id >= 0 && osvw_id < 65536 && | |
903 | cpu_has(cpu, X86_FEATURE_OSVW)) { | |
904 | u64 osvw_len; | |
905 | ||
906 | rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len); | |
907 | if (osvw_id < osvw_len) { | |
908 | u64 osvw_bits; | |
909 | ||
910 | rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6), | |
911 | osvw_bits); | |
912 | return osvw_bits & (1ULL << (osvw_id & 0x3f)); | |
913 | } | |
914 | } | |
915 | ||
916 | /* OSVW unavailable or ID unknown, match family-model-stepping range */ | |
917 | ms = (cpu->x86_model << 4) | cpu->x86_mask; | |
918 | while ((range = *erratum++)) | |
919 | if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) && | |
920 | (ms >= AMD_MODEL_RANGE_START(range)) && | |
921 | (ms <= AMD_MODEL_RANGE_END(range))) | |
922 | return true; | |
923 | ||
924 | return false; | |
925 | } |