#include <linux/export.h>
#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/mm.h>

#include <linux/io.h>
#include <linux/sched.h>
#include <linux/random.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/cpu.h>
#include <asm/smp.h>
#include <asm/pci-direct.h>
#include <asm/delay.h>

#ifdef CONFIG_X86_64
# include <asm/mmconfig.h>
# include <asm/cacheflush.h>
#endif

#include "cpu.h"

/*
 * nodes_per_socket: Stores the number of nodes per socket.
 * Refer to Fam15h Models 00-0fh BKDG - CPUID Fn8000_001E_ECX
 * Node Identifiers[10:8]
 */
static u32 nodes_per_socket = 1;

static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
	u32 gprs[8] = { 0 };
	int err;

	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

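	/*
	 * gprs[] feeds rdmsr_safe_regs() in %eax..%edi order; 0x9c5a203a in
	 * %edi is the passcode K8 requires before it exposes these MSRs.
	 */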
	gprs[1] = msr;
	gprs[7] = 0x9c5a203a;

	err = rdmsr_safe_regs(gprs);

	*p = gprs[0] | ((u64)gprs[2] << 32);

	return err;
}

static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
{
	u32 gprs[8] = { 0 };

	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

	gprs[0] = (u32)val;
	gprs[1] = msr;
	gprs[2] = val >> 32;
	gprs[7] = 0x9c5a203a;

	return wrmsr_safe_regs(gprs);
}

/*
 * B step AMD K6 before B 9730xxxx have hardware bugs that can cause
 * misexecution of code under Linux. Owners of such processors should
 * contact AMD for precise details and a CPU swap.
 *
 * See http://www.multimania.com/poulot/k6bug.html
 * and section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
 * (Publication # 21266  Issue Date: August 1998)
 *
 * The following test is, erm, interesting. AMD neglected to bump the
 * chip stepping when fixing the bug, but they also tweaked some
 * performance at the same time.
 */

extern __visible void vide(void);
__asm__(".globl vide\n\t.align 4\nvide: ret");

static void init_amd_k5(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
/*
 * General Systems BIOSen alias the cpu frequency registers
 * of the Elan at 0x000df000. Unfortunately, one of the Linux
 * drivers subsequently pokes it, and changes the CPU speed.
 * Workaround: Remove the unneeded alias.
 */
#define CBAR		(0xfffc)	/* Configuration Base Address (32-bit) */
#define CBAR_ENB	(0x80000000)
#define CBAR_KEY	(0x000000CB)
	if (c->x86_model == 9 || c->x86_model == 10) {
		if (inl(CBAR) & CBAR_ENB)
			outl(0 | CBAR_KEY, CBAR);
	}
#endif
}

static void init_amd_k6(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	u32 l, h;
	int mbytes = get_num_physpages() >> (20-PAGE_SHIFT);

	if (c->x86_model < 6) {
		/* Based on AMD doc 20734R - June 2000 */
		if (c->x86_model == 0) {
			clear_cpu_cap(c, X86_FEATURE_APIC);
			set_cpu_cap(c, X86_FEATURE_PGE);
		}
		return;
	}

	if (c->x86_model == 6 && c->x86_mask == 1) {
		const int K6_BUG_LOOP = 1000000;
		int n;
		void (*f_vide)(void);
		u64 d, d2;

		pr_info("AMD K6 stepping B detected - ");

		/*
		 * It looks like AMD fixed the 2.6.2 bug and improved indirect
		 * calls at the same time.
		 */

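		/*
		 * Time K6_BUG_LOOP indirect calls through vide() with the
		 * TSC: averaging more than 20 cycles per call flags a
		 * stepping that still has the bug.
		 */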
		n = K6_BUG_LOOP;
		f_vide = vide;
		d = rdtsc();
		while (n--)
			f_vide();
		d2 = rdtsc();
		d = d2-d;

		if (d > 20*K6_BUG_LOOP)
			pr_cont("system stability may be impaired when more than 32 MB are used.\n");
		else
			pr_cont("probably OK (after B9730xxxx).\n");
	}

	/* K6 with old style WHCR */
	if (c->x86_model < 8 ||
	   (c->x86_model == 8 && c->x86_mask < 8)) {
		/* We can only write allocate on the low 508Mb */
		if (mbytes > 508)
			mbytes = 508;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0x0000FFFF) == 0) {
			unsigned long flags;
			l = (1<<0)|((mbytes/4)<<1);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			pr_info("Enabling old style K6 write allocation for %d Mb\n",
				mbytes);
		}
		return;
	}

	if ((c->x86_model == 8 && c->x86_mask > 7) ||
	     c->x86_model == 9 || c->x86_model == 13) {
		/* The more serious chips .. */

		if (mbytes > 4092)
			mbytes = 4092;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0xFFFF0000) == 0) {
			unsigned long flags;
			l = ((mbytes>>2)<<22)|(1<<16);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			pr_info("Enabling new style K6 write allocation for %d Mb\n",
				mbytes);
		}

		return;
	}

	if (c->x86_model == 10) {
		/* AMD Geode LX is model 10 */
		/* placeholder for any needed mods */
		return;
	}
#endif
}

static void init_amd_k7(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	u32 l, h;

	/*
	 * Bit 15 of Athlon specific MSR 15, needs to be 0
	 * to enable SSE on Palomino/Morgan/Barton CPUs.
	 * If the BIOS didn't enable it already, enable it here.
	 */
	if (c->x86_model >= 6 && c->x86_model <= 10) {
		if (!cpu_has(c, X86_FEATURE_XMM)) {
			pr_info("Enabling disabled K7/SSE Support.\n");
			msr_clear_bit(MSR_K7_HWCR, 15);
			set_cpu_cap(c, X86_FEATURE_XMM);
		}
	}

	/*
	 * It's been determined by AMD that Athlons since model 8 stepping 1
	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
	 * As per AMD technical note 27212 0.2
	 */
	if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
		rdmsr(MSR_K7_CLK_CTL, l, h);
		if ((l & 0xfff00000) != 0x20000000) {
			pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
				l, ((l & 0x000fffff)|0x20000000));
			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
		}
	}

	set_cpu_cap(c, X86_FEATURE_K7);

	/* Are we being called from identify_secondary_cpu()? If not, done. */
	if (!c->cpu_index)
		return;

	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	/* Athlon 660/661 is valid. */
	if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
	    (c->x86_mask == 1)))
		return;

	/* Duron 670 is valid */
	if ((c->x86_model == 7) && (c->x86_mask == 0))
		return;

	/*
	 * Athlon 662, Duron 671, and Athlon >model 7 have the MP capability
	 * bit. It's worth noting that the A5 stepping (662) of some
	 * Athlon XPs have the MP bit set.
	 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
	 * more.
	 */
	if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
	    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
	     (c->x86_model > 7))
		if (cpu_has(c, X86_FEATURE_MP))
			return;

	/* If we get here, not a certified SMP capable AMD system. */

	/*
	 * Don't taint if we are running SMP kernel on a single non-MP
	 * approved Athlon
	 */
	WARN_ONCE(1, "WARNING: This combination of AMD"
		" processors is not suitable for SMP.\n");
	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
#endif
}

#ifdef CONFIG_NUMA
/*
 * To workaround broken NUMA config.  Read the comment in
 * srat_detect_node().
 */
static int nearby_node(int apicid)
{
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif

/*
 * Fixup core topology information for
 *  (1) AMD multi-node processors
 *      Assumption: Number of cores in each internal node is the same.
 *  (2) AMD processors supporting compute units
 */
#ifdef CONFIG_SMP
static void amd_get_topology(struct cpuinfo_x86 *c)
{
	u32 cores_per_cu = 1;
	u8 node_id;
	int cpu = smp_processor_id();

	/* get information required for multi-node processors */
	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		u32 eax, ebx, ecx, edx;

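		/*
		 * CPUID Fn8000_001E: ECX[10:8] is NodesPerProcessor
		 * (nodes - 1), ECX[7:0] is NodeId (only the low 3 bits are
		 * used here); EBX[15:8] is CoresPerComputeUnit (cores - 1),
		 * EBX[7:0] is ComputeUnitId.
		 */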
		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
		nodes_per_socket = ((ecx >> 8) & 7) + 1;
		node_id = ecx & 7;

		/* get compute unit information */
		smp_num_siblings = ((ebx >> 8) & 3) + 1;
		c->compute_unit_id = ebx & 0xff;
		cores_per_cu += ((ebx >> 8) & 3);
	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
		u64 value;

		rdmsrl(MSR_FAM10H_NODE_ID, value);
		nodes_per_socket = ((value >> 3) & 7) + 1;
		node_id = value & 7;
	} else
		return;

	/* fixup multi-node processor information */
	if (nodes_per_socket > 1) {
		u32 cores_per_node;
		u32 cus_per_node;

		set_cpu_cap(c, X86_FEATURE_AMD_DCM);
		cores_per_node = c->x86_max_cores / nodes_per_socket;
		cus_per_node = cores_per_node / cores_per_cu;

		/* store NodeID, use llc_shared_map to store sibling info */
		per_cpu(cpu_llc_id, cpu) = node_id;

		/* core id has to be in the [0 .. cores_per_node - 1] range */
		c->cpu_core_id %= cores_per_node;
		c->compute_unit_id %= cus_per_node;
	}
}
#endif

/*
 * On an AMD dual-core setup the lower bits of the APIC id distinguish the
 * cores.  Assumes number of cores is a power of two.
 */
static void amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned bits;
	int cpu = smp_processor_id();
	unsigned int socket_id, core_complex_id;

	bits = c->x86_coreid_bits;
	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
	/* Convert the initial APIC ID into the socket ID */
	c->phys_proc_id = c->initial_apicid >> bits;
	/* use socket ID also for last level cache */
	per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
	amd_get_topology(c);

	/*
	 * Fix percpu cpu_llc_id here as LLC topology is different
	 * for Fam17h systems.
	 */
	if (c->x86 != 0x17 || !cpuid_edx(0x80000006))
		return;

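	/*
	 * On Fam17h the L3 is shared per core complex (CCX) of up to eight
	 * cores: the low 3 APIC ID bits select the core within its complex,
	 * so drop them and build an LLC ID from the socket and the
	 * core-complex number.
	 */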
	socket_id	= (c->apicid >> bits) - 1;
	core_complex_id	= (c->apicid & ((1 << bits) - 1)) >> 3;

	per_cpu(cpu_llc_id, cpu) = (socket_id << 3) | core_complex_id;
#endif
}

u16 amd_get_nb_id(int cpu)
{
	u16 id = 0;
#ifdef CONFIG_SMP
	id = per_cpu(cpu_llc_id, cpu);
#endif
	return id;
}
EXPORT_SYMBOL_GPL(amd_get_nb_id);

u32 amd_get_nodes_per_socket(void)
{
	return nodes_per_socket;
}
EXPORT_SYMBOL_GPL(amd_get_nodes_per_socket);

static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node;
	unsigned apicid = c->apicid;

	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE)
		node = per_cpu(cpu_llc_id, cpu);

	/*
	 * On multi-fabric platforms (e.g. Numascale NumaChip) a
	 * platform-specific handler needs to be called to fixup some
	 * IDs of the CPU.
	 */
	if (x86_cpuinit.fixup_cpu_id)
		x86_cpuinit.fixup_cpu_id(c, node);

	if (!node_online(node)) {
		/*
		 * Two possibilities here:
		 *
		 * - The CPU is missing memory and no node was created.  In
		 *   that case try picking one from a nearby CPU.
		 *
		 * - The APIC IDs differ from the HyperTransport node IDs
		 *   which the K8 northbridge parsing fills in.  Assume
		 *   they are all increased by a constant offset, but in
		 *   the same order as the HT nodeids.  If that doesn't
		 *   result in a usable node fall back to the path for the
		 *   previous case.
		 *
		 * This workaround operates directly on the mapping between
		 * APIC ID and NUMA node, assuming certain relationship
		 * between APIC ID, HT node ID and NUMA topology.  As going
		 * through CPU mapping may alter the outcome, directly
		 * access __apicid_to_node[].
		 */
		int ht_nodeid = c->initial_apicid;

		if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = __apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);
#endif
}

static void early_init_amd_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned bits, ecx;

	/* Multi core CPU? */
	if (c->extended_cpuid_level < 0x80000008)
		return;

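	/*
	 * CPUID Fn8000_0008 ECX[7:0] is NC (number of cores - 1);
	 * ECX[15:12] is ApicIdCoreIdSize, 0 when the CPU doesn't report it.
	 */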
	ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;

	/* Otherwise recompute */
	if (bits == 0) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	c->x86_coreid_bits = bits;
#endif
}

static void bsp_init_amd(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	if (c->x86 >= 0xf) {
		unsigned long long tseg;

		/*
		 * Split up direct mapping around the TSEG SMM area.
		 * Don't do it for gbpages because there seems very little
		 * benefit in doing so.
		 */
		if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
			unsigned long pfn = tseg >> PAGE_SHIFT;

			pr_debug("tseg: %010llx\n", tseg);
			if (pfn_range_is_mapped(pfn, pfn + 1))
				set_memory_4k((unsigned long)__va(tseg), 1);
		}
	}
#endif

	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
		if (c->x86 > 0x10 ||
		    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
			u64 val;

			rdmsrl(MSR_K7_HWCR, val);
			if (!(val & BIT(24)))
				pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
		}
	}

	if (c->x86 == 0x15) {
		unsigned long upperbit;
		u32 cpuid, assoc;

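		/*
		 * CPUID Fn8000_0005 EDX describes the L1 I-cache:
		 * [31:24] size in KB, [23:16] associativity.  size/assoc
		 * gives the way size, which bounds the address bits subject
		 * to aliasing and thus the va_align mask.
		 */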
		cpuid = cpuid_edx(0x80000005);
		assoc = cpuid >> 16 & 0xff;
		upperbit = ((cpuid >> 24) << 10) / assoc;

		va_align.mask	= (upperbit - 1) & PAGE_MASK;
		va_align.flags	= ALIGN_VA_32 | ALIGN_VA_64;

		/* A random value per boot for bit slice [12:upper_bit) */
		va_align.bits = get_random_int() & va_align.mask;
	}

	if (cpu_has(c, X86_FEATURE_MWAITX))
		use_mwaitx_delay();
}

static void early_init_amd(struct cpuinfo_x86 *c)
{
	early_init_amd_mc(c);

	/*
	 * c->x86_power is 8000_0007 edx.  Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
		if (!check_tsc_unstable())
			set_sched_clock_stable();
	}

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#else
	/* Set MTRR capability flag if appropriate */
	if (c->x86 == 5)
		if (c->x86_model == 13 || c->x86_model == 9 ||
		    (c->x86_model == 8 && c->x86_mask >= 8))
			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
#endif
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
	/*
	 * ApicID can always be treated as an 8-bit value for AMD APIC versions
	 * >= 0x10, but even old K8s came out of reset with version 0x10. So, we
	 * can safely set X86_FEATURE_EXTD_APICID unconditionally for families
	 * after 16h.
	 */
	if (cpu_has_apic && c->x86 > 0x16) {
		set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
	} else if (cpu_has_apic && c->x86 >= 0xf) {
		/* check CPU config space for extended APIC ID */
		unsigned int val;
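
		/*
		 * Device 0x18 function 0 is the northbridge; register 0x68
		 * (Link Transaction Control on K8-era NBs) must have both
		 * bit 17 (ApicExtBrdCst) and bit 18 (ApicExtId) set.
		 */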
		val = read_pci_config(0, 24, 0, 0x68);
		if ((val & ((1 << 17) | (1 << 18))) == ((1 << 17) | (1 << 18)))
			set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
	}
#endif

	/*
	 * This is only needed to tell the kernel whether to use VMCALL
	 * and VMMCALL.  VMMCALL is never executed except under virt, so
	 * we can set it unconditionally.
	 */
	set_cpu_cap(c, X86_FEATURE_VMMCALL);

	/* F16h erratum 793, CVE-2013-6885 */
	if (c->x86 == 0x16 && c->x86_model <= 0xf)
		msr_set_bit(MSR_AMD64_LS_CFG, 15);
}

static const int amd_erratum_383[];
static const int amd_erratum_400[];
static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);

static void init_amd_k8(struct cpuinfo_x86 *c)
{
	u32 level;
	u64 value;

	/* On C+ stepping K8 rep microcode works well for copy/memset */
	level = cpuid_eax(1);
	if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/*
	 * Some BIOSes incorrectly force this feature, but only K8 revision D
	 * (model = 0x14) and later actually support it.
	 * (AMD Erratum #110, docId: 25759).
	 */
	if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
		clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
		if (!rdmsrl_amd_safe(0xc001100d, &value)) {
			value &= ~BIT_64(32);
			wrmsrl_amd_safe(0xc001100d, value);
		}
	}

	if (!c->x86_model_id[0])
		strcpy(c->x86_model_id, "Hammer");

#ifdef CONFIG_SMP
	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	msr_set_bit(MSR_K7_HWCR, 6);
#endif
}

static void init_amd_gh(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	/* do this for boot cpu */
	if (c == &boot_cpu_data)
		check_enable_amd_mmconf_dmi();

	fam10h_check_enable_mmcfg();
#endif

	/*
	 * Disable GART TLB Walk Errors on Fam10h. We do this here because this
	 * is always needed when GART is enabled, even in a kernel which has no
	 * MCE support built in. BIOS should disable GartTlbWlk Errors already.
	 * If it doesn't, we do it here as suggested by the BKDG.
	 *
	 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
	 */
	msr_set_bit(MSR_AMD64_MCx_MASK(4), 10);

	/*
	 * On family 10h BIOS may not have properly enabled WC+ support, causing
	 * it to be converted to CD memtype. This may result in performance
	 * degradation for certain nested-paging guests. Prevent this conversion
	 * by clearing bit 24 in MSR_AMD64_BU_CFG2.
	 *
	 * NOTE: we want to use the _safe accessors so as not to #GP kvm
	 * guests on older kvm hosts.
	 */
	msr_clear_bit(MSR_AMD64_BU_CFG2, 24);

	if (cpu_has_amd_erratum(c, amd_erratum_383))
		set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
}

static void init_amd_bd(struct cpuinfo_x86 *c)
{
	u64 value;

	/* re-enable TopologyExtensions if switched off by BIOS */
	if ((c->x86_model >= 0x10) && (c->x86_model <= 0x1f) &&
	    !cpu_has(c, X86_FEATURE_TOPOEXT)) {
		if (msr_set_bit(0xc0011005, 54) > 0) {
			rdmsrl(0xc0011005, value);
			if (value & BIT_64(54)) {
				set_cpu_cap(c, X86_FEATURE_TOPOEXT);
				pr_info(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
			}
		}
	}

	/*
	 * The "way access filter" has a performance penalty on some workloads.
	 * Disable it on the affected CPUs.
	 */
	if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
		if (!rdmsrl_safe(MSR_F15H_IC_CFG, &value) && !(value & 0x1E)) {
			value |= 0x1E;
			wrmsrl_safe(MSR_F15H_IC_CFG, value);
		}
	}
}

static void init_amd(struct cpuinfo_x86 *c)
{
	u32 dummy;

	early_init_amd(c);

	/*
	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
	 */
	clear_cpu_cap(c, 0*32+31);

	if (c->x86 >= 0x10)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* get apicid instead of initial apic id from cpuid */
	c->apicid = hard_smp_processor_id();

	/* K6s report MCEs but don't actually have all the MSRs */
	if (c->x86 < 6)
		clear_cpu_cap(c, X86_FEATURE_MCE);

	switch (c->x86) {
	case 4:    init_amd_k5(c); break;
	case 5:    init_amd_k6(c); break;
	case 6:	   init_amd_k7(c); break;
	case 0xf:  init_amd_k8(c); break;
	case 0x10: init_amd_gh(c); break;
	case 0x15: init_amd_bd(c); break;
	}

	/* Enable workaround for FXSAVE leak */
	if (c->x86 >= 6)
		set_cpu_bug(c, X86_BUG_FXSAVE_LEAK);

	cpu_detect_cache_sizes(c);

	/* Multi core CPU? */
	if (c->extended_cpuid_level >= 0x80000008) {
		amd_detect_cmp(c);
		srat_detect_node(c);
	}

#ifdef CONFIG_X86_32
	detect_ht(c);
#endif

	init_amd_cacheinfo(c);

	if (c->x86 >= 0xf)
		set_cpu_cap(c, X86_FEATURE_K8);

	if (cpu_has_xmm2) {
		/* MFENCE stops RDTSC speculation */
		set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
	}

	/*
	 * Family 0x12 and above processors have an APIC timer that keeps
	 * running in deep C states.
	 */
	if (c->x86 > 0x11)
		set_cpu_cap(c, X86_FEATURE_ARAT);

	if (cpu_has_amd_erratum(c, amd_erratum_400))
		set_cpu_bug(c, X86_BUG_AMD_APIC_C1E);

	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);

	/* 3DNow or LM implies PREFETCHW */
	if (!cpu_has(c, X86_FEATURE_3DNOWPREFETCH))
		if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
			set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);

	/* AMD CPUs don't reset SS attributes on SYSRET */
	set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
}

#ifdef CONFIG_X86_32
static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/* AMD errata T13 (order #21922) */
	if (c->x86 == 6) {
		/* Duron Rev A0 */
		if (c->x86_model == 3 && c->x86_mask == 0)
			size = 64;
		/* Tbird rev A1/A2 */
		if (c->x86_model == 4 &&
		    (c->x86_mask == 0 || c->x86_mask == 1))
			size = 256;
	}
	return size;
}
#endif

static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
{
	u32 ebx, eax, ecx, edx;
	u16 mask = 0xfff;

	if (c->x86 < 0xf)
		return;

	if (c->extended_cpuid_level < 0x80000006)
		return;

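	/*
	 * CPUID Fn8000_0006 describes the L2 TLB: EBX[27:16] = DTLB 4K
	 * entries, EBX[11:0] = ITLB 4K entries; EAX holds the same fields
	 * for 2M/4M pages.
	 */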
	cpuid(0x80000006, &eax, &ebx, &ecx, &edx);

	tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
	tlb_lli_4k[ENTRIES] = ebx & mask;

	/*
	 * K8 doesn't have 2M/4M entries in the L2 TLB so read out the L1 TLB
	 * characteristics from the CPUID function 0x80000005 instead.
	 */
	if (c->x86 == 0xf) {
		cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
		mask = 0xff;
	}

	/* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!((eax >> 16) & mask))
		tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
	else
		tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;

	/* a 4M entry uses two 2M entries */
	tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;

	/* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!(eax & mask)) {
		/* Erratum 658 */
		if (c->x86 == 0x15 && c->x86_model <= 0x1f) {
			tlb_lli_2m[ENTRIES] = 1024;
		} else {
			cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
			tlb_lli_2m[ENTRIES] = eax & 0xff;
		}
	} else
		tlb_lli_2m[ENTRIES] = eax & mask;

	tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;
}

static const struct cpu_dev amd_cpu_dev = {
	.c_vendor	= "AMD",
	.c_ident	= { "AuthenticAMD" },
#ifdef CONFIG_X86_32
	.legacy_models = {
		{ .family = 4, .model_names =
		  {
			  [3] = "486 DX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB",
			  [14] = "Am5x86-WT",
			  [15] = "Am5x86-WB"
		  }
		},
	},
	.legacy_cache_size = amd_size_cache,
#endif
	.c_early_init	= early_init_amd,
	.c_detect_tlb	= cpu_detect_tlb_amd,
	.c_bsp_init	= bsp_init_amd,
	.c_init		= init_amd,
	.c_x86_vendor	= X86_VENDOR_AMD,
};

cpu_dev_register(amd_cpu_dev);

/*
 * AMD errata checking
 *
 * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
 * AMD_OSVW_ERRATUM() macros.  The latter is intended for newer errata that
 * have an OSVW id assigned, which it takes as first argument.  Both take a
 * variable number of family-specific model-stepping ranges created by
 * AMD_MODEL_RANGE().
 *
 * Example:
 *
 * const int amd_erratum_319[] =
 *	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
 *			   AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
 *			   AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
 */

#define AMD_LEGACY_ERRATUM(...)		{ -1, __VA_ARGS__, 0 }
#define AMD_OSVW_ERRATUM(osvw_id, ...)	{ osvw_id, __VA_ARGS__, 0 }
#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
	((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
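/*
 * A range packs the family into bits [31:24] and two (model << 4 |
 * stepping) endpoints: the start in bits [23:12], the end in bits [11:0],
 * matching the "ms" value cpu_has_amd_erratum() compares against.
 */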
#define AMD_MODEL_RANGE_FAMILY(range)	(((range) >> 24) & 0xff)
#define AMD_MODEL_RANGE_START(range)	(((range) >> 12) & 0xfff)
#define AMD_MODEL_RANGE_END(range)	((range) & 0xfff)

static const int amd_erratum_400[] =
	AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
			    AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));

static const int amd_erratum_383[] =
	AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));

static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
{
	int osvw_id = *erratum++;
	u32 range;
	u32 ms;

	if (osvw_id >= 0 && osvw_id < 65536 &&
	    cpu_has(cpu, X86_FEATURE_OSVW)) {
		u64 osvw_len;

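		/*
		 * OSVW_ID_LENGTH says how many OSVW ids the CPU knows about;
		 * the status bits live in a bitmap spread over consecutive
		 * 64-bit OSVW_STATUS MSRs, so id >> 6 picks the MSR and the
		 * low 6 bits pick the bit within it.
		 */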
		rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
		if (osvw_id < osvw_len) {
			u64 osvw_bits;

			rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
			       osvw_bits);
			return osvw_bits & (1ULL << (osvw_id & 0x3f));
		}
	}

	/* OSVW unavailable or ID unknown, match family-model-stepping range */
	ms = (cpu->x86_model << 4) | cpu->x86_mask;
	while ((range = *erratum++))
		if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
		    (ms >= AMD_MODEL_RANGE_START(range)) &&
		    (ms <= AMD_MODEL_RANGE_END(range)))
			return true;

	return false;
}

void set_dr_addr_mask(unsigned long mask, int dr)
{
	if (!boot_cpu_has(X86_FEATURE_BPEXT))
		return;

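	/*
	 * The breakpoint address mask MSRs are not contiguous: DR0's mask
	 * (MSR_F16H_DR0_ADDR_MASK) sits apart from the DR1..DR3 masks,
	 * which are consecutive starting at MSR_F16H_DR1_ADDR_MASK, hence
	 * the "- 1 + dr" indexing below.
	 */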
	switch (dr) {
	case 0:
		wrmsr(MSR_F16H_DR0_ADDR_MASK, mask, 0);
		break;
	case 1:
	case 2:
	case 3:
		wrmsr(MSR_F16H_DR1_ADDR_MASK - 1 + dr, mask, 0);
		break;
	default:
		break;
	}
}