/* arch/x86/kernel/cpu/amd.c */
#include <linux/export.h>
#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/mm.h>

#include <linux/io.h>
#include <linux/sched.h>
#include <linux/random.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/cpu.h>
#include <asm/smp.h>
#include <asm/pci-direct.h>
#include <asm/delay.h>

#ifdef CONFIG_X86_64
# include <asm/mmconfig.h>
# include <asm/cacheflush.h>
#endif

#include "cpu.h"

/*
 * nodes_per_socket: Stores the number of nodes per socket.
 * Refer to Fam15h Models 00-0fh BKDG - CPUID Fn8000_001E_ECX
 * Node Identifiers[10:8]
 */
static u32 nodes_per_socket = 1;

static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
	u32 gprs[8] = { 0 };
	int err;

	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

	gprs[1] = msr;
	gprs[7] = 0x9c5a203a;

	err = rdmsr_safe_regs(gprs);

	*p = gprs[0] | ((u64)gprs[2] << 32);

	return err;
}

static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
{
	u32 gprs[8] = { 0 };

	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

	gprs[0] = (u32)val;
	gprs[1] = msr;
	gprs[2] = val >> 32;
	gprs[7] = 0x9c5a203a;

	return wrmsr_safe_regs(gprs);
}
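
/*
 * Note on the two helpers above: rdmsr_safe_regs()/wrmsr_safe_regs() take a
 * u32 gprs[8] array which (as I read arch/x86/lib/msr-reg.S) maps to
 * EAX, ECX, EDX, EBX, ESP, EBP, ESI, EDI in that order. So gprs[1] is the
 * MSR number in ECX, gprs[0]/gprs[2] form the EDX:EAX value pair, and
 * gprs[7] loads EDI with 0x9c5a203a, the magic key that unlocks these
 * K8-only MSRs.
 */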

/*
 * B stepping AMD K6 before B 9730xxxx have hardware bugs that can cause
 * misexecution of code under Linux. Owners of such processors should
 * contact AMD for precise details and a CPU swap.
 *
 * See http://www.multimania.com/poulot/k6bug.html
 * and section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
 * (Publication # 21266 Issue Date: August 1998)
 *
 * The following test is erm.. interesting. AMD neglected to up
 * the chip stepping when fixing the bug but they also tweaked some
 * performance at the same time..
 */

extern __visible void vide(void);
__asm__(".globl vide\n"
	".type vide, @function\n"
	".align 4\n"
	"vide: ret\n");

static void init_amd_k5(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
/*
 * General Systems BIOSen alias the cpu frequency registers
 * of the Elan at 0x000df000. Unfortunately, one of the Linux
 * drivers subsequently pokes it, and changes the CPU speed.
 * Workaround: Remove the unneeded alias.
 */
#define CBAR		(0xfffc) /* Configuration Base Address (32-bit) */
#define CBAR_ENB	(0x80000000)
#define CBAR_KEY	(0x000000CB)
	if (c->x86_model == 9 || c->x86_model == 10) {
		if (inl(CBAR) & CBAR_ENB)
			outl(0 | CBAR_KEY, CBAR);
	}
#endif
}

static void init_amd_k6(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	u32 l, h;
	int mbytes = get_num_physpages() >> (20-PAGE_SHIFT);

	if (c->x86_model < 6) {
		/* Based on AMD doc 20734R - June 2000 */
		if (c->x86_model == 0) {
			clear_cpu_cap(c, X86_FEATURE_APIC);
			set_cpu_cap(c, X86_FEATURE_PGE);
		}
		return;
	}

	if (c->x86_model == 6 && c->x86_mask == 1) {
		const int K6_BUG_LOOP = 1000000;
		int n;
		void (*f_vide)(void);
		u64 d, d2;

		pr_info("AMD K6 stepping B detected - ");

		/*
		 * It looks like AMD fixed the 2.6.2 bug and improved indirect
		 * calls at the same time.
		 */

		n = K6_BUG_LOOP;
		f_vide = vide;
		d = rdtsc();
		while (n--)
			f_vide();
		d2 = rdtsc();
		d = d2-d;

		if (d > 20*K6_BUG_LOOP)
			pr_cont("system stability may be impaired when more than 32 MB are used.\n");
		else
			pr_cont("probably OK (after B9730xxxx).\n");
	}

	/* K6 with old style WHCR */
	if (c->x86_model < 8 ||
	   (c->x86_model == 8 && c->x86_mask < 8)) {
		/* We can only write allocate on the low 508 MB */
		if (mbytes > 508)
			mbytes = 508;

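		/*
		 * Old-style WHCR layout, as I read AMD doc 20734: bit 0
		 * enables write allocation and bits [7:1] hold the limit in
		 * 4 MB units. E.g. the 508 MB maximum gives 508/4 = 127,
		 * so l = 1 | (127 << 1) = 0xff.
		 */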
		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0x0000FFFF) == 0) {
			unsigned long flags;
			l = (1<<0)|((mbytes/4)<<1);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			pr_info("Enabling old style K6 write allocation for %d MB\n",
				mbytes);
		}
		return;
	}

	if ((c->x86_model == 8 && c->x86_mask > 7) ||
	     c->x86_model == 9 || c->x86_model == 13) {
		/* The more serious chips .. */

		if (mbytes > 4092)
			mbytes = 4092;

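		/*
		 * New-style WHCR layout (again per AMD doc 20734, so treat
		 * this as a sketch): bit 16 enables write allocation and
		 * bits [31:22] hold the limit in 4 MB units, hence
		 * (mbytes >> 2) << 22.
		 */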
		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0xFFFF0000) == 0) {
			unsigned long flags;
			l = ((mbytes>>2)<<22)|(1<<16);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			pr_info("Enabling new style K6 write allocation for %d MB\n",
				mbytes);
		}

		return;
	}

	if (c->x86_model == 10) {
		/* AMD Geode LX is model 10 */
		/* placeholder for any needed mods */
		return;
	}
#endif
}

static void init_amd_k7(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	u32 l, h;

	/*
	 * Bit 15 of the Athlon-specific MSR_K7_HWCR needs to be 0
	 * to enable SSE on Palomino/Morgan/Barton CPUs.
	 * If the BIOS didn't enable it already, enable it here.
	 */
	if (c->x86_model >= 6 && c->x86_model <= 10) {
		if (!cpu_has(c, X86_FEATURE_XMM)) {
			pr_info("Enabling disabled K7/SSE Support.\n");
			msr_clear_bit(MSR_K7_HWCR, 15);
			set_cpu_cap(c, X86_FEATURE_XMM);
		}
	}

	/*
	 * It's been determined by AMD that Athlons since model 8 stepping 1
	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx,
	 * as per AMD technical note 27212 0.2.
	 */
	if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
		rdmsr(MSR_K7_CLK_CTL, l, h);
		if ((l & 0xfff00000) != 0x20000000) {
			pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
				l, ((l & 0x000fffff)|0x20000000));
			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
		}
	}

	set_cpu_cap(c, X86_FEATURE_K7);

	/*
	 * If this is not the boot CPU, we are being called from
	 * identify_secondary_cpu(); only then is the MP check below relevant.
	 */
	if (!c->cpu_index)
		return;

	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	/* Athlon 660/661 is valid. */
	if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
	    (c->x86_mask == 1)))
		return;

	/* Duron 670 is valid */
	if ((c->x86_model == 7) && (c->x86_mask == 0))
		return;

	/*
	 * Athlon 662, Duron 671, and Athlon >model 7 have the MP capability
	 * bit. It's worth noting that the A5 stepping (662) of some
	 * Athlon XPs have the MP bit set.
	 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
	 * more.
	 */
	if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
	    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
	     (c->x86_model > 7))
		if (cpu_has(c, X86_FEATURE_MP))
			return;

	/* If we get here, not a certified SMP capable AMD system. */

	/*
	 * We only reach this point on secondary CPUs, so a UP kernel on a
	 * single non-MP approved Athlon is never tainted.
	 */
	WARN_ONCE(1, "WARNING: This combination of AMD"
		" processors is not suitable for SMP.\n");
	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
#endif
}

#ifdef CONFIG_NUMA
/*
 * To work around a broken NUMA config. Read the comment in
 * srat_detect_node().
 */
static int nearby_node(int apicid)
{
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif

/*
 * Fixup core topology information for
 * (1) AMD multi-node processors
 *     Assumption: Number of cores in each internal node is the same.
 * (2) AMD processors supporting compute units
 */
#ifdef CONFIG_SMP
static void amd_get_topology(struct cpuinfo_x86 *c)
{
	u32 cores_per_cu = 1;
	u8 node_id;
	int cpu = smp_processor_id();

	/* get information required for multi-node processors */
	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		u32 eax, ebx, ecx, edx;

		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
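		/*
		 * CPUID Fn8000_001E layout, per the Fam15h BKDG (note the
		 * documented fields are wider than the masks used below):
		 * ECX[10:8] = NodesPerProcessor - 1, ECX[7:0] = NodeId,
		 * EBX[15:8] = CoresPerComputeUnit - 1,
		 * EBX[7:0]  = ComputeUnitId.
		 */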
		nodes_per_socket = ((ecx >> 8) & 7) + 1;
		node_id = ecx & 7;

		/* get compute unit information */
		smp_num_siblings = ((ebx >> 8) & 3) + 1;
		c->compute_unit_id = ebx & 0xff;
		cores_per_cu += ((ebx >> 8) & 3);
	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
		u64 value;

		rdmsrl(MSR_FAM10H_NODE_ID, value);
		nodes_per_socket = ((value >> 3) & 7) + 1;
		node_id = value & 7;
	} else
		return;

	/* fixup multi-node processor information */
	if (nodes_per_socket > 1) {
		u32 cores_per_node;
		u32 cus_per_node;

		set_cpu_cap(c, X86_FEATURE_AMD_DCM);
		cores_per_node = c->x86_max_cores / nodes_per_socket;
		cus_per_node = cores_per_node / cores_per_cu;

		/* store NodeID, use llc_shared_map to store sibling info */
		per_cpu(cpu_llc_id, cpu) = node_id;

		/* core id has to be in the [0 .. cores_per_node - 1] range */
		c->cpu_core_id %= cores_per_node;
		c->compute_unit_id %= cus_per_node;
	}
}
#endif

/*
 * On an AMD dual-core setup, the lower bits of the APIC ID distinguish the
 * cores. Assumes the number of cores is a power of two.
 */
static void amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned bits;
	int cpu = smp_processor_id();
	unsigned int socket_id, core_complex_id;

	bits = c->x86_coreid_bits;
	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
	/* Convert the initial APIC ID into the socket ID */
	c->phys_proc_id = c->initial_apicid >> bits;
	/* use socket ID also for last level cache */
	per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
	amd_get_topology(c);

	/*
	 * Fix percpu cpu_llc_id here as LLC topology is different
	 * for Fam17h systems.
	 */
	if (c->x86 != 0x17 || !cpuid_edx(0x80000006))
		return;

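	/*
	 * On Fam17h the L3 (the LLC) is shared per core complex (CCX)
	 * rather than per node, so an LLC ID is derived from the socket ID
	 * and the CCX number within the socket. As I understand the Zen
	 * topology, the >> 3 reflects up to eight sibling threads
	 * (4 cores x 2 threads) sharing one CCX in the APIC ID numbering.
	 */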
	socket_id	= (c->apicid >> bits) - 1;
	core_complex_id	= (c->apicid & ((1 << bits) - 1)) >> 3;

	per_cpu(cpu_llc_id, cpu) = (socket_id << 3) | core_complex_id;
#endif
}

u16 amd_get_nb_id(int cpu)
{
	u16 id = 0;
#ifdef CONFIG_SMP
	id = per_cpu(cpu_llc_id, cpu);
#endif
	return id;
}
EXPORT_SYMBOL_GPL(amd_get_nb_id);

u32 amd_get_nodes_per_socket(void)
{
	return nodes_per_socket;
}
EXPORT_SYMBOL_GPL(amd_get_nodes_per_socket);

static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node;
	unsigned apicid = c->apicid;

	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE)
		node = per_cpu(cpu_llc_id, cpu);

	/*
	 * On a multi-fabric platform (e.g. Numascale NumaChip) a
	 * platform-specific handler needs to be called to fix up some
	 * IDs of the CPU.
	 */
	if (x86_cpuinit.fixup_cpu_id)
		x86_cpuinit.fixup_cpu_id(c, node);

	if (!node_online(node)) {
		/*
		 * Two possibilities here:
		 *
		 * - The CPU is missing memory and no node was created. In
		 *   that case try picking one from a nearby CPU.
		 *
		 * - The APIC IDs differ from the HyperTransport node IDs
		 *   which the K8 northbridge parsing fills in. Assume
		 *   they are all increased by a constant offset, but in
		 *   the same order as the HT nodeids. If that doesn't
		 *   result in a usable node fall back to the path for the
		 *   previous case.
		 *
		 * This workaround operates directly on the mapping between
		 * APIC ID and NUMA node, assuming a certain relationship
		 * between APIC ID, HT node ID and NUMA topology. As going
		 * through the CPU mapping may alter the outcome, directly
		 * access __apicid_to_node[].
		 */
		int ht_nodeid = c->initial_apicid;

		if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = __apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);
#endif
}

static void early_init_amd_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned bits, ecx;

	/* Multi core CPU? */
	if (c->extended_cpuid_level < 0x80000008)
		return;

	ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;

	/* Otherwise recompute */
	if (bits == 0) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	c->x86_coreid_bits = bits;
#endif
}

static void bsp_init_amd(struct cpuinfo_x86 *c)
{

#ifdef CONFIG_X86_64
	if (c->x86 >= 0xf) {
		unsigned long long tseg;

		/*
		 * Split up direct mapping around the TSEG SMM area.
		 * Don't do it for gbpages because there seems very little
		 * benefit in doing so.
		 */
		if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
			unsigned long pfn = tseg >> PAGE_SHIFT;

			pr_debug("tseg: %010llx\n", tseg);
			if (pfn_range_is_mapped(pfn, pfn + 1))
				set_memory_4k((unsigned long)__va(tseg), 1);
		}
	}
#endif

	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {

		if (c->x86 > 0x10 ||
		    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
			u64 val;

			rdmsrl(MSR_K7_HWCR, val);
			if (!(val & BIT(24)))
				pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
		}
	}

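	/*
	 * Fam15h (Bulldozer) parts suffer cache-aliasing penalties between
	 * the two cores of a compute unit, so ASLR keeps the low
	 * cache-index bits of text addresses consistent. CPUID 0x80000005
	 * EDX reports the L1 I-cache: bits [31:24] = size in KB,
	 * bits [23:16] = associativity. Worked example for a 64 KB, 2-way
	 * I-cache: upperbit = (64 << 10) / 2 = 32768, so va_align.mask =
	 * 0x7000, i.e. bits [14:12] get a fixed per-boot random value.
	 */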
	if (c->x86 == 0x15) {
		unsigned long upperbit;
		u32 cpuid, assoc;

		cpuid = cpuid_edx(0x80000005);
		assoc = cpuid >> 16 & 0xff;
		upperbit = ((cpuid >> 24) << 10) / assoc;

		va_align.mask = (upperbit - 1) & PAGE_MASK;
		va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;

		/* A random value per boot for bit slice [12:upper_bit) */
		va_align.bits = get_random_int() & va_align.mask;
	}

	if (cpu_has(c, X86_FEATURE_MWAITX))
		use_mwaitx_delay();
}

static void early_init_amd(struct cpuinfo_x86 *c)
{
	early_init_amd_mc(c);

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 means that the TSC runs at
	 * a constant rate across P/T states and does not stop in deep
	 * C-states.
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
		if (!check_tsc_unstable())
			set_sched_clock_stable();
	}

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#else
	/* Set MTRR capability flag if appropriate */
	if (c->x86 == 5)
		if (c->x86_model == 13 || c->x86_model == 9 ||
		    (c->x86_model == 8 && c->x86_mask >= 8))
			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
#endif
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
	/*
	 * ApicID can always be treated as an 8-bit value for AMD APIC versions
	 * >= 0x10, but even old K8s came out of reset with version 0x10. So, we
	 * can safely set X86_FEATURE_EXTD_APICID unconditionally for families
	 * after 16h.
	 */
	if (cpu_has_apic && c->x86 > 0x16) {
		set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
	} else if (cpu_has_apic && c->x86 >= 0xf) {
		/* check CPU config space for extended APIC ID */
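		/*
		 * D18F0x68 is, if I read the K8 docs right, the northbridge
		 * "Link Transaction Control" register (bus 0, device 0x18,
		 * function 0, offset 0x68); bits 17 (ApicExtBrdCst) and
		 * 18 (ApicExtId) must both be set for extended APIC IDs.
		 */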
		unsigned int val;
		val = read_pci_config(0, 24, 0, 0x68);
		if ((val & ((1 << 17) | (1 << 18))) == ((1 << 17) | (1 << 18)))
			set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
	}
#endif

	/*
	 * This is only needed to tell the kernel whether to use VMMCALL
	 * instead of VMCALL. VMMCALL is never executed except under
	 * virtualization, so we can set the feature flag unconditionally.
	 */
	set_cpu_cap(c, X86_FEATURE_VMMCALL);

	/* F16h erratum 793, CVE-2013-6885 */
	if (c->x86 == 0x16 && c->x86_model <= 0xf)
		msr_set_bit(MSR_AMD64_LS_CFG, 15);
}

static const int amd_erratum_383[];
static const int amd_erratum_400[];
static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);

static void init_amd_k8(struct cpuinfo_x86 *c)
{
	u32 level;
	u64 value;

	/* On C+ stepping K8 rep microcode works well for copy/memset */
	level = cpuid_eax(1);
	if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/*
	 * Some BIOSes incorrectly force this feature, but only K8 revision D
	 * (model = 0x14) and later actually support it.
	 * (AMD Erratum #110, docId: 25759).
	 */
	if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
		clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
		if (!rdmsrl_amd_safe(0xc001100d, &value)) {
			value &= ~BIT_64(32);
			wrmsrl_amd_safe(0xc001100d, value);
		}
	}

	if (!c->x86_model_id[0])
		strcpy(c->x86_model_id, "Hammer");

#ifdef CONFIG_SMP
	/*
	 * Disable the TLB flush filter by setting HWCR.FFDIS on K8
	 * (bit 6 of MSR C001_0015).
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	msr_set_bit(MSR_K7_HWCR, 6);
#endif
}

static void init_amd_gh(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	/* do this for boot cpu */
	if (c == &boot_cpu_data)
		check_enable_amd_mmconf_dmi();

	fam10h_check_enable_mmcfg();
#endif

	/*
	 * Disable GART TLB Walk Errors on Fam10h. We do this here because this
	 * is always needed when GART is enabled, even in a kernel which has no
	 * MCE support built in. BIOS should disable GartTlbWlk Errors already.
	 * If it doesn't, we do it here as suggested by the BKDG.
	 *
	 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
	 */
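	/*
	 * MSR_AMD64_MCx_MASK(4) is the MC4 (northbridge) error mask
	 * register; per the Fam10h BKDG, bit 10 there masks GartTlbWlkErr
	 * reporting.
	 */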
	msr_set_bit(MSR_AMD64_MCx_MASK(4), 10);

	/*
	 * On family 10h, the BIOS may not have properly enabled WC+ support,
	 * causing it to be converted to CD memtype. This may result in
	 * performance degradation for certain nested-paging guests. Prevent
	 * this conversion by clearing bit 24 in MSR_AMD64_BU_CFG2.
	 *
	 * NOTE: we want to use the _safe accessors so as not to #GP kvm
	 * guests on older kvm hosts.
	 */
	msr_clear_bit(MSR_AMD64_BU_CFG2, 24);

	if (cpu_has_amd_erratum(c, amd_erratum_383))
		set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
}

static void init_amd_bd(struct cpuinfo_x86 *c)
{
	u64 value;

	/* re-enable TopologyExtensions if switched off by BIOS */
	if ((c->x86_model >= 0x10) && (c->x86_model <= 0x1f) &&
	    !cpu_has(c, X86_FEATURE_TOPOEXT)) {

		if (msr_set_bit(0xc0011005, 54) > 0) {
			rdmsrl(0xc0011005, value);
			if (value & BIT_64(54)) {
				set_cpu_cap(c, X86_FEATURE_TOPOEXT);
				pr_info(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
			}
		}
	}

	/*
	 * The "way access filter" has a performance penalty on some
	 * workloads. Disable it on the affected CPUs by setting bits 1-4
	 * in MSR_F15H_IC_CFG.
	 */
	if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
		if (!rdmsrl_safe(MSR_F15H_IC_CFG, &value) && !(value & 0x1E)) {
			value |= 0x1E;
			wrmsrl_safe(MSR_F15H_IC_CFG, value);
		}
	}
}

static void init_amd(struct cpuinfo_x86 *c)
{
	u32 dummy;

	early_init_amd(c);

	/*
	 * Bit 31 in normal CPUID is used for a nonstandard 3DNow ID;
	 * 3DNow is IDed by bit 31 in extended CPUID (1*32+31) anyway.
	 */
	clear_cpu_cap(c, 0*32+31);

	if (c->x86 >= 0x10)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* get apicid instead of initial apic id from cpuid */
	c->apicid = hard_smp_processor_id();

	/* K6s report MCEs but don't actually have all the MSRs */
	if (c->x86 < 6)
		clear_cpu_cap(c, X86_FEATURE_MCE);

	switch (c->x86) {
	case 4:    init_amd_k5(c); break;
	case 5:    init_amd_k6(c); break;
	case 6:	   init_amd_k7(c); break;
	case 0xf:  init_amd_k8(c); break;
	case 0x10: init_amd_gh(c); break;
	case 0x15: init_amd_bd(c); break;
	}

	/* Enable workaround for FXSAVE leak */
	if (c->x86 >= 6)
		set_cpu_bug(c, X86_BUG_FXSAVE_LEAK);

	cpu_detect_cache_sizes(c);

	/* Multi core CPU? */
	if (c->extended_cpuid_level >= 0x80000008) {
		amd_detect_cmp(c);
		srat_detect_node(c);
	}

#ifdef CONFIG_X86_32
	detect_ht(c);
#endif

	init_amd_cacheinfo(c);

	if (c->x86 >= 0xf)
		set_cpu_cap(c, X86_FEATURE_K8);

	if (cpu_has_xmm2) {
		/* MFENCE stops RDTSC speculation */
		set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
	}

	/*
	 * Family 0x12 and above processors have an APIC timer that keeps
	 * running in deep C-states.
	 */
	if (c->x86 > 0x11)
		set_cpu_cap(c, X86_FEATURE_ARAT);

	if (cpu_has_amd_erratum(c, amd_erratum_400))
		set_cpu_bug(c, X86_BUG_AMD_APIC_C1E);

	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);

	/* 3DNow or LM implies PREFETCHW */
	if (!cpu_has(c, X86_FEATURE_3DNOWPREFETCH))
		if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
			set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);

	/* AMD CPUs don't reset SS attributes on SYSRET */
	set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
}

#ifdef CONFIG_X86_32
static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/* AMD errata T13 (order #21922) */
	if (c->x86 == 6) {
		/* Duron Rev A0 */
		if (c->x86_model == 3 && c->x86_mask == 0)
			size = 64;
		/* Tbird rev A1/A2 */
		if (c->x86_model == 4 &&
		    (c->x86_mask == 0 || c->x86_mask == 1))
			size = 256;
	}
	return size;
}
#endif

static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
{
	u32 ebx, eax, ecx, edx;
	u16 mask = 0xfff;

	if (c->x86 < 0xf)
		return;

	if (c->extended_cpuid_level < 0x80000006)
		return;

	cpuid(0x80000006, &eax, &ebx, &ecx, &edx);

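	/*
	 * CPUID 0x80000006 L2 TLB fields (12-bit entry counts, hence the
	 * 0xfff mask): EBX[27:16] = data TLB entries for 4K pages,
	 * EBX[11:0] = instruction TLB entries for 4K pages; EAX has the
	 * same layout for 2M/4M pages.
	 */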
	tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
	tlb_lli_4k[ENTRIES] = ebx & mask;

	/*
	 * K8 doesn't have 2M/4M entries in the L2 TLB so read out the L1 TLB
	 * characteristics from the CPUID function 0x80000005 instead.
	 */
	if (c->x86 == 0xf) {
		cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
		mask = 0xff;
	}

	/* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!((eax >> 16) & mask))
		tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
	else
		tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;

	/* a 4M entry uses two 2M entries */
	tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;

	/* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!(eax & mask)) {
		/* Erratum 658 */
		if (c->x86 == 0x15 && c->x86_model <= 0x1f) {
			tlb_lli_2m[ENTRIES] = 1024;
		} else {
			cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
			tlb_lli_2m[ENTRIES] = eax & 0xff;
		}
	} else
		tlb_lli_2m[ENTRIES] = eax & mask;

	tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;
}

static const struct cpu_dev amd_cpu_dev = {
	.c_vendor	= "AMD",
	.c_ident	= { "AuthenticAMD" },
#ifdef CONFIG_X86_32
	.legacy_models = {
		{ .family = 4, .model_names =
		  {
			[3] = "486 DX/2",
			[7] = "486 DX/2-WB",
			[8] = "486 DX/4",
			[9] = "486 DX/4-WB",
			[14] = "Am5x86-WT",
			[15] = "Am5x86-WB"
		  }
		},
	},
	.legacy_cache_size = amd_size_cache,
#endif
	.c_early_init	= early_init_amd,
	.c_detect_tlb	= cpu_detect_tlb_amd,
	.c_bsp_init	= bsp_init_amd,
	.c_init		= init_amd,
	.c_x86_vendor	= X86_VENDOR_AMD,
};

cpu_dev_register(amd_cpu_dev);

/*
 * AMD errata checking
 *
 * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
 * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
 * have an OSVW id assigned, which it takes as first argument. Both take a
 * variable number of family-specific model-stepping ranges created by
 * AMD_MODEL_RANGE().
 *
 * Example:
 *
 * const int amd_erratum_319[] =
 *	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
 *			   AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
 *			   AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
 */

#define AMD_LEGACY_ERRATUM(...)		{ -1, __VA_ARGS__, 0 }
#define AMD_OSVW_ERRATUM(osvw_id, ...)	{ osvw_id, __VA_ARGS__, 0 }
#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
	((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
#define AMD_MODEL_RANGE_FAMILY(range)	(((range) >> 24) & 0xff)
#define AMD_MODEL_RANGE_START(range)	(((range) >> 12) & 0xfff)
#define AMD_MODEL_RANGE_END(range)	((range) & 0xfff)
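
/*
 * Worked example: AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf) packs to
 * (0xf << 24) | (0x41 << 16) | (0x2 << 12) | (0xff << 4) | 0xf = 0x0f412fff,
 * i.e. family 0xf, from ms = 0x412 (model 0x41, stepping 2) through
 * ms = 0xfff. cpu_has_amd_erratum() below compares the CPU's
 * ms = (model << 4) | stepping against that [START, END] range.
 */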

static const int amd_erratum_400[] =
	AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
			    AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));

static const int amd_erratum_383[] =
	AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));

static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
{
	int osvw_id = *erratum++;
	u32 range;
	u32 ms;

	if (osvw_id >= 0 && osvw_id < 65536 &&
	    cpu_has(cpu, X86_FEATURE_OSVW)) {
		u64 osvw_len;

		rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
		if (osvw_id < osvw_len) {
			u64 osvw_bits;

			rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
			       osvw_bits);
			return osvw_bits & (1ULL << (osvw_id & 0x3f));
		}
	}

	/* OSVW unavailable or ID unknown, match family-model-stepping range */
	ms = (cpu->x86_model << 4) | cpu->x86_mask;
	while ((range = *erratum++))
		if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
		    (ms >= AMD_MODEL_RANGE_START(range)) &&
		    (ms <= AMD_MODEL_RANGE_END(range)))
			return true;

	return false;
}

void set_dr_addr_mask(unsigned long mask, int dr)
{
	if (!boot_cpu_has(X86_FEATURE_BPEXT))
		return;

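	/*
	 * With X86_FEATURE_BPEXT, each debug register has an address-mask
	 * MSR. DR0's mask lives in its own MSR, while (if I read
	 * msr-index.h correctly) the DR1-DR3 masks are contiguous starting
	 * at MSR_F16H_DR1_ADDR_MASK, hence the "- 1 + dr" arithmetic below.
	 */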
	switch (dr) {
	case 0:
		wrmsr(MSR_F16H_DR0_ADDR_MASK, mask, 0);
		break;
	case 1:
	case 2:
	case 3:
		wrmsr(MSR_F16H_DR1_ADDR_MASK - 1 + dr, mask, 0);
		break;
	default:
		break;
	}
}