#include <linux/export.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/random.h>
#include <asm/processor.h>
#include <asm/spec-ctrl.h>
#include <asm/pci-direct.h>
#include <asm/delay.h>

#ifdef CONFIG_X86_64
# include <asm/mmconfig.h>
# include <asm/set_memory.h>
#endif
static const int amd_erratum_383[];
static const int amd_erratum_400[];
static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);
/*
 * nodes_per_socket: Stores the number of nodes per socket.
 * Refer to Fam15h Models 00-0fh BKDG - CPUID Fn8000_001E_ECX
 * Node Identifiers[10:8]
 */
static u32 nodes_per_socket = 1;
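/*
 * Illustrative decode (a sketch, not used directly here): with
 * CPUID Fn8000_001E_ECX = 0x101, the Node Identifiers field in
 * bits [10:8] is 1, so nodes_per_socket = ((0x101 >> 8) & 7) + 1 = 2,
 * matching the computation in bsp_init_amd() below.
 */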
static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
	u32 gprs[8] = { 0 };
	int err;

	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

	gprs[1] = msr;
	gprs[7] = 0x9c5a203a;

	err = rdmsr_safe_regs(gprs);

	*p = gprs[0] | ((u64)gprs[2] << 32);

	return err;
}
static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
{
	u32 gprs[8] = { 0 };

	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

	gprs[0] = (u32)val;
	gprs[1] = msr;
	gprs[2] = val >> 32;
	gprs[7] = 0x9c5a203a;

	return wrmsr_safe_regs(gprs);
}
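/*
 * Note on the helpers above: rdmsr_safe_regs()/wrmsr_safe_regs() take a
 * full GPR set, with gprs[1] holding the MSR number (%ecx) and gprs[7]
 * holding %edi. The 0x9c5a203a value is the key K8 expects in %edi
 * before certain model-specific registers (such as 0xc001100d used in
 * init_amd_k8() below) may be accessed.
 */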
/*
 * B step AMD K6 before B 9730xxxx have hardware bugs that can cause
 * misexecution of code under Linux. Owners of such processors should
 * contact AMD for precise details and a CPU swap.
 *
 * See http://www.multimania.com/poulot/k6bug.html
 * and section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
 * (Publication # 21266 Issue Date: August 1998)
 *
 * The following test is erm.. interesting. AMD neglected to up
 * the chip setting when fixing the bug but they also tweaked some
 * performance at the same time..
 */
extern __visible void vide(void);
__asm__(".globl vide\n"
	".type vide, @function\n"
	".align 4\n"
	"vide: ret\n");
static void init_amd_k5(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	/*
	 * General Systems BIOSen alias the cpu frequency registers
	 * of the Elan at 0x000df000. Unfortunately, one of the Linux
	 * drivers subsequently pokes it, and changes the CPU speed.
	 * Workaround : Remove the unneeded alias.
	 */
#define CBAR		(0xfffc) /* Configuration Base Address (32-bit) */
#define CBAR_ENB	(0x80000000)
#define CBAR_KEY	(0X000000CB)
	if (c->x86_model == 9 || c->x86_model == 10) {
		if (inl(CBAR) & CBAR_ENB)
			outl(0 | CBAR_KEY, CBAR);
	}
#endif
}
static void init_amd_k6(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	u32 l, h;
	int mbytes = get_num_physpages() >> (20-PAGE_SHIFT);

	if (c->x86_model < 6) {
		/* Based on AMD doc 20734R - June 2000 */
		if (c->x86_model == 0) {
			clear_cpu_cap(c, X86_FEATURE_APIC);
			set_cpu_cap(c, X86_FEATURE_PGE);
		}
		return;
	}

	if (c->x86_model == 6 && c->x86_mask == 1) {
		const int K6_BUG_LOOP = 1000000;
		int n;
		void (*f_vide)(void);
		u64 d, d2;

		pr_info("AMD K6 stepping B detected - ");

		/*
		 * It looks like AMD fixed the 2.6.2 bug and improved indirect
		 * calls at the same time.
		 */
		n = K6_BUG_LOOP;
		f_vide = vide;
		OPTIMIZER_HIDE_VAR(f_vide);
		d = rdtsc();
		while (n--)
			f_vide();
		d2 = rdtsc();
		d = d2-d;

		if (d > 20*K6_BUG_LOOP)
			pr_cont("system stability may be impaired when more than 32 MB are used.\n");
		else
			pr_cont("probably OK (after B9730xxxx).\n");
	}

	/* K6 with old style WHCR */
	if (c->x86_model < 8 ||
	   (c->x86_model == 8 && c->x86_mask < 8)) {
		/* We can only write allocate on the low 508Mb */
		if (mbytes > 508)
			mbytes = 508;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0x0000FFFF) == 0) {
			unsigned long flags;
			l = (1<<0)|((mbytes/4)<<1);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			pr_info("Enabling old style K6 write allocation for %d Mb\n",
				mbytes);
		}
		return;
	}

	if ((c->x86_model == 8 && c->x86_mask > 7) ||
	     c->x86_model == 9 || c->x86_model == 13) {
		/* The more serious chips .. */

		if (mbytes > 4092)
			mbytes = 4092;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0xFFFF0000) == 0) {
			unsigned long flags;
			l = ((mbytes>>2)<<22)|(1<<16);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			pr_info("Enabling new style K6 write allocation for %d Mb\n",
				mbytes);
		}
		return;
	}

	if (c->x86_model == 10) {
		/* AMD Geode LX is model 10 */
		/* placeholder for any needed mods */
	}
#endif
}
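/*
 * WHCR encoding sketch, derived from the code above rather than from
 * the BKDG: old-style parts keep the write-allocate enable in bit 0 and
 * the limit in 4MB units starting at bit 1, so the 508MB maximum gives
 * l = 1 | ((508/4) << 1); new-style parts keep the enable in bit 16 and
 * the 4MB-unit limit in bits 31:22, so 4092MB gives
 * l = ((4092 >> 2) << 22) | (1 << 16).
 */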
static void init_amd_k7(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	u32 l, h;

	/*
	 * Bit 15 of Athlon specific MSR 15 needs to be 0
	 * to enable SSE on Palomino/Morgan/Barton CPUs.
	 * If the BIOS didn't enable it already, enable it here.
	 */
	if (c->x86_model >= 6 && c->x86_model <= 10) {
		if (!cpu_has(c, X86_FEATURE_XMM)) {
			pr_info("Enabling disabled K7/SSE Support.\n");
			msr_clear_bit(MSR_K7_HWCR, 15);
			set_cpu_cap(c, X86_FEATURE_XMM);
		}
	}

	/*
	 * It's been determined by AMD that Athlons since model 8 stepping 1
	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx,
	 * as per AMD technical note 27212 0.2.
	 */
	if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
		rdmsr(MSR_K7_CLK_CTL, l, h);
		if ((l & 0xfff00000) != 0x20000000) {
			pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
				l, ((l & 0x000fffff)|0x20000000));
			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
		}
	}

	set_cpu_cap(c, X86_FEATURE_K7);

	/* calling is from identify_secondary_cpu() ? */
	if (!c->cpu_index)
		return;

	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	/* Athlon 660/661 is valid. */
	if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
	    (c->x86_mask == 1)))
		return;

	/* Duron 670 is valid */
	if ((c->x86_model == 7) && (c->x86_mask == 0))
		return;

	/*
	 * Athlon 662, Duron 671, and Athlon >model 7 have capability
	 * bit. It's worth noting that the A5 stepping (662) of some
	 * Athlon XP's have the MP bit set.
	 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
	 * more.
	 */
	if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
	    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
	     (c->x86_model > 7))
		if (cpu_has(c, X86_FEATURE_MP))
			return;

	/* If we get here, not a certified SMP capable AMD system. */

	/*
	 * Don't taint if we are running SMP kernel on a single non-MP
	 * approved Athlon
	 */
	WARN_ONCE(1, "WARNING: This combination of AMD"
		" processors is not suitable for SMP.\n");
	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
#endif
}
#ifdef CONFIG_NUMA
/*
 * To workaround broken NUMA config. Read the comment in
 * srat_detect_node().
 */
static int nearby_node(int apicid)
{
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif
/*
 * Fixup core topology information for
 * (1) AMD multi-node processors
 *     Assumption: Number of cores in each internal node is the same.
 * (2) AMD processors supporting compute units
 */
static void amd_get_topology(struct cpuinfo_x86 *c)
{
	u8 node_id;
	int cpu = smp_processor_id();

	/* get information required for multi-node processors */
	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		u32 eax, ebx, ecx, edx;

		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);

		node_id  = ecx & 0xff;
		smp_num_siblings = ((ebx >> 8) & 0xff) + 1;

		if (c->x86 == 0x15)
			c->cu_id = ebx & 0xff;

		if (c->x86 >= 0x17) {
			c->cpu_core_id = ebx & 0xff;

			if (smp_num_siblings > 1)
				c->x86_max_cores /= smp_num_siblings;
		}

		/*
		 * We may have multiple LLCs if L3 caches exist, so check if we
		 * have an L3 cache by looking at the L3 cache CPUID leaf.
		 */
		if (cpuid_edx(0x80000006)) {
			if (c->x86 == 0x17) {
				/*
				 * LLC is at the core complex level.
				 * Core complex id is ApicId[3].
				 */
				per_cpu(cpu_llc_id, cpu) = c->apicid >> 3;
			} else {
				/* LLC is at the node level. */
				per_cpu(cpu_llc_id, cpu) = node_id;
			}
		}
	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
		u64 value;

		rdmsrl(MSR_FAM10H_NODE_ID, value);
		node_id = value & 7;

		per_cpu(cpu_llc_id, cpu) = node_id;
	} else
		return;

	/* fixup multi-node processor information */
	if (nodes_per_socket > 1) {
		u32 cus_per_node;

		set_cpu_cap(c, X86_FEATURE_AMD_DCM);
		cus_per_node = c->x86_max_cores / nodes_per_socket;

		/* core id has to be in the [0 .. cores_per_node - 1] range */
		c->cpu_core_id %= cus_per_node;
	}
}
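/*
 * Worked example for the Fam17h LLC path above (hypothetical APIC ID):
 * a CPU with apicid 0x0b has 0x0b >> 3 = 1, so it shares cpu_llc_id 1
 * with the other CPUs in its core complex.
 */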
/*
 * On an AMD dual-core setup the lower bits of the APIC id distinguish the cores.
 * Assumes number of cores is a power of two.
 */
static void amd_detect_cmp(struct cpuinfo_x86 *c)
{
	unsigned bits;
	int cpu = smp_processor_id();

	bits = c->x86_coreid_bits;
	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
	/* Convert the initial APIC ID into the socket ID */
	c->phys_proc_id = c->initial_apicid >> bits;
	/* use socket ID also for last level cache */
	per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
	amd_get_topology(c);
}
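/*
 * Worked example (hypothetical values): with x86_coreid_bits == 2 and
 * initial_apicid == 0b1101, cpu_core_id = 0b1101 & 0b11 = 1 and
 * phys_proc_id = 0b1101 >> 2 = 3, i.e. core 1 in socket 3.
 */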
u16 amd_get_nb_id(int cpu)
{
	u16 id = 0;
#ifdef CONFIG_SMP
	id = per_cpu(cpu_llc_id, cpu);
#endif
	return id;
}
EXPORT_SYMBOL_GPL(amd_get_nb_id);

u32 amd_get_nodes_per_socket(void)
{
	return nodes_per_socket;
}
EXPORT_SYMBOL_GPL(amd_get_nodes_per_socket);
static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node;
	unsigned apicid = c->apicid;

	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE)
		node = per_cpu(cpu_llc_id, cpu);

	/*
	 * On multi-fabric platform (e.g. Numascale NumaChip) a
	 * platform-specific handler needs to be called to fixup some
	 * IDs of the CPU.
	 */
	if (x86_cpuinit.fixup_cpu_id)
		x86_cpuinit.fixup_cpu_id(c, node);

	if (!node_online(node)) {
		/*
		 * Two possibilities here:
		 *
		 * - The CPU is missing memory and no node was created. In
		 *   that case try picking one from a nearby CPU.
		 *
		 * - The APIC IDs differ from the HyperTransport node IDs
		 *   which the K8 northbridge parsing fills in. Assume
		 *   they are all increased by a constant offset, but in
		 *   the same order as the HT nodeids. If that doesn't
		 *   result in a usable node fall back to the path for the
		 *   previous case.
		 *
		 * This workaround operates directly on the mapping between
		 * APIC ID and NUMA node, assuming certain relationship
		 * between APIC ID, HT node ID and NUMA topology. As going
		 * through CPU mapping may alter the outcome, directly
		 * access __apicid_to_node[].
		 */
		int ht_nodeid = c->initial_apicid;

		if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = __apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);
#endif
}
static void early_init_amd_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned bits, ecx;

	/* Multi core CPU? */
	if (c->extended_cpuid_level < 0x80000008)
		return;

	ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;

	/* Otherwise recompute */
	if (bits == 0) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	c->x86_coreid_bits = bits;
#endif
}
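/*
 * Worked example (hypothetical CPUID value): with 0x80000008 ECX =
 * 0x3007, NC (bits 7:0) is 7, so x86_max_cores = 8, and the
 * ApicIdCoreIdSize field (bits 15:12) yields x86_coreid_bits = 3.
 */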
static void bsp_init_amd(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	if (c->x86 >= 0xf) {
		unsigned long long tseg;

		/*
		 * Split up direct mapping around the TSEG SMM area.
		 * Don't do it for gbpages because there seems very little
		 * benefit in doing so.
		 */
		if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
			unsigned long pfn = tseg >> PAGE_SHIFT;

			pr_debug("tseg: %010llx\n", tseg);
			if (pfn_range_is_mapped(pfn, pfn + 1))
				set_memory_4k((unsigned long)__va(tseg), 1);
		}
	}
#endif

	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {

		if (c->x86 > 0x10 ||
		    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
			u64 val;

			rdmsrl(MSR_K7_HWCR, val);
			if (!(val & BIT(24)))
				pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
		}
	}

	if (c->x86 == 0x15) {
		unsigned long upperbit;
		u32 cpuid, assoc;

		cpuid	 = cpuid_edx(0x80000005);
		assoc	 = cpuid >> 16 & 0xff;
		upperbit = ((cpuid >> 24) << 10) / assoc;

		va_align.mask	= (upperbit - 1) & PAGE_MASK;
		va_align.flags	= ALIGN_VA_32 | ALIGN_VA_64;

		/* A random value per boot for bit slice [12:upper_bit) */
		va_align.bits = get_random_int() & va_align.mask;
	}

	if (cpu_has(c, X86_FEATURE_MWAITX))
		use_mwaitx_delay();

	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		u32 ecx;

		ecx = cpuid_ecx(0x8000001e);
		nodes_per_socket = ((ecx >> 8) & 7) + 1;
	} else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
		u64 value;

		rdmsrl(MSR_FAM10H_NODE_ID, value);
		nodes_per_socket = ((value >> 3) & 7) + 1;
	}

	if (c->x86 >= 0x15 && c->x86 <= 0x17) {
		unsigned int bit;

		switch (c->x86) {
		case 0x15: bit = 54; break;
		case 0x16: bit = 33; break;
		case 0x17: bit = 10; break;
		default: return;
		}
		/*
		 * Try to cache the base value so further operations can
		 * avoid RMW. If that faults, do not enable SSBD.
		 */
		if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
			setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
			setup_force_cpu_cap(X86_FEATURE_SSBD);
			x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
		}
	}
}
static void early_init_amd(struct cpuinfo_x86 *c)
{
	early_init_amd_mc(c);

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	}

	/* Bit 12 of 8000_0007 edx is accumulated power mechanism. */
	if (c->x86_power & BIT(12))
		set_cpu_cap(c, X86_FEATURE_ACC_POWER);

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#else
	/* Set MTRR capability flag if appropriate */
	if (c->x86 == 5)
		if (c->x86_model == 13 || c->x86_model == 9 ||
		    (c->x86_model == 8 && c->x86_mask >= 8))
			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
#endif
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
	/*
	 * ApicID can always be treated as an 8-bit value for AMD APIC versions
	 * >= 0x10, but even old K8s came out of reset with version 0x10. So, we
	 * can safely set X86_FEATURE_EXTD_APICID unconditionally for families
	 * after 16h.
	 */
	if (boot_cpu_has(X86_FEATURE_APIC)) {
		if (c->x86 > 0x16)
			set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
		else if (c->x86 >= 0xf) {
			/* check CPU config space for extended APIC ID */
			unsigned int val;

			val = read_pci_config(0, 24, 0, 0x68);
			if ((val >> 17 & 0x3) == 0x3)
				set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
		}
	}
#endif

	/*
	 * This is only needed to tell the kernel whether to use VMCALL
	 * and VMMCALL. VMMCALL is never executed except under virt, so
	 * we can set it unconditionally.
	 */
	set_cpu_cap(c, X86_FEATURE_VMMCALL);

	/* F16h erratum 793, CVE-2013-6885 */
	if (c->x86 == 0x16 && c->x86_model <= 0xf)
		msr_set_bit(MSR_AMD64_LS_CFG, 15);

	/*
	 * Check whether the machine is affected by erratum 400. This is
	 * used to select the proper idle routine and to enable the check
	 * whether the machine is affected in arch_post_acpi_init(), which
	 * sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check.
	 */
	if (cpu_has_amd_erratum(c, amd_erratum_400))
		set_cpu_bug(c, X86_BUG_AMD_E400);
}
static void init_amd_k8(struct cpuinfo_x86 *c)
{
	u32 level;
	u64 value;

	/* On C+ stepping K8 rep microcode works well for copy/memset */
	level = cpuid_eax(1);
	if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/*
	 * Some BIOSes incorrectly force this feature, but only K8 revision D
	 * (model = 0x14) and later actually support it.
	 * (AMD Erratum #110, docId: 25759).
	 */
	if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
		clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
		if (!rdmsrl_amd_safe(0xc001100d, &value)) {
			value &= ~BIT_64(32);
			wrmsrl_amd_safe(0xc001100d, value);
		}
	}

	if (!c->x86_model_id[0])
		strcpy(c->x86_model_id, "Hammer");

#ifdef CONFIG_SMP
	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	msr_set_bit(MSR_K7_HWCR, 6);
#endif

	set_cpu_bug(c, X86_BUG_SWAPGS_FENCE);
}
static void init_amd_gh(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_MMCONF_FAM10H
	/* do this for boot cpu */
	if (c == &boot_cpu_data)
		check_enable_amd_mmconf_dmi();

	fam10h_check_enable_mmcfg();
#endif

	/*
	 * Disable GART TLB Walk Errors on Fam10h. We do this here because this
	 * is always needed when GART is enabled, even in a kernel which has no
	 * MCE support built in. BIOS should disable GartTlbWlk Errors already.
	 * If it doesn't, we do it here as suggested by the BKDG.
	 *
	 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
	 */
	msr_set_bit(MSR_AMD64_MCx_MASK(4), 10);

	/*
	 * On family 10h BIOS may not have properly enabled WC+ support, causing
	 * it to be converted to CD memtype. This may result in performance
	 * degradation for certain nested-paging guests. Prevent this conversion
	 * by clearing bit 24 in MSR_AMD64_BU_CFG2.
	 *
	 * NOTE: we want to use the _safe accessors so as not to #GP kvm
	 * guests on older kvm hosts.
	 */
	msr_clear_bit(MSR_AMD64_BU_CFG2, 24);

	if (cpu_has_amd_erratum(c, amd_erratum_383))
		set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
}
#define MSR_AMD64_DE_CFG	0xC0011029

static void init_amd_ln(struct cpuinfo_x86 *c)
{
	/*
	 * Apply erratum 665 fix unconditionally so machines without a BIOS
	 * fix work.
	 */
	msr_set_bit(MSR_AMD64_DE_CFG, 31);
}
static void init_amd_bd(struct cpuinfo_x86 *c)
{
	u64 value;

	/* re-enable TopologyExtensions if switched off by BIOS */
	if ((c->x86_model >= 0x10) && (c->x86_model <= 0x6f) &&
	    !cpu_has(c, X86_FEATURE_TOPOEXT)) {

		if (msr_set_bit(0xc0011005, 54) > 0) {
			rdmsrl(0xc0011005, value);
			if (value & BIT_64(54)) {
				set_cpu_cap(c, X86_FEATURE_TOPOEXT);
				pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
			}
		}
	}

	/*
	 * The way access filter has a performance penalty on some workloads.
	 * Disable it on the affected CPUs.
	 */
	if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
		if (!rdmsrl_safe(MSR_F15H_IC_CFG, &value) && !(value & 0x1E)) {
			value |= 0x1E;
			wrmsrl_safe(MSR_F15H_IC_CFG, value);
		}
	}
}
static void init_amd_zn(struct cpuinfo_x86 *c)
{
	/*
	 * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects
	 * all up to and including B1.
	 */
	if (c->x86_model <= 1 && c->x86_mask <= 1)
		set_cpu_cap(c, X86_FEATURE_CPB);
}
static void init_amd(struct cpuinfo_x86 *c)
{
	u32 dummy;

	early_init_amd(c);

	/*
	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
	 */
	clear_cpu_cap(c, 0*32+31);

	if (c->x86 >= 0x10)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* get apicid instead of initial apic id from cpuid */
	c->apicid = hard_smp_processor_id();

	/* K6s report MCEs but don't actually have all the MSRs */
	if (c->x86 < 6)
		clear_cpu_cap(c, X86_FEATURE_MCE);

	switch (c->x86) {
	case 4:    init_amd_k5(c); break;
	case 5:    init_amd_k6(c); break;
	case 6:    init_amd_k7(c); break;
	case 0xf:  init_amd_k8(c); break;
	case 0x10: init_amd_gh(c); break;
	case 0x12: init_amd_ln(c); break;
	case 0x15: init_amd_bd(c); break;
	case 0x17: init_amd_zn(c); break;
	}

	/*
	 * Enable workaround for FXSAVE leak on CPUs
	 * without a XSaveErPtr feature
	 */
	if ((c->x86 >= 6) && (!cpu_has(c, X86_FEATURE_XSAVEERPTR)))
		set_cpu_bug(c, X86_BUG_FXSAVE_LEAK);

	cpu_detect_cache_sizes(c);

	/* Multi core CPU? */
	if (c->extended_cpuid_level >= 0x80000008) {
		amd_detect_cmp(c);
		srat_detect_node(c);
	}

	init_amd_cacheinfo(c);

	if (c->x86 >= 0xf)
		set_cpu_cap(c, X86_FEATURE_K8);
	if (cpu_has(c, X86_FEATURE_XMM2)) {
		unsigned long long val;
		int ret;

		/*
		 * A serializing LFENCE has less overhead than MFENCE, so
		 * use it for execution serialization. On families which
		 * don't have that MSR, LFENCE is already serializing.
		 * msr_set_bit() uses the safe accessors, too, even if the MSR
		 * is not present.
		 */
		msr_set_bit(MSR_F10H_DECFG,
			    MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);

		/*
		 * Verify that the MSR write was successful (could be running
		 * under a hypervisor) and only then assume that LFENCE is
		 * serializing.
		 */
		ret = rdmsrl_safe(MSR_F10H_DECFG, &val);
		if (!ret && (val & MSR_F10H_DECFG_LFENCE_SERIALIZE)) {
			/* A serializing LFENCE stops RDTSC speculation */
			set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
		} else {
			/* MFENCE stops RDTSC speculation */
			set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
		}
	}

	/*
	 * Family 0x12 and above processors have APIC timer
	 * running in deep C states.
	 */
	if (c->x86 > 0x11)
		set_cpu_cap(c, X86_FEATURE_ARAT);

	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);

	/* 3DNow or LM implies PREFETCHW */
	if (!cpu_has(c, X86_FEATURE_3DNOWPREFETCH))
		if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
			set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);

	/* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
	if (!cpu_has(c, X86_FEATURE_XENPV))
		set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
	/* AMD speculative control support */
	if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
		pr_info_once("FEATURE SPEC_CTRL Present\n");
		set_ibrs_supported();
		set_ibpb_supported();
		if (ibrs_inuse)
			sysctl_ibrs_enabled = 1;
		if (ibpb_inuse)
			sysctl_ibpb_enabled = 1;
		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
	} else if (cpu_has(c, X86_FEATURE_AMD_IBPB)) {
		pr_info_once("FEATURE SPEC_CTRL Not Present\n");
		pr_info_once("FEATURE IBPB Present\n");
		set_ibpb_supported();
		if (ibpb_inuse)
			sysctl_ibpb_enabled = 1;
	} else {
		pr_info_once("FEATURE SPEC_CTRL Not Present\n");
		pr_info_once("FEATURE IBPB Not Present\n");
		/*
		 * On AMD processors that do not support the speculative
		 * control features, IBPB type support can be achieved by
		 * disabling indirect branch predictor support.
		 */
		if (!ibpb_disabled) {
			u64 val;

			pr_info_once("Disabling indirect branch predictor support\n");
			rdmsrl(MSR_F15H_IC_CFG, val);
			val |= MSR_F15H_IC_CFG_DIS_IND;
			wrmsrl(MSR_F15H_IC_CFG, val);
		}
	}

	if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD))
		set_cpu_cap(c, X86_FEATURE_SSBD);
}
#ifdef CONFIG_X86_32
static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/* AMD errata T13 (order #21922) */
	if (c->x86 == 6) {
		/* Duron Rev A0 */
		if (c->x86_model == 3 && c->x86_mask == 0)
			size = 64;
		/* Tbird rev A1/A2 */
		if (c->x86_model == 4 &&
		    (c->x86_mask == 0 || c->x86_mask == 1))
			size = 256;
	}
	return size;
}
#endif
static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
{
	u32 ebx, eax, ecx, edx;
	u16 mask = 0xfff;

	if (c->x86 < 0xf)
		return;

	if (c->extended_cpuid_level < 0x80000006)
		return;

	cpuid(0x80000006, &eax, &ebx, &ecx, &edx);

	tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
	tlb_lli_4k[ENTRIES] = ebx & mask;

	/*
	 * K8 doesn't have 2M/4M entries in the L2 TLB so read out the L1 TLB
	 * characteristics from the CPUID function 0x80000005 instead.
	 */
	if (c->x86 == 0xf) {
		cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
		mask = 0xff;
	}

	/* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!((eax >> 16) & mask))
		tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
	else
		tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;

	/* a 4M entry uses two 2M entries */
	tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;

	/* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!(eax & mask)) {
		/* Erratum 658 */
		if (c->x86 == 0x15 && c->x86_model <= 0x1f) {
			tlb_lli_2m[ENTRIES] = 1024;
		} else {
			cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
			tlb_lli_2m[ENTRIES] = eax & 0xff;
		}
	} else
		tlb_lli_2m[ENTRIES] = eax & mask;

	tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;
}
static const struct cpu_dev amd_cpu_dev = {
	.c_vendor	= "AMD",
	.c_ident	= { "AuthenticAMD" },
#ifdef CONFIG_X86_32
	.legacy_models = {
		{ .family = 4, .model_names =
		  {
			  [3] = "486 DX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB",
			  [14] = "Am5x86-WT",
			  [15] = "Am5x86-WB"
		  }
		},
	},
	.legacy_cache_size = amd_size_cache,
#endif
	.c_early_init	= early_init_amd,
	.c_detect_tlb	= cpu_detect_tlb_amd,
	.c_bsp_init	= bsp_init_amd,
	.c_init		= init_amd,
	.c_x86_vendor	= X86_VENDOR_AMD,
};

cpu_dev_register(amd_cpu_dev);
/*
 * AMD errata checking
 *
 * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
 * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
 * have an OSVW id assigned, which it takes as first argument. Both take a
 * variable number of family-specific model-stepping ranges created by
 * AMD_MODEL_RANGE().
 *
 * Example:
 *
 * const int amd_erratum_319[] =
 *	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
 *			   AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
 *			   AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
 */

#define AMD_LEGACY_ERRATUM(...)		{ -1, __VA_ARGS__, 0 }
#define AMD_OSVW_ERRATUM(osvw_id, ...)	{ osvw_id, __VA_ARGS__, 0 }
#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
	((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
#define AMD_MODEL_RANGE_FAMILY(range)	(((range) >> 24) & 0xff)
#define AMD_MODEL_RANGE_START(range)	(((range) >> 12) & 0xfff)
#define AMD_MODEL_RANGE_END(range)	((range) & 0xfff)
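/*
 * Worked example using the first range in the amd_erratum_319 example
 * above: AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2) packs to 0x10021042,
 * so AMD_MODEL_RANGE_FAMILY() yields 0x10 and the start/end keys are
 * 0x021 and 0x042. A family 0x10, model 0x3, stepping 0x1 part has
 * ms = (0x3 << 4) | 0x1 = 0x031 and therefore matches in
 * cpu_has_amd_erratum().
 */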
static const int amd_erratum_400[] =
	AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
			    AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));

static const int amd_erratum_383[] =
	AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
{
	int osvw_id = *erratum++;
	u32 range;
	u32 ms;

	if (osvw_id >= 0 && osvw_id < 65536 &&
	    cpu_has(cpu, X86_FEATURE_OSVW)) {
		u64 osvw_len;

		rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
		if (osvw_id < osvw_len) {
			u64 osvw_bits;

			rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
			       osvw_bits);
			return osvw_bits & (1ULL << (osvw_id & 0x3f));
		}
	}

	/* OSVW unavailable or ID unknown, match family-model-stepping range */
	ms = (cpu->x86_model << 4) | cpu->x86_mask;
	while ((range = *erratum++))
		if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
		    (ms >= AMD_MODEL_RANGE_START(range)) &&
		    (ms <= AMD_MODEL_RANGE_END(range)))
			return true;

	return false;
}
void set_dr_addr_mask(unsigned long mask, int dr)
{
	if (!boot_cpu_has(X86_FEATURE_BPEXT))
		return;

	switch (dr) {
	case 0:
		wrmsr(MSR_F16H_DR0_ADDR_MASK, mask, 0);
		break;
	case 1:
	case 2:
	case 3:
		wrmsr(MSR_F16H_DR1_ADDR_MASK - 1 + dr, mask, 0);
		break;
	default:
		break;
	}
}
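/*
 * Mapping sketch: dr == 0 uses MSR_F16H_DR0_ADDR_MASK; for dr 1..3 the
 * mask MSRs are contiguous, so MSR_F16H_DR1_ADDR_MASK - 1 + dr selects
 * the matching register (e.g. dr == 3 writes MSR_F16H_DR1_ADDR_MASK + 2).
 */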