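/*
 * arch/x86/kernel/cpu/amd.c: AMD-specific CPU setup - vendor detection,
 * per-family workarounds and erratum handling from K5/K6 through Fam15h.
 */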
#include <linux/export.h>
#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/mm.h>

#include <linux/io.h>
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/cpu.h>
#include <asm/smp.h>
#include <asm/pci-direct.h>

#ifdef CONFIG_X86_64
# include <asm/mmconfig.h>
# include <asm/cacheflush.h>
#endif

#include "cpu.h"
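/*
 * Some K8 MSRs are only accessible when the AMD "pass key" is loaded
 * into %edi, which is why the helpers below go through the full-GPR
 * rdmsr_safe_regs()/wrmsr_safe_regs() interface instead of plain
 * rdmsr/wrmsr.
 */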
static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
	u32 gprs[8] = { 0 };
	int err;

	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

	gprs[1] = msr;		/* %ecx: MSR number */
	gprs[7] = 0x9c5a203a;	/* %edi: AMD pass key */

	err = rdmsr_safe_regs(gprs);

	*p = gprs[0] | ((u64)gprs[2] << 32);	/* %edx:%eax */

	return err;
}
static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
{
	u32 gprs[8] = { 0 };

	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

	gprs[0] = (u32)val;	/* %eax: low half */
	gprs[1] = msr;		/* %ecx: MSR number */
	gprs[2] = val >> 32;	/* %edx: high half */
	gprs[7] = 0x9c5a203a;	/* %edi: AMD pass key */

	return wrmsr_safe_regs(gprs);
}
/*
 * B step AMD K6 before B 9730xxxx have hardware bugs that can cause
 * misexecution of code under Linux. Owners of such processors should
 * contact AMD for precise details and a CPU swap.
 *
 * See	http://www.multimania.com/poulot/k6bug.html
 * and	section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
 *	(Publication # 21266  Issue Date: August 1998)
 *
 * The following test is erm.. interesting. AMD neglected to up
 * the chip setting when fixing the bug but they also tweaked some
 * performance at the same time..
 */

extern __visible void vide(void);
__asm__(".globl vide\n\t.align 4\nvide: ret");
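/*
 * vide() is kept out of line and aligned on purpose: the K6 erratum probe
 * in init_amd_k6() below times a tight loop of indirect calls to it and
 * judges the stepping fix by how quickly those calls retire.
 */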
static void init_amd_k5(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
/*
 * General Systems BIOSen alias the cpu frequency registers
 * of the Elan at 0x000df000. Unfortunately, one of the Linux
 * drivers subsequently pokes it, and changes the CPU speed.
 * Workaround : Remove the unneeded alias.
 */
#define CBAR		(0xfffc) /* Configuration Base Address  (32-bit) */
#define CBAR_ENB	(0x80000000)
#define CBAR_KEY	(0x000000CB)
	if (c->x86_model == 9 || c->x86_model == 10) {
		if (inl(CBAR) & CBAR_ENB)
			outl(0 | CBAR_KEY, CBAR);
	}
#endif
}
static void init_amd_k6(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	u32 l, h;
	int mbytes = get_num_physpages() >> (20-PAGE_SHIFT);

	if (c->x86_model < 6) {
		/* Based on AMD doc 20734R - June 2000 */
		if (c->x86_model == 0) {
			clear_cpu_cap(c, X86_FEATURE_APIC);
			set_cpu_cap(c, X86_FEATURE_PGE);
		}
		return;
	}

	if (c->x86_model == 6 && c->x86_mask == 1) {
		const int K6_BUG_LOOP = 1000000;
		int n;
		void (*f_vide)(void);
		u64 d, d2;

		printk(KERN_INFO "AMD K6 stepping B detected - ");

		/*
		 * It looks like AMD fixed the 2.6.2 bug and improved indirect
		 * calls at the same time.
		 */

		n = K6_BUG_LOOP;
		f_vide = vide;
		rdtscll(d);
		while (n--)
			f_vide();
		rdtscll(d2);
		d = d2-d;

		if (d > 20*K6_BUG_LOOP)
			printk(KERN_CONT
			       "system stability may be impaired when more than 32 MB are used.\n");
		else
			printk(KERN_CONT "probably OK (after B9730xxxx).\n");
	}

	/* K6 with old style WHCR */
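	/*
	 * WHCR layout changed across K6 revisions: on older parts the
	 * write-allocate enable bit and 4 MB-granular limit sit in the low
	 * 16 bits, on model 8 stepping 8 and later they sit in the high 16
	 * bits, hence the two code paths that follow.
	 */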
	if (c->x86_model < 8 ||
	   (c->x86_model == 8 && c->x86_mask < 8)) {
		/* We can only write allocate on the low 508Mb */
		if (mbytes > 508)
			mbytes = 508;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0x0000FFFF) == 0) {
			unsigned long flags;
			l = (1<<0)|((mbytes/4)<<1);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
			       mbytes);
		}
		return;
	}

	if ((c->x86_model == 8 && c->x86_mask > 7) ||
	     c->x86_model == 9 || c->x86_model == 13) {
		/* The more serious chips .. */

		if (mbytes > 4092)
			mbytes = 4092;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0xFFFF0000) == 0) {
			unsigned long flags;
			l = ((mbytes>>2)<<22)|(1<<16);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
			       mbytes);
		}

		return;
	}

	if (c->x86_model == 10) {
		/* AMD Geode LX is model 10 */
		/* placeholder for any needed mods */
		return;
	}
#endif
}
static void init_amd_k7(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	u32 l, h;

	/*
	 * Bit 15 of Athlon specific MSR 15, needs to be 0
	 * to enable SSE on Palomino/Morgan/Barton CPU's.
	 * If the BIOS didn't enable it already, enable it here.
	 */
	if (c->x86_model >= 6 && c->x86_model <= 10) {
		if (!cpu_has(c, X86_FEATURE_XMM)) {
			printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
			msr_clear_bit(MSR_K7_HWCR, 15);
			set_cpu_cap(c, X86_FEATURE_XMM);
		}
	}

	/*
	 * It's been determined by AMD that Athlons since model 8 stepping 1
	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
	 * As per AMD technical note 27212 0.2
	 */
	if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
		rdmsr(MSR_K7_CLK_CTL, l, h);
		if ((l & 0xfff00000) != 0x20000000) {
			printk(KERN_INFO
			    "CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
					l, ((l & 0x000fffff)|0x20000000));
			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
		}
	}

	set_cpu_cap(c, X86_FEATURE_K7);

	/* calling is from identify_secondary_cpu() ? */
	if (!c->cpu_index)
		return;

	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	/* Athlon 660/661 is valid. */
	if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
	    (c->x86_mask == 1)))
		return;

	/* Duron 670 is valid */
	if ((c->x86_model == 7) && (c->x86_mask == 0))
		return;

	/*
	 * Athlon 662, Duron 671, and Athlon >model 7 have capability
	 * bit. It's worth noting that the A5 stepping (662) of some
	 * Athlon XP's have the MP bit set.
	 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
	 * more.
	 */
	if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
	    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
	     (c->x86_model > 7))
		if (cpu_has(c, X86_FEATURE_MP))
			return;

	/* If we get here, not a certified SMP capable AMD system. */

	/*
	 * Don't taint if we are running SMP kernel on a single non-MP
	 * approved Athlon
	 */
	WARN_ONCE(1, "WARNING: This combination of AMD"
		" processors is not suitable for SMP.\n");
	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
#endif
}
#ifdef CONFIG_NUMA
/*
 * To workaround broken NUMA config.  Read the comment in
 * srat_detect_node().
 */
static int nearby_node(int apicid)
{
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif
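/*
 * Note the heuristic above: APIC IDs are assumed to be handed out roughly
 * in topology order, so a neighbouring APIC ID is a reasonable guess for
 * a nearby online node.
 */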
/*
 * Fixup core topology information for
 *  (1) AMD multi-node processors
 *      Assumption: Number of cores in each internal node is the same.
 *  (2) AMD processors supporting compute units
 */
#ifdef CONFIG_X86_HT
static void amd_get_topology(struct cpuinfo_x86 *c)
{
	u32 nodes, cores_per_cu = 1;
	u8 node_id;
	int cpu = smp_processor_id();

	/* get information required for multi-node processors */
	if (cpu_has_topoext) {
		u32 eax, ebx, ecx, edx;

		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
		nodes = ((ecx >> 8) & 7) + 1;
		node_id = ecx & 7;

		/* get compute unit information */
		smp_num_siblings = ((ebx >> 8) & 3) + 1;
		c->compute_unit_id = ebx & 0xff;
		cores_per_cu += ((ebx >> 8) & 3);
	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
		u64 value;

		rdmsrl(MSR_FAM10H_NODE_ID, value);
		nodes = ((value >> 3) & 7) + 1;
		node_id = value & 7;
	} else
		return;

	/* fixup multi-node processor information */
	if (nodes > 1) {
		u32 cores_per_node;
		u32 cus_per_node;

		set_cpu_cap(c, X86_FEATURE_AMD_DCM);
		cores_per_node = c->x86_max_cores / nodes;
		cus_per_node = cores_per_node / cores_per_cu;

		/* store NodeID, use llc_shared_map to store sibling info */
		per_cpu(cpu_llc_id, cpu) = node_id;

		/* core id has to be in the [0 .. cores_per_node - 1] range */
		c->cpu_core_id %= cores_per_node;
		c->compute_unit_id %= cus_per_node;
	}
}
#endif
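/*
 * As decoded above, CPUID 0x8000001e packs the topology as: ECX[10:8] =
 * nodes per processor - 1, ECX[2:0] (as masked here) = this core's node
 * id, EBX[9:8] = cores per compute unit - 1, EBX[7:0] = compute unit id.
 * E.g. ECX = 0x101 on a dual-node part gives nodes = 2, node_id = 1.
 */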
/*
 * On an AMD dual core setup the lower bits of the APIC id distinguish the
 * cores.  Assumes number of cores is a power of two.
 */
static void amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
	unsigned bits;
	int cpu = smp_processor_id();

	bits = c->x86_coreid_bits;
	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
	/* Convert the initial APIC ID into the socket ID */
	c->phys_proc_id = c->initial_apicid >> bits;
	/* use socket ID also for last level cache */
	per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
	amd_get_topology(c);
#endif
}
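/*
 * Worked example for amd_detect_cmp(): with x86_coreid_bits == 2 and
 * initial APIC ID 0b1101, cpu_core_id = 0b1101 & 0b11 = 1 and
 * phys_proc_id = 0b1101 >> 2 = 3.
 */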
u16 amd_get_nb_id(int cpu)
{
	u16 id = 0;
#ifdef CONFIG_SMP
	id = per_cpu(cpu_llc_id, cpu);
#endif
	return id;
}
EXPORT_SYMBOL_GPL(amd_get_nb_id);
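/*
 * amd_get_nb_id() is exported so other code (the amd_nb/northbridge
 * support, for instance) can retrieve the node id that the topology
 * code above cached in cpu_llc_id.
 */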
static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node;
	unsigned apicid = c->apicid;

	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE)
		node = per_cpu(cpu_llc_id, cpu);

	/*
	 * On multi-fabric platform (e.g. Numascale NumaChip) a
	 * platform-specific handler needs to be called to fixup some
	 * IDs of the CPU.
	 */
	if (x86_cpuinit.fixup_cpu_id)
		x86_cpuinit.fixup_cpu_id(c, node);

	if (!node_online(node)) {
		/*
		 * Two possibilities here:
		 *
		 * - The CPU is missing memory and no node was created. In
		 *   that case try picking one from a nearby CPU.
		 *
		 * - The APIC IDs differ from the HyperTransport node IDs
		 *   which the K8 northbridge parsing fills in. Assume
		 *   they are all increased by a constant offset, but in
		 *   the same order as the HT nodeids. If that doesn't
		 *   result in a usable node fall back to the path for the
		 *   previous case.
		 *
		 * This workaround operates directly on the mapping between
		 * APIC ID and NUMA node, assuming certain relationship
		 * between APIC ID, HT node ID and NUMA topology. As going
		 * through CPU mapping may alter the outcome, directly
		 * access __apicid_to_node[].
		 */
		int ht_nodeid = c->initial_apicid;

		if (ht_nodeid >= 0 &&
		    __apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = __apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);
#endif
}
static void early_init_amd_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
	unsigned bits, ecx;

	/* Multi core CPU? */
	if (c->extended_cpuid_level < 0x80000008)
		return;

	ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;

	/* Otherwise recompute */
	if (bits == 0) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	c->x86_coreid_bits = bits;
#endif
}
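/*
 * CPUID 0x80000008 ECX[7:0] is NC (core count - 1) and ECX[15:12] is
 * ApicIdCoreIdSize; when the latter is zero the shift is recomputed
 * above as the smallest power of two that covers x86_max_cores.
 */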
static void bsp_init_amd(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	if (c->x86 >= 0xf) {
		unsigned long long tseg;

		/*
		 * Split up direct mapping around the TSEG SMM area.
		 * Don't do it for gbpages because there seems very little
		 * benefit in doing so.
		 */
		if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
			unsigned long pfn = tseg >> PAGE_SHIFT;

			printk(KERN_DEBUG "tseg: %010llx\n", tseg);
			if (pfn_range_is_mapped(pfn, pfn + 1))
				set_memory_4k((unsigned long)__va(tseg), 1);
		}
	}
#endif

	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {

		if (c->x86 > 0x10 ||
		    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
			u64 val;

			rdmsrl(MSR_K7_HWCR, val);
			if (!(val & BIT(24)))
				printk(KERN_WARNING FW_BUG "TSC doesn't count "
					"with P0 frequency!\n");
		}
	}

	if (c->x86 == 0x15) {
		unsigned long upperbit;
		u32 cpuid, assoc;

		cpuid	 = cpuid_edx(0x80000005);
		assoc	 = cpuid >> 16 & 0xff;
		upperbit = ((cpuid >> 24) << 10) / assoc;

		va_align.mask	  = (upperbit - 1) & PAGE_MASK;
		va_align.flags    = ALIGN_VA_32 | ALIGN_VA_64;
	}
}
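/*
 * The family 0x15 block above derives the L1 I-cache way size (total
 * size over associativity, both from CPUID 0x80000005 EDX) and records
 * it in va_align, so mmap'd regions get aligned to avoid I-cache
 * aliasing penalties on those parts.
 */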
static void early_init_amd(struct cpuinfo_x86 *c)
{
	early_init_amd_mc(c);

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
		if (!check_tsc_unstable())
			set_sched_clock_stable();
	}

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#else
	/* Set MTRR capability flag if appropriate */
	if (c->x86 == 5)
		if (c->x86_model == 13 || c->x86_model == 9 ||
		    (c->x86_model == 8 && c->x86_mask >= 8))
			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
#endif
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
	/* check CPU config space for extended APIC ID */
	if (cpu_has_apic && c->x86 >= 0xf) {
		unsigned int val;

		val = read_pci_config(0, 24, 0, 0x68);
		if ((val & ((1 << 17) | (1 << 18))) == ((1 << 17) | (1 << 18)))
			set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
	}
#endif

	/*
	 * This is only needed to tell the kernel whether to use VMCALL
	 * and VMMCALL.  VMMCALL is never executed except under virt, so
	 * we can set it unconditionally.
	 */
	set_cpu_cap(c, X86_FEATURE_VMMCALL);

	/* F16h erratum 793, CVE-2013-6885 */
	if (c->x86 == 0x16 && c->x86_model <= 0xf)
		msr_set_bit(MSR_AMD64_LS_CFG, 15);
}
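/*
 * read_pci_config(0, 24, 0, 0x68) above reads PCI bus 0, device 0x18,
 * function 0 - the fixed config-space location of the first AMD
 * northbridge on family 0xf and later; bits 17 and 18 of register 0x68
 * advertise extended APIC ID support.
 */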
static const int amd_erratum_383[];
static const int amd_erratum_400[];
static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);
static void init_amd_k8(struct cpuinfo_x86 *c)
{
	u32 level;
	u64 value;

	/* On C+ stepping K8 rep microcode works well for copy/memset */
	level = cpuid_eax(1);
	if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/*
	 * Some BIOSes incorrectly force this feature, but only K8 revision D
	 * (model = 0x14) and later actually support it.
	 * (AMD Erratum #110, docId: 25759).
	 */
	if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
		clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
		if (!rdmsrl_amd_safe(0xc001100d, &value)) {
			value &= ~BIT_64(32);
			wrmsrl_amd_safe(0xc001100d, value);
		}
	}

	if (!c->x86_model_id[0])
		strcpy(c->x86_model_id, "Hammer");

	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	msr_set_bit(MSR_K7_HWCR, 6);
}
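/*
 * In the LAHF_LM fixup above, bit 32 of MSR 0xc001100d controls whether
 * CPUID 0x80000001 reports the feature at all, so clearing it keeps
 * CPUID consistent with the capability bit cleared in software; this MSR
 * needs the pass-key helpers from the top of the file.
 */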
static void init_amd_gh(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	/* do this for boot cpu */
	if (c == &boot_cpu_data)
		check_enable_amd_mmconf_dmi();

	fam10h_check_enable_mmcfg();
#endif

	/*
	 * Disable GART TLB Walk Errors on Fam10h. We do this here because this
	 * is always needed when GART is enabled, even in a kernel which has no
	 * MCE support built in. BIOS should disable GartTlbWlk Errors already.
	 * If it doesn't, we do it here as suggested by the BKDG.
	 *
	 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
	 */
	msr_set_bit(MSR_AMD64_MCx_MASK(4), 10);

	/*
	 * On family 10h BIOS may not have properly enabled WC+ support, causing
	 * it to be converted to CD memtype. This may result in performance
	 * degradation for certain nested-paging guests. Prevent this conversion
	 * by clearing bit 24 in MSR_AMD64_BU_CFG2.
	 *
	 * NOTE: we want to use the _safe accessors so as not to #GP kvm
	 * guests on older kvm hosts.
	 */
	msr_clear_bit(MSR_AMD64_BU_CFG2, 24);

	if (cpu_has_amd_erratum(c, amd_erratum_383))
		set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
}
static void init_amd_bd(struct cpuinfo_x86 *c)
{
	u64 value;

	/* re-enable TopologyExtensions if switched off by BIOS */
	if ((c->x86_model >= 0x10) && (c->x86_model <= 0x1f) &&
	    !cpu_has(c, X86_FEATURE_TOPOEXT)) {

		if (msr_set_bit(0xc0011005, 54) > 0) {
			rdmsrl(0xc0011005, value);
			if (value & BIT_64(54)) {
				set_cpu_cap(c, X86_FEATURE_TOPOEXT);
				pr_info(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
			}
		}
	}

	/*
	 * The way access filter has a performance penalty on some workloads.
	 * Disable it on the affected CPUs.
	 */
	if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
		if (!rdmsrl_safe(0xc0011021, &value) && !(value & 0x1E)) {
			value |= 0x1E;
			wrmsrl_safe(0xc0011021, value);
		}
	}
}
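/*
 * init_amd() is the common entry point for all AMD parts: generic fixups
 * first, then the per-family init_amd_*() hook selected by the switch
 * below, then feature and bug flags that span several families.
 */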
static void init_amd(struct cpuinfo_x86 *c)
{
	u32 dummy;

	early_init_amd(c);

	/*
	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
	 */
	clear_cpu_cap(c, 0*32+31);

	if (c->x86 >= 0x10)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* get apicid instead of initial apic id from cpuid */
	c->apicid = hard_smp_processor_id();

	/* K6s report MCEs but don't actually have all the MSRs */
	if (c->x86 < 6)
		clear_cpu_cap(c, X86_FEATURE_MCE);

	switch (c->x86) {
	case 4:    init_amd_k5(c); break;
	case 5:    init_amd_k6(c); break;
	case 6:	   init_amd_k7(c); break;
	case 0xf:  init_amd_k8(c); break;
	case 0x10: init_amd_gh(c); break;
	case 0x15: init_amd_bd(c); break;
	}

	/* Enable workaround for FXSAVE leak */
	if (c->x86 >= 6)
		set_cpu_bug(c, X86_BUG_FXSAVE_LEAK);

	cpu_detect_cache_sizes(c);

	/* Multi core CPU? */
	if (c->extended_cpuid_level >= 0x80000008) {
		amd_detect_cmp(c);
		srat_detect_node(c);
	}

#ifdef CONFIG_X86_32
	detect_ht(c);
#endif

	init_amd_cacheinfo(c);

	if (c->x86 >= 0xf)
		set_cpu_cap(c, X86_FEATURE_K8);

	if (cpu_has_xmm2) {
		/* MFENCE stops RDTSC speculation */
		set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
	}

	/*
	 * Family 0x12 and above processors have APIC timer
	 * running in deep C states.
	 */
	if (c->x86 > 0x11)
		set_cpu_cap(c, X86_FEATURE_ARAT);

	if (cpu_has_amd_erratum(c, amd_erratum_400))
		set_cpu_bug(c, X86_BUG_AMD_APIC_C1E);

	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
}
#ifdef CONFIG_X86_32
static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/* AMD errata T13 (order #21922) */
	if ((c->x86 == 6)) {
		/* Duron Rev A0 */
		if (c->x86_model == 3 && c->x86_mask == 0)
			size = 64;
		/* Tbird rev A1/A2 */
		if (c->x86_model == 4 &&
			(c->x86_mask == 0 || c->x86_mask == 1))
			size = 256;
	}
	return size;
}
#endif
static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
{
	u32 ebx, eax, ecx, edx;
	u16 mask = 0xfff;

	if (c->x86 < 0xf)
		return;

	if (c->extended_cpuid_level < 0x80000006)
		return;

	cpuid(0x80000006, &eax, &ebx, &ecx, &edx);

	tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
	tlb_lli_4k[ENTRIES] = ebx & mask;

	/*
	 * K8 doesn't have 2M/4M entries in the L2 TLB so read out the L1 TLB
	 * characteristics from the CPUID function 0x80000005 instead.
	 */
	if (c->x86 == 0xf) {
		cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
		mask = 0xff;
	}

	/* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!((eax >> 16) & mask))
		tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
	else
		tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;

	/* a 4M entry uses two 2M entries */
	tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;

	/* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!(eax & mask)) {
		/* Erratum 658 */
		if (c->x86 == 0x15 && c->x86_model <= 0x1f) {
			tlb_lli_2m[ENTRIES] = 1024;
		} else {
			cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
			tlb_lli_2m[ENTRIES] = eax & 0xff;
		}
	} else
		tlb_lli_2m[ENTRIES] = eax & mask;

	tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;
}
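/*
 * The tlb_lld_* and tlb_lli_* arrays filled in above feed the generic
 * x86 TLB code; ENTRIES indexes the per-page-size entry counts read from
 * CPUID 0x80000006 (L2) and 0x80000005 (L1).
 */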
static const struct cpu_dev amd_cpu_dev = {
	.c_vendor	= "AMD",
	.c_ident	= { "AuthenticAMD" },
#ifdef CONFIG_X86_32
	.legacy_models = {
		{ .family = 4, .model_names =
		  {
			  [3] = "486 DX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB",
			  [14] = "Am5x86-WT",
			  [15] = "Am5x86-WB"
		  }
		},
	},
	.legacy_cache_size = amd_size_cache,
#endif
	.c_early_init   = early_init_amd,
	.c_detect_tlb	= cpu_detect_tlb_amd,
	.c_bsp_init	= bsp_init_amd,
	.c_init		= init_amd,
	.c_x86_vendor	= X86_VENDOR_AMD,
};

cpu_dev_register(amd_cpu_dev);
/*
 * AMD errata checking
 *
 * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
 * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
 * have an OSVW id assigned, which it takes as first argument. Both take a
 * variable number of family-specific model-stepping ranges created by
 * AMD_MODEL_RANGE().
 *
 * Example:
 *
 * const int amd_erratum_319[] =
 *	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
 *			   AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
 *			   AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
 */

#define AMD_LEGACY_ERRATUM(...)		{ -1, __VA_ARGS__, 0 }
#define AMD_OSVW_ERRATUM(osvw_id, ...)	{ osvw_id, __VA_ARGS__, 0 }
#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
	((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
#define AMD_MODEL_RANGE_FAMILY(range)	(((range) >> 24) & 0xff)
#define AMD_MODEL_RANGE_START(range)	(((range) >> 12) & 0xfff)
#define AMD_MODEL_RANGE_END(range)	((range) & 0xfff)
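/*
 * Worked example: AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf) (used for
 * amd_erratum_400 below) packs family 0xf and the (model, stepping) span
 * 0x41/0x2 .. 0xff/0xf into one int; AMD_MODEL_RANGE_START() then yields
 * 0x412 and AMD_MODEL_RANGE_END() 0xfff, directly comparable with
 * ms = (model << 4) | stepping.
 */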
static const int amd_erratum_400[] =
	AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
			    AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));

static const int amd_erratum_383[] =
	AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
{
	int osvw_id = *erratum++;
	u32 range;
	u32 ms;

	if (osvw_id >= 0 && osvw_id < 65536 &&
	    cpu_has(cpu, X86_FEATURE_OSVW)) {
		u64 osvw_len;

		rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
		if (osvw_id < osvw_len) {
			u64 osvw_bits;

			rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
			    osvw_bits);
			return osvw_bits & (1ULL << (osvw_id & 0x3f));
		}
	}

	/* OSVW unavailable or ID unknown, match family-model-stepping range */
	ms = (cpu->x86_model << 4) | cpu->x86_mask;
	while ((range = *erratum++))
		if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
		    (ms >= AMD_MODEL_RANGE_START(range)) &&
		    (ms <= AMD_MODEL_RANGE_END(range)))
			return true;

	return false;
}
)))