/*
 * (c) 2005-2016 Advanced Micro Devices, Inc.
 * Your use of this code is subject to the terms and conditions of the
 * GNU general public license version 2. See "COPYING" or
 * http://www.gnu.org/licenses/gpl.html
 *
 * Written by Jacob Shin - AMD, Inc.
 * Maintained by: Borislav Petkov <bp@alien8.de>
 *
 * All MC4_MISCi registers are shared between cores on a node.
 */
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/kobject.h>
#include <linux/percpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/string.h>

#include <asm/amd_nb.h>
#include <asm/apic.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/trace/irq_vectors.h>

#define NR_BLOCKS         5
#define THRESHOLD_MAX     0xFFF
#define INT_TYPE_APIC     0x00020000
#define MASK_VALID_HI     0x80000000
#define MASK_CNTP_HI      0x40000000
#define MASK_LOCKED_HI    0x20000000
#define MASK_LVTOFF_HI    0x00F00000
#define MASK_COUNT_EN_HI  0x00080000
#define MASK_INT_TYPE_HI  0x00060000
#define MASK_OVERFLOW_HI  0x00010000
#define MASK_ERR_COUNT_HI 0x00000FFF
#define MASK_BLKPTR_LO    0xFF000000
#define MCG_XBLK_ADDR     0xC0000400

/* Deferred error settings */
#define MSR_CU_DEF_ERR    0xC0000410
#define MASK_DEF_LVTOFF   0x000000F0
#define MASK_DEF_INT_TYPE 0x00000006
#define DEF_LVT_OFF       0x2
#define DEF_INT_TYPE_APIC 0x2

/* Scalable MCA: */

/* Threshold LVT offset is at MSR0xC0000410[15:12] */
#define SMCA_THR_LVT_OFF  0xF000
static bool thresholding_en;

static const char * const th_names[] = {
	"load_store",
	"insn_fetch",
	"combined_unit",
	"decode_unit",
	"northbridge",
	"execution_unit",
};

static const char * const smca_umc_block_names[] = {
	"dram_ecc",
	"misc_umc"
};
struct smca_bank_name {
	const char *name;	/* Short name for sysfs */
	const char *long_name;	/* Long name for pretty-printing */
};

static struct smca_bank_name smca_names[] = {
	[SMCA_LS]	= { "load_store",	"Load Store Unit" },
	[SMCA_IF]	= { "insn_fetch",	"Instruction Fetch Unit" },
	[SMCA_L2_CACHE]	= { "l2_cache",		"L2 Cache" },
	[SMCA_DE]	= { "decode_unit",	"Decode Unit" },
	[SMCA_EX]	= { "execution_unit",	"Execution Unit" },
	[SMCA_FP]	= { "floating_point",	"Floating Point Unit" },
	[SMCA_L3_CACHE]	= { "l3_cache",		"L3 Cache" },
	[SMCA_CS]	= { "coherent_slave",	"Coherent Slave" },
	[SMCA_PIE]	= { "pie",		"Power, Interrupts, etc." },
	[SMCA_UMC]	= { "umc",		"Unified Memory Controller" },
	[SMCA_PB]	= { "param_block",	"Parameter Block" },
	[SMCA_PSP]	= { "psp",		"Platform Security Processor" },
	[SMCA_SMU]	= { "smu",		"System Management Unit" },
};
const char *smca_get_name(enum smca_bank_types t)
{
	if (t >= N_SMCA_BANK_TYPES)
		return NULL;

	return smca_names[t].name;
}

const char *smca_get_long_name(enum smca_bank_types t)
{
	if (t >= N_SMCA_BANK_TYPES)
		return NULL;

	return smca_names[t].long_name;
}
EXPORT_SYMBOL_GPL(smca_get_long_name);
static struct smca_hwid smca_hwid_mcatypes[] = {
	/* { bank_type, hwid_mcatype, xec_bitmap } */

	/* ZN Core (HWID=0xB0) MCA types */
	{ SMCA_LS,	 HWID_MCATYPE(0xB0, 0x0), 0x1FFFEF },
	{ SMCA_IF,	 HWID_MCATYPE(0xB0, 0x1), 0x3FFF },
	{ SMCA_L2_CACHE, HWID_MCATYPE(0xB0, 0x2), 0xF },
	{ SMCA_DE,	 HWID_MCATYPE(0xB0, 0x3), 0x1FF },
	/* HWID 0xB0 MCATYPE 0x4 is Reserved */
	{ SMCA_EX,	 HWID_MCATYPE(0xB0, 0x5), 0x7FF },
	{ SMCA_FP,	 HWID_MCATYPE(0xB0, 0x6), 0x7F },
	{ SMCA_L3_CACHE, HWID_MCATYPE(0xB0, 0x7), 0xFF },

	/* Data Fabric MCA types */
	{ SMCA_CS,	 HWID_MCATYPE(0x2E, 0x0), 0x1FF },
	{ SMCA_PIE,	 HWID_MCATYPE(0x2E, 0x1), 0xF },

	/* Unified Memory Controller MCA type */
	{ SMCA_UMC,	 HWID_MCATYPE(0x96, 0x0), 0x3F },

	/* Parameter Block MCA type */
	{ SMCA_PB,	 HWID_MCATYPE(0x05, 0x0), 0x1 },

	/* Platform Security Processor MCA type */
	{ SMCA_PSP,	 HWID_MCATYPE(0xFF, 0x0), 0x1 },

	/* System Management Unit MCA type */
	{ SMCA_SMU,	 HWID_MCATYPE(0x01, 0x0), 0x1 },
};
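
/*
 * Note: HWID_MCATYPE() (from asm/mce.h) packs the hardware ID and the MCA
 * type into a single lookup key; in current kernels it expands to
 * ((hwid) << 16) | (mcatype), so e.g. HWID_MCATYPE(0xB0, 0x1) keys the
 * Instruction Fetch type on a ZN core. The third field, xec_bitmap, marks
 * which extended error codes are valid for that type.
 */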
struct smca_bank smca_banks[MAX_NR_BANKS];
EXPORT_SYMBOL_GPL(smca_banks);

/*
 * In SMCA enabled processors, we can have multiple banks for a given IP type.
 * So to define a unique name for each bank, we use a temp c-string to append
 * the MCA_IPID[InstanceId] to type's name in get_name().
 *
 * InstanceId is 32 bits which is 8 characters. Make sure MAX_MCATYPE_NAME_LEN
 * is greater than 8 plus 1 (for underscore) plus length of longest type name.
 */
#define MAX_MCATYPE_NAME_LEN	30
static char buf_mcatype[MAX_MCATYPE_NAME_LEN];

static DEFINE_PER_CPU(struct threshold_bank **, threshold_banks);
static DEFINE_PER_CPU(unsigned int, bank_map);	/* see which banks are on */

static void amd_threshold_interrupt(void);
static void amd_deferred_error_interrupt(void);

static void default_deferred_error_interrupt(void)
{
	pr_err("Unexpected deferred interrupt at vector %x\n", DEFERRED_ERROR_VECTOR);
}
void (*deferred_error_int_vector)(void) = default_deferred_error_interrupt;
static void get_smca_bank_info(unsigned int bank)
{
	unsigned int i, hwid_mcatype, cpu = smp_processor_id();
	struct smca_hwid *s_hwid;
	u32 high, instance_id;

	/* Collect bank_info using CPU 0 for now. */
	if (cpu)
		return;

	if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_IPID(bank), &instance_id, &high)) {
		pr_warn("Failed to read MCA_IPID for bank %d\n", bank);
		return;
	}

	hwid_mcatype = HWID_MCATYPE(high & MCI_IPID_HWID,
				    (high & MCI_IPID_MCATYPE) >> 16);

	for (i = 0; i < ARRAY_SIZE(smca_hwid_mcatypes); i++) {
		s_hwid = &smca_hwid_mcatypes[i];
		if (hwid_mcatype == s_hwid->hwid_mcatype) {

			WARN(smca_banks[bank].hwid,
			     "Bank %s already initialized!\n",
			     smca_get_name(s_hwid->bank_type));

			smca_banks[bank].hwid = s_hwid;
			smca_banks[bank].id = instance_id;
			smca_banks[bank].sysfs_id = s_hwid->count++;
			break;
		}
	}
}
struct thresh_restart {
	struct threshold_block	*b;
	int			reset;
	int			set_lvt_off;
	int			lvt_off;
	u16			old_limit;
};

static inline bool is_shared_bank(int bank)
{
	/*
	 * Scalable MCA provides for only one core to have access to the MSRs of
	 * a shared bank.
	 */
	if (mce_flags.smca)
		return false;

	/* Bank 4 is for northbridge reporting and is thus shared */
	return (bank == 4);
}
static const char *bank4_names(const struct threshold_block *b)
{
	switch (b->address) {
	/* MSR4_MISC0 */
	case 0x00000413:
		return "dram";

	case 0xc0000408:
		return "ht_links";

	case 0xc0000409:
		return "l3_cache";

	default:
		WARN(1, "Funny MSR: 0x%08x\n", b->address);
		return "";
	}
}
static bool lvt_interrupt_supported(unsigned int bank, u32 msr_high_bits)
{
	/*
	 * bank 4 supports APIC LVT interrupts implicitly since forever.
	 */
	if (bank == 4)
		return true;

	/*
	 * IntP: interrupt present; if this bit is set, the thresholding
	 * bank can generate APIC LVT interrupts
	 */
	return msr_high_bits & BIT(28);
}
static int lvt_off_valid(struct threshold_block *b, int apic, u32 lo, u32 hi)
{
	int msr = (hi & MASK_LVTOFF_HI) >> 20;

	if (apic < 0) {
		pr_err(FW_BUG "cpu %d, failed to setup threshold interrupt "
		       "for bank %d, block %d (MSR%08X=0x%x%08x)\n", b->cpu,
		       b->bank, b->block, b->address, hi, lo);
		return 0;
	}

	if (apic != msr) {
		/*
		 * On SMCA CPUs, LVT offset is programmed at a different MSR, and
		 * the BIOS provides the value. The original field where LVT offset
		 * was set is reserved. Return early here:
		 */
		if (mce_flags.smca)
			return 0;

		pr_err(FW_BUG "cpu %d, invalid threshold interrupt offset %d "
		       "for bank %d, block %d (MSR%08X=0x%x%08x)\n",
		       b->cpu, apic, b->bank, b->block, b->address, hi, lo);
		return 0;
	}

	return 1;
}
/* Reprogram MCx_MISC MSR behind this threshold bank. */
static void threshold_restart_bank(void *_tr)
{
	struct thresh_restart *tr = _tr;
	u32 hi, lo;

	rdmsr(tr->b->address, lo, hi);

	if (tr->b->threshold_limit < (hi & THRESHOLD_MAX))
		tr->reset = 1;	/* limit cannot be lower than err count */

	if (tr->reset) {		/* reset err count and overflow bit */
		hi =
		    (hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) |
		    (THRESHOLD_MAX - tr->b->threshold_limit);
	} else if (tr->old_limit) {	/* change limit w/o reset */
		int new_count = (hi & THRESHOLD_MAX) +
		    (tr->old_limit - tr->b->threshold_limit);

		hi = (hi & ~MASK_ERR_COUNT_HI) |
		    (new_count & THRESHOLD_MAX);
	}

	/* clear IntType */
	hi &= ~MASK_INT_TYPE_HI;

	if (!tr->b->interrupt_capable)
		goto done;

	if (tr->set_lvt_off) {
		if (lvt_off_valid(tr->b, tr->lvt_off, lo, hi)) {
			/* set new lvt offset */
			hi &= ~MASK_LVTOFF_HI;
			hi |= tr->lvt_off << 20;
		}
	}

	if (tr->b->interrupt_enable)
		hi |= INT_TYPE_APIC;

done:
	hi |= MASK_COUNT_EN_HI;
	wrmsr(tr->b->address, lo, hi);
}
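
/*
 * Worked example of the seeding above: the 12-bit error counter counts up
 * and raises the threshold interrupt when it overflows past THRESHOLD_MAX
 * (0xFFF). A threshold_limit of 10 is therefore programmed as
 * 0xFFF - 10 = 0xFF5, so the counter overflows after exactly 10 errors.
 */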
static void mce_threshold_block_init(struct threshold_block *b, int offset)
{
	struct thresh_restart tr = {
		.b			= b,
		.set_lvt_off		= 1,
		.lvt_off		= offset,
	};

	b->threshold_limit		= THRESHOLD_MAX;
	threshold_restart_bank(&tr);
}
static int setup_APIC_mce_threshold(int reserved, int new)
{
	if (reserved < 0 && !setup_APIC_eilvt(new, THRESHOLD_APIC_VECTOR,
					      APIC_EILVT_MSG_FIX, 0))
		return new;

	return reserved;
}

static int setup_APIC_deferred_error(int reserved, int new)
{
	if (reserved < 0 && !setup_APIC_eilvt(new, DEFERRED_ERROR_VECTOR,
					      APIC_EILVT_MSG_FIX, 0))
		return new;

	return reserved;
}
static void deferred_error_interrupt_enable(struct cpuinfo_x86 *c)
{
	u32 low = 0, high = 0;
	int def_offset = -1, def_new;

	if (rdmsr_safe(MSR_CU_DEF_ERR, &low, &high))
		return;

	def_new = (low & MASK_DEF_LVTOFF) >> 4;
	if (!(low & MASK_DEF_LVTOFF)) {
		pr_err(FW_BUG "Your BIOS is not setting up LVT offset 0x2 for deferred error IRQs correctly.\n");
		def_new = DEF_LVT_OFF;
		low = (low & ~MASK_DEF_LVTOFF) | (DEF_LVT_OFF << 4);
	}

	def_offset = setup_APIC_deferred_error(def_offset, def_new);
	if ((def_offset == def_new) &&
	    (deferred_error_int_vector != amd_deferred_error_interrupt))
		deferred_error_int_vector = amd_deferred_error_interrupt;

	low = (low & ~MASK_DEF_INT_TYPE) | DEF_INT_TYPE_APIC;
	wrmsr(MSR_CU_DEF_ERR, low, high);
}
static u32 get_block_address(unsigned int cpu, u32 current_addr, u32 low, u32 high,
			     unsigned int bank, unsigned int block)
{
	u32 addr = 0, offset = 0;

	if (mce_flags.smca) {
		if (!block) {
			addr = MSR_AMD64_SMCA_MCx_MISC(bank);
		} else {
			/*
			 * For SMCA enabled processors, BLKPTR field of the
			 * first MISC register (MCx_MISC0) indicates presence of
			 * additional MISC register set (MISC1-4).
			 */
			if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high))
				return addr;

			if (!(low & MCI_CONFIG_MCAX))
				return addr;

			if (!rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high) &&
			    (low & MASK_BLKPTR_LO))
				addr = MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1);
		}

		return addr;
	}

	/* Fall back to method we used for older processors: */
	switch (block) {
	case 0:
		addr = msr_ops.misc(bank);
		break;
	case 1:
		offset = ((low & MASK_BLKPTR_LO) >> 21);
		if (offset)
			addr = MCG_XBLK_ADDR + offset;
		break;
	default:
		addr = ++current_addr;
	}
	return addr;
}
static int
prepare_threshold_block(unsigned int bank, unsigned int block, u32 addr,
			int offset, u32 misc_high)
{
	unsigned int cpu = smp_processor_id();
	u32 smca_low, smca_high, smca_addr;
	struct threshold_block b;
	int new;

	if (!block)
		per_cpu(bank_map, cpu) |= (1 << bank);

	memset(&b, 0, sizeof(b));
	b.cpu			= cpu;
	b.bank			= bank;
	b.block			= block;
	b.address		= addr;
	b.interrupt_capable	= lvt_interrupt_supported(bank, misc_high);

	if (!b.interrupt_capable)
		goto done;

	b.interrupt_enable = 1;

	if (!mce_flags.smca) {
		new = (misc_high & MASK_LVTOFF_HI) >> 20;
		goto set_offset;
	}

	smca_addr = MSR_AMD64_SMCA_MCx_CONFIG(bank);

	if (!rdmsr_safe(smca_addr, &smca_low, &smca_high)) {
		/*
		 * OS is required to set the MCAX bit to acknowledge that it is
		 * now using the new MSR ranges and new registers under each
		 * bank. It also means that the OS will configure deferred
		 * errors in the new MCx_CONFIG register. If the bit is not set,
		 * uncorrectable errors will cause a system panic.
		 *
		 * MCA_CONFIG[MCAX] is bit 32 (0 in the high portion of the MSR.)
		 */
		smca_high |= BIT(0);

		/*
		 * SMCA logs Deferred Error information in MCA_DE{STAT,ADDR}
		 * registers with the option of additionally logging to
		 * MCA_{STATUS,ADDR} if MCA_CONFIG[LogDeferredInMcaStat] is set.
		 *
		 * This bit is usually set by BIOS to retain the old behavior
		 * for OSes that don't use the new registers. Linux supports the
		 * new registers so let's disable that additional logging here.
		 *
		 * MCA_CONFIG[LogDeferredInMcaStat] is bit 34 (bit 2 in the high
		 * portion of the MSR).
		 */
		smca_high &= ~BIT(2);

		/*
		 * SMCA sets the Deferred Error Interrupt type per bank.
		 *
		 * MCA_CONFIG[DeferredIntTypeSupported] is bit 5, and tells us
		 * if the DeferredIntType bit field is available.
		 *
		 * MCA_CONFIG[DeferredIntType] is bits [38:37] ([6:5] in the
		 * high portion of the MSR). OS should set this to 0x1 to enable
		 * APIC based interrupt. First, check that no interrupt has been
		 * set.
		 */
		if ((smca_low & BIT(5)) && !((smca_high >> 5) & 0x3))
			smca_high |= BIT(5);

		wrmsr(smca_addr, smca_low, smca_high);
	}

	/* Gather LVT offset for thresholding: */
	if (rdmsr_safe(MSR_CU_DEF_ERR, &smca_low, &smca_high))
		goto out;

	new = (smca_low & SMCA_THR_LVT_OFF) >> 12;

set_offset:
	offset = setup_APIC_mce_threshold(offset, new);

	if ((offset == new) && (mce_threshold_vector != amd_threshold_interrupt))
		mce_threshold_vector = amd_threshold_interrupt;

done:
	mce_threshold_block_init(&b, offset);

out:
	return offset;
}
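
/*
 * To summarize the MCA_CONFIG programming above for SMCA systems: MCAX
 * (bit 32) is set to acknowledge the new register space,
 * LogDeferredInMcaStat (bit 34) is cleared so deferred errors land only in
 * MCA_DE{STAT,ADDR}, and DeferredIntType (bits 38:37) is set to 0x1 to
 * deliver deferred errors through the APIC LVT.
 */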
/* cpu init entry point, called from mce.c with preempt off */
void mce_amd_feature_init(struct cpuinfo_x86 *c)
{
	u32 low = 0, high = 0, address = 0;
	unsigned int bank, block, cpu = smp_processor_id();
	int offset = -1;

	for (bank = 0; bank < mca_cfg.banks; ++bank) {
		if (mce_flags.smca)
			get_smca_bank_info(bank);

		for (block = 0; block < NR_BLOCKS; ++block) {
			address = get_block_address(cpu, address, low, high, bank, block);
			if (!address)
				break;

			if (rdmsr_safe(address, &low, &high))
				break;

			if (!(high & MASK_VALID_HI))
				continue;

			if (!(high & MASK_CNTP_HI)  ||
			     (high & MASK_LOCKED_HI))
				continue;

			offset = prepare_threshold_block(bank, block, address, offset, high);
		}
	}

	if (mce_flags.succor)
		deferred_error_interrupt_enable(c);
}
int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr)
{
	u64 dram_base_addr, dram_limit_addr, dram_hole_base;
	/* We start from the normalized address */
	u64 ret_addr = norm_addr;

	u32 tmp;

	u8 die_id_shift, die_id_mask, socket_id_shift, socket_id_mask;
	u8 intlv_num_dies, intlv_num_chan, intlv_num_sockets;
	u8 intlv_addr_sel, intlv_addr_bit;
	u8 num_intlv_bits, hashed_bit;
	u8 lgcy_mmio_hole_en, base = 0;
	u8 cs_mask, cs_id = 0;
	bool hash_enabled = false;

	/* Read D18F0x1B4 (DramOffset), check if base 1 is used. */
	if (amd_df_indirect_read(nid, 0, 0x1B4, umc, &tmp))
		goto out_err;

	/* Remove HiAddrOffset from normalized address, if enabled: */
	if (tmp & BIT(0)) {
		u64 hi_addr_offset = (tmp & GENMASK_ULL(31, 20)) << 8;

		if (norm_addr >= hi_addr_offset) {
			ret_addr -= hi_addr_offset;
			base = 1;
		}
	}

	/* Read D18F0x110 (DramBaseAddress). */
	if (amd_df_indirect_read(nid, 0, 0x110 + (8 * base), umc, &tmp))
		goto out_err;

	/* Check if address range is valid. */
	if (!(tmp & BIT(0))) {
		pr_err("%s: Invalid DramBaseAddress range: 0x%x.\n",
			__func__, tmp);
		goto out_err;
	}

	lgcy_mmio_hole_en = tmp & BIT(1);
	intlv_num_chan	  = (tmp >> 4) & 0xF;
	intlv_addr_sel	  = (tmp >> 8) & 0x7;
	dram_base_addr	  = (tmp & GENMASK_ULL(31, 12)) << 16;

	/* {0, 1, 2, 3} map to address bits {8, 9, 10, 11} respectively */
	if (intlv_addr_sel > 3) {
		pr_err("%s: Invalid interleave address select %d.\n",
			__func__, intlv_addr_sel);
		goto out_err;
	}

	/* Read D18F0x114 (DramLimitAddress). */
	if (amd_df_indirect_read(nid, 0, 0x114 + (8 * base), umc, &tmp))
		goto out_err;

	intlv_num_sockets = (tmp >> 8) & 0x1;
	intlv_num_dies	  = (tmp >> 10) & 0x3;
	dram_limit_addr	  = ((tmp & GENMASK_ULL(31, 12)) << 16) | GENMASK_ULL(27, 0);

	intlv_addr_bit = intlv_addr_sel + 8;

	/* Re-use intlv_num_chan by setting it equal to log2(#channels) */
	switch (intlv_num_chan) {
	case 0:	intlv_num_chan = 0; break;
	case 1: intlv_num_chan = 1; break;
	case 3: intlv_num_chan = 2; break;
	case 5:	intlv_num_chan = 3; break;
	case 7:	intlv_num_chan = 4; break;

	case 8: intlv_num_chan = 1;
		hash_enabled = true;
		break;
	default:
		pr_err("%s: Invalid number of interleaved channels %d.\n",
			__func__, intlv_num_chan);
		goto out_err;
	}

	num_intlv_bits = intlv_num_chan;

	if (intlv_num_dies > 2) {
		pr_err("%s: Invalid number of interleaved nodes/dies %d.\n",
			__func__, intlv_num_dies);
		goto out_err;
	}

	num_intlv_bits += intlv_num_dies;

	/* Add a bit if sockets are interleaved. */
	num_intlv_bits += intlv_num_sockets;

	/* Assert num_intlv_bits <= 4 */
	if (num_intlv_bits > 4) {
		pr_err("%s: Invalid interleave bits %d.\n",
			__func__, num_intlv_bits);
		goto out_err;
	}

	if (num_intlv_bits > 0) {
		u64 temp_addr_x, temp_addr_i, temp_addr_y;
		u8 die_id_bit, sock_id_bit, cs_fabric_id;

		/*
		 * Read FabricBlockInstanceInformation3_CS[BlockFabricID].
		 * This is the fabric id for this coherent slave. Use
		 * umc/channel# as instance id of the coherent slave
		 * for FICAA.
		 */
		if (amd_df_indirect_read(nid, 0, 0x50, umc, &tmp))
			goto out_err;

		cs_fabric_id = (tmp >> 8) & 0xFF;
		die_id_bit   = 0;

		/* If interleaved over more than 1 channel: */
		if (intlv_num_chan) {
			die_id_bit = intlv_num_chan;
			cs_mask	   = (1 << die_id_bit) - 1;
			cs_id	   = cs_fabric_id & cs_mask;
		}

		sock_id_bit = die_id_bit;

		/* Read D18F1x208 (SystemFabricIdMask). */
		if (intlv_num_dies || intlv_num_sockets)
			if (amd_df_indirect_read(nid, 1, 0x208, umc, &tmp))
				goto out_err;

		/* If interleaved over more than 1 die. */
		if (intlv_num_dies) {
			sock_id_bit  = die_id_bit + intlv_num_dies;
			die_id_shift = (tmp >> 24) & 0xF;
			die_id_mask  = (tmp >> 8) & 0xFF;

			cs_id |= ((cs_fabric_id & die_id_mask) >> die_id_shift) << die_id_bit;
		}

		/* If interleaved over more than 1 socket. */
		if (intlv_num_sockets) {
			socket_id_shift	= (tmp >> 28) & 0xF;
			socket_id_mask	= (tmp >> 16) & 0xFF;

			cs_id |= ((cs_fabric_id & socket_id_mask) >> socket_id_shift) << sock_id_bit;
		}

		/*
		 * The pre-interleaved address consists of XXXXXXIIIYYYYY
		 * where III is the ID for this CS, and XXXXXXYYYYY are the
		 * address bits from the post-interleaved address.
		 * "num_intlv_bits" has been calculated to tell us how many "I"
		 * bits there are. "intlv_addr_bit" tells us how many "Y" bits
		 * there are (where "I" starts).
		 */
		temp_addr_y = ret_addr & GENMASK_ULL(intlv_addr_bit-1, 0);
		temp_addr_i = (cs_id << intlv_addr_bit);
		temp_addr_x = (ret_addr & GENMASK_ULL(63, intlv_addr_bit)) << num_intlv_bits;
		ret_addr    = temp_addr_x | temp_addr_i | temp_addr_y;
	}
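
	/*
	 * Worked example with illustrative values: intlv_addr_bit = 8,
	 * num_intlv_bits = 1, cs_id = 1 and a normalized ret_addr of 0x1234
	 * gives Y = 0x34 (bits [7:0]) and X = 0x12 (bits [63:8]);
	 * re-inserting the CS ID yields (0x12 << 9) | (1 << 8) | 0x34 =
	 * 0x2534.
	 */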
	/* Add dram base address */
	ret_addr += dram_base_addr;

	/* If legacy MMIO hole enabled */
	if (lgcy_mmio_hole_en) {
		if (amd_df_indirect_read(nid, 0, 0x104, umc, &tmp))
			goto out_err;

		dram_hole_base = tmp & GENMASK(31, 24);
		if (ret_addr >= dram_hole_base)
			ret_addr += (BIT_ULL(32) - dram_hole_base);
	}

	if (hash_enabled) {
		/* Save some parentheses and grab ls-bit at the end. */
		hashed_bit =	(ret_addr >> 12) ^
				(ret_addr >> 18) ^
				(ret_addr >> 21) ^
				(ret_addr >> 30) ^
				cs_id;

		hashed_bit &= BIT(0);

		if (hashed_bit != ((ret_addr >> intlv_addr_bit) & BIT(0)))
			ret_addr ^= BIT(intlv_addr_bit);
	}
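
	/*
	 * The XOR above folds address bits 12, 18, 21 and 30 together with
	 * the CS ID to recompute the hashed channel-select bit; if it
	 * disagrees with the bit currently at intlv_addr_bit, that bit is
	 * flipped so the result matches the hashing the fabric applied when
	 * it normalized the address.
	 */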
	/* Is the calculated system address above the DRAM limit address? */
	if (ret_addr > dram_limit_addr)
		goto out_err;

	*sys_addr = ret_addr;
	return 0;

out_err:
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(umc_normaddr_to_sysaddr);
static void
__log_error(unsigned int bank, bool deferred_err, bool threshold_err, u64 misc)
{
	u32 msr_status = msr_ops.status(bank);
	u32 msr_addr = msr_ops.addr(bank);
	struct mce m;
	u64 status;

	WARN_ON_ONCE(deferred_err && threshold_err);

	if (deferred_err && mce_flags.smca) {
		msr_status = MSR_AMD64_SMCA_MCx_DESTAT(bank);
		msr_addr = MSR_AMD64_SMCA_MCx_DEADDR(bank);
	}

	rdmsrl(msr_status, status);

	if (!(status & MCI_STATUS_VAL))
		return;

	mce_setup(&m);

	m.status = status;
	m.bank = bank;

	if (threshold_err)
		m.misc = misc;

	if (m.status & MCI_STATUS_ADDRV) {
		rdmsrl(msr_addr, m.addr);

		/*
		 * Extract [55:<lsb>] where lsb is the least significant
		 * *valid* bit of the address bits.
		 */
		if (mce_flags.smca) {
			u8 lsb = (m.addr >> 56) & 0x3f;

			m.addr &= GENMASK_ULL(55, lsb);
		}
	}

	if (mce_flags.smca) {
		rdmsrl(MSR_AMD64_SMCA_MCx_IPID(bank), m.ipid);

		if (m.status & MCI_STATUS_SYNDV)
			rdmsrl(MSR_AMD64_SMCA_MCx_SYND(bank), m.synd);
	}

	mce_log(&m);

	wrmsrl(msr_status, 0);
}
static inline void __smp_deferred_error_interrupt(void)
{
	inc_irq_stat(irq_deferred_error_count);
	deferred_error_int_vector();
}

asmlinkage __visible void __irq_entry smp_deferred_error_interrupt(void)
{
	entering_irq();
	__smp_deferred_error_interrupt();
	exiting_ack_irq();
}

asmlinkage __visible void __irq_entry smp_trace_deferred_error_interrupt(void)
{
	entering_irq();
	trace_deferred_error_apic_entry(DEFERRED_ERROR_VECTOR);
	__smp_deferred_error_interrupt();
	trace_deferred_error_apic_exit(DEFERRED_ERROR_VECTOR);
	exiting_ack_irq();
}
/* APIC interrupt handler for deferred errors */
static void amd_deferred_error_interrupt(void)
{
	unsigned int bank;
	u32 msr_status;
	u64 status;

	for (bank = 0; bank < mca_cfg.banks; ++bank) {
		msr_status = (mce_flags.smca) ? MSR_AMD64_SMCA_MCx_DESTAT(bank)
					      : msr_ops.status(bank);

		rdmsrl(msr_status, status);

		if (!(status & MCI_STATUS_VAL) ||
		    !(status & MCI_STATUS_DEFERRED))
			continue;

		__log_error(bank, true, false, 0);
		break;
	}
}
/*
 * APIC Interrupt Handler
 */

/*
 * threshold interrupt handler will service THRESHOLD_APIC_VECTOR.
 * the interrupt goes off when error_count reaches threshold_limit.
 * the handler will simply log mcelog w/ software defined bank number.
 */

static void amd_threshold_interrupt(void)
{
	u32 low = 0, high = 0, address = 0;
	unsigned int bank, block, cpu = smp_processor_id();
	struct thresh_restart tr;

	/* assume first bank caused it */
	for (bank = 0; bank < mca_cfg.banks; ++bank) {
		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
			continue;
		for (block = 0; block < NR_BLOCKS; ++block) {
			address = get_block_address(cpu, address, low, high, bank, block);
			if (!address)
				break;

			if (rdmsr_safe(address, &low, &high))
				break;

			if (!(high & MASK_VALID_HI)) {
				if (block)
					continue;
				else
					break;
			}

			if (!(high & MASK_CNTP_HI)  ||
			     (high & MASK_LOCKED_HI))
				continue;

			/*
			 * Log the machine check that caused the threshold
			 * event.
			 */
			if (high & MASK_OVERFLOW_HI)
				goto log;
		}
	}
	return;

log:
	__log_error(bank, false, true, ((u64)high << 32) | low);

	/* Reset threshold block after logging error. */
	memset(&tr, 0, sizeof(tr));
	tr.b = &per_cpu(threshold_banks, cpu)[bank]->blocks[block];
	threshold_restart_bank(&tr);
}

/*
 * Sysfs Interface
 */
struct threshold_attr {
	struct attribute attr;
	ssize_t (*show) (struct threshold_block *, char *);
	ssize_t (*store) (struct threshold_block *, const char *, size_t count);
};

#define SHOW_FIELDS(name)						\
static ssize_t show_ ## name(struct threshold_block *b, char *buf)	\
{									\
	return sprintf(buf, "%lu\n", (unsigned long) b->name);		\
}
SHOW_FIELDS(interrupt_enable)
SHOW_FIELDS(threshold_limit)
static ssize_t
store_interrupt_enable(struct threshold_block *b, const char *buf, size_t size)
{
	struct thresh_restart tr;
	unsigned long new;

	if (!b->interrupt_capable)
		return -EINVAL;

	if (kstrtoul(buf, 0, &new) < 0)
		return -EINVAL;

	b->interrupt_enable = !!new;

	memset(&tr, 0, sizeof(tr));
	tr.b = b;

	smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);

	return size;
}
static ssize_t
store_threshold_limit(struct threshold_block *b, const char *buf, size_t size)
{
	struct thresh_restart tr;
	unsigned long new;

	if (kstrtoul(buf, 0, &new) < 0)
		return -EINVAL;

	if (new > THRESHOLD_MAX)
		new = THRESHOLD_MAX;
	if (new < 1)
		new = 1;

	memset(&tr, 0, sizeof(tr));
	tr.old_limit = b->threshold_limit;
	b->threshold_limit = new;
	tr.b = b;

	smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);

	return size;
}
static ssize_t
show_error_count(struct threshold_block *b, char *buf)
{
	u32 lo, hi;

	rdmsr_on_cpu(b->cpu, b->address, &lo, &hi);

	return sprintf(buf, "%u\n", ((hi & THRESHOLD_MAX) -
				     (THRESHOLD_MAX - b->threshold_limit)));
}
static struct threshold_attr error_count = {
	.attr = {.name = __stringify(error_count), .mode = 0444 },
	.show = show_error_count,
};

#define RW_ATTR(val)							\
static struct threshold_attr val = {					\
	.attr	= {.name = __stringify(val), .mode = 0644 },		\
	.show	= show_## val,						\
	.store	= store_## val,						\
};

RW_ATTR(interrupt_enable);
RW_ATTR(threshold_limit);
static struct attribute *default_attrs[] = {
	&threshold_limit.attr,
	&error_count.attr,
	NULL,	/* possibly interrupt_enable if supported, see below */
};

#define to_block(k)	container_of(k, struct threshold_block, kobj)
#define to_attr(a)	container_of(a, struct threshold_attr, attr)
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct threshold_block *b = to_block(kobj);
	struct threshold_attr *a = to_attr(attr);
	ssize_t ret;

	ret = a->show ? a->show(b, buf) : -EIO;

	return ret;
}
static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct threshold_block *b = to_block(kobj);
	struct threshold_attr *a = to_attr(attr);
	ssize_t ret;

	ret = a->store ? a->store(b, buf, count) : -EIO;

	return ret;
}
static const struct sysfs_ops threshold_ops = {
	.show			= show,
	.store			= store,
};

static struct kobj_type threshold_ktype = {
	.sysfs_ops		= &threshold_ops,
	.default_attrs		= default_attrs,
};
static const char *get_name(unsigned int bank, struct threshold_block *b)
{
	unsigned int bank_type;

	if (!mce_flags.smca) {
		if (b && bank == 4)
			return bank4_names(b);

		return th_names[bank];
	}

	if (!smca_banks[bank].hwid)
		return NULL;

	bank_type = smca_banks[bank].hwid->bank_type;

	if (b && bank_type == SMCA_UMC) {
		if (b->block < ARRAY_SIZE(smca_umc_block_names))
			return smca_umc_block_names[b->block];
		return NULL;
	}

	if (smca_banks[bank].hwid->count == 1)
		return smca_get_name(bank_type);

	snprintf(buf_mcatype, MAX_MCATYPE_NAME_LEN,
		 "%s_%x", smca_get_name(bank_type),
			  smca_banks[bank].sysfs_id);
	return buf_mcatype;
}
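
/*
 * E.g. with two UMC instances on a node, the "%s_%x" format above produces
 * the sysfs names "umc_0" and "umc_1", while a bank type with a single
 * instance keeps the plain name from smca_names[].
 */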
static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
				     unsigned int block, u32 address)
{
	struct threshold_block *b = NULL;
	u32 low, high;
	int err;

	if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS))
		return 0;

	if (rdmsr_safe_on_cpu(cpu, address, &low, &high))
		return 0;

	if (!(high & MASK_VALID_HI)) {
		if (block)
			goto recurse;
		else
			return 0;
	}

	if (!(high & MASK_CNTP_HI)  ||
	     (high & MASK_LOCKED_HI))
		goto recurse;

	b = kzalloc(sizeof(struct threshold_block), GFP_KERNEL);
	if (!b)
		return -ENOMEM;

	b->block		= block;
	b->bank			= bank;
	b->cpu			= cpu;
	b->address		= address;
	b->interrupt_enable	= 0;
	b->interrupt_capable	= lvt_interrupt_supported(bank, high);
	b->threshold_limit	= THRESHOLD_MAX;

	if (b->interrupt_capable) {
		threshold_ktype.default_attrs[2] = &interrupt_enable.attr;
		b->interrupt_enable = 1;
	} else {
		threshold_ktype.default_attrs[2] = NULL;
	}

	INIT_LIST_HEAD(&b->miscj);

	if (per_cpu(threshold_banks, cpu)[bank]->blocks) {
		list_add(&b->miscj,
			 &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
	} else {
		per_cpu(threshold_banks, cpu)[bank]->blocks = b;
	}

	err = kobject_init_and_add(&b->kobj, &threshold_ktype,
				   per_cpu(threshold_banks, cpu)[bank]->kobj,
				   get_name(bank, b));
	if (err)
		goto out_free;
recurse:
	address = get_block_address(cpu, address, low, high, bank, ++block);
	if (!address)
		return 0;

	err = allocate_threshold_blocks(cpu, bank, block, address);
	if (err)
		goto out_free;

	if (b)
		kobject_uevent(&b->kobj, KOBJ_ADD);

	return err;

out_free:
	if (b) {
		kobject_put(&b->kobj);
		list_del(&b->miscj);
		kfree(b);
	}
	return err;
}
static int __threshold_add_blocks(struct threshold_bank *b)
{
	struct list_head *head = &b->blocks->miscj;
	struct threshold_block *pos = NULL;
	struct threshold_block *tmp = NULL;
	int err = 0;

	err = kobject_add(&b->blocks->kobj, b->kobj, b->blocks->kobj.name);
	if (err)
		return err;

	list_for_each_entry_safe(pos, tmp, head, miscj) {

		err = kobject_add(&pos->kobj, b->kobj, pos->kobj.name);
		if (err) {
			list_for_each_entry_safe_reverse(pos, tmp, head, miscj)
				kobject_del(&pos->kobj);

			return err;
		}
	}
	return err;
}
static int threshold_create_bank(unsigned int cpu, unsigned int bank)
{
	struct device *dev = per_cpu(mce_device, cpu);
	struct amd_northbridge *nb = NULL;
	struct threshold_bank *b = NULL;
	const char *name = get_name(bank, NULL);
	int err = 0;

	if (is_shared_bank(bank)) {
		nb = node_to_amd_nb(amd_get_nb_id(cpu));

		/* threshold descriptor already initialized on this node? */
		if (nb && nb->bank4) {
			/* yes, use it */
			b = nb->bank4;
			err = kobject_add(b->kobj, &dev->kobj, name);
			if (err)
				goto out;

			per_cpu(threshold_banks, cpu)[bank] = b;
			atomic_inc(&b->cpus);

			err = __threshold_add_blocks(b);

			goto out;
		}
	}

	b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL);
	if (!b) {
		err = -ENOMEM;
		goto out;
	}

	b->kobj = kobject_create_and_add(name, &dev->kobj);
	if (!b->kobj) {
		err = -EINVAL;
		goto out_free;
	}

	per_cpu(threshold_banks, cpu)[bank] = b;

	if (is_shared_bank(bank)) {
		atomic_set(&b->cpus, 1);

		/* nb is already initialized, see above */
		if (nb) {
			WARN_ON(nb->bank4);
			nb->bank4 = b;
		}
	}

	err = allocate_threshold_blocks(cpu, bank, 0, msr_ops.misc(bank));
	if (!err)
		goto out;

 out_free:
	kfree(b);

 out:
	return err;
}
static void deallocate_threshold_block(unsigned int cpu,
				       unsigned int bank)
{
	struct threshold_block *pos = NULL;
	struct threshold_block *tmp = NULL;
	struct threshold_bank *head = per_cpu(threshold_banks, cpu)[bank];

	if (!head)
		return;

	list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
		kobject_put(&pos->kobj);
		list_del(&pos->miscj);
		kfree(pos);
	}

	kfree(per_cpu(threshold_banks, cpu)[bank]->blocks);
	per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
}
static void __threshold_remove_blocks(struct threshold_bank *b)
{
	struct threshold_block *pos = NULL;
	struct threshold_block *tmp = NULL;

	kobject_del(b->kobj);

	list_for_each_entry_safe(pos, tmp, &b->blocks->miscj, miscj)
		kobject_del(&pos->kobj);
}
static void threshold_remove_bank(unsigned int cpu, int bank)
{
	struct amd_northbridge *nb;
	struct threshold_bank *b;

	b = per_cpu(threshold_banks, cpu)[bank];
	if (!b)
		return;

	if (!b->blocks)
		goto free_out;

	if (is_shared_bank(bank)) {
		if (!atomic_dec_and_test(&b->cpus)) {
			__threshold_remove_blocks(b);
			per_cpu(threshold_banks, cpu)[bank] = NULL;
			return;
		} else {
			/*
			 * the last CPU on this node using the shared bank is
			 * going away, remove that bank now.
			 */
			nb = node_to_amd_nb(amd_get_nb_id(cpu));
			nb->bank4 = NULL;
		}
	}

	deallocate_threshold_block(cpu, bank);

free_out:
	kobject_del(b->kobj);
	kobject_put(b->kobj);
	kfree(b);
	per_cpu(threshold_banks, cpu)[bank] = NULL;
}
int mce_threshold_remove_device(unsigned int cpu)
{
	unsigned int bank;

	if (!thresholding_en)
		return 0;

	for (bank = 0; bank < mca_cfg.banks; ++bank) {
		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
			continue;
		threshold_remove_bank(cpu, bank);
	}
	kfree(per_cpu(threshold_banks, cpu));
	per_cpu(threshold_banks, cpu) = NULL;
	return 0;
}
/* create dir/files for all valid threshold banks */
int mce_threshold_create_device(unsigned int cpu)
{
	unsigned int bank;
	struct threshold_bank **bp;
	int err = 0;

	if (!thresholding_en)
		return 0;

	bp = per_cpu(threshold_banks, cpu);
	if (bp)
		return 0;

	bp = kzalloc(sizeof(struct threshold_bank *) * mca_cfg.banks,
		     GFP_KERNEL);
	if (!bp)
		return -ENOMEM;

	per_cpu(threshold_banks, cpu) = bp;

	for (bank = 0; bank < mca_cfg.banks; ++bank) {
		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
			continue;
		err = threshold_create_bank(cpu, bank);
		if (err)
			goto err;
	}
	return err;
err:
	mce_threshold_remove_device(cpu);
	return err;
}
static __init int threshold_init_device(void)
{
	unsigned lcpu = 0;

	if (mce_threshold_vector == amd_threshold_interrupt)
		thresholding_en = true;

	/* to hit CPUs online before the notifier is up */
	for_each_online_cpu(lcpu) {
		int err = mce_threshold_create_device(lcpu);

		if (err)
			return err;
	}

	return 0;
}

/*
 * there are 3 funcs which need to be _initcalled in a logic sequence:
 * 1. xen_late_init_mcelog
 * 2. mcheck_init_device
 * 3. threshold_init_device
 *
 * xen_late_init_mcelog must register xen_mce_chrdev_device before
 * native mce_chrdev_device registration if running under xen platform;
 *
 * mcheck_init_device should be inited before threshold_init_device to
 * initialize mce_device, otherwise a NULL ptr dereference will cause panic.
 *
 * so we use following _initcalls
 * 1. device_initcall(xen_late_init_mcelog);
 * 2. device_initcall_sync(mcheck_init_device);
 * 3. late_initcall(threshold_init_device);
 *
 * when running under xen, the initcall order is 1,2,3;
 * on baremetal, we skip 1 and we do only 2 and 3.
 */
late_initcall(threshold_init_device);