/*
 *  (c) 2005-2016 Advanced Micro Devices, Inc.
 *  Your use of this code is subject to the terms and conditions of the
 *  GNU general public license version 2. See "COPYING" or
 *  http://www.gnu.org/licenses/gpl.html
 *
 *  Written by Jacob Shin - AMD, Inc.
 *  Maintained by: Borislav Petkov <bp@alien8.de>
 *
 *  All MC4_MISCi registers are shared between cores on a node.
 */

#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/kobject.h>
#include <linux/percpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/smp.h>

#include <asm/amd_nb.h>
#include <asm/apic.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/trace/irq_vectors.h>

#define THRESHOLD_MAX     0xFFF
#define INT_TYPE_APIC     0x00020000
#define MASK_VALID_HI     0x80000000
#define MASK_CNTP_HI      0x40000000
#define MASK_LOCKED_HI    0x20000000
#define MASK_LVTOFF_HI    0x00F00000
#define MASK_COUNT_EN_HI  0x00080000
#define MASK_INT_TYPE_HI  0x00060000
#define MASK_OVERFLOW_HI  0x00010000
#define MASK_ERR_COUNT_HI 0x00000FFF
#define MASK_BLKPTR_LO    0xFF000000
#define MCG_XBLK_ADDR     0xC0000400

/* Deferred error settings */
#define MSR_CU_DEF_ERR		0xC0000410
#define MASK_DEF_LVTOFF		0x000000F0
#define MASK_DEF_INT_TYPE	0x00000006
#define DEF_LVT_OFF		0x2
#define DEF_INT_TYPE_APIC	0x2

/* Threshold LVT offset is at MSR0xC0000410[15:12] */
#define SMCA_THR_LVT_OFF	0xF000
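
/*
 * Illustrative sketch, not taken from the original flow: the SMCA
 * thresholding path further down extracts this field from MSR_CU_DEF_ERR
 * like so ("thr_lvt_off" is a hypothetical local used only for illustration):
 *
 *	u32 low, high;
 *
 *	if (!rdmsr_safe(MSR_CU_DEF_ERR, &low, &high))
 *		thr_lvt_off = (low & SMCA_THR_LVT_OFF) >> 12;
 */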

/*
 * OS is required to set the MCAX bit to acknowledge that it is now using the
 * new MSR ranges and new registers under each bank. It also means that the OS
 * will configure deferred errors in the new MCx_CONFIG register. If the bit is
 * not set, uncorrectable errors will cause a system panic.
 */
#define SMCA_MCAX_EN_OFF	0x1
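
/*
 * Illustrative sketch, not part of the original comment: acknowledging MCAX
 * for a bank amounts to setting this bit in the high half of that bank's
 * MCx_CONFIG MSR, mirroring what prepare_threshold_block() does below:
 *
 *	u32 low, high;
 *
 *	if (!rdmsr_safe(MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high)) {
 *		high |= SMCA_MCAX_EN_OFF;
 *		wrmsr(MSR_AMD64_SMCA_MCx_CONFIG(bank), low, high);
 *	}
 */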

static const char * const th_names[] = {
	"load_store",
	"insn_fetch",
	"combined_unit",
	"",
	"northbridge",
	"execution_unit",
};

/* Define HWID to IP type mappings for Scalable MCA */
struct amd_hwid amd_hwids[] = {
	[SMCA_F17H_CORE]	= { "f17h_core",	0xB0 },
	[SMCA_DF]		= { "data_fabric",	0x2E },
	[SMCA_UMC]		= { "umc",		0x96 },
	[SMCA_PB]		= { "param_block",	0x5 },
	[SMCA_PSP]		= { "psp",		0xFF },
	[SMCA_SMU]		= { "smu",		0x1 },
};
EXPORT_SYMBOL_GPL(amd_hwids);

const char * const amd_core_mcablock_names[] = {
	[SMCA_LS]	= "load_store",
	[SMCA_IF]	= "insn_fetch",
	[SMCA_L2_CACHE]	= "l2_cache",
	[SMCA_DE]	= "decode_unit",
	[SMCA_EX]	= "execution_unit",
	[SMCA_FP]	= "floating_point",
	[SMCA_L3_CACHE]	= "l3_cache",
};
EXPORT_SYMBOL_GPL(amd_core_mcablock_names);

const char * const amd_df_mcablock_names[] = {
	[SMCA_CS]	= "coherent_slave",
};
EXPORT_SYMBOL_GPL(amd_df_mcablock_names);

static DEFINE_PER_CPU(struct threshold_bank **, threshold_banks);
static DEFINE_PER_CPU(unsigned char, bank_map);	/* see which banks are on */

static void amd_threshold_interrupt(void);
static void amd_deferred_error_interrupt(void);

static void default_deferred_error_interrupt(void)
{
	pr_err("Unexpected deferred interrupt at vector %x\n", DEFERRED_ERROR_VECTOR);
}

void (*deferred_error_int_vector)(void) = default_deferred_error_interrupt;

struct thresh_restart {
	struct threshold_block	*b;
	int			reset;
	int			set_lvt_off;
	int			lvt_off;
	u16			old_limit;
};

static inline bool is_shared_bank(int bank)
{
	/*
	 * Scalable MCA provides for only one core to have access to the MSRs
	 * of a shared bank.
	 */
	if (mce_flags.smca)
		return false;

	/* Bank 4 is for northbridge reporting and is thus shared */
	return (bank == 4);
}

static const char *bank4_names(const struct threshold_block *b)
{
	switch (b->address) {
	/* MSR4_MISC0 */
	case 0x00000413:
		return "dram";
	case 0xc0000408:
		return "ht_links";
	case 0xc0000409:
		return "l3_cache";
	default:
		WARN(1, "Funny MSR: 0x%08x\n", b->address);
		return "";
	}
}

static bool lvt_interrupt_supported(unsigned int bank, u32 msr_high_bits)
{
	/*
	 * bank 4 supports APIC LVT interrupts implicitly since forever.
	 */
	if (bank == 4)
		return true;

	/*
	 * IntP: interrupt present; if this bit is set, the thresholding
	 * bank can generate APIC LVT interrupts
	 */
	return msr_high_bits & BIT(28);
}

static int lvt_off_valid(struct threshold_block *b, int apic, u32 lo, u32 hi)
{
	int msr = (hi & MASK_LVTOFF_HI) >> 20;

	if (apic < 0) {
		pr_err(FW_BUG "cpu %d, failed to setup threshold interrupt "
		       "for bank %d, block %d (MSR%08X=0x%x%08x)\n", b->cpu,
		       b->bank, b->block, b->address, hi, lo);
		return 0;
	}

	if (apic != msr) {
		/*
		 * On SMCA CPUs, LVT offset is programmed at a different MSR, and
		 * the BIOS provides the value. The original field where LVT offset
		 * was set is reserved. Return early here:
		 */
		if (mce_flags.smca)
			return 0;

		pr_err(FW_BUG "cpu %d, invalid threshold interrupt offset %d "
		       "for bank %d, block %d (MSR%08X=0x%x%08x)\n",
		       b->cpu, apic, b->bank, b->block, b->address, hi, lo);
		return 0;
	}

	return 1;
}

/*
 * Reprogram MCx_MISC MSR behind this threshold bank.
 */
static void threshold_restart_bank(void *_tr)
{
	struct thresh_restart *tr = _tr;
	u32 hi, lo;

	rdmsr(tr->b->address, lo, hi);

	if (tr->b->threshold_limit < (hi & THRESHOLD_MAX))
		tr->reset = 1;	/* limit cannot be lower than err count */

	if (tr->reset) {		/* reset err count and overflow bit */
		hi =
		    (hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) |
		    (THRESHOLD_MAX - tr->b->threshold_limit);
	} else if (tr->old_limit) {	/* change limit w/o reset */
		int new_count = (hi & THRESHOLD_MAX) +
		    (tr->old_limit - tr->b->threshold_limit);

		hi = (hi & ~MASK_ERR_COUNT_HI) |
		    (new_count & THRESHOLD_MAX);
	}

	/* clear IntType */
	hi &= ~MASK_INT_TYPE_HI;

	if (!tr->b->interrupt_capable)
		goto done;

	if (tr->set_lvt_off) {
		if (lvt_off_valid(tr->b, tr->lvt_off, lo, hi)) {
			/* set new lvt offset */
			hi &= ~MASK_LVTOFF_HI;
			hi |= tr->lvt_off << 20;
		}
	}

	if (tr->b->interrupt_enable)
		hi |= INT_TYPE_APIC;

 done:
	hi |= MASK_COUNT_EN_HI;
	wrmsr(tr->b->address, lo, hi);
}

static void mce_threshold_block_init(struct threshold_block *b, int offset)
{
	struct thresh_restart tr = {
		.b			= b,
		.set_lvt_off		= 1,
		.lvt_off		= offset,
	};

	b->threshold_limit		= THRESHOLD_MAX;
	threshold_restart_bank(&tr);
}

static int setup_APIC_mce_threshold(int reserved, int new)
{
	if (reserved < 0 && !setup_APIC_eilvt(new, THRESHOLD_APIC_VECTOR,
					      APIC_EILVT_MSG_FIX, 0))
		return new;

	return reserved;
}

static int setup_APIC_deferred_error(int reserved, int new)
{
	if (reserved < 0 && !setup_APIC_eilvt(new, DEFERRED_ERROR_VECTOR,
					      APIC_EILVT_MSG_FIX, 0))
		return new;

	return reserved;
}

static void deferred_error_interrupt_enable(struct cpuinfo_x86 *c)
{
	u32 low = 0, high = 0;
	int def_offset = -1, def_new;

	if (rdmsr_safe(MSR_CU_DEF_ERR, &low, &high))
		return;

	def_new = (low & MASK_DEF_LVTOFF) >> 4;
	if (!(low & MASK_DEF_LVTOFF)) {
		pr_err(FW_BUG "Your BIOS is not setting up LVT offset 0x2 for deferred error IRQs correctly.\n");
		def_new = DEF_LVT_OFF;
		low = (low & ~MASK_DEF_LVTOFF) | (DEF_LVT_OFF << 4);
	}

	def_offset = setup_APIC_deferred_error(def_offset, def_new);
	if ((def_offset == def_new) &&
	    (deferred_error_int_vector != amd_deferred_error_interrupt))
		deferred_error_int_vector = amd_deferred_error_interrupt;

	low = (low & ~MASK_DEF_INT_TYPE) | DEF_INT_TYPE_APIC;
	wrmsr(MSR_CU_DEF_ERR, low, high);
}

static u32 get_block_address(u32 current_addr, u32 low, u32 high,
			     unsigned int bank, unsigned int block)
{
	u32 addr = 0, offset = 0;

	if (mce_flags.smca) {
		if (!block) {
			addr = MSR_AMD64_SMCA_MCx_MISC(bank);
		} else {
			/*
			 * For SMCA enabled processors, BLKPTR field of the
			 * first MISC register (MCx_MISC0) indicates presence of
			 * additional MISC register set (MISC1-4).
			 */
			if (rdmsr_safe(MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high))
				return addr;

			if (!(low & MCI_CONFIG_MCAX))
				return addr;

			if (!rdmsr_safe(MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high) &&
			    (low & MASK_BLKPTR_LO))
				addr = MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1);
		}

		return addr;
	}

	/* Fall back to method we used for older processors: */
	switch (block) {
	case 0:
		addr = MSR_IA32_MCx_MISC(bank);
		break;
	case 1:
		offset = ((low & MASK_BLKPTR_LO) >> 21);
		if (offset)
			addr = MCG_XBLK_ADDR + offset;
		break;
	default:
		addr = ++current_addr;
	}

	return addr;
}

static int
prepare_threshold_block(unsigned int bank, unsigned int block, u32 addr,
			int offset, u32 misc_high)
{
	unsigned int cpu = smp_processor_id();
	struct threshold_block b;
	int new;

	if (!block)
		per_cpu(bank_map, cpu) |= (1 << bank);

	memset(&b, 0, sizeof(b));

	b.cpu			= cpu;
	b.bank			= bank;
	b.block			= block;
	b.address		= addr;
	b.interrupt_capable	= lvt_interrupt_supported(bank, misc_high);

	if (!b.interrupt_capable)
		goto done;

	b.interrupt_enable = 1;

	if (mce_flags.smca) {
		u32 smca_low, smca_high;
		u32 smca_addr = MSR_AMD64_SMCA_MCx_CONFIG(bank);

		if (!rdmsr_safe(smca_addr, &smca_low, &smca_high)) {
			smca_high |= SMCA_MCAX_EN_OFF;
			wrmsr(smca_addr, smca_low, smca_high);
		}

		/* Gather LVT offset for thresholding: */
		if (rdmsr_safe(MSR_CU_DEF_ERR, &smca_low, &smca_high))
			goto out;

		new = (smca_low & SMCA_THR_LVT_OFF) >> 12;
	} else {
		new = (misc_high & MASK_LVTOFF_HI) >> 20;
	}

	offset = setup_APIC_mce_threshold(offset, new);

	if ((offset == new) && (mce_threshold_vector != amd_threshold_interrupt))
		mce_threshold_vector = amd_threshold_interrupt;

done:
	mce_threshold_block_init(&b, offset);

out:
	return offset;
}

/* cpu init entry point, called from mce.c with preempt off */
void mce_amd_feature_init(struct cpuinfo_x86 *c)
{
	u32 low = 0, high = 0, address = 0;
	unsigned int bank, block;
	int offset = -1;

	for (bank = 0; bank < mca_cfg.banks; ++bank) {
		for (block = 0; block < NR_BLOCKS; ++block) {
			address = get_block_address(address, low, high, bank, block);
			if (!address)
				break;

			if (rdmsr_safe(address, &low, &high))
				break;

			if (!(high & MASK_VALID_HI))
				continue;

			if (!(high & MASK_CNTP_HI)  ||
			     (high & MASK_LOCKED_HI))
				continue;

			offset = prepare_threshold_block(bank, block, address, offset, high);
		}
	}

	if (mce_flags.succor)
		deferred_error_interrupt_enable(c);
}

static void __log_error(unsigned int bank, bool threshold_err, u64 misc)
{
	struct mce m;
	u64 status;

	rdmsrl(MSR_IA32_MCx_STATUS(bank), status);
	if (!(status & MCI_STATUS_VAL))
		return;

	mce_setup(&m);

	m.status = status;
	m.bank	 = bank;

	if (threshold_err)
		m.misc = misc;

	if (m.status & MCI_STATUS_ADDRV)
		rdmsrl(MSR_IA32_MCx_ADDR(bank), m.addr);

	mce_log(&m);

	wrmsrl(MSR_IA32_MCx_STATUS(bank), 0);
}

static inline void __smp_deferred_error_interrupt(void)
{
	inc_irq_stat(irq_deferred_error_count);
	deferred_error_int_vector();
}

asmlinkage __visible void smp_deferred_error_interrupt(void)
{
	entering_irq();
	__smp_deferred_error_interrupt();
	exiting_ack_irq();
}

asmlinkage __visible void smp_trace_deferred_error_interrupt(void)
{
	entering_irq();
	trace_deferred_error_apic_entry(DEFERRED_ERROR_VECTOR);
	__smp_deferred_error_interrupt();
	trace_deferred_error_apic_exit(DEFERRED_ERROR_VECTOR);
	exiting_ack_irq();
}

/* APIC interrupt handler for deferred errors */
static void amd_deferred_error_interrupt(void)
{
	u64 status;
	unsigned int bank;

	for (bank = 0; bank < mca_cfg.banks; ++bank) {
		rdmsrl(MSR_IA32_MCx_STATUS(bank), status);

		if (!(status & MCI_STATUS_VAL) ||
		    !(status & MCI_STATUS_DEFERRED))
			continue;

		__log_error(bank, false, 0);
		break;
	}
}

/*
 * APIC Interrupt Handler
 */

/*
 * The threshold interrupt handler services THRESHOLD_APIC_VECTOR.
 * The interrupt goes off when error_count reaches threshold_limit.
 * The handler simply logs an MCE record with a software-defined bank number.
 */

static void amd_threshold_interrupt(void)
{
	u32 low = 0, high = 0, address = 0;
	int cpu = smp_processor_id();
	unsigned int bank, block;

	/* assume first bank caused it */
	for (bank = 0; bank < mca_cfg.banks; ++bank) {
		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
			continue;
		for (block = 0; block < NR_BLOCKS; ++block) {
			address = get_block_address(address, low, high, bank, block);
			if (!address)
				break;

			if (rdmsr_safe(address, &low, &high))
				break;

			if (!(high & MASK_VALID_HI)) {
				if (block)
					continue;
				else
					break;
			}

			if (!(high & MASK_CNTP_HI)  ||
			     (high & MASK_LOCKED_HI))
				continue;

			/*
			 * Log the machine check that caused the threshold
			 * event.
			 */
			if (high & MASK_OVERFLOW_HI)
				goto log;
		}
	}
	return;

log:
	__log_error(bank, true, ((u64)high << 32) | low);
}

struct threshold_attr {
	struct attribute attr;
	ssize_t (*show) (struct threshold_block *, char *);
	ssize_t (*store) (struct threshold_block *, const char *, size_t count);
};

#define SHOW_FIELDS(name)						\
static ssize_t show_ ## name(struct threshold_block *b, char *buf)	\
{									\
	return sprintf(buf, "%lu\n", (unsigned long) b->name);		\
}
SHOW_FIELDS(interrupt_enable)
SHOW_FIELDS(threshold_limit)

static ssize_t
store_interrupt_enable(struct threshold_block *b, const char *buf, size_t size)
{
	struct thresh_restart tr;
	unsigned long new;

	if (!b->interrupt_capable)
		return -EINVAL;

	if (kstrtoul(buf, 0, &new) < 0)
		return -EINVAL;

	b->interrupt_enable = !!new;

	memset(&tr, 0, sizeof(tr));
	tr.b = b;

	smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);

	return size;
}

static ssize_t
store_threshold_limit(struct threshold_block *b, const char *buf, size_t size)
{
	struct thresh_restart tr;
	unsigned long new;

	if (kstrtoul(buf, 0, &new) < 0)
		return -EINVAL;

	if (new > THRESHOLD_MAX)
		new = THRESHOLD_MAX;
	if (new < 1)
		new = 1;

	memset(&tr, 0, sizeof(tr));
	tr.old_limit = b->threshold_limit;
	b->threshold_limit = new;
	tr.b = b;

	smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);

	return size;
}

static ssize_t
show_error_count(struct threshold_block *b, char *buf)
{
	u32 lo, hi;

	rdmsr_on_cpu(b->cpu, b->address, &lo, &hi);

	return sprintf(buf, "%u\n", ((hi & THRESHOLD_MAX) -
				     (THRESHOLD_MAX - b->threshold_limit)));
}

static struct threshold_attr error_count = {
	.attr = {.name = __stringify(error_count), .mode = 0444 },
	.show = show_error_count,
};

#define RW_ATTR(val)							\
static struct threshold_attr val = {					\
	.attr	= {.name = __stringify(val), .mode = 0644 },		\
	.show	= show_## val,						\
	.store	= store_## val,						\
};

RW_ATTR(interrupt_enable);
RW_ATTR(threshold_limit);

static struct attribute *default_attrs[] = {
	&threshold_limit.attr,
	&error_count.attr,
	NULL,	/* possibly interrupt_enable if supported, see below */
	NULL,
};
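
/*
 * Illustrative usage sketch (the exact sysfs paths are an assumption based on
 * the kobject hierarchy built in threshold_create_bank() below and will vary
 * with the bank and block names):
 *
 *	# cat /sys/devices/system/machinecheck/machinecheck0/<bank>/<block>/error_count
 *	# echo 10 > /sys/devices/system/machinecheck/machinecheck0/<bank>/<block>/threshold_limit
 *
 * <bank> comes from th_names[]; <block> is named via bank4_names() for the
 * shared bank 4 and th_names[bank] otherwise, see allocate_threshold_blocks().
 */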

#define to_block(k)	container_of(k, struct threshold_block, kobj)
#define to_attr(a)	container_of(a, struct threshold_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct threshold_block *b = to_block(kobj);
	struct threshold_attr *a = to_attr(attr);
	ssize_t ret;

	ret = a->show ? a->show(b, buf) : -EIO;

	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct threshold_block *b = to_block(kobj);
	struct threshold_attr *a = to_attr(attr);
	ssize_t ret;

	ret = a->store ? a->store(b, buf, count) : -EIO;

	return ret;
}

static const struct sysfs_ops threshold_ops = {
	.show			= show,
	.store			= store,
};

static struct kobj_type threshold_ktype = {
	.sysfs_ops		= &threshold_ops,
	.default_attrs		= default_attrs,
};

static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
				     unsigned int block, u32 address)
{
	struct threshold_block *b = NULL;
	u32 low, high;
	int err;

	if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS))
		return 0;

	if (rdmsr_safe_on_cpu(cpu, address, &low, &high))
		return 0;

	if (!(high & MASK_VALID_HI)) {
		if (block)
			goto recurse;
		else
			return 0;
	}

	if (!(high & MASK_CNTP_HI)  ||
	     (high & MASK_LOCKED_HI))
		goto recurse;

	b = kzalloc(sizeof(struct threshold_block), GFP_KERNEL);
	if (!b)
		return -ENOMEM;

	b->block		= block;
	b->bank			= bank;
	b->cpu			= cpu;
	b->address		= address;
	b->interrupt_enable	= 0;
	b->interrupt_capable	= lvt_interrupt_supported(bank, high);
	b->threshold_limit	= THRESHOLD_MAX;

	if (b->interrupt_capable) {
		threshold_ktype.default_attrs[2] = &interrupt_enable.attr;
		b->interrupt_enable = 1;
	} else {
		threshold_ktype.default_attrs[2] = NULL;
	}

	INIT_LIST_HEAD(&b->miscj);

	if (per_cpu(threshold_banks, cpu)[bank]->blocks) {
		list_add(&b->miscj,
			 &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
	} else {
		per_cpu(threshold_banks, cpu)[bank]->blocks = b;
	}

	err = kobject_init_and_add(&b->kobj, &threshold_ktype,
				   per_cpu(threshold_banks, cpu)[bank]->kobj,
				   (bank == 4 ? bank4_names(b) : th_names[bank]));
	if (err)
		goto out_free;
recurse:
	address = get_block_address(address, low, high, bank, ++block);
	if (!address)
		return 0;

	err = allocate_threshold_blocks(cpu, bank, block, address);
	if (err)
		goto out_free;

	if (b)
		kobject_uevent(&b->kobj, KOBJ_ADD);

	return err;

out_free:
	if (b) {
		kobject_put(&b->kobj);
		list_del(&b->miscj);
		kfree(b);
	}
	return err;
}

static int __threshold_add_blocks(struct threshold_bank *b)
{
	struct list_head *head = &b->blocks->miscj;
	struct threshold_block *pos = NULL;
	struct threshold_block *tmp = NULL;
	int err = 0;

	err = kobject_add(&b->blocks->kobj, b->kobj, b->blocks->kobj.name);
	if (err)
		return err;

	list_for_each_entry_safe(pos, tmp, head, miscj) {

		err = kobject_add(&pos->kobj, b->kobj, pos->kobj.name);
		if (err) {
			list_for_each_entry_safe_reverse(pos, tmp, head, miscj)
				kobject_del(&pos->kobj);

			return err;
		}
	}
	return err;
}

static int threshold_create_bank(unsigned int cpu, unsigned int bank)
{
	struct device *dev = per_cpu(mce_device, cpu);
	struct amd_northbridge *nb = NULL;
	struct threshold_bank *b = NULL;
	const char *name = th_names[bank];
	int err = 0;

	if (is_shared_bank(bank)) {
		nb = node_to_amd_nb(amd_get_nb_id(cpu));

		/* threshold descriptor already initialized on this node? */
		if (nb && nb->bank4) {
			/* yes, use it */
			b = nb->bank4;
			err = kobject_add(b->kobj, &dev->kobj, name);
			if (err)
				goto out;

			per_cpu(threshold_banks, cpu)[bank] = b;
			atomic_inc(&b->cpus);

			err = __threshold_add_blocks(b);

			goto out;
		}
	}

	b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL);
	if (!b) {
		err = -ENOMEM;
		goto out;
	}

	b->kobj = kobject_create_and_add(name, &dev->kobj);
	if (!b->kobj) {
		err = -EINVAL;
		goto out_free;
	}

	per_cpu(threshold_banks, cpu)[bank] = b;

	if (is_shared_bank(bank)) {
		atomic_set(&b->cpus, 1);

		/* nb is already initialized, see above */
		if (nb) {
			WARN_ON(nb->bank4);
			nb->bank4 = b;
		}
	}

	err = allocate_threshold_blocks(cpu, bank, 0, MSR_IA32_MCx_MISC(bank));
	if (!err)
		goto out;

out_free:
	kfree(b);

out:
	return err;
}

/* create dir/files for all valid threshold banks */
static int threshold_create_device(unsigned int cpu)
{
	unsigned int bank;
	struct threshold_bank **bp;
	int err = 0;

	bp = kzalloc(sizeof(struct threshold_bank *) * mca_cfg.banks,
		     GFP_KERNEL);
	if (!bp)
		return -ENOMEM;

	per_cpu(threshold_banks, cpu) = bp;

	for (bank = 0; bank < mca_cfg.banks; ++bank) {
		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
			continue;
		err = threshold_create_bank(cpu, bank);
		if (err)
			return err;
	}

	return err;
}

static void deallocate_threshold_block(unsigned int cpu,
				       unsigned int bank)
{
	struct threshold_block *pos = NULL;
	struct threshold_block *tmp = NULL;
	struct threshold_bank *head = per_cpu(threshold_banks, cpu)[bank];

	if (!head)
		return;

	list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
		kobject_put(&pos->kobj);
		list_del(&pos->miscj);
		kfree(pos);
	}

	kfree(per_cpu(threshold_banks, cpu)[bank]->blocks);
	per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
}

static void __threshold_remove_blocks(struct threshold_bank *b)
{
	struct threshold_block *pos = NULL;
	struct threshold_block *tmp = NULL;

	kobject_del(b->kobj);

	list_for_each_entry_safe(pos, tmp, &b->blocks->miscj, miscj)
		kobject_del(&pos->kobj);
}

static void threshold_remove_bank(unsigned int cpu, int bank)
{
	struct amd_northbridge *nb;
	struct threshold_bank *b;

	b = per_cpu(threshold_banks, cpu)[bank];
	if (!b)
		return;

	if (!b->blocks)
		goto free_out;

	if (is_shared_bank(bank)) {
		if (!atomic_dec_and_test(&b->cpus)) {
			__threshold_remove_blocks(b);
			per_cpu(threshold_banks, cpu)[bank] = NULL;
			return;
		} else {
			/*
			 * the last CPU on this node using the shared bank is
			 * going away, remove that bank now.
			 */
			nb = node_to_amd_nb(amd_get_nb_id(cpu));
			nb->bank4 = NULL;
		}
	}

	deallocate_threshold_block(cpu, bank);

free_out:
	kobject_del(b->kobj);
	kobject_put(b->kobj);
	kfree(b);
	per_cpu(threshold_banks, cpu)[bank] = NULL;
}

static void threshold_remove_device(unsigned int cpu)
{
	unsigned int bank;

	for (bank = 0; bank < mca_cfg.banks; ++bank) {
		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
			continue;
		threshold_remove_bank(cpu, bank);
	}
	kfree(per_cpu(threshold_banks, cpu));
}

/* get notified when a cpu comes on/off */
static void
amd_64_threshold_cpu_callback(unsigned long action, unsigned int cpu)
{
	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		threshold_create_device(cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		threshold_remove_device(cpu);
		break;
	default:
		break;
	}
}

static __init int threshold_init_device(void)
{
	unsigned lcpu = 0;

	/* to hit CPUs online before the notifier is up */
	for_each_online_cpu(lcpu) {
		int err = threshold_create_device(lcpu);
		if (err)
			return err;
	}
	threshold_cpu_callback = amd_64_threshold_cpu_callback;

	return 0;
}

/*
 * There are three functions which need to be _initcalled in a logical
 * sequence:
 *
 * 1. xen_late_init_mcelog
 * 2. mcheck_init_device
 * 3. threshold_init_device
 *
 * xen_late_init_mcelog must register xen_mce_chrdev_device before
 * native mce_chrdev_device registration if running under the xen platform.
 *
 * mcheck_init_device should run before threshold_init_device so that
 * mce_device is initialized; otherwise a NULL pointer dereference will
 * cause a panic.
 *
 * So we use the following _initcalls:
 *
 * 1. device_initcall(xen_late_init_mcelog);
 * 2. device_initcall_sync(mcheck_init_device);
 * 3. late_initcall(threshold_init_device);
 *
 * When running under xen, the initcall order is 1, 2, 3;
 * on bare metal we skip 1 and do only 2 and 3.
 */
late_initcall(threshold_init_device);