arch/x86/kernel/cpu/mcheck/mce_amd.c
/*
 * (c) 2005-2012 Advanced Micro Devices, Inc.
 * Your use of this code is subject to the terms and conditions of the
 * GNU general public license version 2. See "COPYING" or
 * http://www.gnu.org/licenses/gpl.html
 *
 * Written by Jacob Shin - AMD, Inc.
 *
 * Maintained by: Borislav Petkov <bp@alien8.de>
 *
 * April 2006
 *     - added support for AMD Family 0x10 processors
 * May 2012
 *     - major scrubbing
 *
 * All MC4_MISCi registers are shared between multi-cores
 */
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/kobject.h>
#include <linux/percpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/smp.h>

#include <asm/amd_nb.h>
#include <asm/apic.h>
#include <asm/idle.h>
#include <asm/mce.h>
#include <asm/msr.h>

#define NR_BLOCKS         9
#define THRESHOLD_MAX     0xFFF
#define INT_TYPE_APIC     0x00020000
#define MASK_VALID_HI     0x80000000
#define MASK_CNTP_HI      0x40000000
#define MASK_LOCKED_HI    0x20000000
#define MASK_LVTOFF_HI    0x00F00000
#define MASK_COUNT_EN_HI  0x00080000
#define MASK_INT_TYPE_HI  0x00060000
#define MASK_OVERFLOW_HI  0x00010000
#define MASK_ERR_COUNT_HI 0x00000FFF
#define MASK_BLKPTR_LO    0xFF000000
#define MCG_XBLK_ADDR     0xC0000400

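/*
 * The MASK_*_HI constants above select fields in the high 32 bits of an
 * MCi_MISC thresholding register as read by rdmsr(): the valid,
 * counter-present and locked bits, the APIC LVT offset, the counter-enable
 * bit, the interrupt type, the overflow flag and the 12-bit error counter.
 * MASK_BLKPTR_LO selects the block-pointer field in the low half; a non-zero
 * pointer is turned into an address inside the extended MSR block at
 * MCG_XBLK_ADDR (see the block walks below).
 */
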
static const char * const th_names[] = {
        "load_store",
        "insn_fetch",
        "combined_unit",
        "",
        "northbridge",
        "execution_unit",
};

static DEFINE_PER_CPU(struct threshold_bank **, threshold_banks);
static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */

static void amd_threshold_interrupt(void);

/*
 * CPU Initialization
 */

struct thresh_restart {
        struct threshold_block  *b;
        int                     reset;
        int                     set_lvt_off;
        int                     lvt_off;
        u16                     old_limit;
};

static inline bool is_shared_bank(int bank)
{
        /* Bank 4 is for northbridge reporting and is thus shared */
        return (bank == 4);
}

static const char * const bank4_names(struct threshold_block *b)
{
        switch (b->address) {
        /* MSR4_MISC0 */
        case 0x00000413:
                return "dram";

        case 0xc0000408:
                return "ht_links";

        case 0xc0000409:
                return "l3_cache";

        default:
                WARN(1, "Funny MSR: 0x%08x\n", b->address);
                return "";
        }
};


static bool lvt_interrupt_supported(unsigned int bank, u32 msr_high_bits)
{
        /*
         * bank 4 supports APIC LVT interrupts implicitly since forever.
         */
        if (bank == 4)
                return true;

        /*
         * IntP: interrupt present; if this bit is set, the thresholding
         * bank can generate APIC LVT interrupts
         */
        return msr_high_bits & BIT(28);
}

static int lvt_off_valid(struct threshold_block *b, int apic, u32 lo, u32 hi)
{
        int msr = (hi & MASK_LVTOFF_HI) >> 20;

        if (apic < 0) {
                pr_err(FW_BUG "cpu %d, failed to setup threshold interrupt "
                       "for bank %d, block %d (MSR%08X=0x%x%08x)\n", b->cpu,
                       b->bank, b->block, b->address, hi, lo);
                return 0;
        }

        if (apic != msr) {
                pr_err(FW_BUG "cpu %d, invalid threshold interrupt offset %d "
                       "for bank %d, block %d (MSR%08X=0x%x%08x)\n",
                       b->cpu, apic, b->bank, b->block, b->address, hi, lo);
                return 0;
        }

        return 1;
};

/*
 * Called via smp_call_function_single(), must be called with correct
 * cpu affinity.
 */
static void threshold_restart_bank(void *_tr)
{
        struct thresh_restart *tr = _tr;
        u32 hi, lo;

        rdmsr(tr->b->address, lo, hi);

        if (tr->b->threshold_limit < (hi & THRESHOLD_MAX))
                tr->reset = 1;  /* limit cannot be lower than err count */

        if (tr->reset) {                /* reset err count and overflow bit */
                hi =
                    (hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) |
                    (THRESHOLD_MAX - tr->b->threshold_limit);
        } else if (tr->old_limit) {     /* change limit w/o reset */
                int new_count = (hi & THRESHOLD_MAX) +
                    (tr->old_limit - tr->b->threshold_limit);

                hi = (hi & ~MASK_ERR_COUNT_HI) |
                    (new_count & THRESHOLD_MAX);
        }

        /* clear IntType */
        hi &= ~MASK_INT_TYPE_HI;

        if (!tr->b->interrupt_capable)
                goto done;

        if (tr->set_lvt_off) {
                if (lvt_off_valid(tr->b, tr->lvt_off, lo, hi)) {
                        /* set new lvt offset */
                        hi &= ~MASK_LVTOFF_HI;
                        hi |= tr->lvt_off << 20;
                }
        }

        if (tr->b->interrupt_enable)
                hi |= INT_TYPE_APIC;

 done:

        hi |= MASK_COUNT_EN_HI;
        wrmsr(tr->b->address, lo, hi);
}

static void mce_threshold_block_init(struct threshold_block *b, int offset)
{
        struct thresh_restart tr = {
                .b              = b,
                .set_lvt_off    = 1,
                .lvt_off        = offset,
        };

        b->threshold_limit      = THRESHOLD_MAX;
        threshold_restart_bank(&tr);
};

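/*
 * Reserve the APIC extended LVT entry (EILVT) used for the threshold
 * interrupt: the first interrupt-capable block supplies the LVT offset
 * advertised by the hardware, and once setup_APIC_eilvt() accepts it the
 * same offset is handed back in for the remaining banks/blocks of this CPU.
 */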
static int setup_APIC_mce(int reserved, int new)
{
        if (reserved < 0 && !setup_APIC_eilvt(new, THRESHOLD_APIC_VECTOR,
                                              APIC_EILVT_MSG_FIX, 0))
                return new;

        return reserved;
}

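/*
 * Both the init path below and the interrupt handler walk the thresholding
 * blocks of a bank the same way: block 0 lives at MSR_IA32_MC0_MISC +
 * bank * 4; if its low half carries a block pointer (MASK_BLKPTR_LO),
 * block 1 sits at that offset within the extended MSR area at MCG_XBLK_ADDR,
 * and further blocks follow at consecutive addresses, up to NR_BLOCKS per
 * bank.
 */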
/* cpu init entry point, called from mce.c with preempt off */
void mce_amd_feature_init(struct cpuinfo_x86 *c)
{
        struct threshold_block b;
        unsigned int cpu = smp_processor_id();
        u32 low = 0, high = 0, address = 0;
        unsigned int bank, block;
        int offset = -1;

        for (bank = 0; bank < mca_cfg.banks; ++bank) {
                for (block = 0; block < NR_BLOCKS; ++block) {
                        if (block == 0)
                                address = MSR_IA32_MC0_MISC + bank * 4;
                        else if (block == 1) {
                                address = (low & MASK_BLKPTR_LO) >> 21;
                                if (!address)
                                        break;

                                address += MCG_XBLK_ADDR;
                        } else
                                ++address;

                        if (rdmsr_safe(address, &low, &high))
                                break;

                        if (!(high & MASK_VALID_HI))
                                continue;

                        if (!(high & MASK_CNTP_HI)  ||
                             (high & MASK_LOCKED_HI))
                                continue;

                        if (!block)
                                per_cpu(bank_map, cpu) |= (1 << bank);

                        memset(&b, 0, sizeof(b));
                        b.cpu                   = cpu;
                        b.bank                  = bank;
                        b.block                 = block;
                        b.address               = address;
                        b.interrupt_capable     = lvt_interrupt_supported(bank, high);

                        if (b.interrupt_capable) {
                                int new = (high & MASK_LVTOFF_HI) >> 20;
                                offset = setup_APIC_mce(offset, new);
                        }

                        mce_threshold_block_init(&b, offset);
                        mce_threshold_vector = amd_threshold_interrupt;
                }
        }
}

/*
 * APIC Interrupt Handler
 */

/*
 * threshold interrupt handler will service THRESHOLD_APIC_VECTOR.
 * the interrupt goes off when error_count reaches threshold_limit.
 * the handler will simply log mcelog w/ software defined bank number.
 */
static void amd_threshold_interrupt(void)
{
        u32 low = 0, high = 0, address = 0;
        unsigned int bank, block;
        struct mce m;

        mce_setup(&m);

        /* assume first bank caused it */
        for (bank = 0; bank < mca_cfg.banks; ++bank) {
                if (!(per_cpu(bank_map, m.cpu) & (1 << bank)))
                        continue;
                for (block = 0; block < NR_BLOCKS; ++block) {
                        if (block == 0) {
                                address = MSR_IA32_MC0_MISC + bank * 4;
                        } else if (block == 1) {
                                address = (low & MASK_BLKPTR_LO) >> 21;
                                if (!address)
                                        break;
                                address += MCG_XBLK_ADDR;
                        } else {
                                ++address;
                        }

                        if (rdmsr_safe(address, &low, &high))
                                break;

                        if (!(high & MASK_VALID_HI)) {
                                if (block)
                                        continue;
                                else
                                        break;
                        }

                        if (!(high & MASK_CNTP_HI)  ||
                             (high & MASK_LOCKED_HI))
                                continue;

                        /*
                         * Log the machine check that caused the threshold
                         * event.
                         */
                        machine_check_poll(MCP_TIMESTAMP,
                                        &__get_cpu_var(mce_poll_banks));

                        if (high & MASK_OVERFLOW_HI) {
                                rdmsrl(address, m.misc);
                                rdmsrl(MSR_IA32_MC0_STATUS + bank * 4,
                                       m.status);
                                m.bank = K8_MCE_THRESHOLD_BASE
                                       + bank * NR_BLOCKS
                                       + block;
                                mce_log(&m);
                                return;
                        }
                }
        }
}

/*
 * Sysfs Interface
 */

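/*
 * The attributes below hang off the per-CPU mce_device (typically
 * /sys/devices/system/machinecheck/machinecheck<N>/): one directory per
 * enabled bank, named after th_names[], containing one directory per
 * thresholding block (bank 4 blocks are named via bank4_names()). Each
 * block directory carries error_count, threshold_limit and, for
 * interrupt-capable blocks, interrupt_enable.
 */
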
struct threshold_attr {
        struct attribute attr;
        ssize_t (*show) (struct threshold_block *, char *);
        ssize_t (*store) (struct threshold_block *, const char *, size_t count);
};

#define SHOW_FIELDS(name)                                               \
static ssize_t show_ ## name(struct threshold_block *b, char *buf)     \
{                                                                       \
        return sprintf(buf, "%lu\n", (unsigned long) b->name);         \
}
SHOW_FIELDS(interrupt_enable)
SHOW_FIELDS(threshold_limit)

static ssize_t
store_interrupt_enable(struct threshold_block *b, const char *buf, size_t size)
{
        struct thresh_restart tr;
        unsigned long new;

        if (!b->interrupt_capable)
                return -EINVAL;

        if (strict_strtoul(buf, 0, &new) < 0)
                return -EINVAL;

        b->interrupt_enable = !!new;

        memset(&tr, 0, sizeof(tr));
        tr.b = b;

        smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);

        return size;
}

static ssize_t
store_threshold_limit(struct threshold_block *b, const char *buf, size_t size)
{
        struct thresh_restart tr;
        unsigned long new;

        if (strict_strtoul(buf, 0, &new) < 0)
                return -EINVAL;

        if (new > THRESHOLD_MAX)
                new = THRESHOLD_MAX;
        if (new < 1)
                new = 1;

        memset(&tr, 0, sizeof(tr));
        tr.old_limit = b->threshold_limit;
        b->threshold_limit = new;
        tr.b = b;

        smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);

        return size;
}

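/*
 * The hardware counter counts up and flags an overflow once it passes
 * THRESHOLD_MAX, so threshold_restart_bank() pre-loads it with
 * THRESHOLD_MAX - threshold_limit. show_error_count() subtracts that bias
 * again, i.e. it reports the number of errors seen since the counter was
 * last (re)armed.
 */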
static ssize_t show_error_count(struct threshold_block *b, char *buf)
{
        u32 lo, hi;

        rdmsr_on_cpu(b->cpu, b->address, &lo, &hi);

        return sprintf(buf, "%u\n", ((hi & THRESHOLD_MAX) -
                                     (THRESHOLD_MAX - b->threshold_limit)));
}

static struct threshold_attr error_count = {
        .attr = {.name = __stringify(error_count), .mode = 0444 },
        .show = show_error_count,
};

#define RW_ATTR(val)                                                    \
static struct threshold_attr val = {                                    \
        .attr   = {.name = __stringify(val), .mode = 0644 },           \
        .show   = show_## val,                                          \
        .store  = store_## val,                                         \
};

RW_ATTR(interrupt_enable);
RW_ATTR(threshold_limit);

static struct attribute *default_attrs[] = {
        &threshold_limit.attr,
        &error_count.attr,
        NULL,   /* possibly interrupt_enable if supported, see below */
        NULL,
};

#define to_block(k)     container_of(k, struct threshold_block, kobj)
#define to_attr(a)      container_of(a, struct threshold_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
        struct threshold_block *b = to_block(kobj);
        struct threshold_attr *a = to_attr(attr);
        ssize_t ret;

        ret = a->show ? a->show(b, buf) : -EIO;

        return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
                     const char *buf, size_t count)
{
        struct threshold_block *b = to_block(kobj);
        struct threshold_attr *a = to_attr(attr);
        ssize_t ret;

        ret = a->store ? a->store(b, buf, count) : -EIO;

        return ret;
}

static const struct sysfs_ops threshold_ops = {
        .show   = show,
        .store  = store,
};

static struct kobj_type threshold_ktype = {
        .sysfs_ops      = &threshold_ops,
        .default_attrs  = default_attrs,
};

static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
                                     unsigned int block, u32 address)
{
        struct threshold_block *b = NULL;
        u32 low, high;
        int err;

        if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS))
                return 0;

        if (rdmsr_safe_on_cpu(cpu, address, &low, &high))
                return 0;

        if (!(high & MASK_VALID_HI)) {
                if (block)
                        goto recurse;
                else
                        return 0;
        }

        if (!(high & MASK_CNTP_HI)  ||
             (high & MASK_LOCKED_HI))
                goto recurse;

        b = kzalloc(sizeof(struct threshold_block), GFP_KERNEL);
        if (!b)
                return -ENOMEM;

        b->block                = block;
        b->bank                 = bank;
        b->cpu                  = cpu;
        b->address              = address;
        b->interrupt_enable     = 0;
        b->interrupt_capable    = lvt_interrupt_supported(bank, high);
        b->threshold_limit      = THRESHOLD_MAX;

        if (b->interrupt_capable)
                threshold_ktype.default_attrs[2] = &interrupt_enable.attr;
        else
                threshold_ktype.default_attrs[2] = NULL;

        INIT_LIST_HEAD(&b->miscj);

        if (per_cpu(threshold_banks, cpu)[bank]->blocks) {
                list_add(&b->miscj,
                         &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
        } else {
                per_cpu(threshold_banks, cpu)[bank]->blocks = b;
        }

        err = kobject_init_and_add(&b->kobj, &threshold_ktype,
                                   per_cpu(threshold_banks, cpu)[bank]->kobj,
                                   (bank == 4 ? bank4_names(b) : th_names[bank]));
        if (err)
                goto out_free;
recurse:
        if (!block) {
                address = (low & MASK_BLKPTR_LO) >> 21;
                if (!address)
                        return 0;
                address += MCG_XBLK_ADDR;
        } else {
                ++address;
        }

        err = allocate_threshold_blocks(cpu, bank, ++block, address);
        if (err)
                goto out_free;

        if (b)
                kobject_uevent(&b->kobj, KOBJ_ADD);

        return err;

out_free:
        if (b) {
                kobject_put(&b->kobj);
                list_del(&b->miscj);
                kfree(b);
        }
        return err;
}

static int __threshold_add_blocks(struct threshold_bank *b)
{
        struct list_head *head = &b->blocks->miscj;
        struct threshold_block *pos = NULL;
        struct threshold_block *tmp = NULL;
        int err = 0;

        err = kobject_add(&b->blocks->kobj, b->kobj, b->blocks->kobj.name);
        if (err)
                return err;

        list_for_each_entry_safe(pos, tmp, head, miscj) {

                err = kobject_add(&pos->kobj, b->kobj, pos->kobj.name);
                if (err) {
                        list_for_each_entry_safe_reverse(pos, tmp, head, miscj)
                                kobject_del(&pos->kobj);

                        return err;
                }
        }
        return err;
}

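/*
 * Bank 4 (northbridge) is shared by all cores on a node: the first CPU to
 * get here allocates the threshold_bank and stashes it in nb->bank4; later
 * CPUs merely kobject_add() a link to it and bump b->cpus. The removal path
 * below drops that refcount and frees the descriptor only when the last CPU
 * on the node goes away.
 */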
static int threshold_create_bank(unsigned int cpu, unsigned int bank)
{
        struct device *dev = per_cpu(mce_device, cpu);
        struct amd_northbridge *nb = NULL;
        struct threshold_bank *b = NULL;
        const char *name = th_names[bank];
        int err = 0;

        if (is_shared_bank(bank)) {
                nb = node_to_amd_nb(amd_get_nb_id(cpu));

                /* threshold descriptor already initialized on this node? */
                if (nb && nb->bank4) {
                        /* yes, use it */
                        b = nb->bank4;
                        err = kobject_add(b->kobj, &dev->kobj, name);
                        if (err)
                                goto out;

                        per_cpu(threshold_banks, cpu)[bank] = b;
                        atomic_inc(&b->cpus);

                        err = __threshold_add_blocks(b);

                        goto out;
                }
        }

        b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL);
        if (!b) {
                err = -ENOMEM;
                goto out;
        }

        b->kobj = kobject_create_and_add(name, &dev->kobj);
        if (!b->kobj) {
                err = -EINVAL;
                goto out_free;
        }

        per_cpu(threshold_banks, cpu)[bank] = b;

        if (is_shared_bank(bank)) {
                atomic_set(&b->cpus, 1);

                /* nb is already initialized, see above */
                if (nb) {
                        WARN_ON(nb->bank4);
                        nb->bank4 = b;
                }
        }

        err = allocate_threshold_blocks(cpu, bank, 0,
                                        MSR_IA32_MC0_MISC + bank * 4);
        if (!err)
                goto out;

 out_free:
        kfree(b);

 out:
        return err;
}

/* create dir/files for all valid threshold banks */
static int threshold_create_device(unsigned int cpu)
{
        unsigned int bank;
        struct threshold_bank **bp;
        int err = 0;

        bp = kzalloc(sizeof(struct threshold_bank *) * mca_cfg.banks,
                     GFP_KERNEL);
        if (!bp)
                return -ENOMEM;

        per_cpu(threshold_banks, cpu) = bp;

        for (bank = 0; bank < mca_cfg.banks; ++bank) {
                if (!(per_cpu(bank_map, cpu) & (1 << bank)))
                        continue;
                err = threshold_create_bank(cpu, bank);
                if (err)
                        return err;
        }

        return err;
}

static void deallocate_threshold_block(unsigned int cpu,
                                       unsigned int bank)
{
        struct threshold_block *pos = NULL;
        struct threshold_block *tmp = NULL;
        struct threshold_bank *head = per_cpu(threshold_banks, cpu)[bank];

        if (!head)
                return;

        list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
                kobject_put(&pos->kobj);
                list_del(&pos->miscj);
                kfree(pos);
        }

        kfree(per_cpu(threshold_banks, cpu)[bank]->blocks);
        per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
}

static void __threshold_remove_blocks(struct threshold_bank *b)
{
        struct threshold_block *pos = NULL;
        struct threshold_block *tmp = NULL;

        kobject_del(b->kobj);

        list_for_each_entry_safe(pos, tmp, &b->blocks->miscj, miscj)
                kobject_del(&pos->kobj);
}

static void threshold_remove_bank(unsigned int cpu, int bank)
{
        struct amd_northbridge *nb;
        struct threshold_bank *b;

        b = per_cpu(threshold_banks, cpu)[bank];
        if (!b)
                return;

        if (!b->blocks)
                goto free_out;

        if (is_shared_bank(bank)) {
                if (!atomic_dec_and_test(&b->cpus)) {
                        __threshold_remove_blocks(b);
                        per_cpu(threshold_banks, cpu)[bank] = NULL;
                        return;
                } else {
                        /*
                         * the last CPU on this node using the shared bank is
                         * going away, remove that bank now.
                         */
                        nb = node_to_amd_nb(amd_get_nb_id(cpu));
                        nb->bank4 = NULL;
                }
        }

        deallocate_threshold_block(cpu, bank);

free_out:
        kobject_del(b->kobj);
        kobject_put(b->kobj);
        kfree(b);
        per_cpu(threshold_banks, cpu)[bank] = NULL;
}

static void threshold_remove_device(unsigned int cpu)
{
        unsigned int bank;

        for (bank = 0; bank < mca_cfg.banks; ++bank) {
                if (!(per_cpu(bank_map, cpu) & (1 << bank)))
                        continue;
                threshold_remove_bank(cpu, bank);
        }
        kfree(per_cpu(threshold_banks, cpu));
}

/* get notified when a cpu comes on/off */
static void
amd_64_threshold_cpu_callback(unsigned long action, unsigned int cpu)
{
        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                threshold_create_device(cpu);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                threshold_remove_device(cpu);
                break;
        default:
                break;
        }
}

static __init int threshold_init_device(void)
{
        unsigned lcpu = 0;

        /* to hit CPUs online before the notifier is up */
        for_each_online_cpu(lcpu) {
                int err = threshold_create_device(lcpu);

                if (err)
                        return err;
        }
        threshold_cpu_callback = amd_64_threshold_cpu_callback;

        return 0;
}
/*
 * There are three functions which need to be _initcalled in a logical
 * sequence:
 * 1. xen_late_init_mcelog
 * 2. mcheck_init_device
 * 3. threshold_init_device
 *
 * xen_late_init_mcelog must register xen_mce_chrdev_device before the
 * native mce_chrdev_device registration when running under the Xen
 * platform;
 *
 * mcheck_init_device must run before threshold_init_device so that
 * mce_device is initialized, otherwise a NULL pointer dereference will
 * cause a panic.
 *
 * So we use the following _initcalls:
 * 1. device_initcall(xen_late_init_mcelog);
 * 2. device_initcall_sync(mcheck_init_device);
 * 3. late_initcall(threshold_init_device);
 *
 * When running under Xen, the initcall order is 1, 2, 3;
 * on bare metal, we skip 1 and do only 2 and 3.
 */
late_initcall(threshold_init_device);