arch/x86/kernel/cpu/mcheck/mce_64.c
/*
 * Machine check handler.
 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
 * Rest from unknown author(s).
 * 2004 Andi Kleen. Rewrote most of it.
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/rcupdate.h>
#include <linux/kallsyms.h>
#include <linux/sysdev.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/percpu.h>
#include <linux/poll.h>
#include <linux/thread_info.h>
#include <linux/ctype.h>
#include <linux/kmod.h>
#include <linux/kdebug.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/mce.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/idle.h>

#define MISC_MCELOG_MINOR 227
#define NR_BANKS 6

atomic_t mce_entry;

static int mce_dont_init;

/*
 * Tolerant levels:
 * 0: always panic on uncorrected errors, log corrected errors
 * 1: panic or SIGBUS on uncorrected errors, log corrected errors
 * 2: SIGBUS or log uncorrected errors (if possible), log corrected errors
 * 3: never panic or SIGBUS, log all errors (for testing only)
 */
static int tolerant = 1;
static int banks;
static unsigned long bank[NR_BANKS] = { [0 ... NR_BANKS-1] = ~0UL };
static unsigned long notify_user;
static int rip_msr;
static int mce_bootlog = 1;
static atomic_t mce_events;

static char trigger[128];
static char *trigger_argv[2] = { trigger, NULL };

static DECLARE_WAIT_QUEUE_HEAD(mce_wait);

/*
 * Lockless MCE logging infrastructure.
 * This avoids deadlocks on printk locks without having to break locks. Also
 * separate MCEs from kernel messages to avoid bogus bug reports.
 */

struct mce_log mcelog = {
        MCE_LOG_SIGNATURE,
        MCE_LOG_LEN,
};

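/*
 * How the lockless log works (see mce_log() below): a writer snapshots
 * mcelog.next, skips over slots already marked 'finished', and claims
 * a free slot with cmpxchg() on mcelog.next. Only after the record is
 * fully copied does it set 'finished', with wmb() ordering the stores,
 * so a reader that observes 'finished' sees a complete record.
 */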
void mce_log(struct mce *mce)
{
        unsigned next, entry;

        atomic_inc(&mce_events);
        mce->finished = 0;
        wmb();
        for (;;) {
                entry = rcu_dereference(mcelog.next);
                for (;;) {
                        /* When the buffer fills up discard new entries. Assume
                           that the earlier errors are the more interesting. */
                        if (entry >= MCE_LOG_LEN) {
                                set_bit(MCE_OVERFLOW, &mcelog.flags);
                                return;
                        }
                        /* Old left over entry. Skip. */
                        if (mcelog.entry[entry].finished) {
                                entry++;
                                continue;
                        }
                        break;
                }
                smp_rmb();
                next = entry + 1;
                if (cmpxchg(&mcelog.next, entry, next) == entry)
                        break;
        }
        memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
        wmb();
        mcelog.entry[entry].finished = 1;
        wmb();

        set_bit(0, &notify_user);
}

static void print_mce(struct mce *m)
{
        printk(KERN_EMERG "\n"
               KERN_EMERG "HARDWARE ERROR\n"
               KERN_EMERG
               "CPU %d: Machine Check Exception: %16Lx Bank %d: %016Lx\n",
               m->cpu, m->mcgstatus, m->bank, m->status);
        if (m->rip) {
                printk(KERN_EMERG "RIP%s %02x:<%016Lx> ",
                       !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
                       m->cs, m->rip);
                if (m->cs == __KERNEL_CS)
                        print_symbol("{%s}", m->rip);
                printk("\n");
        }
        printk(KERN_EMERG "TSC %Lx ", m->tsc);
        if (m->addr)
                printk("ADDR %Lx ", m->addr);
        if (m->misc)
                printk("MISC %Lx ", m->misc);
        printk("\n");
        printk(KERN_EMERG "This is not a software problem!\n");
        printk(KERN_EMERG "Run through mcelog --ascii to decode "
               "and contact your hardware vendor\n");
}

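/*
 * Dump every logged record whose TSC stamp is not older than 'start',
 * plus the record that triggered the panic, taking care not to print
 * that record twice if it already made it into the log buffer.
 */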
static void mce_panic(char *msg, struct mce *backup, unsigned long start)
{
        int i;

        oops_begin();
        for (i = 0; i < MCE_LOG_LEN; i++) {
                unsigned long tsc = mcelog.entry[i].tsc;

                if (time_before(tsc, start))
                        continue;
                print_mce(&mcelog.entry[i]);
                if (backup && mcelog.entry[i].tsc == backup->tsc)
                        backup = NULL;
        }
        if (backup)
                print_mce(backup);
        panic(msg);
}

static int mce_available(struct cpuinfo_x86 *c)
{
        return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
}

static inline void mce_get_rip(struct mce *m, struct pt_regs *regs)
{
        if (regs && (m->mcgstatus & MCG_STATUS_RIPV)) {
                m->rip = regs->rip;
                m->cs = regs->cs;
        } else {
                m->rip = 0;
                m->cs = 0;
        }
        if (rip_msr) {
                /* Assume the RIP in the MSR is exact. Is this true? */
                m->mcgstatus |= MCG_STATUS_EIPV;
                rdmsrl(rip_msr, m->rip);
                m->cs = 0;
        }
}

/*
 * The actual machine check handler
 */
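/*
 * Calling convention used within this file: regs is NULL when we are
 * invoked from the periodic poller or from boot-time log processing
 * rather than from a real #MC exception. A negative error_code skips
 * the TSC timestamp, and error_code == -2 additionally suppresses
 * logging altogether (used by mce_init() to silently clear leftover
 * state).
 */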
void do_machine_check(struct pt_regs *regs, long error_code)
{
        struct mce m, panicm;
        u64 mcestart = 0;
        int i;
        int panicm_found = 0;
        /*
         * If no_way_out gets set, there is no safe way to recover from this
         * MCE. If tolerant is cranked up, we'll try anyway.
         */
        int no_way_out = 0;
        /*
         * If kill_it gets set, there might be a way to recover from this
         * error.
         */
        int kill_it = 0;

        atomic_inc(&mce_entry);

        if (regs)
                notify_die(DIE_NMI, "machine check", regs, error_code, 18,
                           SIGKILL);
        if (!banks)
                goto out2;

        memset(&m, 0, sizeof(struct mce));
        m.cpu = smp_processor_id();
        rdmsrl(MSR_IA32_MCG_STATUS, m.mcgstatus);
        /* if the restart IP is not valid, we're done for */
        if (!(m.mcgstatus & MCG_STATUS_RIPV))
                no_way_out = 1;

        rdtscll(mcestart);
        barrier();

        for (i = 0; i < banks; i++) {
                if (!bank[i])
                        continue;

                m.misc = 0;
                m.addr = 0;
                m.bank = i;
                m.tsc = 0;

                rdmsrl(MSR_IA32_MC0_STATUS + i*4, m.status);
                if ((m.status & MCI_STATUS_VAL) == 0)
                        continue;

                if (m.status & MCI_STATUS_EN) {
                        /* if PCC was set, there's no way out */
                        no_way_out |= !!(m.status & MCI_STATUS_PCC);
                        /*
                         * If this error was uncorrectable and there was
                         * an overflow, we're in trouble. If no overflow,
                         * we might get away with just killing a task.
                         */
                        if (m.status & MCI_STATUS_UC) {
                                if (tolerant < 1 || m.status & MCI_STATUS_OVER)
                                        no_way_out = 1;
                                kill_it = 1;
                        }
                }

                if (m.status & MCI_STATUS_MISCV)
                        rdmsrl(MSR_IA32_MC0_MISC + i*4, m.misc);
                if (m.status & MCI_STATUS_ADDRV)
                        rdmsrl(MSR_IA32_MC0_ADDR + i*4, m.addr);

                mce_get_rip(&m, regs);
                if (error_code >= 0)
                        rdtscll(m.tsc);
                if (error_code != -2)
                        mce_log(&m);

                /* Did this bank cause the exception? Assume that the bank
                   with uncorrectable errors did it, and that there is only
                   a single one. */
                if ((m.status & MCI_STATUS_UC) && (m.status & MCI_STATUS_EN)) {
                        panicm = m;
                        panicm_found = 1;
                }

                add_taint(TAINT_MACHINE_CHECK);
        }

        /* Never do anything final in the polling timer */
        if (!regs)
                goto out;

        /* If we didn't find an uncorrectable error, pick
           the last one (shouldn't happen, just being safe). */
        if (!panicm_found)
                panicm = m;

        /*
         * If we have decided that we just CAN'T continue, and the user
         * has not set tolerant to an insane level, give up and die.
         */
        if (no_way_out && tolerant < 3)
                mce_panic("Machine check", &panicm, mcestart);

        /*
         * If the error seems to be unrecoverable, something should be
         * done. Try to kill as little as possible. If we can kill just
         * one task, do that. If the user has set the tolerance very
         * high, don't try to do anything at all.
         */
        if (kill_it && tolerant < 3) {
                int user_space = 0;

                /*
                 * If the EIPV bit is set, it means the saved IP is the
                 * instruction which caused the MCE.
                 */
                if (m.mcgstatus & MCG_STATUS_EIPV)
                        user_space = panicm.rip && (panicm.cs & 3);

                /*
                 * If we know that the error was in user space, send a
                 * SIGBUS. Otherwise, panic if tolerance is low.
                 *
                 * do_exit() takes an awful lot of locks and has a slight
                 * risk of deadlocking.
                 */
                if (user_space) {
                        do_exit(SIGBUS);
                } else if (panic_on_oops || tolerant < 2) {
                        mce_panic("Uncorrected machine check",
                                  &panicm, mcestart);
                }
        }

        /* notify userspace ASAP */
        set_thread_flag(TIF_MCE_NOTIFY);

 out:
        /* the last thing we do is clear state */
        for (i = 0; i < banks; i++)
                wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
        wrmsrl(MSR_IA32_MCG_STATUS, 0);
 out2:
        atomic_dec(&mce_entry);
}

#ifdef CONFIG_X86_MCE_INTEL
/**
 * mce_log_therm_throt_event - Logs the thermal throttling event to mcelog
 * @cpu: The CPU on which the event occurred.
 * @status: Event status information
 *
 * This function should be called by the thermal interrupt after the
 * event has been processed and the decision was made to log the event
 * further.
 *
 * The status parameter will be saved to the 'status' field of 'struct mce'
 * and historically has been the register value of the
 * MSR_IA32_THERMAL_STATUS (Intel) msr.
 */
void mce_log_therm_throt_event(unsigned int cpu, __u64 status)
{
        struct mce m;

        memset(&m, 0, sizeof(m));
        m.cpu = cpu;
        m.bank = MCE_THERMAL_BANK;
        m.status = status;
        rdtscll(m.tsc);
        mce_log(&m);
}
#endif /* CONFIG_X86_MCE_INTEL */

/*
 * Periodic polling timer for "silent" machine check errors. If the
 * poller finds an MCE, poll 2x faster. When the poller finds no more
 * errors, poll 2x slower (up to check_interval seconds).
 */
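/*
 * For example, with HZ=1000 and the default check_interval of 300s,
 * the delay shrinks from 300000 jiffies towards the HZ/100 (10ms)
 * floor while errors keep being found, and doubles back up to the
 * ~300000 jiffy ceiling once the machine is quiet again.
 */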

static int check_interval = 5 * 60; /* 5 minutes */
static int next_interval; /* in jiffies */
static void mcheck_timer(struct work_struct *work);
static DECLARE_DELAYED_WORK(mcheck_work, mcheck_timer);

static void mcheck_check_cpu(void *info)
{
        if (mce_available(&current_cpu_data))
                do_machine_check(NULL, 0);
}

static void mcheck_timer(struct work_struct *work)
{
        on_each_cpu(mcheck_check_cpu, NULL, 1, 1);

        /*
         * Alert userspace if needed. If we logged an MCE, reduce the
         * polling interval, otherwise increase the polling interval.
         */
        if (mce_notify_user()) {
                next_interval = max(next_interval/2, HZ/100);
        } else {
                next_interval = min(next_interval * 2,
                                    (int)round_jiffies_relative(check_interval*HZ));
        }

        schedule_delayed_work(&mcheck_work, next_interval);
}

/*
 * This is only called from process context. This is where we do
 * anything we need to alert userspace about new MCEs. This is called
 * directly from the poller and also from entry.S and idle, thanks to
 * TIF_MCE_NOTIFY.
 */
int mce_notify_user(void)
{
        clear_thread_flag(TIF_MCE_NOTIFY);
        if (test_and_clear_bit(0, &notify_user)) {
                static unsigned long last_print;
                unsigned long now = jiffies;

                wake_up_interruptible(&mce_wait);
                if (trigger[0])
                        call_usermodehelper(trigger, trigger_argv, NULL,
                                            UMH_NO_WAIT);

                if (time_after_eq(now, last_print + (check_interval*HZ))) {
                        last_print = now;
                        printk(KERN_INFO "Machine check events logged\n");
                }

                return 1;
        }
        return 0;
}

/* see if the idle task needs to notify userspace */
static int
mce_idle_callback(struct notifier_block *nfb, unsigned long action, void *junk)
{
        /* IDLE_END should be safe - interrupts are back on */
        if (action == IDLE_END && test_thread_flag(TIF_MCE_NOTIFY))
                mce_notify_user();

        return NOTIFY_OK;
}

static struct notifier_block mce_idle_notifier = {
        .notifier_call = mce_idle_callback,
};

static __init int periodic_mcheck_init(void)
{
        next_interval = check_interval * HZ;
        if (next_interval)
                schedule_delayed_work(&mcheck_work,
                                      round_jiffies_relative(next_interval));
        idle_notifier_register(&mce_idle_notifier);
        return 0;
}
__initcall(periodic_mcheck_init);

/*
 * Initialize Machine Checks for a CPU.
 */
static void mce_init(void *dummy)
{
        u64 cap;
        int i;

        rdmsrl(MSR_IA32_MCG_CAP, cap);
        banks = cap & 0xff;
        if (banks > NR_BANKS) {
                printk(KERN_INFO "MCE: warning: using only %d banks\n",
                       NR_BANKS);
                banks = NR_BANKS;
        }
        /* Use accurate RIP reporting if available. */
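        /*
         * MCG_CAP bit 9 advertises the extended machine check state
         * registers (MCG_EAX..MCG_EIP) and bits 16-23 say how many of
         * them are implemented; MCG_EIP sits near the end of that
         * block, so enough of them must be present before its contents
         * can be trusted.
         */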
        if ((cap & (1<<9)) && ((cap >> 16) & 0xff) >= 9)
                rip_msr = MSR_IA32_MCG_EIP;

        /* Log the machine checks left over from the previous reset.
           This also clears all registers */
        do_machine_check(NULL, mce_bootlog ? -1 : -2);

        set_in_cr4(X86_CR4_MCE);

        if (cap & MCG_CTL_P)
                wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);

        for (i = 0; i < banks; i++) {
                wrmsrl(MSR_IA32_MC0_CTL+4*i, bank[i]);
                wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
        }
}

/* Add per CPU specific workarounds here */
static void __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c)
{
        /* This should be disabled by the BIOS, but isn't always */
        if (c->x86_vendor == X86_VENDOR_AMD && c->x86 == 15) {
                /* disable GART TBL walk error reporting, which trips off
                   incorrectly with the IOMMU & 3ware & Cerberus. */
                clear_bit(10, &bank[4]);
                /* Lots of broken BIOS around that don't clear them
                   by default and leave crap in there. Don't log. */
                mce_bootlog = 0;
        }
}

static void __cpuinit mce_cpu_features(struct cpuinfo_x86 *c)
{
        switch (c->x86_vendor) {
        case X86_VENDOR_INTEL:
                mce_intel_feature_init(c);
                break;
        case X86_VENDOR_AMD:
                mce_amd_feature_init(c);
                break;
        default:
                break;
        }
}

/*
 * Called for each booted CPU to set up machine checks.
 * Must be called with preempt off.
 */
void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
{
        static cpumask_t mce_cpus = CPU_MASK_NONE;

        mce_cpu_quirks(c);

        if (mce_dont_init ||
            cpu_test_and_set(smp_processor_id(), mce_cpus) ||
            !mce_available(c))
                return;

        mce_init(NULL);
        mce_cpu_features(c);
}

/*
 * Character device to read and clear the MCE log.
 */
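/*
 * Userspace consumes this device roughly as follows (an illustrative
 * sketch, not kernel code; mcelog(8) is the real consumer and decode()
 * stands in for its decoding logic). Note that mce_read() below only
 * accepts reads covering the whole log buffer:
 *
 *        int fd = open("/dev/mcelog", O_RDONLY);
 *        int recl;
 *        struct mce records[MCE_LOG_LEN];
 *
 *        ioctl(fd, MCE_GET_RECORD_LEN, &recl);
 *        ssize_t n = read(fd, records, sizeof(records));
 *        for (int i = 0; i < n / recl; i++)
 *                decode(&records[i]);
 */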

static DEFINE_SPINLOCK(mce_state_lock);
static int open_count; /* #times opened */
static int open_exclu; /* already open exclusive? */

static int mce_open(struct inode *inode, struct file *file)
{
        spin_lock(&mce_state_lock);

        if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
                spin_unlock(&mce_state_lock);
                return -EBUSY;
        }

        if (file->f_flags & O_EXCL)
                open_exclu = 1;
        open_count++;

        spin_unlock(&mce_state_lock);

        return nonseekable_open(inode, file);
}

static int mce_release(struct inode *inode, struct file *file)
{
        spin_lock(&mce_state_lock);

        open_count--;
        open_exclu = 0;

        spin_unlock(&mce_state_lock);

        return 0;
}

static void collect_tscs(void *data)
{
        unsigned long *cpu_tsc = (unsigned long *)data;

        rdtscll(cpu_tsc[smp_processor_id()]);
}

static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
                        loff_t *off)
{
        unsigned long *cpu_tsc;
        static DECLARE_MUTEX(mce_read_sem);
        unsigned next;
        char __user *buf = ubuf;
        int i, err;

        cpu_tsc = kmalloc(NR_CPUS * sizeof(long), GFP_KERNEL);
        if (!cpu_tsc)
                return -ENOMEM;

        down(&mce_read_sem);
        next = rcu_dereference(mcelog.next);

        /* Only supports full reads right now */
        if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) {
                up(&mce_read_sem);
                kfree(cpu_tsc);
                return -EINVAL;
        }

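        /*
         * Entries below 'next' have been claimed by writers but may not
         * be fully copied yet: wait briefly for the 'finished' flag and
         * discard any entry that stays unfinished for more than two
         * jiffies.
         */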
        err = 0;
        for (i = 0; i < next; i++) {
                unsigned long start = jiffies;

                while (!mcelog.entry[i].finished) {
                        if (time_after_eq(jiffies, start + 2)) {
                                memset(mcelog.entry + i, 0,
                                       sizeof(struct mce));
                                goto timeout;
                        }
                        cpu_relax();
                }
                smp_rmb();
                err |= copy_to_user(buf, mcelog.entry + i, sizeof(struct mce));
                buf += sizeof(struct mce);
 timeout:
                ;
        }

        memset(mcelog.entry, 0, next * sizeof(struct mce));
        mcelog.next = 0;

        synchronize_sched();

        /*
         * Collect entries that were still getting written before the
         * synchronize.
         */
        on_each_cpu(collect_tscs, cpu_tsc, 1, 1);
        for (i = next; i < MCE_LOG_LEN; i++) {
                if (mcelog.entry[i].finished &&
                    mcelog.entry[i].tsc < cpu_tsc[mcelog.entry[i].cpu]) {
                        err |= copy_to_user(buf, mcelog.entry+i,
                                            sizeof(struct mce));
                        smp_rmb();
                        buf += sizeof(struct mce);
                        memset(&mcelog.entry[i], 0, sizeof(struct mce));
                }
        }
        up(&mce_read_sem);
        kfree(cpu_tsc);
        return err ? -EFAULT : buf - ubuf;
}

static unsigned int mce_poll(struct file *file, poll_table *wait)
{
        poll_wait(file, &mce_wait, wait);
        if (rcu_dereference(mcelog.next))
                return POLLIN | POLLRDNORM;
        return 0;
}

static int mce_ioctl(struct inode *i, struct file *f, unsigned int cmd,
                     unsigned long arg)
{
        int __user *p = (int __user *)arg;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
        switch (cmd) {
        case MCE_GET_RECORD_LEN:
                return put_user(sizeof(struct mce), p);
        case MCE_GET_LOG_LEN:
                return put_user(MCE_LOG_LEN, p);
        case MCE_GETCLEAR_FLAGS: {
                unsigned flags;

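                /*
                 * Atomically read and clear the flags word; retry if a
                 * concurrent mce_log() sets a bit between our read and
                 * the cmpxchg().
                 */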
                do {
                        flags = mcelog.flags;
                } while (cmpxchg(&mcelog.flags, flags, 0) != flags);
                return put_user(flags, p);
        }
        default:
                return -ENOTTY;
        }
}

static const struct file_operations mce_chrdev_ops = {
        .open = mce_open,
        .release = mce_release,
        .read = mce_read,
        .poll = mce_poll,
        .ioctl = mce_ioctl,
};

static struct miscdevice mce_log_device = {
        MISC_MCELOG_MINOR,
        "mcelog",
        &mce_chrdev_ops,
};

static unsigned long old_cr4 __initdata;

void __init stop_mce(void)
{
        old_cr4 = read_cr4();
        clear_in_cr4(X86_CR4_MCE);
}

void __init restart_mce(void)
{
        if (old_cr4 & X86_CR4_MCE)
                set_in_cr4(X86_CR4_MCE);
}

/*
 * Old style boot options parsing. Only for compatibility.
 */
static int __init mcheck_disable(char *str)
{
        mce_dont_init = 1;
        return 1;
}

/*
 * mce=off disables machine check. Note you can re-enable it later
 * using sysfs.
 * mce=TOLERANCELEVEL (number, see above)
 * mce=bootlog Log MCEs from before booting. Disabled by default on AMD.
 * mce=nobootlog Don't log MCEs from before booting.
 */
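/*
 * Examples:
 *        mce=off         disable machine checks entirely
 *        mce=3           never panic or SIGBUS, log everything
 *        mce=nobootlog   don't log MCEs left over from before the reboot
 */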
static int __init mcheck_enable(char *str)
{
        if (!strcmp(str, "off"))
                mce_dont_init = 1;
        else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
                mce_bootlog = str[0] == 'b';
        else if (isdigit(str[0]))
                get_option(&str, &tolerant);
        else
                printk(KERN_INFO "mce= argument %s ignored. Please use /sys\n",
                       str);
        return 1;
}

__setup("nomce", mcheck_disable);
__setup("mce=", mcheck_enable);

/*
 * Sysfs support
 */

/* On resume clear all MCE state. Don't want to see leftovers from the BIOS.
   Only one CPU is active at this time, the others get re-added later using
   CPU hotplug. */
static int mce_resume(struct sys_device *dev)
{
        mce_init(NULL);
        return 0;
}

/* Reinit MCEs after user configuration changes */
static void mce_restart(void)
{
        if (next_interval)
                cancel_delayed_work(&mcheck_work);
        /* Timer race is harmless here */
        on_each_cpu(mce_init, NULL, 1, 1);
        next_interval = check_interval * HZ;
        if (next_interval)
                schedule_delayed_work(&mcheck_work,
                                      round_jiffies_relative(next_interval));
}

static struct sysdev_class mce_sysclass = {
        .resume = mce_resume,
        set_kset_name("machinecheck"),
};

DEFINE_PER_CPU(struct sys_device, device_mce);

/* Why are there no generic functions for this? */
#define ACCESSOR(name, var, start) \
static ssize_t show_ ## name(struct sys_device *s, char *buf) { \
        return sprintf(buf, "%lx\n", (unsigned long)var); \
} \
static ssize_t set_ ## name(struct sys_device *s, const char *buf, \
                            size_t siz) { \
        char *end; \
        unsigned long new = simple_strtoul(buf, &end, 0); \
        if (end == buf) \
                return -EINVAL; \
        var = new; \
        start; \
        return end-buf; \
} \
static SYSDEV_ATTR(name, 0644, show_ ## name, set_ ## name);
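
/*
 * For illustration, ACCESSOR(tolerant, tolerant, ) expands to roughly:
 *
 *        static ssize_t show_tolerant(struct sys_device *s, char *buf)
 *        {
 *                return sprintf(buf, "%lx\n", (unsigned long)tolerant);
 *        }
 *        static ssize_t set_tolerant(struct sys_device *s, const char *buf,
 *                                    size_t siz)
 *        {
 *                char *end;
 *                unsigned long new = simple_strtoul(buf, &end, 0);
 *                if (end == buf)
 *                        return -EINVAL;
 *                tolerant = new;
 *                return end - buf;
 *        }
 *        static SYSDEV_ATTR(tolerant, 0644, show_tolerant, set_tolerant);
 */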

/* TBD should generate these dynamically based on number of available banks */
ACCESSOR(bank0ctl, bank[0], mce_restart())
ACCESSOR(bank1ctl, bank[1], mce_restart())
ACCESSOR(bank2ctl, bank[2], mce_restart())
ACCESSOR(bank3ctl, bank[3], mce_restart())
ACCESSOR(bank4ctl, bank[4], mce_restart())
ACCESSOR(bank5ctl, bank[5], mce_restart())

static ssize_t show_trigger(struct sys_device *s, char *buf)
{
        strcpy(buf, trigger);
        strcat(buf, "\n");
        return strlen(trigger) + 1;
}

static ssize_t set_trigger(struct sys_device *s, const char *buf, size_t siz)
{
        char *p;
        int len;

        strncpy(trigger, buf, sizeof(trigger));
        trigger[sizeof(trigger)-1] = 0;
        len = strlen(trigger);
        p = strchr(trigger, '\n');
        if (p)
                *p = 0;
        return len;
}

static SYSDEV_ATTR(trigger, 0644, show_trigger, set_trigger);
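
/*
 * Writing a program path to the trigger attribute above makes
 * mce_notify_user() run it whenever new events are logged, e.g.
 * (sysfs path assumed from the "machinecheck" sysdev class registered
 * above):
 *
 *        echo /usr/local/sbin/mce-handler > \
 *                /sys/devices/system/machinecheck/machinecheck0/trigger
 */
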
ACCESSOR(tolerant, tolerant, )
ACCESSOR(check_interval, check_interval, mce_restart())

static struct sysdev_attribute *mce_attributes[] = {
        &attr_bank0ctl, &attr_bank1ctl, &attr_bank2ctl,
        &attr_bank3ctl, &attr_bank4ctl, &attr_bank5ctl,
        &attr_tolerant, &attr_check_interval, &attr_trigger,
        NULL
};

static cpumask_t mce_device_initialized = CPU_MASK_NONE;

/* Per cpu sysdev init. All of the cpus still share the same ctl bank */
static __cpuinit int mce_create_device(unsigned int cpu)
{
        int err;
        int i;

        if (!mce_available(&boot_cpu_data))
                return -EIO;

        memset(&per_cpu(device_mce, cpu).kobj, 0, sizeof(struct kobject));
        per_cpu(device_mce, cpu).id = cpu;
        per_cpu(device_mce, cpu).cls = &mce_sysclass;

        err = sysdev_register(&per_cpu(device_mce, cpu));
        if (err)
                return err;

        for (i = 0; mce_attributes[i]; i++) {
                err = sysdev_create_file(&per_cpu(device_mce, cpu),
                                         mce_attributes[i]);
                if (err)
                        goto error;
        }
        cpu_set(cpu, mce_device_initialized);

        return 0;
error:
        while (i--) {
                sysdev_remove_file(&per_cpu(device_mce, cpu),
                                   mce_attributes[i]);
        }
        sysdev_unregister(&per_cpu(device_mce, cpu));

        return err;
}

static void mce_remove_device(unsigned int cpu)
{
        int i;

        if (!cpu_isset(cpu, mce_device_initialized))
                return;

        for (i = 0; mce_attributes[i]; i++)
                sysdev_remove_file(&per_cpu(device_mce, cpu),
                                   mce_attributes[i]);
        sysdev_unregister(&per_cpu(device_mce, cpu));
        cpu_clear(cpu, mce_device_initialized);
}

/* Get notified when a cpu comes on/off. Be hotplug friendly. */
static int
mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;

        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                mce_create_device(cpu);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                mce_remove_device(cpu);
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block mce_cpu_notifier = {
        .notifier_call = mce_cpu_callback,
};

static __init int mce_init_device(void)
{
        int err;
        int i = 0;

        if (!mce_available(&boot_cpu_data))
                return -EIO;

        err = sysdev_class_register(&mce_sysclass);
        if (err)
                return err;

        for_each_online_cpu(i) {
                err = mce_create_device(i);
                if (err)
                        return err;
        }

        register_hotcpu_notifier(&mce_cpu_notifier);
        misc_register(&mce_log_device);
        return err;
}

device_initcall(mce_init_device);