1 /*
2 * Machine check handler.
3 *
4 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
5 * Rest from unknown author(s).
6 * 2004 Andi Kleen. Rewrote most of it.
7 * Copyright 2008 Intel Corporation
8 * Author: Andi Kleen
9 */
10 #include <linux/thread_info.h>
11 #include <linux/capability.h>
12 #include <linux/miscdevice.h>
13 #include <linux/ratelimit.h>
14 #include <linux/kallsyms.h>
15 #include <linux/rcupdate.h>
16 #include <linux/kobject.h>
17 #include <linux/uaccess.h>
18 #include <linux/kdebug.h>
19 #include <linux/kernel.h>
20 #include <linux/percpu.h>
21 #include <linux/string.h>
22 #include <linux/sysdev.h>
23 #include <linux/ctype.h>
24 #include <linux/sched.h>
25 #include <linux/sysfs.h>
26 #include <linux/types.h>
27 #include <linux/init.h>
28 #include <linux/kmod.h>
29 #include <linux/poll.h>
30 #include <linux/cpu.h>
31 #include <linux/smp.h>
32 #include <linux/fs.h>
33
34 #include <asm/processor.h>
35 #include <asm/idle.h>
36 #include <asm/mce.h>
37 #include <asm/msr.h>
38
39 #include "mce.h"
40
41 /* Handle unconfigured int18 (should never happen) */
42 static void unexpected_machine_check(struct pt_regs *regs, long error_code)
43 {
44 printk(KERN_ERR "CPU#%d: Unexpected int18 (Machine Check).\n",
45 smp_processor_id());
46 }
47
48 /* Call the installed machine check handler for this CPU setup. */
49 void (*machine_check_vector)(struct pt_regs *, long error_code) =
50 unexpected_machine_check;
51
52 int mce_disabled;
53
54 #ifdef CONFIG_X86_NEW_MCE
55
56 #define MISC_MCELOG_MINOR 227
57
58 atomic_t mce_entry;
59
60 /*
61 * Tolerant levels:
62 * 0: always panic on uncorrected errors, log corrected errors
63 * 1: panic or SIGBUS on uncorrected errors, log corrected errors
64 * 2: SIGBUS or log uncorrected errors (if possible), log corrected errors
65 * 3: never panic or SIGBUS, log all errors (for testing only)
66 */
67 static int tolerant = 1;
68 static int banks;
69 static u64 *bank;
70 static unsigned long notify_user;
71 static int rip_msr;
72 static int mce_bootlog = -1;
73
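/*
 * Illustrative sketch only (not part of this file, compiled out): the
 * tolerant policy table above written as a helper. The enum and the
 * function name are hypothetical; do_machine_check() below implements
 * the real decisions inline.
 */
#if 0
enum mce_outcome { MCE_DO_PANIC, MCE_DO_SIGBUS, MCE_DO_LOG };

static enum mce_outcome tolerant_outcome(int level, int uc, int recoverable)
{
	if (!uc || level >= 3)
		return MCE_DO_LOG;	/* corrected, or "never panic" mode */
	if (level == 0 || (!recoverable && level < 2))
		return MCE_DO_PANIC;	/* no safe way to continue */
	return MCE_DO_SIGBUS;		/* kill the affected task */
}
#endif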
74 static char trigger[128];
75 static char *trigger_argv[2] = { trigger, NULL };
76
77 static unsigned long dont_init_banks;
78
79 static DECLARE_WAIT_QUEUE_HEAD(mce_wait);
80
81 /* MCA banks polled by the periodic polling timer for corrected events */
82 DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
83 [0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
84 };
85
86 static inline int skip_bank_init(int i)
87 {
88 return i < BITS_PER_LONG && test_bit(i, &dont_init_banks);
89 }
90
91 /* Do the initial setup of a struct mce */
92 void mce_setup(struct mce *m)
93 {
94 memset(m, 0, sizeof(struct mce));
95 m->cpu = smp_processor_id();
96 rdtscll(m->tsc);
97 }
98
99 DEFINE_PER_CPU(struct mce, injectm);
100 EXPORT_PER_CPU_SYMBOL_GPL(injectm);
101
102 /*
103 * Lockless MCE logging infrastructure.
104 * This avoids deadlocks on printk locks without having to break locks. Also
105 * separate MCEs from kernel messages to avoid bogus bug reports.
106 */
107
108 static struct mce_log mcelog = {
109 MCE_LOG_SIGNATURE,
110 MCE_LOG_LEN,
111 };
112
113 void mce_log(struct mce *mce)
114 {
115 unsigned next, entry;
116
117 mce->finished = 0;
118 wmb();
119 for (;;) {
120 entry = rcu_dereference(mcelog.next);
121 for (;;) {
122 /*
123 * When the buffer fills up, discard new entries.
124 * Assume that the earlier errors are the more
125 * interesting ones:
126 */
127 if (entry >= MCE_LOG_LEN) {
128 set_bit(MCE_OVERFLOW,
129 (unsigned long *)&mcelog.flags);
130 return;
131 }
132 /* Old leftover entry. Skip: */
133 if (mcelog.entry[entry].finished) {
134 entry++;
135 continue;
136 }
137 break;
138 }
139 smp_rmb();
140 next = entry + 1;
141 if (cmpxchg(&mcelog.next, entry, next) == entry)
142 break;
143 }
144 memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
145 wmb();
146 mcelog.entry[entry].finished = 1;
147 wmb();
148
149 set_bit(0, &notify_user);
150 }
151
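/*
 * The loop above is a lock-free multi-producer reservation. A distilled
 * sketch of the same pattern (illustrative only, compiled out; the real
 * loop additionally skips slots already published by slower writers): a
 * writer snapshots the tail and claims its slot with cmpxchg(); setting
 * 'finished' afterwards is what publishes the record to readers.
 */
#if 0
static int ring_claim_slot(unsigned *next, unsigned len)
{
	unsigned entry;

	do {
		entry = *next;		/* snapshot the tail */
		if (entry >= len)
			return -1;	/* full: drop the new record */
	} while (cmpxchg(next, entry, entry + 1) != entry);

	return entry;			/* slot is exclusively ours */
}
#endif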
152 static void print_mce(struct mce *m)
153 {
154 printk(KERN_EMERG "\n"
155 KERN_EMERG "HARDWARE ERROR\n"
156 KERN_EMERG
157 "CPU %d: Machine Check Exception: %16Lx Bank %d: %016Lx\n",
158 m->cpu, m->mcgstatus, m->bank, m->status);
159 if (m->ip) {
160 printk(KERN_EMERG "RIP%s %02x:<%016Lx> ",
161 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
162 m->cs, m->ip);
163 if (m->cs == __KERNEL_CS)
164 print_symbol("{%s}", m->ip);
165 printk("\n");
166 }
167 printk(KERN_EMERG "TSC %llx ", m->tsc);
168 if (m->addr)
169 printk("ADDR %llx ", m->addr);
170 if (m->misc)
171 printk("MISC %llx ", m->misc);
172 printk("\n");
173 printk(KERN_EMERG "This is not a software problem!\n");
174 printk(KERN_EMERG "Run through mcelog --ascii to decode "
175 "and contact your hardware vendor\n");
176 }
177
178 static void mce_panic(char *msg, struct mce *backup, u64 start)
179 {
180 int i;
181
182 bust_spinlocks(1);
183 console_verbose();
184 for (i = 0; i < MCE_LOG_LEN; i++) {
185 u64 tsc = mcelog.entry[i].tsc;
186
187 if ((s64)(tsc - start) < 0)
188 continue;
189 print_mce(&mcelog.entry[i]);
190 if (backup && mcelog.entry[i].tsc == backup->tsc)
191 backup = NULL;
192 }
193 if (backup)
194 print_mce(backup);
195 panic(msg);
196 }
197
198 /* Support code for software error injection */
199
200 static int msr_to_offset(u32 msr)
201 {
202 unsigned bank = __get_cpu_var(injectm.bank);
203 if (msr == rip_msr)
204 return offsetof(struct mce, ip);
205 if (msr == MSR_IA32_MC0_STATUS + bank*4)
206 return offsetof(struct mce, status);
207 if (msr == MSR_IA32_MC0_ADDR + bank*4)
208 return offsetof(struct mce, addr);
209 if (msr == MSR_IA32_MC0_MISC + bank*4)
210 return offsetof(struct mce, misc);
211 if (msr == MSR_IA32_MCG_STATUS)
212 return offsetof(struct mce, mcgstatus);
213 return -1;
214 }
215
216 /* MSR access wrappers used for error injection */
217 static u64 mce_rdmsrl(u32 msr)
218 {
219 u64 v;
220 if (__get_cpu_var(injectm).finished) {
221 int offset = msr_to_offset(msr);
222 if (offset < 0)
223 return 0;
224 return *(u64 *)((char *)&__get_cpu_var(injectm) + offset);
225 }
226 rdmsrl(msr, v);
227 return v;
228 }
229
230 static void mce_wrmsrl(u32 msr, u64 v)
231 {
232 if (__get_cpu_var(injectm).finished) {
233 int offset = msr_to_offset(msr);
234 if (offset >= 0)
235 *(u64 *)((char *)&__get_cpu_var(injectm) + offset) = v;
236 return;
237 }
238 wrmsrl(msr, v);
239 }
240
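/*
 * Illustrative use of the wrappers above (a sketch of what mce-inject
 * style test code might do; compiled out, and the function name is
 * hypothetical): stage fake register values in the per-CPU injectm
 * record and set 'finished' so that mce_rdmsrl()/mce_wrmsrl() serve the
 * staged values instead of touching real hardware MSRs.
 */
#if 0
static void stage_injected_mce(const struct mce *fake)
{
	struct mce *i = &__get_cpu_var(injectm);

	*i = *fake;		/* bank, status, addr, misc, mcgstatus... */
	i->finished = 1;	/* redirect the MSR wrappers to this record */
}
#endif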
241 int mce_available(struct cpuinfo_x86 *c)
242 {
243 if (mce_disabled)
244 return 0;
245 return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
246 }
247
248 static inline void mce_get_rip(struct mce *m, struct pt_regs *regs)
249 {
250 if (regs && (m->mcgstatus & MCG_STATUS_RIPV)) {
251 m->ip = regs->ip;
252 m->cs = regs->cs;
253 } else {
254 m->ip = 0;
255 m->cs = 0;
256 }
257 if (rip_msr) {
258 /* Assume the RIP in the MSR is exact. Is this true? */
259 m->mcgstatus |= MCG_STATUS_EIPV;
260 m->ip = mce_rdmsrl(rip_msr);
261 m->cs = 0;
262 }
263 }
264
265 /*
266 * Poll for corrected events or events that happened before reset.
267 * Those are just logged through /dev/mcelog.
268 *
269 * This is executed in standard interrupt context.
270 */
271 void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
272 {
273 struct mce m;
274 int i;
275
276 mce_setup(&m);
277
278 m.mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
279 for (i = 0; i < banks; i++) {
280 if (!bank[i] || !test_bit(i, *b))
281 continue;
282
283 m.misc = 0;
284 m.addr = 0;
285 m.bank = i;
286 m.tsc = 0;
287
288 barrier();
289 m.status = mce_rdmsrl(MSR_IA32_MC0_STATUS + i*4);
290 if (!(m.status & MCI_STATUS_VAL))
291 continue;
292
293 /*
294 * Uncorrected events are handled by the exception handler
295 * when it is enabled. But when the exception is disabled, log
296 * everything.
297 *
298 * TBD do the same check for MCI_STATUS_EN here?
299 */
300 if ((m.status & MCI_STATUS_UC) && !(flags & MCP_UC))
301 continue;
302
303 if (m.status & MCI_STATUS_MISCV)
304 m.misc = mce_rdmsrl(MSR_IA32_MC0_MISC + i*4);
305 if (m.status & MCI_STATUS_ADDRV)
306 m.addr = mce_rdmsrl(MSR_IA32_MC0_ADDR + i*4);
307
308 if (!(flags & MCP_TIMESTAMP))
309 m.tsc = 0;
310 /*
311 * Don't get the IP here because it's unlikely to
312 * have anything to do with the actual error location.
313 */
314 if (!(flags & MCP_DONTLOG)) {
315 mce_log(&m);
316 add_taint(TAINT_MACHINE_CHECK);
317 }
318
319 /*
320 * Clear state for this bank.
321 */
322 mce_wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
323 }
324
325 /*
326 * Don't clear MCG_STATUS here because it's only defined for
327 * exceptions.
328 */
329
330 sync_core();
331 }
332 EXPORT_SYMBOL_GPL(machine_check_poll);
333
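/*
 * Sketch of a typical caller (illustrative, compiled out): a corrected
 * machine check interrupt handler, for instance, might poll only the
 * banks this CPU is responsible for and ask for timestamped records.
 */
#if 0
static void example_cmci_handler(void)
{
	machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_poll_banks));
}
#endif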
334 /*
335 * The actual machine check handler. This only handles real
336 * exceptions when something got corrupted coming in through int 18.
337 *
338 * This is executed in NMI context, not subject to normal locking rules. This
339 * implies that most kernel services cannot be safely used. Don't even
340 * think about putting a printk in there!
341 */
342 void do_machine_check(struct pt_regs *regs, long error_code)
343 {
344 struct mce m, panicm;
345 int panicm_found = 0;
346 u64 mcestart = 0;
347 int i;
348 /*
349 * If no_way_out gets set, there is no safe way to recover from this
350 * MCE. If tolerant is cranked up, we'll try anyway.
351 */
352 int no_way_out = 0;
353 /*
354 * If kill_it gets set, there might be a way to recover from this
355 * error.
356 */
357 int kill_it = 0;
358 DECLARE_BITMAP(toclear, MAX_NR_BANKS);
359
360 atomic_inc(&mce_entry);
361
362 if (notify_die(DIE_NMI, "machine check", regs, error_code,
363 18, SIGKILL) == NOTIFY_STOP)
364 goto out;
365 if (!banks)
366 goto out;
367
368 mce_setup(&m);
369
370 m.mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
371
372 /* if the restart IP is not valid, we're done for */
373 if (!(m.mcgstatus & MCG_STATUS_RIPV))
374 no_way_out = 1;
375
376 rdtscll(mcestart);
377 barrier();
378
379 for (i = 0; i < banks; i++) {
380 __clear_bit(i, toclear);
381 if (!bank[i])
382 continue;
383
384 m.misc = 0;
385 m.addr = 0;
386 m.bank = i;
387
388 m.status = mce_rdmsrl(MSR_IA32_MC0_STATUS + i*4);
389 if ((m.status & MCI_STATUS_VAL) == 0)
390 continue;
391
392 /*
393 * Corrected errors (MCI_STATUS_UC clear) are handled by
394 * machine_check_poll(). Leave them alone.
395 */
396 if ((m.status & MCI_STATUS_UC) == 0)
397 continue;
398
399 /*
400 * Set taint even when machine check was not enabled.
401 */
402 add_taint(TAINT_MACHINE_CHECK);
403
404 __set_bit(i, toclear);
405
406 if (m.status & MCI_STATUS_EN) {
407 /* if PCC was set, there's no way out */
408 no_way_out |= !!(m.status & MCI_STATUS_PCC);
409 /*
410 * If this error was uncorrectable and there was
411 * an overflow, we're in trouble. If no overflow,
412 * we might get away with just killing a task.
413 */
414 if (m.status & MCI_STATUS_UC) {
415 if (tolerant < 1 || m.status & MCI_STATUS_OVER)
416 no_way_out = 1;
417 kill_it = 1;
418 }
419 } else {
420 /*
421 * Machine check event was not enabled. Clear, but
422 * ignore.
423 */
424 continue;
425 }
426
427 if (m.status & MCI_STATUS_MISCV)
428 m.misc = mce_rdmsrl(MSR_IA32_MC0_MISC + i*4);
429 if (m.status & MCI_STATUS_ADDRV)
430 m.addr = mce_rdmsrl(MSR_IA32_MC0_ADDR + i*4);
431
432 mce_get_rip(&m, regs);
433 mce_log(&m);
434
435 /*
436 * Did this bank cause the exception?
437 *
438 * Assume that the bank with uncorrectable errors did it,
439 * and that there is only a single one:
440 */
441 if ((m.status & MCI_STATUS_UC) &&
442 (m.status & MCI_STATUS_EN)) {
443 panicm = m;
444 panicm_found = 1;
445 }
446 }
447
448 /*
449 * If we didn't find an uncorrectable error, pick
450 * the last one (shouldn't happen, just being safe).
451 */
452 if (!panicm_found)
453 panicm = m;
454
455 /*
456 * If we have decided that we just CAN'T continue, and the user
457 * has not set tolerant to an insane level, give up and die.
458 */
459 if (no_way_out && tolerant < 3)
460 mce_panic("Machine check", &panicm, mcestart);
461
462 /*
463 * If the error seems to be unrecoverable, something should be
464 * done. Try to kill as little as possible. If we can kill just
465 * one task, do that. If the user has set the tolerance very
466 * high, don't try to do anything at all.
467 */
468 if (kill_it && tolerant < 3) {
469 int user_space = 0;
470
471 /*
472 * If the EIPV bit is set, it means the saved IP is the
473 * instruction which caused the MCE.
474 */
475 if (m.mcgstatus & MCG_STATUS_EIPV)
476 user_space = panicm.ip && (panicm.cs & 3);
477
478 /*
479 * If we know that the error was in user space, send a
480 * SIGBUS. Otherwise, panic if tolerance is low.
481 *
482 * force_sig() takes an awful lot of locks and has a slight
483 * risk of deadlocking.
484 */
485 if (user_space) {
486 force_sig(SIGBUS, current);
487 } else if (panic_on_oops || tolerant < 2) {
488 mce_panic("Uncorrected machine check",
489 &panicm, mcestart);
490 }
491 }
492
493 /* notify userspace ASAP */
494 set_thread_flag(TIF_MCE_NOTIFY);
495
496 /* the last thing we do is clear state */
497 for (i = 0; i < banks; i++) {
498 if (test_bit(i, toclear))
499 mce_wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
500 }
501 mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
502 out:
503 atomic_dec(&mce_entry);
504 sync_core();
505 }
506 EXPORT_SYMBOL_GPL(do_machine_check);
507
508 #ifdef CONFIG_X86_MCE_INTEL
509 /**
510 * mce_log_therm_throt_event - Logs the thermal throttling event to mcelog
511 * @status: Event status information
512 *
513 * This function should be called by the thermal interrupt after the
514 * event has been processed and the decision was made to log the event
515 * further.
516 *
517 * The status parameter will be saved to the 'status' field of 'struct mce'
518 * and historically has been the register value of the
519 * IA32_THERM_STATUS (Intel) MSR (MSR_IA32_THERM_STATUS in the kernel
520 * headers).
521 */
522 void mce_log_therm_throt_event(__u64 status)
523 {
524 struct mce m;
525
526 mce_setup(&m);
527 m.bank = MCE_THERMAL_BANK;
528 m.status = status;
529 mce_log(&m);
530 }
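/*
 * Sketch of the expected caller (illustrative, compiled out): the
 * thermal interrupt reads the thermal status MSR, decides the event is
 * worth logging, and hands the raw value to the function above.
 */
#if 0
static void example_thermal_interrupt(void)
{
	u64 status;

	rdmsrl(MSR_IA32_THERM_STATUS, status);
	if (status & 0x1)		/* currently throttled */
		mce_log_therm_throt_event(status);
}
#endif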
531 #endif /* CONFIG_X86_MCE_INTEL */
532
533 /*
534 * Periodic polling timer for "silent" machine check errors. If the
535 * poller finds an MCE, poll 2x faster. When the poller finds no more
536 * errors, poll 2x slower (up to check_interval seconds).
537 */
538 static int check_interval = 5 * 60; /* 5 minutes */
539
540 static DEFINE_PER_CPU(int, next_interval); /* in jiffies */
541 static DEFINE_PER_CPU(struct timer_list, mce_timer);
542
543 static void mcheck_timer(unsigned long data)
544 {
545 struct timer_list *t = &per_cpu(mce_timer, data);
546 int *n;
547
548 WARN_ON(smp_processor_id() != data);
549
550 if (mce_available(&current_cpu_data)) {
551 machine_check_poll(MCP_TIMESTAMP,
552 &__get_cpu_var(mce_poll_banks));
553 }
554
555 /*
556 * Alert userspace if needed. If we logged an MCE, reduce the
557 * polling interval, otherwise increase the polling interval.
558 */
559 n = &__get_cpu_var(next_interval);
560 if (mce_notify_user())
561 *n = max(*n/2, HZ/100);
562 else
563 *n = min(*n*2, (int)round_jiffies_relative(check_interval*HZ));
564
565 t->expires = jiffies + *n;
566 add_timer(t);
567 }
568
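/*
 * Worked example of the adaptive interval above, assuming HZ=1000 and
 * the default check_interval of 300s: polling starts at 300000 jiffies;
 * each poll that logged an event halves the interval, down to the
 * HZ/100 = 10 jiffy (10ms) floor, and each idle poll doubles it again,
 * back up to the ~300s ceiling.
 */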
569 static void mce_do_trigger(struct work_struct *work)
570 {
571 call_usermodehelper(trigger, trigger_argv, NULL, UMH_NO_WAIT);
572 }
573
574 static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
575
576 /*
577 * Notify the user(s) about new machine check events.
578 * Can be called from interrupt context, but not from machine check/NMI
579 * context.
580 */
581 int mce_notify_user(void)
582 {
583 /* Not more than two messages every minute */
584 static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
585
586 clear_thread_flag(TIF_MCE_NOTIFY);
587
588 if (test_and_clear_bit(0, &notify_user)) {
589 wake_up_interruptible(&mce_wait);
590
591 /*
592 * There is no risk of missing notifications because
593 * work_pending is always cleared before the function is
594 * executed.
595 */
596 if (trigger[0] && !work_pending(&mce_trigger_work))
597 schedule_work(&mce_trigger_work);
598
599 if (__ratelimit(&ratelimit))
600 printk(KERN_INFO "Machine check events logged\n");
601
602 return 1;
603 }
604 return 0;
605 }
606 EXPORT_SYMBOL_GPL(mce_notify_user);
607
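/*
 * Usage sketch: the trigger consumed above is installed from user space
 * through the sysfs attribute defined later in this file, e.g. (the
 * path assumes the usual sysdev layout; the helper name is made up):
 *
 *	echo /usr/local/sbin/mce-handler > \
 *		/sys/devices/system/machinecheck/machinecheck0/trigger
 *
 * The helper then runs via call_usermodehelper() each time new events
 * are logged, de-duplicated by the work_pending() check above.
 */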
608 /*
609 * Initialize Machine Checks for a CPU.
610 */
611 static int mce_cap_init(void)
612 {
613 unsigned b;
614 u64 cap;
615
616 rdmsrl(MSR_IA32_MCG_CAP, cap);
617
618 b = cap & MCG_BANKCNT_MASK;
619 printk(KERN_INFO "mce: CPU supports %d MCE banks\n", b);
620
621 if (b > MAX_NR_BANKS) {
622 printk(KERN_WARNING
623 "MCE: Using only %u machine check banks out of %u\n",
624 MAX_NR_BANKS, b);
625 b = MAX_NR_BANKS;
626 }
627
628 /* Don't support asymmetric configurations today */
629 WARN_ON(banks != 0 && b != banks);
630 banks = b;
631 if (!bank) {
632 bank = kmalloc(banks * sizeof(u64), GFP_KERNEL);
633 if (!bank)
634 return -ENOMEM;
635 memset(bank, 0xff, banks * sizeof(u64));
636 }
637
638 /* Use accurate RIP reporting if available. */
639 if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9)
640 rip_msr = MSR_IA32_MCG_EIP;
641
642 return 0;
643 }
644
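/*
 * Worked example for the capability decoding above (constructed value,
 * not taken from real hardware): cap = 0x90306 means 6 banks (low
 * byte), MCG_CTL_P (bit 8) and MCG_EXT_P (bit 9) set, and
 * MCG_EXT_CNT = 9 extended MSRs, so rip_msr is switched to
 * MSR_IA32_MCG_EIP for precise RIP reporting.
 */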
645 static void mce_init(void)
646 {
647 mce_banks_t all_banks;
648 u64 cap;
649 int i;
650
651 /*
652 * Log the machine checks left over from the previous reset.
653 */
654 bitmap_fill(all_banks, MAX_NR_BANKS);
655 machine_check_poll(MCP_UC|(!mce_bootlog ? MCP_DONTLOG : 0), &all_banks);
656
657 set_in_cr4(X86_CR4_MCE);
658
659 rdmsrl(MSR_IA32_MCG_CAP, cap);
660 if (cap & MCG_CTL_P)
661 wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
662
663 for (i = 0; i < banks; i++) {
664 if (skip_bank_init(i))
665 continue;
666 wrmsrl(MSR_IA32_MC0_CTL+4*i, bank[i]);
667 wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
668 }
669 }
670
671 /* Add per-CPU specific workarounds here */
672 static void mce_cpu_quirks(struct cpuinfo_x86 *c)
673 {
674 /* This should be disabled by the BIOS, but isn't always */
675 if (c->x86_vendor == X86_VENDOR_AMD) {
676 if (c->x86 == 15 && banks > 4) {
677 /*
678 * disable GART TBL walk error reporting, which
679 * trips off incorrectly with the IOMMU & 3ware
680 * & Cerberus:
681 */
682 clear_bit(10, (unsigned long *)&bank[4]);
683 }
684 if (c->x86 <= 17 && mce_bootlog < 0) {
685 /*
686 * Lots of broken BIOSes around that don't clear the banks
687 * by default and leave stale data in there. Don't log:
688 */
689 mce_bootlog = 0;
690 }
691 /*
692 * Various K7s with broken bank 0 around. Always disable
693 * by default.
694 */
695 if (c->x86 == 6)
696 bank[0] = 0;
697 }
698
699 if (c->x86_vendor == X86_VENDOR_INTEL) {
700 /*
701 * The SDM documents that on family 6 bank 0 should not be written
702 * because it aliases to another special BIOS-controlled
703 * register.
704 * But it's not aliased anymore on model 0x1a+.
705 * Don't ignore bank 0 completely because there could be a
706 * valid event later; merely don't write CTL0.
707 */
708
709 if (c->x86 == 6 && c->x86_model < 0x1A)
710 __set_bit(0, &dont_init_banks);
711 }
712 }
713
714 static void __cpuinit mce_ancient_init(struct cpuinfo_x86 *c)
715 {
716 if (c->x86 != 5)
717 return;
718 switch (c->x86_vendor) {
719 case X86_VENDOR_INTEL:
720 if (mce_p5_enabled())
721 intel_p5_mcheck_init(c);
722 break;
723 case X86_VENDOR_CENTAUR:
724 winchip_mcheck_init(c);
725 break;
726 }
727 }
728
729 static void mce_cpu_features(struct cpuinfo_x86 *c)
730 {
731 switch (c->x86_vendor) {
732 case X86_VENDOR_INTEL:
733 mce_intel_feature_init(c);
734 break;
735 case X86_VENDOR_AMD:
736 mce_amd_feature_init(c);
737 break;
738 default:
739 break;
740 }
741 }
742
743 static void mce_init_timer(void)
744 {
745 struct timer_list *t = &__get_cpu_var(mce_timer);
746 int *n = &__get_cpu_var(next_interval);
747
748 *n = check_interval * HZ;
749 if (!*n)
750 return;
751 setup_timer(t, mcheck_timer, smp_processor_id());
752 t->expires = round_jiffies(jiffies + *n);
753 add_timer(t);
754 }
755
756 /*
757 * Called for each booted CPU to set up machine checks.
758 * Must be called with preempt off:
759 */
760 void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
761 {
762 if (mce_disabled)
763 return;
764
765 mce_ancient_init(c);
766
767 if (!mce_available(c))
768 return;
769
770 if (mce_cap_init() < 0) {
771 mce_disabled = 1;
772 return;
773 }
774 mce_cpu_quirks(c);
775
776 machine_check_vector = do_machine_check;
777
778 mce_init();
779 mce_cpu_features(c);
780 mce_init_timer();
781 }
782
783 /*
784 * Character device to read and clear the MCE log.
785 */
786
787 static DEFINE_SPINLOCK(mce_state_lock);
788 static int open_count; /* #times opened */
789 static int open_exclu; /* already open exclusive? */
790
791 static int mce_open(struct inode *inode, struct file *file)
792 {
793 spin_lock(&mce_state_lock);
794
795 if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
796 spin_unlock(&mce_state_lock);
797
798 return -EBUSY;
799 }
800
801 if (file->f_flags & O_EXCL)
802 open_exclu = 1;
803 open_count++;
804
805 spin_unlock(&mce_state_lock);
806
807 return nonseekable_open(inode, file);
808 }
809
810 static int mce_release(struct inode *inode, struct file *file)
811 {
812 spin_lock(&mce_state_lock);
813
814 open_count--;
815 open_exclu = 0;
816
817 spin_unlock(&mce_state_lock);
818
819 return 0;
820 }
821
822 static void collect_tscs(void *data)
823 {
824 unsigned long *cpu_tsc = (unsigned long *)data;
825
826 rdtscll(cpu_tsc[smp_processor_id()]);
827 }
828
829 static DEFINE_MUTEX(mce_read_mutex);
830
831 static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
832 loff_t *off)
833 {
834 char __user *buf = ubuf;
835 unsigned long *cpu_tsc;
836 unsigned prev, next;
837 int i, err;
838
839 cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL);
840 if (!cpu_tsc)
841 return -ENOMEM;
842
843 mutex_lock(&mce_read_mutex);
844 next = rcu_dereference(mcelog.next);
845
846 /* Only supports full reads right now */
847 if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) {
848 mutex_unlock(&mce_read_mutex);
849 kfree(cpu_tsc);
850
851 return -EINVAL;
852 }
853
854 err = 0;
855 prev = 0;
856 do {
857 for (i = prev; i < next; i++) {
858 unsigned long start = jiffies;
859
860 while (!mcelog.entry[i].finished) {
861 if (time_after_eq(jiffies, start + 2)) {
862 memset(mcelog.entry + i, 0,
863 sizeof(struct mce));
864 goto timeout;
865 }
866 cpu_relax();
867 }
868 smp_rmb();
869 err |= copy_to_user(buf, mcelog.entry + i,
870 sizeof(struct mce));
871 buf += sizeof(struct mce);
872 timeout:
873 ;
874 }
875
876 memset(mcelog.entry + prev, 0,
877 (next - prev) * sizeof(struct mce));
878 prev = next;
879 next = cmpxchg(&mcelog.next, prev, 0);
880 } while (next != prev);
881
882 synchronize_sched();
883
884 /*
885 * Collect entries that were still getting written before the
886 * synchronize.
887 */
888 on_each_cpu(collect_tscs, cpu_tsc, 1);
889
890 for (i = next; i < MCE_LOG_LEN; i++) {
891 if (mcelog.entry[i].finished &&
892 mcelog.entry[i].tsc < cpu_tsc[mcelog.entry[i].cpu]) {
893 err |= copy_to_user(buf, mcelog.entry+i,
894 sizeof(struct mce));
895 smp_rmb();
896 buf += sizeof(struct mce);
897 memset(&mcelog.entry[i], 0, sizeof(struct mce));
898 }
899 }
900 mutex_unlock(&mce_read_mutex);
901 kfree(cpu_tsc);
902
903 return err ? -EFAULT : buf - ubuf;
904 }
905
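/*
 * Minimal user-space consumer sketch for the read path above
 * (illustrative only, compiled out; mcelog(8) is the real client, and
 * the header location for the mce structures is the historical one).
 * Note that mce_read() rejects partial reads, so the buffer must hold
 * all MCE_LOG_LEN records.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <asm/mce.h>	/* struct mce, MCE_LOG_LEN */

int main(void)
{
	struct mce records[MCE_LOG_LEN];
	int fd = open("/dev/mcelog", O_RDONLY);
	ssize_t n;

	if (fd < 0)
		return 1;
	n = read(fd, records, sizeof(records));	/* full read or -EINVAL */
	if (n > 0)
		printf("%zd record(s)\n", n / (ssize_t)sizeof(struct mce));
	close(fd);
	return 0;
}
#endif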
906 static unsigned int mce_poll(struct file *file, poll_table *wait)
907 {
908 poll_wait(file, &mce_wait, wait);
909 if (rcu_dereference(mcelog.next))
910 return POLLIN | POLLRDNORM;
911 return 0;
912 }
913
914 static long mce_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
915 {
916 int __user *p = (int __user *)arg;
917
918 if (!capable(CAP_SYS_ADMIN))
919 return -EPERM;
920
921 switch (cmd) {
922 case MCE_GET_RECORD_LEN:
923 return put_user(sizeof(struct mce), p);
924 case MCE_GET_LOG_LEN:
925 return put_user(MCE_LOG_LEN, p);
926 case MCE_GETCLEAR_FLAGS: {
927 unsigned flags;
928
929 do {
930 flags = mcelog.flags;
931 } while (cmpxchg(&mcelog.flags, flags, 0) != flags);
932
933 return put_user(flags, p);
934 }
935 default:
936 return -ENOTTY;
937 }
938 }
939
940 /* Modified in mce-inject.c, so not static or const */
941 struct file_operations mce_chrdev_ops = {
942 .open = mce_open,
943 .release = mce_release,
944 .read = mce_read,
945 .poll = mce_poll,
946 .unlocked_ioctl = mce_ioctl,
947 };
948 EXPORT_SYMBOL_GPL(mce_chrdev_ops);
949
950 static struct miscdevice mce_log_device = {
951 MISC_MCELOG_MINOR,
952 "mcelog",
953 &mce_chrdev_ops,
954 };
955
956 /*
957 * mce=off disables machine check
958 * mce=TOLERANCELEVEL (number, see above)
959 * mce=bootlog Log MCEs from before booting. Disabled by default on AMD.
960 * mce=nobootlog Don't log MCEs from before booting.
961 */
962 static int __init mcheck_enable(char *str)
963 {
964 if (*str == 0)
965 enable_p5_mce();
966 if (*str == '=')
967 str++;
968 if (!strcmp(str, "off"))
969 mce_disabled = 1;
970 else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
971 mce_bootlog = (str[0] == 'b');
972 else if (isdigit(str[0]))
973 get_option(&str, &tolerant);
974 else {
975 printk(KERN_INFO "mce argument %s ignored. Please use /sys\n",
976 str);
977 return 0;
978 }
979 return 1;
980 }
981 __setup("mce", mcheck_enable);
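/*
 * Example command lines accepted by the parser above (illustrative):
 *
 *	mce		enable the P5 handler on ancient CPUs
 *	mce=off		disable machine checks entirely
 *	mce=2		set the tolerant level to 2
 *	mce=nobootlog	don't log MCEs left over from before boot
 */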
982
983 /*
984 * Sysfs support
985 */
986
987 /*
988 * Disable machine checks on suspend and shutdown. We can't really handle
989 * them later.
990 */
991 static int mce_disable(void)
992 {
993 int i;
994
995 for (i = 0; i < banks; i++) {
996 if (!skip_bank_init(i))
997 wrmsrl(MSR_IA32_MC0_CTL + i*4, 0);
998 }
999 return 0;
1000 }
1001
1002 static int mce_suspend(struct sys_device *dev, pm_message_t state)
1003 {
1004 return mce_disable();
1005 }
1006
1007 static int mce_shutdown(struct sys_device *dev)
1008 {
1009 return mce_disable();
1010 }
1011
1012 /*
1013 * On resume, clear all MCE state. We don't want to see leftovers from the BIOS.
1014 * Only one CPU is active at this time, the others get re-added later using
1015 * CPU hotplug:
1016 */
1017 static int mce_resume(struct sys_device *dev)
1018 {
1019 mce_init();
1020 mce_cpu_features(&current_cpu_data);
1021
1022 return 0;
1023 }
1024
1025 static void mce_cpu_restart(void *data)
1026 {
1027 del_timer_sync(&__get_cpu_var(mce_timer));
1028 if (mce_available(&current_cpu_data))
1029 mce_init();
1030 mce_init_timer();
1031 }
1032
1033 /* Reinit MCEs after user configuration changes */
1034 static void mce_restart(void)
1035 {
1036 on_each_cpu(mce_cpu_restart, NULL, 1);
1037 }
1038
1039 static struct sysdev_class mce_sysclass = {
1040 .suspend = mce_suspend,
1041 .shutdown = mce_shutdown,
1042 .resume = mce_resume,
1043 .name = "machinecheck",
1044 };
1045
1046 DEFINE_PER_CPU(struct sys_device, mce_dev);
1047
1048 __cpuinitdata
1049 void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
1050
1051 static struct sysdev_attribute *bank_attrs;
1052
1053 static ssize_t show_bank(struct sys_device *s, struct sysdev_attribute *attr,
1054 char *buf)
1055 {
1056 u64 b = bank[attr - bank_attrs];
1057
1058 return sprintf(buf, "%llx\n", b);
1059 }
1060
1061 static ssize_t set_bank(struct sys_device *s, struct sysdev_attribute *attr,
1062 const char *buf, size_t size)
1063 {
1064 u64 new;
1065
1066 if (strict_strtoull(buf, 0, &new) < 0)
1067 return -EINVAL;
1068
1069 bank[attr - bank_attrs] = new;
1070 mce_restart();
1071
1072 return size;
1073 }
1074
1075 static ssize_t
1076 show_trigger(struct sys_device *s, struct sysdev_attribute *attr, char *buf)
1077 {
1078 strcpy(buf, trigger);
1079 strcat(buf, "\n");
1080 return strlen(trigger) + 1;
1081 }
1082
1083 static ssize_t set_trigger(struct sys_device *s, struct sysdev_attribute *attr,
1084 const char *buf, size_t siz)
1085 {
1086 char *p;
1087 int len;
1088
1089 strncpy(trigger, buf, sizeof(trigger));
1090 trigger[sizeof(trigger)-1] = 0;
1091 len = strlen(trigger);
1092 p = strchr(trigger, '\n');
1093
1094 if (p) /* strchr() returns NULL when there is no newline */
1095 *p = 0;
1096
1097 return len;
1098 }
1099
1100 static ssize_t store_int_with_restart(struct sys_device *s,
1101 struct sysdev_attribute *attr,
1102 const char *buf, size_t size)
1103 {
1104 ssize_t ret = sysdev_store_int(s, attr, buf, size);
1105 mce_restart();
1106 return ret;
1107 }
1108
1109 static SYSDEV_ATTR(trigger, 0644, show_trigger, set_trigger);
1110 static SYSDEV_INT_ATTR(tolerant, 0644, tolerant);
1111
1112 static struct sysdev_ext_attribute attr_check_interval = {
1113 _SYSDEV_ATTR(check_interval, 0644, sysdev_show_int,
1114 store_int_with_restart),
1115 &check_interval
1116 };
1117
1118 static struct sysdev_attribute *mce_attrs[] = {
1119 &attr_tolerant.attr, &attr_check_interval.attr, &attr_trigger,
1120 NULL
1121 };
1122
1123 static cpumask_var_t mce_dev_initialized;
1124
1125 /* Per-CPU sysdev init. All of the CPUs still share the same ctrl bank: */
1126 static __cpuinit int mce_create_device(unsigned int cpu)
1127 {
1128 int err;
1129 int i;
1130
1131 if (!mce_available(&boot_cpu_data))
1132 return -EIO;
1133
1134 memset(&per_cpu(mce_dev, cpu).kobj, 0, sizeof(struct kobject));
1135 per_cpu(mce_dev, cpu).id = cpu;
1136 per_cpu(mce_dev, cpu).cls = &mce_sysclass;
1137
1138 err = sysdev_register(&per_cpu(mce_dev, cpu));
1139 if (err)
1140 return err;
1141
1142 for (i = 0; mce_attrs[i]; i++) {
1143 err = sysdev_create_file(&per_cpu(mce_dev, cpu), mce_attrs[i]);
1144 if (err)
1145 goto error;
1146 }
1147 for (i = 0; i < banks; i++) {
1148 err = sysdev_create_file(&per_cpu(mce_dev, cpu),
1149 &bank_attrs[i]);
1150 if (err)
1151 goto error2;
1152 }
1153 cpumask_set_cpu(cpu, mce_dev_initialized);
1154
1155 return 0;
1156 error2:
1157 while (--i >= 0)
1158 sysdev_remove_file(&per_cpu(mce_dev, cpu), &bank_attrs[i]);
1159 error:
1160 while (--i >= 0)
1161 sysdev_remove_file(&per_cpu(mce_dev, cpu), mce_attrs[i]);
1162
1163 sysdev_unregister(&per_cpu(mce_dev, cpu));
1164
1165 return err;
1166 }
1167
1168 static __cpuinit void mce_remove_device(unsigned int cpu)
1169 {
1170 int i;
1171
1172 if (!cpumask_test_cpu(cpu, mce_dev_initialized))
1173 return;
1174
1175 for (i = 0; mce_attrs[i]; i++)
1176 sysdev_remove_file(&per_cpu(mce_dev, cpu), mce_attrs[i]);
1177
1178 for (i = 0; i < banks; i++)
1179 sysdev_remove_file(&per_cpu(mce_dev, cpu), &bank_attrs[i]);
1180
1181 sysdev_unregister(&per_cpu(mce_dev, cpu));
1182 cpumask_clear_cpu(cpu, mce_dev_initialized);
1183 }
1184
1185 /* Make sure there are no machine checks on offlined CPUs. */
1186 static void mce_disable_cpu(void *h)
1187 {
1188 unsigned long action = *(unsigned long *)h;
1189 int i;
1190
1191 if (!mce_available(&current_cpu_data))
1192 return;
1193 if (!(action & CPU_TASKS_FROZEN))
1194 cmci_clear();
1195 for (i = 0; i < banks; i++) {
1196 if (!skip_bank_init(i))
1197 wrmsrl(MSR_IA32_MC0_CTL + i*4, 0);
1198 }
1199 }
1200
1201 static void mce_reenable_cpu(void *h)
1202 {
1203 unsigned long action = *(unsigned long *)h;
1204 int i;
1205
1206 if (!mce_available(&current_cpu_data))
1207 return;
1208
1209 if (!(action & CPU_TASKS_FROZEN))
1210 cmci_reenable();
1211 for (i = 0; i < banks; i++) {
1212 if (!skip_bank_init(i))
1213 wrmsrl(MSR_IA32_MC0_CTL + i*4, bank[i]);
1214 }
1215 }
1216
1217 /* Get notified when a cpu comes on/off. Be hotplug friendly. */
1218 static int __cpuinit
1219 mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
1220 {
1221 unsigned int cpu = (unsigned long)hcpu;
1222 struct timer_list *t = &per_cpu(mce_timer, cpu);
1223
1224 switch (action) {
1225 case CPU_ONLINE:
1226 case CPU_ONLINE_FROZEN:
1227 mce_create_device(cpu);
1228 if (threshold_cpu_callback)
1229 threshold_cpu_callback(action, cpu);
1230 break;
1231 case CPU_DEAD:
1232 case CPU_DEAD_FROZEN:
1233 if (threshold_cpu_callback)
1234 threshold_cpu_callback(action, cpu);
1235 mce_remove_device(cpu);
1236 break;
1237 case CPU_DOWN_PREPARE:
1238 case CPU_DOWN_PREPARE_FROZEN:
1239 del_timer_sync(t);
1240 smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
1241 break;
1242 case CPU_DOWN_FAILED:
1243 case CPU_DOWN_FAILED_FROZEN:
1244 t->expires = round_jiffies(jiffies +
1245 __get_cpu_var(next_interval));
1246 add_timer_on(t, cpu);
1247 smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
1248 break;
1249 case CPU_POST_DEAD:
1250 /* intentionally ignoring frozen here */
1251 cmci_rediscover(cpu);
1252 break;
1253 }
1254 return NOTIFY_OK;
1255 }
1256
1257 static struct notifier_block mce_cpu_notifier __cpuinitdata = {
1258 .notifier_call = mce_cpu_callback,
1259 };
1260
1261 static __init int mce_init_banks(void)
1262 {
1263 int i;
1264
1265 bank_attrs = kzalloc(sizeof(struct sysdev_attribute) * banks,
1266 GFP_KERNEL);
1267 if (!bank_attrs)
1268 return -ENOMEM;
1269
1270 for (i = 0; i < banks; i++) {
1271 struct sysdev_attribute *a = &bank_attrs[i];
1272
1273 a->attr.name = kasprintf(GFP_KERNEL, "bank%d", i);
1274 if (!a->attr.name)
1275 goto nomem;
1276
1277 a->attr.mode = 0644;
1278 a->show = show_bank;
1279 a->store = set_bank;
1280 }
1281 return 0;
1282
1283 nomem:
1284 while (--i >= 0)
1285 kfree(bank_attrs[i].attr.name);
1286 kfree(bank_attrs);
1287 bank_attrs = NULL;
1288
1289 return -ENOMEM;
1290 }
1291
1292 static __init int mce_init_device(void)
1293 {
1294 int err;
1295 int i = 0;
1296
1297 if (!mce_available(&boot_cpu_data))
1298 return -EIO;
1299
1300 zalloc_cpumask_var(&mce_dev_initialized, GFP_KERNEL); /* zeroed: no stale CPU bits */
1301
1302 err = mce_init_banks();
1303 if (err)
1304 return err;
1305
1306 err = sysdev_class_register(&mce_sysclass);
1307 if (err)
1308 return err;
1309
1310 for_each_online_cpu(i) {
1311 err = mce_create_device(i);
1312 if (err)
1313 return err;
1314 }
1315
1316 register_hotcpu_notifier(&mce_cpu_notifier);
1317 misc_register(&mce_log_device);
1318
1319 return err;
1320 }
1321
1322 device_initcall(mce_init_device);
1323
1324 #else /* !CONFIG_X86_NEW_MCE, i.e. CONFIG_X86_OLD_MCE: */
1325
1326 int nr_mce_banks;
1327 EXPORT_SYMBOL_GPL(nr_mce_banks); /* non-fatal.o */
1328
1329 /* This has to be run for each processor */
1330 void mcheck_init(struct cpuinfo_x86 *c)
1331 {
1332 if (mce_disabled == 1)
1333 return;
1334
1335 switch (c->x86_vendor) {
1336 case X86_VENDOR_AMD:
1337 amd_mcheck_init(c);
1338 break;
1339
1340 case X86_VENDOR_INTEL:
1341 if (c->x86 == 5)
1342 intel_p5_mcheck_init(c);
1343 if (c->x86 == 6)
1344 intel_p6_mcheck_init(c);
1345 if (c->x86 == 15)
1346 intel_p4_mcheck_init(c);
1347 break;
1348
1349 case X86_VENDOR_CENTAUR:
1350 if (c->x86 == 5)
1351 winchip_mcheck_init(c);
1352 break;
1353
1354 default:
1355 break;
1356 }
1357 printk(KERN_INFO "mce: CPU supports %d MCE banks\n", nr_mce_banks);
1358 }
1359
1360 static int __init mcheck_enable(char *str)
1361 {
1362 mce_disabled = -1;
1363 return 1;
1364 }
1365
1366 __setup("mce", mcheck_enable);
1367
1368 #endif /* CONFIG_X86_NEW_MCE */
1369
1370 /*
1371 * Old-style boot options parsing. Only for compatibility.
1372 */
1373 static int __init mcheck_disable(char *str)
1374 {
1375 mce_disabled = 1;
1376 return 1;
1377 }
1378 __setup("nomce", mcheck_disable);