arch/x86/kernel/cpu/mcheck/mce.c
1 /*
2 * Machine check handler.
3 *
4 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
5 * Rest from unknown author(s).
6 * 2004 Andi Kleen. Rewrote most of it.
7 * Copyright 2008 Intel Corporation
8 * Author: Andi Kleen
9 */
10
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
13 #include <linux/thread_info.h>
14 #include <linux/capability.h>
15 #include <linux/miscdevice.h>
16 #include <linux/ratelimit.h>
17 #include <linux/kallsyms.h>
18 #include <linux/rcupdate.h>
19 #include <linux/kobject.h>
20 #include <linux/uaccess.h>
21 #include <linux/kdebug.h>
22 #include <linux/kernel.h>
23 #include <linux/percpu.h>
24 #include <linux/string.h>
25 #include <linux/device.h>
26 #include <linux/syscore_ops.h>
27 #include <linux/delay.h>
28 #include <linux/ctype.h>
29 #include <linux/sched.h>
30 #include <linux/sysfs.h>
31 #include <linux/types.h>
32 #include <linux/slab.h>
33 #include <linux/init.h>
34 #include <linux/kmod.h>
35 #include <linux/poll.h>
36 #include <linux/nmi.h>
37 #include <linux/cpu.h>
38 #include <linux/smp.h>
39 #include <linux/fs.h>
40 #include <linux/mm.h>
41 #include <linux/debugfs.h>
42 #include <linux/irq_work.h>
43 #include <linux/export.h>
44 #include <linux/jump_label.h>
45
46 #include <asm/intel-family.h>
47 #include <asm/processor.h>
48 #include <asm/traps.h>
49 #include <asm/tlbflush.h>
50 #include <asm/mce.h>
51 #include <asm/msr.h>
52
53 #include "mce-internal.h"
54
55 static DEFINE_MUTEX(mce_chrdev_read_mutex);
56
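/*
 * Read mcelog.next with acquire semantics. Callers must hold either the
 * sched RCU read lock or mce_chrdev_read_mutex, otherwise the lockdep
 * check below fires.
 */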
57 #define mce_log_get_idx_check(p) \
58 ({ \
59 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
60 !lockdep_is_held(&mce_chrdev_read_mutex), \
61 "suspicious mce_log_get_idx_check() usage"); \
62 smp_load_acquire(&(p)); \
63 })
64
65 #define CREATE_TRACE_POINTS
66 #include <trace/events/mce.h>
67
68 #define SPINUNIT 100 /* 100ns */
69
70 DEFINE_PER_CPU(unsigned, mce_exception_count);
71
72 struct mce_bank *mce_banks __read_mostly;
73 struct mce_vendor_flags mce_flags __read_mostly;
74
75 struct mca_config mca_cfg __read_mostly = {
76 .bootlog = -1,
77 /*
78 * Tolerant levels:
79 * 0: always panic on uncorrected errors, log corrected errors
80 * 1: panic or SIGBUS on uncorrected errors, log corrected errors
81 * 2: SIGBUS or log uncorrected errors (if possible), log corr. errors
82 * 3: never panic or SIGBUS, log all errors (for testing only)
83 */
84 .tolerant = 1,
85 .monarch_timeout = -1
86 };
87
88 /* User mode helper program triggered by machine check event */
89 static unsigned long mce_need_notify;
90 static char mce_helper[128];
91 static char *mce_helper_argv[2] = { mce_helper, NULL };
92
93 static DECLARE_WAIT_QUEUE_HEAD(mce_chrdev_wait);
94
95 static DEFINE_PER_CPU(struct mce, mces_seen);
96 static int cpu_missing;
97
98 /*
99 * MCA banks polled by the period polling timer for corrected events.
100 * With Intel CMCI, this only has MCA banks which do not support CMCI (if any).
101 */
102 DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
103 [0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
104 };
105
106 /*
107 * MCA banks controlled through firmware first for corrected errors.
108 * This is a global list of banks for which we won't enable CMCI and we
109 * won't poll. Firmware controls these banks and is responsible for
110 * reporting corrected errors through GHES. Uncorrected/recoverable
111 * errors are still notified through a machine check.
112 */
113 mce_banks_t mce_banks_ce_disabled;
114
115 static struct work_struct mce_work;
116 static struct irq_work mce_irq_work;
117
118 static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);
119
120 /*
121 * CPU/chipset specific EDAC code can register a notifier call here to print
122 * MCE errors in a human-readable form.
123 */
124 ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain);
125
126 /* Do initial setup of a struct mce */
127 void mce_setup(struct mce *m)
128 {
129 memset(m, 0, sizeof(struct mce));
130 m->cpu = m->extcpu = smp_processor_id();
131 m->tsc = rdtsc();
132 /* We hope get_seconds stays lockless */
133 m->time = get_seconds();
134 m->cpuvendor = boot_cpu_data.x86_vendor;
135 m->cpuid = cpuid_eax(1);
136 m->socketid = cpu_data(m->extcpu).phys_proc_id;
137 m->apicid = cpu_data(m->extcpu).initial_apicid;
138 rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap);
139
140 if (this_cpu_has(X86_FEATURE_INTEL_PPIN))
141 rdmsrl(MSR_PPIN, m->ppin);
142 }
143
144 DEFINE_PER_CPU(struct mce, injectm);
145 EXPORT_PER_CPU_SYMBOL_GPL(injectm);
146
147 /*
148 * Lockless MCE logging infrastructure.
149 * This avoids deadlocks on printk locks without having to break locks. It also
150 * separates MCEs from kernel messages to avoid bogus bug reports.
151 */
152
153 static struct mce_log mcelog = {
154 .signature = MCE_LOG_SIGNATURE,
155 .len = MCE_LOG_LEN,
156 .recordlen = sizeof(struct mce),
157 };
158
159 void mce_log(struct mce *mce)
160 {
161 unsigned next, entry;
162
163 /* Emit the trace record: */
164 trace_mce_record(mce);
165
166 if (!mce_gen_pool_add(mce))
167 irq_work_queue(&mce_irq_work);
168
169 wmb();
170 for (;;) {
171 entry = mce_log_get_idx_check(mcelog.next);
172 for (;;) {
173
174 /*
175 * When the buffer fills up, discard new entries.
176 * Assume that the earlier errors are the more
177 * interesting ones:
178 */
179 if (entry >= MCE_LOG_LEN) {
180 set_bit(MCE_OVERFLOW,
181 (unsigned long *)&mcelog.flags);
182 return;
183 }
184 /* Old left over entry. Skip: */
185 if (mcelog.entry[entry].finished) {
186 entry++;
187 continue;
188 }
189 break;
190 }
191 smp_rmb();
192 next = entry + 1;
193 if (cmpxchg(&mcelog.next, entry, next) == entry)
194 break;
195 }
196 memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
197 wmb();
198 mcelog.entry[entry].finished = 1;
199 wmb();
200
201 set_bit(0, &mce_need_notify);
202 }
203
204 void mce_inject_log(struct mce *m)
205 {
206 mutex_lock(&mce_chrdev_read_mutex);
207 mce_log(m);
208 mutex_unlock(&mce_chrdev_read_mutex);
209 }
210 EXPORT_SYMBOL_GPL(mce_inject_log);
211
212 static struct notifier_block mce_srao_nb;
213
214 static atomic_t num_notifiers;
215
216 void mce_register_decode_chain(struct notifier_block *nb)
217 {
218 atomic_inc(&num_notifiers);
219
220 /* Ensure SRAO notifier has the highest priority in the decode chain. */
221 if (nb != &mce_srao_nb && nb->priority == INT_MAX)
222 nb->priority -= 1;
223
224 atomic_notifier_chain_register(&x86_mce_decoder_chain, nb);
225 }
226 EXPORT_SYMBOL_GPL(mce_register_decode_chain);
227
228 void mce_unregister_decode_chain(struct notifier_block *nb)
229 {
230 atomic_dec(&num_notifiers);
231
232 atomic_notifier_chain_unregister(&x86_mce_decoder_chain, nb);
233 }
234 EXPORT_SYMBOL_GPL(mce_unregister_decode_chain);
235
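/*
 * Default MCA register accessors. On Scalable MCA (SMCA) systems,
 * __mcheck_cpu_init_vendor() switches msr_ops to the smca_*_reg()
 * variants below.
 */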
236 static inline u32 ctl_reg(int bank)
237 {
238 return MSR_IA32_MCx_CTL(bank);
239 }
240
241 static inline u32 status_reg(int bank)
242 {
243 return MSR_IA32_MCx_STATUS(bank);
244 }
245
246 static inline u32 addr_reg(int bank)
247 {
248 return MSR_IA32_MCx_ADDR(bank);
249 }
250
251 static inline u32 misc_reg(int bank)
252 {
253 return MSR_IA32_MCx_MISC(bank);
254 }
255
256 static inline u32 smca_ctl_reg(int bank)
257 {
258 return MSR_AMD64_SMCA_MCx_CTL(bank);
259 }
260
261 static inline u32 smca_status_reg(int bank)
262 {
263 return MSR_AMD64_SMCA_MCx_STATUS(bank);
264 }
265
266 static inline u32 smca_addr_reg(int bank)
267 {
268 return MSR_AMD64_SMCA_MCx_ADDR(bank);
269 }
270
271 static inline u32 smca_misc_reg(int bank)
272 {
273 return MSR_AMD64_SMCA_MCx_MISC(bank);
274 }
275
276 struct mca_msr_regs msr_ops = {
277 .ctl = ctl_reg,
278 .status = status_reg,
279 .addr = addr_reg,
280 .misc = misc_reg
281 };
282
283 static void __print_mce(struct mce *m)
284 {
285 pr_emerg(HW_ERR "CPU %d: Machine Check%s: %Lx Bank %d: %016Lx\n",
286 m->extcpu,
287 (m->mcgstatus & MCG_STATUS_MCIP ? " Exception" : ""),
288 m->mcgstatus, m->bank, m->status);
289
290 if (m->ip) {
291 pr_emerg(HW_ERR "RIP%s %02x:<%016Lx> ",
292 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
293 m->cs, m->ip);
294
295 if (m->cs == __KERNEL_CS)
296 print_symbol("{%s}", m->ip);
297 pr_cont("\n");
298 }
299
300 pr_emerg(HW_ERR "TSC %llx ", m->tsc);
301 if (m->addr)
302 pr_cont("ADDR %llx ", m->addr);
303 if (m->misc)
304 pr_cont("MISC %llx ", m->misc);
305
306 if (mce_flags.smca) {
307 if (m->synd)
308 pr_cont("SYND %llx ", m->synd);
309 if (m->ipid)
310 pr_cont("IPID %llx ", m->ipid);
311 }
312
313 pr_cont("\n");
314 /*
315 * Note this output is parsed by external tools and old fields
316 * should not be changed.
317 */
318 pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n",
319 m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid,
320 cpu_data(m->extcpu).microcode);
321 }
322
323 static void print_mce(struct mce *m)
324 {
325 int ret = 0;
326
327 __print_mce(m);
328
329 /*
330 * Print out human-readable details about the MCE error
331 * (if the CPU has an implementation for that).
332 */
333 ret = atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m);
334 if (ret == NOTIFY_STOP)
335 return;
336
337 pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n");
338 }
339
340 #define PANIC_TIMEOUT 5 /* 5 seconds */
341
342 static atomic_t mce_panicked;
343
344 static int fake_panic;
345 static atomic_t mce_fake_panicked;
346
347 /* Panic in progress. Enable interrupts and wait for final IPI */
348 static void wait_for_panic(void)
349 {
350 long timeout = PANIC_TIMEOUT*USEC_PER_SEC;
351
352 preempt_disable();
353 local_irq_enable();
354 while (timeout-- > 0)
355 udelay(1);
356 if (panic_timeout == 0)
357 panic_timeout = mca_cfg.panic_timeout;
358 panic("Panicking: machine check CPU died");
359 }
360
361 static void mce_panic(const char *msg, struct mce *final, char *exp)
362 {
363 int apei_err = 0;
364 struct llist_node *pending;
365 struct mce_evt_llist *l;
366
367 if (!fake_panic) {
368 /*
369 * Make sure only one CPU runs in machine check panic
370 */
371 if (atomic_inc_return(&mce_panicked) > 1)
372 wait_for_panic();
373 barrier();
374
375 bust_spinlocks(1);
376 console_verbose();
377 } else {
378 /* Don't log too much for fake panic */
379 if (atomic_inc_return(&mce_fake_panicked) > 1)
380 return;
381 }
382 pending = mce_gen_pool_prepare_records();
383 /* First print corrected ones that are still unlogged */
384 llist_for_each_entry(l, pending, llnode) {
385 struct mce *m = &l->mce;
386 if (!(m->status & MCI_STATUS_UC)) {
387 print_mce(m);
388 if (!apei_err)
389 apei_err = apei_write_mce(m);
390 }
391 }
392 /* Now print uncorrected but with the final one last */
393 llist_for_each_entry(l, pending, llnode) {
394 struct mce *m = &l->mce;
395 if (!(m->status & MCI_STATUS_UC))
396 continue;
397 if (!final || mce_cmp(m, final)) {
398 print_mce(m);
399 if (!apei_err)
400 apei_err = apei_write_mce(m);
401 }
402 }
403 if (final) {
404 print_mce(final);
405 if (!apei_err)
406 apei_err = apei_write_mce(final);
407 }
408 if (cpu_missing)
409 pr_emerg(HW_ERR "Some CPUs didn't answer in synchronization\n");
410 if (exp)
411 pr_emerg(HW_ERR "Machine check: %s\n", exp);
412 if (!fake_panic) {
413 if (panic_timeout == 0)
414 panic_timeout = mca_cfg.panic_timeout;
415 panic(msg);
416 } else
417 pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
418 }
419
420 /* Support code for software error injection */
421
422 static int msr_to_offset(u32 msr)
423 {
424 unsigned bank = __this_cpu_read(injectm.bank);
425
426 if (msr == mca_cfg.rip_msr)
427 return offsetof(struct mce, ip);
428 if (msr == msr_ops.status(bank))
429 return offsetof(struct mce, status);
430 if (msr == msr_ops.addr(bank))
431 return offsetof(struct mce, addr);
432 if (msr == msr_ops.misc(bank))
433 return offsetof(struct mce, misc);
434 if (msr == MSR_IA32_MCG_STATUS)
435 return offsetof(struct mce, mcgstatus);
436 return -1;
437 }
438
439 /* MSR access wrappers used for error injection */
440 static u64 mce_rdmsrl(u32 msr)
441 {
442 u64 v;
443
444 if (__this_cpu_read(injectm.finished)) {
445 int offset = msr_to_offset(msr);
446
447 if (offset < 0)
448 return 0;
449 return *(u64 *)((char *)this_cpu_ptr(&injectm) + offset);
450 }
451
452 if (rdmsrl_safe(msr, &v)) {
453 WARN_ONCE(1, "mce: Unable to read MSR 0x%x!\n", msr);
454 /*
455 * Return zero in case the access faulted. This should
456 * not happen normally but can happen if the CPU does
457 * something weird, or if the code is buggy.
458 */
459 v = 0;
460 }
461
462 return v;
463 }
464
465 static void mce_wrmsrl(u32 msr, u64 v)
466 {
467 if (__this_cpu_read(injectm.finished)) {
468 int offset = msr_to_offset(msr);
469
470 if (offset >= 0)
471 *(u64 *)((char *)this_cpu_ptr(&injectm) + offset) = v;
472 return;
473 }
474 wrmsrl(msr, v);
475 }
476
477 /*
478 * Collect all global (w.r.t. this processor) status about this machine
479 * check into our "mce" struct so that we can use it later to assess
480 * the severity of the problem as we read per-bank specific details.
481 */
482 static inline void mce_gather_info(struct mce *m, struct pt_regs *regs)
483 {
484 mce_setup(m);
485
486 m->mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
487 if (regs) {
488 /*
489 * Get the address of the instruction at the time of
490 * the machine check error.
491 */
492 if (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) {
493 m->ip = regs->ip;
494 m->cs = regs->cs;
495
496 /*
497 * When in VM86 mode, always make the cs look like ring 3.
498 * This is a lie, but it's better than passing
499 * the additional vm86 bit around everywhere.
500 */
501 if (v8086_mode(regs))
502 m->cs |= 3;
503 }
504 /* Use accurate RIP reporting if available. */
505 if (mca_cfg.rip_msr)
506 m->ip = mce_rdmsrl(mca_cfg.rip_msr);
507 }
508 }
509
510 int mce_available(struct cpuinfo_x86 *c)
511 {
512 if (mca_cfg.disabled)
513 return 0;
514 return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
515 }
516
517 static void mce_schedule_work(void)
518 {
519 if (!mce_gen_pool_empty())
520 schedule_work(&mce_work);
521 }
522
523 static void mce_irq_work_cb(struct irq_work *entry)
524 {
525 mce_notify_irq();
526 mce_schedule_work();
527 }
528
529 static void mce_report_event(struct pt_regs *regs)
530 {
531 if (regs->flags & (X86_VM_MASK|X86_EFLAGS_IF)) {
532 mce_notify_irq();
533 /*
534 * Triggering the work queue here is just an insurance
535 * policy in case the syscall exit notify handler
536 * doesn't run soon enough or ends up running on the
537 * wrong CPU (can happen when audit sleeps)
538 */
539 mce_schedule_work();
540 return;
541 }
542
543 irq_work_queue(&mce_irq_work);
544 }
545
546 /*
547 * Check if the address reported by the CPU is in a format we can parse.
548 * It would be possible to add code for most other cases, but all would
549 * be somewhat complicated (e.g. segment offset would require an instruction
550 * parser). So only support physical addresses up to page granularity for now.
551 */
552 static int mce_usable_address(struct mce *m)
553 {
554 if (!(m->status & MCI_STATUS_MISCV) || !(m->status & MCI_STATUS_ADDRV))
555 return 0;
556
557 /* Checks after this one are Intel-specific: */
558 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
559 return 1;
560
561 if (MCI_MISC_ADDR_LSB(m->misc) > PAGE_SHIFT)
562 return 0;
563 if (MCI_MISC_ADDR_MODE(m->misc) != MCI_MISC_ADDR_PHYS)
564 return 0;
565 return 1;
566 }
567
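/*
 * SRAO (action optional) notifier: if the error reported a usable address
 * and was graded MCE_AO_SEVERITY, offline the affected page.
 */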
568 static int srao_decode_notifier(struct notifier_block *nb, unsigned long val,
569 void *data)
570 {
571 struct mce *mce = (struct mce *)data;
572 unsigned long pfn;
573
574 if (!mce)
575 return NOTIFY_DONE;
576
577 if (mce_usable_address(mce) && (mce->severity == MCE_AO_SEVERITY)) {
578 pfn = mce->addr >> PAGE_SHIFT;
579 memory_failure(pfn, MCE_VECTOR, 0);
580 }
581
582 return NOTIFY_OK;
583 }
584 static struct notifier_block mce_srao_nb = {
585 .notifier_call = srao_decode_notifier,
586 .priority = INT_MAX,
587 };
588
589 static int mce_default_notifier(struct notifier_block *nb, unsigned long val,
590 void *data)
591 {
592 struct mce *m = (struct mce *)data;
593
594 if (!m)
595 return NOTIFY_DONE;
596
597 /*
598 * Run the default notifier only if the SRAO notifier and this
599 * one are the only notifiers registered.
600 */
601 if (atomic_read(&num_notifiers) > 2)
602 return NOTIFY_DONE;
603
604 __print_mce(m);
605
606 return NOTIFY_DONE;
607 }
608
609 static struct notifier_block mce_default_nb = {
610 .notifier_call = mce_default_notifier,
611 /* lowest prio, we want it to run last. */
612 .priority = 0,
613 };
614
615 /*
616 * Read ADDR and MISC registers.
617 */
618 static void mce_read_aux(struct mce *m, int i)
619 {
620 if (m->status & MCI_STATUS_MISCV)
621 m->misc = mce_rdmsrl(msr_ops.misc(i));
622
623 if (m->status & MCI_STATUS_ADDRV) {
624 m->addr = mce_rdmsrl(msr_ops.addr(i));
625
626 /*
627 * Mask the reported address by the reported granularity.
628 */
629 if (mca_cfg.ser && (m->status & MCI_STATUS_MISCV)) {
630 u8 shift = MCI_MISC_ADDR_LSB(m->misc);
631 m->addr >>= shift;
632 m->addr <<= shift;
633 }
634
635 /*
636 * Extract [55:<lsb>] where lsb is the least significant
637 * *valid* bit of the address bits.
638 */
639 if (mce_flags.smca) {
640 u8 lsb = (m->addr >> 56) & 0x3f;
641
642 m->addr &= GENMASK_ULL(55, lsb);
643 }
644 }
645
646 if (mce_flags.smca) {
647 m->ipid = mce_rdmsrl(MSR_AMD64_SMCA_MCx_IPID(i));
648
649 if (m->status & MCI_STATUS_SYNDV)
650 m->synd = mce_rdmsrl(MSR_AMD64_SMCA_MCx_SYND(i));
651 }
652 }
653
654 static bool memory_error(struct mce *m)
655 {
656 struct cpuinfo_x86 *c = &boot_cpu_data;
657
658 if (c->x86_vendor == X86_VENDOR_AMD) {
659 /* ErrCodeExt[20:16] */
660 u8 xec = (m->status >> 16) & 0x1f;
661
662 return (xec == 0x0 || xec == 0x8);
663 } else if (c->x86_vendor == X86_VENDOR_INTEL) {
664 /*
665 * Intel SDM Volume 3B - 15.9.2 Compound Error Codes
666 *
667 * Bit 7 of the MCACOD field of IA32_MCi_STATUS is used for
668 * indicating a memory error. Bit 8 is used for indicating a
669 * cache hierarchy error. The combination of bit 2 and bit 3
670 * is used for indicating a `generic' cache hierarchy error.
671 * But we can't just blindly check the above bits, because if
672 * bit 11 is set, then it is a bus/interconnect error - and
673 * either way the above bits just give more detail on what
674 * bus/interconnect error happened. Note that bit 12 can be
675 * ignored, as it's the "filter" bit.
676 */
677 return (m->status & 0xef80) == BIT(7) ||
678 (m->status & 0xef00) == BIT(8) ||
679 (m->status & 0xeffc) == 0xc;
680 }
681
682 return false;
683 }
684
685 DEFINE_PER_CPU(unsigned, mce_poll_count);
686
687 /*
688 * Poll for corrected events or events that happened before reset.
689 * Those are just logged through /dev/mcelog.
690 *
691 * This is executed in standard interrupt context.
692 *
693 * Note: the spec recommends panicking for fatal unsignalled
694 * errors here. However this would be quite problematic --
695 * we would need to reimplement the Monarch handling and
696 * it would mess up the exclusion between the exception handler
697 * and the poll handler -- so we skip this for now.
698 * These cases should not happen anyway, or only when the CPU
699 * is already totally confused. In this case it's likely it will
700 * not fully execute the machine check handler either.
701 */
702 bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
703 {
704 bool error_seen = false;
705 struct mce m;
706 int severity;
707 int i;
708
709 this_cpu_inc(mce_poll_count);
710
711 mce_gather_info(&m, NULL);
712
713 /*
714 * m.tsc was set in mce_setup(). Clear it if not requested.
715 *
716 * FIXME: Propagate @flags to mce_gather_info/mce_setup() to avoid
717 * that dance.
718 */
719 if (!(flags & MCP_TIMESTAMP))
720 m.tsc = 0;
721
722 for (i = 0; i < mca_cfg.banks; i++) {
723 if (!mce_banks[i].ctl || !test_bit(i, *b))
724 continue;
725
726 m.misc = 0;
727 m.addr = 0;
728 m.bank = i;
729
730 barrier();
731 m.status = mce_rdmsrl(msr_ops.status(i));
732 if (!(m.status & MCI_STATUS_VAL))
733 continue;
734
735 /*
736 * Uncorrected or signalled events are handled by the exception
737 * handler when it is enabled, so don't process those here.
738 *
739 * TBD do the same check for MCI_STATUS_EN here?
740 */
741 if (!(flags & MCP_UC) &&
742 (m.status & (mca_cfg.ser ? MCI_STATUS_S : MCI_STATUS_UC)))
743 continue;
744
745 error_seen = true;
746
747 mce_read_aux(&m, i);
748
749 severity = mce_severity(&m, mca_cfg.tolerant, NULL, false);
750
751 if (severity == MCE_DEFERRED_SEVERITY && memory_error(&m))
752 if (m.status & MCI_STATUS_ADDRV)
753 m.severity = severity;
754
755 /*
756 * Don't get the IP here because it's unlikely to
757 * have anything to do with the actual error location.
758 */
759 if (!(flags & MCP_DONTLOG) && !mca_cfg.dont_log_ce)
760 mce_log(&m);
761 else if (mce_usable_address(&m)) {
762 /*
763 * Although we skipped logging this, we still want
764 * to take action. Add to the pool so the registered
765 * notifiers will see it.
766 */
767 if (!mce_gen_pool_add(&m))
768 mce_schedule_work();
769 }
770
771 /*
772 * Clear state for this bank.
773 */
774 mce_wrmsrl(msr_ops.status(i), 0);
775 }
776
777 /*
778 * Don't clear MCG_STATUS here because it's only defined for
779 * exceptions.
780 */
781
782 sync_core();
783
784 return error_seen;
785 }
786 EXPORT_SYMBOL_GPL(machine_check_poll);
787
788 /*
789 * Do a quick check if any of the events requires a panic.
790 * This decides if we keep the events around or clear them.
791 */
792 static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
793 struct pt_regs *regs)
794 {
795 int i, ret = 0;
796 char *tmp;
797
798 for (i = 0; i < mca_cfg.banks; i++) {
799 m->status = mce_rdmsrl(msr_ops.status(i));
800 if (m->status & MCI_STATUS_VAL) {
801 __set_bit(i, validp);
802 if (quirk_no_way_out)
803 quirk_no_way_out(i, m, regs);
804 }
805
806 if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
807 *msg = tmp;
808 ret = 1;
809 }
810 }
811 return ret;
812 }
813
814 /*
815 * Variable to establish order between CPUs while scanning.
816 * Each CPU spins initially until mce_executing equals its number.
817 */
818 static atomic_t mce_executing;
819
820 /*
821 * Defines order of CPUs on entry. First CPU becomes Monarch.
822 */
823 static atomic_t mce_callin;
824
825 /*
826 * Check if a timeout waiting for other CPUs happened.
827 */
828 static int mce_timed_out(u64 *t, const char *msg)
829 {
830 /*
831 * The others already did panic for some reason.
832 * Bail out like in a timeout.
833 * rmb() to tell the compiler that system_state
834 * might have been modified by someone else.
835 */
836 rmb();
837 if (atomic_read(&mce_panicked))
838 wait_for_panic();
839 if (!mca_cfg.monarch_timeout)
840 goto out;
841 if ((s64)*t < SPINUNIT) {
842 if (mca_cfg.tolerant <= 1)
843 mce_panic(msg, NULL, NULL);
844 cpu_missing = 1;
845 return 1;
846 }
847 *t -= SPINUNIT;
848 out:
849 touch_nmi_watchdog();
850 return 0;
851 }
852
853 /*
854 * The Monarch's reign. The Monarch is the CPU that entered
855 * the machine check handler first. It waits for the others to
856 * raise the exception too and then grades them. When any
857 * error is fatal, panic. Only then let the others continue.
858 *
859 * The other CPUs entering the MCE handler will be controlled by the
860 * Monarch. They are called Subjects.
861 *
862 * This way we prevent any potential data corruption in an unrecoverable case
863 * and also make sure that all CPUs' errors are always examined.
864 *
865 * This also detects the case of a machine check event coming from outer
866 * space (not detected by any CPU). In this case some external agent wants
867 * us to shut down, so panic too.
868 *
869 * The other CPUs might still decide to panic if the handler happens
870 * in an unrecoverable place, but in this case the system is in a semi-stable
871 * state and won't corrupt anything by itself. It's OK to let the others
872 * continue for a bit first.
873 *
874 * All the spin loops have timeouts; when a timeout happens a CPU
875 * typically elects itself to be Monarch.
876 */
877 static void mce_reign(void)
878 {
879 int cpu;
880 struct mce *m = NULL;
881 int global_worst = 0;
882 char *msg = NULL;
883 char *nmsg = NULL;
884
885 /*
886 * This CPU is the Monarch and the other CPUs have run
887 * through their handlers.
888 * Grade the severity of the errors of all the CPUs.
889 */
890 for_each_possible_cpu(cpu) {
891 int severity = mce_severity(&per_cpu(mces_seen, cpu),
892 mca_cfg.tolerant,
893 &nmsg, true);
894 if (severity > global_worst) {
895 msg = nmsg;
896 global_worst = severity;
897 m = &per_cpu(mces_seen, cpu);
898 }
899 }
900
901 /*
902 * Cannot recover? Panic here then.
903 * This dumps all the mces in the log buffer and stops the
904 * other CPUs.
905 */
906 if (m && global_worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3)
907 mce_panic("Fatal machine check", m, msg);
908
909 /*
910 * For UC somewhere we let the CPU that detects it handle it.
911 * We must also let the others continue, otherwise the handling
912 * CPU could deadlock on a lock.
913 */
914
915 /*
916 * No machine check event found. Must be some external
917 * source or one CPU is hung. Panic.
918 */
919 if (global_worst <= MCE_KEEP_SEVERITY && mca_cfg.tolerant < 3)
920 mce_panic("Fatal machine check from unknown source", NULL, NULL);
921
922 /*
923 * Now clear all the mces_seen so that they don't reappear on
924 * the next mce.
925 */
926 for_each_possible_cpu(cpu)
927 memset(&per_cpu(mces_seen, cpu), 0, sizeof(struct mce));
928 }
929
930 static atomic_t global_nwo;
931
932 /*
933 * Start of Monarch synchronization. This waits until all CPUs have
934 * entered the exception handler and then determines if any of them
935 * saw a fatal event that requires a panic. Then it lets the CPUs
936 * execute in their entry order.
937 * TBD double check parallel CPU hotunplug
938 */
939 static int mce_start(int *no_way_out)
940 {
941 int order;
942 int cpus = num_online_cpus();
943 u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;
944
945 if (!timeout)
946 return -1;
947
948 atomic_add(*no_way_out, &global_nwo);
949 /*
950 * Rely on the implied barrier below, such that global_nwo
951 * is updated before mce_callin.
952 */
953 order = atomic_inc_return(&mce_callin);
954
955 /*
956 * Wait for everyone.
957 */
958 while (atomic_read(&mce_callin) != cpus) {
959 if (mce_timed_out(&timeout,
960 "Timeout: Not all CPUs entered broadcast exception handler")) {
961 atomic_set(&global_nwo, 0);
962 return -1;
963 }
964 ndelay(SPINUNIT);
965 }
966
967 /*
968 * mce_callin should be read before global_nwo
969 */
970 smp_rmb();
971
972 if (order == 1) {
973 /*
974 * Monarch: Starts executing now, the others wait.
975 */
976 atomic_set(&mce_executing, 1);
977 } else {
978 /*
979 * Subject: Now start the scanning loop one by one in
980 * the original callin order.
981 * This way, when there are any shared banks, an event is
982 * only seen by one CPU before being cleared, avoiding duplicates.
983 */
984 while (atomic_read(&mce_executing) < order) {
985 if (mce_timed_out(&timeout,
986 "Timeout: Subject CPUs unable to finish machine check processing")) {
987 atomic_set(&global_nwo, 0);
988 return -1;
989 }
990 ndelay(SPINUNIT);
991 }
992 }
993
994 /*
995 * Cache the global no_way_out state.
996 */
997 *no_way_out = atomic_read(&global_nwo);
998
999 return order;
1000 }
1001
1002 /*
1003 * Synchronize between CPUs after main scanning loop.
1004 * This invokes the bulk of the Monarch processing.
1005 */
1006 static int mce_end(int order)
1007 {
1008 int ret = -1;
1009 u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;
1010
1011 if (!timeout)
1012 goto reset;
1013 if (order < 0)
1014 goto reset;
1015
1016 /*
1017 * Allow others to run.
1018 */
1019 atomic_inc(&mce_executing);
1020
1021 if (order == 1) {
1022 /* CHECKME: Can this race with a parallel hotplug? */
1023 int cpus = num_online_cpus();
1024
1025 /*
1026 * Monarch: Wait for everyone to go through their scanning
1027 * loops.
1028 */
1029 while (atomic_read(&mce_executing) <= cpus) {
1030 if (mce_timed_out(&timeout,
1031 "Timeout: Monarch CPU unable to finish machine check processing"))
1032 goto reset;
1033 ndelay(SPINUNIT);
1034 }
1035
1036 mce_reign();
1037 barrier();
1038 ret = 0;
1039 } else {
1040 /*
1041 * Subject: Wait for Monarch to finish.
1042 */
1043 while (atomic_read(&mce_executing) != 0) {
1044 if (mce_timed_out(&timeout,
1045 "Timeout: Monarch CPU did not finish machine check processing"))
1046 goto reset;
1047 ndelay(SPINUNIT);
1048 }
1049
1050 /*
1051 * Don't reset anything. That's done by the Monarch.
1052 */
1053 return 0;
1054 }
1055
1056 /*
1057 * Reset all global state.
1058 */
1059 reset:
1060 atomic_set(&global_nwo, 0);
1061 atomic_set(&mce_callin, 0);
1062 barrier();
1063
1064 /*
1065 * Let others run again.
1066 */
1067 atomic_set(&mce_executing, 0);
1068 return ret;
1069 }
1070
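/* Clear the status registers of all banks marked in @toclear. */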
1071 static void mce_clear_state(unsigned long *toclear)
1072 {
1073 int i;
1074
1075 for (i = 0; i < mca_cfg.banks; i++) {
1076 if (test_bit(i, toclear))
1077 mce_wrmsrl(msr_ops.status(i), 0);
1078 }
1079 }
1080
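/*
 * Hand a recoverable user-space memory error to memory_failure(). If the
 * machine check did not report a valid return IP we cannot resume the
 * interrupted context, so the task must be killed as well.
 */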
1081 static int do_memory_failure(struct mce *m)
1082 {
1083 int flags = MF_ACTION_REQUIRED;
1084 int ret;
1085
1086 pr_err("Uncorrected hardware memory error in user-access at %llx", m->addr);
1087 if (!(m->mcgstatus & MCG_STATUS_RIPV))
1088 flags |= MF_MUST_KILL;
1089 ret = memory_failure(m->addr >> PAGE_SHIFT, MCE_VECTOR, flags);
1090 if (ret)
1091 pr_err("Memory error not recovered");
1092 return ret;
1093 }
1094
1095 /*
1096 * The actual machine check handler. This only handles real
1097 * exceptions when something got corrupted coming in through int 18.
1098 *
1099 * This is executed in NMI context not subject to normal locking rules. This
1100 * implies that most kernel services cannot be safely used. Don't even
1101 * think about putting a printk in there!
1102 *
1103 * On Intel systems this is entered on all CPUs in parallel through
1104 * MCE broadcast. However some CPUs might be broken beyond repair,
1105 * so be always careful when synchronizing with others.
1106 */
1107 void do_machine_check(struct pt_regs *regs, long error_code)
1108 {
1109 struct mca_config *cfg = &mca_cfg;
1110 struct mce m, *final;
1111 int i;
1112 int worst = 0;
1113 int severity;
1114
1115 /*
1116 * Establish sequential order between the CPUs entering the machine
1117 * check handler.
1118 */
1119 int order = -1;
1120 /*
1121 * If no_way_out gets set, there is no safe way to recover from this
1122 * MCE. If mca_cfg.tolerant is cranked up, we'll try anyway.
1123 */
1124 int no_way_out = 0;
1125 /*
1126 * If kill_it gets set, there might be a way to recover from this
1127 * error.
1128 */
1129 int kill_it = 0;
1130 DECLARE_BITMAP(toclear, MAX_NR_BANKS);
1131 DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
1132 char *msg = "Unknown";
1133
1134 /*
1135 * MCEs are always local on AMD. The same is determined by MCG_STATUS_LMCES
1136 * on Intel.
1137 */
1138 int lmce = 1;
1139
1140 /* If this CPU is offline, just bail out. */
1141 if (cpu_is_offline(smp_processor_id())) {
1142 u64 mcgstatus;
1143
1144 mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
1145 if (mcgstatus & MCG_STATUS_RIPV) {
1146 mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
1147 return;
1148 }
1149 }
1150
1151 ist_enter(regs);
1152
1153 this_cpu_inc(mce_exception_count);
1154
1155 if (!cfg->banks)
1156 goto out;
1157
1158 mce_gather_info(&m, regs);
1159
1160 final = this_cpu_ptr(&mces_seen);
1161 *final = m;
1162
1163 memset(valid_banks, 0, sizeof(valid_banks));
1164 no_way_out = mce_no_way_out(&m, &msg, valid_banks, regs);
1165
1166 barrier();
1167
1168 /*
1169 * If there is no restart IP we might need to kill or panic.
1170 * Assume the worst for now, but if we find the
1171 * severity is MCE_AR_SEVERITY we have other options.
1172 */
1173 if (!(m.mcgstatus & MCG_STATUS_RIPV))
1174 kill_it = 1;
1175
1176 /*
1177 * Check if this MCE is signaled to only this logical processor,
1178 * on Intel only.
1179 */
1180 if (m.cpuvendor == X86_VENDOR_INTEL)
1181 lmce = m.mcgstatus & MCG_STATUS_LMCES;
1182
1183 /*
1184 * Go through all banks in exclusion of the other CPUs. This way we
1185 * don't report duplicated events on shared banks because the first one
1186 * to see it will clear it. If this is a Local MCE, then no need to
1187 * perform rendezvous.
1188 */
1189 if (!lmce)
1190 order = mce_start(&no_way_out);
1191
1192 for (i = 0; i < cfg->banks; i++) {
1193 __clear_bit(i, toclear);
1194 if (!test_bit(i, valid_banks))
1195 continue;
1196 if (!mce_banks[i].ctl)
1197 continue;
1198
1199 m.misc = 0;
1200 m.addr = 0;
1201 m.bank = i;
1202
1203 m.status = mce_rdmsrl(msr_ops.status(i));
1204 if ((m.status & MCI_STATUS_VAL) == 0)
1205 continue;
1206
1207 /*
1208 * Non-uncorrected or non-signaled errors are handled by
1209 * machine_check_poll. Leave them alone, unless this panics.
1210 */
1211 if (!(m.status & (cfg->ser ? MCI_STATUS_S : MCI_STATUS_UC)) &&
1212 !no_way_out)
1213 continue;
1214
1215 /*
1216 * Set taint even when machine check was not enabled.
1217 */
1218 add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
1219
1220 severity = mce_severity(&m, cfg->tolerant, NULL, true);
1221
1222 /*
1223 * When the machine check was for the corrected/deferred handler,
1224 * don't touch it, unless we're panicking.
1225 */
1226 if ((severity == MCE_KEEP_SEVERITY ||
1227 severity == MCE_UCNA_SEVERITY) && !no_way_out)
1228 continue;
1229 __set_bit(i, toclear);
1230 if (severity == MCE_NO_SEVERITY) {
1231 /*
1232 * Machine check event was not enabled. Clear, but
1233 * ignore.
1234 */
1235 continue;
1236 }
1237
1238 mce_read_aux(&m, i);
1239
1240 /* assuming valid severity level != 0 */
1241 m.severity = severity;
1242
1243 mce_log(&m);
1244
1245 if (severity > worst) {
1246 *final = m;
1247 worst = severity;
1248 }
1249 }
1250
1251 /* mce_clear_state will clear *final, save locally for use later */
1252 m = *final;
1253
1254 if (!no_way_out)
1255 mce_clear_state(toclear);
1256
1257 /*
1258 * Do most of the synchronization with other CPUs.
1259 * When there's any problem use only local no_way_out state.
1260 */
1261 if (!lmce) {
1262 if (mce_end(order) < 0)
1263 no_way_out = worst >= MCE_PANIC_SEVERITY;
1264 } else {
1265 /*
1266 * A local MCE skipped calling mce_reign().
1267 * If we found a fatal error, we need to panic here.
1268 */
1269 if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3)
1270 mce_panic("Machine check from unknown source",
1271 NULL, NULL);
1272 }
1273
1274 /*
1275 * If tolerant is at an insane level we drop requests to kill
1276 * processes and continue even when there is no way out.
1277 */
1278 if (cfg->tolerant == 3)
1279 kill_it = 0;
1280 else if (no_way_out)
1281 mce_panic("Fatal machine check on current CPU", &m, msg);
1282
1283 if (worst > 0)
1284 mce_report_event(regs);
1285 mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
1286 out:
1287 sync_core();
1288
1289 if (worst != MCE_AR_SEVERITY && !kill_it)
1290 goto out_ist;
1291
1292 /* Fault was in user mode and we need to take some action */
1293 if ((m.cs & 3) == 3) {
1294 ist_begin_non_atomic(regs);
1295 local_irq_enable();
1296
1297 if (kill_it || do_memory_failure(&m))
1298 force_sig(SIGBUS, current);
1299 local_irq_disable();
1300 ist_end_non_atomic();
1301 } else {
1302 if (!fixup_exception(regs, X86_TRAP_MC))
1303 mce_panic("Failed kernel mode recovery", &m, NULL);
1304 }
1305
1306 out_ist:
1307 ist_exit(regs);
1308 }
1309 EXPORT_SYMBOL_GPL(do_machine_check);
1310
1311 #ifndef CONFIG_MEMORY_FAILURE
1312 int memory_failure(unsigned long pfn, int vector, int flags)
1313 {
1314 /* mce_severity() should not hand us an ACTION_REQUIRED error */
1315 BUG_ON(flags & MF_ACTION_REQUIRED);
1316 pr_err("Uncorrected memory error in page 0x%lx ignored\n"
1317 "Rebuild kernel with CONFIG_MEMORY_FAILURE=y for smarter handling\n",
1318 pfn);
1319
1320 return 0;
1321 }
1322 #endif
1323
1324 /*
1325 * Action optional processing happens here (picking up
1326 * from the list of faulting pages that do_machine_check()
1327 * placed into the genpool).
1328 */
1329 static void mce_process_work(struct work_struct *dummy)
1330 {
1331 mce_gen_pool_process();
1332 }
1333
1334 #ifdef CONFIG_X86_MCE_INTEL
1335 /***
1336 * mce_log_therm_throt_event - Logs the thermal throttling event to mcelog
1337 * @cpu: The CPU on which the event occurred.
1338 * @status: Event status information
1339 *
1340 * This function should be called by the thermal interrupt after the
1341 * event has been processed and the decision was made to log the event
1342 * further.
1343 *
1344 * The status parameter will be saved to the 'status' field of 'struct mce'
1345 * and historically has been the register value of the
1346 * MSR_IA32_THERMAL_STATUS (Intel) msr.
1347 */
1348 void mce_log_therm_throt_event(__u64 status)
1349 {
1350 struct mce m;
1351
1352 mce_setup(&m);
1353 m.bank = MCE_THERMAL_BANK;
1354 m.status = status;
1355 mce_log(&m);
1356 }
1357 #endif /* CONFIG_X86_MCE_INTEL */
1358
1359 /*
1360 * Periodic polling timer for "silent" machine check errors. If the
1361 * poller finds an MCE, poll 2x faster. When the poller finds no more
1362 * errors, poll 2x slower (up to check_interval seconds).
1363 */
1364 static unsigned long check_interval = INITIAL_CHECK_INTERVAL;
1365
1366 static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
1367 static DEFINE_PER_CPU(struct timer_list, mce_timer);
1368
1369 static unsigned long mce_adjust_timer_default(unsigned long interval)
1370 {
1371 return interval;
1372 }
1373
1374 static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;
1375
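/*
 * (Re)arm the per-CPU polling timer so it fires no later than @interval
 * jiffies from now, pulling an already pending timer forward if needed.
 */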
1376 static void __restart_timer(struct timer_list *t, unsigned long interval)
1377 {
1378 unsigned long when = jiffies + interval;
1379 unsigned long flags;
1380
1381 local_irq_save(flags);
1382
1383 if (timer_pending(t)) {
1384 if (time_before(when, t->expires))
1385 mod_timer(t, when);
1386 } else {
1387 t->expires = round_jiffies(when);
1388 add_timer_on(t, smp_processor_id());
1389 }
1390
1391 local_irq_restore(flags);
1392 }
1393
1394 static void mce_timer_fn(unsigned long data)
1395 {
1396 struct timer_list *t = this_cpu_ptr(&mce_timer);
1397 int cpu = smp_processor_id();
1398 unsigned long iv;
1399
1400 WARN_ON(cpu != data);
1401
1402 iv = __this_cpu_read(mce_next_interval);
1403
1404 if (mce_available(this_cpu_ptr(&cpu_info))) {
1405 machine_check_poll(0, this_cpu_ptr(&mce_poll_banks));
1406
1407 if (mce_intel_cmci_poll()) {
1408 iv = mce_adjust_timer(iv);
1409 goto done;
1410 }
1411 }
1412
1413 /*
1414 * Alert userspace if needed. If we logged an MCE, reduce the polling
1415 * interval, otherwise increase the polling interval.
1416 */
1417 if (mce_notify_irq())
1418 iv = max(iv / 2, (unsigned long) HZ/100);
1419 else
1420 iv = min(iv * 2, round_jiffies_relative(check_interval * HZ));
1421
1422 done:
1423 __this_cpu_write(mce_next_interval, iv);
1424 __restart_timer(t, iv);
1425 }
1426
1427 /*
1428 * Ensure that the timer is firing in @interval from now.
1429 */
1430 void mce_timer_kick(unsigned long interval)
1431 {
1432 struct timer_list *t = this_cpu_ptr(&mce_timer);
1433 unsigned long iv = __this_cpu_read(mce_next_interval);
1434
1435 __restart_timer(t, interval);
1436
1437 if (interval < iv)
1438 __this_cpu_write(mce_next_interval, interval);
1439 }
1440
1441 /* Must not be called in IRQ context where del_timer_sync() can deadlock */
1442 static void mce_timer_delete_all(void)
1443 {
1444 int cpu;
1445
1446 for_each_online_cpu(cpu)
1447 del_timer_sync(&per_cpu(mce_timer, cpu));
1448 }
1449
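/* Run the configured user mode helper (mce_helper) from process context. */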
1450 static void mce_do_trigger(struct work_struct *work)
1451 {
1452 call_usermodehelper(mce_helper, mce_helper_argv, NULL, UMH_NO_WAIT);
1453 }
1454
1455 static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
1456
1457 /*
1458 * Notify the user(s) about new machine check events.
1459 * Can be called from interrupt context, but not from machine check/NMI
1460 * context.
1461 */
1462 int mce_notify_irq(void)
1463 {
1464 /* Not more than two messages every minute */
1465 static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
1466
1467 if (test_and_clear_bit(0, &mce_need_notify)) {
1468 /* wake processes polling /dev/mcelog */
1469 wake_up_interruptible(&mce_chrdev_wait);
1470
1471 if (mce_helper[0])
1472 schedule_work(&mce_trigger_work);
1473
1474 if (__ratelimit(&ratelimit))
1475 pr_info(HW_ERR "Machine check events logged\n");
1476
1477 return 1;
1478 }
1479 return 0;
1480 }
1481 EXPORT_SYMBOL_GPL(mce_notify_irq);
1482
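/* Allocate the bank control array and enable all banks by default. */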
1483 static int __mcheck_cpu_mce_banks_init(void)
1484 {
1485 int i;
1486 u8 num_banks = mca_cfg.banks;
1487
1488 mce_banks = kzalloc(num_banks * sizeof(struct mce_bank), GFP_KERNEL);
1489 if (!mce_banks)
1490 return -ENOMEM;
1491
1492 for (i = 0; i < num_banks; i++) {
1493 struct mce_bank *b = &mce_banks[i];
1494
1495 b->ctl = -1ULL;
1496 b->init = 1;
1497 }
1498 return 0;
1499 }
1500
1501 /*
1502 * Initialize Machine Checks for a CPU.
1503 */
1504 static int __mcheck_cpu_cap_init(void)
1505 {
1506 unsigned b;
1507 u64 cap;
1508
1509 rdmsrl(MSR_IA32_MCG_CAP, cap);
1510
1511 b = cap & MCG_BANKCNT_MASK;
1512 if (!mca_cfg.banks)
1513 pr_info("CPU supports %d MCE banks\n", b);
1514
1515 if (b > MAX_NR_BANKS) {
1516 pr_warn("Using only %u machine check banks out of %u\n",
1517 MAX_NR_BANKS, b);
1518 b = MAX_NR_BANKS;
1519 }
1520
1521 /* Don't support asymmetric configurations today */
1522 WARN_ON(mca_cfg.banks != 0 && b != mca_cfg.banks);
1523 mca_cfg.banks = b;
1524
1525 if (!mce_banks) {
1526 int err = __mcheck_cpu_mce_banks_init();
1527
1528 if (err)
1529 return err;
1530 }
1531
1532 /* Use accurate RIP reporting if available. */
1533 if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9)
1534 mca_cfg.rip_msr = MSR_IA32_MCG_EIP;
1535
1536 if (cap & MCG_SER_P)
1537 mca_cfg.ser = true;
1538
1539 return 0;
1540 }
1541
1542 static void __mcheck_cpu_init_generic(void)
1543 {
1544 enum mcp_flags m_fl = 0;
1545 mce_banks_t all_banks;
1546 u64 cap;
1547
1548 if (!mca_cfg.bootlog)
1549 m_fl = MCP_DONTLOG;
1550
1551 /*
1552 * Log the machine checks left over from the previous reset.
1553 */
1554 bitmap_fill(all_banks, MAX_NR_BANKS);
1555 machine_check_poll(MCP_UC | m_fl, &all_banks);
1556
1557 cr4_set_bits(X86_CR4_MCE);
1558
1559 rdmsrl(MSR_IA32_MCG_CAP, cap);
1560 if (cap & MCG_CTL_P)
1561 wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
1562 }
1563
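/* Program each initialized bank's control register and clear stale status. */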
1564 static void __mcheck_cpu_init_clear_banks(void)
1565 {
1566 int i;
1567
1568 for (i = 0; i < mca_cfg.banks; i++) {
1569 struct mce_bank *b = &mce_banks[i];
1570
1571 if (!b->init)
1572 continue;
1573 wrmsrl(msr_ops.ctl(i), b->ctl);
1574 wrmsrl(msr_ops.status(i), 0);
1575 }
1576 }
1577
1578 /*
1579 * During IFU recovery Sandy Bridge -EP4S processors set the RIPV and
1580 * EIPV bits in MCG_STATUS to zero on the affected logical processor (SDM
1581 * Vol 3B Table 15-20). But this confuses both the code that determines
1582 * whether the machine check occurred in kernel or user mode, and also
1583 * the severity assessment code. Pretend that EIPV was set, and take the
1584 * ip/cs values from the pt_regs that mce_gather_info() ignored earlier.
1585 */
1586 static void quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs)
1587 {
1588 if (bank != 0)
1589 return;
1590 if ((m->mcgstatus & (MCG_STATUS_EIPV|MCG_STATUS_RIPV)) != 0)
1591 return;
1592 if ((m->status & (MCI_STATUS_OVER|MCI_STATUS_UC|
1593 MCI_STATUS_EN|MCI_STATUS_MISCV|MCI_STATUS_ADDRV|
1594 MCI_STATUS_PCC|MCI_STATUS_S|MCI_STATUS_AR|
1595 MCACOD)) !=
1596 (MCI_STATUS_UC|MCI_STATUS_EN|
1597 MCI_STATUS_MISCV|MCI_STATUS_ADDRV|MCI_STATUS_S|
1598 MCI_STATUS_AR|MCACOD_INSTR))
1599 return;
1600
1601 m->mcgstatus |= MCG_STATUS_EIPV;
1602 m->ip = regs->ip;
1603 m->cs = regs->cs;
1604 }
1605
1606 /* Add per CPU specific workarounds here */
1607 static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
1608 {
1609 struct mca_config *cfg = &mca_cfg;
1610
1611 if (c->x86_vendor == X86_VENDOR_UNKNOWN) {
1612 pr_info("unknown CPU type - not enabling MCE support\n");
1613 return -EOPNOTSUPP;
1614 }
1615
1616 /* This should be disabled by the BIOS, but isn't always */
1617 if (c->x86_vendor == X86_VENDOR_AMD) {
1618 if (c->x86 == 15 && cfg->banks > 4) {
1619 /*
1620 * disable GART TBL walk error reporting, which
1621 * trips off incorrectly with the IOMMU & 3ware
1622 * & Cerberus:
1623 */
1624 clear_bit(10, (unsigned long *)&mce_banks[4].ctl);
1625 }
1626 if (c->x86 < 17 && cfg->bootlog < 0) {
1627 /*
1628 * Lots of broken BIOSes around that don't clear them
1629 * by default and leave crap in there. Don't log:
1630 */
1631 cfg->bootlog = 0;
1632 }
1633 /*
1634 * Various K7s with broken bank 0 around. Always disable
1635 * by default.
1636 */
1637 if (c->x86 == 6 && cfg->banks > 0)
1638 mce_banks[0].ctl = 0;
1639
1640 /*
1641 * overflow_recov is supported for F15h Models 00h-0fh
1642 * even though we don't have a CPUID bit for it.
1643 */
1644 if (c->x86 == 0x15 && c->x86_model <= 0xf)
1645 mce_flags.overflow_recov = 1;
1646
1647 /*
1648 * Turn off MC4_MISC thresholding banks on those models since
1649 * they're not supported there.
1650 */
1651 if (c->x86 == 0x15 &&
1652 (c->x86_model >= 0x10 && c->x86_model <= 0x1f)) {
1653 int i;
1654 u64 hwcr;
1655 bool need_toggle;
1656 u32 msrs[] = {
1657 0x00000413, /* MC4_MISC0 */
1658 0xc0000408, /* MC4_MISC1 */
1659 };
1660
1661 rdmsrl(MSR_K7_HWCR, hwcr);
1662
1663 /* McStatusWrEn has to be set */
1664 need_toggle = !(hwcr & BIT(18));
1665
1666 if (need_toggle)
1667 wrmsrl(MSR_K7_HWCR, hwcr | BIT(18));
1668
1669 /* Clear CntP bit safely */
1670 for (i = 0; i < ARRAY_SIZE(msrs); i++)
1671 msr_clear_bit(msrs[i], 62);
1672
1673 /* restore old settings */
1674 if (need_toggle)
1675 wrmsrl(MSR_K7_HWCR, hwcr);
1676 }
1677 }
1678
1679 if (c->x86_vendor == X86_VENDOR_INTEL) {
1680 /*
1681 * SDM documents that on family 6 bank 0 should not be written
1682 * because it aliases to another special BIOS-controlled
1683 * register.
1684 * But it's not aliased anymore on model 0x1a+.
1685 * Don't ignore bank 0 completely because there could be a
1686 * valid event later, merely don't write CTL0.
1687 */
1688
1689 if (c->x86 == 6 && c->x86_model < 0x1A && cfg->banks > 0)
1690 mce_banks[0].init = 0;
1691
1692 /*
1693 * All newer Intel systems support MCE broadcasting. Enable
1694 * synchronization with a one second timeout.
1695 */
1696 if ((c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xe)) &&
1697 cfg->monarch_timeout < 0)
1698 cfg->monarch_timeout = USEC_PER_SEC;
1699
1700 /*
1701 * There are also broken BIOSes on some Pentium M and
1702 * earlier systems:
1703 */
1704 if (c->x86 == 6 && c->x86_model <= 13 && cfg->bootlog < 0)
1705 cfg->bootlog = 0;
1706
1707 if (c->x86 == 6 && c->x86_model == 45)
1708 quirk_no_way_out = quirk_sandybridge_ifu;
1709 }
1710 if (cfg->monarch_timeout < 0)
1711 cfg->monarch_timeout = 0;
1712 if (cfg->bootlog != 0)
1713 cfg->panic_timeout = 30;
1714
1715 return 0;
1716 }
1717
1718 static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
1719 {
1720 if (c->x86 != 5)
1721 return 0;
1722
1723 switch (c->x86_vendor) {
1724 case X86_VENDOR_INTEL:
1725 intel_p5_mcheck_init(c);
1726 return 1;
1727 break;
1728 case X86_VENDOR_CENTAUR:
1729 winchip_mcheck_init(c);
1730 return 1;
1731 break;
1732 default:
1733 return 0;
1734 }
1735
1736 return 0;
1737 }
1738
1739 static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
1740 {
1741 switch (c->x86_vendor) {
1742 case X86_VENDOR_INTEL:
1743 mce_intel_feature_init(c);
1744 mce_adjust_timer = cmci_intel_adjust_timer;
1745 break;
1746
1747 case X86_VENDOR_AMD: {
1748 mce_flags.overflow_recov = !!cpu_has(c, X86_FEATURE_OVERFLOW_RECOV);
1749 mce_flags.succor = !!cpu_has(c, X86_FEATURE_SUCCOR);
1750 mce_flags.smca = !!cpu_has(c, X86_FEATURE_SMCA);
1751
1752 /*
1753 * Install proper ops for Scalable MCA enabled processors
1754 */
1755 if (mce_flags.smca) {
1756 msr_ops.ctl = smca_ctl_reg;
1757 msr_ops.status = smca_status_reg;
1758 msr_ops.addr = smca_addr_reg;
1759 msr_ops.misc = smca_misc_reg;
1760 }
1761 mce_amd_feature_init(c);
1762
1763 break;
1764 }
1765
1766 default:
1767 break;
1768 }
1769 }
1770
1771 static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c)
1772 {
1773 switch (c->x86_vendor) {
1774 case X86_VENDOR_INTEL:
1775 mce_intel_feature_clear(c);
1776 break;
1777 default:
1778 break;
1779 }
1780 }
1781
1782 static void mce_start_timer(unsigned int cpu, struct timer_list *t)
1783 {
1784 unsigned long iv = check_interval * HZ;
1785
1786 if (mca_cfg.ignore_ce || !iv)
1787 return;
1788
1789 per_cpu(mce_next_interval, cpu) = iv;
1790
1791 t->expires = round_jiffies(jiffies + iv);
1792 add_timer_on(t, cpu);
1793 }
1794
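/* Set up the per-CPU polling timer without arming it; mce_start_timer() arms it. */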
1795 static void __mcheck_cpu_setup_timer(void)
1796 {
1797 struct timer_list *t = this_cpu_ptr(&mce_timer);
1798 unsigned int cpu = smp_processor_id();
1799
1800 setup_pinned_timer(t, mce_timer_fn, cpu);
1801 }
1802
1803 static void __mcheck_cpu_init_timer(void)
1804 {
1805 struct timer_list *t = this_cpu_ptr(&mce_timer);
1806 unsigned int cpu = smp_processor_id();
1807
1808 setup_pinned_timer(t, mce_timer_fn, cpu);
1809 mce_start_timer(cpu, t);
1810 }
1811
1812 /* Handle unconfigured int18 (should never happen) */
1813 static void unexpected_machine_check(struct pt_regs *regs, long error_code)
1814 {
1815 pr_err("CPU#%d: Unexpected int18 (Machine Check)\n",
1816 smp_processor_id());
1817 }
1818
1819 /* Call the installed machine check handler for this CPU setup. */
1820 void (*machine_check_vector)(struct pt_regs *, long error_code) =
1821 unexpected_machine_check;
1822
1823 /*
1824 * Called for each booted CPU to set up machine checks.
1825 * Must be called with preempt off:
1826 */
1827 void mcheck_cpu_init(struct cpuinfo_x86 *c)
1828 {
1829 if (mca_cfg.disabled)
1830 return;
1831
1832 if (__mcheck_cpu_ancient_init(c))
1833 return;
1834
1835 if (!mce_available(c))
1836 return;
1837
1838 if (__mcheck_cpu_cap_init() < 0 || __mcheck_cpu_apply_quirks(c) < 0) {
1839 mca_cfg.disabled = true;
1840 return;
1841 }
1842
1843 if (mce_gen_pool_init()) {
1844 mca_cfg.disabled = true;
1845 pr_emerg("Couldn't allocate MCE records pool!\n");
1846 return;
1847 }
1848
1849 machine_check_vector = do_machine_check;
1850
1851 __mcheck_cpu_init_generic();
1852 __mcheck_cpu_init_vendor(c);
1853 __mcheck_cpu_init_clear_banks();
1854 __mcheck_cpu_setup_timer();
1855 }
1856
1857 /*
1858 * Called for each booted CPU to clear some machine check opt-ins.
1859 */
1860 void mcheck_cpu_clear(struct cpuinfo_x86 *c)
1861 {
1862 if (mca_cfg.disabled)
1863 return;
1864
1865 if (!mce_available(c))
1866 return;
1867
1868 /*
1869 * Possibly to clear general settings generic to x86
1870 * __mcheck_cpu_clear_generic(c);
1871 */
1872 __mcheck_cpu_clear_vendor(c);
1873
1874 }
1875
1876 /*
1877 * mce_chrdev: Character device /dev/mcelog to read and clear the MCE log.
1878 */
1879
1880 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
1881 static int mce_chrdev_open_count; /* #times opened */
1882 static int mce_chrdev_open_exclu; /* already open exclusive? */
1883
1884 static int mce_chrdev_open(struct inode *inode, struct file *file)
1885 {
1886 spin_lock(&mce_chrdev_state_lock);
1887
1888 if (mce_chrdev_open_exclu ||
1889 (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
1890 spin_unlock(&mce_chrdev_state_lock);
1891
1892 return -EBUSY;
1893 }
1894
1895 if (file->f_flags & O_EXCL)
1896 mce_chrdev_open_exclu = 1;
1897 mce_chrdev_open_count++;
1898
1899 spin_unlock(&mce_chrdev_state_lock);
1900
1901 return nonseekable_open(inode, file);
1902 }
1903
1904 static int mce_chrdev_release(struct inode *inode, struct file *file)
1905 {
1906 spin_lock(&mce_chrdev_state_lock);
1907
1908 mce_chrdev_open_count--;
1909 mce_chrdev_open_exclu = 0;
1910
1911 spin_unlock(&mce_chrdev_state_lock);
1912
1913 return 0;
1914 }
1915
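/* IPI callback: record this CPU's current TSC for mce_chrdev_read(). */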
1916 static void collect_tscs(void *data)
1917 {
1918 unsigned long *cpu_tsc = (unsigned long *)data;
1919
1920 cpu_tsc[smp_processor_id()] = rdtsc();
1921 }
1922
1923 static int mce_apei_read_done;
1924
1925 /* Collect MCE record of previous boot in persistent storage via APEI ERST. */
1926 static int __mce_read_apei(char __user **ubuf, size_t usize)
1927 {
1928 int rc;
1929 u64 record_id;
1930 struct mce m;
1931
1932 if (usize < sizeof(struct mce))
1933 return -EINVAL;
1934
1935 rc = apei_read_mce(&m, &record_id);
1936 /* Error or no more MCE record */
1937 if (rc <= 0) {
1938 mce_apei_read_done = 1;
1939 /*
1940 * When ERST is disabled, mce_chrdev_read() should return
1941 * "no record" instead of "no device."
1942 */
1943 if (rc == -ENODEV)
1944 return 0;
1945 return rc;
1946 }
1947 rc = -EFAULT;
1948 if (copy_to_user(*ubuf, &m, sizeof(struct mce)))
1949 return rc;
1950 /*
1951 * In fact, we should have cleared the record after it has
1952 * been flushed to disk or sent over the network by
1953 * /sbin/mcelog, but we have no interface to support that now,
1954 * so just clear it to avoid duplication.
1955 */
1956 rc = apei_clear_mce(record_id);
1957 if (rc) {
1958 mce_apei_read_done = 1;
1959 return rc;
1960 }
1961 *ubuf += sizeof(struct mce);
1962
1963 return 0;
1964 }
1965
1966 static ssize_t mce_chrdev_read(struct file *filp, char __user *ubuf,
1967 size_t usize, loff_t *off)
1968 {
1969 char __user *buf = ubuf;
1970 unsigned long *cpu_tsc;
1971 unsigned prev, next;
1972 int i, err;
1973
1974 cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL);
1975 if (!cpu_tsc)
1976 return -ENOMEM;
1977
1978 mutex_lock(&mce_chrdev_read_mutex);
1979
1980 if (!mce_apei_read_done) {
1981 err = __mce_read_apei(&buf, usize);
1982 if (err || buf != ubuf)
1983 goto out;
1984 }
1985
1986 next = mce_log_get_idx_check(mcelog.next);
1987
1988 /* Only supports full reads right now */
1989 err = -EINVAL;
1990 if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce))
1991 goto out;
1992
1993 err = 0;
1994 prev = 0;
1995 do {
1996 for (i = prev; i < next; i++) {
1997 unsigned long start = jiffies;
1998 struct mce *m = &mcelog.entry[i];
1999
2000 while (!m->finished) {
2001 if (time_after_eq(jiffies, start + 2)) {
2002 memset(m, 0, sizeof(*m));
2003 goto timeout;
2004 }
2005 cpu_relax();
2006 }
2007 smp_rmb();
2008 err |= copy_to_user(buf, m, sizeof(*m));
2009 buf += sizeof(*m);
2010 timeout:
2011 ;
2012 }
2013
2014 memset(mcelog.entry + prev, 0,
2015 (next - prev) * sizeof(struct mce));
2016 prev = next;
2017 next = cmpxchg(&mcelog.next, prev, 0);
2018 } while (next != prev);
2019
2020 synchronize_sched();
2021
2022 /*
2023 * Collect entries that were still getting written before the
2024 * synchronize.
2025 */
2026 on_each_cpu(collect_tscs, cpu_tsc, 1);
2027
2028 for (i = next; i < MCE_LOG_LEN; i++) {
2029 struct mce *m = &mcelog.entry[i];
2030
2031 if (m->finished && m->tsc < cpu_tsc[m->cpu]) {
2032 err |= copy_to_user(buf, m, sizeof(*m));
2033 smp_rmb();
2034 buf += sizeof(*m);
2035 memset(m, 0, sizeof(*m));
2036 }
2037 }
2038
2039 if (err)
2040 err = -EFAULT;
2041
2042 out:
2043 mutex_unlock(&mce_chrdev_read_mutex);
2044 kfree(cpu_tsc);
2045
2046 return err ? err : buf - ubuf;
2047 }
2048
2049 static unsigned int mce_chrdev_poll(struct file *file, poll_table *wait)
2050 {
2051 poll_wait(file, &mce_chrdev_wait, wait);
2052 if (READ_ONCE(mcelog.next))
2053 return POLLIN | POLLRDNORM;
2054 if (!mce_apei_read_done && apei_check_mce())
2055 return POLLIN | POLLRDNORM;
2056 return 0;
2057 }
2058
2059 static long mce_chrdev_ioctl(struct file *f, unsigned int cmd,
2060 unsigned long arg)
2061 {
2062 int __user *p = (int __user *)arg;
2063
2064 if (!capable(CAP_SYS_ADMIN))
2065 return -EPERM;
2066
2067 switch (cmd) {
2068 case MCE_GET_RECORD_LEN:
2069 return put_user(sizeof(struct mce), p);
2070 case MCE_GET_LOG_LEN:
2071 return put_user(MCE_LOG_LEN, p);
2072 case MCE_GETCLEAR_FLAGS: {
2073 unsigned flags;
2074
2075 do {
2076 flags = mcelog.flags;
2077 } while (cmpxchg(&mcelog.flags, flags, 0) != flags);
2078
2079 return put_user(flags, p);
2080 }
2081 default:
2082 return -ENOTTY;
2083 }
2084 }
2085
2086 static ssize_t (*mce_write)(struct file *filp, const char __user *ubuf,
2087 size_t usize, loff_t *off);
2088
2089 void register_mce_write_callback(ssize_t (*fn)(struct file *filp,
2090 const char __user *ubuf,
2091 size_t usize, loff_t *off))
2092 {
2093 mce_write = fn;
2094 }
2095 EXPORT_SYMBOL_GPL(register_mce_write_callback);
2096
2097 static ssize_t mce_chrdev_write(struct file *filp, const char __user *ubuf,
2098 size_t usize, loff_t *off)
2099 {
2100 if (mce_write)
2101 return mce_write(filp, ubuf, usize, off);
2102 else
2103 return -EINVAL;
2104 }
2105
2106 static const struct file_operations mce_chrdev_ops = {
2107 .open = mce_chrdev_open,
2108 .release = mce_chrdev_release,
2109 .read = mce_chrdev_read,
2110 .write = mce_chrdev_write,
2111 .poll = mce_chrdev_poll,
2112 .unlocked_ioctl = mce_chrdev_ioctl,
2113 .llseek = no_llseek,
2114 };
2115
2116 static struct miscdevice mce_chrdev_device = {
2117 MISC_MCELOG_MINOR,
2118 "mcelog",
2119 &mce_chrdev_ops,
2120 };
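/*
 * Illustrative sketch (not part of the original source): how a user-space
 * reader such as the mcelog daemon might consume this device, given the
 * constraints above -- the ioctls require CAP_SYS_ADMIN and read() only
 * accepts full-log reads of MCE_LOG_LEN * sizeof(struct mce) bytes at
 * offset 0.  Assumes the uapi definitions from <asm/mce.h> plus the usual
 * libc headers:
 *
 *	int fd = open("/dev/mcelog", O_RDONLY);
 *	int reclen = 0, loglen = 0;
 *	ioctl(fd, MCE_GET_RECORD_LEN, &reclen);	// sizeof(struct mce)
 *	ioctl(fd, MCE_GET_LOG_LEN, &loglen);	// MCE_LOG_LEN
 *	char *buf = malloc((size_t)reclen * loglen);
 *	ssize_t n = read(fd, buf, (size_t)reclen * loglen);
 *	// n is the number of bytes copied out; every reclen-sized chunk
 *	// is one struct mce record.
 */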
2121
2122 static void __mce_disable_bank(void *arg)
2123 {
2124 int bank = *((int *)arg);
2125 __clear_bit(bank, this_cpu_ptr(mce_poll_banks));
2126 cmci_disable_bank(bank);
2127 }
2128
2129 void mce_disable_bank(int bank)
2130 {
2131 if (bank >= mca_cfg.banks) {
2132 pr_warn(FW_BUG
2133 "Ignoring request to disable invalid MCA bank %d.\n",
2134 bank);
2135 return;
2136 }
2137 set_bit(bank, mce_banks_ce_disabled);
2138 on_each_cpu(__mce_disable_bank, &bank, 1);
2139 }
2140
2141 /*
2142 * mce=off Disables machine check
2143 * mce=no_cmci Disables CMCI
2144 * mce=no_lmce Disables LMCE
2145 * mce=dont_log_ce Clears corrected events silently, no log created for CEs.
2146 * mce=ignore_ce Disables polling and CMCI, corrected events are not cleared.
2147 * mce=TOLERANCELEVEL[,monarchtimeout] (number, see above)
2148 * monarchtimeout is how long to wait for other CPUs on machine
2149 * check, or 0 to not wait
2150 * mce=bootlog Log MCEs from before booting. Disabled by default on AMD.
2151 * mce=nobootlog Don't log MCEs from before booting.
2152 * mce=bios_cmci_threshold Don't program the CMCI threshold
2153 * mce=recovery force enable memcpy_mcsafe()
2154 */
2155 static int __init mcheck_enable(char *str)
2156 {
2157 struct mca_config *cfg = &mca_cfg;
2158
2159 if (*str == 0) {
2160 enable_p5_mce();
2161 return 1;
2162 }
2163 if (*str == '=')
2164 str++;
2165 if (!strcmp(str, "off"))
2166 cfg->disabled = true;
2167 else if (!strcmp(str, "no_cmci"))
2168 cfg->cmci_disabled = true;
2169 else if (!strcmp(str, "no_lmce"))
2170 cfg->lmce_disabled = true;
2171 else if (!strcmp(str, "dont_log_ce"))
2172 cfg->dont_log_ce = true;
2173 else if (!strcmp(str, "ignore_ce"))
2174 cfg->ignore_ce = true;
2175 else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
2176 cfg->bootlog = (str[0] == 'b');
2177 else if (!strcmp(str, "bios_cmci_threshold"))
2178 cfg->bios_cmci_threshold = true;
2179 else if (!strcmp(str, "recovery"))
2180 cfg->recovery = true;
2181 else if (isdigit(str[0])) {
2182 if (get_option(&str, &cfg->tolerant) == 2)
2183 get_option(&str, &(cfg->monarch_timeout));
2184 } else {
2185 pr_info("mce argument %s ignored. Please use /sys\n", str);
2186 return 0;
2187 }
2188 return 1;
2189 }
2190 __setup("mce", mcheck_enable);
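/*
 * Example command lines handled by the parser above (illustrative only):
 *
 *	mce=off		disable machine check handling entirely
 *	mce=no_cmci	use machine checks but not CMCI
 *	mce=1,0		tolerant level 1, monarchtimeout 0 (do not wait
 *			for other CPUs)
 *
 * Each "mce=..." argument is parsed independently, so several of them can
 * be given on the same command line.
 */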
2191
2192 int __init mcheck_init(void)
2193 {
2194 mcheck_intel_therm_init();
2195 mce_register_decode_chain(&mce_srao_nb);
2196 mce_register_decode_chain(&mce_default_nb);
2197 mcheck_vendor_init_severity();
2198
2199 INIT_WORK(&mce_work, mce_process_work);
2200 init_irq_work(&mce_irq_work, mce_irq_work_cb);
2201
2202 return 0;
2203 }
2204
2205 /*
2206 * mce_syscore: PM support
2207 */
2208
2209 /*
2210 * Disable machine checks on suspend and shutdown. We can't really handle
2211 * them later.
2212 */
2213 static void mce_disable_error_reporting(void)
2214 {
2215 int i;
2216
2217 for (i = 0; i < mca_cfg.banks; i++) {
2218 struct mce_bank *b = &mce_banks[i];
2219
2220 if (b->init)
2221 wrmsrl(msr_ops.ctl(i), 0);
2222 }
2223 return;
2224 }
2225
2226 static void vendor_disable_error_reporting(void)
2227 {
2228 /*
2229 * Don't clear on Intel CPUs. Some of these MSRs are socket-wide.
2230 * Disabling them for just a single offlined CPU is bad, since it will
2231 * inhibit reporting for all shared resources on the socket like the
2232 * last level cache (LLC), the integrated memory controller (iMC), etc.
2233 */
2234 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
2235 return;
2236
2237 mce_disable_error_reporting();
2238 }
2239
2240 static int mce_syscore_suspend(void)
2241 {
2242 vendor_disable_error_reporting();
2243 return 0;
2244 }
2245
2246 static void mce_syscore_shutdown(void)
2247 {
2248 vendor_disable_error_reporting();
2249 }
2250
2251 /*
2252 * On resume clear all MCE state. Don't want to see leftovers from the BIOS.
2253 * Only one CPU is active at this time, the others get re-added later using
2254 * CPU hotplug:
2255 */
2256 static void mce_syscore_resume(void)
2257 {
2258 __mcheck_cpu_init_generic();
2259 __mcheck_cpu_init_vendor(raw_cpu_ptr(&cpu_info));
2260 __mcheck_cpu_init_clear_banks();
2261 }
2262
2263 static struct syscore_ops mce_syscore_ops = {
2264 .suspend = mce_syscore_suspend,
2265 .shutdown = mce_syscore_shutdown,
2266 .resume = mce_syscore_resume,
2267 };
2268
2269 /*
2270 * mce_device: Sysfs support
2271 */
2272
2273 static void mce_cpu_restart(void *data)
2274 {
2275 if (!mce_available(raw_cpu_ptr(&cpu_info)))
2276 return;
2277 __mcheck_cpu_init_generic();
2278 __mcheck_cpu_init_clear_banks();
2279 __mcheck_cpu_init_timer();
2280 }
2281
2282 /* Reinit MCEs after user configuration changes */
2283 static void mce_restart(void)
2284 {
2285 mce_timer_delete_all();
2286 on_each_cpu(mce_cpu_restart, NULL, 1);
2287 }
2288
2289 /* Toggle features for corrected errors */
2290 static void mce_disable_cmci(void *data)
2291 {
2292 if (!mce_available(raw_cpu_ptr(&cpu_info)))
2293 return;
2294 cmci_clear();
2295 }
2296
2297 static void mce_enable_ce(void *all)
2298 {
2299 if (!mce_available(raw_cpu_ptr(&cpu_info)))
2300 return;
2301 cmci_reenable();
2302 cmci_recheck();
2303 if (all)
2304 __mcheck_cpu_init_timer();
2305 }
2306
2307 static struct bus_type mce_subsys = {
2308 .name = "machinecheck",
2309 .dev_name = "machinecheck",
2310 };
2311
2312 DEFINE_PER_CPU(struct device *, mce_device);
2313
2314 static inline struct mce_bank *attr_to_bank(struct device_attribute *attr)
2315 {
2316 return container_of(attr, struct mce_bank, attr);
2317 }
2318
2319 static ssize_t show_bank(struct device *s, struct device_attribute *attr,
2320 char *buf)
2321 {
2322 return sprintf(buf, "%llx\n", attr_to_bank(attr)->ctl);
2323 }
2324
2325 static ssize_t set_bank(struct device *s, struct device_attribute *attr,
2326 const char *buf, size_t size)
2327 {
2328 u64 new;
2329
2330 if (kstrtou64(buf, 0, &new) < 0)
2331 return -EINVAL;
2332
2333 attr_to_bank(attr)->ctl = new;
2334 mce_restart();
2335
2336 return size;
2337 }
2338
2339 static ssize_t
2340 show_trigger(struct device *s, struct device_attribute *attr, char *buf)
2341 {
2342 strcpy(buf, mce_helper);
2343 strcat(buf, "\n");
2344 return strlen(mce_helper) + 1;
2345 }
2346
2347 static ssize_t set_trigger(struct device *s, struct device_attribute *attr,
2348 const char *buf, size_t siz)
2349 {
2350 char *p;
2351
2352 strncpy(mce_helper, buf, sizeof(mce_helper));
2353 mce_helper[sizeof(mce_helper)-1] = 0;
2354 p = strchr(mce_helper, '\n');
2355
2356 if (p)
2357 *p = 0;
2358
2359 return strlen(mce_helper) + !!p;
2360 }
2361
2362 static ssize_t set_ignore_ce(struct device *s,
2363 struct device_attribute *attr,
2364 const char *buf, size_t size)
2365 {
2366 u64 new;
2367
2368 if (kstrtou64(buf, 0, &new) < 0)
2369 return -EINVAL;
2370
2371 if (mca_cfg.ignore_ce ^ !!new) {
2372 if (new) {
2373 /* disable ce features */
2374 mce_timer_delete_all();
2375 on_each_cpu(mce_disable_cmci, NULL, 1);
2376 mca_cfg.ignore_ce = true;
2377 } else {
2378 /* enable ce features */
2379 mca_cfg.ignore_ce = false;
2380 on_each_cpu(mce_enable_ce, (void *)1, 1);
2381 }
2382 }
2383 return size;
2384 }
2385
2386 static ssize_t set_cmci_disabled(struct device *s,
2387 struct device_attribute *attr,
2388 const char *buf, size_t size)
2389 {
2390 u64 new;
2391
2392 if (kstrtou64(buf, 0, &new) < 0)
2393 return -EINVAL;
2394
2395 if (mca_cfg.cmci_disabled ^ !!new) {
2396 if (new) {
2397 /* disable cmci */
2398 on_each_cpu(mce_disable_cmci, NULL, 1);
2399 mca_cfg.cmci_disabled = true;
2400 } else {
2401 /* enable cmci */
2402 mca_cfg.cmci_disabled = false;
2403 on_each_cpu(mce_enable_ce, NULL, 1);
2404 }
2405 }
2406 return size;
2407 }
2408
2409 static ssize_t store_int_with_restart(struct device *s,
2410 struct device_attribute *attr,
2411 const char *buf, size_t size)
2412 {
2413 ssize_t ret = device_store_int(s, attr, buf, size);
2414 mce_restart();
2415 return ret;
2416 }
2417
2418 static DEVICE_ATTR(trigger, 0644, show_trigger, set_trigger);
2419 static DEVICE_INT_ATTR(tolerant, 0644, mca_cfg.tolerant);
2420 static DEVICE_INT_ATTR(monarch_timeout, 0644, mca_cfg.monarch_timeout);
2421 static DEVICE_BOOL_ATTR(dont_log_ce, 0644, mca_cfg.dont_log_ce);
2422
2423 static struct dev_ext_attribute dev_attr_check_interval = {
2424 __ATTR(check_interval, 0644, device_show_int, store_int_with_restart),
2425 &check_interval
2426 };
2427
2428 static struct dev_ext_attribute dev_attr_ignore_ce = {
2429 __ATTR(ignore_ce, 0644, device_show_bool, set_ignore_ce),
2430 &mca_cfg.ignore_ce
2431 };
2432
2433 static struct dev_ext_attribute dev_attr_cmci_disabled = {
2434 __ATTR(cmci_disabled, 0644, device_show_bool, set_cmci_disabled),
2435 &mca_cfg.cmci_disabled
2436 };
2437
2438 static struct device_attribute *mce_device_attrs[] = {
2439 &dev_attr_tolerant.attr,
2440 &dev_attr_check_interval.attr,
2441 &dev_attr_trigger,
2442 &dev_attr_monarch_timeout.attr,
2443 &dev_attr_dont_log_ce.attr,
2444 &dev_attr_ignore_ce.attr,
2445 &dev_attr_cmci_disabled.attr,
2446 NULL
2447 };
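/*
 * These attributes are created per CPU by mce_device_create() below, on
 * the "machinecheck" subsystem registered in mcheck_init_device().  With
 * the usual sysfs layout they appear as (illustrative paths):
 *
 *	/sys/devices/system/machinecheck/machinecheck<cpu>/tolerant
 *	/sys/devices/system/machinecheck/machinecheck<cpu>/check_interval
 *	/sys/devices/system/machinecheck/machinecheck<cpu>/ignore_ce
 *	...
 *
 * Writing to check_interval or a bank file re-applies the configuration
 * on all CPUs via mce_restart(); writing 1 to ignore_ce stops the poll
 * timer and disables CMCI everywhere via set_ignore_ce() above.
 */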
2448
2449 static cpumask_var_t mce_device_initialized;
2450
2451 static void mce_device_release(struct device *dev)
2452 {
2453 kfree(dev);
2454 }
2455
2456 /* Per cpu device init. All of the cpus still share the same ctrl bank: */
2457 static int mce_device_create(unsigned int cpu)
2458 {
2459 struct device *dev;
2460 int err;
2461 int i, j;
2462
2463 if (!mce_available(&boot_cpu_data))
2464 return -EIO;
2465
2466 dev = per_cpu(mce_device, cpu);
2467 if (dev)
2468 return 0;
2469
2470 dev = kzalloc(sizeof *dev, GFP_KERNEL);
2471 if (!dev)
2472 return -ENOMEM;
2473 dev->id = cpu;
2474 dev->bus = &mce_subsys;
2475 dev->release = &mce_device_release;
2476
2477 err = device_register(dev);
2478 if (err) {
2479 put_device(dev);
2480 return err;
2481 }
2482
2483 for (i = 0; mce_device_attrs[i]; i++) {
2484 err = device_create_file(dev, mce_device_attrs[i]);
2485 if (err)
2486 goto error;
2487 }
2488 for (j = 0; j < mca_cfg.banks; j++) {
2489 err = device_create_file(dev, &mce_banks[j].attr);
2490 if (err)
2491 goto error2;
2492 }
2493 cpumask_set_cpu(cpu, mce_device_initialized);
2494 per_cpu(mce_device, cpu) = dev;
2495
2496 return 0;
2497 error2:
2498 while (--j >= 0)
2499 device_remove_file(dev, &mce_banks[j].attr);
2500 error:
2501 while (--i >= 0)
2502 device_remove_file(dev, mce_device_attrs[i]);
2503
2504 device_unregister(dev);
2505
2506 return err;
2507 }
2508
2509 static void mce_device_remove(unsigned int cpu)
2510 {
2511 struct device *dev = per_cpu(mce_device, cpu);
2512 int i;
2513
2514 if (!cpumask_test_cpu(cpu, mce_device_initialized))
2515 return;
2516
2517 for (i = 0; mce_device_attrs[i]; i++)
2518 device_remove_file(dev, mce_device_attrs[i]);
2519
2520 for (i = 0; i < mca_cfg.banks; i++)
2521 device_remove_file(dev, &mce_banks[i].attr);
2522
2523 device_unregister(dev);
2524 cpumask_clear_cpu(cpu, mce_device_initialized);
2525 per_cpu(mce_device, cpu) = NULL;
2526 }
2527
2528 /* Make sure there are no machine checks on offlined CPUs. */
2529 static void mce_disable_cpu(void)
2530 {
2531 if (!mce_available(raw_cpu_ptr(&cpu_info)))
2532 return;
2533
2534 if (!cpuhp_tasks_frozen)
2535 cmci_clear();
2536
2537 vendor_disable_error_reporting();
2538 }
2539
2540 static void mce_reenable_cpu(void)
2541 {
2542 int i;
2543
2544 if (!mce_available(raw_cpu_ptr(&cpu_info)))
2545 return;
2546
2547 if (!cpuhp_tasks_frozen)
2548 cmci_reenable();
2549 for (i = 0; i < mca_cfg.banks; i++) {
2550 struct mce_bank *b = &mce_banks[i];
2551
2552 if (b->init)
2553 wrmsrl(msr_ops.ctl(i), b->ctl);
2554 }
2555 }
2556
2557 static int mce_cpu_dead(unsigned int cpu)
2558 {
2559 mce_intel_hcpu_update(cpu);
2560
2561 /* intentionally ignoring frozen here */
2562 if (!cpuhp_tasks_frozen)
2563 cmci_rediscover();
2564 return 0;
2565 }
2566
2567 static int mce_cpu_online(unsigned int cpu)
2568 {
2569 struct timer_list *t = &per_cpu(mce_timer, cpu);
2570 int ret;
2571
2572 mce_device_create(cpu);
2573
2574 ret = mce_threshold_create_device(cpu);
2575 if (ret) {
2576 mce_device_remove(cpu);
2577 return ret;
2578 }
2579 mce_reenable_cpu();
2580 mce_start_timer(cpu, t);
2581 return 0;
2582 }
2583
2584 static int mce_cpu_pre_down(unsigned int cpu)
2585 {
2586 struct timer_list *t = &per_cpu(mce_timer, cpu);
2587
2588 mce_disable_cpu();
2589 del_timer_sync(t);
2590 mce_threshold_remove_device(cpu);
2591 mce_device_remove(cpu);
2592 return 0;
2593 }
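/*
 * mce_cpu_online()/mce_cpu_pre_down() are registered as dynamic CPU
 * hotplug callbacks in mcheck_init_device() below; together with
 * mce_cpu_dead() they add/remove the per-CPU sysfs devices, toggle the
 * banks and CMCI, and start/stop the polling timer as CPUs come and go.
 */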
2594
2595 static __init void mce_init_banks(void)
2596 {
2597 int i;
2598
2599 for (i = 0; i < mca_cfg.banks; i++) {
2600 struct mce_bank *b = &mce_banks[i];
2601 struct device_attribute *a = &b->attr;
2602
2603 sysfs_attr_init(&a->attr);
2604 a->attr.name = b->attrname;
2605 snprintf(b->attrname, ATTR_LEN, "bank%d", i);
2606
2607 a->attr.mode = 0644;
2608 a->show = show_bank;
2609 a->store = set_bank;
2610 }
2611 }
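/*
 * Each bank%d file set up here is backed by show_bank()/set_bank() above:
 * reading it returns the bank's control word (mce_banks[i].ctl), writing
 * it stores a new value and calls mce_restart() so the change is applied
 * on all CPUs.
 */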
2612
2613 static __init int mcheck_init_device(void)
2614 {
2615 enum cpuhp_state hp_online;
2616 int err;
2617
2618 if (!mce_available(&boot_cpu_data)) {
2619 err = -EIO;
2620 goto err_out;
2621 }
2622
2623 if (!zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL)) {
2624 err = -ENOMEM;
2625 goto err_out;
2626 }
2627
2628 mce_init_banks();
2629
2630 err = subsys_system_register(&mce_subsys, NULL);
2631 if (err)
2632 goto err_out_mem;
2633
2634 err = cpuhp_setup_state(CPUHP_X86_MCE_DEAD, "x86/mce:dead", NULL,
2635 mce_cpu_dead);
2636 if (err)
2637 goto err_out_mem;
2638
2639 err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/mce:online",
2640 mce_cpu_online, mce_cpu_pre_down);
2641 if (err < 0)
2642 goto err_out_online;
2643 hp_online = err;
2644
2645 register_syscore_ops(&mce_syscore_ops);
2646
2647 /* register character device /dev/mcelog */
2648 err = misc_register(&mce_chrdev_device);
2649 if (err)
2650 goto err_register;
2651
2652 return 0;
2653
2654 err_register:
2655 unregister_syscore_ops(&mce_syscore_ops);
2656 cpuhp_remove_state(hp_online);
2657
2658 err_out_online:
2659 cpuhp_remove_state(CPUHP_X86_MCE_DEAD);
2660
2661 err_out_mem:
2662 free_cpumask_var(mce_device_initialized);
2663
2664 err_out:
2665 pr_err("Unable to init device /dev/mcelog (rc: %d)\n", err);
2666
2667 return err;
2668 }
2669 device_initcall_sync(mcheck_init_device);
2670
2671 /*
2672 * Old style boot options parsing. Only for compatibility.
2673 */
2674 static int __init mcheck_disable(char *str)
2675 {
2676 mca_cfg.disabled = true;
2677 return 1;
2678 }
2679 __setup("nomce", mcheck_disable);
2680
2681 #ifdef CONFIG_DEBUG_FS
2682 struct dentry *mce_get_debugfs_dir(void)
2683 {
2684 static struct dentry *dmce;
2685
2686 if (!dmce)
2687 dmce = debugfs_create_dir("mce", NULL);
2688
2689 return dmce;
2690 }
2691
2692 static void mce_reset(void)
2693 {
2694 cpu_missing = 0;
2695 atomic_set(&mce_fake_panicked, 0);
2696 atomic_set(&mce_executing, 0);
2697 atomic_set(&mce_callin, 0);
2698 atomic_set(&global_nwo, 0);
2699 }
2700
2701 static int fake_panic_get(void *data, u64 *val)
2702 {
2703 *val = fake_panic;
2704 return 0;
2705 }
2706
2707 static int fake_panic_set(void *data, u64 val)
2708 {
2709 mce_reset();
2710 fake_panic = val;
2711 return 0;
2712 }
2713
2714 DEFINE_SIMPLE_ATTRIBUTE(fake_panic_fops, fake_panic_get,
2715 fake_panic_set, "%llu\n");
2716
2717 static int __init mcheck_debugfs_init(void)
2718 {
2719 struct dentry *dmce, *ffake_panic;
2720
2721 dmce = mce_get_debugfs_dir();
2722 if (!dmce)
2723 return -ENOMEM;
2724 ffake_panic = debugfs_create_file("fake_panic", 0444, dmce, NULL,
2725 &fake_panic_fops);
2726 if (!ffake_panic)
2727 return -ENOMEM;
2728
2729 return 0;
2730 }
2731 #else
2732 static int __init mcheck_debugfs_init(void) { return -EINVAL; }
2733 #endif
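/*
 * With debugfs mounted in the usual place, the file created above appears
 * as /sys/kernel/debug/mce/fake_panic (illustrative path).  Its set
 * handler resets the rendezvous state (mce_callin, mce_executing,
 * global_nwo, ...) via mce_reset() and records the new value; fake_panic
 * is consulted by the MCE panic path elsewhere in this file so that a
 * test-induced "panic" is only logged instead of taking the machine down.
 */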
2734
2735 DEFINE_STATIC_KEY_FALSE(mcsafe_key);
2736 EXPORT_SYMBOL_GPL(mcsafe_key);
2737
2738 static int __init mcheck_late_init(void)
2739 {
2740 if (mca_cfg.recovery)
2741 static_branch_inc(&mcsafe_key);
2742
2743 mcheck_debugfs_init();
2744
2745 /*
2746 * Flush out everything that has been logged during early boot, now that
2747 * everything has been initialized (workqueues, decoders, ...).
2748 */
2749 mce_schedule_work();
2750
2751 return 0;
2752 }
2753 late_initcall(mcheck_late_init);