]> git.proxmox.com Git - mirror_ubuntu-eoan-kernel.git/blame - arch/x86/kernel/cpu/mcheck/mce.c
x86/mce: Do not enter deferred errors into the generic pool twice
[mirror_ubuntu-eoan-kernel.git] / arch / x86 / kernel / cpu / mcheck / mce.c
CommitLineData
1da177e4
LT
1/*
2 * Machine check handler.
e9eee03e 3 *
1da177e4 4 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
d88203d1
TG
5 * Rest from unknown author(s).
6 * 2004 Andi Kleen. Rewrote most of it.
b79109c3
AK
7 * Copyright 2008 Intel Corporation
8 * Author: Andi Kleen
1da177e4 9 */
c767a54b
JP
10
11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
e9eee03e
IM
13#include <linux/thread_info.h>
14#include <linux/capability.h>
15#include <linux/miscdevice.h>
16#include <linux/ratelimit.h>
17#include <linux/kallsyms.h>
18#include <linux/rcupdate.h>
e9eee03e 19#include <linux/kobject.h>
14a02530 20#include <linux/uaccess.h>
e9eee03e
IM
21#include <linux/kdebug.h>
22#include <linux/kernel.h>
23#include <linux/percpu.h>
1da177e4 24#include <linux/string.h>
8a25a2fd 25#include <linux/device.h>
f3c6ea1b 26#include <linux/syscore_ops.h>
3c079792 27#include <linux/delay.h>
8c566ef5 28#include <linux/ctype.h>
e9eee03e 29#include <linux/sched.h>
0d7482e3 30#include <linux/sysfs.h>
e9eee03e 31#include <linux/types.h>
5a0e3ad6 32#include <linux/slab.h>
e9eee03e
IM
33#include <linux/init.h>
34#include <linux/kmod.h>
35#include <linux/poll.h>
3c079792 36#include <linux/nmi.h>
e9eee03e 37#include <linux/cpu.h>
14a02530 38#include <linux/smp.h>
e9eee03e 39#include <linux/fs.h>
9b1beaf2 40#include <linux/mm.h>
5be9ed25 41#include <linux/debugfs.h>
b77e70bf 42#include <linux/irq_work.h>
69c60c88 43#include <linux/export.h>
e9eee03e 44
d88203d1 45#include <asm/processor.h>
95927475 46#include <asm/traps.h>
375074cc 47#include <asm/tlbflush.h>
e9eee03e
IM
48#include <asm/mce.h>
49#include <asm/msr.h>
1da177e4 50
bd19a5e6 51#include "mce-internal.h"
711c2e48 52
93b62c3c 53static DEFINE_MUTEX(mce_chrdev_read_mutex);
2aa2b50d 54
9a7783d0 55#define mce_log_get_idx_check(p) \
e90328b8 56({ \
f78f5b90
PM
57 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
58 !lockdep_is_held(&mce_chrdev_read_mutex), \
3959df1d 59 "suspicious mce_log_get_idx_check() usage"); \
e90328b8
PM
60 smp_load_acquire(&(p)); \
61})
f56e8a07 62
8968f9d3
HS
63#define CREATE_TRACE_POINTS
64#include <trace/events/mce.h>
65
3f2f0680 66#define SPINUNIT 100 /* 100ns */
3c079792 67
01ca79f1
AK
68DEFINE_PER_CPU(unsigned, mce_exception_count);
69
1462594b 70struct mce_bank *mce_banks __read_mostly;
bf80bbd7 71struct mce_vendor_flags mce_flags __read_mostly;
cebe1820 72
d203f0b8 73struct mca_config mca_cfg __read_mostly = {
84c2559d 74 .bootlog = -1,
d203f0b8
BP
75 /*
76 * Tolerant levels:
77 * 0: always panic on uncorrected errors, log corrected errors
78 * 1: panic or SIGBUS on uncorrected errors, log corrected errors
79 * 2: SIGBUS or log uncorrected errors (if possible), log corr. errors
80 * 3: never panic or SIGBUS, log all errors (for testing only)
81 */
84c2559d
BP
82 .tolerant = 1,
83 .monarch_timeout = -1
d203f0b8
BP
84};
85
1020bcbc
HS
86/* User mode helper program triggered by machine check event */
87static unsigned long mce_need_notify;
88static char mce_helper[128];
89static char *mce_helper_argv[2] = { mce_helper, NULL };
1da177e4 90
93b62c3c
HS
91static DECLARE_WAIT_QUEUE_HEAD(mce_chrdev_wait);
92
3c079792
AK
93static DEFINE_PER_CPU(struct mce, mces_seen);
94static int cpu_missing;
95
0644414e
NR
96/*
97 * MCA banks polled by the period polling timer for corrected events.
98 * With Intel CMCI, this only has MCA banks which do not support CMCI (if any).
99 */
ee031c31
AK
100DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
101 [0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
102};
103
c3d1fb56
NR
104/*
105 * MCA banks controlled through firmware first for corrected errors.
106 * This is a global list of banks for which we won't enable CMCI and we
107 * won't poll. Firmware controls these banks and is responsible for
108 * reporting corrected errors through GHES. Uncorrected/recoverable
109 * errors are still notified through a machine check.
110 */
111mce_banks_t mce_banks_ce_disabled;
112
061120ae
CG
113static struct work_struct mce_work;
114static struct irq_work mce_irq_work;
9b1beaf2 115
61b0fccd 116static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);
fd4cf79f 117static int mce_usable_address(struct mce *m);
61b0fccd 118
3653ada5
BP
119/*
120 * CPU/chipset specific EDAC code can register a notifier call here to print
121 * MCE errors in a human-readable form.
122 */
648ed940 123ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain);
3653ada5 124
b5f2fa4e
AK
125/* Do initial initialization of a struct mce */
126void mce_setup(struct mce *m)
127{
128 memset(m, 0, sizeof(struct mce));
d620c67f 129 m->cpu = m->extcpu = smp_processor_id();
4ea1636b 130 m->tsc = rdtsc();
8ee08347
AK
131 /* We hope get_seconds stays lockless */
132 m->time = get_seconds();
133 m->cpuvendor = boot_cpu_data.x86_vendor;
134 m->cpuid = cpuid_eax(1);
8ee08347 135 m->socketid = cpu_data(m->extcpu).phys_proc_id;
8ee08347
AK
136 m->apicid = cpu_data(m->extcpu).initial_apicid;
137 rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap);
b5f2fa4e
AK
138}
139
ea149b36
AK
140DEFINE_PER_CPU(struct mce, injectm);
141EXPORT_PER_CPU_SYMBOL_GPL(injectm);
142
1da177e4
LT
143/*
144 * Lockless MCE logging infrastructure.
145 * This avoids deadlocks on printk locks without having to break locks. Also
146 * separate MCEs from kernel messages to avoid bogus bug reports.
147 */
148
231fd906 149static struct mce_log mcelog = {
f6fb0ac0
AK
150 .signature = MCE_LOG_SIGNATURE,
151 .len = MCE_LOG_LEN,
152 .recordlen = sizeof(struct mce),
d88203d1 153};
1da177e4
LT
154
155void mce_log(struct mce *mce)
156{
157 unsigned next, entry;
e9eee03e 158
8968f9d3
HS
159 /* Emit the trace record: */
160 trace_mce_record(mce);
161
f29a7aff
CG
162 if (!mce_gen_pool_add(mce))
163 irq_work_queue(&mce_irq_work);
f0cb5452 164
1da177e4 165 mce->finished = 0;
7644143c 166 wmb();
1da177e4 167 for (;;) {
9a7783d0 168 entry = mce_log_get_idx_check(mcelog.next);
673242c1 169 for (;;) {
696e409d 170
e9eee03e
IM
171 /*
172 * When the buffer fills up discard new entries.
173 * Assume that the earlier errors are the more
174 * interesting ones:
175 */
673242c1 176 if (entry >= MCE_LOG_LEN) {
14a02530
HS
177 set_bit(MCE_OVERFLOW,
178 (unsigned long *)&mcelog.flags);
673242c1
AK
179 return;
180 }
e9eee03e 181 /* Old left over entry. Skip: */
673242c1
AK
182 if (mcelog.entry[entry].finished) {
183 entry++;
184 continue;
185 }
7644143c 186 break;
1da177e4 187 }
1da177e4
LT
188 smp_rmb();
189 next = entry + 1;
190 if (cmpxchg(&mcelog.next, entry, next) == entry)
191 break;
192 }
193 memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
7644143c 194 wmb();
1da177e4 195 mcelog.entry[entry].finished = 1;
7644143c 196 wmb();
1da177e4 197
a0189c70 198 mce->finished = 1;
1020bcbc 199 set_bit(0, &mce_need_notify);
1da177e4
LT
200}
201
a79da384 202void mce_inject_log(struct mce *m)
09371957 203{
a79da384
BP
204 mutex_lock(&mce_chrdev_read_mutex);
205 mce_log(m);
206 mutex_unlock(&mce_chrdev_read_mutex);
09371957 207}
a79da384 208EXPORT_SYMBOL_GPL(mce_inject_log);
09371957 209
fd4cf79f 210static struct notifier_block mce_srao_nb;
09371957 211
3653ada5
BP
212void mce_register_decode_chain(struct notifier_block *nb)
213{
fd4cf79f
CG
214 /* Ensure SRAO notifier has the highest priority in the decode chain. */
215 if (nb != &mce_srao_nb && nb->priority == INT_MAX)
216 nb->priority -= 1;
217
3653ada5
BP
218 atomic_notifier_chain_register(&x86_mce_decoder_chain, nb);
219}
220EXPORT_SYMBOL_GPL(mce_register_decode_chain);
221
222void mce_unregister_decode_chain(struct notifier_block *nb)
223{
224 atomic_notifier_chain_unregister(&x86_mce_decoder_chain, nb);
225}
226EXPORT_SYMBOL_GPL(mce_unregister_decode_chain);
227
77e26cca 228static void print_mce(struct mce *m)
1da177e4 229{
dffa4b2f
BP
230 int ret = 0;
231
a2d7b0d4 232 pr_emerg(HW_ERR "CPU %d: Machine Check Exception: %Lx Bank %d: %016Lx\n",
d620c67f 233 m->extcpu, m->mcgstatus, m->bank, m->status);
f436f8bb 234
65ea5b03 235 if (m->ip) {
a2d7b0d4 236 pr_emerg(HW_ERR "RIP%s %02x:<%016Lx> ",
f436f8bb
IM
237 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
238 m->cs, m->ip);
239
1da177e4 240 if (m->cs == __KERNEL_CS)
65ea5b03 241 print_symbol("{%s}", m->ip);
f436f8bb 242 pr_cont("\n");
1da177e4 243 }
f436f8bb 244
a2d7b0d4 245 pr_emerg(HW_ERR "TSC %llx ", m->tsc);
1da177e4 246 if (m->addr)
f436f8bb 247 pr_cont("ADDR %llx ", m->addr);
1da177e4 248 if (m->misc)
f436f8bb 249 pr_cont("MISC %llx ", m->misc);
549d042d 250
f436f8bb 251 pr_cont("\n");
506ed6b5
AK
252 /*
253 * Note this output is parsed by external tools and old fields
254 * should not be changed.
255 */
881e23e5 256 pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n",
506ed6b5
AK
257 m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid,
258 cpu_data(m->extcpu).microcode);
f436f8bb
IM
259
260 /*
261 * Print out human-readable details about the MCE error,
fb253195 262 * (if the CPU has an implementation for that)
f436f8bb 263 */
dffa4b2f
BP
264 ret = atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m);
265 if (ret == NOTIFY_STOP)
266 return;
267
268 pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n");
86503560
AK
269}
270
f94b61c2
AK
271#define PANIC_TIMEOUT 5 /* 5 seconds */
272
c7c9b392 273static atomic_t mce_panicked;
f94b61c2 274
bf783f9f 275static int fake_panic;
c7c9b392 276static atomic_t mce_fake_panicked;
bf783f9f 277
f94b61c2
AK
278/* Panic in progress. Enable interrupts and wait for final IPI */
279static void wait_for_panic(void)
280{
281 long timeout = PANIC_TIMEOUT*USEC_PER_SEC;
f436f8bb 282
f94b61c2
AK
283 preempt_disable();
284 local_irq_enable();
285 while (timeout-- > 0)
286 udelay(1);
29b0f591 287 if (panic_timeout == 0)
7af19e4a 288 panic_timeout = mca_cfg.panic_timeout;
f94b61c2
AK
289 panic("Panicing machine check CPU died");
290}
291
6c80f87e 292static void mce_panic(const char *msg, struct mce *final, char *exp)
d88203d1 293{
482908b4 294 int i, apei_err = 0;
e02e68d3 295
bf783f9f
HY
296 if (!fake_panic) {
297 /*
298 * Make sure only one CPU runs in machine check panic
299 */
c7c9b392 300 if (atomic_inc_return(&mce_panicked) > 1)
bf783f9f
HY
301 wait_for_panic();
302 barrier();
f94b61c2 303
bf783f9f
HY
304 bust_spinlocks(1);
305 console_verbose();
306 } else {
307 /* Don't log too much for fake panic */
c7c9b392 308 if (atomic_inc_return(&mce_fake_panicked) > 1)
bf783f9f
HY
309 return;
310 }
a0189c70 311 /* First print corrected ones that are still unlogged */
1da177e4 312 for (i = 0; i < MCE_LOG_LEN; i++) {
a0189c70 313 struct mce *m = &mcelog.entry[i];
77e26cca
HS
314 if (!(m->status & MCI_STATUS_VAL))
315 continue;
482908b4 316 if (!(m->status & MCI_STATUS_UC)) {
77e26cca 317 print_mce(m);
482908b4
HY
318 if (!apei_err)
319 apei_err = apei_write_mce(m);
320 }
a0189c70
AK
321 }
322 /* Now print uncorrected but with the final one last */
323 for (i = 0; i < MCE_LOG_LEN; i++) {
324 struct mce *m = &mcelog.entry[i];
325 if (!(m->status & MCI_STATUS_VAL))
1da177e4 326 continue;
77e26cca
HS
327 if (!(m->status & MCI_STATUS_UC))
328 continue;
482908b4 329 if (!final || memcmp(m, final, sizeof(struct mce))) {
77e26cca 330 print_mce(m);
482908b4
HY
331 if (!apei_err)
332 apei_err = apei_write_mce(m);
333 }
1da177e4 334 }
482908b4 335 if (final) {
77e26cca 336 print_mce(final);
482908b4
HY
337 if (!apei_err)
338 apei_err = apei_write_mce(final);
339 }
3c079792 340 if (cpu_missing)
a2d7b0d4 341 pr_emerg(HW_ERR "Some CPUs didn't answer in synchronization\n");
bd19a5e6 342 if (exp)
a2d7b0d4 343 pr_emerg(HW_ERR "Machine check: %s\n", exp);
bf783f9f
HY
344 if (!fake_panic) {
345 if (panic_timeout == 0)
7af19e4a 346 panic_timeout = mca_cfg.panic_timeout;
bf783f9f
HY
347 panic(msg);
348 } else
a2d7b0d4 349 pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
d88203d1 350}
1da177e4 351
ea149b36
AK
352/* Support code for software error injection */
353
354static int msr_to_offset(u32 msr)
355{
0a3aee0d 356 unsigned bank = __this_cpu_read(injectm.bank);
f436f8bb 357
84c2559d 358 if (msr == mca_cfg.rip_msr)
ea149b36 359 return offsetof(struct mce, ip);
a2d32bcb 360 if (msr == MSR_IA32_MCx_STATUS(bank))
ea149b36 361 return offsetof(struct mce, status);
a2d32bcb 362 if (msr == MSR_IA32_MCx_ADDR(bank))
ea149b36 363 return offsetof(struct mce, addr);
a2d32bcb 364 if (msr == MSR_IA32_MCx_MISC(bank))
ea149b36
AK
365 return offsetof(struct mce, misc);
366 if (msr == MSR_IA32_MCG_STATUS)
367 return offsetof(struct mce, mcgstatus);
368 return -1;
369}
370
5f8c1a54
AK
371/* MSR access wrappers used for error injection */
372static u64 mce_rdmsrl(u32 msr)
373{
374 u64 v;
11868a2d 375
0a3aee0d 376 if (__this_cpu_read(injectm.finished)) {
ea149b36 377 int offset = msr_to_offset(msr);
11868a2d 378
ea149b36
AK
379 if (offset < 0)
380 return 0;
89cbc767 381 return *(u64 *)((char *)this_cpu_ptr(&injectm) + offset);
ea149b36 382 }
11868a2d
IM
383
384 if (rdmsrl_safe(msr, &v)) {
385 WARN_ONCE(1, "mce: Unable to read msr %d!\n", msr);
386 /*
387 * Return zero in case the access faulted. This should
388 * not happen normally but can happen if the CPU does
389 * something weird, or if the code is buggy.
390 */
391 v = 0;
392 }
393
5f8c1a54
AK
394 return v;
395}
396
397static void mce_wrmsrl(u32 msr, u64 v)
398{
0a3aee0d 399 if (__this_cpu_read(injectm.finished)) {
ea149b36 400 int offset = msr_to_offset(msr);
11868a2d 401
ea149b36 402 if (offset >= 0)
89cbc767 403 *(u64 *)((char *)this_cpu_ptr(&injectm) + offset) = v;
ea149b36
AK
404 return;
405 }
5f8c1a54
AK
406 wrmsrl(msr, v);
407}
408
b8325c5b
HS
409/*
410 * Collect all global (w.r.t. this processor) status about this machine
411 * check into our "mce" struct so that we can use it later to assess
412 * the severity of the problem as we read per-bank specific details.
413 */
414static inline void mce_gather_info(struct mce *m, struct pt_regs *regs)
415{
416 mce_setup(m);
417
418 m->mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
419 if (regs) {
420 /*
421 * Get the address of the instruction at the time of
422 * the machine check error.
423 */
424 if (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) {
425 m->ip = regs->ip;
426 m->cs = regs->cs;
a129a7c8
AK
427
428 /*
429 * When in VM86 mode make the cs look like ring 3
430 * always. This is a lie, but it's better than passing
431 * the additional vm86 bit around everywhere.
432 */
433 if (v8086_mode(regs))
434 m->cs |= 3;
b8325c5b
HS
435 }
436 /* Use accurate RIP reporting if available. */
84c2559d
BP
437 if (mca_cfg.rip_msr)
438 m->ip = mce_rdmsrl(mca_cfg.rip_msr);
b8325c5b
HS
439 }
440}
441
88ccbedd 442int mce_available(struct cpuinfo_x86 *c)
1da177e4 443{
1462594b 444 if (mca_cfg.disabled)
5b4408fd 445 return 0;
3d1712c9 446 return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
1da177e4
LT
447}
448
9b1beaf2
AK
449static void mce_schedule_work(void)
450{
fd4cf79f 451 if (!mce_gen_pool_empty() && keventd_up())
061120ae 452 schedule_work(&mce_work);
9b1beaf2
AK
453}
454
b77e70bf 455static void mce_irq_work_cb(struct irq_work *entry)
ccc3c319 456{
9ff36ee9 457 mce_notify_irq();
9b1beaf2 458 mce_schedule_work();
ccc3c319 459}
ccc3c319
AK
460
461static void mce_report_event(struct pt_regs *regs)
462{
463 if (regs->flags & (X86_VM_MASK|X86_EFLAGS_IF)) {
9ff36ee9 464 mce_notify_irq();
9b1beaf2
AK
465 /*
466 * Triggering the work queue here is just an insurance
467 * policy in case the syscall exit notify handler
468 * doesn't run soon enough or ends up running on the
469 * wrong CPU (can happen when audit sleeps)
470 */
471 mce_schedule_work();
ccc3c319
AK
472 return;
473 }
474
061120ae 475 irq_work_queue(&mce_irq_work);
ccc3c319
AK
476}
477
fd4cf79f
CG
478static int srao_decode_notifier(struct notifier_block *nb, unsigned long val,
479 void *data)
480{
481 struct mce *mce = (struct mce *)data;
482 unsigned long pfn;
483
484 if (!mce)
485 return NOTIFY_DONE;
486
487 if (mce->usable_addr && (mce->severity == MCE_AO_SEVERITY)) {
488 pfn = mce->addr >> PAGE_SHIFT;
489 memory_failure(pfn, MCE_VECTOR, 0);
490 }
491
492 return NOTIFY_OK;
ccc3c319 493}
fd4cf79f
CG
494static struct notifier_block mce_srao_nb = {
495 .notifier_call = srao_decode_notifier,
496 .priority = INT_MAX,
497};
ccc3c319 498
85f92694
TL
499/*
500 * Read ADDR and MISC registers.
501 */
502static void mce_read_aux(struct mce *m, int i)
503{
504 if (m->status & MCI_STATUS_MISCV)
505 m->misc = mce_rdmsrl(MSR_IA32_MCx_MISC(i));
506 if (m->status & MCI_STATUS_ADDRV) {
507 m->addr = mce_rdmsrl(MSR_IA32_MCx_ADDR(i));
508
509 /*
510 * Mask the reported address by the reported granularity.
511 */
1462594b 512 if (mca_cfg.ser && (m->status & MCI_STATUS_MISCV)) {
85f92694
TL
513 u8 shift = MCI_MISC_ADDR_LSB(m->misc);
514 m->addr >>= shift;
515 m->addr <<= shift;
516 }
517 }
518}
519
fa92c586
CY
520static bool memory_error(struct mce *m)
521{
522 struct cpuinfo_x86 *c = &boot_cpu_data;
523
524 if (c->x86_vendor == X86_VENDOR_AMD) {
525 /*
526 * coming soon
527 */
528 return false;
529 } else if (c->x86_vendor == X86_VENDOR_INTEL) {
530 /*
531 * Intel SDM Volume 3B - 15.9.2 Compound Error Codes
532 *
533 * Bit 7 of the MCACOD field of IA32_MCi_STATUS is used for
534 * indicating a memory error. Bit 8 is used for indicating a
535 * cache hierarchy error. The combination of bit 2 and bit 3
536 * is used for indicating a `generic' cache hierarchy error
537 * But we can't just blindly check the above bits, because if
538 * bit 11 is set, then it is a bus/interconnect error - and
539 * either way the above bits just gives more detail on what
540 * bus/interconnect error happened. Note that bit 12 can be
541 * ignored, as it's the "filter" bit.
542 */
543 return (m->status & 0xef80) == BIT(7) ||
544 (m->status & 0xef00) == BIT(8) ||
545 (m->status & 0xeffc) == 0xc;
546 }
547
548 return false;
549}
550
ca84f696
AK
551DEFINE_PER_CPU(unsigned, mce_poll_count);
552
d88203d1 553/*
b79109c3
AK
554 * Poll for corrected events or events that happened before reset.
555 * Those are just logged through /dev/mcelog.
556 *
557 * This is executed in standard interrupt context.
ed7290d0
AK
558 *
559 * Note: spec recommends to panic for fatal unsignalled
560 * errors here. However this would be quite problematic --
561 * we would need to reimplement the Monarch handling and
562 * it would mess up the exclusion between exception handler
563 * and poll hander -- * so we skip this for now.
564 * These cases should not happen anyways, or only when the CPU
565 * is already totally * confused. In this case it's likely it will
566 * not fully execute the machine check handler either.
b79109c3 567 */
3f2f0680 568bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
b79109c3 569{
8b38937b 570 bool error_seen = false;
b79109c3 571 struct mce m;
fa92c586 572 int severity;
b79109c3
AK
573 int i;
574
c6ae41e7 575 this_cpu_inc(mce_poll_count);
ca84f696 576
b8325c5b 577 mce_gather_info(&m, NULL);
b79109c3 578
d203f0b8 579 for (i = 0; i < mca_cfg.banks; i++) {
cebe1820 580 if (!mce_banks[i].ctl || !test_bit(i, *b))
b79109c3
AK
581 continue;
582
583 m.misc = 0;
584 m.addr = 0;
585 m.bank = i;
586 m.tsc = 0;
587
588 barrier();
a2d32bcb 589 m.status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
b79109c3
AK
590 if (!(m.status & MCI_STATUS_VAL))
591 continue;
592
3f2f0680 593
b79109c3 594 /*
ed7290d0
AK
595 * Uncorrected or signalled events are handled by the exception
596 * handler when it is enabled, so don't process those here.
b79109c3
AK
597 *
598 * TBD do the same check for MCI_STATUS_EN here?
599 */
ed7290d0 600 if (!(flags & MCP_UC) &&
1462594b 601 (m.status & (mca_cfg.ser ? MCI_STATUS_S : MCI_STATUS_UC)))
b79109c3
AK
602 continue;
603
8b38937b
TL
604 error_seen = true;
605
85f92694 606 mce_read_aux(&m, i);
b79109c3
AK
607
608 if (!(flags & MCP_TIMESTAMP))
609 m.tsc = 0;
fa92c586
CY
610
611 severity = mce_severity(&m, mca_cfg.tolerant, NULL, false);
612
fa92c586
CY
613 if (severity == MCE_DEFERRED_SEVERITY && memory_error(&m)) {
614 if (m.status & MCI_STATUS_ADDRV) {
fd4cf79f
CG
615 m.severity = severity;
616 m.usable_addr = mce_usable_address(&m);
fa92c586
CY
617 }
618 }
619
b79109c3
AK
620 /*
621 * Don't get the IP here because it's unlikely to
622 * have anything to do with the actual error location.
623 */
8b38937b 624 if (!(flags & MCP_DONTLOG) && !mca_cfg.dont_log_ce)
5679af4c 625 mce_log(&m);
8b38937b
TL
626 else if (m.usable_addr) {
627 /*
628 * Although we skipped logging this, we still want
629 * to take action. Add to the pool so the registered
630 * notifiers will see it.
631 */
632 if (!mce_gen_pool_add(&m))
633 mce_schedule_work();
3f2f0680 634 }
b79109c3
AK
635
636 /*
637 * Clear state for this bank.
638 */
a2d32bcb 639 mce_wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
b79109c3
AK
640 }
641
642 /*
643 * Don't clear MCG_STATUS here because it's only defined for
644 * exceptions.
645 */
88921be3
AK
646
647 sync_core();
3f2f0680 648
8b38937b 649 return error_seen;
b79109c3 650}
ea149b36 651EXPORT_SYMBOL_GPL(machine_check_poll);
b79109c3 652
bd19a5e6
AK
653/*
654 * Do a quick check if any of the events requires a panic.
655 * This decides if we keep the events around or clear them.
656 */
61b0fccd
TL
657static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
658 struct pt_regs *regs)
bd19a5e6 659{
95022b8c 660 int i, ret = 0;
17fea54b 661 char *tmp;
bd19a5e6 662
d203f0b8 663 for (i = 0; i < mca_cfg.banks; i++) {
a2d32bcb 664 m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
61b0fccd 665 if (m->status & MCI_STATUS_VAL) {
95022b8c 666 __set_bit(i, validp);
61b0fccd
TL
667 if (quirk_no_way_out)
668 quirk_no_way_out(i, m, regs);
669 }
17fea54b
BP
670
671 if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
672 *msg = tmp;
95022b8c 673 ret = 1;
17fea54b 674 }
bd19a5e6 675 }
95022b8c 676 return ret;
bd19a5e6
AK
677}
678
3c079792
AK
679/*
680 * Variable to establish order between CPUs while scanning.
681 * Each CPU spins initially until executing is equal its number.
682 */
683static atomic_t mce_executing;
684
685/*
686 * Defines order of CPUs on entry. First CPU becomes Monarch.
687 */
688static atomic_t mce_callin;
689
690/*
691 * Check if a timeout waiting for other CPUs happened.
692 */
6c80f87e 693static int mce_timed_out(u64 *t, const char *msg)
3c079792
AK
694{
695 /*
696 * The others already did panic for some reason.
697 * Bail out like in a timeout.
698 * rmb() to tell the compiler that system_state
699 * might have been modified by someone else.
700 */
701 rmb();
c7c9b392 702 if (atomic_read(&mce_panicked))
3c079792 703 wait_for_panic();
84c2559d 704 if (!mca_cfg.monarch_timeout)
3c079792
AK
705 goto out;
706 if ((s64)*t < SPINUNIT) {
716079f6 707 if (mca_cfg.tolerant <= 1)
6c80f87e 708 mce_panic(msg, NULL, NULL);
3c079792
AK
709 cpu_missing = 1;
710 return 1;
711 }
712 *t -= SPINUNIT;
713out:
714 touch_nmi_watchdog();
715 return 0;
716}
717
718/*
719 * The Monarch's reign. The Monarch is the CPU who entered
720 * the machine check handler first. It waits for the others to
721 * raise the exception too and then grades them. When any
722 * error is fatal panic. Only then let the others continue.
723 *
724 * The other CPUs entering the MCE handler will be controlled by the
725 * Monarch. They are called Subjects.
726 *
727 * This way we prevent any potential data corruption in a unrecoverable case
728 * and also makes sure always all CPU's errors are examined.
729 *
680b6cfd 730 * Also this detects the case of a machine check event coming from outer
3c079792
AK
731 * space (not detected by any CPUs) In this case some external agent wants
732 * us to shut down, so panic too.
733 *
734 * The other CPUs might still decide to panic if the handler happens
735 * in a unrecoverable place, but in this case the system is in a semi-stable
736 * state and won't corrupt anything by itself. It's ok to let the others
737 * continue for a bit first.
738 *
739 * All the spin loops have timeouts; when a timeout happens a CPU
740 * typically elects itself to be Monarch.
741 */
742static void mce_reign(void)
743{
744 int cpu;
745 struct mce *m = NULL;
746 int global_worst = 0;
747 char *msg = NULL;
748 char *nmsg = NULL;
749
750 /*
751 * This CPU is the Monarch and the other CPUs have run
752 * through their handlers.
753 * Grade the severity of the errors of all the CPUs.
754 */
755 for_each_possible_cpu(cpu) {
d203f0b8
BP
756 int severity = mce_severity(&per_cpu(mces_seen, cpu),
757 mca_cfg.tolerant,
e3480271 758 &nmsg, true);
3c079792
AK
759 if (severity > global_worst) {
760 msg = nmsg;
761 global_worst = severity;
762 m = &per_cpu(mces_seen, cpu);
763 }
764 }
765
766 /*
767 * Cannot recover? Panic here then.
768 * This dumps all the mces in the log buffer and stops the
769 * other CPUs.
770 */
d203f0b8 771 if (m && global_worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3)
8af7043a 772 mce_panic("Fatal machine check", m, msg);
3c079792
AK
773
774 /*
775 * For UC somewhere we let the CPU who detects it handle it.
776 * Also must let continue the others, otherwise the handling
777 * CPU could deadlock on a lock.
778 */
779
780 /*
781 * No machine check event found. Must be some external
782 * source or one CPU is hung. Panic.
783 */
d203f0b8 784 if (global_worst <= MCE_KEEP_SEVERITY && mca_cfg.tolerant < 3)
8af7043a 785 mce_panic("Fatal machine check from unknown source", NULL, NULL);
3c079792
AK
786
787 /*
788 * Now clear all the mces_seen so that they don't reappear on
789 * the next mce.
790 */
791 for_each_possible_cpu(cpu)
792 memset(&per_cpu(mces_seen, cpu), 0, sizeof(struct mce));
793}
794
795static atomic_t global_nwo;
796
797/*
798 * Start of Monarch synchronization. This waits until all CPUs have
799 * entered the exception handler and then determines if any of them
800 * saw a fatal event that requires panic. Then it executes them
801 * in the entry order.
802 * TBD double check parallel CPU hotunplug
803 */
7fb06fc9 804static int mce_start(int *no_way_out)
3c079792 805{
7fb06fc9 806 int order;
3c079792 807 int cpus = num_online_cpus();
84c2559d 808 u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;
3c079792 809
7fb06fc9
HS
810 if (!timeout)
811 return -1;
3c079792 812
7fb06fc9 813 atomic_add(*no_way_out, &global_nwo);
184e1fdf
HY
814 /*
815 * global_nwo should be updated before mce_callin
816 */
817 smp_wmb();
a95436e4 818 order = atomic_inc_return(&mce_callin);
3c079792
AK
819
820 /*
821 * Wait for everyone.
822 */
823 while (atomic_read(&mce_callin) != cpus) {
6c80f87e
AL
824 if (mce_timed_out(&timeout,
825 "Timeout: Not all CPUs entered broadcast exception handler")) {
3c079792 826 atomic_set(&global_nwo, 0);
7fb06fc9 827 return -1;
3c079792
AK
828 }
829 ndelay(SPINUNIT);
830 }
831
184e1fdf
HY
832 /*
833 * mce_callin should be read before global_nwo
834 */
835 smp_rmb();
3c079792 836
7fb06fc9
HS
837 if (order == 1) {
838 /*
839 * Monarch: Starts executing now, the others wait.
840 */
3c079792 841 atomic_set(&mce_executing, 1);
7fb06fc9
HS
842 } else {
843 /*
844 * Subject: Now start the scanning loop one by one in
845 * the original callin order.
846 * This way when there are any shared banks it will be
847 * only seen by one CPU before cleared, avoiding duplicates.
848 */
849 while (atomic_read(&mce_executing) < order) {
6c80f87e
AL
850 if (mce_timed_out(&timeout,
851 "Timeout: Subject CPUs unable to finish machine check processing")) {
7fb06fc9
HS
852 atomic_set(&global_nwo, 0);
853 return -1;
854 }
855 ndelay(SPINUNIT);
856 }
3c079792
AK
857 }
858
859 /*
7fb06fc9 860 * Cache the global no_way_out state.
3c079792 861 */
7fb06fc9
HS
862 *no_way_out = atomic_read(&global_nwo);
863
864 return order;
3c079792
AK
865}
866
867/*
868 * Synchronize between CPUs after main scanning loop.
869 * This invokes the bulk of the Monarch processing.
870 */
871static int mce_end(int order)
872{
873 int ret = -1;
84c2559d 874 u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;
3c079792
AK
875
876 if (!timeout)
877 goto reset;
878 if (order < 0)
879 goto reset;
880
881 /*
882 * Allow others to run.
883 */
884 atomic_inc(&mce_executing);
885
886 if (order == 1) {
887 /* CHECKME: Can this race with a parallel hotplug? */
888 int cpus = num_online_cpus();
889
890 /*
891 * Monarch: Wait for everyone to go through their scanning
892 * loops.
893 */
894 while (atomic_read(&mce_executing) <= cpus) {
6c80f87e
AL
895 if (mce_timed_out(&timeout,
896 "Timeout: Monarch CPU unable to finish machine check processing"))
3c079792
AK
897 goto reset;
898 ndelay(SPINUNIT);
899 }
900
901 mce_reign();
902 barrier();
903 ret = 0;
904 } else {
905 /*
906 * Subject: Wait for Monarch to finish.
907 */
908 while (atomic_read(&mce_executing) != 0) {
6c80f87e
AL
909 if (mce_timed_out(&timeout,
910 "Timeout: Monarch CPU did not finish machine check processing"))
3c079792
AK
911 goto reset;
912 ndelay(SPINUNIT);
913 }
914
915 /*
916 * Don't reset anything. That's done by the Monarch.
917 */
918 return 0;
919 }
920
921 /*
922 * Reset all global state.
923 */
924reset:
925 atomic_set(&global_nwo, 0);
926 atomic_set(&mce_callin, 0);
927 barrier();
928
929 /*
930 * Let others run again.
931 */
932 atomic_set(&mce_executing, 0);
933 return ret;
934}
935
9b1beaf2
AK
936/*
937 * Check if the address reported by the CPU is in a format we can parse.
938 * It would be possible to add code for most other cases, but all would
939 * be somewhat complicated (e.g. segment offset would require an instruction
0d2eb44f 940 * parser). So only support physical addresses up to page granuality for now.
9b1beaf2
AK
941 */
942static int mce_usable_address(struct mce *m)
943{
944 if (!(m->status & MCI_STATUS_MISCV) || !(m->status & MCI_STATUS_ADDRV))
945 return 0;
2b90e77e 946 if (MCI_MISC_ADDR_LSB(m->misc) > PAGE_SHIFT)
9b1beaf2 947 return 0;
2b90e77e 948 if (MCI_MISC_ADDR_MODE(m->misc) != MCI_MISC_ADDR_PHYS)
9b1beaf2
AK
949 return 0;
950 return 1;
951}
952
3c079792
AK
953static void mce_clear_state(unsigned long *toclear)
954{
955 int i;
956
d203f0b8 957 for (i = 0; i < mca_cfg.banks; i++) {
3c079792 958 if (test_bit(i, toclear))
a2d32bcb 959 mce_wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
3c079792
AK
960 }
961}
962
b79109c3
AK
963/*
964 * The actual machine check handler. This only handles real
965 * exceptions when something got corrupted coming in through int 18.
966 *
967 * This is executed in NMI context not subject to normal locking rules. This
968 * implies that most kernel services cannot be safely used. Don't even
969 * think about putting a printk in there!
3c079792
AK
970 *
971 * On Intel systems this is entered on all CPUs in parallel through
972 * MCE broadcast. However some CPUs might be broken beyond repair,
973 * so be always careful when synchronizing with others.
1da177e4 974 */
e9eee03e 975void do_machine_check(struct pt_regs *regs, long error_code)
1da177e4 976{
1462594b 977 struct mca_config *cfg = &mca_cfg;
3c079792 978 struct mce m, *final;
1da177e4 979 int i;
3c079792
AK
980 int worst = 0;
981 int severity;
982 /*
983 * Establish sequential order between the CPUs entering the machine
984 * check handler.
985 */
7fb06fc9 986 int order;
bd78432c
TH
987 /*
988 * If no_way_out gets set, there is no safe way to recover from this
d203f0b8 989 * MCE. If mca_cfg.tolerant is cranked up, we'll try anyway.
bd78432c
TH
990 */
991 int no_way_out = 0;
992 /*
993 * If kill_it gets set, there might be a way to recover from this
994 * error.
995 */
996 int kill_it = 0;
b79109c3 997 DECLARE_BITMAP(toclear, MAX_NR_BANKS);
95022b8c 998 DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
bd19a5e6 999 char *msg = "Unknown";
d4812e16
TL
1000 u64 recover_paddr = ~0ull;
1001 int flags = MF_ACTION_REQUIRED;
243d657e 1002 int lmce = 0;
1da177e4 1003
8c84014f 1004 ist_enter(regs);
95927475 1005
c6ae41e7 1006 this_cpu_inc(mce_exception_count);
01ca79f1 1007
1462594b 1008 if (!cfg->banks)
32561696 1009 goto out;
1da177e4 1010
b8325c5b 1011 mce_gather_info(&m, regs);
b5f2fa4e 1012
89cbc767 1013 final = this_cpu_ptr(&mces_seen);
3c079792
AK
1014 *final = m;
1015
95022b8c 1016 memset(valid_banks, 0, sizeof(valid_banks));
61b0fccd 1017 no_way_out = mce_no_way_out(&m, &msg, valid_banks, regs);
680b6cfd 1018
1da177e4
LT
1019 barrier();
1020
ed7290d0 1021 /*
a8c321fb
TL
1022 * When no restart IP might need to kill or panic.
1023 * Assume the worst for now, but if we find the
1024 * severity is MCE_AR_SEVERITY we have other options.
ed7290d0
AK
1025 */
1026 if (!(m.mcgstatus & MCG_STATUS_RIPV))
1027 kill_it = 1;
1028
3c079792 1029 /*
243d657e 1030 * Check if this MCE is signaled to only this logical processor
3c079792 1031 */
243d657e
AR
1032 if (m.mcgstatus & MCG_STATUS_LMCES)
1033 lmce = 1;
1034 else {
1035 /*
1036 * Go through all the banks in exclusion of the other CPUs.
1037 * This way we don't report duplicated events on shared banks
1038 * because the first one to see it will clear it.
1039 * If this is a Local MCE, then no need to perform rendezvous.
1040 */
1041 order = mce_start(&no_way_out);
1042 }
1043
1462594b 1044 for (i = 0; i < cfg->banks; i++) {
b79109c3 1045 __clear_bit(i, toclear);
95022b8c
TL
1046 if (!test_bit(i, valid_banks))
1047 continue;
cebe1820 1048 if (!mce_banks[i].ctl)
1da177e4 1049 continue;
d88203d1
TG
1050
1051 m.misc = 0;
1da177e4
LT
1052 m.addr = 0;
1053 m.bank = i;
1da177e4 1054
a2d32bcb 1055 m.status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
1da177e4
LT
1056 if ((m.status & MCI_STATUS_VAL) == 0)
1057 continue;
1058
b79109c3 1059 /*
ed7290d0
AK
1060 * Non uncorrected or non signaled errors are handled by
1061 * machine_check_poll. Leave them alone, unless this panics.
b79109c3 1062 */
1462594b 1063 if (!(m.status & (cfg->ser ? MCI_STATUS_S : MCI_STATUS_UC)) &&
ed7290d0 1064 !no_way_out)
b79109c3
AK
1065 continue;
1066
1067 /*
1068 * Set taint even when machine check was not enabled.
1069 */
373d4d09 1070 add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
b79109c3 1071
e3480271 1072 severity = mce_severity(&m, cfg->tolerant, NULL, true);
b79109c3 1073
ed7290d0 1074 /*
e3480271
CY
1075 * When machine check was for corrected/deferred handler don't
1076 * touch, unless we're panicing.
ed7290d0 1077 */
e3480271
CY
1078 if ((severity == MCE_KEEP_SEVERITY ||
1079 severity == MCE_UCNA_SEVERITY) && !no_way_out)
ed7290d0
AK
1080 continue;
1081 __set_bit(i, toclear);
1082 if (severity == MCE_NO_SEVERITY) {
b79109c3
AK
1083 /*
1084 * Machine check event was not enabled. Clear, but
1085 * ignore.
1086 */
1087 continue;
1da177e4
LT
1088 }
1089
85f92694 1090 mce_read_aux(&m, i);
1da177e4 1091
fd4cf79f
CG
1092 /* assuming valid severity level != 0 */
1093 m.severity = severity;
1094 m.usable_addr = mce_usable_address(&m);
9b1beaf2 1095
b79109c3 1096 mce_log(&m);
1da177e4 1097
3c079792
AK
1098 if (severity > worst) {
1099 *final = m;
1100 worst = severity;
1da177e4 1101 }
1da177e4
LT
1102 }
1103
a8c321fb
TL
1104 /* mce_clear_state will clear *final, save locally for use later */
1105 m = *final;
1106
3c079792
AK
1107 if (!no_way_out)
1108 mce_clear_state(toclear);
1109
e9eee03e 1110 /*
3c079792
AK
1111 * Do most of the synchronization with other CPUs.
1112 * When there's any problem use only local no_way_out state.
e9eee03e 1113 */
243d657e
AR
1114 if (!lmce) {
1115 if (mce_end(order) < 0)
1116 no_way_out = worst >= MCE_PANIC_SEVERITY;
1117 } else {
1118 /*
1119 * Local MCE skipped calling mce_reign()
1120 * If we found a fatal error, we need to panic here.
1121 */
1122 if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3)
1123 mce_panic("Machine check from unknown source",
1124 NULL, NULL);
1125 }
bd78432c
TH
1126
1127 /*
a8c321fb
TL
1128 * At insane "tolerant" levels we take no action. Otherwise
1129 * we only die if we have no other choice. For less serious
1130 * issues we try to recover, or limit damage to the current
1131 * process.
bd78432c 1132 */
1462594b 1133 if (cfg->tolerant < 3) {
a8c321fb
TL
1134 if (no_way_out)
1135 mce_panic("Fatal machine check on current CPU", &m, msg);
1136 if (worst == MCE_AR_SEVERITY) {
d4812e16
TL
1137 recover_paddr = m.addr;
1138 if (!(m.mcgstatus & MCG_STATUS_RIPV))
1139 flags |= MF_MUST_KILL;
a8c321fb
TL
1140 } else if (kill_it) {
1141 force_sig(SIGBUS, current);
1142 }
1143 }
e02e68d3 1144
3c079792
AK
1145 if (worst > 0)
1146 mce_report_event(regs);
5f8c1a54 1147 mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
32561696 1148out:
88921be3 1149 sync_core();
d4812e16
TL
1150
1151 if (recover_paddr == ~0ull)
1152 goto done;
1153
1154 pr_err("Uncorrected hardware memory error in user-access at %llx",
1155 recover_paddr);
1156 /*
1157 * We must call memory_failure() here even if the current process is
1158 * doomed. We still need to mark the page as poisoned and alert any
1159 * other users of the page.
1160 */
1161 ist_begin_non_atomic(regs);
1162 local_irq_enable();
1163 if (memory_failure(recover_paddr >> PAGE_SHIFT, MCE_VECTOR, flags) < 0) {
1164 pr_err("Memory error not recovered");
1165 force_sig(SIGBUS, current);
1166 }
1167 local_irq_disable();
1168 ist_end_non_atomic();
1169done:
8c84014f 1170 ist_exit(regs);
1da177e4 1171}
ea149b36 1172EXPORT_SYMBOL_GPL(do_machine_check);
1da177e4 1173
cd42f4a3
TL
1174#ifndef CONFIG_MEMORY_FAILURE
1175int memory_failure(unsigned long pfn, int vector, int flags)
9b1beaf2 1176{
a8c321fb
TL
1177 /* mce_severity() should not hand us an ACTION_REQUIRED error */
1178 BUG_ON(flags & MF_ACTION_REQUIRED);
c767a54b
JP
1179 pr_err("Uncorrected memory error in page 0x%lx ignored\n"
1180 "Rebuild kernel with CONFIG_MEMORY_FAILURE=y for smarter handling\n",
1181 pfn);
cd42f4a3
TL
1182
1183 return 0;
9b1beaf2 1184}
cd42f4a3 1185#endif
9b1beaf2 1186
a8c321fb
TL
1187/*
1188 * Action optional processing happens here (picking up
1189 * from the list of faulting pages that do_machine_check()
fd4cf79f 1190 * placed into the genpool).
a8c321fb 1191 */
9b1beaf2
AK
1192static void mce_process_work(struct work_struct *dummy)
1193{
fd4cf79f 1194 mce_gen_pool_process();
9b1beaf2
AK
1195}
1196
15d5f839
DZ
1197#ifdef CONFIG_X86_MCE_INTEL
1198/***
1199 * mce_log_therm_throt_event - Logs the thermal throttling event to mcelog
676b1855 1200 * @cpu: The CPU on which the event occurred.
15d5f839
DZ
1201 * @status: Event status information
1202 *
1203 * This function should be called by the thermal interrupt after the
1204 * event has been processed and the decision was made to log the event
1205 * further.
1206 *
1207 * The status parameter will be saved to the 'status' field of 'struct mce'
1208 * and historically has been the register value of the
1209 * MSR_IA32_THERMAL_STATUS (Intel) msr.
1210 */
b5f2fa4e 1211void mce_log_therm_throt_event(__u64 status)
15d5f839
DZ
1212{
1213 struct mce m;
1214
b5f2fa4e 1215 mce_setup(&m);
15d5f839
DZ
1216 m.bank = MCE_THERMAL_BANK;
1217 m.status = status;
15d5f839
DZ
1218 mce_log(&m);
1219}
1220#endif /* CONFIG_X86_MCE_INTEL */
1221
1da177e4 1222/*
8a336b0a
TH
1223 * Periodic polling timer for "silent" machine check errors. If the
1224 * poller finds an MCE, poll 2x faster. When the poller finds no more
1225 * errors, poll 2x slower (up to check_interval seconds).
1da177e4 1226 */
3f2f0680 1227static unsigned long check_interval = INITIAL_CHECK_INTERVAL;
e9eee03e 1228
82f7af09 1229static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
52d168e2 1230static DEFINE_PER_CPU(struct timer_list, mce_timer);
1da177e4 1231
55babd8f
CG
1232static unsigned long mce_adjust_timer_default(unsigned long interval)
1233{
1234 return interval;
1235}
1236
3f2f0680 1237static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;
55babd8f 1238
3f2f0680 1239static void __restart_timer(struct timer_list *t, unsigned long interval)
27f6c573 1240{
3f2f0680
BP
1241 unsigned long when = jiffies + interval;
1242 unsigned long flags;
27f6c573 1243
3f2f0680 1244 local_irq_save(flags);
27f6c573 1245
3f2f0680
BP
1246 if (timer_pending(t)) {
1247 if (time_before(when, t->expires))
1248 mod_timer_pinned(t, when);
1249 } else {
1250 t->expires = round_jiffies(when);
1251 add_timer_on(t, smp_processor_id());
1252 }
1253
1254 local_irq_restore(flags);
27f6c573
CG
1255}
1256
82f7af09 1257static void mce_timer_fn(unsigned long data)
1da177e4 1258{
89cbc767 1259 struct timer_list *t = this_cpu_ptr(&mce_timer);
3f2f0680 1260 int cpu = smp_processor_id();
82f7af09 1261 unsigned long iv;
52d168e2 1262
3f2f0680
BP
1263 WARN_ON(cpu != data);
1264
1265 iv = __this_cpu_read(mce_next_interval);
52d168e2 1266
89cbc767 1267 if (mce_available(this_cpu_ptr(&cpu_info))) {
3f2f0680
BP
1268 machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_poll_banks));
1269
1270 if (mce_intel_cmci_poll()) {
1271 iv = mce_adjust_timer(iv);
1272 goto done;
1273 }
e9eee03e 1274 }
1da177e4
LT
1275
1276 /*
3f2f0680
BP
1277 * Alert userspace if needed. If we logged an MCE, reduce the polling
1278 * interval, otherwise increase the polling interval.
1da177e4 1279 */
3f2f0680 1280 if (mce_notify_irq())
958fb3c5 1281 iv = max(iv / 2, (unsigned long) HZ/100);
3f2f0680 1282 else
82f7af09 1283 iv = min(iv * 2, round_jiffies_relative(check_interval * HZ));
3f2f0680
BP
1284
1285done:
82f7af09 1286 __this_cpu_write(mce_next_interval, iv);
3f2f0680 1287 __restart_timer(t, iv);
55babd8f 1288}
e02e68d3 1289
55babd8f
CG
1290/*
1291 * Ensure that the timer is firing in @interval from now.
1292 */
1293void mce_timer_kick(unsigned long interval)
1294{
89cbc767 1295 struct timer_list *t = this_cpu_ptr(&mce_timer);
55babd8f
CG
1296 unsigned long iv = __this_cpu_read(mce_next_interval);
1297
3f2f0680
BP
1298 __restart_timer(t, interval);
1299
55babd8f
CG
1300 if (interval < iv)
1301 __this_cpu_write(mce_next_interval, interval);
e02e68d3
TH
1302}
1303
9aaef96f
HS
1304/* Must not be called in IRQ context where del_timer_sync() can deadlock */
1305static void mce_timer_delete_all(void)
1306{
1307 int cpu;
1308
1309 for_each_online_cpu(cpu)
1310 del_timer_sync(&per_cpu(mce_timer, cpu));
1311}
1312
9bd98405
AK
1313static void mce_do_trigger(struct work_struct *work)
1314{
1020bcbc 1315 call_usermodehelper(mce_helper, mce_helper_argv, NULL, UMH_NO_WAIT);
9bd98405
AK
1316}
1317
1318static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
1319
e02e68d3 1320/*
9bd98405
AK
1321 * Notify the user(s) about new machine check events.
1322 * Can be called from interrupt context, but not from machine check/NMI
1323 * context.
e02e68d3 1324 */
9ff36ee9 1325int mce_notify_irq(void)
e02e68d3 1326{
8457c84d
AK
1327 /* Not more than two messages every minute */
1328 static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
1329
1020bcbc 1330 if (test_and_clear_bit(0, &mce_need_notify)) {
93b62c3c
HS
1331 /* wake processes polling /dev/mcelog */
1332 wake_up_interruptible(&mce_chrdev_wait);
9bd98405 1333
4d899be5 1334 if (mce_helper[0])
9bd98405 1335 schedule_work(&mce_trigger_work);
e02e68d3 1336
8457c84d 1337 if (__ratelimit(&ratelimit))
a2d7b0d4 1338 pr_info(HW_ERR "Machine check events logged\n");
e02e68d3
TH
1339
1340 return 1;
1da177e4 1341 }
e02e68d3
TH
1342 return 0;
1343}
9ff36ee9 1344EXPORT_SYMBOL_GPL(mce_notify_irq);
8a336b0a 1345
148f9bb8 1346static int __mcheck_cpu_mce_banks_init(void)
cebe1820
AK
1347{
1348 int i;
d203f0b8 1349 u8 num_banks = mca_cfg.banks;
cebe1820 1350
d203f0b8 1351 mce_banks = kzalloc(num_banks * sizeof(struct mce_bank), GFP_KERNEL);
cebe1820
AK
1352 if (!mce_banks)
1353 return -ENOMEM;
d203f0b8
BP
1354
1355 for (i = 0; i < num_banks; i++) {
cebe1820 1356 struct mce_bank *b = &mce_banks[i];
11868a2d 1357
cebe1820
AK
1358 b->ctl = -1ULL;
1359 b->init = 1;
1360 }
1361 return 0;
1362}
1363
d88203d1 1364/*
1da177e4
LT
1365 * Initialize Machine Checks for a CPU.
1366 */
148f9bb8 1367static int __mcheck_cpu_cap_init(void)
1da177e4 1368{
0d7482e3 1369 unsigned b;
e9eee03e 1370 u64 cap;
1da177e4
LT
1371
1372 rdmsrl(MSR_IA32_MCG_CAP, cap);
01c6680a
TG
1373
1374 b = cap & MCG_BANKCNT_MASK;
d203f0b8 1375 if (!mca_cfg.banks)
c767a54b 1376 pr_info("CPU supports %d MCE banks\n", b);
b659294b 1377
0d7482e3 1378 if (b > MAX_NR_BANKS) {
c767a54b 1379 pr_warn("Using only %u machine check banks out of %u\n",
0d7482e3
AK
1380 MAX_NR_BANKS, b);
1381 b = MAX_NR_BANKS;
1382 }
1383
1384 /* Don't support asymmetric configurations today */
d203f0b8
BP
1385 WARN_ON(mca_cfg.banks != 0 && b != mca_cfg.banks);
1386 mca_cfg.banks = b;
1387
cebe1820 1388 if (!mce_banks) {
cffd377e 1389 int err = __mcheck_cpu_mce_banks_init();
11868a2d 1390
cebe1820
AK
1391 if (err)
1392 return err;
1da177e4 1393 }
0d7482e3 1394
94ad8474 1395 /* Use accurate RIP reporting if available. */
01c6680a 1396 if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9)
84c2559d 1397 mca_cfg.rip_msr = MSR_IA32_MCG_EIP;
1da177e4 1398
ed7290d0 1399 if (cap & MCG_SER_P)
1462594b 1400 mca_cfg.ser = true;
ed7290d0 1401
0d7482e3
AK
1402 return 0;
1403}
1404
5e09954a 1405static void __mcheck_cpu_init_generic(void)
0d7482e3 1406{
84c2559d 1407 enum mcp_flags m_fl = 0;
e9eee03e 1408 mce_banks_t all_banks;
0d7482e3
AK
1409 u64 cap;
1410 int i;
1411
84c2559d
BP
1412 if (!mca_cfg.bootlog)
1413 m_fl = MCP_DONTLOG;
1414
b79109c3
AK
1415 /*
1416 * Log the machine checks left over from the previous reset.
1417 */
ee031c31 1418 bitmap_fill(all_banks, MAX_NR_BANKS);
84c2559d 1419 machine_check_poll(MCP_UC | m_fl, &all_banks);
1da177e4 1420
375074cc 1421 cr4_set_bits(X86_CR4_MCE);
1da177e4 1422
0d7482e3 1423 rdmsrl(MSR_IA32_MCG_CAP, cap);
1da177e4
LT
1424 if (cap & MCG_CTL_P)
1425 wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
1426
d203f0b8 1427 for (i = 0; i < mca_cfg.banks; i++) {
cebe1820 1428 struct mce_bank *b = &mce_banks[i];
11868a2d 1429
cebe1820 1430 if (!b->init)
06b7a7a5 1431 continue;
a2d32bcb
AK
1432 wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
1433 wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
d88203d1 1434 }
1da177e4
LT
1435}
1436
61b0fccd
TL
1437/*
1438 * During IFU recovery Sandy Bridge -EP4S processors set the RIPV and
1439 * EIPV bits in MCG_STATUS to zero on the affected logical processor (SDM
1440 * Vol 3B Table 15-20). But this confuses both the code that determines
1441 * whether the machine check occurred in kernel or user mode, and also
1442 * the severity assessment code. Pretend that EIPV was set, and take the
1443 * ip/cs values from the pt_regs that mce_gather_info() ignored earlier.
1444 */
1445static void quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs)
1446{
1447 if (bank != 0)
1448 return;
1449 if ((m->mcgstatus & (MCG_STATUS_EIPV|MCG_STATUS_RIPV)) != 0)
1450 return;
1451 if ((m->status & (MCI_STATUS_OVER|MCI_STATUS_UC|
1452 MCI_STATUS_EN|MCI_STATUS_MISCV|MCI_STATUS_ADDRV|
1453 MCI_STATUS_PCC|MCI_STATUS_S|MCI_STATUS_AR|
1454 MCACOD)) !=
1455 (MCI_STATUS_UC|MCI_STATUS_EN|
1456 MCI_STATUS_MISCV|MCI_STATUS_ADDRV|MCI_STATUS_S|
1457 MCI_STATUS_AR|MCACOD_INSTR))
1458 return;
1459
1460 m->mcgstatus |= MCG_STATUS_EIPV;
1461 m->ip = regs->ip;
1462 m->cs = regs->cs;
1463}
1464
1da177e4 1465/* Add per CPU specific workarounds here */
148f9bb8 1466static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
d88203d1 1467{
d203f0b8
BP
1468 struct mca_config *cfg = &mca_cfg;
1469
e412cd25 1470 if (c->x86_vendor == X86_VENDOR_UNKNOWN) {
c767a54b 1471 pr_info("unknown CPU type - not enabling MCE support\n");
e412cd25
IM
1472 return -EOPNOTSUPP;
1473 }
1474
1da177e4 1475 /* This should be disabled by the BIOS, but isn't always */
911f6a7b 1476 if (c->x86_vendor == X86_VENDOR_AMD) {
d203f0b8 1477 if (c->x86 == 15 && cfg->banks > 4) {
e9eee03e
IM
1478 /*
1479 * disable GART TBL walk error reporting, which
1480 * trips off incorrectly with the IOMMU & 3ware
1481 * & Cerberus:
1482 */
cebe1820 1483 clear_bit(10, (unsigned long *)&mce_banks[4].ctl);
e9eee03e 1484 }
84c2559d 1485 if (c->x86 <= 17 && cfg->bootlog < 0) {
e9eee03e
IM
1486 /*
1487 * Lots of broken BIOS around that don't clear them
1488 * by default and leave crap in there. Don't log:
1489 */
84c2559d 1490 cfg->bootlog = 0;
e9eee03e 1491 }
2e6f694f
AK
1492 /*
1493 * Various K7s with broken bank 0 around. Always disable
1494 * by default.
1495 */
c9ce8712 1496 if (c->x86 == 6 && cfg->banks > 0)
cebe1820 1497 mce_banks[0].ctl = 0;
575203b4 1498
bf80bbd7
AG
1499 /*
1500 * overflow_recov is supported for F15h Models 00h-0fh
1501 * even though we don't have a CPUID bit for it.
1502 */
1503 if (c->x86 == 0x15 && c->x86_model <= 0xf)
1504 mce_flags.overflow_recov = 1;
1505
c9ce8712
BP
1506 /*
1507 * Turn off MC4_MISC thresholding banks on those models since
1508 * they're not supported there.
1509 */
1510 if (c->x86 == 0x15 &&
1511 (c->x86_model >= 0x10 && c->x86_model <= 0x1f)) {
1512 int i;
1513 u64 hwcr;
1514 bool need_toggle;
1515 u32 msrs[] = {
575203b4
BP
1516 0x00000413, /* MC4_MISC0 */
1517 0xc0000408, /* MC4_MISC1 */
c9ce8712 1518 };
575203b4 1519
c9ce8712 1520 rdmsrl(MSR_K7_HWCR, hwcr);
575203b4 1521
c9ce8712
BP
1522 /* McStatusWrEn has to be set */
1523 need_toggle = !(hwcr & BIT(18));
575203b4 1524
c9ce8712
BP
1525 if (need_toggle)
1526 wrmsrl(MSR_K7_HWCR, hwcr | BIT(18));
575203b4 1527
c9ce8712
BP
1528 /* Clear CntP bit safely */
1529 for (i = 0; i < ARRAY_SIZE(msrs); i++)
1530 msr_clear_bit(msrs[i], 62);
575203b4 1531
c9ce8712
BP
1532 /* restore old settings */
1533 if (need_toggle)
1534 wrmsrl(MSR_K7_HWCR, hwcr);
1535 }
1da177e4 1536 }
e583538f 1537
06b7a7a5
AK
1538 if (c->x86_vendor == X86_VENDOR_INTEL) {
1539 /*
1540 * SDM documents that on family 6 bank 0 should not be written
1541 * because it aliases to another special BIOS controlled
1542 * register.
1543 * But it's not aliased anymore on model 0x1a+
1544 * Don't ignore bank 0 completely because there could be a
1545 * valid event later, merely don't write CTL0.
1546 */
1547
d203f0b8 1548 if (c->x86 == 6 && c->x86_model < 0x1A && cfg->banks > 0)
cebe1820 1549 mce_banks[0].init = 0;
3c079792
AK
1550
1551 /*
1552 * All newer Intel systems support MCE broadcasting. Enable
1553 * synchronization with a one second timeout.
1554 */
1555 if ((c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xe)) &&
84c2559d
BP
1556 cfg->monarch_timeout < 0)
1557 cfg->monarch_timeout = USEC_PER_SEC;
c7f6fa44 1558
e412cd25
IM
1559 /*
1560 * There are also broken BIOSes on some Pentium M and
1561 * earlier systems:
1562 */
84c2559d
BP
1563 if (c->x86 == 6 && c->x86_model <= 13 && cfg->bootlog < 0)
1564 cfg->bootlog = 0;
61b0fccd
TL
1565
1566 if (c->x86 == 6 && c->x86_model == 45)
1567 quirk_no_way_out = quirk_sandybridge_ifu;
06b7a7a5 1568 }
84c2559d
BP
1569 if (cfg->monarch_timeout < 0)
1570 cfg->monarch_timeout = 0;
1571 if (cfg->bootlog != 0)
7af19e4a 1572 cfg->panic_timeout = 30;
e412cd25
IM
1573
1574 return 0;
d88203d1 1575}
1da177e4 1576
148f9bb8 1577static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
4efc0670
AK
1578{
1579 if (c->x86 != 5)
3a97fc34
HS
1580 return 0;
1581
4efc0670
AK
1582 switch (c->x86_vendor) {
1583 case X86_VENDOR_INTEL:
c6978369 1584 intel_p5_mcheck_init(c);
3a97fc34 1585 return 1;
4efc0670
AK
1586 break;
1587 case X86_VENDOR_CENTAUR:
1588 winchip_mcheck_init(c);
3a97fc34 1589 return 1;
4efc0670 1590 break;
dc34bdd2
BP
1591 default:
1592 return 0;
4efc0670 1593 }
3a97fc34
HS
1594
1595 return 0;
4efc0670
AK
1596}
1597
5e09954a 1598static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
1da177e4
LT
1599{
1600 switch (c->x86_vendor) {
1601 case X86_VENDOR_INTEL:
1602 mce_intel_feature_init(c);
3f2f0680 1603 mce_adjust_timer = cmci_intel_adjust_timer;
1da177e4 1604 break;
7559e13f
AG
1605
1606 case X86_VENDOR_AMD: {
1607 u32 ebx = cpuid_ebx(0x80000007);
1608
89b831ef 1609 mce_amd_feature_init(c);
7559e13f
AG
1610 mce_flags.overflow_recov = !!(ebx & BIT(0));
1611 mce_flags.succor = !!(ebx & BIT(1));
c7f54d21
AG
1612 mce_flags.smca = !!(ebx & BIT(3));
1613
89b831ef 1614 break;
7559e13f
AG
1615 }
1616
1da177e4
LT
1617 default:
1618 break;
1619 }
1620}
1621
8838eb6c
AR
1622static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c)
1623{
1624 switch (c->x86_vendor) {
1625 case X86_VENDOR_INTEL:
1626 mce_intel_feature_clear(c);
1627 break;
1628 default:
1629 break;
1630 }
1631}
1632
26c3c283 1633static void mce_start_timer(unsigned int cpu, struct timer_list *t)
52d168e2 1634{
4f75d841 1635 unsigned long iv = check_interval * HZ;
bc09effa 1636
7af19e4a 1637 if (mca_cfg.ignore_ce || !iv)
62fdac59
HS
1638 return;
1639
4f75d841
BP
1640 per_cpu(mce_next_interval, cpu) = iv;
1641
82f7af09 1642 t->expires = round_jiffies(jiffies + iv);
4f75d841 1643 add_timer_on(t, cpu);
52d168e2
AK
1644}
1645
26c3c283
TG
1646static void __mcheck_cpu_init_timer(void)
1647{
89cbc767 1648 struct timer_list *t = this_cpu_ptr(&mce_timer);
26c3c283
TG
1649 unsigned int cpu = smp_processor_id();
1650
1651 setup_timer(t, mce_timer_fn, cpu);
1652 mce_start_timer(cpu, t);
1653}
1654
9eda8cb3
AK
1655/* Handle unconfigured int18 (should never happen) */
1656static void unexpected_machine_check(struct pt_regs *regs, long error_code)
1657{
c767a54b 1658 pr_err("CPU#%d: Unexpected int18 (Machine Check)\n",
9eda8cb3
AK
1659 smp_processor_id());
1660}
1661
1662/* Call the installed machine check handler for this CPU setup. */
1663void (*machine_check_vector)(struct pt_regs *, long error_code) =
1664 unexpected_machine_check;
1665
d88203d1 1666/*
1da177e4 1667 * Called for each booted CPU to set up machine checks.
e9eee03e 1668 * Must be called with preempt off:
1da177e4 1669 */
148f9bb8 1670void mcheck_cpu_init(struct cpuinfo_x86 *c)
1da177e4 1671{
1462594b 1672 if (mca_cfg.disabled)
4efc0670
AK
1673 return;
1674
3a97fc34
HS
1675 if (__mcheck_cpu_ancient_init(c))
1676 return;
4efc0670 1677
5b4408fd 1678 if (!mce_available(c))
1da177e4
LT
1679 return;
1680
5e09954a 1681 if (__mcheck_cpu_cap_init() < 0 || __mcheck_cpu_apply_quirks(c) < 0) {
1462594b 1682 mca_cfg.disabled = true;
0d7482e3
AK
1683 return;
1684 }
0d7482e3 1685
648ed940
CG
1686 if (mce_gen_pool_init()) {
1687 mca_cfg.disabled = true;
1688 pr_emerg("Couldn't allocate MCE records pool!\n");
1689 return;
1690 }
1691
5d727926
AK
1692 machine_check_vector = do_machine_check;
1693
5e09954a
BP
1694 __mcheck_cpu_init_generic();
1695 __mcheck_cpu_init_vendor(c);
1696 __mcheck_cpu_init_timer();
1da177e4
LT
1697}
1698
8838eb6c
AR
1699/*
1700 * Called for each booted CPU to clear some machine checks opt-ins
1701 */
1702void mcheck_cpu_clear(struct cpuinfo_x86 *c)
1703{
1704 if (mca_cfg.disabled)
1705 return;
1706
1707 if (!mce_available(c))
1708 return;
1709
1710 /*
1711 * Possibly to clear general settings generic to x86
1712 * __mcheck_cpu_clear_generic(c);
1713 */
1714 __mcheck_cpu_clear_vendor(c);
1715
1da177e4
LT
1716}
1717
1718/*
93b62c3c 1719 * mce_chrdev: Character device /dev/mcelog to read and clear the MCE log.
1da177e4
LT
1720 */
1721
93b62c3c
HS
1722static DEFINE_SPINLOCK(mce_chrdev_state_lock);
1723static int mce_chrdev_open_count; /* #times opened */
1724static int mce_chrdev_open_exclu; /* already open exclusive? */
f528e7ba 1725
93b62c3c 1726static int mce_chrdev_open(struct inode *inode, struct file *file)
f528e7ba 1727{
93b62c3c 1728 spin_lock(&mce_chrdev_state_lock);
f528e7ba 1729
93b62c3c
HS
1730 if (mce_chrdev_open_exclu ||
1731 (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
1732 spin_unlock(&mce_chrdev_state_lock);
e9eee03e 1733
f528e7ba
TH
1734 return -EBUSY;
1735 }
1736
1737 if (file->f_flags & O_EXCL)
93b62c3c
HS
1738 mce_chrdev_open_exclu = 1;
1739 mce_chrdev_open_count++;
f528e7ba 1740
93b62c3c 1741 spin_unlock(&mce_chrdev_state_lock);
f528e7ba 1742
bd78432c 1743 return nonseekable_open(inode, file);
f528e7ba
TH
1744}
1745
93b62c3c 1746static int mce_chrdev_release(struct inode *inode, struct file *file)
f528e7ba 1747{
93b62c3c 1748 spin_lock(&mce_chrdev_state_lock);
f528e7ba 1749
93b62c3c
HS
1750 mce_chrdev_open_count--;
1751 mce_chrdev_open_exclu = 0;
f528e7ba 1752
93b62c3c 1753 spin_unlock(&mce_chrdev_state_lock);
f528e7ba
TH
1754
1755 return 0;
1756}
1757
d88203d1
TG
1758static void collect_tscs(void *data)
1759{
1da177e4 1760 unsigned long *cpu_tsc = (unsigned long *)data;
d88203d1 1761
4ea1636b 1762 cpu_tsc[smp_processor_id()] = rdtsc();
d88203d1 1763}
1da177e4 1764
482908b4
HY
1765static int mce_apei_read_done;
1766
1767/* Collect MCE record of previous boot in persistent storage via APEI ERST. */
1768static int __mce_read_apei(char __user **ubuf, size_t usize)
1769{
1770 int rc;
1771 u64 record_id;
1772 struct mce m;
1773
1774 if (usize < sizeof(struct mce))
1775 return -EINVAL;
1776
1777 rc = apei_read_mce(&m, &record_id);
1778 /* Error or no more MCE record */
1779 if (rc <= 0) {
1780 mce_apei_read_done = 1;
fadd85f1
NH
1781 /*
1782 * When ERST is disabled, mce_chrdev_read() should return
1783 * "no record" instead of "no device."
1784 */
1785 if (rc == -ENODEV)
1786 return 0;
482908b4
HY
1787 return rc;
1788 }
1789 rc = -EFAULT;
1790 if (copy_to_user(*ubuf, &m, sizeof(struct mce)))
1791 return rc;
1792 /*
1793 * Ideally the record should only be cleared after it has been
1794 * flushed to disk or sent over the network by /sbin/mcelog, but
1795 * there is no interface to support that yet, so just clear it
1796 * here to avoid handing out duplicates.
1797 */
1798 rc = apei_clear_mce(record_id);
1799 if (rc) {
1800 mce_apei_read_done = 1;
1801 return rc;
1802 }
1803 *ubuf += sizeof(struct mce);
1804
1805 return 0;
1806}
1807
93b62c3c
HS
1808static ssize_t mce_chrdev_read(struct file *filp, char __user *ubuf,
1809 size_t usize, loff_t *off)
1da177e4 1810{
e9eee03e 1811 char __user *buf = ubuf;
f0de53bb 1812 unsigned long *cpu_tsc;
ef41df43 1813 unsigned prev, next;
1da177e4
LT
1814 int i, err;
1815
6bca67f9 1816 cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL);
f0de53bb
AK
1817 if (!cpu_tsc)
1818 return -ENOMEM;
1819
93b62c3c 1820 mutex_lock(&mce_chrdev_read_mutex);
482908b4
HY
1821
1822 if (!mce_apei_read_done) {
1823 err = __mce_read_apei(&buf, usize);
1824 if (err || buf != ubuf)
1825 goto out;
1826 }
1827
9a7783d0 1828 next = mce_log_get_idx_check(mcelog.next);
1da177e4
LT
1829
1830 /* Only supports full reads right now */
482908b4
HY
1831 err = -EINVAL;
1832 if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce))
1833 goto out;
1da177e4
LT
1834
1835 err = 0;
ef41df43
HY
1836 prev = 0;
1837 do {
1838 for (i = prev; i < next; i++) {
1839 unsigned long start = jiffies;
559faa6b 1840 struct mce *m = &mcelog.entry[i];
ef41df43 1841
559faa6b 1842 while (!m->finished) {
ef41df43 1843 if (time_after_eq(jiffies, start + 2)) {
559faa6b 1844 memset(m, 0, sizeof(*m));
ef41df43
HY
1845 goto timeout;
1846 }
1847 cpu_relax();
673242c1 1848 }
ef41df43 1849 smp_rmb();
559faa6b
HS
1850 err |= copy_to_user(buf, m, sizeof(*m));
1851 buf += sizeof(*m);
ef41df43
HY
1852timeout:
1853 ;
673242c1 1854 }
1da177e4 1855
ef41df43
HY
1856 memset(mcelog.entry + prev, 0,
1857 (next - prev) * sizeof(struct mce));
1858 prev = next;
1859 next = cmpxchg(&mcelog.next, prev, 0);
1860 } while (next != prev);
1da177e4 1861
b2b18660 1862 synchronize_sched();
1da177e4 1863
d88203d1
TG
1864 /*
1865 * Collect entries that were still getting written before the
1866 * synchronize.
1867 */
15c8b6c1 1868 on_each_cpu(collect_tscs, cpu_tsc, 1);
e9eee03e 1869
d88203d1 1870 for (i = next; i < MCE_LOG_LEN; i++) {
559faa6b
HS
1871 struct mce *m = &mcelog.entry[i];
1872
1873 if (m->finished && m->tsc < cpu_tsc[m->cpu]) {
1874 err |= copy_to_user(buf, m, sizeof(*m));
1da177e4 1875 smp_rmb();
559faa6b
HS
1876 buf += sizeof(*m);
1877 memset(m, 0, sizeof(*m));
1da177e4 1878 }
d88203d1 1879 }
482908b4
HY
1880
1881 if (err)
1882 err = -EFAULT;
1883
1884out:
93b62c3c 1885 mutex_unlock(&mce_chrdev_read_mutex);
f0de53bb 1886 kfree(cpu_tsc);
e9eee03e 1887
482908b4 1888 return err ? err : buf - ubuf;
1da177e4
LT
1889}
1890
93b62c3c 1891static unsigned int mce_chrdev_poll(struct file *file, poll_table *wait)
e02e68d3 1892{
93b62c3c 1893 poll_wait(file, &mce_chrdev_wait, wait);
e90328b8 1894 if (READ_ONCE(mcelog.next))
e02e68d3 1895 return POLLIN | POLLRDNORM;
482908b4
HY
1896 if (!mce_apei_read_done && apei_check_mce())
1897 return POLLIN | POLLRDNORM;
e02e68d3
TH
1898 return 0;
1899}
1900
93b62c3c
HS
1901static long mce_chrdev_ioctl(struct file *f, unsigned int cmd,
1902 unsigned long arg)
1da177e4
LT
1903{
1904 int __user *p = (int __user *)arg;
d88203d1 1905
1da177e4 1906 if (!capable(CAP_SYS_ADMIN))
d88203d1 1907 return -EPERM;
e9eee03e 1908
1da177e4 1909 switch (cmd) {
d88203d1 1910 case MCE_GET_RECORD_LEN:
1da177e4
LT
1911 return put_user(sizeof(struct mce), p);
1912 case MCE_GET_LOG_LEN:
d88203d1 1913 return put_user(MCE_LOG_LEN, p);
1da177e4
LT
1914 case MCE_GETCLEAR_FLAGS: {
1915 unsigned flags;
d88203d1
TG
1916
1917 do {
1da177e4 1918 flags = mcelog.flags;
d88203d1 1919 } while (cmpxchg(&mcelog.flags, flags, 0) != flags);
e9eee03e 1920
d88203d1 1921 return put_user(flags, p);
1da177e4
LT
1922 }
1923 default:
d88203d1
TG
1924 return -ENOTTY;
1925 }
1da177e4
LT
1926}
1927
66f5ddf3
TL
1928static ssize_t (*mce_write)(struct file *filp, const char __user *ubuf,
1929 size_t usize, loff_t *off);
1930
1931void register_mce_write_callback(ssize_t (*fn)(struct file *filp,
1932 const char __user *ubuf,
1933 size_t usize, loff_t *off))
1934{
1935 mce_write = fn;
1936}
1937EXPORT_SYMBOL_GPL(register_mce_write_callback);
1938
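/*
 * Illustrative sketch (not part of this file): how an injection module
 * such as mce-inject might hook writes to /dev/mcelog through
 * register_mce_write_callback(). Only the registration API and the write
 * prototype come from the code above; the callback name and what it does
 * with the record are invented for the example, so it is kept under
 * "#if 0" and never compiled here.
 */
#if 0	/* example only */
static ssize_t example_mce_write(struct file *filp, const char __user *ubuf,
				 size_t usize, loff_t *off)
{
	struct mce m;

	/* Expect exactly one raw MCE record per write. */
	if (usize != sizeof(struct mce))
		return -EINVAL;

	if (copy_from_user(&m, ubuf, sizeof(struct mce)))
		return -EFAULT;

	/* An injection module would now feed 'm' into its injection path. */
	pr_info("example: received MCE record reported on CPU %u\n", m.extcpu);

	return usize;
}

static int __init example_init(void)
{
	register_mce_write_callback(example_mce_write);
	return 0;
}
#endif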
29c6820f
PM
1939static ssize_t mce_chrdev_write(struct file *filp, const char __user *ubuf,
1940 size_t usize, loff_t *off)
66f5ddf3
TL
1941{
1942 if (mce_write)
1943 return mce_write(filp, ubuf, usize, off);
1944 else
1945 return -EINVAL;
1946}
1947
1948static const struct file_operations mce_chrdev_ops = {
93b62c3c
HS
1949 .open = mce_chrdev_open,
1950 .release = mce_chrdev_release,
1951 .read = mce_chrdev_read,
66f5ddf3 1952 .write = mce_chrdev_write,
93b62c3c
HS
1953 .poll = mce_chrdev_poll,
1954 .unlocked_ioctl = mce_chrdev_ioctl,
1955 .llseek = no_llseek,
1da177e4
LT
1956};
1957
93b62c3c 1958static struct miscdevice mce_chrdev_device = {
1da177e4
LT
1959 MISC_MCELOG_MINOR,
1960 "mcelog",
1961 &mce_chrdev_ops,
1962};
1963
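/*
 * Illustrative user-space sketch (not part of this file, run as root):
 * draining /dev/mcelog the way the read/poll/ioctl handlers above expect.
 * It assumes the UAPI struct mce and the MCE_GET_RECORD_LEN /
 * MCE_GET_LOG_LEN ioctls exported via <asm/mce.h>; error handling is
 * minimal. mce_chrdev_read() only supports full reads, so the buffer
 * must cover the whole log (record length times log length).
 */
#if 0	/* example only */
#include <stdio.h>
#include <stdlib.h>
#include <poll.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/mce.h>

int main(void)
{
	int fd = open("/dev/mcelog", O_RDONLY);
	int reclen, loglen;
	char *buf;
	ssize_t n;
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	if (fd < 0)
		return 1;
	if (ioctl(fd, MCE_GET_RECORD_LEN, &reclen) < 0 ||
	    ioctl(fd, MCE_GET_LOG_LEN, &loglen) < 0)
		return 1;

	buf = malloc((size_t)reclen * loglen);
	if (!buf)
		return 1;

	poll(&pfd, 1, -1);	/* block until records are available */
	n = read(fd, buf, (size_t)reclen * loglen);
	if (n > 0)
		printf("read %zd bytes (%zd records)\n", n, n / reclen);

	free(buf);
	close(fd);
	return 0;
}
#endif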
c3d1fb56
NR
1964static void __mce_disable_bank(void *arg)
1965{
1966 int bank = *((int *)arg);
89cbc767 1967 __clear_bit(bank, this_cpu_ptr(mce_poll_banks));
c3d1fb56
NR
1968 cmci_disable_bank(bank);
1969}
1970
1971void mce_disable_bank(int bank)
1972{
1973 if (bank >= mca_cfg.banks) {
1974 pr_warn(FW_BUG
1975 "Ignoring request to disable invalid MCA bank %d.\n",
1976 bank);
1977 return;
1978 }
1979 set_bit(bank, mce_banks_ce_disabled);
1980 on_each_cpu(__mce_disable_bank, &bank, 1);
1981}
1982
13503fa9 1983/*
62fdac59
HS
1984 * mce=off Disables machine check
1985 * mce=no_cmci Disables CMCI
88d53867 1986 * mce=no_lmce Disables LMCE
62fdac59
HS
1987 * mce=dont_log_ce Clears corrected events silently, no log created for CEs.
1988 * mce=ignore_ce Disables polling and CMCI, corrected events are not cleared.
3c079792
AK
1989 * mce=TOLERANCELEVEL[,monarchtimeout] (number, see above)
1990 * monarchtimeout is how long to wait for other CPUs on machine
1991 * check, or 0 to not wait
13503fa9
HS
1992 * mce=bootlog Log MCEs from before booting. Disabled by default on AMD.
1993 * mce=nobootlog Don't log MCEs from before booting.
450cc201 1994 * mce=bios_cmci_threshold Don't program the CMCI threshold
13503fa9 1995 */
1da177e4
LT
1996static int __init mcheck_enable(char *str)
1997{
d203f0b8
BP
1998 struct mca_config *cfg = &mca_cfg;
1999
e3346fc4 2000 if (*str == 0) {
4efc0670 2001 enable_p5_mce();
e3346fc4
BZ
2002 return 1;
2003 }
4efc0670
AK
2004 if (*str == '=')
2005 str++;
1da177e4 2006 if (!strcmp(str, "off"))
1462594b 2007 cfg->disabled = true;
62fdac59 2008 else if (!strcmp(str, "no_cmci"))
7af19e4a 2009 cfg->cmci_disabled = true;
88d53867
AR
2010 else if (!strcmp(str, "no_lmce"))
2011 cfg->lmce_disabled = true;
62fdac59 2012 else if (!strcmp(str, "dont_log_ce"))
d203f0b8 2013 cfg->dont_log_ce = true;
62fdac59 2014 else if (!strcmp(str, "ignore_ce"))
7af19e4a 2015 cfg->ignore_ce = true;
13503fa9 2016 else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
84c2559d 2017 cfg->bootlog = (str[0] == 'b');
450cc201 2018 else if (!strcmp(str, "bios_cmci_threshold"))
1462594b 2019 cfg->bios_cmci_threshold = true;
3c079792 2020 else if (isdigit(str[0])) {
5c31b280 2021 if (get_option(&str, &cfg->tolerant) == 2)
84c2559d 2022 get_option(&str, &(cfg->monarch_timeout));
3c079792 2023 } else {
c767a54b 2024 pr_info("mce argument %s ignored. Please use /sys\n", str);
13503fa9
HS
2025 return 0;
2026 }
9b41046c 2027 return 1;
1da177e4 2028}
4efc0670 2029__setup("mce", mcheck_enable);
1da177e4 2030
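/*
 * Illustration only (not in the original source): what a few example
 * "mce=" command-line strings do when fed through mcheck_enable() above.
 * The numeric values are arbitrary.
 *
 *   mce=off        ->  mca_cfg.disabled = true
 *   mce=no_cmci    ->  mca_cfg.cmci_disabled = true
 *   mce=nobootlog  ->  mca_cfg.bootlog = 0
 *   mce=2,500      ->  mca_cfg.tolerant = 2, mca_cfg.monarch_timeout = 500
 */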
a2202aa2 2031int __init mcheck_init(void)
b33a6363 2032{
a2202aa2 2033 mcheck_intel_therm_init();
eef4dfa0 2034 mce_register_decode_chain(&mce_srao_nb);
43eaa2a1 2035 mcheck_vendor_init_severity();
a2202aa2 2036
061120ae
CG
2037 INIT_WORK(&mce_work, mce_process_work);
2038 init_irq_work(&mce_irq_work, mce_irq_work_cb);
2039
b33a6363
BP
2040 return 0;
2041}
b33a6363 2042
d88203d1 2043/*
c7cece89 2044 * mce_syscore: PM support
d88203d1 2045 */
1da177e4 2046
973a2dd1
AK
2047/*
2048 * Disable machine checks on suspend and shutdown. We can't really handle
2049 * them later.
2050 */
6e06780a 2051static void mce_disable_error_reporting(void)
973a2dd1
AK
2052{
2053 int i;
2054
d203f0b8 2055 for (i = 0; i < mca_cfg.banks; i++) {
cebe1820 2056 struct mce_bank *b = &mce_banks[i];
11868a2d 2057
cebe1820 2058 if (b->init)
a2d32bcb 2059 wrmsrl(MSR_IA32_MCx_CTL(i), 0);
06b7a7a5 2060 }
6e06780a
AR
2061 return;
2062}
2063
2064static void vendor_disable_error_reporting(void)
2065{
2066 /*
2067 * Don't clear on Intel CPUs. Some of these MSRs are socket-wide.
2068 * Disabling them for just a single offlined CPU is bad, since it will
2069 * inhibit reporting for all shared resources on the socket like the
2070 * last level cache (LLC), the integrated memory controller (iMC), etc.
2071 */
2072 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
2073 return;
2074
2075 mce_disable_error_reporting();
973a2dd1
AK
2076}
2077
c7cece89 2078static int mce_syscore_suspend(void)
973a2dd1 2079{
6e06780a
AR
2080 vendor_disable_error_reporting();
2081 return 0;
973a2dd1
AK
2082}
2083
c7cece89 2084static void mce_syscore_shutdown(void)
973a2dd1 2085{
6e06780a 2086 vendor_disable_error_reporting();
973a2dd1
AK
2087}
2088
e9eee03e
IM
2089/*
2090 * On resume clear all MCE state. Don't want to see leftovers from the BIOS.
2091 * Only one CPU is active at this time, the others get re-added later using
2092 * CPU hotplug:
2093 */
c7cece89 2094static void mce_syscore_resume(void)
1da177e4 2095{
5e09954a 2096 __mcheck_cpu_init_generic();
89cbc767 2097 __mcheck_cpu_init_vendor(raw_cpu_ptr(&cpu_info));
1da177e4
LT
2098}
2099
f3c6ea1b 2100static struct syscore_ops mce_syscore_ops = {
c7cece89
HS
2101 .suspend = mce_syscore_suspend,
2102 .shutdown = mce_syscore_shutdown,
2103 .resume = mce_syscore_resume,
f3c6ea1b
RW
2104};
2105
c7cece89 2106/*
8a25a2fd 2107 * mce_device: Sysfs support
c7cece89
HS
2108 */
2109
52d168e2
AK
2110static void mce_cpu_restart(void *data)
2111{
89cbc767 2112 if (!mce_available(raw_cpu_ptr(&cpu_info)))
33edbf02 2113 return;
5e09954a
BP
2114 __mcheck_cpu_init_generic();
2115 __mcheck_cpu_init_timer();
52d168e2
AK
2116}
2117
1da177e4 2118/* Reinitialize machine check support after user configuration changes */
d88203d1
TG
2119static void mce_restart(void)
2120{
9aaef96f 2121 mce_timer_delete_all();
52d168e2 2122 on_each_cpu(mce_cpu_restart, NULL, 1);
1da177e4
LT
2123}
2124
9af43b54 2125/* Toggle features for corrected errors */
9aaef96f 2126static void mce_disable_cmci(void *data)
9af43b54 2127{
89cbc767 2128 if (!mce_available(raw_cpu_ptr(&cpu_info)))
9af43b54 2129 return;
9af43b54
HS
2130 cmci_clear();
2131}
2132
2133static void mce_enable_ce(void *all)
2134{
89cbc767 2135 if (!mce_available(raw_cpu_ptr(&cpu_info)))
9af43b54
HS
2136 return;
2137 cmci_reenable();
2138 cmci_recheck();
2139 if (all)
5e09954a 2140 __mcheck_cpu_init_timer();
9af43b54
HS
2141}
2142
8a25a2fd 2143static struct bus_type mce_subsys = {
e9eee03e 2144 .name = "machinecheck",
8a25a2fd 2145 .dev_name = "machinecheck",
1da177e4
LT
2146};
2147
d6126ef5 2148DEFINE_PER_CPU(struct device *, mce_device);
e9eee03e 2149
e9eee03e 2150void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
1da177e4 2151
8a25a2fd 2152static inline struct mce_bank *attr_to_bank(struct device_attribute *attr)
cebe1820
AK
2153{
2154 return container_of(attr, struct mce_bank, attr);
2155}
0d7482e3 2156
8a25a2fd 2157static ssize_t show_bank(struct device *s, struct device_attribute *attr,
0d7482e3
AK
2158 char *buf)
2159{
cebe1820 2160 return sprintf(buf, "%llx\n", attr_to_bank(attr)->ctl);
0d7482e3
AK
2161}
2162
8a25a2fd 2163static ssize_t set_bank(struct device *s, struct device_attribute *attr,
9319cec8 2164 const char *buf, size_t size)
0d7482e3 2165{
9319cec8 2166 u64 new;
e9eee03e 2167
164109e3 2168 if (kstrtou64(buf, 0, &new) < 0)
0d7482e3 2169 return -EINVAL;
e9eee03e 2170
cebe1820 2171 attr_to_bank(attr)->ctl = new;
0d7482e3 2172 mce_restart();
e9eee03e 2173
9319cec8 2174 return size;
0d7482e3 2175}
a98f0dd3 2176
e9eee03e 2177static ssize_t
8a25a2fd 2178show_trigger(struct device *s, struct device_attribute *attr, char *buf)
a98f0dd3 2179{
1020bcbc 2180 strcpy(buf, mce_helper);
a98f0dd3 2181 strcat(buf, "\n");
1020bcbc 2182 return strlen(mce_helper) + 1;
a98f0dd3
AK
2183}
2184
8a25a2fd 2185static ssize_t set_trigger(struct device *s, struct device_attribute *attr,
e9eee03e 2186 const char *buf, size_t siz)
a98f0dd3
AK
2187{
2188 char *p;
e9eee03e 2189
1020bcbc
HS
2190 strncpy(mce_helper, buf, sizeof(mce_helper));
2191 mce_helper[sizeof(mce_helper)-1] = 0;
1020bcbc 2192 p = strchr(mce_helper, '\n');
e9eee03e 2193
e9084ec9 2194 if (p)
e9eee03e
IM
2195 *p = 0;
2196
e9084ec9 2197 return strlen(mce_helper) + !!p;
a98f0dd3
AK
2198}
2199
8a25a2fd
KS
2200static ssize_t set_ignore_ce(struct device *s,
2201 struct device_attribute *attr,
9af43b54
HS
2202 const char *buf, size_t size)
2203{
2204 u64 new;
2205
164109e3 2206 if (kstrtou64(buf, 0, &new) < 0)
9af43b54
HS
2207 return -EINVAL;
2208
7af19e4a 2209 if (mca_cfg.ignore_ce ^ !!new) {
9af43b54
HS
2210 if (new) {
2211 /* disable ce features */
9aaef96f
HS
2212 mce_timer_delete_all();
2213 on_each_cpu(mce_disable_cmci, NULL, 1);
7af19e4a 2214 mca_cfg.ignore_ce = true;
9af43b54
HS
2215 } else {
2216 /* enable ce features */
7af19e4a 2217 mca_cfg.ignore_ce = false;
9af43b54
HS
2218 on_each_cpu(mce_enable_ce, (void *)1, 1);
2219 }
2220 }
2221 return size;
2222}
2223
8a25a2fd
KS
2224static ssize_t set_cmci_disabled(struct device *s,
2225 struct device_attribute *attr,
9af43b54
HS
2226 const char *buf, size_t size)
2227{
2228 u64 new;
2229
164109e3 2230 if (kstrtou64(buf, 0, &new) < 0)
9af43b54
HS
2231 return -EINVAL;
2232
7af19e4a 2233 if (mca_cfg.cmci_disabled ^ !!new) {
9af43b54
HS
2234 if (new) {
2235 /* disable cmci */
9aaef96f 2236 on_each_cpu(mce_disable_cmci, NULL, 1);
7af19e4a 2237 mca_cfg.cmci_disabled = true;
9af43b54
HS
2238 } else {
2239 /* enable cmci */
7af19e4a 2240 mca_cfg.cmci_disabled = false;
9af43b54
HS
2241 on_each_cpu(mce_enable_ce, NULL, 1);
2242 }
2243 }
2244 return size;
2245}
2246
8a25a2fd
KS
2247static ssize_t store_int_with_restart(struct device *s,
2248 struct device_attribute *attr,
b56f642d
AK
2249 const char *buf, size_t size)
2250{
8a25a2fd 2251 ssize_t ret = device_store_int(s, attr, buf, size);
b56f642d
AK
2252 mce_restart();
2253 return ret;
2254}
2255
8a25a2fd 2256static DEVICE_ATTR(trigger, 0644, show_trigger, set_trigger);
d203f0b8 2257static DEVICE_INT_ATTR(tolerant, 0644, mca_cfg.tolerant);
84c2559d 2258static DEVICE_INT_ATTR(monarch_timeout, 0644, mca_cfg.monarch_timeout);
d203f0b8 2259static DEVICE_BOOL_ATTR(dont_log_ce, 0644, mca_cfg.dont_log_ce);
e9eee03e 2260
8a25a2fd
KS
2261static struct dev_ext_attribute dev_attr_check_interval = {
2262 __ATTR(check_interval, 0644, device_show_int, store_int_with_restart),
b56f642d
AK
2263 &check_interval
2264};
e9eee03e 2265
8a25a2fd 2266static struct dev_ext_attribute dev_attr_ignore_ce = {
7af19e4a
BP
2267 __ATTR(ignore_ce, 0644, device_show_bool, set_ignore_ce),
2268 &mca_cfg.ignore_ce
9af43b54
HS
2269};
2270
8a25a2fd 2271static struct dev_ext_attribute dev_attr_cmci_disabled = {
7af19e4a
BP
2272 __ATTR(cmci_disabled, 0644, device_show_bool, set_cmci_disabled),
2273 &mca_cfg.cmci_disabled
9af43b54
HS
2274};
2275
8a25a2fd
KS
2276static struct device_attribute *mce_device_attrs[] = {
2277 &dev_attr_tolerant.attr,
2278 &dev_attr_check_interval.attr,
2279 &dev_attr_trigger,
2280 &dev_attr_monarch_timeout.attr,
2281 &dev_attr_dont_log_ce.attr,
2282 &dev_attr_ignore_ce.attr,
2283 &dev_attr_cmci_disabled.attr,
a98f0dd3
AK
2284 NULL
2285};
1da177e4 2286
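/*
 * Illustrative user-space sketch (not part of this file, run as root):
 * tweaking one of the per-CPU sysfs attributes listed above. The path is
 * derived from the "machinecheck" subsystem/device names registered in
 * this file (an assumption of the example), and check_interval is taken
 * to be in seconds. A write goes through store_int_with_restart() and so
 * triggers mce_restart(); other attributes such as trigger (path of the
 * user mode helper) are written the same way.
 */
#if 0	/* example only */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/devices/system/machinecheck/machinecheck0/check_interval";
	FILE *f = fopen(path, "r+");
	char cur[32] = "";

	if (!f)
		return 1;

	if (fgets(cur, sizeof(cur), f))
		printf("current check_interval: %s", cur);

	rewind(f);
	fprintf(f, "300\n");	/* poll for corrected errors every 300 seconds */
	fclose(f);
	return 0;
}
#endif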
8a25a2fd 2287static cpumask_var_t mce_device_initialized;
bae19fe0 2288
e032d807
GKH
2289static void mce_device_release(struct device *dev)
2290{
2291 kfree(dev);
2292}
2293
8a25a2fd 2294/* Per cpu device init. All of the cpus still share the same ctrl bank: */
148f9bb8 2295static int mce_device_create(unsigned int cpu)
1da177e4 2296{
e032d807 2297 struct device *dev;
1da177e4 2298 int err;
b1f49f95 2299 int i, j;
92cb7612 2300
90367556 2301 if (!mce_available(&boot_cpu_data))
91c6d400
AK
2302 return -EIO;
2303
e032d807
GKH
2304 dev = kzalloc(sizeof *dev, GFP_KERNEL);
2305 if (!dev)
2306 return -ENOMEM;
8a25a2fd
KS
2307 dev->id = cpu;
2308 dev->bus = &mce_subsys;
e032d807 2309 dev->release = &mce_device_release;
91c6d400 2310
8a25a2fd 2311 err = device_register(dev);
853d9b18
LK
2312 if (err) {
2313 put_device(dev);
d435d862 2314 return err;
853d9b18 2315 }
d435d862 2316
8a25a2fd
KS
2317 for (i = 0; mce_device_attrs[i]; i++) {
2318 err = device_create_file(dev, mce_device_attrs[i]);
d435d862
AM
2319 if (err)
2320 goto error;
2321 }
d203f0b8 2322 for (j = 0; j < mca_cfg.banks; j++) {
8a25a2fd 2323 err = device_create_file(dev, &mce_banks[j].attr);
0d7482e3
AK
2324 if (err)
2325 goto error2;
2326 }
8a25a2fd 2327 cpumask_set_cpu(cpu, mce_device_initialized);
d6126ef5 2328 per_cpu(mce_device, cpu) = dev;
91c6d400 2329
d435d862 2330 return 0;
0d7482e3 2331error2:
b1f49f95 2332 while (--j >= 0)
8a25a2fd 2333 device_remove_file(dev, &mce_banks[j].attr);
d435d862 2334error:
cb491fca 2335 while (--i >= 0)
8a25a2fd 2336 device_remove_file(dev, mce_device_attrs[i]);
cb491fca 2337
8a25a2fd 2338 device_unregister(dev);
d435d862 2339
91c6d400
AK
2340 return err;
2341}
2342
148f9bb8 2343static void mce_device_remove(unsigned int cpu)
91c6d400 2344{
d6126ef5 2345 struct device *dev = per_cpu(mce_device, cpu);
73ca5358
SL
2346 int i;
2347
8a25a2fd 2348 if (!cpumask_test_cpu(cpu, mce_device_initialized))
bae19fe0
AH
2349 return;
2350
8a25a2fd
KS
2351 for (i = 0; mce_device_attrs[i]; i++)
2352 device_remove_file(dev, mce_device_attrs[i]);
cb491fca 2353
d203f0b8 2354 for (i = 0; i < mca_cfg.banks; i++)
8a25a2fd 2355 device_remove_file(dev, &mce_banks[i].attr);
cb491fca 2356
8a25a2fd
KS
2357 device_unregister(dev);
2358 cpumask_clear_cpu(cpu, mce_device_initialized);
d6126ef5 2359 per_cpu(mce_device, cpu) = NULL;
91c6d400 2360}
91c6d400 2361
d6b75584 2362/* Make sure there are no machine checks on offlined CPUs. */
148f9bb8 2363static void mce_disable_cpu(void *h)
d6b75584 2364{
88ccbedd 2365 unsigned long action = *(unsigned long *)h;
d6b75584 2366
89cbc767 2367 if (!mce_available(raw_cpu_ptr(&cpu_info)))
d6b75584 2368 return;
767df1bd 2369
88ccbedd
AK
2370 if (!(action & CPU_TASKS_FROZEN))
2371 cmci_clear();
11868a2d 2372
6e06780a 2373 vendor_disable_error_reporting();
d6b75584
AK
2374}
2375
148f9bb8 2376static void mce_reenable_cpu(void *h)
d6b75584 2377{
88ccbedd 2378 unsigned long action = *(unsigned long *)h;
e9eee03e 2379 int i;
d6b75584 2380
89cbc767 2381 if (!mce_available(raw_cpu_ptr(&cpu_info)))
d6b75584 2382 return;
e9eee03e 2383
88ccbedd
AK
2384 if (!(action & CPU_TASKS_FROZEN))
2385 cmci_reenable();
d203f0b8 2386 for (i = 0; i < mca_cfg.banks; i++) {
cebe1820 2387 struct mce_bank *b = &mce_banks[i];
11868a2d 2388
cebe1820 2389 if (b->init)
a2d32bcb 2390 wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
06b7a7a5 2391 }
d6b75584
AK
2392}
2393
91c6d400 2394/* Get notified when a cpu comes on/off. Be hotplug friendly. */
148f9bb8 2395static int
e9eee03e 2396mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
91c6d400
AK
2397{
2398 unsigned int cpu = (unsigned long)hcpu;
52d168e2 2399 struct timer_list *t = &per_cpu(mce_timer, cpu);
91c6d400 2400
1a65f970 2401 switch (action & ~CPU_TASKS_FROZEN) {
bae19fe0 2402 case CPU_ONLINE:
8a25a2fd 2403 mce_device_create(cpu);
8735728e
RW
2404 if (threshold_cpu_callback)
2405 threshold_cpu_callback(action, cpu);
91c6d400 2406 break;
91c6d400 2407 case CPU_DEAD:
8735728e
RW
2408 if (threshold_cpu_callback)
2409 threshold_cpu_callback(action, cpu);
8a25a2fd 2410 mce_device_remove(cpu);
55babd8f 2411 mce_intel_hcpu_update(cpu);
38356c1f
BP
2412
2413 /* intentionally ignoring frozen here */
2414 if (!(action & CPU_TASKS_FROZEN))
2415 cmci_rediscover();
91c6d400 2416 break;
52d168e2 2417 case CPU_DOWN_PREPARE:
88ccbedd 2418 smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
55babd8f 2419 del_timer_sync(t);
52d168e2
AK
2420 break;
2421 case CPU_DOWN_FAILED:
88ccbedd 2422 smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
26c3c283 2423 mce_start_timer(cpu, t);
88ccbedd 2424 break;
1a65f970
TG
2425 }
2426
bae19fe0 2427 return NOTIFY_OK;
91c6d400
AK
2428}
2429
148f9bb8 2430static struct notifier_block mce_cpu_notifier = {
91c6d400
AK
2431 .notifier_call = mce_cpu_callback,
2432};
2433
cebe1820 2434static __init void mce_init_banks(void)
0d7482e3
AK
2435{
2436 int i;
2437
d203f0b8 2438 for (i = 0; i < mca_cfg.banks; i++) {
cebe1820 2439 struct mce_bank *b = &mce_banks[i];
8a25a2fd 2440 struct device_attribute *a = &b->attr;
e9eee03e 2441
a07e4156 2442 sysfs_attr_init(&a->attr);
cebe1820
AK
2443 a->attr.name = b->attrname;
2444 snprintf(b->attrname, ATTR_LEN, "bank%d", i);
e9eee03e
IM
2445
2446 a->attr.mode = 0644;
2447 a->show = show_bank;
2448 a->store = set_bank;
0d7482e3 2449 }
0d7482e3
AK
2450}
2451
5e09954a 2452static __init int mcheck_init_device(void)
91c6d400
AK
2453{
2454 int err;
2455 int i = 0;
2456
9c15a24b
MS
2457 if (!mce_available(&boot_cpu_data)) {
2458 err = -EIO;
2459 goto err_out;
2460 }
0d7482e3 2461
9c15a24b
MS
2462 if (!zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL)) {
2463 err = -ENOMEM;
2464 goto err_out;
2465 }
996867d0 2466
cebe1820 2467 mce_init_banks();
0d7482e3 2468
8a25a2fd 2469 err = subsys_system_register(&mce_subsys, NULL);
d435d862 2470 if (err)
9c15a24b 2471 goto err_out_mem;
91c6d400 2472
82a8f131 2473 cpu_notifier_register_begin();
91c6d400 2474 for_each_online_cpu(i) {
8a25a2fd 2475 err = mce_device_create(i);
82a8f131 2476 if (err) {
27c93415
BP
2477 /*
2478 * Register the notifier anyway (and do not unregister it) so
2479 * that we don't leave undeleted timers; see the notifier
2480 * callback above.
2481 */
2482 __register_hotcpu_notifier(&mce_cpu_notifier);
82a8f131 2483 cpu_notifier_register_done();
9c15a24b 2484 goto err_device_create;
82a8f131 2485 }
91c6d400
AK
2486 }
2487
82a8f131
SB
2488 __register_hotcpu_notifier(&mce_cpu_notifier);
2489 cpu_notifier_register_done();
93b62c3c 2490
9c15a24b
MS
2491 register_syscore_ops(&mce_syscore_ops);
2492
93b62c3c 2493 /* register character device /dev/mcelog */
9c15a24b
MS
2494 err = misc_register(&mce_chrdev_device);
2495 if (err)
2496 goto err_register;
2497
2498 return 0;
2499
2500err_register:
2501 unregister_syscore_ops(&mce_syscore_ops);
2502
9c15a24b
MS
2503err_device_create:
2504 /*
2505 * We didn't keep track of which devices were created above, but
2506 * even if we had, the set of online cpus might have changed.
2507 * Play safe and remove for every possible cpu, since
2508 * mce_device_remove() will do the right thing.
2509 */
2510 for_each_possible_cpu(i)
2511 mce_device_remove(i);
2512
2513err_out_mem:
2514 free_cpumask_var(mce_device_initialized);
2515
2516err_out:
2517 pr_err("Unable to init device /dev/mcelog (rc: %d)\n", err);
e9eee03e 2518
1da177e4 2519 return err;
1da177e4 2520}
cef12ee5 2521device_initcall_sync(mcheck_init_device);
a988d334 2522
d7c3c9a6
AK
2523/*
2524 * Old style boot options parsing. Only for compatibility.
2525 */
2526static int __init mcheck_disable(char *str)
2527{
1462594b 2528 mca_cfg.disabled = true;
d7c3c9a6
AK
2529 return 1;
2530}
2531__setup("nomce", mcheck_disable);
a988d334 2532
5be9ed25
HY
2533#ifdef CONFIG_DEBUG_FS
2534struct dentry *mce_get_debugfs_dir(void)
a988d334 2535{
5be9ed25 2536 static struct dentry *dmce;
a988d334 2537
5be9ed25
HY
2538 if (!dmce)
2539 dmce = debugfs_create_dir("mce", NULL);
a988d334 2540
5be9ed25
HY
2541 return dmce;
2542}
a988d334 2543
bf783f9f
HY
2544static void mce_reset(void)
2545{
2546 cpu_missing = 0;
c7c9b392 2547 atomic_set(&mce_fake_panicked, 0);
bf783f9f
HY
2548 atomic_set(&mce_executing, 0);
2549 atomic_set(&mce_callin, 0);
2550 atomic_set(&global_nwo, 0);
2551}
a988d334 2552
bf783f9f
HY
2553static int fake_panic_get(void *data, u64 *val)
2554{
2555 *val = fake_panic;
2556 return 0;
a988d334
IM
2557}
2558
bf783f9f 2559static int fake_panic_set(void *data, u64 val)
a988d334 2560{
bf783f9f
HY
2561 mce_reset();
2562 fake_panic = val;
2563 return 0;
a988d334 2564}
a988d334 2565
bf783f9f
HY
2566DEFINE_SIMPLE_ATTRIBUTE(fake_panic_fops, fake_panic_get,
2567 fake_panic_set, "%llu\n");
d7c3c9a6 2568
5e09954a 2569static int __init mcheck_debugfs_init(void)
d7c3c9a6 2570{
bf783f9f
HY
2571 struct dentry *dmce, *ffake_panic;
2572
2573 dmce = mce_get_debugfs_dir();
2574 if (!dmce)
2575 return -ENOMEM;
2576 ffake_panic = debugfs_create_file("fake_panic", 0444, dmce, NULL,
2577 &fake_panic_fops);
2578 if (!ffake_panic)
2579 return -ENOMEM;
2580
2581 return 0;
d7c3c9a6 2582}
fd4cf79f
CG
2583#else
2584static int __init mcheck_debugfs_init(void) { return -EINVAL; }
5be9ed25 2585#endif
fd4cf79f
CG
2586
2587static int __init mcheck_late_init(void)
2588{
2589 mcheck_debugfs_init();
2590
2591 /*
2592 * Flush out everything that has been logged during early boot, now that
2593 * everything has been initialized (workqueues, decoders, ...).
2594 */
2595 mce_schedule_work();
2596
2597 return 0;
2598}
2599late_initcall(mcheck_late_init);