/*
 * Machine check handler.
 *
 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
 * Rest from unknown author(s).
 * 2004 Andi Kleen. Rewrote most of it.
 * Copyright 2008 Intel Corporation
 * Author: Andi Kleen
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/thread_info.h>
#include <linux/capability.h>
#include <linux/miscdevice.h>
#include <linux/ratelimit.h>
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>
#include <linux/kobject.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/syscore_ops.h>
#include <linux/delay.h>
#include <linux/ctype.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/debugfs.h>
#include <linux/irq_work.h>
#include <linux/export.h>

#include <asm/processor.h>
#include <asm/traps.h>
#include <asm/tlbflush.h>
#include <asm/mce.h>
#include <asm/msr.h>

#include "mce-internal.h"

static DEFINE_MUTEX(mce_chrdev_read_mutex);

#define mce_log_get_idx_check(p) \
({ \
	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
			 !lockdep_is_held(&mce_chrdev_read_mutex), \
			 "suspicious mce_log_get_idx_check() usage"); \
	smp_load_acquire(&(p)); \
})

#define CREATE_TRACE_POINTS
#include <trace/events/mce.h>

#define SPINUNIT 100	/* 100ns */

DEFINE_PER_CPU(unsigned, mce_exception_count);

struct mce_bank *mce_banks __read_mostly;
struct mce_vendor_flags mce_flags __read_mostly;

struct mca_config mca_cfg __read_mostly = {
	.bootlog  = -1,
	/*
	 * Tolerant levels:
	 * 0: always panic on uncorrected errors, log corrected errors
	 * 1: panic or SIGBUS on uncorrected errors, log corrected errors
	 * 2: SIGBUS or log uncorrected errors (if possible), log corr. errors
	 * 3: never panic or SIGBUS, log all errors (for testing only)
	 */
	.tolerant = 1,
	.monarch_timeout = -1
};
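
/*
 * Illustrative only (interfaces not defined in this file): the tolerant
 * level can also be changed at runtime, e.g. via sysfs:
 *
 *	echo 2 > /sys/devices/system/machinecheck/machinecheck0/tolerant
 *
 * or at boot with the "mce=tolerancelevel[,monarchtimeout]" option.
 */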

/* User mode helper program triggered by machine check event */
static unsigned long mce_need_notify;
static char mce_helper[128];
static char *mce_helper_argv[2] = { mce_helper, NULL };

static DECLARE_WAIT_QUEUE_HEAD(mce_chrdev_wait);

static DEFINE_PER_CPU(struct mce, mces_seen);
static int cpu_missing;

/*
 * MCA banks polled by the periodic polling timer for corrected events.
 * With Intel CMCI, this only has MCA banks which do not support CMCI (if any).
 */
DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
	[0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
};

/*
 * MCA banks controlled through firmware first for corrected errors.
 * This is a global list of banks for which we won't enable CMCI and we
 * won't poll. Firmware controls these banks and is responsible for
 * reporting corrected errors through GHES. Uncorrected/recoverable
 * errors are still notified through a machine check.
 */
mce_banks_t mce_banks_ce_disabled;

static struct work_struct mce_work;
static struct irq_work mce_irq_work;

static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);
static int mce_usable_address(struct mce *m);

/*
 * CPU/chipset specific EDAC code can register a notifier call here to print
 * MCE errors in a human-readable form.
 */
ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain);

/* Do initial initialization of a struct mce */
void mce_setup(struct mce *m)
{
	memset(m, 0, sizeof(struct mce));
	m->cpu = m->extcpu = smp_processor_id();
	m->tsc = rdtsc();
	/* We hope get_seconds stays lockless */
	m->time = get_seconds();
	m->cpuvendor = boot_cpu_data.x86_vendor;
	m->cpuid = cpuid_eax(1);
	m->socketid = cpu_data(m->extcpu).phys_proc_id;
	m->apicid = cpu_data(m->extcpu).initial_apicid;
	rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap);
}

DEFINE_PER_CPU(struct mce, injectm);
EXPORT_PER_CPU_SYMBOL_GPL(injectm);

/*
 * Lockless MCE logging infrastructure.
 * This avoids deadlocks on printk locks without having to break locks. It also
 * separates MCEs from kernel messages to avoid bogus bug reports.
 */

static struct mce_log mcelog = {
	.signature	= MCE_LOG_SIGNATURE,
	.len		= MCE_LOG_LEN,
	.recordlen	= sizeof(struct mce),
};

void mce_log(struct mce *mce)
{
	unsigned next, entry;

	/* Emit the trace record: */
	trace_mce_record(mce);

	if (!mce_gen_pool_add(mce))
		irq_work_queue(&mce_irq_work);

	mce->finished = 0;
	wmb();
	for (;;) {
		entry = mce_log_get_idx_check(mcelog.next);
		for (;;) {

			/*
			 * When the buffer fills up discard new entries.
			 * Assume that the earlier errors are the more
			 * interesting ones:
			 */
			if (entry >= MCE_LOG_LEN) {
				set_bit(MCE_OVERFLOW,
					(unsigned long *)&mcelog.flags);
				return;
			}
			/* Old left over entry. Skip: */
			if (mcelog.entry[entry].finished) {
				entry++;
				continue;
			}
			break;
		}
		smp_rmb();
		next = entry + 1;
		if (cmpxchg(&mcelog.next, entry, next) == entry)
			break;
	}
	memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
	wmb();
	mcelog.entry[entry].finished = 1;
	wmb();

	mce->finished = 1;
	set_bit(0, &mce_need_notify);
}

void mce_inject_log(struct mce *m)
{
	mutex_lock(&mce_chrdev_read_mutex);
	mce_log(m);
	mutex_unlock(&mce_chrdev_read_mutex);
}
EXPORT_SYMBOL_GPL(mce_inject_log);

static struct notifier_block mce_srao_nb;

void mce_register_decode_chain(struct notifier_block *nb)
{
	/* Ensure SRAO notifier has the highest priority in the decode chain. */
	if (nb != &mce_srao_nb && nb->priority == INT_MAX)
		nb->priority -= 1;

	atomic_notifier_chain_register(&x86_mce_decoder_chain, nb);
}
EXPORT_SYMBOL_GPL(mce_register_decode_chain);

void mce_unregister_decode_chain(struct notifier_block *nb)
{
	atomic_notifier_chain_unregister(&x86_mce_decoder_chain, nb);
}
EXPORT_SYMBOL_GPL(mce_unregister_decode_chain);

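/*
 * A minimal sketch of how a consumer (e.g. an EDAC driver) hooks into the
 * decode chain above -- the names here are illustrative, not part of this
 * file:
 *
 *	static int my_mce_decoder(struct notifier_block *nb,
 *				  unsigned long val, void *data)
 *	{
 *		struct mce *m = data;
 *
 *		pr_info("decoded: bank %d status 0x%llx\n", m->bank, m->status);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = { .notifier_call = my_mce_decoder };
 *	mce_register_decode_chain(&my_nb);
 */
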
static void print_mce(struct mce *m)
{
	int ret = 0;

	pr_emerg(HW_ERR "CPU %d: Machine Check Exception: %Lx Bank %d: %016Lx\n",
		 m->extcpu, m->mcgstatus, m->bank, m->status);

	if (m->ip) {
		pr_emerg(HW_ERR "RIP%s %02x:<%016Lx> ",
			!(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
				m->cs, m->ip);

		if (m->cs == __KERNEL_CS)
			print_symbol("{%s}", m->ip);
		pr_cont("\n");
	}

	pr_emerg(HW_ERR "TSC %llx ", m->tsc);
	if (m->addr)
		pr_cont("ADDR %llx ", m->addr);
	if (m->misc)
		pr_cont("MISC %llx ", m->misc);

	pr_cont("\n");
	/*
	 * Note this output is parsed by external tools and old fields
	 * should not be changed.
	 */
	pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n",
		m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid,
		cpu_data(m->extcpu).microcode);

	/*
	 * Print out human-readable details about the MCE error
	 * (if the CPU has an implementation for that).
	 */
	ret = atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m);
	if (ret == NOTIFY_STOP)
		return;

	pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n");
}

#define PANIC_TIMEOUT 5 /* 5 seconds */

static atomic_t mce_panicked;

static int fake_panic;
static atomic_t mce_fake_panicked;

/* Panic in progress. Enable interrupts and wait for final IPI */
static void wait_for_panic(void)
{
	long timeout = PANIC_TIMEOUT*USEC_PER_SEC;

	preempt_disable();
	local_irq_enable();
	while (timeout-- > 0)
		udelay(1);
	if (panic_timeout == 0)
		panic_timeout = mca_cfg.panic_timeout;
	panic("Panicking: machine check CPU died");
}

static void mce_panic(const char *msg, struct mce *final, char *exp)
{
	int i, apei_err = 0;

	if (!fake_panic) {
		/*
		 * Make sure only one CPU runs in machine check panic
		 */
		if (atomic_inc_return(&mce_panicked) > 1)
			wait_for_panic();
		barrier();

		bust_spinlocks(1);
		console_verbose();
	} else {
		/* Don't log too much for fake panic */
		if (atomic_inc_return(&mce_fake_panicked) > 1)
			return;
	}
	/* First print corrected ones that are still unlogged */
	for (i = 0; i < MCE_LOG_LEN; i++) {
		struct mce *m = &mcelog.entry[i];
		if (!(m->status & MCI_STATUS_VAL))
			continue;
		if (!(m->status & MCI_STATUS_UC)) {
			print_mce(m);
			if (!apei_err)
				apei_err = apei_write_mce(m);
		}
	}
	/* Now print uncorrected but with the final one last */
	for (i = 0; i < MCE_LOG_LEN; i++) {
		struct mce *m = &mcelog.entry[i];
		if (!(m->status & MCI_STATUS_VAL))
			continue;
		if (!(m->status & MCI_STATUS_UC))
			continue;
		if (!final || memcmp(m, final, sizeof(struct mce))) {
			print_mce(m);
			if (!apei_err)
				apei_err = apei_write_mce(m);
		}
	}
	if (final) {
		print_mce(final);
		if (!apei_err)
			apei_err = apei_write_mce(final);
	}
	if (cpu_missing)
		pr_emerg(HW_ERR "Some CPUs didn't answer in synchronization\n");
	if (exp)
		pr_emerg(HW_ERR "Machine check: %s\n", exp);
	if (!fake_panic) {
		if (panic_timeout == 0)
			panic_timeout = mca_cfg.panic_timeout;
		panic(msg);
	} else
		pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
}

/* Support code for software error injection */

static int msr_to_offset(u32 msr)
{
	unsigned bank = __this_cpu_read(injectm.bank);

	if (msr == mca_cfg.rip_msr)
		return offsetof(struct mce, ip);
	if (msr == MSR_IA32_MCx_STATUS(bank))
		return offsetof(struct mce, status);
	if (msr == MSR_IA32_MCx_ADDR(bank))
		return offsetof(struct mce, addr);
	if (msr == MSR_IA32_MCx_MISC(bank))
		return offsetof(struct mce, misc);
	if (msr == MSR_IA32_MCG_STATUS)
		return offsetof(struct mce, mcgstatus);
	return -1;
}

/* MSR access wrappers used for error injection */
static u64 mce_rdmsrl(u32 msr)
{
	u64 v;

	if (__this_cpu_read(injectm.finished)) {
		int offset = msr_to_offset(msr);

		if (offset < 0)
			return 0;
		return *(u64 *)((char *)this_cpu_ptr(&injectm) + offset);
	}

	if (rdmsrl_safe(msr, &v)) {
		WARN_ONCE(1, "mce: Unable to read msr %d!\n", msr);
		/*
		 * Return zero in case the access faulted. This should
		 * not happen normally but can happen if the CPU does
		 * something weird, or if the code is buggy.
		 */
		v = 0;
	}

	return v;
}

static void mce_wrmsrl(u32 msr, u64 v)
{
	if (__this_cpu_read(injectm.finished)) {
		int offset = msr_to_offset(msr);

		if (offset >= 0)
			*(u64 *)((char *)this_cpu_ptr(&injectm) + offset) = v;
		return;
	}
	wrmsrl(msr, v);
}

/*
 * Collect all global (w.r.t. this processor) status about this machine
 * check into our "mce" struct so that we can use it later to assess
 * the severity of the problem as we read per-bank specific details.
 */
static inline void mce_gather_info(struct mce *m, struct pt_regs *regs)
{
	mce_setup(m);

	m->mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
	if (regs) {
		/*
		 * Get the address of the instruction at the time of
		 * the machine check error.
		 */
		if (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) {
			m->ip = regs->ip;
			m->cs = regs->cs;

			/*
			 * When in VM86 mode make the cs look like ring 3
			 * always. This is a lie, but it's better than passing
			 * the additional vm86 bit around everywhere.
			 */
			if (v8086_mode(regs))
				m->cs |= 3;
		}
		/* Use accurate RIP reporting if available. */
		if (mca_cfg.rip_msr)
			m->ip = mce_rdmsrl(mca_cfg.rip_msr);
	}
}

int mce_available(struct cpuinfo_x86 *c)
{
	if (mca_cfg.disabled)
		return 0;
	return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
}

static void mce_schedule_work(void)
{
	if (!mce_gen_pool_empty() && keventd_up())
		schedule_work(&mce_work);
}

static void mce_irq_work_cb(struct irq_work *entry)
{
	mce_notify_irq();
	mce_schedule_work();
}

static void mce_report_event(struct pt_regs *regs)
{
	if (regs->flags & (X86_VM_MASK|X86_EFLAGS_IF)) {
		mce_notify_irq();
		/*
		 * Triggering the work queue here is just an insurance
		 * policy in case the syscall exit notify handler
		 * doesn't run soon enough or ends up running on the
		 * wrong CPU (can happen when audit sleeps)
		 */
		mce_schedule_work();
		return;
	}

	irq_work_queue(&mce_irq_work);
}

static int srao_decode_notifier(struct notifier_block *nb, unsigned long val,
				void *data)
{
	struct mce *mce = (struct mce *)data;
	unsigned long pfn;

	if (!mce)
		return NOTIFY_DONE;

	if (mce->usable_addr && (mce->severity == MCE_AO_SEVERITY)) {
		pfn = mce->addr >> PAGE_SHIFT;
		memory_failure(pfn, MCE_VECTOR, 0);
	}

	return NOTIFY_OK;
}

static struct notifier_block mce_srao_nb = {
	.notifier_call	= srao_decode_notifier,
	.priority	= INT_MAX,
};

/*
 * Read ADDR and MISC registers.
 */
static void mce_read_aux(struct mce *m, int i)
{
	if (m->status & MCI_STATUS_MISCV)
		m->misc = mce_rdmsrl(MSR_IA32_MCx_MISC(i));
	if (m->status & MCI_STATUS_ADDRV) {
		m->addr = mce_rdmsrl(MSR_IA32_MCx_ADDR(i));

		/*
		 * Mask the reported address by the reported granularity.
		 */
		if (mca_cfg.ser && (m->status & MCI_STATUS_MISCV)) {
			u8 shift = MCI_MISC_ADDR_LSB(m->misc);
			m->addr >>= shift;
			m->addr <<= shift;
		}
	}
}

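/*
 * Worked example for the masking in mce_read_aux() (illustrative values):
 * if MISC reports an address LSB of 12, a raw address of 0x12345abc is
 * masked down to 0x12345000 -- i.e. the error is only known to lie
 * somewhere in that 4K-aligned region, not at an exact byte.
 */
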
static bool memory_error(struct mce *m)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (c->x86_vendor == X86_VENDOR_AMD) {
		/*
		 * coming soon
		 */
		return false;
	} else if (c->x86_vendor == X86_VENDOR_INTEL) {
		/*
		 * Intel SDM Volume 3B - 15.9.2 Compound Error Codes
		 *
		 * Bit 7 of the MCACOD field of IA32_MCi_STATUS is used for
		 * indicating a memory error. Bit 8 is used for indicating a
		 * cache hierarchy error. The combination of bit 2 and bit 3
		 * is used for indicating a `generic' cache hierarchy error.
		 * But we can't just blindly check the above bits, because if
		 * bit 11 is set, then it is a bus/interconnect error - and
		 * either way the above bits just give more detail on what
		 * bus/interconnect error happened. Note that bit 12 can be
		 * ignored, as it's the "filter" bit.
		 */
		return (m->status & 0xef80) == BIT(7) ||
		       (m->status & 0xef00) == BIT(8) ||
		       (m->status & 0xeffc) == 0xc;
	}

	return false;
}

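/*
 * Worked example for the checks above (made-up status value): an MCACOD
 * of 0x009f satisfies the first test, since 0x009f & 0xef80 == 0x0080 ==
 * BIT(7), so it counts as a memory error. With bit 11 also set (MCACOD
 * 0x089f), 0x089f & 0xef80 == 0x0880 != BIT(7), and the error would be
 * treated as a bus/interconnect error instead.
 */
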
DEFINE_PER_CPU(unsigned, mce_poll_count);

/*
 * Poll for corrected events or events that happened before reset.
 * Those are just logged through /dev/mcelog.
 *
 * This is executed in standard interrupt context.
 *
 * Note: spec recommends to panic for fatal unsignalled
 * errors here. However this would be quite problematic --
 * we would need to reimplement the Monarch handling and
 * it would mess up the exclusion between exception handler
 * and poll handler -- so we skip this for now.
 * These cases should not happen anyway, or only when the CPU
 * is already totally confused. In this case it's likely it will
 * not fully execute the machine check handler either.
 */
bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
{
	bool error_logged = false;
	struct mce m;
	int severity;
	int i;

	this_cpu_inc(mce_poll_count);

	mce_gather_info(&m, NULL);

	for (i = 0; i < mca_cfg.banks; i++) {
		if (!mce_banks[i].ctl || !test_bit(i, *b))
			continue;

		m.misc = 0;
		m.addr = 0;
		m.bank = i;
		m.tsc = 0;

		barrier();
		m.status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
		if (!(m.status & MCI_STATUS_VAL))
			continue;

		/*
		 * Uncorrected or signalled events are handled by the exception
		 * handler when it is enabled, so don't process those here.
		 *
		 * TBD do the same check for MCI_STATUS_EN here?
		 */
		if (!(flags & MCP_UC) &&
		    (m.status & (mca_cfg.ser ? MCI_STATUS_S : MCI_STATUS_UC)))
			continue;

		mce_read_aux(&m, i);

		if (!(flags & MCP_TIMESTAMP))
			m.tsc = 0;

		severity = mce_severity(&m, mca_cfg.tolerant, NULL, false);

		/*
		 * In the cases where we don't have a valid address after all,
		 * do not add it into the ring buffer.
		 */
		if (severity == MCE_DEFERRED_SEVERITY && memory_error(&m)) {
			if (m.status & MCI_STATUS_ADDRV) {
				m.severity = severity;
				m.usable_addr = mce_usable_address(&m);

				if (!mce_gen_pool_add(&m))
					mce_schedule_work();
			}
		}

		/*
		 * Don't get the IP here because it's unlikely to
		 * have anything to do with the actual error location.
		 */
		if (!(flags & MCP_DONTLOG) && !mca_cfg.dont_log_ce) {
			error_logged = true;
			mce_log(&m);
		}

		/*
		 * Clear state for this bank.
		 */
		mce_wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
	}

	/*
	 * Don't clear MCG_STATUS here because it's only defined for
	 * exceptions.
	 */

	sync_core();

	return error_logged;
}
EXPORT_SYMBOL_GPL(machine_check_poll);

/*
 * Do a quick check if any of the events requires a panic.
 * This decides if we keep the events around or clear them.
 */
static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
			  struct pt_regs *regs)
{
	int i, ret = 0;
	char *tmp;

	for (i = 0; i < mca_cfg.banks; i++) {
		m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
		if (m->status & MCI_STATUS_VAL) {
			__set_bit(i, validp);
			if (quirk_no_way_out)
				quirk_no_way_out(i, m, regs);
		}

		if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
			*msg = tmp;
			ret = 1;
		}
	}
	return ret;
}

/*
 * Variable to establish order between CPUs while scanning.
 * Each CPU spins initially until mce_executing equals its number.
 */
static atomic_t mce_executing;

/*
 * Defines order of CPUs on entry. First CPU becomes Monarch.
 */
static atomic_t mce_callin;

/*
 * Check if a timeout waiting for other CPUs happened.
 */
static int mce_timed_out(u64 *t, const char *msg)
{
	/*
	 * The others already did panic for some reason.
	 * Bail out like in a timeout.
	 * rmb() to tell the compiler that system_state
	 * might have been modified by someone else.
	 */
	rmb();
	if (atomic_read(&mce_panicked))
		wait_for_panic();
	if (!mca_cfg.monarch_timeout)
		goto out;
	if ((s64)*t < SPINUNIT) {
		if (mca_cfg.tolerant <= 1)
			mce_panic(msg, NULL, NULL);
		cpu_missing = 1;
		return 1;
	}
	*t -= SPINUNIT;
out:
	touch_nmi_watchdog();
	return 0;
}

/*
 * The Monarch's reign. The Monarch is the CPU who entered
 * the machine check handler first. It waits for the others to
 * raise the exception too and then grades them. If any error
 * is fatal, panic. Only then let the others continue.
 *
 * The other CPUs entering the MCE handler will be controlled by the
 * Monarch. They are called Subjects.
 *
 * This way we prevent any potential data corruption in an unrecoverable
 * case and also make sure all CPUs' errors are always examined.
 *
 * Also this detects the case of a machine check event coming from outer
 * space (not detected by any CPUs). In this case some external agent wants
 * us to shut down, so panic too.
 *
 * The other CPUs might still decide to panic if the handler happens
 * in an unrecoverable place, but in this case the system is in a semi-stable
 * state and won't corrupt anything by itself. It's ok to let the others
 * continue for a bit first.
 *
 * All the spin loops have timeouts; when a timeout happens a CPU
 * typically elects itself to be Monarch.
 */
static void mce_reign(void)
{
	int cpu;
	struct mce *m = NULL;
	int global_worst = 0;
	char *msg = NULL;
	char *nmsg = NULL;

	/*
	 * This CPU is the Monarch and the other CPUs have run
	 * through their handlers.
	 * Grade the severity of the errors of all the CPUs.
	 */
	for_each_possible_cpu(cpu) {
		int severity = mce_severity(&per_cpu(mces_seen, cpu),
					    mca_cfg.tolerant,
					    &nmsg, true);
		if (severity > global_worst) {
			msg = nmsg;
			global_worst = severity;
			m = &per_cpu(mces_seen, cpu);
		}
	}

	/*
	 * Cannot recover? Panic here then.
	 * This dumps all the mces in the log buffer and stops the
	 * other CPUs.
	 */
	if (m && global_worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3)
		mce_panic("Fatal machine check", m, msg);

	/*
	 * For UC somewhere we let the CPU who detects it handle it.
	 * We must also let the others continue, otherwise the handling
	 * CPU could deadlock on a lock.
	 */

	/*
	 * No machine check event found. Must be some external
	 * source or one CPU is hung. Panic.
	 */
	if (global_worst <= MCE_KEEP_SEVERITY && mca_cfg.tolerant < 3)
		mce_panic("Fatal machine check from unknown source", NULL, NULL);

	/*
	 * Now clear all the mces_seen so that they don't reappear on
	 * the next mce.
	 */
	for_each_possible_cpu(cpu)
		memset(&per_cpu(mces_seen, cpu), 0, sizeof(struct mce));
}

static atomic_t global_nwo;

/*
 * Start of Monarch synchronization. This waits until all CPUs have
 * entered the exception handler and then determines if any of them
 * saw a fatal event that requires panic. Then it executes them
 * in the entry order.
 * TBD double check parallel CPU hotunplug
 */
static int mce_start(int *no_way_out)
{
	int order;
	int cpus = num_online_cpus();
	u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;

	if (!timeout)
		return -1;

	atomic_add(*no_way_out, &global_nwo);
	/*
	 * global_nwo should be updated before mce_callin
	 */
	smp_wmb();
	order = atomic_inc_return(&mce_callin);

	/*
	 * Wait for everyone.
	 */
	while (atomic_read(&mce_callin) != cpus) {
		if (mce_timed_out(&timeout,
				  "Timeout: Not all CPUs entered broadcast exception handler")) {
			atomic_set(&global_nwo, 0);
			return -1;
		}
		ndelay(SPINUNIT);
	}

	/*
	 * mce_callin should be read before global_nwo
	 */
	smp_rmb();

	if (order == 1) {
		/*
		 * Monarch: Starts executing now, the others wait.
		 */
		atomic_set(&mce_executing, 1);
	} else {
		/*
		 * Subject: Now start the scanning loop one by one in
		 * the original callin order.
		 * This way when there are any shared banks it will be
		 * only seen by one CPU before cleared, avoiding duplicates.
		 */
		while (atomic_read(&mce_executing) < order) {
			if (mce_timed_out(&timeout,
					  "Timeout: Subject CPUs unable to finish machine check processing")) {
				atomic_set(&global_nwo, 0);
				return -1;
			}
			ndelay(SPINUNIT);
		}
	}

	/*
	 * Cache the global no_way_out state.
	 */
	*no_way_out = atomic_read(&global_nwo);

	return order;
}

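/*
 * Example flow (illustrative): on a 4-CPU broadcast MCE, the first CPU
 * through mce_start() gets order == 1 and becomes Monarch; the CPUs with
 * order 2..4 spin until mce_executing reaches their order, so the banks
 * are scanned strictly in callin order, one CPU at a time.
 */
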
/*
 * Synchronize between CPUs after main scanning loop.
 * This invokes the bulk of the Monarch processing.
 */
static int mce_end(int order)
{
	int ret = -1;
	u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;

	if (!timeout)
		goto reset;
	if (order < 0)
		goto reset;

	/*
	 * Allow others to run.
	 */
	atomic_inc(&mce_executing);

	if (order == 1) {
		/* CHECKME: Can this race with a parallel hotplug? */
		int cpus = num_online_cpus();

		/*
		 * Monarch: Wait for everyone to go through their scanning
		 * loops.
		 */
		while (atomic_read(&mce_executing) <= cpus) {
			if (mce_timed_out(&timeout,
					  "Timeout: Monarch CPU unable to finish machine check processing"))
				goto reset;
			ndelay(SPINUNIT);
		}

		mce_reign();
		barrier();
		ret = 0;
	} else {
		/*
		 * Subject: Wait for Monarch to finish.
		 */
		while (atomic_read(&mce_executing) != 0) {
			if (mce_timed_out(&timeout,
					  "Timeout: Monarch CPU did not finish machine check processing"))
				goto reset;
			ndelay(SPINUNIT);
		}

		/*
		 * Don't reset anything. That's done by the Monarch.
		 */
		return 0;
	}

	/*
	 * Reset all global state.
	 */
reset:
	atomic_set(&global_nwo, 0);
	atomic_set(&mce_callin, 0);
	barrier();

	/*
	 * Let others run again.
	 */
	atomic_set(&mce_executing, 0);
	return ret;
}

/*
 * Check if the address reported by the CPU is in a format we can parse.
 * It would be possible to add code for most other cases, but all would
 * be somewhat complicated (e.g. segment offset would require an instruction
 * parser). So only support physical addresses up to page granularity for now.
 */
static int mce_usable_address(struct mce *m)
{
	if (!(m->status & MCI_STATUS_MISCV) || !(m->status & MCI_STATUS_ADDRV))
		return 0;
	if (MCI_MISC_ADDR_LSB(m->misc) > PAGE_SHIFT)
		return 0;
	if (MCI_MISC_ADDR_MODE(m->misc) != MCI_MISC_ADDR_PHYS)
		return 0;
	return 1;
}
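
/*
 * Illustrative note: with MCI_MISC_ADDR_LSB(m->misc) == PAGE_SHIFT the
 * reported address pins the error to exactly one 4K page, and the pfn
 * handed to memory_failure() is simply m->addr >> PAGE_SHIFT (see
 * srao_decode_notifier() above).
 */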

static void mce_clear_state(unsigned long *toclear)
{
	int i;

	for (i = 0; i < mca_cfg.banks; i++) {
		if (test_bit(i, toclear))
			mce_wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
	}
}

/*
 * The actual machine check handler. This only handles real
 * exceptions when something got corrupted coming in through int 18.
 *
 * This is executed in NMI context not subject to normal locking rules. This
 * implies that most kernel services cannot be safely used. Don't even
 * think about putting a printk in there!
 *
 * On Intel systems this is entered on all CPUs in parallel through
 * MCE broadcast. However some CPUs might be broken beyond repair,
 * so be always careful when synchronizing with others.
 */
void do_machine_check(struct pt_regs *regs, long error_code)
{
	struct mca_config *cfg = &mca_cfg;
	struct mce m, *final;
	int i;
	int worst = 0;
	int severity;
	/*
	 * Establish sequential order between the CPUs entering the machine
	 * check handler.
	 */
	int order;
	/*
	 * If no_way_out gets set, there is no safe way to recover from this
	 * MCE. If mca_cfg.tolerant is cranked up, we'll try anyway.
	 */
	int no_way_out = 0;
	/*
	 * If kill_it gets set, there might be a way to recover from this
	 * error.
	 */
	int kill_it = 0;
	DECLARE_BITMAP(toclear, MAX_NR_BANKS);
	DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
	char *msg = "Unknown";
	u64 recover_paddr = ~0ull;
	int flags = MF_ACTION_REQUIRED;
	int lmce = 0;

	ist_enter(regs);

	this_cpu_inc(mce_exception_count);

	if (!cfg->banks)
		goto out;

	mce_gather_info(&m, regs);

	final = this_cpu_ptr(&mces_seen);
	*final = m;

	memset(valid_banks, 0, sizeof(valid_banks));
	no_way_out = mce_no_way_out(&m, &msg, valid_banks, regs);

	barrier();

	/*
	 * If there is no valid restart IP we might need to kill or panic.
	 * Assume the worst for now, but if we find the
	 * severity is MCE_AR_SEVERITY we have other options.
	 */
	if (!(m.mcgstatus & MCG_STATUS_RIPV))
		kill_it = 1;

	/*
	 * Check if this MCE is signaled to only this logical processor
	 */
	if (m.mcgstatus & MCG_STATUS_LMCES)
		lmce = 1;
	else {
		/*
		 * Go through all the banks in exclusion of the other CPUs.
		 * This way we don't report duplicated events on shared banks
		 * because the first one to see it will clear it.
		 * If this is a Local MCE, then no need to perform rendezvous.
		 */
		order = mce_start(&no_way_out);
	}

	for (i = 0; i < cfg->banks; i++) {
		__clear_bit(i, toclear);
		if (!test_bit(i, valid_banks))
			continue;
		if (!mce_banks[i].ctl)
			continue;

		m.misc = 0;
		m.addr = 0;
		m.bank = i;

		m.status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
		if ((m.status & MCI_STATUS_VAL) == 0)
			continue;

		/*
		 * Non-uncorrected or non-signaled errors are handled by
		 * machine_check_poll(). Leave them alone, unless this panics.
		 */
		if (!(m.status & (cfg->ser ? MCI_STATUS_S : MCI_STATUS_UC)) &&
			!no_way_out)
			continue;

		/*
		 * Set taint even when machine check was not enabled.
		 */
		add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);

		severity = mce_severity(&m, cfg->tolerant, NULL, true);

		/*
		 * When the machine check was for a corrected/deferred error,
		 * don't touch it, unless we're panicking.
		 */
		if ((severity == MCE_KEEP_SEVERITY ||
		     severity == MCE_UCNA_SEVERITY) && !no_way_out)
			continue;
		__set_bit(i, toclear);
		if (severity == MCE_NO_SEVERITY) {
			/*
			 * Machine check event was not enabled. Clear, but
			 * ignore.
			 */
			continue;
		}

		mce_read_aux(&m, i);

		/* assuming valid severity level != 0 */
		m.severity = severity;
		m.usable_addr = mce_usable_address(&m);

		mce_log(&m);

		if (severity > worst) {
			*final = m;
			worst = severity;
		}
	}

	/* mce_clear_state will clear *final, save locally for use later */
	m = *final;

	if (!no_way_out)
		mce_clear_state(toclear);

	/*
	 * Do most of the synchronization with other CPUs.
	 * When there's any problem use only local no_way_out state.
	 */
	if (!lmce) {
		if (mce_end(order) < 0)
			no_way_out = worst >= MCE_PANIC_SEVERITY;
	} else {
		/*
		 * Local MCE skipped calling mce_reign().
		 * If we found a fatal error, we need to panic here.
		 */
		if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3)
			mce_panic("Machine check from unknown source",
				NULL, NULL);
	}

	/*
	 * At insane "tolerant" levels we take no action. Otherwise
	 * we only die if we have no other choice. For less serious
	 * issues we try to recover, or limit damage to the current
	 * process.
	 */
	if (cfg->tolerant < 3) {
		if (no_way_out)
			mce_panic("Fatal machine check on current CPU", &m, msg);
		if (worst == MCE_AR_SEVERITY) {
			recover_paddr = m.addr;
			if (!(m.mcgstatus & MCG_STATUS_RIPV))
				flags |= MF_MUST_KILL;
		} else if (kill_it) {
			force_sig(SIGBUS, current);
		}
	}

	if (worst > 0)
		mce_report_event(regs);
	mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
out:
	sync_core();

	if (recover_paddr == ~0ull)
		goto done;

	pr_err("Uncorrected hardware memory error in user-access at %llx",
		recover_paddr);
	/*
	 * We must call memory_failure() here even if the current process is
	 * doomed. We still need to mark the page as poisoned and alert any
	 * other users of the page.
	 */
	ist_begin_non_atomic(regs);
	local_irq_enable();
	if (memory_failure(recover_paddr >> PAGE_SHIFT, MCE_VECTOR, flags) < 0) {
		pr_err("Memory error not recovered");
		force_sig(SIGBUS, current);
	}
	local_irq_disable();
	ist_end_non_atomic();
done:
	ist_exit(regs);
}
EXPORT_SYMBOL_GPL(do_machine_check);

#ifndef CONFIG_MEMORY_FAILURE
int memory_failure(unsigned long pfn, int vector, int flags)
{
	/* mce_severity() should not hand us an ACTION_REQUIRED error */
	BUG_ON(flags & MF_ACTION_REQUIRED);
	pr_err("Uncorrected memory error in page 0x%lx ignored\n"
	       "Rebuild kernel with CONFIG_MEMORY_FAILURE=y for smarter handling\n",
	       pfn);

	return 0;
}
#endif

/*
 * Action optional processing happens here (picking up
 * from the list of faulting pages that do_machine_check()
 * placed into the genpool).
 */
static void mce_process_work(struct work_struct *dummy)
{
	mce_gen_pool_process();
}

#ifdef CONFIG_X86_MCE_INTEL
/***
 * mce_log_therm_throt_event - Logs the thermal throttling event to mcelog
 * @status: Event status information
 *
 * This function should be called by the thermal interrupt after the
 * event has been processed and the decision was made to log the event
 * further.
 *
 * The status parameter will be saved to the 'status' field of 'struct mce'
 * and historically has been the register value of the
 * MSR_IA32_THERMAL_STATUS (Intel) msr.
 */
void mce_log_therm_throt_event(__u64 status)
{
	struct mce m;

	mce_setup(&m);
	m.bank = MCE_THERMAL_BANK;
	m.status = status;
	mce_log(&m);
}
#endif /* CONFIG_X86_MCE_INTEL */

/*
 * Periodic polling timer for "silent" machine check errors. If the
 * poller finds an MCE, poll 2x faster. When the poller finds no more
 * errors, poll 2x slower (up to check_interval seconds).
 */
static unsigned long check_interval = INITIAL_CHECK_INTERVAL;

static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
static DEFINE_PER_CPU(struct timer_list, mce_timer);

static unsigned long mce_adjust_timer_default(unsigned long interval)
{
	return interval;
}

static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;

static void __restart_timer(struct timer_list *t, unsigned long interval)
{
	unsigned long when = jiffies + interval;
	unsigned long flags;

	local_irq_save(flags);

	if (timer_pending(t)) {
		if (time_before(when, t->expires))
			mod_timer_pinned(t, when);
	} else {
		t->expires = round_jiffies(when);
		add_timer_on(t, smp_processor_id());
	}

	local_irq_restore(flags);
}

static void mce_timer_fn(unsigned long data)
{
	struct timer_list *t = this_cpu_ptr(&mce_timer);
	int cpu = smp_processor_id();
	unsigned long iv;

	WARN_ON(cpu != data);

	iv = __this_cpu_read(mce_next_interval);

	if (mce_available(this_cpu_ptr(&cpu_info))) {
		machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_poll_banks));

		if (mce_intel_cmci_poll()) {
			iv = mce_adjust_timer(iv);
			goto done;
		}
	}

	/*
	 * Alert userspace if needed. If we logged an MCE, reduce the polling
	 * interval, otherwise increase the polling interval.
	 */
	if (mce_notify_irq())
		iv = max(iv / 2, (unsigned long) HZ/100);
	else
		iv = min(iv * 2, round_jiffies_relative(check_interval * HZ));

done:
	__this_cpu_write(mce_next_interval, iv);
	__restart_timer(t, iv);
}
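
/*
 * Worked example of the adaptive polling above (assuming the usual
 * 5-minute check_interval): logging an event halves the next interval
 * to 150s, then 75s, ... down to a floor of HZ/100 jiffies (10ms);
 * every quiet poll doubles it again, capped at check_interval.
 */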

/*
 * Ensure that the timer is firing in @interval from now.
 */
void mce_timer_kick(unsigned long interval)
{
	struct timer_list *t = this_cpu_ptr(&mce_timer);
	unsigned long iv = __this_cpu_read(mce_next_interval);

	__restart_timer(t, interval);

	if (interval < iv)
		__this_cpu_write(mce_next_interval, interval);
}

/* Must not be called in IRQ context where del_timer_sync() can deadlock */
static void mce_timer_delete_all(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		del_timer_sync(&per_cpu(mce_timer, cpu));
}

static void mce_do_trigger(struct work_struct *work)
{
	call_usermodehelper(mce_helper, mce_helper_argv, NULL, UMH_NO_WAIT);
}

static DECLARE_WORK(mce_trigger_work, mce_do_trigger);

/*
 * Notify the user(s) about new machine check events.
 * Can be called from interrupt context, but not from machine check/NMI
 * context.
 */
int mce_notify_irq(void)
{
	/* Not more than two messages every minute */
	static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);

	if (test_and_clear_bit(0, &mce_need_notify)) {
		/* wake processes polling /dev/mcelog */
		wake_up_interruptible(&mce_chrdev_wait);

		if (mce_helper[0])
			schedule_work(&mce_trigger_work);

		if (__ratelimit(&ratelimit))
			pr_info(HW_ERR "Machine check events logged\n");

		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(mce_notify_irq);

static int __mcheck_cpu_mce_banks_init(void)
{
	int i;
	u8 num_banks = mca_cfg.banks;

	mce_banks = kzalloc(num_banks * sizeof(struct mce_bank), GFP_KERNEL);
	if (!mce_banks)
		return -ENOMEM;

	for (i = 0; i < num_banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		b->ctl = -1ULL;
		b->init = 1;
	}
	return 0;
}

/*
 * Initialize Machine Checks for a CPU.
 */
static int __mcheck_cpu_cap_init(void)
{
	unsigned b;
	u64 cap;

	rdmsrl(MSR_IA32_MCG_CAP, cap);

	b = cap & MCG_BANKCNT_MASK;
	if (!mca_cfg.banks)
		pr_info("CPU supports %d MCE banks\n", b);

	if (b > MAX_NR_BANKS) {
		pr_warn("Using only %u machine check banks out of %u\n",
			MAX_NR_BANKS, b);
		b = MAX_NR_BANKS;
	}

	/* Don't support asymmetric configurations today */
	WARN_ON(mca_cfg.banks != 0 && b != mca_cfg.banks);
	mca_cfg.banks = b;

	if (!mce_banks) {
		int err = __mcheck_cpu_mce_banks_init();

		if (err)
			return err;
	}

	/* Use accurate RIP reporting if available. */
	if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9)
		mca_cfg.rip_msr = MSR_IA32_MCG_EIP;

	if (cap & MCG_SER_P)
		mca_cfg.ser = true;

	return 0;
}
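
/*
 * Illustrative decode (made-up value): cap == 0x01000109 would mean a
 * bank count of 9 (low byte), MCG_CTL_P set (bit 8, so the MCG_CTL MSR
 * exists) and MCG_SER_P set (bit 24, software error recovery supported),
 * i.e. mca_cfg.ser would end up true.
 */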

static void __mcheck_cpu_init_generic(void)
{
	enum mcp_flags m_fl = 0;
	mce_banks_t all_banks;
	u64 cap;
	int i;

	if (!mca_cfg.bootlog)
		m_fl = MCP_DONTLOG;

	/*
	 * Log the machine checks left over from the previous reset.
	 */
	bitmap_fill(all_banks, MAX_NR_BANKS);
	machine_check_poll(MCP_UC | m_fl, &all_banks);

	cr4_set_bits(X86_CR4_MCE);

	rdmsrl(MSR_IA32_MCG_CAP, cap);
	if (cap & MCG_CTL_P)
		wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);

	for (i = 0; i < mca_cfg.banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		if (!b->init)
			continue;
		wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
		wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
	}
}

/*
 * During IFU recovery Sandy Bridge -EP4S processors set the RIPV and
 * EIPV bits in MCG_STATUS to zero on the affected logical processor (SDM
 * Vol 3B Table 15-20). But this confuses both the code that determines
 * whether the machine check occurred in kernel or user mode, and also
 * the severity assessment code. Pretend that EIPV was set, and take the
 * ip/cs values from the pt_regs that mce_gather_info() ignored earlier.
 */
static void quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs)
{
	if (bank != 0)
		return;
	if ((m->mcgstatus & (MCG_STATUS_EIPV|MCG_STATUS_RIPV)) != 0)
		return;
	if ((m->status & (MCI_STATUS_OVER|MCI_STATUS_UC|
			  MCI_STATUS_EN|MCI_STATUS_MISCV|MCI_STATUS_ADDRV|
			  MCI_STATUS_PCC|MCI_STATUS_S|MCI_STATUS_AR|
			  MCACOD)) !=
			 (MCI_STATUS_UC|MCI_STATUS_EN|
			  MCI_STATUS_MISCV|MCI_STATUS_ADDRV|MCI_STATUS_S|
			  MCI_STATUS_AR|MCACOD_INSTR))
		return;

	m->mcgstatus |= MCG_STATUS_EIPV;
	m->ip = regs->ip;
	m->cs = regs->cs;
}

/* Add per CPU specific workarounds here */
static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
{
	struct mca_config *cfg = &mca_cfg;

	if (c->x86_vendor == X86_VENDOR_UNKNOWN) {
		pr_info("unknown CPU type - not enabling MCE support\n");
		return -EOPNOTSUPP;
	}

	/* This should be disabled by the BIOS, but isn't always */
	if (c->x86_vendor == X86_VENDOR_AMD) {
		if (c->x86 == 15 && cfg->banks > 4) {
			/*
			 * disable GART TBL walk error reporting, which
			 * trips off incorrectly with the IOMMU & 3ware
			 * & Cerberus:
			 */
			clear_bit(10, (unsigned long *)&mce_banks[4].ctl);
		}
		if (c->x86 <= 17 && cfg->bootlog < 0) {
			/*
			 * Lots of broken BIOS around that don't clear them
			 * by default and leave crap in there. Don't log:
			 */
			cfg->bootlog = 0;
		}
		/*
		 * Various K7s with broken bank 0 around. Always disable
		 * by default.
		 */
		if (c->x86 == 6 && cfg->banks > 0)
			mce_banks[0].ctl = 0;

		/*
		 * overflow_recov is supported for F15h Models 00h-0fh
		 * even though we don't have a CPUID bit for it.
		 */
		if (c->x86 == 0x15 && c->x86_model <= 0xf)
			mce_flags.overflow_recov = 1;

		/*
		 * Turn off MC4_MISC thresholding banks on those models since
		 * they're not supported there.
		 */
		if (c->x86 == 0x15 &&
		    (c->x86_model >= 0x10 && c->x86_model <= 0x1f)) {
			int i;
			u64 hwcr;
			bool need_toggle;
			u32 msrs[] = {
				0x00000413, /* MC4_MISC0 */
				0xc0000408, /* MC4_MISC1 */
			};

			rdmsrl(MSR_K7_HWCR, hwcr);

			/* McStatusWrEn has to be set */
			need_toggle = !(hwcr & BIT(18));

			if (need_toggle)
				wrmsrl(MSR_K7_HWCR, hwcr | BIT(18));

			/* Clear CntP bit safely */
			for (i = 0; i < ARRAY_SIZE(msrs); i++)
				msr_clear_bit(msrs[i], 62);

			/* restore old settings */
			if (need_toggle)
				wrmsrl(MSR_K7_HWCR, hwcr);
		}
	}

	if (c->x86_vendor == X86_VENDOR_INTEL) {
		/*
		 * SDM documents that on family 6 bank 0 should not be written
		 * because it aliases to another special BIOS controlled
		 * register.
		 * But it's not aliased anymore on model 0x1a+.
		 * Don't ignore bank 0 completely because there could be a
		 * valid event later, merely don't write CTL0.
		 */
		if (c->x86 == 6 && c->x86_model < 0x1A && cfg->banks > 0)
			mce_banks[0].init = 0;

		/*
		 * All newer Intel systems support MCE broadcasting. Enable
		 * synchronization with a one second timeout.
		 */
		if ((c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xe)) &&
			cfg->monarch_timeout < 0)
			cfg->monarch_timeout = USEC_PER_SEC;

		/*
		 * There are also broken BIOSes on some Pentium M and
		 * earlier systems:
		 */
		if (c->x86 == 6 && c->x86_model <= 13 && cfg->bootlog < 0)
			cfg->bootlog = 0;

		if (c->x86 == 6 && c->x86_model == 45)
			quirk_no_way_out = quirk_sandybridge_ifu;
	}
	if (cfg->monarch_timeout < 0)
		cfg->monarch_timeout = 0;
	if (cfg->bootlog != 0)
		cfg->panic_timeout = 30;

	return 0;
}

static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
{
	if (c->x86 != 5)
		return 0;

	switch (c->x86_vendor) {
	case X86_VENDOR_INTEL:
		intel_p5_mcheck_init(c);
		return 1;
	case X86_VENDOR_CENTAUR:
		winchip_mcheck_init(c);
		return 1;
	}

	return 0;
}

static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
{
	switch (c->x86_vendor) {
	case X86_VENDOR_INTEL:
		mce_intel_feature_init(c);
		mce_adjust_timer = cmci_intel_adjust_timer;
		break;

	case X86_VENDOR_AMD: {
		u32 ebx = cpuid_ebx(0x80000007);

		mce_amd_feature_init(c);
		mce_flags.overflow_recov = !!(ebx & BIT(0));
		mce_flags.succor = !!(ebx & BIT(1));
		break;
	}

	default:
		break;
	}
}

static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c)
{
	switch (c->x86_vendor) {
	case X86_VENDOR_INTEL:
		mce_intel_feature_clear(c);
		break;
	default:
		break;
	}
}

static void mce_start_timer(unsigned int cpu, struct timer_list *t)
{
	unsigned long iv = check_interval * HZ;

	if (mca_cfg.ignore_ce || !iv)
		return;

	per_cpu(mce_next_interval, cpu) = iv;

	t->expires = round_jiffies(jiffies + iv);
	add_timer_on(t, cpu);
}

static void __mcheck_cpu_init_timer(void)
{
	struct timer_list *t = this_cpu_ptr(&mce_timer);
	unsigned int cpu = smp_processor_id();

	setup_timer(t, mce_timer_fn, cpu);
	mce_start_timer(cpu, t);
}

/* Handle unconfigured int18 (should never happen) */
static void unexpected_machine_check(struct pt_regs *regs, long error_code)
{
	pr_err("CPU#%d: Unexpected int18 (Machine Check)\n",
	       smp_processor_id());
}

/* Call the installed machine check handler for this CPU setup. */
void (*machine_check_vector)(struct pt_regs *, long error_code) =
						unexpected_machine_check;

/*
 * Called for each booted CPU to set up machine checks.
 * Must be called with preempt off:
 */
void mcheck_cpu_init(struct cpuinfo_x86 *c)
{
	if (mca_cfg.disabled)
		return;

	if (__mcheck_cpu_ancient_init(c))
		return;

	if (!mce_available(c))
		return;

	if (__mcheck_cpu_cap_init() < 0 || __mcheck_cpu_apply_quirks(c) < 0) {
		mca_cfg.disabled = true;
		return;
	}

	if (mce_gen_pool_init()) {
		mca_cfg.disabled = true;
		pr_emerg("Couldn't allocate MCE records pool!\n");
		return;
	}

	machine_check_vector = do_machine_check;

	__mcheck_cpu_init_generic();
	__mcheck_cpu_init_vendor(c);
	__mcheck_cpu_init_timer();
}

/*
 * Called for each booted CPU to clear some machine checks opt-ins
 */
void mcheck_cpu_clear(struct cpuinfo_x86 *c)
{
	if (mca_cfg.disabled)
		return;

	if (!mce_available(c))
		return;

	/*
	 * Possibly to clear general settings generic to x86
	 * __mcheck_cpu_clear_generic(c);
	 */
	__mcheck_cpu_clear_vendor(c);
}

/*
 * mce_chrdev: Character device /dev/mcelog to read and clear the MCE log.
 */

static DEFINE_SPINLOCK(mce_chrdev_state_lock);
static int mce_chrdev_open_count;	/* #times opened */
static int mce_chrdev_open_exclu;	/* already open exclusive? */

static int mce_chrdev_open(struct inode *inode, struct file *file)
{
	spin_lock(&mce_chrdev_state_lock);

	if (mce_chrdev_open_exclu ||
	    (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
		spin_unlock(&mce_chrdev_state_lock);

		return -EBUSY;
	}

	if (file->f_flags & O_EXCL)
		mce_chrdev_open_exclu = 1;
	mce_chrdev_open_count++;

	spin_unlock(&mce_chrdev_state_lock);

	return nonseekable_open(inode, file);
}

static int mce_chrdev_release(struct inode *inode, struct file *file)
{
	spin_lock(&mce_chrdev_state_lock);

	mce_chrdev_open_count--;
	mce_chrdev_open_exclu = 0;

	spin_unlock(&mce_chrdev_state_lock);

	return 0;
}

d88203d1
TG
1752static void collect_tscs(void *data)
1753{
1da177e4 1754 unsigned long *cpu_tsc = (unsigned long *)data;
d88203d1 1755
4ea1636b 1756 cpu_tsc[smp_processor_id()] = rdtsc();
d88203d1 1757}
1da177e4 1758
482908b4
HY
1759static int mce_apei_read_done;
1760
1761/* Collect MCE record of previous boot in persistent storage via APEI ERST. */
1762static int __mce_read_apei(char __user **ubuf, size_t usize)
1763{
1764 int rc;
1765 u64 record_id;
1766 struct mce m;
1767
1768 if (usize < sizeof(struct mce))
1769 return -EINVAL;
1770
1771 rc = apei_read_mce(&m, &record_id);
1772 /* Error or no more MCE record */
1773 if (rc <= 0) {
1774 mce_apei_read_done = 1;
fadd85f1
NH
1775 /*
1776 * When ERST is disabled, mce_chrdev_read() should return
1777 * "no record" instead of "no device."
1778 */
1779 if (rc == -ENODEV)
1780 return 0;
482908b4
HY
1781 return rc;
1782 }
1783 rc = -EFAULT;
1784 if (copy_to_user(*ubuf, &m, sizeof(struct mce)))
1785 return rc;
1786 /*
1787 * In fact, we should have cleared the record after that has
1788 * been flushed to the disk or sent to network in
1789 * /sbin/mcelog, but we have no interface to support that now,
1790 * so just clear it to avoid duplication.
1791 */
1792 rc = apei_clear_mce(record_id);
1793 if (rc) {
1794 mce_apei_read_done = 1;
1795 return rc;
1796 }
1797 *ubuf += sizeof(struct mce);
1798
1799 return 0;
1800}

static ssize_t mce_chrdev_read(struct file *filp, char __user *ubuf,
				size_t usize, loff_t *off)
{
	char __user *buf = ubuf;
	unsigned long *cpu_tsc;
	unsigned prev, next;
	int i, err;

	cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL);
	if (!cpu_tsc)
		return -ENOMEM;

	mutex_lock(&mce_chrdev_read_mutex);

	if (!mce_apei_read_done) {
		err = __mce_read_apei(&buf, usize);
		if (err || buf != ubuf)
			goto out;
	}

	next = mce_log_get_idx_check(mcelog.next);

	/* Only supports full reads right now */
	err = -EINVAL;
	if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce))
		goto out;

	err = 0;
	prev = 0;
	do {
		for (i = prev; i < next; i++) {
			unsigned long start = jiffies;
			struct mce *m = &mcelog.entry[i];

			while (!m->finished) {
				if (time_after_eq(jiffies, start + 2)) {
					memset(m, 0, sizeof(*m));
					goto timeout;
				}
				cpu_relax();
			}
			smp_rmb();
			err |= copy_to_user(buf, m, sizeof(*m));
			buf += sizeof(*m);
timeout:
			;
		}

		memset(mcelog.entry + prev, 0,
		       (next - prev) * sizeof(struct mce));
		prev = next;
		next = cmpxchg(&mcelog.next, prev, 0);
	} while (next != prev);

	synchronize_sched();

	/*
	 * Collect entries that were still getting written before the
	 * synchronize.
	 */
	on_each_cpu(collect_tscs, cpu_tsc, 1);

	for (i = next; i < MCE_LOG_LEN; i++) {
		struct mce *m = &mcelog.entry[i];

		if (m->finished && m->tsc < cpu_tsc[m->cpu]) {
			err |= copy_to_user(buf, m, sizeof(*m));
			smp_rmb();
			buf += sizeof(*m);
			memset(m, 0, sizeof(*m));
		}
	}

	if (err)
		err = -EFAULT;

out:
	mutex_unlock(&mce_chrdev_read_mutex);
	kfree(cpu_tsc);

	return err ? err : buf - ubuf;
}
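
/*
 * Illustrative userspace sketch (not built here): only full reads are
 * supported, so a consumer must hand in a buffer for the entire log at once
 * (record and log sizes can be queried via the ioctls below):
 *
 *	char buf[MCE_LOG_LEN * sizeof(struct mce)];
 *	ssize_t n = read(fd, buf, sizeof(buf));
 *	// n is a multiple of sizeof(struct mce); reading also clears the log
 */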

static unsigned int mce_chrdev_poll(struct file *file, poll_table *wait)
{
	poll_wait(file, &mce_chrdev_wait, wait);
	if (READ_ONCE(mcelog.next))
		return POLLIN | POLLRDNORM;
	if (!mce_apei_read_done && apei_check_mce())
		return POLLIN | POLLRDNORM;
	return 0;
}

static long mce_chrdev_ioctl(struct file *f, unsigned int cmd,
				unsigned long arg)
{
	int __user *p = (int __user *)arg;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (cmd) {
	case MCE_GET_RECORD_LEN:
		return put_user(sizeof(struct mce), p);
	case MCE_GET_LOG_LEN:
		return put_user(MCE_LOG_LEN, p);
	case MCE_GETCLEAR_FLAGS: {
		unsigned flags;

		do {
			flags = mcelog.flags;
		} while (cmpxchg(&mcelog.flags, flags, 0) != flags);

		return put_user(flags, p);
	}
	default:
		return -ENOTTY;
	}
}
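
/*
 * Illustrative userspace sketch (not built here): a CAP_SYS_ADMIN consumer
 * sizes its buffers via the ioctls above before issuing the full read:
 *
 *	int reclen, loglen;
 *	ioctl(fd, MCE_GET_RECORD_LEN, &reclen);	// == sizeof(struct mce)
 *	ioctl(fd, MCE_GET_LOG_LEN, &loglen);	// == MCE_LOG_LEN
 */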

static ssize_t (*mce_write)(struct file *filp, const char __user *ubuf,
			    size_t usize, loff_t *off);

void register_mce_write_callback(ssize_t (*fn)(struct file *filp,
				 const char __user *ubuf,
				 size_t usize, loff_t *off))
{
	mce_write = fn;
}
EXPORT_SYMBOL_GPL(register_mce_write_callback);

static ssize_t mce_chrdev_write(struct file *filp, const char __user *ubuf,
				size_t usize, loff_t *off)
{
	if (mce_write)
		return mce_write(filp, ubuf, usize, off);
	else
		return -EINVAL;
}

static const struct file_operations mce_chrdev_ops = {
	.open			= mce_chrdev_open,
	.release		= mce_chrdev_release,
	.read			= mce_chrdev_read,
	.write			= mce_chrdev_write,
	.poll			= mce_chrdev_poll,
	.unlocked_ioctl		= mce_chrdev_ioctl,
	.llseek			= no_llseek,
};

static struct miscdevice mce_chrdev_device = {
	MISC_MCELOG_MINOR,
	"mcelog",
	&mce_chrdev_ops,
};
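
/*
 * Hedged example of the write hook above (the in-tree user is the mce-inject
 * module; the callback name here is made up for illustration):
 *
 *	static ssize_t inject_write(struct file *filp, const char __user *ubuf,
 *				    size_t usize, loff_t *off)
 *	{
 *		// copy a struct mce from ubuf and feed it into the handler
 *		return usize;
 *	}
 *
 *	register_mce_write_callback(inject_write);
 */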

static void __mce_disable_bank(void *arg)
{
	int bank = *((int *)arg);

	__clear_bit(bank, this_cpu_ptr(mce_poll_banks));
	cmci_disable_bank(bank);
}

void mce_disable_bank(int bank)
{
	if (bank >= mca_cfg.banks) {
		pr_warn(FW_BUG
			"Ignoring request to disable invalid MCA bank %d.\n",
			bank);
		return;
	}
	set_bit(bank, mce_banks_ce_disabled);
	on_each_cpu(__mce_disable_bank, &bank, 1);
}
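
/*
 * Hedged usage sketch: firmware-first platforms (APEI/HEST) call this so the
 * OS stops consuming corrected errors from banks the BIOS claims for itself;
 * the bank number below is made up for illustration.
 *
 *	mce_disable_bank(4);
 */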

/*
 * mce=off			Disables machine check
 * mce=no_cmci			Disables CMCI
 * mce=no_lmce			Disables LMCE
 * mce=dont_log_ce		Clears corrected events silently, no log created for CEs.
 * mce=ignore_ce		Disables polling and CMCI, corrected events are not cleared.
 * mce=TOLERANCELEVEL[,monarchtimeout] (number, see above)
 *	monarchtimeout is how long to wait for other CPUs on machine
 *	check, or 0 to not wait
 * mce=bootlog			Log MCEs from before booting. Disabled by default on AMD.
 * mce=nobootlog		Don't log MCEs from before booting.
 * mce=bios_cmci_threshold	Don't program the CMCI threshold
 */
static int __init mcheck_enable(char *str)
{
	struct mca_config *cfg = &mca_cfg;

	if (*str == 0) {
		enable_p5_mce();
		return 1;
	}
	if (*str == '=')
		str++;
	if (!strcmp(str, "off"))
		cfg->disabled = true;
	else if (!strcmp(str, "no_cmci"))
		cfg->cmci_disabled = true;
	else if (!strcmp(str, "no_lmce"))
		cfg->lmce_disabled = true;
	else if (!strcmp(str, "dont_log_ce"))
		cfg->dont_log_ce = true;
	else if (!strcmp(str, "ignore_ce"))
		cfg->ignore_ce = true;
	else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
		cfg->bootlog = (str[0] == 'b');
	else if (!strcmp(str, "bios_cmci_threshold"))
		cfg->bios_cmci_threshold = true;
	else if (isdigit(str[0])) {
		if (get_option(&str, &cfg->tolerant) == 2)
			get_option(&str, &(cfg->monarch_timeout));
	} else {
		pr_info("mce argument %s ignored. Please use /sys\n", str);
		return 0;
	}
	return 1;
}
__setup("mce", mcheck_enable);
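
/*
 * Example command lines accepted by the parser above (illustrative; the
 * monarch timeout is believed to be in microseconds in this era):
 *
 *	mce=off			disable machine checks completely
 *	mce=no_cmci		keep MCE, but do not use CMCI
 *	mce=2,1000000		tolerant=2, monarch_timeout=1000000
 */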

int __init mcheck_init(void)
{
	mcheck_intel_therm_init();
	mce_register_decode_chain(&mce_srao_nb);
	mcheck_vendor_init_severity();

	INIT_WORK(&mce_work, mce_process_work);
	init_irq_work(&mce_irq_work, mce_irq_work_cb);

	return 0;
}

/*
 * mce_syscore: PM support
 */

/*
 * Disable machine checks on suspend and shutdown. We can't really handle
 * them later.
 */
static int mce_disable_error_reporting(void)
{
	int i;

	for (i = 0; i < mca_cfg.banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		if (b->init)
			wrmsrl(MSR_IA32_MCx_CTL(i), 0);
	}
	return 0;
}

static int mce_syscore_suspend(void)
{
	return mce_disable_error_reporting();
}

static void mce_syscore_shutdown(void)
{
	mce_disable_error_reporting();
}

/*
 * On resume clear all MCE state. Don't want to see leftovers from the BIOS.
 * Only one CPU is active at this time, the others get re-added later using
 * CPU hotplug:
 */
static void mce_syscore_resume(void)
{
	__mcheck_cpu_init_generic();
	__mcheck_cpu_init_vendor(raw_cpu_ptr(&cpu_info));
}

static struct syscore_ops mce_syscore_ops = {
	.suspend	= mce_syscore_suspend,
	.shutdown	= mce_syscore_shutdown,
	.resume		= mce_syscore_resume,
};
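
/*
 * For reference: each MCA bank i is a group of four architectural MSRs
 * starting at 0x400 + 4*i (MCi_CTL, MCi_STATUS, MCi_ADDR, MCi_MISC);
 * clearing MCi_CTL above masks every error source in that bank.
 */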

/*
 * mce_device: Sysfs support
 */

static void mce_cpu_restart(void *data)
{
	if (!mce_available(raw_cpu_ptr(&cpu_info)))
		return;
	__mcheck_cpu_init_generic();
	__mcheck_cpu_init_timer();
}

/* Reinit MCEs after user configuration changes */
static void mce_restart(void)
{
	mce_timer_delete_all();
	on_each_cpu(mce_cpu_restart, NULL, 1);
}

/* Toggle features for corrected errors */
static void mce_disable_cmci(void *data)
{
	if (!mce_available(raw_cpu_ptr(&cpu_info)))
		return;
	cmci_clear();
}

static void mce_enable_ce(void *all)
{
	if (!mce_available(raw_cpu_ptr(&cpu_info)))
		return;
	cmci_reenable();
	cmci_recheck();
	if (all)
		__mcheck_cpu_init_timer();
}

static struct bus_type mce_subsys = {
	.name		= "machinecheck",
	.dev_name	= "machinecheck",
};

DEFINE_PER_CPU(struct device *, mce_device);

void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);

static inline struct mce_bank *attr_to_bank(struct device_attribute *attr)
{
	return container_of(attr, struct mce_bank, attr);
}

static ssize_t show_bank(struct device *s, struct device_attribute *attr,
			 char *buf)
{
	return sprintf(buf, "%llx\n", attr_to_bank(attr)->ctl);
}

static ssize_t set_bank(struct device *s, struct device_attribute *attr,
			const char *buf, size_t size)
{
	u64 new;

	if (kstrtou64(buf, 0, &new) < 0)
		return -EINVAL;

	attr_to_bank(attr)->ctl = new;
	mce_restart();

	return size;
}

static ssize_t
show_trigger(struct device *s, struct device_attribute *attr, char *buf)
{
	strcpy(buf, mce_helper);
	strcat(buf, "\n");
	return strlen(mce_helper) + 1;
}

static ssize_t set_trigger(struct device *s, struct device_attribute *attr,
			   const char *buf, size_t siz)
{
	char *p;

	strncpy(mce_helper, buf, sizeof(mce_helper));
	mce_helper[sizeof(mce_helper)-1] = 0;
	p = strchr(mce_helper, '\n');

	if (p)
		*p = 0;

	return strlen(mce_helper) + !!p;
}

static ssize_t set_ignore_ce(struct device *s,
			     struct device_attribute *attr,
			     const char *buf, size_t size)
{
	u64 new;

	if (kstrtou64(buf, 0, &new) < 0)
		return -EINVAL;

	if (mca_cfg.ignore_ce ^ !!new) {
		if (new) {
			/* disable ce features */
			mce_timer_delete_all();
			on_each_cpu(mce_disable_cmci, NULL, 1);
			mca_cfg.ignore_ce = true;
		} else {
			/* enable ce features */
			mca_cfg.ignore_ce = false;
			on_each_cpu(mce_enable_ce, (void *)1, 1);
		}
	}
	return size;
}

static ssize_t set_cmci_disabled(struct device *s,
				 struct device_attribute *attr,
				 const char *buf, size_t size)
{
	u64 new;

	if (kstrtou64(buf, 0, &new) < 0)
		return -EINVAL;

	if (mca_cfg.cmci_disabled ^ !!new) {
		if (new) {
			/* disable cmci */
			on_each_cpu(mce_disable_cmci, NULL, 1);
			mca_cfg.cmci_disabled = true;
		} else {
			/* enable cmci */
			mca_cfg.cmci_disabled = false;
			on_each_cpu(mce_enable_ce, NULL, 1);
		}
	}
	return size;
}

static ssize_t store_int_with_restart(struct device *s,
				      struct device_attribute *attr,
				      const char *buf, size_t size)
{
	ssize_t ret = device_store_int(s, attr, buf, size);

	mce_restart();
	return ret;
}

static DEVICE_ATTR(trigger, 0644, show_trigger, set_trigger);
static DEVICE_INT_ATTR(tolerant, 0644, mca_cfg.tolerant);
static DEVICE_INT_ATTR(monarch_timeout, 0644, mca_cfg.monarch_timeout);
static DEVICE_BOOL_ATTR(dont_log_ce, 0644, mca_cfg.dont_log_ce);

static struct dev_ext_attribute dev_attr_check_interval = {
	__ATTR(check_interval, 0644, device_show_int, store_int_with_restart),
	&check_interval
};

static struct dev_ext_attribute dev_attr_ignore_ce = {
	__ATTR(ignore_ce, 0644, device_show_bool, set_ignore_ce),
	&mca_cfg.ignore_ce
};

static struct dev_ext_attribute dev_attr_cmci_disabled = {
	__ATTR(cmci_disabled, 0644, device_show_bool, set_cmci_disabled),
	&mca_cfg.cmci_disabled
};

static struct device_attribute *mce_device_attrs[] = {
	&dev_attr_tolerant.attr,
	&dev_attr_check_interval.attr,
	&dev_attr_trigger,
	&dev_attr_monarch_timeout.attr,
	&dev_attr_dont_log_ce.attr,
	&dev_attr_ignore_ce.attr,
	&dev_attr_cmci_disabled.attr,
	NULL
};
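
/*
 * Illustrative shell usage (not part of this file): the attributes above
 * appear under /sys/devices/system/machinecheck/machinecheckN/, e.g.:
 *
 *	echo 1 > /sys/devices/system/machinecheck/machinecheck0/ignore_ce
 *	cat /sys/devices/system/machinecheck/machinecheck0/tolerant
 */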

static cpumask_var_t mce_device_initialized;

static void mce_device_release(struct device *dev)
{
	kfree(dev);
}

/* Per cpu device init. All of the cpus still share the same ctrl bank: */
static int mce_device_create(unsigned int cpu)
{
	struct device *dev;
	int err;
	int i, j;

	if (!mce_available(&boot_cpu_data))
		return -EIO;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev->id  = cpu;
	dev->bus = &mce_subsys;
	dev->release = &mce_device_release;

	err = device_register(dev);
	if (err) {
		put_device(dev);
		return err;
	}

	for (i = 0; mce_device_attrs[i]; i++) {
		err = device_create_file(dev, mce_device_attrs[i]);
		if (err)
			goto error;
	}
	for (j = 0; j < mca_cfg.banks; j++) {
		err = device_create_file(dev, &mce_banks[j].attr);
		if (err)
			goto error2;
	}
	cpumask_set_cpu(cpu, mce_device_initialized);
	per_cpu(mce_device, cpu) = dev;

	return 0;
error2:
	while (--j >= 0)
		device_remove_file(dev, &mce_banks[j].attr);
error:
	while (--i >= 0)
		device_remove_file(dev, mce_device_attrs[i]);

	device_unregister(dev);

	return err;
}

static void mce_device_remove(unsigned int cpu)
{
	struct device *dev = per_cpu(mce_device, cpu);
	int i;

	if (!cpumask_test_cpu(cpu, mce_device_initialized))
		return;

	for (i = 0; mce_device_attrs[i]; i++)
		device_remove_file(dev, mce_device_attrs[i]);

	for (i = 0; i < mca_cfg.banks; i++)
		device_remove_file(dev, &mce_banks[i].attr);

	device_unregister(dev);
	cpumask_clear_cpu(cpu, mce_device_initialized);
	per_cpu(mce_device, cpu) = NULL;
}

/* Make sure there are no machine checks on offlined CPUs. */
static void mce_disable_cpu(void *h)
{
	unsigned long action = *(unsigned long *)h;
	int i;

	if (!mce_available(raw_cpu_ptr(&cpu_info)))
		return;

	if (!(action & CPU_TASKS_FROZEN))
		cmci_clear();
	for (i = 0; i < mca_cfg.banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		if (b->init)
			wrmsrl(MSR_IA32_MCx_CTL(i), 0);
	}
}

static void mce_reenable_cpu(void *h)
{
	unsigned long action = *(unsigned long *)h;
	int i;

	if (!mce_available(raw_cpu_ptr(&cpu_info)))
		return;

	if (!(action & CPU_TASKS_FROZEN))
		cmci_reenable();
	for (i = 0; i < mca_cfg.banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		if (b->init)
			wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
	}
}

/* Get notified when a cpu comes on/off. Be hotplug friendly. */
static int
mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct timer_list *t = &per_cpu(mce_timer, cpu);

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		mce_device_create(cpu);
		if (threshold_cpu_callback)
			threshold_cpu_callback(action, cpu);
		break;
	case CPU_DEAD:
		if (threshold_cpu_callback)
			threshold_cpu_callback(action, cpu);
		mce_device_remove(cpu);
		mce_intel_hcpu_update(cpu);

		/* intentionally ignoring frozen here */
		if (!(action & CPU_TASKS_FROZEN))
			cmci_rediscover();
		break;
	case CPU_DOWN_PREPARE:
		smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
		del_timer_sync(t);
		break;
	case CPU_DOWN_FAILED:
		smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
		mce_start_timer(cpu, t);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block mce_cpu_notifier = {
	.notifier_call = mce_cpu_callback,
};

static __init void mce_init_banks(void)
{
	int i;

	for (i = 0; i < mca_cfg.banks; i++) {
		struct mce_bank *b = &mce_banks[i];
		struct device_attribute *a = &b->attr;

		sysfs_attr_init(&a->attr);
		a->attr.name	= b->attrname;
		snprintf(b->attrname, ATTR_LEN, "bank%d", i);

		a->attr.mode	= 0644;
		a->show		= show_bank;
		a->store	= set_bank;
	}
}
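
/*
 * Illustrative shell usage (not part of this file): the per-bank attributes
 * built above show up next to the generic ones, and a write re-initializes
 * MCE via set_bank()/mce_restart():
 *
 *	cat /sys/devices/system/machinecheck/machinecheck0/bank0
 *	echo 0 > /sys/devices/system/machinecheck/machinecheck0/bank4
 */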

static __init int mcheck_init_device(void)
{
	int err;
	int i = 0;

	if (!mce_available(&boot_cpu_data)) {
		err = -EIO;
		goto err_out;
	}

	if (!zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL)) {
		err = -ENOMEM;
		goto err_out;
	}

	mce_init_banks();

	err = subsys_system_register(&mce_subsys, NULL);
	if (err)
		goto err_out_mem;

	cpu_notifier_register_begin();
	for_each_online_cpu(i) {
		err = mce_device_create(i);
		if (err) {
			/*
			 * Register notifier anyway (and do not unreg it) so
			 * that we don't leave undeleted timers, see notifier
			 * callback above.
			 */
			__register_hotcpu_notifier(&mce_cpu_notifier);
			cpu_notifier_register_done();
			goto err_device_create;
		}
	}

	__register_hotcpu_notifier(&mce_cpu_notifier);
	cpu_notifier_register_done();

	register_syscore_ops(&mce_syscore_ops);

	/* register character device /dev/mcelog */
	err = misc_register(&mce_chrdev_device);
	if (err)
		goto err_register;

	return 0;

err_register:
	unregister_syscore_ops(&mce_syscore_ops);

err_device_create:
	/*
	 * We didn't keep track of which devices were created above, but
	 * even if we had, the set of online cpus might have changed.
	 * Play safe and remove for every possible cpu, since
	 * mce_device_remove() will do the right thing.
	 */
	for_each_possible_cpu(i)
		mce_device_remove(i);

err_out_mem:
	free_cpumask_var(mce_device_initialized);

err_out:
	pr_err("Unable to init device /dev/mcelog (rc: %d)\n", err);

	return err;
}
device_initcall_sync(mcheck_init_device);

/*
 * Old style boot options parsing. Only for compatibility.
 */
static int __init mcheck_disable(char *str)
{
	mca_cfg.disabled = true;
	return 1;
}
__setup("nomce", mcheck_disable);

#ifdef CONFIG_DEBUG_FS
struct dentry *mce_get_debugfs_dir(void)
{
	static struct dentry *dmce;

	if (!dmce)
		dmce = debugfs_create_dir("mce", NULL);

	return dmce;
}

static void mce_reset(void)
{
	cpu_missing = 0;
	atomic_set(&mce_fake_panicked, 0);
	atomic_set(&mce_executing, 0);
	atomic_set(&mce_callin, 0);
	atomic_set(&global_nwo, 0);
}

static int fake_panic_get(void *data, u64 *val)
{
	*val = fake_panic;
	return 0;
}

static int fake_panic_set(void *data, u64 val)
{
	mce_reset();
	fake_panic = val;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fake_panic_fops, fake_panic_get,
			fake_panic_set, "%llu\n");

static int __init mcheck_debugfs_init(void)
{
	struct dentry *dmce, *ffake_panic;

	dmce = mce_get_debugfs_dir();
	if (!dmce)
		return -ENOMEM;
	ffake_panic = debugfs_create_file("fake_panic", 0444, dmce, NULL,
					  &fake_panic_fops);
	if (!ffake_panic)
		return -ENOMEM;

	return 0;
}
#else
static int __init mcheck_debugfs_init(void) { return -EINVAL; }
#endif
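
/*
 * Illustrative shell usage (not part of this file): with CONFIG_DEBUG_FS and
 * debugfs mounted in the usual place, the knob above is reachable as:
 *
 *	cat /sys/kernel/debug/mce/fake_panic
 *
 * A non-zero value makes the panic path only pretend to panic, which is
 * intended for testing.
 */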

static int __init mcheck_late_init(void)
{
	mcheck_debugfs_init();

	/*
	 * Flush out everything that has been logged during early boot, now
	 * that everything has been initialized (workqueues, decoders, ...).
	 */
	mce_schedule_work();

	return 0;
}
late_initcall(mcheck_late_init);