/*
 * Machine check handler.
 *
 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
 * Rest from unknown author(s).
 * 2004 Andi Kleen. Rewrote most of it.
 * Copyright 2008 Intel Corporation
 * Author: Andi Kleen
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/thread_info.h>
#include <linux/capability.h>
#include <linux/miscdevice.h>
#include <linux/ratelimit.h>
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>
#include <linux/kobject.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/syscore_ops.h>
#include <linux/delay.h>
#include <linux/ctype.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/debugfs.h>
#include <linux/irq_work.h>
#include <linux/export.h>

#include <asm/processor.h>
#include <asm/traps.h>
#include <asm/tlbflush.h>
#include <asm/mce.h>
#include <asm/msr.h>

#include "mce-internal.h"

static DEFINE_MUTEX(mce_chrdev_read_mutex);

#define mce_log_get_idx_check(p) \
({ \
	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
			 !lockdep_is_held(&mce_chrdev_read_mutex), \
			 "suspicious mce_log_get_idx_check() usage"); \
	smp_load_acquire(&(p)); \
})

#define CREATE_TRACE_POINTS
#include <trace/events/mce.h>

#define SPINUNIT 100	/* 100ns */

DEFINE_PER_CPU(unsigned, mce_exception_count);

struct mce_bank *mce_banks __read_mostly;
struct mce_vendor_flags mce_flags __read_mostly;

struct mca_config mca_cfg __read_mostly = {
	.bootlog  = -1,
	/*
	 * Tolerant levels:
	 * 0: always panic on uncorrected errors, log corrected errors
	 * 1: panic or SIGBUS on uncorrected errors, log corrected errors
	 * 2: SIGBUS or log uncorrected errors (if possible), log corr. errors
	 * 3: never panic or SIGBUS, log all errors (for testing only)
	 */
	.tolerant = 1,
	.monarch_timeout = -1
};

/* User mode helper program triggered by machine check event */
static unsigned long	mce_need_notify;
static char		mce_helper[128];
static char		*mce_helper_argv[2] = { mce_helper, NULL };

static DECLARE_WAIT_QUEUE_HEAD(mce_chrdev_wait);

static DEFINE_PER_CPU(struct mce, mces_seen);
static int		cpu_missing;

/*
 * MCA banks polled by the period polling timer for corrected events.
 * With Intel CMCI, this only has MCA banks which do not support CMCI (if any).
 */
DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
	[0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
};

/*
 * MCA banks controlled through firmware first for corrected errors.
 * This is a global list of banks for which we won't enable CMCI and we
 * won't poll. Firmware controls these banks and is responsible for
 * reporting corrected errors through GHES. Uncorrected/recoverable
 * errors are still notified through a machine check.
 */
mce_banks_t mce_banks_ce_disabled;

static struct work_struct mce_work;
static struct irq_work mce_irq_work;

static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);
static int mce_usable_address(struct mce *m);

/*
 * CPU/chipset specific EDAC code can register a notifier call here to print
 * MCE errors in a human-readable form.
 */
ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain);

/* Do initial initialization of a struct mce */
void mce_setup(struct mce *m)
{
	memset(m, 0, sizeof(struct mce));
	m->cpu = m->extcpu = smp_processor_id();
	m->tsc = rdtsc();
	/* We hope get_seconds stays lockless */
	m->time = get_seconds();
	m->cpuvendor = boot_cpu_data.x86_vendor;
	m->cpuid = cpuid_eax(1);
	m->socketid = cpu_data(m->extcpu).phys_proc_id;
	m->apicid = cpu_data(m->extcpu).initial_apicid;
	rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap);
}

DEFINE_PER_CPU(struct mce, injectm);
EXPORT_PER_CPU_SYMBOL_GPL(injectm);

/*
 * Lockless MCE logging infrastructure.
 * This avoids deadlocks on printk locks without having to break locks. Also
 * separate MCEs from kernel messages to avoid bogus bug reports.
 */

static struct mce_log mcelog = {
	.signature	= MCE_LOG_SIGNATURE,
	.len		= MCE_LOG_LEN,
	.recordlen	= sizeof(struct mce),
};

void mce_log(struct mce *mce)
{
	unsigned next, entry;

	/* Emit the trace record: */
	trace_mce_record(mce);

	if (!mce_gen_pool_add(mce))
		irq_work_queue(&mce_irq_work);

	mce->finished = 0;
	wmb();
	for (;;) {
		entry = mce_log_get_idx_check(mcelog.next);
		for (;;) {

			/*
			 * When the buffer fills up discard new entries.
			 * Assume that the earlier errors are the more
			 * interesting ones:
			 */
			if (entry >= MCE_LOG_LEN) {
				set_bit(MCE_OVERFLOW,
					(unsigned long *)&mcelog.flags);
				return;
			}
			/* Old left over entry. Skip: */
			if (mcelog.entry[entry].finished) {
				entry++;
				continue;
			}
			break;
		}
		smp_rmb();
		next = entry + 1;
		if (cmpxchg(&mcelog.next, entry, next) == entry)
			break;
	}
	memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
	wmb();
	mcelog.entry[entry].finished = 1;
	wmb();

	mce->finished = 1;
	set_bit(0, &mce_need_notify);
}

void mce_inject_log(struct mce *m)
{
	mutex_lock(&mce_chrdev_read_mutex);
	mce_log(m);
	mutex_unlock(&mce_chrdev_read_mutex);
}
EXPORT_SYMBOL_GPL(mce_inject_log);

static struct notifier_block mce_srao_nb;

void mce_register_decode_chain(struct notifier_block *nb)
{
	/* Ensure SRAO notifier has the highest priority in the decode chain. */
	if (nb != &mce_srao_nb && nb->priority == INT_MAX)
		nb->priority -= 1;

	atomic_notifier_chain_register(&x86_mce_decoder_chain, nb);
}
EXPORT_SYMBOL_GPL(mce_register_decode_chain);

void mce_unregister_decode_chain(struct notifier_block *nb)
{
	atomic_notifier_chain_unregister(&x86_mce_decoder_chain, nb);
}
EXPORT_SYMBOL_GPL(mce_unregister_decode_chain);

static void print_mce(struct mce *m)
{
	int ret = 0;

	pr_emerg(HW_ERR "CPU %d: Machine Check Exception: %Lx Bank %d: %016Lx\n",
	       m->extcpu, m->mcgstatus, m->bank, m->status);

	if (m->ip) {
		pr_emerg(HW_ERR "RIP%s %02x:<%016Lx> ",
			!(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
				m->cs, m->ip);

		if (m->cs == __KERNEL_CS)
			print_symbol("{%s}", m->ip);
		pr_cont("\n");
	}

	pr_emerg(HW_ERR "TSC %llx ", m->tsc);
	if (m->addr)
		pr_cont("ADDR %llx ", m->addr);
	if (m->misc)
		pr_cont("MISC %llx ", m->misc);

	pr_cont("\n");
	/*
	 * Note this output is parsed by external tools and old fields
	 * should not be changed.
	 */
	pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n",
		m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid,
		cpu_data(m->extcpu).microcode);

	/*
	 * Print out human-readable details about the MCE error,
	 * (if the CPU has an implementation for that)
	 */
	ret = atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m);
	if (ret == NOTIFY_STOP)
		return;

	pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n");
}

#define PANIC_TIMEOUT 5 /* 5 seconds */

static atomic_t mce_panicked;

static int fake_panic;
static atomic_t mce_fake_panicked;

/* Panic in progress. Enable interrupts and wait for final IPI */
static void wait_for_panic(void)
{
	long timeout = PANIC_TIMEOUT*USEC_PER_SEC;

	preempt_disable();
	local_irq_enable();
	while (timeout-- > 0)
		udelay(1);
	if (panic_timeout == 0)
		panic_timeout = mca_cfg.panic_timeout;
	panic("Panicing machine check CPU died");
}

static void mce_panic(const char *msg, struct mce *final, char *exp)
{
	int i, apei_err = 0;

	if (!fake_panic) {
		/*
		 * Make sure only one CPU runs in machine check panic
		 */
		if (atomic_inc_return(&mce_panicked) > 1)
			wait_for_panic();
		barrier();

		bust_spinlocks(1);
		console_verbose();
	} else {
		/* Don't log too much for fake panic */
		if (atomic_inc_return(&mce_fake_panicked) > 1)
			return;
	}
	/* First print corrected ones that are still unlogged */
	for (i = 0; i < MCE_LOG_LEN; i++) {
		struct mce *m = &mcelog.entry[i];
		if (!(m->status & MCI_STATUS_VAL))
			continue;
		if (!(m->status & MCI_STATUS_UC)) {
			print_mce(m);
			if (!apei_err)
				apei_err = apei_write_mce(m);
		}
	}
	/* Now print uncorrected but with the final one last */
	for (i = 0; i < MCE_LOG_LEN; i++) {
		struct mce *m = &mcelog.entry[i];
		if (!(m->status & MCI_STATUS_VAL))
			continue;
		if (!(m->status & MCI_STATUS_UC))
			continue;
		if (!final || memcmp(m, final, sizeof(struct mce))) {
			print_mce(m);
			if (!apei_err)
				apei_err = apei_write_mce(m);
		}
	}
	if (final) {
		print_mce(final);
		if (!apei_err)
			apei_err = apei_write_mce(final);
	}
	if (cpu_missing)
		pr_emerg(HW_ERR "Some CPUs didn't answer in synchronization\n");
	if (exp)
		pr_emerg(HW_ERR "Machine check: %s\n", exp);
	if (!fake_panic) {
		if (panic_timeout == 0)
			panic_timeout = mca_cfg.panic_timeout;
		panic(msg);
	} else
		pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
}

/* Support code for software error injection */

static int msr_to_offset(u32 msr)
{
	unsigned bank = __this_cpu_read(injectm.bank);

	if (msr == mca_cfg.rip_msr)
		return offsetof(struct mce, ip);
	if (msr == MSR_IA32_MCx_STATUS(bank))
		return offsetof(struct mce, status);
	if (msr == MSR_IA32_MCx_ADDR(bank))
		return offsetof(struct mce, addr);
	if (msr == MSR_IA32_MCx_MISC(bank))
		return offsetof(struct mce, misc);
	if (msr == MSR_IA32_MCG_STATUS)
		return offsetof(struct mce, mcgstatus);
	return -1;
}

/* MSR access wrappers used for error injection */
static u64 mce_rdmsrl(u32 msr)
{
	u64 v;

	if (__this_cpu_read(injectm.finished)) {
		int offset = msr_to_offset(msr);

		if (offset < 0)
			return 0;
		return *(u64 *)((char *)this_cpu_ptr(&injectm) + offset);
	}

	if (rdmsrl_safe(msr, &v)) {
		WARN_ONCE(1, "mce: Unable to read msr %d!\n", msr);
		/*
		 * Return zero in case the access faulted. This should
		 * not happen normally but can happen if the CPU does
		 * something weird, or if the code is buggy.
		 */
		v = 0;
	}

	return v;
}

static void mce_wrmsrl(u32 msr, u64 v)
{
	if (__this_cpu_read(injectm.finished)) {
		int offset = msr_to_offset(msr);

		if (offset >= 0)
			*(u64 *)((char *)this_cpu_ptr(&injectm) + offset) = v;
		return;
	}
	wrmsrl(msr, v);
}

/*
 * Collect all global (w.r.t. this processor) status about this machine
 * check into our "mce" struct so that we can use it later to assess
 * the severity of the problem as we read per-bank specific details.
 */
static inline void mce_gather_info(struct mce *m, struct pt_regs *regs)
{
	mce_setup(m);

	m->mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
	if (regs) {
		/*
		 * Get the address of the instruction at the time of
		 * the machine check error.
		 */
		if (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) {
			m->ip = regs->ip;
			m->cs = regs->cs;

			/*
			 * When in VM86 mode make the cs look like ring 3
			 * always. This is a lie, but it's better than passing
			 * the additional vm86 bit around everywhere.
			 */
			if (v8086_mode(regs))
				m->cs |= 3;
		}
		/* Use accurate RIP reporting if available. */
		if (mca_cfg.rip_msr)
			m->ip = mce_rdmsrl(mca_cfg.rip_msr);
	}
}

int mce_available(struct cpuinfo_x86 *c)
{
	if (mca_cfg.disabled)
		return 0;
	return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
}

static void mce_schedule_work(void)
{
	if (!mce_gen_pool_empty() && keventd_up())
		schedule_work(&mce_work);
}

static void mce_irq_work_cb(struct irq_work *entry)
{
	mce_notify_irq();
	mce_schedule_work();
}

static void mce_report_event(struct pt_regs *regs)
{
	if (regs->flags & (X86_VM_MASK|X86_EFLAGS_IF)) {
		mce_notify_irq();
		/*
		 * Triggering the work queue here is just an insurance
		 * policy in case the syscall exit notify handler
		 * doesn't run soon enough or ends up running on the
		 * wrong CPU (can happen when audit sleeps)
		 */
		mce_schedule_work();
		return;
	}

	irq_work_queue(&mce_irq_work);
}

static int srao_decode_notifier(struct notifier_block *nb, unsigned long val,
				void *data)
{
	struct mce *mce = (struct mce *)data;
	unsigned long pfn;

	if (!mce)
		return NOTIFY_DONE;

	if (mce->usable_addr && (mce->severity == MCE_AO_SEVERITY)) {
		pfn = mce->addr >> PAGE_SHIFT;
		memory_failure(pfn, MCE_VECTOR, 0);
	}

	return NOTIFY_OK;
}
static struct notifier_block mce_srao_nb = {
	.notifier_call	= srao_decode_notifier,
	.priority = INT_MAX,
};

/*
 * Read ADDR and MISC registers.
 */
static void mce_read_aux(struct mce *m, int i)
{
	if (m->status & MCI_STATUS_MISCV)
		m->misc = mce_rdmsrl(MSR_IA32_MCx_MISC(i));
	if (m->status & MCI_STATUS_ADDRV) {
		m->addr = mce_rdmsrl(MSR_IA32_MCx_ADDR(i));

		/*
		 * Mask the reported address by the reported granularity.
		 */
		if (mca_cfg.ser && (m->status & MCI_STATUS_MISCV)) {
			u8 shift = MCI_MISC_ADDR_LSB(m->misc);
			m->addr >>= shift;
			m->addr <<= shift;
		}
	}
}

static bool memory_error(struct mce *m)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (c->x86_vendor == X86_VENDOR_AMD) {
		/*
		 * coming soon
		 */
		return false;
	} else if (c->x86_vendor == X86_VENDOR_INTEL) {
		/*
		 * Intel SDM Volume 3B - 15.9.2 Compound Error Codes
		 *
		 * Bit 7 of the MCACOD field of IA32_MCi_STATUS is used for
		 * indicating a memory error. Bit 8 is used for indicating a
		 * cache hierarchy error. The combination of bit 2 and bit 3
		 * is used for indicating a `generic' cache hierarchy error
		 * But we can't just blindly check the above bits, because if
		 * bit 11 is set, then it is a bus/interconnect error - and
		 * either way the above bits just gives more detail on what
		 * bus/interconnect error happened. Note that bit 12 can be
		 * ignored, as it's the "filter" bit.
		 */
		return (m->status & 0xef80) == BIT(7) ||
		       (m->status & 0xef00) == BIT(8) ||
		       (m->status & 0xeffc) == 0xc;
	}

	return false;
}

DEFINE_PER_CPU(unsigned, mce_poll_count);

/*
 * Poll for corrected events or events that happened before reset.
 * Those are just logged through /dev/mcelog.
 *
 * This is executed in standard interrupt context.
 *
 * Note: spec recommends to panic for fatal unsignalled
 * errors here. However this would be quite problematic --
 * we would need to reimplement the Monarch handling and
 * it would mess up the exclusion between exception handler
 * and poll handler -- so we skip this for now.
 * These cases should not happen anyways, or only when the CPU
 * is already totally confused. In this case it's likely it will
 * not fully execute the machine check handler either.
 */
bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
{
	bool error_logged = false;
	struct mce m;
	int severity;
	int i;

	this_cpu_inc(mce_poll_count);

	mce_gather_info(&m, NULL);

	for (i = 0; i < mca_cfg.banks; i++) {
		if (!mce_banks[i].ctl || !test_bit(i, *b))
			continue;

		m.misc = 0;
		m.addr = 0;
		m.bank = i;
		m.tsc = 0;

		barrier();
		m.status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
		if (!(m.status & MCI_STATUS_VAL))
			continue;


		/*
		 * Uncorrected or signalled events are handled by the exception
		 * handler when it is enabled, so don't process those here.
		 *
		 * TBD do the same check for MCI_STATUS_EN here?
		 */
		if (!(flags & MCP_UC) &&
		    (m.status & (mca_cfg.ser ? MCI_STATUS_S : MCI_STATUS_UC)))
			continue;

		mce_read_aux(&m, i);

		if (!(flags & MCP_TIMESTAMP))
			m.tsc = 0;

		severity = mce_severity(&m, mca_cfg.tolerant, NULL, false);

		/*
		 * In the cases where we don't have a valid address after all,
		 * do not add it into the ring buffer.
		 */
		if (severity == MCE_DEFERRED_SEVERITY && memory_error(&m)) {
			if (m.status & MCI_STATUS_ADDRV) {
				m.severity = severity;
				m.usable_addr = mce_usable_address(&m);

				if (!mce_gen_pool_add(&m))
					mce_schedule_work();
			}
		}

		/*
		 * Don't get the IP here because it's unlikely to
		 * have anything to do with the actual error location.
		 */
		if (!(flags & MCP_DONTLOG) && !mca_cfg.dont_log_ce) {
			error_logged = true;
			mce_log(&m);
		}

		/*
		 * Clear state for this bank.
		 */
		mce_wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
	}

	/*
	 * Don't clear MCG_STATUS here because it's only defined for
	 * exceptions.
	 */

	sync_core();

	return error_logged;
}
EXPORT_SYMBOL_GPL(machine_check_poll);

/*
 * Do a quick check if any of the events requires a panic.
 * This decides if we keep the events around or clear them.
 */
static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
			  struct pt_regs *regs)
{
	int i, ret = 0;
	char *tmp;

	for (i = 0; i < mca_cfg.banks; i++) {
		m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
		if (m->status & MCI_STATUS_VAL) {
			__set_bit(i, validp);
			if (quirk_no_way_out)
				quirk_no_way_out(i, m, regs);
		}

		if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
			*msg = tmp;
			ret = 1;
		}
	}
	return ret;
}

/*
 * Variable to establish order between CPUs while scanning.
 * Each CPU spins initially until executing is equal its number.
 */
static atomic_t mce_executing;

/*
 * Defines order of CPUs on entry. First CPU becomes Monarch.
 */
static atomic_t mce_callin;

/*
 * Check if a timeout waiting for other CPUs happened.
 */
static int mce_timed_out(u64 *t, const char *msg)
{
	/*
	 * The others already did panic for some reason.
	 * Bail out like in a timeout.
	 * rmb() to tell the compiler that system_state
	 * might have been modified by someone else.
	 */
	rmb();
	if (atomic_read(&mce_panicked))
		wait_for_panic();
	if (!mca_cfg.monarch_timeout)
		goto out;
	if ((s64)*t < SPINUNIT) {
		if (mca_cfg.tolerant <= 1)
			mce_panic(msg, NULL, NULL);
		cpu_missing = 1;
		return 1;
	}
	*t -= SPINUNIT;
out:
	touch_nmi_watchdog();
	return 0;
}

/*
 * The Monarch's reign. The Monarch is the CPU who entered
 * the machine check handler first. It waits for the others to
 * raise the exception too and then grades them. When any
 * error is fatal panic. Only then let the others continue.
 *
 * The other CPUs entering the MCE handler will be controlled by the
 * Monarch. They are called Subjects.
 *
 * This way we prevent any potential data corruption in a unrecoverable case
 * and also makes sure always all CPU's errors are examined.
 *
 * Also this detects the case of a machine check event coming from outer
 * space (not detected by any CPUs) In this case some external agent wants
 * us to shut down, so panic too.
 *
 * The other CPUs might still decide to panic if the handler happens
 * in a unrecoverable place, but in this case the system is in a semi-stable
 * state and won't corrupt anything by itself. It's ok to let the others
 * continue for a bit first.
 *
 * All the spin loops have timeouts; when a timeout happens a CPU
 * typically elects itself to be Monarch.
 */
static void mce_reign(void)
{
	int cpu;
	struct mce *m = NULL;
	int global_worst = 0;
	char *msg = NULL;
	char *nmsg = NULL;

	/*
	 * This CPU is the Monarch and the other CPUs have run
	 * through their handlers.
	 * Grade the severity of the errors of all the CPUs.
	 */
	for_each_possible_cpu(cpu) {
		int severity = mce_severity(&per_cpu(mces_seen, cpu),
					    mca_cfg.tolerant,
					    &nmsg, true);
		if (severity > global_worst) {
			msg = nmsg;
			global_worst = severity;
			m = &per_cpu(mces_seen, cpu);
		}
	}

	/*
	 * Cannot recover? Panic here then.
	 * This dumps all the mces in the log buffer and stops the
	 * other CPUs.
	 */
	if (m && global_worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3)
		mce_panic("Fatal machine check", m, msg);

	/*
	 * For UC somewhere we let the CPU who detects it handle it.
	 * Also must let continue the others, otherwise the handling
	 * CPU could deadlock on a lock.
	 */

	/*
	 * No machine check event found. Must be some external
	 * source or one CPU is hung. Panic.
	 */
	if (global_worst <= MCE_KEEP_SEVERITY && mca_cfg.tolerant < 3)
		mce_panic("Fatal machine check from unknown source", NULL, NULL);

	/*
	 * Now clear all the mces_seen so that they don't reappear on
	 * the next mce.
	 */
	for_each_possible_cpu(cpu)
		memset(&per_cpu(mces_seen, cpu), 0, sizeof(struct mce));
}

static atomic_t global_nwo;

/*
 * Start of Monarch synchronization. This waits until all CPUs have
 * entered the exception handler and then determines if any of them
 * saw a fatal event that requires panic. Then it executes them
 * in the entry order.
 * TBD double check parallel CPU hotunplug
 */
static int mce_start(int *no_way_out)
{
	int order;
	int cpus = num_online_cpus();
	u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;

	if (!timeout)
		return -1;

	atomic_add(*no_way_out, &global_nwo);
	/*
	 * global_nwo should be updated before mce_callin
	 */
	smp_wmb();
	order = atomic_inc_return(&mce_callin);

	/*
	 * Wait for everyone.
	 */
	while (atomic_read(&mce_callin) != cpus) {
		if (mce_timed_out(&timeout,
				  "Timeout: Not all CPUs entered broadcast exception handler")) {
			atomic_set(&global_nwo, 0);
			return -1;
		}
		ndelay(SPINUNIT);
	}

	/*
	 * mce_callin should be read before global_nwo
	 */
	smp_rmb();

	if (order == 1) {
		/*
		 * Monarch: Starts executing now, the others wait.
		 */
		atomic_set(&mce_executing, 1);
	} else {
		/*
		 * Subject: Now start the scanning loop one by one in
		 * the original callin order.
		 * This way when there are any shared banks it will be
		 * only seen by one CPU before cleared, avoiding duplicates.
		 */
		while (atomic_read(&mce_executing) < order) {
			if (mce_timed_out(&timeout,
					  "Timeout: Subject CPUs unable to finish machine check processing")) {
				atomic_set(&global_nwo, 0);
				return -1;
			}
			ndelay(SPINUNIT);
		}
	}

	/*
	 * Cache the global no_way_out state.
	 */
	*no_way_out = atomic_read(&global_nwo);

	return order;
}

/*
 * Synchronize between CPUs after main scanning loop.
 * This invokes the bulk of the Monarch processing.
 */
static int mce_end(int order)
{
	int ret = -1;
	u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;

	if (!timeout)
		goto reset;
	if (order < 0)
		goto reset;

	/*
	 * Allow others to run.
	 */
	atomic_inc(&mce_executing);

	if (order == 1) {
		/* CHECKME: Can this race with a parallel hotplug? */
		int cpus = num_online_cpus();

		/*
		 * Monarch: Wait for everyone to go through their scanning
		 * loops.
		 */
		while (atomic_read(&mce_executing) <= cpus) {
			if (mce_timed_out(&timeout,
					  "Timeout: Monarch CPU unable to finish machine check processing"))
				goto reset;
			ndelay(SPINUNIT);
		}

		mce_reign();
		barrier();
		ret = 0;
	} else {
		/*
		 * Subject: Wait for Monarch to finish.
		 */
		while (atomic_read(&mce_executing) != 0) {
			if (mce_timed_out(&timeout,
					  "Timeout: Monarch CPU did not finish machine check processing"))
				goto reset;
			ndelay(SPINUNIT);
		}

		/*
		 * Don't reset anything. That's done by the Monarch.
		 */
		return 0;
	}

	/*
	 * Reset all global state.
	 */
reset:
	atomic_set(&global_nwo, 0);
	atomic_set(&mce_callin, 0);
	barrier();

	/*
	 * Let others run again.
	 */
	atomic_set(&mce_executing, 0);
	return ret;
}

/*
 * Check if the address reported by the CPU is in a format we can parse.
 * It would be possible to add code for most other cases, but all would
 * be somewhat complicated (e.g. segment offset would require an instruction
 * parser). So only support physical addresses up to page granularity for now.
 */
static int mce_usable_address(struct mce *m)
{
	if (!(m->status & MCI_STATUS_MISCV) || !(m->status & MCI_STATUS_ADDRV))
		return 0;
	if (MCI_MISC_ADDR_LSB(m->misc) > PAGE_SHIFT)
		return 0;
	if (MCI_MISC_ADDR_MODE(m->misc) != MCI_MISC_ADDR_PHYS)
		return 0;
	return 1;
}

static void mce_clear_state(unsigned long *toclear)
{
	int i;

	for (i = 0; i < mca_cfg.banks; i++) {
		if (test_bit(i, toclear))
			mce_wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
	}
}

/*
 * The actual machine check handler. This only handles real
 * exceptions when something got corrupted coming in through int 18.
 *
 * This is executed in NMI context not subject to normal locking rules. This
 * implies that most kernel services cannot be safely used. Don't even
 * think about putting a printk in there!
 *
 * On Intel systems this is entered on all CPUs in parallel through
 * MCE broadcast. However some CPUs might be broken beyond repair,
 * so be always careful when synchronizing with others.
 */
void do_machine_check(struct pt_regs *regs, long error_code)
{
	struct mca_config *cfg = &mca_cfg;
	struct mce m, *final;
	int i;
	int worst = 0;
	int severity;
	/*
	 * Establish sequential order between the CPUs entering the machine
	 * check handler.
	 */
	int order;
	/*
	 * If no_way_out gets set, there is no safe way to recover from this
	 * MCE. If mca_cfg.tolerant is cranked up, we'll try anyway.
	 */
	int no_way_out = 0;
	/*
	 * If kill_it gets set, there might be a way to recover from this
	 * error.
	 */
	int kill_it = 0;
	DECLARE_BITMAP(toclear, MAX_NR_BANKS);
	DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
	char *msg = "Unknown";
	u64 recover_paddr = ~0ull;
	int flags = MF_ACTION_REQUIRED;
	int lmce = 0;

	ist_enter(regs);

	this_cpu_inc(mce_exception_count);

	if (!cfg->banks)
		goto out;

	mce_gather_info(&m, regs);

	final = this_cpu_ptr(&mces_seen);
	*final = m;

	memset(valid_banks, 0, sizeof(valid_banks));
	no_way_out = mce_no_way_out(&m, &msg, valid_banks, regs);

	barrier();

	/*
	 * When no restart IP might need to kill or panic.
	 * Assume the worst for now, but if we find the
	 * severity is MCE_AR_SEVERITY we have other options.
	 */
	if (!(m.mcgstatus & MCG_STATUS_RIPV))
		kill_it = 1;

	/*
	 * Check if this MCE is signaled to only this logical processor
	 */
	if (m.mcgstatus & MCG_STATUS_LMCES)
		lmce = 1;
	else {
		/*
		 * Go through all the banks in exclusion of the other CPUs.
		 * This way we don't report duplicated events on shared banks
		 * because the first one to see it will clear it.
		 * If this is a Local MCE, then no need to perform rendezvous.
		 */
		order = mce_start(&no_way_out);
	}

	for (i = 0; i < cfg->banks; i++) {
		__clear_bit(i, toclear);
		if (!test_bit(i, valid_banks))
			continue;
		if (!mce_banks[i].ctl)
			continue;

		m.misc = 0;
		m.addr = 0;
		m.bank = i;

		m.status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
		if ((m.status & MCI_STATUS_VAL) == 0)
			continue;

		/*
		 * Non uncorrected or non signaled errors are handled by
		 * machine_check_poll. Leave them alone, unless this panics.
		 */
		if (!(m.status & (cfg->ser ? MCI_STATUS_S : MCI_STATUS_UC)) &&
			!no_way_out)
			continue;

		/*
		 * Set taint even when machine check was not enabled.
		 */
		add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);

		severity = mce_severity(&m, cfg->tolerant, NULL, true);

		/*
		 * When machine check was for corrected/deferred handler don't
		 * touch, unless we're panicing.
		 */
		if ((severity == MCE_KEEP_SEVERITY ||
		     severity == MCE_UCNA_SEVERITY) && !no_way_out)
			continue;
		__set_bit(i, toclear);
		if (severity == MCE_NO_SEVERITY) {
			/*
			 * Machine check event was not enabled. Clear, but
			 * ignore.
			 */
			continue;
		}

		mce_read_aux(&m, i);

		/* assuming valid severity level != 0 */
		m.severity = severity;
		m.usable_addr = mce_usable_address(&m);

		mce_log(&m);

		if (severity > worst) {
			*final = m;
			worst = severity;
		}
	}

	/* mce_clear_state will clear *final, save locally for use later */
	m = *final;

	if (!no_way_out)
		mce_clear_state(toclear);

	/*
	 * Do most of the synchronization with other CPUs.
	 * When there's any problem use only local no_way_out state.
	 */
	if (!lmce) {
		if (mce_end(order) < 0)
			no_way_out = worst >= MCE_PANIC_SEVERITY;
	} else {
		/*
		 * Local MCE skipped calling mce_reign()
		 * If we found a fatal error, we need to panic here.
		 */
		if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3)
			mce_panic("Machine check from unknown source",
				NULL, NULL);
	}

	/*
	 * At insane "tolerant" levels we take no action. Otherwise
	 * we only die if we have no other choice. For less serious
	 * issues we try to recover, or limit damage to the current
	 * process.
	 */
	if (cfg->tolerant < 3) {
		if (no_way_out)
			mce_panic("Fatal machine check on current CPU", &m, msg);
		if (worst == MCE_AR_SEVERITY) {
			recover_paddr = m.addr;
			if (!(m.mcgstatus & MCG_STATUS_RIPV))
				flags |= MF_MUST_KILL;
		} else if (kill_it) {
			force_sig(SIGBUS, current);
		}
	}

	if (worst > 0)
		mce_report_event(regs);
	mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
out:
	sync_core();

	if (recover_paddr == ~0ull)
		goto done;

	pr_err("Uncorrected hardware memory error in user-access at %llx",
		 recover_paddr);
	/*
	 * We must call memory_failure() here even if the current process is
	 * doomed. We still need to mark the page as poisoned and alert any
	 * other users of the page.
	 */
	ist_begin_non_atomic(regs);
	local_irq_enable();
	if (memory_failure(recover_paddr >> PAGE_SHIFT, MCE_VECTOR, flags) < 0) {
		pr_err("Memory error not recovered");
		force_sig(SIGBUS, current);
	}
	local_irq_disable();
	ist_end_non_atomic();
done:
	ist_exit(regs);
}
EXPORT_SYMBOL_GPL(do_machine_check);

cd42f4a3
TL
1172#ifndef CONFIG_MEMORY_FAILURE
1173int memory_failure(unsigned long pfn, int vector, int flags)
9b1beaf2 1174{
a8c321fb
TL
1175 /* mce_severity() should not hand us an ACTION_REQUIRED error */
1176 BUG_ON(flags & MF_ACTION_REQUIRED);
c767a54b
JP
1177 pr_err("Uncorrected memory error in page 0x%lx ignored\n"
1178 "Rebuild kernel with CONFIG_MEMORY_FAILURE=y for smarter handling\n",
1179 pfn);
cd42f4a3
TL
1180
1181 return 0;
9b1beaf2 1182}
cd42f4a3 1183#endif
9b1beaf2 1184
a8c321fb
TL
1185/*
1186 * Action optional processing happens here (picking up
1187 * from the list of faulting pages that do_machine_check()
fd4cf79f 1188 * placed into the genpool).
a8c321fb 1189 */
9b1beaf2
AK
1190static void mce_process_work(struct work_struct *dummy)
1191{
fd4cf79f 1192 mce_gen_pool_process();
9b1beaf2
AK
1193}
1194
15d5f839
DZ
1195#ifdef CONFIG_X86_MCE_INTEL
1196/***
1197 * mce_log_therm_throt_event - Logs the thermal throttling event to mcelog
676b1855 1198 * @cpu: The CPU on which the event occurred.
15d5f839
DZ
1199 * @status: Event status information
1200 *
1201 * This function should be called by the thermal interrupt after the
1202 * event has been processed and the decision was made to log the event
1203 * further.
1204 *
1205 * The status parameter will be saved to the 'status' field of 'struct mce'
1206 * and historically has been the register value of the
1207 * MSR_IA32_THERMAL_STATUS (Intel) msr.
1208 */
b5f2fa4e 1209void mce_log_therm_throt_event(__u64 status)
15d5f839
DZ
1210{
1211 struct mce m;
1212
b5f2fa4e 1213 mce_setup(&m);
15d5f839
DZ
1214 m.bank = MCE_THERMAL_BANK;
1215 m.status = status;
15d5f839
DZ
1216 mce_log(&m);
1217}
1218#endif /* CONFIG_X86_MCE_INTEL */
1219
1da177e4 1220/*
8a336b0a
TH
1221 * Periodic polling timer for "silent" machine check errors. If the
1222 * poller finds an MCE, poll 2x faster. When the poller finds no more
1223 * errors, poll 2x slower (up to check_interval seconds).
1da177e4 1224 */
3f2f0680 1225static unsigned long check_interval = INITIAL_CHECK_INTERVAL;
e9eee03e 1226
82f7af09 1227static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
52d168e2 1228static DEFINE_PER_CPU(struct timer_list, mce_timer);
1da177e4 1229
55babd8f
CG
1230static unsigned long mce_adjust_timer_default(unsigned long interval)
1231{
1232 return interval;
1233}
1234
3f2f0680 1235static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;
55babd8f 1236
3f2f0680 1237static void __restart_timer(struct timer_list *t, unsigned long interval)
27f6c573 1238{
3f2f0680
BP
1239 unsigned long when = jiffies + interval;
1240 unsigned long flags;
27f6c573 1241
3f2f0680 1242 local_irq_save(flags);
27f6c573 1243
3f2f0680
BP
1244 if (timer_pending(t)) {
1245 if (time_before(when, t->expires))
1246 mod_timer_pinned(t, when);
1247 } else {
1248 t->expires = round_jiffies(when);
1249 add_timer_on(t, smp_processor_id());
1250 }
1251
1252 local_irq_restore(flags);
27f6c573
CG
1253}
1254
82f7af09 1255static void mce_timer_fn(unsigned long data)
1da177e4 1256{
89cbc767 1257 struct timer_list *t = this_cpu_ptr(&mce_timer);
3f2f0680 1258 int cpu = smp_processor_id();
82f7af09 1259 unsigned long iv;
52d168e2 1260
3f2f0680
BP
1261 WARN_ON(cpu != data);
1262
1263 iv = __this_cpu_read(mce_next_interval);
52d168e2 1264
89cbc767 1265 if (mce_available(this_cpu_ptr(&cpu_info))) {
3f2f0680
BP
1266 machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_poll_banks));
1267
1268 if (mce_intel_cmci_poll()) {
1269 iv = mce_adjust_timer(iv);
1270 goto done;
1271 }
e9eee03e 1272 }
1da177e4
LT
1273
1274 /*
3f2f0680
BP
1275 * Alert userspace if needed. If we logged an MCE, reduce the polling
1276 * interval, otherwise increase the polling interval.
1da177e4 1277 */
3f2f0680 1278 if (mce_notify_irq())
958fb3c5 1279 iv = max(iv / 2, (unsigned long) HZ/100);
3f2f0680 1280 else
82f7af09 1281 iv = min(iv * 2, round_jiffies_relative(check_interval * HZ));
3f2f0680
BP
1282
1283done:
82f7af09 1284 __this_cpu_write(mce_next_interval, iv);
3f2f0680 1285 __restart_timer(t, iv);
55babd8f 1286}
e02e68d3 1287
55babd8f
CG
1288/*
1289 * Ensure that the timer is firing in @interval from now.
1290 */
1291void mce_timer_kick(unsigned long interval)
1292{
89cbc767 1293 struct timer_list *t = this_cpu_ptr(&mce_timer);
55babd8f
CG
1294 unsigned long iv = __this_cpu_read(mce_next_interval);
1295
3f2f0680
BP
1296 __restart_timer(t, interval);
1297
55babd8f
CG
1298 if (interval < iv)
1299 __this_cpu_write(mce_next_interval, interval);
e02e68d3
TH
1300}
1301
9aaef96f
HS
1302/* Must not be called in IRQ context where del_timer_sync() can deadlock */
1303static void mce_timer_delete_all(void)
1304{
1305 int cpu;
1306
1307 for_each_online_cpu(cpu)
1308 del_timer_sync(&per_cpu(mce_timer, cpu));
1309}
1310
9bd98405
AK
1311static void mce_do_trigger(struct work_struct *work)
1312{
1020bcbc 1313 call_usermodehelper(mce_helper, mce_helper_argv, NULL, UMH_NO_WAIT);
9bd98405
AK
1314}
1315
1316static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
1317
e02e68d3 1318/*
9bd98405
AK
1319 * Notify the user(s) about new machine check events.
1320 * Can be called from interrupt context, but not from machine check/NMI
1321 * context.
e02e68d3 1322 */
9ff36ee9 1323int mce_notify_irq(void)
e02e68d3 1324{
8457c84d
AK
1325 /* Not more than two messages every minute */
1326 static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
1327
1020bcbc 1328 if (test_and_clear_bit(0, &mce_need_notify)) {
93b62c3c
HS
1329 /* wake processes polling /dev/mcelog */
1330 wake_up_interruptible(&mce_chrdev_wait);
9bd98405 1331
4d899be5 1332 if (mce_helper[0])
9bd98405 1333 schedule_work(&mce_trigger_work);
e02e68d3 1334
8457c84d 1335 if (__ratelimit(&ratelimit))
a2d7b0d4 1336 pr_info(HW_ERR "Machine check events logged\n");
e02e68d3
TH
1337
1338 return 1;
1da177e4 1339 }
e02e68d3
TH
1340 return 0;
1341}
9ff36ee9 1342EXPORT_SYMBOL_GPL(mce_notify_irq);
8a336b0a 1343
148f9bb8 1344static int __mcheck_cpu_mce_banks_init(void)
cebe1820
AK
1345{
1346 int i;
d203f0b8 1347 u8 num_banks = mca_cfg.banks;
cebe1820 1348
d203f0b8 1349 mce_banks = kzalloc(num_banks * sizeof(struct mce_bank), GFP_KERNEL);
cebe1820
AK
1350 if (!mce_banks)
1351 return -ENOMEM;
d203f0b8
BP
1352
1353 for (i = 0; i < num_banks; i++) {
cebe1820 1354 struct mce_bank *b = &mce_banks[i];
11868a2d 1355
cebe1820
AK
1356 b->ctl = -1ULL;
1357 b->init = 1;
1358 }
1359 return 0;
1360}
1361
d88203d1 1362/*
1da177e4
LT
1363 * Initialize Machine Checks for a CPU.
1364 */
148f9bb8 1365static int __mcheck_cpu_cap_init(void)
1da177e4 1366{
0d7482e3 1367 unsigned b;
e9eee03e 1368 u64 cap;
1da177e4
LT
1369
1370 rdmsrl(MSR_IA32_MCG_CAP, cap);
01c6680a
TG
1371
1372 b = cap & MCG_BANKCNT_MASK;
d203f0b8 1373 if (!mca_cfg.banks)
c767a54b 1374 pr_info("CPU supports %d MCE banks\n", b);
b659294b 1375
0d7482e3 1376 if (b > MAX_NR_BANKS) {
c767a54b 1377 pr_warn("Using only %u machine check banks out of %u\n",
0d7482e3
AK
1378 MAX_NR_BANKS, b);
1379 b = MAX_NR_BANKS;
1380 }
1381
1382 /* Don't support asymmetric configurations today */
d203f0b8
BP
1383 WARN_ON(mca_cfg.banks != 0 && b != mca_cfg.banks);
1384 mca_cfg.banks = b;
1385
cebe1820 1386 if (!mce_banks) {
cffd377e 1387 int err = __mcheck_cpu_mce_banks_init();
11868a2d 1388
cebe1820
AK
1389 if (err)
1390 return err;
1da177e4 1391 }
0d7482e3 1392
94ad8474 1393 /* Use accurate RIP reporting if available. */
01c6680a 1394 if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9)
84c2559d 1395 mca_cfg.rip_msr = MSR_IA32_MCG_EIP;
1da177e4 1396
ed7290d0 1397 if (cap & MCG_SER_P)
1462594b 1398 mca_cfg.ser = true;
ed7290d0 1399
0d7482e3
AK
1400 return 0;
1401}
1402
5e09954a 1403static void __mcheck_cpu_init_generic(void)
0d7482e3 1404{
84c2559d 1405 enum mcp_flags m_fl = 0;
e9eee03e 1406 mce_banks_t all_banks;
0d7482e3
AK
1407 u64 cap;
1408 int i;
1409
84c2559d
BP
1410 if (!mca_cfg.bootlog)
1411 m_fl = MCP_DONTLOG;
1412
b79109c3
AK
1413 /*
1414 * Log the machine checks left over from the previous reset.
1415 */
ee031c31 1416 bitmap_fill(all_banks, MAX_NR_BANKS);
84c2559d 1417 machine_check_poll(MCP_UC | m_fl, &all_banks);
1da177e4 1418
375074cc 1419 cr4_set_bits(X86_CR4_MCE);
1da177e4 1420
0d7482e3 1421 rdmsrl(MSR_IA32_MCG_CAP, cap);
1da177e4
LT
1422 if (cap & MCG_CTL_P)
1423 wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
1424
d203f0b8 1425 for (i = 0; i < mca_cfg.banks; i++) {
cebe1820 1426 struct mce_bank *b = &mce_banks[i];
11868a2d 1427
cebe1820 1428 if (!b->init)
06b7a7a5 1429 continue;
a2d32bcb
AK
1430 wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
1431 wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
d88203d1 1432 }
1da177e4
LT
1433}
1434
61b0fccd
TL
1435/*
1436 * During IFU recovery Sandy Bridge -EP4S processors set the RIPV and
1437 * EIPV bits in MCG_STATUS to zero on the affected logical processor (SDM
1438 * Vol 3B Table 15-20). But this confuses both the code that determines
1439 * whether the machine check occurred in kernel or user mode, and also
1440 * the severity assessment code. Pretend that EIPV was set, and take the
1441 * ip/cs values from the pt_regs that mce_gather_info() ignored earlier.
1442 */
1443static void quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs)
1444{
1445 if (bank != 0)
1446 return;
1447 if ((m->mcgstatus & (MCG_STATUS_EIPV|MCG_STATUS_RIPV)) != 0)
1448 return;
1449 if ((m->status & (MCI_STATUS_OVER|MCI_STATUS_UC|
1450 MCI_STATUS_EN|MCI_STATUS_MISCV|MCI_STATUS_ADDRV|
1451 MCI_STATUS_PCC|MCI_STATUS_S|MCI_STATUS_AR|
1452 MCACOD)) !=
1453 (MCI_STATUS_UC|MCI_STATUS_EN|
1454 MCI_STATUS_MISCV|MCI_STATUS_ADDRV|MCI_STATUS_S|
1455 MCI_STATUS_AR|MCACOD_INSTR))
1456 return;
1457
1458 m->mcgstatus |= MCG_STATUS_EIPV;
1459 m->ip = regs->ip;
1460 m->cs = regs->cs;
1461}
1462
1da177e4 1463/* Add per CPU specific workarounds here */
148f9bb8 1464static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
d88203d1 1465{
d203f0b8
BP
1466 struct mca_config *cfg = &mca_cfg;
1467
e412cd25 1468 if (c->x86_vendor == X86_VENDOR_UNKNOWN) {
c767a54b 1469 pr_info("unknown CPU type - not enabling MCE support\n");
e412cd25
IM
1470 return -EOPNOTSUPP;
1471 }
1472
1da177e4 1473 /* This should be disabled by the BIOS, but isn't always */
911f6a7b 1474 if (c->x86_vendor == X86_VENDOR_AMD) {
d203f0b8 1475 if (c->x86 == 15 && cfg->banks > 4) {
e9eee03e
IM
1476 /*
1477 * disable GART TBL walk error reporting, which
1478 * trips off incorrectly with the IOMMU & 3ware
1479 * & Cerberus:
1480 */
cebe1820 1481 clear_bit(10, (unsigned long *)&mce_banks[4].ctl);
e9eee03e 1482 }
84c2559d 1483 if (c->x86 <= 17 && cfg->bootlog < 0) {
e9eee03e
IM
1484 /*
1485 * Lots of broken BIOS around that don't clear them
1486 * by default and leave crap in there. Don't log:
1487 */
84c2559d 1488 cfg->bootlog = 0;
e9eee03e 1489 }
2e6f694f
AK
1490 /*
1491 * Various K7s with broken bank 0 around. Always disable
1492 * by default.
1493 */
c9ce8712 1494 if (c->x86 == 6 && cfg->banks > 0)
cebe1820 1495 mce_banks[0].ctl = 0;
575203b4 1496
bf80bbd7
AG
1497 /*
1498 * overflow_recov is supported for F15h Models 00h-0fh
1499 * even though we don't have a CPUID bit for it.
1500 */
1501 if (c->x86 == 0x15 && c->x86_model <= 0xf)
1502 mce_flags.overflow_recov = 1;
1503
c9ce8712
BP
1504 /*
1505 * Turn off MC4_MISC thresholding banks on those models since
1506 * they're not supported there.
1507 */
1508 if (c->x86 == 0x15 &&
1509 (c->x86_model >= 0x10 && c->x86_model <= 0x1f)) {
1510 int i;
1511 u64 hwcr;
1512 bool need_toggle;
1513 u32 msrs[] = {
575203b4
BP
1514 0x00000413, /* MC4_MISC0 */
1515 0xc0000408, /* MC4_MISC1 */
c9ce8712 1516 };
575203b4 1517
c9ce8712 1518 rdmsrl(MSR_K7_HWCR, hwcr);
575203b4 1519
c9ce8712
BP
1520 /* McStatusWrEn has to be set */
1521 need_toggle = !(hwcr & BIT(18));
575203b4 1522
c9ce8712
BP
1523 if (need_toggle)
1524 wrmsrl(MSR_K7_HWCR, hwcr | BIT(18));
575203b4 1525
c9ce8712
BP
1526 /* Clear CntP bit safely */
1527 for (i = 0; i < ARRAY_SIZE(msrs); i++)
1528 msr_clear_bit(msrs[i], 62);
575203b4 1529
c9ce8712
BP
1530 /* restore old settings */
1531 if (need_toggle)
1532 wrmsrl(MSR_K7_HWCR, hwcr);
1533 }
1da177e4 1534 }
e583538f 1535
06b7a7a5
AK
1536 if (c->x86_vendor == X86_VENDOR_INTEL) {
1537 /*
1538 * SDM documents that on family 6 bank 0 should not be written
1539 * because it aliases to another special BIOS controlled
1540 * register.
1541 * But it's not aliased anymore on model 0x1a+
1542 * Don't ignore bank 0 completely because there could be a
1543 * valid event later, merely don't write CTL0.
1544 */
1545
d203f0b8 1546 if (c->x86 == 6 && c->x86_model < 0x1A && cfg->banks > 0)
cebe1820 1547 mce_banks[0].init = 0;
3c079792
AK
1548
1549 /*
1550 * All newer Intel systems support MCE broadcasting. Enable
1551 * synchronization with a one second timeout.
1552 */
1553 if ((c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xe)) &&
84c2559d
BP
1554 cfg->monarch_timeout < 0)
1555 cfg->monarch_timeout = USEC_PER_SEC;
c7f6fa44 1556
e412cd25
IM
1557 /*
1558 * There are also broken BIOSes on some Pentium M and
1559 * earlier systems:
1560 */
84c2559d
BP
1561 if (c->x86 == 6 && c->x86_model <= 13 && cfg->bootlog < 0)
1562 cfg->bootlog = 0;
61b0fccd
TL
1563
1564 if (c->x86 == 6 && c->x86_model == 45)
1565 quirk_no_way_out = quirk_sandybridge_ifu;
06b7a7a5 1566 }
84c2559d
BP
1567 if (cfg->monarch_timeout < 0)
1568 cfg->monarch_timeout = 0;
1569 if (cfg->bootlog != 0)
7af19e4a 1570 cfg->panic_timeout = 30;
e412cd25
IM
1571
1572 return 0;
d88203d1 1573}
1da177e4 1574
148f9bb8 1575static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
4efc0670
AK
1576{
1577 if (c->x86 != 5)
3a97fc34
HS
1578 return 0;
1579
4efc0670
AK
1580 switch (c->x86_vendor) {
1581 case X86_VENDOR_INTEL:
c6978369 1582 intel_p5_mcheck_init(c);
3a97fc34 1583 return 1;
4efc0670
AK
1584 break;
1585 case X86_VENDOR_CENTAUR:
1586 winchip_mcheck_init(c);
3a97fc34 1587 return 1;
4efc0670
AK
1588 break;
1589 }
3a97fc34
HS
1590
1591 return 0;
4efc0670
AK
1592}
1593
5e09954a 1594static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
1da177e4
LT
1595{
1596 switch (c->x86_vendor) {
1597 case X86_VENDOR_INTEL:
1598 mce_intel_feature_init(c);
3f2f0680 1599 mce_adjust_timer = cmci_intel_adjust_timer;
1da177e4 1600 break;
7559e13f
AG
1601
1602 case X86_VENDOR_AMD: {
1603 u32 ebx = cpuid_ebx(0x80000007);
1604
89b831ef 1605 mce_amd_feature_init(c);
7559e13f
AG
1606 mce_flags.overflow_recov = !!(ebx & BIT(0));
1607 mce_flags.succor = !!(ebx & BIT(1));
c7f54d21
AG
1608 mce_flags.smca = !!(ebx & BIT(3));
1609
89b831ef 1610 break;
7559e13f
AG
1611 }
1612
1da177e4
LT
1613 default:
1614 break;
1615 }
1616}
1617
8838eb6c
AR
1618static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c)
1619{
1620 switch (c->x86_vendor) {
1621 case X86_VENDOR_INTEL:
1622 mce_intel_feature_clear(c);
1623 break;
1624 default:
1625 break;
1626 }
1627}
1628
26c3c283 1629static void mce_start_timer(unsigned int cpu, struct timer_list *t)
52d168e2 1630{
4f75d841 1631 unsigned long iv = check_interval * HZ;
bc09effa 1632
7af19e4a 1633 if (mca_cfg.ignore_ce || !iv)
62fdac59
HS
1634 return;
1635
4f75d841
BP
1636 per_cpu(mce_next_interval, cpu) = iv;
1637
82f7af09 1638 t->expires = round_jiffies(jiffies + iv);
4f75d841 1639 add_timer_on(t, cpu);
52d168e2
AK
1640}
1641
26c3c283
TG
1642static void __mcheck_cpu_init_timer(void)
1643{
89cbc767 1644 struct timer_list *t = this_cpu_ptr(&mce_timer);
26c3c283
TG
1645 unsigned int cpu = smp_processor_id();
1646
1647 setup_timer(t, mce_timer_fn, cpu);
1648 mce_start_timer(cpu, t);
1649}
1650
9eda8cb3
AK
1651/* Handle unconfigured int18 (should never happen) */
1652static void unexpected_machine_check(struct pt_regs *regs, long error_code)
1653{
c767a54b 1654 pr_err("CPU#%d: Unexpected int18 (Machine Check)\n",
9eda8cb3
AK
1655 smp_processor_id());
1656}
1657
1658/* Call the installed machine check handler for this CPU setup. */
1659void (*machine_check_vector)(struct pt_regs *, long error_code) =
1660 unexpected_machine_check;
1661
d88203d1 1662/*
1da177e4 1663 * Called for each booted CPU to set up machine checks.
e9eee03e 1664 * Must be called with preempt off:
1da177e4 1665 */
148f9bb8 1666void mcheck_cpu_init(struct cpuinfo_x86 *c)
1da177e4 1667{
1462594b 1668 if (mca_cfg.disabled)
4efc0670
AK
1669 return;
1670
3a97fc34
HS
1671 if (__mcheck_cpu_ancient_init(c))
1672 return;
4efc0670 1673
5b4408fd 1674 if (!mce_available(c))
1da177e4
LT
1675 return;
1676
5e09954a 1677 if (__mcheck_cpu_cap_init() < 0 || __mcheck_cpu_apply_quirks(c) < 0) {
1462594b 1678 mca_cfg.disabled = true;
0d7482e3
AK
1679 return;
1680 }
0d7482e3 1681
648ed940
CG
1682 if (mce_gen_pool_init()) {
1683 mca_cfg.disabled = true;
1684 pr_emerg("Couldn't allocate MCE records pool!\n");
1685 return;
1686 }
1687
5d727926
AK
1688 machine_check_vector = do_machine_check;
1689
5e09954a
BP
1690 __mcheck_cpu_init_generic();
1691 __mcheck_cpu_init_vendor(c);
1692 __mcheck_cpu_init_timer();
1da177e4
LT
1693}
1694
8838eb6c
AR
1695/*
1696 * Called for each booted CPU to clear some machine checks opt-ins
1697 */
1698void mcheck_cpu_clear(struct cpuinfo_x86 *c)
1699{
1700 if (mca_cfg.disabled)
1701 return;
1702
1703 if (!mce_available(c))
1704 return;
1705
1706 /*
1707 * Possibly to clear general settings generic to x86
1708 * __mcheck_cpu_clear_generic(c);
1709 */
1710 __mcheck_cpu_clear_vendor(c);
1711
1da177e4
LT
1712}
1713
1714/*
93b62c3c 1715 * mce_chrdev: Character device /dev/mcelog to read and clear the MCE log.
1da177e4
LT
1716 */
1717
93b62c3c
HS
1718static DEFINE_SPINLOCK(mce_chrdev_state_lock);
1719static int mce_chrdev_open_count; /* #times opened */
1720static int mce_chrdev_open_exclu; /* already open exclusive? */
f528e7ba 1721
93b62c3c 1722static int mce_chrdev_open(struct inode *inode, struct file *file)
f528e7ba 1723{
93b62c3c 1724 spin_lock(&mce_chrdev_state_lock);
f528e7ba 1725
93b62c3c
HS
1726 if (mce_chrdev_open_exclu ||
1727 (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
1728 spin_unlock(&mce_chrdev_state_lock);
e9eee03e 1729
f528e7ba
TH
1730 return -EBUSY;
1731 }
1732
1733 if (file->f_flags & O_EXCL)
93b62c3c
HS
1734 mce_chrdev_open_exclu = 1;
1735 mce_chrdev_open_count++;
f528e7ba 1736
93b62c3c 1737 spin_unlock(&mce_chrdev_state_lock);
f528e7ba 1738
bd78432c 1739 return nonseekable_open(inode, file);
f528e7ba
TH
1740}
1741
93b62c3c 1742static int mce_chrdev_release(struct inode *inode, struct file *file)
f528e7ba 1743{
93b62c3c 1744 spin_lock(&mce_chrdev_state_lock);
f528e7ba 1745
93b62c3c
HS
1746 mce_chrdev_open_count--;
1747 mce_chrdev_open_exclu = 0;
f528e7ba 1748
93b62c3c 1749 spin_unlock(&mce_chrdev_state_lock);
f528e7ba
TH
1750
1751 return 0;
1752}
1753
d88203d1
TG
1754static void collect_tscs(void *data)
1755{
1da177e4 1756 unsigned long *cpu_tsc = (unsigned long *)data;
d88203d1 1757
4ea1636b 1758 cpu_tsc[smp_processor_id()] = rdtsc();
d88203d1 1759}
1da177e4 1760
482908b4
HY
1761static int mce_apei_read_done;
1762
1763/* Collect MCE record of previous boot in persistent storage via APEI ERST. */
1764static int __mce_read_apei(char __user **ubuf, size_t usize)
1765{
1766 int rc;
1767 u64 record_id;
1768 struct mce m;
1769
1770 if (usize < sizeof(struct mce))
1771 return -EINVAL;
1772
1773 rc = apei_read_mce(&m, &record_id);
1774 /* Error or no more MCE record */
1775 if (rc <= 0) {
1776 mce_apei_read_done = 1;
fadd85f1
NH
1777 /*
1778 * When ERST is disabled, mce_chrdev_read() should return
1779 * "no record" instead of "no device."
1780 */
1781 if (rc == -ENODEV)
1782 return 0;
482908b4
HY
1783 return rc;
1784 }
1785 rc = -EFAULT;
1786 if (copy_to_user(*ubuf, &m, sizeof(struct mce)))
1787 return rc;
1788 /*
1789 * In fact, we should have cleared the record after that has
1790 * been flushed to the disk or sent to network in
1791 * /sbin/mcelog, but we have no interface to support that now,
1792 * so just clear it to avoid duplication.
1793 */
1794 rc = apei_clear_mce(record_id);
1795 if (rc) {
1796 mce_apei_read_done = 1;
1797 return rc;
1798 }
1799 *ubuf += sizeof(struct mce);
1800
1801 return 0;
1802}
1803
93b62c3c
HS
1804static ssize_t mce_chrdev_read(struct file *filp, char __user *ubuf,
1805 size_t usize, loff_t *off)
1da177e4 1806{
e9eee03e 1807 char __user *buf = ubuf;
f0de53bb 1808 unsigned long *cpu_tsc;
ef41df43 1809 unsigned prev, next;
1da177e4
LT
1810 int i, err;
1811
6bca67f9 1812 cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL);
f0de53bb
AK
1813 if (!cpu_tsc)
1814 return -ENOMEM;
1815
93b62c3c 1816 mutex_lock(&mce_chrdev_read_mutex);
482908b4
HY
1817
1818 if (!mce_apei_read_done) {
1819 err = __mce_read_apei(&buf, usize);
1820 if (err || buf != ubuf)
1821 goto out;
1822 }
1823
9a7783d0 1824 next = mce_log_get_idx_check(mcelog.next);
1da177e4
LT
1825
1826 /* Only supports full reads right now */
482908b4
HY
1827 err = -EINVAL;
1828 if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce))
1829 goto out;
1da177e4
LT
1830
1831 err = 0;
ef41df43
HY
1832 prev = 0;
1833 do {
1834 for (i = prev; i < next; i++) {
1835 unsigned long start = jiffies;
559faa6b 1836 struct mce *m = &mcelog.entry[i];
ef41df43 1837
559faa6b 1838 while (!m->finished) {
ef41df43 1839 if (time_after_eq(jiffies, start + 2)) {
559faa6b 1840 memset(m, 0, sizeof(*m));
ef41df43
HY
1841 goto timeout;
1842 }
1843 cpu_relax();
673242c1 1844 }
ef41df43 1845 smp_rmb();
559faa6b
HS
1846 err |= copy_to_user(buf, m, sizeof(*m));
1847 buf += sizeof(*m);
ef41df43
HY
1848timeout:
1849 ;
673242c1 1850 }
1da177e4 1851
ef41df43
HY
1852 memset(mcelog.entry + prev, 0,
1853 (next - prev) * sizeof(struct mce));
1854 prev = next;
1855 next = cmpxchg(&mcelog.next, prev, 0);
1856 } while (next != prev);
1da177e4 1857
b2b18660 1858 synchronize_sched();
1da177e4 1859
d88203d1
TG
1860 /*
 1861 * Collect entries that were still being written before the
 1862 * synchronize_sched() call above.
1863 */
15c8b6c1 1864 on_each_cpu(collect_tscs, cpu_tsc, 1);
e9eee03e 1865
d88203d1 1866 for (i = next; i < MCE_LOG_LEN; i++) {
559faa6b
HS
1867 struct mce *m = &mcelog.entry[i];
1868
1869 if (m->finished && m->tsc < cpu_tsc[m->cpu]) {
1870 err |= copy_to_user(buf, m, sizeof(*m));
1da177e4 1871 smp_rmb();
559faa6b
HS
1872 buf += sizeof(*m);
1873 memset(m, 0, sizeof(*m));
1da177e4 1874 }
d88203d1 1875 }
482908b4
HY
1876
1877 if (err)
1878 err = -EFAULT;
1879
1880out:
93b62c3c 1881 mutex_unlock(&mce_chrdev_read_mutex);
f0de53bb 1882 kfree(cpu_tsc);
e9eee03e 1883
482908b4 1884 return err ? err : buf - ubuf;
1da177e4
LT
1885}
1886
93b62c3c 1887static unsigned int mce_chrdev_poll(struct file *file, poll_table *wait)
e02e68d3 1888{
93b62c3c 1889 poll_wait(file, &mce_chrdev_wait, wait);
e90328b8 1890 if (READ_ONCE(mcelog.next))
e02e68d3 1891 return POLLIN | POLLRDNORM;
482908b4
HY
1892 if (!mce_apei_read_done && apei_check_mce())
1893 return POLLIN | POLLRDNORM;
e02e68d3
TH
1894 return 0;
1895}
1896
93b62c3c
HS
1897static long mce_chrdev_ioctl(struct file *f, unsigned int cmd,
1898 unsigned long arg)
1da177e4
LT
1899{
1900 int __user *p = (int __user *)arg;
d88203d1 1901
1da177e4 1902 if (!capable(CAP_SYS_ADMIN))
d88203d1 1903 return -EPERM;
e9eee03e 1904
1da177e4 1905 switch (cmd) {
d88203d1 1906 case MCE_GET_RECORD_LEN:
1da177e4
LT
1907 return put_user(sizeof(struct mce), p);
1908 case MCE_GET_LOG_LEN:
d88203d1 1909 return put_user(MCE_LOG_LEN, p);
1da177e4
LT
1910 case MCE_GETCLEAR_FLAGS: {
1911 unsigned flags;
d88203d1
TG
1912
1913 do {
1da177e4 1914 flags = mcelog.flags;
d88203d1 1915 } while (cmpxchg(&mcelog.flags, flags, 0) != flags);
e9eee03e 1916
d88203d1 1917 return put_user(flags, p);
1da177e4
LT
1918 }
1919 default:
d88203d1
TG
1920 return -ENOTTY;
1921 }
1da177e4
LT
1922}
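Together, the read, poll and ioctl handlers form the legacy mcelog ABI: a consumer first asks for the record and log sizes, then issues one full-size read. A hedged userspace sketch (it assumes the exported uapi <asm/mce.h> provides struct mce, MCE_GET_RECORD_LEN and MCE_GET_LOG_LEN; not part of this file):

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/mce.h>	/* struct mce, MCE_GET_RECORD_LEN, MCE_GET_LOG_LEN */

int main(void)
{
	int fd = open("/dev/mcelog", O_RDONLY);
	int reclen = 0, loglen = 0;
	char *buf;
	ssize_t n;

	if (fd < 0)
		return 1;

	if (ioctl(fd, MCE_GET_RECORD_LEN, &reclen) < 0 ||
	    ioctl(fd, MCE_GET_LOG_LEN, &loglen) < 0)
		return 1;

	buf = malloc((size_t)reclen * loglen);
	if (!buf)
		return 1;

	/* mce_chrdev_read() rejects partial reads, so ask for the full log. */
	n = read(fd, buf, (size_t)reclen * loglen);
	if (n >= 0)
		printf("got %zd bytes (%zd records)\n", n, n / reclen);

	free(buf);
	close(fd);
	return 0;
}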
1923
66f5ddf3
TL
1924static ssize_t (*mce_write)(struct file *filp, const char __user *ubuf,
1925 size_t usize, loff_t *off);
1926
1927void register_mce_write_callback(ssize_t (*fn)(struct file *filp,
1928 const char __user *ubuf,
1929 size_t usize, loff_t *off))
1930{
1931 mce_write = fn;
1932}
1933EXPORT_SYMBOL_GPL(register_mce_write_callback);
1934
29c6820f
PM
1935static ssize_t mce_chrdev_write(struct file *filp, const char __user *ubuf,
1936 size_t usize, loff_t *off)
66f5ddf3
TL
1937{
1938 if (mce_write)
1939 return mce_write(filp, ubuf, usize, off);
1940 else
1941 return -EINVAL;
1942}
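register_mce_write_callback() lets a single consumer (in practice the mce-inject module) take over writes to /dev/mcelog. A hedged sketch of such a consumer; apart from register_mce_write_callback() and struct mce, every name below is hypothetical:

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <asm/mce.h>

static ssize_t example_inject_write(struct file *filp, const char __user *ubuf,
				    size_t usize, loff_t *off)
{
	struct mce m;

	if (usize != sizeof(m))
		return -EINVAL;
	if (copy_from_user(&m, ubuf, sizeof(m)))
		return -EFAULT;

	/* ... hand the user-supplied record to the injection machinery ... */

	return usize;
}

static int __init example_inject_init(void)
{
	register_mce_write_callback(example_inject_write);
	return 0;
}
module_init(example_inject_init);

MODULE_LICENSE("GPL");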
1943
1944static const struct file_operations mce_chrdev_ops = {
93b62c3c
HS
1945 .open = mce_chrdev_open,
1946 .release = mce_chrdev_release,
1947 .read = mce_chrdev_read,
66f5ddf3 1948 .write = mce_chrdev_write,
93b62c3c
HS
1949 .poll = mce_chrdev_poll,
1950 .unlocked_ioctl = mce_chrdev_ioctl,
1951 .llseek = no_llseek,
1da177e4
LT
1952};
1953
93b62c3c 1954static struct miscdevice mce_chrdev_device = {
1da177e4
LT
1955 MISC_MCELOG_MINOR,
1956 "mcelog",
1957 &mce_chrdev_ops,
1958};
1959
c3d1fb56
NR
1960static void __mce_disable_bank(void *arg)
1961{
1962 int bank = *((int *)arg);
89cbc767 1963 __clear_bit(bank, this_cpu_ptr(mce_poll_banks));
c3d1fb56
NR
1964 cmci_disable_bank(bank);
1965}
1966
1967void mce_disable_bank(int bank)
1968{
1969 if (bank >= mca_cfg.banks) {
1970 pr_warn(FW_BUG
1971 "Ignoring request to disable invalid MCA bank %d.\n",
1972 bank);
1973 return;
1974 }
1975 set_bit(bank, mce_banks_ce_disabled);
1976 on_each_cpu(__mce_disable_bank, &bank, 1);
1977}
1978
13503fa9 1979/*
62fdac59
HS
1980 * mce=off Disables machine check
1981 * mce=no_cmci Disables CMCI
88d53867 1982 * mce=no_lmce Disables LMCE
62fdac59
HS
1983 * mce=dont_log_ce Clears corrected events silently, no log created for CEs.
1984 * mce=ignore_ce Disables polling and CMCI, corrected events are not cleared.
3c079792
AK
1985 * mce=TOLERANCELEVEL[,monarchtimeout] (number, see above)
1986 * monarchtimeout is how long to wait for other CPUs on machine
1987 * check, or 0 to not wait
13503fa9
HS
1988 * mce=bootlog Log MCEs from before booting. Disabled by default on AMD.
1989 * mce=nobootlog Don't log MCEs from before booting.
450cc201 1990 * mce=bios_cmci_threshold Don't program the CMCI threshold
13503fa9 1991 */
1da177e4
LT
1992static int __init mcheck_enable(char *str)
1993{
d203f0b8
BP
1994 struct mca_config *cfg = &mca_cfg;
1995
e3346fc4 1996 if (*str == 0) {
4efc0670 1997 enable_p5_mce();
e3346fc4
BZ
1998 return 1;
1999 }
4efc0670
AK
2000 if (*str == '=')
2001 str++;
1da177e4 2002 if (!strcmp(str, "off"))
1462594b 2003 cfg->disabled = true;
62fdac59 2004 else if (!strcmp(str, "no_cmci"))
7af19e4a 2005 cfg->cmci_disabled = true;
88d53867
AR
2006 else if (!strcmp(str, "no_lmce"))
2007 cfg->lmce_disabled = true;
62fdac59 2008 else if (!strcmp(str, "dont_log_ce"))
d203f0b8 2009 cfg->dont_log_ce = true;
62fdac59 2010 else if (!strcmp(str, "ignore_ce"))
7af19e4a 2011 cfg->ignore_ce = true;
13503fa9 2012 else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
84c2559d 2013 cfg->bootlog = (str[0] == 'b');
450cc201 2014 else if (!strcmp(str, "bios_cmci_threshold"))
1462594b 2015 cfg->bios_cmci_threshold = true;
3c079792 2016 else if (isdigit(str[0])) {
5c31b280 2017 if (get_option(&str, &cfg->tolerant) == 2)
84c2559d 2018 get_option(&str, &(cfg->monarch_timeout));
3c079792 2019 } else {
c767a54b 2020 pr_info("mce argument %s ignored. Please use /sys\n", str);
13503fa9
HS
2021 return 0;
2022 }
9b41046c 2023 return 1;
1da177e4 2024}
4efc0670 2025__setup("mce", mcheck_enable);
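For reference, a few example command lines and the mca_cfg fields they end up setting; multiple mce= options may be passed, each one parsed independently by mcheck_enable() (editor-added illustration):

/*
 *   mce=off          -> mca_cfg.disabled = true (no machine check handling)
 *   mce=no_cmci      -> mca_cfg.cmci_disabled = true
 *   mce=dont_log_ce  -> mca_cfg.dont_log_ce = true
 *   mce=bootlog      -> mca_cfg.bootlog = 1
 *   mce=2,100        -> mca_cfg.tolerant = 2, mca_cfg.monarch_timeout = 100
 */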
1da177e4 2026
a2202aa2 2027int __init mcheck_init(void)
b33a6363 2028{
a2202aa2 2029 mcheck_intel_therm_init();
eef4dfa0 2030 mce_register_decode_chain(&mce_srao_nb);
43eaa2a1 2031 mcheck_vendor_init_severity();
a2202aa2 2032
061120ae
CG
2033 INIT_WORK(&mce_work, mce_process_work);
2034 init_irq_work(&mce_irq_work, mce_irq_work_cb);
2035
b33a6363
BP
2036 return 0;
2037}
b33a6363 2038
d88203d1 2039/*
c7cece89 2040 * mce_syscore: PM support
d88203d1 2041 */
1da177e4 2042
973a2dd1
AK
2043/*
2044 * Disable machine checks on suspend and shutdown. We can't really handle
2045 * them later.
2046 */
6e06780a 2047static void mce_disable_error_reporting(void)
973a2dd1
AK
2048{
2049 int i;
2050
d203f0b8 2051 for (i = 0; i < mca_cfg.banks; i++) {
cebe1820 2052 struct mce_bank *b = &mce_banks[i];
11868a2d 2053
cebe1820 2054 if (b->init)
a2d32bcb 2055 wrmsrl(MSR_IA32_MCx_CTL(i), 0);
06b7a7a5 2056 }
6e06780a
AR
2057 return;
2058}
2059
2060static void vendor_disable_error_reporting(void)
2061{
2062 /*
2063 * Don't clear on Intel CPUs. Some of these MSRs are socket-wide.
2064 * Disabling them for just a single offlined CPU is bad, since it will
2065 * inhibit reporting for all shared resources on the socket like the
2066 * last level cache (LLC), the integrated memory controller (iMC), etc.
2067 */
2068 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
2069 return;
2070
2071 mce_disable_error_reporting();
973a2dd1
AK
2072}
2073
c7cece89 2074static int mce_syscore_suspend(void)
973a2dd1 2075{
6e06780a
AR
2076 vendor_disable_error_reporting();
2077 return 0;
973a2dd1
AK
2078}
2079
c7cece89 2080static void mce_syscore_shutdown(void)
973a2dd1 2081{
6e06780a 2082 vendor_disable_error_reporting();
973a2dd1
AK
2083}
2084
e9eee03e
IM
2085/*
2086 * On resume clear all MCE state. Don't want to see leftovers from the BIOS.
2087 * Only one CPU is active at this time, the others get re-added later using
2088 * CPU hotplug:
2089 */
c7cece89 2090static void mce_syscore_resume(void)
1da177e4 2091{
5e09954a 2092 __mcheck_cpu_init_generic();
89cbc767 2093 __mcheck_cpu_init_vendor(raw_cpu_ptr(&cpu_info));
1da177e4
LT
2094}
2095
f3c6ea1b 2096static struct syscore_ops mce_syscore_ops = {
c7cece89
HS
2097 .suspend = mce_syscore_suspend,
2098 .shutdown = mce_syscore_shutdown,
2099 .resume = mce_syscore_resume,
f3c6ea1b
RW
2100};
2101
c7cece89 2102/*
8a25a2fd 2103 * mce_device: Sysfs support
c7cece89
HS
2104 */
2105
52d168e2
AK
2106static void mce_cpu_restart(void *data)
2107{
89cbc767 2108 if (!mce_available(raw_cpu_ptr(&cpu_info)))
33edbf02 2109 return;
5e09954a
BP
2110 __mcheck_cpu_init_generic();
2111 __mcheck_cpu_init_timer();
52d168e2
AK
2112}
2113
1da177e4 2114 /* Reinitialize machine check handling after user configuration changes */
d88203d1
TG
2115static void mce_restart(void)
2116{
9aaef96f 2117 mce_timer_delete_all();
52d168e2 2118 on_each_cpu(mce_cpu_restart, NULL, 1);
1da177e4
LT
2119}
2120
9af43b54 2121/* Toggle features for corrected errors */
9aaef96f 2122static void mce_disable_cmci(void *data)
9af43b54 2123{
89cbc767 2124 if (!mce_available(raw_cpu_ptr(&cpu_info)))
9af43b54 2125 return;
9af43b54
HS
2126 cmci_clear();
2127}
2128
2129static void mce_enable_ce(void *all)
2130{
89cbc767 2131 if (!mce_available(raw_cpu_ptr(&cpu_info)))
9af43b54
HS
2132 return;
2133 cmci_reenable();
2134 cmci_recheck();
2135 if (all)
5e09954a 2136 __mcheck_cpu_init_timer();
9af43b54
HS
2137}
2138
8a25a2fd 2139static struct bus_type mce_subsys = {
e9eee03e 2140 .name = "machinecheck",
8a25a2fd 2141 .dev_name = "machinecheck",
1da177e4
LT
2142};
2143
d6126ef5 2144DEFINE_PER_CPU(struct device *, mce_device);
e9eee03e 2145
e9eee03e 2146void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
1da177e4 2147
8a25a2fd 2148static inline struct mce_bank *attr_to_bank(struct device_attribute *attr)
cebe1820
AK
2149{
2150 return container_of(attr, struct mce_bank, attr);
2151}
0d7482e3 2152
8a25a2fd 2153static ssize_t show_bank(struct device *s, struct device_attribute *attr,
0d7482e3
AK
2154 char *buf)
2155{
cebe1820 2156 return sprintf(buf, "%llx\n", attr_to_bank(attr)->ctl);
0d7482e3
AK
2157}
2158
8a25a2fd 2159static ssize_t set_bank(struct device *s, struct device_attribute *attr,
9319cec8 2160 const char *buf, size_t size)
0d7482e3 2161{
9319cec8 2162 u64 new;
e9eee03e 2163
164109e3 2164 if (kstrtou64(buf, 0, &new) < 0)
0d7482e3 2165 return -EINVAL;
e9eee03e 2166
cebe1820 2167 attr_to_bank(attr)->ctl = new;
0d7482e3 2168 mce_restart();
e9eee03e 2169
9319cec8 2170 return size;
0d7482e3 2171}
a98f0dd3 2172
e9eee03e 2173static ssize_t
8a25a2fd 2174show_trigger(struct device *s, struct device_attribute *attr, char *buf)
a98f0dd3 2175{
1020bcbc 2176 strcpy(buf, mce_helper);
a98f0dd3 2177 strcat(buf, "\n");
1020bcbc 2178 return strlen(mce_helper) + 1;
a98f0dd3
AK
2179}
2180
8a25a2fd 2181static ssize_t set_trigger(struct device *s, struct device_attribute *attr,
e9eee03e 2182 const char *buf, size_t siz)
a98f0dd3
AK
2183{
2184 char *p;
e9eee03e 2185
1020bcbc
HS
2186 strncpy(mce_helper, buf, sizeof(mce_helper));
2187 mce_helper[sizeof(mce_helper)-1] = 0;
1020bcbc 2188 p = strchr(mce_helper, '\n');
e9eee03e 2189
e9084ec9 2190 if (p)
e9eee03e
IM
2191 *p = 0;
2192
e9084ec9 2193 return strlen(mce_helper) + !!p;
a98f0dd3
AK
2194}
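The trigger attribute holds the path of the user mode helper (mce_helper) that is spawned when new events are logged. A hedged usage illustration; the script path below is made up:

/*
 *   # echo /usr/local/sbin/mce-notify > \
 *         /sys/devices/system/machinecheck/machinecheck0/trigger
 *
 * Writing an empty string (or just a newline) clears mce_helper again.
 */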
2195
8a25a2fd
KS
2196static ssize_t set_ignore_ce(struct device *s,
2197 struct device_attribute *attr,
9af43b54
HS
2198 const char *buf, size_t size)
2199{
2200 u64 new;
2201
164109e3 2202 if (kstrtou64(buf, 0, &new) < 0)
9af43b54
HS
2203 return -EINVAL;
2204
7af19e4a 2205 if (mca_cfg.ignore_ce ^ !!new) {
9af43b54
HS
2206 if (new) {
2207 /* disable ce features */
9aaef96f
HS
2208 mce_timer_delete_all();
2209 on_each_cpu(mce_disable_cmci, NULL, 1);
7af19e4a 2210 mca_cfg.ignore_ce = true;
9af43b54
HS
2211 } else {
2212 /* enable ce features */
7af19e4a 2213 mca_cfg.ignore_ce = false;
9af43b54
HS
2214 on_each_cpu(mce_enable_ce, (void *)1, 1);
2215 }
2216 }
2217 return size;
2218}
2219
8a25a2fd
KS
2220static ssize_t set_cmci_disabled(struct device *s,
2221 struct device_attribute *attr,
9af43b54
HS
2222 const char *buf, size_t size)
2223{
2224 u64 new;
2225
164109e3 2226 if (kstrtou64(buf, 0, &new) < 0)
9af43b54
HS
2227 return -EINVAL;
2228
7af19e4a 2229 if (mca_cfg.cmci_disabled ^ !!new) {
9af43b54
HS
2230 if (new) {
2231 /* disable cmci */
9aaef96f 2232 on_each_cpu(mce_disable_cmci, NULL, 1);
7af19e4a 2233 mca_cfg.cmci_disabled = true;
9af43b54
HS
2234 } else {
2235 /* enable cmci */
7af19e4a 2236 mca_cfg.cmci_disabled = false;
9af43b54
HS
2237 on_each_cpu(mce_enable_ce, NULL, 1);
2238 }
2239 }
2240 return size;
2241}
2242
8a25a2fd
KS
2243static ssize_t store_int_with_restart(struct device *s,
2244 struct device_attribute *attr,
b56f642d
AK
2245 const char *buf, size_t size)
2246{
8a25a2fd 2247 ssize_t ret = device_store_int(s, attr, buf, size);
b56f642d
AK
2248 mce_restart();
2249 return ret;
2250}
2251
8a25a2fd 2252static DEVICE_ATTR(trigger, 0644, show_trigger, set_trigger);
d203f0b8 2253static DEVICE_INT_ATTR(tolerant, 0644, mca_cfg.tolerant);
84c2559d 2254static DEVICE_INT_ATTR(monarch_timeout, 0644, mca_cfg.monarch_timeout);
d203f0b8 2255static DEVICE_BOOL_ATTR(dont_log_ce, 0644, mca_cfg.dont_log_ce);
e9eee03e 2256
8a25a2fd
KS
2257static struct dev_ext_attribute dev_attr_check_interval = {
2258 __ATTR(check_interval, 0644, device_show_int, store_int_with_restart),
b56f642d
AK
2259 &check_interval
2260};
e9eee03e 2261
8a25a2fd 2262static struct dev_ext_attribute dev_attr_ignore_ce = {
7af19e4a
BP
2263 __ATTR(ignore_ce, 0644, device_show_bool, set_ignore_ce),
2264 &mca_cfg.ignore_ce
9af43b54
HS
2265};
2266
8a25a2fd 2267static struct dev_ext_attribute dev_attr_cmci_disabled = {
7af19e4a
BP
2268 __ATTR(cmci_disabled, 0644, device_show_bool, set_cmci_disabled),
2269 &mca_cfg.cmci_disabled
9af43b54
HS
2270};
2271
8a25a2fd
KS
2272static struct device_attribute *mce_device_attrs[] = {
2273 &dev_attr_tolerant.attr,
2274 &dev_attr_check_interval.attr,
2275 &dev_attr_trigger,
2276 &dev_attr_monarch_timeout.attr,
2277 &dev_attr_dont_log_ce.attr,
2278 &dev_attr_ignore_ce.attr,
2279 &dev_attr_cmci_disabled.attr,
a98f0dd3
AK
2280 NULL
2281};
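Together with the per-bank attributes added in mce_device_create() below, these attributes appear once per CPU under sysfs. A sketch of the resulting layout (paths assume the "machinecheck" subsystem registered above):

/*
 *   /sys/devices/system/machinecheck/machinecheck<N>/tolerant
 *   /sys/devices/system/machinecheck/machinecheck<N>/check_interval
 *   /sys/devices/system/machinecheck/machinecheck<N>/trigger
 *   /sys/devices/system/machinecheck/machinecheck<N>/monarch_timeout
 *   /sys/devices/system/machinecheck/machinecheck<N>/dont_log_ce
 *   /sys/devices/system/machinecheck/machinecheck<N>/ignore_ce
 *   /sys/devices/system/machinecheck/machinecheck<N>/cmci_disabled
 *   /sys/devices/system/machinecheck/machinecheck<N>/bank0 ... bank<banks-1>
 *
 * Writes to check_interval and to the bank<i> controls take effect through
 * mce_restart() on all CPUs.
 */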
1da177e4 2282
8a25a2fd 2283static cpumask_var_t mce_device_initialized;
bae19fe0 2284
e032d807
GKH
2285static void mce_device_release(struct device *dev)
2286{
2287 kfree(dev);
2288}
2289
8a25a2fd 2290 /* Per CPU device init. All of the CPUs still share the same control banks: */
148f9bb8 2291static int mce_device_create(unsigned int cpu)
1da177e4 2292{
e032d807 2293 struct device *dev;
1da177e4 2294 int err;
b1f49f95 2295 int i, j;
92cb7612 2296
90367556 2297 if (!mce_available(&boot_cpu_data))
91c6d400
AK
2298 return -EIO;
2299
e032d807
GKH
2300 dev = kzalloc(sizeof *dev, GFP_KERNEL);
2301 if (!dev)
2302 return -ENOMEM;
8a25a2fd
KS
2303 dev->id = cpu;
2304 dev->bus = &mce_subsys;
e032d807 2305 dev->release = &mce_device_release;
91c6d400 2306
8a25a2fd 2307 err = device_register(dev);
853d9b18
LK
2308 if (err) {
2309 put_device(dev);
d435d862 2310 return err;
853d9b18 2311 }
d435d862 2312
8a25a2fd
KS
2313 for (i = 0; mce_device_attrs[i]; i++) {
2314 err = device_create_file(dev, mce_device_attrs[i]);
d435d862
AM
2315 if (err)
2316 goto error;
2317 }
d203f0b8 2318 for (j = 0; j < mca_cfg.banks; j++) {
8a25a2fd 2319 err = device_create_file(dev, &mce_banks[j].attr);
0d7482e3
AK
2320 if (err)
2321 goto error2;
2322 }
8a25a2fd 2323 cpumask_set_cpu(cpu, mce_device_initialized);
d6126ef5 2324 per_cpu(mce_device, cpu) = dev;
91c6d400 2325
d435d862 2326 return 0;
0d7482e3 2327error2:
b1f49f95 2328 while (--j >= 0)
8a25a2fd 2329 device_remove_file(dev, &mce_banks[j].attr);
d435d862 2330error:
cb491fca 2331 while (--i >= 0)
8a25a2fd 2332 device_remove_file(dev, mce_device_attrs[i]);
cb491fca 2333
8a25a2fd 2334 device_unregister(dev);
d435d862 2335
91c6d400
AK
2336 return err;
2337}
2338
148f9bb8 2339static void mce_device_remove(unsigned int cpu)
91c6d400 2340{
d6126ef5 2341 struct device *dev = per_cpu(mce_device, cpu);
73ca5358
SL
2342 int i;
2343
8a25a2fd 2344 if (!cpumask_test_cpu(cpu, mce_device_initialized))
bae19fe0
AH
2345 return;
2346
8a25a2fd
KS
2347 for (i = 0; mce_device_attrs[i]; i++)
2348 device_remove_file(dev, mce_device_attrs[i]);
cb491fca 2349
d203f0b8 2350 for (i = 0; i < mca_cfg.banks; i++)
8a25a2fd 2351 device_remove_file(dev, &mce_banks[i].attr);
cb491fca 2352
8a25a2fd
KS
2353 device_unregister(dev);
2354 cpumask_clear_cpu(cpu, mce_device_initialized);
d6126ef5 2355 per_cpu(mce_device, cpu) = NULL;
91c6d400 2356}
91c6d400 2357
d6b75584 2358/* Make sure there are no machine checks on offlined CPUs. */
148f9bb8 2359static void mce_disable_cpu(void *h)
d6b75584 2360{
88ccbedd 2361 unsigned long action = *(unsigned long *)h;
d6b75584 2362
89cbc767 2363 if (!mce_available(raw_cpu_ptr(&cpu_info)))
d6b75584 2364 return;
767df1bd 2365
88ccbedd
AK
2366 if (!(action & CPU_TASKS_FROZEN))
2367 cmci_clear();
11868a2d 2368
6e06780a 2369 vendor_disable_error_reporting();
d6b75584
AK
2370}
2371
148f9bb8 2372static void mce_reenable_cpu(void *h)
d6b75584 2373{
88ccbedd 2374 unsigned long action = *(unsigned long *)h;
e9eee03e 2375 int i;
d6b75584 2376
89cbc767 2377 if (!mce_available(raw_cpu_ptr(&cpu_info)))
d6b75584 2378 return;
e9eee03e 2379
88ccbedd
AK
2380 if (!(action & CPU_TASKS_FROZEN))
2381 cmci_reenable();
d203f0b8 2382 for (i = 0; i < mca_cfg.banks; i++) {
cebe1820 2383 struct mce_bank *b = &mce_banks[i];
11868a2d 2384
cebe1820 2385 if (b->init)
a2d32bcb 2386 wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
06b7a7a5 2387 }
d6b75584
AK
2388}
2389
91c6d400 2390/* Get notified when a cpu comes on/off. Be hotplug friendly. */
148f9bb8 2391static int
e9eee03e 2392mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
91c6d400
AK
2393{
2394 unsigned int cpu = (unsigned long)hcpu;
52d168e2 2395 struct timer_list *t = &per_cpu(mce_timer, cpu);
91c6d400 2396
1a65f970 2397 switch (action & ~CPU_TASKS_FROZEN) {
bae19fe0 2398 case CPU_ONLINE:
8a25a2fd 2399 mce_device_create(cpu);
8735728e
RW
2400 if (threshold_cpu_callback)
2401 threshold_cpu_callback(action, cpu);
91c6d400 2402 break;
91c6d400 2403 case CPU_DEAD:
8735728e
RW
2404 if (threshold_cpu_callback)
2405 threshold_cpu_callback(action, cpu);
8a25a2fd 2406 mce_device_remove(cpu);
55babd8f 2407 mce_intel_hcpu_update(cpu);
38356c1f
BP
2408
2409 /* intentionally ignoring frozen here */
2410 if (!(action & CPU_TASKS_FROZEN))
2411 cmci_rediscover();
91c6d400 2412 break;
52d168e2 2413 case CPU_DOWN_PREPARE:
88ccbedd 2414 smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
55babd8f 2415 del_timer_sync(t);
52d168e2
AK
2416 break;
2417 case CPU_DOWN_FAILED:
88ccbedd 2418 smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
26c3c283 2419 mce_start_timer(cpu, t);
88ccbedd 2420 break;
1a65f970
TG
2421 }
2422
bae19fe0 2423 return NOTIFY_OK;
91c6d400
AK
2424}
2425
148f9bb8 2426static struct notifier_block mce_cpu_notifier = {
91c6d400
AK
2427 .notifier_call = mce_cpu_callback,
2428};
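The notifier ties MCE state to CPU hotplug; offlining and re-onlining a CPU from userspace exercises the DOWN_PREPARE/DEAD and ONLINE paths above (editor-added illustration):

/*
 *   # echo 0 > /sys/devices/system/cpu/cpu2/online
 *       -> CPU_DOWN_PREPARE: mce_disable_cpu(), del_timer_sync()
 *       -> CPU_DEAD:         mce_device_remove(), cmci_rediscover()
 *   # echo 1 > /sys/devices/system/cpu/cpu2/online
 *       -> CPU_ONLINE:       mce_device_create(), threshold_cpu_callback()
 */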
2429
cebe1820 2430static __init void mce_init_banks(void)
0d7482e3
AK
2431{
2432 int i;
2433
d203f0b8 2434 for (i = 0; i < mca_cfg.banks; i++) {
cebe1820 2435 struct mce_bank *b = &mce_banks[i];
8a25a2fd 2436 struct device_attribute *a = &b->attr;
e9eee03e 2437
a07e4156 2438 sysfs_attr_init(&a->attr);
cebe1820
AK
2439 a->attr.name = b->attrname;
2440 snprintf(b->attrname, ATTR_LEN, "bank%d", i);
e9eee03e
IM
2441
2442 a->attr.mode = 0644;
2443 a->show = show_bank;
2444 a->store = set_bank;
0d7482e3 2445 }
0d7482e3
AK
2446}
2447
5e09954a 2448static __init int mcheck_init_device(void)
91c6d400
AK
2449{
2450 int err;
2451 int i = 0;
2452
9c15a24b
MS
2453 if (!mce_available(&boot_cpu_data)) {
2454 err = -EIO;
2455 goto err_out;
2456 }
0d7482e3 2457
9c15a24b
MS
2458 if (!zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL)) {
2459 err = -ENOMEM;
2460 goto err_out;
2461 }
996867d0 2462
cebe1820 2463 mce_init_banks();
0d7482e3 2464
8a25a2fd 2465 err = subsys_system_register(&mce_subsys, NULL);
d435d862 2466 if (err)
9c15a24b 2467 goto err_out_mem;
91c6d400 2468
82a8f131 2469 cpu_notifier_register_begin();
91c6d400 2470 for_each_online_cpu(i) {
8a25a2fd 2471 err = mce_device_create(i);
82a8f131 2472 if (err) {
27c93415
BP
2473 /*
 2474 * Register the notifier anyway (and do not unregister it) so
 2475 * that we don't leave undeleted timers; see the notifier
 2476 * callback above.
2477 */
2478 __register_hotcpu_notifier(&mce_cpu_notifier);
82a8f131 2479 cpu_notifier_register_done();
9c15a24b 2480 goto err_device_create;
82a8f131 2481 }
91c6d400
AK
2482 }
2483
82a8f131
SB
2484 __register_hotcpu_notifier(&mce_cpu_notifier);
2485 cpu_notifier_register_done();
93b62c3c 2486
9c15a24b
MS
2487 register_syscore_ops(&mce_syscore_ops);
2488
93b62c3c 2489 /* register character device /dev/mcelog */
9c15a24b
MS
2490 err = misc_register(&mce_chrdev_device);
2491 if (err)
2492 goto err_register;
2493
2494 return 0;
2495
2496err_register:
2497 unregister_syscore_ops(&mce_syscore_ops);
2498
9c15a24b
MS
2499err_device_create:
2500 /*
2501 * We didn't keep track of which devices were created above, but
2502 * even if we had, the set of online cpus might have changed.
2503 * Play safe and remove for every possible cpu, since
2504 * mce_device_remove() will do the right thing.
2505 */
2506 for_each_possible_cpu(i)
2507 mce_device_remove(i);
2508
2509err_out_mem:
2510 free_cpumask_var(mce_device_initialized);
2511
2512err_out:
2513 pr_err("Unable to init device /dev/mcelog (rc: %d)\n", err);
e9eee03e 2514
1da177e4 2515 return err;
1da177e4 2516}
cef12ee5 2517device_initcall_sync(mcheck_init_device);
a988d334 2518
d7c3c9a6
AK
2519/*
2520 * Old style boot options parsing. Only for compatibility.
2521 */
2522static int __init mcheck_disable(char *str)
2523{
1462594b 2524 mca_cfg.disabled = true;
d7c3c9a6
AK
2525 return 1;
2526}
2527__setup("nomce", mcheck_disable);
a988d334 2528
5be9ed25
HY
2529#ifdef CONFIG_DEBUG_FS
2530struct dentry *mce_get_debugfs_dir(void)
a988d334 2531{
5be9ed25 2532 static struct dentry *dmce;
a988d334 2533
5be9ed25
HY
2534 if (!dmce)
2535 dmce = debugfs_create_dir("mce", NULL);
a988d334 2536
5be9ed25
HY
2537 return dmce;
2538}
a988d334 2539
bf783f9f
HY
2540static void mce_reset(void)
2541{
2542 cpu_missing = 0;
c7c9b392 2543 atomic_set(&mce_fake_panicked, 0);
bf783f9f
HY
2544 atomic_set(&mce_executing, 0);
2545 atomic_set(&mce_callin, 0);
2546 atomic_set(&global_nwo, 0);
2547}
a988d334 2548
bf783f9f
HY
2549static int fake_panic_get(void *data, u64 *val)
2550{
2551 *val = fake_panic;
2552 return 0;
a988d334
IM
2553}
2554
bf783f9f 2555static int fake_panic_set(void *data, u64 val)
a988d334 2556{
bf783f9f
HY
2557 mce_reset();
2558 fake_panic = val;
2559 return 0;
a988d334 2560}
a988d334 2561
bf783f9f
HY
2562DEFINE_SIMPLE_ATTRIBUTE(fake_panic_fops, fake_panic_get,
2563 fake_panic_set, "%llu\n");
d7c3c9a6 2564
5e09954a 2565static int __init mcheck_debugfs_init(void)
d7c3c9a6 2566{
bf783f9f
HY
2567 struct dentry *dmce, *ffake_panic;
2568
2569 dmce = mce_get_debugfs_dir();
2570 if (!dmce)
2571 return -ENOMEM;
2572 ffake_panic = debugfs_create_file("fake_panic", 0444, dmce, NULL,
2573 &fake_panic_fops);
2574 if (!ffake_panic)
2575 return -ENOMEM;
2576
2577 return 0;
d7c3c9a6 2578}
fd4cf79f
CG
2579#else
2580static int __init mcheck_debugfs_init(void) { return -EINVAL; }
5be9ed25 2581#endif
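fake_panic is a test aid: when set, the machine check panic path only logs instead of really panicking, and fake_panic_set() first rearms the rendezvous counters via mce_reset(). A usage sketch (assumes debugfs mounted at /sys/kernel/debug; note the file is created with mode 0444, so the write below relies on root's DAC override):

/*
 *   # mount -t debugfs none /sys/kernel/debug     (if not already mounted)
 *   # echo 1 > /sys/kernel/debug/mce/fake_panic   (MCE panics become log messages)
 *   # echo 0 > /sys/kernel/debug/mce/fake_panic   (restore real panics)
 */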
fd4cf79f
CG
2582
2583static int __init mcheck_late_init(void)
2584{
2585 mcheck_debugfs_init();
2586
2587 /*
2588 * Flush out everything that has been logged during early boot, now that
2589 * everything has been initialized (workqueues, decoders, ...).
2590 */
2591 mce_schedule_work();
2592
2593 return 0;
2594}
2595late_initcall(mcheck_late_init);