/*
 * HW NMI watchdog support
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Arch specific calls to support NMI watchdog
 *
 * Bits copied from original nmi.c file
 *
 */

#include <asm/apic.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/sched.h>
#include <linux/percpu.h>
#include <linux/kernel_stat.h>
#include <asm/mce.h>

#include <linux/nmi.h>
#include <linux/module.h>

/* For reliability, we're prepared to waste bits here. */
static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;

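/*
 * Per-CPU snapshot of the timer interrupt count taken at the previous
 * watchdog check; hw_nmi_is_cpu_stuck() compares it with a fresh reading
 * to decide whether this CPU has made any progress.
 */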
static DEFINE_PER_CPU(unsigned, last_irq_sum);

/*
 * Take the local apic timer and PIT/HPET into account. We don't
 * know which one is active when highres/dyntick is on
 */
static inline unsigned int get_timer_irqs(int cpu)
{
	unsigned int irqs = per_cpu(irq_stat, cpu).irq0_irqs;

#if defined(CONFIG_X86_LOCAL_APIC)
	irqs += per_cpu(irq_stat, cpu).apic_timer_irqs;
#endif

	return irqs;
}

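/*
 * mce_entry stays non-zero while a machine check is being handled, so a
 * positive count means this CPU is busy in the MCE path rather than hung.
 */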
static inline int mce_in_progress(void)
{
#if defined(CONFIG_X86_MCE)
	return atomic_read(&mce_entry) > 0;
#endif
	return 0;
}

int hw_nmi_is_cpu_stuck(struct pt_regs *regs)
{
	unsigned int sum;
	int cpu = smp_processor_id();

	/* FIXME: cheap hack for this check, probably should get its own
	 * die_notifier handler
	 */
	if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
		static DEFINE_SPINLOCK(lock);	/* Serialise the printks */

		spin_lock(&lock);
		printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
		show_regs(regs);
		dump_stack();
		spin_unlock(&lock);
		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
	}

	/* if we are doing an mce, just assume the cpu is not stuck */
	/* Could check oops_in_progress here too, but it's safer not to */
	if (mce_in_progress())
		return 0;

	/* We determine if the cpu is stuck by checking whether any
	 * interrupts have happened since we last checked. Of course
	 * an nmi storm could create false positives, but the higher
	 * level logic should account for that
	 */
	sum = get_timer_irqs(cpu);
	if (__get_cpu_var(last_irq_sum) == sum) {
		return 1;
	} else {
		__get_cpu_var(last_irq_sum) = sum;
		return 0;
	}
}
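
/*
 * Illustrative sketch (not code from this file): the generic lockup
 * detector is expected to call hw_nmi_is_cpu_stuck() from its periodic
 * NMI callback and escalate only after repeated positives, e.g.:
 *
 *	static DEFINE_PER_CPU(unsigned int, stuck_count);
 *
 *	static void watchdog_nmi_callback(struct pt_regs *regs)
 *	{
 *		if (!hw_nmi_is_cpu_stuck(regs)) {
 *			__get_cpu_var(stuck_count) = 0;
 *			return;
 *		}
 *		if (++__get_cpu_var(stuck_count) >= 5)
 *			panic("Watchdog detected hard LOCKUP on cpu %d",
 *			      smp_processor_id());
 *	}
 *
 * The callback name, counter, and threshold above are hypothetical; only
 * the hw_nmi_is_cpu_stuck() contract (1 = no progress seen) comes from
 * this file.
 */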

u64 hw_nmi_get_sample_period(void)
{
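	/*
	 * cpu_khz is the CPU clock in kHz, so cpu_khz * 1000 is cycles per
	 * second and the product below is the cycle count for roughly 60
	 * seconds, used by the caller as the NMI sample period.
	 */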
	return (u64)(cpu_khz) * 1000 * 60;
}

#ifdef ARCH_HAS_NMI_WATCHDOG
void arch_trigger_all_cpu_backtrace(void)
{
	int i;

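	/*
	 * Arm every online CPU: each one prints its backtrace from
	 * hw_nmi_is_cpu_stuck() when the NMI below arrives and then clears
	 * its bit, which is what the wait loop at the end polls for.
	 */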
	cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask);

	printk(KERN_INFO "sending NMI to all CPUs:\n");
	apic->send_IPI_all(NMI_VECTOR);

	/* Wait for up to 10 seconds for all CPUs to do the backtrace */
	for (i = 0; i < 10 * 1000; i++) {
		if (cpumask_empty(to_cpumask(backtrace_mask)))
			break;
		mdelay(1);
	}
}
#endif
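
/*
 * Note (an assumption about callers, not something defined here): the
 * usual way to reach arch_trigger_all_cpu_backtrace() is the magic SysRq
 * 'l' handler, e.g.:
 *
 *	echo l > /proc/sysrq-trigger
 */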

/*
 * STUB calls to mimic old nmi_watchdog behaviour: the perf-based lockup
 * detector replaces the old nmi_watchdog, but these symbols are kept so
 * existing users (e.g. oprofile and the ACPI glue) still compile and link.
 */
#if defined(CONFIG_X86_LOCAL_APIC)
unsigned int nmi_watchdog = NMI_NONE;
EXPORT_SYMBOL(nmi_watchdog);
void acpi_nmi_enable(void) { return; }
void acpi_nmi_disable(void) { return; }
#endif
atomic_t nmi_active = ATOMIC_INIT(0);		/* oprofile uses this */
EXPORT_SYMBOL(nmi_active);
int unknown_nmi_panic;
void cpu_nmi_set_wd_enabled(void) { return; }
void stop_apic_nmi_watchdog(void *unused) { return; }
void setup_apic_nmi_watchdog(void *unused) { return; }
int __init check_nmi_watchdog(void) { return 0; }