]>
Commit | Line | Data |
---|---|---|
e5553a6d DM |
/* Pseudo NMI support on sparc64 systems.
 *
 * Copyright (C) 2009 David S. Miller <davem@davemloft.net>
 *
 * The NMI watchdog support and infrastructure is based almost
 * entirely upon the x86 NMI support code.
 */
8 | #include <linux/kernel.h> | |
9 | #include <linux/param.h> | |
10 | #include <linux/init.h> | |
11 | #include <linux/percpu.h> | |
12 | #include <linux/nmi.h> | |
066bcaca | 13 | #include <linux/export.h> |
e5553a6d DM |
14 | #include <linux/kprobes.h> |
15 | #include <linux/kernel_stat.h> | |
ffaba674 | 16 | #include <linux/reboot.h> |
e5553a6d DM |
17 | #include <linux/slab.h> |
18 | #include <linux/kdebug.h> | |
19 | #include <linux/delay.h> | |
20 | #include <linux/smp.h> | |
21 | ||
cdd6c482 | 22 | #include <asm/perf_event.h> |
e5553a6d | 23 | #include <asm/ptrace.h> |
e5553a6d DM |
24 | #include <asm/pcr.h> |
25 | ||
ec687886 DM |
26 | #include "kstack.h" |
27 | ||
e5553a6d DM |
/* We don't have a real NMI on sparc64, but we can fake one
 * up using profiling counter overflow interrupts and interrupt
 * levels.
 *
 * The profile overflow interrupts at level 15, so we use
 * level 14 as our IRQ off level.
 */
35 | ||
e5553a6d DM |
36 | static int panic_on_timeout; |
37 | ||
a8f22264 DM |
38 | /* nmi_active: |
39 | * >0: the NMI watchdog is active, but can be disabled | |
40 | * <0: the NMI watchdog has not been set up, and cannot be enabled | |
41 | * 0: the NMI watchdog is disabled, but can be enabled | |
42 | */ | |
43 | atomic_t nmi_active = ATOMIC_INIT(0); /* oprofile uses this */ | |
44 | EXPORT_SYMBOL(nmi_active); | |
e5553a6d DM |
45 | |
46 | static unsigned int nmi_hz = HZ; | |
a8f22264 DM |
47 | static DEFINE_PER_CPU(short, wd_enabled); |
48 | static int endflag __initdata; | |
e5553a6d DM |
49 | |
50 | static DEFINE_PER_CPU(unsigned int, last_irq_sum); | |
494f6a9e | 51 | static DEFINE_PER_CPU(long, alert_counter); |
e5553a6d DM |
52 | static DEFINE_PER_CPU(int, nmi_touch); |
53 | ||
54 | void touch_nmi_watchdog(void) | |
55 | { | |
d89be56b | 56 | if (atomic_read(&nmi_active)) { |
e5553a6d DM |
57 | int cpu; |
58 | ||
59 | for_each_present_cpu(cpu) { | |
60 | if (per_cpu(nmi_touch, cpu) != 1) | |
61 | per_cpu(nmi_touch, cpu) = 1; | |
62 | } | |
63 | } | |
64 | ||
65 | touch_softlockup_watchdog(); | |
66 | } | |
67 | EXPORT_SYMBOL(touch_nmi_watchdog); | |
68 | ||
69 | static void die_nmi(const char *str, struct pt_regs *regs, int do_panic) | |
70 | { | |
71 | if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, | |
72 | pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP) | |
73 | return; | |
74 | ||
75 | console_verbose(); | |
76 | bust_spinlocks(1); | |
77 | ||
78 | printk(KERN_EMERG "%s", str); | |
79 | printk(" on CPU%d, ip %08lx, registers:\n", | |
80 | smp_processor_id(), regs->tpc); | |
81 | show_regs(regs); | |
dc4ff585 | 82 | dump_stack(); |
e5553a6d DM |
83 | |
84 | bust_spinlocks(0); | |
85 | ||
86 | if (do_panic || panic_on_oops) | |
87 | panic("Non maskable interrupt"); | |
88 | ||
2d0740c4 | 89 | nmi_exit(); |
e5553a6d DM |
90 | local_irq_enable(); |
91 | do_exit(SIGBUS); | |
92 | } | |
93 | ||
94 | notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs) | |
95 | { | |
96 | unsigned int sum, touched = 0; | |
ec687886 | 97 | void *orig_sp; |
e5553a6d DM |
98 | |
99 | clear_softint(1 << irq); | |
e5553a6d DM |
100 | |
101 | local_cpu_data().__nmi_count++; | |
102 | ||
2d0740c4 DM |
103 | nmi_enter(); |
104 | ||
ec687886 DM |
105 | orig_sp = set_hardirq_stack(); |
106 | ||
e5553a6d DM |
107 | if (notify_die(DIE_NMI, "nmi", regs, 0, |
108 | pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP) | |
109 | touched = 1; | |
8183e2b3 | 110 | else |
ce4a925c | 111 | pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable); |
e5553a6d | 112 | |
daecbf58 | 113 | sum = local_cpu_data().irq0_irqs; |
e5553a6d DM |
114 | if (__get_cpu_var(nmi_touch)) { |
115 | __get_cpu_var(nmi_touch) = 0; | |
116 | touched = 1; | |
117 | } | |
118 | if (!touched && __get_cpu_var(last_irq_sum) == sum) { | |
dd17c8f7 RR |
119 | __this_cpu_inc(alert_counter); |
120 | if (__this_cpu_read(alert_counter) == 30 * nmi_hz) | |
e5553a6d DM |
121 | die_nmi("BUG: NMI Watchdog detected LOCKUP", |
122 | regs, panic_on_timeout); | |
123 | } else { | |
124 | __get_cpu_var(last_irq_sum) = sum; | |
dd17c8f7 | 125 | __this_cpu_write(alert_counter, 0); |
e5553a6d | 126 | } |
a8f22264 | 127 | if (__get_cpu_var(wd_enabled)) { |
73a6b053 | 128 | pcr_ops->write_pic(0, pcr_ops->nmi_picl_value(nmi_hz)); |
ce4a925c | 129 | pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_enable); |
e5553a6d | 130 | } |
2d0740c4 | 131 | |
ec687886 DM |
132 | restore_hardirq_stack(orig_sp); |
133 | ||
2d0740c4 | 134 | nmi_exit(); |
e5553a6d DM |
135 | } |
136 | ||
137 | static inline unsigned int get_nmi_count(int cpu) | |
138 | { | |
139 | return cpu_data(cpu).__nmi_count; | |
140 | } | |
141 | ||
e5553a6d DM |
142 | static __init void nmi_cpu_busy(void *data) |
143 | { | |
144 | local_irq_enable_in_hardirq(); | |
145 | while (endflag == 0) | |
146 | mb(); | |
147 | } | |
148 | ||
149 | static void report_broken_nmi(int cpu, int *prev_nmi_count) | |
150 | { | |
151 | printk(KERN_CONT "\n"); | |
152 | ||
153 | printk(KERN_WARNING | |
154 | "WARNING: CPU#%d: NMI appears to be stuck (%d->%d)!\n", | |
155 | cpu, prev_nmi_count[cpu], get_nmi_count(cpu)); | |
156 | ||
157 | printk(KERN_WARNING | |
158 | "Please report this to bugzilla.kernel.org,\n"); | |
159 | printk(KERN_WARNING | |
160 | "and attach the output of the 'dmesg' command.\n"); | |
161 | ||
a8f22264 DM |
162 | per_cpu(wd_enabled, cpu) = 0; |
163 | atomic_dec(&nmi_active); | |
e5553a6d DM |
164 | } |
165 | ||
59abbd1e | 166 | void stop_nmi_watchdog(void *unused) |
e5553a6d | 167 | { |
ce4a925c | 168 | pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable); |
a8f22264 DM |
169 | __get_cpu_var(wd_enabled) = 0; |
170 | atomic_dec(&nmi_active); | |
e5553a6d DM |
171 | } |
172 | ||
173 | static int __init check_nmi_watchdog(void) | |
174 | { | |
175 | unsigned int *prev_nmi_count; | |
176 | int cpu, err; | |
177 | ||
a8f22264 DM |
178 | if (!atomic_read(&nmi_active)) |
179 | return 0; | |
180 | ||
e5553a6d DM |
181 | prev_nmi_count = kmalloc(nr_cpu_ids * sizeof(unsigned int), GFP_KERNEL); |
182 | if (!prev_nmi_count) { | |
183 | err = -ENOMEM; | |
184 | goto error; | |
185 | } | |
186 | ||
187 | printk(KERN_INFO "Testing NMI watchdog ... "); | |
188 | ||
189 | smp_call_function(nmi_cpu_busy, (void *)&endflag, 0); | |
190 | ||
191 | for_each_possible_cpu(cpu) | |
192 | prev_nmi_count[cpu] = get_nmi_count(cpu); | |
193 | local_irq_enable(); | |
194 | mdelay((20 * 1000) / nmi_hz); /* wait 20 ticks */ | |
195 | ||
196 | for_each_online_cpu(cpu) { | |
a8f22264 DM |
197 | if (!per_cpu(wd_enabled, cpu)) |
198 | continue; | |
e5553a6d DM |
199 | if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5) |
200 | report_broken_nmi(cpu, prev_nmi_count); | |
201 | } | |
202 | endflag = 1; | |
a8f22264 | 203 | if (!atomic_read(&nmi_active)) { |
e5553a6d | 204 | kfree(prev_nmi_count); |
a8f22264 | 205 | atomic_set(&nmi_active, -1); |
e5553a6d DM |
206 | err = -ENODEV; |
207 | goto error; | |
208 | } | |
209 | printk("OK.\n"); | |
210 | ||
211 | nmi_hz = 1; | |
212 | ||
213 | kfree(prev_nmi_count); | |
214 | return 0; | |
215 | error: | |
a8f22264 | 216 | on_each_cpu(stop_nmi_watchdog, NULL, 1); |
e5553a6d DM |
217 | return err; |
218 | } | |
219 | ||
59abbd1e | 220 | void start_nmi_watchdog(void *unused) |
a8f22264 DM |
221 | { |
222 | __get_cpu_var(wd_enabled) = 1; | |
223 | atomic_inc(&nmi_active); | |
224 | ||
ce4a925c | 225 | pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable); |
73a6b053 | 226 | pcr_ops->write_pic(0, pcr_ops->nmi_picl_value(nmi_hz)); |
a8f22264 | 227 | |
ce4a925c | 228 | pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_enable); |
a8f22264 DM |
229 | } |
230 | ||
231 | static void nmi_adjust_hz_one(void *unused) | |
e5553a6d | 232 | { |
a8f22264 DM |
233 | if (!__get_cpu_var(wd_enabled)) |
234 | return; | |
235 | ||
ce4a925c | 236 | pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable); |
73a6b053 | 237 | pcr_ops->write_pic(0, pcr_ops->nmi_picl_value(nmi_hz)); |
e5553a6d | 238 | |
ce4a925c | 239 | pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_enable); |
e5553a6d DM |
240 | } |
241 | ||
242 | void nmi_adjust_hz(unsigned int new_hz) | |
243 | { | |
244 | nmi_hz = new_hz; | |
a8f22264 | 245 | on_each_cpu(nmi_adjust_hz_one, NULL, 1); |
e5553a6d DM |
246 | } |
247 | EXPORT_SYMBOL_GPL(nmi_adjust_hz); | |
248 | ||
ffaba674 DM |
249 | static int nmi_shutdown(struct notifier_block *nb, unsigned long cmd, void *p) |
250 | { | |
a8f22264 | 251 | on_each_cpu(stop_nmi_watchdog, NULL, 1); |
ffaba674 DM |
252 | return 0; |
253 | } | |
254 | ||
255 | static struct notifier_block nmi_reboot_notifier = { | |
256 | .notifier_call = nmi_shutdown, | |
257 | }; | |
258 | ||
e5553a6d DM |
259 | int __init nmi_init(void) |
260 | { | |
ffaba674 DM |
261 | int err; |
262 | ||
a8f22264 | 263 | on_each_cpu(start_nmi_watchdog, NULL, 1); |
e5553a6d | 264 | |
ffaba674 DM |
265 | err = check_nmi_watchdog(); |
266 | if (!err) { | |
267 | err = register_reboot_notifier(&nmi_reboot_notifier); | |
268 | if (err) { | |
a8f22264 DM |
269 | on_each_cpu(stop_nmi_watchdog, NULL, 1); |
270 | atomic_set(&nmi_active, -1); | |
ffaba674 DM |
271 | } |
272 | } | |
59abbd1e | 273 | |
ffaba674 | 274 | return err; |
e5553a6d DM |
275 | } |
276 | ||
277 | static int __init setup_nmi_watchdog(char *str) | |
278 | { | |
279 | if (!strncmp(str, "panic", 5)) | |
280 | panic_on_timeout = 1; | |
281 | ||
282 | return 0; | |
283 | } | |
284 | __setup("nmi_watchdog=", setup_nmi_watchdog); |