/*
 *  linux/arch/x86_64/nmi.c
 *
 *  NMI watchdog support on APIC systems
 *
 *  Started by Ingo Molnar <mingo@redhat.com>
 *
 *  Fixes:
 *  Mikael Pettersson	: AMD K7 support for local APIC NMI watchdog.
 *  Mikael Pettersson	: Power Management for local APIC NMI watchdog.
 *  Pavel Machek and
 *  Mikael Pettersson	: PM converted to driver model. Disable/enable API.
 */

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/sysdev.h>
#include <linux/nmi.h>
#include <linux/sysctl.h>
#include <linux/kprobes.h>

#include <asm/smp.h>
#include <asm/nmi.h>
#include <asm/proto.h>
#include <asm/kdebug.h>
#include <asm/mce.h>

/* perfctr_nmi_owner tracks the ownership of the perfctr registers;
 * evntsel_nmi_owner tracks the ownership of the event selection registers.
 * Different performance counters and event-selection registers may be
 * reserved by different subsystems; this reservation system just tries
 * to coordinate things a little.
 */
static DEFINE_PER_CPU(unsigned, perfctr_nmi_owner);
static DEFINE_PER_CPU(unsigned, evntsel_nmi_owner[2]);

/* This number is calculated from Intel's MSR_P4_CRU_ESCR5 register and its
 * offset from MSR_P4_BSU_ESCR0. It will be the max for all platforms (for now).
 */
#define NMI_MAX_COUNTER_BITS 66

/*
 * lapic_nmi_owner tracks the ownership of the lapic NMI hardware:
 * - it may be reserved by some other driver, or not
 * - when not reserved by some other driver, it may be used for
 *   the NMI watchdog, or not
 *
 * This is maintained separately from nmi_active because the NMI
 * watchdog may also be driven from the I/O APIC timer.
 */
static DEFINE_SPINLOCK(lapic_nmi_owner_lock);
static unsigned int lapic_nmi_owner;
#define LAPIC_NMI_WATCHDOG	(1<<0)
#define LAPIC_NMI_RESERVED	(1<<1)

/* nmi_active:
 * +1: the lapic NMI watchdog is active, but can be disabled
 *  0: the lapic NMI watchdog has not been set up, and cannot
 *     be enabled
 * -1: the lapic NMI watchdog is disabled, but can be enabled
 */
int nmi_active;		/* oprofile uses this */
int panic_on_timeout;

unsigned int nmi_watchdog = NMI_DEFAULT;
static unsigned int nmi_hz = HZ;
static unsigned int nmi_perfctr_msr;	/* the MSR to reset in NMI handler */
static unsigned int nmi_p4_cccr_val;

/* Note that these events don't tick when the CPU idles. This means
   the frequency varies with CPU load. */

#define K7_EVNTSEL_ENABLE	(1 << 22)
#define K7_EVNTSEL_INT		(1 << 20)
#define K7_EVNTSEL_OS		(1 << 17)
#define K7_EVNTSEL_USR		(1 << 16)
#define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING	0x76
#define K7_NMI_EVENT		K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING

#define MSR_P4_MISC_ENABLE	0x1A0
#define MSR_P4_MISC_ENABLE_PERF_AVAIL	(1<<7)
#define MSR_P4_MISC_ENABLE_PEBS_UNAVAIL	(1<<12)
#define MSR_P4_PERFCTR0		0x300
#define MSR_P4_CCCR0		0x360
#define P4_ESCR_EVENT_SELECT(N)	((N)<<25)
#define P4_ESCR_OS		(1<<3)
#define P4_ESCR_USR		(1<<2)
#define P4_CCCR_OVF_PMI0	(1<<26)
#define P4_CCCR_OVF_PMI1	(1<<27)
#define P4_CCCR_THRESHOLD(N)	((N)<<20)
#define P4_CCCR_COMPLEMENT	(1<<19)
#define P4_CCCR_COMPARE		(1<<18)
#define P4_CCCR_REQUIRED	(3<<16)
#define P4_CCCR_ESCR_SELECT(N)	((N)<<13)
#define P4_CCCR_ENABLE		(1<<12)
/* Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter
   CRU_ESCR0 (with any non-null event selector) through a complemented
   max threshold. [IA32-Vol3, Section 14.9.9] */
#define MSR_P4_IQ_COUNTER0	0x30C
#define P4_NMI_CRU_ESCR0	(P4_ESCR_EVENT_SELECT(0x3F)|P4_ESCR_OS|P4_ESCR_USR)
#define P4_NMI_IQ_CCCR0	\
	(P4_CCCR_OVF_PMI0|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT|	\
	 P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE)

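/*
 * Editor's sketch (not in the original source): how the watchdog period
 * falls out of these constants. Both setup paths below arm the counter
 * with a negative count so that it overflows -- raising an NMI through
 * the LVTPC entry -- nmi_hz times per second:
 *
 *	wrmsrl(ctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
 *
 * E.g. on a hypothetical 2 GHz CPU (cpu_khz == 2000000) with nmi_hz == 1,
 * the counter starts at -2000000000 and, counting one cycle-like event
 * per clock, wraps past zero after roughly one second.
 */
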
/* converts an msr to an appropriate reservation bit */
static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
{
	/* returns the bit offset of the performance counter register */
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		return (msr - MSR_K7_PERFCTR0);
	case X86_VENDOR_INTEL:
		return (msr - MSR_P4_BPU_PERFCTR0);
	}
	return 0;
}

/* converts an msr to an appropriate reservation bit */
static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
{
	/* returns the bit offset of the event selection register */
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		return (msr - MSR_K7_EVNTSEL0);
	case X86_VENDOR_INTEL:
		return (msr - MSR_P4_BSU_ESCR0);
	}
	return 0;
}

/* checks whether a counter bit is available (hack for oprofile) */
int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
{
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
}

/* checks an msr for availability */
int avail_to_resrv_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
}

int reserve_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	if (!test_and_set_bit(counter, &__get_cpu_var(perfctr_nmi_owner)))
		return 1;
	return 0;
}

void release_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	clear_bit(counter, &__get_cpu_var(perfctr_nmi_owner));
}

int reserve_evntsel_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_evntsel_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	if (!test_and_set_bit(counter, &__get_cpu_var(evntsel_nmi_owner)))
		return 1;
	return 0;
}

void release_evntsel_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_evntsel_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	clear_bit(counter, &__get_cpu_var(evntsel_nmi_owner));
}

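/*
 * Illustrative usage sketch (editor's addition, not in the original file):
 * how a profiling subsystem such as oprofile might claim a counter and its
 * event-select register through the API above, assuming the AMD K7 MSRs.
 * It mirrors the reserve/release pairing that setup_k7_watchdog() uses.
 */
#if 0
static int example_claim_k7_counter(void)
{
	if (!reserve_perfctr_nmi(MSR_K7_PERFCTR0))
		return -EBUSY;
	if (!reserve_evntsel_nmi(MSR_K7_EVNTSEL0)) {
		/* back out the counter reservation on failure */
		release_perfctr_nmi(MSR_K7_PERFCTR0);
		return -EBUSY;
	}
	/* ... program MSR_K7_EVNTSEL0/MSR_K7_PERFCTR0 here ... */
	return 0;
}
#endif
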
static __cpuinit inline int nmi_known_cpu(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		return boot_cpu_data.x86 == 15;
	case X86_VENDOR_INTEL:
		return boot_cpu_data.x86 == 15;
	}
	return 0;
}

/* Run after command line and cpu_init init, but before all other checks */
void __cpuinit nmi_watchdog_default(void)
{
	if (nmi_watchdog != NMI_DEFAULT)
		return;
	if (nmi_known_cpu())
		nmi_watchdog = NMI_LOCAL_APIC;
	else
		nmi_watchdog = NMI_IO_APIC;
}

#ifdef CONFIG_SMP
/* The performance counters used by NMI_LOCAL_APIC don't trigger when
 * the CPU is idle. To make sure the NMI watchdog really ticks on all
 * CPUs during the test, make them busy.
 */
static __init void nmi_cpu_busy(void *data)
{
	volatile int *endflag = data;
	local_irq_enable_in_hardirq();
	/* Intentionally don't use cpu_relax here. This is
	   to make sure that the performance counter really ticks,
	   even if there is a simulator or similar that catches the
	   pause instruction. On a real HT machine this is fine because
	   all other CPUs are busy with "useless" delay loops and don't
	   care if they get somewhat fewer cycles. */
	while (*endflag == 0)
		barrier();
}
#endif

int __init check_nmi_watchdog(void)
{
	volatile int endflag = 0;
	int *counts;
	int cpu;

	counts = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL);
	if (!counts)
		return -1;

	printk(KERN_INFO "testing NMI watchdog ... ");

#ifdef CONFIG_SMP
	if (nmi_watchdog == NMI_LOCAL_APIC)
		smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
#endif

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		counts[cpu] = cpu_pda(cpu)->__nmi_count;
	local_irq_enable();
	mdelay((10*1000)/nmi_hz);	/* wait 10 ticks */

	for_each_online_cpu(cpu) {
		if (cpu_pda(cpu)->__nmi_count - counts[cpu] <= 5) {
			endflag = 1;
			printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n",
			       cpu,
			       counts[cpu],
			       cpu_pda(cpu)->__nmi_count);
			nmi_active = 0;
			lapic_nmi_owner &= ~LAPIC_NMI_WATCHDOG;
			nmi_perfctr_msr = 0;
			kfree(counts);
			return -1;
		}
	}
	endflag = 1;
	printk("OK.\n");

	/* now that we know it works we can reduce NMI frequency to
	   something more reasonable; makes a difference in some configs */
	if (nmi_watchdog == NMI_LOCAL_APIC)
		nmi_hz = 1;

	kfree(counts);
	return 0;
}

int __init setup_nmi_watchdog(char *str)
{
	int nmi;

	if (!strncmp(str, "panic", 5)) {
		panic_on_timeout = 1;
		str = strchr(str, ',');
		if (!str)
			return 1;
		++str;
	}

	get_option(&str, &nmi);

	if (nmi >= NMI_INVALID)
		return 0;
	nmi_watchdog = nmi;
	return 1;
}

__setup("nmi_watchdog=", setup_nmi_watchdog);

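/*
 * Editor's note (not in the original file): the parser above accepts an
 * optional "panic" prefix followed by a mode number, so a command line of
 * e.g. "nmi_watchdog=panic,2" sets panic_on_timeout and selects mode 2
 * (NMI_LOCAL_APIC, assuming the usual NMI_NONE=0 / NMI_IO_APIC=1 /
 * NMI_LOCAL_APIC=2 numbering in this kernel's headers), while a plain
 * "nmi_watchdog=1" just selects the I/O APIC mode.
 */
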
static void disable_lapic_nmi_watchdog(void)
{
	if (nmi_active <= 0)
		return;
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		wrmsr(MSR_K7_EVNTSEL0, 0, 0);
		break;
	case X86_VENDOR_INTEL:
		if (boot_cpu_data.x86 == 15) {
			wrmsr(MSR_P4_IQ_CCCR0, 0, 0);
			wrmsr(MSR_P4_CRU_ESCR0, 0, 0);
		}
		break;
	}
	nmi_active = -1;
	/* tell do_nmi() and others that we're not active any more */
	nmi_watchdog = 0;
}

static void enable_lapic_nmi_watchdog(void)
{
	if (nmi_active < 0) {
		nmi_watchdog = NMI_LOCAL_APIC;
		touch_nmi_watchdog();
		setup_apic_nmi_watchdog();
	}
}

int reserve_lapic_nmi(void)
{
	unsigned int old_owner;

	spin_lock(&lapic_nmi_owner_lock);
	old_owner = lapic_nmi_owner;
	lapic_nmi_owner |= LAPIC_NMI_RESERVED;
	spin_unlock(&lapic_nmi_owner_lock);
	if (old_owner & LAPIC_NMI_RESERVED)
		return -EBUSY;
	if (old_owner & LAPIC_NMI_WATCHDOG)
		disable_lapic_nmi_watchdog();
	return 0;
}

void release_lapic_nmi(void)
{
	unsigned int new_owner;

	spin_lock(&lapic_nmi_owner_lock);
	new_owner = lapic_nmi_owner & ~LAPIC_NMI_RESERVED;
	lapic_nmi_owner = new_owner;
	spin_unlock(&lapic_nmi_owner_lock);
	if (new_owner & LAPIC_NMI_WATCHDOG)
		enable_lapic_nmi_watchdog();
}

void disable_timer_nmi_watchdog(void)
{
	if ((nmi_watchdog != NMI_IO_APIC) || (nmi_active <= 0))
		return;

	disable_irq(0);
	unset_nmi_callback();
	nmi_active = -1;
	nmi_watchdog = NMI_NONE;
}

void enable_timer_nmi_watchdog(void)
{
	if (nmi_active < 0) {
		nmi_watchdog = NMI_IO_APIC;
		touch_nmi_watchdog();
		nmi_active = 1;
		enable_irq(0);
	}
}

#ifdef CONFIG_PM

static int nmi_pm_active; /* nmi_active before suspend */

static int lapic_nmi_suspend(struct sys_device *dev, pm_message_t state)
{
	nmi_pm_active = nmi_active;
	disable_lapic_nmi_watchdog();
	return 0;
}

static int lapic_nmi_resume(struct sys_device *dev)
{
	if (nmi_pm_active > 0)
		enable_lapic_nmi_watchdog();
	return 0;
}

static struct sysdev_class nmi_sysclass = {
	set_kset_name("lapic_nmi"),
	.resume		= lapic_nmi_resume,
	.suspend	= lapic_nmi_suspend,
};

static struct sys_device device_lapic_nmi = {
	.id	= 0,
	.cls	= &nmi_sysclass,
};

static int __init init_lapic_nmi_sysfs(void)
{
	int error;

	if (nmi_active == 0 || nmi_watchdog != NMI_LOCAL_APIC)
		return 0;

	error = sysdev_class_register(&nmi_sysclass);
	if (!error)
		error = sysdev_register(&device_lapic_nmi);
	return error;
}
/* must come after the local APIC's device_initcall() */
late_initcall(init_lapic_nmi_sysfs);

#endif	/* CONFIG_PM */

static int setup_k7_watchdog(void)
{
	unsigned int evntsel;

	nmi_perfctr_msr = MSR_K7_PERFCTR0;

	if (!reserve_perfctr_nmi(nmi_perfctr_msr))
		goto fail;

	if (!reserve_evntsel_nmi(MSR_K7_EVNTSEL0))
		goto fail1;

	/* Simulator may not support it */
	if (checking_wrmsrl(MSR_K7_EVNTSEL0, 0UL))
		goto fail2;
	wrmsrl(MSR_K7_PERFCTR0, 0UL);

	evntsel = K7_EVNTSEL_INT
		| K7_EVNTSEL_OS
		| K7_EVNTSEL_USR
		| K7_NMI_EVENT;

	wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
	wrmsrl(MSR_K7_PERFCTR0, -((u64)cpu_khz * 1000 / nmi_hz));
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	evntsel |= K7_EVNTSEL_ENABLE;
	wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
	return 1;
fail2:
	release_evntsel_nmi(MSR_K7_EVNTSEL0);
fail1:
	release_perfctr_nmi(nmi_perfctr_msr);
fail:
	return 0;
}

static int setup_p4_watchdog(void)
{
	unsigned int misc_enable, dummy;

	rdmsr(MSR_P4_MISC_ENABLE, misc_enable, dummy);
	if (!(misc_enable & MSR_P4_MISC_ENABLE_PERF_AVAIL))
		return 0;

	nmi_perfctr_msr = MSR_P4_IQ_COUNTER0;
	nmi_p4_cccr_val = P4_NMI_IQ_CCCR0;
#ifdef CONFIG_SMP
	if (smp_num_siblings == 2)
		nmi_p4_cccr_val |= P4_CCCR_OVF_PMI1;
#endif

	if (!reserve_perfctr_nmi(nmi_perfctr_msr))
		goto fail;

	if (!reserve_evntsel_nmi(MSR_P4_CRU_ESCR0))
		goto fail1;

	wrmsr(MSR_P4_CRU_ESCR0, P4_NMI_CRU_ESCR0, 0);
	wrmsr(MSR_P4_IQ_CCCR0, P4_NMI_IQ_CCCR0 & ~P4_CCCR_ENABLE, 0);
	Dprintk("setting P4_IQ_COUNTER0 to 0x%08lx\n", -(cpu_khz * 1000UL / nmi_hz));
	wrmsrl(MSR_P4_IQ_COUNTER0, -((u64)cpu_khz * 1000 / nmi_hz));
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
	return 1;
fail1:
	release_perfctr_nmi(nmi_perfctr_msr);
fail:
	return 0;
}

void setup_apic_nmi_watchdog(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		if (boot_cpu_data.x86 != 15)
			return;
		if (strstr(boot_cpu_data.x86_model_id, "Screwdriver"))
			return;
		if (!setup_k7_watchdog())
			return;
		break;
	case X86_VENDOR_INTEL:
		if (boot_cpu_data.x86 != 15)
			return;
		if (!setup_p4_watchdog())
			return;
		break;

	default:
		return;
	}
	lapic_nmi_owner = LAPIC_NMI_WATCHDOG;
	nmi_active = 1;
}

/*
 * The best way to detect whether a CPU has a 'hard lockup' problem
 * is to check its local APIC timer IRQ counts. If they are not
 * changing then that CPU has some problem.
 *
 * As these watchdog NMI IRQs are generated on every CPU, we only
 * have to check the current processor.
 */

static DEFINE_PER_CPU(unsigned, last_irq_sum);
static DEFINE_PER_CPU(local_t, alert_counter);
static DEFINE_PER_CPU(int, nmi_touch);

void touch_nmi_watchdog(void)
{
	if (nmi_watchdog > 0) {
		unsigned cpu;

		/*
		 * Tell other CPUs to reset their alert counters. We cannot
		 * do it ourselves because the alert count increase is not
		 * atomic.
		 */
		for_each_present_cpu(cpu)
			per_cpu(nmi_touch, cpu) = 1;
	}

	touch_softlockup_watchdog();
}

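/*
 * Illustrative usage sketch (editor's addition, not in the original file):
 * long-running code that legitimately keeps a CPU busy is expected to poke
 * the watchdog periodically so the stuck-CPU check in nmi_watchdog_tick()
 * below doesn't fire. The loop bound and the work done are hypothetical.
 */
#if 0
static void example_long_polling_loop(void)
{
	int i;

	for (i = 0; i < 1000000; i++) {
		/* ... poll some hardware register ... */
		touch_nmi_watchdog();	/* reset every CPU's alert counter */
	}
}
#endif
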
void __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
{
	int sum;
	int touched = 0;

	sum = read_pda(apic_timer_irqs);
	if (__get_cpu_var(nmi_touch)) {
		__get_cpu_var(nmi_touch) = 0;
		touched = 1;
	}
#ifdef CONFIG_X86_MCE
	/* Could check oops_in_progress here too, but it's safer
	   not to */
	if (atomic_read(&mce_entry) > 0)
		touched = 1;
#endif
	if (!touched && __get_cpu_var(last_irq_sum) == sum) {
		/*
		 * Ayiee, looks like this CPU is stuck ...
		 * wait a few IRQs (5 seconds) before doing the oops ...
		 */
		local_inc(&__get_cpu_var(alert_counter));
		if (local_read(&__get_cpu_var(alert_counter)) == 5*nmi_hz) {
			if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
							== NOTIFY_STOP) {
				local_set(&__get_cpu_var(alert_counter), 0);
				return;
			}
			die_nmi("NMI Watchdog detected LOCKUP on CPU %d\n", regs);
		}
	} else {
		__get_cpu_var(last_irq_sum) = sum;
		local_set(&__get_cpu_var(alert_counter), 0);
	}
	if (nmi_perfctr_msr) {
		if (nmi_perfctr_msr == MSR_P4_IQ_COUNTER0) {
			/*
			 * P4 quirks:
			 * - An overflown perfctr will assert its interrupt
			 *   until the OVF flag in its CCCR is cleared.
			 * - LVTPC is masked on interrupt and must be
			 *   unmasked by the LVTPC handler.
			 */
			wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
			apic_write(APIC_LVTPC, APIC_DM_NMI);
		}
		wrmsrl(nmi_perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
	}
}

static __kprobes int dummy_nmi_callback(struct pt_regs * regs, int cpu)
{
	return 0;
}

static nmi_callback_t nmi_callback = dummy_nmi_callback;

asmlinkage __kprobes void do_nmi(struct pt_regs * regs, long error_code)
{
	int cpu = safe_smp_processor_id();

	nmi_enter();
	add_pda(__nmi_count, 1);
	if (!rcu_dereference(nmi_callback)(regs, cpu))
		default_do_nmi(regs);
	nmi_exit();
}

void set_nmi_callback(nmi_callback_t callback)
{
	vmalloc_sync_all();
	rcu_assign_pointer(nmi_callback, callback);
}
EXPORT_SYMBOL_GPL(set_nmi_callback);

void unset_nmi_callback(void)
{
	nmi_callback = dummy_nmi_callback;
}
EXPORT_SYMBOL_GPL(unset_nmi_callback);

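/*
 * Illustrative usage sketch (editor's addition, not in the original file):
 * the expected register/unregister pattern for the callback hook above.
 * The sysctl code below follows the same shape with
 * unknown_nmi_panic_callback(); the handler name here is hypothetical.
 */
#if 0
static int example_nmi_handler(struct pt_regs *regs, int cpu)
{
	/* return 1 to claim the NMI, 0 to fall through to default_do_nmi() */
	return 0;
}

static void example_install(void)
{
	set_nmi_callback(example_nmi_handler);
}

static void example_remove(void)
{
	unset_nmi_callback();
}
#endif
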
#ifdef CONFIG_SYSCTL

static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu)
{
	unsigned char reason = get_nmi_reason();
	char buf[64];

	if (!(reason & 0xc0)) {
		sprintf(buf, "NMI received for unknown reason %02x\n", reason);
		die_nmi(buf, regs);
	}
	return 0;
}

/*
 * proc handler for /proc/sys/kernel/unknown_nmi_panic
 */
int proc_unknown_nmi_panic(struct ctl_table *table, int write, struct file *file,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	int old_state;

	old_state = unknown_nmi_panic;
	proc_dointvec(table, write, file, buffer, length, ppos);
	if (!!old_state == !!unknown_nmi_panic)
		return 0;

	if (unknown_nmi_panic) {
		if (reserve_lapic_nmi() < 0) {
			unknown_nmi_panic = 0;
			return -EBUSY;
		} else {
			set_nmi_callback(unknown_nmi_panic_callback);
		}
	} else {
		release_lapic_nmi();
		unset_nmi_callback();
	}
	return 0;
}

#endif

EXPORT_SYMBOL(nmi_active);
EXPORT_SYMBOL(nmi_watchdog);
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi);
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
EXPORT_SYMBOL(reserve_perfctr_nmi);
EXPORT_SYMBOL(release_perfctr_nmi);
EXPORT_SYMBOL(reserve_evntsel_nmi);
EXPORT_SYMBOL(release_evntsel_nmi);
EXPORT_SYMBOL(reserve_lapic_nmi);
EXPORT_SYMBOL(release_lapic_nmi);
EXPORT_SYMBOL(disable_timer_nmi_watchdog);
EXPORT_SYMBOL(enable_timer_nmi_watchdog);
EXPORT_SYMBOL(touch_nmi_watchdog);