/*
 *  linux/arch/x86_64/nmi.c
 *
 *  NMI watchdog support on APIC systems
 *
 *  Started by Ingo Molnar <mingo@redhat.com>
 *
 *  Fixes:
 *  Mikael Pettersson	: AMD K7 support for local APIC NMI watchdog.
 *  Mikael Pettersson	: Power Management for local APIC NMI watchdog.
 *  Pavel Machek and
 *  Mikael Pettersson	: PM converted to driver model. Disable/enable API.
 */

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/sysdev.h>
#include <linux/nmi.h>
#include <linux/sysctl.h>
#include <linux/kprobes.h>

#include <asm/smp.h>
#include <asm/nmi.h>
#include <asm/proto.h>
#include <asm/kdebug.h>
#include <asm/mce.h>

/* perfctr_nmi_owner tracks the ownership of the perfctr registers:
 * evntsel_nmi_owner tracks the ownership of the event selection registers
 * - different performance counters / event selection registers may be
 *   reserved by different subsystems; this reservation system just tries
 *   to coordinate things a little
 */
static DEFINE_PER_CPU(unsigned, perfctr_nmi_owner);
static DEFINE_PER_CPU(unsigned, evntsel_nmi_owner[2]);

/* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and its
 * offset from MSR_P4_BSU_ESCR0.  It will be the max for all platforms (for now)
 */
#define NMI_MAX_COUNTER_BITS 66

/* nmi_active:
 * >0: the lapic NMI watchdog is active, but can be disabled
 * <0: the lapic NMI watchdog has not been set up, and cannot
 *     be enabled
 *  0: the lapic NMI watchdog is disabled, but can be enabled
 */
atomic_t nmi_active = ATOMIC_INIT(0);		/* oprofile uses this */
int panic_on_timeout;

unsigned int nmi_watchdog = NMI_DEFAULT;
static unsigned int nmi_hz = HZ;

struct nmi_watchdog_ctlblk {
	int enabled;
	u64 check_bit;
	unsigned int cccr_msr;
	unsigned int perfctr_msr;  /* the MSR to reset in NMI handler */
	unsigned int evntsel_msr;  /* the MSR to select the events to handle */
};
static DEFINE_PER_CPU(struct nmi_watchdog_ctlblk, nmi_watchdog_ctlblk);

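/* Each CPU's nmi_watchdog_ctlblk is filled in by the setup_*_watchdog()
 * routines below and consulted from nmi_watchdog_tick(): perfctr_msr is
 * re-armed on every watchdog NMI, and check_bit is used to tell a genuine
 * perfctr-overflow NMI apart from other NMI sources.
 */
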
/* local prototypes */
static void stop_apic_nmi_watchdog(void *unused);
static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu);

/* converts an msr to an appropriate reservation bit */
static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
{
	/* returns the bit offset of the performance counter register */
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		return (msr - MSR_K7_PERFCTR0);
	case X86_VENDOR_INTEL:
		return (msr - MSR_P4_BPU_PERFCTR0);
	}
	return 0;
}

/* converts an msr to an appropriate reservation bit */
static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
{
	/* returns the bit offset of the event selection register */
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		return (msr - MSR_K7_EVNTSEL0);
	case X86_VENDOR_INTEL:
		return (msr - MSR_P4_BSU_ESCR0);
	}
	return 0;
}

/* checks whether a bit is available (hack for oprofile) */
int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
{
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
}

/* checks an msr for availability */
int avail_to_resrv_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
}

int reserve_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	if (!test_and_set_bit(counter, &__get_cpu_var(perfctr_nmi_owner)))
		return 1;
	return 0;
}

void release_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	clear_bit(counter, &__get_cpu_var(perfctr_nmi_owner));
}

int reserve_evntsel_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_evntsel_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	if (!test_and_set_bit(counter, &__get_cpu_var(evntsel_nmi_owner)))
		return 1;
	return 0;
}

void release_evntsel_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_evntsel_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	clear_bit(counter, &__get_cpu_var(evntsel_nmi_owner));
}

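/* Expected usage of the reservation API above (this is how the
 * setup_*_watchdog() routines below and perfctr clients such as oprofile
 * use it): call reserve_perfctr_nmi()/reserve_evntsel_nmi() with the MSR
 * you intend to program and back off if either returns 0; call the
 * matching release_*() functions when the counter is torn down again.
 */
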
static __cpuinit inline int nmi_known_cpu(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		return boot_cpu_data.x86 == 15;
	case X86_VENDOR_INTEL:
		return boot_cpu_data.x86 == 15;
	}
	return 0;
}
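
/* Family 15 means AMD K8 (Opteron/Athlon 64) or an Intel Pentium 4 class
 * CPU here; those are the only CPUs the local APIC watchdog code below
 * knows how to program.
 */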

/* Run after command line and cpu_init init, but before all other checks */
void __cpuinit nmi_watchdog_default(void)
{
	if (nmi_watchdog != NMI_DEFAULT)
		return;
	if (nmi_known_cpu())
		nmi_watchdog = NMI_LOCAL_APIC;
	else
		nmi_watchdog = NMI_IO_APIC;
}

#ifdef CONFIG_SMP
/* The performance counters used by NMI_LOCAL_APIC don't trigger when
 * the CPU is idle. To make sure the NMI watchdog really ticks on all
 * CPUs during the test, make them busy.
 */
static __init void nmi_cpu_busy(void *data)
{
	volatile int *endflag = data;
	local_irq_enable_in_hardirq();
	/* Intentionally don't use cpu_relax here. This is
	   to make sure that the performance counter really ticks,
	   even if there is a simulator or similar that catches the
	   pause instruction. On a real HT machine this is fine because
	   all other CPUs are busy with "useless" delay loops and don't
	   care if they get somewhat less cycles. */
	while (*endflag == 0)
		barrier();
}
#endif

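/* Boot-time self test: snapshot every CPU's __nmi_count, keep the other
 * CPUs busy (see nmi_cpu_busy above) so their perfctrs keep ticking, wait
 * ten watchdog periods, and expect each enabled CPU to have taken more
 * than five watchdog NMIs.  CPUs that did not are marked disabled; if none
 * are left, nmi_active goes to -1 and the watchdog stays off for good.
 */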
int __init check_nmi_watchdog (void)
{
	volatile int endflag = 0;
	int *counts;
	int cpu;

	if ((nmi_watchdog == NMI_NONE) || (nmi_watchdog == NMI_DEFAULT))
		return 0;

	if (!atomic_read(&nmi_active))
		return 0;

	counts = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL);
	if (!counts)
		return -1;

	printk(KERN_INFO "testing NMI watchdog ... ");

#ifdef CONFIG_SMP
	if (nmi_watchdog == NMI_LOCAL_APIC)
		smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
#endif

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		counts[cpu] = cpu_pda(cpu)->__nmi_count;
	local_irq_enable();
	mdelay((10*1000)/nmi_hz); /* wait 10 ticks */

	for_each_online_cpu(cpu) {
		if (!per_cpu(nmi_watchdog_ctlblk, cpu).enabled)
			continue;
		if (cpu_pda(cpu)->__nmi_count - counts[cpu] <= 5) {
			printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n",
			       cpu,
			       counts[cpu],
			       cpu_pda(cpu)->__nmi_count);
			per_cpu(nmi_watchdog_ctlblk, cpu).enabled = 0;
			atomic_dec(&nmi_active);
		}
	}
	if (!atomic_read(&nmi_active)) {
		kfree(counts);
		atomic_set(&nmi_active, -1);
		return -1;
	}
	endflag = 1;
	printk("OK.\n");

	/* now that we know it works we can reduce NMI frequency to
	   something more reasonable; makes a difference in some configs */
	if (nmi_watchdog == NMI_LOCAL_APIC)
		nmi_hz = 1;

	kfree(counts);
	return 0;
}

int __init setup_nmi_watchdog(char *str)
{
	int nmi;

	if (!strncmp(str, "panic", 5)) {
		panic_on_timeout = 1;
		str = strchr(str, ',');
		if (!str)
			return 1;
		++str;
	}

	get_option(&str, &nmi);

	if ((nmi >= NMI_INVALID) || (nmi < NMI_NONE))
		return 0;

	if ((nmi == NMI_LOCAL_APIC) && (nmi_known_cpu() == 0))
		return 0;  /* no lapic support */
	nmi_watchdog = nmi;
	return 1;
}

__setup("nmi_watchdog=", setup_nmi_watchdog);

static void disable_lapic_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);

	if (atomic_read(&nmi_active) <= 0)
		return;

	on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1);

	BUG_ON(atomic_read(&nmi_active) != 0);
}

static void enable_lapic_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);

	/* are we already enabled */
	if (atomic_read(&nmi_active) != 0)
		return;

	/* are we lapic aware */
	if (nmi_known_cpu() <= 0)
		return;

	on_each_cpu(setup_apic_nmi_watchdog, NULL, 0, 1);
	touch_nmi_watchdog();
}

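/* Note on the timer variants below: with nmi_watchdog == NMI_IO_APIC the
 * watchdog NMIs are assumed to be driven by the timer tick (IRQ 0) routed
 * through the IO-APIC, which is why disable_irq(0)/enable_irq(0) bracket
 * the per-CPU setup and teardown.
 */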
void disable_timer_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_IO_APIC);

	if (atomic_read(&nmi_active) <= 0)
		return;

	disable_irq(0);
	on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1);

	BUG_ON(atomic_read(&nmi_active) != 0);
}

void enable_timer_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_IO_APIC);

	if (atomic_read(&nmi_active) == 0) {
		touch_nmi_watchdog();
		on_each_cpu(setup_apic_nmi_watchdog, NULL, 0, 1);
		enable_irq(0);
	}
}

#ifdef CONFIG_PM

static int nmi_pm_active; /* nmi_active before suspend */

static int lapic_nmi_suspend(struct sys_device *dev, pm_message_t state)
{
	nmi_pm_active = atomic_read(&nmi_active);
	disable_lapic_nmi_watchdog();
	return 0;
}

static int lapic_nmi_resume(struct sys_device *dev)
{
	if (nmi_pm_active > 0)
		enable_lapic_nmi_watchdog();
	return 0;
}

static struct sysdev_class nmi_sysclass = {
	set_kset_name("lapic_nmi"),
	.resume		= lapic_nmi_resume,
	.suspend	= lapic_nmi_suspend,
};

static struct sys_device device_lapic_nmi = {
	.id	= 0,
	.cls	= &nmi_sysclass,
};

static int __init init_lapic_nmi_sysfs(void)
{
	int error;

	/* should really be a BUG_ON but because this is an
	 * init call, it just doesn't work.  -dcz
	 */
	if (nmi_watchdog != NMI_LOCAL_APIC)
		return 0;

	if (atomic_read(&nmi_active) < 0)
		return 0;

	error = sysdev_class_register(&nmi_sysclass);
	if (!error)
		error = sysdev_register(&device_lapic_nmi);
	return error;
}
/* must come after the local APIC's device_initcall() */
late_initcall(init_lapic_nmi_sysfs);

#endif	/* CONFIG_PM */

/*
 * Activate the NMI watchdog via the local APIC.
 * Original code written by Keith Owens.
 */

/* Note that these events don't tick when the CPU idles. This means
   the frequency varies with CPU load. */

#define K7_EVNTSEL_ENABLE	(1 << 22)
#define K7_EVNTSEL_INT		(1 << 20)
#define K7_EVNTSEL_OS		(1 << 17)
#define K7_EVNTSEL_USR		(1 << 16)
#define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING	0x76
#define K7_NMI_EVENT		K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING

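/* How the period is programmed (applies to both the K7 and P4 paths):
 * the counter is preloaded with -((u64)cpu_khz * 1000 / nmi_hz), i.e. the
 * number of CPU cycles in one watchdog period, so it overflows and raises
 * an NMI via the LVTPC vector roughly nmi_hz times per second of non-idle
 * execution.  For example, on a 2.2 GHz CPU (cpu_khz ~= 2200000) with
 * nmi_hz == 1 the preload is about -2.2e9.
 */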
static int setup_k7_watchdog(void)
{
	unsigned int perfctr_msr, evntsel_msr;
	unsigned int evntsel;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	perfctr_msr = MSR_K7_PERFCTR0;
	evntsel_msr = MSR_K7_EVNTSEL0;
	if (!reserve_perfctr_nmi(perfctr_msr))
		goto fail;

	if (!reserve_evntsel_nmi(evntsel_msr))
		goto fail1;

	/* Simulator may not support it */
	if (checking_wrmsrl(evntsel_msr, 0UL))
		goto fail2;
	wrmsrl(perfctr_msr, 0UL);

	evntsel = K7_EVNTSEL_INT
		| K7_EVNTSEL_OS
		| K7_EVNTSEL_USR
		| K7_NMI_EVENT;

	/* setup the timer */
	wrmsr(evntsel_msr, evntsel, 0);
	wrmsrl(perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	evntsel |= K7_EVNTSEL_ENABLE;
	wrmsr(evntsel_msr, evntsel, 0);

	wd->perfctr_msr = perfctr_msr;
	wd->evntsel_msr = evntsel_msr;
	wd->cccr_msr = 0;  /* unused */
	wd->check_bit = 1ULL<<63;
	return 1;
fail2:
	release_evntsel_nmi(evntsel_msr);
fail1:
	release_perfctr_nmi(perfctr_msr);
fail:
	return 0;
}

static void stop_k7_watchdog(void)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	wrmsr(wd->evntsel_msr, 0, 0);

	release_evntsel_nmi(wd->evntsel_msr);
	release_perfctr_nmi(wd->perfctr_msr);
}

/* Note that these events don't tick when the CPU idles. This means
   the frequency varies with CPU load. */

#define MSR_P4_MISC_ENABLE_PERF_AVAIL	(1<<7)
#define P4_ESCR_EVENT_SELECT(N)	((N)<<25)
#define P4_ESCR_OS		(1<<3)
#define P4_ESCR_USR		(1<<2)
#define P4_CCCR_OVF_PMI0	(1<<26)
#define P4_CCCR_OVF_PMI1	(1<<27)
#define P4_CCCR_THRESHOLD(N)	((N)<<20)
#define P4_CCCR_COMPLEMENT	(1<<19)
#define P4_CCCR_COMPARE		(1<<18)
#define P4_CCCR_REQUIRED	(3<<16)
#define P4_CCCR_ESCR_SELECT(N)	((N)<<13)
#define P4_CCCR_ENABLE		(1<<12)
#define P4_CCCR_OVF		(1<<31)
/* Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter
   CRU_ESCR0 (with any non-null event selector) through a complemented
   max threshold. [IA32-Vol3, Section 14.9.9] */

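/* The P4 IQ counters are 40 bits wide, so setup_p4_watchdog() uses bit 39
 * as check_bit: while the (negatively preloaded) counter has not yet
 * overflowed that bit reads back as set, which is how nmi_watchdog_tick()
 * recognises NMIs that did not come from the watchdog.
 */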
static int setup_p4_watchdog(void)
{
	unsigned int perfctr_msr, evntsel_msr, cccr_msr;
	unsigned int evntsel, cccr_val;
	unsigned int misc_enable, dummy;
	unsigned int ht_num;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	rdmsr(MSR_IA32_MISC_ENABLE, misc_enable, dummy);
	if (!(misc_enable & MSR_P4_MISC_ENABLE_PERF_AVAIL))
		return 0;

#ifdef CONFIG_SMP
	/* detect which hyperthread we are on */
	if (smp_num_siblings == 2) {
		unsigned int ebx, apicid;

		ebx = cpuid_ebx(1);
		apicid = (ebx >> 24) & 0xff;
		ht_num = apicid & 1;
	} else
#endif
		ht_num = 0;

	/* performance counters are shared resources
	 * assign each hyperthread its own set
	 * (re-use the ESCR0 register, seems safe
	 * and keeps the cccr_val the same)
	 */
	if (!ht_num) {
		/* logical cpu 0 */
		perfctr_msr = MSR_P4_IQ_PERFCTR0;
		evntsel_msr = MSR_P4_CRU_ESCR0;
		cccr_msr = MSR_P4_IQ_CCCR0;
		cccr_val = P4_CCCR_OVF_PMI0 | P4_CCCR_ESCR_SELECT(4);
	} else {
		/* logical cpu 1 */
		perfctr_msr = MSR_P4_IQ_PERFCTR1;
		evntsel_msr = MSR_P4_CRU_ESCR0;
		cccr_msr = MSR_P4_IQ_CCCR1;
		cccr_val = P4_CCCR_OVF_PMI1 | P4_CCCR_ESCR_SELECT(4);
	}

	if (!reserve_perfctr_nmi(perfctr_msr))
		goto fail;

	if (!reserve_evntsel_nmi(evntsel_msr))
		goto fail1;

	evntsel = P4_ESCR_EVENT_SELECT(0x3F)
		| P4_ESCR_OS
		| P4_ESCR_USR;

	cccr_val |= P4_CCCR_THRESHOLD(15)
		 | P4_CCCR_COMPLEMENT
		 | P4_CCCR_COMPARE
		 | P4_CCCR_REQUIRED;

	wrmsr(evntsel_msr, evntsel, 0);
	wrmsr(cccr_msr, cccr_val, 0);
	wrmsrl(perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	cccr_val |= P4_CCCR_ENABLE;
	wrmsr(cccr_msr, cccr_val, 0);

	wd->perfctr_msr = perfctr_msr;
	wd->evntsel_msr = evntsel_msr;
	wd->cccr_msr = cccr_msr;
	wd->check_bit = 1ULL<<39;
	return 1;
fail1:
	release_perfctr_nmi(perfctr_msr);
fail:
	return 0;
}

static void stop_p4_watchdog(void)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	wrmsr(wd->cccr_msr, 0, 0);
	wrmsr(wd->evntsel_msr, 0, 0);

	release_evntsel_nmi(wd->evntsel_msr);
	release_perfctr_nmi(wd->perfctr_msr);
}

void setup_apic_nmi_watchdog(void *unused)
{
	/* only support LOCAL and IO APICs for now */
	if ((nmi_watchdog != NMI_LOCAL_APIC) &&
	    (nmi_watchdog != NMI_IO_APIC))
		return;

	if (nmi_watchdog == NMI_LOCAL_APIC) {
		switch (boot_cpu_data.x86_vendor) {
		case X86_VENDOR_AMD:
			if (strstr(boot_cpu_data.x86_model_id, "Screwdriver"))
				return;
			if (!setup_k7_watchdog())
				return;
			break;
		case X86_VENDOR_INTEL:
			if (!setup_p4_watchdog())
				return;
			break;
		default:
			return;
		}
	}
	__get_cpu_var(nmi_watchdog_ctlblk.enabled) = 1;
	atomic_inc(&nmi_active);
}

static void stop_apic_nmi_watchdog(void *unused)
{
	/* only support LOCAL and IO APICs for now */
	if ((nmi_watchdog != NMI_LOCAL_APIC) &&
	    (nmi_watchdog != NMI_IO_APIC))
		return;

	if (nmi_watchdog == NMI_LOCAL_APIC) {
		switch (boot_cpu_data.x86_vendor) {
		case X86_VENDOR_AMD:
			if (strstr(boot_cpu_data.x86_model_id, "Screwdriver"))
				return;
			stop_k7_watchdog();
			break;
		case X86_VENDOR_INTEL:
			stop_p4_watchdog();
			break;
		default:
			return;
		}
	}
	__get_cpu_var(nmi_watchdog_ctlblk.enabled) = 0;
	atomic_dec(&nmi_active);
}

/*
 * the best way to detect whether a CPU has a 'hard lockup' problem
 * is to check its local APIC timer IRQ counts. If they are not
 * changing then that CPU has some problem.
 *
 * as these watchdog NMI IRQs are generated on every CPU, we only
 * have to check the current processor.
 */

static DEFINE_PER_CPU(unsigned, last_irq_sum);
static DEFINE_PER_CPU(local_t, alert_counter);
static DEFINE_PER_CPU(int, nmi_touch);

void touch_nmi_watchdog (void)
{
	if (nmi_watchdog > 0) {
		unsigned cpu;

		/*
		 * Tell other CPUs to reset their alert counters. We cannot
		 * do it ourselves because the alert count increase is not
		 * atomic.
		 */
		for_each_present_cpu (cpu)
			per_cpu(nmi_touch, cpu) = 1;
	}

	touch_softlockup_watchdog();
}

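/* Per-NMI watchdog tick.  Returns 1 if the NMI was recognised (consumed)
 * here, 0 otherwise.  A CPU is considered stuck when its local APIC timer
 * interrupt count (apic_timer_irqs) has not moved for 5*nmi_hz consecutive
 * watchdog NMIs, i.e. roughly five seconds; at that point die_nmi() is
 * called.  touch_nmi_watchdog() and MCE handling reset the countdown via
 * the touched flag.
 */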
int __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
{
	int sum;
	int touched = 0;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
	u64 dummy;
	int rc = 0;

	/* check for other users first */
	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
			== NOTIFY_STOP) {
		rc = 1;
		touched = 1;
	}

	sum = read_pda(apic_timer_irqs);
	if (__get_cpu_var(nmi_touch)) {
		__get_cpu_var(nmi_touch) = 0;
		touched = 1;
	}

#ifdef CONFIG_X86_MCE
	/* Could check oops_in_progress here too, but it's safer
	   not to */
	if (atomic_read(&mce_entry) > 0)
		touched = 1;
#endif
	/* if the apic timer isn't firing, this cpu isn't doing much */
	if (!touched && __get_cpu_var(last_irq_sum) == sum) {
		/*
		 * Ayiee, looks like this CPU is stuck ...
		 * wait a few IRQs (5 seconds) before doing the oops ...
		 */
		local_inc(&__get_cpu_var(alert_counter));
		if (local_read(&__get_cpu_var(alert_counter)) == 5*nmi_hz)
			die_nmi("NMI Watchdog detected LOCKUP on CPU %d\n", regs);
	} else {
		__get_cpu_var(last_irq_sum) = sum;
		local_set(&__get_cpu_var(alert_counter), 0);
	}

	/* see if the nmi watchdog went off */
	if (wd->enabled) {
		if (nmi_watchdog == NMI_LOCAL_APIC) {
			rdmsrl(wd->perfctr_msr, dummy);
			if (dummy & wd->check_bit) {
				/* this wasn't a watchdog timer interrupt */
				goto done;
			}

			/* only Intel uses the cccr msr */
			if (wd->cccr_msr != 0) {
				/*
				 * P4 quirks:
				 * - An overflown perfctr will assert its interrupt
				 *   until the OVF flag in its CCCR is cleared.
				 * - LVTPC is masked on interrupt and must be
				 *   unmasked by the LVTPC handler.
				 */
				rdmsrl(wd->cccr_msr, dummy);
				dummy &= ~P4_CCCR_OVF;
				wrmsrl(wd->cccr_msr, dummy);
				apic_write(APIC_LVTPC, APIC_DM_NMI);
			}
			/* start the cycle over again */
			wrmsrl(wd->perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
			rc = 1;
		} else if (nmi_watchdog == NMI_IO_APIC) {
			/* don't know how to accurately check for this.
			 * just assume it was a watchdog timer interrupt
			 * This matches the old behaviour.
			 */
			rc = 1;
		} else
			printk(KERN_WARNING "Unknown enabled NMI hardware?!\n");
	}
done:
	return rc;
}

asmlinkage __kprobes void do_nmi(struct pt_regs * regs, long error_code)
{
	nmi_enter();
	add_pda(__nmi_count,1);
	default_do_nmi(regs);
	nmi_exit();
}

int do_nmi_callback(struct pt_regs * regs, int cpu)
{
#ifdef CONFIG_SYSCTL
	if (unknown_nmi_panic)
		return unknown_nmi_panic_callback(regs, cpu);
#endif
	return 0;
}

#ifdef CONFIG_SYSCTL

static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu)
{
	unsigned char reason = get_nmi_reason();
	char buf[64];

	sprintf(buf, "NMI received for unknown reason %02x\n", reason);
	die_nmi(buf, regs);
	return 0;
}

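/* Runtime on/off switch for the watchdog (an int written through the
 * sysctl entry handled below): the handler mirrors nmi_active into
 * nmi_watchdog_enabled before proc_dointvec() runs, only acts when the
 * value really changed, and refuses to toggle the IO-APIC variant.
 */
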
/*
 * proc handler for /proc/sys/kernel/nmi
 */
int proc_nmi_enabled(struct ctl_table *table, int write, struct file *file,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	int old_state;

	nmi_watchdog_enabled = (atomic_read(&nmi_active) > 0) ? 1 : 0;
	old_state = nmi_watchdog_enabled;
	proc_dointvec(table, write, file, buffer, length, ppos);
	if (!!old_state == !!nmi_watchdog_enabled)
		return 0;

	if (atomic_read(&nmi_active) < 0) {
		printk(KERN_WARNING "NMI watchdog is permanently disabled\n");
		return -EINVAL;
	}

	/* if nmi_watchdog is not set yet, then set it */
	nmi_watchdog_default();

	if (nmi_watchdog == NMI_LOCAL_APIC) {
		if (nmi_watchdog_enabled)
			enable_lapic_nmi_watchdog();
		else
			disable_lapic_nmi_watchdog();
	} else if (nmi_watchdog == NMI_IO_APIC) {
		/* FIXME
		 * for some reason these functions don't work
		 */
		printk("Can not enable/disable NMI on IO APIC\n");
		return -EIO;
#if 0
		if (nmi_watchdog_enabled)
			enable_timer_nmi_watchdog();
		else
			disable_timer_nmi_watchdog();
#endif
	} else {
		printk(KERN_WARNING
			"NMI watchdog doesn't know what hardware to touch\n");
		return -EIO;
	}
	return 0;
}

#endif

EXPORT_SYMBOL(nmi_active);
EXPORT_SYMBOL(nmi_watchdog);
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi);
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
EXPORT_SYMBOL(reserve_perfctr_nmi);
EXPORT_SYMBOL(release_perfctr_nmi);
EXPORT_SYMBOL(reserve_evntsel_nmi);
EXPORT_SYMBOL(release_evntsel_nmi);
EXPORT_SYMBOL(disable_timer_nmi_watchdog);
EXPORT_SYMBOL(enable_timer_nmi_watchdog);
EXPORT_SYMBOL(touch_nmi_watchdog);