/*
 *  linux/include/linux/nmi.h
 */
#ifndef LINUX_NMI_H
#define LINUX_NMI_H

#include <linux/sched.h>
#include <asm/irq.h>
#if defined(CONFIG_HAVE_NMI_WATCHDOG)
#include <asm/nmi.h>
#endif

#ifdef CONFIG_LOCKUP_DETECTOR
extern void touch_softlockup_watchdog_sched(void);
extern void touch_softlockup_watchdog(void);
extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void);
extern unsigned int softlockup_panic;
extern unsigned int hardlockup_panic;
void lockup_detector_init(void);
#else
static inline void touch_softlockup_watchdog_sched(void)
{
}
static inline void touch_softlockup_watchdog(void)
{
}
static inline void touch_softlockup_watchdog_sync(void)
{
}
static inline void touch_all_softlockup_watchdogs(void)
{
}
static inline void lockup_detector_init(void)
{
}
#endif

#ifdef CONFIG_DETECT_HUNG_TASK
void reset_hung_task_detector(void);
#else
static inline void reset_hung_task_detector(void)
{
}
#endif

/*
 * The run state of the lockup detectors is controlled by the content of the
 * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit -
 * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector.
 *
 * 'watchdog_user_enabled', 'nmi_watchdog_enabled' and 'soft_watchdog_enabled'
 * are variables that are only used as an 'interface' between the parameters
 * in /proc/sys/kernel and the internal state bits in 'watchdog_enabled'. The
 * 'watchdog_thresh' variable is handled differently because its value is not
 * boolean, and the lockup detectors are 'suspended' while 'watchdog_thresh'
 * is equal to zero.
 */
#define NMI_WATCHDOG_ENABLED_BIT   0
#define SOFT_WATCHDOG_ENABLED_BIT  1
#define NMI_WATCHDOG_ENABLED      (1 << NMI_WATCHDOG_ENABLED_BIT)
#define SOFT_WATCHDOG_ENABLED     (1 << SOFT_WATCHDOG_ENABLED_BIT)
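
/*
 * Illustrative sketch (not part of the original header): how the two enable
 * bits above combine in 'watchdog_enabled'.  The helper name
 * watchdog_both_enabled() is hypothetical and used only for illustration.
 *
 *      static inline bool watchdog_both_enabled(unsigned long enabled)
 *      {
 *              return (enabled & NMI_WATCHDOG_ENABLED) &&
 *                     (enabled & SOFT_WATCHDOG_ENABLED);
 *      }
 *
 * With both detectors running, 'watchdog_enabled' reads as
 * NMI_WATCHDOG_ENABLED | SOFT_WATCHDOG_ENABLED == 0x3.
 */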

#if defined(CONFIG_HARDLOCKUP_DETECTOR)
extern void hardlockup_detector_disable(void);
#else
static inline void hardlockup_detector_disable(void) {}
#endif

#if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HAVE_NMI_WATCHDOG)
extern void arch_touch_nmi_watchdog(void);
#else
static inline void arch_touch_nmi_watchdog(void) {}
#endif

/**
 * touch_nmi_watchdog - restart NMI watchdog timeout.
 *
 * If the architecture supports the NMI watchdog, touch_nmi_watchdog()
 * may be used to reset the timeout - for code which intentionally
 * disables interrupts for a long time. This call is stateless.
 */
static inline void touch_nmi_watchdog(void)
{
        arch_touch_nmi_watchdog();
        touch_softlockup_watchdog();
}
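
/*
 * Usage sketch (illustrative, not part of the original header): a loop that
 * keeps interrupts disabled for a long time can pet the watchdogs on each
 * iteration.  flush_one_chunk() and nr_chunks are hypothetical names used
 * only for this example; local_irq_disable()/local_irq_enable() and
 * touch_nmi_watchdog() are the real kernel interfaces.
 *
 *      local_irq_disable();
 *      for (i = 0; i < nr_chunks; i++) {
 *              flush_one_chunk(i);       // long-running work with IRQs off
 *              touch_nmi_watchdog();     // also touches the softlockup watchdog
 *      }
 *      local_irq_enable();
 */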

/*
 * Create trigger_all_cpu_backtrace() out of the arch-provided
 * base function. Return whether such support was available,
 * to allow calling code to fall back to some other mechanism:
 */
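/*
 * Caller-side sketch (illustrative only): the boolean return value lets a
 * caller attempt the NMI-based all-CPU backtrace first and fall back to a
 * simpler mechanism when the architecture provides no support.
 *
 *      if (!trigger_all_cpu_backtrace())
 *              dump_stack();   // fall back to a local stack dump only
 */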
#ifdef arch_trigger_cpumask_backtrace
static inline bool trigger_all_cpu_backtrace(void)
{
        arch_trigger_cpumask_backtrace(cpu_online_mask, false);
        return true;
}

static inline bool trigger_allbutself_cpu_backtrace(void)
{
        arch_trigger_cpumask_backtrace(cpu_online_mask, true);
        return true;
}

static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
{
        arch_trigger_cpumask_backtrace(mask, false);
        return true;
}

static inline bool trigger_single_cpu_backtrace(int cpu)
{
        arch_trigger_cpumask_backtrace(cpumask_of(cpu), false);
        return true;
}

/* generic implementation */
void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
                                   bool exclude_self,
                                   void (*raise)(cpumask_t *mask));
bool nmi_cpu_backtrace(struct pt_regs *regs);
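
/*
 * Arch-side sketch (illustrative, not taken from this header): an
 * architecture that defines arch_trigger_cpumask_backtrace() can build it
 * on the generic helper above by supplying a 'raise' callback that delivers
 * the backtrace interrupt.  raise_backtrace_ipi() and send_backtrace_ipi()
 * are hypothetical names.
 *
 *      static void raise_backtrace_ipi(cpumask_t *mask)
 *      {
 *              send_backtrace_ipi(mask);  // arch-specific NMI/IPI delivery
 *      }
 *
 *      void arch_trigger_cpumask_backtrace(const cpumask_t *mask,
 *                                          bool exclude_self)
 *      {
 *              nmi_trigger_cpumask_backtrace(mask, exclude_self,
 *                                            raise_backtrace_ipi);
 *      }
 */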

#else
static inline bool trigger_all_cpu_backtrace(void)
{
        return false;
}
static inline bool trigger_allbutself_cpu_backtrace(void)
{
        return false;
}
static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
{
        return false;
}
static inline bool trigger_single_cpu_backtrace(int cpu)
{
        return false;
}
#endif

#ifdef CONFIG_LOCKUP_DETECTOR
u64 hw_nmi_get_sample_period(int watchdog_thresh);
extern int nmi_watchdog_enabled;
extern int soft_watchdog_enabled;
extern int watchdog_user_enabled;
extern int watchdog_thresh;
extern unsigned long watchdog_enabled;
extern unsigned long *watchdog_cpumask_bits;
extern atomic_t watchdog_park_in_progress;
#ifdef CONFIG_SMP
extern int sysctl_softlockup_all_cpu_backtrace;
extern int sysctl_hardlockup_all_cpu_backtrace;
#else
#define sysctl_softlockup_all_cpu_backtrace 0
#define sysctl_hardlockup_all_cpu_backtrace 0
#endif
extern bool is_hardlockup(void);
struct ctl_table;
extern int proc_watchdog(struct ctl_table *, int,
                         void __user *, size_t *, loff_t *);
extern int proc_nmi_watchdog(struct ctl_table *, int,
                             void __user *, size_t *, loff_t *);
extern int proc_soft_watchdog(struct ctl_table *, int,
                              void __user *, size_t *, loff_t *);
extern int proc_watchdog_thresh(struct ctl_table *, int,
                                void __user *, size_t *, loff_t *);
extern int proc_watchdog_cpumask(struct ctl_table *, int,
                                 void __user *, size_t *, loff_t *);
extern int lockup_detector_suspend(void);
extern void lockup_detector_resume(void);
#else
static inline int lockup_detector_suspend(void)
{
        return 0;
}

static inline void lockup_detector_resume(void)
{
}
#endif

#ifdef CONFIG_HAVE_ACPI_APEI_NMI
#include <asm/nmi.h>
#endif

#endif