]>
Commit | Line | Data |
---|---|---|
b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
1da177e4 LT |
2 | /* |
3 | * linux/include/linux/nmi.h | |
4 | */ | |
5 | #ifndef LINUX_NMI_H | |
6 | #define LINUX_NMI_H | |
7 | ||
9938406a | 8 | #include <linux/sched.h> |
1da177e4 | 9 | #include <asm/irq.h> |
f2e0cff8 NP |
10 | #if defined(CONFIG_HAVE_NMI_WATCHDOG) |
11 | #include <asm/nmi.h> | |
12 | #endif | |
1da177e4 | 13 | |
#ifdef CONFIG_LOCKUP_DETECTOR
void lockup_detector_init(void);
void lockup_detector_soft_poweroff(void);
void lockup_detector_cleanup(void);
bool is_hardlockup(void);

/* /proc/sys/kernel interface state, see comment above the *_ENABLED bits. */
extern int watchdog_user_enabled;
extern int nmi_watchdog_user_enabled;
extern int soft_watchdog_user_enabled;
extern int watchdog_thresh;
extern unsigned long watchdog_enabled;

extern struct cpumask watchdog_cpumask;
extern unsigned long *watchdog_cpumask_bits;
#ifdef CONFIG_SMP
extern int sysctl_softlockup_all_cpu_backtrace;
extern int sysctl_hardlockup_all_cpu_backtrace;
#else
/* On UP there are no other CPUs to backtrace. */
#define sysctl_softlockup_all_cpu_backtrace 0
#define sysctl_hardlockup_all_cpu_backtrace 0
#endif /* !CONFIG_SMP */

#else /* CONFIG_LOCKUP_DETECTOR */
static inline void lockup_detector_init(void) { }
static inline void lockup_detector_soft_poweroff(void) { }
static inline void lockup_detector_cleanup(void) { }
#endif /* !CONFIG_LOCKUP_DETECTOR */

#ifdef CONFIG_SOFTLOCKUP_DETECTOR
extern void touch_softlockup_watchdog_sched(void);
extern void touch_softlockup_watchdog(void);
extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void);
extern unsigned int softlockup_panic;
#else
static inline void touch_softlockup_watchdog_sched(void) { }
static inline void touch_softlockup_watchdog(void) { }
static inline void touch_softlockup_watchdog_sync(void) { }
static inline void touch_all_softlockup_watchdogs(void) { }
#endif

#ifdef CONFIG_DETECT_HUNG_TASK
void reset_hung_task_detector(void);
#else
static inline void reset_hung_task_detector(void) { }
#endif

/*
 * The run state of the lockup detectors is controlled by the content of the
 * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit -
 * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector.
 *
 * 'watchdog_user_enabled', 'nmi_watchdog_user_enabled' and
 * 'soft_watchdog_user_enabled' are variables that are only used as an
 * 'interface' between the parameters in /proc/sys/kernel and the internal
 * state bits in 'watchdog_enabled'. The 'watchdog_thresh' variable is
 * handled differently because its value is not boolean, and the lockup
 * detectors are 'suspended' while 'watchdog_thresh' is equal zero.
 */
#define NMI_WATCHDOG_ENABLED_BIT	0
#define SOFT_WATCHDOG_ENABLED_BIT	1
#define NMI_WATCHDOG_ENABLED		(1 << NMI_WATCHDOG_ENABLED_BIT)
#define SOFT_WATCHDOG_ENABLED		(1 << SOFT_WATCHDOG_ENABLED_BIT)

#if defined(CONFIG_HARDLOCKUP_DETECTOR)
extern void hardlockup_detector_disable(void);
extern unsigned int hardlockup_panic;
#else
static inline void hardlockup_detector_disable(void) {}
#endif

/* nmi_watchdog sysctl is writable only when some hard lockup detector exists. */
#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
# define NMI_WATCHDOG_SYSCTL_PERM	0644
#else
# define NMI_WATCHDOG_SYSCTL_PERM	0444
#endif

05a4a952 | 91 | #if defined(CONFIG_HARDLOCKUP_DETECTOR_PERF) |
f2e0cff8 | 92 | extern void arch_touch_nmi_watchdog(void); |
d0b6e0a8 PZ |
93 | extern void hardlockup_detector_perf_stop(void); |
94 | extern void hardlockup_detector_perf_restart(void); | |
941154bd | 95 | extern void hardlockup_detector_perf_disable(void); |
2a1b8ee4 | 96 | extern void hardlockup_detector_perf_enable(void); |
941154bd | 97 | extern void hardlockup_detector_perf_cleanup(void); |
178b9f7a | 98 | extern int hardlockup_detector_perf_init(void); |
f2e0cff8 | 99 | #else |
d0b6e0a8 PZ |
100 | static inline void hardlockup_detector_perf_stop(void) { } |
101 | static inline void hardlockup_detector_perf_restart(void) { } | |
941154bd | 102 | static inline void hardlockup_detector_perf_disable(void) { } |
2a1b8ee4 | 103 | static inline void hardlockup_detector_perf_enable(void) { } |
941154bd | 104 | static inline void hardlockup_detector_perf_cleanup(void) { } |
178b9f7a TG |
105 | # if !defined(CONFIG_HAVE_NMI_WATCHDOG) |
106 | static inline int hardlockup_detector_perf_init(void) { return -ENODEV; } | |
f2e0cff8 | 107 | static inline void arch_touch_nmi_watchdog(void) {} |
178b9f7a TG |
108 | # else |
109 | static inline int hardlockup_detector_perf_init(void) { return 0; } | |
110 | # endif | |
05a4a952 | 111 | #endif |
f2e0cff8 | 112 | |
/* Arch hooks for the NMI watchdog; weak defaults live in kernel/watchdog.c. */
void watchdog_nmi_stop(void);
void watchdog_nmi_start(void);
int watchdog_nmi_probe(void);
int watchdog_nmi_enable(unsigned int cpu);
void watchdog_nmi_disable(unsigned int cpu);

/**
 * touch_nmi_watchdog - restart NMI watchdog timeout.
 *
 * If the architecture supports the NMI watchdog, touch_nmi_watchdog()
 * may be used to reset the timeout - for code which intentionally
 * disables interrupts for a long time. This call is stateless.
 */
static inline void touch_nmi_watchdog(void)
{
	arch_touch_nmi_watchdog();
	touch_softlockup_watchdog();
}

6e7458a6 | 131 | |
47cab6a7 IM |
132 | /* |
133 | * Create trigger_all_cpu_backtrace() out of the arch-provided | |
134 | * base function. Return whether such support was available, | |
135 | * to allow calling code to fall back to some other mechanism: | |
136 | */ | |
9a01c3ed | 137 | #ifdef arch_trigger_cpumask_backtrace |
47cab6a7 IM |
138 | static inline bool trigger_all_cpu_backtrace(void) |
139 | { | |
9a01c3ed | 140 | arch_trigger_cpumask_backtrace(cpu_online_mask, false); |
47cab6a7 IM |
141 | return true; |
142 | } | |
9a01c3ed | 143 | |
f3aca3d0 AT |
144 | static inline bool trigger_allbutself_cpu_backtrace(void) |
145 | { | |
9a01c3ed CM |
146 | arch_trigger_cpumask_backtrace(cpu_online_mask, true); |
147 | return true; | |
148 | } | |
149 | ||
150 | static inline bool trigger_cpumask_backtrace(struct cpumask *mask) | |
151 | { | |
152 | arch_trigger_cpumask_backtrace(mask, false); | |
153 | return true; | |
154 | } | |
155 | ||
156 | static inline bool trigger_single_cpu_backtrace(int cpu) | |
157 | { | |
158 | arch_trigger_cpumask_backtrace(cpumask_of(cpu), false); | |
f3aca3d0 AT |
159 | return true; |
160 | } | |
b2c0b2cb RK |
161 | |
162 | /* generic implementation */ | |
9a01c3ed CM |
163 | void nmi_trigger_cpumask_backtrace(const cpumask_t *mask, |
164 | bool exclude_self, | |
b2c0b2cb RK |
165 | void (*raise)(cpumask_t *mask)); |
166 | bool nmi_cpu_backtrace(struct pt_regs *regs); | |
167 | ||
47cab6a7 IM |
168 | #else |
169 | static inline bool trigger_all_cpu_backtrace(void) | |
170 | { | |
171 | return false; | |
172 | } | |
f3aca3d0 AT |
173 | static inline bool trigger_allbutself_cpu_backtrace(void) |
174 | { | |
175 | return false; | |
176 | } | |
9a01c3ed CM |
177 | static inline bool trigger_cpumask_backtrace(struct cpumask *mask) |
178 | { | |
179 | return false; | |
180 | } | |
181 | static inline bool trigger_single_cpu_backtrace(int cpu) | |
182 | { | |
183 | return false; | |
184 | } | |
bb81a09e AM |
185 | #endif |
186 | ||
05a4a952 | 187 | #ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF |
4eec42f3 | 188 | u64 hw_nmi_get_sample_period(int watchdog_thresh); |
05a4a952 NP |
189 | #endif |
190 | ||
7edaeb68 TG |
191 | #if defined(CONFIG_HARDLOCKUP_CHECK_TIMESTAMP) && \ |
192 | defined(CONFIG_HARDLOCKUP_DETECTOR) | |
193 | void watchdog_update_hrtimer_threshold(u64 period); | |
194 | #else | |
195 | static inline void watchdog_update_hrtimer_threshold(u64 period) { } | |
196 | #endif | |
197 | ||
504d7cf1 | 198 | struct ctl_table; |
83a80a39 UO |
199 | extern int proc_watchdog(struct ctl_table *, int , |
200 | void __user *, size_t *, loff_t *); | |
201 | extern int proc_nmi_watchdog(struct ctl_table *, int , | |
202 | void __user *, size_t *, loff_t *); | |
203 | extern int proc_soft_watchdog(struct ctl_table *, int , | |
204 | void __user *, size_t *, loff_t *); | |
205 | extern int proc_watchdog_thresh(struct ctl_table *, int , | |
206 | void __user *, size_t *, loff_t *); | |
fe4ba3c3 CM |
207 | extern int proc_watchdog_cpumask(struct ctl_table *, int, |
208 | void __user *, size_t *, loff_t *); | |
84e478c6 | 209 | |
44a69f61 TN |
210 | #ifdef CONFIG_HAVE_ACPI_APEI_NMI |
211 | #include <asm/nmi.h> | |
212 | #endif | |
213 | ||
1da177e4 | 214 | #endif |