/*
 * Detect Hung Task
 *
 * kernel/hung_task.c - kernel thread for detecting tasks stuck in D state
 *
 */

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/lockdep.h>
#include <linux/export.h>
#include <linux/sysctl.h>
#include <linux/utsname.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>

#include <trace/events/sched.h>
23 | ||
24 | /* | |
25 | * The number of tasks checked: | |
26 | */ | |
27 | int __read_mostly sysctl_hung_task_check_count = PID_MAX_LIMIT; | |
28 | ||
29 | /* | |
30 | * Limit number of tasks checked in a batch. | |
31 | * | |
32 | * This value controls the preemptibility of khungtaskd since preemption | |
33 | * is disabled during the critical section. It also controls the size of | |
34 | * the RCU grace period. So it needs to be upper-bound. | |
35 | */ | |
36 | #define HUNG_TASK_BATCHING 1024 | |
37 | ||
38 | /* | |
39 | * Zero means infinite timeout - no checking done: | |
40 | */ | |
41 | unsigned long __read_mostly sysctl_hung_task_timeout_secs = CONFIG_DEFAULT_HUNG_TASK_TIMEOUT; | |
42 | ||
43 | int __read_mostly sysctl_hung_task_warnings = 10; | |
44 | ||
45 | static int __read_mostly did_panic; | |
46 | static bool hung_task_show_lock; | |
47 | static bool hung_task_call_panic; | |
48 | ||
49 | static struct task_struct *watchdog_task; | |
50 | ||
51 | /* | |
52 | * Should we panic (and reboot, if panic_timeout= is set) when a | |
53 | * hung task is detected: | |
54 | */ | |
55 | unsigned int __read_mostly sysctl_hung_task_panic = | |
56 | CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE; | |
57 | ||
static int __init hung_task_panic_setup(char *str)
{
	int rc = kstrtouint(str, 0, &sysctl_hung_task_panic);

	if (rc)
		return rc;
	return 1;
}
__setup("hung_task_panic=", hung_task_panic_setup);

static int
hung_task_panic(struct notifier_block *this, unsigned long event, void *ptr)
{
	did_panic = 1;

	return NOTIFY_DONE;
}

static struct notifier_block panic_block = {
	.notifier_call = hung_task_panic,
};

static void check_hung_task(struct task_struct *t, unsigned long timeout)
{
	unsigned long switch_count = t->nvcsw + t->nivcsw;

	/*
	 * Ensure the task is not frozen.
	 * Also, skip vfork and any other user process that freezer should skip.
	 */
	if (unlikely(t->flags & (PF_FROZEN | PF_FREEZER_SKIP)))
		return;

	/*
	 * When a freshly created task is scheduled once, it changes its state
	 * to TASK_UNINTERRUPTIBLE without ever having been switched out, so it
	 * mustn't be checked.
	 */
	if (unlikely(!switch_count))
		return;

	if (switch_count != t->last_switch_count) {
		t->last_switch_count = switch_count;
		return;
	}

	trace_sched_process_hang(t);

	if (!sysctl_hung_task_warnings && !sysctl_hung_task_panic)
		return;

	/*
	 * Ok, the task did not get scheduled for more than the timeout
	 * (2 minutes by default), complain:
	 */
	if (sysctl_hung_task_warnings) {
		if (sysctl_hung_task_warnings > 0)
			sysctl_hung_task_warnings--;
		pr_err("INFO: task %s:%d blocked for more than %ld seconds.\n",
			t->comm, t->pid, timeout);
		pr_err("      %s %s %.*s\n",
			print_tainted(), init_utsname()->release,
			(int)strcspn(init_utsname()->version, " "),
			init_utsname()->version);
		pr_err("\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\""
			" disables this message.\n");
		sched_show_task(t);
		hung_task_show_lock = true;
	}

	touch_nmi_watchdog();

	if (sysctl_hung_task_panic) {
		hung_task_show_lock = true;
		hung_task_call_panic = true;
	}
}

135 | ||
136 | /* | |
137 | * To avoid extending the RCU grace period for an unbounded amount of time, | |
138 | * periodically exit the critical section and enter a new one. | |
139 | * | |
140 | * For preemptible RCU it is sufficient to call rcu_read_unlock in order | |
141 | * to exit the grace period. For classic RCU, a reschedule is required. | |
142 | */ | |
143 | static bool rcu_lock_break(struct task_struct *g, struct task_struct *t) | |
144 | { | |
145 | bool can_cont; | |
146 | ||
	get_task_struct(g);
	get_task_struct(t);
	rcu_read_unlock();
	cond_resched();
	rcu_read_lock();
	can_cont = pid_alive(g) && pid_alive(t);
	put_task_struct(t);
	put_task_struct(g);

	return can_cont;
}

/*
 * Check whether any TASK_UNINTERRUPTIBLE task has not been woken up for
 * a really long time (120 seconds by default). If that happens, print
 * out a warning.
 */
static void check_hung_uninterruptible_tasks(unsigned long timeout)
{
	int max_count = sysctl_hung_task_check_count;
	int batch_count = HUNG_TASK_BATCHING;
	struct task_struct *g, *t;

	/*
	 * If the system crashed already then all bets are off,
	 * do not report extra hung tasks:
	 */
	if (test_taint(TAINT_DIE) || did_panic)
		return;

	hung_task_show_lock = false;
	rcu_read_lock();
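	/*
	 * Walk every thread in the system, but give up after
	 * sysctl_hung_task_check_count tasks and drop the RCU read lock
	 * every HUNG_TASK_BATCHING tasks so the grace period stays bounded.
	 */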
	for_each_process_thread(g, t) {
		if (!max_count--)
			goto unlock;
		if (!--batch_count) {
			batch_count = HUNG_TASK_BATCHING;
			if (!rcu_lock_break(g, t))
				goto unlock;
		}
		/* use "==" to skip the TASK_KILLABLE tasks waiting on NFS */
		if (t->state == TASK_UNINTERRUPTIBLE)
			check_hung_task(t, timeout);
	}
 unlock:
	rcu_read_unlock();
	if (hung_task_show_lock)
		debug_show_all_locks();
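	/*
	 * A hung task was found and sysctl_hung_task_panic is set: dump
	 * every CPU's backtrace before taking the machine down.
	 */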
	if (hung_task_call_panic) {
		trigger_all_cpu_backtrace();
		panic("hung_task: blocked tasks");
	}
}

static long hung_timeout_jiffies(unsigned long last_checked,
				 unsigned long timeout)
{
	/* timeout of 0 will disable the watchdog */
	return timeout ? last_checked - jiffies + timeout * HZ :
		MAX_SCHEDULE_TIMEOUT;
}
208 | ||
209 | /* | |
210 | * Process updating of timeout sysctl | |
211 | */ | |
212 | int proc_dohung_task_timeout_secs(struct ctl_table *table, int write, | |
213 | void __user *buffer, | |
214 | size_t *lenp, loff_t *ppos) | |
215 | { | |
216 | int ret; | |
217 | ||
218 | ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos); | |
219 | ||
220 | if (ret || !write) | |
221 | goto out; | |
222 | ||
	wake_up_process(watchdog_task);

out:
	return ret;
}
228 | ||
229 | static atomic_t reset_hung_task = ATOMIC_INIT(0); | |
230 | ||
void reset_hung_task_detector(void)
{
	atomic_set(&reset_hung_task, 1);
}
EXPORT_SYMBOL_GPL(reset_hung_task_detector);

/*
 * kthread which checks for tasks stuck in D state
 */
static int watchdog(void *dummy)
{
	unsigned long hung_last_checked = jiffies;

	set_user_nice(current, 0);

	for ( ; ; ) {
		unsigned long timeout = sysctl_hung_task_timeout_secs;
		long t = hung_timeout_jiffies(hung_last_checked, timeout);

		if (t <= 0) {
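			/*
			 * A scan is due; skip it if a reset was requested
			 * since the last pass.
			 */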
			if (!atomic_xchg(&reset_hung_task, 0))
				check_hung_uninterruptible_tasks(timeout);
			hung_last_checked = jiffies;
			continue;
		}
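		/*
		 * Sleep until the next scan is due; sleeping interruptibly
		 * lets the sysctl handler wake us early on a timeout change.
		 */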
		schedule_timeout_interruptible(t);
	}

	return 0;
}

static int __init hung_task_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
	watchdog_task = kthread_run(watchdog, NULL, "khungtaskd");

	return 0;
}
subsys_initcall(hung_task_init);