// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/panic.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * This function is used throughout the kernel (including mm and fs)
 * to indicate a major problem.
 */
#include <linux/debug_locks.h>
#include <linux/sched/debug.h>
#include <linux/interrupt.h>
#include <linux/kgdb.h>
#include <linux/kmsg_dump.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/vt_kern.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/ftrace.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/sysrq.h>
#include <linux/init.h>
#include <linux/nmi.h>
#include <linux/console.h>
#include <linux/bug.h>
#include <linux/ratelimit.h>
#include <linux/debugfs.h>
#include <asm/sections.h>

#define PANIC_TIMER_STEP 100
#define PANIC_BLINK_SPD 18

#ifdef CONFIG_SMP
/*
 * Should we dump all CPUs backtraces in an oops event?
 * Defaults to 0, can be changed via sysctl.
 */
unsigned int __read_mostly sysctl_oops_all_cpu_backtrace;
#endif /* CONFIG_SMP */

int panic_on_oops = CONFIG_PANIC_ON_OOPS_VALUE;
static unsigned long tainted_mask =
	IS_ENABLED(CONFIG_GCC_PLUGIN_RANDSTRUCT) ? (1 << TAINT_RANDSTRUCT) : 0;
static int pause_on_oops;
static int pause_on_oops_flag;
static DEFINE_SPINLOCK(pause_on_oops_lock);
bool crash_kexec_post_notifiers;
int panic_on_warn __read_mostly;
unsigned long panic_on_taint;
bool panic_on_taint_nousertaint = false;

int panic_timeout = CONFIG_PANIC_TIMEOUT;
EXPORT_SYMBOL_GPL(panic_timeout);

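/*
 * As implemented in panic() below: panic_timeout > 0 waits that many seconds
 * and then reboots, panic_timeout == 0 loops forever, and panic_timeout < 0
 * reboots immediately. It can be set via CONFIG_PANIC_TIMEOUT, the "panic="
 * boot parameter (see core_param() near the end of this file), or the
 * kernel.panic sysctl.
 */
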
#define PANIC_PRINT_TASK_INFO		0x00000001
#define PANIC_PRINT_MEM_INFO		0x00000002
#define PANIC_PRINT_TIMER_INFO		0x00000004
#define PANIC_PRINT_LOCK_INFO		0x00000008
#define PANIC_PRINT_FTRACE_INFO		0x00000010
#define PANIC_PRINT_ALL_PRINTK_MSG	0x00000020
unsigned long panic_print;
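
/*
 * Example (illustrative): booting with "panic_print=0x1f" sets the first five
 * flags above, so panic_print_sys_info() below dumps task, memory, timer,
 * lock and ftrace information on panic.
 */
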
ATOMIC_NOTIFIER_HEAD(panic_notifier_list);

EXPORT_SYMBOL(panic_notifier_list);

static long no_blink(int state)
{
	return 0;
}

/* Returns how long it waited in ms */
long (*panic_blink)(int state);
EXPORT_SYMBOL(panic_blink);

/*
 * Stop ourselves in panic -- architecture code may override this
 */
void __weak panic_smp_self_stop(void)
{
	while (1)
		cpu_relax();
}

/*
 * Stop ourselves in NMI context if another CPU has already panicked. Arch code
 * may override this to prepare for crash dumping, e.g. save regs info.
 */
void __weak nmi_panic_self_stop(struct pt_regs *regs)
{
	panic_smp_self_stop();
}

/*
 * Stop other CPUs in panic. Architecture dependent code may override this
 * with a more suitable version. For example, if the architecture supports
 * crash dump, it should save the registers of each stopped CPU and disable
 * per-CPU features such as virtualization extensions.
 */
void __weak crash_smp_send_stop(void)
{
	static int cpus_stopped;

	/*
	 * This function can be called twice in the panic path, but obviously
	 * we execute this only once.
	 */
	if (cpus_stopped)
		return;

	/*
	 * Note smp_send_stop is the usual smp shutdown function, which
	 * unfortunately means it may not be hardened to work in a panic
	 * situation.
	 */
	smp_send_stop();
	cpus_stopped = 1;
}

atomic_t panic_cpu = ATOMIC_INIT(PANIC_CPU_INVALID);

/*
 * A variant of panic() called from NMI context. We return if we've already
 * panicked on this CPU. If another CPU already panicked, loop in
 * nmi_panic_self_stop() which can provide architecture dependent code such
 * as saving register state for crash dump.
 */
void nmi_panic(struct pt_regs *regs, const char *msg)
{
	int old_cpu, cpu;

	cpu = raw_smp_processor_id();
	old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, cpu);

	if (old_cpu == PANIC_CPU_INVALID)
		panic("%s", msg);
	else if (old_cpu != cpu)
		nmi_panic_self_stop(regs);
}
EXPORT_SYMBOL(nmi_panic);

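/*
 * Illustrative caller (hypothetical handler name): an architecture NMI
 * handler that decides a failure is fatal could do
 *
 *	static void example_fatal_nmi(struct pt_regs *regs)
 *	{
 *		nmi_panic(regs, "Fatal hardware error in NMI");
 *	}
 *
 * and execution returns from nmi_panic() only on the CPU that had already
 * entered panic().
 */
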
static void panic_print_sys_info(void)
{
	if (panic_print & PANIC_PRINT_ALL_PRINTK_MSG)
		console_flush_on_panic(CONSOLE_REPLAY_ALL);

	if (panic_print & PANIC_PRINT_TASK_INFO)
		show_state();

	if (panic_print & PANIC_PRINT_MEM_INFO)
		show_mem(0, NULL);

	if (panic_print & PANIC_PRINT_TIMER_INFO)
		sysrq_timer_list_show();

	if (panic_print & PANIC_PRINT_LOCK_INFO)
		debug_show_all_locks();

	if (panic_print & PANIC_PRINT_FTRACE_INFO)
		ftrace_dump(DUMP_ALL);
}

/**
 *	panic - halt the system
 *	@fmt: printf-style format string for the panic message, followed by
 *	      its arguments
 *
 *	Display a message, then perform cleanups.
 *
 *	This function never returns.
 */
void panic(const char *fmt, ...)
{
	static char buf[1024];
	va_list args;
	long i, i_next = 0, len;
	int state = 0;
	int old_cpu, this_cpu;
	bool _crash_kexec_post_notifiers = crash_kexec_post_notifiers;

	/*
	 * Disable local interrupts. This will prevent panic_smp_self_stop
	 * from deadlocking the first cpu that invokes the panic, since
	 * there is nothing to prevent an interrupt handler (that runs
	 * after setting panic_cpu) from invoking panic() again.
	 */
	local_irq_disable();
	preempt_disable_notrace();

	/*
	 * It's possible to come here directly from a panic-assertion and
	 * not have preempt disabled. Some functions called from here want
	 * preempt to be disabled. No point enabling it later though...
	 *
	 * Only one CPU is allowed to execute the panic code from here. For
	 * multiple parallel invocations of panic, all other CPUs either
	 * stop themselves or will wait until they are stopped by the 1st CPU
	 * with smp_send_stop().
	 *
	 * `old_cpu == PANIC_CPU_INVALID' means this is the 1st CPU which
	 * comes here, so go ahead.
	 * `old_cpu == this_cpu' means we came from nmi_panic() which sets
	 * panic_cpu to this CPU. In this case, this is also the 1st CPU.
	 */
	this_cpu = raw_smp_processor_id();
	old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);

	if (old_cpu != PANIC_CPU_INVALID && old_cpu != this_cpu)
		panic_smp_self_stop();

	console_verbose();
	bust_spinlocks(1);
	va_start(args, fmt);
	len = vscnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	if (len && buf[len - 1] == '\n')
		buf[len - 1] = '\0';

	pr_emerg("Kernel panic - not syncing: %s\n", buf);
#ifdef CONFIG_DEBUG_BUGVERBOSE
	/*
	 * Avoid nested stack-dumping if a panic occurs during oops processing
	 */
	if (!test_taint(TAINT_DIE) && oops_in_progress <= 1)
		dump_stack();
#endif

	/*
	 * If kgdb is enabled, give it a chance to run before we stop all
	 * the other CPUs or else we won't be able to debug processes left
	 * running on them.
	 */
	kgdb_panic(buf);

	/*
	 * If we have crashed and we have a crash kernel loaded let it handle
	 * everything else.
	 * If we want to run this after calling panic_notifiers, pass
	 * the "crash_kexec_post_notifiers" option to the kernel.
	 *
	 * Bypass the panic_cpu check and call __crash_kexec directly.
	 */
	if (!_crash_kexec_post_notifiers) {
		printk_safe_flush_on_panic();
		__crash_kexec(NULL);

		/*
		 * Note smp_send_stop is the usual smp shutdown function, which
		 * unfortunately means it may not be hardened to work in a
		 * panic situation.
		 */
		smp_send_stop();
	} else {
		/*
		 * If we want to do a crash dump after notifier calls and
		 * kmsg_dump, we will need architecture dependent extra
		 * work in addition to stopping other CPUs.
		 */
		crash_smp_send_stop();
	}

	/*
	 * Run any panic handlers, including those that might need to
	 * add information to the kmsg dump output.
	 */
	atomic_notifier_call_chain(&panic_notifier_list, 0, buf);

	/* Flushing twice is deliberate: with one CPU online it tries harder */
	printk_safe_flush_on_panic();
	kmsg_dump(KMSG_DUMP_PANIC);

	/*
	 * If you doubt that kdump always works fine in any situation,
	 * "crash_kexec_post_notifiers" offers you a chance to run the
	 * panic notifiers and dump kmsg before kdump.
	 * Note: since some panic notifiers can make the crashed kernel
	 * more unstable, it can also increase the risk of kdump failing.
	 *
	 * Bypass the panic_cpu check and call __crash_kexec directly.
	 */
	if (_crash_kexec_post_notifiers)
		__crash_kexec(NULL);

#ifdef CONFIG_VT
	unblank_screen();
#endif
	console_unblank();

	/*
	 * We may have ended up stopping the CPU holding the lock (in
	 * smp_send_stop()) while still having some valuable data in the console
	 * buffer. Try to acquire the lock then release it regardless of the
	 * result. The release will also print the buffers out. Lock debugging
	 * should be disabled to avoid reporting a bad unlock balance when
	 * panic() is not being called from an oops.
	 */
	debug_locks_off();
	console_flush_on_panic(CONSOLE_FLUSH_PENDING);

	panic_print_sys_info();

	if (!panic_blink)
		panic_blink = no_blink;

	if (panic_timeout > 0) {
		/*
		 * Delay timeout seconds before rebooting the machine.
		 * We can't use the "normal" timers since we just panicked.
		 */
		pr_emerg("Rebooting in %d seconds..\n", panic_timeout);

		for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) {
			touch_nmi_watchdog();
			if (i >= i_next) {
				i += panic_blink(state ^= 1);
				i_next = i + 3600 / PANIC_BLINK_SPD;
			}
			mdelay(PANIC_TIMER_STEP);
		}
	}
	if (panic_timeout != 0) {
		/*
		 * This will not be a clean reboot, with everything
		 * shutting down. But if there is a chance of
		 * rebooting the system it will be rebooted.
		 */
		if (panic_reboot_mode != REBOOT_UNDEFINED)
			reboot_mode = panic_reboot_mode;
		emergency_restart();
	}
#ifdef __sparc__
	{
		extern int stop_a_enabled;
		/* Make sure the user can actually press Stop-A (L1-A) */
		stop_a_enabled = 1;
		pr_emerg("Press Stop-A (L1-A) from sun keyboard or send break\n"
			 "twice on console to return to the boot prom\n");
	}
#endif
#if defined(CONFIG_S390)
	disabled_wait();
#endif
	pr_emerg("---[ end Kernel panic - not syncing: %s ]---\n", buf);

	/* Do not scroll important messages printed above */
	suppress_printk = 1;
	local_irq_enable();
	for (i = 0; ; i += PANIC_TIMER_STEP) {
		touch_softlockup_watchdog();
		if (i >= i_next) {
			i += panic_blink(state ^= 1);
			i_next = i + 3600 / PANIC_BLINK_SPD;
		}
		mdelay(PANIC_TIMER_STEP);
	}
}

EXPORT_SYMBOL(panic);

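/*
 * Illustrative call site (not from this file): callers pass a format string
 * and arguments just like printk(), e.g.
 *
 *	panic("VFS: Unable to mount root fs on %s", name);
 */
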
/*
 * TAINT_FORCED_RMMOD could be a per-module flag but the module
 * is being removed anyway.
 */
const struct taint_flag taint_flags[TAINT_FLAGS_COUNT] = {
	[ TAINT_PROPRIETARY_MODULE ]	= { 'P', 'G', true },
	[ TAINT_FORCED_MODULE ]		= { 'F', ' ', true },
	[ TAINT_CPU_OUT_OF_SPEC ]	= { 'S', ' ', false },
	[ TAINT_FORCED_RMMOD ]		= { 'R', ' ', false },
	[ TAINT_MACHINE_CHECK ]		= { 'M', ' ', false },
	[ TAINT_BAD_PAGE ]		= { 'B', ' ', false },
	[ TAINT_USER ]			= { 'U', ' ', false },
	[ TAINT_DIE ]			= { 'D', ' ', false },
	[ TAINT_OVERRIDDEN_ACPI_TABLE ]	= { 'A', ' ', false },
	[ TAINT_WARN ]			= { 'W', ' ', false },
	[ TAINT_CRAP ]			= { 'C', ' ', true },
	[ TAINT_FIRMWARE_WORKAROUND ]	= { 'I', ' ', false },
	[ TAINT_OOT_MODULE ]		= { 'O', ' ', true },
	[ TAINT_UNSIGNED_MODULE ]	= { 'E', ' ', true },
	[ TAINT_SOFTLOCKUP ]		= { 'L', ' ', false },
	[ TAINT_LIVEPATCH ]		= { 'K', ' ', true },
	[ TAINT_AUX ]			= { 'X', ' ', true },
	[ TAINT_RANDSTRUCT ]		= { 'T', ' ', true },
};

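/*
 * The first character of each entry above is what print_tainted() below
 * emits when the flag is set; for example, with TAINT_PROPRIETARY_MODULE,
 * TAINT_WARN and TAINT_OOT_MODULE set, the string contains 'P', 'W' and 'O'
 * in their flag positions.
 */
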
/**
 *	print_tainted - return a string to represent the kernel taint state.
 *
 *	For individual taint flag meanings, see
 *	Documentation/admin-guide/sysctl/kernel.rst
 *
 *	The string is overwritten by the next call to print_tainted(),
 *	but is always NUL terminated.
 */
const char *print_tainted(void)
{
	static char buf[TAINT_FLAGS_COUNT + sizeof("Tainted: ")];

	BUILD_BUG_ON(ARRAY_SIZE(taint_flags) != TAINT_FLAGS_COUNT);

	if (tainted_mask) {
		char *s;
		int i;

		s = buf + sprintf(buf, "Tainted: ");
		for (i = 0; i < TAINT_FLAGS_COUNT; i++) {
			const struct taint_flag *t = &taint_flags[i];
			*s++ = test_bit(i, &tainted_mask) ?
					t->c_true : t->c_false;
		}
		*s = 0;
	} else
		snprintf(buf, sizeof(buf), "Not tainted");

	return buf;
}

int test_taint(unsigned flag)
{
	return test_bit(flag, &tainted_mask);
}
EXPORT_SYMBOL(test_taint);

unsigned long get_taint(void)
{
	return tainted_mask;
}

/**
 * add_taint: add a taint flag if not already set.
 * @flag: one of the TAINT_* constants.
 * @lockdep_ok: whether lock debugging is still OK.
 *
 * If something bad has gone wrong, you'll want @lockdep_ok =
 * LOCKDEP_NOW_UNRELIABLE, but for some noteworthy-but-not-corrupting
 * cases it can be LOCKDEP_STILL_OK.
 */
void add_taint(unsigned flag, enum lockdep_ok lockdep_ok)
{
	if (lockdep_ok == LOCKDEP_NOW_UNRELIABLE && __debug_locks_off())
		pr_warn("Disabling lock debugging due to kernel taint\n");

	set_bit(flag, &tainted_mask);

	if (tainted_mask & panic_on_taint) {
		panic_on_taint = 0;
		panic("panic_on_taint set ...");
	}
}
EXPORT_SYMBOL(add_taint);

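/*
 * Typical usage (illustrative): a merely noteworthy condition keeps lockdep
 * alive, e.g.
 *
 *	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
 *
 * while corruption-class taints should pass LOCKDEP_NOW_UNRELIABLE.
 */
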
static void spin_msec(int msecs)
{
	int i;

	for (i = 0; i < msecs; i++) {
		touch_nmi_watchdog();
		mdelay(1);
	}
}

/*
 * It just happens that oops_enter() and oops_exit() are identically
 * implemented...
 */
static void do_oops_enter_exit(void)
{
	unsigned long flags;
	static int spin_counter;

	if (!pause_on_oops)
		return;

	spin_lock_irqsave(&pause_on_oops_lock, flags);
	if (pause_on_oops_flag == 0) {
		/* This CPU may now print the oops message */
		pause_on_oops_flag = 1;
	} else {
		/* We need to stall this CPU */
		if (!spin_counter) {
			/* This CPU gets to do the counting */
			spin_counter = pause_on_oops;
			do {
				spin_unlock(&pause_on_oops_lock);
				spin_msec(MSEC_PER_SEC);
				spin_lock(&pause_on_oops_lock);
			} while (--spin_counter);
			pause_on_oops_flag = 0;
		} else {
			/* This CPU waits for a different one */
			while (spin_counter) {
				spin_unlock(&pause_on_oops_lock);
				spin_msec(1);
				spin_lock(&pause_on_oops_lock);
			}
		}
	}
	spin_unlock_irqrestore(&pause_on_oops_lock, flags);
}

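/*
 * Example (illustrative): booting with "pause_on_oops=60" lets the first
 * oopsing CPU count down 60 seconds in the do-while loop above, while any
 * other CPU that oopses meanwhile spins quietly in the inner while loop.
 */
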
/*
 * Return true if the calling CPU is allowed to print oops-related info.
 * This is a bit racy..
 */
int oops_may_print(void)
{
	return pause_on_oops_flag == 0;
}

/*
 * Called when the architecture enters its oops handler, before it prints
 * anything. If this is the first CPU to oops, and it's oopsing the first
 * time then let it proceed.
 *
 * This is all enabled by the pause_on_oops kernel boot option. We do all
 * this to ensure that oopses don't scroll off the screen. It has the
 * side-effect of preventing later-oopsing CPUs from mucking up the display,
 * too.
 *
 * It turns out that the CPU which is allowed to print ends up pausing for
 * the right duration, whereas all the other CPUs pause for twice as long:
 * once in oops_enter(), once in oops_exit().
 */
void oops_enter(void)
{
	tracing_off();
	/* can't trust the integrity of the kernel anymore: */
	debug_locks_off();
	do_oops_enter_exit();

	if (sysctl_oops_all_cpu_backtrace)
		trigger_all_cpu_backtrace();
}

/*
 * 64-bit random ID for oopses:
 */
static u64 oops_id;

static int init_oops_id(void)
{
	if (!oops_id)
		get_random_bytes(&oops_id, sizeof(oops_id));
	else
		oops_id++;

	return 0;
}
late_initcall(init_oops_id);

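/*
 * The resulting end marker printed by print_oops_end_marker() below looks
 * like this (example value):
 *
 *	---[ end trace 4eaa2a86a8e2da22 ]---
 */
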
void print_oops_end_marker(void)
{
	init_oops_id();
	pr_warn("---[ end trace %016llx ]---\n", (unsigned long long)oops_id);
}

/*
 * Called when the architecture exits its oops handler, after printing
 * everything.
 */
void oops_exit(void)
{
	do_oops_enter_exit();
	print_oops_end_marker();
	kmsg_dump(KMSG_DUMP_OOPS);
}

struct warn_args {
	const char *fmt;
	va_list args;
};

void __warn(const char *file, int line, void *caller, unsigned taint,
	    struct pt_regs *regs, struct warn_args *args)
{
	disable_trace_on_warning();

	if (file)
		pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS\n",
			raw_smp_processor_id(), current->pid, file, line,
			caller);
	else
		pr_warn("WARNING: CPU: %d PID: %d at %pS\n",
			raw_smp_processor_id(), current->pid, caller);

	if (args)
		vprintk(args->fmt, args->args);

	if (panic_on_warn) {
		/*
		 * This thread may hit another WARN() in the panic path.
		 * Resetting this prevents additional WARN() from panicking the
		 * system on this thread. Other threads are blocked by the
		 * panic_cpu check in panic().
		 */
		panic_on_warn = 0;
		panic("panic_on_warn set ...\n");
	}

	print_modules();

	if (regs)
		show_regs(regs);
	else
		dump_stack();

	print_irqtrace_events(current);

	print_oops_end_marker();

	/* Just a warning, don't kill lockdep. */
	add_taint(taint, LOCKDEP_STILL_OK);
}

#ifndef __WARN_FLAGS
void warn_slowpath_fmt(const char *file, int line, unsigned taint,
		       const char *fmt, ...)
{
	struct warn_args args;

	pr_warn(CUT_HERE);

	if (!fmt) {
		__warn(file, line, __builtin_return_address(0), taint,
		       NULL, NULL);
		return;
	}

	args.fmt = fmt;
	va_start(args.args, fmt);
	__warn(file, line, __builtin_return_address(0), taint, NULL, &args);
	va_end(args.args);
}
EXPORT_SYMBOL(warn_slowpath_fmt);
#else
void __warn_printk(const char *fmt, ...)
{
	va_list args;

	pr_warn(CUT_HERE);

	va_start(args, fmt);
	vprintk(fmt, args);
	va_end(args);
}
EXPORT_SYMBOL(__warn_printk);
#endif

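/*
 * Both paths above are reached from the WARN()/WARN_ON() family of macros
 * in <asm-generic/bug.h>: architectures without __WARN_FLAGS funnel through
 * warn_slowpath_fmt(), while architectures that define it report via a trap
 * and use __warn_printk() only to print the message.
 */
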
#ifdef CONFIG_BUG

/* Support resetting WARN*_ONCE state */

static int clear_warn_once_set(void *data, u64 val)
{
	generic_bug_clear_once();
	memset(__start_once, 0, __end_once - __start_once);
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(clear_warn_once_fops, NULL, clear_warn_once_set,
			 "%lld\n");

static __init int register_warn_debugfs(void)
{
	/* Don't care about failure */
	debugfs_create_file_unsafe("clear_warn_once", 0200, NULL, NULL,
				   &clear_warn_once_fops);
	return 0;
}

device_initcall(register_warn_debugfs);
#endif

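/*
 * Usage example (assuming debugfs is mounted at its usual location): writing
 * any value re-arms every WARN_ONCE()/WARN_ON_ONCE() site, e.g.
 *
 *	echo 1 > /sys/kernel/debug/clear_warn_once
 */
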
#ifdef CONFIG_STACKPROTECTOR

/*
 * Called when gcc's -fstack-protector feature is used, and
 * gcc detects corruption of the on-stack canary value
 */
__visible noinstr void __stack_chk_fail(void)
{
	instrumentation_begin();
	panic("stack-protector: Kernel stack is corrupted in: %pB",
	      __builtin_return_address(0));
	instrumentation_end();
}
EXPORT_SYMBOL(__stack_chk_fail);

#endif
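
/*
 * Illustrative trigger (never do this): with CONFIG_STACKPROTECTOR, a stack
 * smash such as
 *
 *	char buf[8];
 *	memset(buf, 'A', 64);	(clobbers the canary)
 *
 * makes the compiler-generated epilogue check fail and call
 * __stack_chk_fail() above.
 */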

core_param(panic, panic_timeout, int, 0644);
core_param(panic_print, panic_print, ulong, 0644);
core_param(pause_on_oops, pause_on_oops, int, 0644);
core_param(panic_on_warn, panic_on_warn, int, 0644);
core_param(crash_kexec_post_notifiers, crash_kexec_post_notifiers, bool, 0644);

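/*
 * These core_param() lines make the knobs above settable on the kernel
 * command line, e.g. (illustrative)
 *
 *	panic=30 panic_print=0x3 pause_on_oops=10 panic_on_warn=1
 *
 * and writable later via /sys/module/kernel/parameters/ (mode 0644).
 */
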
static int __init oops_setup(char *s)
{
	if (!s)
		return -EINVAL;
	if (!strcmp(s, "panic"))
		panic_on_oops = 1;
	return 0;
}
early_param("oops", oops_setup);
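
/* Example: booting with "oops=panic" turns every oops into a panic. */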

static int __init panic_on_taint_setup(char *s)
{
	char *taint_str;

	if (!s)
		return -EINVAL;

	taint_str = strsep(&s, ",");
	if (kstrtoul(taint_str, 16, &panic_on_taint))
		return -EINVAL;

	/* make sure panic_on_taint doesn't hold out-of-range TAINT flags */
	panic_on_taint &= TAINT_FLAGS_MAX;

	if (!panic_on_taint)
		return -EINVAL;

	if (s && !strcmp(s, "nousertaint"))
		panic_on_taint_nousertaint = true;

	pr_info("panic_on_taint: bitmask=0x%lx nousertaint_mode=%sabled\n",
		panic_on_taint, panic_on_taint_nousertaint ? "en" : "dis");

	return 0;
}
early_param("panic_on_taint", panic_on_taint_setup);
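
/*
 * Example (illustrative): "panic_on_taint=0x20,nousertaint" panics as soon
 * as TAINT_BAD_PAGE (bit 5) is set, while the optional "nousertaint" suffix
 * keeps userspace writes to /proc/sys/kernel/tainted from triggering that
 * panic.
 */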