/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * This function is used throughout the kernel (including mm and fs)
 * to indicate a major problem.
 */
#include <linux/debug_locks.h>
#include <linux/sched/debug.h>
#include <linux/interrupt.h>
#include <linux/kmsg_dump.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/vt_kern.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/ftrace.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/sysrq.h>
#include <linux/init.h>
#include <linux/nmi.h>
#include <linux/console.h>
#include <linux/bug.h>
#include <linux/ratelimit.h>
#include <linux/debugfs.h>
#include <asm/sections.h>
#define PANIC_TIMER_STEP 100
#define PANIC_BLINK_SPD 18
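/*
 * For reference: the blink loops in panic() below advance a millisecond
 * counter in PANIC_TIMER_STEP (100 ms) increments and re-arm the blink
 * every 3600 / PANIC_BLINK_SPD = 200 ms, i.e. roughly 2.5 full on/off
 * cycles per second.
 */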
int panic_on_oops = CONFIG_PANIC_ON_OOPS_VALUE;
static unsigned long tainted_mask;
static int pause_on_oops;
static int pause_on_oops_flag;
static DEFINE_SPINLOCK(pause_on_oops_lock);
bool crash_kexec_post_notifiers;
int panic_on_warn __read_mostly;

int panic_timeout = CONFIG_PANIC_TIMEOUT;
EXPORT_SYMBOL_GPL(panic_timeout);

ATOMIC_NOTIFIER_HEAD(panic_notifier_list);

EXPORT_SYMBOL(panic_notifier_list);
static long no_blink(int state)
{
        return 0;
}

/* Returns how long it waited in ms */
long (*panic_blink)(int state);
EXPORT_SYMBOL(panic_blink);
/*
 * Stop ourselves in panic -- architecture code may override this
 */
void __weak panic_smp_self_stop(void)
{
        while (1)
                cpu_relax();
}
/*
 * Stop ourselves in NMI context if another CPU has already panicked. Arch code
 * may override this to prepare for crash dumping, e.g. save regs info.
 */
void __weak nmi_panic_self_stop(struct pt_regs *regs)
{
        panic_smp_self_stop();
}
/*
 * Stop other CPUs in panic.  Architecture dependent code may override this
 * with a more suitable version.  For example, if the architecture supports
 * crash dump, it should save registers of each stopped CPU and disable
 * per-CPU features such as virtualization extensions.
 */
void __weak crash_smp_send_stop(void)
{
        static int cpus_stopped;

        /*
         * This function can be called twice in panic path, but obviously
         * we execute this only once.
         */
        if (cpus_stopped)
                return;

        /*
         * Note smp_send_stop is the usual smp shutdown function, which
         * unfortunately means it may not be hardened to work in a panic
         * situation.
         */
        smp_send_stop();
        cpus_stopped = 1;
}

atomic_t panic_cpu = ATOMIC_INIT(PANIC_CPU_INVALID);
/*
 * A variant of panic() called from NMI context. We return if we've already
 * panicked on this CPU. If another CPU already panicked, loop in
 * nmi_panic_self_stop() which can provide architecture dependent code such
 * as saving register state for crash dump.
 */
void nmi_panic(struct pt_regs *regs, const char *msg)
{
        int old_cpu, cpu;

        cpu = raw_smp_processor_id();
        old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, cpu);

        if (old_cpu == PANIC_CPU_INVALID)
                panic("%s", msg);
        else if (old_cpu != cpu)
                nmi_panic_self_stop(regs);
}
EXPORT_SYMBOL(nmi_panic);
/**
 *	panic - halt the system
 *	@fmt: The text string to print
 *
 *	Display a message, then perform cleanups.
 *
 *	This function never returns.
 */
void panic(const char *fmt, ...)
{
        static char buf[1024];
        va_list args;
        long i, i_next = 0;
        int state = 0;
        int old_cpu, this_cpu;
        bool _crash_kexec_post_notifiers = crash_kexec_post_notifiers;
        /*
         * Disable local interrupts. This will prevent panic_smp_self_stop
         * from deadlocking the first cpu that invokes the panic, since
         * there is nothing to prevent an interrupt handler (that runs
         * after setting panic_cpu) from invoking panic() again.
         */
        local_irq_disable();
        preempt_disable_notrace();
        /*
         * It's possible to come here directly from a panic-assertion and
         * not have preempt disabled. Some functions called from here want
         * preempt to be disabled. No point enabling it later though...
         *
         * Only one CPU is allowed to execute the panic code from here. For
         * multiple parallel invocations of panic, all other CPUs either
         * stop themselves or will wait until they are stopped by the 1st CPU
         * with smp_send_stop().
         *
         * `old_cpu == PANIC_CPU_INVALID' means this is the 1st CPU which
         * comes here, so go ahead.
         * `old_cpu == this_cpu' means we came from nmi_panic() which sets
         * panic_cpu to this CPU.  In this case, this is also the 1st CPU.
         */
        this_cpu = raw_smp_processor_id();
        old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);

        if (old_cpu != PANIC_CPU_INVALID && old_cpu != this_cpu)
                panic_smp_self_stop();
        va_start(args, fmt);
        vsnprintf(buf, sizeof(buf), fmt, args);
        va_end(args);
        pr_emerg("Kernel panic - not syncing: %s\n", buf);
#ifdef CONFIG_DEBUG_BUGVERBOSE
        /*
         * Avoid nested stack-dumping if a panic occurs during oops processing
         */
        if (!test_taint(TAINT_DIE) && oops_in_progress <= 1)
                dump_stack();
#endif
        /*
         * If we have crashed and we have a crash kernel loaded let it handle
         * everything else.
         * If we want to run this after calling panic_notifiers, pass
         * the "crash_kexec_post_notifiers" option to the kernel.
         *
         * Bypass the panic_cpu check and call __crash_kexec directly.
         */
        if (!_crash_kexec_post_notifiers) {
                printk_safe_flush_on_panic();
                __crash_kexec(NULL);

                /*
                 * Note smp_send_stop is the usual smp shutdown function, which
                 * unfortunately means it may not be hardened to work in a
                 * panic situation.
                 */
                smp_send_stop();
        } else {
                /*
                 * If we want to do crash dump after notifier calls and
                 * kmsg_dump, we will need architecture dependent extra
                 * work in addition to stopping other CPUs.
                 */
                crash_smp_send_stop();
        }
        /*
         * Run any panic handlers, including those that might need to
         * add information to the kmsg dump output.
         */
        atomic_notifier_call_chain(&panic_notifier_list, 0, buf);

        /* Call flush even twice. It tries harder with a single online CPU */
        printk_safe_flush_on_panic();
        kmsg_dump(KMSG_DUMP_PANIC);
        /*
         * If you doubt that kdump always works fine in any situation,
         * "crash_kexec_post_notifiers" offers you a chance to run
         * panic_notifiers and dump kmsg before kdump.
         * Note: since some panic_notifiers can make the crashed kernel
         * more unstable, it can increase the risk of kdump failure too.
         *
         * Bypass the panic_cpu check and call __crash_kexec directly.
         */
        if (_crash_kexec_post_notifiers)
                __crash_kexec(NULL);
        /*
         * We may have ended up stopping the CPU holding the lock (in
         * smp_send_stop()) while still having some valuable data in the console
         * buffer.  Try to acquire the lock then release it regardless of the
         * result.  The release will also print the buffers out.  Locks debug
         * should be disabled to avoid reporting bad unlock balance when
         * panic() is not being called from OOPS.
         */
        debug_locks_off();
        console_flush_on_panic();

        if (!panic_blink)
                panic_blink = no_blink;
        if (panic_timeout > 0) {
                /*
                 * Delay timeout seconds before rebooting the machine.
                 * We can't use the "normal" timers since we just panicked.
                 */
                pr_emerg("Rebooting in %d seconds..\n", panic_timeout);

                for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) {
                        touch_nmi_watchdog();
                        if (i >= i_next) {
                                i += panic_blink(state ^= 1);
                                i_next = i + 3600 / PANIC_BLINK_SPD;
                        }
                        mdelay(PANIC_TIMER_STEP);
                }
        }
        if (panic_timeout != 0) {
                /*
                 * This will not be a clean reboot, with everything
                 * shutting down.  But if there is a chance of
                 * rebooting the system it will be rebooted.
                 */
                emergency_restart();
        }
#ifdef __sparc__
        {
                extern int stop_a_enabled;

                /* Make sure the user can actually press Stop-A (L1-A) */
                stop_a_enabled = 1;
                pr_emerg("Press Stop-A (L1-A) from sun keyboard or send break\n"
                         "twice on console to return to the boot prom\n");
        }
#endif
#if defined(CONFIG_S390)
        {
                unsigned long caller;

                caller = (unsigned long)__builtin_return_address(0);
                disabled_wait(caller);
        }
#endif
        pr_emerg("---[ end Kernel panic - not syncing: %s\n", buf);
        local_irq_enable();
        for (i = 0; ; i += PANIC_TIMER_STEP) {
                touch_softlockup_watchdog();
                if (i >= i_next) {
                        i += panic_blink(state ^= 1);
                        i_next = i + 3600 / PANIC_BLINK_SPD;
                }
                mdelay(PANIC_TIMER_STEP);
        }
}
EXPORT_SYMBOL(panic);
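/*
 * panic_timeout semantics, as implemented above: a positive value reboots
 * after that many seconds, zero spins in the blink loop forever, and a
 * negative value reboots immediately.  It is normally set with the
 * "panic=" boot parameter (see the core_param() entries at the bottom of
 * this file).
 */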
/*
 * TAINT_FORCED_RMMOD could be a per-module flag but the module
 * is being removed anyway.
 */
const struct taint_flag taint_flags[TAINT_FLAGS_COUNT] = {
        { 'P', 'G', true },	/* TAINT_PROPRIETARY_MODULE */
        { 'F', ' ', true },	/* TAINT_FORCED_MODULE */
        { 'S', ' ', false },	/* TAINT_CPU_OUT_OF_SPEC */
        { 'R', ' ', false },	/* TAINT_FORCED_RMMOD */
        { 'M', ' ', false },	/* TAINT_MACHINE_CHECK */
        { 'B', ' ', false },	/* TAINT_BAD_PAGE */
        { 'U', ' ', false },	/* TAINT_USER */
        { 'D', ' ', false },	/* TAINT_DIE */
        { 'A', ' ', false },	/* TAINT_OVERRIDDEN_ACPI_TABLE */
        { 'W', ' ', false },	/* TAINT_WARN */
        { 'C', ' ', true },	/* TAINT_CRAP */
        { 'I', ' ', false },	/* TAINT_FIRMWARE_WORKAROUND */
        { 'O', ' ', true },	/* TAINT_OOT_MODULE */
        { 'E', ' ', true },	/* TAINT_UNSIGNED_MODULE */
        { 'L', ' ', false },	/* TAINT_SOFTLOCKUP */
        { 'K', ' ', true },	/* TAINT_LIVEPATCH */
        { 'X', ' ', true },	/* TAINT_AUX */
};
/**
 *	print_tainted - return a string to represent the kernel taint state.
 *
 *  'P' - Proprietary module has been loaded.
 *  'F' - Module has been forcibly loaded.
 *  'S' - SMP with CPUs not designed for SMP.
 *  'R' - User forced a module unload.
 *  'M' - System experienced a machine check exception.
 *  'B' - System has hit bad_page.
 *  'U' - Userspace-defined naughtiness.
 *  'D' - Kernel has oopsed before.
 *  'A' - ACPI table overridden.
 *  'W' - Taint on warning.
 *  'C' - Modules from drivers/staging are loaded.
 *  'I' - Working around severe firmware bug.
 *  'O' - Out-of-tree module has been loaded.
 *  'E' - Unsigned module has been loaded.
 *  'L' - A soft lockup has previously occurred.
 *  'K' - Kernel has been live patched.
 *  'X' - Auxiliary taint, for distros' use.
 *
 *	The string is overwritten by the next call to print_tainted().
 */
const char *print_tainted(void)
{
        static char buf[TAINT_FLAGS_COUNT + sizeof("Tainted: ")];

        if (tainted_mask) {
                char *s;
                int i;

                s = buf + sprintf(buf, "Tainted: ");
                for (i = 0; i < TAINT_FLAGS_COUNT; i++) {
                        const struct taint_flag *t = &taint_flags[i];
                        *s++ = test_bit(i, &tainted_mask) ?
                                        t->c_true : t->c_false;
                }
                *s = 0;
        } else
                snprintf(buf, sizeof(buf), "Not tainted");

        return buf;
}
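/*
 * For example, a kernel tainted only by an out-of-tree module prints
 * "Tainted: G" followed by blanks and an 'O' in the TAINT_OOT_MODULE
 * column: each position shows c_true when the corresponding bit is set
 * and c_false otherwise (a blank for most flags, 'G' for the
 * proprietary-module slot).
 */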
int test_taint(unsigned flag)
{
        return test_bit(flag, &tainted_mask);
}
EXPORT_SYMBOL(test_taint);

unsigned long get_taint(void)
{
        return tainted_mask;
}
/**
 * add_taint: add a taint flag if not already set.
 * @flag: one of the TAINT_* constants.
 * @lockdep_ok: whether lock debugging is still OK.
 *
 * If something bad has gone wrong, you'll want @lockdep_ok = false, but for
 * some noteworthy-but-not-corrupting cases, it can be set to true.
 */
void add_taint(unsigned flag, enum lockdep_ok lockdep_ok)
{
        if (lockdep_ok == LOCKDEP_NOW_UNRELIABLE && __debug_locks_off())
                pr_warn("Disabling lock debugging due to kernel taint\n");

        set_bit(flag, &tainted_mask);
}
EXPORT_SYMBOL(add_taint);
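/*
 * Typical calls: __warn() below uses
 *
 *	add_taint(taint, LOCKDEP_STILL_OK);
 *
 * since a warning leaves lock state intact, whereas a corruption path
 * that can no longer trust locking would pass LOCKDEP_NOW_UNRELIABLE.
 */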
static void spin_msec(int msecs)
{
        int i;

        for (i = 0; i < msecs; i++) {
                touch_nmi_watchdog();
                mdelay(1);
        }
}
/*
 * It just happens that oops_enter() and oops_exit() are identically
 * implemented...
 */
static void do_oops_enter_exit(void)
{
        unsigned long flags;
        static int spin_counter;

        if (!pause_on_oops)
                return;

        spin_lock_irqsave(&pause_on_oops_lock, flags);
        if (pause_on_oops_flag == 0) {
                /* This CPU may now print the oops message */
                pause_on_oops_flag = 1;
        } else {
                /* We need to stall this CPU */
                if (!spin_counter) {
                        /* This CPU gets to do the counting */
                        spin_counter = pause_on_oops;
                        do {
                                spin_unlock(&pause_on_oops_lock);
                                spin_msec(MSEC_PER_SEC);
                                spin_lock(&pause_on_oops_lock);
                        } while (--spin_counter);
                        pause_on_oops_flag = 0;
                } else {
                        /* This CPU waits for a different one */
                        while (spin_counter) {
                                spin_unlock(&pause_on_oops_lock);
                                spin_msec(1);
                                spin_lock(&pause_on_oops_lock);
                        }
                }
        }
        spin_unlock_irqrestore(&pause_on_oops_lock, flags);
}
/*
 * Return true if the calling CPU is allowed to print oops-related info.
 * This is a bit racy..
 */
int oops_may_print(void)
{
        return pause_on_oops_flag == 0;
}
/*
 * Called when the architecture enters its oops handler, before it prints
 * anything.  If this is the first CPU to oops, and it's oopsing the first
 * time then let it proceed.
 *
 * This is all enabled by the pause_on_oops kernel boot option.  We do all
 * this to ensure that oopses don't scroll off the screen.  It has the
 * side-effect of preventing later-oopsing CPUs from mucking up the display,
 * too.
 *
 * It turns out that the CPU which is allowed to print ends up pausing for
 * the right duration, whereas all the other CPUs pause for twice as long:
 * once in oops_enter(), once in oops_exit().
 */
void oops_enter(void)
{
        tracing_off();
        /* can't trust the integrity of the kernel anymore: */
        debug_locks_off();
        do_oops_enter_exit();
}
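/*
 * pause_on_oops is a number of seconds, set with the "pause_on_oops="
 * boot parameter (see the core_param() entries at the bottom of this
 * file); e.g. pause_on_oops=30 holds the first oops on screen for 30
 * seconds before any later oops output may scroll it away.
 */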
/*
 * 64-bit random ID for oopses:
 */
static u64 oops_id;

static int init_oops_id(void)
{
        if (!oops_id)
                get_random_bytes(&oops_id, sizeof(oops_id));
        else
                oops_id++;

        return 0;
}
late_initcall(init_oops_id);
void print_oops_end_marker(void)
{
        init_oops_id();
        pr_warn("---[ end trace %016llx ]---\n", (unsigned long long)oops_id);
}

/*
 * Called when the architecture exits its oops handler, after printing
 * everything.
 */
void oops_exit(void)
{
        do_oops_enter_exit();
        print_oops_end_marker();
        kmsg_dump(KMSG_DUMP_OOPS);
}
struct warn_args {
        const char *fmt;
        va_list args;
};

void __warn(const char *file, int line, void *caller, unsigned taint,
            struct pt_regs *regs, struct warn_args *args)
{
        disable_trace_on_warning();

        if (file)
                pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS\n",
                        raw_smp_processor_id(), current->pid, file, line,
                        caller);
        else
                pr_warn("WARNING: CPU: %d PID: %d at %pS\n",
                        raw_smp_processor_id(), current->pid, caller);

        if (args)
                vprintk(args->fmt, args->args);

        if (panic_on_warn) {
                /*
                 * This thread may hit another WARN() in the panic path.
                 * Resetting this prevents additional WARN() from panicking the
                 * system on this thread.  Other threads are blocked by the
                 * panic_mutex in panic().
                 */
                panic_on_warn = 0;
                panic("panic_on_warn set ...\n");
        }

        if (regs)
                show_regs(regs);
        else
                dump_stack();

        print_oops_end_marker();

        /* Just a warning, don't kill lockdep. */
        add_taint(taint, LOCKDEP_STILL_OK);
}
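/*
 * Note: with the "panic_on_warn" boot parameter set (see core_param()
 * below), the first warning that reaches __warn() escalates into
 * panic(); the flag is cleared first so another WARN() hit while
 * panicking cannot recurse.
 */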
#ifdef WANT_WARN_ON_SLOWPATH
void warn_slowpath_fmt(const char *file, int line, const char *fmt, ...)
{
        struct warn_args args;

        args.fmt = fmt;
        va_start(args.args, fmt);
        __warn(file, line, __builtin_return_address(0), TAINT_WARN, NULL,
               &args);
        va_end(args.args);
}
EXPORT_SYMBOL(warn_slowpath_fmt);
void warn_slowpath_fmt_taint(const char *file, int line,
                             unsigned taint, const char *fmt, ...)
{
        struct warn_args args;

        args.fmt = fmt;
        va_start(args.args, fmt);
        __warn(file, line, __builtin_return_address(0), taint, NULL, &args);
        va_end(args.args);
}
EXPORT_SYMBOL(warn_slowpath_fmt_taint);
void warn_slowpath_null(const char *file, int line)
{
        __warn(file, line, __builtin_return_address(0), TAINT_WARN, NULL, NULL);
}
EXPORT_SYMBOL(warn_slowpath_null);
#endif
void __warn_printk(const char *fmt, ...)
{
        va_list args;

        va_start(args, fmt);
        vprintk(fmt, args);
        va_end(args);
}
EXPORT_SYMBOL(__warn_printk);
#ifdef CONFIG_DEBUG_FS

/* Support resetting WARN*_ONCE state */

static int clear_warn_once_set(void *data, u64 val)
{
        generic_bug_clear_once();
        memset(__start_once, 0, __end_once - __start_once);
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(clear_warn_once_fops,
                        NULL,
                        clear_warn_once_set,
                        "%lld\n");

static __init int register_warn_debugfs(void)
{
        /* Don't care about failure */
        debugfs_create_file("clear_warn_once", 0200, NULL,
                            NULL, &clear_warn_once_fops);
        return 0;
}

device_initcall(register_warn_debugfs);
#endif
#ifdef CONFIG_CC_STACKPROTECTOR

/*
 * Called when gcc's -fstack-protector feature is used, and
 * gcc detects corruption of the on-stack canary value
 */
__visible void __stack_chk_fail(void)
{
        panic("stack-protector: Kernel stack is corrupted in: %pB",
              __builtin_return_address(0));
}
EXPORT_SYMBOL(__stack_chk_fail);

#endif
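/*
 * The compiler plants a canary next to the return address in each
 * instrumented frame and emits a call to __stack_chk_fail() from the
 * function epilogue when the canary no longer matches, e.g. after an
 * on-stack buffer overflow:
 *
 *	char buf[8];
 *	memcpy(buf, src, 64);	// smashes the canary; epilogue panics
 */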
#ifdef CONFIG_ARCH_HAS_REFCOUNT
void refcount_error_report(struct pt_regs *regs, const char *err)
{
        WARN_RATELIMIT(1, "refcount_t %s at %pB in %s[%d], uid/euid: %u/%u\n",
                err, (void *)instruction_pointer(regs),
                current->comm, task_pid_nr(current),
                from_kuid_munged(&init_user_ns, current_uid()),
                from_kuid_munged(&init_user_ns, current_euid()));
}
#endif
core_param(panic, panic_timeout, int, 0644);
core_param(pause_on_oops, pause_on_oops, int, 0644);
core_param(panic_on_warn, panic_on_warn, int, 0644);
core_param(crash_kexec_post_notifiers, crash_kexec_post_notifiers, bool, 0644);
static int __init oops_setup(char *s)
{
        if (!s)
                return -EINVAL;
        if (!strcmp(s, "panic"))
                panic_on_oops = 1;
        return 0;
}
early_param("oops", oops_setup);