git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blobdiff - kernel/hung_task.c
ftrace: Avoid potential division by zero in function profiler
[mirror_ubuntu-bionic-kernel.git] / kernel / hung_task.c
index 751593ed7c0b0b9cc9bf74735fb9bd5d7e100be2..2e4869fa66c93da40672bfc450850480332de542 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/lockdep.h>
 #include <linux/export.h>
 #include <linux/sysctl.h>
+#include <linux/suspend.h>
 #include <linux/utsname.h>
 #include <linux/sched/signal.h>
 #include <linux/sched/debug.h>
@@ -33,7 +34,7 @@ int __read_mostly sysctl_hung_task_check_count = PID_MAX_LIMIT;
  * is disabled during the critical section. It also controls the size of
  * the RCU grace period. So it needs to be upper-bound.
  */
-#define HUNG_TASK_BATCHING 1024
+#define HUNG_TASK_LOCK_BREAK (HZ / 10)
 
 /*
  * Zero means infinite timeout - no checking done:
@@ -44,6 +45,7 @@ int __read_mostly sysctl_hung_task_warnings = 10;
 
 static int __read_mostly did_panic;
 static bool hung_task_show_lock;
+static bool hung_task_call_panic;
 
 static struct task_struct *watchdog_task;
 
@@ -102,8 +104,11 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
 
        trace_sched_process_hang(t);
 
-       if (!sysctl_hung_task_warnings && !sysctl_hung_task_panic)
-               return;
+       if (sysctl_hung_task_panic) {
+               console_verbose();
+               hung_task_show_lock = true;
+               hung_task_call_panic = true;
+       }
 
        /*
         * Ok, the task did not get scheduled for more than 2 minutes,
@@ -125,13 +130,6 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
        }
 
        touch_nmi_watchdog();
-
-       if (sysctl_hung_task_panic) {
-               if (hung_task_show_lock)
-                       debug_show_all_locks();
-               trigger_all_cpu_backtrace();
-               panic("hung_task: blocked tasks");
-       }
 }
 
 /*
@@ -165,7 +163,7 @@ static bool rcu_lock_break(struct task_struct *g, struct task_struct *t)
 static void check_hung_uninterruptible_tasks(unsigned long timeout)
 {
        int max_count = sysctl_hung_task_check_count;
-       int batch_count = HUNG_TASK_BATCHING;
+       unsigned long last_break = jiffies;
        struct task_struct *g, *t;
 
        /*
@@ -180,10 +178,10 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
        for_each_process_thread(g, t) {
                if (!max_count--)
                        goto unlock;
-               if (!--batch_count) {
-                       batch_count = HUNG_TASK_BATCHING;
+               if (time_after(jiffies, last_break + HUNG_TASK_LOCK_BREAK)) {
                        if (!rcu_lock_break(g, t))
                                goto unlock;
+                       last_break = jiffies;
                }
                /* use "==" to skip the TASK_KILLABLE tasks waiting on NFS */
                if (t->state == TASK_UNINTERRUPTIBLE)
@@ -193,6 +191,10 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
        rcu_read_unlock();
        if (hung_task_show_lock)
                debug_show_all_locks();
+       if (hung_task_call_panic) {
+               trigger_all_cpu_backtrace();
+               panic("hung_task: blocked tasks");
+       }
 }
 
 static long hung_timeout_jiffies(unsigned long last_checked,
@@ -231,6 +233,28 @@ void reset_hung_task_detector(void)
 }
 EXPORT_SYMBOL_GPL(reset_hung_task_detector);
 
+static bool hung_detector_suspended;
+
+static int hungtask_pm_notify(struct notifier_block *self,
+                             unsigned long action, void *hcpu)
+{
+       switch (action) {
+       case PM_SUSPEND_PREPARE:
+       case PM_HIBERNATION_PREPARE:
+       case PM_RESTORE_PREPARE:
+               hung_detector_suspended = true;
+               break;
+       case PM_POST_SUSPEND:
+       case PM_POST_HIBERNATION:
+       case PM_POST_RESTORE:
+               hung_detector_suspended = false;
+               break;
+       default:
+               break;
+       }
+       return NOTIFY_OK;
+}
+
 /*
  * kthread which checks for tasks stuck in D state
  */
@@ -245,7 +269,8 @@ static int watchdog(void *dummy)
                long t = hung_timeout_jiffies(hung_last_checked, timeout);
 
                if (t <= 0) {
-                       if (!atomic_xchg(&reset_hung_task, 0))
+                       if (!atomic_xchg(&reset_hung_task, 0) &&
+                           !hung_detector_suspended)
                                check_hung_uninterruptible_tasks(timeout);
                        hung_last_checked = jiffies;
                        continue;
@@ -259,6 +284,10 @@ static int watchdog(void *dummy)
 static int __init hung_task_init(void)
 {
        atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
+
+       /* Disable hung task detector on suspend */
+       pm_notifier(hungtask_pm_notify, 0);
+
        watchdog_task = kthread_run(watchdog, NULL, "khungtaskd");
 
        return 0;