diff --git a/kernel/kthread.c b/kernel/kthread.c
index ba3992c8c3753bcc0785ecf998e457d21c013873..0368ccb50db6d187eae929e3df91609b8ac37782 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -20,7 +20,6 @@
 #include <linux/freezer.h>
 #include <linux/ptrace.h>
 #include <linux/uaccess.h>
-#include <linux/cgroup.h>
 #include <trace/events/sched.h>
 
 static DEFINE_SPINLOCK(kthread_create_lock);
@@ -47,13 +46,15 @@ struct kthread {
        void *data;
        struct completion parked;
        struct completion exited;
+#ifdef CONFIG_BLK_CGROUP
+       struct cgroup_subsys_state *blkcg_css;
+#endif
 };
 
 enum KTHREAD_BITS {
        KTHREAD_IS_PER_CPU = 0,
        KTHREAD_SHOULD_STOP,
        KTHREAD_SHOULD_PARK,
-       KTHREAD_IS_PARKED,
 };
 
 static inline void set_kthread_struct(void *kthread)
@@ -74,11 +75,17 @@ static inline struct kthread *to_kthread(struct task_struct *k)
 
 void free_kthread_struct(struct task_struct *k)
 {
+       struct kthread *kthread;
+
        /*
         * Can be NULL if this kthread was created by kernel_thread()
         * or if kmalloc() in kthread() failed.
         */
-       kfree(to_kthread(k));
+       kthread = to_kthread(k);
+#ifdef CONFIG_BLK_CGROUP
+       WARN_ON_ONCE(kthread && kthread->blkcg_css);
+#endif
+       kfree(kthread);
 }
 
 /**
@@ -169,14 +176,12 @@ void *kthread_probe_data(struct task_struct *task)
 
 static void __kthread_parkme(struct kthread *self)
 {
-       __set_current_state(TASK_PARKED);
-       while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {
-               if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))
-                       complete(&self->parked);
+       for (;;) {
+               set_current_state(TASK_PARKED);
+               if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
+                       break;
                schedule();
-               __set_current_state(TASK_PARKED);
        }
-       clear_bit(KTHREAD_IS_PARKED, &self->flags);
        __set_current_state(TASK_RUNNING);
 }
 
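The rewritten __kthread_parkme() follows the canonical kernel wait-loop idiom: set the task state first, then re-check the condition, so a wake_up_state() landing between the check and schedule() cannot be lost. The completion is no longer signalled from inside the loop; kthread_park_complete() below is exported so the completion can be signalled once the task has genuinely been scheduled out (the calling site is outside this file and does not appear in this diff). A minimal sketch of the same idiom in generic form, with should_stop as a stand-in condition:

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);	/* state first: ordered against the waker */
		if (should_stop)			/* re-check under the new state */
			break;
		schedule();				/* sleep until woken */
	}
	__set_current_state(TASK_RUNNING);		/* resume normal execution */
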
@@ -186,6 +191,11 @@ void kthread_parkme(void)
 }
 EXPORT_SYMBOL_GPL(kthread_parkme);
 
+void kthread_park_complete(struct task_struct *k)
+{
+       complete(&to_kthread(k)->parked);
+}
+
 static int kthread(void *_create)
 {
        /* Copy data: it's on kthread's stack */
@@ -196,7 +206,7 @@ static int kthread(void *_create)
        struct kthread *self;
        int ret;
 
-       self = kmalloc(sizeof(*self), GFP_KERNEL);
+       self = kzalloc(sizeof(*self), GFP_KERNEL);
        set_kthread_struct(self);
 
        /* If user was SIGKILLed, I release the structure. */
@@ -212,7 +222,6 @@ static int kthread(void *_create)
                do_exit(-ENOMEM);
        }
 
-       self->flags = 0;
        self->data = data;
        init_completion(&self->exited);
        init_completion(&self->parked);
@@ -294,6 +303,17 @@ struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
         * new kernel thread.
         */
        if (unlikely(wait_for_completion_killable(&done))) {
+               int i = 0;
+
+               /*
+                * I got SIGKILL, but keep waiting up to 10 more seconds for
+                * completion unless chosen by the OOM killer. This delay is a
+                * workaround for boot failures caused by SIGKILL arriving
+                * during a device driver initialization timeout.
+                */
+               while (i++ < 10 && !test_tsk_thread_flag(current, TIF_MEMDIE))
+                       if (wait_for_completion_timeout(&done, HZ))
+                               goto ready;
                /*
                 * If I was SIGKILLed before kthreadd (or new kernel thread)
                 * calls complete(), leave the cleanup of this structure to
@@ -307,6 +327,7 @@ struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
                 */
                wait_for_completion(&done);
        }
+ready:
        task = create->result;
        if (!IS_ERR(task)) {
                static const struct sched_param param = { .sched_priority = 0 };
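For callers the visible change is timing only: a SIGKILLed creator can now block up to roughly ten extra seconds before the usual error path runs. A hedged caller-side sketch, with the thread function and name hypothetical:

	task = kthread_create(my_threadfn, NULL, "my_worker");
	if (IS_ERR(task))
		return PTR_ERR(task);	/* -EINTR if the creator was SIGKILLed
					 * and kthreadd never completed the request */
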
@@ -443,22 +464,15 @@ void kthread_unpark(struct task_struct *k)
 {
        struct kthread *kthread = to_kthread(k);
 
-       clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
        /*
-        * We clear the IS_PARKED bit here as we don't wait
-        * until the task has left the park code. So if we'd
-        * park before that happens we'd see the IS_PARKED bit
-        * which might be about to be cleared.
+        * Newly created kthread was parked when the CPU was offline.
+        * The binding was lost and we need to set it again.
         */
-       if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
-               /*
-                * Newly created kthread was parked when the CPU was offline.
-                * The binding was lost and we need to set it again.
-                */
-               if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
-                       __kthread_bind(k, kthread->cpu, TASK_PARKED);
-               wake_up_state(k, TASK_PARKED);
-       }
+       if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
+               __kthread_bind(k, kthread->cpu, TASK_PARKED);
+
+       clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
+       wake_up_state(k, TASK_PARKED);
 }
 EXPORT_SYMBOL_GPL(kthread_unpark);
 
@@ -481,12 +495,13 @@ int kthread_park(struct task_struct *k)
        if (WARN_ON(k->flags & PF_EXITING))
                return -ENOSYS;
 
-       if (!test_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
-               set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
-               if (k != current) {
-                       wake_up_process(k);
-                       wait_for_completion(&kthread->parked);
-               }
+       if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
+               return -EBUSY;
+
+       set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
+       if (k != current) {
+               wake_up_process(k);
+               wait_for_completion(&kthread->parked);
        }
 
        return 0;
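Note the semantic tightening here: kthread_park() is no longer a silent no-op on a thread that is already parked; a second park request now warns and returns -EBUSY, so park/unpark must be strictly paired. A usage sketch under that assumption (worker and reconfigure() are hypothetical):

	int ret = kthread_park(worker);	/* blocks until the thread sleeps in TASK_PARKED */
	if (ret)
		return ret;		/* -ENOSYS if exiting, -EBUSY if already parking */
	reconfigure();			/* worker is guaranteed out of its loop here */
	kthread_unpark(worker);		/* rebinds per-CPU threads, then wakes */
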
@@ -836,7 +851,7 @@ void __kthread_queue_delayed_work(struct kthread_worker *worker,
        struct timer_list *timer = &dwork->timer;
        struct kthread_work *work = &dwork->work;
 
-       WARN_ON_ONCE(timer->function != (TIMER_FUNC_TYPE)kthread_delayed_work_timer_fn);
+       WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn);
 
        /*
         * If @delay is 0, queue @dwork->work immediately.  This is for
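The cast goes away because, after the timer-API conversion in this kernel, timer callbacks take a struct timer_list * directly, making the transitional TIMER_FUNC_TYPE shim unnecessary for the comparison. A minimal sketch of setting up delayed work so the WARN_ON_ONCE above holds (worker and my_work_fn are hypothetical):

	struct kthread_delayed_work dwork;

	kthread_init_delayed_work(&dwork, my_work_fn);		/* installs kthread_delayed_work_timer_fn
								 * as dwork.timer.function */
	kthread_queue_delayed_work(worker, &dwork, 2 * HZ);	/* runs my_work_fn after ~2s */
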
@@ -1152,3 +1167,54 @@ void kthread_destroy_worker(struct kthread_worker *worker)
        kfree(worker);
 }
 EXPORT_SYMBOL(kthread_destroy_worker);
+
+#ifdef CONFIG_BLK_CGROUP
+/**
+ * kthread_associate_blkcg - associate blkcg to current kthread
+ * @css: the cgroup info
+ *
+ * Current thread must be a kthread. The thread is running jobs on behalf of
+ * other threads. In some cases, we expect the jobs to carry the cgroup info
+ * of the original threads instead of that of the current thread. This
+ * function stores the original thread's cgroup info in the current kthread
+ * context for later retrieval.
+ */
+void kthread_associate_blkcg(struct cgroup_subsys_state *css)
+{
+       struct kthread *kthread;
+
+       if (!(current->flags & PF_KTHREAD))
+               return;
+       kthread = to_kthread(current);
+       if (!kthread)
+               return;
+
+       if (kthread->blkcg_css) {
+               css_put(kthread->blkcg_css);
+               kthread->blkcg_css = NULL;
+       }
+       if (css) {
+               css_get(css);
+               kthread->blkcg_css = css;
+       }
+}
+EXPORT_SYMBOL(kthread_associate_blkcg);
+
+/**
+ * kthread_blkcg - get associated blkcg css of current kthread
+ *
+ * Current thread must be a kthread.
+ */
+struct cgroup_subsys_state *kthread_blkcg(void)
+{
+       struct kthread *kthread;
+
+       if (current->flags & PF_KTHREAD) {
+               kthread = to_kthread(current);
+               if (kthread)
+                       return kthread->blkcg_css;
+       }
+       return NULL;
+}
+EXPORT_SYMBOL(kthread_blkcg);
+#endif
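
Taken together, a kthread issuing I/O on another task's behalf is expected to bracket submission with an associate/disassociate pair so the block layer, which can resolve the css through kthread_blkcg(), charges the originating group. A sketch under that assumption, with origin_css and bio standing in for the submitter's css and a prepared bio:

	kthread_associate_blkcg(origin_css);	/* takes its own css reference */
	submit_bio(bio);			/* charged to origin_css via kthread_blkcg() */
	kthread_associate_blkcg(NULL);		/* drops the reference; do this before the
						 * kthread exits or free_kthread_struct() warns */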