diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index b7591261652d3ea88811f9d2af2a3ce188fb3320..067cb83f37eae5644fa84e29a614abd6b4d8a3de 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -21,6 +21,7 @@
 #include <linux/smpboot.h>
 #include <linux/atomic.h>
 #include <linux/nmi.h>
+#include <linux/sched/wake_q.h>
 
 /*
  * Structure to determine completion condition and record errors.  May
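The new include pulls in the wake-queue API that the rest of this diff relies on: wake_q_add() merely records a task to be woken while a lock is held, and wake_up_q() issues the deferred wakeups after the lock has been dropped. A minimal sketch of the pattern (illustrative only; demo_lock and demo_wake_later are hypothetical names, not part of the patch):

    #include <linux/sched.h>
    #include <linux/sched/wake_q.h>
    #include <linux/spinlock.h>

    static DEFINE_RAW_SPINLOCK(demo_lock);

    static void demo_wake_later(struct task_struct *t)
    {
            DEFINE_WAKE_Q(wakeq);           /* empty on-stack wake queue */
            unsigned long flags;

            raw_spin_lock_irqsave(&demo_lock, flags);
            wake_q_add(&wakeq, t);          /* record @t; no wakeup yet */
            raw_spin_unlock_irqrestore(&demo_lock, flags);

            wake_up_q(&wakeq);              /* actual wake_up_process() happens here */
    }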
@@ -36,7 +37,7 @@ struct cpu_stop_done {
 struct cpu_stopper {
        struct task_struct      *thread;
 
-       spinlock_t              lock;
+       raw_spinlock_t          lock;
        bool                    enabled;        /* is this stopper enabled? */
        struct list_head        works;          /* list of pending works */
 
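Converting the lock to raw_spinlock_t matters on PREEMPT_RT, where a plain spinlock_t is backed by an rtmutex and may sleep; the stopper lock is taken on wakeup/scheduling paths that must not sleep, so it needs the always-spinning variant. An illustrative sketch only (demo_stopper and demo_stopper_init are hypothetical), showing the matching raw_* initializer that appears later in this diff:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct demo_stopper {
            raw_spinlock_t          lock;   /* busy-waits on every config, never sleeps */
            struct list_head        works;
    };

    static void demo_stopper_init(struct demo_stopper *s)
    {
            raw_spin_lock_init(&s->lock);   /* raw_* init pairs with raw_spinlock_t */
            INIT_LIST_HEAD(&s->works);
    }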
@@ -65,26 +66,32 @@ static void cpu_stop_signal_done(struct cpu_stop_done *done)
 }
 
 static void __cpu_stop_queue_work(struct cpu_stopper *stopper,
-                                       struct cpu_stop_work *work)
+                                       struct cpu_stop_work *work,
+                                       struct wake_q_head *wakeq)
 {
        list_add_tail(&work->list, &stopper->works);
-       wake_up_process(stopper->thread);
+       wake_q_add(wakeq, stopper->thread);
 }
 
 /* queue @work to @stopper.  if offline, @work is completed immediately */
 static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
 {
        struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
+       DEFINE_WAKE_Q(wakeq);
        unsigned long flags;
        bool enabled;
 
-       spin_lock_irqsave(&stopper->lock, flags);
+       preempt_disable();
+       raw_spin_lock_irqsave(&stopper->lock, flags);
        enabled = stopper->enabled;
        if (enabled)
-               __cpu_stop_queue_work(stopper, work);
+               __cpu_stop_queue_work(stopper, work, &wakeq);
        else if (work->done)
                cpu_stop_signal_done(work->done);
-       spin_unlock_irqrestore(&stopper->lock, flags);
+       raw_spin_unlock_irqrestore(&stopper->lock, flags);
+
+       wake_up_q(&wakeq);
+       preempt_enable();
 
        return enabled;
 }
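With this change, __cpu_stop_queue_work() only records the stopper thread in the wake queue; cpu_stop_queue_work() issues the wakeup after dropping the lock, and preemption stays disabled from queueing until the wakeup, so the queueing task cannot be preempted in between. Callers reach this machinery through helpers such as stop_one_cpu(); a hypothetical usage sketch (demo_fn and demo_call are assumed names):

    #include <linux/printk.h>
    #include <linux/smp.h>
    #include <linux/stop_machine.h>

    static int demo_fn(void *arg)
    {
            /* Runs in the target CPU's stopper thread; must not sleep. */
            pr_info("stopper callback on CPU %d\n", smp_processor_id());
            return 0;
    }

    static int demo_call(void)
    {
            /* Queues work for CPU 1's stopper and waits for completion;
             * returns demo_fn()'s return value, or -ENOENT if the
             * stopper is not enabled. */
            return stop_one_cpu(1, demo_fn, NULL);
    }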
@@ -229,14 +236,26 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
 {
        struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1);
        struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
+       DEFINE_WAKE_Q(wakeq);
        int err;
+
 retry:
-       spin_lock_irq(&stopper1->lock);
-       spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);
+       /*
+        * The waking up of stopper threads has to happen in the same
+        * scheduling context as the queueing.  Otherwise, there is a
+        * possibility that one of the stoppers queued above is woken up by
+        * another CPU and preempts us before both wakeups have been issued;
+        * we would then never wake the other stopper.
+        */
+       preempt_disable();
+       raw_spin_lock_irq(&stopper1->lock);
+       raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);
 
-       err = -ENOENT;
-       if (!stopper1->enabled || !stopper2->enabled)
+       if (!stopper1->enabled || !stopper2->enabled) {
+               err = -ENOENT;
                goto unlock;
+       }
+
        /*
         * Ensure that if we race with __stop_cpus() the stoppers won't get
         * queued up in reverse order leading to system deadlock.
@@ -247,22 +266,31 @@ retry:
         * It can be falsely true but it is safe to spin until it is cleared,
         * queue_stop_cpus_work() does everything under preempt_disable().
         */
-       err = -EDEADLK;
-       if (unlikely(stop_cpus_in_progress))
-                       goto unlock;
+       if (unlikely(stop_cpus_in_progress)) {
+               err = -EDEADLK;
+               goto unlock;
+       }
 
        err = 0;
-       __cpu_stop_queue_work(stopper1, work1);
-       __cpu_stop_queue_work(stopper2, work2);
+       __cpu_stop_queue_work(stopper1, work1, &wakeq);
+       __cpu_stop_queue_work(stopper2, work2, &wakeq);
+
 unlock:
-       spin_unlock(&stopper2->lock);
-       spin_unlock_irq(&stopper1->lock);
+       raw_spin_unlock(&stopper2->lock);
+       raw_spin_unlock_irq(&stopper1->lock);
 
        if (unlikely(err == -EDEADLK)) {
+               preempt_enable();
+
                while (stop_cpus_in_progress)
                        cpu_relax();
+
                goto retry;
        }
+
+       wake_up_q(&wakeq);
+       preempt_enable();
+
        return err;
 }
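cpu_stop_queue_two_works() has to queue onto both CPUs' stoppers atomically with respect to stop_cpus(), hence the nested locking and the -EDEADLK retry. Two details are worth noting: the second lock is taken with SINGLE_DEPTH_NESTING so lockdep accepts a second acquisition of the same lock class, and preemption is re-enabled before spinning on stop_cpus_in_progress, since busy-waiting with preemption off could keep this CPU's own stopper from running and the flag from ever clearing. A sketch of the nesting idiom (demo_lock_pair/demo_unlock_pair are hypothetical; callers must agree on a lock order, as stop_two_cpus() does by ordering the CPU numbers):

    #include <linux/spinlock.h>

    /* Take two locks of the same lock class in a caller-enforced order. */
    static void demo_lock_pair(raw_spinlock_t *first, raw_spinlock_t *second)
    {
            raw_spin_lock_irq(first);
            /* Tell lockdep the same-class nesting is intentional. */
            raw_spin_lock_nested(second, SINGLE_DEPTH_NESTING);
    }

    static void demo_unlock_pair(raw_spinlock_t *first, raw_spinlock_t *second)
    {
            raw_spin_unlock(second);
            raw_spin_unlock_irq(first);
    }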
 /**
@@ -448,9 +476,9 @@ static int cpu_stop_should_run(unsigned int cpu)
        unsigned long flags;
        int run;
 
-       spin_lock_irqsave(&stopper->lock, flags);
+       raw_spin_lock_irqsave(&stopper->lock, flags);
        run = !list_empty(&stopper->works);
-       spin_unlock_irqrestore(&stopper->lock, flags);
+       raw_spin_unlock_irqrestore(&stopper->lock, flags);
        return run;
 }
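For context, cpu_stop_should_run() is the thread_should_run hook of an smpboot thread; elsewhere in this file (unchanged by the diff) the stopper threads are registered along these lines. Treat this as a from-memory sketch, with some fields omitted:

    #include <linux/smpboot.h>

    static struct smp_hotplug_thread cpu_stop_threads = {
            .store                  = &cpu_stopper.thread,
            .thread_should_run      = cpu_stop_should_run,
            .thread_fn              = cpu_stopper_thread,
            .thread_comm            = "migration/%u",
            /* create/park hooks and .selfparking omitted in this sketch */
    };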
 
@@ -461,13 +489,13 @@ static void cpu_stopper_thread(unsigned int cpu)
 
 repeat:
        work = NULL;
-       spin_lock_irq(&stopper->lock);
+       raw_spin_lock_irq(&stopper->lock);
        if (!list_empty(&stopper->works)) {
                work = list_first_entry(&stopper->works,
                                        struct cpu_stop_work, list);
                list_del_init(&work->list);
        }
-       spin_unlock_irq(&stopper->lock);
+       raw_spin_unlock_irq(&stopper->lock);
 
        if (work) {
                cpu_stop_fn_t fn = work->fn;
@@ -541,7 +569,7 @@ static int __init cpu_stop_init(void)
        for_each_possible_cpu(cpu) {
                struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
 
-               spin_lock_init(&stopper->lock);
+               raw_spin_lock_init(&stopper->lock);
                INIT_LIST_HEAD(&stopper->works);
        }