git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/commitdiff
workqueue/hotplug: simplify workqueue_offline_cpu()
author: Lai Jiangshan <jiangshanlai@gmail.com>
Fri, 1 Dec 2017 14:20:36 +0000 (22:20 +0800)
committer: Tejun Heo <tj@kernel.org>
Mon, 4 Dec 2017 22:44:11 +0000 (14:44 -0800)
Since the recent cpu/hotplug refactoring, workqueue_offline_cpu() is
guaranteed to run on the local cpu which is going offline.

This also fixes the following deadlock by removing work item
scheduling and flushing from CPU hotplug path.

 http://lkml.kernel.org/r/1504764252-29091-1-git-send-email-prsood@codeaurora.org

tj: Description update.

Signed-off-by: Lai Jiangshan <jiangshanlai@gmail.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
kernel/workqueue.c

index 6a5658cb46dadf6afeb845dc1476657621c458b1..48a4d00f55dc80a24554a526ae33d15149302da0 100644 (file)
@@ -1635,7 +1635,7 @@ static void worker_enter_idle(struct worker *worker)
                mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
 
        /*
-        * Sanity check nr_running.  Because wq_unbind_fn() releases
+        * Sanity check nr_running.  Because unbind_workers() releases
         * pool->lock between setting %WORKER_UNBOUND and zapping
         * nr_running, the warning may trigger spuriously.  Check iff
         * unbind is not in progress.
@@ -4511,9 +4511,8 @@ void show_workqueue_state(void)
  * cpu comes back online.
  */
 
-static void wq_unbind_fn(struct work_struct *work)
+static void unbind_workers(int cpu)
 {
-       int cpu = smp_processor_id();
        struct worker_pool *pool;
        struct worker *worker;
 
@@ -4710,12 +4709,13 @@ int workqueue_online_cpu(unsigned int cpu)
 
 int workqueue_offline_cpu(unsigned int cpu)
 {
-       struct work_struct unbind_work;
        struct workqueue_struct *wq;
 
        /* unbinding per-cpu workers should happen on the local CPU */
-       INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn);
-       queue_work_on(cpu, system_highpri_wq, &unbind_work);
+       if (WARN_ON(cpu != smp_processor_id()))
+               return -1;
+
+       unbind_workers(cpu);
 
        /* update NUMA affinity of unbound workqueues */
        mutex_lock(&wq_pool_mutex);
@@ -4723,9 +4723,6 @@ int workqueue_offline_cpu(unsigned int cpu)
                wq_update_unbound_numa(wq, cpu, false);
        mutex_unlock(&wq_pool_mutex);
 
-       /* wait for per-cpu unbinding to finish */
-       flush_work(&unbind_work);
-       destroy_work_on_stack(&unbind_work);
        return 0;
 }