diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 33a6b4a2443d274de3dfb1f31c11a460da118747..3f4d276685768cc7fc7061f114cffa7dc257745f 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -867,8 +867,17 @@ void wq_worker_running(struct task_struct *task)
 
        if (!worker->sleeping)
                return;
+
+       /*
+        * If preempted by unbind_workers() between the WORKER_NOT_RUNNING check
+        * and the nr_running increment below, we may ruin the nr_running reset
+        * and leave with an unexpected pool->nr_running == 1 on the newly unbound
+        * pool. Protect against such a race.
+        */
+       preempt_disable();
        if (!(worker->flags & WORKER_NOT_RUNNING))
                atomic_inc(&worker->pool->nr_running);
+       preempt_enable();
        worker->sleeping = 0;
 }
 
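The hunk above closes a preemption window between the WORKER_NOT_RUNNING test and the nr_running increment. Below is a minimal sketch of the same pattern; the demo_* names are stand-ins for illustration only, not the real workqueue internals. The point is that the flag check and the atomic_inc() must not be separated by a preemption point, otherwise code running in between (unbind_workers() in the real case) can reset the counter and the later increment leaves a stale non-zero value behind.

	/* Illustrative sketch only; demo_* types are hypothetical, not workqueue code. */
	#include <linux/atomic.h>
	#include <linux/preempt.h>

	#define DEMO_NOT_RUNNING	0x1

	struct demo_pool {
		atomic_t nr_running;
	};

	struct demo_worker {
		unsigned int flags;
		struct demo_pool *pool;
	};

	static void demo_mark_running(struct demo_worker *worker)
	{
		/*
		 * Disable preemption so nothing can reset pool->nr_running
		 * between the flag check and the increment; otherwise an
		 * increment based on a stale check could leave nr_running
		 * at 1 after the pool has already been reset.
		 */
		preempt_disable();
		if (!(worker->flags & DEMO_NOT_RUNNING))
			atomic_inc(&worker->pool->nr_running);
		preempt_enable();
	}
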
@@ -4830,8 +4839,16 @@ void show_workqueue_state(void)
 
                for_each_pwq(pwq, wq) {
                        raw_spin_lock_irqsave(&pwq->pool->lock, flags);
-                       if (pwq->nr_active || !list_empty(&pwq->inactive_works))
+                       if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
+                               /*
+                                * Defer printing to avoid deadlocks in console
+                                * drivers that queue work while holding locks
+                                * also taken in their write paths.
+                                */
+                               printk_deferred_enter();
                                show_pwq(pwq);
+                               printk_deferred_exit();
+                       }
                        raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
                        /*
                         * We could be printing a lot from atomic context, e.g.
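This hunk, and the two per-pool hunks that follow, wrap the state dump in printk_deferred_enter()/printk_deferred_exit(). A minimal sketch of that pattern, with a hypothetical demo_lock standing in for the pool lock: while the lock is held the console output is deferred rather than pushed synchronously, so a console driver that queues work (and may therefore need the same lock) cannot deadlock against us.

	/* Illustrative sketch only; demo_lock and demo_dump_state() are hypothetical. */
	#include <linux/printk.h>
	#include <linux/spinlock.h>

	static DEFINE_RAW_SPINLOCK(demo_lock);

	static void demo_dump_state(void)
	{
		unsigned long flags;

		raw_spin_lock_irqsave(&demo_lock, flags);

		/*
		 * A console driver reached from printk() might queue work and
		 * end up taking locks we already hold here. Deferring the
		 * output while the lock is held avoids that deadlock; the
		 * buffered messages are printed later, outside this region.
		 */
		printk_deferred_enter();
		pr_info("demo: dumping state under demo_lock\n");
		printk_deferred_exit();

		raw_spin_unlock_irqrestore(&demo_lock, flags);
	}
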
@@ -4849,7 +4866,12 @@ void show_workqueue_state(void)
                raw_spin_lock_irqsave(&pool->lock, flags);
                if (pool->nr_workers == pool->nr_idle)
                        goto next_pool;
-
+               /*
+                * Defer printing to avoid deadlocks in console drivers that
+                * queue work while holding locks also taken in their write
+                * paths.
+                */
+               printk_deferred_enter();
                pr_info("pool %d:", pool->id);
                pr_cont_pool_info(pool);
                pr_cont(" hung=%us workers=%d",
@@ -4864,6 +4886,7 @@ void show_workqueue_state(void)
                        first = false;
                }
                pr_cont("\n");
+               printk_deferred_exit();
        next_pool:
                raw_spin_unlock_irqrestore(&pool->lock, flags);
                /*
@@ -5370,9 +5393,6 @@ int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
        int ret = -EINVAL;
        cpumask_var_t saved_cpumask;
 
-       if (!zalloc_cpumask_var(&saved_cpumask, GFP_KERNEL))
-               return -ENOMEM;
-
        /*
         * Not excluding isolated cpus on purpose.
         * If the user wishes to include them, we allow that.
@@ -5380,6 +5400,15 @@ int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
        cpumask_and(cpumask, cpumask, cpu_possible_mask);
        if (!cpumask_empty(cpumask)) {
                apply_wqattrs_lock();
+               if (cpumask_equal(cpumask, wq_unbound_cpumask)) {
+                       ret = 0;
+                       goto out_unlock;
+               }
+
+               if (!zalloc_cpumask_var(&saved_cpumask, GFP_KERNEL)) {
+                       ret = -ENOMEM;
+                       goto out_unlock;
+               }
 
                /* save the old wq_unbound_cpumask. */
                cpumask_copy(saved_cpumask, wq_unbound_cpumask);
@@ -5392,10 +5421,11 @@ int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
                if (ret < 0)
                        cpumask_copy(wq_unbound_cpumask, saved_cpumask);
 
+               free_cpumask_var(saved_cpumask);
+out_unlock:
                apply_wqattrs_unlock();
        }
 
-       free_cpumask_var(saved_cpumask);
        return ret;
 }
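
The last three hunks restructure workqueue_set_unbound_cpumask() so that a request matching the current mask returns early, the rollback cpumask is allocated only once an update is actually going to happen, and a single out_unlock label releases the lock on every exit path. A rough sketch of that shape in the same style, using hypothetical demo_* names (demo_apply() stands in for workqueue_apply_unbound_cpumask(), demo_lock for apply_wqattrs_lock()/unlock()):

	/* Illustrative sketch only; demo_* names are stand-ins for the real symbols. */
	#include <linux/cpumask.h>
	#include <linux/gfp.h>
	#include <linux/mutex.h>

	static DEFINE_MUTEX(demo_lock);
	static cpumask_var_t demo_current_mask;

	static int demo_apply(void)
	{
		return 0;	/* stand-in for the real "apply to workqueues" step */
	}

	static int demo_set_mask(cpumask_var_t cpumask)
	{
		cpumask_var_t saved;
		int ret = -EINVAL;

		cpumask_and(cpumask, cpumask, cpu_possible_mask);
		if (!cpumask_empty(cpumask)) {
			mutex_lock(&demo_lock);

			/* Requesting the current mask is a no-op: skip the allocation. */
			if (cpumask_equal(cpumask, demo_current_mask)) {
				ret = 0;
				goto out_unlock;
			}

			/* Allocate the rollback copy only when it can actually be used. */
			if (!zalloc_cpumask_var(&saved, GFP_KERNEL)) {
				ret = -ENOMEM;
				goto out_unlock;
			}

			cpumask_copy(saved, demo_current_mask);
			cpumask_copy(demo_current_mask, cpumask);

			ret = demo_apply();
			if (ret < 0)
				cpumask_copy(demo_current_mask, saved);	/* roll back */

			free_cpumask_var(saved);
	out_unlock:
			mutex_unlock(&demo_lock);
		}

		return ret;
	}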