Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 6e5eed58f215db9f29939f930251c54cbeebdd72..7368b57842ea349ca0c1183f6999ec7f16e6c647 100644
@@ -68,6 +68,7 @@ enum {
         * attach_mutex to avoid changing binding state while
         * worker_attach_to_pool() is in progress.
         */
+       POOL_MANAGER_ACTIVE     = 1 << 0,       /* being managed */
        POOL_DISASSOCIATED      = 1 << 2,       /* cpu can't serve workers */
 
        /* worker flags */
@@ -165,7 +166,6 @@ struct worker_pool {
                                                /* L: hash of busy workers */
 
        /* see manage_workers() for details on the two manager mutexes */
-       struct mutex            manager_arb;    /* manager arbitration */
        struct worker           *manager;       /* L: purely informational */
        struct mutex            attach_mutex;   /* attach/detach exclusion */
        struct list_head        workers;        /* A: attached workers */
@@ -299,6 +299,7 @@ static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;
 
 static DEFINE_MUTEX(wq_pool_mutex);    /* protects pools and workqueues list */
 static DEFINE_SPINLOCK(wq_mayday_lock);        /* protects wq->maydays list */
+static DECLARE_WAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */
 
 static LIST_HEAD(workqueues);          /* PR: list of all workqueues */
 static bool workqueue_freezing;                /* PL: have wqs started freezing? */
@@ -801,7 +802,7 @@ static bool need_to_create_worker(struct worker_pool *pool)
 /* Do we have too many workers and should some go away? */
 static bool too_many_workers(struct worker_pool *pool)
 {
-       bool managing = mutex_is_locked(&pool->manager_arb);
+       bool managing = pool->flags & POOL_MANAGER_ACTIVE;
        int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
        int nr_busy = pool->nr_workers - nr_idle;
 
@@ -1375,7 +1376,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
         * queued or lose PENDING.  Grabbing PENDING and queueing should
         * happen with IRQ disabled.
         */
-       WARN_ON_ONCE(!irqs_disabled());
+       lockdep_assert_irqs_disabled();
 
        debug_work_activate(work);
 
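The change above swaps a runtime check for a lockdep-backed one: WARN_ON_ONCE(!irqs_disabled()) reads the hardware IRQ flags unconditionally on every queueing operation, while lockdep_assert_irqs_disabled() keys off lockdep's software-tracked IRQ state and compiles away entirely in non-debug builds. A rough sketch of the shape of such a macro (illustrative only, not the kernel's exact definition; the real one also guards against lockdep recursion):

#ifdef CONFIG_TRACE_IRQFLAGS
/* debug build: warn once if IRQs are unexpectedly enabled */
#define sketch_assert_irqs_disabled() \
	WARN_ONCE(!irqs_disabled(), "IRQs not disabled as expected\n")
#else
/* non-debug build: the assertion costs nothing */
#define sketch_assert_irqs_disabled() do { } while (0)
#endif
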
@@ -1979,24 +1980,17 @@ static bool manage_workers(struct worker *worker)
 {
        struct worker_pool *pool = worker->pool;
 
-       /*
-        * Anyone who successfully grabs manager_arb wins the arbitration
-        * and becomes the manager.  mutex_trylock() on pool->manager_arb
-        * failure while holding pool->lock reliably indicates that someone
-        * else is managing the pool and the worker which failed trylock
-        * can proceed to executing work items.  This means that anyone
-        * grabbing manager_arb is responsible for actually performing
-        * manager duties.  If manager_arb is grabbed and released without
-        * actual management, the pool may stall indefinitely.
-        */
-       if (!mutex_trylock(&pool->manager_arb))
+       if (pool->flags & POOL_MANAGER_ACTIVE)
                return false;
+
+       pool->flags |= POOL_MANAGER_ACTIVE;
        pool->manager = worker;
 
        maybe_create_worker(pool);
 
        pool->manager = NULL;
-       mutex_unlock(&pool->manager_arb);
+       pool->flags &= ~POOL_MANAGER_ACTIVE;
+       wake_up(&wq_manager_wait);
        return true;
 }
 
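Together with the POOL_MANAGER_ACTIVE and wq_manager_wait declarations above, this hunk is the heart of the change: manager arbitration no longer needs a dedicated mutex because the flag is only ever tested and modified under pool->lock. A worker that finds the flag already set returns false and goes back to executing work items; the clearer wakes wq_manager_wait so that put_unbound_pool() (below) can claim the pool for destruction. A minimal, compilable pthreads analogue of the same pattern, with hypothetical names throughout: a mutex stands in for pool->lock and a condition variable for wq_manager_wait.

#include <pthread.h>
#include <stdbool.h>

#define POOL_MANAGER_ACTIVE (1 << 0)

struct pool {
	pthread_mutex_t lock;         /* stands in for pool->lock */
	pthread_cond_t  manager_gone; /* stands in for wq_manager_wait */
	unsigned int    flags;
};

static struct pool the_pool = {
	.lock         = PTHREAD_MUTEX_INITIALIZER,
	.manager_gone = PTHREAD_COND_INITIALIZER,
};

/* Worker path, as in manage_workers(): claim managership iff nobody has it. */
static bool try_manage(struct pool *p)
{
	pthread_mutex_lock(&p->lock);
	if (p->flags & POOL_MANAGER_ACTIVE) {
		pthread_mutex_unlock(&p->lock);
		return false;                 /* someone else is managing */
	}
	p->flags |= POOL_MANAGER_ACTIVE;
	pthread_mutex_unlock(&p->lock);

	/* ... manager duties (maybe_create_worker()) would run here ... */

	pthread_mutex_lock(&p->lock);
	p->flags &= ~POOL_MANAGER_ACTIVE;
	pthread_cond_broadcast(&p->manager_gone); /* wake_up(&wq_manager_wait) */
	pthread_mutex_unlock(&p->lock);
	return true;
}

/* Destroy path, as in put_unbound_pool(): wait out any active manager,
 * then become the last one; the flag is never cleared again. */
static void claim_for_destroy(struct pool *p)
{
	pthread_mutex_lock(&p->lock);
	while (p->flags & POOL_MANAGER_ACTIVE)
		pthread_cond_wait(&p->manager_gone, &p->lock);
	p->flags |= POOL_MANAGER_ACTIVE;
	pthread_mutex_unlock(&p->lock);
}

int main(void)
{
	try_manage(&the_pool);        /* sets and clears the flag */
	claim_for_destroy(&the_pool); /* finds it clear, claims for good */
	return 0;
}
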
@@ -2496,15 +2490,8 @@ static void insert_wq_barrier(struct pool_workqueue *pwq,
        INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
        __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
 
-       /*
-        * Explicitly init the crosslock for wq_barrier::done, make its lock
-        * key a subkey of the corresponding work. As a result we won't
-        * build a dependency between wq_barrier::done and unrelated work.
-        */
-       lockdep_init_map_crosslock((struct lockdep_map *)&barr->done.map,
-                                  "(complete)wq_barr::done",
-                                  target->lockdep_map.key, 1);
-       __init_completion(&barr->done);
+       init_completion_map(&barr->done, &target->lockdep_map);
+
        barr->task = current;
 
        /*
@@ -2610,16 +2597,13 @@ void flush_workqueue(struct workqueue_struct *wq)
        struct wq_flusher this_flusher = {
                .list = LIST_HEAD_INIT(this_flusher.list),
                .flush_color = -1,
-               .done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
+               .done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map),
        };
        int next_color;
 
        if (WARN_ON(!wq_online))
                return;
 
-       lock_map_acquire(&wq->lockdep_map);
-       lock_map_release(&wq->lockdep_map);
-
        mutex_lock(&wq->mutex);
 
        /*
@@ -2882,9 +2866,6 @@ bool flush_work(struct work_struct *work)
        if (WARN_ON(!wq_online))
                return false;
 
-       lock_map_acquire(&work->lockdep_map);
-       lock_map_release(&work->lockdep_map);
-
        if (start_flush_work(work, &barr)) {
                wait_for_completion(&barr.done);
                destroy_work_on_stack(&barr.work);
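The three lockdep hunks above (insert_wq_barrier(), flush_workqueue(), flush_work()) are one logical change: rather than simulating the dependency with bare lock_map_acquire()/lock_map_release() pairs before the wait, the completions themselves now carry the lockdep class of the work or workqueue they synchronize with, via init_completion_map() and COMPLETION_INITIALIZER_ONSTACK_MAP(). The canonical deadlock this is meant to catch is a work item flushing the workqueue it is running on, as in this module-context sketch (sketch_wq and friends are hypothetical names, not kernel API):

#include <linux/workqueue.h>

static struct workqueue_struct *sketch_wq;  /* hypothetical */
static struct work_struct sketch_work;

static void sketch_work_fn(struct work_struct *work)
{
	/*
	 * Waits for every item on sketch_wq, including this one: a
	 * guaranteed deadlock.  With the barrier completion sharing the
	 * wq's lockdep class, lockdep can now report the cycle instead
	 * of the machine hanging silently.
	 */
	flush_workqueue(sketch_wq);
}

/* elsewhere, e.g. in module init (sketch):
 *	sketch_wq = alloc_workqueue("sketch", 0, 0);
 *	INIT_WORK(&sketch_work, sketch_work_fn);
 *	queue_work(sketch_wq, &sketch_work);
 */
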
@@ -3245,7 +3226,6 @@ static int init_worker_pool(struct worker_pool *pool)
 
        timer_setup(&pool->mayday_timer, pool_mayday_timeout, 0);
 
-       mutex_init(&pool->manager_arb);
        mutex_init(&pool->attach_mutex);
        INIT_LIST_HEAD(&pool->workers);
 
@@ -3315,13 +3295,15 @@ static void put_unbound_pool(struct worker_pool *pool)
        hash_del(&pool->hash_node);
 
        /*
-        * Become the manager and destroy all workers.  Grabbing
-        * manager_arb prevents @pool's workers from blocking on
-        * attach_mutex.
+        * Become the manager and destroy all workers.  This prevents
+        * @pool's workers from blocking on attach_mutex.  We're the last
+        * manager and @pool gets freed with the flag set.
         */
-       mutex_lock(&pool->manager_arb);
-
        spin_lock_irq(&pool->lock);
+       wait_event_lock_irq(wq_manager_wait,
+                           !(pool->flags & POOL_MANAGER_ACTIVE), pool->lock);
+       pool->flags |= POOL_MANAGER_ACTIVE;
+
        while ((worker = first_idle_worker(pool)))
                destroy_worker(worker);
        WARN_ON(pool->nr_workers || pool->nr_idle);
@@ -3335,8 +3317,6 @@ static void put_unbound_pool(struct worker_pool *pool)
        if (pool->detach_completion)
                wait_for_completion(pool->detach_completion);
 
-       mutex_unlock(&pool->manager_arb);
-
        /* shut down the timers */
        del_timer_sync(&pool->idle_timer);
        del_timer_sync(&pool->mayday_timer);
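In the put_unbound_pool() hunks above, the key property is supplied by wait_event_lock_irq(): the condition is evaluated with pool->lock held, the lock is dropped only while actually sleeping, and the macro returns with the lock re-held and the condition true. Setting POOL_MANAGER_ACTIVE immediately afterwards therefore cannot race with manage_workers(). Schematically (a simplification, not the macro's literal expansion):

	DEFINE_WAIT(wait);

	spin_lock_irq(&pool->lock);
	while (pool->flags & POOL_MANAGER_ACTIVE) {
		prepare_to_wait(&wq_manager_wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&pool->lock);  /* sleep without the lock... */
		schedule();                    /* ...until wake_up(&wq_manager_wait) */
		spin_lock_irq(&pool->lock);    /* re-take before rechecking */
	}
	finish_wait(&wq_manager_wait, &wait);
	/* lock held and no manager active: safe to claim the pool */
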
@@ -4644,7 +4624,7 @@ static void rebind_workers(struct worker_pool *pool)
                 * concurrency management.  Note that when or whether
                 * @worker clears REBOUND doesn't affect correctness.
                 *
-                * ACCESS_ONCE() is necessary because @worker->flags may be
+                * WRITE_ONCE() is necessary because @worker->flags may be
                 * tested without holding any lock in
                 * wq_worker_waking_up().  Without it, NOT_RUNNING test may
                 * fail incorrectly leading to premature concurrency
@@ -4653,7 +4633,7 @@ static void rebind_workers(struct worker_pool *pool)
                WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
                worker_flags |= WORKER_REBOUND;
                worker_flags &= ~WORKER_UNBOUND;
-               ACCESS_ONCE(worker->flags) = worker_flags;
+               WRITE_ONCE(worker->flags, worker_flags);
        }
 
        spin_unlock_irq(&pool->lock);
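The ACCESS_ONCE() to WRITE_ONCE() conversion is part of the tree-wide retirement of ACCESS_ONCE(); the semantics here are unchanged. The new flags value is computed in a local variable and then published with a single store the compiler may neither tear nor repeat, which is what the lockless NOT_RUNNING test in wq_worker_waking_up() depends on. A standalone userspace analogue of the idiom (the MY_* macros are simplified stand-ins; the kernel versions also handle unusual access sizes):

#include <stdio.h>

/* simplified stand-ins for the kernel's READ_ONCE()/WRITE_ONCE() */
#define MY_READ_ONCE(x)       (*(volatile __typeof__(x) *)&(x))
#define MY_WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))

#define WORKER_UNBOUND (1 << 7)  /* values illustrative */
#define WORKER_REBOUND (1 << 8)

struct worker { unsigned int flags; };

int main(void)
{
	struct worker w = { .flags = WORKER_UNBOUND };
	unsigned int worker_flags = MY_READ_ONCE(w.flags);

	worker_flags |= WORKER_REBOUND;       /* compute the update locally */
	worker_flags &= ~WORKER_UNBOUND;
	MY_WRITE_ONCE(w.flags, worker_flags); /* publish with one plain store */

	printf("flags=%#x\n", MY_READ_ONCE(w.flags));
	return 0;
}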