git.proxmox.com Git - mirror_spl-debian.git/commitdiff
Fix race between taskq_destroy and dynamic spawning thread
author    Chunwei Chen <david.chen@osnexus.com>
          Sat, 21 May 2016 01:04:03 +0000 (18:04 -0700)
committer Brian Behlendorf <behlendorf1@llnl.gov>
          Tue, 24 May 2016 20:00:17 +0000 (13:00 -0700)
While taskq_destroy does wait for dynamic_taskq to finish its tasks, that does
not imply the thread being spawned is up and running. This can cause the taskq
to be freed before the thread has a chance to exit.

We fix this by using tq_nspawn to track how many threads are being spawned
before they are inserted into the thread list, and by having taskq_destroy
wait for it to drop to zero.
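
For illustration only, here is a minimal user-space sketch of the same idea,
using pthreads and hypothetical names (struct queue, queue_spawn,
queue_destroy) rather than the actual SPL taskq code: the spawner bumps a
counter before creating a worker, the worker decrements it once it has
registered itself, and destroy waits for the counter to reach zero before
freeing the structure.

/* Hypothetical sketch of the spawn-counting pattern; not the SPL code. */
#include <pthread.h>
#include <stdlib.h>

struct queue {
	pthread_mutex_t lock;
	pthread_cond_t  cv;
	int             nspawn;    /* threads created but not yet registered */
	int             nthreads;  /* threads registered in the list */
};

static void *
worker(void *arg)
{
	struct queue *q = arg;

	pthread_mutex_lock(&q->lock);
	q->nthreads++;             /* "insert" ourselves into the thread list */
	q->nspawn--;               /* no longer merely "being spawned" */
	pthread_cond_broadcast(&q->cv);
	pthread_mutex_unlock(&q->lock);

	/* ... perform work, then exit; q is not touched after the unlock ... */
	return (NULL);
}

static int
queue_spawn(struct queue *q)
{
	pthread_t tid;

	pthread_mutex_lock(&q->lock);
	q->nspawn++;               /* account for the thread before it exists */
	pthread_mutex_unlock(&q->lock);

	if (pthread_create(&tid, NULL, worker, q) != 0) {
		pthread_mutex_lock(&q->lock);
		q->nspawn--;       /* restore the count if creation failed */
		pthread_mutex_unlock(&q->lock);
		return (-1);
	}
	pthread_detach(tid);
	return (0);
}

static void
queue_destroy(struct queue *q)
{
	pthread_mutex_lock(&q->lock);
	/* Wait for in-flight spawns to register before tearing down. */
	while (q->nspawn > 0)
		pthread_cond_wait(&q->cv, &q->lock);
	pthread_mutex_unlock(&q->lock);

	/* ... signal registered workers to exit, then free everything ... */
	pthread_mutex_destroy(&q->lock);
	pthread_cond_destroy(&q->cv);
	free(q);
}

int
main(void)
{
	struct queue *q = calloc(1, sizeof (*q));

	pthread_mutex_init(&q->lock, NULL);
	pthread_cond_init(&q->cv, NULL);
	queue_spawn(q);
	queue_destroy(q);
	return (0);
}

The kernel fix below follows the same shape, except that taskq_destroy polls
under tq_lock (dropping it around a short sleep) instead of using a condition
variable.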

Signed-off-by: Chunwei Chen <david.chen@osnexus.com>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Tim Chase <tim@chase2k.com>
Issue #553
Closes #550

module/spl/spl-taskq.c

index bfcf651af693b0b16eb7c15b5443e946f1db4a2e..9784473bde76025410a5680fbe2696676e2e285b 100644
@@ -763,11 +763,12 @@ taskq_thread_spawn_task(void *arg)
        taskq_t *tq = (taskq_t *)arg;
        unsigned long flags;
 
-       (void) taskq_thread_create(tq);
-
-       spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
-       tq->tq_nspawn--;
-       spin_unlock_irqrestore(&tq->tq_lock, flags);
+       if (taskq_thread_create(tq) == NULL) {
+               /* restore spawning count if failed */
+               spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
+               tq->tq_nspawn--;
+               spin_unlock_irqrestore(&tq->tq_lock, flags);
+       }
 }
 
 /*
@@ -848,6 +849,14 @@ taskq_thread(void *args)
 
        tsd_set(taskq_tsd, tq);
        spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
+       /*
+        * If we are dynamically spawned, decrease spawning count. Note that
+        * we could be created during taskq_create, in which case we shouldn't
+        * do the decrement. But it's fine because taskq_create will reset
+        * tq_nspawn later.
+        */
+       if (tq->tq_flags & TASKQ_DYNAMIC)
+               tq->tq_nspawn--;
 
        /* Immediately exit if more threads than allowed were created. */
        if (tq->tq_nthreads >= tq->tq_maxthreads)
@@ -1063,6 +1072,11 @@ taskq_create(const char *name, int nthreads, pri_t pri,
 
        /* Wait for all threads to be started before potential destroy */
        wait_event(tq->tq_wait_waitq, tq->tq_nthreads == count);
+       /*
+        * taskq_thread might have touched nspawn, but we don't want them to
+        * because they're not dynamically spawned. So we reset it to 0
+        */
+       tq->tq_nspawn = 0;
 
        if (rc) {
                taskq_destroy(tq);
@@ -1106,6 +1120,12 @@ taskq_destroy(taskq_t *tq)
        up_write(&tq_list_sem);
 
        spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
+       /* wait for spawning threads to insert themselves to the list */
+       while (tq->tq_nspawn) {
+               spin_unlock_irqrestore(&tq->tq_lock, flags);
+               schedule_timeout_interruptible(1);
+               spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
+       }
 
        /*
         * Signal each thread to exit and block until it does.  Each thread