Fix taskq dynamic spawning
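
The hunks below move the "should another worker be spawned" decision in taskq_dispatch(), taskq_dispatch_delay(), and taskq_dispatch_ent() back under tq_lock, split taskq_member() into a lock-holding taskq_member_impl() plus a locking wrapper, and move the spl_taskq_thread_sequential threshold check out of taskq_thread_spawn() into the worker loop in taskq_thread().

For illustration only, the snippet below is a minimal userspace pthread sketch (not part of the patch) of the dispatch-side pattern the hunks establish: the "all workers busy" test and the spawn happen while the queue lock is still held, so the thread counters cannot change between the check and the spawn. Every identifier in the sketch (pool_t, pool_dispatch(), pool_spawn_locked(), pool_worker()) is hypothetical, and the direct pthread_create() stands in for the real code's deferred spawn via dynamic_taskq.

#include <pthread.h>

/* Hypothetical pool state, loosely mirroring the taskq_t counters. */
typedef struct pool {
	pthread_mutex_t	p_lock;		/* protects the fields below        */
	int		p_nactive;	/* workers currently running a task */
	int		p_nthreads;	/* workers created so far           */
	int		p_maxthreads;	/* hard upper bound on pool size    */
} pool_t;

static void *
pool_worker(void *arg)
{
	(void) arg;
	/* Real worker loop (dequeue, run, update p_nactive) omitted. */
	return (NULL);
}

/*
 * Create one more worker.  Called with p_lock held, as in the patched
 * taskq_thread_spawn(): the capacity check and the counter update are
 * atomic with respect to concurrent dispatchers.
 */
static int
pool_spawn_locked(pool_t *p)
{
	pthread_t tid;

	if (p->p_nthreads >= p->p_maxthreads)
		return (0);
	if (pthread_create(&tid, NULL, pool_worker, p) != 0)
		return (0);
	pthread_detach(tid);
	p->p_nthreads++;
	return (1);
}

/* Dispatch path: decide to grow the pool before dropping the lock. */
void
pool_dispatch(pool_t *p /* , work item ... */)
{
	pthread_mutex_lock(&p->p_lock);

	/* ... enqueue the work item and wake an idle worker ... */

	/* Every existing worker is busy; grow the pool while still locked. */
	if (p->p_nactive == p->p_nthreads)
		(void) pool_spawn_locked(p);

	pthread_mutex_unlock(&p->p_lock);
}
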
diff --git a/module/spl/spl-taskq.c b/module/spl/spl-taskq.c
index 2202aced78b8c30a8ee79b53ef6c8f7879948101..2c2e3ad465abc2fad764816edb53723892595db6 100644
--- a/module/spl/spl-taskq.c
+++ b/module/spl/spl-taskq.c
@@ -53,7 +53,6 @@ EXPORT_SYMBOL(system_taskq);
 /* Private dedicated taskq for creating new taskq threads on demand. */
 static taskq_t *dynamic_taskq;
 static taskq_thread_t *taskq_thread_create(taskq_t *);
-static int taskq_thread_spawn(taskq_t *tq, int seq_tasks);
 
 static int
 task_km_flags(uint_t flags)
@@ -449,8 +448,8 @@ taskq_wait(taskq_t *tq)
 }
 EXPORT_SYMBOL(taskq_wait);
 
-int
-taskq_member(taskq_t *tq, void *t)
+static int
+taskq_member_impl(taskq_t *tq, void *t)
 {
        struct list_head *l;
        taskq_thread_t *tqt;
@@ -458,8 +457,8 @@ taskq_member(taskq_t *tq, void *t)
 
        ASSERT(tq);
        ASSERT(t);
+       ASSERT(spin_is_locked(&tq->tq_lock));
 
-       spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        list_for_each(l, &tq->tq_thread_list) {
                tqt = list_entry(l, taskq_thread_t, tqt_thread_list);
                if (tqt->tqt_thread == (struct task_struct *)t) {
@@ -467,6 +466,16 @@ taskq_member(taskq_t *tq, void *t)
                        break;
                }
        }
+       return (found);
+}
+
+int
+taskq_member(taskq_t *tq, void *t)
+{
+       int found;
+
+       spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+       found = taskq_member_impl(tq, t);
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
 
        return (found);
@@ -529,12 +538,13 @@ taskq_cancel_id(taskq_t *tq, taskqid_t id)
 }
 EXPORT_SYMBOL(taskq_cancel_id);
 
+static int taskq_thread_spawn(taskq_t *tq);
+
 taskqid_t
 taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
 {
        taskq_ent_t *t;
        taskqid_t rc = 0;
-       boolean_t threadlimit = B_FALSE;
 
        ASSERT(tq);
        ASSERT(func);
@@ -576,13 +586,11 @@ taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
 
        wake_up(&tq->tq_work_waitq);
 out:
-       threadlimit = (tq->tq_nactive == tq->tq_nthreads);
-       spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
-
        /* Spawn additional taskq threads if required. */
-       if (threadlimit && taskq_member(tq, current))
-               (void) taskq_thread_spawn(tq, spl_taskq_thread_sequential + 1);
+       if (tq->tq_nactive == tq->tq_nthreads)
+               (void) taskq_thread_spawn(tq);
 
+       spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
        return (rc);
 }
 EXPORT_SYMBOL(taskq_dispatch);
@@ -593,7 +601,6 @@ taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg,
 {
        taskqid_t rc = 0;
        taskq_ent_t *t;
-       boolean_t threadlimit = B_FALSE;
 
        ASSERT(tq);
        ASSERT(func);
@@ -626,13 +633,10 @@ taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg,
 
        spin_unlock(&t->tqent_lock);
 out:
-       threadlimit = (tq->tq_nactive == tq->tq_nthreads);
-       spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
-
        /* Spawn additional taskq threads if required. */
-       if (threadlimit && taskq_member(tq, current))
-               (void) taskq_thread_spawn(tq, spl_taskq_thread_sequential + 1);
-
+       if (tq->tq_nactive == tq->tq_nthreads)
+               (void) taskq_thread_spawn(tq);
+       spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
        return (rc);
 }
 EXPORT_SYMBOL(taskq_dispatch_delay);
@@ -641,8 +645,6 @@ void
 taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
    taskq_ent_t *t)
 {
-       boolean_t threadlimit = B_FALSE;
-
        ASSERT(tq);
        ASSERT(func);
 
@@ -678,12 +680,10 @@ taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
 
        wake_up(&tq->tq_work_waitq);
 out:
-       threadlimit = (tq->tq_nactive == tq->tq_nthreads);
-       spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
-
        /* Spawn additional taskq threads if required. */
-       if (threadlimit && taskq_member(tq, current))
-               (void) taskq_thread_spawn(tq, spl_taskq_thread_sequential + 1);
+       if (tq->tq_nactive == tq->tq_nthreads)
+               (void) taskq_thread_spawn(tq);
+       spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
 }
 EXPORT_SYMBOL(taskq_dispatch_ent);
 
@@ -753,15 +753,14 @@ taskq_thread_spawn_task(void *arg)
  * which is also a dynamic taskq cannot be safely used for this.
  */
 static int
-taskq_thread_spawn(taskq_t *tq, int seq_tasks)
+taskq_thread_spawn(taskq_t *tq)
 {
        int spawning = 0;
 
        if (!(tq->tq_flags & TASKQ_DYNAMIC))
                return (0);
 
-       if ((seq_tasks > spl_taskq_thread_sequential) &&
-           (tq->tq_nthreads + tq->tq_nspawn < tq->tq_maxthreads) &&
+       if ((tq->tq_nthreads + tq->tq_nspawn < tq->tq_maxthreads) &&
            (tq->tq_flags & TASKQ_ACTIVE)) {
                spawning = (++tq->tq_nspawn);
                taskq_dispatch(dynamic_taskq, taskq_thread_spawn_task,
@@ -814,6 +813,10 @@ taskq_thread(void *args)
        tq = tqt->tqt_tq;
        current->flags |= PF_NOFREEZE;
 
+       #if defined(PF_MEMALLOC_NOIO)
+       (void) memalloc_noio_save();
+       #endif
+
        sigfillset(&blocked);
        sigprocmask(SIG_BLOCK, &blocked, NULL);
        flush_signals(current);
@@ -891,7 +894,8 @@ taskq_thread(void *args)
                        }
 
                        /* Spawn additional taskq threads if required. */
-                       if (taskq_thread_spawn(tq, ++seq_tasks))
+                       if ((++seq_tasks) > spl_taskq_thread_sequential &&
+                           taskq_thread_spawn(tq))
                                seq_tasks = 0;
 
                        tqt->tqt_id = 0;