Don't use tq->tq_lock_flags
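
spin_lock_irqsave() stores the caller's IRQ state into its flags argument
before the lock is actually acquired, so keeping that state in the shared
tq->tq_lock_flags field lets a waiter overwrite the flags the current lock
holder will later restore.  This change keeps the saved flags on each
caller's stack instead and converts every acquisition to
spin_lock_irqsave_nested() with a per-taskq lock class.  The hunks below
also add dispatch-time thread spawning for dynamic taskqs, a new
spl_taskq_thread_priority tunable, and switch the system taskqs to
maxclsyspri.

A minimal sketch of the new locking pattern (illustrative only;
example_locked_op is a hypothetical caller, not part of this change):

    static void example_locked_op(taskq_t *tq)
    {
            unsigned long flags;

            /* Saved IRQ state lives on this caller's stack, not in taskq_t. */
            spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
            /* ... operate on tq while holding tq_lock ... */
            spin_unlock_irqrestore(&tq->tq_lock, flags);
    }
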
diff --git a/module/spl/spl-taskq.c b/module/spl/spl-taskq.c
index 44799de1d19360191236c5845a738e001548b7d0..ded6d3b80c70114b18ee2521cfe4761aae8805ca 100644
--- a/module/spl/spl-taskq.c
+++ b/module/spl/spl-taskq.c
@@ -36,6 +36,11 @@ int spl_taskq_thread_dynamic = 1;
 module_param(spl_taskq_thread_dynamic, int, 0644);
 MODULE_PARM_DESC(spl_taskq_thread_dynamic, "Allow dynamic taskq threads");
 
+int spl_taskq_thread_priority = 1;
+module_param(spl_taskq_thread_priority, int, 0644);
+MODULE_PARM_DESC(spl_taskq_thread_priority,
+    "Allow non-default priority for taskq threads");
+
 int spl_taskq_thread_sequential = 4;
 module_param(spl_taskq_thread_sequential, int, 0644);
 MODULE_PARM_DESC(spl_taskq_thread_sequential,
@@ -66,7 +71,7 @@ task_km_flags(uint_t flags)
  * is not attached to the free, work, or pending taskq lists.
  */
 static taskq_ent_t *
-task_alloc(taskq_t *tq, uint_t flags)
+task_alloc(taskq_t *tq, uint_t flags, unsigned long *irqflags)
 {
        taskq_ent_t *t;
        int count = 0;
@@ -106,18 +111,19 @@ retry:
                 * end up delaying the task allocation by one second, thereby
                 * throttling the task dispatch rate.
                 */
-               spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
+               spin_unlock_irqrestore(&tq->tq_lock, *irqflags);
                schedule_timeout(HZ / 100);
-               spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+               spin_lock_irqsave_nested(&tq->tq_lock, *irqflags,
+                   tq->tq_lock_class);
                if (count < 100) {
                        count++;
                        goto retry;
                }
        }
 
-       spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
+       spin_unlock_irqrestore(&tq->tq_lock, *irqflags);
        t = kmem_alloc(sizeof(taskq_ent_t), task_km_flags(flags));
-       spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+       spin_lock_irqsave_nested(&tq->tq_lock, *irqflags, tq->tq_lock_class);
 
        if (t) {
                taskq_init_ent(t);
@@ -182,12 +188,13 @@ task_expire(unsigned long data)
        taskq_ent_t *w, *t = (taskq_ent_t *)data;
        taskq_t *tq = t->tqent_taskq;
        struct list_head *l;
+       unsigned long flags;
 
-       spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+       spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
 
        if (t->tqent_flags & TQENT_FLAG_CANCEL) {
                ASSERT(list_empty(&t->tqent_list));
-               spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
+               spin_unlock_irqrestore(&tq->tq_lock, flags);
                return;
        }
 
@@ -206,7 +213,7 @@ task_expire(unsigned long data)
        if (l == &tq->tq_prio_list)
                list_add(&t->tqent_list, &tq->tq_prio_list);
 
-       spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
+       spin_unlock_irqrestore(&tq->tq_lock, flags);
 
        wake_up(&tq->tq_work_waitq);
 }
@@ -373,10 +380,11 @@ taskq_wait_id_check(taskq_t *tq, taskqid_t id)
 {
        int active = 0;
        int rc;
+       unsigned long flags;
 
-       spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+       spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
        rc = (taskq_find(tq, id, &active) == NULL);
-       spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
+       spin_unlock_irqrestore(&tq->tq_lock, flags);
 
        return (rc);
 }
@@ -396,10 +404,11 @@ static int
 taskq_wait_outstanding_check(taskq_t *tq, taskqid_t id)
 {
        int rc;
+       unsigned long flags;
 
-       spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+       spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
        rc = (id < tq->tq_lowest_id);
-       spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
+       spin_unlock_irqrestore(&tq->tq_lock, flags);
 
        return (rc);
 }
@@ -423,10 +432,11 @@ static int
 taskq_wait_check(taskq_t *tq)
 {
        int rc;
+       unsigned long flags;
 
-       spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+       spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
        rc = (tq->tq_lowest_id == tq->tq_next_id);
-       spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
+       spin_unlock_irqrestore(&tq->tq_lock, flags);
 
        return (rc);
 }
@@ -443,8 +453,8 @@ taskq_wait(taskq_t *tq)
 }
 EXPORT_SYMBOL(taskq_wait);
 
-int
-taskq_member(taskq_t *tq, void *t)
+static int
+taskq_member_impl(taskq_t *tq, void *t)
 {
        struct list_head *l;
        taskq_thread_t *tqt;
@@ -452,8 +462,8 @@ taskq_member(taskq_t *tq, void *t)
 
        ASSERT(tq);
        ASSERT(t);
+       ASSERT(spin_is_locked(&tq->tq_lock));
 
-       spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        list_for_each(l, &tq->tq_thread_list) {
                tqt = list_entry(l, taskq_thread_t, tqt_thread_list);
                if (tqt->tqt_thread == (struct task_struct *)t) {
@@ -461,7 +471,18 @@ taskq_member(taskq_t *tq, void *t)
                        break;
                }
        }
-       spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
+       return (found);
+}
+
+int
+taskq_member(taskq_t *tq, void *t)
+{
+       int found;
+       unsigned long flags;
+
+       spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
+       found = taskq_member_impl(tq, t);
+       spin_unlock_irqrestore(&tq->tq_lock, flags);
 
        return (found);
 }
@@ -479,10 +500,11 @@ taskq_cancel_id(taskq_t *tq, taskqid_t id)
        taskq_ent_t *t;
        int active = 0;
        int rc = ENOENT;
+       unsigned long flags;
 
        ASSERT(tq);
 
-       spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+       spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
        t = taskq_find(tq, id, &active);
        if (t && !active) {
                list_del_init(&t->tqent_list);
@@ -502,9 +524,10 @@ taskq_cancel_id(taskq_t *tq, taskqid_t id)
                 * drop the lock before synchronously cancelling the timer.
                 */
                if (timer_pending(&t->tqent_timer)) {
-                       spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
+                       spin_unlock_irqrestore(&tq->tq_lock, flags);
                        del_timer_sync(&t->tqent_timer);
-                       spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+                       spin_lock_irqsave_nested(&tq->tq_lock, flags,
+                           tq->tq_lock_class);
                }
 
                if (!(t->tqent_flags & TQENT_FLAG_PREALLOC))
@@ -512,7 +535,7 @@ taskq_cancel_id(taskq_t *tq, taskqid_t id)
 
                rc = 0;
        }
-       spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
+       spin_unlock_irqrestore(&tq->tq_lock, flags);
 
        if (active) {
                taskq_wait_id(tq, id);
@@ -523,16 +546,19 @@ taskq_cancel_id(taskq_t *tq, taskqid_t id)
 }
 EXPORT_SYMBOL(taskq_cancel_id);
 
+static int taskq_thread_spawn(taskq_t *tq);
+
 taskqid_t
 taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
 {
        taskq_ent_t *t;
        taskqid_t rc = 0;
+       unsigned long irqflags;
 
        ASSERT(tq);
        ASSERT(func);
 
-       spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+       spin_lock_irqsave_nested(&tq->tq_lock, irqflags, tq->tq_lock_class);
 
        /* Taskq being destroyed and all tasks drained */
        if (!(tq->tq_flags & TASKQ_ACTIVE))
@@ -543,7 +569,7 @@ taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
        if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads))
                goto out;
 
-       if ((t = task_alloc(tq, flags)) == NULL)
+       if ((t = task_alloc(tq, flags, &irqflags)) == NULL)
                goto out;
 
        spin_lock(&t->tqent_lock);
@@ -569,7 +595,11 @@ taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
 
        wake_up(&tq->tq_work_waitq);
 out:
-       spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
+       /* Spawn additional taskq threads if required. */
+       if (tq->tq_nactive == tq->tq_nthreads)
+               (void) taskq_thread_spawn(tq);
+
+       spin_unlock_irqrestore(&tq->tq_lock, irqflags);
        return (rc);
 }
 EXPORT_SYMBOL(taskq_dispatch);
@@ -580,17 +610,18 @@ taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg,
 {
        taskqid_t rc = 0;
        taskq_ent_t *t;
+       unsigned long irqflags;
 
        ASSERT(tq);
        ASSERT(func);
 
-       spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+       spin_lock_irqsave_nested(&tq->tq_lock, irqflags, tq->tq_lock_class);
 
        /* Taskq being destroyed and all tasks drained */
        if (!(tq->tq_flags & TASKQ_ACTIVE))
                goto out;
 
-       if ((t = task_alloc(tq, flags)) == NULL)
+       if ((t = task_alloc(tq, flags, &irqflags)) == NULL)
                goto out;
 
        spin_lock(&t->tqent_lock);
@@ -612,7 +643,10 @@ taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg,
 
        spin_unlock(&t->tqent_lock);
 out:
-       spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
+       /* Spawn additional taskq threads if required. */
+       if (tq->tq_nactive == tq->tq_nthreads)
+               (void) taskq_thread_spawn(tq);
+       spin_unlock_irqrestore(&tq->tq_lock, irqflags);
        return (rc);
 }
 EXPORT_SYMBOL(taskq_dispatch_delay);
@@ -621,10 +655,12 @@ void
 taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
    taskq_ent_t *t)
 {
+       unsigned long irqflags;
        ASSERT(tq);
        ASSERT(func);
 
-       spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+       spin_lock_irqsave_nested(&tq->tq_lock, irqflags,
+           tq->tq_lock_class);
 
        /* Taskq being destroyed and all tasks drained */
        if (!(tq->tq_flags & TASKQ_ACTIVE)) {
@@ -656,7 +692,10 @@ taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
 
        wake_up(&tq->tq_work_waitq);
 out:
-       spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
+       /* Spawn additional taskq threads if required. */
+       if (tq->tq_nactive == tq->tq_nthreads)
+               (void) taskq_thread_spawn(tq);
+       spin_unlock_irqrestore(&tq->tq_lock, irqflags);
 }
 EXPORT_SYMBOL(taskq_dispatch_ent);
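
In all three dispatch paths above the spawn check runs while tq_lock is
still held: if every worker is busy (tq_nactive == tq_nthreads) on a
TASKQ_DYNAMIC taskq, taskq_thread_spawn() queues the actual kthread
creation on dynamic_taskq rather than creating the thread inline, so
thread creation cannot deadlock against memory reclaim.  An illustrative
call chain, pieced together from the surrounding hunks (not a literal
excerpt):

    /*
     * taskq_dispatch(tq, func, arg, flags)          holds tq->tq_lock
     *   -> taskq_thread_spawn(tq)                   bumps tq->tq_nspawn
     *     -> taskq_dispatch(dynamic_taskq,
     *            taskq_thread_spawn_task, tq, ...)  takes dynamic_taskq->tq_lock
     *
     * later, on a dynamic_taskq worker:
     *   taskq_thread_spawn_task(tq)
     *     -> taskq_thread_create(tq)                creates and wakes the kthread
     *     -> tq->tq_nspawn--                        under tq->tq_lock
     */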
 
@@ -710,31 +749,31 @@ static void
 taskq_thread_spawn_task(void *arg)
 {
        taskq_t *tq = (taskq_t *)arg;
+       unsigned long flags;
 
        (void) taskq_thread_create(tq);
 
-       spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+       spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
        tq->tq_nspawn--;
-       spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
+       spin_unlock_irqrestore(&tq->tq_lock, flags);
 }
 
 /*
- * Spawn addition threads for dynamic taskqs (TASKQ_DYNMAIC) the current
+ * Spawn additional threads for dynamic taskqs (TASKQ_DYNAMIC) when the current
  * number of threads is insufficient to handle the pending tasks.  These
  * new threads must be created by the dedicated dynamic_taskq to avoid
  * deadlocks between thread creation and memory reclaim.  The system_taskq
  * which is also a dynamic taskq cannot be safely used for this.
  */
 static int
-taskq_thread_spawn(taskq_t *tq, int seq_tasks)
+taskq_thread_spawn(taskq_t *tq)
 {
        int spawning = 0;
 
        if (!(tq->tq_flags & TASKQ_DYNAMIC))
                return (0);
 
-       if ((seq_tasks > spl_taskq_thread_sequential) &&
-           (tq->tq_nthreads + tq->tq_nspawn < tq->tq_maxthreads) &&
+       if ((tq->tq_nthreads + tq->tq_nspawn < tq->tq_maxthreads) &&
            (tq->tq_flags & TASKQ_ACTIVE)) {
                spawning = (++tq->tq_nspawn);
                taskq_dispatch(dynamic_taskq, taskq_thread_spawn_task,
@@ -782,16 +821,22 @@ taskq_thread(void *args)
        taskq_t *tq;
        taskq_ent_t *t;
        int seq_tasks = 0;
+       unsigned long flags;
 
        ASSERT(tqt);
+       ASSERT(tqt->tqt_tq);
        tq = tqt->tqt_tq;
        current->flags |= PF_NOFREEZE;
 
+       #if defined(PF_MEMALLOC_NOIO)
+       (void) memalloc_noio_save();
+       #endif
+
        sigfillset(&blocked);
        sigprocmask(SIG_BLOCK, &blocked, NULL);
        flush_signals(current);
 
-       spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+       spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
 
        /* Immediately exit if more threads than allowed were created. */
        if (tq->tq_nthreads >= tq->tq_maxthreads)
@@ -813,12 +858,13 @@ taskq_thread(void *args)
                        }
 
                        add_wait_queue_exclusive(&tq->tq_work_waitq, &wait);
-                       spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
+                       spin_unlock_irqrestore(&tq->tq_lock, flags);
 
                        schedule();
                        seq_tasks = 0;
 
-                       spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+                       spin_lock_irqsave_nested(&tq->tq_lock, flags,
+                           tq->tq_lock_class);
                        remove_wait_queue(&tq->tq_work_waitq, &wait);
                } else {
                        __set_current_state(TASK_RUNNING);
@@ -842,12 +888,13 @@ taskq_thread(void *args)
 
                        taskq_insert_in_order(tq, tqt);
                        tq->tq_nactive++;
-                       spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
+                       spin_unlock_irqrestore(&tq->tq_lock, flags);
 
                        /* Perform the requested task */
                        t->tqent_func(t->tqent_arg);
 
-                       spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+                       spin_lock_irqsave_nested(&tq->tq_lock, flags,
+                           tq->tq_lock_class);
                        tq->tq_nactive--;
                        list_del_init(&tqt->tqt_active_list);
                        tqt->tqt_task = NULL;
@@ -864,7 +911,8 @@ taskq_thread(void *args)
                        }
 
                        /* Spawn additional taskq threads if required. */
-                       if (taskq_thread_spawn(tq, ++seq_tasks))
+                       if ((++seq_tasks) > spl_taskq_thread_sequential &&
+                           taskq_thread_spawn(tq))
                                seq_tasks = 0;
 
                        tqt->tqt_id = 0;
@@ -884,7 +932,7 @@ taskq_thread(void *args)
        list_del_init(&tqt->tqt_thread_list);
 error:
        kmem_free(tqt, sizeof (taskq_thread_t));
-       spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
+       spin_unlock_irqrestore(&tq->tq_lock, flags);
 
        return (0);
 }
@@ -913,7 +961,9 @@ taskq_thread_create(taskq_t *tq)
                kthread_bind(tqt->tqt_thread, last_used_cpu);
        }
 
-       set_user_nice(tqt->tqt_thread, PRIO_TO_NICE(tq->tq_pri));
+       if (spl_taskq_thread_priority)
+               set_user_nice(tqt->tqt_thread, PRIO_TO_NICE(tq->tq_pri));
+
        wake_up_process(tqt->tqt_thread);
 
        return (tqt);
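
A usage note on the new tunable: spl_taskq_thread_priority is registered
with mode 0644, so it can be set at module load time and, presumably,
changed at runtime through /sys/module/spl/parameters/spl_taskq_thread_priority.
With it cleared, taskq threads keep the nice value they inherit at
creation instead of being reniced from the tq->tq_pri passed to
taskq_create().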
@@ -926,9 +976,9 @@ taskq_create(const char *name, int nthreads, pri_t pri,
        taskq_t *tq;
        taskq_thread_t *tqt;
        int count = 0, rc = 0, i;
+       unsigned long irqflags;
 
        ASSERT(name != NULL);
-       ASSERT(pri <= maxclsyspri);
        ASSERT(minalloc >= 0);
        ASSERT(maxalloc <= INT_MAX);
        ASSERT(!(flags & (TASKQ_CPR_SAFE))); /* Unsupported */
@@ -967,14 +1017,17 @@ taskq_create(const char *name, int nthreads, pri_t pri,
        INIT_LIST_HEAD(&tq->tq_delay_list);
        init_waitqueue_head(&tq->tq_work_waitq);
        init_waitqueue_head(&tq->tq_wait_waitq);
+       tq->tq_lock_class = TQ_LOCK_GENERAL;
 
        if (flags & TASKQ_PREPOPULATE) {
-               spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+               spin_lock_irqsave_nested(&tq->tq_lock, irqflags,
+                   tq->tq_lock_class);
 
                for (i = 0; i < minalloc; i++)
-                       task_done(tq, task_alloc(tq, TQ_PUSHPAGE | TQ_NEW));
+                       task_done(tq, task_alloc(tq, TQ_PUSHPAGE | TQ_NEW,
+                           &irqflags));
 
-               spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
+               spin_unlock_irqrestore(&tq->tq_lock, irqflags);
        }
 
        if ((flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic)
@@ -1006,11 +1059,12 @@ taskq_destroy(taskq_t *tq)
        struct task_struct *thread;
        taskq_thread_t *tqt;
        taskq_ent_t *t;
+       unsigned long flags;
 
        ASSERT(tq);
-       spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+       spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
        tq->tq_flags &= ~TASKQ_ACTIVE;
-       spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
+       spin_unlock_irqrestore(&tq->tq_lock, flags);
 
        /*
         * When TASKQ_ACTIVE is clear new tasks may not be added nor may
@@ -1021,7 +1075,7 @@ taskq_destroy(taskq_t *tq)
 
        taskq_wait(tq);
 
-       spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+       spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
 
        /*
         * Signal each thread to exit and block until it does.  Each thread
@@ -1033,11 +1087,12 @@ taskq_destroy(taskq_t *tq)
                tqt = list_entry(tq->tq_thread_list.next,
                    taskq_thread_t, tqt_thread_list);
                thread = tqt->tqt_thread;
-               spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
+               spin_unlock_irqrestore(&tq->tq_lock, flags);
 
                kthread_stop(thread);
 
-               spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+               spin_lock_irqsave_nested(&tq->tq_lock, flags,
+                   tq->tq_lock_class);
        }
 
        while (!list_empty(&tq->tq_free_list)) {
@@ -1059,7 +1114,7 @@ taskq_destroy(taskq_t *tq)
        ASSERT(list_empty(&tq->tq_prio_list));
        ASSERT(list_empty(&tq->tq_delay_list));
 
-       spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
+       spin_unlock_irqrestore(&tq->tq_lock, flags);
 
        strfree(tq->tq_name);
        kmem_free(tq, sizeof (taskq_t));
@@ -1070,17 +1125,23 @@ int
 spl_taskq_init(void)
 {
        system_taskq = taskq_create("spl_system_taskq", MAX(boot_ncpus, 64),
-           minclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE|TASKQ_DYNAMIC);
+           maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE|TASKQ_DYNAMIC);
        if (system_taskq == NULL)
                return (1);
 
        dynamic_taskq = taskq_create("spl_dynamic_taskq", 1,
-           minclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE);
+           maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE);
        if (dynamic_taskq == NULL) {
                taskq_destroy(system_taskq);
                return (1);
        }
 
+       /* This is used to annotate tq_lock, so
+        *      taskq_dispatch -> taskq_thread_spawn -> taskq_dispatch
+        * does not trigger a lockdep warning re: possible recursive locking
+        */
+       dynamic_taskq->tq_lock_class = TQ_LOCK_DYNAMIC;
+
        return (0);
 }
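
A closing note on the lock-class annotation: with dispatch-time spawning,
an ordinary taskq_dispatch() can hold its own tq_lock while
taskq_thread_spawn() re-enters taskq_dispatch() for dynamic_taskq and
takes a second tq_lock.  Both spinlocks share one lockdep class, so
without a distinct subclass lockdep would report possible recursive
locking even though the two locks are different instances.  A sketch of
the idea follows; the enum tag and numeric values are assumptions, since
the real definitions live in the SPL taskq header rather than in this
diff (only the TQ_LOCK_GENERAL and TQ_LOCK_DYNAMIC names appear above):

    /* Hypothetical shape of the lock classes referenced above. */
    enum example_tq_lock_class {
            TQ_LOCK_GENERAL = 0,    /* default, assigned in taskq_create() */
            TQ_LOCK_DYNAMIC         /* assigned only to dynamic_taskq */
    };

    /*
     * spin_lock_irqsave_nested() forwards the subclass to lockdep, so the
     * nesting created by a spawn is recorded as GENERAL -> DYNAMIC rather
     * than the same class taken twice:
     *
     *   spin_lock_irqsave_nested(&tq->tq_lock, f1, TQ_LOCK_GENERAL);
     *     spin_lock_irqsave_nested(&dynamic_taskq->tq_lock, f2, TQ_LOCK_DYNAMIC);
     */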