Remove TQ_SLEEP -> KM_SLEEP mapping

diff --git a/module/spl/spl-taskq.c b/module/spl/spl-taskq.c
index b0677666d4698fcc1595f637179de2077719fb03..7ea20461b1aae21e81bd67cc3905422d8a494aa5 100644
--- a/module/spl/spl-taskq.c
+++ b/module/spl/spl-taskq.c
 taskq_t *system_taskq;
 EXPORT_SYMBOL(system_taskq);
 
+static int
+task_km_flags(uint_t flags)
+{
+       if (flags & TQ_NOSLEEP)
+               return KM_NOSLEEP;
+
+       if (flags & TQ_PUSHPAGE)
+               return KM_PUSHPAGE;
+
+       return KM_SLEEP;
+}
+
 /*
  * NOTE: Must be called with tq->tq_lock held, returns a list_t which
  * is not attached to the free, work, or pending taskq lists.
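
The helper above centralizes the dispatch-flag to allocation-flag translation, and its KM_SLEEP fall-through replaces the explicit "assume TQ_SLEEP" fixup removed from __taskq_dispatch() further down. A minimal user-space sketch of the mapping, using placeholder flag values rather than the real definitions from the SPL's sys/taskq.h and sys/kmem.h:

#include <stdio.h>

/* Placeholder values for illustration only; the real TQ_* and KM_*
 * constants are defined in the SPL's sys/taskq.h and sys/kmem.h. */
#define TQ_NOSLEEP      0x01
#define TQ_PUSHPAGE     0x02

#define KM_SLEEP        0x00
#define KM_NOSLEEP      0x01
#define KM_PUSHPAGE     0x02

/* Same shape as task_km_flags() above: NOSLEEP and PUSHPAGE are
 * honored explicitly, everything else falls through to KM_SLEEP. */
static int
task_km_flags(unsigned int flags)
{
        if (flags & TQ_NOSLEEP)
                return (KM_NOSLEEP);

        if (flags & TQ_PUSHPAGE)
                return (KM_PUSHPAGE);

        return (KM_SLEEP);
}

int main(void)
{
        printf("TQ_NOSLEEP  -> %d\n", task_km_flags(TQ_NOSLEEP));
        printf("TQ_PUSHPAGE -> %d\n", task_km_flags(TQ_PUSHPAGE));
        printf("default     -> %d\n", task_km_flags(0));
        return (0);
}
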
@@ -50,8 +62,6 @@ task_alloc(taskq_t *tq, uint_t flags)
         SENTRY;
 
         ASSERT(tq);
-        ASSERT(flags & (TQ_SLEEP | TQ_NOSLEEP));               /* One set */
-        ASSERT(!((flags & TQ_SLEEP) && (flags & TQ_NOSLEEP))); /* Not both */
         ASSERT(spin_is_locked(&tq->tq_lock));
 retry:
         /* Acquire taskq_ent_t's from free list if available */
@@ -92,7 +102,7 @@ retry:
         }
 
         spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
-        t = kmem_alloc(sizeof(taskq_ent_t), flags & (TQ_SLEEP | TQ_NOSLEEP));
+        t = kmem_alloc(sizeof(taskq_ent_t), task_km_flags(flags));
         spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
 
         if (t) {
@@ -156,19 +166,22 @@ task_done(taskq_t *tq, taskq_ent_t *t)
  * monotonically increasing taskqid and added to the tail of the pending
  * list.  As worker threads become available the tasks are removed from
  * the head of the pending or priority list, giving preference to the
- * priority list.  The tasks are then added to the work list, preserving
- * the ordering by taskqid.  Finally, as tasks complete they are removed
- * from the work list.  This means that the pending and work lists are
- * always kept sorted by taskqid.  Thus the lowest outstanding
- * incomplete taskqid can be determined simply by checking the min
- * taskqid for each head item on the pending, priority, and work list.
- * This value is stored in tq->tq_lowest_id and only updated to the new
- * lowest id when the previous lowest id completes.  All taskqids lower
- * than tq->tq_lowest_id must have completed.  It is also possible
- * larger taskqid's have completed because they may be processed in
- * parallel by several worker threads.  However, this is not a problem
- * because the behavior of taskq_wait_id() is to block until all
- * previously submitted taskqid's have completed.
+ * priority list.  The tasks are then removed from their respective
+ * lists, and the taskq_thread servicing the task is added to the
+ * active list, preserving the order using the serviced task's
+ * taskqid.  Finally, as tasks complete the taskq_thread servicing
+ * the task is removed from the active list.  This means that the
+ * pending task and active taskq_thread lists are always kept sorted
+ * by taskqid.  Thus the lowest outstanding incomplete taskqid can be
+ * determined simply by checking the min taskqid for each head item
+ * on the pending, priority, and active taskq_thread lists.  This
+ * value is stored in tq->tq_lowest_id and only updated to the new
+ * lowest id when the previous lowest id completes.  All taskqids
+ * lower than tq->tq_lowest_id must have completed.  It is also
+ * possible larger taskqids have completed because they may be
+ * processed in parallel by several worker threads.  However, this is
+ * not a problem because the behavior of taskq_wait_id() is to block
+ * until all previously submitted taskqids have completed.
  *
  * XXX: Taskqid_t wrapping is not handled.  However, taskqid_t's are
  * 64-bit values so even if a taskq is processing 2^24 (16,777,216)
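
Because every list is kept sorted by taskqid, the lowest outstanding id falls out of a three-way minimum over the list heads. A rough user-space sketch of that bookkeeping, with hypothetical types standing in for the kernel's list_head machinery:

#include <stdio.h>
#include <stddef.h>

typedef unsigned long long taskqid_t;

/* Hypothetical sorted singly-linked list of ids; the real code walks
 * taskq_ent_t and taskq_thread_t entries on Linux list_heads. */
struct id_node {
        taskqid_t        id;
        struct id_node  *next;
};

/* Analogue of the lowest-id logic described above: if all three
 * lists are empty, the next id to be assigned is the answer;
 * otherwise take the smallest head id, since each list is sorted. */
static taskqid_t
lowest_id(taskqid_t next_id, struct id_node *pend,
    struct id_node *prio, struct id_node *active)
{
        taskqid_t lowest = next_id;

        if (pend != NULL && pend->id < lowest)
                lowest = pend->id;
        if (prio != NULL && prio->id < lowest)
                lowest = prio->id;
        if (active != NULL && active->id < lowest)
                lowest = active->id;

        return (lowest);
}

int main(void)
{
        struct id_node thr  = { 7, NULL };      /* head of active list */
        struct id_node pend = { 5, NULL };      /* head of pending list */

        printf("lowest outstanding id: %llu\n",
            lowest_id(9, &pend, NULL, &thr));   /* prints 5 */
        return (0);
}
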
@@ -248,14 +261,6 @@ __taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
         ASSERT(tq);
         ASSERT(func);
 
-       /* Solaris assumes TQ_SLEEP if not passed explicitly */
-       if (!(flags & (TQ_SLEEP | TQ_NOSLEEP)))
-               flags |= TQ_SLEEP;
-
-       if (unlikely(in_atomic() && (flags & TQ_SLEEP)))
-               PANIC("May schedule while atomic: %s/0x%08x/%d\n",
-                   current->comm, preempt_count(), current->pid);
-
         spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
 
        /* Taskq being destroyed and all tasks drained */
@@ -286,11 +291,10 @@ __taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
        ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));
 
        spin_unlock(&t->tqent_lock);
+
+       wake_up(&tq->tq_work_waitq);
 out:
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
-       if (rc > 0)
-               wake_up(&tq->tq_work_waitq);
-
        SRETURN(rc);
 }
 EXPORT_SYMBOL(__taskq_dispatch);
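
Issuing wake_up() while tq_lock is still held pairs with the waiter-side reordering in taskq_thread() below: together they close the window in which a dispatch could fire its wakeup after a worker decided the lists were empty but before it was on the wait queue. A purely illustrative pthreads analogy, where signalling under the mutex plays the role of wake_up() under tq_lock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  work = PTHREAD_COND_INITIALIZER;
static int pending;

/* Dispatch side: enqueue and signal while still holding the lock,
 * mirroring the wake_up() placement in the hunk above. */
static void
dispatch(void)
{
        pthread_mutex_lock(&lock);
        pending++;
        pthread_cond_signal(&work);     /* waiter cannot miss this */
        pthread_mutex_unlock(&lock);
}

/* Worker side: the emptiness check and the sleep are atomic with
 * respect to dispatch() because both happen under the lock. */
static void *
worker(void *arg)
{
        pthread_mutex_lock(&lock);
        while (pending == 0)
                pthread_cond_wait(&work, &lock);
        pending--;
        pthread_mutex_unlock(&lock);
        printf("task consumed\n");
        return (arg);
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, worker, NULL);
        dispatch();
        pthread_join(t, NULL);
        return (0);
}
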
@@ -310,7 +314,6 @@ __taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
        /* Taskq being destroyed and all tasks drained */
        if (!(tq->tq_flags & TQ_ACTIVE)) {
                t->tqent_id = 0;
-               spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
                goto out;
        }
 
@@ -334,10 +337,10 @@ __taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
        t->tqent_arg = arg;
 
        spin_unlock(&t->tqent_lock);
-       spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
 
        wake_up(&tq->tq_work_waitq);
 out:
+       spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
        SEXIT;
 }
 EXPORT_SYMBOL(__taskq_dispatch_ent);
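
This hunk also funnels every exit path of __taskq_dispatch_ent() through the single spin_unlock_irqrestore() at the out: label, rather than letting the shutdown path carry a private unlock. A small stand-alone sketch of that single-exit locking shape, with illustrative names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int active = 1;
static int queued;

/* Single-exit locking in the style of the reworked
 * __taskq_dispatch_ent(): every path ends at 'out', which owns the
 * one and only unlock.  Early-exit paths just set their result and
 * jump instead of duplicating the unlock call. */
static int
dispatch_ent(int id)
{
        int rc = 0;

        pthread_mutex_lock(&lock);

        if (!active)            /* taskq shutting down */
                goto out;       /* no private unlock here anymore */

        queued++;
        rc = id;
out:
        pthread_mutex_unlock(&lock);
        return (rc);
}

int main(void)
{
        printf("dispatched: %d\n", dispatch_ent(42));
        active = 0;
        printf("dispatched: %d\n", dispatch_ent(43));
        return (0);
}
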
@@ -441,10 +444,6 @@ taskq_thread(void *args)
        tq = tqt->tqt_tq;
         current->flags |= PF_NOFREEZE;
 
-       /* Disable the direct memory reclaim path */
-       if (tq->tq_flags & TASKQ_NORECLAIM)
-               current->flags |= PF_MEMALLOC;
-
         sigfillset(&blocked);
         sigprocmask(SIG_BLOCK, &blocked, NULL);
         flush_signals(current);
@@ -458,11 +457,11 @@ taskq_thread(void *args)
 
                if (list_empty(&tq->tq_pend_list) &&
                    list_empty(&tq->tq_prio_list)) {
-                       spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
                        add_wait_queue_exclusive(&tq->tq_work_waitq, &wait);
+                       spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
                        schedule();
-                       remove_wait_queue(&tq->tq_work_waitq, &wait);
                        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+                       remove_wait_queue(&tq->tq_work_waitq, &wait);
                } else {
                        __set_current_state(TASK_RUNNING);
                }
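
On the waiter side, the thread now puts itself on tq_work_waitq before dropping tq_lock, so any dispatcher that subsequently takes the lock is guaranteed to see the sleeper. A toy model, with made-up names, of why registration must precede the unlock:

#include <pthread.h>
#include <stdio.h>

/* Toy wait queue: wake() only reaches a registered sleeper, like the
 * kernel's wake_up().  All names here are illustrative. */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int registered;
static int pending_wakeup;

static void
wake(void)                      /* caller holds lock, as in the hunks above */
{
        if (registered)
                pending_wakeup = 1;     /* silently lost otherwise */
}

/* New order: register while still holding the lock.  Any dispatcher
 * that acquires the lock after the unlock below must observe
 * registered == 1, so its wake() cannot be lost.  The old order
 * (unlock, then register) left a window where wake() saw no sleeper. */
static int
sleep_prologue(void)
{
        pthread_mutex_lock(&lock);
        registered = 1;                 /* add_wait_queue_exclusive() */
        pthread_mutex_unlock(&lock);    /* only now drop "tq_lock" */
        return (pending_wakeup);        /* schedule() would block on this */
}

int main(void)
{
        pthread_mutex_lock(&lock);
        wake();                         /* fires before registration: lost */
        pthread_mutex_unlock(&lock);
        printf("wakeup seen by sleeper: %d\n", sleep_prologue());
        return (0);
}
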
@@ -557,7 +556,7 @@ __taskq_create(const char *name, int nthreads, pri_t pri,
                nthreads = MAX((num_online_cpus() * nthreads) / 100, 1);
        }
 
-        tq = kmem_alloc(sizeof(*tq), KM_SLEEP);
+        tq = kmem_alloc(sizeof(*tq), KM_PUSHPAGE);
         if (tq == NULL)
                 SRETURN(NULL);
 
@@ -583,12 +582,12 @@ __taskq_create(const char *name, int nthreads, pri_t pri,
 
         if (flags & TASKQ_PREPOPULATE)
                 for (i = 0; i < minalloc; i++)
-                        task_done(tq, task_alloc(tq, TQ_SLEEP | TQ_NEW));
+                        task_done(tq, task_alloc(tq, TQ_PUSHPAGE | TQ_NEW));
 
         spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
 
        for (i = 0; i < nthreads; i++) {
-               tqt = kmem_alloc(sizeof(*tqt), KM_SLEEP);
+               tqt = kmem_alloc(sizeof(*tqt), KM_PUSHPAGE);
                INIT_LIST_HEAD(&tqt->tqt_thread_list);
                INIT_LIST_HEAD(&tqt->tqt_active_list);
                tqt->tqt_tq = tq;
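
The taskq's own allocations now use KM_PUSHPAGE throughout, so creating a taskq, or prepopulating its entry cache, stays safe even on the memory-reclaim path, where a KM_SLEEP allocation could recurse into the filesystem. A sketch of what the TASKQ_PREPOPULATE loop buys, with illustrative types (malloc stands in for the kernel's KM_PUSHPAGE allocation):

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for taskq_ent_t on the tq_free_list. */
struct ent {
        struct ent *next;
};

static struct ent *free_list;

/* Analogue of the TASKQ_PREPOPULATE loop above: task_alloc()
 * followed immediately by task_done() parks each new entry on the
 * free list, so early dispatches never have to hit the allocator. */
static void
prepopulate(int minalloc)
{
        for (int i = 0; i < minalloc; i++) {
                struct ent *t = malloc(sizeof (*t));

                if (t == NULL)
                        break;
                t->next = free_list;
                free_list = t;
        }
}

int main(void)
{
        int n = 0;

        prepopulate(4);
        for (struct ent *t = free_list; t != NULL; t = t->next)
                n++;
        printf("prepopulated %d entries\n", n);
        return (0);
}
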