Swap taskq_ent_t with taskqid_t in taskq_thread_t
author    Prakash Surya <surya1@llnl.gov>
          Fri, 16 Dec 2011 17:44:31 +0000 (09:44 -0800)
committer Brian Behlendorf <behlendorf1@llnl.gov>
          Fri, 16 Dec 2011 21:26:54 +0000 (13:26 -0800)
The taskq_t's active thread list is sorted based on each
taskq_thread_t's tqt_ent->tqent_id field. The list is kept sorted
solely by inserting new taskq_thread_t's in their correct sorted
location; no other means is used. This means that once inserted, if
a taskq_thread_t's tqt_ent->tqent_id field changes, the list runs
the risk of no longer being sorted.
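
For reference, a sketch of the ordered insert as it looked before this
change, keyed on tqt_ent->tqent_id (asserts and SENTRY/SEXIT markers
are omitted, and the head-insert fall-through when no smaller id is
found is assumed from the surrounding code):

    static void
    taskq_insert_in_order(taskq_t *tq, taskq_thread_t *tqt)
    {
            taskq_thread_t *w;
            struct list_head *l;

            /* Walk the active list from the tail; the first thread found
             * with a smaller id marks the insertion point. */
            list_for_each_prev(l, &tq->tq_active_list) {
                    w = list_entry(l, taskq_thread_t, tqt_active_list);
                    if (w->tqt_ent->tqent_id < tqt->tqt_ent->tqent_id) {
                            list_add(&tqt->tqt_active_list, l);
                            break;
                    }
            }

            /* No smaller id exists; this thread now holds the lowest id
             * and is linked at the head of the list. */
            if (l == &tq->tq_active_list)
                    list_add(&tqt->tqt_active_list, &tq->tq_active_list);
    }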

Prior to the introduction of the taskq_dispatch_prealloc() interface,
this was not a problem as a taskq_ent_t actively being serviced under
the old interface should always have a static tqent_id field. Thus,
once the taskq_thread_t is added to the taskq_t's active thread list,
the taskq_thread_t's tqt_ent->tqent_id field would remain constant.

Now, this is no longer the case. Currently, if using the
taskq_dispatch_prealloc() interface, any given taskq_ent_t actively
being serviced _may_ have its tqent_id value incremented. This happens
when the preallocated taskq_ent_t structure is recursively dispatched.
Thus, a taskq_thread_t could potentially have its tqt_ent->tqent_id
field silently modified from under its feet. If this were to happen
to a taskq_thread_t on a taskq_t's active thread list, this would
compromise the integrity of the order of the list (as the list
_may_ no longer be sorted).
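
As a concrete illustration of such a recursive dispatch, consider a
task function that re-dispatches the very taskq_ent_t it is being
serviced from. This is only a hedged sketch: the exact
taskq_dispatch_prealloc() signature is assumed to follow the Illumos
style, and my_taskq, my_ent, my_work, and more_work_pending() are
hypothetical names:

    static taskq_t     *my_taskq;  /* hypothetical queue */
    static taskq_ent_t  my_ent;    /* preallocated entry, reused across dispatches */

    static void
    my_work(void *arg)
    {
            /* Re-dispatching the entry currently being serviced assigns it
             * a new, larger tqent_id while a taskq_thread_t on the active
             * list may still point at it. */
            if (more_work_pending(arg))
                    taskq_dispatch_prealloc(my_taskq, my_work, arg,
                        TQ_SLEEP, &my_ent);
    }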

To get around this, the taskq_thread_t's taskq_ent_t pointer was
replaced with its own static copy of the tqent_id. So, as a taskq_ent_t
is pulled off the taskq_t's pending list, a static copy of its
tqent_id is made and this copy is used to sort the active thread
list. Using a static copy is key in ensuring the integrity of the
order of the active thread list. Even if the underlying taskq_ent_t
is recursively dispatched (and has its tqent_id modified), this
static copy stored inside the taskq_thread_t will remain constant.
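
Condensed, the servicing path in taskq_thread() now behaves as in the
sketch below (locking, accounting, and wait-queue handling from the
hunks further down are stripped out, and the tq_pend_list name for
pulling the entry off the pending list is assumed):

    t = list_entry(tq->tq_pend_list.next, taskq_ent_t, tqent_list);
    list_del_init(&t->tqent_list);

    /* Take the static copy before running the function; tqt->tqt_id is
     * now immune to any re-dispatch performed inside tqent_func(). */
    tqt->tqt_id = t->tqent_id;
    taskq_insert_in_order(tq, tqt);

    t->tqent_func(t->tqent_arg);            /* may bump t->tqent_id */

    list_del_init(&tqt->tqt_active_list);
    task_done(tq, t);

    /* The copy, not the (possibly changed) tqent_id, decides whether
     * the lowest outstanding id must be recalculated. */
    if (tq->tq_lowest_id == tqt->tqt_id)
            tq->tq_lowest_id = taskq_lowest_id(tq);

    tqt->tqt_id = 0;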

Signed-off-by: Prakash Surya <surya1@llnl.gov>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Issue #71

include/sys/taskq.h
module/spl/spl-taskq.c

diff --git a/include/sys/taskq.h b/include/sys/taskq.h
index 54d869afec852cd3da51ca58db40e74df13e2220..0a71433753b995e17af738b0a790ea2b19cc6a58 100644
--- a/include/sys/taskq.h
+++ b/include/sys/taskq.h
@@ -96,7 +96,7 @@ typedef struct taskq_thread {
        struct list_head       tqt_active_list;
        struct task_struct     *tqt_thread;
        taskq_t                *tqt_tq;
-       taskq_ent_t            *tqt_ent;
+       taskqid_t              tqt_id;
 } taskq_thread_t;
 
 /* Global system-wide dynamic task queue available for all consumers */
diff --git a/module/spl/spl-taskq.c b/module/spl/spl-taskq.c
index b2b0e6ca86e2d59b44c84850a7b0227833ac4419..ccb713c206de9a05708dafa0c2a2375b86c6ecd9 100644
--- a/module/spl/spl-taskq.c
+++ b/module/spl/spl-taskq.c
@@ -393,8 +393,8 @@ taskq_lowest_id(taskq_t *tq)
        if (!list_empty(&tq->tq_active_list)) {
                tqt = list_entry(tq->tq_active_list.next, taskq_thread_t,
                                 tqt_active_list);
-               ASSERT(tqt->tqt_ent != NULL);
-               lowest_id = MIN(lowest_id, tqt->tqt_ent->tqent_id);
+               ASSERT(tqt->tqt_id != 0);
+               lowest_id = MIN(lowest_id, tqt->tqt_id);
        }
 
        SRETURN(lowest_id);
@@ -417,7 +417,7 @@ taskq_insert_in_order(taskq_t *tq, taskq_thread_t *tqt)
 
        list_for_each_prev(l, &tq->tq_active_list) {
                w = list_entry(l, taskq_thread_t, tqt_active_list);
-               if (w->tqt_ent->tqent_id < tqt->tqt_ent->tqent_id) {
+               if (w->tqt_id < tqt->tqt_id) {
                        list_add(&tqt->tqt_active_list, l);
                        break;
                }
@@ -433,7 +433,6 @@ taskq_thread(void *args)
 {
         DECLARE_WAITQUEUE(wait, current);
         sigset_t blocked;
-       taskqid_t id;
        taskq_thread_t *tqt = args;
         taskq_t *tq;
         taskq_ent_t *t;
@@ -484,8 +483,7 @@ taskq_thread(void *args)
                        /* In order to support recursively dispatching a
                         * preallocated taskq_ent_t, tqent_id must be
                         * stored prior to executing tqent_func. */
-                       id = t->tqent_id;
-                       tqt->tqt_ent = t;
+                       tqt->tqt_id = t->tqent_id;
                        taskq_insert_in_order(tq, tqt);
                         tq->tq_nactive++;
                        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
@@ -496,16 +494,16 @@ taskq_thread(void *args)
                        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
                         tq->tq_nactive--;
                        list_del_init(&tqt->tqt_active_list);
-                       tqt->tqt_ent = NULL;
                         task_done(tq, t);
 
                        /* When the current lowest outstanding taskqid is
                         * done calculate the new lowest outstanding id */
-                       if (tq->tq_lowest_id == id) {
+                       if (tq->tq_lowest_id == tqt->tqt_id) {
                                tq->tq_lowest_id = taskq_lowest_id(tq);
-                               ASSERT(tq->tq_lowest_id > id);
+                               ASSERT3S(tq->tq_lowest_id, >, tqt->tqt_id);
                        }
 
+                       tqt->tqt_id = 0;
                         wake_up_all(&tq->tq_wait_waitq);
                }
 
@@ -582,7 +580,7 @@ __taskq_create(const char *name, int nthreads, pri_t pri,
                INIT_LIST_HEAD(&tqt->tqt_thread_list);
                INIT_LIST_HEAD(&tqt->tqt_active_list);
                tqt->tqt_tq = tq;
-               tqt->tqt_ent = NULL;
+               tqt->tqt_id = 0;
 
                tqt->tqt_thread = kthread_create(taskq_thread, tqt,
                                                 "%s/%d", name, i);