Fix taskq_wait() not waiting bug
author	Brian Behlendorf <behlendorf1@llnl.gov>
	Sun, 15 Mar 2009 22:13:49 +0000 (15:13 -0700)
committer	Brian Behlendorf <behlendorf1@llnl.gov>
	Sun, 15 Mar 2009 22:13:49 +0000 (15:13 -0700)
I'm very surprised this has not surfaced until now.  But the taskq_wait()
implementation would only wait successfully the first time it was called.
Any subsequent use of taskq_wait() on the same taskq would not wait.

The issue was caused by tq->tq_lowest_id being set to MAX_INT after the
first wait completed.  This caused subsequent waits, which check that the
waiting id is less than the lowest taskq id, to always succeed.  The fix
is to ensure that tq->tq_lowest_id is never set larger than tq->tq_next_id.
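
To make the failure mode concrete, below is a minimal user-space model of
the completion check.  The struct, function names, and id values are
illustrative only; the real logic lives in module/spl/spl-taskq.c further
down in this diff.

/* Minimal model of the taskq_wait() completion check (illustrative only). */
#include <stdio.h>

typedef unsigned long long taskqid_t;

struct mini_taskq {
        taskqid_t tq_next_id;    /* id the next dispatch will receive */
        taskqid_t tq_lowest_id;  /* lowest taskqid not yet completed */
};

/* A wait on 'id' is finished once the lowest outstanding id has moved
 * past it, i.e. every task up to and including 'id' has completed. */
static int
wait_check(const struct mini_taskq *tq, taskqid_t id)
{
        return id < tq->tq_lowest_id;
}

int
main(void)
{
        /* The queue drained once after running ids 1..3, then task 4 was
         * dispatched, so tq_next_id is now 5 and the wait targets id 4. */
        struct mini_taskq tq = { .tq_next_id = 5 };
        taskqid_t wait_id = tq.tq_next_id - 1;

        /* Buggy drain: tq_lowest_id was left at ~0, and because no task
         * with id ~0 ever completes it is never recomputed, so the check
         * passes even though task 4 has not run. */
        tq.tq_lowest_id = ~0ULL;
        printf("buggy: %s\n", wait_check(&tq, wait_id) ?
               "returns immediately" : "blocks until task 4 completes");

        /* Fixed drain: tq_lowest_id is capped at tq_next_id (4 at drain
         * time), so the check fails until task 4 actually completes. */
        tq.tq_lowest_id = 4;
        printf("fixed: %s\n", wait_check(&tq, wait_id) ?
               "returns immediately" : "blocks until task 4 completes");

        return 0;
}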

Additional fixes which were added to this patch include:
1) Fix a race by performing the taskq_wait_check() under the tq->tq_lock spinlock.
2) taskq_wait() now waits for the largest outstanding id (both sketched after this list).
3) Multiple spelling corrections.
4) Added taskq wait regression test to validate correct behavior.
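
A condensed user-space sketch of items 1) and 2) follows.  A pthread mutex
stands in for the tq_lock spinlock and the mini_* names are illustrative,
not the SPL API; the real code in module/spl/spl-taskq.c sleeps on a wait
queue rather than returning a flag.

#include <pthread.h>
#include <stdio.h>

typedef unsigned long long taskqid_t;

struct mini_taskq {
        pthread_mutex_t tq_lock;
        taskqid_t tq_next_id;     /* id the next dispatch will receive */
        taskqid_t tq_lowest_id;   /* lowest taskqid not yet completed */
};

/* Item 1: the completion check reads tq_lowest_id under the same lock the
 * worker threads hold while updating it, so the comparison cannot race. */
static int
mini_wait_check(struct mini_taskq *tq, taskqid_t id)
{
        int rc;

        pthread_mutex_lock(&tq->tq_lock);
        rc = (id < tq->tq_lowest_id);
        pthread_mutex_unlock(&tq->tq_lock);

        return rc;
}

/* Item 2: a full taskq_wait() targets the largest id handed out so far,
 * which is tq_next_id - 1 at the time of the call. */
static taskqid_t
mini_wait_target(struct mini_taskq *tq)
{
        taskqid_t id;

        pthread_mutex_lock(&tq->tq_lock);
        id = tq->tq_next_id - 1;
        pthread_mutex_unlock(&tq->tq_lock);

        return id;
}

int
main(void)
{
        struct mini_taskq tq = {
                .tq_lock = PTHREAD_MUTEX_INITIALIZER,
                .tq_next_id = 5,
                .tq_lowest_id = 4,   /* task 4 still outstanding */
        };
        taskqid_t id = mini_wait_target(&tq);

        printf("waiting on id %llu, done=%d\n", id, mini_wait_check(&tq, id));
        return 0;
}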

include/sys/taskq.h
module/spl/spl-taskq.c
module/splat/splat-taskq.c

diff --git a/include/sys/taskq.h b/include/sys/taskq.h
index 0e78ea1233eb869904f52e83519ed6e07ff14c9c..0b86231031f47983268ebbd1590690ca4a772424 100644
@@ -87,6 +87,7 @@ extern taskq_t *system_taskq;
 extern taskqid_t __taskq_dispatch(taskq_t *, task_func_t, void *, uint_t);
 extern taskq_t *__taskq_create(const char *, int, pri_t, int, int, uint_t);
 extern void __taskq_destroy(taskq_t *);
+extern void __taskq_wait_id(taskq_t *, taskqid_t);
 extern void __taskq_wait(taskq_t *);
 extern int __taskq_member(taskq_t *, void *);
 
diff --git a/module/spl/spl-taskq.c b/module/spl/spl-taskq.c
index e77ef669fc3125eee274b701c7319975cdaeee2b..5960761f4a41406fba99e2b7635c2fee1ee61ad0 100644
@@ -60,14 +60,14 @@ task_alloc(taskq_t *tq, uint_t flags)
         ASSERT(!((flags & TQ_SLEEP) && (flags & TQ_NOSLEEP))); /* Not both */
         ASSERT(spin_is_locked(&tq->tq_lock));
 retry:
-        /* Aquire spl_task_t's from free list if available */
+        /* Acquire spl_task_t's from free list if available */
         if (!list_empty(&tq->tq_free_list) && !(flags & TQ_NEW)) {
                 t = list_entry(tq->tq_free_list.next, spl_task_t, t_list);
                 list_del_init(&t->t_list);
                 RETURN(t);
         }
 
-        /* Free list is empty and memory allocs are prohibited */
+        /* Free list is empty and memory allocations are prohibited */
         if (flags & TQ_NOALLOC)
                 RETURN(NULL);
 
@@ -89,7 +89,7 @@ retry:
                         RETURN(NULL);
                 }
 
-                /* Unreachable, TQ_SLEEP xor TQ_NOSLEEP */
+                /* Unreachable, TQ_SLEEP or TQ_NOSLEEP */
                 SBUG();
         }
 
@@ -109,7 +109,7 @@ retry:
         RETURN(t);
 }
 
-/* NOTE: Must be called with tq->tq_lock held, expectes the spl_task_t
+/* NOTE: Must be called with tq->tq_lock held, expects the spl_task_t
  * to already be removed from the free, work, or pending taskq lists.
  */
 static void
@@ -128,7 +128,7 @@ task_free(taskq_t *tq, spl_task_t *t)
        EXIT;
 }
 
-/* NOTE: Must be called with tq->tq_lock held, either destroyes the
+/* NOTE: Must be called with tq->tq_lock held, either destroys the
  * spl_task_t if too many exist or moves it to the free list for later use.
  */
 static void
@@ -154,7 +154,7 @@ task_done(taskq_t *tq, spl_task_t *t)
 }
 
 /* Taskqid's are handed out in a monotonically increasing fashion per
- * taskq_t.  We don't handle taskqid wrapping yet, but fortuntely it isi
+ * taskq_t.  We don't handle taskqid wrapping yet, but fortunately it is
  * a 64-bit value so this is probably never going to happen.  The lowest
  * pending taskqid is stored in the taskq_t to make it easy for any
  * taskq_wait()'ers to know if the tasks they're waiting for have
@@ -164,12 +164,18 @@ task_done(taskq_t *tq, spl_task_t *t)
 static int
 taskq_wait_check(taskq_t *tq, taskqid_t id)
 {
-       RETURN(tq->tq_lowest_id >= id);
+       int rc;
+
+       spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+       rc = (id < tq->tq_lowest_id);
+       spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
+
+       RETURN(rc);
 }
 
 /* Expected to wait for all previously scheduled tasks to complete.  We do
  * not need to wait for tasked scheduled after this call to complete.  In
- * otherwords we do not need to drain the entire taskq. */
+ * other words we do not need to drain the entire taskq. */
 void
 __taskq_wait_id(taskq_t *tq, taskqid_t id)
 {
@@ -189,8 +195,9 @@ __taskq_wait(taskq_t *tq)
        ENTRY;
        ASSERT(tq);
 
+       /* Wait for the largest outstanding taskqid */
        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
-       id = tq->tq_next_id;
+       id = tq->tq_next_id - 1;
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
 
        __taskq_wait_id(tq, id);
@@ -265,7 +272,7 @@ EXPORT_SYMBOL(__taskq_dispatch);
 static taskqid_t
 taskq_lowest_id(taskq_t *tq)
 {
-       taskqid_t lowest_id = ~0;
+       taskqid_t lowest_id = tq->tq_next_id;
         spl_task_t *t;
        ENTRY;
 
@@ -318,7 +325,7 @@ taskq_thread(void *args)
 
                remove_wait_queue(&tq->tq_work_waitq, &wait);
                 if (!list_empty(&tq->tq_pend_list)) {
-                        t = list_entry(tq->tq_pend_list.next, spl_task_t, t_list);
+                        t = list_entry(tq->tq_pend_list.next,spl_task_t,t_list);
                         list_del_init(&t->t_list);
                        list_add_tail(&t->t_list, &tq->tq_work_list);
                         tq->tq_nactive++;
@@ -332,7 +339,8 @@ taskq_thread(void *args)
                        id = t->t_id;
                         task_done(tq, t);
 
-                       /* Update the lowest remaining taskqid yet to run */
+                       /* When the current lowest outstanding taskqid is
+                        * done calculate the new lowest outstanding id */
                        if (tq->tq_lowest_id == id) {
                                tq->tq_lowest_id = taskq_lowest_id(tq);
                                ASSERT(tq->tq_lowest_id > id);
diff --git a/module/splat/splat-taskq.c b/module/splat/splat-taskq.c
index 3cc09bcb97d35bb01bc9cbc60a8c1e14f8fee721..a9398f5a57e6582f2d3c521d905fb9a3411cacaf 100644
 #define SPLAT_TASKQ_TEST3_NAME         "system"
 #define SPLAT_TASKQ_TEST3_DESC         "System task queue, multiple tasks"
 
+#define SPLAT_TASKQ_TEST4_ID            0x0204
+#define SPLAT_TASKQ_TEST4_NAME         "wait"
+#define SPLAT_TASKQ_TEST4_DESC         "Multiple task waiting"
+
 typedef struct splat_taskq_arg {
        int flag;
        int id;
+       atomic_t count;
        struct file *file;
        const char *name;
 } splat_taskq_arg_t;
@@ -266,6 +271,73 @@ splat_taskq_test3(struct file *file, void *arg)
        return (tq_arg.flag) ? 0 : -EINVAL;
 }
 
+static void
+splat_taskq_test4_func(void *arg)
+{
+       splat_taskq_arg_t *tq_arg = (splat_taskq_arg_t *)arg;
+       ASSERT(tq_arg);
+
+       atomic_inc(&tq_arg->count);
+}
+
+static int
+splat_taskq_test4(struct file *file, void *arg)
+{
+       taskq_t *tq;
+       splat_taskq_arg_t tq_arg;
+       int i, j, rc = 0;
+
+       splat_vprint(file, SPLAT_TASKQ_TEST4_NAME, "Taskq '%s' creating\n",
+                    SPLAT_TASKQ_TEST4_NAME);
+       if ((tq = taskq_create(SPLAT_TASKQ_TEST4_NAME, 1, maxclsyspri,
+                              50, INT_MAX, TASKQ_PREPOPULATE)) == NULL) {
+               splat_vprint(file, SPLAT_TASKQ_TEST4_NAME,
+                            "Taskq '%s' create failed\n",
+                            SPLAT_TASKQ_TEST4_NAME);
+               return -EINVAL;
+       }
+
+       tq_arg.file = file;
+       tq_arg.name = SPLAT_TASKQ_TEST4_NAME;
+
+       for (i = 1; i <= 1024; i *= 2) {
+               atomic_set(&tq_arg.count, 0);
+               splat_vprint(file, SPLAT_TASKQ_TEST4_NAME,
+                            "Taskq '%s' function '%s' dispatched %d times\n",
+                            tq_arg.name, sym2str(splat_taskq_test4_func), i);
+
+               for (j = 0; j < i; j++) {
+                       if ((taskq_dispatch(tq, splat_taskq_test4_func,
+                                           &tq_arg, TQ_SLEEP)) == 0) {
+                               splat_vprint(file, SPLAT_TASKQ_TEST4_NAME,
+                                       "Taskq '%s' function '%s' dispatch "
+                                       "%d failed\n", tq_arg.name,
+                                       sym2str(splat_taskq_test4_func), j);
+                                       rc = -EINVAL;
+                                       goto out;
+                       }
+               }
+
+               splat_vprint(file, SPLAT_TASKQ_TEST4_NAME, "Taskq '%s' "
+                            "waiting for %d dispatches\n", tq_arg.name, i);
+               taskq_wait(tq);
+               splat_vprint(file, SPLAT_TASKQ_TEST4_NAME, "Taskq '%s' "
+                            "%d/%d dispatches finished\n", tq_arg.name,
+                            atomic_read(&tq_arg.count), i);
+               if (atomic_read(&tq_arg.count) != i) {
+                       rc = -ERANGE;
+                       goto out;
+               }
+       }
+out:
+       splat_vprint(file, SPLAT_TASKQ_TEST4_NAME, "Taskq '%s' destroying\n",
+                  tq_arg.name);
+       taskq_destroy(tq);
+
+       return rc;
+}
+
 splat_subsystem_t *
 splat_taskq_init(void)
 {
@@ -289,6 +361,8 @@ splat_taskq_init(void)
                      SPLAT_TASKQ_TEST2_ID, splat_taskq_test2);
        SPLAT_TEST_INIT(sub, SPLAT_TASKQ_TEST3_NAME, SPLAT_TASKQ_TEST3_DESC,
                      SPLAT_TASKQ_TEST3_ID, splat_taskq_test3);
+       SPLAT_TEST_INIT(sub, SPLAT_TASKQ_TEST4_NAME, SPLAT_TASKQ_TEST4_DESC,
+                     SPLAT_TASKQ_TEST4_ID, splat_taskq_test4);
 
         return sub;
 }
@@ -297,6 +371,7 @@ void
 splat_taskq_fini(splat_subsystem_t *sub)
 {
         ASSERT(sub);
+       SPLAT_TEST_FINI(sub, SPLAT_TASKQ_TEST4_ID);
        SPLAT_TEST_FINI(sub, SPLAT_TASKQ_TEST3_ID);
        SPLAT_TEST_FINI(sub, SPLAT_TASKQ_TEST2_ID);
        SPLAT_TEST_FINI(sub, SPLAT_TASKQ_TEST1_ID);