From 3c6ed5410beb7a4f9e0c042229eb63c4c11a5fc9 Mon Sep 17 00:00:00 2001
From: Ned Bass
Date: Thu, 19 Jan 2012 10:33:19 -0800
Subject: [PATCH] Taskq locking optimizations

Testing has shown that tq->tq_lock can be highly contended when a
large number of small work items are dispatched.  The lock hold time
is reduced by the following changes:

1) Use exclusive threads in the work_waitq

When a single work item is dispatched we only need to wake a single
thread to service it.  The current implementation uses non-exclusive
threads so all threads are woken when the dispatcher calls wake_up().
If a large number of threads are in the queue this overhead can
become non-negligible.

2) Conditionally add/remove threads from work waitq

Taskq threads need only add themselves to the work wait queue if
there are no pending work items.

Signed-off-by: Brian Behlendorf
Issue #32
---
 module/spl/spl-taskq.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/module/spl/spl-taskq.c b/module/spl/spl-taskq.c
index ece99aad6..0b3b3a131 100644
--- a/module/spl/spl-taskq.c
+++ b/module/spl/spl-taskq.c
@@ -454,17 +454,17 @@ taskq_thread(void *args)
 
 	while (!kthread_should_stop()) {
 
-		add_wait_queue(&tq->tq_work_waitq, &wait);
 		if (list_empty(&tq->tq_pend_list) &&
 		    list_empty(&tq->tq_prio_list)) {
+			add_wait_queue_exclusive(&tq->tq_work_waitq, &wait);
 			spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
 			schedule();
 			spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+			remove_wait_queue(&tq->tq_work_waitq, &wait);
 		} else {
 			__set_current_state(TASK_RUNNING);
 		}
-		remove_wait_queue(&tq->tq_work_waitq, &wait);
 
 		if (!list_empty(&tq->tq_prio_list))
 			pend_list = &tq->tq_prio_list;
--
2.39.5
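
Illustrative sketch (not part of the patch): the userspace program below is a
minimal analogy for the wake-one versus wake-all behaviour that motivates
switching to add_wait_queue_exclusive().  It uses POSIX condition variables
rather than the kernel wait queue API, and every name in it (worker,
work_count, work_cv, NWORKERS) is made up for illustration; it is not the
taskq code itself.

/*
 * Minimal userspace analogy, not the SPL taskq code: four workers
 * sleep on a condition variable until work is queued.  Waking them
 * with pthread_cond_signal() (one waiter per item) mirrors an
 * exclusive wait queue entry woken by wake_up(); a broadcast would
 * mirror the old non-exclusive behaviour where every thread wakes
 * and contends for the lock on each dispatch.
 */
#include <pthread.h>
#include <stdio.h>

#define NWORKERS 4

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  work_cv = PTHREAD_COND_INITIALIZER;
static int work_count;				/* pending work items */

static void *worker(void *arg)
{
	long id = (long)arg;

	pthread_mutex_lock(&lock);
	while (work_count == 0)
		pthread_cond_wait(&work_cv, &lock);	/* sleep until woken */
	work_count--;					/* claim one item */
	pthread_mutex_unlock(&lock);

	printf("worker %ld serviced one item\n", id);
	return NULL;
}

int main(void)
{
	pthread_t tids[NWORKERS];
	long i;

	for (i = 0; i < NWORKERS; i++)
		pthread_create(&tids[i], NULL, worker, (void *)i);

	pthread_mutex_lock(&lock);
	work_count = NWORKERS;
	/*
	 * One wake-up per queued item; only as many workers run as
	 * there is work.  pthread_cond_broadcast() here would wake
	 * all of them for every dispatch, which is the overhead the
	 * patch removes.
	 */
	for (i = 0; i < NWORKERS; i++)
		pthread_cond_signal(&work_cv);
	pthread_mutex_unlock(&lock);

	for (i = 0; i < NWORKERS; i++)
		pthread_join(tids[i], NULL);
	return 0;
}

Build with a plain C compiler and -lpthread; the same wake-one reasoning
carries over to the kernel wait queue used by taskq_thread().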