/*
 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2007 The Regents of the University of California.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *
 * This file is part of the SPL, Solaris Porting Layer.
 * For details, see <http://zfsonlinux.org/>.
 *
 * The SPL is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * The SPL is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Solaris Porting Layer (SPL) Task Queue Implementation.
 */
#include <sys/taskq.h>
#include <sys/kmem.h>
int spl_taskq_thread_bind = 0;
module_param(spl_taskq_thread_bind, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_bind, "Bind taskq thread to CPU by default");

int spl_taskq_thread_dynamic = 1;
module_param(spl_taskq_thread_dynamic, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_dynamic, "Allow dynamic taskq threads");

int spl_taskq_thread_priority = 1;
module_param(spl_taskq_thread_priority, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_priority,
	"Allow non-default priority for taskq threads");

int spl_taskq_thread_sequential = 4;
module_param(spl_taskq_thread_sequential, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_sequential,
	"Create new taskq threads after N sequential tasks");
/* Global system-wide dynamic task queue available for all consumers */
taskq_t *system_taskq;
EXPORT_SYMBOL(system_taskq);
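
/*
 * Consumers that do not need a dedicated queue may dispatch directly to
 * system_taskq once spl_taskq_init() has run. A minimal sketch, where
 * my_func and my_arg are illustrative placeholders:
 *
 *   taskqid_t id = taskq_dispatch(system_taskq, my_func, my_arg, TQ_SLEEP);
 *   if (id == 0)
 *           ... the dispatch failed and must be handled by the caller ...
 */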
/* Private dedicated taskq for creating new taskq threads on demand. */
static taskq_t *dynamic_taskq;
static taskq_thread_t *taskq_thread_create(taskq_t *);

/* List of all taskqs */
LIST_HEAD(tq_list);
DECLARE_RWSEM(tq_list_sem);
static int
task_km_flags(uint_t flags)
{
	if (flags & TQ_NOSLEEP)
		return (KM_NOSLEEP);

	if (flags & TQ_PUSHPAGE)
		return (KM_PUSHPAGE);

	return (KM_SLEEP);
}
/*
 * taskq_find_by_name - Find the largest instance number of a named taskq.
 */
static int
taskq_find_by_name(const char *name)
{
	struct list_head *tql;
	taskq_t *tq;

	list_for_each_prev(tql, &tq_list) {
		tq = list_entry(tql, taskq_t, tq_taskqs);
		if (strcmp(name, tq->tq_name) == 0)
			return (tq->tq_instance);
	}
	return (-1);
}
/*
 * NOTE: Must be called with tq->tq_lock held, returns a taskq_ent_t which
 * is not attached to the free, work, or pending taskq lists.
 */
static taskq_ent_t *
task_alloc(taskq_t *tq, uint_t flags, unsigned long *irqflags)
{
	taskq_ent_t *t;
	int count = 0;

	ASSERT(tq);
	ASSERT(spin_is_locked(&tq->tq_lock));
retry:
	/* Acquire taskq_ent_t's from free list if available */
	if (!list_empty(&tq->tq_free_list) && !(flags & TQ_NEW)) {
		t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);

		ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));
		ASSERT(!(t->tqent_flags & TQENT_FLAG_CANCEL));
		ASSERT(!timer_pending(&t->tqent_timer));

		list_del_init(&t->tqent_list);
		return (t);
	}

	/* Free list is empty and memory allocations are prohibited */
	if (flags & TQ_NOALLOC)
		return (NULL);

	/* Hit maximum taskq_ent_t pool size */
	if (tq->tq_nalloc >= tq->tq_maxalloc) {
		if (flags & TQ_NOSLEEP)
			return (NULL);

		/*
		 * Sleep periodically polling the free list for an available
		 * taskq_ent_t. Dispatching with TQ_SLEEP should always succeed
		 * but we cannot block forever waiting for a taskq_ent_t to
		 * show up in the free list, otherwise a deadlock can happen.
		 *
		 * Therefore, we need to allocate a new task even if the number
		 * of allocated tasks is above tq->tq_maxalloc, but we still
		 * end up delaying the task allocation by one second, thereby
		 * throttling the task dispatch rate.
		 */
		spin_unlock_irqrestore(&tq->tq_lock, *irqflags);
		schedule_timeout(HZ / 100);
		spin_lock_irqsave_nested(&tq->tq_lock, *irqflags,
		    tq->tq_lock_class);
		if (count < 100) {
			count++;
			goto retry;
		}
	}

	spin_unlock_irqrestore(&tq->tq_lock, *irqflags);
	t = kmem_alloc(sizeof (taskq_ent_t), task_km_flags(flags));
	spin_lock_irqsave_nested(&tq->tq_lock, *irqflags, tq->tq_lock_class);

	if (t) {
		taskq_init_ent(t);
		tq->tq_nalloc++;
	}

	return (t);
}
/*
 * NOTE: Must be called with tq->tq_lock held, expects the taskq_ent_t
 * to already be removed from the free, work, or pending taskq lists.
 */
static void
task_free(taskq_t *tq, taskq_ent_t *t)
{
	ASSERT(tq);
	ASSERT(t);
	ASSERT(spin_is_locked(&tq->tq_lock));
	ASSERT(list_empty(&t->tqent_list));
	ASSERT(!timer_pending(&t->tqent_timer));

	kmem_free(t, sizeof (taskq_ent_t));
	tq->tq_nalloc--;
}
/*
 * NOTE: Must be called with tq->tq_lock held, either destroys the
 * taskq_ent_t if too many exist or moves it to the free list for later use.
 */
static void
task_done(taskq_t *tq, taskq_ent_t *t)
{
	ASSERT(tq);
	ASSERT(t);
	ASSERT(spin_is_locked(&tq->tq_lock));

	/* Wake tasks blocked in taskq_wait_id() */
	wake_up_all(&t->tqent_waitq);

	list_del_init(&t->tqent_list);

	if (tq->tq_nalloc <= tq->tq_minalloc) {
		t->tqent_id = 0;
		t->tqent_func = NULL;
		t->tqent_arg = NULL;
		t->tqent_flags = 0;

		list_add_tail(&t->tqent_list, &tq->tq_free_list);
	} else {
		task_free(tq, t);
	}
}
/*
 * When a delayed task timer expires remove it from the delay list and
 * add it to the priority list for immediate processing.
 */
static void
task_expire(unsigned long data)
{
	taskq_ent_t *w, *t = (taskq_ent_t *)data;
	taskq_t *tq = t->tqent_taskq;
	struct list_head *l;
	unsigned long flags;

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);

	if (t->tqent_flags & TQENT_FLAG_CANCEL) {
		ASSERT(list_empty(&t->tqent_list));
		spin_unlock_irqrestore(&tq->tq_lock, flags);
		return;
	}

	/*
	 * The priority list must be maintained in strict task id order
	 * from lowest to highest for lowest_id to be easily calculable.
	 */
	list_del(&t->tqent_list);
	list_for_each_prev(l, &tq->tq_prio_list) {
		w = list_entry(l, taskq_ent_t, tqent_list);
		if (w->tqent_id < t->tqent_id) {
			list_add(&t->tqent_list, l);
			break;
		}
	}
	if (l == &tq->tq_prio_list)
		list_add(&t->tqent_list, &tq->tq_prio_list);

	spin_unlock_irqrestore(&tq->tq_lock, flags);

	wake_up(&tq->tq_work_waitq);
}
/*
 * Returns the lowest incomplete taskqid_t. The taskqid_t may
 * be queued on the pending list, on the priority list, on the
 * delay list, or on the work list currently being handled, but
 * it is not 100% complete yet.
 */
static taskqid_t
taskq_lowest_id(taskq_t *tq)
{
	taskqid_t lowest_id = tq->tq_next_id;
	taskq_ent_t *t;
	taskq_thread_t *tqt;

	ASSERT(tq);
	ASSERT(spin_is_locked(&tq->tq_lock));

	if (!list_empty(&tq->tq_pend_list)) {
		t = list_entry(tq->tq_pend_list.next, taskq_ent_t, tqent_list);
		lowest_id = MIN(lowest_id, t->tqent_id);
	}

	if (!list_empty(&tq->tq_prio_list)) {
		t = list_entry(tq->tq_prio_list.next, taskq_ent_t, tqent_list);
		lowest_id = MIN(lowest_id, t->tqent_id);
	}

	if (!list_empty(&tq->tq_delay_list)) {
		t = list_entry(tq->tq_delay_list.next, taskq_ent_t, tqent_list);
		lowest_id = MIN(lowest_id, t->tqent_id);
	}

	if (!list_empty(&tq->tq_active_list)) {
		tqt = list_entry(tq->tq_active_list.next, taskq_thread_t,
		    tqt_active_list);
		ASSERT(tqt->tqt_id != 0);
		lowest_id = MIN(lowest_id, tqt->tqt_id);
	}

	return (lowest_id);
}
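
/*
 * Because each list consulted above is kept sorted by ascending task id,
 * only the head entry of each list needs to be examined. Recalculating the
 * lowest outstanding id is therefore cheap regardless of how many tasks
 * are queued.
 */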
/*
 * Insert a task into a list keeping the list sorted by increasing taskqid.
 */
static void
taskq_insert_in_order(taskq_t *tq, taskq_thread_t *tqt)
{
	taskq_thread_t *w;
	struct list_head *l;

	ASSERT(tq);
	ASSERT(tqt);
	ASSERT(spin_is_locked(&tq->tq_lock));

	list_for_each_prev(l, &tq->tq_active_list) {
		w = list_entry(l, taskq_thread_t, tqt_active_list);
		if (w->tqt_id < tqt->tqt_id) {
			list_add(&tqt->tqt_active_list, l);
			break;
		}
	}
	if (l == &tq->tq_active_list)
		list_add(&tqt->tqt_active_list, &tq->tq_active_list);
}
/*
 * Find and return a task from the given list if it exists. The list
 * must be in lowest to highest task id order.
 */
static taskq_ent_t *
taskq_find_list(taskq_t *tq, struct list_head *lh, taskqid_t id)
{
	struct list_head *l;
	taskq_ent_t *t;

	ASSERT(spin_is_locked(&tq->tq_lock));

	list_for_each(l, lh) {
		t = list_entry(l, taskq_ent_t, tqent_list);

		if (t->tqent_id == id)
			return (t);

		if (t->tqent_id > id)
			break;
	}

	return (NULL);
}
/*
 * Find an already dispatched task given the task id regardless of what
 * state it is in. If a task is still pending or executing it will be
 * returned and 'active' set appropriately. If the task has already
 * been run then NULL is returned.
 */
static taskq_ent_t *
taskq_find(taskq_t *tq, taskqid_t id, int *active)
{
	taskq_thread_t *tqt;
	struct list_head *l;
	taskq_ent_t *t;

	ASSERT(spin_is_locked(&tq->tq_lock));
	*active = 0;

	t = taskq_find_list(tq, &tq->tq_delay_list, id);
	if (t)
		return (t);

	t = taskq_find_list(tq, &tq->tq_prio_list, id);
	if (t)
		return (t);

	t = taskq_find_list(tq, &tq->tq_pend_list, id);
	if (t)
		return (t);

	list_for_each(l, &tq->tq_active_list) {
		tqt = list_entry(l, taskq_thread_t, tqt_active_list);
		if (tqt->tqt_id == id) {
			t = tqt->tqt_task;
			*active = 1;
			return (t);
		}
	}

	return (NULL);
}
/*
 * Theory for the taskq_wait_id(), taskq_wait_outstanding(), and
 * taskq_wait() functions below.
 *
 * Taskq waiting is accomplished by tracking the lowest outstanding task
 * id and the next available task id. As tasks are dispatched they are
 * added to the tail of the pending, priority, or delay lists. As worker
 * threads become available the tasks are removed from the heads of these
 * lists and linked to the worker threads. This ensures the lists are
 * kept sorted by lowest to highest task id.
 *
 * Therefore the lowest outstanding task id can be quickly determined by
 * checking the head item from all of these lists. This value is stored
 * with the taskq as the lowest id. It only needs to be recalculated when
 * either the task with the current lowest id completes or is canceled.
 *
 * By blocking until the lowest task id exceeds the passed task id the
 * taskq_wait_outstanding() function can be easily implemented. Similarly,
 * by blocking until the lowest task id matches the next task id taskq_wait()
 * can be implemented.
 *
 * Callers should be aware that when there are multiple worker threads it
 * is possible for larger task ids to complete before smaller ones. Also
 * when the taskq contains delay tasks with small task ids callers may
 * block for a considerable length of time waiting for them to expire and
 * execute.
 */
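
/*
 * A short sketch of how the three wait functions differ in practice;
 * func1, func2, and arg are illustrative placeholders:
 *
 *   id1 = taskq_dispatch(tq, func1, arg, TQ_SLEEP);
 *   id2 = taskq_dispatch(tq, func2, arg, TQ_SLEEP);
 *
 *   taskq_wait_id(tq, id1);            waits only for id1
 *   taskq_wait_outstanding(tq, id2);   waits for every id lower than id2
 *   taskq_wait(tq);                    waits until the taskq is empty
 */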
static int
taskq_wait_id_check(taskq_t *tq, taskqid_t id)
{
	int active = 0;
	int rc;
	unsigned long flags;

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	rc = (taskq_find(tq, id, &active) == NULL);
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	return (rc);
}
/*
 * The taskq_wait_id() function blocks until the passed task id completes.
 * This does not guarantee that all lower task ids have completed.
 */
void
taskq_wait_id(taskq_t *tq, taskqid_t id)
{
	wait_event(tq->tq_wait_waitq, taskq_wait_id_check(tq, id));
}
EXPORT_SYMBOL(taskq_wait_id);
static int
taskq_wait_outstanding_check(taskq_t *tq, taskqid_t id)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	rc = (id < tq->tq_lowest_id);
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	return (rc);
}
/*
 * The taskq_wait_outstanding() function will block until all tasks with a
 * lower taskqid than the passed 'id' have been completed. Note that all
 * task ids are assigned monotonically at dispatch time. Zero may be
 * passed for the id to indicate that all tasks dispatched up to this point,
 * but not after, should be waited for.
 */
void
taskq_wait_outstanding(taskq_t *tq, taskqid_t id)
{
	wait_event(tq->tq_wait_waitq,
	    taskq_wait_outstanding_check(tq, id ? id : tq->tq_next_id - 1));
}
EXPORT_SYMBOL(taskq_wait_outstanding);
static int
taskq_wait_check(taskq_t *tq)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	rc = (tq->tq_lowest_id == tq->tq_next_id);
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	return (rc);
}
/*
 * The taskq_wait() function will block until the taskq is empty.
 * This means that if a taskq re-dispatches work to itself taskq_wait()
 * callers will block indefinitely.
 */
void
taskq_wait(taskq_t *tq)
{
	wait_event(tq->tq_wait_waitq, taskq_wait_check(tq));
}
EXPORT_SYMBOL(taskq_wait);
static int
taskq_member_impl(taskq_t *tq, void *t)
{
	struct list_head *l;
	taskq_thread_t *tqt;
	int found = 0;

	ASSERT(tq);
	ASSERT(t);
	ASSERT(spin_is_locked(&tq->tq_lock));

	list_for_each(l, &tq->tq_thread_list) {
		tqt = list_entry(l, taskq_thread_t, tqt_thread_list);
		if (tqt->tqt_thread == (struct task_struct *)t) {
			found = 1;
			break;
		}
	}
	return (found);
}

int
taskq_member(taskq_t *tq, void *t)
{
	int found;
	unsigned long flags;

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	found = taskq_member_impl(tq, t);
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	return (found);
}
EXPORT_SYMBOL(taskq_member);
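
/*
 * taskq_member() is typically used to detect that the caller is already
 * running in the context of a given taskq, for example to avoid a
 * self-deadlock before draining it. An illustrative check, assuming the
 * SPL's curthread alias for the current task:
 *
 *   if (!taskq_member(tq, curthread))
 *           taskq_wait(tq);
 */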
/*
 * Cancel an already dispatched task given the task id. Still pending tasks
 * will be immediately canceled, and if the task is active the function will
 * block until it completes. Preallocated tasks which are canceled must be
 * freed by the caller.
 */
int
taskq_cancel_id(taskq_t *tq, taskqid_t id)
{
	taskq_ent_t *t;
	int active = 0;
	int rc = ENOENT;
	unsigned long flags;

	ASSERT(tq);

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	t = taskq_find(tq, id, &active);
	if (t && !active) {
		list_del_init(&t->tqent_list);
		t->tqent_flags |= TQENT_FLAG_CANCEL;

		/*
		 * When canceling the lowest outstanding task id we
		 * must recalculate the new lowest outstanding id.
		 */
		if (tq->tq_lowest_id == t->tqent_id) {
			tq->tq_lowest_id = taskq_lowest_id(tq);
			ASSERT3S(tq->tq_lowest_id, >, t->tqent_id);
		}

		/*
		 * The task_expire() function takes the tq->tq_lock so drop
		 * the lock before synchronously cancelling the timer.
		 */
		if (timer_pending(&t->tqent_timer)) {
			spin_unlock_irqrestore(&tq->tq_lock, flags);
			del_timer_sync(&t->tqent_timer);
			spin_lock_irqsave_nested(&tq->tq_lock, flags,
			    tq->tq_lock_class);
		}

		if (!(t->tqent_flags & TQENT_FLAG_PREALLOC))
			task_done(tq, t);

		rc = 0;
	}
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	if (active) {
		taskq_wait_id(tq, id);
		rc = EBUSY;
	}

	return (rc);
}
EXPORT_SYMBOL(taskq_cancel_id);
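
/*
 * Illustrative cancellation of a delayed task; my_func and my_arg are
 * placeholders and the return-code handling reflects the logic above
 * (0 on success, non-zero when the task already ran or was running):
 *
 *   id = taskq_dispatch_delay(tq, my_func, my_arg, TQ_SLEEP,
 *       ddi_get_lbolt() + 10 * HZ);
 *   ...
 *   if (taskq_cancel_id(tq, id) == 0)
 *           ... my_func will not run for this dispatch ...
 */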
static int taskq_thread_spawn(taskq_t *tq);
taskqid_t
taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
{
	taskq_ent_t *t;
	taskqid_t rc = 0;
	unsigned long irqflags;

	ASSERT(tq);
	ASSERT(func);

	spin_lock_irqsave_nested(&tq->tq_lock, irqflags, tq->tq_lock_class);

	/* Taskq being destroyed and all tasks drained */
	if (!(tq->tq_flags & TASKQ_ACTIVE))
		goto out;

	/* Do not queue the task unless there is an idle thread for it */
	ASSERT(tq->tq_nactive <= tq->tq_nthreads);
	if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads))
		goto out;

	if ((t = task_alloc(tq, flags, &irqflags)) == NULL)
		goto out;

	spin_lock(&t->tqent_lock);

	/* Queue to the priority list instead of the pending list */
	if (flags & TQ_FRONT)
		list_add_tail(&t->tqent_list, &tq->tq_prio_list);
	else
		list_add_tail(&t->tqent_list, &tq->tq_pend_list);

	t->tqent_id = rc = tq->tq_next_id;
	tq->tq_next_id++;
	t->tqent_func = func;
	t->tqent_arg = arg;
	t->tqent_taskq = tq;
	t->tqent_timer.data = 0;
	t->tqent_timer.function = NULL;
	t->tqent_timer.expires = 0;

	ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

	spin_unlock(&t->tqent_lock);

	wake_up(&tq->tq_work_waitq);
out:
	/* Spawn additional taskq threads if required. */
	if (tq->tq_nactive == tq->tq_nthreads)
		(void) taskq_thread_spawn(tq);

	spin_unlock_irqrestore(&tq->tq_lock, irqflags);
	return (rc);
}
EXPORT_SYMBOL(taskq_dispatch);
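
/*
 * Typical dispatch from a context which may sleep; a return value of 0
 * means the task could not be queued. my_func and my_arg are illustrative
 * placeholders, as is the synchronous fallback:
 *
 *   taskqid_t id = taskq_dispatch(tq, my_func, my_arg, TQ_SLEEP);
 *   if (id == 0)
 *           my_func(my_arg);
 */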
taskqid_t
taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg,
    uint_t flags, clock_t expire_time)
{
	taskqid_t rc = 0;
	taskq_ent_t *t;
	unsigned long irqflags;

	ASSERT(tq);
	ASSERT(func);

	spin_lock_irqsave_nested(&tq->tq_lock, irqflags, tq->tq_lock_class);

	/* Taskq being destroyed and all tasks drained */
	if (!(tq->tq_flags & TASKQ_ACTIVE))
		goto out;

	if ((t = task_alloc(tq, flags, &irqflags)) == NULL)
		goto out;

	spin_lock(&t->tqent_lock);

	/* Queue to the delay list for subsequent execution */
	list_add_tail(&t->tqent_list, &tq->tq_delay_list);

	t->tqent_id = rc = tq->tq_next_id;
	tq->tq_next_id++;
	t->tqent_func = func;
	t->tqent_arg = arg;
	t->tqent_taskq = tq;
	t->tqent_timer.data = (unsigned long)t;
	t->tqent_timer.function = task_expire;
	t->tqent_timer.expires = (unsigned long)expire_time;
	add_timer(&t->tqent_timer);

	ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

	spin_unlock(&t->tqent_lock);
out:
	/* Spawn additional taskq threads if required. */
	if (tq->tq_nactive == tq->tq_nthreads)
		(void) taskq_thread_spawn(tq);
	spin_unlock_irqrestore(&tq->tq_lock, irqflags);
	return (rc);
}
EXPORT_SYMBOL(taskq_dispatch_delay);
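
/*
 * The expire_time argument is an absolute time in jiffies (the timer is
 * armed with it directly above), so a relative delay is normally built
 * from the current lbolt value. A sketch which runs my_func (a placeholder)
 * roughly five seconds from now:
 *
 *   id = taskq_dispatch_delay(tq, my_func, my_arg, TQ_SLEEP,
 *       ddi_get_lbolt() + 5 * HZ);
 */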
void
taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
    taskq_ent_t *t)
{
	unsigned long irqflags;

	ASSERT(tq);
	ASSERT(func);

	spin_lock_irqsave_nested(&tq->tq_lock, irqflags,
	    tq->tq_lock_class);

	/* Taskq being destroyed and all tasks drained */
	if (!(tq->tq_flags & TASKQ_ACTIVE)) {
		t->tqent_id = 0;
		goto out;
	}

	spin_lock(&t->tqent_lock);

	/*
	 * Mark it as a prealloc'd task. This is important
	 * to ensure that we don't free it later.
	 */
	t->tqent_flags |= TQENT_FLAG_PREALLOC;

	/* Queue to the priority list instead of the pending list */
	if (flags & TQ_FRONT)
		list_add_tail(&t->tqent_list, &tq->tq_prio_list);
	else
		list_add_tail(&t->tqent_list, &tq->tq_pend_list);

	t->tqent_id = tq->tq_next_id;
	tq->tq_next_id++;
	t->tqent_func = func;
	t->tqent_arg = arg;
	t->tqent_taskq = tq;

	spin_unlock(&t->tqent_lock);

	wake_up(&tq->tq_work_waitq);
out:
	/* Spawn additional taskq threads if required. */
	if (tq->tq_nactive == tq->tq_nthreads)
		(void) taskq_thread_spawn(tq);
	spin_unlock_irqrestore(&tq->tq_lock, irqflags);
}
EXPORT_SYMBOL(taskq_dispatch_ent);
int
taskq_empty_ent(taskq_ent_t *t)
{
	return (list_empty(&t->tqent_list));
}
EXPORT_SYMBOL(taskq_empty_ent);
void
taskq_init_ent(taskq_ent_t *t)
{
	spin_lock_init(&t->tqent_lock);
	init_waitqueue_head(&t->tqent_waitq);
	init_timer(&t->tqent_timer);
	INIT_LIST_HEAD(&t->tqent_list);
	t->tqent_id = 0;
	t->tqent_func = NULL;
	t->tqent_arg = NULL;
	t->tqent_flags = 0;
	t->tqent_taskq = NULL;
}
EXPORT_SYMBOL(taskq_init_ent);
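
/*
 * taskq_dispatch_ent() never allocates; the caller embeds a taskq_ent_t in
 * one of its own objects, initializes it with taskq_init_ent(), and retains
 * ownership of it. A minimal sketch where my_obj and my_func are
 * illustrative placeholders:
 *
 *   taskq_init_ent(&my_obj->ent);
 *   ...
 *   taskq_dispatch_ent(tq, my_func, my_obj, 0, &my_obj->ent);
 */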
/*
 * Return the next pending task, preference is given to tasks on the
 * priority list which were dispatched with TQ_FRONT.
 */
static taskq_ent_t *
taskq_next_ent(taskq_t *tq)
{
	struct list_head *list;

	ASSERT(spin_is_locked(&tq->tq_lock));

	if (!list_empty(&tq->tq_prio_list))
		list = &tq->tq_prio_list;
	else if (!list_empty(&tq->tq_pend_list))
		list = &tq->tq_pend_list;
	else
		return (NULL);

	return (list_entry(list->next, taskq_ent_t, tqent_list));
}
/*
 * Spawns a new thread for the specified taskq.
 */
static void
taskq_thread_spawn_task(void *arg)
{
	taskq_t *tq = (taskq_t *)arg;
	unsigned long flags;

	(void) taskq_thread_create(tq);

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	tq->tq_nspawn--;
	spin_unlock_irqrestore(&tq->tq_lock, flags);
}
/*
 * Spawn additional threads for dynamic taskqs (TASKQ_DYNAMIC) when the
 * current number of threads is insufficient to handle the pending tasks.
 * These new threads must be created by the dedicated dynamic_taskq to
 * avoid deadlocks between thread creation and memory reclaim. The
 * system_taskq which is also a dynamic taskq cannot be safely used for this.
 */
static int
taskq_thread_spawn(taskq_t *tq)
{
	int spawning = 0;

	if (!(tq->tq_flags & TASKQ_DYNAMIC))
		return (0);

	if ((tq->tq_nthreads + tq->tq_nspawn < tq->tq_maxthreads) &&
	    (tq->tq_flags & TASKQ_ACTIVE)) {
		spawning = (++tq->tq_nspawn);
		taskq_dispatch(dynamic_taskq, taskq_thread_spawn_task,
		    tq, TQ_NOSLEEP);
	}

	return (spawning);
}
/*
 * Threads in a dynamic taskq should only exit once it has been completely
 * drained and no other threads are actively servicing tasks. This prevents
 * threads from being created and destroyed more than is required.
 *
 * The first thread in the thread list is treated as the primary thread.
 * There is nothing special about the primary thread, but to avoid all of
 * the taskq pids changing we opt to make it long running.
 */
static int
taskq_thread_should_stop(taskq_t *tq, taskq_thread_t *tqt)
{
	ASSERT(spin_is_locked(&tq->tq_lock));

	if (!(tq->tq_flags & TASKQ_DYNAMIC))
		return (0);

	if (list_first_entry(&(tq->tq_thread_list), taskq_thread_t,
	    tqt_thread_list) == tqt)
		return (0);

	return
	    ((tq->tq_nspawn == 0) &&	/* No threads are being spawned */
	    (tq->tq_nactive == 0) &&	/* No threads are handling tasks */
	    (tq->tq_nthreads > 1) &&	/* More than 1 thread is running */
	    (!taskq_next_ent(tq)) &&	/* There are no pending tasks */
	    (spl_taskq_thread_dynamic));	/* Dynamic taskqs are allowed */
}
static int
taskq_thread(void *args)
{
	DECLARE_WAITQUEUE(wait, current);
	sigset_t blocked;
	taskq_thread_t *tqt = args;
	taskq_t *tq;
	taskq_ent_t *t;
	int seq_tasks = 0;
	unsigned long flags;

	ASSERT(tqt);
	ASSERT(tqt->tqt_tq);
	tq = tqt->tqt_tq;
	current->flags |= PF_NOFREEZE;

	(void) spl_fstrans_mark();

	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);

	/* Immediately exit if more threads than allowed were created. */
	if (tq->tq_nthreads >= tq->tq_maxthreads)
		goto error;

	tq->tq_nthreads++;
	list_add_tail(&tqt->tqt_thread_list, &tq->tq_thread_list);
	wake_up(&tq->tq_wait_waitq);
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {

		if (list_empty(&tq->tq_pend_list) &&
		    list_empty(&tq->tq_prio_list)) {

			if (taskq_thread_should_stop(tq, tqt)) {
				wake_up_all(&tq->tq_wait_waitq);
				break;
			}

			add_wait_queue_exclusive(&tq->tq_work_waitq, &wait);
			spin_unlock_irqrestore(&tq->tq_lock, flags);

			schedule();
			seq_tasks = 0;

			spin_lock_irqsave_nested(&tq->tq_lock, flags,
			    tq->tq_lock_class);
			remove_wait_queue(&tq->tq_work_waitq, &wait);
		} else {
			__set_current_state(TASK_RUNNING);
		}

		if ((t = taskq_next_ent(tq)) != NULL) {
			list_del_init(&t->tqent_list);

			/*
			 * In order to support recursively dispatching a
			 * preallocated taskq_ent_t, tqent_id must be
			 * stored prior to executing tqent_func.
			 */
			tqt->tqt_id = t->tqent_id;
			tqt->tqt_task = t;

			/*
			 * We must store a copy of the flags prior to
			 * servicing the task (servicing a prealloc'd task
			 * returns the ownership of the tqent back to
			 * the caller of taskq_dispatch). Thus,
			 * tqent_flags _may_ change within the call.
			 */
			tqt->tqt_flags = t->tqent_flags;

			taskq_insert_in_order(tq, tqt);
			tq->tq_nactive++;
			spin_unlock_irqrestore(&tq->tq_lock, flags);

			/* Perform the requested task */
			t->tqent_func(t->tqent_arg);

			spin_lock_irqsave_nested(&tq->tq_lock, flags,
			    tq->tq_lock_class);
			tq->tq_nactive--;
			list_del_init(&tqt->tqt_active_list);
			tqt->tqt_task = NULL;

			/* For prealloc'd tasks, we don't free anything. */
			if (!(tqt->tqt_flags & TQENT_FLAG_PREALLOC))
				task_done(tq, t);

			/*
			 * When the current lowest outstanding taskqid is
			 * done, calculate the new lowest outstanding id.
			 */
			if (tq->tq_lowest_id == tqt->tqt_id) {
				tq->tq_lowest_id = taskq_lowest_id(tq);
				ASSERT3S(tq->tq_lowest_id, >, tqt->tqt_id);
			}

			/* Spawn additional taskq threads if required. */
			if ((++seq_tasks) > spl_taskq_thread_sequential &&
			    taskq_thread_spawn(tq))
				seq_tasks = 0;

			tqt->tqt_id = 0;
			tqt->tqt_flags = 0;
			wake_up_all(&tq->tq_wait_waitq);
		} else {
			if (taskq_thread_should_stop(tq, tqt))
				break;
		}

		set_current_state(TASK_INTERRUPTIBLE);
	}

	__set_current_state(TASK_RUNNING);
	tq->tq_nthreads--;
	list_del_init(&tqt->tqt_thread_list);
error:
	kmem_free(tqt, sizeof (taskq_thread_t));
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	return (0);
}
static taskq_thread_t *
taskq_thread_create(taskq_t *tq)
{
	static int last_used_cpu = 0;
	taskq_thread_t *tqt;

	tqt = kmem_alloc(sizeof (*tqt), KM_PUSHPAGE);
	INIT_LIST_HEAD(&tqt->tqt_thread_list);
	INIT_LIST_HEAD(&tqt->tqt_active_list);
	tqt->tqt_tq = tq;
	tqt->tqt_id = 0;

	tqt->tqt_thread = spl_kthread_create(taskq_thread, tqt,
	    "%s", tq->tq_name);
	if (tqt->tqt_thread == NULL) {
		kmem_free(tqt, sizeof (taskq_thread_t));
		return (NULL);
	}

	if (spl_taskq_thread_bind) {
		last_used_cpu = (last_used_cpu + 1) % num_online_cpus();
		kthread_bind(tqt->tqt_thread, last_used_cpu);
	}

	if (spl_taskq_thread_priority)
		set_user_nice(tqt->tqt_thread, PRIO_TO_NICE(tq->tq_pri));

	wake_up_process(tqt->tqt_thread);

	return (tqt);
}
taskq_t *
taskq_create(const char *name, int nthreads, pri_t pri,
    int minalloc, int maxalloc, uint_t flags)
{
	taskq_t *tq;
	taskq_thread_t *tqt;
	int count = 0, rc = 0, i;
	unsigned long irqflags;

	ASSERT(name != NULL);
	ASSERT(minalloc >= 0);
	ASSERT(maxalloc <= INT_MAX);
	ASSERT(!(flags & (TASKQ_CPR_SAFE))); /* Unsupported */

	/* Scale the number of threads using nthreads as a percentage */
	if (flags & TASKQ_THREADS_CPU_PCT) {
		ASSERT(nthreads <= 100);
		ASSERT(nthreads >= 0);
		nthreads = MIN(nthreads, 100);
		nthreads = MAX(nthreads, 0);
		nthreads = MAX((num_online_cpus() * nthreads) / 100, 1);
	}

	tq = kmem_alloc(sizeof (*tq), KM_PUSHPAGE);
	if (tq == NULL)
		return (NULL);

	spin_lock_init(&tq->tq_lock);
	INIT_LIST_HEAD(&tq->tq_thread_list);
	INIT_LIST_HEAD(&tq->tq_active_list);
	tq->tq_name = strdup(name);
	tq->tq_nactive = 0;
	tq->tq_nthreads = 0;
	tq->tq_nspawn = 0;
	tq->tq_maxthreads = nthreads;
	tq->tq_pri = pri;
	tq->tq_minalloc = minalloc;
	tq->tq_maxalloc = maxalloc;
	tq->tq_nalloc = 0;
	tq->tq_flags = (flags | TASKQ_ACTIVE);
	tq->tq_next_id = 1;
	tq->tq_lowest_id = 1;
	INIT_LIST_HEAD(&tq->tq_free_list);
	INIT_LIST_HEAD(&tq->tq_pend_list);
	INIT_LIST_HEAD(&tq->tq_prio_list);
	INIT_LIST_HEAD(&tq->tq_delay_list);
	init_waitqueue_head(&tq->tq_work_waitq);
	init_waitqueue_head(&tq->tq_wait_waitq);
	tq->tq_lock_class = TQ_LOCK_GENERAL;
	INIT_LIST_HEAD(&tq->tq_taskqs);

	if (flags & TASKQ_PREPOPULATE) {
		spin_lock_irqsave_nested(&tq->tq_lock, irqflags,
		    tq->tq_lock_class);

		for (i = 0; i < minalloc; i++)
			task_done(tq, task_alloc(tq, TQ_PUSHPAGE | TQ_NEW,
			    &irqflags));

		spin_unlock_irqrestore(&tq->tq_lock, irqflags);
	}

	if ((flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic)
		nthreads = 1;

	for (i = 0; i < nthreads; i++) {
		tqt = taskq_thread_create(tq);
		if (tqt == NULL)
			rc = 1;
		else
			count++;
	}

	/* Wait for all threads to be started before potential destroy */
	wait_event(tq->tq_wait_waitq, tq->tq_nthreads == count);

	if (rc) {
		taskq_destroy(tq);
		tq = NULL;
	} else {
		down_write(&tq_list_sem);
		tq->tq_instance = taskq_find_by_name(name) + 1;
		list_add_tail(&tq->tq_taskqs, &tq_list);
		up_write(&tq_list_sem);
	}

	return (tq);
}
EXPORT_SYMBOL(taskq_create);
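
/*
 * A representative consumer creating and tearing down a private queue with
 * four worker threads; the name and sizes are illustrative:
 *
 *   taskq_t *tq = taskq_create("my_taskq", 4, maxclsyspri,
 *       4, INT_MAX, TASKQ_PREPOPULATE);
 *   ...
 *   taskq_destroy(tq);
 *
 * With TASKQ_THREADS_CPU_PCT the nthreads argument is instead interpreted
 * as a percentage of the online CPUs, as handled at the top of
 * taskq_create() above.
 */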
void
taskq_destroy(taskq_t *tq)
{
	struct task_struct *thread;
	taskq_thread_t *tqt;
	taskq_ent_t *t;
	unsigned long flags;

	ASSERT(tq);
	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	tq->tq_flags &= ~TASKQ_ACTIVE;
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	/*
	 * When TASKQ_ACTIVE is clear new tasks may not be added nor may
	 * new worker threads be spawned for dynamic taskqs.
	 */
	if (dynamic_taskq != NULL)
		taskq_wait_outstanding(dynamic_taskq, 0);

	taskq_wait(tq);

	/* Remove taskq from the global list used by the kstats */
	down_write(&tq_list_sem);
	list_del(&tq->tq_taskqs);
	up_write(&tq_list_sem);

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);

	/*
	 * Signal each thread to exit and block until it does. Each thread
	 * is responsible for removing itself from the list and freeing its
	 * taskq_thread_t. This allows for idle threads to opt to remove
	 * themselves from the taskq. They can be recreated as needed.
	 */
	while (!list_empty(&tq->tq_thread_list)) {
		tqt = list_entry(tq->tq_thread_list.next,
		    taskq_thread_t, tqt_thread_list);
		thread = tqt->tqt_thread;
		spin_unlock_irqrestore(&tq->tq_lock, flags);

		kthread_stop(thread);

		spin_lock_irqsave_nested(&tq->tq_lock, flags,
		    tq->tq_lock_class);
	}

	while (!list_empty(&tq->tq_free_list)) {
		t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);

		ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

		list_del_init(&t->tqent_list);
		task_free(tq, t);
	}

	ASSERT0(tq->tq_nthreads);
	ASSERT0(tq->tq_nalloc);
	ASSERT0(tq->tq_nspawn);
	ASSERT(list_empty(&tq->tq_thread_list));
	ASSERT(list_empty(&tq->tq_active_list));
	ASSERT(list_empty(&tq->tq_free_list));
	ASSERT(list_empty(&tq->tq_pend_list));
	ASSERT(list_empty(&tq->tq_prio_list));
	ASSERT(list_empty(&tq->tq_delay_list));

	spin_unlock_irqrestore(&tq->tq_lock, flags);

	strfree(tq->tq_name);
	kmem_free(tq, sizeof (taskq_t));
}
EXPORT_SYMBOL(taskq_destroy);
int
spl_taskq_init(void)
{
	system_taskq = taskq_create("spl_system_taskq", MAX(boot_ncpus, 64),
	    maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE|TASKQ_DYNAMIC);
	if (system_taskq == NULL)
		return (1);

	dynamic_taskq = taskq_create("spl_dynamic_taskq", 1,
	    maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE);
	if (dynamic_taskq == NULL) {
		taskq_destroy(system_taskq);
		return (1);
	}

	/*
	 * This is used to annotate tq_lock, so
	 *   taskq_dispatch -> taskq_thread_spawn -> taskq_dispatch
	 * does not trigger a lockdep warning re: possible recursive locking
	 */
	dynamic_taskq->tq_lock_class = TQ_LOCK_DYNAMIC;

	return (0);
}
void
spl_taskq_fini(void)
{
	taskq_destroy(dynamic_taskq);
	dynamic_taskq = NULL;

	taskq_destroy(system_taskq);
	system_taskq = NULL;
}