/*
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://zfsonlinux.org/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *
 *  Solaris Porting Layer (SPL) Task Queue Implementation.
 */
#include <sys/taskq.h>
#include <sys/kmem.h>
#include <sys/tsd.h>
int spl_taskq_thread_bind = 0;
module_param(spl_taskq_thread_bind, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_bind, "Bind taskq thread to CPU by default");

int spl_taskq_thread_dynamic = 1;
module_param(spl_taskq_thread_dynamic, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_dynamic, "Allow dynamic taskq threads");

int spl_taskq_thread_priority = 1;
module_param(spl_taskq_thread_priority, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_priority,
	"Allow non-default priority for taskq threads");

int spl_taskq_thread_sequential = 4;
module_param(spl_taskq_thread_sequential, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_sequential,
	"Create new taskq threads after N sequential tasks");
/* Global system-wide dynamic task queue available for all consumers */
taskq_t *system_taskq;
EXPORT_SYMBOL(system_taskq);

/* Global dynamic task queue for long delay */
taskq_t *system_delay_taskq;
EXPORT_SYMBOL(system_delay_taskq);
/* Private dedicated taskq for creating new taskq threads on demand. */
static taskq_t *dynamic_taskq;
static taskq_thread_t *taskq_thread_create(taskq_t *);

/* List of all taskqs */
LIST_HEAD(tq_list);
DECLARE_RWSEM(tq_list_sem);
static uint_t taskq_tsd;
static int
task_km_flags(uint_t flags)
{
	if (flags & TQ_NOSLEEP)
		return (KM_NOSLEEP);

	if (flags & TQ_PUSHPAGE)
		return (KM_PUSHPAGE);

	return (KM_SLEEP);
}
/*
 * taskq_find_by_name - Find the largest instance number of a named taskq.
 */
static int
taskq_find_by_name(const char *name)
{
	struct list_head *tql;
	taskq_t *tq;

	list_for_each_prev(tql, &tq_list) {
		tq = list_entry(tql, taskq_t, tq_taskqs);
		if (strcmp(name, tq->tq_name) == 0)
			return (tq->tq_instance);
	}

	return (-1);
}
/*
 * NOTE: Must be called with tq->tq_lock held, returns a list_t which
 * is not attached to the free, work, or pending taskq lists.
 */
static taskq_ent_t *
task_alloc(taskq_t *tq, uint_t flags, unsigned long *irqflags)
{
	taskq_ent_t *t;
	int count = 0;

	ASSERT(tq);
	ASSERT(spin_is_locked(&tq->tq_lock));
retry:
	/* Acquire taskq_ent_t's from free list if available */
	if (!list_empty(&tq->tq_free_list) && !(flags & TQ_NEW)) {
		t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);

		ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));
		ASSERT(!(t->tqent_flags & TQENT_FLAG_CANCEL));
		ASSERT(!timer_pending(&t->tqent_timer));

		list_del_init(&t->tqent_list);
		return (t);
	}

	/* Free list is empty and memory allocations are prohibited */
	if (flags & TQ_NOALLOC)
		return (NULL);

	/* Hit maximum taskq_ent_t pool size */
	if (tq->tq_nalloc >= tq->tq_maxalloc) {
		if (flags & TQ_NOSLEEP)
			return (NULL);

		/*
		 * Sleep periodically polling the free list for an available
		 * taskq_ent_t. Dispatching with TQ_SLEEP should always succeed
		 * but we cannot block forever waiting for a taskq_ent_t to
		 * show up in the free list, otherwise a deadlock can happen.
		 *
		 * Therefore, we need to allocate a new task even if the number
		 * of allocated tasks is above tq->tq_maxalloc, but we still
		 * end up delaying the task allocation by one second, thereby
		 * throttling the task dispatch rate.
		 */
		spin_unlock_irqrestore(&tq->tq_lock, *irqflags);
		schedule_timeout(HZ / 100);
		spin_lock_irqsave_nested(&tq->tq_lock, *irqflags,
		    tq->tq_lock_class);
		if (count < 100) {
			count++;
			goto retry;
		}
	}

	spin_unlock_irqrestore(&tq->tq_lock, *irqflags);
	t = kmem_alloc(sizeof (taskq_ent_t), task_km_flags(flags));
	spin_lock_irqsave_nested(&tq->tq_lock, *irqflags, tq->tq_lock_class);

	if (t) {
		taskq_init_ent(t);
		tq->tq_nalloc++;
	}

	return (t);
}
/*
 * NOTE: Must be called with tq->tq_lock held, expects the taskq_ent_t
 * to already be removed from the free, work, or pending taskq lists.
 */
static void
task_free(taskq_t *tq, taskq_ent_t *t)
{
	ASSERT(tq);
	ASSERT(t);
	ASSERT(spin_is_locked(&tq->tq_lock));
	ASSERT(list_empty(&t->tqent_list));
	ASSERT(!timer_pending(&t->tqent_timer));

	kmem_free(t, sizeof (taskq_ent_t));
	tq->tq_nalloc--;
}
/*
 * NOTE: Must be called with tq->tq_lock held, either destroys the
 * taskq_ent_t if too many exist or moves it to the free list for later use.
 */
static void
task_done(taskq_t *tq, taskq_ent_t *t)
{
	ASSERT(tq);
	ASSERT(t);
	ASSERT(spin_is_locked(&tq->tq_lock));

	/* Wake tasks blocked in taskq_wait_id() */
	wake_up_all(&t->tqent_waitq);

	list_del_init(&t->tqent_list);

	if (tq->tq_nalloc <= tq->tq_minalloc) {
		t->tqent_id = TASKQID_INVALID;
		t->tqent_func = NULL;
		t->tqent_arg = NULL;
		t->tqent_flags = 0;

		list_add_tail(&t->tqent_list, &tq->tq_free_list);
	} else {
		task_free(tq, t);
	}
}
/*
 * When a delayed task timer expires remove it from the delay list and
 * add it to the priority list in order for immediate processing.
 */
static void
task_expire(unsigned long data)
{
	taskq_ent_t *w, *t = (taskq_ent_t *)data;
	taskq_t *tq = t->tqent_taskq;
	struct list_head *l;
	unsigned long flags;

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);

	if (t->tqent_flags & TQENT_FLAG_CANCEL) {
		ASSERT(list_empty(&t->tqent_list));
		spin_unlock_irqrestore(&tq->tq_lock, flags);
		return;
	}

	t->tqent_birth = jiffies;
	/*
	 * The priority list must be maintained in strict task id order
	 * from lowest to highest for lowest_id to be easily calculable.
	 */
	list_del(&t->tqent_list);
	list_for_each_prev(l, &tq->tq_prio_list) {
		w = list_entry(l, taskq_ent_t, tqent_list);
		if (w->tqent_id < t->tqent_id) {
			list_add(&t->tqent_list, l);
			break;
		}
	}
	if (l == &tq->tq_prio_list)
		list_add(&t->tqent_list, &tq->tq_prio_list);

	spin_unlock_irqrestore(&tq->tq_lock, flags);

	wake_up(&tq->tq_work_waitq);
}
/*
 * Returns the lowest incomplete taskqid_t. The taskqid_t may
 * be queued on the pending list, on the priority list, on the
 * delay list, or on the work list currently being handled, but
 * it is not 100% complete yet.
 */
static taskqid_t
taskq_lowest_id(taskq_t *tq)
{
	taskqid_t lowest_id = tq->tq_next_id;
	taskq_ent_t *t;
	taskq_thread_t *tqt;

	ASSERT(tq);
	ASSERT(spin_is_locked(&tq->tq_lock));

	if (!list_empty(&tq->tq_pend_list)) {
		t = list_entry(tq->tq_pend_list.next, taskq_ent_t, tqent_list);
		lowest_id = MIN(lowest_id, t->tqent_id);
	}

	if (!list_empty(&tq->tq_prio_list)) {
		t = list_entry(tq->tq_prio_list.next, taskq_ent_t, tqent_list);
		lowest_id = MIN(lowest_id, t->tqent_id);
	}

	if (!list_empty(&tq->tq_delay_list)) {
		t = list_entry(tq->tq_delay_list.next, taskq_ent_t, tqent_list);
		lowest_id = MIN(lowest_id, t->tqent_id);
	}

	if (!list_empty(&tq->tq_active_list)) {
		tqt = list_entry(tq->tq_active_list.next, taskq_thread_t,
		    tqt_active_list);
		ASSERT(tqt->tqt_id != TASKQID_INVALID);
		lowest_id = MIN(lowest_id, tqt->tqt_id);
	}

	return (lowest_id);
}
/*
 * Insert a task into a list keeping the list sorted by increasing taskqid.
 */
static void
taskq_insert_in_order(taskq_t *tq, taskq_thread_t *tqt)
{
	taskq_thread_t *w;
	struct list_head *l;

	ASSERT(tq);
	ASSERT(tqt);
	ASSERT(spin_is_locked(&tq->tq_lock));

	list_for_each_prev(l, &tq->tq_active_list) {
		w = list_entry(l, taskq_thread_t, tqt_active_list);
		if (w->tqt_id < tqt->tqt_id) {
			list_add(&tqt->tqt_active_list, l);
			break;
		}
	}
	if (l == &tq->tq_active_list)
		list_add(&tqt->tqt_active_list, &tq->tq_active_list);
}
/*
 * Find and return a task from the given list if it exists. The list
 * must be in lowest to highest task id order.
 */
static taskq_ent_t *
taskq_find_list(taskq_t *tq, struct list_head *lh, taskqid_t id)
{
	struct list_head *l;
	taskq_ent_t *t;

	ASSERT(spin_is_locked(&tq->tq_lock));

	list_for_each(l, lh) {
		t = list_entry(l, taskq_ent_t, tqent_list);

		if (t->tqent_id == id)
			return (t);

		if (t->tqent_id > id)
			break;
	}

	return (NULL);
}
/*
 * Find an already dispatched task given the task id regardless of what
 * state it is in. If a task is still pending it will be returned.
 * If a task is executing, then -EBUSY will be returned instead.
 * If the task has already been run then NULL is returned.
 */
static taskq_ent_t *
taskq_find(taskq_t *tq, taskqid_t id)
{
	taskq_thread_t *tqt;
	struct list_head *l;
	taskq_ent_t *t;

	ASSERT(spin_is_locked(&tq->tq_lock));

	t = taskq_find_list(tq, &tq->tq_delay_list, id);
	if (t)
		return (t);

	t = taskq_find_list(tq, &tq->tq_prio_list, id);
	if (t)
		return (t);

	t = taskq_find_list(tq, &tq->tq_pend_list, id);
	if (t)
		return (t);

	list_for_each(l, &tq->tq_active_list) {
		tqt = list_entry(l, taskq_thread_t, tqt_active_list);
		if (tqt->tqt_id == id) {
			/*
			 * Instead of returning tqt_task, we just return a non
			 * NULL value to prevent misuse, since tqt_task only
			 * has two valid fields.
			 */
			return (ERR_PTR(-EBUSY));
		}
	}

	return (NULL);
}
/*
 * Theory for the taskq_wait_id(), taskq_wait_outstanding(), and
 * taskq_wait() functions below.
 *
 * Taskq waiting is accomplished by tracking the lowest outstanding task
 * id and the next available task id. As tasks are dispatched they are
 * added to the tail of the pending, priority, or delay lists. As worker
 * threads become available the tasks are removed from the heads of these
 * lists and linked to the worker threads. This ensures the lists are
 * kept sorted by lowest to highest task id.
 *
 * Therefore the lowest outstanding task id can be quickly determined by
 * checking the head item from all of these lists. This value is stored
 * with the taskq as the lowest id. It only needs to be recalculated when
 * either the task with the current lowest id completes or is canceled.
 *
 * By blocking until the lowest task id exceeds the passed task id the
 * taskq_wait_outstanding() function can be easily implemented. Similarly,
 * by blocking until the lowest task id matches the next task id taskq_wait()
 * can be implemented.
 *
 * Callers should be aware that when there are multiple worker threads it
 * is possible for larger task ids to complete before smaller ones. Also
 * when the taskq contains delay tasks with small task ids callers may
 * block for a considerable length of time waiting for them to expire and
 * execute.
 */
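/*
 * Illustrative sketch (not part of the original source) of how the wait
 * primitives below compose for a caller; the identifiers "my_tq",
 * "my_func", and "my_arg" are hypothetical.
 *
 *	taskqid_t id = taskq_dispatch(my_tq, my_func, my_arg, TQ_SLEEP);
 *	if (id != TASKQID_INVALID) {
 *		taskq_wait_id(my_tq, id);		// this one task only
 *		taskq_wait_outstanding(my_tq, id);	// earlier ids as well
 *	}
 *	taskq_wait(my_tq);				// drain the whole taskq
 */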
static int
taskq_wait_id_check(taskq_t *tq, taskqid_t id)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	rc = (taskq_find(tq, id) == NULL);
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	return (rc);
}

/*
 * The taskq_wait_id() function blocks until the passed task id completes.
 * This does not guarantee that all lower task ids have completed.
 */
void
taskq_wait_id(taskq_t *tq, taskqid_t id)
{
	wait_event(tq->tq_wait_waitq, taskq_wait_id_check(tq, id));
}
EXPORT_SYMBOL(taskq_wait_id);
static int
taskq_wait_outstanding_check(taskq_t *tq, taskqid_t id)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	rc = (id < tq->tq_lowest_id);
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	return (rc);
}

/*
 * The taskq_wait_outstanding() function will block until all tasks with a
 * lower taskqid than the passed 'id' have been completed. Note that all
 * task id's are assigned monotonically at dispatch time. Zero may be
 * passed for the id to indicate all tasks dispatched up to this point,
 * but not after, should be waited for.
 */
void
taskq_wait_outstanding(taskq_t *tq, taskqid_t id)
{
	id = id ? id : tq->tq_next_id - 1;
	wait_event(tq->tq_wait_waitq, taskq_wait_outstanding_check(tq, id));
}
EXPORT_SYMBOL(taskq_wait_outstanding);
static int
taskq_wait_check(taskq_t *tq)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	rc = (tq->tq_lowest_id == tq->tq_next_id);
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	return (rc);
}

/*
 * The taskq_wait() function will block until the taskq is empty.
 * This means that if a taskq re-dispatches work to itself taskq_wait()
 * callers will block indefinitely.
 */
void
taskq_wait(taskq_t *tq)
{
	wait_event(tq->tq_wait_waitq, taskq_wait_check(tq));
}
EXPORT_SYMBOL(taskq_wait);
int
taskq_member(taskq_t *tq, kthread_t *t)
{
	return (tq == (taskq_t *)tsd_get_by_thread(taskq_tsd, t));
}
EXPORT_SYMBOL(taskq_member);
/*
 * Cancel an already dispatched task given the task id. Still pending tasks
 * will be immediately canceled, and if the task is active the function will
 * block until it completes. Preallocated tasks which are canceled must be
 * freed by the caller.
 */
int
taskq_cancel_id(taskq_t *tq, taskqid_t id)
{
	taskq_ent_t *t;
	int rc = ENOENT;
	unsigned long flags;

	ASSERT(tq);

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	t = taskq_find(tq, id);
	if (t && t != ERR_PTR(-EBUSY)) {
		list_del_init(&t->tqent_list);
		t->tqent_flags |= TQENT_FLAG_CANCEL;

		/*
		 * When canceling the lowest outstanding task id we
		 * must recalculate the new lowest outstanding id.
		 */
		if (tq->tq_lowest_id == t->tqent_id) {
			tq->tq_lowest_id = taskq_lowest_id(tq);
			ASSERT3S(tq->tq_lowest_id, >, t->tqent_id);
		}

		/*
		 * The task_expire() function takes the tq->tq_lock so drop
		 * the lock before synchronously cancelling the timer.
		 */
		if (timer_pending(&t->tqent_timer)) {
			spin_unlock_irqrestore(&tq->tq_lock, flags);
			del_timer_sync(&t->tqent_timer);
			spin_lock_irqsave_nested(&tq->tq_lock, flags,
			    tq->tq_lock_class);
		}

		if (!(t->tqent_flags & TQENT_FLAG_PREALLOC))
			task_done(tq, t);

		rc = 0;
	}
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	if (t == ERR_PTR(-EBUSY)) {
		taskq_wait_id(tq, id);
		rc = EBUSY;
	}

	return (rc);
}
EXPORT_SYMBOL(taskq_cancel_id);
static int taskq_thread_spawn(taskq_t *tq);
taskqid_t
taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
{
	taskq_ent_t *t;
	taskqid_t rc = TASKQID_INVALID;
	unsigned long irqflags;

	ASSERT(tq);
	ASSERT(func);

	spin_lock_irqsave_nested(&tq->tq_lock, irqflags, tq->tq_lock_class);

	/* Taskq being destroyed and all tasks drained */
	if (!(tq->tq_flags & TASKQ_ACTIVE))
		goto out;

	/* Do not queue the task unless there is an idle thread for it */
	ASSERT(tq->tq_nactive <= tq->tq_nthreads);
	if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads)) {
		/* Dynamic taskq may be able to spawn another thread */
		if (!(tq->tq_flags & TASKQ_DYNAMIC) ||
		    taskq_thread_spawn(tq) == 0)
			goto out;
	}

	if ((t = task_alloc(tq, flags, &irqflags)) == NULL)
		goto out;

	spin_lock(&t->tqent_lock);

	/* Queue to the front of the list to enforce TQ_NOQUEUE semantics */
	if (flags & TQ_NOQUEUE)
		list_add(&t->tqent_list, &tq->tq_prio_list);
	/* Queue to the priority list instead of the pending list */
	else if (flags & TQ_FRONT)
		list_add_tail(&t->tqent_list, &tq->tq_prio_list);
	else
		list_add_tail(&t->tqent_list, &tq->tq_pend_list);

	t->tqent_id = rc = tq->tq_next_id;
	tq->tq_next_id++;
	t->tqent_func = func;
	t->tqent_arg = arg;
	t->tqent_taskq = tq;
	t->tqent_timer.data = 0;
	t->tqent_timer.function = NULL;
	t->tqent_timer.expires = 0;
	t->tqent_birth = jiffies;

	ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

	spin_unlock(&t->tqent_lock);

	wake_up(&tq->tq_work_waitq);
out:
	/* Spawn additional taskq threads if required. */
	if (!(flags & TQ_NOQUEUE) && tq->tq_nactive == tq->tq_nthreads)
		(void) taskq_thread_spawn(tq);

	spin_unlock_irqrestore(&tq->tq_lock, irqflags);
	return (rc);
}
EXPORT_SYMBOL(taskq_dispatch);
taskqid_t
taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg,
    uint_t flags, clock_t expire_time)
{
	taskqid_t rc = TASKQID_INVALID;
	taskq_ent_t *t;
	unsigned long irqflags;

	ASSERT(tq);
	ASSERT(func);

	spin_lock_irqsave_nested(&tq->tq_lock, irqflags, tq->tq_lock_class);

	/* Taskq being destroyed and all tasks drained */
	if (!(tq->tq_flags & TASKQ_ACTIVE))
		goto out;

	if ((t = task_alloc(tq, flags, &irqflags)) == NULL)
		goto out;

	spin_lock(&t->tqent_lock);

	/* Queue to the delay list for subsequent execution */
	list_add_tail(&t->tqent_list, &tq->tq_delay_list);

	t->tqent_id = rc = tq->tq_next_id;
	tq->tq_next_id++;
	t->tqent_func = func;
	t->tqent_arg = arg;
	t->tqent_taskq = tq;
	t->tqent_timer.data = (unsigned long)t;
	t->tqent_timer.function = task_expire;
	t->tqent_timer.expires = (unsigned long)expire_time;
	add_timer(&t->tqent_timer);

	ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

	spin_unlock(&t->tqent_lock);
out:
	/* Spawn additional taskq threads if required. */
	if (tq->tq_nactive == tq->tq_nthreads)
		(void) taskq_thread_spawn(tq);
	spin_unlock_irqrestore(&tq->tq_lock, irqflags);
	return (rc);
}
EXPORT_SYMBOL(taskq_dispatch_delay);
void
taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
    taskq_ent_t *t)
{
	unsigned long irqflags;

	ASSERT(tq);
	ASSERT(func);

	spin_lock_irqsave_nested(&tq->tq_lock, irqflags,
	    tq->tq_lock_class);

	/* Taskq being destroyed and all tasks drained */
	if (!(tq->tq_flags & TASKQ_ACTIVE)) {
		t->tqent_id = TASKQID_INVALID;
		goto out;
	}

	if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads)) {
		/* Dynamic taskq may be able to spawn another thread */
		if (!(tq->tq_flags & TASKQ_DYNAMIC) ||
		    taskq_thread_spawn(tq) == 0) {
			t->tqent_id = TASKQID_INVALID;
			goto out;
		}
	}

	spin_lock(&t->tqent_lock);

	/*
	 * Make sure the entry is not on some other taskq; it is important to
	 * ASSERT() under lock.
	 */
	ASSERT(taskq_empty_ent(t));

	/*
	 * Mark it as a prealloc'd task. This is important
	 * to ensure that we don't free it later.
	 */
	t->tqent_flags |= TQENT_FLAG_PREALLOC;

	/* Queue to the priority list instead of the pending list */
	if (flags & TQ_FRONT)
		list_add_tail(&t->tqent_list, &tq->tq_prio_list);
	else
		list_add_tail(&t->tqent_list, &tq->tq_pend_list);

	t->tqent_id = tq->tq_next_id;
	tq->tq_next_id++;
	t->tqent_func = func;
	t->tqent_arg = arg;
	t->tqent_taskq = tq;
	t->tqent_birth = jiffies;

	spin_unlock(&t->tqent_lock);

	wake_up(&tq->tq_work_waitq);
out:
	/* Spawn additional taskq threads if required. */
	if (tq->tq_nactive == tq->tq_nthreads)
		(void) taskq_thread_spawn(tq);
	spin_unlock_irqrestore(&tq->tq_lock, irqflags);
}
EXPORT_SYMBOL(taskq_dispatch_ent);
int
taskq_empty_ent(taskq_ent_t *t)
{
	return (list_empty(&t->tqent_list));
}
EXPORT_SYMBOL(taskq_empty_ent);
void
taskq_init_ent(taskq_ent_t *t)
{
	spin_lock_init(&t->tqent_lock);
	init_waitqueue_head(&t->tqent_waitq);
	init_timer(&t->tqent_timer);
	INIT_LIST_HEAD(&t->tqent_list);
	t->tqent_id = 0;
	t->tqent_func = NULL;
	t->tqent_arg = NULL;
	t->tqent_flags = 0;
	t->tqent_taskq = NULL;
}
EXPORT_SYMBOL(taskq_init_ent);
/*
 * Return the next pending task, preference is given to tasks on the
 * priority list which were dispatched with TQ_FRONT.
 */
static taskq_ent_t *
taskq_next_ent(taskq_t *tq)
{
	struct list_head *list;

	ASSERT(spin_is_locked(&tq->tq_lock));

	if (!list_empty(&tq->tq_prio_list))
		list = &tq->tq_prio_list;
	else if (!list_empty(&tq->tq_pend_list))
		list = &tq->tq_pend_list;
	else
		return (NULL);

	return (list_entry(list->next, taskq_ent_t, tqent_list));
}
/*
 * Spawns a new thread for the specified taskq.
 */
static void
taskq_thread_spawn_task(void *arg)
{
	taskq_t *tq = (taskq_t *)arg;
	unsigned long flags;

	if (taskq_thread_create(tq) == NULL) {
		/* restore spawning count if failed */
		spin_lock_irqsave_nested(&tq->tq_lock, flags,
		    tq->tq_lock_class);
		tq->tq_nspawn--;
		spin_unlock_irqrestore(&tq->tq_lock, flags);
	}
}
/*
 * Spawn additional threads for dynamic taskqs (TASKQ_DYNAMIC) when the
 * current number of threads is insufficient to handle the pending tasks.
 * These new threads must be created by the dedicated dynamic_taskq to
 * avoid deadlocks between thread creation and memory reclaim. The
 * system_taskq which is also a dynamic taskq cannot be safely used for this.
 */
static int
taskq_thread_spawn(taskq_t *tq)
{
	int spawning = 0;

	if (!(tq->tq_flags & TASKQ_DYNAMIC))
		return (0);

	if ((tq->tq_nthreads + tq->tq_nspawn < tq->tq_maxthreads) &&
	    (tq->tq_flags & TASKQ_ACTIVE)) {
		spawning = (++tq->tq_nspawn);
		taskq_dispatch(dynamic_taskq, taskq_thread_spawn_task,
		    tq, TQ_NOSLEEP);
	}

	return (spawning);
}
/*
 * Threads in a dynamic taskq should only exit once it has been completely
 * drained and no other threads are actively servicing tasks. This prevents
 * threads from being created and destroyed more than is required.
 *
 * The first thread in the thread list is treated as the primary thread.
 * There is nothing special about the primary thread, but in order to avoid
 * all the taskq pids from changing we opt to make it long running.
 */
static int
taskq_thread_should_stop(taskq_t *tq, taskq_thread_t *tqt)
{
	ASSERT(spin_is_locked(&tq->tq_lock));

	if (!(tq->tq_flags & TASKQ_DYNAMIC))
		return (0);

	if (list_first_entry(&(tq->tq_thread_list), taskq_thread_t,
	    tqt_thread_list) == tqt)
		return (0);

	return
	    ((tq->tq_nspawn == 0) &&	/* No threads are being spawned */
	    (tq->tq_nactive == 0) &&	/* No threads are handling tasks */
	    (tq->tq_nthreads > 1) &&	/* More than 1 thread is running */
	    (!taskq_next_ent(tq)) &&	/* There are no pending tasks */
	    (spl_taskq_thread_dynamic));	/* Dynamic taskqs are allowed */
}
static int
taskq_thread(void *args)
{
	DECLARE_WAITQUEUE(wait, current);
	sigset_t blocked;
	taskq_thread_t *tqt = args;
	taskq_t *tq;
	taskq_ent_t *t;
	int seq_tasks = 0;
	unsigned long flags;
	taskq_ent_t dup_task = {};

	ASSERT(tqt);
	ASSERT(tqt->tqt_tq);
	tq = tqt->tqt_tq;
	current->flags |= PF_NOFREEZE;

	(void) spl_fstrans_mark();

	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	tsd_set(taskq_tsd, tq);
	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	/*
	 * If we are dynamically spawned, decrease spawning count. Note that
	 * we could be created during taskq_create, in which case we shouldn't
	 * do the decrement. But it's fine because taskq_create will reset
	 * tq_nspawn later.
	 */
	if (tq->tq_flags & TASKQ_DYNAMIC)
		tq->tq_nspawn--;

	/* Immediately exit if more threads than allowed were created. */
	if (tq->tq_nthreads >= tq->tq_maxthreads)
		goto error;

	tq->tq_nthreads++;
	list_add_tail(&tqt->tqt_thread_list, &tq->tq_thread_list);
	wake_up(&tq->tq_wait_waitq);
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {

		if (list_empty(&tq->tq_pend_list) &&
		    list_empty(&tq->tq_prio_list)) {

			if (taskq_thread_should_stop(tq, tqt)) {
				wake_up_all(&tq->tq_wait_waitq);
				break;
			}

			add_wait_queue_exclusive(&tq->tq_work_waitq, &wait);
			spin_unlock_irqrestore(&tq->tq_lock, flags);

			schedule();
			seq_tasks = 0;

			spin_lock_irqsave_nested(&tq->tq_lock, flags,
			    tq->tq_lock_class);
			remove_wait_queue(&tq->tq_work_waitq, &wait);
		} else {
			__set_current_state(TASK_RUNNING);
		}

		if ((t = taskq_next_ent(tq)) != NULL) {
			list_del_init(&t->tqent_list);

			/*
			 * A TQENT_FLAG_PREALLOC task may be reused or freed
			 * during the task function call. Store tqent_id and
			 * tqent_flags here.
			 *
			 * Also use an on stack taskq_ent_t for tqt_task
			 * assignment in this case. We only populate the two
			 * fields used by the only user in taskq proc file.
			 */
			tqt->tqt_id = t->tqent_id;
			tqt->tqt_flags = t->tqent_flags;

			if (t->tqent_flags & TQENT_FLAG_PREALLOC) {
				dup_task.tqent_func = t->tqent_func;
				dup_task.tqent_arg = t->tqent_arg;
				t = &dup_task;
			}
			tqt->tqt_task = t;

			taskq_insert_in_order(tq, tqt);
			tq->tq_nactive++;
			spin_unlock_irqrestore(&tq->tq_lock, flags);

			/* Perform the requested task */
			t->tqent_func(t->tqent_arg);

			spin_lock_irqsave_nested(&tq->tq_lock, flags,
			    tq->tq_lock_class);
			tq->tq_nactive--;
			list_del_init(&tqt->tqt_active_list);
			tqt->tqt_task = NULL;

			/* For prealloc'd tasks, we don't free anything. */
			if (!(tqt->tqt_flags & TQENT_FLAG_PREALLOC))
				task_done(tq, t);

			/*
			 * When the current lowest outstanding taskqid is
			 * done calculate the new lowest outstanding id.
			 */
			if (tq->tq_lowest_id == tqt->tqt_id) {
				tq->tq_lowest_id = taskq_lowest_id(tq);
				ASSERT3S(tq->tq_lowest_id, >, tqt->tqt_id);
			}

			/* Spawn additional taskq threads if required. */
			if ((++seq_tasks) > spl_taskq_thread_sequential &&
			    taskq_thread_spawn(tq))
				seq_tasks = 0;

			tqt->tqt_id = TASKQID_INVALID;
			tqt->tqt_flags = 0;
			wake_up_all(&tq->tq_wait_waitq);
		} else {
			if (taskq_thread_should_stop(tq, tqt))
				break;
		}

		set_current_state(TASK_INTERRUPTIBLE);

	}

	__set_current_state(TASK_RUNNING);
	tq->tq_nthreads--;
	list_del_init(&tqt->tqt_thread_list);
error:
	kmem_free(tqt, sizeof (taskq_thread_t));
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	tsd_set(taskq_tsd, NULL);

	return (0);
}
static taskq_thread_t *
taskq_thread_create(taskq_t *tq)
{
	static int last_used_cpu = 0;
	taskq_thread_t *tqt;

	tqt = kmem_alloc(sizeof (*tqt), KM_PUSHPAGE);
	INIT_LIST_HEAD(&tqt->tqt_thread_list);
	INIT_LIST_HEAD(&tqt->tqt_active_list);
	tqt->tqt_tq = tq;
	tqt->tqt_id = TASKQID_INVALID;

	tqt->tqt_thread = spl_kthread_create(taskq_thread, tqt,
	    "%s", tq->tq_name);
	if (tqt->tqt_thread == NULL) {
		kmem_free(tqt, sizeof (taskq_thread_t));
		return (NULL);
	}

	if (spl_taskq_thread_bind) {
		last_used_cpu = (last_used_cpu + 1) % num_online_cpus();
		kthread_bind(tqt->tqt_thread, last_used_cpu);
	}

	if (spl_taskq_thread_priority)
		set_user_nice(tqt->tqt_thread, PRIO_TO_NICE(tq->tq_pri));

	wake_up_process(tqt->tqt_thread);

	return (tqt);
}
taskq_t *
taskq_create(const char *name, int nthreads, pri_t pri,
    int minalloc, int maxalloc, uint_t flags)
{
	taskq_t *tq;
	taskq_thread_t *tqt;
	int count = 0, rc = 0, i;
	unsigned long irqflags;

	ASSERT(name != NULL);
	ASSERT(minalloc >= 0);
	ASSERT(maxalloc <= INT_MAX);
	ASSERT(!(flags & (TASKQ_CPR_SAFE))); /* Unsupported */

	/* Scale the number of threads using nthreads as a percentage */
	if (flags & TASKQ_THREADS_CPU_PCT) {
		ASSERT(nthreads <= 100);
		ASSERT(nthreads >= 0);
		nthreads = MIN(nthreads, 100);
		nthreads = MAX(nthreads, 0);
		nthreads = MAX((num_online_cpus() * nthreads) / 100, 1);
	}

	tq = kmem_alloc(sizeof (*tq), KM_PUSHPAGE);
	if (tq == NULL)
		return (NULL);

	spin_lock_init(&tq->tq_lock);
	INIT_LIST_HEAD(&tq->tq_thread_list);
	INIT_LIST_HEAD(&tq->tq_active_list);
	tq->tq_name = strdup(name);
	tq->tq_nactive = 0;
	tq->tq_nthreads = 0;
	tq->tq_nspawn = 0;
	tq->tq_maxthreads = nthreads;
	tq->tq_pri = pri;
	tq->tq_minalloc = minalloc;
	tq->tq_maxalloc = maxalloc;
	tq->tq_nalloc = 0;
	tq->tq_flags = (flags | TASKQ_ACTIVE);
	tq->tq_next_id = TASKQID_INITIAL;
	tq->tq_lowest_id = TASKQID_INITIAL;
	INIT_LIST_HEAD(&tq->tq_free_list);
	INIT_LIST_HEAD(&tq->tq_pend_list);
	INIT_LIST_HEAD(&tq->tq_prio_list);
	INIT_LIST_HEAD(&tq->tq_delay_list);
	init_waitqueue_head(&tq->tq_work_waitq);
	init_waitqueue_head(&tq->tq_wait_waitq);
	tq->tq_lock_class = TQ_LOCK_GENERAL;
	INIT_LIST_HEAD(&tq->tq_taskqs);

	if (flags & TASKQ_PREPOPULATE) {
		spin_lock_irqsave_nested(&tq->tq_lock, irqflags,
		    tq->tq_lock_class);

		for (i = 0; i < minalloc; i++)
			task_done(tq, task_alloc(tq, TQ_PUSHPAGE | TQ_NEW,
			    &irqflags));

		spin_unlock_irqrestore(&tq->tq_lock, irqflags);
	}

	if ((flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic)
		nthreads = 1;

	for (i = 0; i < nthreads; i++) {
		tqt = taskq_thread_create(tq);
		if (tqt == NULL)
			rc = 1;
		else
			count++;
	}

	/* Wait for all threads to be started before potential destroy */
	wait_event(tq->tq_wait_waitq, tq->tq_nthreads == count);
	/*
	 * taskq_thread might have touched nspawn, but we don't want them to
	 * because they're not dynamically spawned. So we reset it to 0.
	 */
	tq->tq_nspawn = 0;

	if (rc) {
		taskq_destroy(tq);
		tq = NULL;
	} else {
		down_write(&tq_list_sem);
		tq->tq_instance = taskq_find_by_name(name) + 1;
		list_add_tail(&tq->tq_taskqs, &tq_list);
		up_write(&tq_list_sem);
	}

	return (tq);
}
EXPORT_SYMBOL(taskq_create);
void
taskq_destroy(taskq_t *tq)
{
	struct task_struct *thread;
	taskq_thread_t *tqt;
	taskq_ent_t *t;
	unsigned long flags;

	ASSERT(tq);
	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	tq->tq_flags &= ~TASKQ_ACTIVE;
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	/*
	 * When TASKQ_ACTIVE is clear new tasks may not be added nor may
	 * new worker threads be spawned for dynamic taskq.
	 */
	if (dynamic_taskq != NULL)
		taskq_wait_outstanding(dynamic_taskq, 0);

	taskq_wait(tq);

	/* remove taskq from global list used by the kstats */
	down_write(&tq_list_sem);
	list_del(&tq->tq_taskqs);
	up_write(&tq_list_sem);

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	/* wait for spawning threads to insert themselves to the list */
	while (tq->tq_nspawn) {
		spin_unlock_irqrestore(&tq->tq_lock, flags);
		schedule_timeout_interruptible(1);
		spin_lock_irqsave_nested(&tq->tq_lock, flags,
		    tq->tq_lock_class);
	}

	/*
	 * Signal each thread to exit and block until it does. Each thread
	 * is responsible for removing itself from the list and freeing its
	 * taskq_thread_t. This allows for idle threads to opt to remove
	 * themselves from the taskq. They can be recreated as needed.
	 */
	while (!list_empty(&tq->tq_thread_list)) {
		tqt = list_entry(tq->tq_thread_list.next,
		    taskq_thread_t, tqt_thread_list);
		thread = tqt->tqt_thread;
		spin_unlock_irqrestore(&tq->tq_lock, flags);

		kthread_stop(thread);

		spin_lock_irqsave_nested(&tq->tq_lock, flags,
		    tq->tq_lock_class);
	}

	while (!list_empty(&tq->tq_free_list)) {
		t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);

		ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

		list_del_init(&t->tqent_list);
		task_free(tq, t);
	}

	ASSERT0(tq->tq_nthreads);
	ASSERT0(tq->tq_nalloc);
	ASSERT0(tq->tq_nspawn);
	ASSERT(list_empty(&tq->tq_thread_list));
	ASSERT(list_empty(&tq->tq_active_list));
	ASSERT(list_empty(&tq->tq_free_list));
	ASSERT(list_empty(&tq->tq_pend_list));
	ASSERT(list_empty(&tq->tq_prio_list));
	ASSERT(list_empty(&tq->tq_delay_list));

	spin_unlock_irqrestore(&tq->tq_lock, flags);

	strfree(tq->tq_name);
	kmem_free(tq, sizeof (taskq_t));
}
EXPORT_SYMBOL(taskq_destroy);
static unsigned int spl_taskq_kick = 0;

/*
 * module_param_cb is introduced to take kernel_param_ops and
 * module_param_call is marked as obsolete. Also set and get operations
 * were changed to take a 'const struct kernel_param *'.
 */
static int
#ifdef module_param_cb
param_set_taskq_kick(const char *val, const struct kernel_param *kp)
#else
param_set_taskq_kick(const char *val, struct kernel_param *kp)
#endif
{
	int ret;
	taskq_t *tq;
	taskq_ent_t *t;
	unsigned long flags;

	ret = param_set_uint(val, kp);
	if (ret < 0 || !spl_taskq_kick)
		return (ret);
	/* reset value */
	spl_taskq_kick = 0;

	down_read(&tq_list_sem);
	list_for_each_entry(tq, &tq_list, tq_taskqs) {
		spin_lock_irqsave_nested(&tq->tq_lock, flags,
		    tq->tq_lock_class);
		/* Check if the first pending is older than 5 seconds */
		t = taskq_next_ent(tq);
		if (t && time_after(jiffies, t->tqent_birth + 5*HZ)) {
			(void) taskq_thread_spawn(tq);
			printk(KERN_INFO "spl: Kicked taskq %s/%d\n",
			    tq->tq_name, tq->tq_instance);
		}
		spin_unlock_irqrestore(&tq->tq_lock, flags);
	}
	up_read(&tq_list_sem);

	return (ret);
}

#ifdef module_param_cb
static const struct kernel_param_ops param_ops_taskq_kick = {
	.set = param_set_taskq_kick,
	.get = param_get_uint,
};
module_param_cb(spl_taskq_kick, &param_ops_taskq_kick, &spl_taskq_kick, 0644);
#else
module_param_call(spl_taskq_kick, param_set_taskq_kick, param_get_uint,
    &spl_taskq_kick, 0644);
#endif
MODULE_PARM_DESC(spl_taskq_kick,
	"Write nonzero to kick stuck taskqs to spawn more threads");
int
spl_taskq_init(void)
{
	tsd_create(&taskq_tsd, NULL);

	system_taskq = taskq_create("spl_system_taskq", MAX(boot_ncpus, 64),
	    maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE|TASKQ_DYNAMIC);
	if (system_taskq == NULL)
		return (1);

	system_delay_taskq = taskq_create("spl_delay_taskq", MAX(boot_ncpus, 4),
	    maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE|TASKQ_DYNAMIC);
	if (system_delay_taskq == NULL) {
		taskq_destroy(system_taskq);
		return (1);
	}

	dynamic_taskq = taskq_create("spl_dynamic_taskq", 1,
	    maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE);
	if (dynamic_taskq == NULL) {
		taskq_destroy(system_taskq);
		taskq_destroy(system_delay_taskq);
		return (1);
	}

	/*
	 * This is used to annotate tq_lock, so
	 *   taskq_dispatch -> taskq_thread_spawn -> taskq_dispatch
	 * does not trigger a lockdep warning re: possible recursive locking.
	 */
	dynamic_taskq->tq_lock_class = TQ_LOCK_DYNAMIC;

	return (0);
}
void
spl_taskq_fini(void)
{
	taskq_destroy(dynamic_taskq);
	dynamic_taskq = NULL;

	taskq_destroy(system_delay_taskq);
	system_delay_taskq = NULL;

	taskq_destroy(system_taskq);
	system_taskq = NULL;

	tsd_destroy(&taskq_tsd);
}