/*
 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2007 The Regents of the University of California.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *
 * This file is part of the SPL, Solaris Porting Layer.
 * For details, see <http://zfsonlinux.org/>.
 *
 * The SPL is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * The SPL is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Solaris Porting Layer (SPL) Task Queue Implementation.
 */

#include <sys/timer.h>
#include <sys/taskq.h>
int spl_taskq_thread_bind = 0;
module_param(spl_taskq_thread_bind, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_bind, "Bind taskq thread to CPU by default");

int spl_taskq_thread_dynamic = 1;
module_param(spl_taskq_thread_dynamic, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_dynamic, "Allow dynamic taskq threads");

int spl_taskq_thread_priority = 1;
module_param(spl_taskq_thread_priority, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_priority,
    "Allow non-default priority for taskq threads");

int spl_taskq_thread_sequential = 4;
module_param(spl_taskq_thread_sequential, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_sequential,
    "Create new taskq threads after N sequential tasks");
/* Global system-wide dynamic task queue available for all consumers */
taskq_t *system_taskq;
EXPORT_SYMBOL(system_taskq);

/* Global dynamic task queue for long delay */
taskq_t *system_delay_taskq;
EXPORT_SYMBOL(system_delay_taskq);

/* Private dedicated taskq for creating new taskq threads on demand. */
static taskq_t *dynamic_taskq;
static taskq_thread_t *taskq_thread_create(taskq_t *);

/* List of all taskqs */
LIST_HEAD(tq_list);
struct rw_semaphore tq_list_sem;
static uint_t taskq_tsd;
static int
task_km_flags(uint_t flags)
{
    if (flags & TQ_NOSLEEP)
        return (KM_NOSLEEP);

    if (flags & TQ_PUSHPAGE)
        return (KM_PUSHPAGE);

    return (KM_SLEEP);
}
/*
 * taskq_find_by_name - Find the largest instance number of a named taskq.
 */
static int
taskq_find_by_name(const char *name)
{
    struct list_head *tql;
    taskq_t *tq;

    list_for_each_prev(tql, &tq_list) {
        tq = list_entry(tql, taskq_t, tq_taskqs);
        if (strcmp(name, tq->tq_name) == 0)
            return (tq->tq_instance);
    }
    return (-1);
}
/*
 * NOTE: Must be called with tq->tq_lock held, returns a list_t which
 * is not attached to the free, work, or pending taskq lists.
 */
static taskq_ent_t *
task_alloc(taskq_t *tq, uint_t flags, unsigned long *irqflags)
{
    taskq_ent_t *t;

    /* Acquire taskq_ent_t's from free list if available */
    if (!list_empty(&tq->tq_free_list) && !(flags & TQ_NEW)) {
        t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);

        ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));
        ASSERT(!(t->tqent_flags & TQENT_FLAG_CANCEL));
        ASSERT(!timer_pending(&t->tqent_timer));

        list_del_init(&t->tqent_list);
        return (t);
    }

    /* Free list is empty and memory allocations are prohibited */
    if (flags & TQ_NOALLOC)
        return (NULL);

    /* Hit maximum taskq_ent_t pool size */
    if (tq->tq_nalloc >= tq->tq_maxalloc) {
        if (flags & TQ_NOSLEEP)
            return (NULL);

        /*
         * Sleep periodically polling the free list for an available
         * taskq_ent_t. Dispatching with TQ_SLEEP should always succeed
         * but we cannot block forever waiting for a taskq_ent_t to
         * show up in the free list, otherwise a deadlock can happen.
         *
         * Therefore, we need to allocate a new task even if the number
         * of allocated tasks is above tq->tq_maxalloc, but we still
         * end up delaying the task allocation by one second, thereby
         * throttling the task dispatch rate.
         */
        spin_unlock_irqrestore(&tq->tq_lock, *irqflags);
        schedule_timeout(HZ / 100);
        spin_lock_irqsave_nested(&tq->tq_lock, *irqflags,
            tq->tq_lock_class);
    }

    spin_unlock_irqrestore(&tq->tq_lock, *irqflags);
    t = kmem_alloc(sizeof (taskq_ent_t), task_km_flags(flags));
    spin_lock_irqsave_nested(&tq->tq_lock, *irqflags, tq->tq_lock_class);

    if (t) {
        taskq_init_ent(t);
        tq->tq_nalloc++;
    }

    return (t);
}
/*
 * NOTE: Must be called with tq->tq_lock held, expects the taskq_ent_t
 * to already be removed from the free, work, or pending taskq lists.
 */
static void
task_free(taskq_t *tq, taskq_ent_t *t)
{
    ASSERT(list_empty(&t->tqent_list));
    ASSERT(!timer_pending(&t->tqent_timer));

    kmem_free(t, sizeof (taskq_ent_t));
    tq->tq_nalloc--;
}
/*
 * NOTE: Must be called with tq->tq_lock held, either destroys the
 * taskq_ent_t if too many exist or moves it to the free list for later use.
 */
static void
task_done(taskq_t *tq, taskq_ent_t *t)
{
    /* Wake tasks blocked in taskq_wait_id() */
    wake_up_all(&t->tqent_waitq);

    list_del_init(&t->tqent_list);

    if (tq->tq_nalloc <= tq->tq_minalloc) {
        t->tqent_id = TASKQID_INVALID;
        t->tqent_func = NULL;

        list_add_tail(&t->tqent_list, &tq->tq_free_list);
    } else {
        task_free(tq, t);
    }
}
/*
 * When a delayed task timer expires remove it from the delay list and
 * add it to the priority list in order for immediate processing.
 */
static void
task_expire_impl(taskq_ent_t *t)
{
    taskq_ent_t *w;
    taskq_t *tq = t->tqent_taskq;
    struct list_head *l;
    unsigned long flags;

    spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);

    if (t->tqent_flags & TQENT_FLAG_CANCEL) {
        ASSERT(list_empty(&t->tqent_list));
        spin_unlock_irqrestore(&tq->tq_lock, flags);
        return;
    }

    t->tqent_birth = jiffies;
    /*
     * The priority list must be maintained in strict task id order
     * from lowest to highest for lowest_id to be easily calculable.
     */
    list_del(&t->tqent_list);
    list_for_each_prev(l, &tq->tq_prio_list) {
        w = list_entry(l, taskq_ent_t, tqent_list);
        if (w->tqent_id < t->tqent_id) {
            list_add(&t->tqent_list, l);
            break;
        }
    }
    if (l == &tq->tq_prio_list)
        list_add(&t->tqent_list, &tq->tq_prio_list);

    spin_unlock_irqrestore(&tq->tq_lock, flags);

    wake_up(&tq->tq_work_waitq);
}
static void
task_expire(spl_timer_list_t tl)
{
    struct timer_list *tmr = (struct timer_list *)tl;
    taskq_ent_t *t = from_timer(t, tmr, tqent_timer);

    task_expire_impl(t);
}
/*
 * Returns the lowest incomplete taskqid_t.  The taskqid_t may
 * be queued on the pending list, on the priority list, on the
 * delay list, or on the work list currently being handled, but
 * it is not 100% complete yet.
 */
static taskqid_t
taskq_lowest_id(taskq_t *tq)
{
    taskqid_t lowest_id = tq->tq_next_id;
    taskq_ent_t *t;
    taskq_thread_t *tqt;

    if (!list_empty(&tq->tq_pend_list)) {
        t = list_entry(tq->tq_pend_list.next, taskq_ent_t, tqent_list);
        lowest_id = MIN(lowest_id, t->tqent_id);
    }

    if (!list_empty(&tq->tq_prio_list)) {
        t = list_entry(tq->tq_prio_list.next, taskq_ent_t, tqent_list);
        lowest_id = MIN(lowest_id, t->tqent_id);
    }

    if (!list_empty(&tq->tq_delay_list)) {
        t = list_entry(tq->tq_delay_list.next, taskq_ent_t, tqent_list);
        lowest_id = MIN(lowest_id, t->tqent_id);
    }

    if (!list_empty(&tq->tq_active_list)) {
        tqt = list_entry(tq->tq_active_list.next, taskq_thread_t,
            tqt_active_list);
        ASSERT(tqt->tqt_id != TASKQID_INVALID);
        lowest_id = MIN(lowest_id, tqt->tqt_id);
    }

    return (lowest_id);
}
/*
 * Insert a task into a list keeping the list sorted by increasing taskqid.
 */
static void
taskq_insert_in_order(taskq_t *tq, taskq_thread_t *tqt)
{
    taskq_thread_t *w;
    struct list_head *l;

    list_for_each_prev(l, &tq->tq_active_list) {
        w = list_entry(l, taskq_thread_t, tqt_active_list);
        if (w->tqt_id < tqt->tqt_id) {
            list_add(&tqt->tqt_active_list, l);
            break;
        }
    }
    if (l == &tq->tq_active_list)
        list_add(&tqt->tqt_active_list, &tq->tq_active_list);
}
/*
 * Find and return a task from the given list if it exists.  The list
 * must be in lowest to highest task id order.
 */
static taskq_ent_t *
taskq_find_list(taskq_t *tq, struct list_head *lh, taskqid_t id)
{
    struct list_head *l;
    taskq_ent_t *t;

    list_for_each(l, lh) {
        t = list_entry(l, taskq_ent_t, tqent_list);

        if (t->tqent_id == id)
            return (t);

        if (t->tqent_id > id)
            break;
    }

    return (NULL);
}
/*
 * Find an already dispatched task given the task id regardless of what
 * state it is in.  If a task is still pending it will be returned.
 * If a task is executing, then -EBUSY will be returned instead.
 * If the task has already been run then NULL is returned.
 */
static taskq_ent_t *
taskq_find(taskq_t *tq, taskqid_t id)
{
    taskq_thread_t *tqt;
    struct list_head *l;
    taskq_ent_t *t;

    t = taskq_find_list(tq, &tq->tq_delay_list, id);
    if (t)
        return (t);

    t = taskq_find_list(tq, &tq->tq_prio_list, id);
    if (t)
        return (t);

    t = taskq_find_list(tq, &tq->tq_pend_list, id);
    if (t)
        return (t);

    list_for_each(l, &tq->tq_active_list) {
        tqt = list_entry(l, taskq_thread_t, tqt_active_list);
        if (tqt->tqt_id == id) {
            /*
             * Instead of returning tqt_task, we just return a non
             * NULL value to prevent misuse, since tqt_task only
             * has two valid fields.
             */
            return (ERR_PTR(-EBUSY));
        }
    }

    return (NULL);
}
/*
 * Theory for the taskq_wait_id(), taskq_wait_outstanding(), and
 * taskq_wait() functions below.
 *
 * Taskq waiting is accomplished by tracking the lowest outstanding task
 * id and the next available task id.  As tasks are dispatched they are
 * added to the tail of the pending, priority, or delay lists.  As worker
 * threads become available the tasks are removed from the heads of these
 * lists and linked to the worker threads.  This ensures the lists are
 * kept sorted by lowest to highest task id.
 *
 * Therefore the lowest outstanding task id can be quickly determined by
 * checking the head item from all of these lists.  This value is stored
 * with the taskq as the lowest id.  It only needs to be recalculated when
 * either the task with the current lowest id completes or is canceled.
 *
 * By blocking until the lowest task id exceeds the passed task id the
 * taskq_wait_outstanding() function can be easily implemented.  Similarly,
 * by blocking until the lowest task id matches the next task id taskq_wait()
 * can be implemented.
 *
 * Callers should be aware that when there are multiple worker threads it
 * is possible for larger task ids to complete before smaller ones.  Also
 * when the taskq contains delay tasks with small task ids callers may
 * block for a considerable length of time waiting for them to expire and
 * execute.
 */
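/*
 * Illustrative sketch (not part of the SPL itself) of the waiting semantics
 * described above.  tq, do_work(), arg1, and arg2 are hypothetical
 * consumer-side names; the dispatch and wait functions are the ones defined
 * in this file.
 *
 *	taskqid_t id1 = taskq_dispatch(tq, do_work, arg1, TQ_SLEEP);
 *	taskqid_t id2 = taskq_dispatch(tq, do_work, arg2, TQ_SLEEP);
 *
 *	taskq_wait_outstanding(tq, id1);   (id1 has completed; id2 may still run)
 *	taskq_wait(tq);                    (the taskq is fully drained)
 */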
static int
taskq_wait_id_check(taskq_t *tq, taskqid_t id)
{
    int rc;
    unsigned long flags;

    spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
    rc = (taskq_find(tq, id) == NULL);
    spin_unlock_irqrestore(&tq->tq_lock, flags);

    return (rc);
}
/*
 * The taskq_wait_id() function blocks until the passed task id completes.
 * This does not guarantee that all lower task ids have completed.
 */
void
taskq_wait_id(taskq_t *tq, taskqid_t id)
{
    wait_event(tq->tq_wait_waitq, taskq_wait_id_check(tq, id));
}
EXPORT_SYMBOL(taskq_wait_id);
static int
taskq_wait_outstanding_check(taskq_t *tq, taskqid_t id)
{
    int rc;
    unsigned long flags;

    spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
    rc = (id < tq->tq_lowest_id);
    spin_unlock_irqrestore(&tq->tq_lock, flags);

    return (rc);
}
/*
 * The taskq_wait_outstanding() function will block until all tasks with a
 * lower taskqid than the passed 'id' have been completed.  Note that all
 * task id's are assigned monotonically at dispatch time.  Zero may be
 * passed for the id to indicate that all tasks dispatched up to this point,
 * but not after, should be waited for.
 */
void
taskq_wait_outstanding(taskq_t *tq, taskqid_t id)
{
    id = id ? id : tq->tq_next_id - 1;
    wait_event(tq->tq_wait_waitq, taskq_wait_outstanding_check(tq, id));
}
EXPORT_SYMBOL(taskq_wait_outstanding);
static int
taskq_wait_check(taskq_t *tq)
{
    int rc;
    unsigned long flags;

    spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
    rc = (tq->tq_lowest_id == tq->tq_next_id);
    spin_unlock_irqrestore(&tq->tq_lock, flags);

    return (rc);
}
/*
 * The taskq_wait() function will block until the taskq is empty.
 * This means that if a taskq re-dispatches work to itself taskq_wait()
 * callers will block indefinitely.
 */
void
taskq_wait(taskq_t *tq)
{
    wait_event(tq->tq_wait_waitq, taskq_wait_check(tq));
}
EXPORT_SYMBOL(taskq_wait);
int
taskq_member(taskq_t *tq, kthread_t *t)
{
    return (tq == (taskq_t *)tsd_get_by_thread(taskq_tsd, t));
}
EXPORT_SYMBOL(taskq_member);
/*
 * Cancel an already dispatched task given the task id.  Still pending tasks
 * will be immediately canceled, and if the task is active the function will
 * block until it completes.  Preallocated tasks which are canceled must be
 * freed by the caller.
 */
int
taskq_cancel_id(taskq_t *tq, taskqid_t id)
{
    taskq_ent_t *t;
    int rc = ENOENT;
    unsigned long flags;

    spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
    t = taskq_find(tq, id);
    if (t && t != ERR_PTR(-EBUSY)) {
        list_del_init(&t->tqent_list);
        t->tqent_flags |= TQENT_FLAG_CANCEL;

        /*
         * When canceling the lowest outstanding task id we
         * must recalculate the new lowest outstanding id.
         */
        if (tq->tq_lowest_id == t->tqent_id) {
            tq->tq_lowest_id = taskq_lowest_id(tq);
            ASSERT3S(tq->tq_lowest_id, >, t->tqent_id);
        }

        /*
         * The task_expire() function takes the tq->tq_lock so drop
         * the lock before synchronously cancelling the timer.
         */
        if (timer_pending(&t->tqent_timer)) {
            spin_unlock_irqrestore(&tq->tq_lock, flags);
            del_timer_sync(&t->tqent_timer);
            spin_lock_irqsave_nested(&tq->tq_lock, flags,
                tq->tq_lock_class);
        }

        if (!(t->tqent_flags & TQENT_FLAG_PREALLOC))
            task_done(tq, t);

        rc = 0;
    }
    spin_unlock_irqrestore(&tq->tq_lock, flags);

    if (t == ERR_PTR(-EBUSY)) {
        taskq_wait_id(tq, id);
        rc = EBUSY;
    }

    return (rc);
}
EXPORT_SYMBOL(taskq_cancel_id);
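/*
 * Illustrative sketch (not part of the SPL itself): canceling a delayed
 * task.  do_work(), arg, and the one second delay are hypothetical, and
 * ddi_get_lbolt() is assumed to be available from the SPL time interfaces.
 * A still pending task is removed immediately; a running task causes
 * taskq_cancel_id() to block until it completes and then return EBUSY.
 *
 *	taskqid_t id = taskq_dispatch_delay(tq, do_work, arg, TQ_SLEEP,
 *	    ddi_get_lbolt() + HZ);
 *	...
 *	int rc = taskq_cancel_id(tq, id);   (0 means do_work() never ran)
 */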
static int taskq_thread_spawn(taskq_t *tq);
taskqid_t
taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
{
    taskq_ent_t *t;
    taskqid_t rc = TASKQID_INVALID;
    unsigned long irqflags;

    spin_lock_irqsave_nested(&tq->tq_lock, irqflags, tq->tq_lock_class);

    /* Taskq being destroyed and all tasks drained */
    if (!(tq->tq_flags & TASKQ_ACTIVE))
        goto out;

    /* Do not queue the task unless there is idle thread for it */
    ASSERT(tq->tq_nactive <= tq->tq_nthreads);
    if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads)) {
        /* Dynamic taskq may be able to spawn another thread */
        if (!(tq->tq_flags & TASKQ_DYNAMIC) ||
            taskq_thread_spawn(tq) == 0)
            goto out;
    }

    if ((t = task_alloc(tq, flags, &irqflags)) == NULL)
        goto out;

    spin_lock(&t->tqent_lock);

    /* Queue to the front of the list to enforce TQ_NOQUEUE semantics */
    if (flags & TQ_NOQUEUE)
        list_add(&t->tqent_list, &tq->tq_prio_list);
    /* Queue to the priority list instead of the pending list */
    else if (flags & TQ_FRONT)
        list_add_tail(&t->tqent_list, &tq->tq_prio_list);
    else
        list_add_tail(&t->tqent_list, &tq->tq_pend_list);

    t->tqent_id = rc = tq->tq_next_id;
    tq->tq_next_id++;
    t->tqent_func = func;
    t->tqent_arg = arg;
    t->tqent_taskq = tq;
    t->tqent_timer.function = NULL;
    t->tqent_timer.expires = 0;
    t->tqent_birth = jiffies;

    ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

    spin_unlock(&t->tqent_lock);

    wake_up(&tq->tq_work_waitq);
out:
    /* Spawn additional taskq threads if required. */
    if (!(flags & TQ_NOQUEUE) && tq->tq_nactive == tq->tq_nthreads)
        (void) taskq_thread_spawn(tq);

    spin_unlock_irqrestore(&tq->tq_lock, irqflags);
    return (rc);
}
EXPORT_SYMBOL(taskq_dispatch);
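/*
 * Illustrative sketch (not part of the SPL itself): a typical dispatch to
 * the global system taskq.  do_work() and arg are hypothetical consumer
 * names; falling back to running the function synchronously when dispatch
 * fails is a common consumer pattern, not something the SPL requires.
 *
 *	static void
 *	do_work(void *arg)
 *	{
 *		...
 *	}
 *
 *	taskqid_t id = taskq_dispatch(system_taskq, do_work, arg, TQ_SLEEP);
 *	if (id == TASKQID_INVALID)
 *		do_work(arg);
 */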
taskqid_t
taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg,
    uint_t flags, clock_t expire_time)
{
    taskqid_t rc = TASKQID_INVALID;
    taskq_ent_t *t;
    unsigned long irqflags;

    spin_lock_irqsave_nested(&tq->tq_lock, irqflags, tq->tq_lock_class);

    /* Taskq being destroyed and all tasks drained */
    if (!(tq->tq_flags & TASKQ_ACTIVE))
        goto out;

    if ((t = task_alloc(tq, flags, &irqflags)) == NULL)
        goto out;

    spin_lock(&t->tqent_lock);

    /* Queue to the delay list for subsequent execution */
    list_add_tail(&t->tqent_list, &tq->tq_delay_list);

    t->tqent_id = rc = tq->tq_next_id;
    tq->tq_next_id++;
    t->tqent_func = func;
    t->tqent_arg = arg;
    t->tqent_taskq = tq;
    t->tqent_timer.function = task_expire;
    t->tqent_timer.expires = (unsigned long)expire_time;
    add_timer(&t->tqent_timer);

    ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

    spin_unlock(&t->tqent_lock);
out:
    /* Spawn additional taskq threads if required. */
    if (tq->tq_nactive == tq->tq_nthreads)
        (void) taskq_thread_spawn(tq);
    spin_unlock_irqrestore(&tq->tq_lock, irqflags);
    return (rc);
}
EXPORT_SYMBOL(taskq_dispatch_delay);
void
taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
    taskq_ent_t *t)
{
    unsigned long irqflags;

    spin_lock_irqsave_nested(&tq->tq_lock, irqflags,
        tq->tq_lock_class);

    /* Taskq being destroyed and all tasks drained */
    if (!(tq->tq_flags & TASKQ_ACTIVE)) {
        t->tqent_id = TASKQID_INVALID;
        goto out;
    }

    if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads)) {
        /* Dynamic taskq may be able to spawn another thread */
        if (!(tq->tq_flags & TASKQ_DYNAMIC) ||
            taskq_thread_spawn(tq) == 0)
            goto out;
    }

    spin_lock(&t->tqent_lock);

    /*
     * Make sure the entry is not on some other taskq; it is important to
     * ASSERT() under lock.
     */
    ASSERT(taskq_empty_ent(t));

    /*
     * Mark it as a prealloc'd task.  This is important
     * to ensure that we don't free it later.
     */
    t->tqent_flags |= TQENT_FLAG_PREALLOC;

    /* Queue to the priority list instead of the pending list */
    if (flags & TQ_FRONT)
        list_add_tail(&t->tqent_list, &tq->tq_prio_list);
    else
        list_add_tail(&t->tqent_list, &tq->tq_pend_list);

    t->tqent_id = tq->tq_next_id;
    tq->tq_next_id++;
    t->tqent_func = func;
    t->tqent_arg = arg;
    t->tqent_taskq = tq;
    t->tqent_birth = jiffies;

    spin_unlock(&t->tqent_lock);

    wake_up(&tq->tq_work_waitq);
out:
    /* Spawn additional taskq threads if required. */
    if (tq->tq_nactive == tq->tq_nthreads)
        (void) taskq_thread_spawn(tq);
    spin_unlock_irqrestore(&tq->tq_lock, irqflags);
}
EXPORT_SYMBOL(taskq_dispatch_ent);
int
taskq_empty_ent(taskq_ent_t *t)
{
    return (list_empty(&t->tqent_list));
}
EXPORT_SYMBOL(taskq_empty_ent);
void
taskq_init_ent(taskq_ent_t *t)
{
    spin_lock_init(&t->tqent_lock);
    init_waitqueue_head(&t->tqent_waitq);
    timer_setup(&t->tqent_timer, NULL, 0);
    INIT_LIST_HEAD(&t->tqent_list);
    t->tqent_id = 0;
    t->tqent_func = NULL;
    t->tqent_arg = NULL;
    t->tqent_flags = 0;
    t->tqent_taskq = NULL;
}
EXPORT_SYMBOL(taskq_init_ent);
/*
 * Return the next pending task, preference is given to tasks on the
 * priority list which were dispatched with TQ_FRONT.
 */
static taskq_ent_t *
taskq_next_ent(taskq_t *tq)
{
    struct list_head *list;

    if (!list_empty(&tq->tq_prio_list))
        list = &tq->tq_prio_list;
    else if (!list_empty(&tq->tq_pend_list))
        list = &tq->tq_pend_list;
    else
        return (NULL);

    return (list_entry(list->next, taskq_ent_t, tqent_list));
}
/*
 * Spawns a new thread for the specified taskq.
 */
static void
taskq_thread_spawn_task(void *arg)
{
    taskq_t *tq = (taskq_t *)arg;
    unsigned long flags;

    if (taskq_thread_create(tq) == NULL) {
        /* restore spawning count if failed */
        spin_lock_irqsave_nested(&tq->tq_lock, flags,
            tq->tq_lock_class);
        tq->tq_nspawn--;
        spin_unlock_irqrestore(&tq->tq_lock, flags);
    }
}
/*
 * Spawn additional threads for dynamic taskqs (TASKQ_DYNAMIC) if the current
 * number of threads is insufficient to handle the pending tasks.  These
 * new threads must be created by the dedicated dynamic_taskq to avoid
 * deadlocks between thread creation and memory reclaim.  The system_taskq
 * which is also a dynamic taskq cannot be safely used for this.
 */
static int
taskq_thread_spawn(taskq_t *tq)
{
    int spawning = 0;

    if (!(tq->tq_flags & TASKQ_DYNAMIC))
        return (0);

    if ((tq->tq_nthreads + tq->tq_nspawn < tq->tq_maxthreads) &&
        (tq->tq_flags & TASKQ_ACTIVE)) {
        spawning = (++tq->tq_nspawn);
        taskq_dispatch(dynamic_taskq, taskq_thread_spawn_task,
            tq, TQ_NOSLEEP);
    }

    return (spawning);
}
/*
 * Threads in a dynamic taskq should only exit once it has been completely
 * drained and no other threads are actively servicing tasks.  This prevents
 * threads from being created and destroyed more than is required.
 *
 * The first thread in the thread list is treated as the primary thread.
 * There is nothing special about the primary thread but in order to avoid
 * all the taskq pids from changing we opt to make it long running.
 */
static int
taskq_thread_should_stop(taskq_t *tq, taskq_thread_t *tqt)
{
    if (!(tq->tq_flags & TASKQ_DYNAMIC))
        return (0);

    if (list_first_entry(&(tq->tq_thread_list), taskq_thread_t,
        tqt_thread_list) == tqt)
        return (0);

    return
        ((tq->tq_nspawn == 0) &&	/* No threads are being spawned */
        (tq->tq_nactive == 0) &&	/* No threads are handling tasks */
        (tq->tq_nthreads > 1) &&	/* More than 1 thread is running */
        (!taskq_next_ent(tq)) &&	/* There are no pending tasks */
        (spl_taskq_thread_dynamic));	/* Dynamic taskqs are allowed */
}
static int
taskq_thread(void *args)
{
    DECLARE_WAITQUEUE(wait, current);
    sigset_t blocked;
    taskq_thread_t *tqt = args;
    taskq_t *tq;
    taskq_ent_t *t;
    int seq_tasks = 0;
    unsigned long flags;
    taskq_ent_t dup_task = {};

    ASSERT(tqt);
    tq = tqt->tqt_tq;
    current->flags |= PF_NOFREEZE;

    (void) spl_fstrans_mark();

    sigfillset(&blocked);
    sigprocmask(SIG_BLOCK, &blocked, NULL);
    flush_signals(current);

    tsd_set(taskq_tsd, tq);
    spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
    /*
     * If we are dynamically spawned, decrease spawning count. Note that
     * we could be created during taskq_create, in which case we shouldn't
     * do the decrement. But it's fine because taskq_create will reset
     * it to 0 once its initial threads have started.
     */
    if (tq->tq_flags & TASKQ_DYNAMIC)
        tq->tq_nspawn--;

    /* Immediately exit if more threads than allowed were created. */
    if (tq->tq_nthreads >= tq->tq_maxthreads)
        goto error;

    tq->tq_nthreads++;
    list_add_tail(&tqt->tqt_thread_list, &tq->tq_thread_list);
    wake_up(&tq->tq_wait_waitq);
    set_current_state(TASK_INTERRUPTIBLE);

    while (!kthread_should_stop()) {

        if (list_empty(&tq->tq_pend_list) &&
            list_empty(&tq->tq_prio_list)) {

            if (taskq_thread_should_stop(tq, tqt)) {
                wake_up_all(&tq->tq_wait_waitq);
                break;
            }

            add_wait_queue_exclusive(&tq->tq_work_waitq, &wait);
            spin_unlock_irqrestore(&tq->tq_lock, flags);

            schedule();
            seq_tasks = 0;

            spin_lock_irqsave_nested(&tq->tq_lock, flags,
                tq->tq_lock_class);
            remove_wait_queue(&tq->tq_work_waitq, &wait);
        } else {
            __set_current_state(TASK_RUNNING);
        }

        if ((t = taskq_next_ent(tq)) != NULL) {
            list_del_init(&t->tqent_list);

            /*
             * A TQENT_FLAG_PREALLOC task may be reused or freed
             * during the task function call. Store tqent_id and
             * tqent_flags here.
             *
             * Also use an on stack taskq_ent_t for tqt_task
             * assignment in this case. We only populate the two
             * fields used by the only user in taskq proc file.
             */
            tqt->tqt_id = t->tqent_id;
            tqt->tqt_flags = t->tqent_flags;

            if (t->tqent_flags & TQENT_FLAG_PREALLOC) {
                dup_task.tqent_func = t->tqent_func;
                dup_task.tqent_arg = t->tqent_arg;
                t = &dup_task;
            }
            tqt->tqt_task = t;

            taskq_insert_in_order(tq, tqt);
            tq->tq_nactive++;
            spin_unlock_irqrestore(&tq->tq_lock, flags);

            /* Perform the requested task */
            t->tqent_func(t->tqent_arg);

            spin_lock_irqsave_nested(&tq->tq_lock, flags,
                tq->tq_lock_class);
            tq->tq_nactive--;
            list_del_init(&tqt->tqt_active_list);
            tqt->tqt_task = NULL;

            /* For prealloc'd tasks, we don't free anything. */
            if (!(tqt->tqt_flags & TQENT_FLAG_PREALLOC))
                task_done(tq, t);

            /*
             * When the current lowest outstanding taskqid is
             * done calculate the new lowest outstanding id
             */
            if (tq->tq_lowest_id == tqt->tqt_id) {
                tq->tq_lowest_id = taskq_lowest_id(tq);
                ASSERT3S(tq->tq_lowest_id, >, tqt->tqt_id);
            }

            /* Spawn additional taskq threads if required. */
            if ((++seq_tasks) > spl_taskq_thread_sequential &&
                taskq_thread_spawn(tq))
                seq_tasks = 0;

            tqt->tqt_id = TASKQID_INVALID;
            tqt->tqt_flags = 0;
            wake_up_all(&tq->tq_wait_waitq);
        } else {
            if (taskq_thread_should_stop(tq, tqt))
                break;
        }

        set_current_state(TASK_INTERRUPTIBLE);
    }

    __set_current_state(TASK_RUNNING);
    tq->tq_nthreads--;
    list_del_init(&tqt->tqt_thread_list);
error:
    kmem_free(tqt, sizeof (taskq_thread_t));
    spin_unlock_irqrestore(&tq->tq_lock, flags);

    tsd_set(taskq_tsd, NULL);

    return (0);
}
static taskq_thread_t *
taskq_thread_create(taskq_t *tq)
{
    static int last_used_cpu = 0;
    taskq_thread_t *tqt;

    tqt = kmem_alloc(sizeof (*tqt), KM_PUSHPAGE);
    INIT_LIST_HEAD(&tqt->tqt_thread_list);
    INIT_LIST_HEAD(&tqt->tqt_active_list);
    tqt->tqt_tq = tq;
    tqt->tqt_id = TASKQID_INVALID;

    tqt->tqt_thread = spl_kthread_create(taskq_thread, tqt,
        "%s", tq->tq_name);
    if (tqt->tqt_thread == NULL) {
        kmem_free(tqt, sizeof (taskq_thread_t));
        return (NULL);
    }

    if (spl_taskq_thread_bind) {
        last_used_cpu = (last_used_cpu + 1) % num_online_cpus();
        kthread_bind(tqt->tqt_thread, last_used_cpu);
    }

    if (spl_taskq_thread_priority)
        set_user_nice(tqt->tqt_thread, PRIO_TO_NICE(tq->tq_pri));

    wake_up_process(tqt->tqt_thread);

    return (tqt);
}
taskq_t *
taskq_create(const char *name, int nthreads, pri_t pri,
    int minalloc, int maxalloc, uint_t flags)
{
    taskq_t *tq;
    taskq_thread_t *tqt;
    int count = 0, rc = 0, i;
    unsigned long irqflags;

    ASSERT(name != NULL);
    ASSERT(minalloc >= 0);
    ASSERT(maxalloc <= INT_MAX);
    ASSERT(!(flags & (TASKQ_CPR_SAFE)));	/* Unsupported */

    /* Scale the number of threads using nthreads as a percentage */
    if (flags & TASKQ_THREADS_CPU_PCT) {
        ASSERT(nthreads <= 100);
        ASSERT(nthreads >= 0);
        nthreads = MIN(nthreads, 100);
        nthreads = MAX(nthreads, 0);
        nthreads = MAX((num_online_cpus() * nthreads) / 100, 1);
    }

    tq = kmem_alloc(sizeof (*tq), KM_PUSHPAGE);
    if (tq == NULL)
        return (NULL);

    spin_lock_init(&tq->tq_lock);
    INIT_LIST_HEAD(&tq->tq_thread_list);
    INIT_LIST_HEAD(&tq->tq_active_list);
    tq->tq_name = strdup(name);
    tq->tq_nactive = 0;
    tq->tq_nthreads = 0;
    tq->tq_nspawn = 0;
    tq->tq_maxthreads = nthreads;
    tq->tq_pri = pri;
    tq->tq_minalloc = minalloc;
    tq->tq_maxalloc = maxalloc;
    tq->tq_nalloc = 0;
    tq->tq_flags = (flags | TASKQ_ACTIVE);
    tq->tq_next_id = TASKQID_INITIAL;
    tq->tq_lowest_id = TASKQID_INITIAL;
    INIT_LIST_HEAD(&tq->tq_free_list);
    INIT_LIST_HEAD(&tq->tq_pend_list);
    INIT_LIST_HEAD(&tq->tq_prio_list);
    INIT_LIST_HEAD(&tq->tq_delay_list);
    init_waitqueue_head(&tq->tq_work_waitq);
    init_waitqueue_head(&tq->tq_wait_waitq);
    tq->tq_lock_class = TQ_LOCK_GENERAL;
    INIT_LIST_HEAD(&tq->tq_taskqs);

    if (flags & TASKQ_PREPOPULATE) {
        spin_lock_irqsave_nested(&tq->tq_lock, irqflags,
            tq->tq_lock_class);

        for (i = 0; i < minalloc; i++)
            task_done(tq, task_alloc(tq, TQ_PUSHPAGE | TQ_NEW,
                &irqflags));

        spin_unlock_irqrestore(&tq->tq_lock, irqflags);
    }

    if ((flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic)
        nthreads = 1;

    for (i = 0; i < nthreads; i++) {
        tqt = taskq_thread_create(tq);
        if (tqt == NULL)
            rc = 1;
        else
            count++;
    }

    /* Wait for all threads to be started before potential destroy */
    wait_event(tq->tq_wait_waitq, tq->tq_nthreads == count);
    /*
     * taskq_thread might have touched nspawn, but we don't want them to
     * because they're not dynamically spawned.  So we reset it to 0 here.
     */
    tq->tq_nspawn = 0;

    if (rc) {
        taskq_destroy(tq);
        tq = NULL;
    } else {
        down_write(&tq_list_sem);
        tq->tq_instance = taskq_find_by_name(name) + 1;
        list_add_tail(&tq->tq_taskqs, &tq_list);
        up_write(&tq_list_sem);
    }

    return (tq);
}
EXPORT_SYMBOL(taskq_create);
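/*
 * Illustrative sketch (not part of the SPL itself): creating a dynamic
 * taskq sized as a percentage of the online CPUs.  The name and the 75%
 * figure are hypothetical; the other arguments mirror the calls made in
 * spl_taskq_init() below.
 *
 *	taskq_t *tq = taskq_create("example_taskq", 75, maxclsyspri,
 *	    boot_ncpus, INT_MAX, TASKQ_THREADS_CPU_PCT | TASKQ_DYNAMIC);
 *	...
 *	taskq_destroy(tq);
 */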
void
taskq_destroy(taskq_t *tq)
{
    struct task_struct *thread;
    taskq_thread_t *tqt;
    taskq_ent_t *t;
    unsigned long flags;

    spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
    tq->tq_flags &= ~TASKQ_ACTIVE;
    spin_unlock_irqrestore(&tq->tq_lock, flags);

    /*
     * When TASKQ_ACTIVE is clear new tasks may not be added nor may
     * new worker threads be spawned for dynamic taskq.
     */
    if (dynamic_taskq != NULL)
        taskq_wait_outstanding(dynamic_taskq, 0);

    taskq_wait(tq);

    /* remove taskq from global list used by the kstats */
    down_write(&tq_list_sem);
    list_del(&tq->tq_taskqs);
    up_write(&tq_list_sem);

    spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
    /* wait for spawning threads to insert themselves to the list */
    while (tq->tq_nspawn) {
        spin_unlock_irqrestore(&tq->tq_lock, flags);
        schedule_timeout_interruptible(1);
        spin_lock_irqsave_nested(&tq->tq_lock, flags,
            tq->tq_lock_class);
    }

    /*
     * Signal each thread to exit and block until it does.  Each thread
     * is responsible for removing itself from the list and freeing its
     * taskq_thread_t.  This allows for idle threads to opt to remove
     * themselves from the taskq.  They can be recreated as needed.
     */
    while (!list_empty(&tq->tq_thread_list)) {
        tqt = list_entry(tq->tq_thread_list.next,
            taskq_thread_t, tqt_thread_list);
        thread = tqt->tqt_thread;
        spin_unlock_irqrestore(&tq->tq_lock, flags);

        kthread_stop(thread);

        spin_lock_irqsave_nested(&tq->tq_lock, flags,
            tq->tq_lock_class);
    }

    while (!list_empty(&tq->tq_free_list)) {
        t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);

        ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

        list_del_init(&t->tqent_list);
        task_free(tq, t);
    }

    ASSERT0(tq->tq_nthreads);
    ASSERT0(tq->tq_nalloc);
    ASSERT0(tq->tq_nspawn);
    ASSERT(list_empty(&tq->tq_thread_list));
    ASSERT(list_empty(&tq->tq_active_list));
    ASSERT(list_empty(&tq->tq_free_list));
    ASSERT(list_empty(&tq->tq_pend_list));
    ASSERT(list_empty(&tq->tq_prio_list));
    ASSERT(list_empty(&tq->tq_delay_list));

    spin_unlock_irqrestore(&tq->tq_lock, flags);

    strfree(tq->tq_name);
    kmem_free(tq, sizeof (taskq_t));
}
EXPORT_SYMBOL(taskq_destroy);
static unsigned int spl_taskq_kick = 0;

/*
 * module_param_cb is introduced to take kernel_param_ops and
 * module_param_call is marked as obsolete.  Also set and get operations
 * were changed to take a 'const struct kernel_param *'.
 */
#ifdef module_param_cb
static int
param_set_taskq_kick(const char *val, const struct kernel_param *kp)
#else
static int
param_set_taskq_kick(const char *val, struct kernel_param *kp)
#endif
{
    int ret;
    taskq_t *tq;
    taskq_ent_t *t;
    unsigned long flags;

    ret = param_set_uint(val, kp);
    if (ret < 0 || !spl_taskq_kick)
        return (ret);
    /* reset value */
    spl_taskq_kick = 0;

    down_read(&tq_list_sem);
    list_for_each_entry(tq, &tq_list, tq_taskqs) {
        spin_lock_irqsave_nested(&tq->tq_lock, flags,
            tq->tq_lock_class);
        /* Check if the first pending is older than 5 seconds */
        t = taskq_next_ent(tq);
        if (t && time_after(jiffies, t->tqent_birth + 5*HZ)) {
            (void) taskq_thread_spawn(tq);
            printk(KERN_INFO "spl: Kicked taskq %s/%d\n",
                tq->tq_name, tq->tq_instance);
        }
        spin_unlock_irqrestore(&tq->tq_lock, flags);
    }
    up_read(&tq_list_sem);

    return (ret);
}

#ifdef module_param_cb
static const struct kernel_param_ops param_ops_taskq_kick = {
    .set = param_set_taskq_kick,
    .get = param_get_uint,
};
module_param_cb(spl_taskq_kick, &param_ops_taskq_kick, &spl_taskq_kick, 0644);
#else
module_param_call(spl_taskq_kick, param_set_taskq_kick, param_get_uint,
    &spl_taskq_kick, 0644);
#endif
MODULE_PARM_DESC(spl_taskq_kick,
    "Write nonzero to kick stuck taskqs to spawn more threads");
int
spl_taskq_init(void)
{
    init_rwsem(&tq_list_sem);
    tsd_create(&taskq_tsd, NULL);

    system_taskq = taskq_create("spl_system_taskq", MAX(boot_ncpus, 64),
        maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE|TASKQ_DYNAMIC);
    if (system_taskq == NULL)
        return (1);

    system_delay_taskq = taskq_create("spl_delay_taskq", MAX(boot_ncpus, 4),
        maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE|TASKQ_DYNAMIC);
    if (system_delay_taskq == NULL) {
        taskq_destroy(system_taskq);
        return (1);
    }

    dynamic_taskq = taskq_create("spl_dynamic_taskq", 1,
        maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE);
    if (dynamic_taskq == NULL) {
        taskq_destroy(system_taskq);
        taskq_destroy(system_delay_taskq);
        return (1);
    }

    /*
     * This is used to annotate tq_lock, so
     *	taskq_dispatch -> taskq_thread_spawn -> taskq_dispatch
     * does not trigger a lockdep warning re: possible recursive locking
     */
    dynamic_taskq->tq_lock_class = TQ_LOCK_DYNAMIC;

    return (0);
}
void
spl_taskq_fini(void)
{
    taskq_destroy(dynamic_taskq);
    dynamic_taskq = NULL;

    taskq_destroy(system_delay_taskq);
    system_delay_taskq = NULL;

    taskq_destroy(system_taskq);
    system_taskq = NULL;

    tsd_destroy(&taskq_tsd);
}