/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://zfsonlinux.org/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *****************************************************************************
 *  Solaris Porting Layer (SPL) Task Queue Implementation.
\*****************************************************************************/

#include <sys/taskq.h>
#include <sys/kmem.h>

int spl_taskq_thread_bind = 0;
module_param(spl_taskq_thread_bind, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_bind, "Bind taskq thread to CPU by default");

int spl_taskq_thread_dynamic = 1;
module_param(spl_taskq_thread_dynamic, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_dynamic, "Allow dynamic taskq threads");

int spl_taskq_thread_priority = 1;
module_param(spl_taskq_thread_priority, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_priority,
    "Allow non-default priority for taskq threads");

int spl_taskq_thread_sequential = 4;
module_param(spl_taskq_thread_sequential, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_sequential,
    "Create new taskq threads after N sequential tasks");

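/*
 * The parameters above are writable (mode 0644), so they can be set when
 * the module is loaded or adjusted later through sysfs.  A minimal sketch,
 * assuming the module is named "spl" (values are illustrative):
 *
 *   # modprobe spl spl_taskq_thread_bind=1 spl_taskq_thread_sequential=8
 *   # echo 0 > /sys/module/spl/parameters/spl_taskq_thread_dynamic
 */
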
/* Global system-wide dynamic task queue available for all consumers */
taskq_t *system_taskq;
EXPORT_SYMBOL(system_taskq);

/* Private dedicated taskq for creating new taskq threads on demand. */
static taskq_t *dynamic_taskq;
static taskq_thread_t *taskq_thread_create(taskq_t *);

static int
task_km_flags(uint_t flags)
{
        if (flags & TQ_NOSLEEP)
                return (KM_NOSLEEP);

        if (flags & TQ_PUSHPAGE)
                return (KM_PUSHPAGE);

        return (KM_SLEEP);
}

/*
 * NOTE: Must be called with tq->tq_lock held, returns a taskq_ent_t which
 * is not attached to the free, work, or pending taskq lists.
 */
static taskq_ent_t *
task_alloc(taskq_t *tq, uint_t flags, unsigned long *irqflags)
{
        taskq_ent_t *t;
        int count = 0;

        ASSERT(tq);
        ASSERT(spin_is_locked(&tq->tq_lock));
retry:
        /* Acquire taskq_ent_t's from free list if available */
        if (!list_empty(&tq->tq_free_list) && !(flags & TQ_NEW)) {
                t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);

                ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));
                ASSERT(!(t->tqent_flags & TQENT_FLAG_CANCEL));
                ASSERT(!timer_pending(&t->tqent_timer));

                list_del_init(&t->tqent_list);
                return (t);
        }

        /* Free list is empty and memory allocations are prohibited */
        if (flags & TQ_NOALLOC)
                return (NULL);

        /* Hit maximum taskq_ent_t pool size */
        if (tq->tq_nalloc >= tq->tq_maxalloc) {
                if (flags & TQ_NOSLEEP)
                        return (NULL);

                /*
                 * Sleep periodically polling the free list for an available
                 * taskq_ent_t.  Dispatching with TQ_SLEEP should always
                 * succeed, but we cannot block forever waiting for a
                 * taskq_ent_t to show up in the free list, otherwise a
                 * deadlock can happen.
                 *
                 * Therefore, we need to allocate a new task even if the
                 * number of allocated tasks is above tq->tq_maxalloc, but we
                 * still end up delaying the task allocation by one second,
                 * thereby throttling the task dispatch rate.
                 */
                spin_unlock_irqrestore(&tq->tq_lock, *irqflags);
                schedule_timeout(HZ / 100);
                spin_lock_irqsave_nested(&tq->tq_lock, *irqflags,
                    tq->tq_lock_class);
                if (count < 100) {
                        count++;
                        goto retry;
                }
        }

        spin_unlock_irqrestore(&tq->tq_lock, *irqflags);
        t = kmem_alloc(sizeof (taskq_ent_t), task_km_flags(flags));
        spin_lock_irqsave_nested(&tq->tq_lock, *irqflags, tq->tq_lock_class);

        if (t) {
                taskq_init_ent(t);
                tq->tq_nalloc++;
        }

        return (t);
}

/*
 * NOTE: Must be called with tq->tq_lock held, expects the taskq_ent_t
 * to already be removed from the free, work, or pending taskq lists.
 */
static void
task_free(taskq_t *tq, taskq_ent_t *t)
{
        ASSERT(tq);
        ASSERT(t);
        ASSERT(spin_is_locked(&tq->tq_lock));
        ASSERT(list_empty(&t->tqent_list));
        ASSERT(!timer_pending(&t->tqent_timer));

        kmem_free(t, sizeof (taskq_ent_t));
        tq->tq_nalloc--;
}

/*
 * NOTE: Must be called with tq->tq_lock held, either destroys the
 * taskq_ent_t if too many exist or moves it to the free list for later use.
 */
static void
task_done(taskq_t *tq, taskq_ent_t *t)
{
        ASSERT(tq);
        ASSERT(t);
        ASSERT(spin_is_locked(&tq->tq_lock));

        /* Wake tasks blocked in taskq_wait_id() */
        wake_up_all(&t->tqent_waitq);

        list_del_init(&t->tqent_list);

        if (tq->tq_nalloc <= tq->tq_minalloc) {
                t->tqent_id = 0;
                t->tqent_func = NULL;
                t->tqent_arg = NULL;
                t->tqent_flags = 0;

                list_add_tail(&t->tqent_list, &tq->tq_free_list);
        } else {
                task_free(tq, t);
        }
}

/*
 * When a delayed task timer expires remove it from the delay list and
 * add it to the priority list in order for immediate processing.
 */
static void
task_expire(unsigned long data)
{
        taskq_ent_t *w, *t = (taskq_ent_t *)data;
        taskq_t *tq = t->tqent_taskq;
        struct list_head *l;
        unsigned long flags;

        spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);

        if (t->tqent_flags & TQENT_FLAG_CANCEL) {
                ASSERT(list_empty(&t->tqent_list));
                spin_unlock_irqrestore(&tq->tq_lock, flags);
                return;
        }

        /*
         * The priority list must be maintained in strict task id order
         * from lowest to highest for lowest_id to be easily calculable.
         */
        list_del(&t->tqent_list);
        list_for_each_prev(l, &tq->tq_prio_list) {
                w = list_entry(l, taskq_ent_t, tqent_list);
                if (w->tqent_id < t->tqent_id) {
                        list_add(&t->tqent_list, l);
                        break;
                }
        }
        if (l == &tq->tq_prio_list)
                list_add(&t->tqent_list, &tq->tq_prio_list);

        spin_unlock_irqrestore(&tq->tq_lock, flags);

        wake_up(&tq->tq_work_waitq);
}

/*
 * Returns the lowest incomplete taskqid_t.  The taskqid_t may
 * be queued on the pending list, on the priority list, on the
 * delay list, or on the work list currently being handled, but
 * it is not 100% complete yet.
 */
static taskqid_t
taskq_lowest_id(taskq_t *tq)
{
        taskqid_t lowest_id = tq->tq_next_id;
        taskq_ent_t *t;
        taskq_thread_t *tqt;

        ASSERT(tq);
        ASSERT(spin_is_locked(&tq->tq_lock));

        if (!list_empty(&tq->tq_pend_list)) {
                t = list_entry(tq->tq_pend_list.next, taskq_ent_t, tqent_list);
                lowest_id = MIN(lowest_id, t->tqent_id);
        }

        if (!list_empty(&tq->tq_prio_list)) {
                t = list_entry(tq->tq_prio_list.next, taskq_ent_t, tqent_list);
                lowest_id = MIN(lowest_id, t->tqent_id);
        }

        if (!list_empty(&tq->tq_delay_list)) {
                t = list_entry(tq->tq_delay_list.next, taskq_ent_t, tqent_list);
                lowest_id = MIN(lowest_id, t->tqent_id);
        }

        if (!list_empty(&tq->tq_active_list)) {
                tqt = list_entry(tq->tq_active_list.next, taskq_thread_t,
                    tqt_active_list);
                ASSERT(tqt->tqt_id != 0);
                lowest_id = MIN(lowest_id, tqt->tqt_id);
        }

        return (lowest_id);
}

/*
 * Insert a task into a list keeping the list sorted by increasing taskqid.
 */
static void
taskq_insert_in_order(taskq_t *tq, taskq_thread_t *tqt)
{
        taskq_thread_t *w;
        struct list_head *l;

        ASSERT(tq);
        ASSERT(tqt);
        ASSERT(spin_is_locked(&tq->tq_lock));

        list_for_each_prev(l, &tq->tq_active_list) {
                w = list_entry(l, taskq_thread_t, tqt_active_list);
                if (w->tqt_id < tqt->tqt_id) {
                        list_add(&tqt->tqt_active_list, l);
                        break;
                }
        }
        if (l == &tq->tq_active_list)
                list_add(&tqt->tqt_active_list, &tq->tq_active_list);
}

/*
 * Find and return a task from the given list if it exists.  The list
 * must be in lowest to highest task id order.
 */
static taskq_ent_t *
taskq_find_list(taskq_t *tq, struct list_head *lh, taskqid_t id)
{
        struct list_head *l;
        taskq_ent_t *t;

        ASSERT(spin_is_locked(&tq->tq_lock));

        list_for_each(l, lh) {
                t = list_entry(l, taskq_ent_t, tqent_list);

                if (t->tqent_id == id)
                        return (t);

                if (t->tqent_id > id)
                        break;
        }

        return (NULL);
}

/*
 * Find an already dispatched task given the task id regardless of what
 * state it is in.  If a task is still pending or executing it will be
 * returned and 'active' set appropriately.  If the task has already
 * been run then NULL is returned.
 */
static taskq_ent_t *
taskq_find(taskq_t *tq, taskqid_t id, int *active)
{
        taskq_thread_t *tqt;
        struct list_head *l;
        taskq_ent_t *t;

        ASSERT(spin_is_locked(&tq->tq_lock));
        *active = 0;

        t = taskq_find_list(tq, &tq->tq_delay_list, id);
        if (t)
                return (t);

        t = taskq_find_list(tq, &tq->tq_prio_list, id);
        if (t)
                return (t);

        t = taskq_find_list(tq, &tq->tq_pend_list, id);
        if (t)
                return (t);

        list_for_each(l, &tq->tq_active_list) {
                tqt = list_entry(l, taskq_thread_t, tqt_active_list);
                if (tqt->tqt_id == id) {
                        t = tqt->tqt_task;
                        *active = 1;
                        return (t);
                }
        }

        return (NULL);
}

/*
 * Theory for the taskq_wait_id(), taskq_wait_outstanding(), and
 * taskq_wait() functions below.
 *
 * Taskq waiting is accomplished by tracking the lowest outstanding task
 * id and the next available task id.  As tasks are dispatched they are
 * added to the tail of the pending, priority, or delay lists.  As worker
 * threads become available the tasks are removed from the heads of these
 * lists and linked to the worker threads.  This ensures the lists are
 * kept sorted by lowest to highest task id.
 *
 * Therefore the lowest outstanding task id can be quickly determined by
 * checking the head item from all of these lists.  This value is stored
 * with the taskq as the lowest id.  It only needs to be recalculated when
 * either the task with the current lowest id completes or is canceled.
 *
 * By blocking until the lowest task id exceeds the passed task id the
 * taskq_wait_outstanding() function can be easily implemented.  Similarly,
 * by blocking until the lowest task id matches the next task id taskq_wait()
 * can be implemented.
 *
 * Callers should be aware that when there are multiple worker threads it
 * is possible for larger task ids to complete before smaller ones.  Also
 * when the taskq contains delay tasks with small task ids callers may
 * block for a considerable length of time waiting for them to expire and
 * execute.
 */

static int
taskq_wait_id_check(taskq_t *tq, taskqid_t id)
{
        int active = 0;
        int rc;
        unsigned long flags;

        spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
        rc = (taskq_find(tq, id, &active) == NULL);
        spin_unlock_irqrestore(&tq->tq_lock, flags);

        return (rc);
}

/*
 * The taskq_wait_id() function blocks until the passed task id completes.
 * This does not guarantee that all lower task ids have completed.
 */
void
taskq_wait_id(taskq_t *tq, taskqid_t id)
{
        wait_event(tq->tq_wait_waitq, taskq_wait_id_check(tq, id));
}
EXPORT_SYMBOL(taskq_wait_id);

static int
taskq_wait_outstanding_check(taskq_t *tq, taskqid_t id)
{
        int rc;
        unsigned long flags;

        spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
        rc = (id < tq->tq_lowest_id);
        spin_unlock_irqrestore(&tq->tq_lock, flags);

        return (rc);
}

/*
 * The taskq_wait_outstanding() function will block until all tasks with a
 * lower taskqid than the passed 'id' have been completed.  Note that all
 * task id's are assigned monotonically at dispatch time.  Zero may be
 * passed for the id to indicate that all tasks dispatched up to this
 * point, but not after, should be waited for.
 */
void
taskq_wait_outstanding(taskq_t *tq, taskqid_t id)
{
        wait_event(tq->tq_wait_waitq,
            taskq_wait_outstanding_check(tq, id ? id : tq->tq_next_id - 1));
}
EXPORT_SYMBOL(taskq_wait_outstanding);

static int
taskq_wait_check(taskq_t *tq)
{
        int rc;
        unsigned long flags;

        spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
        rc = (tq->tq_lowest_id == tq->tq_next_id);
        spin_unlock_irqrestore(&tq->tq_lock, flags);

        return (rc);
}

/*
 * The taskq_wait() function will block until the taskq is empty.
 * This means that if a taskq re-dispatches work to itself taskq_wait()
 * callers will block indefinitely.
 */
void
taskq_wait(taskq_t *tq)
{
        wait_event(tq->tq_wait_waitq, taskq_wait_check(tq));
}
EXPORT_SYMBOL(taskq_wait);

static int
taskq_member_impl(taskq_t *tq, void *t)
{
        struct list_head *l;
        taskq_thread_t *tqt;
        int found = 0;

        ASSERT(tq);
        ASSERT(t);
        ASSERT(spin_is_locked(&tq->tq_lock));

        list_for_each(l, &tq->tq_thread_list) {
                tqt = list_entry(l, taskq_thread_t, tqt_thread_list);
                if (tqt->tqt_thread == (struct task_struct *)t) {
                        found = 1;
                        break;
                }
        }
        return (found);
}

int
taskq_member(taskq_t *tq, void *t)
{
        int found;
        unsigned long flags;

        spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
        found = taskq_member_impl(tq, t);
        spin_unlock_irqrestore(&tq->tq_lock, flags);

        return (found);
}
EXPORT_SYMBOL(taskq_member);

/*
 * Cancel an already dispatched task given the task id.  Still pending tasks
 * will be immediately canceled, and if the task is active the function will
 * block until it completes.  Preallocated tasks which are canceled must be
 * freed by the caller.
 */
int
taskq_cancel_id(taskq_t *tq, taskqid_t id)
{
        taskq_ent_t *t;
        int active = 0;
        int rc = ENOENT;
        unsigned long flags;

        ASSERT(tq);

        spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
        t = taskq_find(tq, id, &active);
        if (t && !active) {
                list_del_init(&t->tqent_list);
                t->tqent_flags |= TQENT_FLAG_CANCEL;

                /*
                 * When canceling the lowest outstanding task id we
                 * must recalculate the new lowest outstanding id.
                 */
                if (tq->tq_lowest_id == t->tqent_id) {
                        tq->tq_lowest_id = taskq_lowest_id(tq);
                        ASSERT3S(tq->tq_lowest_id, >, t->tqent_id);
                }

                /*
                 * The task_expire() function takes the tq->tq_lock so drop
                 * the lock before synchronously cancelling the timer.
                 */
                if (timer_pending(&t->tqent_timer)) {
                        spin_unlock_irqrestore(&tq->tq_lock, flags);
                        del_timer_sync(&t->tqent_timer);
                        spin_lock_irqsave_nested(&tq->tq_lock, flags,
                            tq->tq_lock_class);
                }

                if (!(t->tqent_flags & TQENT_FLAG_PREALLOC))
                        task_done(tq, t);

                rc = 0;
        }
        spin_unlock_irqrestore(&tq->tq_lock, flags);

        if (active) {
                taskq_wait_id(tq, id);
                rc = EBUSY;
        }

        return (rc);
}
EXPORT_SYMBOL(taskq_cancel_id);

static int taskq_thread_spawn(taskq_t *tq);

taskqid_t
taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
{
        taskq_ent_t *t;
        taskqid_t rc = 0;
        unsigned long irqflags;

        ASSERT(tq);
        ASSERT(func);

        spin_lock_irqsave_nested(&tq->tq_lock, irqflags, tq->tq_lock_class);

        /* Taskq being destroyed and all tasks drained */
        if (!(tq->tq_flags & TASKQ_ACTIVE))
                goto out;

        /* Do not queue the task unless there is an idle thread for it */
        ASSERT(tq->tq_nactive <= tq->tq_nthreads);
        if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads))
                goto out;

        if ((t = task_alloc(tq, flags, &irqflags)) == NULL)
                goto out;

        spin_lock(&t->tqent_lock);

        /* Queue to the priority list instead of the pending list */
        if (flags & TQ_FRONT)
                list_add_tail(&t->tqent_list, &tq->tq_prio_list);
        else
                list_add_tail(&t->tqent_list, &tq->tq_pend_list);

        t->tqent_id = rc = tq->tq_next_id;
        tq->tq_next_id++;
        t->tqent_func = func;
        t->tqent_arg = arg;
        t->tqent_taskq = tq;
        t->tqent_timer.data = 0;
        t->tqent_timer.function = NULL;
        t->tqent_timer.expires = 0;

        ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

        spin_unlock(&t->tqent_lock);

        wake_up(&tq->tq_work_waitq);
out:
        /* Spawn additional taskq threads if required. */
        if (tq->tq_nactive == tq->tq_nthreads)
                (void) taskq_thread_spawn(tq);

        spin_unlock_irqrestore(&tq->tq_lock, irqflags);
        return (rc);
}
EXPORT_SYMBOL(taskq_dispatch);

taskqid_t
taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg,
    uint_t flags, clock_t expire_time)
{
        taskqid_t rc = 0;
        taskq_ent_t *t;
        unsigned long irqflags;

        ASSERT(tq);
        ASSERT(func);

        spin_lock_irqsave_nested(&tq->tq_lock, irqflags, tq->tq_lock_class);

        /* Taskq being destroyed and all tasks drained */
        if (!(tq->tq_flags & TASKQ_ACTIVE))
                goto out;

        if ((t = task_alloc(tq, flags, &irqflags)) == NULL)
                goto out;

        spin_lock(&t->tqent_lock);

        /* Queue to the delay list for subsequent execution */
        list_add_tail(&t->tqent_list, &tq->tq_delay_list);

        t->tqent_id = rc = tq->tq_next_id;
        tq->tq_next_id++;
        t->tqent_func = func;
        t->tqent_arg = arg;
        t->tqent_taskq = tq;
        t->tqent_timer.data = (unsigned long)t;
        t->tqent_timer.function = task_expire;
        t->tqent_timer.expires = (unsigned long)expire_time;
        add_timer(&t->tqent_timer);

        ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

        spin_unlock(&t->tqent_lock);
out:
        /* Spawn additional taskq threads if required. */
        if (tq->tq_nactive == tq->tq_nthreads)
                (void) taskq_thread_spawn(tq);
        spin_unlock_irqrestore(&tq->tq_lock, irqflags);
        return (rc);
}
EXPORT_SYMBOL(taskq_dispatch_delay);

void
taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
    taskq_ent_t *t)
{
        unsigned long irqflags;

        ASSERT(tq);
        ASSERT(func);

        spin_lock_irqsave_nested(&tq->tq_lock, irqflags,
            tq->tq_lock_class);

        /* Taskq being destroyed and all tasks drained */
        if (!(tq->tq_flags & TASKQ_ACTIVE)) {
                t->tqent_id = 0;
                goto out;
        }

        spin_lock(&t->tqent_lock);

        /*
         * Mark it as a prealloc'd task.  This is important
         * to ensure that we don't free it later.
         */
        t->tqent_flags |= TQENT_FLAG_PREALLOC;

        /* Queue to the priority list instead of the pending list */
        if (flags & TQ_FRONT)
                list_add_tail(&t->tqent_list, &tq->tq_prio_list);
        else
                list_add_tail(&t->tqent_list, &tq->tq_pend_list);

        t->tqent_id = tq->tq_next_id;
        tq->tq_next_id++;
        t->tqent_func = func;
        t->tqent_arg = arg;
        t->tqent_taskq = tq;

        spin_unlock(&t->tqent_lock);

        wake_up(&tq->tq_work_waitq);
out:
        /* Spawn additional taskq threads if required. */
        if (tq->tq_nactive == tq->tq_nthreads)
                (void) taskq_thread_spawn(tq);
        spin_unlock_irqrestore(&tq->tq_lock, irqflags);
}
EXPORT_SYMBOL(taskq_dispatch_ent);

int
taskq_empty_ent(taskq_ent_t *t)
{
        return (list_empty(&t->tqent_list));
}
EXPORT_SYMBOL(taskq_empty_ent);

void
taskq_init_ent(taskq_ent_t *t)
{
        spin_lock_init(&t->tqent_lock);
        init_waitqueue_head(&t->tqent_waitq);
        init_timer(&t->tqent_timer);
        INIT_LIST_HEAD(&t->tqent_list);
        t->tqent_id = 0;
        t->tqent_func = NULL;
        t->tqent_arg = NULL;
        t->tqent_flags = 0;
        t->tqent_taskq = NULL;
}
EXPORT_SYMBOL(taskq_init_ent);

/*
 * Return the next pending task, preference is given to tasks on the
 * priority list which were dispatched with TQ_FRONT.
 */
static taskq_ent_t *
taskq_next_ent(taskq_t *tq)
{
        struct list_head *list;

        ASSERT(spin_is_locked(&tq->tq_lock));

        if (!list_empty(&tq->tq_prio_list))
                list = &tq->tq_prio_list;
        else if (!list_empty(&tq->tq_pend_list))
                list = &tq->tq_pend_list;
        else
                return (NULL);

        return (list_entry(list->next, taskq_ent_t, tqent_list));
}

/*
 * Spawns a new thread for the specified taskq.
 */
static void
taskq_thread_spawn_task(void *arg)
{
        taskq_t *tq = (taskq_t *)arg;
        unsigned long flags;

        (void) taskq_thread_create(tq);

        spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
        tq->tq_nspawn--;
        spin_unlock_irqrestore(&tq->tq_lock, flags);
}

/*
 * Spawn additional threads for dynamic taskqs (TASKQ_DYNAMIC) when the
 * current number of threads is insufficient to handle the pending tasks.
 * These new threads must be created by the dedicated dynamic_taskq to
 * avoid deadlocks between thread creation and memory reclaim.  The
 * system_taskq, which is also a dynamic taskq, cannot be safely used
 * for this.
 */
static int
taskq_thread_spawn(taskq_t *tq)
{
        int spawning = 0;

        if (!(tq->tq_flags & TASKQ_DYNAMIC))
                return (0);

        if ((tq->tq_nthreads + tq->tq_nspawn < tq->tq_maxthreads) &&
            (tq->tq_flags & TASKQ_ACTIVE)) {
                spawning = (++tq->tq_nspawn);
                taskq_dispatch(dynamic_taskq, taskq_thread_spawn_task,
                    tq, TQ_NOSLEEP);
        }

        return (spawning);
}

/*
 * Threads in a dynamic taskq should only exit once it has been completely
 * drained and no other threads are actively servicing tasks.  This prevents
 * threads from being created and destroyed more than is required.
 *
 * The first thread in the thread list is treated as the primary thread.
 * There is nothing special about the primary thread, but in order to avoid
 * all the taskq pids from changing we opt to make it long running.
 */
static int
taskq_thread_should_stop(taskq_t *tq, taskq_thread_t *tqt)
{
        ASSERT(spin_is_locked(&tq->tq_lock));

        if (!(tq->tq_flags & TASKQ_DYNAMIC))
                return (0);

        if (list_first_entry(&(tq->tq_thread_list), taskq_thread_t,
            tqt_thread_list) == tqt)
                return (0);

        return
            ((tq->tq_nspawn == 0) &&    /* No threads are being spawned */
            (tq->tq_nactive == 0) &&    /* No threads are handling tasks */
            (tq->tq_nthreads > 1) &&    /* More than 1 thread is running */
            (!taskq_next_ent(tq)) &&    /* There are no pending tasks */
            (spl_taskq_thread_dynamic));/* Dynamic taskqs are allowed */
}

static int
taskq_thread(void *args)
{
        DECLARE_WAITQUEUE(wait, current);
        sigset_t blocked;
        taskq_thread_t *tqt = args;
        taskq_t *tq;
        taskq_ent_t *t;
        int seq_tasks = 0;
        unsigned long flags;

        ASSERT(tqt);
        ASSERT(tqt->tqt_tq);
        tq = tqt->tqt_tq;
        current->flags |= PF_NOFREEZE;

#if defined(PF_MEMALLOC_NOIO)
        (void) memalloc_noio_save();
#endif

        sigfillset(&blocked);
        sigprocmask(SIG_BLOCK, &blocked, NULL);
        flush_signals(current);

        spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);

        /* Immediately exit if more threads than allowed were created. */
        if (tq->tq_nthreads >= tq->tq_maxthreads)
                goto error;

        tq->tq_nthreads++;
        list_add_tail(&tqt->tqt_thread_list, &tq->tq_thread_list);
        wake_up(&tq->tq_wait_waitq);
        set_current_state(TASK_INTERRUPTIBLE);

        while (!kthread_should_stop()) {

                if (list_empty(&tq->tq_pend_list) &&
                    list_empty(&tq->tq_prio_list)) {

                        if (taskq_thread_should_stop(tq, tqt)) {
                                wake_up_all(&tq->tq_wait_waitq);
                                break;
                        }

                        add_wait_queue_exclusive(&tq->tq_work_waitq, &wait);
                        spin_unlock_irqrestore(&tq->tq_lock, flags);

                        schedule();
                        seq_tasks = 0;

                        spin_lock_irqsave_nested(&tq->tq_lock, flags,
                            tq->tq_lock_class);
                        remove_wait_queue(&tq->tq_work_waitq, &wait);
                } else {
                        __set_current_state(TASK_RUNNING);
                }

                if ((t = taskq_next_ent(tq)) != NULL) {
                        list_del_init(&t->tqent_list);

                        /* In order to support recursively dispatching a
                         * preallocated taskq_ent_t, tqent_id must be
                         * stored prior to executing tqent_func. */
                        tqt->tqt_id = t->tqent_id;
                        tqt->tqt_task = t;

                        /* We must store a copy of the flags prior to
                         * servicing the task (servicing a prealloc'd task
                         * returns the ownership of the tqent back to
                         * the caller of taskq_dispatch).  Thus,
                         * tqent_flags _may_ change within the call. */
                        tqt->tqt_flags = t->tqent_flags;

                        taskq_insert_in_order(tq, tqt);
                        tq->tq_nactive++;
                        spin_unlock_irqrestore(&tq->tq_lock, flags);

                        /* Perform the requested task */
                        t->tqent_func(t->tqent_arg);

                        spin_lock_irqsave_nested(&tq->tq_lock, flags,
                            tq->tq_lock_class);
                        tq->tq_nactive--;
                        list_del_init(&tqt->tqt_active_list);
                        tqt->tqt_task = NULL;

                        /* For prealloc'd tasks, we don't free anything. */
                        if (!(tqt->tqt_flags & TQENT_FLAG_PREALLOC))
                                task_done(tq, t);

                        /* When the current lowest outstanding taskqid is
                         * done calculate the new lowest outstanding id */
                        if (tq->tq_lowest_id == tqt->tqt_id) {
                                tq->tq_lowest_id = taskq_lowest_id(tq);
                                ASSERT3S(tq->tq_lowest_id, >, tqt->tqt_id);
                        }

                        /* Spawn additional taskq threads if required. */
                        if ((++seq_tasks) > spl_taskq_thread_sequential &&
                            taskq_thread_spawn(tq))
                                seq_tasks = 0;

                        tqt->tqt_id = 0;
                        tqt->tqt_flags = 0;
                        wake_up_all(&tq->tq_wait_waitq);
                } else {
                        if (taskq_thread_should_stop(tq, tqt))
                                break;
                }

                set_current_state(TASK_INTERRUPTIBLE);

        }

        __set_current_state(TASK_RUNNING);
        tq->tq_nthreads--;
        list_del_init(&tqt->tqt_thread_list);
error:
        kmem_free(tqt, sizeof (taskq_thread_t));
        spin_unlock_irqrestore(&tq->tq_lock, flags);

        return (0);
}

static taskq_thread_t *
taskq_thread_create(taskq_t *tq)
{
        static int last_used_cpu = 0;
        taskq_thread_t *tqt;

        tqt = kmem_alloc(sizeof (*tqt), KM_PUSHPAGE);
        INIT_LIST_HEAD(&tqt->tqt_thread_list);
        INIT_LIST_HEAD(&tqt->tqt_active_list);
        tqt->tqt_tq = tq;
        tqt->tqt_id = 0;

        tqt->tqt_thread = spl_kthread_create(taskq_thread, tqt,
            "%s", tq->tq_name);
        if (tqt->tqt_thread == NULL) {
                kmem_free(tqt, sizeof (taskq_thread_t));
                return (NULL);
        }

        if (spl_taskq_thread_bind) {
                last_used_cpu = (last_used_cpu + 1) % num_online_cpus();
                kthread_bind(tqt->tqt_thread, last_used_cpu);
        }

        if (spl_taskq_thread_priority)
                set_user_nice(tqt->tqt_thread, PRIO_TO_NICE(tq->tq_pri));

        wake_up_process(tqt->tqt_thread);

        return (tqt);
}

taskq_t *
taskq_create(const char *name, int nthreads, pri_t pri,
    int minalloc, int maxalloc, uint_t flags)
{
        taskq_t *tq;
        taskq_thread_t *tqt;
        int count = 0, rc = 0, i;
        unsigned long irqflags;

        ASSERT(name != NULL);
        ASSERT(minalloc >= 0);
        ASSERT(maxalloc <= INT_MAX);
        ASSERT(!(flags & (TASKQ_CPR_SAFE))); /* Unsupported */

        /* Scale the number of threads using nthreads as a percentage */
        if (flags & TASKQ_THREADS_CPU_PCT) {
                ASSERT(nthreads <= 100);
                ASSERT(nthreads >= 0);
                nthreads = MIN(nthreads, 100);
                nthreads = MAX(nthreads, 0);
                nthreads = MAX((num_online_cpus() * nthreads) / 100, 1);
        }

        tq = kmem_alloc(sizeof (*tq), KM_PUSHPAGE);
        if (tq == NULL)
                return (NULL);

        spin_lock_init(&tq->tq_lock);
        INIT_LIST_HEAD(&tq->tq_thread_list);
        INIT_LIST_HEAD(&tq->tq_active_list);
        tq->tq_name = strdup(name);
        tq->tq_nactive = 0;
        tq->tq_nthreads = 0;
        tq->tq_nspawn = 0;
        tq->tq_maxthreads = nthreads;
        tq->tq_pri = pri;
        tq->tq_minalloc = minalloc;
        tq->tq_maxalloc = maxalloc;
        tq->tq_nalloc = 0;
        tq->tq_flags = (flags | TASKQ_ACTIVE);
        tq->tq_next_id = 1;
        tq->tq_lowest_id = 1;
        INIT_LIST_HEAD(&tq->tq_free_list);
        INIT_LIST_HEAD(&tq->tq_pend_list);
        INIT_LIST_HEAD(&tq->tq_prio_list);
        INIT_LIST_HEAD(&tq->tq_delay_list);
        init_waitqueue_head(&tq->tq_work_waitq);
        init_waitqueue_head(&tq->tq_wait_waitq);
        tq->tq_lock_class = TQ_LOCK_GENERAL;

        if (flags & TASKQ_PREPOPULATE) {
                spin_lock_irqsave_nested(&tq->tq_lock, irqflags,
                    tq->tq_lock_class);

                for (i = 0; i < minalloc; i++)
                        task_done(tq, task_alloc(tq, TQ_PUSHPAGE | TQ_NEW,
                            &irqflags));

                spin_unlock_irqrestore(&tq->tq_lock, irqflags);
        }

        if ((flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic)
                nthreads = 1;

        for (i = 0; i < nthreads; i++) {
                tqt = taskq_thread_create(tq);
                if (tqt == NULL)
                        rc = 1;
                else
                        count++;
        }

        /* Wait for all threads to be started before potential destroy */
        wait_event(tq->tq_wait_waitq, tq->tq_nthreads == count);

        if (rc) {
                taskq_destroy(tq);
                tq = NULL;
        }

        return (tq);
}
EXPORT_SYMBOL(taskq_create);

void
taskq_destroy(taskq_t *tq)
{
        struct task_struct *thread;
        taskq_thread_t *tqt;
        taskq_ent_t *t;
        unsigned long flags;

        ASSERT(tq);
        spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
        tq->tq_flags &= ~TASKQ_ACTIVE;
        spin_unlock_irqrestore(&tq->tq_lock, flags);

        /*
         * When TASKQ_ACTIVE is clear new tasks may not be added nor may
         * new worker threads be spawned for dynamic taskqs.
         */
        if (dynamic_taskq != NULL)
                taskq_wait_outstanding(dynamic_taskq, 0);

        taskq_wait(tq);

        spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);

        /*
         * Signal each thread to exit and block until it does.  Each thread
         * is responsible for removing itself from the list and freeing its
         * taskq_thread_t.  This allows for idle threads to opt to remove
         * themselves from the taskq.  They can be recreated as needed.
         */
        while (!list_empty(&tq->tq_thread_list)) {
                tqt = list_entry(tq->tq_thread_list.next,
                    taskq_thread_t, tqt_thread_list);
                thread = tqt->tqt_thread;
                spin_unlock_irqrestore(&tq->tq_lock, flags);

                kthread_stop(thread);

                spin_lock_irqsave_nested(&tq->tq_lock, flags,
                    tq->tq_lock_class);
        }

        while (!list_empty(&tq->tq_free_list)) {
                t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);

                ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

                list_del_init(&t->tqent_list);
                task_free(tq, t);
        }

        ASSERT0(tq->tq_nthreads);
        ASSERT0(tq->tq_nalloc);
        ASSERT0(tq->tq_nspawn);
        ASSERT(list_empty(&tq->tq_thread_list));
        ASSERT(list_empty(&tq->tq_active_list));
        ASSERT(list_empty(&tq->tq_free_list));
        ASSERT(list_empty(&tq->tq_pend_list));
        ASSERT(list_empty(&tq->tq_prio_list));
        ASSERT(list_empty(&tq->tq_delay_list));

        spin_unlock_irqrestore(&tq->tq_lock, flags);

        strfree(tq->tq_name);
        kmem_free(tq, sizeof (taskq_t));
}
EXPORT_SYMBOL(taskq_destroy);

int
spl_taskq_init(void)
{
        system_taskq = taskq_create("spl_system_taskq", MAX(boot_ncpus, 64),
            maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE|TASKQ_DYNAMIC);
        if (system_taskq == NULL)
                return (1);

        dynamic_taskq = taskq_create("spl_dynamic_taskq", 1,
            maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE);
        if (dynamic_taskq == NULL) {
                taskq_destroy(system_taskq);
                return (1);
        }

        /*
         * This is used to annotate tq_lock, so
         *   taskq_dispatch -> taskq_thread_spawn -> taskq_dispatch
         * does not trigger a lockdep warning re: possible recursive locking
         */
        dynamic_taskq->tq_lock_class = TQ_LOCK_DYNAMIC;

        return (0);
}

void
spl_taskq_fini(void)
{
        taskq_destroy(dynamic_taskq);
        dynamic_taskq = NULL;

        taskq_destroy(system_taskq);
        system_taskq = NULL;
}