/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://zfsonlinux.org/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *****************************************************************************
 *  Solaris Porting Layer (SPL) Task Queue Implementation.
\*****************************************************************************/

#include <sys/taskq.h>

int spl_taskq_thread_bind = 0;
module_param(spl_taskq_thread_bind, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_bind, "Bind taskq thread to CPU by default");

int spl_taskq_thread_dynamic = 0;
module_param(spl_taskq_thread_dynamic, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_dynamic, "Allow dynamic taskq threads");

int spl_taskq_thread_priority = 1;
module_param(spl_taskq_thread_priority, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_priority,
	"Allow non-default priority for taskq threads");

int spl_taskq_thread_sequential = 4;
module_param(spl_taskq_thread_sequential, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_sequential,
	"Create new taskq threads after N sequential tasks");

/* Global system-wide dynamic task queue available for all consumers */
taskq_t *system_taskq;
EXPORT_SYMBOL(system_taskq);

/* Private dedicated taskq for creating new taskq threads on demand. */
static taskq_t *dynamic_taskq;
static taskq_thread_t *taskq_thread_create(taskq_t *);

task_km_flags(uint_t flags)

	if (flags & TQ_NOSLEEP)
		return (KM_NOSLEEP);

	if (flags & TQ_PUSHPAGE)
		return (KM_PUSHPAGE);

	return (KM_SLEEP);

/*
 * NOTE: Must be called with tq->tq_lock held, returns a list_t which
 * is not attached to the free, work, or pending taskq lists.
 */
task_alloc(taskq_t *tq, uint_t flags)

	ASSERT(spin_is_locked(&tq->tq_lock));

	/* Acquire taskq_ent_t's from free list if available */
	if (!list_empty(&tq->tq_free_list) && !(flags & TQ_NEW)) {
		t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);

		ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));
		ASSERT(!(t->tqent_flags & TQENT_FLAG_CANCEL));
		ASSERT(!timer_pending(&t->tqent_timer));

		list_del_init(&t->tqent_list);
		return (t);
	}

	/* Free list is empty and memory allocations are prohibited */
	if (flags & TQ_NOALLOC)
		return (NULL);

	/* Hit maximum taskq_ent_t pool size */
	if (tq->tq_nalloc >= tq->tq_maxalloc) {
		if (flags & TQ_NOSLEEP)
			return (NULL);

		/*
		 * Sleep periodically polling the free list for an available
		 * taskq_ent_t. Dispatching with TQ_SLEEP should always succeed
		 * but we cannot block forever waiting for a taskq_ent_t to
		 * show up in the free list, otherwise a deadlock can happen.
		 *
		 * Therefore, we need to allocate a new task even if the number
		 * of allocated tasks is above tq->tq_maxalloc, but we still
		 * end up delaying the task allocation by one second, thereby
		 * throttling the task dispatch rate.
		 */
		spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
		schedule_timeout(HZ / 100);
		spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
	}

	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
	t = kmem_alloc(sizeof(taskq_ent_t), task_km_flags(flags));
	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

/*
 * NOTE: Must be called with tq->tq_lock held, expects the taskq_ent_t
 * to already be removed from the free, work, or pending taskq lists.
 */
task_free(taskq_t *tq, taskq_ent_t *t)

	ASSERT(spin_is_locked(&tq->tq_lock));
	ASSERT(list_empty(&t->tqent_list));
	ASSERT(!timer_pending(&t->tqent_timer));

	kmem_free(t, sizeof(taskq_ent_t));

/*
 * NOTE: Must be called with tq->tq_lock held, either destroys the
 * taskq_ent_t if too many exist or moves it to the free list for later use.
 */
task_done(taskq_t *tq, taskq_ent_t *t)

	ASSERT(spin_is_locked(&tq->tq_lock));

	/* Wake tasks blocked in taskq_wait_id() */
	wake_up_all(&t->tqent_waitq);

	list_del_init(&t->tqent_list);

	if (tq->tq_nalloc <= tq->tq_minalloc) {
		t->tqent_func = NULL;

		list_add_tail(&t->tqent_list, &tq->tq_free_list);
	}

/*
 * When a delayed task timer expires remove it from the delay list and
 * add it to the priority list in order for immediate processing.
 */
task_expire(unsigned long data)

	taskq_ent_t *w, *t = (taskq_ent_t *)data;
	taskq_t *tq = t->tqent_taskq;

	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

	if (t->tqent_flags & TQENT_FLAG_CANCEL) {
		ASSERT(list_empty(&t->tqent_list));
		spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
		return;
	}

	/*
	 * The priority list must be maintained in strict task id order
	 * from lowest to highest for lowest_id to be easily calculable.
	 */
	list_del(&t->tqent_list);
	list_for_each_prev(l, &tq->tq_prio_list) {
		w = list_entry(l, taskq_ent_t, tqent_list);
		if (w->tqent_id < t->tqent_id) {
			list_add(&t->tqent_list, l);
			break;
		}
	}
	if (l == &tq->tq_prio_list)
		list_add(&t->tqent_list, &tq->tq_prio_list);

	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

	wake_up(&tq->tq_work_waitq);

/*
 * Returns the lowest incomplete taskqid_t.  The taskqid_t may
 * be queued on the pending list, on the priority list, on the
 * delay list, or on the work list currently being handled, but
 * it is not 100% complete yet.
 */
taskq_lowest_id(taskq_t *tq)

	taskqid_t lowest_id = tq->tq_next_id;

	ASSERT(spin_is_locked(&tq->tq_lock));

	if (!list_empty(&tq->tq_pend_list)) {
		t = list_entry(tq->tq_pend_list.next, taskq_ent_t, tqent_list);
		lowest_id = MIN(lowest_id, t->tqent_id);
	}

	if (!list_empty(&tq->tq_prio_list)) {
		t = list_entry(tq->tq_prio_list.next, taskq_ent_t, tqent_list);
		lowest_id = MIN(lowest_id, t->tqent_id);
	}

	if (!list_empty(&tq->tq_delay_list)) {
		t = list_entry(tq->tq_delay_list.next, taskq_ent_t, tqent_list);
		lowest_id = MIN(lowest_id, t->tqent_id);
	}

	if (!list_empty(&tq->tq_active_list)) {
		tqt = list_entry(tq->tq_active_list.next, taskq_thread_t,
		    tqt_active_list);
		ASSERT(tqt->tqt_id != 0);
		lowest_id = MIN(lowest_id, tqt->tqt_id);
	}

	return (lowest_id);

/*
 * Insert a task into a list keeping the list sorted by increasing taskqid.
 */
taskq_insert_in_order(taskq_t *tq, taskq_thread_t *tqt)

	ASSERT(spin_is_locked(&tq->tq_lock));

	list_for_each_prev(l, &tq->tq_active_list) {
		w = list_entry(l, taskq_thread_t, tqt_active_list);
		if (w->tqt_id < tqt->tqt_id) {
			list_add(&tqt->tqt_active_list, l);
			break;
		}
	}
	if (l == &tq->tq_active_list)
		list_add(&tqt->tqt_active_list, &tq->tq_active_list);

/*
 * Find and return a task from the given list if it exists.  The list
 * must be in lowest to highest task id order.
 */
taskq_find_list(taskq_t *tq, struct list_head *lh, taskqid_t id)

	ASSERT(spin_is_locked(&tq->tq_lock));

	list_for_each(l, lh) {
		t = list_entry(l, taskq_ent_t, tqent_list);

		if (t->tqent_id == id)
			return (t);

		if (t->tqent_id > id)
			break;
	}

	return (NULL);

/*
 * Find an already dispatched task given the task id regardless of what
 * state it is in.  If a task is still pending or executing it will be
 * returned and 'active' set appropriately.  If the task has already
 * been run then NULL is returned.
 */
taskq_find(taskq_t *tq, taskqid_t id, int *active)

	ASSERT(spin_is_locked(&tq->tq_lock));

	t = taskq_find_list(tq, &tq->tq_delay_list, id);
	if (t)
		return (t);

	t = taskq_find_list(tq, &tq->tq_prio_list, id);
	if (t)
		return (t);

	t = taskq_find_list(tq, &tq->tq_pend_list, id);
	if (t)
		return (t);

	list_for_each(l, &tq->tq_active_list) {
		tqt = list_entry(l, taskq_thread_t, tqt_active_list);
		if (tqt->tqt_id == id) {
			*active = 1;
			return (tqt->tqt_task);
		}
	}

	return (NULL);

/*
 * Theory for the taskq_wait_id(), taskq_wait_outstanding(), and
 * taskq_wait() functions below.
 *
 * Taskq waiting is accomplished by tracking the lowest outstanding task
 * id and the next available task id.  As tasks are dispatched they are
 * added to the tail of the pending, priority, or delay lists.  As worker
 * threads become available the tasks are removed from the heads of these
 * lists and linked to the worker threads.  This ensures the lists are
 * kept sorted by lowest to highest task id.
 *
 * Therefore the lowest outstanding task id can be quickly determined by
 * checking the head item from all of these lists.  This value is stored
 * with the taskq as the lowest id.  It only needs to be recalculated when
 * either the task with the current lowest id completes or is canceled.
 *
 * By blocking until the lowest task id exceeds the passed task id the
 * taskq_wait_outstanding() function can be easily implemented.  Similarly,
 * by blocking until the lowest task id matches the next task id taskq_wait()
 * can be implemented.
 *
 * Callers should be aware that when there are multiple worker threads it
 * is possible for larger task ids to complete before smaller ones.  Also
 * when the taskq contains delay tasks with small task ids callers may
 * block for a considerable length of time waiting for them to expire and
 * execute.
 */

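/*
 * Illustrative sketch (not from the original source): how the three wait
 * primitives below differ for a consumer.  Assumes a hypothetical task
 * function my_func and argument arg.
 *
 *	taskqid_t id = taskq_dispatch(tq, my_func, arg, TQ_SLEEP);
 *	if (id != 0) {
 *		taskq_wait_id(tq, id);           (this id only)
 *		taskq_wait_outstanding(tq, id);  (ids dispatched up to id)
 *		taskq_wait(tq);                  (until the taskq is empty)
 *	}
 */
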
taskq_wait_id_check(taskq_t *tq, taskqid_t id)

	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
	rc = (taskq_find(tq, id, &active) == NULL);
	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

	return (rc);

/*
 * The taskq_wait_id() function blocks until the passed task id completes.
 * This does not guarantee that all lower task ids have completed.
 */
taskq_wait_id(taskq_t *tq, taskqid_t id)

	wait_event(tq->tq_wait_waitq, taskq_wait_id_check(tq, id));

EXPORT_SYMBOL(taskq_wait_id);

taskq_wait_outstanding_check(taskq_t *tq, taskqid_t id)

	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
	rc = (id < tq->tq_lowest_id);
	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

	return (rc);

/*
 * The taskq_wait_outstanding() function will block until all tasks with a
 * lower taskqid than the passed 'id' have been completed.  Note that all
 * task ids are assigned monotonically at dispatch time.  Zero may be
 * passed for the id to indicate that all tasks dispatched up to this point,
 * but not after, should be waited for.
 */
taskq_wait_outstanding(taskq_t *tq, taskqid_t id)

	id = id ? id : tq->tq_next_id - 1;
	wait_event(tq->tq_wait_waitq, taskq_wait_outstanding_check(tq, id));

EXPORT_SYMBOL(taskq_wait_outstanding);

taskq_wait_check(taskq_t *tq)

	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
	rc = (tq->tq_lowest_id == tq->tq_next_id);
	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

	return (rc);

/*
 * The taskq_wait() function will block until the taskq is empty.
 * This means that if a taskq re-dispatches work to itself taskq_wait()
 * callers will block indefinitely.
 */
taskq_wait(taskq_t *tq)

	wait_event(tq->tq_wait_waitq, taskq_wait_check(tq));

EXPORT_SYMBOL(taskq_wait);

taskq_member_impl(taskq_t *tq, void *t)

	ASSERT(spin_is_locked(&tq->tq_lock));

	list_for_each(l, &tq->tq_thread_list) {
		tqt = list_entry(l, taskq_thread_t, tqt_thread_list);
		if (tqt->tqt_thread == (struct task_struct *)t) {
			found = 1;
			break;
		}
	}

	return (found);

taskq_member(taskq_t *tq, void *t)

	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
	found = taskq_member_impl(tq, t);
	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

	return (found);

EXPORT_SYMBOL(taskq_member);

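/*
 * Illustrative sketch (not from the original source): a caller can ask
 * whether the executing thread belongs to a given taskq, for example to
 * avoid waiting on a taskq from within one of its own tasks.  "current"
 * is the running kernel thread.
 *
 *	if (taskq_member(tq, current))
 *		return;		(already running on tq, do not block on it)
 */
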
/*
 * Cancel an already dispatched task given the task id.  Still pending tasks
 * will be immediately canceled, and if the task is active the function will
 * block until it completes.  Preallocated tasks which are canceled must be
 * freed by the caller.
 */
taskq_cancel_id(taskq_t *tq, taskqid_t id)

	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
	t = taskq_find(tq, id, &active);

	list_del_init(&t->tqent_list);
	t->tqent_flags |= TQENT_FLAG_CANCEL;

	/*
	 * When canceling the lowest outstanding task id we
	 * must recalculate the new lowest outstanding id.
	 */
	if (tq->tq_lowest_id == t->tqent_id) {
		tq->tq_lowest_id = taskq_lowest_id(tq);
		ASSERT3S(tq->tq_lowest_id, >, t->tqent_id);
	}

	/*
	 * The task_expire() function takes the tq->tq_lock so drop
	 * the lock before synchronously cancelling the timer.
	 */
	if (timer_pending(&t->tqent_timer)) {
		spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
		del_timer_sync(&t->tqent_timer);
		spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
	}

	if (!(t->tqent_flags & TQENT_FLAG_PREALLOC))
		task_done(tq, t);

	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

	taskq_wait_id(tq, id);

EXPORT_SYMBOL(taskq_cancel_id);

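/*
 * Illustrative sketch (not from the original source): canceling a
 * previously dispatched task.  my_func is a hypothetical callback.  A
 * return of 0 typically means the task was pulled off the queue before it
 * ran; if it was already executing, taskq_cancel_id() blocks until it
 * finishes.
 *
 *	taskqid_t id = taskq_dispatch(tq, my_func, arg, TQ_SLEEP);
 *	if (id != 0)
 *		(void) taskq_cancel_id(tq, id);
 */
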
static int taskq_thread_spawn(taskq_t *tq);

taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)

	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

	/* Taskq being destroyed and all tasks drained */
	if (!(tq->tq_flags & TASKQ_ACTIVE))
		goto out;

	/* Do not queue the task unless there is an idle thread for it */
	ASSERT(tq->tq_nactive <= tq->tq_nthreads);
	if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads))
		goto out;

	if ((t = task_alloc(tq, flags)) == NULL)
		goto out;

	spin_lock(&t->tqent_lock);

	/* Queue to the priority list instead of the pending list */
	if (flags & TQ_FRONT)
		list_add_tail(&t->tqent_list, &tq->tq_prio_list);
	else
		list_add_tail(&t->tqent_list, &tq->tq_pend_list);

	t->tqent_id = rc = tq->tq_next_id;
	t->tqent_func = func;

	t->tqent_timer.data = 0;
	t->tqent_timer.function = NULL;
	t->tqent_timer.expires = 0;

	ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

	spin_unlock(&t->tqent_lock);

	wake_up(&tq->tq_work_waitq);
out:
	/* Spawn additional taskq threads if required. */
	if (tq->tq_nactive == tq->tq_nthreads)
		(void) taskq_thread_spawn(tq);

	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
	return (rc);

EXPORT_SYMBOL(taskq_dispatch);

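/*
 * Illustrative sketch (not from the original source): dispatching work to
 * the global system taskq.  my_worker is a hypothetical function; a
 * returned id of 0 indicates the dispatch could not be queued.
 *
 *	static void my_worker(void *arg) { ... }
 *
 *	if (taskq_dispatch(system_taskq, my_worker, arg, TQ_SLEEP) == 0)
 *		return;		(dispatch failed)
 */
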
taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg,
    uint_t flags, clock_t expire_time)

	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

	/* Taskq being destroyed and all tasks drained */
	if (!(tq->tq_flags & TASKQ_ACTIVE))
		goto out;

	if ((t = task_alloc(tq, flags)) == NULL)
		goto out;

	spin_lock(&t->tqent_lock);

	/* Queue to the delay list for subsequent execution */
	list_add_tail(&t->tqent_list, &tq->tq_delay_list);

	t->tqent_id = rc = tq->tq_next_id;
	t->tqent_func = func;

	t->tqent_timer.data = (unsigned long)t;
	t->tqent_timer.function = task_expire;
	t->tqent_timer.expires = (unsigned long)expire_time;
	add_timer(&t->tqent_timer);

	ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

	spin_unlock(&t->tqent_lock);
out:
	/* Spawn additional taskq threads if required. */
	if (tq->tq_nactive == tq->tq_nthreads)
		(void) taskq_thread_spawn(tq);

	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
	return (rc);

EXPORT_SYMBOL(taskq_dispatch_delay);

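/*
 * Illustrative sketch (not from the original source): scheduling work to
 * run later.  The expire time is an absolute time in the units used by the
 * entry's timer (jiffies is assumed here); my_func is a hypothetical
 * callback.
 *
 *	taskqid_t id = taskq_dispatch_delay(tq, my_func, arg, TQ_SLEEP,
 *	    jiffies + 5 * HZ);		(run roughly five seconds from now)
 */
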
taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
    taskq_ent_t *t)

	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

	/* Taskq being destroyed and all tasks drained */
	if (!(tq->tq_flags & TASKQ_ACTIVE)) {
		t->tqent_id = 0;
		goto out;
	}

	spin_lock(&t->tqent_lock);

	/*
	 * Mark it as a prealloc'd task.  This is important
	 * to ensure that we don't free it later.
	 */
	t->tqent_flags |= TQENT_FLAG_PREALLOC;

	/* Queue to the priority list instead of the pending list */
	if (flags & TQ_FRONT)
		list_add_tail(&t->tqent_list, &tq->tq_prio_list);
	else
		list_add_tail(&t->tqent_list, &tq->tq_pend_list);

	t->tqent_id = tq->tq_next_id;
	t->tqent_func = func;

	spin_unlock(&t->tqent_lock);

	wake_up(&tq->tq_work_waitq);
out:
	/* Spawn additional taskq threads if required. */
	if (tq->tq_nactive == tq->tq_nthreads)
		(void) taskq_thread_spawn(tq);

	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

EXPORT_SYMBOL(taskq_dispatch_ent);

taskq_empty_ent(taskq_ent_t *t)

	return list_empty(&t->tqent_list);

EXPORT_SYMBOL(taskq_empty_ent);

taskq_init_ent(taskq_ent_t *t)

	spin_lock_init(&t->tqent_lock);
	init_waitqueue_head(&t->tqent_waitq);
	init_timer(&t->tqent_timer);
	INIT_LIST_HEAD(&t->tqent_list);
	t->tqent_func = NULL;
	t->tqent_taskq = NULL;

EXPORT_SYMBOL(taskq_init_ent);

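/*
 * Illustrative sketch (not from the original source): dispatching with a
 * caller-owned, preallocated entry.  The taskq_ent_t is embedded in a
 * hypothetical caller structure, so no allocation is needed at dispatch
 * time and the caller keeps ownership of the entry.
 *
 *	struct my_job {
 *		taskq_ent_t	mj_ent;
 *		int		mj_data;
 *	};
 *
 *	taskq_init_ent(&job->mj_ent);
 *	taskq_dispatch_ent(tq, my_func, job, TQ_SLEEP, &job->mj_ent);
 */
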
/*
 * Return the next pending task, preference is given to tasks on the
 * priority list which were dispatched with TQ_FRONT.
 */
taskq_next_ent(taskq_t *tq)

	struct list_head *list;

	ASSERT(spin_is_locked(&tq->tq_lock));

	if (!list_empty(&tq->tq_prio_list))
		list = &tq->tq_prio_list;
	else if (!list_empty(&tq->tq_pend_list))
		list = &tq->tq_pend_list;
	else
		return (NULL);

	return (list_entry(list->next, taskq_ent_t, tqent_list));

/*
 * Spawns a new thread for the specified taskq.
 */
taskq_thread_spawn_task(void *arg)

	taskq_t *tq = (taskq_t *)arg;

	if (taskq_thread_create(tq) == NULL) {
		/* restore spawning count if failed */
		spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
		tq->tq_nspawn--;
		spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
	}

/*
 * Spawn additional threads for dynamic taskqs (TASKQ_DYNAMIC) when the
 * current number of threads is insufficient to handle the pending tasks.
 * These new threads must be created by the dedicated dynamic_taskq to
 * avoid deadlocks between thread creation and memory reclaim.  The
 * system_taskq which is also a dynamic taskq cannot be safely used for this.
 */
taskq_thread_spawn(taskq_t *tq)

	if (!(tq->tq_flags & TASKQ_DYNAMIC))
		return (0);

	if ((tq->tq_nthreads + tq->tq_nspawn < tq->tq_maxthreads) &&
	    (tq->tq_flags & TASKQ_ACTIVE)) {
		spawning = (++tq->tq_nspawn);
		taskq_dispatch(dynamic_taskq, taskq_thread_spawn_task,
		    tq, TQ_NOSLEEP);
	}

	return (spawning);

/*
 * Threads in a dynamic taskq should only exit once it has been completely
 * drained and no other threads are actively servicing tasks.  This prevents
 * threads from being created and destroyed more than is required.
 *
 * The first thread in the thread list is treated as the primary thread.
 * There is nothing special about the primary thread but in order to avoid
 * all the taskq pids from changing we opt to make it long running.
 */
taskq_thread_should_stop(taskq_t *tq, taskq_thread_t *tqt)

	ASSERT(spin_is_locked(&tq->tq_lock));

	if (!(tq->tq_flags & TASKQ_DYNAMIC))
		return (0);

	if (list_first_entry(&(tq->tq_thread_list), taskq_thread_t,
	    tqt_thread_list) == tqt)
		return (0);

	return
	    ((tq->tq_nspawn == 0) &&	/* No threads are being spawned */
	    (tq->tq_nactive == 0) &&	/* No threads are handling tasks */
	    (tq->tq_nthreads > 1) &&	/* More than 1 thread is running */
	    (!taskq_next_ent(tq)) &&	/* There are no pending tasks */
	    (spl_taskq_thread_dynamic));/* Dynamic taskqs are allowed */

taskq_thread(void *args)

	DECLARE_WAITQUEUE(wait, current);
	taskq_thread_t *tqt = args;

	current->flags |= PF_NOFREEZE;

	(void) spl_fstrans_mark();

	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

	/*
	 * If we are dynamically spawned, decrease spawning count. Note that
	 * we could be created during taskq_create, in which case we shouldn't
	 * do the decrement. But it's fine because taskq_create will reset
	 * it to 0.
	 */
	if (tq->tq_flags & TASKQ_DYNAMIC)
		tq->tq_nspawn--;

	/* Immediately exit if more threads than allowed were created. */
	if (tq->tq_nthreads >= tq->tq_maxthreads)
		goto error;

	list_add_tail(&tqt->tqt_thread_list, &tq->tq_thread_list);
	wake_up(&tq->tq_wait_waitq);
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {

		if (list_empty(&tq->tq_pend_list) &&
		    list_empty(&tq->tq_prio_list)) {

			if (taskq_thread_should_stop(tq, tqt)) {
				wake_up_all(&tq->tq_wait_waitq);
				break;
			}

			add_wait_queue_exclusive(&tq->tq_work_waitq, &wait);
			spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

			schedule();

			spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
			remove_wait_queue(&tq->tq_work_waitq, &wait);
		} else {
			__set_current_state(TASK_RUNNING);
		}

		if ((t = taskq_next_ent(tq)) != NULL) {
			list_del_init(&t->tqent_list);

			/* In order to support recursively dispatching a
			 * preallocated taskq_ent_t, tqent_id must be
			 * stored prior to executing tqent_func. */
			tqt->tqt_id = t->tqent_id;

			/* We must store a copy of the flags prior to
			 * servicing the task (servicing a prealloc'd task
			 * returns the ownership of the tqent back to
			 * the caller of taskq_dispatch). Thus,
			 * tqent_flags _may_ change within the call. */
			tqt->tqt_flags = t->tqent_flags;

			taskq_insert_in_order(tq, tqt);
			spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

			/* Perform the requested task */
			t->tqent_func(t->tqent_arg);

			spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
			list_del_init(&tqt->tqt_active_list);
			tqt->tqt_task = NULL;

			/* For prealloc'd tasks, we don't free anything. */
			if (!(tqt->tqt_flags & TQENT_FLAG_PREALLOC))
				task_done(tq, t);

			/* When the current lowest outstanding taskqid is
			 * done, calculate the new lowest outstanding id */
			if (tq->tq_lowest_id == tqt->tqt_id) {
				tq->tq_lowest_id = taskq_lowest_id(tq);
				ASSERT3S(tq->tq_lowest_id, >, tqt->tqt_id);
			}

			/* Spawn additional taskq threads if required. */
			if ((++seq_tasks) > spl_taskq_thread_sequential &&
			    taskq_thread_spawn(tq))
				seq_tasks = 0;

			wake_up_all(&tq->tq_wait_waitq);
		} else {
			if (taskq_thread_should_stop(tq, tqt))
				break;
		}

		set_current_state(TASK_INTERRUPTIBLE);
	}

	__set_current_state(TASK_RUNNING);
	list_del_init(&tqt->tqt_thread_list);
error:
	kmem_free(tqt, sizeof (taskq_thread_t));
	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

	return (0);

static taskq_thread_t *
taskq_thread_create(taskq_t *tq)

	static int last_used_cpu = 0;

	tqt = kmem_alloc(sizeof (*tqt), KM_PUSHPAGE);
	INIT_LIST_HEAD(&tqt->tqt_thread_list);
	INIT_LIST_HEAD(&tqt->tqt_active_list);

	tqt->tqt_thread = spl_kthread_create(taskq_thread, tqt,
	    "%s", tq->tq_name);
	if (tqt->tqt_thread == NULL) {
		kmem_free(tqt, sizeof (taskq_thread_t));
		return (NULL);
	}

	if (spl_taskq_thread_bind) {
		last_used_cpu = (last_used_cpu + 1) % num_online_cpus();
		kthread_bind(tqt->tqt_thread, last_used_cpu);
	}

	if (spl_taskq_thread_priority)
		set_user_nice(tqt->tqt_thread, PRIO_TO_NICE(tq->tq_pri));

	wake_up_process(tqt->tqt_thread);

	return (tqt);

taskq_create(const char *name, int nthreads, pri_t pri,
    int minalloc, int maxalloc, uint_t flags)

	int count = 0, rc = 0, i;

	ASSERT(name != NULL);
	ASSERT(minalloc >= 0);
	ASSERT(maxalloc <= INT_MAX);
	ASSERT(!(flags & (TASKQ_CPR_SAFE))); /* Unsupported */

	/* Scale the number of threads using nthreads as a percentage */
	if (flags & TASKQ_THREADS_CPU_PCT) {
		ASSERT(nthreads <= 100);
		ASSERT(nthreads >= 0);
		nthreads = MIN(nthreads, 100);
		nthreads = MAX(nthreads, 0);
		nthreads = MAX((num_online_cpus() * nthreads) / 100, 1);
	}

	tq = kmem_alloc(sizeof (*tq), KM_PUSHPAGE);

	spin_lock_init(&tq->tq_lock);
	INIT_LIST_HEAD(&tq->tq_thread_list);
	INIT_LIST_HEAD(&tq->tq_active_list);
	tq->tq_name = strdup(name);
	tq->tq_maxthreads = nthreads;
	tq->tq_minalloc = minalloc;
	tq->tq_maxalloc = maxalloc;
	tq->tq_flags = (flags | TASKQ_ACTIVE);
	tq->tq_lowest_id = 1;
	INIT_LIST_HEAD(&tq->tq_free_list);
	INIT_LIST_HEAD(&tq->tq_pend_list);
	INIT_LIST_HEAD(&tq->tq_prio_list);
	INIT_LIST_HEAD(&tq->tq_delay_list);
	init_waitqueue_head(&tq->tq_work_waitq);
	init_waitqueue_head(&tq->tq_wait_waitq);

	if (flags & TASKQ_PREPOPULATE) {
		spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

		for (i = 0; i < minalloc; i++)
			task_done(tq, task_alloc(tq, TQ_PUSHPAGE | TQ_NEW));

		spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
	}

	if ((flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic)
		nthreads = 1;

	for (i = 0; i < nthreads; i++) {
		tqt = taskq_thread_create(tq);
		if (tqt == NULL)
			rc = 1;
		else
			count++;
	}

	/* Wait for all threads to be started before potential destroy */
	wait_event(tq->tq_wait_waitq, tq->tq_nthreads == count);

	/*
	 * taskq_thread might have touched nspawn, but we don't want them to
	 * because they're not dynamically spawned. So we reset it to 0 here.
	 */
	tq->tq_nspawn = 0;

	if (rc) {
		taskq_destroy(tq);
		tq = NULL;
	}

	return (tq);

EXPORT_SYMBOL(taskq_create);

taskq_destroy(taskq_t *tq)

	struct task_struct *thread;
	taskq_thread_t *tqt;

	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
	tq->tq_flags &= ~TASKQ_ACTIVE;
	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

	/*
	 * When TASKQ_ACTIVE is clear new tasks may not be added nor may
	 * new worker threads be spawned for a dynamic taskq.
	 */
	if (dynamic_taskq != NULL)
		taskq_wait_outstanding(dynamic_taskq, 0);

	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

	/* wait for spawning threads to insert themselves to the list */
	while (tq->tq_nspawn) {
		spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
		schedule_timeout_interruptible(1);
		spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
	}

	/*
	 * Signal each thread to exit and block until it does.  Each thread
	 * is responsible for removing itself from the list and freeing its
	 * taskq_thread_t.  This allows for idle threads to opt to remove
	 * themselves from the taskq.  They can be recreated as needed.
	 */
	while (!list_empty(&tq->tq_thread_list)) {
		tqt = list_entry(tq->tq_thread_list.next,
		    taskq_thread_t, tqt_thread_list);
		thread = tqt->tqt_thread;
		spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

		kthread_stop(thread);

		spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
	}

	while (!list_empty(&tq->tq_free_list)) {
		t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);

		ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

		list_del_init(&t->tqent_list);
		task_free(tq, t);
	}

	ASSERT0(tq->tq_nthreads);
	ASSERT0(tq->tq_nalloc);
	ASSERT0(tq->tq_nspawn);
	ASSERT(list_empty(&tq->tq_thread_list));
	ASSERT(list_empty(&tq->tq_active_list));
	ASSERT(list_empty(&tq->tq_free_list));
	ASSERT(list_empty(&tq->tq_pend_list));
	ASSERT(list_empty(&tq->tq_prio_list));
	ASSERT(list_empty(&tq->tq_delay_list));

	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

	strfree(tq->tq_name);
	kmem_free(tq, sizeof (taskq_t));

EXPORT_SYMBOL(taskq_destroy);

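/*
 * Illustrative sketch (not from the original source): a typical taskq
 * lifecycle for a consumer.  The name, thread count, and allocation limits
 * are arbitrary example values; my_func is a hypothetical callback.
 *
 *	taskq_t *tq = taskq_create("my_taskq", 4, maxclsyspri,
 *	    4, INT_MAX, TASKQ_PREPOPULATE);
 *	if (tq != NULL) {
 *		(void) taskq_dispatch(tq, my_func, arg, TQ_SLEEP);
 *		taskq_wait(tq);
 *		taskq_destroy(tq);
 *	}
 */
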
spl_taskq_init(void)

	system_taskq = taskq_create("spl_system_taskq", MAX(boot_ncpus, 64),
	    maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE|TASKQ_DYNAMIC);
	if (system_taskq == NULL)
		return (1);

	dynamic_taskq = taskq_create("spl_dynamic_taskq", 1,
	    maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE);
	if (dynamic_taskq == NULL) {
		taskq_destroy(system_taskq);
		return (1);
	}

	return (0);

spl_taskq_fini(void)

	taskq_destroy(dynamic_taskq);
	dynamic_taskq = NULL;

	taskq_destroy(system_taskq);
	system_taskq = NULL;