 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2007 The Regents of the University of California.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *
 * This file is part of the SPL, Solaris Porting Layer.
 * For details, see <http://zfsonlinux.org/>.
 *
 * The SPL is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * The SPL is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
 *
 * Solaris Porting Layer (SPL) Task Queue Implementation.
 */

#include <sys/taskq.h>

int spl_taskq_thread_bind = 0;
module_param(spl_taskq_thread_bind, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_bind, "Bind taskq thread to CPU by default");

int spl_taskq_thread_dynamic = 1;
module_param(spl_taskq_thread_dynamic, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_dynamic, "Allow dynamic taskq threads");

int spl_taskq_thread_priority = 1;
module_param(spl_taskq_thread_priority, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_priority,
    "Allow non-default priority for taskq threads");

int spl_taskq_thread_sequential = 4;
module_param(spl_taskq_thread_sequential, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_sequential,
    "Create new taskq threads after N sequential tasks");
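
/*
 * Example (added for illustration, not part of the original file): these
 * tunables are ordinary module parameters created with mode 0644, so they
 * can be set at module load time or adjusted later through sysfs. The
 * module name "spl" is an assumption here:
 *
 *        modprobe spl spl_taskq_thread_bind=1 spl_taskq_thread_sequential=8
 *        echo 0 > /sys/module/spl/parameters/spl_taskq_thread_dynamic
 */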

/* Global system-wide dynamic task queue available for all consumers */
taskq_t *system_taskq;
EXPORT_SYMBOL(system_taskq);

/* Private dedicated taskq for creating new taskq threads on demand. */
static taskq_t *dynamic_taskq;
static taskq_thread_t *taskq_thread_create(taskq_t *);

/* List of all taskqs */
DECLARE_RWSEM(tq_list_sem);
static uint_t taskq_tsd;

task_km_flags(uint_t flags)
        if (flags & TQ_NOSLEEP)

        if (flags & TQ_PUSHPAGE)

/*
 * taskq_find_by_name - Find the largest instance number of a named taskq.
 */
taskq_find_by_name(const char *name)
        struct list_head *tql;

        list_for_each_prev(tql, &tq_list) {
                tq = list_entry(tql, taskq_t, tq_taskqs);
                if (strcmp(name, tq->tq_name) == 0)
                        return tq->tq_instance;

/*
 * NOTE: Must be called with tq->tq_lock held, returns a list_t which
 * is not attached to the free, work, or pending taskq lists.
 */
task_alloc(taskq_t *tq, uint_t flags, unsigned long *irqflags)
        ASSERT(spin_is_locked(&tq->tq_lock));

        /* Acquire taskq_ent_t's from free list if available */
        if (!list_empty(&tq->tq_free_list) && !(flags & TQ_NEW)) {
                t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);

                ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));
                ASSERT(!(t->tqent_flags & TQENT_FLAG_CANCEL));
                ASSERT(!timer_pending(&t->tqent_timer));

                list_del_init(&t->tqent_list);

        /* Free list is empty and memory allocations are prohibited */
        if (flags & TQ_NOALLOC)

        /* Hit maximum taskq_ent_t pool size */
        if (tq->tq_nalloc >= tq->tq_maxalloc) {
                if (flags & TQ_NOSLEEP)

                /*
                 * Sleep periodically polling the free list for an available
                 * taskq_ent_t. Dispatching with TQ_SLEEP should always succeed
                 * but we cannot block forever waiting for a taskq_ent_t to
                 * show up in the free list, otherwise a deadlock can happen.
                 *
                 * Therefore, we need to allocate a new task even if the number
                 * of allocated tasks is above tq->tq_maxalloc, but we still
                 * end up delaying the task allocation by one second, thereby
                 * throttling the task dispatch rate.
                 */
                spin_unlock_irqrestore(&tq->tq_lock, *irqflags);
                schedule_timeout(HZ / 100);
                spin_lock_irqsave_nested(&tq->tq_lock, *irqflags,
                    tq->tq_lock_class);

        spin_unlock_irqrestore(&tq->tq_lock, *irqflags);
        t = kmem_alloc(sizeof (taskq_ent_t), task_km_flags(flags));
        spin_lock_irqsave_nested(&tq->tq_lock, *irqflags, tq->tq_lock_class);

/*
 * NOTE: Must be called with tq->tq_lock held, expects the taskq_ent_t
 * to already be removed from the free, work, or pending taskq lists.
 */
task_free(taskq_t *tq, taskq_ent_t *t)
        ASSERT(spin_is_locked(&tq->tq_lock));
        ASSERT(list_empty(&t->tqent_list));
        ASSERT(!timer_pending(&t->tqent_timer));

        kmem_free(t, sizeof (taskq_ent_t));

/*
 * NOTE: Must be called with tq->tq_lock held, either destroys the
 * taskq_ent_t if too many exist or moves it to the free list for later use.
 */
task_done(taskq_t *tq, taskq_ent_t *t)
        ASSERT(spin_is_locked(&tq->tq_lock));

        /* Wake tasks blocked in taskq_wait_id() */
        wake_up_all(&t->tqent_waitq);

        list_del_init(&t->tqent_list);

        if (tq->tq_nalloc <= tq->tq_minalloc) {
                t->tqent_func = NULL;

                list_add_tail(&t->tqent_list, &tq->tq_free_list);

/*
 * When a delayed task timer expires remove it from the delay list and
 * add it to the priority list in order for immediate processing.
 */
task_expire(unsigned long data)
        taskq_ent_t *w, *t = (taskq_ent_t *)data;
        taskq_t *tq = t->tqent_taskq;

        spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);

        if (t->tqent_flags & TQENT_FLAG_CANCEL) {
                ASSERT(list_empty(&t->tqent_list));
                spin_unlock_irqrestore(&tq->tq_lock, flags);

        /*
         * The priority list must be maintained in strict task id order
         * from lowest to highest for lowest_id to be easily calculable.
         */
        list_del(&t->tqent_list);
        list_for_each_prev(l, &tq->tq_prio_list) {
                w = list_entry(l, taskq_ent_t, tqent_list);
                if (w->tqent_id < t->tqent_id) {
                        list_add(&t->tqent_list, l);

        if (l == &tq->tq_prio_list)
                list_add(&t->tqent_list, &tq->tq_prio_list);

        spin_unlock_irqrestore(&tq->tq_lock, flags);

        wake_up(&tq->tq_work_waitq);

/*
 * Returns the lowest incomplete taskqid_t. The taskqid_t may
 * be queued on the pending list, on the priority list, on the
 * delay list, or on the work list currently being handled, but
 * it is not 100% complete yet.
 */
taskq_lowest_id(taskq_t *tq)
        taskqid_t lowest_id = tq->tq_next_id;

        ASSERT(spin_is_locked(&tq->tq_lock));

        if (!list_empty(&tq->tq_pend_list)) {
                t = list_entry(tq->tq_pend_list.next, taskq_ent_t, tqent_list);
                lowest_id = MIN(lowest_id, t->tqent_id);

        if (!list_empty(&tq->tq_prio_list)) {
                t = list_entry(tq->tq_prio_list.next, taskq_ent_t, tqent_list);
                lowest_id = MIN(lowest_id, t->tqent_id);

        if (!list_empty(&tq->tq_delay_list)) {
                t = list_entry(tq->tq_delay_list.next, taskq_ent_t, tqent_list);
                lowest_id = MIN(lowest_id, t->tqent_id);

        if (!list_empty(&tq->tq_active_list)) {
                tqt = list_entry(tq->tq_active_list.next, taskq_thread_t,
                    tqt_active_list);
                ASSERT(tqt->tqt_id != 0);
                lowest_id = MIN(lowest_id, tqt->tqt_id);

/*
 * Insert a task into a list keeping the list sorted by increasing taskqid.
 */
taskq_insert_in_order(taskq_t *tq, taskq_thread_t *tqt)
        ASSERT(spin_is_locked(&tq->tq_lock));

        list_for_each_prev(l, &tq->tq_active_list) {
                w = list_entry(l, taskq_thread_t, tqt_active_list);
                if (w->tqt_id < tqt->tqt_id) {
                        list_add(&tqt->tqt_active_list, l);

        if (l == &tq->tq_active_list)
                list_add(&tqt->tqt_active_list, &tq->tq_active_list);

/*
 * Find and return a task from the given list if it exists. The list
 * must be in lowest to highest task id order.
 */
taskq_find_list(taskq_t *tq, struct list_head *lh, taskqid_t id)
        ASSERT(spin_is_locked(&tq->tq_lock));

        list_for_each(l, lh) {
                t = list_entry(l, taskq_ent_t, tqent_list);

                if (t->tqent_id == id)

                if (t->tqent_id > id)

/*
 * Find an already dispatched task given the task id regardless of what
 * state it is in. If a task is still pending or executing it will be
 * returned and 'active' set appropriately. If the task has already
 * been run then NULL is returned.
 */
taskq_find(taskq_t *tq, taskqid_t id, int *active)
        ASSERT(spin_is_locked(&tq->tq_lock));

        t = taskq_find_list(tq, &tq->tq_delay_list, id);

        t = taskq_find_list(tq, &tq->tq_prio_list, id);

        t = taskq_find_list(tq, &tq->tq_pend_list, id);

        list_for_each(l, &tq->tq_active_list) {
                tqt = list_entry(l, taskq_thread_t, tqt_active_list);
                if (tqt->tqt_id == id) {

/*
 * Theory for the taskq_wait_id(), taskq_wait_outstanding(), and
 * taskq_wait() functions below.
 *
 * Taskq waiting is accomplished by tracking the lowest outstanding task
 * id and the next available task id. As tasks are dispatched they are
 * added to the tail of the pending, priority, or delay lists. As worker
 * threads become available the tasks are removed from the heads of these
 * lists and linked to the worker threads. This ensures the lists are
 * kept sorted by lowest to highest task id.
 *
 * Therefore the lowest outstanding task id can be quickly determined by
 * checking the head item from all of these lists. This value is stored
 * with the taskq as the lowest id. It only needs to be recalculated when
 * either the task with the current lowest id completes or is canceled.
 *
 * By blocking until the lowest task id exceeds the passed task id the
 * taskq_wait_outstanding() function can be easily implemented. Similarly,
 * by blocking until the lowest task id matches the next task id taskq_wait()
 * can be implemented.
 *
 * Callers should be aware that when there are multiple worker threads it
 * is possible for larger task ids to complete before smaller ones. Also
 * when the taskq contains delay tasks with small task ids callers may
 * block for a considerable length of time waiting for them to expire and
 * execute.
 */
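
/*
 * Illustrative example (not part of the original file): typical use of the
 * wait primitives described above. The taskq tq, the function my_func(),
 * and its argument arg are hypothetical.
 *
 *        taskqid_t id;
 *
 *        id = taskq_dispatch(tq, my_func, arg, TQ_SLEEP);
 *        if (id != 0)
 *                taskq_wait_id(tq, id);
 *
 * taskq_wait_id() waits for that single task, taskq_wait_outstanding(tq, 0)
 * waits for everything dispatched before the call, and taskq_wait(tq) waits
 * until the taskq is completely empty.
 */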

taskq_wait_id_check(taskq_t *tq, taskqid_t id)
        spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
        rc = (taskq_find(tq, id, &active) == NULL);
        spin_unlock_irqrestore(&tq->tq_lock, flags);

/*
 * The taskq_wait_id() function blocks until the passed task id completes.
 * This does not guarantee that all lower task ids have completed.
 */
taskq_wait_id(taskq_t *tq, taskqid_t id)
        wait_event(tq->tq_wait_waitq, taskq_wait_id_check(tq, id));
EXPORT_SYMBOL(taskq_wait_id);

taskq_wait_outstanding_check(taskq_t *tq, taskqid_t id)
        spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
        rc = (id < tq->tq_lowest_id);
        spin_unlock_irqrestore(&tq->tq_lock, flags);

/*
 * The taskq_wait_outstanding() function will block until all tasks with a
 * lower taskqid than the passed 'id' have been completed. Note that all
 * task id's are assigned monotonically at dispatch time. Zero may be
 * passed for the id to indicate all tasks dispatched up to this point,
 * but not after, should be waited for.
 */
taskq_wait_outstanding(taskq_t *tq, taskqid_t id)
        wait_event(tq->tq_wait_waitq,
            taskq_wait_outstanding_check(tq, id ? id : tq->tq_next_id - 1));
EXPORT_SYMBOL(taskq_wait_outstanding);

taskq_wait_check(taskq_t *tq)
        spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
        rc = (tq->tq_lowest_id == tq->tq_next_id);
        spin_unlock_irqrestore(&tq->tq_lock, flags);

/*
 * The taskq_wait() function will block until the taskq is empty.
 * This means that if a taskq re-dispatches work to itself taskq_wait()
 * callers will block indefinitely.
 */
taskq_wait(taskq_t *tq)
        wait_event(tq->tq_wait_waitq, taskq_wait_check(tq));
EXPORT_SYMBOL(taskq_wait);

taskq_member(taskq_t *tq, kthread_t *t)
        return (tq == (taskq_t *)tsd_get_by_thread(taskq_tsd, t));
EXPORT_SYMBOL(taskq_member);

/*
 * Cancel an already dispatched task given the task id. Still pending tasks
 * will be immediately canceled, and if the task is active the function will
 * block until it completes. Preallocated tasks which are canceled must be
 * freed by the caller.
 */
taskq_cancel_id(taskq_t *tq, taskqid_t id)
        spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
        t = taskq_find(tq, id, &active);
                list_del_init(&t->tqent_list);
                t->tqent_flags |= TQENT_FLAG_CANCEL;

                /*
                 * When canceling the lowest outstanding task id we
                 * must recalculate the new lowest outstanding id.
                 */
                if (tq->tq_lowest_id == t->tqent_id) {
                        tq->tq_lowest_id = taskq_lowest_id(tq);
                        ASSERT3S(tq->tq_lowest_id, >, t->tqent_id);

                /*
                 * The task_expire() function takes the tq->tq_lock so drop
                 * the lock before synchronously cancelling the timer.
                 */
                if (timer_pending(&t->tqent_timer)) {
                        spin_unlock_irqrestore(&tq->tq_lock, flags);
                        del_timer_sync(&t->tqent_timer);
                        spin_lock_irqsave_nested(&tq->tq_lock, flags,
                            tq->tq_lock_class);

                if (!(t->tqent_flags & TQENT_FLAG_PREALLOC))

        spin_unlock_irqrestore(&tq->tq_lock, flags);

                taskq_wait_id(tq, id);

EXPORT_SYMBOL(taskq_cancel_id);

static int taskq_thread_spawn(taskq_t *tq);

taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
        unsigned long irqflags;

        spin_lock_irqsave_nested(&tq->tq_lock, irqflags, tq->tq_lock_class);

        /* Taskq being destroyed and all tasks drained */
        if (!(tq->tq_flags & TASKQ_ACTIVE))

        /* Do not queue the task unless there is an idle thread for it */
        ASSERT(tq->tq_nactive <= tq->tq_nthreads);
        if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads))

        if ((t = task_alloc(tq, flags, &irqflags)) == NULL)

        spin_lock(&t->tqent_lock);

        /* Queue to the priority list instead of the pending list */
        if (flags & TQ_FRONT)
                list_add_tail(&t->tqent_list, &tq->tq_prio_list);
        else
                list_add_tail(&t->tqent_list, &tq->tq_pend_list);

        t->tqent_id = rc = tq->tq_next_id;

        t->tqent_func = func;

        t->tqent_timer.data = 0;
        t->tqent_timer.function = NULL;
        t->tqent_timer.expires = 0;

        ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

        spin_unlock(&t->tqent_lock);

        wake_up(&tq->tq_work_waitq);

        /* Spawn additional taskq threads if required. */
        if (tq->tq_nactive == tq->tq_nthreads)
                (void) taskq_thread_spawn(tq);

        spin_unlock_irqrestore(&tq->tq_lock, irqflags);
EXPORT_SYMBOL(taskq_dispatch);

taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg,
    uint_t flags, clock_t expire_time)
        unsigned long irqflags;

        spin_lock_irqsave_nested(&tq->tq_lock, irqflags, tq->tq_lock_class);

        /* Taskq being destroyed and all tasks drained */
        if (!(tq->tq_flags & TASKQ_ACTIVE))

        if ((t = task_alloc(tq, flags, &irqflags)) == NULL)

        spin_lock(&t->tqent_lock);

        /* Queue to the delay list for subsequent execution */
        list_add_tail(&t->tqent_list, &tq->tq_delay_list);

        t->tqent_id = rc = tq->tq_next_id;

        t->tqent_func = func;

        t->tqent_timer.data = (unsigned long)t;
        t->tqent_timer.function = task_expire;
        t->tqent_timer.expires = (unsigned long)expire_time;
        add_timer(&t->tqent_timer);

        ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

        spin_unlock(&t->tqent_lock);

        /* Spawn additional taskq threads if required. */
        if (tq->tq_nactive == tq->tq_nthreads)
                (void) taskq_thread_spawn(tq);
        spin_unlock_irqrestore(&tq->tq_lock, irqflags);
EXPORT_SYMBOL(taskq_dispatch_delay);

taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
    taskq_ent_t *t)
        unsigned long irqflags;

        spin_lock_irqsave_nested(&tq->tq_lock, irqflags,
            tq->tq_lock_class);

        /* Taskq being destroyed and all tasks drained */
        if (!(tq->tq_flags & TASKQ_ACTIVE)) {

        spin_lock(&t->tqent_lock);

        /*
         * Mark it as a prealloc'd task. This is important
         * to ensure that we don't free it later.
         */
        t->tqent_flags |= TQENT_FLAG_PREALLOC;

        /* Queue to the priority list instead of the pending list */
        if (flags & TQ_FRONT)
                list_add_tail(&t->tqent_list, &tq->tq_prio_list);
        else
                list_add_tail(&t->tqent_list, &tq->tq_pend_list);

        t->tqent_id = tq->tq_next_id;

        t->tqent_func = func;

        spin_unlock(&t->tqent_lock);

        wake_up(&tq->tq_work_waitq);

        /* Spawn additional taskq threads if required. */
        if (tq->tq_nactive == tq->tq_nthreads)
                (void) taskq_thread_spawn(tq);
        spin_unlock_irqrestore(&tq->tq_lock, irqflags);
EXPORT_SYMBOL(taskq_dispatch_ent);
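
/*
 * Illustrative example (not part of the original file): the prealloc'd
 * dispatch path. The structure my_work_t and function my_func() are
 * hypothetical; the taskq_ent_t is embedded in the caller's own object so
 * no allocation is required at dispatch time.
 *
 *        typedef struct my_work {
 *                taskq_ent_t        mw_tqent;
 *                int                mw_data;
 *        } my_work_t;
 *
 *        taskq_init_ent(&work->mw_tqent);
 *        taskq_dispatch_ent(tq, my_func, work, TQ_SLEEP, &work->mw_tqent);
 */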

taskq_empty_ent(taskq_ent_t *t)
        return (list_empty(&t->tqent_list));
EXPORT_SYMBOL(taskq_empty_ent);

taskq_init_ent(taskq_ent_t *t)
        spin_lock_init(&t->tqent_lock);
        init_waitqueue_head(&t->tqent_waitq);
        init_timer(&t->tqent_timer);
        INIT_LIST_HEAD(&t->tqent_list);
        t->tqent_func = NULL;
        t->tqent_taskq = NULL;
EXPORT_SYMBOL(taskq_init_ent);

/*
 * Return the next pending task, preference is given to tasks on the
 * priority list which were dispatched with TQ_FRONT.
 */
taskq_next_ent(taskq_t *tq)
        struct list_head *list;

        ASSERT(spin_is_locked(&tq->tq_lock));

        if (!list_empty(&tq->tq_prio_list))
                list = &tq->tq_prio_list;
        else if (!list_empty(&tq->tq_pend_list))
                list = &tq->tq_pend_list;

        return (list_entry(list->next, taskq_ent_t, tqent_list));

/*
 * Spawns a new thread for the specified taskq.
 */
taskq_thread_spawn_task(void *arg)
        taskq_t *tq = (taskq_t *)arg;

        (void) taskq_thread_create(tq);

        spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
        spin_unlock_irqrestore(&tq->tq_lock, flags);

/*
 * Spawn additional threads for dynamic taskqs (TASKQ_DYNAMIC) if the current
 * number of threads is insufficient to handle the pending tasks. These
 * new threads must be created by the dedicated dynamic_taskq to avoid
 * deadlocks between thread creation and memory reclaim. The system_taskq
 * which is also a dynamic taskq cannot be safely used for this.
 */
taskq_thread_spawn(taskq_t *tq)
        if (!(tq->tq_flags & TASKQ_DYNAMIC))

        if ((tq->tq_nthreads + tq->tq_nspawn < tq->tq_maxthreads) &&
            (tq->tq_flags & TASKQ_ACTIVE)) {
                spawning = (++tq->tq_nspawn);
                taskq_dispatch(dynamic_taskq, taskq_thread_spawn_task,

/*
 * Threads in a dynamic taskq should only exit once it has been completely
 * drained and no other threads are actively servicing tasks. This prevents
 * threads from being created and destroyed more than is required.
 *
 * The first thread in the thread list is treated as the primary thread.
 * There is nothing special about the primary thread but in order to avoid
 * all the taskq pids from changing we opt to make it long running.
 */
taskq_thread_should_stop(taskq_t *tq, taskq_thread_t *tqt)
        ASSERT(spin_is_locked(&tq->tq_lock));

        if (!(tq->tq_flags & TASKQ_DYNAMIC))

        if (list_first_entry(&(tq->tq_thread_list), taskq_thread_t,
            tqt_thread_list) == tqt)

            ((tq->tq_nspawn == 0) &&        /* No threads are being spawned */
            (tq->tq_nactive == 0) &&        /* No threads are handling tasks */
            (tq->tq_nthreads > 1) &&        /* More than 1 thread is running */
            (!taskq_next_ent(tq)) &&        /* There are no pending tasks */
            (spl_taskq_thread_dynamic));    /* Dynamic taskqs are allowed */

taskq_thread(void *args)
        DECLARE_WAITQUEUE(wait, current);
        taskq_thread_t *tqt = args;

        current->flags |= PF_NOFREEZE;

        (void) spl_fstrans_mark();

        sigfillset(&blocked);
        sigprocmask(SIG_BLOCK, &blocked, NULL);
        flush_signals(current);

        tsd_set(taskq_tsd, tq);
        spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);

        /* Immediately exit if more threads than allowed were created. */
        if (tq->tq_nthreads >= tq->tq_maxthreads)

        list_add_tail(&tqt->tqt_thread_list, &tq->tq_thread_list);
        wake_up(&tq->tq_wait_waitq);
        set_current_state(TASK_INTERRUPTIBLE);

        while (!kthread_should_stop()) {

                if (list_empty(&tq->tq_pend_list) &&
                    list_empty(&tq->tq_prio_list)) {

                        if (taskq_thread_should_stop(tq, tqt)) {
                                wake_up_all(&tq->tq_wait_waitq);

                        add_wait_queue_exclusive(&tq->tq_work_waitq, &wait);
                        spin_unlock_irqrestore(&tq->tq_lock, flags);

                        spin_lock_irqsave_nested(&tq->tq_lock, flags,
                            tq->tq_lock_class);
                        remove_wait_queue(&tq->tq_work_waitq, &wait);

                        __set_current_state(TASK_RUNNING);

                if ((t = taskq_next_ent(tq)) != NULL) {
                        list_del_init(&t->tqent_list);

                        /*
                         * In order to support recursively dispatching a
                         * preallocated taskq_ent_t, tqent_id must be
                         * stored prior to executing tqent_func.
                         */
                        tqt->tqt_id = t->tqent_id;

                        /*
                         * We must store a copy of the flags prior to
                         * servicing the task (servicing a prealloc'd task
                         * returns the ownership of the tqent back to
                         * the caller of taskq_dispatch). Thus,
                         * tqent_flags _may_ change within the call.
                         */
                        tqt->tqt_flags = t->tqent_flags;

                        taskq_insert_in_order(tq, tqt);

                        spin_unlock_irqrestore(&tq->tq_lock, flags);

                        /* Perform the requested task */
                        t->tqent_func(t->tqent_arg);

                        spin_lock_irqsave_nested(&tq->tq_lock, flags,
                            tq->tq_lock_class);
                        list_del_init(&tqt->tqt_active_list);
                        tqt->tqt_task = NULL;

                        /* For prealloc'd tasks, we don't free anything. */
                        if (!(tqt->tqt_flags & TQENT_FLAG_PREALLOC))

                        /*
                         * When the current lowest outstanding taskqid is
                         * done, calculate the new lowest outstanding id.
                         */
                        if (tq->tq_lowest_id == tqt->tqt_id) {
                                tq->tq_lowest_id = taskq_lowest_id(tq);
                                ASSERT3S(tq->tq_lowest_id, >, tqt->tqt_id);

                        /* Spawn additional taskq threads if required. */
                        if ((++seq_tasks) > spl_taskq_thread_sequential &&
                            taskq_thread_spawn(tq))

                        wake_up_all(&tq->tq_wait_waitq);

                        if (taskq_thread_should_stop(tq, tqt))

                set_current_state(TASK_INTERRUPTIBLE);

        __set_current_state(TASK_RUNNING);

        list_del_init(&tqt->tqt_thread_list);

        kmem_free(tqt, sizeof (taskq_thread_t));
        spin_unlock_irqrestore(&tq->tq_lock, flags);

        tsd_set(taskq_tsd, NULL);

static taskq_thread_t *
taskq_thread_create(taskq_t *tq)
        static int last_used_cpu = 0;

        tqt = kmem_alloc(sizeof (*tqt), KM_PUSHPAGE);
        INIT_LIST_HEAD(&tqt->tqt_thread_list);
        INIT_LIST_HEAD(&tqt->tqt_active_list);

        tqt->tqt_thread = spl_kthread_create(taskq_thread, tqt,
            "%s", tq->tq_name);
        if (tqt->tqt_thread == NULL) {
                kmem_free(tqt, sizeof (taskq_thread_t));

        if (spl_taskq_thread_bind) {
                last_used_cpu = (last_used_cpu + 1) % num_online_cpus();
                kthread_bind(tqt->tqt_thread, last_used_cpu);

        if (spl_taskq_thread_priority)
                set_user_nice(tqt->tqt_thread, PRIO_TO_NICE(tq->tq_pri));

        wake_up_process(tqt->tqt_thread);

taskq_create(const char *name, int nthreads, pri_t pri,
    int minalloc, int maxalloc, uint_t flags)
        int count = 0, rc = 0, i;
        unsigned long irqflags;

        ASSERT(name != NULL);
        ASSERT(minalloc >= 0);
        ASSERT(maxalloc <= INT_MAX);
        ASSERT(!(flags & (TASKQ_CPR_SAFE))); /* Unsupported */

        /* Scale the number of threads using nthreads as a percentage */
        if (flags & TASKQ_THREADS_CPU_PCT) {
                ASSERT(nthreads <= 100);
                ASSERT(nthreads >= 0);
                nthreads = MIN(nthreads, 100);
                nthreads = MAX(nthreads, 0);
                nthreads = MAX((num_online_cpus() * nthreads) / 100, 1);

        tq = kmem_alloc(sizeof (*tq), KM_PUSHPAGE);

        spin_lock_init(&tq->tq_lock);
        INIT_LIST_HEAD(&tq->tq_thread_list);
        INIT_LIST_HEAD(&tq->tq_active_list);
        tq->tq_name = strdup(name);
        tq->tq_nthreads = 0;
        tq->tq_maxthreads = nthreads;
        tq->tq_minalloc = minalloc;
        tq->tq_maxalloc = maxalloc;
        tq->tq_flags = (flags | TASKQ_ACTIVE);
        tq->tq_lowest_id = 1;
        INIT_LIST_HEAD(&tq->tq_free_list);
        INIT_LIST_HEAD(&tq->tq_pend_list);
        INIT_LIST_HEAD(&tq->tq_prio_list);
        INIT_LIST_HEAD(&tq->tq_delay_list);
        init_waitqueue_head(&tq->tq_work_waitq);
        init_waitqueue_head(&tq->tq_wait_waitq);
        tq->tq_lock_class = TQ_LOCK_GENERAL;
        INIT_LIST_HEAD(&tq->tq_taskqs);

        if (flags & TASKQ_PREPOPULATE) {
                spin_lock_irqsave_nested(&tq->tq_lock, irqflags,
                    tq->tq_lock_class);

                for (i = 0; i < minalloc; i++)
                        task_done(tq, task_alloc(tq, TQ_PUSHPAGE | TQ_NEW,
                            &irqflags));

                spin_unlock_irqrestore(&tq->tq_lock, irqflags);

        if ((flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic)

        for (i = 0; i < nthreads; i++) {
                tqt = taskq_thread_create(tq);

        /* Wait for all threads to be started before potential destroy */
        wait_event(tq->tq_wait_waitq, tq->tq_nthreads == count);

                down_write(&tq_list_sem);
                tq->tq_instance = taskq_find_by_name(name) + 1;
                list_add_tail(&tq->tq_taskqs, &tq_list);
                up_write(&tq_list_sem);
EXPORT_SYMBOL(taskq_create);

taskq_destroy(taskq_t *tq)
        struct task_struct *thread;
        taskq_thread_t *tqt;
        unsigned long flags;

        spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
        tq->tq_flags &= ~TASKQ_ACTIVE;
        spin_unlock_irqrestore(&tq->tq_lock, flags);

        /*
         * When TASKQ_ACTIVE is clear new tasks may not be added nor may
         * new worker threads be spawned for dynamic taskqs.
         */
        if (dynamic_taskq != NULL)
                taskq_wait_outstanding(dynamic_taskq, 0);

        /* remove taskq from global list used by the kstats */
        down_write(&tq_list_sem);
        list_del(&tq->tq_taskqs);
        up_write(&tq_list_sem);

        spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);

        /*
         * Signal each thread to exit and block until it does. Each thread
         * is responsible for removing itself from the list and freeing its
         * taskq_thread_t. This allows for idle threads to opt to remove
         * themselves from the taskq. They can be recreated as needed.
         */
        while (!list_empty(&tq->tq_thread_list)) {
                tqt = list_entry(tq->tq_thread_list.next,
                    taskq_thread_t, tqt_thread_list);
                thread = tqt->tqt_thread;
                spin_unlock_irqrestore(&tq->tq_lock, flags);

                kthread_stop(thread);

                spin_lock_irqsave_nested(&tq->tq_lock, flags,
                    tq->tq_lock_class);

        while (!list_empty(&tq->tq_free_list)) {
                t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);

                ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

                list_del_init(&t->tqent_list);

        ASSERT0(tq->tq_nthreads);
        ASSERT0(tq->tq_nalloc);
        ASSERT0(tq->tq_nspawn);
        ASSERT(list_empty(&tq->tq_thread_list));
        ASSERT(list_empty(&tq->tq_active_list));
        ASSERT(list_empty(&tq->tq_free_list));
        ASSERT(list_empty(&tq->tq_pend_list));
        ASSERT(list_empty(&tq->tq_prio_list));
        ASSERT(list_empty(&tq->tq_delay_list));

        spin_unlock_irqrestore(&tq->tq_lock, flags);

        strfree(tq->tq_name);
        kmem_free(tq, sizeof (taskq_t));
EXPORT_SYMBOL(taskq_destroy);

spl_taskq_init(void)
        tsd_create(&taskq_tsd, NULL);

        system_taskq = taskq_create("spl_system_taskq", MAX(boot_ncpus, 64),
            maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE|TASKQ_DYNAMIC);
        if (system_taskq == NULL)

        dynamic_taskq = taskq_create("spl_dynamic_taskq", 1,
            maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE);
        if (dynamic_taskq == NULL) {
                taskq_destroy(system_taskq);

        /*
         * This is used to annotate tq_lock, so
         *     taskq_dispatch -> taskq_thread_spawn -> taskq_dispatch
         * does not trigger a lockdep warning re: possible recursive locking
         */
        dynamic_taskq->tq_lock_class = TQ_LOCK_DYNAMIC;

spl_taskq_fini(void)
        taskq_destroy(dynamic_taskq);
        dynamic_taskq = NULL;

        taskq_destroy(system_taskq);
        system_taskq = NULL;

        tsd_destroy(&taskq_tsd);