/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://github.com/behlendorf/spl/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *****************************************************************************
 *  Solaris Porting Layer (SPL) Task Queue Implementation.
\*****************************************************************************/
#include <sys/taskq.h>
#include <spl-debug.h>

#ifdef SS_DEBUG_SUBSYS
#undef SS_DEBUG_SUBSYS
#endif

#define SS_DEBUG_SUBSYS SS_TASKQ
/* Global system-wide dynamic task queue available for all consumers */
taskq_t *system_taskq;
EXPORT_SYMBOL(system_taskq);
static int
task_km_flags(uint_t flags)
{
	if (flags & TQ_NOSLEEP)
		return KM_NOSLEEP;

	if (flags & TQ_PUSHPAGE)
		return KM_PUSHPAGE;

	return KM_SLEEP;
}
/*
 * NOTE: Must be called with tq->tq_lock held.  Returns a taskq_ent_t which
 * is not attached to the free, work, or pending taskq lists.
 */
static taskq_ent_t *
task_alloc(taskq_t *tq, uint_t flags)
{
	taskq_ent_t *t;
	int count = 0;

	ASSERT(spin_is_locked(&tq->tq_lock));
retry:
	/* Acquire taskq_ent_t's from free list if available */
	if (!list_empty(&tq->tq_free_list) && !(flags & TQ_NEW)) {
		t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);

		ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));
		ASSERT(!(t->tqent_flags & TQENT_FLAG_CANCEL));
		ASSERT(!timer_pending(&t->tqent_timer));

		list_del_init(&t->tqent_list);
		SRETURN(t);
	}

	/* Free list is empty and memory allocations are prohibited */
	if (flags & TQ_NOALLOC)
		SRETURN(NULL);

	/* Hit maximum taskq_ent_t pool size */
	if (tq->tq_nalloc >= tq->tq_maxalloc) {
		if (flags & TQ_NOSLEEP)
			SRETURN(NULL);

		/*
		 * Sleep periodically polling the free list for an available
		 * taskq_ent_t.  Dispatching with TQ_SLEEP should always
		 * succeed, but we cannot block forever waiting for a
		 * taskq_ent_t to show up in the free list, otherwise a
		 * deadlock can happen.
		 *
		 * Therefore, we need to allocate a new task even if the
		 * number of allocated tasks is above tq->tq_maxalloc, but we
		 * still end up delaying the task allocation by one second,
		 * thereby throttling the task dispatch rate.
		 */
		if (count < 100) {
			spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
			schedule_timeout(HZ / 100);
			spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
			SGOTO(retry, count++);
		}
	}

	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
	t = kmem_alloc(sizeof(taskq_ent_t), task_km_flags(flags));
	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

	if (t) {
		taskq_init_ent(t);
		tq->tq_nalloc++;
	}

	SRETURN(t);
}
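
/*
 * Illustrative note (not part of the original source): the throttle above is
 * only visible to dispatchers.  A TQ_NOSLEEP dispatch can fail once the
 * tq_maxalloc pool limit is hit, while a TQ_SLEEP dispatch still succeeds but
 * may be delayed by roughly a second of free-list polling.  The callback and
 * argument names below are hypothetical.
 *
 *	taskqid_t id;
 *
 *	id = taskq_dispatch(tq, my_func, my_arg, TQ_NOSLEEP);
 *	if (id == 0) {
 *		... entry pool exhausted, retry later or use TQ_SLEEP ...
 *	}
 */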
/*
 * NOTE: Must be called with tq->tq_lock held.  Expects the taskq_ent_t
 * to already be removed from the free, work, or pending taskq lists.
 */
static void
task_free(taskq_t *tq, taskq_ent_t *t)
{
	ASSERT(spin_is_locked(&tq->tq_lock));
	ASSERT(list_empty(&t->tqent_list));
	ASSERT(!timer_pending(&t->tqent_timer));

	kmem_free(t, sizeof(taskq_ent_t));
	tq->tq_nalloc--;
}
/*
 * NOTE: Must be called with tq->tq_lock held.  Either destroys the
 * taskq_ent_t if too many exist or moves it to the free list for later use.
 */
static void
task_done(taskq_t *tq, taskq_ent_t *t)
{
	ASSERT(spin_is_locked(&tq->tq_lock));

	/* Wake tasks blocked in taskq_wait_id() */
	wake_up_all(&t->tqent_waitq);

	list_del_init(&t->tqent_list);

	if (tq->tq_nalloc <= tq->tq_minalloc) {
		t->tqent_id = 0;
		t->tqent_func = NULL;
		t->tqent_arg = NULL;
		t->tqent_flags = 0;

		list_add_tail(&t->tqent_list, &tq->tq_free_list);
	} else {
		task_free(tq, t);
	}
}
/*
 * When a delayed task timer expires remove it from the delay list and
 * add it to the priority list in order for immediate processing.
 */
static void
task_expire(unsigned long data)
{
	taskq_ent_t *w, *t = (taskq_ent_t *)data;
	taskq_t *tq = t->tqent_taskq;
	struct list_head *l;

	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

	if (t->tqent_flags & TQENT_FLAG_CANCEL) {
		ASSERT(list_empty(&t->tqent_list));
		spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
		return;
	}

	/*
	 * The priority list must be maintained in strict task id order
	 * from lowest to highest for lowest_id to be easily calculable.
	 */
	list_del(&t->tqent_list);
	list_for_each_prev(l, &tq->tq_prio_list) {
		w = list_entry(l, taskq_ent_t, tqent_list);
		if (w->tqent_id < t->tqent_id) {
			list_add(&t->tqent_list, l);
			break;
		}
	}
	if (l == &tq->tq_prio_list)
		list_add(&t->tqent_list, &tq->tq_prio_list);

	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

	wake_up(&tq->tq_work_waitq);
}
/*
 * Returns the lowest incomplete taskqid_t.  The taskqid_t may be queued on
 * the pending list, on the priority list, on the delay list, or on the work
 * list currently being handled, but it is not 100% complete yet.
 */
static taskqid_t
taskq_lowest_id(taskq_t *tq)
{
	taskqid_t lowest_id = tq->tq_next_id;
	taskq_ent_t *t;
	taskq_thread_t *tqt;

	ASSERT(spin_is_locked(&tq->tq_lock));

	if (!list_empty(&tq->tq_pend_list)) {
		t = list_entry(tq->tq_pend_list.next, taskq_ent_t, tqent_list);
		lowest_id = MIN(lowest_id, t->tqent_id);
	}

	if (!list_empty(&tq->tq_prio_list)) {
		t = list_entry(tq->tq_prio_list.next, taskq_ent_t, tqent_list);
		lowest_id = MIN(lowest_id, t->tqent_id);
	}

	if (!list_empty(&tq->tq_delay_list)) {
		t = list_entry(tq->tq_delay_list.next, taskq_ent_t, tqent_list);
		lowest_id = MIN(lowest_id, t->tqent_id);
	}

	if (!list_empty(&tq->tq_active_list)) {
		tqt = list_entry(tq->tq_active_list.next, taskq_thread_t,
		    tqt_active_list);
		ASSERT(tqt->tqt_id != 0);
		lowest_id = MIN(lowest_id, tqt->tqt_id);
	}

	return (lowest_id);
}
/*
 * Insert a task into a list keeping the list sorted by increasing taskqid.
 */
static void
taskq_insert_in_order(taskq_t *tq, taskq_thread_t *tqt)
{
	taskq_thread_t *w;
	struct list_head *l;

	ASSERT(spin_is_locked(&tq->tq_lock));

	list_for_each_prev(l, &tq->tq_active_list) {
		w = list_entry(l, taskq_thread_t, tqt_active_list);
		if (w->tqt_id < tqt->tqt_id) {
			list_add(&tqt->tqt_active_list, l);
			break;
		}
	}
	if (l == &tq->tq_active_list)
		list_add(&tqt->tqt_active_list, &tq->tq_active_list);
}
/*
 * Find and return a task from the given list if it exists.  The list
 * must be in lowest to highest task id order.
 */
static taskq_ent_t *
taskq_find_list(taskq_t *tq, struct list_head *lh, taskqid_t id)
{
	struct list_head *l;
	taskq_ent_t *t;

	ASSERT(spin_is_locked(&tq->tq_lock));

	list_for_each(l, lh) {
		t = list_entry(l, taskq_ent_t, tqent_list);

		if (t->tqent_id == id)
			return (t);

		if (t->tqent_id > id)
			break;
	}

	return (NULL);
}
/*
 * Find an already dispatched task given the task id regardless of what
 * state it is in.  If a task is still pending or executing it will be
 * returned and 'active' set appropriately.  If the task has already
 * been run then NULL is returned.
 */
static taskq_ent_t *
taskq_find(taskq_t *tq, taskqid_t id, int *active)
{
	taskq_thread_t *tqt;
	struct list_head *l;
	taskq_ent_t *t;

	ASSERT(spin_is_locked(&tq->tq_lock));

	t = taskq_find_list(tq, &tq->tq_delay_list, id);
	if (t)
		return (t);

	t = taskq_find_list(tq, &tq->tq_prio_list, id);
	if (t)
		return (t);

	t = taskq_find_list(tq, &tq->tq_pend_list, id);
	if (t)
		return (t);

	list_for_each(l, &tq->tq_active_list) {
		tqt = list_entry(l, taskq_thread_t, tqt_active_list);
		if (tqt->tqt_id == id) {
			t = tqt->tqt_task;
			*active = 1;
			return (t);
		}
	}

	return (NULL);
}
/*
 * The taskq_wait_id() function blocks until the passed task id completes.
 * This does not guarantee that all lower task ids have completed.
 */
void
taskq_wait_id(taskq_t *tq, taskqid_t id)
{
	DEFINE_WAIT(wait);
	taskq_ent_t *t;
	int active = 0;

	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
	t = taskq_find(tq, id, &active);
	if (t)
		prepare_to_wait(&t->tqent_waitq, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

	/*
	 * We rely on the kernel's autoremove_wake_function() to remove us
	 * from the wait queue in the context of wake_up().  Once woken the
	 * taskq_ent_t pointer must never be accessed.
	 */
	if (t) {
		t = NULL;
		schedule();
		__set_current_state(TASK_RUNNING);
	}
}
EXPORT_SYMBOL(taskq_wait_id);
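
/*
 * Usage sketch (illustrative, not part of the original source): waiting on a
 * single dispatched task.  The callback my_cb() and its argument are
 * hypothetical, and the check against 0 assumes the usual convention that a
 * failed dispatch returns the invalid id 0.
 *
 *	static void my_cb(void *arg) { ... }
 *
 *	taskqid_t id = taskq_dispatch(tq, my_cb, arg, TQ_SLEEP);
 *	if (id != 0)
 *		taskq_wait_id(tq, id);
 *
 * Note this only waits for the given id; other ids dispatched to the same
 * queue may still be running when it returns.
 */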
/*
 * The taskq_wait() function will block until all previously submitted
 * tasks have been completed.  A previously submitted task is defined as
 * a task with a lower task id than the current task queue id.  Note that
 * all task ids are assigned monotonically at dispatch time.
 *
 * Waiting for all previous tasks to complete is accomplished by tracking
 * the lowest outstanding task id.  As tasks are dispatched they are added
 * to the tail of the pending, priority, or delay lists.  As worker threads
 * become available the tasks are removed from the heads of these lists and
 * linked to the worker threads.  This ensures the lists are kept in lowest
 * to highest task id order.
 *
 * Therefore the lowest outstanding task id can be quickly determined by
 * checking the head item from all of these lists.  This value is stored
 * with the task queue as the lowest id.  It only needs to be recalculated
 * when either the task with the current lowest id completes or is canceled.
 *
 * By blocking until the lowest task id exceeds the task id observed when
 * the function was called we ensure all previous tasks have completed.
 *
 * NOTE: When there are multiple worker threads it is possible for larger
 * task ids to complete before smaller ones.  Conversely when the task
 * queue contains delay tasks with small task ids, you may block for a
 * considerable length of time waiting for them to expire and execute.
 */
static int
taskq_wait_check(taskq_t *tq, taskqid_t id)
{
	int rc;

	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
	rc = (id < tq->tq_lowest_id);
	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

	return (rc);
}

void
taskq_wait_all(taskq_t *tq, taskqid_t id)
{
	wait_event(tq->tq_wait_waitq, taskq_wait_check(tq, id));
}
EXPORT_SYMBOL(taskq_wait_all);

void
taskq_wait(taskq_t *tq)
{
	taskqid_t id;

	/* Wait for the largest outstanding taskqid */
	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
	id = tq->tq_next_id - 1;
	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

	taskq_wait_all(tq, id);
}
EXPORT_SYMBOL(taskq_wait);
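
/*
 * Usage sketch (illustrative, not part of the original source): draining all
 * work dispatched so far.  The loop variables and callback are hypothetical.
 *
 *	for (i = 0; i < n; i++)
 *		(void) taskq_dispatch(tq, my_cb, &items[i], TQ_SLEEP);
 *
 *	taskq_wait(tq);
 *
 * taskq_wait() only covers ids below tq_next_id at the time of the call;
 * tasks dispatched concurrently by other threads may still be outstanding
 * when it returns.
 */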
/*
 * Returns non-zero if the given thread is one of the task queue's worker
 * threads.
 */
int
taskq_member(taskq_t *tq, void *t)
{
	struct list_head *l;
	taskq_thread_t *tqt;

	list_for_each(l, &tq->tq_thread_list) {
		tqt = list_entry(l, taskq_thread_t, tqt_thread_list);
		if (tqt->tqt_thread == (struct task_struct *)t)
			return (1);
	}

	return (0);
}
EXPORT_SYMBOL(taskq_member);
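
/*
 * Usage sketch (illustrative, not part of the original source): a task
 * callback can check whether it is executing on one of tq's worker threads,
 * for example to avoid deadlocking by calling taskq_wait() on its own queue.
 *
 *	if (!taskq_member(tq, current))
 *		taskq_wait(tq);
 */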
/*
 * Cancel an already dispatched task given the task id.  Still pending tasks
 * will be immediately canceled, and if the task is active the function will
 * block until it completes.  Preallocated tasks which are canceled must be
 * freed by the caller.
 */
int
taskq_cancel_id(taskq_t *tq, taskqid_t id)
{
	taskq_ent_t *t;
	int active = 0;
	int rc = ENOENT;

	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
	t = taskq_find(tq, id, &active);
	if (t && !active) {
		list_del_init(&t->tqent_list);
		t->tqent_flags |= TQENT_FLAG_CANCEL;

		/*
		 * When canceling the lowest outstanding task id we
		 * must recalculate the new lowest outstanding id.
		 */
		if (tq->tq_lowest_id == t->tqent_id) {
			tq->tq_lowest_id = taskq_lowest_id(tq);
			ASSERT3S(tq->tq_lowest_id, >, t->tqent_id);
		}

		/*
		 * The task_expire() function takes the tq->tq_lock so drop
		 * the lock before synchronously cancelling the timer.
		 */
		if (timer_pending(&t->tqent_timer)) {
			spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
			del_timer_sync(&t->tqent_timer);
			spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
		}

		if (!(t->tqent_flags & TQENT_FLAG_PREALLOC))
			task_done(tq, t);

		rc = 0;
	}
	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

	if (active) {
		taskq_wait_id(tq, id);
		rc = EBUSY;
	}

	return (rc);
}
EXPORT_SYMBOL(taskq_cancel_id);
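
/*
 * Usage sketch (illustrative, not part of the original source): cancelling a
 * delayed task before its timer fires.  Interpreting the return value as 0
 * on a successful cancel and non-zero when the task already ran or is still
 * running is an assumption about the convention used here.
 *
 *	id = taskq_dispatch_delay(tq, my_cb, arg, TQ_SLEEP, jiffies + 5 * HZ);
 *	...
 *	if (taskq_cancel_id(tq, id) != 0) {
 *		... too late, the task was active and taskq_cancel_id()
 *		    has already blocked until it completed ...
 *	}
 */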
taskqid_t
taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
{
	taskq_ent_t *t;
	taskqid_t rc = 0;

	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

	/* Taskq being destroyed and all tasks drained */
	if (!(tq->tq_flags & TQ_ACTIVE))
		goto out;

	/* Do not queue the task unless there is an idle thread for it */
	ASSERT(tq->tq_nactive <= tq->tq_nthreads);
	if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads))
		goto out;

	if ((t = task_alloc(tq, flags)) == NULL)
		goto out;

	spin_lock(&t->tqent_lock);

	/* Queue to the priority list instead of the pending list */
	if (flags & TQ_FRONT)
		list_add_tail(&t->tqent_list, &tq->tq_prio_list);
	else
		list_add_tail(&t->tqent_list, &tq->tq_pend_list);

	t->tqent_id = rc = tq->tq_next_id;
	tq->tq_next_id++;
	t->tqent_func = func;
	t->tqent_arg = arg;
	t->tqent_taskq = tq;
	t->tqent_timer.data = 0;
	t->tqent_timer.function = NULL;
	t->tqent_timer.expires = 0;

	ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

	spin_unlock(&t->tqent_lock);

	wake_up(&tq->tq_work_waitq);
out:
	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
	return (rc);
}
EXPORT_SYMBOL(taskq_dispatch);
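
/*
 * Usage sketch (illustrative, not part of the original source): a basic
 * dispatch and a front-of-queue dispatch.  my_cb and my_state are
 * hypothetical, and treating id 0 as a failed dispatch is an assumption.
 *
 *	taskqid_t id;
 *
 *	id = taskq_dispatch(tq, my_cb, my_state, TQ_SLEEP);
 *	if (id == 0)
 *		... dispatch failed, queue inactive or no memory ...
 *
 *	(void) taskq_dispatch(tq, my_cb, my_state, TQ_SLEEP | TQ_FRONT);
 *
 * TQ_FRONT places the task on the priority list, so it is serviced before
 * anything still sitting on the pending list.
 */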
taskqid_t
taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg,
    uint_t flags, clock_t expire_time)
{
	taskq_ent_t *t;
	taskqid_t rc = 0;

	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

	/* Taskq being destroyed and all tasks drained */
	if (!(tq->tq_flags & TQ_ACTIVE))
		goto out;

	if ((t = task_alloc(tq, flags)) == NULL)
		goto out;

	spin_lock(&t->tqent_lock);

	/* Queue to the delay list for subsequent execution */
	list_add_tail(&t->tqent_list, &tq->tq_delay_list);

	t->tqent_id = rc = tq->tq_next_id;
	tq->tq_next_id++;
	t->tqent_func = func;
	t->tqent_arg = arg;
	t->tqent_taskq = tq;
	t->tqent_timer.data = (unsigned long)t;
	t->tqent_timer.function = task_expire;
	t->tqent_timer.expires = (unsigned long)expire_time;
	add_timer(&t->tqent_timer);

	ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

	spin_unlock(&t->tqent_lock);
out:
	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
	return (rc);
}
EXPORT_SYMBOL(taskq_dispatch_delay);
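
/*
 * Usage sketch (illustrative, not part of the original source): schedule
 * my_cb() roughly five seconds from now.  The expire time is an absolute
 * value in jiffies, matching the add_timer() usage above.
 *
 *	id = taskq_dispatch_delay(tq, my_cb, arg, TQ_SLEEP, jiffies + 5 * HZ);
 *
 * The task sits on the delay list until task_expire() moves it to the
 * priority list, so taskq_wait() callers may block until the timer fires.
 */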
void
taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
    taskq_ent_t *t)
{
	ASSERT(!(tq->tq_flags & TASKQ_DYNAMIC));

	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

	/* Taskq being destroyed and all tasks drained */
	if (!(tq->tq_flags & TQ_ACTIVE)) {
		t->tqent_id = 0;
		goto out;
	}

	spin_lock(&t->tqent_lock);

	/*
	 * Mark it as a prealloc'd task.  This is important
	 * to ensure that we don't free it later.
	 */
	t->tqent_flags |= TQENT_FLAG_PREALLOC;

	/* Queue to the priority list instead of the pending list */
	if (flags & TQ_FRONT)
		list_add_tail(&t->tqent_list, &tq->tq_prio_list);
	else
		list_add_tail(&t->tqent_list, &tq->tq_pend_list);

	t->tqent_id = tq->tq_next_id;
	tq->tq_next_id++;
	t->tqent_func = func;
	t->tqent_arg = arg;
	t->tqent_taskq = tq;

	spin_unlock(&t->tqent_lock);

	wake_up(&tq->tq_work_waitq);
out:
	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
}
EXPORT_SYMBOL(taskq_dispatch_ent);
int
taskq_empty_ent(taskq_ent_t *t)
{
	return list_empty(&t->tqent_list);
}
EXPORT_SYMBOL(taskq_empty_ent);

void
taskq_init_ent(taskq_ent_t *t)
{
	spin_lock_init(&t->tqent_lock);
	init_waitqueue_head(&t->tqent_waitq);
	init_timer(&t->tqent_timer);
	INIT_LIST_HEAD(&t->tqent_list);
	t->tqent_id = 0;
	t->tqent_func = NULL;
	t->tqent_arg = NULL;
	t->tqent_flags = 0;
	t->tqent_taskq = NULL;
}
EXPORT_SYMBOL(taskq_init_ent);
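
/*
 * Usage sketch (illustrative, not part of the original source): embedding a
 * preallocated taskq_ent_t in a caller-owned structure so that dispatch
 * never has to allocate memory.  The structure and field names are
 * hypothetical.
 *
 *	struct my_obj {
 *		taskq_ent_t	mo_tqent;
 *		...
 *	};
 *
 *	taskq_init_ent(&obj->mo_tqent);
 *	...
 *	if (taskq_empty_ent(&obj->mo_tqent))
 *		taskq_dispatch_ent(tq, my_cb, obj, TQ_SLEEP, &obj->mo_tqent);
 *
 * Because the entry is marked TQENT_FLAG_PREALLOC, the worker thread does
 * not hand it to task_done()/task_free(); ownership returns to the caller
 * once the callback has run.
 */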
static int
taskq_thread(void *args)
{
	DECLARE_WAITQUEUE(wait, current);
	sigset_t blocked;
	taskq_thread_t *tqt = args;
	taskq_t *tq;
	taskq_ent_t *t;
	struct list_head *pend_list;

	ASSERT(tqt);
	tq = tqt->tqt_tq;
	current->flags |= PF_NOFREEZE;

	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
	tq->tq_nthreads++;
	wake_up(&tq->tq_wait_waitq);
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {

		if (list_empty(&tq->tq_pend_list) &&
		    list_empty(&tq->tq_prio_list)) {
			add_wait_queue_exclusive(&tq->tq_work_waitq, &wait);
			spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
			schedule();
			spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
			remove_wait_queue(&tq->tq_work_waitq, &wait);
		} else {
			__set_current_state(TASK_RUNNING);
		}

		if (!list_empty(&tq->tq_prio_list))
			pend_list = &tq->tq_prio_list;
		else if (!list_empty(&tq->tq_pend_list))
			pend_list = &tq->tq_pend_list;
		else
			pend_list = NULL;

		if (pend_list) {
			t = list_entry(pend_list->next, taskq_ent_t, tqent_list);
			list_del_init(&t->tqent_list);

			/*
			 * In order to support recursively dispatching a
			 * preallocated taskq_ent_t, tqent_id must be
			 * stored prior to executing tqent_func.
			 */
			tqt->tqt_id = t->tqent_id;
			tqt->tqt_task = t;

			/*
			 * We must store a copy of the flags prior to
			 * servicing the task (servicing a prealloc'd task
			 * returns the ownership of the tqent back to
			 * the caller of taskq_dispatch).  Thus,
			 * tqent_flags _may_ change within the call.
			 */
			tqt->tqt_flags = t->tqent_flags;

			taskq_insert_in_order(tq, tqt);
			tq->tq_nactive++;
			spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

			/* Perform the requested task */
			t->tqent_func(t->tqent_arg);

			spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
			tq->tq_nactive--;
			list_del_init(&tqt->tqt_active_list);
			tqt->tqt_task = NULL;

			/* For prealloc'd tasks, we don't free anything. */
			if ((tq->tq_flags & TASKQ_DYNAMIC) ||
			    !(tqt->tqt_flags & TQENT_FLAG_PREALLOC))
				task_done(tq, t);

			/*
			 * When the current lowest outstanding taskqid is
			 * done calculate the new lowest outstanding id.
			 */
			if (tq->tq_lowest_id == tqt->tqt_id) {
				tq->tq_lowest_id = taskq_lowest_id(tq);
				ASSERT3S(tq->tq_lowest_id, >, tqt->tqt_id);
			}

			tqt->tqt_id = 0;
			tqt->tqt_flags = 0;
			wake_up_all(&tq->tq_wait_waitq);
		}

		set_current_state(TASK_INTERRUPTIBLE);
	}

	__set_current_state(TASK_RUNNING);
	tq->tq_nthreads--;
	list_del_init(&tqt->tqt_thread_list);
	kmem_free(tqt, sizeof(taskq_thread_t));

	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

	return (0);
}
taskq_t *
taskq_create(const char *name, int nthreads, pri_t pri,
    int minalloc, int maxalloc, uint_t flags)
{
	taskq_t *tq;
	taskq_thread_t *tqt;
	int rc = 0, i, j = 0;

	ASSERT(name != NULL);
	ASSERT(pri <= maxclsyspri);
	ASSERT(minalloc >= 0);
	ASSERT(maxalloc <= INT_MAX);
	ASSERT(!(flags & (TASKQ_CPR_SAFE | TASKQ_DYNAMIC))); /* Unsupported */

	/* Scale the number of threads using nthreads as a percentage */
	if (flags & TASKQ_THREADS_CPU_PCT) {
		ASSERT(nthreads <= 100);
		ASSERT(nthreads >= 0);
		nthreads = MIN(nthreads, 100);
		nthreads = MAX(nthreads, 0);
		nthreads = MAX((num_online_cpus() * nthreads) / 100, 1);
	}

	tq = kmem_alloc(sizeof(*tq), KM_PUSHPAGE);
	if (tq == NULL)
		return (NULL);

	spin_lock_init(&tq->tq_lock);
	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
	INIT_LIST_HEAD(&tq->tq_thread_list);
	INIT_LIST_HEAD(&tq->tq_active_list);
	tq->tq_nactive = 0;
	tq->tq_nthreads = 0;
	tq->tq_minalloc = minalloc;
	tq->tq_maxalloc = maxalloc;
	tq->tq_nalloc = 0;
	tq->tq_flags = (flags | TQ_ACTIVE);
	tq->tq_next_id = 1;
	tq->tq_lowest_id = 1;
	INIT_LIST_HEAD(&tq->tq_free_list);
	INIT_LIST_HEAD(&tq->tq_pend_list);
	INIT_LIST_HEAD(&tq->tq_prio_list);
	INIT_LIST_HEAD(&tq->tq_delay_list);
	init_waitqueue_head(&tq->tq_work_waitq);
	init_waitqueue_head(&tq->tq_wait_waitq);

	if (flags & TASKQ_PREPOPULATE)
		for (i = 0; i < minalloc; i++)
			task_done(tq, task_alloc(tq, TQ_PUSHPAGE | TQ_NEW));

	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

	for (i = 0; i < nthreads; i++) {
		tqt = kmem_alloc(sizeof(*tqt), KM_PUSHPAGE);
		INIT_LIST_HEAD(&tqt->tqt_thread_list);
		INIT_LIST_HEAD(&tqt->tqt_active_list);
		tqt->tqt_tq = tq;
		tqt->tqt_id = 0;

		tqt->tqt_thread = kthread_create(taskq_thread, tqt,
		    "%s/%d", name, i);
		if (tqt->tqt_thread) {
			list_add(&tqt->tqt_thread_list, &tq->tq_thread_list);
			kthread_bind(tqt->tqt_thread, i % num_online_cpus());
			set_user_nice(tqt->tqt_thread, PRIO_TO_NICE(pri));
			wake_up_process(tqt->tqt_thread);
			j++;
		} else {
			kmem_free(tqt, sizeof(taskq_thread_t));
			rc = 1;
		}
	}

	/* Wait for all threads to be started before potential destroy */
	wait_event(tq->tq_wait_waitq, tq->tq_nthreads == j);

	if (rc) {
		taskq_destroy(tq);
		tq = NULL;
	}

	return (tq);
}
EXPORT_SYMBOL(taskq_create);
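
/*
 * Usage sketch (illustrative, not part of the original source): a private
 * queue sized to 25% of the online CPUs (TASKQ_THREADS_CPU_PCT treats the
 * nthreads argument as a percentage) with a prepopulated entry pool.  The
 * queue name and pool sizes are arbitrary.
 *
 *	taskq_t *my_tq;
 *
 *	my_tq = taskq_create("my_taskq", 25, minclsyspri, 8, INT_MAX,
 *	    TASKQ_PREPOPULATE | TASKQ_THREADS_CPU_PCT);
 *	if (my_tq == NULL)
 *		... creation failed ...
 */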
void
taskq_destroy(taskq_t *tq)
{
	struct task_struct *thread;
	taskq_thread_t *tqt;
	taskq_ent_t *t;

	ASSERT(tq);
	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
	tq->tq_flags &= ~TQ_ACTIVE;
	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

	/* TQ_ACTIVE cleared prevents new tasks being added to pending */
	taskq_wait(tq);

	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

	/*
	 * Signal each thread to exit and block until it does.  Each thread
	 * is responsible for removing itself from the list and freeing its
	 * taskq_thread_t.  This allows for idle threads to opt to remove
	 * themselves from the taskq.  They can be recreated as needed.
	 */
	while (!list_empty(&tq->tq_thread_list)) {
		tqt = list_entry(tq->tq_thread_list.next,
		    taskq_thread_t, tqt_thread_list);
		thread = tqt->tqt_thread;
		spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

		kthread_stop(thread);

		spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
	}

	while (!list_empty(&tq->tq_free_list)) {
		t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);

		ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

		list_del_init(&t->tqent_list);
		task_free(tq, t);
	}

	ASSERT(tq->tq_nthreads == 0);
	ASSERT(tq->tq_nalloc == 0);
	ASSERT(list_empty(&tq->tq_thread_list));
	ASSERT(list_empty(&tq->tq_active_list));
	ASSERT(list_empty(&tq->tq_free_list));
	ASSERT(list_empty(&tq->tq_pend_list));
	ASSERT(list_empty(&tq->tq_prio_list));
	ASSERT(list_empty(&tq->tq_delay_list));

	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

	kmem_free(tq, sizeof(taskq_t));
}
EXPORT_SYMBOL(taskq_destroy);
int
spl_taskq_init(void)
{
	/*
	 * Solaris creates a dynamic taskq of up to 64 threads, however in
	 * a Linux environment 1 thread per core is usually about right.
	 */
	system_taskq = taskq_create("spl_system_taskq", num_online_cpus(),
	    minclsyspri, 4, 512, TASKQ_PREPOPULATE);
	if (system_taskq == NULL)
		return (1);

	return (0);
}

void
spl_taskq_fini(void)
{
	taskq_destroy(system_taskq);
}
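
/*
 * Usage sketch (illustrative, not part of the original source): consumers
 * that only need occasional asynchronous work can dispatch to the shared
 * system_taskq instead of creating a private queue.
 *
 *	(void) taskq_dispatch(system_taskq, my_cb, arg, TQ_SLEEP);
 *	taskq_wait(system_taskq);
 */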