/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://github.com/behlendorf/spl/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *****************************************************************************
 *  Solaris Porting Layer (SPL) Task Queue Implementation.
\*****************************************************************************/
#include <sys/taskq.h>
#include <sys/kmem.h>
#include <spl-debug.h>

#ifdef SS_DEBUG_SUBSYS
#undef SS_DEBUG_SUBSYS
#endif

#define SS_DEBUG_SUBSYS SS_TASKQ
/* Global system-wide dynamic task queue available for all consumers */
taskq_t *system_taskq;
EXPORT_SYMBOL(system_taskq);
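
/*
 * Usage sketch (illustration only, via the public wrappers declared in
 * sys/taskq.h; my_func and arg are placeholder names):
 *
 *      static void my_func(void *arg) { ... }
 *
 *      taskqid_t id = taskq_dispatch(system_taskq, my_func, arg, TQ_SLEEP);
 *      if (id == 0)
 *              ...dispatch failed, e.g. the taskq is being destroyed...
 *      taskq_wait_id(system_taskq, id);
 */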

/*
 * NOTE: Must be called with tq->tq_lock held, returns a list_t which
 * is not attached to the free, work, or pending taskq lists.
 */
static taskq_ent_t *
task_alloc(taskq_t *tq, uint_t flags)
{
        taskq_ent_t *t;
        int count = 0;

        ASSERT(tq);
        ASSERT(flags & (TQ_SLEEP | TQ_NOSLEEP));               /* One set */
        ASSERT(!((flags & TQ_SLEEP) && (flags & TQ_NOSLEEP))); /* Not both */
        ASSERT(spin_is_locked(&tq->tq_lock));
retry:
        /* Acquire taskq_ent_t's from free list if available */
        if (!list_empty(&tq->tq_free_list) && !(flags & TQ_NEW)) {
                t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);

                ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

                list_del_init(&t->tqent_list);
                SRETURN(t);
        }

        /* Free list is empty and memory allocations are prohibited */
        if (flags & TQ_NOALLOC)
                SRETURN(NULL);

        /* Hit maximum taskq_ent_t pool size */
        if (tq->tq_nalloc >= tq->tq_maxalloc) {
                if (flags & TQ_NOSLEEP)
                        SRETURN(NULL);

                /*
                 * Sleep periodically polling the free list for an available
                 * taskq_ent_t. Dispatching with TQ_SLEEP should always succeed
                 * but we cannot block forever waiting for a taskq_ent_t to
                 * show up in the free list, otherwise a deadlock can happen.
                 *
                 * Therefore, we need to allocate a new task even if the number
                 * of allocated tasks is above tq->tq_maxalloc, but we still
                 * end up delaying the task allocation by one second, thereby
                 * throttling the task dispatch rate.
                 */
                spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
                schedule_timeout(HZ / 100);
                spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
                if (count < 100)
                        SGOTO(retry, count++);
        }

        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
        t = kmem_alloc(sizeof(taskq_ent_t), flags & (TQ_SLEEP | TQ_NOSLEEP));
        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

        if (t) {
                taskq_init_ent(t);
                tq->tq_nalloc++;
        }

        SRETURN(t);
}
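
/*
 * Rough arithmetic for the throttle in task_alloc() above: each retry sleeps
 * HZ / 100 jiffies, i.e. about 10 ms regardless of HZ, and the retry counter
 * bounds the loop to roughly 100 passes, so a dispatcher stuck at
 * tq_maxalloc is delayed on the order of one second before a new
 * taskq_ent_t is allocated anyway.
 */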

/*
 * NOTE: Must be called with tq->tq_lock held, expects the taskq_ent_t
 * to already be removed from the free, work, or pending taskq lists.
 */
static void
task_free(taskq_t *tq, taskq_ent_t *t)
{
        ASSERT(spin_is_locked(&tq->tq_lock));
        ASSERT(list_empty(&t->tqent_list));

        kmem_free(t, sizeof(taskq_ent_t));
        tq->tq_nalloc--;
}

/*
 * NOTE: Must be called with tq->tq_lock held, either destroys the
 * taskq_ent_t if too many exist or moves it to the free list for later use.
 */
static void
task_done(taskq_t *tq, taskq_ent_t *t)
{
        ASSERT(spin_is_locked(&tq->tq_lock));

        list_del_init(&t->tqent_list);

        if (tq->tq_nalloc <= tq->tq_minalloc) {
                t->tqent_id = 0;
                t->tqent_func = NULL;
                t->tqent_arg = NULL;
                t->tqent_flags = 0;

                list_add_tail(&t->tqent_list, &tq->tq_free_list);
        } else {
                task_free(tq, t);
        }
}

/*
 * As tasks are submitted to the task queue they are assigned a
 * monotonically increasing taskqid and added to the tail of the pending
 * list.  As worker threads become available the tasks are removed from
 * the head of the pending or priority list, giving preference to the
 * priority list.  The tasks are then removed from their respective
 * list, and the taskq_thread servicing the task is added to the active
 * list, preserving the order using the serviced task's taskqid.
 * Finally, as tasks complete the taskq_thread servicing the task is
 * removed from the active list.  This means that the pending task and
 * active taskq_thread lists are always kept sorted by taskqid.  Thus the
 * lowest outstanding incomplete taskqid can be determined simply by
 * checking the min taskqid for each head item on the pending, priority,
 * and active taskq_thread list.  This value is stored in
 * tq->tq_lowest_id and only updated to the new lowest id when the
 * previous lowest id completes.  All taskqids lower than
 * tq->tq_lowest_id must have completed.  It is also possible larger
 * taskqid's have completed because they may be processed in parallel by
 * several worker threads.  However, this is not a problem because the
 * behavior of taskq_wait_id() is to block until all previously
 * submitted taskqid's have completed.
 *
 * XXX: Taskqid_t wrapping is not handled.  However, taskqid_t's are
 * 64-bit values so even if a taskq is processing 2^24 (16,777,216)
 * taskqid_ts per second it will still take 2^40 seconds, 34,865 years,
 * before the wrap occurs.  I can live with that for now.
 */
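
/*
 * Worked example of the bookkeeping above: suppose ids 5, 6, and 7 have
 * been dispatched and two workers pick up 5 and 6.  If 6 finishes first,
 * tq_lowest_id remains 5 because the head of the active list still holds
 * id 5; only when 5 completes does taskq_lowest_id() advance it past 5
 * (to 7 if 7 is still pending, or to tq_next_id if nothing is left).  A
 * caller blocked in taskq_wait_id(tq, 5) is therefore only woken once
 * every id <= 5 has completed.
 */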
static int
taskq_wait_check(taskq_t *tq, taskqid_t id)
{
        int rc;

        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        rc = (id < tq->tq_lowest_id);
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        SRETURN(rc);
}

void
__taskq_wait_id(taskq_t *tq, taskqid_t id)
{
        ASSERT(tq);

        wait_event(tq->tq_wait_waitq, taskq_wait_check(tq, id));
}
EXPORT_SYMBOL(__taskq_wait_id);

void
__taskq_wait(taskq_t *tq)
{
        taskqid_t id;

        ASSERT(tq);

        /* Wait for the largest outstanding taskqid */
        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        id = tq->tq_next_id - 1;
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        __taskq_wait_id(tq, id);
}
EXPORT_SYMBOL(__taskq_wait);

int
__taskq_member(taskq_t *tq, void *t)
{
        struct list_head *l;
        taskq_thread_t *tqt;

        ASSERT(tq);
        ASSERT(t);

        list_for_each(l, &tq->tq_thread_list) {
                tqt = list_entry(l, taskq_thread_t, tqt_thread_list);
                if (tqt->tqt_thread == (struct task_struct *)t)
                        SRETURN(1);
        }

        SRETURN(0);
}
EXPORT_SYMBOL(__taskq_member);

taskqid_t
__taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
{
        taskq_ent_t *t;
        taskqid_t rc = 0;

        ASSERT(tq);
        ASSERT(func);

        /* Solaris assumes TQ_SLEEP if not passed explicitly */
        if (!(flags & (TQ_SLEEP | TQ_NOSLEEP)))
                flags |= TQ_SLEEP;

        if (unlikely(in_atomic() && (flags & TQ_SLEEP)))
                PANIC("May schedule while atomic: %s/0x%08x/%d\n",
                    current->comm, preempt_count(), current->pid);

        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

        /* Taskq being destroyed and all tasks drained */
        if (!(tq->tq_flags & TQ_ACTIVE))
                SGOTO(out, rc = 0);

        /* Do not queue the task unless there is idle thread for it */
        ASSERT(tq->tq_nactive <= tq->tq_nthreads);
        if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads))
                SGOTO(out, rc = 0);

        if ((t = task_alloc(tq, flags)) == NULL)
                SGOTO(out, rc = 0);

        spin_lock(&t->tqent_lock);

        /* Queue to the priority list instead of the pending list */
        if (flags & TQ_FRONT)
                list_add_tail(&t->tqent_list, &tq->tq_prio_list);
        else
                list_add_tail(&t->tqent_list, &tq->tq_pend_list);

        t->tqent_id = rc = tq->tq_next_id;
        tq->tq_next_id++;
        t->tqent_func = func;
        t->tqent_arg = arg;

        ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

        spin_unlock(&t->tqent_lock);

        wake_up(&tq->tq_work_waitq);
out:
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
        SRETURN(rc);
}
EXPORT_SYMBOL(__taskq_dispatch);
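
/*
 * Usage note (illustration only, placeholder names): a caller in a context
 * which cannot sleep passes TQ_NOSLEEP and must tolerate a failed dispatch:
 *
 *      if (taskq_dispatch(tq, my_func, arg, TQ_NOSLEEP) == 0)
 *              ...no task was queued, fall back or retry later...
 *
 * TQ_FRONT may be OR'd into the flags to place the task on the priority
 * list so it is serviced ahead of normal pending tasks.
 */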

void
__taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
    taskq_ent_t *t)
{
        ASSERT(tq);
        ASSERT(func);
        ASSERT(!(tq->tq_flags & TASKQ_DYNAMIC));

        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

        /* Taskq being destroyed and all tasks drained */
        if (!(tq->tq_flags & TQ_ACTIVE)) {
                t->tqent_id = 0;
                goto out;
        }

        spin_lock(&t->tqent_lock);

        /*
         * Mark it as a prealloc'd task.  This is important
         * to ensure that we don't free it later.
         */
        t->tqent_flags |= TQENT_FLAG_PREALLOC;

        /* Queue to the priority list instead of the pending list */
        if (flags & TQ_FRONT)
                list_add_tail(&t->tqent_list, &tq->tq_prio_list);
        else
                list_add_tail(&t->tqent_list, &tq->tq_pend_list);

        t->tqent_id = tq->tq_next_id;
        tq->tq_next_id++;
        t->tqent_func = func;
        t->tqent_arg = arg;

        spin_unlock(&t->tqent_lock);

        wake_up(&tq->tq_work_waitq);
out:
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
}
EXPORT_SYMBOL(__taskq_dispatch_ent);

int
__taskq_empty_ent(taskq_ent_t *t)
{
        return list_empty(&t->tqent_list);
}
EXPORT_SYMBOL(__taskq_empty_ent);

void
__taskq_init_ent(taskq_ent_t *t)
{
        spin_lock_init(&t->tqent_lock);
        INIT_LIST_HEAD(&t->tqent_list);
        t->tqent_id = 0;
        t->tqent_func = NULL;
        t->tqent_arg = NULL;
        t->tqent_flags = 0;
}
EXPORT_SYMBOL(__taskq_init_ent);
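
/*
 * Usage sketch (illustration only, placeholder names): the prealloc'd entry
 * interface lets a caller embed a taskq_ent_t in its own structure and
 * dispatch without allocating, via the public wrappers:
 *
 *      taskq_ent_t ent;
 *
 *      taskq_init_ent(&ent);
 *      taskq_dispatch_ent(tq, my_func, arg, TQ_SLEEP, &ent);
 *
 * taskq_empty_ent(&ent) then reports whether the entry is currently
 * unlinked from the pending and priority lists, and ownership of the
 * entry returns to the caller once it has been serviced.
 */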

/*
 * Returns the lowest incomplete taskqid_t.  The taskqid_t may
 * be queued on the pending list, on the priority list, or on
 * the work list currently being handled, but it is not 100%
 * complete yet.
 */
static taskqid_t
taskq_lowest_id(taskq_t *tq)
{
        taskqid_t lowest_id = tq->tq_next_id;
        taskq_ent_t *t;
        taskq_thread_t *tqt;

        ASSERT(spin_is_locked(&tq->tq_lock));

        if (!list_empty(&tq->tq_pend_list)) {
                t = list_entry(tq->tq_pend_list.next, taskq_ent_t, tqent_list);
                lowest_id = MIN(lowest_id, t->tqent_id);
        }

        if (!list_empty(&tq->tq_prio_list)) {
                t = list_entry(tq->tq_prio_list.next, taskq_ent_t, tqent_list);
                lowest_id = MIN(lowest_id, t->tqent_id);
        }

        if (!list_empty(&tq->tq_active_list)) {
                tqt = list_entry(tq->tq_active_list.next, taskq_thread_t,
                    tqt_active_list);
                ASSERT(tqt->tqt_id != 0);
                lowest_id = MIN(lowest_id, tqt->tqt_id);
        }

        SRETURN(lowest_id);
}

/*
 * Insert a task into a list keeping the list sorted by increasing taskqid.
 */
static void
taskq_insert_in_order(taskq_t *tq, taskq_thread_t *tqt)
{
        taskq_thread_t *w;
        struct list_head *l;

        ASSERT(spin_is_locked(&tq->tq_lock));

        list_for_each_prev(l, &tq->tq_active_list) {
                w = list_entry(l, taskq_thread_t, tqt_active_list);
                if (w->tqt_id < tqt->tqt_id) {
                        list_add(&tqt->tqt_active_list, l);
                        break;
                }
        }
        if (l == &tq->tq_active_list)
                list_add(&tqt->tqt_active_list, &tq->tq_active_list);
}

static int
taskq_thread(void *args)
{
        DECLARE_WAITQUEUE(wait, current);
        sigset_t blocked;
        taskq_thread_t *tqt = args;
        taskq_t *tq;
        taskq_ent_t *t;
        struct list_head *pend_list;

        ASSERT(tqt);
        tq = tqt->tqt_tq;
        current->flags |= PF_NOFREEZE;

        sigfillset(&blocked);
        sigprocmask(SIG_BLOCK, &blocked, NULL);
        flush_signals(current);

        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        tq->tq_nthreads++;
        wake_up(&tq->tq_wait_waitq);
        set_current_state(TASK_INTERRUPTIBLE);

        while (!kthread_should_stop()) {

                if (list_empty(&tq->tq_pend_list) &&
                    list_empty(&tq->tq_prio_list)) {
                        add_wait_queue_exclusive(&tq->tq_work_waitq, &wait);
                        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
                        schedule();
                        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
                        remove_wait_queue(&tq->tq_work_waitq, &wait);
                } else {
                        __set_current_state(TASK_RUNNING);
                }

                if (!list_empty(&tq->tq_prio_list))
                        pend_list = &tq->tq_prio_list;
                else if (!list_empty(&tq->tq_pend_list))
                        pend_list = &tq->tq_pend_list;
                else
                        pend_list = NULL;

                if (pend_list) {
                        t = list_entry(pend_list->next, taskq_ent_t, tqent_list);
                        list_del_init(&t->tqent_list);

                        /* In order to support recursively dispatching a
                         * preallocated taskq_ent_t, tqent_id must be
                         * stored prior to executing tqent_func. */
                        tqt->tqt_id = t->tqent_id;

                        /* We must store a copy of the flags prior to
                         * servicing the task (servicing a prealloc'd task
                         * returns the ownership of the tqent back to
                         * the caller of taskq_dispatch). Thus,
                         * tqent_flags _may_ change within the call. */
                        tqt->tqt_flags = t->tqent_flags;

                        taskq_insert_in_order(tq, tqt);
                        tq->tq_nactive++;
                        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

                        /* Perform the requested task */
                        t->tqent_func(t->tqent_arg);

                        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
                        tq->tq_nactive--;
                        list_del_init(&tqt->tqt_active_list);

                        /* For prealloc'd tasks, we don't free anything. */
                        if ((tq->tq_flags & TASKQ_DYNAMIC) ||
                            !(tqt->tqt_flags & TQENT_FLAG_PREALLOC))
                                task_done(tq, t);

                        /* When the current lowest outstanding taskqid is
                         * done calculate the new lowest outstanding id */
                        if (tq->tq_lowest_id == tqt->tqt_id) {
                                tq->tq_lowest_id = taskq_lowest_id(tq);
                                ASSERT3S(tq->tq_lowest_id, >, tqt->tqt_id);
                        }

                        tqt->tqt_id = 0;
                        tqt->tqt_flags = 0;
                        wake_up_all(&tq->tq_wait_waitq);
                }

                set_current_state(TASK_INTERRUPTIBLE);
        }

        __set_current_state(TASK_RUNNING);
        tq->tq_nthreads--;
        list_del_init(&tqt->tqt_thread_list);
        kmem_free(tqt, sizeof(taskq_thread_t));

        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        SRETURN(0);
}

taskq_t *
__taskq_create(const char *name, int nthreads, pri_t pri,
    int minalloc, int maxalloc, uint_t flags)
{
        taskq_t *tq;
        taskq_thread_t *tqt;
        int rc = 0, i, j = 0;

        ASSERT(name != NULL);
        ASSERT(pri <= maxclsyspri);
        ASSERT(minalloc >= 0);
        ASSERT(maxalloc <= INT_MAX);
        ASSERT(!(flags & (TASKQ_CPR_SAFE | TASKQ_DYNAMIC))); /* Unsupported */

        /* Scale the number of threads using nthreads as a percentage */
        if (flags & TASKQ_THREADS_CPU_PCT) {
                ASSERT(nthreads <= 100);
                ASSERT(nthreads >= 0);
                nthreads = MIN(nthreads, 100);
                nthreads = MAX(nthreads, 0);
                nthreads = MAX((num_online_cpus() * nthreads) / 100, 1);
        }
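
        /*
         * Worked example: with 8 online CPUs and nthreads = 75 the scaling
         * above yields MAX((8 * 75) / 100, 1) = 6 worker threads, while a
         * single CPU and nthreads = 1 still yields the minimum of 1 thread.
         */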

        tq = kmem_alloc(sizeof(*tq), KM_SLEEP);
        if (tq == NULL)
                SRETURN(NULL);

        spin_lock_init(&tq->tq_lock);
        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        INIT_LIST_HEAD(&tq->tq_thread_list);
        INIT_LIST_HEAD(&tq->tq_active_list);
        tq->tq_name = name;
        tq->tq_nactive = 0;
        tq->tq_nthreads = 0;
        tq->tq_pri = pri;
        tq->tq_minalloc = minalloc;
        tq->tq_maxalloc = maxalloc;
        tq->tq_nalloc = 0;
        tq->tq_flags = (flags | TQ_ACTIVE);
        tq->tq_next_id = 1;
        tq->tq_lowest_id = 1;
        INIT_LIST_HEAD(&tq->tq_free_list);
        INIT_LIST_HEAD(&tq->tq_pend_list);
        INIT_LIST_HEAD(&tq->tq_prio_list);
        init_waitqueue_head(&tq->tq_work_waitq);
        init_waitqueue_head(&tq->tq_wait_waitq);

        if (flags & TASKQ_PREPOPULATE)
                for (i = 0; i < minalloc; i++)
                        task_done(tq, task_alloc(tq, TQ_SLEEP | TQ_NEW));

        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        for (i = 0; i < nthreads; i++) {
                tqt = kmem_alloc(sizeof(*tqt), KM_SLEEP);
                INIT_LIST_HEAD(&tqt->tqt_thread_list);
                INIT_LIST_HEAD(&tqt->tqt_active_list);
                tqt->tqt_tq = tq;
                tqt->tqt_id = 0;

                tqt->tqt_thread = kthread_create(taskq_thread, tqt,
                    "%s/%d", name, i);
                if (tqt->tqt_thread) {
                        list_add(&tqt->tqt_thread_list, &tq->tq_thread_list);
                        kthread_bind(tqt->tqt_thread, i % num_online_cpus());
                        set_user_nice(tqt->tqt_thread, PRIO_TO_NICE(pri));
                        wake_up_process(tqt->tqt_thread);
                        j++;
                } else {
                        kmem_free(tqt, sizeof(taskq_thread_t));
                        rc = 1;
                }
        }

        /* Wait for all threads to be started before potential destroy */
        wait_event(tq->tq_wait_waitq, tq->tq_nthreads == j);

        if (rc) {
                __taskq_destroy(tq);
                tq = NULL;
        }

        SRETURN(tq);
}
EXPORT_SYMBOL(__taskq_create);
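
/*
 * Usage sketch (illustration only, via the public wrappers; the name and
 * counts are arbitrary example values):
 *
 *      taskq_t *tq = taskq_create("my_taskq", 4, minclsyspri,
 *          4, 512, TASKQ_PREPOPULATE);
 *      ...dispatch work, taskq_wait(tq) to drain...
 *      taskq_destroy(tq);
 */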

void
__taskq_destroy(taskq_t *tq)
{
        struct task_struct *thread;
        taskq_thread_t *tqt;
        taskq_ent_t *t;

        ASSERT(tq);
        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        tq->tq_flags &= ~TQ_ACTIVE;
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        /* TQ_ACTIVE cleared prevents new tasks being added to pending */
        __taskq_wait(tq);

        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

        /*
         * Signal each thread to exit and block until it does.  Each thread
         * is responsible for removing itself from the list and freeing its
         * taskq_thread_t.  This allows for idle threads to opt to remove
         * themselves from the taskq.  They can be recreated as needed.
         */
        while (!list_empty(&tq->tq_thread_list)) {
                tqt = list_entry(tq->tq_thread_list.next,
                    taskq_thread_t, tqt_thread_list);
                thread = tqt->tqt_thread;
                spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

                kthread_stop(thread);

                spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        }

        while (!list_empty(&tq->tq_free_list)) {
                t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);

                ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

                list_del_init(&t->tqent_list);
                task_free(tq, t);
        }

        ASSERT(tq->tq_nthreads == 0);
        ASSERT(tq->tq_nalloc == 0);
        ASSERT(list_empty(&tq->tq_thread_list));
        ASSERT(list_empty(&tq->tq_active_list));
        ASSERT(list_empty(&tq->tq_free_list));
        ASSERT(list_empty(&tq->tq_pend_list));
        ASSERT(list_empty(&tq->tq_prio_list));

        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        kmem_free(tq, sizeof(taskq_t));
}
EXPORT_SYMBOL(__taskq_destroy);

int
spl_taskq_init(void)
{
        /* Solaris creates a dynamic taskq of up to 64 threads, however in
         * a Linux environment 1 thread per-core is usually about right */
        system_taskq = taskq_create("spl_system_taskq", num_online_cpus(),
            minclsyspri, 4, 512, TASKQ_PREPOPULATE);
        if (system_taskq == NULL)
                SRETURN(1);

        SRETURN(0);
}

void
spl_taskq_fini(void)
{
        taskq_destroy(system_taskq);
}