/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://github.com/behlendorf/spl/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *****************************************************************************
 *  Solaris Porting Layer (SPL) Task Queue Implementation.
\*****************************************************************************/
#include <sys/taskq.h>
#include <sys/kmem.h>
#include <spl-debug.h>

#ifdef SS_DEBUG_SUBSYS
#undef SS_DEBUG_SUBSYS
#endif

#define SS_DEBUG_SUBSYS SS_TASKQ
/* Global system-wide dynamic task queue available for all consumers */
taskq_t *system_taskq;
EXPORT_SYMBOL(system_taskq);
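
/*
 * Example (illustrative only): queueing work on the shared system taskq
 * via the taskq_dispatch() wrapper from <sys/taskq.h>.  The callback
 * my_func and argument my_arg are hypothetical names.
 *
 *   static void my_func(void *my_arg) { ... }
 *
 *   taskqid_t id = taskq_dispatch(system_taskq, my_func, my_arg, TQ_SLEEP);
 *   if (id == 0)
 *           ... the task was not queued ...
 */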
typedef struct spl_task {
        spinlock_t              t_lock;
        struct list_head        t_list;
        taskqid_t               t_id;
        task_func_t             t_func;
        void                    *t_arg;
} spl_task_t;
/*
 * NOTE: Must be called with tq->tq_lock held, returns a list_t which
 * is not attached to the free, work, or pending taskq lists.
 */
static spl_task_t *
task_alloc(taskq_t *tq, uint_t flags)
{
        spl_task_t *t;
        int count = 0;

        ASSERT(tq);
        ASSERT(flags & (TQ_SLEEP | TQ_NOSLEEP));               /* One set */
        ASSERT(!((flags & TQ_SLEEP) && (flags & TQ_NOSLEEP))); /* Not both */
        ASSERT(spin_is_locked(&tq->tq_lock));
retry:
        /* Acquire spl_task_t's from free list if available */
        if (!list_empty(&tq->tq_free_list) && !(flags & TQ_NEW)) {
                t = list_entry(tq->tq_free_list.next, spl_task_t, t_list);
                list_del_init(&t->t_list);
                return (t);
        }

        /* Free list is empty and memory allocations are prohibited */
        if (flags & TQ_NOALLOC)
                return (NULL);

        /* Hit maximum spl_task_t pool size */
        if (tq->tq_nalloc >= tq->tq_maxalloc) {
                if (flags & TQ_NOSLEEP)
                        return (NULL);

                /*
                 * Sleep periodically polling the free list for an available
                 * spl_task_t. Dispatching with TQ_SLEEP should always succeed
                 * but we cannot block forever waiting for an spl_taskq_t to
                 * show up in the free list, otherwise a deadlock can happen.
                 *
                 * Therefore, we need to allocate a new task even if the number
                 * of allocated tasks is above tq->tq_maxalloc, but we still
                 * end up delaying the task allocation by one second, thereby
                 * throttling the task dispatch rate.
                 */
                spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
                schedule_timeout(HZ / 100);
                spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
                if (count < 100)        /* 100 polls * HZ/100 == ~1 second */
                        SGOTO(retry, count++);
        }

        /* Drop the lock around the potentially blocking allocation */
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
        t = kmem_alloc(sizeof(spl_task_t), flags & (TQ_SLEEP | TQ_NOSLEEP));
        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

        if (t) {
                spin_lock_init(&t->t_lock);
                INIT_LIST_HEAD(&t->t_list);
                tq->tq_nalloc++;
        }

        return (t);
}
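
/*
 * Illustrative summary (a sketch, not exhaustive): with the pool
 * exhausted (tq_nalloc >= tq_maxalloc), TQ_NOSLEEP fails immediately
 * with NULL, TQ_NOALLOC fails as soon as the free list is empty, and
 * TQ_SLEEP polls the free list at HZ/100 intervals for up to 100
 * iterations (~1 second) before over-allocating past tq_maxalloc.
 */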
/*
 * NOTE: Must be called with tq->tq_lock held, expects the spl_task_t
 * to already be removed from the free, work, or pending taskq lists.
 */
static void
task_free(taskq_t *tq, spl_task_t *t)
{
        ASSERT(tq);
        ASSERT(t);
        ASSERT(spin_is_locked(&tq->tq_lock));
        ASSERT(list_empty(&t->t_list));

        kmem_free(t, sizeof(spl_task_t));
        tq->tq_nalloc--;
}
/*
 * NOTE: Must be called with tq->tq_lock held, either destroys the
 * spl_task_t if too many exist or moves it to the free list for later use.
 */
static void
task_done(taskq_t *tq, spl_task_t *t)
{
        ASSERT(tq);
        ASSERT(t);
        ASSERT(spin_is_locked(&tq->tq_lock));

        list_del_init(&t->t_list);

        if (tq->tq_nalloc <= tq->tq_minalloc) {
                t->t_id = 0;
                t->t_func = NULL;
                t->t_arg = NULL;
                list_add_tail(&t->t_list, &tq->tq_free_list);
        } else {
                task_free(tq, t);
        }
}
/*
 * As tasks are submitted to the task queue they are assigned a
 * monotonically increasing taskqid and added to the tail of the pending
 * list. As worker threads become available the tasks are removed from
 * the head of the pending or priority list, giving preference to the
 * priority list. The tasks are then added to the work list, preserving
 * the ordering by taskqid. Finally, as tasks complete they are removed
 * from the work list. This means that the pending and work lists are
 * always kept sorted by taskqid. Thus the lowest outstanding
 * incomplete taskqid can be determined simply by checking the min
 * taskqid for each head item on the pending, priority, and work list.
 * This value is stored in tq->tq_lowest_id and only updated to the new
 * lowest id when the previous lowest id completes. All taskqids lower
 * than tq->tq_lowest_id must have completed. It is also possible
 * larger taskqids have completed because they may be processed in
 * parallel by several worker threads. However, this is not a problem
 * because the behavior of taskq_wait_id() is to block until all
 * previously submitted taskqids have completed.
 *
 * XXX: Taskqid_t wrapping is not handled. However, taskqid_ts are
 * 64-bit values so even if a taskq is processing 2^24 (16,777,216)
 * taskqids per second it will still take 2^40 seconds, roughly 34,865
 * years, before the wrap occurs. I can live with that for now.
 */
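
/*
 * Worked example (illustrative): tasks 1, 2, and 3 are dispatched and
 * picked up by three worker threads.  If tasks 2 and 3 finish first,
 * tq_lowest_id stays at 1 because task 1 is still on the work list, so
 * a taskq_wait_id(tq, 2) caller keeps blocking even though id 2 is
 * complete.  Once task 1 completes, taskq_lowest_id() scans the list
 * heads, tq_lowest_id advances to 4, and all waiters with id < 4 wake.
 */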
static int
taskq_wait_check(taskq_t *tq, taskqid_t id)
{
        int rc;

        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        rc = (id < tq->tq_lowest_id);
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        return (rc);
}
void
__taskq_wait_id(taskq_t *tq, taskqid_t id)
{
        ASSERT(tq);

        wait_event(tq->tq_wait_waitq, taskq_wait_check(tq, id));
}
EXPORT_SYMBOL(__taskq_wait_id);
void
__taskq_wait(taskq_t *tq)
{
        taskqid_t id;

        ASSERT(tq);

        /* Wait for the largest outstanding taskqid */
        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        id = tq->tq_next_id - 1;
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        __taskq_wait_id(tq, id);
}
EXPORT_SYMBOL(__taskq_wait);
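
/*
 * Example (illustrative): taskq_wait() as a completion barrier, using
 * the taskq_dispatch()/taskq_wait() wrappers from <sys/taskq.h>.  The
 * callback flush_cb is a hypothetical name.
 *
 *   (void) taskq_dispatch(tq, flush_cb, NULL, TQ_SLEEP);
 *   taskq_wait(tq);    // returns only after every task dispatched so far
 */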
int
__taskq_member(taskq_t *tq, void *t)
{
        int i;

        ASSERT(tq);
        ASSERT(t);

        for (i = 0; i < tq->tq_nthreads; i++)
                if (tq->tq_threads[i] == (struct task_struct *)t)
                        return (1);

        return (0);
}
EXPORT_SYMBOL(__taskq_member);
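
/*
 * Example (illustrative): a function which must only ever execute in
 * taskq context can assert that fact, assuming the taskq_member()
 * wrapper from <sys/taskq.h> and the kernel's "current" thread pointer.
 *
 *   ASSERT(taskq_member(tq, current));
 */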
taskqid_t
__taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
{
        spl_task_t *t;
        taskqid_t rc = 0;

        ASSERT(tq);
        ASSERT(func);

        /* Solaris assumes TQ_SLEEP if not passed explicitly */
        if (!(flags & (TQ_SLEEP | TQ_NOSLEEP)))
                flags |= TQ_SLEEP;

        if (unlikely(in_atomic() && (flags & TQ_SLEEP)))
                PANIC("May schedule while atomic: %s/0x%08x/%d\n",
                    current->comm, preempt_count(), current->pid);

        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

        /* Taskq being destroyed and all tasks drained */
        if (!(tq->tq_flags & TQ_ACTIVE))
                goto out;

        /* Do not queue the task unless there is idle thread for it */
        ASSERT(tq->tq_nactive <= tq->tq_nthreads);
        if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads))
                goto out;

        if ((t = task_alloc(tq, flags)) == NULL)
                goto out;

        spin_lock(&t->t_lock);

        /* Queue to the priority list instead of the pending list */
        if (flags & TQ_FRONT)
                list_add_tail(&t->t_list, &tq->tq_prio_list);
        else
                list_add_tail(&t->t_list, &tq->tq_pend_list);

        t->t_id = rc = tq->tq_next_id;
        tq->tq_next_id++;
        t->t_func = func;
        t->t_arg = arg;
        spin_unlock(&t->t_lock);

        wake_up(&tq->tq_work_waitq);
out:
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
        return (rc);
}
EXPORT_SYMBOL(__taskq_dispatch);
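
/*
 * Example (illustrative): dispatching from a context which cannot sleep,
 * e.g. while holding a spinlock.  TQ_NOSLEEP makes task_alloc() fail
 * rather than block, so the return value must be checked; 0 means no
 * task was queued.  The names intr_cb and my_arg are hypothetical.
 *
 *   if (taskq_dispatch(tq, intr_cb, my_arg, TQ_NOSLEEP) == 0)
 *           ... handle the failed dispatch, e.g. retry later from a
 *               context which may safely sleep ...
 */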
/*
 * Returns the lowest incomplete taskqid_t. The taskqid_t may
 * be queued on the pending list, on the priority list, or on
 * the work list currently being handled, but it is not 100%
 * complete yet.
 */
static taskqid_t
taskq_lowest_id(taskq_t *tq)
{
        taskqid_t lowest_id = tq->tq_next_id;
        spl_task_t *t;

        ASSERT(tq);
        ASSERT(spin_is_locked(&tq->tq_lock));

        if (!list_empty(&tq->tq_pend_list)) {
                t = list_entry(tq->tq_pend_list.next, spl_task_t, t_list);
                lowest_id = MIN(lowest_id, t->t_id);
        }

        if (!list_empty(&tq->tq_prio_list)) {
                t = list_entry(tq->tq_prio_list.next, spl_task_t, t_list);
                lowest_id = MIN(lowest_id, t->t_id);
        }

        if (!list_empty(&tq->tq_work_list)) {
                t = list_entry(tq->tq_work_list.next, spl_task_t, t_list);
                lowest_id = MIN(lowest_id, t->t_id);
        }

        return (lowest_id);
}
/*
 * Insert a task into a list keeping the list sorted by increasing
 * taskqid.
 */
static void
taskq_insert_in_order(taskq_t *tq, spl_task_t *t)
{
        spl_task_t *w;
        struct list_head *l;

        ASSERT(tq);
        ASSERT(t);
        ASSERT(spin_is_locked(&tq->tq_lock));

        list_for_each_prev(l, &tq->tq_work_list) {
                w = list_entry(l, spl_task_t, t_list);
                if (w->t_id < t->t_id) {
                        list_add(&t->t_list, l);
                        break;
                }
        }

        /* Walked off the head; t has the smallest id, add it first */
        if (l == &tq->tq_work_list)
                list_add(&t->t_list, &tq->tq_work_list);
}
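
/*
 * Worked example (illustrative): with a work list holding ids [2, 5, 9],
 * inserting id 7 walks backward from 9, finds 5 < 7, and links the new
 * task after 5, giving [2, 5, 7, 9].  If the new id were the smallest,
 * the walk would fall off the list head and the final list_add() above
 * would place it at the front.
 */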
static int
taskq_thread(void *args)
{
        DECLARE_WAITQUEUE(wait, current);
        sigset_t blocked;
        taskqid_t id;
        taskq_t *tq = args;
        spl_task_t *t;
        struct list_head *pend_list;

        ASSERT(tq);

        current->flags |= PF_NOFREEZE;

        /* Block all signals; taskq threads are never interrupted by them */
        sigfillset(&blocked);
        sigprocmask(SIG_BLOCK, &blocked, NULL);
        flush_signals(current);

        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        tq->tq_nthreads++;
        wake_up(&tq->tq_wait_waitq);
        set_current_state(TASK_INTERRUPTIBLE);

        while (!kthread_should_stop()) {

                add_wait_queue(&tq->tq_work_waitq, &wait);
                if (list_empty(&tq->tq_pend_list) &&
                    list_empty(&tq->tq_prio_list)) {
                        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
                        schedule();
                        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
                } else {
                        __set_current_state(TASK_RUNNING);
                }

                remove_wait_queue(&tq->tq_work_waitq, &wait);

                if (!list_empty(&tq->tq_prio_list))
                        pend_list = &tq->tq_prio_list;
                else if (!list_empty(&tq->tq_pend_list))
                        pend_list = &tq->tq_pend_list;
                else
                        pend_list = NULL;

                if (pend_list) {
                        t = list_entry(pend_list->next, spl_task_t, t_list);
                        list_del_init(&t->t_list);
                        taskq_insert_in_order(tq, t);
                        tq->tq_nactive++;
                        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

                        /* Perform the requested task */
                        t->t_func(t->t_arg);

                        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
                        tq->tq_nactive--;
                        id = t->t_id;
                        task_done(tq, t);

                        /* When the current lowest outstanding taskqid is
                         * done calculate the new lowest outstanding id */
                        if (tq->tq_lowest_id == id) {
                                tq->tq_lowest_id = taskq_lowest_id(tq);
                                ASSERT(tq->tq_lowest_id > id);
                        }

                        wake_up_all(&tq->tq_wait_waitq);
                }

                set_current_state(TASK_INTERRUPTIBLE);
        }

        __set_current_state(TASK_RUNNING);
        tq->tq_nthreads--;
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        return (0);
}
taskq_t *
__taskq_create(const char *name, int nthreads, pri_t pri,
    int minalloc, int maxalloc, uint_t flags)
{
        taskq_t *tq;
        struct task_struct *t;
        int rc = 0, i, j = 0;

        ASSERT(name != NULL);
        ASSERT(pri <= maxclsyspri);
        ASSERT(minalloc >= 0);
        ASSERT(maxalloc <= INT_MAX);
        ASSERT(!(flags & (TASKQ_CPR_SAFE | TASKQ_DYNAMIC))); /* Unsupported */

        /* Scale the number of threads using nthreads as a percentage */
        if (flags & TASKQ_THREADS_CPU_PCT) {
                ASSERT(nthreads <= 100);
                ASSERT(nthreads >= 0);
                nthreads = MIN(nthreads, 100);
                nthreads = MAX(nthreads, 0);
                nthreads = MAX((num_online_cpus() * nthreads) / 100, 1);
        }
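
        /*
         * Worked example (illustrative): on an 8-CPU machine a request
         * for 75 with TASKQ_THREADS_CPU_PCT yields (8 * 75) / 100 = 6
         * threads.  The MAX(..., 1) clamp guarantees at least one thread
         * even when the percentage rounds down to zero, e.g. a single
         * CPU at 10% gives (1 * 10) / 100 = 0, clamped to 1.
         */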
        tq = kmem_alloc(sizeof(*tq), KM_SLEEP);
        if (tq == NULL)
                return (NULL);

        tq->tq_threads = kmem_alloc(nthreads * sizeof(t), KM_SLEEP);
        if (tq->tq_threads == NULL) {
                kmem_free(tq, sizeof(*tq));
                return (NULL);
        }

        spin_lock_init(&tq->tq_lock);
        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        tq->tq_name = name;
        tq->tq_nactive = 0;
        tq->tq_nthreads = 0;
        tq->tq_pri = pri;
        tq->tq_minalloc = minalloc;
        tq->tq_maxalloc = maxalloc;
        tq->tq_nalloc = 0;
        tq->tq_flags = (flags | TQ_ACTIVE);
        tq->tq_next_id = 1;
        tq->tq_lowest_id = 1;
        INIT_LIST_HEAD(&tq->tq_free_list);
        INIT_LIST_HEAD(&tq->tq_work_list);
        INIT_LIST_HEAD(&tq->tq_pend_list);
        INIT_LIST_HEAD(&tq->tq_prio_list);
        init_waitqueue_head(&tq->tq_work_waitq);
        init_waitqueue_head(&tq->tq_wait_waitq);

        if (flags & TASKQ_PREPOPULATE)
                for (i = 0; i < minalloc; i++)
                        task_done(tq, task_alloc(tq, TQ_SLEEP | TQ_NEW));

        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        for (i = 0; i < nthreads; i++) {
                t = kthread_create(taskq_thread, tq, "%s/%d", name, i);
                if (!IS_ERR(t)) {
                        tq->tq_threads[i] = t;
                        kthread_bind(t, i % num_online_cpus());
                        set_user_nice(t, PRIO_TO_NICE(pri));
                        wake_up_process(t);
                        j++;
                } else {
                        tq->tq_threads[i] = NULL;
                        rc = 1;
                }
        }

        /* Wait for all threads to be started before potential destroy */
        wait_event(tq->tq_wait_waitq, tq->tq_nthreads == j);

        if (rc) {
                __taskq_destroy(tq);
                tq = NULL;
        }

        return (tq);
}
EXPORT_SYMBOL(__taskq_create);
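
/*
 * Example (illustrative): typical lifecycle of a private taskq, using
 * the wrappers from <sys/taskq.h>.  The queue name, thread count, and
 * pool sizes are hypothetical values.
 *
 *   taskq_t *tq = taskq_create("my_taskq", 4, minclsyspri,
 *                              4, 32, TASKQ_PREPOPULATE);
 *   if (tq == NULL)
 *           ... handle allocation failure ...
 *
 *   (void) taskq_dispatch(tq, my_func, my_arg, TQ_SLEEP);
 *   taskq_wait(tq);     // drain everything dispatched so far
 *   taskq_destroy(tq);  // stops the threads and frees the queue
 */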
void
__taskq_destroy(taskq_t *tq)
{
        spl_task_t *t;
        int i, nthreads;

        ASSERT(tq);

        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        tq->tq_flags &= ~TQ_ACTIVE;
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        /* TQ_ACTIVE cleared prevents new tasks being added to pending */
        __taskq_wait(tq);

        nthreads = tq->tq_nthreads;
        for (i = 0; i < nthreads; i++)
                if (tq->tq_threads[i])
                        kthread_stop(tq->tq_threads[i]);

        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

        while (!list_empty(&tq->tq_free_list)) {
                t = list_entry(tq->tq_free_list.next, spl_task_t, t_list);
                list_del_init(&t->t_list);
                task_free(tq, t);
        }

        ASSERT(tq->tq_nthreads == 0);
        ASSERT(tq->tq_nalloc == 0);
        ASSERT(list_empty(&tq->tq_free_list));
        ASSERT(list_empty(&tq->tq_work_list));
        ASSERT(list_empty(&tq->tq_pend_list));
        ASSERT(list_empty(&tq->tq_prio_list));

        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
        kmem_free(tq->tq_threads, nthreads * sizeof(spl_task_t *));
        kmem_free(tq, sizeof(taskq_t));
}
EXPORT_SYMBOL(__taskq_destroy);
int
spl_taskq_init(void)
{
        /* Solaris creates a dynamic taskq of up to 64 threads, however in
         * a Linux environment 1 thread per-core is usually about right */
        system_taskq = taskq_create("spl_system_taskq", num_online_cpus(),
            minclsyspri, 4, 512, TASKQ_PREPOPULATE);
        if (system_taskq == NULL)
                return (1);

        return (0);
}

void
spl_taskq_fini(void)
{
        taskq_destroy(system_taskq);
}