/*
 * This file is part of the SPL: Solaris Porting Layer.
 *
 * Copyright (c) 2008 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory
 * Written by:
 *         Brian Behlendorf <behlendorf1@llnl.gov>,
 *         Herb Wartens <wartens2@llnl.gov>,
 *         Jim Garlick <garlick@llnl.gov>
 *
 * This is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <sys/taskq.h>

#ifdef DEBUG_SUBSYSTEM
#undef DEBUG_SUBSYSTEM
#endif

#define DEBUG_SUBSYSTEM S_TASKQ
/* Global system-wide dynamic task queue available for all consumers */
taskq_t *system_taskq;
EXPORT_SYMBOL(system_taskq);
typedef struct spl_task {
        spinlock_t              t_lock;
        struct list_head        t_list;
        taskqid_t               t_id;
        task_func_t             *t_func;
        void                    *t_arg;
} spl_task_t;
/* NOTE: Must be called with tq->tq_lock held.  Returns a spl_task_t
 * which is not attached to the free, work, or pending taskq lists.
 */
static spl_task_t *
task_alloc(taskq_t *tq, uint_t flags)
{
        spl_task_t *t;
        int count = 0;

        ASSERT(flags & (TQ_SLEEP | TQ_NOSLEEP));               /* One set */
        ASSERT(!((flags & TQ_SLEEP) && (flags & TQ_NOSLEEP))); /* Not both */
        ASSERT(spin_is_locked(&tq->tq_lock));
retry:
        /* Acquire a spl_task_t from the free list if one is available */
        if (!list_empty(&tq->tq_free_list) && !(flags & TQ_NEW)) {
                t = list_entry(tq->tq_free_list.next, spl_task_t, t_list);
                list_del_init(&t->t_list);
                return t;
        }

        /* Free list is empty and memory allocations are prohibited */
        if (flags & TQ_NOALLOC)
                return NULL;

        /* Hit maximum spl_task_t pool size */
        if (tq->tq_nalloc >= tq->tq_maxalloc) {
                if (flags & TQ_NOSLEEP)
                        return NULL;

                /* Sleep periodically, polling the free list for an
                 * available spl_task_t.  If a full second passes and we
                 * still have not found one, give up and return NULL. */
                if (flags & TQ_SLEEP) {
                        spin_unlock_irqrestore(&tq->tq_lock,
                                               tq->tq_lock_flags);
                        schedule_timeout(HZ / 100);
                        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
                        if (count++ < 100)
                                goto retry;

                        return NULL;
                }

                /* Unreachable, either TQ_SLEEP or TQ_NOSLEEP is set */
        }

        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
        t = kmem_alloc(sizeof(spl_task_t), flags & (TQ_SLEEP | TQ_NOSLEEP));
        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

        if (t != NULL) {
                spin_lock_init(&t->t_lock);
                INIT_LIST_HEAD(&t->t_list);
                tq->tq_nalloc++;
        }

        return t;
}
/* NOTE: Must be called with tq->tq_lock held.  Expects the spl_task_t
 * to already be removed from the free, work, or pending taskq lists.
 */
static void
task_free(taskq_t *tq, spl_task_t *t)
{
        ASSERT(spin_is_locked(&tq->tq_lock));
        ASSERT(list_empty(&t->t_list));

        kmem_free(t, sizeof(spl_task_t));
        tq->tq_nalloc--;
}
/* NOTE: Must be called with tq->tq_lock held.  Either destroys the
 * spl_task_t if too many exist or moves it to the free list for later use.
 */
static void
task_done(taskq_t *tq, spl_task_t *t)
{
        ASSERT(spin_is_locked(&tq->tq_lock));

        list_del_init(&t->t_list);

        if (tq->tq_nalloc <= tq->tq_minalloc) {
                t->t_id = 0;
                t->t_func = NULL;
                t->t_arg = NULL;
                list_add_tail(&t->t_list, &tq->tq_free_list);
        } else {
                task_free(tq, t);
        }
}
/* Taskqids are handed out in a monotonically increasing fashion per
 * taskq_t.  We don't handle taskqid wrapping yet, but fortunately it is
 * a 64-bit value so this is probably never going to happen.  The lowest
 * pending taskqid is stored in the taskq_t to make it easy for any
 * taskq_wait()'ers to know if the tasks they're waiting for have
 * completed.  Unfortunately, tq_lowest_id is kept up to date in a
 * pretty brain-dead way; something more clever should be done.
 */
static int
taskq_wait_check(taskq_t *tq, taskqid_t id)
{
        int rc;

        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        rc = (id < tq->tq_lowest_id);
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        return rc;
}
/* Waits for all previously scheduled tasks up to the given id to complete.
 * We do not need to wait for tasks scheduled after this call; in other
 * words, we do not need to drain the entire taskq. */
void
__taskq_wait_id(taskq_t *tq, taskqid_t id)
{
        ASSERT(tq);

        wait_event(tq->tq_wait_waitq, taskq_wait_check(tq, id));
}
EXPORT_SYMBOL(__taskq_wait_id);
void
__taskq_wait(taskq_t *tq)
{
        taskqid_t id;

        ASSERT(tq);

        /* Wait for the largest outstanding taskqid */
        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        id = tq->tq_next_id - 1;
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        __taskq_wait_id(tq, id);
}
EXPORT_SYMBOL(__taskq_wait);
int
__taskq_member(taskq_t *tq, void *t)
{
        int i;

        ASSERT(tq);
        ASSERT(t);

        for (i = 0; i < tq->tq_nthreads; i++)
                if (tq->tq_threads[i] == (struct task_struct *)t)
                        return 1;

        return 0;
}
EXPORT_SYMBOL(__taskq_member);
taskqid_t
__taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
{
        spl_task_t *t;
        taskqid_t rc = 0;

        ASSERT(tq);
        ASSERT(func);

        if (unlikely(in_atomic() && (flags & TQ_SLEEP))) {
                CERROR("May schedule while atomic: %s/0x%08x/%d\n",
                       current->comm, preempt_count(), current->pid);
        }

        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

        /* Taskq being destroyed and all tasks drained */
        if (!(tq->tq_flags & TQ_ACTIVE))
                goto out;

        /* Do not queue the task unless there is an idle thread for it */
        ASSERT(tq->tq_nactive <= tq->tq_nthreads);
        if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads))
                goto out;

        if ((t = task_alloc(tq, flags)) == NULL)
                goto out;

        spin_lock(&t->t_lock);
        list_add_tail(&t->t_list, &tq->tq_pend_list);
        t->t_id = rc = tq->tq_next_id;
        tq->tq_next_id++;
        t->t_func = func;
        t->t_arg = arg;
        spin_unlock(&t->t_lock);

        wake_up(&tq->tq_work_waitq);
out:
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        return rc;
}
EXPORT_SYMBOL(__taskq_dispatch);
/* NOTE: Must be called with tq->tq_lock held */
static taskqid_t
taskq_lowest_id(taskq_t *tq)
{
        taskqid_t lowest_id = tq->tq_next_id;
        spl_task_t *t;

        ASSERT(spin_is_locked(&tq->tq_lock));

        list_for_each_entry(t, &tq->tq_pend_list, t_list)
                if (t->t_id < lowest_id)
                        lowest_id = t->t_id;

        list_for_each_entry(t, &tq->tq_work_list, t_list)
                if (t->t_id < lowest_id)
                        lowest_id = t->t_id;

        return lowest_id;
}
static int
taskq_thread(void *args)
{
        DECLARE_WAITQUEUE(wait, current);
        sigset_t blocked;
        taskqid_t id;
        taskq_t *tq = args;
        spl_task_t *t;

        ASSERT(tq);

        current->flags |= PF_NOFREEZE;

        /* Block all signals; taskq workers are never signal driven */
        sigfillset(&blocked);
        sigprocmask(SIG_BLOCK, &blocked, NULL);
        flush_signals(current);

        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        tq->tq_nthreads++;
        wake_up(&tq->tq_wait_waitq);
        set_current_state(TASK_INTERRUPTIBLE);

        while (!kthread_should_stop()) {

                add_wait_queue(&tq->tq_work_waitq, &wait);
                if (list_empty(&tq->tq_pend_list)) {
                        spin_unlock_irqrestore(&tq->tq_lock,
                                               tq->tq_lock_flags);
                        schedule();
                        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
                } else {
                        __set_current_state(TASK_RUNNING);
                }

                remove_wait_queue(&tq->tq_work_waitq, &wait);
                if (!list_empty(&tq->tq_pend_list)) {
                        t = list_entry(tq->tq_pend_list.next,
                                       spl_task_t, t_list);
                        list_del_init(&t->t_list);
                        list_add_tail(&t->t_list, &tq->tq_work_list);
                        tq->tq_nactive++;
                        spin_unlock_irqrestore(&tq->tq_lock,
                                               tq->tq_lock_flags);

                        /* Perform the requested task */
                        t->t_func(t->t_arg);

                        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
                        tq->tq_nactive--;
                        id = t->t_id;
                        task_done(tq, t);

                        /* When the current lowest outstanding taskqid is
                         * done, calculate the new lowest outstanding id */
                        if (tq->tq_lowest_id == id) {
                                tq->tq_lowest_id = taskq_lowest_id(tq);
                                ASSERT(tq->tq_lowest_id > id);
                        }

                        wake_up_all(&tq->tq_wait_waitq);
                }

                set_current_state(TASK_INTERRUPTIBLE);
        }

        __set_current_state(TASK_RUNNING);
        tq->tq_nthreads--;
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        return 0;
}
taskq_t *
__taskq_create(const char *name, int nthreads, pri_t pri,
               int minalloc, int maxalloc, uint_t flags)
{
        taskq_t *tq;
        struct task_struct *t;
        int rc = 0, i, j = 0;

        ASSERT(name != NULL);
        ASSERT(pri <= maxclsyspri);
        ASSERT(minalloc >= 0);
        ASSERT(maxalloc <= INT_MAX);
        ASSERT(!(flags & (TASKQ_CPR_SAFE | TASKQ_DYNAMIC))); /* Unsupported */

        tq = kmem_alloc(sizeof(*tq), KM_SLEEP);
        if (tq == NULL)
                return NULL;

        tq->tq_threads = kmem_alloc(nthreads * sizeof(t), KM_SLEEP);
        if (tq->tq_threads == NULL) {
                kmem_free(tq, sizeof(*tq));
                return NULL;
        }

        spin_lock_init(&tq->tq_lock);
        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        tq->tq_nthreads = 0;
        tq->tq_nactive = 0;
        tq->tq_nalloc = 0;
        tq->tq_minalloc = minalloc;
        tq->tq_maxalloc = maxalloc;
        tq->tq_flags = (flags | TQ_ACTIVE);
        tq->tq_next_id = 1;
        tq->tq_lowest_id = 1;
        INIT_LIST_HEAD(&tq->tq_free_list);
        INIT_LIST_HEAD(&tq->tq_work_list);
        INIT_LIST_HEAD(&tq->tq_pend_list);
        init_waitqueue_head(&tq->tq_work_waitq);
        init_waitqueue_head(&tq->tq_wait_waitq);

        if (flags & TASKQ_PREPOPULATE)
                for (i = 0; i < minalloc; i++)
                        task_done(tq, task_alloc(tq, TQ_SLEEP | TQ_NEW));

        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        for (i = 0; i < nthreads; i++) {
                t = kthread_create(taskq_thread, tq, "%s/%d", name, i);
                if (t) {
                        tq->tq_threads[i] = t;
                        kthread_bind(t, i % num_online_cpus());
                        set_user_nice(t, PRIO_TO_NICE(pri));
                        wake_up_process(t);
                        j++;
                } else {
                        tq->tq_threads[i] = NULL;
                        rc = 1;
                }
        }

        /* Wait for all threads to be started before potential destroy */
        wait_event(tq->tq_wait_waitq, tq->tq_nthreads == j);

        if (rc) {
                __taskq_destroy(tq);
                tq = NULL;
        }

        return tq;
}
EXPORT_SYMBOL(__taskq_create);
void
__taskq_destroy(taskq_t *tq)
{
        spl_task_t *t;
        int i, nthreads;

        ASSERT(tq);

        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        tq->tq_flags &= ~TQ_ACTIVE;
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        /* Clearing TQ_ACTIVE prevents new tasks from being added to the
         * pending list, so the worker threads may now be stopped safely */
        nthreads = tq->tq_nthreads;
        for (i = 0; i < nthreads; i++)
                if (tq->tq_threads[i])
                        kthread_stop(tq->tq_threads[i]);

        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

        while (!list_empty(&tq->tq_free_list)) {
                t = list_entry(tq->tq_free_list.next, spl_task_t, t_list);
                list_del_init(&t->t_list);
                task_free(tq, t);
        }

        ASSERT(tq->tq_nthreads == 0);
        ASSERT(tq->tq_nalloc == 0);
        ASSERT(list_empty(&tq->tq_free_list));
        ASSERT(list_empty(&tq->tq_work_list));
        ASSERT(list_empty(&tq->tq_pend_list));

        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        kmem_free(tq->tq_threads, nthreads * sizeof(struct task_struct *));
        kmem_free(tq, sizeof(taskq_t));
}
EXPORT_SYMBOL(__taskq_destroy);
int
spl_taskq_init(void)
{
        /* Solaris creates a dynamic taskq of up to 64 threads, however in
         * a Linux environment one thread per core is usually about right */
        system_taskq = taskq_create("spl_system_taskq", num_online_cpus(),
                                    minclsyspri, 4, 512, TASKQ_PREPOPULATE);
        if (system_taskq == NULL)
                return 1;

        return 0;
}

void
spl_taskq_fini(void)
{
        taskq_destroy(system_taskq);
}