/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *  UCRL-CODE-235197
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://github.com/behlendorf/spl/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *****************************************************************************
 *  Solaris Porting Layer (SPL) Task Queue Implementation.
\*****************************************************************************/

#include <sys/taskq.h>
#include <sys/kmem.h>
#include <spl-debug.h>

#ifdef SS_DEBUG_SUBSYS
#undef SS_DEBUG_SUBSYS
#endif

#define SS_DEBUG_SUBSYS SS_TASKQ

/* Global system-wide dynamic task queue available for all consumers */
taskq_t *system_taskq;
EXPORT_SYMBOL(system_taskq);

typedef struct taskq_ent {
        spinlock_t              tqent_lock;
        struct list_head        tqent_list;
        taskqid_t               tqent_id;
        task_func_t             *tqent_func;
        void                    *tqent_arg;
} taskq_ent_t;
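
/*
 * A taskq_ent_t is always on exactly one list: the taskq's free list
 * while waiting to be reused, the pending (or priority) list while
 * waiting for a worker thread, or the work list while its function is
 * executing.  task_alloc(), task_done(), and task_free() below manage
 * this lifecycle.
 */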

/*
 * NOTE: Must be called with tq->tq_lock held, returns a taskq_ent_t
 * which is not attached to the free, work, or pending taskq lists.
 */
static taskq_ent_t *
task_alloc(taskq_t *tq, uint_t flags)
{
        taskq_ent_t *t;
        int count = 0;
        SENTRY;

        ASSERT(tq);
        ASSERT(flags & (TQ_SLEEP | TQ_NOSLEEP));               /* One set */
        ASSERT(!((flags & TQ_SLEEP) && (flags & TQ_NOSLEEP))); /* Not both */
        ASSERT(spin_is_locked(&tq->tq_lock));
retry:
        /* Acquire taskq_ent_t's from free list if available */
        if (!list_empty(&tq->tq_free_list) && !(flags & TQ_NEW)) {
                t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);
                list_del_init(&t->tqent_list);
                SRETURN(t);
        }

        /* Free list is empty and memory allocations are prohibited */
        if (flags & TQ_NOALLOC)
                SRETURN(NULL);

        /* Hit maximum taskq_ent_t pool size */
        if (tq->tq_nalloc >= tq->tq_maxalloc) {
                if (flags & TQ_NOSLEEP)
                        SRETURN(NULL);

                /*
                 * Sleep periodically polling the free list for an available
                 * taskq_ent_t.  Dispatching with TQ_SLEEP should always
                 * succeed, but we cannot block forever waiting for a
                 * taskq_ent_t to show up on the free list, otherwise a
                 * deadlock can happen.
                 *
                 * Therefore, we need to allocate a new task even if the
                 * number of allocated tasks is above tq->tq_maxalloc, but
                 * we still delay the allocation by polling for up to one
                 * second (100 retries of HZ/100), thereby throttling the
                 * task dispatch rate.
                 */
                spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
                schedule_timeout(HZ / 100);
                spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
                if (count < 100)
                        SGOTO(retry, count++);
        }

        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
        t = kmem_alloc(sizeof(taskq_ent_t), flags & (TQ_SLEEP | TQ_NOSLEEP));
        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

        if (t) {
                spin_lock_init(&t->tqent_lock);
                INIT_LIST_HEAD(&t->tqent_list);
                t->tqent_id = 0;
                t->tqent_func = NULL;
                t->tqent_arg = NULL;
                tq->tq_nalloc++;
        }

        SRETURN(t);
}

/*
 * NOTE: Must be called with tq->tq_lock held, expects the taskq_ent_t
 * to already be removed from the free, work, or pending taskq lists.
 */
static void
task_free(taskq_t *tq, taskq_ent_t *t)
{
        SENTRY;

        ASSERT(tq);
        ASSERT(t);
        ASSERT(spin_is_locked(&tq->tq_lock));
        ASSERT(list_empty(&t->tqent_list));

        kmem_free(t, sizeof(taskq_ent_t));
        tq->tq_nalloc--;

        SEXIT;
}

/*
 * NOTE: Must be called with tq->tq_lock held, either destroys the
 * taskq_ent_t if too many exist or moves it to the free list for later use.
 */
static void
task_done(taskq_t *tq, taskq_ent_t *t)
{
        SENTRY;
        ASSERT(tq);
        ASSERT(t);
        ASSERT(spin_is_locked(&tq->tq_lock));

        list_del_init(&t->tqent_list);

        if (tq->tq_nalloc <= tq->tq_minalloc) {
                t->tqent_id = 0;
                t->tqent_func = NULL;
                t->tqent_arg = NULL;
                list_add_tail(&t->tqent_list, &tq->tq_free_list);
        } else {
                task_free(tq, t);
        }

        SEXIT;
}

/*
 * As tasks are submitted to the task queue they are assigned a
 * monotonically increasing taskqid and added to the tail of the pending
 * list.  As worker threads become available the tasks are removed from
 * the head of the pending or priority list, giving preference to the
 * priority list.  The tasks are then added to the work list, preserving
 * the ordering by taskqid.  Finally, as tasks complete they are removed
 * from the work list.  This means that the pending and work lists are
 * always kept sorted by taskqid.  Thus the lowest outstanding
 * incomplete taskqid can be determined simply by checking the min
 * taskqid for each head item on the pending, priority, and work lists.
 * This value is stored in tq->tq_lowest_id and only updated to the new
 * lowest id when the previous lowest id completes.  All taskqids lower
 * than tq->tq_lowest_id must have completed.  It is also possible that
 * larger taskqids have completed because they may be processed in
 * parallel by several worker threads.  However, this is not a problem
 * because the behavior of taskq_wait_id() is to block until all
 * previously submitted taskqids have completed.
 *
 * XXX: Taskqid_t wrapping is not handled.  However, taskqid_t's are
 * 64-bit values so even if a taskq is processing 2^24 (16,777,216)
 * taskqids per second it will still take 2^40 seconds, 34,865 years,
 * before the wrap occurs.  I can live with that for now.
 */
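
/*
 * For example (illustrative ids, not from a real trace): with taskqid 7
 * still pending and taskqid 8 executing on the work list, tq_lowest_id
 * is 7.  Even after 8 completes, tq_lowest_id remains 7 until 7
 * finishes, so a taskq_wait_id(tq, 8) caller keeps blocking;
 * taskq_wait_check() only returns true once tq_lowest_id has advanced
 * past the waited-for id.
 */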
static int
taskq_wait_check(taskq_t *tq, taskqid_t id)
{
        int rc;

        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        rc = (id < tq->tq_lowest_id);
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        SRETURN(rc);
}

void
__taskq_wait_id(taskq_t *tq, taskqid_t id)
{
        SENTRY;
        ASSERT(tq);

        wait_event(tq->tq_wait_waitq, taskq_wait_check(tq, id));

        SEXIT;
}
EXPORT_SYMBOL(__taskq_wait_id);

void
__taskq_wait(taskq_t *tq)
{
        taskqid_t id;
        SENTRY;
        ASSERT(tq);

        /* Wait for the largest outstanding taskqid */
        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        id = tq->tq_next_id - 1;
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        __taskq_wait_id(tq, id);

        SEXIT;
}
EXPORT_SYMBOL(__taskq_wait);

int
__taskq_member(taskq_t *tq, void *t)
{
        int i;
        SENTRY;

        ASSERT(tq);
        ASSERT(t);

        for (i = 0; i < tq->tq_nthreads; i++)
                if (tq->tq_threads[i] == (struct task_struct *)t)
                        SRETURN(1);

        SRETURN(0);
}
EXPORT_SYMBOL(__taskq_member);
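
/*
 * A dispatched function can call taskq_member(tq, curthread) to detect
 * that it is running inside one of tq's worker threads, e.g. to avoid
 * re-dispatching to the same queue and deadlocking.  (In the SPL's
 * Solaris compatibility layer curthread maps to the current
 * task_struct, which is what the comparison above expects.)
 */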

taskqid_t
__taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
{
        taskq_ent_t *t;
        taskqid_t rc = 0;
        SENTRY;

        ASSERT(tq);
        ASSERT(func);

        /* Solaris assumes TQ_SLEEP if not passed explicitly */
        if (!(flags & (TQ_SLEEP | TQ_NOSLEEP)))
                flags |= TQ_SLEEP;

        if (unlikely(in_atomic() && (flags & TQ_SLEEP)))
                PANIC("May schedule while atomic: %s/0x%08x/%d\n",
                    current->comm, preempt_count(), current->pid);

        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

        /* Taskq being destroyed and all tasks drained */
        if (!(tq->tq_flags & TQ_ACTIVE))
                SGOTO(out, rc = 0);

        /* Do not queue the task unless there is an idle thread for it */
        ASSERT(tq->tq_nactive <= tq->tq_nthreads);
        if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads))
                SGOTO(out, rc = 0);

        if ((t = task_alloc(tq, flags)) == NULL)
                SGOTO(out, rc = 0);

        spin_lock(&t->tqent_lock);

        /* Queue to the priority list instead of the pending list */
        if (flags & TQ_FRONT)
                list_add_tail(&t->tqent_list, &tq->tq_prio_list);
        else
                list_add_tail(&t->tqent_list, &tq->tq_pend_list);

        t->tqent_id = rc = tq->tq_next_id;
        tq->tq_next_id++;
        t->tqent_func = func;
        t->tqent_arg = arg;
        spin_unlock(&t->tqent_lock);

        wake_up(&tq->tq_work_waitq);
out:
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
        SRETURN(rc);
}
EXPORT_SYMBOL(__taskq_dispatch);
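
/*
 * Usage sketch (illustrative only, not part of this file): dispatching
 * work from sleepable context through the Solaris-compatible
 * taskq_dispatch() wrapper declared in <sys/taskq.h>, then blocking
 * until that specific task has completed.  my_func and my_arg are
 * hypothetical.
 *
 *      taskqid_t id;
 *
 *      id = taskq_dispatch(tq, my_func, my_arg, TQ_SLEEP);
 *      if (id == 0)
 *              return;                 // 0 means the dispatch failed
 *
 *      taskq_wait_id(tq, id);          // all taskqids <= id are done
 */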

/*
 * Returns the lowest incomplete taskqid_t.  The taskqid_t may
 * be queued on the pending list, on the priority list, or on
 * the work list currently being handled, but it is not 100%
 * complete yet.
 */
static taskqid_t
taskq_lowest_id(taskq_t *tq)
{
        taskqid_t lowest_id = tq->tq_next_id;
        taskq_ent_t *t;
        SENTRY;

        ASSERT(tq);
        ASSERT(spin_is_locked(&tq->tq_lock));

        if (!list_empty(&tq->tq_pend_list)) {
                t = list_entry(tq->tq_pend_list.next, taskq_ent_t, tqent_list);
                lowest_id = MIN(lowest_id, t->tqent_id);
        }

        if (!list_empty(&tq->tq_prio_list)) {
                t = list_entry(tq->tq_prio_list.next, taskq_ent_t, tqent_list);
                lowest_id = MIN(lowest_id, t->tqent_id);
        }

        if (!list_empty(&tq->tq_work_list)) {
                t = list_entry(tq->tq_work_list.next, taskq_ent_t, tqent_list);
                lowest_id = MIN(lowest_id, t->tqent_id);
        }

        SRETURN(lowest_id);
}

/*
 * Insert a task into a list keeping the list sorted by increasing
 * taskqid.
 */
static void
taskq_insert_in_order(taskq_t *tq, taskq_ent_t *t)
{
        taskq_ent_t *w;
        struct list_head *l;

        SENTRY;
        ASSERT(tq);
        ASSERT(t);
        ASSERT(spin_is_locked(&tq->tq_lock));

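        /*
         * Walk the work list backwards from the tail; taskqids are
         * assigned in increasing order, so a task just moved to the
         * work list nearly always belongs at or near the tail and the
         * scan terminates quickly.
         */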
        list_for_each_prev(l, &tq->tq_work_list) {
                w = list_entry(l, taskq_ent_t, tqent_list);
                if (w->tqent_id < t->tqent_id) {
                        list_add(&t->tqent_list, l);
                        break;
                }
        }
        if (l == &tq->tq_work_list)
                list_add(&t->tqent_list, &tq->tq_work_list);

        SEXIT;
}

static int
taskq_thread(void *args)
{
        DECLARE_WAITQUEUE(wait, current);
        sigset_t blocked;
        taskqid_t id;
        taskq_t *tq = args;
        taskq_ent_t *t;
        struct list_head *pend_list;
        SENTRY;

        ASSERT(tq);
        current->flags |= PF_NOFREEZE;

        /* Disable the direct memory reclaim path */
        if (tq->tq_flags & TASKQ_NORECLAIM)
                current->flags |= PF_MEMALLOC;

        sigfillset(&blocked);
        sigprocmask(SIG_BLOCK, &blocked, NULL);
        flush_signals(current);

        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        tq->tq_nthreads++;
        wake_up(&tq->tq_wait_waitq);
        set_current_state(TASK_INTERRUPTIBLE);

        while (!kthread_should_stop()) {

                add_wait_queue(&tq->tq_work_waitq, &wait);
                if (list_empty(&tq->tq_pend_list) &&
                    list_empty(&tq->tq_prio_list)) {
                        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
                        schedule();
                        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
                } else {
                        __set_current_state(TASK_RUNNING);
                }

                remove_wait_queue(&tq->tq_work_waitq, &wait);

                if (!list_empty(&tq->tq_prio_list))
                        pend_list = &tq->tq_prio_list;
                else if (!list_empty(&tq->tq_pend_list))
                        pend_list = &tq->tq_pend_list;
                else
                        pend_list = NULL;

                if (pend_list) {
                        t = list_entry(pend_list->next, taskq_ent_t, tqent_list);
                        list_del_init(&t->tqent_list);
                        taskq_insert_in_order(tq, t);
                        tq->tq_nactive++;
                        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

                        /* Perform the requested task */
                        t->tqent_func(t->tqent_arg);

                        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
                        tq->tq_nactive--;
                        id = t->tqent_id;
                        task_done(tq, t);

                        /*
                         * When the current lowest outstanding taskqid is
                         * done, calculate the new lowest outstanding id.
                         */
                        if (tq->tq_lowest_id == id) {
                                tq->tq_lowest_id = taskq_lowest_id(tq);
                                ASSERT(tq->tq_lowest_id > id);
                        }

                        wake_up_all(&tq->tq_wait_waitq);
                }

                set_current_state(TASK_INTERRUPTIBLE);
        }

        __set_current_state(TASK_RUNNING);
        tq->tq_nthreads--;
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        SRETURN(0);
}

taskq_t *
__taskq_create(const char *name, int nthreads, pri_t pri,
    int minalloc, int maxalloc, uint_t flags)
{
        taskq_t *tq;
        struct task_struct *t;
        int rc = 0, i, j = 0;
        SENTRY;

        ASSERT(name != NULL);
        ASSERT(pri <= maxclsyspri);
        ASSERT(minalloc >= 0);
        ASSERT(maxalloc <= INT_MAX);
        ASSERT(!(flags & (TASKQ_CPR_SAFE | TASKQ_DYNAMIC))); /* Unsupported */

        /* Scale the number of threads using nthreads as a percentage */
        if (flags & TASKQ_THREADS_CPU_PCT) {
                ASSERT(nthreads <= 100);
                ASSERT(nthreads >= 0);
                nthreads = MIN(nthreads, 100);
                nthreads = MAX(nthreads, 0);
                nthreads = MAX((num_online_cpus() * nthreads) / 100, 1);
        }

        tq = kmem_alloc(sizeof(*tq), KM_SLEEP);
        if (tq == NULL)
                SRETURN(NULL);

        tq->tq_threads = kmem_alloc(nthreads * sizeof(struct task_struct *),
            KM_SLEEP);
        if (tq->tq_threads == NULL) {
                kmem_free(tq, sizeof(*tq));
                SRETURN(NULL);
        }

        spin_lock_init(&tq->tq_lock);
        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        tq->tq_name = name;
        tq->tq_nactive = 0;
        tq->tq_nthreads = 0;
        tq->tq_pri = pri;
        tq->tq_minalloc = minalloc;
        tq->tq_maxalloc = maxalloc;
        tq->tq_nalloc = 0;
        tq->tq_flags = (flags | TQ_ACTIVE);
        tq->tq_next_id = 1;
        tq->tq_lowest_id = 1;
        INIT_LIST_HEAD(&tq->tq_free_list);
        INIT_LIST_HEAD(&tq->tq_work_list);
        INIT_LIST_HEAD(&tq->tq_pend_list);
        INIT_LIST_HEAD(&tq->tq_prio_list);
        init_waitqueue_head(&tq->tq_work_waitq);
        init_waitqueue_head(&tq->tq_wait_waitq);

        if (flags & TASKQ_PREPOPULATE)
                for (i = 0; i < minalloc; i++)
                        task_done(tq, task_alloc(tq, TQ_SLEEP | TQ_NEW));

        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        for (i = 0; i < nthreads; i++) {
                t = kthread_create(taskq_thread, tq, "%s/%d", name, i);
                if (t) {
                        tq->tq_threads[i] = t;
                        kthread_bind(t, i % num_online_cpus());
                        set_user_nice(t, PRIO_TO_NICE(pri));
                        wake_up_process(t);
                        j++;
                } else {
                        tq->tq_threads[i] = NULL;
                        rc = 1;
                }
        }

        /* Wait for all threads to be started before potential destroy */
        wait_event(tq->tq_wait_waitq, tq->tq_nthreads == j);

        if (rc) {
                __taskq_destroy(tq);
                tq = NULL;
        }

        SRETURN(tq);
}
EXPORT_SYMBOL(__taskq_create);

void
__taskq_destroy(taskq_t *tq)
{
        taskq_ent_t *t;
        int i, nthreads;
        SENTRY;

        ASSERT(tq);
        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        tq->tq_flags &= ~TQ_ACTIVE;
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        /* TQ_ACTIVE cleared prevents new tasks being added to pending */
        __taskq_wait(tq);

        nthreads = tq->tq_nthreads;
        for (i = 0; i < nthreads; i++)
                if (tq->tq_threads[i])
                        kthread_stop(tq->tq_threads[i]);

        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

        while (!list_empty(&tq->tq_free_list)) {
                t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);
                list_del_init(&t->tqent_list);
                task_free(tq, t);
        }

        ASSERT(tq->tq_nthreads == 0);
        ASSERT(tq->tq_nalloc == 0);
        ASSERT(list_empty(&tq->tq_free_list));
        ASSERT(list_empty(&tq->tq_work_list));
        ASSERT(list_empty(&tq->tq_pend_list));
        ASSERT(list_empty(&tq->tq_prio_list));

        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
        kmem_free(tq->tq_threads, nthreads * sizeof(struct task_struct *));
        kmem_free(tq, sizeof(taskq_t));

        SEXIT;
}
EXPORT_SYMBOL(__taskq_destroy);
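
/*
 * Lifecycle sketch (illustrative only): a private taskq created, used,
 * drained, and destroyed through the Solaris-compatible wrappers, as
 * spl_taskq_init() below does for the system taskq.  The name, thread
 * count, alloc bounds, and my_func/my_arg are arbitrary placeholders.
 *
 *      taskq_t *tq = taskq_create("my_taskq", 4, minclsyspri,
 *          4, 512, TASKQ_PREPOPULATE);
 *      if (tq == NULL)
 *              return (1);
 *
 *      (void) taskq_dispatch(tq, my_func, my_arg, TQ_SLEEP);
 *
 *      taskq_wait(tq);         // wait for all outstanding taskqids
 *      taskq_destroy(tq);      // asserts the queue is fully drained
 */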

int
spl_taskq_init(void)
{
        SENTRY;

        /*
         * Solaris creates a dynamic taskq of up to 64 threads, however in
         * a Linux environment 1 thread per-core is usually about right.
         */
        system_taskq = taskq_create("spl_system_taskq", num_online_cpus(),
            minclsyspri, 4, 512, TASKQ_PREPOPULATE);
        if (system_taskq == NULL)
                SRETURN(1);

        SRETURN(0);
}

void
spl_taskq_fini(void)
{
        SENTRY;
        taskq_destroy(system_taskq);
        SEXIT;
}