/*
 * This file is part of the SPL: Solaris Porting Layer.
 *
 * Copyright (c) 2008 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory
 * Written by:
 *         Brian Behlendorf <behlendorf1@llnl.gov>,
 *         Herb Wartens <wartens2@llnl.gov>,
 *         Jim Garlick <garlick@llnl.gov>
 * UCRL-CODE-235197
 *
 * This is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <sys/taskq.h>
#include <sys/kmem.h>

#ifdef DEBUG_SUBSYSTEM
#undef DEBUG_SUBSYSTEM
#endif

#define DEBUG_SUBSYSTEM S_TASKQ

/* Global system-wide dynamic task queue available for all consumers */
taskq_t *system_taskq;
EXPORT_SYMBOL(system_taskq);

typedef struct spl_task {
        spinlock_t              t_lock;    /* Protects t_id, t_func, t_arg */
        struct list_head        t_list;    /* Free, pending, or work list */
        taskqid_t               t_id;      /* Id assigned at dispatch time */
        task_func_t             *t_func;   /* Function to invoke */
        void                    *t_arg;    /* Argument passed to t_func */
} spl_task_t;

/*
 * NOTE: Must be called with tq->tq_lock held. Returns an spl_task_t
 * which is not attached to the free, work, or pending taskq lists.
 */
static spl_task_t *
task_alloc(taskq_t *tq, uint_t flags)
{
        spl_task_t *t;
        int count = 0;
        ENTRY;

        ASSERT(tq);
        ASSERT(flags & (TQ_SLEEP | TQ_NOSLEEP));               /* One set */
        ASSERT(!((flags & TQ_SLEEP) && (flags & TQ_NOSLEEP))); /* Not both */
        ASSERT(spin_is_locked(&tq->tq_lock));
retry:
        /* Acquire spl_task_t's from the free list if available */
        if (!list_empty(&tq->tq_free_list) && !(flags & TQ_NEW)) {
                t = list_entry(tq->tq_free_list.next, spl_task_t, t_list);
                list_del_init(&t->t_list);
                RETURN(t);
        }

        /* Free list is empty and memory allocations are prohibited */
        if (flags & TQ_NOALLOC)
                RETURN(NULL);

        /* Hit maximum spl_task_t pool size */
        if (tq->tq_nalloc >= tq->tq_maxalloc) {
                if (flags & TQ_NOSLEEP)
                        RETURN(NULL);

                /* Sleep periodically polling the free list for an available
                 * spl_task_t. If a full second passes (100 retries of
                 * HZ / 100 each) and we still have not found one, give up
                 * and return NULL to the caller. */
                if (flags & TQ_SLEEP) {
                        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
                        schedule_timeout(HZ / 100);
                        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
                        if (count < 100)
                                GOTO(retry, count++);

                        RETURN(NULL);
                }

                /* Unreachable, flags must contain TQ_SLEEP or TQ_NOSLEEP */
                SBUG();
        }

        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
        t = kmem_alloc(sizeof(spl_task_t), flags & (TQ_SLEEP | TQ_NOSLEEP));
        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

        if (t) {
                spin_lock_init(&t->t_lock);
                INIT_LIST_HEAD(&t->t_list);
                t->t_id = 0;
                t->t_func = NULL;
                t->t_arg = NULL;
                tq->tq_nalloc++;
        }

        RETURN(t);
}

/*
 * NOTE: Must be called with tq->tq_lock held. Expects the spl_task_t
 * to already be removed from the free, work, or pending taskq lists.
 */
static void
task_free(taskq_t *tq, spl_task_t *t)
{
        ENTRY;

        ASSERT(tq);
        ASSERT(t);
        ASSERT(spin_is_locked(&tq->tq_lock));
        ASSERT(list_empty(&t->t_list));

        kmem_free(t, sizeof(spl_task_t));
        tq->tq_nalloc--;

        EXIT;
}

/*
 * NOTE: Must be called with tq->tq_lock held. Either destroys the
 * spl_task_t if too many exist or moves it to the free list for later use.
 */
static void
task_done(taskq_t *tq, spl_task_t *t)
{
        ENTRY;
        ASSERT(tq);
        ASSERT(t);
        ASSERT(spin_is_locked(&tq->tq_lock));

        list_del_init(&t->t_list);

        if (tq->tq_nalloc <= tq->tq_minalloc) {
                t->t_id = 0;
                t->t_func = NULL;
                t->t_arg = NULL;
                list_add_tail(&t->t_list, &tq->tq_free_list);
        } else {
                task_free(tq, t);
        }

        EXIT;
}

/*
 * As tasks are submitted to the task queue they are assigned a
 * monotonically increasing taskqid and added to the tail of the
 * pending list. As worker threads become available the tasks are
 * removed from the head of the pending list and added to the tail
 * of the work list. Finally, as tasks complete they are removed
 * from the work list. This means that the pending and work lists
 * are always kept sorted by taskqid. Thus the lowest outstanding
 * incomplete taskqid can be determined simply by checking the min
 * taskqid for each head item on the pending and work list. This
 * value is stored in tq->tq_lowest_id and only updated to the new
 * lowest id when the previous lowest id completes. All taskqids
 * lower than tq->tq_lowest_id must have completed. It is also
 * possible that larger taskqids have completed because they may be
 * processed in parallel by several worker threads. However, this
 * is not a problem because the behavior of taskq_wait_id() is to
 * block until all previously submitted taskqids have completed.
 *
 * XXX: Taskqid wrapping is not handled. However, taskqids are
 * 64-bit values so even if a taskq is processing 2^24 (16,777,216)
 * taskqids per second it will still take 2^40 seconds, 34,865 years,
 * before the wrap occurs. I can live with that for now.
 */
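/*
 * Illustrative walk-through of the scheme above (an example scenario,
 * not code from this file): suppose tasks 1-5 have been dispatched,
 * tasks 1 and 3 are running on the work list, task 2 has already
 * completed, and tasks 4-5 are still pending. While task 1 runs,
 * tq_lowest_id remains 1. When task 1 completes, the recomputation in
 * taskq_lowest_id() sees work-list head 3 and pending-list head 4, so
 * tq_lowest_id jumps directly to MIN(3, 4) = 3, correctly skipping the
 * already-completed task 2.
 */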
static int
taskq_wait_check(taskq_t *tq, taskqid_t id)
{
        int rc;

        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        rc = (id < tq->tq_lowest_id);
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        RETURN(rc);
}

void
__taskq_wait_id(taskq_t *tq, taskqid_t id)
{
        ENTRY;
        ASSERT(tq);

        wait_event(tq->tq_wait_waitq, taskq_wait_check(tq, id));

        EXIT;
}
EXPORT_SYMBOL(__taskq_wait_id);

void
__taskq_wait(taskq_t *tq)
{
        taskqid_t id;
        ENTRY;
        ASSERT(tq);

        /* Wait for the largest outstanding taskqid */
        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        id = tq->tq_next_id - 1;
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        __taskq_wait_id(tq, id);

        EXIT;
}
EXPORT_SYMBOL(__taskq_wait);

int
__taskq_member(taskq_t *tq, void *t)
{
        int i;
        ENTRY;

        ASSERT(tq);
        ASSERT(t);

        for (i = 0; i < tq->tq_nthreads; i++)
                if (tq->tq_threads[i] == (struct task_struct *)t)
                        RETURN(1);

        RETURN(0);
}
EXPORT_SYMBOL(__taskq_member);

taskqid_t
__taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
{
        spl_task_t *t;
        taskqid_t rc = 0;
        ENTRY;

        ASSERT(tq);
        ASSERT(func);
        if (unlikely(in_atomic() && (flags & TQ_SLEEP))) {
                CERROR("May schedule while atomic: %s/0x%08x/%d\n",
                       current->comm, preempt_count(), current->pid);
                SBUG();
        }

        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

        /* Taskq being destroyed and all tasks drained */
        if (!(tq->tq_flags & TQ_ACTIVE))
                GOTO(out, rc = 0);

        /* Do not queue the task unless there is an idle thread for it */
        ASSERT(tq->tq_nactive <= tq->tq_nthreads);
        if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads))
                GOTO(out, rc = 0);

        if ((t = task_alloc(tq, flags)) == NULL)
                GOTO(out, rc = 0);

        spin_lock(&t->t_lock);
        list_add_tail(&t->t_list, &tq->tq_pend_list);
        t->t_id = rc = tq->tq_next_id;
        tq->tq_next_id++;
        t->t_func = func;
        t->t_arg = arg;
        spin_unlock(&t->t_lock);

        wake_up(&tq->tq_work_waitq);
out:
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
        RETURN(rc);
}
EXPORT_SYMBOL(__taskq_dispatch);
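
/*
 * Illustrative dispatch sketch (hypothetical consumer code, not part of
 * this file; my_cb is an assumed name). A return value of 0 means the
 * task was not queued, either because the taskq is being destroyed or
 * because task_alloc() failed:
 *
 *      static void my_cb(void *arg) { ... }
 *
 *      taskqid_t id = __taskq_dispatch(tq, my_cb, arg, TQ_SLEEP);
 *      if (id != 0)
 *              __taskq_wait_id(tq, id);  (blocks until my_cb has run)
 */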

/*
 * Returns the lowest incomplete taskqid_t. The taskqid_t may
 * be queued on the pending list or may be on the work list
 * currently being handled, but it is not 100% complete yet.
 */
static taskqid_t
taskq_lowest_id(taskq_t *tq)
{
        taskqid_t lowest_id = tq->tq_next_id;
        spl_task_t *t;
        ENTRY;

        ASSERT(tq);
        ASSERT(spin_is_locked(&tq->tq_lock));

        if (!list_empty(&tq->tq_pend_list)) {
                t = list_entry(tq->tq_pend_list.next, spl_task_t, t_list);
                lowest_id = MIN(lowest_id, t->t_id);
        }

        if (!list_empty(&tq->tq_work_list)) {
                t = list_entry(tq->tq_work_list.next, spl_task_t, t_list);
                lowest_id = MIN(lowest_id, t->t_id);
        }

        RETURN(lowest_id);
}

static int
taskq_thread(void *args)
{
        DECLARE_WAITQUEUE(wait, current);
        sigset_t blocked;
        taskqid_t id;
        taskq_t *tq = args;
        spl_task_t *t;
        ENTRY;

        ASSERT(tq);
        current->flags |= PF_NOFREEZE;

        /* Block all signals, worker threads must not be interrupted */
        sigfillset(&blocked);
        sigprocmask(SIG_BLOCK, &blocked, NULL);
        flush_signals(current);

        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        tq->tq_nthreads++;
        wake_up(&tq->tq_wait_waitq);
        set_current_state(TASK_INTERRUPTIBLE);

        while (!kthread_should_stop()) {

                add_wait_queue(&tq->tq_work_waitq, &wait);
                if (list_empty(&tq->tq_pend_list)) {
                        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
                        schedule();
                        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
                } else {
                        __set_current_state(TASK_RUNNING);
                }

                remove_wait_queue(&tq->tq_work_waitq, &wait);
                if (!list_empty(&tq->tq_pend_list)) {
                        t = list_entry(tq->tq_pend_list.next, spl_task_t, t_list);
                        list_del_init(&t->t_list);
                        list_add_tail(&t->t_list, &tq->tq_work_list);
                        tq->tq_nactive++;
                        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

                        /* Perform the requested task */
                        t->t_func(t->t_arg);

                        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
                        tq->tq_nactive--;
                        id = t->t_id;
                        task_done(tq, t);

                        /* When the current lowest outstanding taskqid is
                         * done calculate the new lowest outstanding id */
                        if (tq->tq_lowest_id == id) {
                                tq->tq_lowest_id = taskq_lowest_id(tq);
                                ASSERT(tq->tq_lowest_id > id);
                        }

                        wake_up_all(&tq->tq_wait_waitq);
                }

                set_current_state(TASK_INTERRUPTIBLE);
        }

        __set_current_state(TASK_RUNNING);
        tq->tq_nthreads--;
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        RETURN(0);
}

taskq_t *
__taskq_create(const char *name, int nthreads, pri_t pri,
               int minalloc, int maxalloc, uint_t flags)
{
        taskq_t *tq;
        struct task_struct *t;
        int rc = 0, i, j = 0;
        ENTRY;

        ASSERT(name != NULL);
        ASSERT(pri <= maxclsyspri);
        ASSERT(minalloc >= 0);
        ASSERT(maxalloc <= INT_MAX);
        ASSERT(!(flags & (TASKQ_CPR_SAFE | TASKQ_DYNAMIC))); /* Unsupported */

        /* Scale the number of threads using nthreads as a percentage of
         * the online CPUs, e.g. nthreads = 50 on an 8-CPU system yields
         * 4 threads, with a minimum of 1 thread */
        if (flags & TASKQ_THREADS_CPU_PCT) {
                ASSERT(nthreads <= 100);
                ASSERT(nthreads >= 0);
                nthreads = MIN(nthreads, 100);
                nthreads = MAX(nthreads, 0);
                nthreads = MAX((num_online_cpus() * nthreads) / 100, 1);
        }

        tq = kmem_alloc(sizeof(*tq), KM_SLEEP);
        if (tq == NULL)
                RETURN(NULL);

        tq->tq_threads = kmem_alloc(nthreads * sizeof(t), KM_SLEEP);
        if (tq->tq_threads == NULL) {
                kmem_free(tq, sizeof(*tq));
                RETURN(NULL);
        }

        spin_lock_init(&tq->tq_lock);
        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        tq->tq_name = name;
        tq->tq_nactive = 0;
        tq->tq_nthreads = 0;
        tq->tq_pri = pri;
        tq->tq_minalloc = minalloc;
        tq->tq_maxalloc = maxalloc;
        tq->tq_nalloc = 0;
        tq->tq_flags = (flags | TQ_ACTIVE);
        tq->tq_next_id = 1;
        tq->tq_lowest_id = 1;
        INIT_LIST_HEAD(&tq->tq_free_list);
        INIT_LIST_HEAD(&tq->tq_work_list);
        INIT_LIST_HEAD(&tq->tq_pend_list);
        init_waitqueue_head(&tq->tq_work_waitq);
        init_waitqueue_head(&tq->tq_wait_waitq);

        if (flags & TASKQ_PREPOPULATE)
                for (i = 0; i < minalloc; i++)
                        task_done(tq, task_alloc(tq, TQ_SLEEP | TQ_NEW));

        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        for (i = 0; i < nthreads; i++) {
                t = kthread_create(taskq_thread, tq, "%s/%d", name, i);
                if (t) {
                        tq->tq_threads[i] = t;
                        kthread_bind(t, i % num_online_cpus());
                        set_user_nice(t, PRIO_TO_NICE(pri));
                        wake_up_process(t);
                        j++;
                } else {
                        tq->tq_threads[i] = NULL;
                        rc = 1;
                }
        }

        /* Wait for all threads to be started before potential destroy */
        wait_event(tq->tq_wait_waitq, tq->tq_nthreads == j);

        if (rc) {
                __taskq_destroy(tq);
                tq = NULL;
        }

        RETURN(tq);
}
EXPORT_SYMBOL(__taskq_create);
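
/*
 * Illustrative create/destroy sketch (hypothetical values, not part of
 * this file): create a queue with one thread per online CPU at the
 * minimum system priority, prepopulating at least 4 and allowing at
 * most 512 spl_task_t entries, mirroring the spl_taskq_init() call
 * below:
 *
 *      taskq_t *tq = __taskq_create("my_taskq", num_online_cpus(),
 *                                   minclsyspri, 4, 512,
 *                                   TASKQ_PREPOPULATE);
 *      ...
 *      __taskq_destroy(tq);
 */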

void
__taskq_destroy(taskq_t *tq)
{
        spl_task_t *t;
        int i, nthreads;
        ENTRY;

        ASSERT(tq);
        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        tq->tq_flags &= ~TQ_ACTIVE;
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        /* TQ_ACTIVE cleared prevents new tasks being added to pending */
        __taskq_wait(tq);

        nthreads = tq->tq_nthreads;
        for (i = 0; i < nthreads; i++)
                if (tq->tq_threads[i])
                        kthread_stop(tq->tq_threads[i]);

        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

        while (!list_empty(&tq->tq_free_list)) {
                t = list_entry(tq->tq_free_list.next, spl_task_t, t_list);
                list_del_init(&t->t_list);
                task_free(tq, t);
        }

        ASSERT(tq->tq_nthreads == 0);
        ASSERT(tq->tq_nalloc == 0);
        ASSERT(list_empty(&tq->tq_free_list));
        ASSERT(list_empty(&tq->tq_work_list));
        ASSERT(list_empty(&tq->tq_pend_list));

        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
        kmem_free(tq->tq_threads, nthreads * sizeof(struct task_struct *));
        kmem_free(tq, sizeof(taskq_t));

        EXIT;
}
EXPORT_SYMBOL(__taskq_destroy);

int
spl_taskq_init(void)
{
        ENTRY;

        /* Solaris creates a dynamic taskq of up to 64 threads, however
         * in a Linux environment one thread per core is usually about
         * right */
        system_taskq = taskq_create("spl_system_taskq", num_online_cpus(),
                                    minclsyspri, 4, 512, TASKQ_PREPOPULATE);
        if (system_taskq == NULL)
                RETURN(1);

        RETURN(0);
}

void
spl_taskq_fini(void)
{
        ENTRY;
        taskq_destroy(system_taskq);
        EXIT;
}
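
/*
 * Illustrative system_taskq usage (hypothetical consumer code, assuming
 * the unprefixed taskq_dispatch()/taskq_wait() wrappers from
 * <sys/taskq.h>, as taskq_create()/taskq_destroy() are used above): any
 * SPL consumer may dispatch to the global queue rather than maintaining
 * a private taskq:
 *
 *      taskq_dispatch(system_taskq, my_cb, arg, TQ_SLEEP);
 *      taskq_wait(system_taskq);
 */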