/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *  UCRL-CODE-235197
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://github.com/behlendorf/spl/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *****************************************************************************
 *  Solaris Porting Layer (SPL) Task Queue Implementation.
\*****************************************************************************/

#include <sys/taskq.h>
#include <sys/kmem.h>
#include <spl-debug.h>

#ifdef SS_DEBUG_SUBSYS
#undef SS_DEBUG_SUBSYS
#endif

#define SS_DEBUG_SUBSYS SS_TASKQ

/* Global system-wide dynamic task queue available for all consumers */
taskq_t *system_taskq;
EXPORT_SYMBOL(system_taskq);

typedef struct spl_task {
        spinlock_t              t_lock;
        struct list_head        t_list;
        taskqid_t               t_id;
        task_func_t             *t_func;
        void                    *t_arg;
} spl_task_t;

/*
 * NOTE: Must be called with tq->tq_lock held, returns a list_t which
 * is not attached to the free, work, or pending taskq lists.
 */
static spl_task_t *
task_alloc(taskq_t *tq, uint_t flags)
{
        spl_task_t *t;
        int count = 0;
        SENTRY;

        ASSERT(tq);
        ASSERT(flags & (TQ_SLEEP | TQ_NOSLEEP));               /* One set */
        ASSERT(!((flags & TQ_SLEEP) && (flags & TQ_NOSLEEP))); /* Not both */
        ASSERT(spin_is_locked(&tq->tq_lock));
retry:
        /* Acquire spl_task_t's from the free list if available */
        if (!list_empty(&tq->tq_free_list) && !(flags & TQ_NEW)) {
                t = list_entry(tq->tq_free_list.next, spl_task_t, t_list);
                list_del_init(&t->t_list);
                SRETURN(t);
        }

        /* Free list is empty and memory allocations are prohibited */
        if (flags & TQ_NOALLOC)
                SRETURN(NULL);

        /* Hit maximum spl_task_t pool size */
        if (tq->tq_nalloc >= tq->tq_maxalloc) {
                if (flags & TQ_NOSLEEP)
                        SRETURN(NULL);

                /* Sleep periodically, polling the free list for an available
                 * spl_task_t.  If a full second passes and we have not found
                 * one, give up and return NULL to the caller. */
                if (flags & TQ_SLEEP) {
                        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
                        schedule_timeout(HZ / 100);
                        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
                        if (count < 100)
                                SGOTO(retry, count++);

                        SRETURN(NULL);
                }

                /* Unreachable, neither TQ_SLEEP nor TQ_NOSLEEP is set */
                PANIC("Neither TQ_SLEEP nor TQ_NOSLEEP set");
        }

        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
        t = kmem_alloc(sizeof(spl_task_t), flags & (TQ_SLEEP | TQ_NOSLEEP));
        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

        if (t) {
                spin_lock_init(&t->t_lock);
                INIT_LIST_HEAD(&t->t_list);
                t->t_id = 0;
                t->t_func = NULL;
                t->t_arg = NULL;
                tq->tq_nalloc++;
        }

        SRETURN(t);
}

/*
 * NOTE: Must be called with tq->tq_lock held, expects the spl_task_t
 * to already be removed from the free, work, or pending taskq lists.
 */
static void
task_free(taskq_t *tq, spl_task_t *t)
{
        SENTRY;

        ASSERT(tq);
        ASSERT(t);
        ASSERT(spin_is_locked(&tq->tq_lock));
        ASSERT(list_empty(&t->t_list));

        kmem_free(t, sizeof(spl_task_t));
        tq->tq_nalloc--;

        SEXIT;
}

/*
 * NOTE: Must be called with tq->tq_lock held, either destroys the
 * spl_task_t if too many exist or moves it to the free list for later use.
 */
static void
task_done(taskq_t *tq, spl_task_t *t)
{
        SENTRY;
        ASSERT(tq);
        ASSERT(t);
        ASSERT(spin_is_locked(&tq->tq_lock));

        list_del_init(&t->t_list);

        if (tq->tq_nalloc <= tq->tq_minalloc) {
                t->t_id = 0;
                t->t_func = NULL;
                t->t_arg = NULL;
                list_add_tail(&t->t_list, &tq->tq_free_list);
        } else {
                task_free(tq, t);
        }

        SEXIT;
}

/*
 * As tasks are submitted to the task queue they are assigned a
 * monotonically increasing taskqid and added to the tail of the pending
 * list.  As worker threads become available the tasks are removed from
 * the head of the pending or priority list, giving preference to the
 * priority list.  The tasks are then added to the work list, preserving
 * the ordering by taskqid.  Finally, as tasks complete they are removed
 * from the work list.  This means that the pending and work lists are
 * always kept sorted by taskqid.  Thus the lowest outstanding
 * incomplete taskqid can be determined simply by checking the min
 * taskqid for each head item on the pending, priority, and work list.
 * This value is stored in tq->tq_lowest_id and only updated to the new
 * lowest id when the previous lowest id completes.  All taskqids lower
 * than tq->tq_lowest_id must have completed.  It is also possible that
 * larger taskqids have completed, because they may be processed in
 * parallel by several worker threads.  However, this is not a problem
 * because the behavior of taskq_wait_id() is to block until all
 * previously submitted taskqids have completed.
 *
 * XXX: Taskqid_t wrapping is not handled.  However, taskqid_t's are
 * 64-bit values, so even if a taskq is processing 2^24 (16,777,216)
 * taskqids per second it will still take 2^40 seconds, roughly 34,865
 * years, before the wrap occurs.  I can live with that for now.
 */
static int
taskq_wait_check(taskq_t *tq, taskqid_t id)
{
        int rc;

        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        rc = (id < tq->tq_lowest_id);
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        SRETURN(rc);
}

void
__taskq_wait_id(taskq_t *tq, taskqid_t id)
{
        SENTRY;
        ASSERT(tq);

        wait_event(tq->tq_wait_waitq, taskq_wait_check(tq, id));

        SEXIT;
}
EXPORT_SYMBOL(__taskq_wait_id);

void
__taskq_wait(taskq_t *tq)
{
        taskqid_t id;
        SENTRY;
        ASSERT(tq);

        /* Wait for the largest outstanding taskqid */
        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        id = tq->tq_next_id - 1;
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        __taskq_wait_id(tq, id);

        SEXIT;
}
EXPORT_SYMBOL(__taskq_wait);

int
__taskq_member(taskq_t *tq, void *t)
{
        int i;
        SENTRY;

        ASSERT(tq);
        ASSERT(t);

        for (i = 0; i < tq->tq_nthreads; i++)
                if (tq->tq_threads[i] == (struct task_struct *)t)
                        SRETURN(1);

        SRETURN(0);
}
EXPORT_SYMBOL(__taskq_member);
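
/*
 * Illustrative sketch (not part of the original file): a common use of
 * taskq_member() is for a callback to check whether it is already
 * running on one of a taskq's worker threads, so it can run the work
 * inline instead of re-dispatching to the same queue and risking a
 * deadlock while every worker is busy.  my_task_func() and arg are
 * hypothetical, curthread is assumed to be the SPL alias for the
 * current task, and taskq_member()/taskq_dispatch() are assumed to be
 * the usual <sys/taskq.h> wrappers for the __taskq_* symbols exported
 * here.
 *
 *      if (taskq_member(tq, curthread))
 *              my_task_func(arg);
 *      else
 *              (void) taskq_dispatch(tq, my_task_func, arg, TQ_SLEEP);
 */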

taskqid_t
__taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
{
        spl_task_t *t;
        taskqid_t rc = 0;
        SENTRY;

        ASSERT(tq);
        ASSERT(func);

        /* Solaris assumes TQ_SLEEP if not passed explicitly */
        if (!(flags & (TQ_SLEEP | TQ_NOSLEEP)))
                flags |= TQ_SLEEP;

        if (unlikely(in_atomic() && (flags & TQ_SLEEP)))
                PANIC("May schedule while atomic: %s/0x%08x/%d\n",
                      current->comm, preempt_count(), current->pid);

        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

        /* Taskq being destroyed and all tasks drained */
        if (!(tq->tq_flags & TQ_ACTIVE))
                SGOTO(out, rc = 0);

        /* Do not queue the task unless there is an idle thread for it */
        ASSERT(tq->tq_nactive <= tq->tq_nthreads);
        if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads))
                SGOTO(out, rc = 0);

        if ((t = task_alloc(tq, flags)) == NULL)
                SGOTO(out, rc = 0);

        spin_lock(&t->t_lock);

        /* Queue to the priority list instead of the pending list */
        if (flags & TQ_FRONT)
                list_add_tail(&t->t_list, &tq->tq_prio_list);
        else
                list_add_tail(&t->t_list, &tq->tq_pend_list);

        t->t_id = rc = tq->tq_next_id;
        tq->tq_next_id++;
        t->t_func = func;
        t->t_arg = arg;
        spin_unlock(&t->t_lock);

        wake_up(&tq->tq_work_waitq);
out:
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
        SRETURN(rc);
}
EXPORT_SYMBOL(__taskq_dispatch);
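
/*
 * Illustrative sketch (not part of the original file): dispatching a
 * task and then waiting for it to complete.  my_task_func() and arg
 * are hypothetical, and taskq_dispatch()/taskq_wait_id() are assumed
 * to be the usual <sys/taskq.h> wrappers for the __taskq_* symbols
 * exported above.  A return value of 0 indicates the dispatch failed,
 * for example TQ_NOSLEEP with no free spl_task_t available.
 *
 *      taskqid_t id;
 *
 *      id = taskq_dispatch(tq, my_task_func, arg, TQ_SLEEP);
 *      if (id == 0)
 *              return (-ENOMEM);
 *
 *      taskq_wait_id(tq, id);
 */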

/*
 * Returns the lowest incomplete taskqid_t.  The taskqid_t may
 * be queued on the pending list, on the priority list, or on
 * the work list currently being handled, but it is not 100%
 * complete yet.
 */
static taskqid_t
taskq_lowest_id(taskq_t *tq)
{
        taskqid_t lowest_id = tq->tq_next_id;
        spl_task_t *t;
        SENTRY;

        ASSERT(tq);
        ASSERT(spin_is_locked(&tq->tq_lock));

        if (!list_empty(&tq->tq_pend_list)) {
                t = list_entry(tq->tq_pend_list.next, spl_task_t, t_list);
                lowest_id = MIN(lowest_id, t->t_id);
        }

        if (!list_empty(&tq->tq_prio_list)) {
                t = list_entry(tq->tq_prio_list.next, spl_task_t, t_list);
                lowest_id = MIN(lowest_id, t->t_id);
        }

        if (!list_empty(&tq->tq_work_list)) {
                t = list_entry(tq->tq_work_list.next, spl_task_t, t_list);
                lowest_id = MIN(lowest_id, t->t_id);
        }

        SRETURN(lowest_id);
}

/*
 * Insert a task into a list, keeping the list sorted by increasing
 * taskqid.
 */
static void
taskq_insert_in_order(taskq_t *tq, spl_task_t *t)
{
        spl_task_t *w;
        struct list_head *l;

        SENTRY;
        ASSERT(tq);
        ASSERT(t);
        ASSERT(spin_is_locked(&tq->tq_lock));

        list_for_each_prev(l, &tq->tq_work_list) {
                w = list_entry(l, spl_task_t, t_list);
                if (w->t_id < t->t_id) {
                        list_add(&t->t_list, l);
                        break;
                }
        }
        if (l == &tq->tq_work_list)
                list_add(&t->t_list, &tq->tq_work_list);

        SEXIT;
}

static int
taskq_thread(void *args)
{
        DECLARE_WAITQUEUE(wait, current);
        sigset_t blocked;
        taskqid_t id;
        taskq_t *tq = args;
        spl_task_t *t;
        struct list_head *pend_list;
        SENTRY;

        ASSERT(tq);
        current->flags |= PF_NOFREEZE;

        sigfillset(&blocked);
        sigprocmask(SIG_BLOCK, &blocked, NULL);
        flush_signals(current);

        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        tq->tq_nthreads++;
        wake_up(&tq->tq_wait_waitq);
        set_current_state(TASK_INTERRUPTIBLE);

        while (!kthread_should_stop()) {

                add_wait_queue(&tq->tq_work_waitq, &wait);
                if (list_empty(&tq->tq_pend_list) &&
                    list_empty(&tq->tq_prio_list)) {
                        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
                        schedule();
                        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
                } else {
                        __set_current_state(TASK_RUNNING);
                }

                remove_wait_queue(&tq->tq_work_waitq, &wait);

                if (!list_empty(&tq->tq_prio_list))
                        pend_list = &tq->tq_prio_list;
                else if (!list_empty(&tq->tq_pend_list))
                        pend_list = &tq->tq_pend_list;
                else
                        pend_list = NULL;

                if (pend_list) {
                        t = list_entry(pend_list->next, spl_task_t, t_list);
                        list_del_init(&t->t_list);
                        taskq_insert_in_order(tq, t);
                        tq->tq_nactive++;
                        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

                        /* Perform the requested task */
                        t->t_func(t->t_arg);

                        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
                        tq->tq_nactive--;
                        id = t->t_id;
                        task_done(tq, t);

                        /* When the current lowest outstanding taskqid is
                         * done, calculate the new lowest outstanding id */
                        if (tq->tq_lowest_id == id) {
                                tq->tq_lowest_id = taskq_lowest_id(tq);
                                ASSERT(tq->tq_lowest_id > id);
                        }

                        wake_up_all(&tq->tq_wait_waitq);
                }

                set_current_state(TASK_INTERRUPTIBLE);
        }

        __set_current_state(TASK_RUNNING);
        tq->tq_nthreads--;
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        SRETURN(0);
}

taskq_t *
__taskq_create(const char *name, int nthreads, pri_t pri,
               int minalloc, int maxalloc, uint_t flags)
{
        taskq_t *tq;
        struct task_struct *t;
        int rc = 0, i, j = 0;
        SENTRY;

        ASSERT(name != NULL);
        ASSERT(pri <= maxclsyspri);
        ASSERT(minalloc >= 0);
        ASSERT(maxalloc <= INT_MAX);
        ASSERT(!(flags & (TASKQ_CPR_SAFE | TASKQ_DYNAMIC))); /* Unsupported */

        /* Scale the number of threads using nthreads as a percentage */
        if (flags & TASKQ_THREADS_CPU_PCT) {
                ASSERT(nthreads <= 100);
                ASSERT(nthreads >= 0);
                nthreads = MIN(nthreads, 100);
                nthreads = MAX(nthreads, 0);
                nthreads = MAX((num_online_cpus() * nthreads) / 100, 1);
        }

        tq = kmem_alloc(sizeof(*tq), KM_SLEEP);
        if (tq == NULL)
                SRETURN(NULL);

        tq->tq_threads = kmem_alloc(nthreads * sizeof(t), KM_SLEEP);
        if (tq->tq_threads == NULL) {
                kmem_free(tq, sizeof(*tq));
                SRETURN(NULL);
        }

        spin_lock_init(&tq->tq_lock);
        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        tq->tq_name = name;
        tq->tq_nactive = 0;
        tq->tq_nthreads = 0;
        tq->tq_pri = pri;
        tq->tq_minalloc = minalloc;
        tq->tq_maxalloc = maxalloc;
        tq->tq_nalloc = 0;
        tq->tq_flags = (flags | TQ_ACTIVE);
        tq->tq_next_id = 1;
        tq->tq_lowest_id = 1;
        INIT_LIST_HEAD(&tq->tq_free_list);
        INIT_LIST_HEAD(&tq->tq_work_list);
        INIT_LIST_HEAD(&tq->tq_pend_list);
        INIT_LIST_HEAD(&tq->tq_prio_list);
        init_waitqueue_head(&tq->tq_work_waitq);
        init_waitqueue_head(&tq->tq_wait_waitq);

        if (flags & TASKQ_PREPOPULATE)
                for (i = 0; i < minalloc; i++)
                        task_done(tq, task_alloc(tq, TQ_SLEEP | TQ_NEW));

        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        for (i = 0; i < nthreads; i++) {
                t = kthread_create(taskq_thread, tq, "%s/%d", name, i);
                if (t) {
                        tq->tq_threads[i] = t;
                        kthread_bind(t, i % num_online_cpus());
                        set_user_nice(t, PRIO_TO_NICE(pri));
                        wake_up_process(t);
                        j++;
                } else {
                        tq->tq_threads[i] = NULL;
                        rc = 1;
                }
        }

        /* Wait for all threads to be started before potential destroy */
        wait_event(tq->tq_wait_waitq, tq->tq_nthreads == j);

        if (rc) {
                __taskq_destroy(tq);
                tq = NULL;
        }

        SRETURN(tq);
}
EXPORT_SYMBOL(__taskq_create);
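
/*
 * Illustrative sketch (not part of the original file): creating and
 * tearing down a private taskq with the Solaris-style wrappers,
 * mirroring what spl_taskq_init() below does for the global
 * system_taskq.  The name "my_taskq", the sizing values, and
 * my_task_func()/arg are hypothetical.
 *
 *      taskq_t *tq;
 *
 *      tq = taskq_create("my_taskq", 4, minclsyspri, 4, 32,
 *                        TASKQ_PREPOPULATE);
 *      if (tq == NULL)
 *              return (-ENOMEM);
 *
 *      (void) taskq_dispatch(tq, my_task_func, arg, TQ_SLEEP);
 *      taskq_wait(tq);
 *      taskq_destroy(tq);
 */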

void
__taskq_destroy(taskq_t *tq)
{
        spl_task_t *t;
        int i, nthreads;
        SENTRY;

        ASSERT(tq);
        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        tq->tq_flags &= ~TQ_ACTIVE;
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        /* Clearing TQ_ACTIVE prevents new tasks from being added to pending */
        __taskq_wait(tq);

        nthreads = tq->tq_nthreads;
        for (i = 0; i < nthreads; i++)
                if (tq->tq_threads[i])
                        kthread_stop(tq->tq_threads[i]);

        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

        while (!list_empty(&tq->tq_free_list)) {
                t = list_entry(tq->tq_free_list.next, spl_task_t, t_list);
                list_del_init(&t->t_list);
                task_free(tq, t);
        }

        ASSERT(tq->tq_nthreads == 0);
        ASSERT(tq->tq_nalloc == 0);
        ASSERT(list_empty(&tq->tq_free_list));
        ASSERT(list_empty(&tq->tq_work_list));
        ASSERT(list_empty(&tq->tq_pend_list));
        ASSERT(list_empty(&tq->tq_prio_list));

        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
        kmem_free(tq->tq_threads, nthreads * sizeof(spl_task_t *));
        kmem_free(tq, sizeof(taskq_t));

        SEXIT;
}
EXPORT_SYMBOL(__taskq_destroy);

int
spl_taskq_init(void)
{
        SENTRY;

        /* Solaris creates a dynamic taskq of up to 64 threads; however, in
         * a Linux environment one thread per core is usually about right */
        system_taskq = taskq_create("spl_system_taskq", num_online_cpus(),
                                    minclsyspri, 4, 512, TASKQ_PREPOPULATE);
        if (system_taskq == NULL)
                SRETURN(1);

        SRETURN(0);
}

void
spl_taskq_fini(void)
{
        SENTRY;
        taskq_destroy(system_taskq);
        SEXIT;
}