/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *  UCRL-CODE-235197
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://github.com/behlendorf/spl/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *****************************************************************************
 *  Solaris Porting Layer (SPL) Task Queue Implementation.
\*****************************************************************************/

#include <sys/taskq.h>
#include <sys/kmem.h>
#include <spl-debug.h>

#ifdef SS_DEBUG_SUBSYS
#undef SS_DEBUG_SUBSYS
#endif

#define SS_DEBUG_SUBSYS SS_TASKQ

/* Global system-wide dynamic task queue available for all consumers */
taskq_t *system_taskq;
EXPORT_SYMBOL(system_taskq);
/*
 * NOTE: Must be called with tq->tq_lock held, returns a taskq_ent_t
 * which is not attached to the free, work, or pending taskq lists.
 */
static taskq_ent_t *
task_alloc(taskq_t *tq, uint_t flags)
{
        taskq_ent_t *t;
        int count = 0;
        SENTRY;

        ASSERT(tq);
        ASSERT(flags & (TQ_SLEEP | TQ_NOSLEEP));                /* One set */
        ASSERT(!((flags & TQ_SLEEP) && (flags & TQ_NOSLEEP)));  /* Not both */
        ASSERT(spin_is_locked(&tq->tq_lock));
retry:
        /* Acquire taskq_ent_t's from free list if available */
        if (!list_empty(&tq->tq_free_list) && !(flags & TQ_NEW)) {
                t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);

                ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

                list_del_init(&t->tqent_list);
                SRETURN(t);
        }

        /* Free list is empty and memory allocations are prohibited */
        if (flags & TQ_NOALLOC)
                SRETURN(NULL);

        /* Hit maximum taskq_ent_t pool size */
        if (tq->tq_nalloc >= tq->tq_maxalloc) {
                if (flags & TQ_NOSLEEP)
                        SRETURN(NULL);

                /*
                 * Sleep periodically polling the free list for an available
                 * taskq_ent_t.  Dispatching with TQ_SLEEP should always
                 * succeed, but we cannot block forever waiting for a
                 * taskq_ent_t to show up in the free list, otherwise a
                 * deadlock can happen.
                 *
                 * Therefore, we need to allocate a new task even if the
                 * number of allocated tasks is above tq->tq_maxalloc, but we
                 * still end up delaying the task allocation by up to one
                 * second (at most 100 polls of HZ/100 jiffies each), thereby
                 * throttling the task dispatch rate.
                 */
                spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
                schedule_timeout(HZ / 100);
                spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
                if (count < 100)
                        SGOTO(retry, count++);
        }

        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
        t = kmem_alloc(sizeof(taskq_ent_t), flags & (TQ_SLEEP | TQ_NOSLEEP));
        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

        if (t) {
                taskq_init_ent(t);
                tq->tq_nalloc++;
        }

        SRETURN(t);
}
105 | ||
82387586 | 106 | /* |
046a70c9 | 107 | * NOTE: Must be called with tq->tq_lock held, expects the taskq_ent_t |
bcd68186 | 108 | * to already be removed from the free, work, or pending taskq lists. |
109 | */ | |
110 | static void | |
046a70c9 | 111 | task_free(taskq_t *tq, taskq_ent_t *t) |
bcd68186 | 112 | { |
b17edc10 | 113 | SENTRY; |
bcd68186 | 114 | |
115 | ASSERT(tq); | |
116 | ASSERT(t); | |
117 | ASSERT(spin_is_locked(&tq->tq_lock)); | |
046a70c9 | 118 | ASSERT(list_empty(&t->tqent_list)); |
bcd68186 | 119 | |
046a70c9 | 120 | kmem_free(t, sizeof(taskq_ent_t)); |
bcd68186 | 121 | tq->tq_nalloc--; |
f1ca4da6 | 122 | |
b17edc10 | 123 | SEXIT; |
bcd68186 | 124 | } |
125 | ||
82387586 BB |
126 | /* |
127 | * NOTE: Must be called with tq->tq_lock held, either destroys the | |
046a70c9 | 128 | * taskq_ent_t if too many exist or moves it to the free list for later use. |
bcd68186 | 129 | */ |
f1ca4da6 | 130 | static void |
046a70c9 | 131 | task_done(taskq_t *tq, taskq_ent_t *t) |
f1ca4da6 | 132 | { |
b17edc10 | 133 | SENTRY; |
bcd68186 | 134 | ASSERT(tq); |
135 | ASSERT(t); | |
136 | ASSERT(spin_is_locked(&tq->tq_lock)); | |
137 | ||
44217f7a PS |
138 | /* For prealloc'd tasks, we don't free anything. */ |
139 | if ((!(tq->tq_flags & TASKQ_DYNAMIC)) && | |
140 | (t->tqent_flags & TQENT_FLAG_PREALLOC)) | |
141 | return; | |
142 | ||
046a70c9 | 143 | list_del_init(&t->tqent_list); |
f1ca4da6 | 144 | |
bcd68186 | 145 | if (tq->tq_nalloc <= tq->tq_minalloc) { |
046a70c9 PS |
146 | t->tqent_id = 0; |
147 | t->tqent_func = NULL; | |
148 | t->tqent_arg = NULL; | |
44217f7a | 149 | t->tqent_flags = 0; |
046a70c9 | 150 | list_add_tail(&t->tqent_list, &tq->tq_free_list); |
bcd68186 | 151 | } else { |
152 | task_free(tq, t); | |
153 | } | |
f1ca4da6 | 154 | |
b17edc10 | 155 | SEXIT; |
f1ca4da6 | 156 | } |
157 | ||
82387586 BB |
158 | /* |
159 | * As tasks are submitted to the task queue they are assigned a | |
f0d8bb26 NB |
160 | * monotonically increasing taskqid and added to the tail of the pending |
161 | * list. As worker threads become available the tasks are removed from | |
162 | * the head of the pending or priority list, giving preference to the | |
163 | * priority list. The tasks are then added to the work list, preserving | |
164 | * the ordering by taskqid. Finally, as tasks complete they are removed | |
165 | * from the work list. This means that the pending and work lists are | |
166 | * always kept sorted by taskqid. Thus the lowest outstanding | |
82387586 | 167 | * incomplete taskqid can be determined simply by checking the min |
f0d8bb26 NB |
168 | * taskqid for each head item on the pending, priority, and work list. |
169 | * This value is stored in tq->tq_lowest_id and only updated to the new | |
170 | * lowest id when the previous lowest id completes. All taskqids lower | |
171 | * than tq->tq_lowest_id must have completed. It is also possible | |
172 | * larger taskqid's have completed because they may be processed in | |
173 | * parallel by several worker threads. However, this is not a problem | |
174 | * because the behavior of taskq_wait_id() is to block until all | |
175 | * previously submitted taskqid's have completed. | |
82387586 BB |
176 | * |
177 | * XXX: Taskqid_t wrapping is not handled. However, taskqid_t's are | |
178 | * 64-bit values so even if a taskq is processing 2^24 (16,777,216) | |
179 | * taskqid_ts per second it will still take 2^40 seconds, 34,865 years, | |
180 | * before the wrap occurs. I can live with that for now. | |
bcd68186 | 181 | */ |
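/*
 * Worked example (illustrative only, not part of the original source): if
 * a worker is still running the entry with id 4 (head of the active list),
 * the priority list head has id 7, and the pending list head has id 9,
 * then taskq_lowest_id() returns 4.  Only once id 4 completes may
 * tq->tq_lowest_id advance to the next lowest head, id 7.
 */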
static int
taskq_wait_check(taskq_t *tq, taskqid_t id)
{
        int rc;

        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        rc = (id < tq->tq_lowest_id);
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        SRETURN(rc);
}

void
__taskq_wait_id(taskq_t *tq, taskqid_t id)
{
        SENTRY;
        ASSERT(tq);

        wait_event(tq->tq_wait_waitq, taskq_wait_check(tq, id));

        SEXIT;
}
EXPORT_SYMBOL(__taskq_wait_id);

void
__taskq_wait(taskq_t *tq)
{
        taskqid_t id;
        SENTRY;
        ASSERT(tq);

        /* Wait for the largest outstanding taskqid */
        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        id = tq->tq_next_id - 1;
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        __taskq_wait_id(tq, id);

        SEXIT;
}
EXPORT_SYMBOL(__taskq_wait);

int
__taskq_member(taskq_t *tq, void *t)
{
        struct list_head *l;
        taskq_thread_t *tqt;
        SENTRY;

        ASSERT(tq);
        ASSERT(t);

        list_for_each(l, &tq->tq_thread_list) {
                tqt = list_entry(l, taskq_thread_t, tqt_thread_list);
                if (tqt->tqt_thread == (struct task_struct *)t)
                        SRETURN(1);
        }

        SRETURN(0);
}
EXPORT_SYMBOL(__taskq_member);

taskqid_t
__taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
{
        taskq_ent_t *t;
        taskqid_t rc = 0;
        SENTRY;

        ASSERT(tq);
        ASSERT(func);

        /* Solaris assumes TQ_SLEEP if not passed explicitly */
        if (!(flags & (TQ_SLEEP | TQ_NOSLEEP)))
                flags |= TQ_SLEEP;

        if (unlikely(in_atomic() && (flags & TQ_SLEEP)))
                PANIC("May schedule while atomic: %s/0x%08x/%d\n",
                    current->comm, preempt_count(), current->pid);

        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

        /* Taskq being destroyed and all tasks drained */
        if (!(tq->tq_flags & TQ_ACTIVE))
                SGOTO(out, rc = 0);

        /* Do not queue the task unless there is an idle thread for it */
        ASSERT(tq->tq_nactive <= tq->tq_nthreads);
        if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads))
                SGOTO(out, rc = 0);

        if ((t = task_alloc(tq, flags)) == NULL)
                SGOTO(out, rc = 0);

        spin_lock(&t->tqent_lock);

        /* Queue to the priority list instead of the pending list */
        if (flags & TQ_FRONT)
                list_add_tail(&t->tqent_list, &tq->tq_prio_list);
        else
                list_add_tail(&t->tqent_list, &tq->tq_pend_list);

        t->tqent_id = rc = tq->tq_next_id;
        tq->tq_next_id++;
        t->tqent_func = func;
        t->tqent_arg = arg;

        ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

        spin_unlock(&t->tqent_lock);

        wake_up(&tq->tq_work_waitq);
out:
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
        SRETURN(rc);
}
EXPORT_SYMBOL(__taskq_dispatch);
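
/*
 * Illustrative consumer-side sketch (not part of this file), using the
 * taskq_dispatch()/taskq_wait_id() wrappers from sys/taskq.h which map
 * onto the __taskq_* functions above:
 *
 *      static void my_func(void *arg) { ... }
 *
 *      taskqid_t id = taskq_dispatch(tq, my_func, arg, TQ_SLEEP);
 *      if (id == 0)
 *              return;         // taskq inactive, or allocation refused
 *      taskq_wait_id(tq, id);  // blocks until id (and all earlier ids) done
 *
 * A return of 0 is the only failure indication, so callers must check it
 * before waiting on the id.
 */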

void
__taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
    taskq_ent_t *t)
{
        SENTRY;

        ASSERT(tq);
        ASSERT(func);
        ASSERT(!(tq->tq_flags & TASKQ_DYNAMIC));

        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

        /* Taskq being destroyed and all tasks drained */
        if (!(tq->tq_flags & TQ_ACTIVE)) {
                t->tqent_id = 0;
                goto out;
        }

        spin_lock(&t->tqent_lock);

        /*
         * Mark it as a prealloc'd task.  This is important
         * to ensure that we don't free it later.
         */
        t->tqent_flags |= TQENT_FLAG_PREALLOC;

        /* Queue to the priority list instead of the pending list */
        if (flags & TQ_FRONT)
                list_add_tail(&t->tqent_list, &tq->tq_prio_list);
        else
                list_add_tail(&t->tqent_list, &tq->tq_pend_list);

        t->tqent_id = tq->tq_next_id;
        tq->tq_next_id++;
        t->tqent_func = func;
        t->tqent_arg = arg;

        spin_unlock(&t->tqent_lock);

        wake_up(&tq->tq_work_waitq);
out:
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
        SEXIT;
}
EXPORT_SYMBOL(__taskq_dispatch_ent);

int
__taskq_empty_ent(taskq_ent_t *t)
{
        return list_empty(&t->tqent_list);
}
EXPORT_SYMBOL(__taskq_empty_ent);

void
__taskq_init_ent(taskq_ent_t *t)
{
        spin_lock_init(&t->tqent_lock);
        INIT_LIST_HEAD(&t->tqent_list);
        t->tqent_id = 0;
        t->tqent_func = NULL;
        t->tqent_arg = NULL;
        t->tqent_flags = 0;
}
EXPORT_SYMBOL(__taskq_init_ent);
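
/*
 * Illustrative sketch of the preallocated-entry path (not part of this
 * file): callers that cannot tolerate an allocation at dispatch time embed
 * a taskq_ent_t in their own structure, initialize it once, and reuse it:
 *
 *      taskq_ent_t ent;
 *
 *      taskq_init_ent(&ent);
 *      taskq_dispatch_ent(tq, my_func, arg, 0, &ent);
 *      if (ent.tqent_id == 0)
 *              return;         // taskq was inactive; entry was not queued
 *
 * The entry must remain valid until tqent_func has run.  Because it is
 * marked TQENT_FLAG_PREALLOC, task_done() will neither move it to the
 * free list nor kmem_free() it.
 */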
365 | ||
82387586 BB |
366 | /* |
367 | * Returns the lowest incomplete taskqid_t. The taskqid_t may | |
f0d8bb26 NB |
368 | * be queued on the pending list, on the priority list, or on |
369 | * the work list currently being handled, but it is not 100% | |
370 | * complete yet. | |
82387586 | 371 | */ |
bcd68186 | 372 | static taskqid_t |
373 | taskq_lowest_id(taskq_t *tq) | |
374 | { | |
7257ec41 | 375 | taskqid_t lowest_id = tq->tq_next_id; |
046a70c9 | 376 | taskq_ent_t *t; |
2c02b71b | 377 | taskq_thread_t *tqt; |
b17edc10 | 378 | SENTRY; |
bcd68186 | 379 | |
380 | ASSERT(tq); | |
381 | ASSERT(spin_is_locked(&tq->tq_lock)); | |
382 | ||
82387586 | 383 | if (!list_empty(&tq->tq_pend_list)) { |
046a70c9 PS |
384 | t = list_entry(tq->tq_pend_list.next, taskq_ent_t, tqent_list); |
385 | lowest_id = MIN(lowest_id, t->tqent_id); | |
82387586 | 386 | } |
bcd68186 | 387 | |
f0d8bb26 | 388 | if (!list_empty(&tq->tq_prio_list)) { |
046a70c9 PS |
389 | t = list_entry(tq->tq_prio_list.next, taskq_ent_t, tqent_list); |
390 | lowest_id = MIN(lowest_id, t->tqent_id); | |
f0d8bb26 NB |
391 | } |
392 | ||
2c02b71b PS |
393 | if (!list_empty(&tq->tq_active_list)) { |
394 | tqt = list_entry(tq->tq_active_list.next, taskq_thread_t, | |
395 | tqt_active_list); | |
396 | ASSERT(tqt->tqt_ent != NULL); | |
397 | lowest_id = MIN(lowest_id, tqt->tqt_ent->tqent_id); | |
82387586 | 398 | } |
bcd68186 | 399 | |
b17edc10 | 400 | SRETURN(lowest_id); |
bcd68186 | 401 | } |
402 | ||
f0d8bb26 NB |
403 | /* |
404 | * Insert a task into a list keeping the list sorted by increasing | |
405 | * taskqid. | |
406 | */ | |
407 | static void | |
2c02b71b | 408 | taskq_insert_in_order(taskq_t *tq, taskq_thread_t *tqt) |
f0d8bb26 | 409 | { |
2c02b71b | 410 | taskq_thread_t *w; |
f0d8bb26 NB |
411 | struct list_head *l; |
412 | ||
b17edc10 | 413 | SENTRY; |
f0d8bb26 | 414 | ASSERT(tq); |
2c02b71b | 415 | ASSERT(tqt); |
f0d8bb26 NB |
416 | ASSERT(spin_is_locked(&tq->tq_lock)); |
417 | ||
2c02b71b PS |
418 | list_for_each_prev(l, &tq->tq_active_list) { |
419 | w = list_entry(l, taskq_thread_t, tqt_active_list); | |
420 | if (w->tqt_ent->tqent_id < tqt->tqt_ent->tqent_id) { | |
421 | list_add(&tqt->tqt_active_list, l); | |
f0d8bb26 NB |
422 | break; |
423 | } | |
424 | } | |
2c02b71b PS |
425 | if (l == &tq->tq_active_list) |
426 | list_add(&tqt->tqt_active_list, &tq->tq_active_list); | |
f0d8bb26 | 427 | |
b17edc10 | 428 | SEXIT; |
f0d8bb26 NB |
429 | } |
430 | ||
bcd68186 | 431 | static int |
432 | taskq_thread(void *args) | |
433 | { | |
434 | DECLARE_WAITQUEUE(wait, current); | |
435 | sigset_t blocked; | |
436 | taskqid_t id; | |
2c02b71b PS |
437 | taskq_thread_t *tqt = args; |
438 | taskq_t *tq; | |
046a70c9 | 439 | taskq_ent_t *t; |
f0d8bb26 | 440 | struct list_head *pend_list; |
b17edc10 | 441 | SENTRY; |
bcd68186 | 442 | |
2c02b71b PS |
443 | ASSERT(tqt); |
444 | tq = tqt->tqt_tq; | |
bcd68186 | 445 | current->flags |= PF_NOFREEZE; |
446 | ||
372c2572 BB |
447 | /* Disable the direct memory reclaim path */ |
448 | if (tq->tq_flags & TASKQ_NORECLAIM) | |
449 | current->flags |= PF_MEMALLOC; | |
450 | ||
bcd68186 | 451 | sigfillset(&blocked); |
452 | sigprocmask(SIG_BLOCK, &blocked, NULL); | |
453 | flush_signals(current); | |
454 | ||
749045bb | 455 | spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags); |
bcd68186 | 456 | tq->tq_nthreads++; |
457 | wake_up(&tq->tq_wait_waitq); | |
458 | set_current_state(TASK_INTERRUPTIBLE); | |
459 | ||
460 | while (!kthread_should_stop()) { | |
461 | ||
462 | add_wait_queue(&tq->tq_work_waitq, &wait); | |
f0d8bb26 NB |
463 | if (list_empty(&tq->tq_pend_list) && |
464 | list_empty(&tq->tq_prio_list)) { | |
749045bb | 465 | spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags); |
bcd68186 | 466 | schedule(); |
749045bb | 467 | spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags); |
bcd68186 | 468 | } else { |
469 | __set_current_state(TASK_RUNNING); | |
470 | } | |
471 | ||
472 | remove_wait_queue(&tq->tq_work_waitq, &wait); | |
f0d8bb26 NB |
473 | |
474 | if (!list_empty(&tq->tq_prio_list)) | |
475 | pend_list = &tq->tq_prio_list; | |
476 | else if (!list_empty(&tq->tq_pend_list)) | |
477 | pend_list = &tq->tq_pend_list; | |
478 | else | |
479 | pend_list = NULL; | |
480 | ||
481 | if (pend_list) { | |
046a70c9 PS |
482 | t = list_entry(pend_list->next, taskq_ent_t, tqent_list); |
483 | list_del_init(&t->tqent_list); | |
44217f7a PS |
484 | /* In order to support recursively dispatching a |
485 | * preallocated taskq_ent_t, tqent_id must be | |
486 | * stored prior to executing tqent_func. */ | |
487 | id = t->tqent_id; | |
2c02b71b PS |
488 | tqt->tqt_ent = t; |
489 | taskq_insert_in_order(tq, tqt); | |
bcd68186 | 490 | tq->tq_nactive++; |
749045bb | 491 | spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags); |
bcd68186 | 492 | |
493 | /* Perform the requested task */ | |
046a70c9 | 494 | t->tqent_func(t->tqent_arg); |
bcd68186 | 495 | |
749045bb | 496 | spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags); |
bcd68186 | 497 | tq->tq_nactive--; |
2c02b71b PS |
498 | list_del_init(&tqt->tqt_active_list); |
499 | tqt->tqt_ent = NULL; | |
bcd68186 | 500 | task_done(tq, t); |
501 | ||
7257ec41 BB |
502 | /* When the current lowest outstanding taskqid is |
503 | * done calculate the new lowest outstanding id */ | |
bcd68186 | 504 | if (tq->tq_lowest_id == id) { |
505 | tq->tq_lowest_id = taskq_lowest_id(tq); | |
506 | ASSERT(tq->tq_lowest_id > id); | |
507 | } | |
508 | ||
509 | wake_up_all(&tq->tq_wait_waitq); | |
510 | } | |
511 | ||
512 | set_current_state(TASK_INTERRUPTIBLE); | |
513 | ||
514 | } | |
515 | ||
516 | __set_current_state(TASK_RUNNING); | |
517 | tq->tq_nthreads--; | |
2c02b71b PS |
518 | list_del_init(&tqt->tqt_thread_list); |
519 | kmem_free(tqt, sizeof(taskq_thread_t)); | |
520 | ||
749045bb | 521 | spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags); |
bcd68186 | 522 | |
b17edc10 | 523 | SRETURN(0); |
bcd68186 | 524 | } |
525 | ||
f1ca4da6 | 526 | taskq_t * |
527 | __taskq_create(const char *name, int nthreads, pri_t pri, | |
528 | int minalloc, int maxalloc, uint_t flags) | |
529 | { | |
bcd68186 | 530 | taskq_t *tq; |
2c02b71b | 531 | taskq_thread_t *tqt; |
bcd68186 | 532 | int rc = 0, i, j = 0; |
b17edc10 | 533 | SENTRY; |
bcd68186 | 534 | |
535 | ASSERT(name != NULL); | |
536 | ASSERT(pri <= maxclsyspri); | |
537 | ASSERT(minalloc >= 0); | |
538 | ASSERT(maxalloc <= INT_MAX); | |
539 | ASSERT(!(flags & (TASKQ_CPR_SAFE | TASKQ_DYNAMIC))); /* Unsupported */ | |
540 | ||
915404bd BB |
541 | /* Scale the number of threads using nthreads as a percentage */ |
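        /*
         * For example (illustrative only): with 8 online CPUs, nthreads = 75
         * yields MAX((8 * 75) / 100, 1) = 6 worker threads.
         */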
        if (flags & TASKQ_THREADS_CPU_PCT) {
                ASSERT(nthreads <= 100);
                ASSERT(nthreads >= 0);
                nthreads = MIN(nthreads, 100);
                nthreads = MAX(nthreads, 0);
                nthreads = MAX((num_online_cpus() * nthreads) / 100, 1);
        }

        tq = kmem_alloc(sizeof(*tq), KM_SLEEP);
        if (tq == NULL)
                SRETURN(NULL);

        spin_lock_init(&tq->tq_lock);
        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        INIT_LIST_HEAD(&tq->tq_thread_list);
        INIT_LIST_HEAD(&tq->tq_active_list);
        tq->tq_name = name;
        tq->tq_nactive = 0;
        tq->tq_nthreads = 0;
        tq->tq_pri = pri;
        tq->tq_minalloc = minalloc;
        tq->tq_maxalloc = maxalloc;
        tq->tq_nalloc = 0;
        tq->tq_flags = (flags | TQ_ACTIVE);
        tq->tq_next_id = 1;
        tq->tq_lowest_id = 1;
        INIT_LIST_HEAD(&tq->tq_free_list);
        INIT_LIST_HEAD(&tq->tq_pend_list);
        INIT_LIST_HEAD(&tq->tq_prio_list);
        init_waitqueue_head(&tq->tq_work_waitq);
        init_waitqueue_head(&tq->tq_wait_waitq);

        if (flags & TASKQ_PREPOPULATE)
                for (i = 0; i < minalloc; i++)
                        task_done(tq, task_alloc(tq, TQ_SLEEP | TQ_NEW));

        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        for (i = 0; i < nthreads; i++) {
                tqt = kmem_alloc(sizeof(*tqt), KM_SLEEP);
                INIT_LIST_HEAD(&tqt->tqt_thread_list);
                INIT_LIST_HEAD(&tqt->tqt_active_list);
                tqt->tqt_tq = tq;
                tqt->tqt_ent = NULL;

                tqt->tqt_thread = kthread_create(taskq_thread, tqt,
                    "%s/%d", name, i);
                if (tqt->tqt_thread) {
                        list_add(&tqt->tqt_thread_list, &tq->tq_thread_list);
                        kthread_bind(tqt->tqt_thread, i % num_online_cpus());
                        set_user_nice(tqt->tqt_thread, PRIO_TO_NICE(pri));
                        wake_up_process(tqt->tqt_thread);
                        j++;
                } else {
                        kmem_free(tqt, sizeof(taskq_thread_t));
                        rc = 1;
                }
        }

        /* Wait for all threads to be started before potential destroy */
        wait_event(tq->tq_wait_waitq, tq->tq_nthreads == j);

        if (rc) {
                __taskq_destroy(tq);
                tq = NULL;
        }

        SRETURN(tq);
}
EXPORT_SYMBOL(__taskq_create);
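
/*
 * Illustrative lifecycle sketch (not part of this file), via the
 * taskq_create()/taskq_destroy() wrappers from sys/taskq.h:
 *
 *      taskq_t *tq = taskq_create("my_taskq", 4, maxclsyspri,
 *          50, INT_MAX, TASKQ_PREPOPULATE);
 *      ...
 *      taskq_destroy(tq);      // drains pending tasks, then frees tq
 *
 * With TASKQ_PREPOPULATE the free list is primed with minalloc (here 50)
 * taskq_ent_t's, so early dispatches need not allocate under load.
 */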

void
__taskq_destroy(taskq_t *tq)
{
        struct task_struct *thread;
        taskq_thread_t *tqt;
        taskq_ent_t *t;
        SENTRY;

        ASSERT(tq);
        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        tq->tq_flags &= ~TQ_ACTIVE;
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        /* TQ_ACTIVE cleared prevents new tasks being added to pending */
        __taskq_wait(tq);

        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

        /*
         * Signal each thread to exit and block until it does.  Each thread
         * is responsible for removing itself from the list and freeing its
         * taskq_thread_t.  This allows for idle threads to opt to remove
         * themselves from the taskq.  They can be recreated as needed.
         */
        while (!list_empty(&tq->tq_thread_list)) {
                tqt = list_entry(tq->tq_thread_list.next,
                    taskq_thread_t, tqt_thread_list);
                thread = tqt->tqt_thread;
                spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

                kthread_stop(thread);

                spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        }

        while (!list_empty(&tq->tq_free_list)) {
                t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);

                ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

                list_del_init(&t->tqent_list);
                task_free(tq, t);
        }

        ASSERT(tq->tq_nthreads == 0);
        ASSERT(tq->tq_nalloc == 0);
        ASSERT(list_empty(&tq->tq_thread_list));
        ASSERT(list_empty(&tq->tq_active_list));
        ASSERT(list_empty(&tq->tq_free_list));
        ASSERT(list_empty(&tq->tq_pend_list));
        ASSERT(list_empty(&tq->tq_prio_list));

        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        kmem_free(tq, sizeof(taskq_t));

        SEXIT;
}
EXPORT_SYMBOL(__taskq_destroy);

int
spl_taskq_init(void)
{
        SENTRY;

        /* Solaris creates a dynamic taskq of up to 64 threads, however in
         * a Linux environment 1 thread per core is usually about right */
        system_taskq = taskq_create("spl_system_taskq", num_online_cpus(),
            minclsyspri, 4, 512, TASKQ_PREPOPULATE);
        if (system_taskq == NULL)
                SRETURN(1);

        SRETURN(0);
}

void
spl_taskq_fini(void)
{
        SENTRY;
        taskq_destroy(system_taskq);
        SEXIT;
}