/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *  UCRL-CODE-235197
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://zfsonlinux.org/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *****************************************************************************
 *  Solaris Porting Layer (SPL) Task Queue Implementation.
\*****************************************************************************/

#include <sys/taskq.h>
#include <sys/kmem.h>

int spl_taskq_thread_bind = 0;
module_param(spl_taskq_thread_bind, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_bind, "Bind taskq thread to CPU by default");

int spl_taskq_thread_dynamic = 1;
module_param(spl_taskq_thread_dynamic, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_dynamic, "Allow dynamic taskq threads");

int spl_taskq_thread_priority = 1;
module_param(spl_taskq_thread_priority, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_priority,
	"Allow non-default priority for taskq threads");

int spl_taskq_thread_sequential = 4;
module_param(spl_taskq_thread_sequential, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_sequential,
	"Create new taskq threads after N sequential tasks");

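/*
 * A minimal usage sketch (assumed invocation, not part of this file):
 * the tunables above can be set when the spl module is loaded, e.g.
 *
 *	modprobe spl spl_taskq_thread_bind=1 spl_taskq_thread_sequential=8
 *
 * or adjusted at runtime via /sys/module/spl/parameters/ since they are
 * registered with mode 0644.
 */
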
/* Global system-wide dynamic task queue available for all consumers */
taskq_t *system_taskq;
EXPORT_SYMBOL(system_taskq);

/* Private dedicated taskq for creating new taskq threads on demand. */
static taskq_t *dynamic_taskq;
static taskq_thread_t *taskq_thread_create(taskq_t *);

static int
task_km_flags(uint_t flags)
{
	if (flags & TQ_NOSLEEP)
		return (KM_NOSLEEP);

	if (flags & TQ_PUSHPAGE)
		return (KM_PUSHPAGE);

	return (KM_SLEEP);
}

/*
 * NOTE: Must be called with tq->tq_lock held, returns a taskq_ent_t
 * which is not attached to the free, work, or pending taskq lists.
 */
static taskq_ent_t *
task_alloc(taskq_t *tq, uint_t flags)
{
	taskq_ent_t *t;
	int count = 0;

	ASSERT(tq);
	ASSERT(spin_is_locked(&tq->tq_lock));
retry:
	/* Acquire taskq_ent_t's from free list if available */
	if (!list_empty(&tq->tq_free_list) && !(flags & TQ_NEW)) {
		t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);

		ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));
		ASSERT(!(t->tqent_flags & TQENT_FLAG_CANCEL));
		ASSERT(!timer_pending(&t->tqent_timer));

		list_del_init(&t->tqent_list);
		return (t);
	}

	/* Free list is empty and memory allocations are prohibited */
	if (flags & TQ_NOALLOC)
		return (NULL);

	/* Hit maximum taskq_ent_t pool size */
	if (tq->tq_nalloc >= tq->tq_maxalloc) {
		if (flags & TQ_NOSLEEP)
			return (NULL);

		/*
		 * Sleep periodically polling the free list for an available
		 * taskq_ent_t. Dispatching with TQ_SLEEP should always succeed
		 * but we cannot block forever waiting for a taskq_ent_t to
		 * show up in the free list, otherwise a deadlock can happen.
		 *
		 * Therefore, we need to allocate a new task even if the number
		 * of allocated tasks is above tq->tq_maxalloc, but we still
		 * end up delaying the task allocation by up to one second
		 * (100 retries of HZ / 100), thereby throttling the task
		 * dispatch rate.
		 */
		spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
		schedule_timeout(HZ / 100);
		spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
		if (count < 100) {
			count++;
			goto retry;
		}
	}

	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
	t = kmem_alloc(sizeof(taskq_ent_t), task_km_flags(flags));
	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

	if (t) {
		taskq_init_ent(t);
		tq->tq_nalloc++;
	}

	return (t);
}

/*
 * NOTE: Must be called with tq->tq_lock held, expects the taskq_ent_t
 * to already be removed from the free, work, or pending taskq lists.
 */
static void
task_free(taskq_t *tq, taskq_ent_t *t)
{
	ASSERT(tq);
	ASSERT(t);
	ASSERT(spin_is_locked(&tq->tq_lock));
	ASSERT(list_empty(&t->tqent_list));
	ASSERT(!timer_pending(&t->tqent_timer));

	kmem_free(t, sizeof(taskq_ent_t));
	tq->tq_nalloc--;
}

/*
 * NOTE: Must be called with tq->tq_lock held, either destroys the
 * taskq_ent_t if too many exist or moves it to the free list for later use.
 */
static void
task_done(taskq_t *tq, taskq_ent_t *t)
{
	ASSERT(tq);
	ASSERT(t);
	ASSERT(spin_is_locked(&tq->tq_lock));

	/* Wake tasks blocked in taskq_wait_id() */
	wake_up_all(&t->tqent_waitq);

	list_del_init(&t->tqent_list);

	if (tq->tq_nalloc <= tq->tq_minalloc) {
		t->tqent_id = 0;
		t->tqent_func = NULL;
		t->tqent_arg = NULL;
		t->tqent_flags = 0;

		list_add_tail(&t->tqent_list, &tq->tq_free_list);
	} else {
		task_free(tq, t);
	}
}

/*
 * When a delayed task timer expires remove it from the delay list and
 * add it to the priority list for immediate processing.
 */
static void
task_expire(unsigned long data)
{
	taskq_ent_t *w, *t = (taskq_ent_t *)data;
	taskq_t *tq = t->tqent_taskq;
	struct list_head *l;

	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

	if (t->tqent_flags & TQENT_FLAG_CANCEL) {
		ASSERT(list_empty(&t->tqent_list));
		spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
		return;
	}

	/*
	 * The priority list must be maintained in strict task id order
	 * from lowest to highest for lowest_id to be easily calculable.
	 */
	list_del(&t->tqent_list);
	list_for_each_prev(l, &tq->tq_prio_list) {
		w = list_entry(l, taskq_ent_t, tqent_list);
		if (w->tqent_id < t->tqent_id) {
			list_add(&t->tqent_list, l);
			break;
		}
	}
	if (l == &tq->tq_prio_list)
		list_add(&t->tqent_list, &tq->tq_prio_list);

	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

	wake_up(&tq->tq_work_waitq);
}

/*
 * Returns the lowest incomplete taskqid_t.  The taskqid_t may
 * be queued on the pending list, on the priority list, on the
 * delay list, or on the work list currently being handled, but
 * it is not 100% complete yet.
 */
static taskqid_t
taskq_lowest_id(taskq_t *tq)
{
	taskqid_t lowest_id = tq->tq_next_id;
	taskq_ent_t *t;
	taskq_thread_t *tqt;

	ASSERT(tq);
	ASSERT(spin_is_locked(&tq->tq_lock));

	if (!list_empty(&tq->tq_pend_list)) {
		t = list_entry(tq->tq_pend_list.next, taskq_ent_t, tqent_list);
		lowest_id = MIN(lowest_id, t->tqent_id);
	}

	if (!list_empty(&tq->tq_prio_list)) {
		t = list_entry(tq->tq_prio_list.next, taskq_ent_t, tqent_list);
		lowest_id = MIN(lowest_id, t->tqent_id);
	}

	if (!list_empty(&tq->tq_delay_list)) {
		t = list_entry(tq->tq_delay_list.next, taskq_ent_t, tqent_list);
		lowest_id = MIN(lowest_id, t->tqent_id);
	}

	if (!list_empty(&tq->tq_active_list)) {
		tqt = list_entry(tq->tq_active_list.next, taskq_thread_t,
		    tqt_active_list);
		ASSERT(tqt->tqt_id != 0);
		lowest_id = MIN(lowest_id, tqt->tqt_id);
	}

	return (lowest_id);
}

/*
 * Insert a task into a list keeping the list sorted by increasing taskqid.
 */
static void
taskq_insert_in_order(taskq_t *tq, taskq_thread_t *tqt)
{
	taskq_thread_t *w;
	struct list_head *l;

	ASSERT(tq);
	ASSERT(tqt);
	ASSERT(spin_is_locked(&tq->tq_lock));

	list_for_each_prev(l, &tq->tq_active_list) {
		w = list_entry(l, taskq_thread_t, tqt_active_list);
		if (w->tqt_id < tqt->tqt_id) {
			list_add(&tqt->tqt_active_list, l);
			break;
		}
	}
	if (l == &tq->tq_active_list)
		list_add(&tqt->tqt_active_list, &tq->tq_active_list);
}

/*
 * Find and return a task from the given list if it exists.  The list
 * must be in lowest to highest task id order.
 */
static taskq_ent_t *
taskq_find_list(taskq_t *tq, struct list_head *lh, taskqid_t id)
{
	struct list_head *l;
	taskq_ent_t *t;

	ASSERT(spin_is_locked(&tq->tq_lock));

	list_for_each(l, lh) {
		t = list_entry(l, taskq_ent_t, tqent_list);

		if (t->tqent_id == id)
			return (t);

		if (t->tqent_id > id)
			break;
	}

	return (NULL);
}

/*
 * Find an already dispatched task given the task id regardless of what
 * state it is in.  If a task is still pending or executing it will be
 * returned and 'active' set appropriately.  If the task has already
 * been run then NULL is returned.
 */
static taskq_ent_t *
taskq_find(taskq_t *tq, taskqid_t id, int *active)
{
	taskq_thread_t *tqt;
	struct list_head *l;
	taskq_ent_t *t;

	ASSERT(spin_is_locked(&tq->tq_lock));
	*active = 0;

	t = taskq_find_list(tq, &tq->tq_delay_list, id);
	if (t)
		return (t);

	t = taskq_find_list(tq, &tq->tq_prio_list, id);
	if (t)
		return (t);

	t = taskq_find_list(tq, &tq->tq_pend_list, id);
	if (t)
		return (t);

	list_for_each(l, &tq->tq_active_list) {
		tqt = list_entry(l, taskq_thread_t, tqt_active_list);
		if (tqt->tqt_id == id) {
			t = tqt->tqt_task;
			*active = 1;
			return (t);
		}
	}

	return (NULL);
}

/*
 * Theory for the taskq_wait_id(), taskq_wait_outstanding(), and
 * taskq_wait() functions below.
 *
 * Taskq waiting is accomplished by tracking the lowest outstanding task
 * id and the next available task id.  As tasks are dispatched they are
 * added to the tail of the pending, priority, or delay lists.  As worker
 * threads become available the tasks are removed from the heads of these
 * lists and linked to the worker threads.  This ensures the lists are
 * kept sorted by lowest to highest task id.
 *
 * Therefore the lowest outstanding task id can be quickly determined by
 * checking the head item from all of these lists.  This value is stored
 * with the taskq as the lowest id.  It only needs to be recalculated when
 * either the task with the current lowest id completes or is canceled.
 *
 * By blocking until the lowest task id exceeds the passed task id the
 * taskq_wait_outstanding() function can be easily implemented.  Similarly,
 * by blocking until the lowest task id matches the next task id taskq_wait()
 * can be implemented.
 *
 * Callers should be aware that when there are multiple worker threads it
 * is possible for larger task ids to complete before smaller ones.  Also
 * when the taskq contains delay tasks with small task ids callers may
 * block for a considerable length of time waiting for them to expire and
 * execute.
 */

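/*
 * A minimal usage sketch (assumed caller code, not part of this file):
 * the id returned by taskq_dispatch() can be waited on individually, or
 * the entire queue can be drained.  my_func and my_arg are placeholder
 * names for the caller's task function and argument.
 *
 *	taskqid_t id = taskq_dispatch(tq, my_func, my_arg, TQ_SLEEP);
 *	if (id != 0)
 *		taskq_wait_id(tq, id);	// block until this task completes
 *	taskq_wait(tq);			// block until the taskq is empty
 */
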
static int
taskq_wait_id_check(taskq_t *tq, taskqid_t id)
{
	int active = 0;
	int rc;

	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
	rc = (taskq_find(tq, id, &active) == NULL);
	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

	return (rc);
}

/*
 * The taskq_wait_id() function blocks until the passed task id completes.
 * This does not guarantee that all lower task ids have completed.
 */
void
taskq_wait_id(taskq_t *tq, taskqid_t id)
{
	wait_event(tq->tq_wait_waitq, taskq_wait_id_check(tq, id));
}
EXPORT_SYMBOL(taskq_wait_id);

static int
taskq_wait_outstanding_check(taskq_t *tq, taskqid_t id)
{
	int rc;

	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
	rc = (id < tq->tq_lowest_id);
	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

	return (rc);
}

/*
 * The taskq_wait_outstanding() function will block until all tasks with a
 * lower taskqid than the passed 'id' have been completed.  Note that all
 * task id's are assigned monotonically at dispatch time.  Zero may be
 * passed for the id to indicate that all tasks dispatched up to this
 * point, but not after, should be waited for.
 */
void
taskq_wait_outstanding(taskq_t *tq, taskqid_t id)
{
	wait_event(tq->tq_wait_waitq,
	    taskq_wait_outstanding_check(tq, id ? id : tq->tq_next_id - 1));
}
EXPORT_SYMBOL(taskq_wait_outstanding);

static int
taskq_wait_check(taskq_t *tq)
{
	int rc;

	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
	rc = (tq->tq_lowest_id == tq->tq_next_id);
	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

	return (rc);
}

/*
 * The taskq_wait() function will block until the taskq is empty.
 * This means that if a taskq re-dispatches work to itself taskq_wait()
 * callers will block indefinitely.
 */
void
taskq_wait(taskq_t *tq)
{
	wait_event(tq->tq_wait_waitq, taskq_wait_check(tq));
}
EXPORT_SYMBOL(taskq_wait);

static int
taskq_member_impl(taskq_t *tq, void *t)
{
	struct list_head *l;
	taskq_thread_t *tqt;
	int found = 0;

	ASSERT(tq);
	ASSERT(t);
	ASSERT(spin_is_locked(&tq->tq_lock));

	list_for_each(l, &tq->tq_thread_list) {
		tqt = list_entry(l, taskq_thread_t, tqt_thread_list);
		if (tqt->tqt_thread == (struct task_struct *)t) {
			found = 1;
			break;
		}
	}
	return (found);
}

int
taskq_member(taskq_t *tq, void *t)
{
	int found;

	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
	found = taskq_member_impl(tq, t);
	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

	return (found);
}
EXPORT_SYMBOL(taskq_member);

/*
 * Cancel an already dispatched task given the task id.  Still pending tasks
 * will be immediately canceled, and if the task is active the function will
 * block until it completes.  Preallocated tasks which are canceled must be
 * freed by the caller.
 */
int
taskq_cancel_id(taskq_t *tq, taskqid_t id)
{
	taskq_ent_t *t;
	int active = 0;
	int rc = ENOENT;

	ASSERT(tq);

	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
	t = taskq_find(tq, id, &active);
	if (t && !active) {
		list_del_init(&t->tqent_list);
		t->tqent_flags |= TQENT_FLAG_CANCEL;

		/*
		 * When canceling the lowest outstanding task id we
		 * must recalculate the new lowest outstanding id.
		 */
		if (tq->tq_lowest_id == t->tqent_id) {
			tq->tq_lowest_id = taskq_lowest_id(tq);
			ASSERT3S(tq->tq_lowest_id, >, t->tqent_id);
		}

		/*
		 * The task_expire() function takes the tq->tq_lock so drop
		 * the lock before synchronously cancelling the timer.
		 */
		if (timer_pending(&t->tqent_timer)) {
			spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
			del_timer_sync(&t->tqent_timer);
			spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
		}

		if (!(t->tqent_flags & TQENT_FLAG_PREALLOC))
			task_done(tq, t);

		rc = 0;
	}
	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

	if (active) {
		taskq_wait_id(tq, id);
		rc = EBUSY;
	}

	return (rc);
}
EXPORT_SYMBOL(taskq_cancel_id);

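/*
 * A minimal cancellation sketch (assumed caller code, not part of this
 * file): taskq_cancel_id() returns 0 when a pending task was canceled,
 * ENOENT when the id was not found (e.g. already complete), and EBUSY
 * after blocking on a task which was already executing.
 *
 *	taskqid_t id = taskq_dispatch(tq, my_func, my_arg, TQ_SLEEP);
 *	if (taskq_cancel_id(tq, id) == EBUSY)
 *		... // too late to cancel; the task has now completed
 */
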
static int taskq_thread_spawn(taskq_t *tq, int seq_tasks);

taskqid_t
taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
{
	taskq_ent_t *t;
	taskqid_t rc = 0;

	ASSERT(tq);
	ASSERT(func);

	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

	/* Taskq being destroyed and all tasks drained */
	if (!(tq->tq_flags & TASKQ_ACTIVE))
		goto out;

	/* Do not queue the task unless there is an idle thread for it */
	ASSERT(tq->tq_nactive <= tq->tq_nthreads);
	if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads))
		goto out;

	if ((t = task_alloc(tq, flags)) == NULL)
		goto out;

	spin_lock(&t->tqent_lock);

	/* Queue to the priority list instead of the pending list */
	if (flags & TQ_FRONT)
		list_add_tail(&t->tqent_list, &tq->tq_prio_list);
	else
		list_add_tail(&t->tqent_list, &tq->tq_pend_list);

	t->tqent_id = rc = tq->tq_next_id;
	tq->tq_next_id++;
	t->tqent_func = func;
	t->tqent_arg = arg;
	t->tqent_taskq = tq;
	t->tqent_timer.data = 0;
	t->tqent_timer.function = NULL;
	t->tqent_timer.expires = 0;

	ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

	spin_unlock(&t->tqent_lock);

	wake_up(&tq->tq_work_waitq);
out:
	/* Spawn additional taskq threads if required. */
	if (tq->tq_nactive == tq->tq_nthreads &&
	    taskq_member_impl(tq, current))
		(void) taskq_thread_spawn(tq, spl_taskq_thread_sequential + 1);

	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
	return (rc);
}
EXPORT_SYMBOL(taskq_dispatch);

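/*
 * A minimal dispatch sketch (assumed caller code, not part of this
 * file): my_func and my_arg are placeholder names.  A return value of
 * zero indicates the task was not dispatched.
 *
 *	taskqid_t id = taskq_dispatch(system_taskq, my_func, my_arg,
 *	    TQ_SLEEP);
 *	if (id == 0)
 *		... // taskq inactive or entry allocation failed
 */
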
taskqid_t
taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg,
    uint_t flags, clock_t expire_time)
{
	taskqid_t rc = 0;
	taskq_ent_t *t;

	ASSERT(tq);
	ASSERT(func);

	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

	/* Taskq being destroyed and all tasks drained */
	if (!(tq->tq_flags & TASKQ_ACTIVE))
		goto out;

	if ((t = task_alloc(tq, flags)) == NULL)
		goto out;

	spin_lock(&t->tqent_lock);

	/* Queue to the delay list for subsequent execution */
	list_add_tail(&t->tqent_list, &tq->tq_delay_list);

	t->tqent_id = rc = tq->tq_next_id;
	tq->tq_next_id++;
	t->tqent_func = func;
	t->tqent_arg = arg;
	t->tqent_taskq = tq;
	t->tqent_timer.data = (unsigned long)t;
	t->tqent_timer.function = task_expire;
	t->tqent_timer.expires = (unsigned long)expire_time;
	add_timer(&t->tqent_timer);

	ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

	spin_unlock(&t->tqent_lock);
out:
	/* Spawn additional taskq threads if required. */
	if (tq->tq_nactive == tq->tq_nthreads &&
	    taskq_member_impl(tq, current))
		(void) taskq_thread_spawn(tq, spl_taskq_thread_sequential + 1);
	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
	return (rc);
}
EXPORT_SYMBOL(taskq_dispatch_delay);

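/*
 * A minimal delayed-dispatch sketch (assumed caller code, not part of
 * this file): expire_time is an absolute time in jiffies, so a relative
 * delay is expressed against the current lbolt value.
 *
 *	taskqid_t id = taskq_dispatch_delay(tq, my_func, my_arg, TQ_SLEEP,
 *	    ddi_get_lbolt() + msecs_to_jiffies(500));
 */
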
void
taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
    taskq_ent_t *t)
{
	ASSERT(tq);
	ASSERT(func);

	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

	/* Taskq being destroyed and all tasks drained */
	if (!(tq->tq_flags & TASKQ_ACTIVE)) {
		t->tqent_id = 0;
		goto out;
	}

	spin_lock(&t->tqent_lock);

	/*
	 * Mark it as a prealloc'd task.  This is important
	 * to ensure that we don't free it later.
	 */
	t->tqent_flags |= TQENT_FLAG_PREALLOC;

	/* Queue to the priority list instead of the pending list */
	if (flags & TQ_FRONT)
		list_add_tail(&t->tqent_list, &tq->tq_prio_list);
	else
		list_add_tail(&t->tqent_list, &tq->tq_pend_list);

	t->tqent_id = tq->tq_next_id;
	tq->tq_next_id++;
	t->tqent_func = func;
	t->tqent_arg = arg;
	t->tqent_taskq = tq;

	spin_unlock(&t->tqent_lock);

	wake_up(&tq->tq_work_waitq);
out:
	/* Spawn additional taskq threads if required. */
	if (tq->tq_nactive == tq->tq_nthreads &&
	    taskq_member_impl(tq, current))
		(void) taskq_thread_spawn(tq, spl_taskq_thread_sequential + 1);
	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
}
EXPORT_SYMBOL(taskq_dispatch_ent);

int
taskq_empty_ent(taskq_ent_t *t)
{
	return (list_empty(&t->tqent_list));
}
EXPORT_SYMBOL(taskq_empty_ent);

void
taskq_init_ent(taskq_ent_t *t)
{
	spin_lock_init(&t->tqent_lock);
	init_waitqueue_head(&t->tqent_waitq);
	init_timer(&t->tqent_timer);
	INIT_LIST_HEAD(&t->tqent_list);
	t->tqent_id = 0;
	t->tqent_func = NULL;
	t->tqent_arg = NULL;
	t->tqent_flags = 0;
	t->tqent_taskq = NULL;
}
EXPORT_SYMBOL(taskq_init_ent);

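/*
 * A minimal preallocated-entry sketch (assumed caller code, not part of
 * this file): the caller owns the taskq_ent_t, typically embedding it
 * in a larger structure, and it must remain valid until the task runs.
 *
 *	taskq_ent_t ent;
 *
 *	taskq_init_ent(&ent);
 *	taskq_dispatch_ent(tq, my_func, my_arg, 0, &ent);
 *	if (ent.tqent_id == 0)
 *		... // taskq was inactive, task was not dispatched
 */
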
/*
 * Return the next pending task, preference is given to tasks on the
 * priority list which were dispatched with TQ_FRONT.
 */
static taskq_ent_t *
taskq_next_ent(taskq_t *tq)
{
	struct list_head *list;

	ASSERT(spin_is_locked(&tq->tq_lock));

	if (!list_empty(&tq->tq_prio_list))
		list = &tq->tq_prio_list;
	else if (!list_empty(&tq->tq_pend_list))
		list = &tq->tq_pend_list;
	else
		return (NULL);

	return (list_entry(list->next, taskq_ent_t, tqent_list));
}

/*
 * Spawns a new thread for the specified taskq.
 */
static void
taskq_thread_spawn_task(void *arg)
{
	taskq_t *tq = (taskq_t *)arg;

	(void) taskq_thread_create(tq);

	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
	tq->tq_nspawn--;
	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
}

/*
 * Spawn additional threads for dynamic taskqs (TASKQ_DYNAMIC) when the
 * current number of threads is insufficient to handle the pending tasks.
 * These new threads must be created by the dedicated dynamic_taskq to
 * avoid deadlocks between thread creation and memory reclaim.  The
 * system_taskq which is also a dynamic taskq cannot be safely used
 * for this.
 */
static int
taskq_thread_spawn(taskq_t *tq, int seq_tasks)
{
	int spawning = 0;

	if (!(tq->tq_flags & TASKQ_DYNAMIC))
		return (0);

	if ((seq_tasks > spl_taskq_thread_sequential) &&
	    (tq->tq_nthreads + tq->tq_nspawn < tq->tq_maxthreads) &&
	    (tq->tq_flags & TASKQ_ACTIVE)) {
		spawning = (++tq->tq_nspawn);
		taskq_dispatch(dynamic_taskq, taskq_thread_spawn_task,
		    tq, TQ_NOSLEEP);
	}

	return (spawning);
}

/*
 * Threads in a dynamic taskq should only exit once it has been completely
 * drained and no other threads are actively servicing tasks.  This prevents
 * threads from being created and destroyed more than is required.
 *
 * The first thread in the thread list is treated as the primary thread.
 * There is nothing special about the primary thread but in order to avoid
 * all the taskq pids from changing we opt to make it long running.
 */
static int
taskq_thread_should_stop(taskq_t *tq, taskq_thread_t *tqt)
{
	ASSERT(spin_is_locked(&tq->tq_lock));

	if (!(tq->tq_flags & TASKQ_DYNAMIC))
		return (0);

	if (list_first_entry(&(tq->tq_thread_list), taskq_thread_t,
	    tqt_thread_list) == tqt)
		return (0);

	return
	    ((tq->tq_nspawn == 0) &&	/* No threads are being spawned */
	    (tq->tq_nactive == 0) &&	/* No threads are handling tasks */
	    (tq->tq_nthreads > 1) &&	/* More than 1 thread is running */
	    (!taskq_next_ent(tq)) &&	/* There are no pending tasks */
	    (spl_taskq_thread_dynamic));	/* Dynamic taskqs are allowed */
}

static int
taskq_thread(void *args)
{
	DECLARE_WAITQUEUE(wait, current);
	sigset_t blocked;
	taskq_thread_t *tqt = args;
	taskq_t *tq;
	taskq_ent_t *t;
	int seq_tasks = 0;

	ASSERT(tqt);
	tq = tqt->tqt_tq;
	current->flags |= PF_NOFREEZE;

	#if defined(PF_MEMALLOC_NOIO)
	(void) memalloc_noio_save();
	#endif

	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

	/* Immediately exit if more threads than allowed were created. */
	if (tq->tq_nthreads >= tq->tq_maxthreads)
		goto error;

	tq->tq_nthreads++;
	list_add_tail(&tqt->tqt_thread_list, &tq->tq_thread_list);
	wake_up(&tq->tq_wait_waitq);
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {

		if (list_empty(&tq->tq_pend_list) &&
		    list_empty(&tq->tq_prio_list)) {

			if (taskq_thread_should_stop(tq, tqt)) {
				wake_up_all(&tq->tq_wait_waitq);
				break;
			}

			add_wait_queue_exclusive(&tq->tq_work_waitq, &wait);
			spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

			schedule();
			seq_tasks = 0;

			spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
			remove_wait_queue(&tq->tq_work_waitq, &wait);
		} else {
			__set_current_state(TASK_RUNNING);
		}

		if ((t = taskq_next_ent(tq)) != NULL) {
			list_del_init(&t->tqent_list);

			/* In order to support recursively dispatching a
			 * preallocated taskq_ent_t, tqent_id must be
			 * stored prior to executing tqent_func. */
			tqt->tqt_id = t->tqent_id;
			tqt->tqt_task = t;

			/* We must store a copy of the flags prior to
			 * servicing the task (servicing a prealloc'd task
			 * returns the ownership of the tqent back to
			 * the caller of taskq_dispatch). Thus,
			 * tqent_flags _may_ change within the call. */
			tqt->tqt_flags = t->tqent_flags;

			taskq_insert_in_order(tq, tqt);
			tq->tq_nactive++;
			spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

			/* Perform the requested task */
			t->tqent_func(t->tqent_arg);

			spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
			tq->tq_nactive--;
			list_del_init(&tqt->tqt_active_list);
			tqt->tqt_task = NULL;

			/* For prealloc'd tasks, we don't free anything. */
			if (!(tqt->tqt_flags & TQENT_FLAG_PREALLOC))
				task_done(tq, t);

			/* When the current lowest outstanding taskqid is
			 * done calculate the new lowest outstanding id */
			if (tq->tq_lowest_id == tqt->tqt_id) {
				tq->tq_lowest_id = taskq_lowest_id(tq);
				ASSERT3S(tq->tq_lowest_id, >, tqt->tqt_id);
			}

			/* Spawn additional taskq threads if required. */
			if (taskq_thread_spawn(tq, ++seq_tasks))
				seq_tasks = 0;

			tqt->tqt_id = 0;
			tqt->tqt_flags = 0;
			wake_up_all(&tq->tq_wait_waitq);
		} else {
			if (taskq_thread_should_stop(tq, tqt))
				break;
		}

		set_current_state(TASK_INTERRUPTIBLE);

	}

	__set_current_state(TASK_RUNNING);
	tq->tq_nthreads--;
	list_del_init(&tqt->tqt_thread_list);
error:
	kmem_free(tqt, sizeof (taskq_thread_t));
	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

	return (0);
}

static taskq_thread_t *
taskq_thread_create(taskq_t *tq)
{
	static int last_used_cpu = 0;
	taskq_thread_t *tqt;

	tqt = kmem_alloc(sizeof (*tqt), KM_PUSHPAGE);
	INIT_LIST_HEAD(&tqt->tqt_thread_list);
	INIT_LIST_HEAD(&tqt->tqt_active_list);
	tqt->tqt_tq = tq;
	tqt->tqt_id = 0;

	tqt->tqt_thread = spl_kthread_create(taskq_thread, tqt,
	    "%s", tq->tq_name);
	if (tqt->tqt_thread == NULL) {
		kmem_free(tqt, sizeof (taskq_thread_t));
		return (NULL);
	}

	if (spl_taskq_thread_bind) {
		last_used_cpu = (last_used_cpu + 1) % num_online_cpus();
		kthread_bind(tqt->tqt_thread, last_used_cpu);
	}

	if (spl_taskq_thread_priority)
		set_user_nice(tqt->tqt_thread, PRIO_TO_NICE(tq->tq_pri));

	wake_up_process(tqt->tqt_thread);

	return (tqt);
}

taskq_t *
taskq_create(const char *name, int nthreads, pri_t pri,
    int minalloc, int maxalloc, uint_t flags)
{
	taskq_t *tq;
	taskq_thread_t *tqt;
	int count = 0, rc = 0, i;

	ASSERT(name != NULL);
	ASSERT(minalloc >= 0);
	ASSERT(maxalloc <= INT_MAX);
	ASSERT(!(flags & (TASKQ_CPR_SAFE)));	/* Unsupported */

	/* Scale the number of threads using nthreads as a percentage */
	if (flags & TASKQ_THREADS_CPU_PCT) {
		ASSERT(nthreads <= 100);
		ASSERT(nthreads >= 0);
		nthreads = MIN(nthreads, 100);
		nthreads = MAX(nthreads, 0);
		nthreads = MAX((num_online_cpus() * nthreads) / 100, 1);
	}

	tq = kmem_alloc(sizeof (*tq), KM_PUSHPAGE);
	if (tq == NULL)
		return (NULL);

	spin_lock_init(&tq->tq_lock);
	INIT_LIST_HEAD(&tq->tq_thread_list);
	INIT_LIST_HEAD(&tq->tq_active_list);
	tq->tq_name = strdup(name);
	tq->tq_nactive = 0;
	tq->tq_nthreads = 0;
	tq->tq_nspawn = 0;
	tq->tq_maxthreads = nthreads;
	tq->tq_pri = pri;
	tq->tq_minalloc = minalloc;
	tq->tq_maxalloc = maxalloc;
	tq->tq_nalloc = 0;
	tq->tq_flags = (flags | TASKQ_ACTIVE);
	tq->tq_next_id = 1;
	tq->tq_lowest_id = 1;
	INIT_LIST_HEAD(&tq->tq_free_list);
	INIT_LIST_HEAD(&tq->tq_pend_list);
	INIT_LIST_HEAD(&tq->tq_prio_list);
	INIT_LIST_HEAD(&tq->tq_delay_list);
	init_waitqueue_head(&tq->tq_work_waitq);
	init_waitqueue_head(&tq->tq_wait_waitq);

	if (flags & TASKQ_PREPOPULATE) {
		spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

		for (i = 0; i < minalloc; i++)
			task_done(tq, task_alloc(tq, TQ_PUSHPAGE | TQ_NEW));

		spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
	}

	if ((flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic)
		nthreads = 1;

	for (i = 0; i < nthreads; i++) {
		tqt = taskq_thread_create(tq);
		if (tqt == NULL)
			rc = 1;
		else
			count++;
	}

	/* Wait for all threads to be started before potential destroy */
	wait_event(tq->tq_wait_waitq, tq->tq_nthreads == count);

	if (rc) {
		taskq_destroy(tq);
		tq = NULL;
	}

	return (tq);
}
EXPORT_SYMBOL(taskq_create);

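/*
 * A minimal creation sketch (assumed caller code, not part of this
 * file): create a queue of 4 threads at maxclsyspri with 32 entries
 * prepopulated, use it, then tear it down.
 *
 *	taskq_t *tq = taskq_create("my_taskq", 4, maxclsyspri,
 *	    32, INT_MAX, TASKQ_PREPOPULATE);
 *	if (tq != NULL) {
 *		... // dispatch and wait on tasks
 *		taskq_destroy(tq);
 *	}
 */
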
void
taskq_destroy(taskq_t *tq)
{
	struct task_struct *thread;
	taskq_thread_t *tqt;
	taskq_ent_t *t;

	ASSERT(tq);
	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
	tq->tq_flags &= ~TASKQ_ACTIVE;
	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

	/*
	 * When TASKQ_ACTIVE is clear new tasks may not be added nor may
	 * new worker threads be spawned for dynamic taskq.
	 */
	if (dynamic_taskq != NULL)
		taskq_wait_outstanding(dynamic_taskq, 0);

	taskq_wait(tq);

	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

	/*
	 * Signal each thread to exit and block until it does.  Each thread
	 * is responsible for removing itself from the list and freeing its
	 * taskq_thread_t.  This allows for idle threads to opt to remove
	 * themselves from the taskq.  They can be recreated as needed.
	 */
	while (!list_empty(&tq->tq_thread_list)) {
		tqt = list_entry(tq->tq_thread_list.next,
		    taskq_thread_t, tqt_thread_list);
		thread = tqt->tqt_thread;
		spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

		kthread_stop(thread);

		spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
	}

	while (!list_empty(&tq->tq_free_list)) {
		t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);

		ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

		list_del_init(&t->tqent_list);
		task_free(tq, t);
	}

	ASSERT0(tq->tq_nthreads);
	ASSERT0(tq->tq_nalloc);
	ASSERT0(tq->tq_nspawn);
	ASSERT(list_empty(&tq->tq_thread_list));
	ASSERT(list_empty(&tq->tq_active_list));
	ASSERT(list_empty(&tq->tq_free_list));
	ASSERT(list_empty(&tq->tq_pend_list));
	ASSERT(list_empty(&tq->tq_prio_list));
	ASSERT(list_empty(&tq->tq_delay_list));

	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

	strfree(tq->tq_name);
	kmem_free(tq, sizeof (taskq_t));
}
EXPORT_SYMBOL(taskq_destroy);

int
spl_taskq_init(void)
{
	system_taskq = taskq_create("spl_system_taskq", MAX(boot_ncpus, 64),
	    maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE|TASKQ_DYNAMIC);
	if (system_taskq == NULL)
		return (1);

	dynamic_taskq = taskq_create("spl_dynamic_taskq", 1,
	    maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE);
	if (dynamic_taskq == NULL) {
		taskq_destroy(system_taskq);
		return (1);
	}

	return (0);
}

void
spl_taskq_fini(void)
{
	taskq_destroy(dynamic_taskq);
	dynamic_taskq = NULL;

	taskq_destroy(system_taskq);
	system_taskq = NULL;
}