/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *  UCRL-CODE-235197
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://zfsonlinux.org/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *****************************************************************************
 *  Solaris Porting Layer (SPL) Task Queue Implementation.
\*****************************************************************************/

#include <sys/taskq.h>
#include <sys/kmem.h>

int spl_taskq_thread_bind = 0;
module_param(spl_taskq_thread_bind, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_bind, "Bind taskq thread to CPU by default");

int spl_taskq_thread_dynamic = 0;
module_param(spl_taskq_thread_dynamic, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_dynamic, "Allow dynamic taskq threads");

int spl_taskq_thread_priority = 1;
module_param(spl_taskq_thread_priority, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_priority,
	"Allow non-default priority for taskq threads");

int spl_taskq_thread_sequential = 4;
module_param(spl_taskq_thread_sequential, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_sequential,
	"Create new taskq threads after N sequential tasks");

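/*
 * Usage note (illustrative, not part of the original file): these are
 * ordinary module parameters, so on a typical system they may be set at
 * module load time or, since they use mode 0644, adjusted at runtime:
 *
 *	modprobe spl spl_taskq_thread_dynamic=1
 *	echo 8 > /sys/module/spl/parameters/spl_taskq_thread_sequential
 */
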
/* Global system-wide dynamic task queue available for all consumers */
taskq_t *system_taskq;
EXPORT_SYMBOL(system_taskq);

/* Private dedicated taskq for creating new taskq threads on demand. */
static taskq_t *dynamic_taskq;
static taskq_thread_t *taskq_thread_create(taskq_t *);

static int
task_km_flags(uint_t flags)
{
	if (flags & TQ_NOSLEEP)
		return KM_NOSLEEP;

	if (flags & TQ_PUSHPAGE)
		return KM_PUSHPAGE;

	return KM_SLEEP;
}

/*
 * NOTE: Must be called with tq->tq_lock held, returns a taskq_ent_t
 * which is not attached to the free, work, or pending taskq lists.
 */
static taskq_ent_t *
task_alloc(taskq_t *tq, uint_t flags)
{
	taskq_ent_t *t;
	int count = 0;

	ASSERT(tq);
	ASSERT(spin_is_locked(&tq->tq_lock));
retry:
	/* Acquire taskq_ent_t's from free list if available */
	if (!list_empty(&tq->tq_free_list) && !(flags & TQ_NEW)) {
		t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);

		ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));
		ASSERT(!(t->tqent_flags & TQENT_FLAG_CANCEL));
		ASSERT(!timer_pending(&t->tqent_timer));

		list_del_init(&t->tqent_list);
		return (t);
	}

	/* Free list is empty and memory allocations are prohibited */
	if (flags & TQ_NOALLOC)
		return (NULL);

	/* Hit maximum taskq_ent_t pool size */
	if (tq->tq_nalloc >= tq->tq_maxalloc) {
		if (flags & TQ_NOSLEEP)
			return (NULL);

		/*
		 * Sleep periodically polling the free list for an available
		 * taskq_ent_t. Dispatching with TQ_SLEEP should always succeed
		 * but we cannot block forever waiting for a taskq_ent_t to
		 * show up in the free list, otherwise a deadlock can happen.
		 *
		 * Therefore, we need to allocate a new task even if the number
		 * of allocated tasks is above tq->tq_maxalloc, but we still
		 * end up delaying the task allocation by one second, thereby
		 * throttling the task dispatch rate.
		 */
		spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
		schedule_timeout(HZ / 100);
		spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
		if (count < 100) {
			count++;
			goto retry;
		}
	}

	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
	t = kmem_alloc(sizeof(taskq_ent_t), task_km_flags(flags));
	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

	if (t) {
		taskq_init_ent(t);
		tq->tq_nalloc++;
	}

	return (t);
}

/*
 * NOTE: Must be called with tq->tq_lock held, expects the taskq_ent_t
 * to already be removed from the free, work, or pending taskq lists.
 */
static void
task_free(taskq_t *tq, taskq_ent_t *t)
{
	ASSERT(tq);
	ASSERT(t);
	ASSERT(spin_is_locked(&tq->tq_lock));
	ASSERT(list_empty(&t->tqent_list));
	ASSERT(!timer_pending(&t->tqent_timer));

	kmem_free(t, sizeof(taskq_ent_t));
	tq->tq_nalloc--;
}

/*
 * NOTE: Must be called with tq->tq_lock held, either destroys the
 * taskq_ent_t if too many exist or moves it to the free list for later use.
 */
static void
task_done(taskq_t *tq, taskq_ent_t *t)
{
	ASSERT(tq);
	ASSERT(t);
	ASSERT(spin_is_locked(&tq->tq_lock));

	/* Wake tasks blocked in taskq_wait_id() */
	wake_up_all(&t->tqent_waitq);

	list_del_init(&t->tqent_list);

	if (tq->tq_nalloc <= tq->tq_minalloc) {
		t->tqent_id = 0;
		t->tqent_func = NULL;
		t->tqent_arg = NULL;
		t->tqent_flags = 0;

		list_add_tail(&t->tqent_list, &tq->tq_free_list);
	} else {
		task_free(tq, t);
	}
}

/*
 * When a delayed task timer expires, remove it from the delay list and
 * add it to the priority list for immediate processing.
 */
static void
task_expire(unsigned long data)
{
	taskq_ent_t *w, *t = (taskq_ent_t *)data;
	taskq_t *tq = t->tqent_taskq;
	struct list_head *l;

	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

	if (t->tqent_flags & TQENT_FLAG_CANCEL) {
		ASSERT(list_empty(&t->tqent_list));
		spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
		return;
	}

	/*
	 * The priority list must be maintained in strict task id order
	 * from lowest to highest for lowest_id to be easily calculable.
	 */
	list_del(&t->tqent_list);
	list_for_each_prev(l, &tq->tq_prio_list) {
		w = list_entry(l, taskq_ent_t, tqent_list);
		if (w->tqent_id < t->tqent_id) {
			list_add(&t->tqent_list, l);
			break;
		}
	}
	if (l == &tq->tq_prio_list)
		list_add(&t->tqent_list, &tq->tq_prio_list);

	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

	wake_up(&tq->tq_work_waitq);
}

/*
 * Returns the lowest incomplete taskqid_t. The taskqid_t may
 * be queued on the pending list, on the priority list, on the
 * delay list, or on the work list currently being handled, but
 * it is not 100% complete yet.
 */
static taskqid_t
taskq_lowest_id(taskq_t *tq)
{
	taskqid_t lowest_id = tq->tq_next_id;
	taskq_ent_t *t;
	taskq_thread_t *tqt;

	ASSERT(tq);
	ASSERT(spin_is_locked(&tq->tq_lock));

	if (!list_empty(&tq->tq_pend_list)) {
		t = list_entry(tq->tq_pend_list.next, taskq_ent_t, tqent_list);
		lowest_id = MIN(lowest_id, t->tqent_id);
	}

	if (!list_empty(&tq->tq_prio_list)) {
		t = list_entry(tq->tq_prio_list.next, taskq_ent_t, tqent_list);
		lowest_id = MIN(lowest_id, t->tqent_id);
	}

	if (!list_empty(&tq->tq_delay_list)) {
		t = list_entry(tq->tq_delay_list.next, taskq_ent_t, tqent_list);
		lowest_id = MIN(lowest_id, t->tqent_id);
	}

	if (!list_empty(&tq->tq_active_list)) {
		tqt = list_entry(tq->tq_active_list.next, taskq_thread_t,
		    tqt_active_list);
		ASSERT(tqt->tqt_id != 0);
		lowest_id = MIN(lowest_id, tqt->tqt_id);
	}

	return (lowest_id);
}

/*
 * Insert a task into a list keeping the list sorted by increasing taskqid.
 */
static void
taskq_insert_in_order(taskq_t *tq, taskq_thread_t *tqt)
{
	taskq_thread_t *w;
	struct list_head *l;

	ASSERT(tq);
	ASSERT(tqt);
	ASSERT(spin_is_locked(&tq->tq_lock));

	list_for_each_prev(l, &tq->tq_active_list) {
		w = list_entry(l, taskq_thread_t, tqt_active_list);
		if (w->tqt_id < tqt->tqt_id) {
			list_add(&tqt->tqt_active_list, l);
			break;
		}
	}
	if (l == &tq->tq_active_list)
		list_add(&tqt->tqt_active_list, &tq->tq_active_list);
}

/*
 * Find and return a task from the given list if it exists. The list
 * must be in lowest to highest task id order.
 */
static taskq_ent_t *
taskq_find_list(taskq_t *tq, struct list_head *lh, taskqid_t id)
{
	struct list_head *l;
	taskq_ent_t *t;

	ASSERT(spin_is_locked(&tq->tq_lock));

	list_for_each(l, lh) {
		t = list_entry(l, taskq_ent_t, tqent_list);

		if (t->tqent_id == id)
			return (t);

		if (t->tqent_id > id)
			break;
	}

	return (NULL);
}

/*
 * Find an already dispatched task given the task id regardless of what
 * state it is in. If a task is still pending or executing it will be
 * returned and 'active' set appropriately. If the task has already
 * been run then NULL is returned.
 */
static taskq_ent_t *
taskq_find(taskq_t *tq, taskqid_t id, int *active)
{
	taskq_thread_t *tqt;
	struct list_head *l;
	taskq_ent_t *t;

	ASSERT(spin_is_locked(&tq->tq_lock));
	*active = 0;

	t = taskq_find_list(tq, &tq->tq_delay_list, id);
	if (t)
		return (t);

	t = taskq_find_list(tq, &tq->tq_prio_list, id);
	if (t)
		return (t);

	t = taskq_find_list(tq, &tq->tq_pend_list, id);
	if (t)
		return (t);

	list_for_each(l, &tq->tq_active_list) {
		tqt = list_entry(l, taskq_thread_t, tqt_active_list);
		if (tqt->tqt_id == id) {
			t = tqt->tqt_task;
			*active = 1;
			return (t);
		}
	}

	return (NULL);
}

/*
 * Theory for the taskq_wait_id(), taskq_wait_outstanding(), and
 * taskq_wait() functions below.
 *
 * Taskq waiting is accomplished by tracking the lowest outstanding task
 * id and the next available task id. As tasks are dispatched they are
 * added to the tail of the pending, priority, or delay lists. As worker
 * threads become available the tasks are removed from the heads of these
 * lists and linked to the worker threads. This ensures the lists are
 * kept sorted by lowest to highest task id.
 *
 * Therefore the lowest outstanding task id can be quickly determined by
 * checking the head item from all of these lists. This value is stored
 * with the taskq as the lowest id. It only needs to be recalculated when
 * either the task with the current lowest id completes or is canceled.
 *
 * By blocking until the lowest task id exceeds the passed task id the
 * taskq_wait_outstanding() function can be easily implemented. Similarly,
 * by blocking until the lowest task id matches the next task id taskq_wait()
 * can be implemented.
 *
 * Callers should be aware that when there are multiple worker threads it
 * is possible for larger task ids to complete before smaller ones. Also
 * when the taskq contains delay tasks with small task ids callers may
 * block for a considerable length of time waiting for them to expire and
 * execute.
 */
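
/*
 * Illustrative sketch (not from the original sources): a typical consumer
 * pairs a dispatch with one of the wait functions below; 'my_func' and
 * 'my_arg' are hypothetical names.
 *
 *	taskqid_t id = taskq_dispatch(tq, my_func, my_arg, TQ_SLEEP);
 *	if (id != 0)
 *		taskq_wait_id(tq, id);		(this task only)
 *	taskq_wait_outstanding(tq, 0);		(everything dispatched so far)
 *	taskq_wait(tq);				(until the taskq is empty)
 */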
static int
taskq_wait_id_check(taskq_t *tq, taskqid_t id)
{
	int active = 0;
	int rc;

	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
	rc = (taskq_find(tq, id, &active) == NULL);
	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

	return (rc);
}

/*
 * The taskq_wait_id() function blocks until the passed task id completes.
 * This does not guarantee that all lower task ids have completed.
 */
void
taskq_wait_id(taskq_t *tq, taskqid_t id)
{
	wait_event(tq->tq_wait_waitq, taskq_wait_id_check(tq, id));
}
EXPORT_SYMBOL(taskq_wait_id);

static int
taskq_wait_outstanding_check(taskq_t *tq, taskqid_t id)
{
	int rc;

	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
	rc = (id < tq->tq_lowest_id);
	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

	return (rc);
}

/*
 * The taskq_wait_outstanding() function will block until all tasks with a
 * lower taskqid than the passed 'id' have been completed. Note that all
 * task ids are assigned monotonically at dispatch time. Zero may be
 * passed for the id to indicate that all tasks dispatched up to this
 * point, but not after, should be waited for.
 */
void
taskq_wait_outstanding(taskq_t *tq, taskqid_t id)
{
	id = id ? id : tq->tq_next_id - 1;
	wait_event(tq->tq_wait_waitq, taskq_wait_outstanding_check(tq, id));
}
EXPORT_SYMBOL(taskq_wait_outstanding);

static int
taskq_wait_check(taskq_t *tq)
{
	int rc;

	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
	rc = (tq->tq_lowest_id == tq->tq_next_id);
	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

	return (rc);
}

/*
 * The taskq_wait() function will block until the taskq is empty.
 * This means that if a taskq re-dispatches work to itself taskq_wait()
 * callers will block indefinitely.
 */
void
taskq_wait(taskq_t *tq)
{
	wait_event(tq->tq_wait_waitq, taskq_wait_check(tq));
}
EXPORT_SYMBOL(taskq_wait);

static int
taskq_member_impl(taskq_t *tq, void *t)
{
	struct list_head *l;
	taskq_thread_t *tqt;
	int found = 0;

	ASSERT(tq);
	ASSERT(t);
	ASSERT(spin_is_locked(&tq->tq_lock));

	list_for_each(l, &tq->tq_thread_list) {
		tqt = list_entry(l, taskq_thread_t, tqt_thread_list);
		if (tqt->tqt_thread == (struct task_struct *)t) {
			found = 1;
			break;
		}
	}
	return (found);
}

int
taskq_member(taskq_t *tq, void *t)
{
	int found;

	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
	found = taskq_member_impl(tq, t);
	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

	return (found);
}
EXPORT_SYMBOL(taskq_member);

/*
 * Cancel an already dispatched task given the task id. Still pending tasks
 * will be immediately canceled, and if the task is active the function will
 * block until it completes. Preallocated tasks which are canceled must be
 * freed by the caller.
 */
int
taskq_cancel_id(taskq_t *tq, taskqid_t id)
{
	taskq_ent_t *t;
	int active = 0;
	int rc = ENOENT;

	ASSERT(tq);

	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
	t = taskq_find(tq, id, &active);
	if (t && !active) {
		list_del_init(&t->tqent_list);
		t->tqent_flags |= TQENT_FLAG_CANCEL;

		/*
		 * When canceling the lowest outstanding task id we
		 * must recalculate the new lowest outstanding id.
		 */
		if (tq->tq_lowest_id == t->tqent_id) {
			tq->tq_lowest_id = taskq_lowest_id(tq);
			ASSERT3S(tq->tq_lowest_id, >, t->tqent_id);
		}

		/*
		 * The task_expire() function takes the tq->tq_lock so drop
		 * the lock before synchronously cancelling the timer.
		 */
		if (timer_pending(&t->tqent_timer)) {
			spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
			del_timer_sync(&t->tqent_timer);
			spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
		}

		if (!(t->tqent_flags & TQENT_FLAG_PREALLOC))
			task_done(tq, t);

		rc = 0;
	}
	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

	if (active) {
		taskq_wait_id(tq, id);
		rc = EBUSY;
	}

	return (rc);
}
EXPORT_SYMBOL(taskq_cancel_id);

static int taskq_thread_spawn(taskq_t *tq);

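/*
 * Dispatch 'func(arg)' to the taskq for asynchronous execution. Returns
 * the task id on success, or 0 if the taskq is being destroyed or no
 * task entry could be obtained.
 */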
taskqid_t
taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
{
	taskq_ent_t *t;
	taskqid_t rc = 0;

	ASSERT(tq);
	ASSERT(func);

	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

	/* Taskq being destroyed and all tasks drained */
	if (!(tq->tq_flags & TASKQ_ACTIVE))
		goto out;

	/* Do not queue the task unless there is an idle thread for it */
	ASSERT(tq->tq_nactive <= tq->tq_nthreads);
	if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads))
		goto out;

	if ((t = task_alloc(tq, flags)) == NULL)
		goto out;

	spin_lock(&t->tqent_lock);

	/* Queue to the priority list instead of the pending list */
	if (flags & TQ_FRONT)
		list_add_tail(&t->tqent_list, &tq->tq_prio_list);
	else
		list_add_tail(&t->tqent_list, &tq->tq_pend_list);

	t->tqent_id = rc = tq->tq_next_id;
	tq->tq_next_id++;
	t->tqent_func = func;
	t->tqent_arg = arg;
	t->tqent_taskq = tq;
	t->tqent_timer.data = 0;
	t->tqent_timer.function = NULL;
	t->tqent_timer.expires = 0;

	ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

	spin_unlock(&t->tqent_lock);

	wake_up(&tq->tq_work_waitq);
out:
	/* Spawn additional taskq threads if required. */
	if (tq->tq_nactive == tq->tq_nthreads)
		(void) taskq_thread_spawn(tq);

	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
	return (rc);
}
EXPORT_SYMBOL(taskq_dispatch);

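/*
 * As taskq_dispatch(), but the task is placed on the delay list and only
 * becomes runnable once its timer fires at 'expire_time' (in jiffies).
 * Returns the task id, or 0 on failure.
 */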
taskqid_t
taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg,
    uint_t flags, clock_t expire_time)
{
	taskqid_t rc = 0;
	taskq_ent_t *t;

	ASSERT(tq);
	ASSERT(func);

	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

	/* Taskq being destroyed and all tasks drained */
	if (!(tq->tq_flags & TASKQ_ACTIVE))
		goto out;

	if ((t = task_alloc(tq, flags)) == NULL)
		goto out;

	spin_lock(&t->tqent_lock);

	/* Queue to the delay list for subsequent execution */
	list_add_tail(&t->tqent_list, &tq->tq_delay_list);

	t->tqent_id = rc = tq->tq_next_id;
	tq->tq_next_id++;
	t->tqent_func = func;
	t->tqent_arg = arg;
	t->tqent_taskq = tq;
	t->tqent_timer.data = (unsigned long)t;
	t->tqent_timer.function = task_expire;
	t->tqent_timer.expires = (unsigned long)expire_time;
	add_timer(&t->tqent_timer);

	ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

	spin_unlock(&t->tqent_lock);
out:
	/* Spawn additional taskq threads if required. */
	if (tq->tq_nactive == tq->tq_nthreads)
		(void) taskq_thread_spawn(tq);
	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
	return (rc);
}
EXPORT_SYMBOL(taskq_dispatch_delay);

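/*
 * As taskq_dispatch(), but uses the caller-supplied, preallocated
 * taskq_ent_t 't' rather than allocating one, so it cannot fail for
 * want of memory; if the taskq is inactive, t->tqent_id is set to 0.
 */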
void
taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
    taskq_ent_t *t)
{
	ASSERT(tq);
	ASSERT(func);

	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

	/* Taskq being destroyed and all tasks drained */
	if (!(tq->tq_flags & TASKQ_ACTIVE)) {
		t->tqent_id = 0;
		goto out;
	}

	spin_lock(&t->tqent_lock);

	/*
	 * Mark it as a prealloc'd task. This is important
	 * to ensure that we don't free it later.
	 */
	t->tqent_flags |= TQENT_FLAG_PREALLOC;

	/* Queue to the priority list instead of the pending list */
	if (flags & TQ_FRONT)
		list_add_tail(&t->tqent_list, &tq->tq_prio_list);
	else
		list_add_tail(&t->tqent_list, &tq->tq_pend_list);

	t->tqent_id = tq->tq_next_id;
	tq->tq_next_id++;
	t->tqent_func = func;
	t->tqent_arg = arg;
	t->tqent_taskq = tq;

	spin_unlock(&t->tqent_lock);

	wake_up(&tq->tq_work_waitq);
out:
	/* Spawn additional taskq threads if required. */
	if (tq->tq_nactive == tq->tq_nthreads)
		(void) taskq_thread_spawn(tq);
	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
}
EXPORT_SYMBOL(taskq_dispatch_ent);

int
taskq_empty_ent(taskq_ent_t *t)
{
	return list_empty(&t->tqent_list);
}
EXPORT_SYMBOL(taskq_empty_ent);

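/*
 * Initialize a caller-provided taskq_ent_t for later use with
 * taskq_dispatch_ent().
 */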
void
taskq_init_ent(taskq_ent_t *t)
{
	spin_lock_init(&t->tqent_lock);
	init_waitqueue_head(&t->tqent_waitq);
	init_timer(&t->tqent_timer);
	INIT_LIST_HEAD(&t->tqent_list);
	t->tqent_id = 0;
	t->tqent_func = NULL;
	t->tqent_arg = NULL;
	t->tqent_flags = 0;
	t->tqent_taskq = NULL;
}
EXPORT_SYMBOL(taskq_init_ent);

/*
 * Return the next pending task, preference is given to tasks on the
 * priority list which were dispatched with TQ_FRONT.
 */
static taskq_ent_t *
taskq_next_ent(taskq_t *tq)
{
	struct list_head *list;

	ASSERT(spin_is_locked(&tq->tq_lock));

	if (!list_empty(&tq->tq_prio_list))
		list = &tq->tq_prio_list;
	else if (!list_empty(&tq->tq_pend_list))
		list = &tq->tq_pend_list;
	else
		return (NULL);

	return (list_entry(list->next, taskq_ent_t, tqent_list));
}

/*
 * Spawns a new thread for the specified taskq.
 */
static void
taskq_thread_spawn_task(void *arg)
{
	taskq_t *tq = (taskq_t *)arg;

	if (taskq_thread_create(tq) == NULL) {
		/* restore spawning count if failed */
		spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
		tq->tq_nspawn--;
		spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
	}
}

/*
 * Spawn additional threads for dynamic taskqs (TASKQ_DYNAMIC) when the
 * current number of threads is insufficient to handle the pending tasks.
 * These new threads must be created by the dedicated dynamic_taskq to
 * avoid deadlocks between thread creation and memory reclaim. The
 * system_taskq which is also a dynamic taskq cannot be safely used
 * for this.
 */
static int
taskq_thread_spawn(taskq_t *tq)
{
	int spawning = 0;

	if (!(tq->tq_flags & TASKQ_DYNAMIC))
		return (0);

	if ((tq->tq_nthreads + tq->tq_nspawn < tq->tq_maxthreads) &&
	    (tq->tq_flags & TASKQ_ACTIVE)) {
		spawning = (++tq->tq_nspawn);
		taskq_dispatch(dynamic_taskq, taskq_thread_spawn_task,
		    tq, TQ_NOSLEEP);
	}

	return (spawning);
}

/*
 * Threads in a dynamic taskq should only exit once it has been completely
 * drained and no other threads are actively servicing tasks. This prevents
 * threads from being created and destroyed more than is required.
 *
 * The first thread in the thread list is treated as the primary thread.
 * There is nothing special about the primary thread but in order to avoid
 * all the taskq pids from changing we opt to make it long running.
 */
static int
taskq_thread_should_stop(taskq_t *tq, taskq_thread_t *tqt)
{
	ASSERT(spin_is_locked(&tq->tq_lock));

	if (!(tq->tq_flags & TASKQ_DYNAMIC))
		return (0);

	if (list_first_entry(&(tq->tq_thread_list), taskq_thread_t,
	    tqt_thread_list) == tqt)
		return (0);

	return
	    ((tq->tq_nspawn == 0) &&	/* No threads are being spawned */
	    (tq->tq_nactive == 0) &&	/* No threads are handling tasks */
	    (tq->tq_nthreads > 1) &&	/* More than 1 thread is running */
	    (!taskq_next_ent(tq)) &&	/* There are no pending tasks */
	    (spl_taskq_thread_dynamic));/* Dynamic taskqs are allowed */
}

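/*
 * Worker thread body: repeatedly pull the next task from the priority or
 * pending list, execute it, update the lowest outstanding task id, and
 * wake any waiters. Threads of a dynamic taskq may exit when idle.
 */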
static int
taskq_thread(void *args)
{
	DECLARE_WAITQUEUE(wait, current);
	sigset_t blocked;
	taskq_thread_t *tqt = args;
	taskq_t *tq;
	taskq_ent_t *t;
	int seq_tasks = 0;

	ASSERT(tqt);
	tq = tqt->tqt_tq;
	current->flags |= PF_NOFREEZE;

	(void) spl_fstrans_mark();

	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
	/*
	 * If we are dynamically spawned, decrease spawning count. Note that
	 * we could be created during taskq_create, in which case we shouldn't
	 * do the decrement. But it's fine because taskq_create will reset
	 * tq_nspawn later.
	 */
	if (tq->tq_flags & TASKQ_DYNAMIC)
		tq->tq_nspawn--;

	/* Immediately exit if more threads than allowed were created. */
	if (tq->tq_nthreads >= tq->tq_maxthreads)
		goto error;

	tq->tq_nthreads++;
	list_add_tail(&tqt->tqt_thread_list, &tq->tq_thread_list);
	wake_up(&tq->tq_wait_waitq);
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {

		if (list_empty(&tq->tq_pend_list) &&
		    list_empty(&tq->tq_prio_list)) {

			if (taskq_thread_should_stop(tq, tqt)) {
				wake_up_all(&tq->tq_wait_waitq);
				break;
			}

			add_wait_queue_exclusive(&tq->tq_work_waitq, &wait);
			spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

			schedule();
			seq_tasks = 0;

			spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
			remove_wait_queue(&tq->tq_work_waitq, &wait);
		} else {
			__set_current_state(TASK_RUNNING);
		}

		if ((t = taskq_next_ent(tq)) != NULL) {
			list_del_init(&t->tqent_list);

			/*
			 * In order to support recursively dispatching a
			 * preallocated taskq_ent_t, tqent_id must be
			 * stored prior to executing tqent_func.
			 */
			tqt->tqt_id = t->tqent_id;
			tqt->tqt_task = t;

			/*
			 * We must store a copy of the flags prior to
			 * servicing the task (servicing a prealloc'd task
			 * returns the ownership of the tqent back to
			 * the caller of taskq_dispatch). Thus,
			 * tqent_flags _may_ change within the call.
			 */
			tqt->tqt_flags = t->tqent_flags;

			taskq_insert_in_order(tq, tqt);
			tq->tq_nactive++;
			spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

			/* Perform the requested task */
			t->tqent_func(t->tqent_arg);

			spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
			tq->tq_nactive--;
			list_del_init(&tqt->tqt_active_list);
			tqt->tqt_task = NULL;

			/* For prealloc'd tasks, we don't free anything. */
			if (!(tqt->tqt_flags & TQENT_FLAG_PREALLOC))
				task_done(tq, t);

			/*
			 * When the current lowest outstanding taskqid is
			 * done calculate the new lowest outstanding id.
			 */
			if (tq->tq_lowest_id == tqt->tqt_id) {
				tq->tq_lowest_id = taskq_lowest_id(tq);
				ASSERT3S(tq->tq_lowest_id, >, tqt->tqt_id);
			}

			/* Spawn additional taskq threads if required. */
			if ((++seq_tasks) > spl_taskq_thread_sequential &&
			    taskq_thread_spawn(tq))
				seq_tasks = 0;

			tqt->tqt_id = 0;
			tqt->tqt_flags = 0;
			wake_up_all(&tq->tq_wait_waitq);
		} else {
			if (taskq_thread_should_stop(tq, tqt))
				break;
		}

		set_current_state(TASK_INTERRUPTIBLE);
	}

	__set_current_state(TASK_RUNNING);
	tq->tq_nthreads--;
	list_del_init(&tqt->tqt_thread_list);
error:
	kmem_free(tqt, sizeof (taskq_thread_t));
	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

	return (0);
}

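/*
 * Allocate a taskq_thread_t and spawn its kernel thread. Returns NULL
 * if the thread could not be created.
 */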
static taskq_thread_t *
taskq_thread_create(taskq_t *tq)
{
	static int last_used_cpu = 0;
	taskq_thread_t *tqt;

	tqt = kmem_alloc(sizeof (*tqt), KM_PUSHPAGE);
	INIT_LIST_HEAD(&tqt->tqt_thread_list);
	INIT_LIST_HEAD(&tqt->tqt_active_list);
	tqt->tqt_tq = tq;
	tqt->tqt_id = 0;

	tqt->tqt_thread = spl_kthread_create(taskq_thread, tqt,
	    "%s", tq->tq_name);
	if (tqt->tqt_thread == NULL) {
		kmem_free(tqt, sizeof (taskq_thread_t));
		return (NULL);
	}

	if (spl_taskq_thread_bind) {
		last_used_cpu = (last_used_cpu + 1) % num_online_cpus();
		kthread_bind(tqt->tqt_thread, last_used_cpu);
	}

	if (spl_taskq_thread_priority)
		set_user_nice(tqt->tqt_thread, PRIO_TO_NICE(tq->tq_pri));

	wake_up_process(tqt->tqt_thread);

	return (tqt);
}

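/*
 * Create a taskq named 'name' with 'nthreads' worker threads running at
 * priority 'pri'. Between 'minalloc' and 'maxalloc' task entries are
 * cached for dispatch. With TASKQ_THREADS_CPU_PCT, 'nthreads' is taken
 * as a percentage of the online CPUs rather than a thread count.
 * Returns NULL on failure.
 */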
taskq_t *
taskq_create(const char *name, int nthreads, pri_t pri,
    int minalloc, int maxalloc, uint_t flags)
{
	taskq_t *tq;
	taskq_thread_t *tqt;
	int count = 0, rc = 0, i;

	ASSERT(name != NULL);
	ASSERT(minalloc >= 0);
	ASSERT(maxalloc <= INT_MAX);
	ASSERT(!(flags & (TASKQ_CPR_SAFE)));	/* Unsupported */

	/* Scale the number of threads using nthreads as a percentage */
	if (flags & TASKQ_THREADS_CPU_PCT) {
		ASSERT(nthreads <= 100);
		ASSERT(nthreads >= 0);
		nthreads = MIN(nthreads, 100);
		nthreads = MAX(nthreads, 0);
		nthreads = MAX((num_online_cpus() * nthreads) / 100, 1);
	}

	tq = kmem_alloc(sizeof (*tq), KM_PUSHPAGE);
	if (tq == NULL)
		return (NULL);

	spin_lock_init(&tq->tq_lock);
	INIT_LIST_HEAD(&tq->tq_thread_list);
	INIT_LIST_HEAD(&tq->tq_active_list);
	tq->tq_name = strdup(name);
	tq->tq_nactive = 0;
	tq->tq_nthreads = 0;
	tq->tq_nspawn = 0;
	tq->tq_maxthreads = nthreads;
	tq->tq_pri = pri;
	tq->tq_minalloc = minalloc;
	tq->tq_maxalloc = maxalloc;
	tq->tq_nalloc = 0;
	tq->tq_flags = (flags | TASKQ_ACTIVE);
	tq->tq_next_id = 1;
	tq->tq_lowest_id = 1;
	INIT_LIST_HEAD(&tq->tq_free_list);
	INIT_LIST_HEAD(&tq->tq_pend_list);
	INIT_LIST_HEAD(&tq->tq_prio_list);
	INIT_LIST_HEAD(&tq->tq_delay_list);
	init_waitqueue_head(&tq->tq_work_waitq);
	init_waitqueue_head(&tq->tq_wait_waitq);

	if (flags & TASKQ_PREPOPULATE) {
		spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

		for (i = 0; i < minalloc; i++)
			task_done(tq, task_alloc(tq, TQ_PUSHPAGE | TQ_NEW));

		spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
	}

	if ((flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic)
		nthreads = 1;

	for (i = 0; i < nthreads; i++) {
		tqt = taskq_thread_create(tq);
		if (tqt == NULL)
			rc = 1;
		else
			count++;
	}

	/* Wait for all threads to be started before potential destroy */
	wait_event(tq->tq_wait_waitq, tq->tq_nthreads == count);
	/*
	 * taskq_thread() may have decremented tq_nspawn, but these threads
	 * were not dynamically spawned, so reset the count to 0.
	 */
	tq->tq_nspawn = 0;

	if (rc) {
		taskq_destroy(tq);
		tq = NULL;
	}

	return (tq);
}
EXPORT_SYMBOL(taskq_create);

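/*
 * Destroy the taskq: wait for outstanding tasks to complete, stop every
 * worker thread, and free all resources associated with 'tq'.
 */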
void
taskq_destroy(taskq_t *tq)
{
	struct task_struct *thread;
	taskq_thread_t *tqt;
	taskq_ent_t *t;

	ASSERT(tq);
	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
	tq->tq_flags &= ~TASKQ_ACTIVE;
	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

	/*
	 * When TASKQ_ACTIVE is clear new tasks may not be added nor may
	 * new worker threads be spawned for a dynamic taskq.
	 */
	if (dynamic_taskq != NULL)
		taskq_wait_outstanding(dynamic_taskq, 0);

	taskq_wait(tq);

	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
	/* wait for spawning threads to insert themselves into the list */
	while (tq->tq_nspawn) {
		spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
		schedule_timeout_interruptible(1);
		spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
	}

	/*
	 * Signal each thread to exit and block until it does. Each thread
	 * is responsible for removing itself from the list and freeing its
	 * taskq_thread_t. This allows for idle threads to opt to remove
	 * themselves from the taskq. They can be recreated as needed.
	 */
	while (!list_empty(&tq->tq_thread_list)) {
		tqt = list_entry(tq->tq_thread_list.next,
		    taskq_thread_t, tqt_thread_list);
		thread = tqt->tqt_thread;
		spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

		kthread_stop(thread);

		spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
	}

	while (!list_empty(&tq->tq_free_list)) {
		t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);

		ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

		list_del_init(&t->tqent_list);
		task_free(tq, t);
	}

	ASSERT0(tq->tq_nthreads);
	ASSERT0(tq->tq_nalloc);
	ASSERT0(tq->tq_nspawn);
	ASSERT(list_empty(&tq->tq_thread_list));
	ASSERT(list_empty(&tq->tq_active_list));
	ASSERT(list_empty(&tq->tq_free_list));
	ASSERT(list_empty(&tq->tq_pend_list));
	ASSERT(list_empty(&tq->tq_prio_list));
	ASSERT(list_empty(&tq->tq_delay_list));

	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

	strfree(tq->tq_name);
	kmem_free(tq, sizeof (taskq_t));
}
EXPORT_SYMBOL(taskq_destroy);

int
spl_taskq_init(void)
{
	system_taskq = taskq_create("spl_system_taskq", MAX(boot_ncpus, 64),
	    maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE|TASKQ_DYNAMIC);
	if (system_taskq == NULL)
		return (1);

	dynamic_taskq = taskq_create("spl_dynamic_taskq", 1,
	    maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE);
	if (dynamic_taskq == NULL) {
		taskq_destroy(system_taskq);
		return (1);
	}

	return (0);
}

void
spl_taskq_fini(void)
{
	taskq_destroy(dynamic_taskq);
	dynamic_taskq = NULL;

	taskq_destroy(system_taskq);
	system_taskq = NULL;
}