1 /*
2 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
3 * Copyright (C) 2007 The Regents of the University of California.
4 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
5 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
6 * UCRL-CODE-235197
7 *
8 * This file is part of the SPL, Solaris Porting Layer.
9 * For details, see <http://zfsonlinux.org/>.
10 *
11 * The SPL is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 *
16 * The SPL is distributed in the hope that it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 * for more details.
20 *
21 * You should have received a copy of the GNU General Public License along
22 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
23 *
24 * Solaris Porting Layer (SPL) Task Queue Implementation.
25 */
26
27 #include <sys/taskq.h>
28 #include <sys/kmem.h>
29 #include <sys/tsd.h>
30
31 int spl_taskq_thread_bind = 0;
32 module_param(spl_taskq_thread_bind, int, 0644);
33 MODULE_PARM_DESC(spl_taskq_thread_bind, "Bind taskq thread to CPU by default");
34
35
36 int spl_taskq_thread_dynamic = 1;
37 module_param(spl_taskq_thread_dynamic, int, 0644);
38 MODULE_PARM_DESC(spl_taskq_thread_dynamic, "Allow dynamic taskq threads");
39
40 int spl_taskq_thread_priority = 1;
41 module_param(spl_taskq_thread_priority, int, 0644);
42 MODULE_PARM_DESC(spl_taskq_thread_priority,
43 "Allow non-default priority for taskq threads");
44
45 int spl_taskq_thread_sequential = 4;
46 module_param(spl_taskq_thread_sequential, int, 0644);
47 MODULE_PARM_DESC(spl_taskq_thread_sequential,
48 "Create new taskq threads after N sequential tasks");
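/*
 * Note: because these module parameters are registered with mode 0644 they
 * may be changed at runtime through sysfs (typically under
 * /sys/module/spl/parameters/, assuming the module is loaded as "spl") in
 * addition to being set as module options at load time.
 */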
49
50 /* Global system-wide dynamic task queue available for all consumers */
51 taskq_t *system_taskq;
52 EXPORT_SYMBOL(system_taskq);
53 /* Global dynamic task queue for long delay */
54 taskq_t *system_delay_taskq;
55 EXPORT_SYMBOL(system_delay_taskq);
56
57 /* Private dedicated taskq for creating new taskq threads on demand. */
58 static taskq_t *dynamic_taskq;
59 static taskq_thread_t *taskq_thread_create(taskq_t *);
60
61 /* List of all taskqs */
62 LIST_HEAD(tq_list);
63 DECLARE_RWSEM(tq_list_sem);
64 static uint_t taskq_tsd;
65
66 static int
67 task_km_flags(uint_t flags)
68 {
69 if (flags & TQ_NOSLEEP)
70 return (KM_NOSLEEP);
71
72 if (flags & TQ_PUSHPAGE)
73 return (KM_PUSHPAGE);
74
75 return (KM_SLEEP);
76 }
77
78 /*
79 * taskq_find_by_name - Find the largest instance number of a named taskq.
80 */
81 static int
82 taskq_find_by_name(const char *name)
83 {
84 struct list_head *tql;
85 taskq_t *tq;
86
87 list_for_each_prev(tql, &tq_list) {
88 tq = list_entry(tql, taskq_t, tq_taskqs);
89 if (strcmp(name, tq->tq_name) == 0)
90 return (tq->tq_instance);
91 }
92 return (-1);
93 }
94
95 /*
96  * NOTE: Must be called with tq->tq_lock held, returns a taskq_ent_t
97  * which is not attached to the free, work, or pending taskq lists.
98 */
99 static taskq_ent_t *
100 task_alloc(taskq_t *tq, uint_t flags, unsigned long *irqflags)
101 {
102 taskq_ent_t *t;
103 int count = 0;
104
105 ASSERT(tq);
106 retry:
107 /* Acquire taskq_ent_t's from free list if available */
108 if (!list_empty(&tq->tq_free_list) && !(flags & TQ_NEW)) {
109 t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);
110
111 ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));
112 ASSERT(!(t->tqent_flags & TQENT_FLAG_CANCEL));
113 ASSERT(!timer_pending(&t->tqent_timer));
114
115 list_del_init(&t->tqent_list);
116 return (t);
117 }
118
119 /* Free list is empty and memory allocations are prohibited */
120 if (flags & TQ_NOALLOC)
121 return (NULL);
122
123 /* Hit maximum taskq_ent_t pool size */
124 if (tq->tq_nalloc >= tq->tq_maxalloc) {
125 if (flags & TQ_NOSLEEP)
126 return (NULL);
127
128 /*
129 * Sleep periodically polling the free list for an available
130 * taskq_ent_t. Dispatching with TQ_SLEEP should always succeed
131  * but we cannot block forever waiting for a taskq_ent_t to
132 * show up in the free list, otherwise a deadlock can happen.
133 *
134 * Therefore, we need to allocate a new task even if the number
135 * of allocated tasks is above tq->tq_maxalloc, but we still
136 * end up delaying the task allocation by one second, thereby
137 * throttling the task dispatch rate.
138 */
139 spin_unlock_irqrestore(&tq->tq_lock, *irqflags);
140 schedule_timeout(HZ / 100);
141 spin_lock_irqsave_nested(&tq->tq_lock, *irqflags,
142 tq->tq_lock_class);
143 if (count < 100) {
144 count++;
145 goto retry;
146 }
147 }
148
149 spin_unlock_irqrestore(&tq->tq_lock, *irqflags);
150 t = kmem_alloc(sizeof (taskq_ent_t), task_km_flags(flags));
151 spin_lock_irqsave_nested(&tq->tq_lock, *irqflags, tq->tq_lock_class);
152
153 if (t) {
154 taskq_init_ent(t);
155 tq->tq_nalloc++;
156 }
157
158 return (t);
159 }
160
161 /*
162 * NOTE: Must be called with tq->tq_lock held, expects the taskq_ent_t
163 * to already be removed from the free, work, or pending taskq lists.
164 */
165 static void
166 task_free(taskq_t *tq, taskq_ent_t *t)
167 {
168 ASSERT(tq);
169 ASSERT(t);
170 ASSERT(list_empty(&t->tqent_list));
171 ASSERT(!timer_pending(&t->tqent_timer));
172
173 kmem_free(t, sizeof (taskq_ent_t));
174 tq->tq_nalloc--;
175 }
176
177 /*
178 * NOTE: Must be called with tq->tq_lock held, either destroys the
179 * taskq_ent_t if too many exist or moves it to the free list for later use.
180 */
181 static void
182 task_done(taskq_t *tq, taskq_ent_t *t)
183 {
184 ASSERT(tq);
185 ASSERT(t);
186
187 /* Wake tasks blocked in taskq_wait_id() */
188 wake_up_all(&t->tqent_waitq);
189
190 list_del_init(&t->tqent_list);
191
192 if (tq->tq_nalloc <= tq->tq_minalloc) {
193 t->tqent_id = TASKQID_INVALID;
194 t->tqent_func = NULL;
195 t->tqent_arg = NULL;
196 t->tqent_flags = 0;
197
198 list_add_tail(&t->tqent_list, &tq->tq_free_list);
199 } else {
200 task_free(tq, t);
201 }
202 }
203
204 /*
205  * When a delayed task timer expires, remove it from the delay list and
206  * add it to the priority list for immediate processing.
207 */
208 static void
209 task_expire_impl(taskq_ent_t *t)
210 {
211 taskq_ent_t *w;
212 taskq_t *tq = t->tqent_taskq;
213 struct list_head *l;
214 unsigned long flags;
215
216 spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
217
218 if (t->tqent_flags & TQENT_FLAG_CANCEL) {
219 ASSERT(list_empty(&t->tqent_list));
220 spin_unlock_irqrestore(&tq->tq_lock, flags);
221 return;
222 }
223
224 t->tqent_birth = jiffies;
225 /*
226 * The priority list must be maintained in strict task id order
227 * from lowest to highest for lowest_id to be easily calculable.
228 */
229 list_del(&t->tqent_list);
230 list_for_each_prev(l, &tq->tq_prio_list) {
231 w = list_entry(l, taskq_ent_t, tqent_list);
232 if (w->tqent_id < t->tqent_id) {
233 list_add(&t->tqent_list, l);
234 break;
235 }
236 }
237 if (l == &tq->tq_prio_list)
238 list_add(&t->tqent_list, &tq->tq_prio_list);
239
240 spin_unlock_irqrestore(&tq->tq_lock, flags);
241
242 wake_up(&tq->tq_work_waitq);
243 }
244
245 #ifdef HAVE_KERNEL_TIMER_FUNCTION_TIMER_LIST
246 static void
247 task_expire(struct timer_list *tl)
248 {
249 taskq_ent_t *t = from_timer(t, tl, tqent_timer);
250 task_expire_impl(t);
251 }
252 #else
253 static void
254 task_expire(unsigned long data)
255 {
256 task_expire_impl((taskq_ent_t *)data);
257 }
258 #endif
259
260 /*
261  * Returns the lowest incomplete taskqid_t.  The task may be queued on
262  * the pending list, on the priority list, or on the delay list, or it
263  * may currently be handled by a worker thread, but it has not yet
264  * completed.
265 */
266 static taskqid_t
267 taskq_lowest_id(taskq_t *tq)
268 {
269 taskqid_t lowest_id = tq->tq_next_id;
270 taskq_ent_t *t;
271 taskq_thread_t *tqt;
272
273 ASSERT(tq);
274
275 if (!list_empty(&tq->tq_pend_list)) {
276 t = list_entry(tq->tq_pend_list.next, taskq_ent_t, tqent_list);
277 lowest_id = MIN(lowest_id, t->tqent_id);
278 }
279
280 if (!list_empty(&tq->tq_prio_list)) {
281 t = list_entry(tq->tq_prio_list.next, taskq_ent_t, tqent_list);
282 lowest_id = MIN(lowest_id, t->tqent_id);
283 }
284
285 if (!list_empty(&tq->tq_delay_list)) {
286 t = list_entry(tq->tq_delay_list.next, taskq_ent_t, tqent_list);
287 lowest_id = MIN(lowest_id, t->tqent_id);
288 }
289
290 if (!list_empty(&tq->tq_active_list)) {
291 tqt = list_entry(tq->tq_active_list.next, taskq_thread_t,
292 tqt_active_list);
293 ASSERT(tqt->tqt_id != TASKQID_INVALID);
294 lowest_id = MIN(lowest_id, tqt->tqt_id);
295 }
296
297 return (lowest_id);
298 }
299
300 /*
301 * Insert a task into a list keeping the list sorted by increasing taskqid.
302 */
303 static void
304 taskq_insert_in_order(taskq_t *tq, taskq_thread_t *tqt)
305 {
306 taskq_thread_t *w;
307 struct list_head *l;
308
309 ASSERT(tq);
310 ASSERT(tqt);
311
312 list_for_each_prev(l, &tq->tq_active_list) {
313 w = list_entry(l, taskq_thread_t, tqt_active_list);
314 if (w->tqt_id < tqt->tqt_id) {
315 list_add(&tqt->tqt_active_list, l);
316 break;
317 }
318 }
319 if (l == &tq->tq_active_list)
320 list_add(&tqt->tqt_active_list, &tq->tq_active_list);
321 }
322
323 /*
324 * Find and return a task from the given list if it exists. The list
325 * must be in lowest to highest task id order.
326 */
327 static taskq_ent_t *
328 taskq_find_list(taskq_t *tq, struct list_head *lh, taskqid_t id)
329 {
330 struct list_head *l;
331 taskq_ent_t *t;
332
333 list_for_each(l, lh) {
334 t = list_entry(l, taskq_ent_t, tqent_list);
335
336 if (t->tqent_id == id)
337 return (t);
338
339 if (t->tqent_id > id)
340 break;
341 }
342
343 return (NULL);
344 }
345
346 /*
347 * Find an already dispatched task given the task id regardless of what
348 * state it is in. If a task is still pending it will be returned.
349 * If a task is executing, then -EBUSY will be returned instead.
350 * If the task has already been run then NULL is returned.
351 */
352 static taskq_ent_t *
353 taskq_find(taskq_t *tq, taskqid_t id)
354 {
355 taskq_thread_t *tqt;
356 struct list_head *l;
357 taskq_ent_t *t;
358
359 t = taskq_find_list(tq, &tq->tq_delay_list, id);
360 if (t)
361 return (t);
362
363 t = taskq_find_list(tq, &tq->tq_prio_list, id);
364 if (t)
365 return (t);
366
367 t = taskq_find_list(tq, &tq->tq_pend_list, id);
368 if (t)
369 return (t);
370
371 list_for_each(l, &tq->tq_active_list) {
372 tqt = list_entry(l, taskq_thread_t, tqt_active_list);
373 if (tqt->tqt_id == id) {
374 /*
375 * Instead of returning tqt_task, we just return a non
376 * NULL value to prevent misuse, since tqt_task only
377 * has two valid fields.
378 */
379 return (ERR_PTR(-EBUSY));
380 }
381 }
382
383 return (NULL);
384 }
385
386 /*
387 * Theory for the taskq_wait_id(), taskq_wait_outstanding(), and
388 * taskq_wait() functions below.
389 *
390 * Taskq waiting is accomplished by tracking the lowest outstanding task
391 * id and the next available task id. As tasks are dispatched they are
392 * added to the tail of the pending, priority, or delay lists. As worker
393 * threads become available the tasks are removed from the heads of these
394 * lists and linked to the worker threads. This ensures the lists are
395 * kept sorted by lowest to highest task id.
396 *
397 * Therefore the lowest outstanding task id can be quickly determined by
398 * checking the head item from all of these lists. This value is stored
399 * with the taskq as the lowest id. It only needs to be recalculated when
400 * either the task with the current lowest id completes or is canceled.
401 *
402 * By blocking until the lowest task id exceeds the passed task id the
403 * taskq_wait_outstanding() function can be easily implemented. Similarly,
404 * by blocking until the lowest task id matches the next task id taskq_wait()
405 * can be implemented.
406 *
407  * Callers should be aware that when there are multiple worker threads it
408 * is possible for larger task ids to complete before smaller ones. Also
409 * when the taskq contains delay tasks with small task ids callers may
410 * block for a considerable length of time waiting for them to expire and
411 * execute.
412 */
413 static int
414 taskq_wait_id_check(taskq_t *tq, taskqid_t id)
415 {
416 int rc;
417 unsigned long flags;
418
419 spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
420 rc = (taskq_find(tq, id) == NULL);
421 spin_unlock_irqrestore(&tq->tq_lock, flags);
422
423 return (rc);
424 }
425
426 /*
427 * The taskq_wait_id() function blocks until the passed task id completes.
428 * This does not guarantee that all lower task ids have completed.
429 */
430 void
431 taskq_wait_id(taskq_t *tq, taskqid_t id)
432 {
433 wait_event(tq->tq_wait_waitq, taskq_wait_id_check(tq, id));
434 }
435 EXPORT_SYMBOL(taskq_wait_id);
436
437 static int
438 taskq_wait_outstanding_check(taskq_t *tq, taskqid_t id)
439 {
440 int rc;
441 unsigned long flags;
442
443 spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
444 rc = (id < tq->tq_lowest_id);
445 spin_unlock_irqrestore(&tq->tq_lock, flags);
446
447 return (rc);
448 }
449
450 /*
451 * The taskq_wait_outstanding() function will block until all tasks with a
452 * lower taskqid than the passed 'id' have been completed. Note that all
453  * task ids are assigned monotonically at dispatch time.  Zero may be
454  * passed for the id to indicate that all tasks dispatched up to this
455  * point, but not after, should be waited for.
456 */
457 void
458 taskq_wait_outstanding(taskq_t *tq, taskqid_t id)
459 {
460 id = id ? id : tq->tq_next_id - 1;
461 wait_event(tq->tq_wait_waitq, taskq_wait_outstanding_check(tq, id));
462 }
463 EXPORT_SYMBOL(taskq_wait_outstanding);
464
465 static int
466 taskq_wait_check(taskq_t *tq)
467 {
468 int rc;
469 unsigned long flags;
470
471 spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
472 rc = (tq->tq_lowest_id == tq->tq_next_id);
473 spin_unlock_irqrestore(&tq->tq_lock, flags);
474
475 return (rc);
476 }
477
478 /*
479 * The taskq_wait() function will block until the taskq is empty.
480 * This means that if a taskq re-dispatches work to itself taskq_wait()
481 * callers will block indefinitely.
482 */
483 void
484 taskq_wait(taskq_t *tq)
485 {
486 wait_event(tq->tq_wait_waitq, taskq_wait_check(tq));
487 }
488 EXPORT_SYMBOL(taskq_wait);
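/*
 * Example usage (illustrative sketch only, nothing below is referenced by
 * this file): a consumer dispatches work and then waits on it using the
 * primitives above.  The callback and taskq pointer are hypothetical; only
 * the taskq_*() interfaces, TQ_SLEEP, and TASKQID_INVALID are real.
 *
 *	static void
 *	my_task_func(void *arg)
 *	{
 *		(void) arg;
 *	}
 *
 *	static void
 *	example_wait_usage(taskq_t *tq)
 *	{
 *		taskqid_t id;
 *
 *		id = taskq_dispatch(tq, my_task_func, NULL, TQ_SLEEP);
 *		if (id != TASKQID_INVALID)
 *			taskq_wait_id(tq, id);
 *
 *		taskq_wait_outstanding(tq, 0);
 *		taskq_wait(tq);
 *	}
 *
 * taskq_wait_id() blocks only on the single task, taskq_wait_outstanding()
 * with an id of zero waits for everything dispatched before the call, and
 * taskq_wait() does not return until the taskq is completely empty.
 */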
489
490 int
491 taskq_member(taskq_t *tq, kthread_t *t)
492 {
493 return (tq == (taskq_t *)tsd_get_by_thread(taskq_tsd, t));
494 }
495 EXPORT_SYMBOL(taskq_member);
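/*
 * A common use of taskq_member() is deadlock avoidance: code which may
 * already be running from within 'tq' should execute the work inline
 * rather than dispatch it and block on the result.  Sketch only; 'tq',
 * 'func', and 'arg' are hypothetical, and curthread is assumed to be the
 * SPL alias for the current kthread.
 *
 *	if (taskq_member(tq, curthread))
 *		func(arg);
 *	else
 *		(void) taskq_dispatch(tq, func, arg, TQ_SLEEP);
 */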
496
497 /*
498 * Cancel an already dispatched task given the task id. Still pending tasks
499 * will be immediately canceled, and if the task is active the function will
500 * block until it completes. Preallocated tasks which are canceled must be
501 * freed by the caller.
502 */
503 int
504 taskq_cancel_id(taskq_t *tq, taskqid_t id)
505 {
506 taskq_ent_t *t;
507 int rc = ENOENT;
508 unsigned long flags;
509
510 ASSERT(tq);
511
512 spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
513 t = taskq_find(tq, id);
514 if (t && t != ERR_PTR(-EBUSY)) {
515 list_del_init(&t->tqent_list);
516 t->tqent_flags |= TQENT_FLAG_CANCEL;
517
518 /*
519 * When canceling the lowest outstanding task id we
520 * must recalculate the new lowest outstanding id.
521 */
522 if (tq->tq_lowest_id == t->tqent_id) {
523 tq->tq_lowest_id = taskq_lowest_id(tq);
524 ASSERT3S(tq->tq_lowest_id, >, t->tqent_id);
525 }
526
527 /*
528 * The task_expire() function takes the tq->tq_lock so drop
529  * the lock before synchronously cancelling the timer.
530 */
531 if (timer_pending(&t->tqent_timer)) {
532 spin_unlock_irqrestore(&tq->tq_lock, flags);
533 del_timer_sync(&t->tqent_timer);
534 spin_lock_irqsave_nested(&tq->tq_lock, flags,
535 tq->tq_lock_class);
536 }
537
538 if (!(t->tqent_flags & TQENT_FLAG_PREALLOC))
539 task_done(tq, t);
540
541 rc = 0;
542 }
543 spin_unlock_irqrestore(&tq->tq_lock, flags);
544
545 if (t == ERR_PTR(-EBUSY)) {
546 taskq_wait_id(tq, id);
547 rc = EBUSY;
548 }
549
550 return (rc);
551 }
552 EXPORT_SYMBOL(taskq_cancel_id);
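/*
 * Example of interpreting the taskq_cancel_id() return value (sketch only;
 * 'tq' and 'id' are hypothetical):
 *
 *	int error = taskq_cancel_id(tq, id);
 *
 *	error == 0       the task was still pending and has been canceled
 *	error == EBUSY   the task was executing; the call waited for it
 *	error == ENOENT  the task already completed or was never dispatched
 */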
553
554 static int taskq_thread_spawn(taskq_t *tq);
555
556 taskqid_t
557 taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
558 {
559 taskq_ent_t *t;
560 taskqid_t rc = TASKQID_INVALID;
561 unsigned long irqflags;
562
563 ASSERT(tq);
564 ASSERT(func);
565
566 spin_lock_irqsave_nested(&tq->tq_lock, irqflags, tq->tq_lock_class);
567
568 /* Taskq being destroyed and all tasks drained */
569 if (!(tq->tq_flags & TASKQ_ACTIVE))
570 goto out;
571
572 /* Do not queue the task unless there is an idle thread for it */
573 ASSERT(tq->tq_nactive <= tq->tq_nthreads);
574 if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads)) {
575 /* Dynamic taskq may be able to spawn another thread */
576 if (!(tq->tq_flags & TASKQ_DYNAMIC) ||
577 taskq_thread_spawn(tq) == 0)
578 goto out;
579 }
580
581 if ((t = task_alloc(tq, flags, &irqflags)) == NULL)
582 goto out;
583
584 spin_lock(&t->tqent_lock);
585
586 /* Queue to the front of the list to enforce TQ_NOQUEUE semantics */
587 if (flags & TQ_NOQUEUE)
588 list_add(&t->tqent_list, &tq->tq_prio_list);
589 /* Queue to the priority list instead of the pending list */
590 else if (flags & TQ_FRONT)
591 list_add_tail(&t->tqent_list, &tq->tq_prio_list);
592 else
593 list_add_tail(&t->tqent_list, &tq->tq_pend_list);
594
595 t->tqent_id = rc = tq->tq_next_id;
596 tq->tq_next_id++;
597 t->tqent_func = func;
598 t->tqent_arg = arg;
599 t->tqent_taskq = tq;
600 #ifndef HAVE_KERNEL_TIMER_FUNCTION_TIMER_LIST
601 t->tqent_timer.data = 0;
602 #endif
603 t->tqent_timer.function = NULL;
604 t->tqent_timer.expires = 0;
605 t->tqent_birth = jiffies;
606
607 ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));
608
609 spin_unlock(&t->tqent_lock);
610
611 wake_up(&tq->tq_work_waitq);
612 out:
613 /* Spawn additional taskq threads if required. */
614 if (!(flags & TQ_NOQUEUE) && tq->tq_nactive == tq->tq_nthreads)
615 (void) taskq_thread_spawn(tq);
616
617 spin_unlock_irqrestore(&tq->tq_lock, irqflags);
618 return (rc);
619 }
620 EXPORT_SYMBOL(taskq_dispatch);
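/*
 * Dispatch flag summary (sketch, restating the queueing logic above with
 * hypothetical 'tq', 'func', and 'arg'):
 *
 *	(void) taskq_dispatch(tq, func, arg, TQ_SLEEP);
 *		queue in FIFO order, sleeping for memory if necessary
 *	(void) taskq_dispatch(tq, func, arg, TQ_SLEEP | TQ_FRONT);
 *		queue on the priority list ahead of normal pending tasks
 *	id = taskq_dispatch(tq, func, arg, TQ_NOSLEEP);
 *		may return TASKQID_INVALID if an entry cannot be allocated
 *
 * TQ_NOQUEUE additionally refuses to queue the task unless an idle thread
 * (or a newly spawned dynamic thread) is available to run it immediately.
 */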
621
622 taskqid_t
623 taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg,
624 uint_t flags, clock_t expire_time)
625 {
626 taskqid_t rc = TASKQID_INVALID;
627 taskq_ent_t *t;
628 unsigned long irqflags;
629
630 ASSERT(tq);
631 ASSERT(func);
632
633 spin_lock_irqsave_nested(&tq->tq_lock, irqflags, tq->tq_lock_class);
634
635 /* Taskq being destroyed and all tasks drained */
636 if (!(tq->tq_flags & TASKQ_ACTIVE))
637 goto out;
638
639 if ((t = task_alloc(tq, flags, &irqflags)) == NULL)
640 goto out;
641
642 spin_lock(&t->tqent_lock);
643
644 /* Queue to the delay list for subsequent execution */
645 list_add_tail(&t->tqent_list, &tq->tq_delay_list);
646
647 t->tqent_id = rc = tq->tq_next_id;
648 tq->tq_next_id++;
649 t->tqent_func = func;
650 t->tqent_arg = arg;
651 t->tqent_taskq = tq;
652 #ifndef HAVE_KERNEL_TIMER_FUNCTION_TIMER_LIST
653 t->tqent_timer.data = (unsigned long)t;
654 #endif
655 t->tqent_timer.function = task_expire;
656 t->tqent_timer.expires = (unsigned long)expire_time;
657 add_timer(&t->tqent_timer);
658
659 ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));
660
661 spin_unlock(&t->tqent_lock);
662 out:
663 /* Spawn additional taskq threads if required. */
664 if (tq->tq_nactive == tq->tq_nthreads)
665 (void) taskq_thread_spawn(tq);
666 spin_unlock_irqrestore(&tq->tq_lock, irqflags);
667 return (rc);
668 }
669 EXPORT_SYMBOL(taskq_dispatch_delay);
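/*
 * Example (sketch): the expire_time argument is an absolute time expressed
 * in jiffies, so callers normally add a relative delay to the current time.
 * 'tq' and my_task_func() are hypothetical, and ddi_get_lbolt() and
 * SEC_TO_TICK() are assumed to be provided by the SPL time support headers.
 *
 *	taskqid_t id = taskq_dispatch_delay(tq, my_task_func, NULL, TQ_SLEEP,
 *	    ddi_get_lbolt() + SEC_TO_TICK(5));
 *
 * As with taskq_dispatch(), a TASKQID_INVALID return indicates the dispatch
 * failed, for example because the taskq is being destroyed.
 */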
670
671 void
672 taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
673 taskq_ent_t *t)
674 {
675 unsigned long irqflags;
676 ASSERT(tq);
677 ASSERT(func);
678
679 spin_lock_irqsave_nested(&tq->tq_lock, irqflags,
680 tq->tq_lock_class);
681
682 /* Taskq being destroyed and all tasks drained */
683 if (!(tq->tq_flags & TASKQ_ACTIVE)) {
684 t->tqent_id = TASKQID_INVALID;
685 goto out;
686 }
687
688 if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads)) {
689 /* Dynamic taskq may be able to spawn another thread */
690 if (!(tq->tq_flags & TASKQ_DYNAMIC) ||
691 taskq_thread_spawn(tq) == 0)
692 goto out2;
693 flags |= TQ_FRONT;
694 }
695
696 spin_lock(&t->tqent_lock);
697
698 /*
699 * Make sure the entry is not on some other taskq; it is important to
700  * ASSERT() this while holding the lock.
701 */
702 ASSERT(taskq_empty_ent(t));
703
704 /*
705 * Mark it as a prealloc'd task. This is important
706 * to ensure that we don't free it later.
707 */
708 t->tqent_flags |= TQENT_FLAG_PREALLOC;
709
710 /* Queue to the priority list instead of the pending list */
711 if (flags & TQ_FRONT)
712 list_add_tail(&t->tqent_list, &tq->tq_prio_list);
713 else
714 list_add_tail(&t->tqent_list, &tq->tq_pend_list);
715
716 t->tqent_id = tq->tq_next_id;
717 tq->tq_next_id++;
718 t->tqent_func = func;
719 t->tqent_arg = arg;
720 t->tqent_taskq = tq;
721 t->tqent_birth = jiffies;
722
723 spin_unlock(&t->tqent_lock);
724
725 wake_up(&tq->tq_work_waitq);
726 out:
727 /* Spawn additional taskq threads if required. */
728 if (tq->tq_nactive == tq->tq_nthreads)
729 (void) taskq_thread_spawn(tq);
730 out2:
731 spin_unlock_irqrestore(&tq->tq_lock, irqflags);
732 }
733 EXPORT_SYMBOL(taskq_dispatch_ent);
734
735 int
736 taskq_empty_ent(taskq_ent_t *t)
737 {
738 return (list_empty(&t->tqent_list));
739 }
740 EXPORT_SYMBOL(taskq_empty_ent);
741
742 void
743 taskq_init_ent(taskq_ent_t *t)
744 {
745 spin_lock_init(&t->tqent_lock);
746 init_waitqueue_head(&t->tqent_waitq);
747 #ifdef HAVE_KERNEL_TIMER_FUNCTION_TIMER_LIST
748 timer_setup(&t->tqent_timer, NULL, 0);
749 #else
750 init_timer(&t->tqent_timer);
751 #endif
752 INIT_LIST_HEAD(&t->tqent_list);
753 t->tqent_id = 0;
754 t->tqent_func = NULL;
755 t->tqent_arg = NULL;
756 t->tqent_flags = 0;
757 t->tqent_taskq = NULL;
758 }
759 EXPORT_SYMBOL(taskq_init_ent);
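/*
 * Example of the preallocated entry interface (sketch only; the structure,
 * 'mw', and my_work_func() are hypothetical): a caller embeds a taskq_ent_t
 * in its own object, initializes it once, and then dispatches it without any
 * allocation in the dispatch path.
 *
 *	struct my_work {
 *		taskq_ent_t	mw_tqent;
 *		void		*mw_data;
 *	};
 *
 *	taskq_init_ent(&mw->mw_tqent);
 *	...
 *	taskq_dispatch_ent(tq, my_work_func, mw, 0, &mw->mw_tqent);
 *
 * The entry is flagged TQENT_FLAG_PREALLOC internally so the taskq never
 * frees it; the caller owns its lifetime, and taskq_empty_ent() reports
 * whether it is currently linked on a taskq list.
 */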
760
761 /*
762  * Return the next pending task; preference is given to tasks on the
763 * priority list which were dispatched with TQ_FRONT.
764 */
765 static taskq_ent_t *
766 taskq_next_ent(taskq_t *tq)
767 {
768 struct list_head *list;
769
770 if (!list_empty(&tq->tq_prio_list))
771 list = &tq->tq_prio_list;
772 else if (!list_empty(&tq->tq_pend_list))
773 list = &tq->tq_pend_list;
774 else
775 return (NULL);
776
777 return (list_entry(list->next, taskq_ent_t, tqent_list));
778 }
779
780 /*
781 * Spawns a new thread for the specified taskq.
782 */
783 static void
784 taskq_thread_spawn_task(void *arg)
785 {
786 taskq_t *tq = (taskq_t *)arg;
787 unsigned long flags;
788
789 if (taskq_thread_create(tq) == NULL) {
790 /* restore spawning count if failed */
791 spin_lock_irqsave_nested(&tq->tq_lock, flags,
792 tq->tq_lock_class);
793 tq->tq_nspawn--;
794 spin_unlock_irqrestore(&tq->tq_lock, flags);
795 }
796 }
797
798 /*
799  * Spawn additional threads for dynamic taskqs (TASKQ_DYNAMIC) when the
800  * current number of threads is insufficient to handle the pending tasks. These
801 * new threads must be created by the dedicated dynamic_taskq to avoid
802 * deadlocks between thread creation and memory reclaim. The system_taskq
803 * which is also a dynamic taskq cannot be safely used for this.
804 */
805 static int
806 taskq_thread_spawn(taskq_t *tq)
807 {
808 int spawning = 0;
809
810 if (!(tq->tq_flags & TASKQ_DYNAMIC))
811 return (0);
812
813 if ((tq->tq_nthreads + tq->tq_nspawn < tq->tq_maxthreads) &&
814 (tq->tq_flags & TASKQ_ACTIVE)) {
815 spawning = (++tq->tq_nspawn);
816 taskq_dispatch(dynamic_taskq, taskq_thread_spawn_task,
817 tq, TQ_NOSLEEP);
818 }
819
820 return (spawning);
821 }
822
823 /*
824 * Threads in a dynamic taskq should only exit once it has been completely
825 * drained and no other threads are actively servicing tasks. This prevents
826 * threads from being created and destroyed more than is required.
827 *
828  * The first thread in the thread list is treated as the primary thread.
829  * There is nothing special about the primary thread, but in order to keep
830  * all of the taskq pids from changing we opt to make it long running.
831 */
832 static int
833 taskq_thread_should_stop(taskq_t *tq, taskq_thread_t *tqt)
834 {
835 if (!(tq->tq_flags & TASKQ_DYNAMIC))
836 return (0);
837
838 if (list_first_entry(&(tq->tq_thread_list), taskq_thread_t,
839 tqt_thread_list) == tqt)
840 return (0);
841
842 return
843 ((tq->tq_nspawn == 0) && /* No threads are being spawned */
844 (tq->tq_nactive == 0) && /* No threads are handling tasks */
845 (tq->tq_nthreads > 1) && /* More than 1 thread is running */
846 (!taskq_next_ent(tq)) && /* There are no pending tasks */
847 (spl_taskq_thread_dynamic)); /* Dynamic taskqs are allowed */
848 }
849
850 static int
851 taskq_thread(void *args)
852 {
853 DECLARE_WAITQUEUE(wait, current);
854 sigset_t blocked;
855 taskq_thread_t *tqt = args;
856 taskq_t *tq;
857 taskq_ent_t *t;
858 int seq_tasks = 0;
859 unsigned long flags;
860 taskq_ent_t dup_task = {};
861
862 ASSERT(tqt);
863 ASSERT(tqt->tqt_tq);
864 tq = tqt->tqt_tq;
865 current->flags |= PF_NOFREEZE;
866
867 (void) spl_fstrans_mark();
868
869 sigfillset(&blocked);
870 sigprocmask(SIG_BLOCK, &blocked, NULL);
871 flush_signals(current);
872
873 tsd_set(taskq_tsd, tq);
874 spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
875 /*
876 * If we are dynamically spawned, decrease spawning count. Note that
877 * we could be created during taskq_create, in which case we shouldn't
878 * do the decrement. But it's fine because taskq_create will reset
879 * tq_nspawn later.
880 */
881 if (tq->tq_flags & TASKQ_DYNAMIC)
882 tq->tq_nspawn--;
883
884 /* Immediately exit if more threads than allowed were created. */
885 if (tq->tq_nthreads >= tq->tq_maxthreads)
886 goto error;
887
888 tq->tq_nthreads++;
889 list_add_tail(&tqt->tqt_thread_list, &tq->tq_thread_list);
890 wake_up(&tq->tq_wait_waitq);
891 set_current_state(TASK_INTERRUPTIBLE);
892
893 while (!kthread_should_stop()) {
894
895 if (list_empty(&tq->tq_pend_list) &&
896 list_empty(&tq->tq_prio_list)) {
897
898 if (taskq_thread_should_stop(tq, tqt)) {
899 wake_up_all(&tq->tq_wait_waitq);
900 break;
901 }
902
903 add_wait_queue_exclusive(&tq->tq_work_waitq, &wait);
904 spin_unlock_irqrestore(&tq->tq_lock, flags);
905
906 schedule();
907 seq_tasks = 0;
908
909 spin_lock_irqsave_nested(&tq->tq_lock, flags,
910 tq->tq_lock_class);
911 remove_wait_queue(&tq->tq_work_waitq, &wait);
912 } else {
913 __set_current_state(TASK_RUNNING);
914 }
915
916 if ((t = taskq_next_ent(tq)) != NULL) {
917 list_del_init(&t->tqent_list);
918
919 /*
920 * A TQENT_FLAG_PREALLOC task may be reused or freed
921 * during the task function call. Store tqent_id and
922 * tqent_flags here.
923 *
924  * Also use an on-stack taskq_ent_t for the tqt_task
925  * assignment in this case.  We only populate the two
926  * fields used by its only consumer, the taskq proc file.
927 */
928 tqt->tqt_id = t->tqent_id;
929 tqt->tqt_flags = t->tqent_flags;
930
931 if (t->tqent_flags & TQENT_FLAG_PREALLOC) {
932 dup_task.tqent_func = t->tqent_func;
933 dup_task.tqent_arg = t->tqent_arg;
934 t = &dup_task;
935 }
936 tqt->tqt_task = t;
937
938 taskq_insert_in_order(tq, tqt);
939 tq->tq_nactive++;
940 spin_unlock_irqrestore(&tq->tq_lock, flags);
941
942 /* Perform the requested task */
943 t->tqent_func(t->tqent_arg);
944
945 spin_lock_irqsave_nested(&tq->tq_lock, flags,
946 tq->tq_lock_class);
947 tq->tq_nactive--;
948 list_del_init(&tqt->tqt_active_list);
949 tqt->tqt_task = NULL;
950
951 /* For prealloc'd tasks, we don't free anything. */
952 if (!(tqt->tqt_flags & TQENT_FLAG_PREALLOC))
953 task_done(tq, t);
954
955 /*
956 * When the current lowest outstanding taskqid is
957  * done, calculate the new lowest outstanding id.
958 */
959 if (tq->tq_lowest_id == tqt->tqt_id) {
960 tq->tq_lowest_id = taskq_lowest_id(tq);
961 ASSERT3S(tq->tq_lowest_id, >, tqt->tqt_id);
962 }
963
964 /* Spawn additional taskq threads if required. */
965 if ((++seq_tasks) > spl_taskq_thread_sequential &&
966 taskq_thread_spawn(tq))
967 seq_tasks = 0;
968
969 tqt->tqt_id = TASKQID_INVALID;
970 tqt->tqt_flags = 0;
971 wake_up_all(&tq->tq_wait_waitq);
972 } else {
973 if (taskq_thread_should_stop(tq, tqt))
974 break;
975 }
976
977 set_current_state(TASK_INTERRUPTIBLE);
978
979 }
980
981 __set_current_state(TASK_RUNNING);
982 tq->tq_nthreads--;
983 list_del_init(&tqt->tqt_thread_list);
984 error:
985 kmem_free(tqt, sizeof (taskq_thread_t));
986 spin_unlock_irqrestore(&tq->tq_lock, flags);
987
988 tsd_set(taskq_tsd, NULL);
989
990 return (0);
991 }
992
993 static taskq_thread_t *
994 taskq_thread_create(taskq_t *tq)
995 {
996 static int last_used_cpu = 0;
997 taskq_thread_t *tqt;
998
999 tqt = kmem_alloc(sizeof (*tqt), KM_PUSHPAGE);
1000 INIT_LIST_HEAD(&tqt->tqt_thread_list);
1001 INIT_LIST_HEAD(&tqt->tqt_active_list);
1002 tqt->tqt_tq = tq;
1003 tqt->tqt_id = TASKQID_INVALID;
1004
1005 tqt->tqt_thread = spl_kthread_create(taskq_thread, tqt,
1006 "%s", tq->tq_name);
1007 if (tqt->tqt_thread == NULL) {
1008 kmem_free(tqt, sizeof (taskq_thread_t));
1009 return (NULL);
1010 }
1011
1012 if (spl_taskq_thread_bind) {
1013 last_used_cpu = (last_used_cpu + 1) % num_online_cpus();
1014 kthread_bind(tqt->tqt_thread, last_used_cpu);
1015 }
1016
1017 if (spl_taskq_thread_priority)
1018 set_user_nice(tqt->tqt_thread, PRIO_TO_NICE(tq->tq_pri));
1019
1020 wake_up_process(tqt->tqt_thread);
1021
1022 return (tqt);
1023 }
1024
1025 taskq_t *
1026 taskq_create(const char *name, int nthreads, pri_t pri,
1027 int minalloc, int maxalloc, uint_t flags)
1028 {
1029 taskq_t *tq;
1030 taskq_thread_t *tqt;
1031 int count = 0, rc = 0, i;
1032 unsigned long irqflags;
1033
1034 ASSERT(name != NULL);
1035 ASSERT(minalloc >= 0);
1036 ASSERT(maxalloc <= INT_MAX);
1037 ASSERT(!(flags & (TASKQ_CPR_SAFE))); /* Unsupported */
1038
1039 /* Scale the number of threads using nthreads as a percentage */
1040 if (flags & TASKQ_THREADS_CPU_PCT) {
1041 ASSERT(nthreads <= 100);
1042 ASSERT(nthreads >= 0);
1043 nthreads = MIN(nthreads, 100);
1044 nthreads = MAX(nthreads, 0);
1045 nthreads = MAX((num_online_cpus() * nthreads) / 100, 1);
1046 }
1047
1048 tq = kmem_alloc(sizeof (*tq), KM_PUSHPAGE);
1049 if (tq == NULL)
1050 return (NULL);
1051
1052 spin_lock_init(&tq->tq_lock);
1053 INIT_LIST_HEAD(&tq->tq_thread_list);
1054 INIT_LIST_HEAD(&tq->tq_active_list);
1055 tq->tq_name = strdup(name);
1056 tq->tq_nactive = 0;
1057 tq->tq_nthreads = 0;
1058 tq->tq_nspawn = 0;
1059 tq->tq_maxthreads = nthreads;
1060 tq->tq_pri = pri;
1061 tq->tq_minalloc = minalloc;
1062 tq->tq_maxalloc = maxalloc;
1063 tq->tq_nalloc = 0;
1064 tq->tq_flags = (flags | TASKQ_ACTIVE);
1065 tq->tq_next_id = TASKQID_INITIAL;
1066 tq->tq_lowest_id = TASKQID_INITIAL;
1067 INIT_LIST_HEAD(&tq->tq_free_list);
1068 INIT_LIST_HEAD(&tq->tq_pend_list);
1069 INIT_LIST_HEAD(&tq->tq_prio_list);
1070 INIT_LIST_HEAD(&tq->tq_delay_list);
1071 init_waitqueue_head(&tq->tq_work_waitq);
1072 init_waitqueue_head(&tq->tq_wait_waitq);
1073 tq->tq_lock_class = TQ_LOCK_GENERAL;
1074 INIT_LIST_HEAD(&tq->tq_taskqs);
1075
1076 if (flags & TASKQ_PREPOPULATE) {
1077 spin_lock_irqsave_nested(&tq->tq_lock, irqflags,
1078 tq->tq_lock_class);
1079
1080 for (i = 0; i < minalloc; i++)
1081 task_done(tq, task_alloc(tq, TQ_PUSHPAGE | TQ_NEW,
1082 &irqflags));
1083
1084 spin_unlock_irqrestore(&tq->tq_lock, irqflags);
1085 }
1086
1087 if ((flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic)
1088 nthreads = 1;
1089
1090 for (i = 0; i < nthreads; i++) {
1091 tqt = taskq_thread_create(tq);
1092 if (tqt == NULL)
1093 rc = 1;
1094 else
1095 count++;
1096 }
1097
1098 /* Wait for all threads to be started before potential destroy */
1099 wait_event(tq->tq_wait_waitq, tq->tq_nthreads == count);
1100 /*
1101  * The taskq_thread() workers may have decremented tq_nspawn on startup,
1102  * but they were not dynamically spawned, so reset the count to 0.
1103 */
1104 tq->tq_nspawn = 0;
1105
1106 if (rc) {
1107 taskq_destroy(tq);
1108 tq = NULL;
1109 } else {
1110 down_write(&tq_list_sem);
1111 tq->tq_instance = taskq_find_by_name(name) + 1;
1112 list_add_tail(&tq->tq_taskqs, &tq_list);
1113 up_write(&tq_list_sem);
1114 }
1115
1116 return (tq);
1117 }
1118 EXPORT_SYMBOL(taskq_create);
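/*
 * Example (sketch): creating and destroying a private taskq.  The name and
 * thread count are hypothetical; the priority and flags mirror the system
 * taskqs created in spl_taskq_init() below.
 *
 *	taskq_t *tq = taskq_create("my_taskq", 4, maxclsyspri,
 *	    4, INT_MAX, TASKQ_PREPOPULATE);
 *	if (tq == NULL)
 *		return (ENOMEM);
 *	...
 *	taskq_destroy(tq);
 *
 * With TASKQ_THREADS_CPU_PCT the nthreads argument is instead interpreted as
 * a percentage of the online CPUs, and with TASKQ_DYNAMIC (when permitted by
 * spl_taskq_thread_dynamic) only a single thread is created up front and
 * additional threads are spawned on demand.
 */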
1119
1120 void
1121 taskq_destroy(taskq_t *tq)
1122 {
1123 struct task_struct *thread;
1124 taskq_thread_t *tqt;
1125 taskq_ent_t *t;
1126 unsigned long flags;
1127
1128 ASSERT(tq);
1129 spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
1130 tq->tq_flags &= ~TASKQ_ACTIVE;
1131 spin_unlock_irqrestore(&tq->tq_lock, flags);
1132
1133 /*
1134  * When TASKQ_ACTIVE is clear new tasks may not be added, nor may
1135  * new worker threads be spawned for a dynamic taskq.
1136 */
1137 if (dynamic_taskq != NULL)
1138 taskq_wait_outstanding(dynamic_taskq, 0);
1139
1140 taskq_wait(tq);
1141
1142 /* remove taskq from global list used by the kstats */
1143 down_write(&tq_list_sem);
1144 list_del(&tq->tq_taskqs);
1145 up_write(&tq_list_sem);
1146
1147 spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
1149 /* wait for spawning threads to insert themselves into the list */
1149 while (tq->tq_nspawn) {
1150 spin_unlock_irqrestore(&tq->tq_lock, flags);
1151 schedule_timeout_interruptible(1);
1152 spin_lock_irqsave_nested(&tq->tq_lock, flags,
1153 tq->tq_lock_class);
1154 }
1155
1156 /*
1157 * Signal each thread to exit and block until it does. Each thread
1158 * is responsible for removing itself from the list and freeing its
1159 * taskq_thread_t. This allows for idle threads to opt to remove
1160 * themselves from the taskq. They can be recreated as needed.
1161 */
1162 while (!list_empty(&tq->tq_thread_list)) {
1163 tqt = list_entry(tq->tq_thread_list.next,
1164 taskq_thread_t, tqt_thread_list);
1165 thread = tqt->tqt_thread;
1166 spin_unlock_irqrestore(&tq->tq_lock, flags);
1167
1168 kthread_stop(thread);
1169
1170 spin_lock_irqsave_nested(&tq->tq_lock, flags,
1171 tq->tq_lock_class);
1172 }
1173
1174 while (!list_empty(&tq->tq_free_list)) {
1175 t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);
1176
1177 ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));
1178
1179 list_del_init(&t->tqent_list);
1180 task_free(tq, t);
1181 }
1182
1183 ASSERT0(tq->tq_nthreads);
1184 ASSERT0(tq->tq_nalloc);
1185 ASSERT0(tq->tq_nspawn);
1186 ASSERT(list_empty(&tq->tq_thread_list));
1187 ASSERT(list_empty(&tq->tq_active_list));
1188 ASSERT(list_empty(&tq->tq_free_list));
1189 ASSERT(list_empty(&tq->tq_pend_list));
1190 ASSERT(list_empty(&tq->tq_prio_list));
1191 ASSERT(list_empty(&tq->tq_delay_list));
1192
1193 spin_unlock_irqrestore(&tq->tq_lock, flags);
1194
1195 strfree(tq->tq_name);
1196 kmem_free(tq, sizeof (taskq_t));
1197 }
1198 EXPORT_SYMBOL(taskq_destroy);
1199
1200
1201 static unsigned int spl_taskq_kick = 0;
1202
1203 /*
1204 * 2.6.36 API Change
1205  * module_param_cb was introduced, taking a kernel_param_ops structure,
1206  * and module_param_call was marked obsolete.  The set and get operations
1207  * were also changed to take a 'const struct kernel_param *'.
1208 */
1209 static int
1210 #ifdef module_param_cb
1211 param_set_taskq_kick(const char *val, const struct kernel_param *kp)
1212 #else
1213 param_set_taskq_kick(const char *val, struct kernel_param *kp)
1214 #endif
1215 {
1216 int ret;
1217 taskq_t *tq;
1218 taskq_ent_t *t;
1219 unsigned long flags;
1220
1221 ret = param_set_uint(val, kp);
1222 if (ret < 0 || !spl_taskq_kick)
1223 return (ret);
1224 /* reset value */
1225 spl_taskq_kick = 0;
1226
1227 down_read(&tq_list_sem);
1228 list_for_each_entry(tq, &tq_list, tq_taskqs) {
1229 spin_lock_irqsave_nested(&tq->tq_lock, flags,
1230 tq->tq_lock_class);
1231 /* Check if the first pending task is older than 5 seconds */
1232 t = taskq_next_ent(tq);
1233 if (t && time_after(jiffies, t->tqent_birth + 5*HZ)) {
1234 (void) taskq_thread_spawn(tq);
1235 printk(KERN_INFO "spl: Kicked taskq %s/%d\n",
1236 tq->tq_name, tq->tq_instance);
1237 }
1238 spin_unlock_irqrestore(&tq->tq_lock, flags);
1239 }
1240 up_read(&tq_list_sem);
1241 return (ret);
1242 }
1243
1244 #ifdef module_param_cb
1245 static const struct kernel_param_ops param_ops_taskq_kick = {
1246 .set = param_set_taskq_kick,
1247 .get = param_get_uint,
1248 };
1249 module_param_cb(spl_taskq_kick, &param_ops_taskq_kick, &spl_taskq_kick, 0644);
1250 #else
1251 module_param_call(spl_taskq_kick, param_set_taskq_kick, param_get_uint,
1252 &spl_taskq_kick, 0644);
1253 #endif
1254 MODULE_PARM_DESC(spl_taskq_kick,
1255 "Write nonzero to kick stuck taskqs to spawn more threads");
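/*
 * For example, writing a nonzero value to the spl_taskq_kick parameter
 * (typically via its sysfs entry) invokes param_set_taskq_kick() above,
 * which scans every registered taskq and spawns an additional thread for
 * any queue whose oldest pending task has waited more than five seconds.
 */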
1256
1257 int
1258 spl_taskq_init(void)
1259 {
1260 tsd_create(&taskq_tsd, NULL);
1261
1262 system_taskq = taskq_create("spl_system_taskq", MAX(boot_ncpus, 64),
1263 maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE|TASKQ_DYNAMIC);
1264 if (system_taskq == NULL)
1265 return (1);
1266
1267 system_delay_taskq = taskq_create("spl_delay_taskq", MAX(boot_ncpus, 4),
1268 maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE|TASKQ_DYNAMIC);
1269 if (system_delay_taskq == NULL) {
1270 taskq_destroy(system_taskq);
1271 return (1);
1272 }
1273
1274 dynamic_taskq = taskq_create("spl_dynamic_taskq", 1,
1275 maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE);
1276 if (dynamic_taskq == NULL) {
1277 taskq_destroy(system_taskq);
1278 taskq_destroy(system_delay_taskq);
1279 return (1);
1280 }
1281
1282 /*
1283 * This is used to annotate tq_lock, so
1284 * taskq_dispatch -> taskq_thread_spawn -> taskq_dispatch
1285 * does not trigger a lockdep warning re: possible recursive locking
1286 */
1287 dynamic_taskq->tq_lock_class = TQ_LOCK_DYNAMIC;
1288
1289 return (0);
1290 }
1291
1292 void
1293 spl_taskq_fini(void)
1294 {
1295 taskq_destroy(dynamic_taskq);
1296 dynamic_taskq = NULL;
1297
1298 taskq_destroy(system_delay_taskq);
1299 system_delay_taskq = NULL;
1300
1301 taskq_destroy(system_taskq);
1302 system_taskq = NULL;
1303
1304 tsd_destroy(&taskq_tsd);
1305 }