module/spl/spl-taskq.c
1 /*
2 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
3 * Copyright (C) 2007 The Regents of the University of California.
4 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
5 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
6 * UCRL-CODE-235197
7 *
8 * This file is part of the SPL, Solaris Porting Layer.
9 * For details, see <http://zfsonlinux.org/>.
10 *
11 * The SPL is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 *
16 * The SPL is distributed in the hope that it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 * for more details.
20 *
21 * You should have received a copy of the GNU General Public License along
22 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
23 *
24 * Solaris Porting Layer (SPL) Task Queue Implementation.
25 */
26
27 #include <sys/timer.h>
28 #include <sys/taskq.h>
29 #include <sys/kmem.h>
30 #include <sys/tsd.h>
31 #include <sys/simd.h>
32
33 int spl_taskq_thread_bind = 0;
34 module_param(spl_taskq_thread_bind, int, 0644);
35 MODULE_PARM_DESC(spl_taskq_thread_bind, "Bind taskq thread to CPU by default");
36
37
38 int spl_taskq_thread_dynamic = 1;
39 module_param(spl_taskq_thread_dynamic, int, 0644);
40 MODULE_PARM_DESC(spl_taskq_thread_dynamic, "Allow dynamic taskq threads");
41
42 int spl_taskq_thread_priority = 1;
43 module_param(spl_taskq_thread_priority, int, 0644);
44 MODULE_PARM_DESC(spl_taskq_thread_priority,
45 "Allow non-default priority for taskq threads");
46
47 int spl_taskq_thread_sequential = 4;
48 module_param(spl_taskq_thread_sequential, int, 0644);
49 MODULE_PARM_DESC(spl_taskq_thread_sequential,
50 "Create new taskq threads after N sequential tasks");
51
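/*
 * The tunables above are ordinary Linux module parameters (mode 0644), so
 * besides being set at module load time they can normally be adjusted at
 * runtime through sysfs. Illustrative sketch only, assuming the SPL is
 * built as a module named "spl":
 *
 *	modprobe spl spl_taskq_thread_dynamic=0
 *	echo 8 > /sys/module/spl/parameters/spl_taskq_thread_sequential
 */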
52 /* Global system-wide dynamic task queue available for all consumers */
53 taskq_t *system_taskq;
54 EXPORT_SYMBOL(system_taskq);
55 /* Global dynamic task queue for long delay */
56 taskq_t *system_delay_taskq;
57 EXPORT_SYMBOL(system_delay_taskq);
58
59 /* Private dedicated taskq for creating new taskq threads on demand. */
60 static taskq_t *dynamic_taskq;
61 static taskq_thread_t *taskq_thread_create(taskq_t *);
62
63 /* List of all taskqs */
64 LIST_HEAD(tq_list);
65 struct rw_semaphore tq_list_sem;
66 static uint_t taskq_tsd;
67
68 static int
69 task_km_flags(uint_t flags)
70 {
71 if (flags & TQ_NOSLEEP)
72 return (KM_NOSLEEP);
73
74 if (flags & TQ_PUSHPAGE)
75 return (KM_PUSHPAGE);
76
77 return (KM_SLEEP);
78 }
79
80 /*
81 * taskq_find_by_name - Find the largest instance number of a named taskq.
82 */
83 static int
84 taskq_find_by_name(const char *name)
85 {
86 struct list_head *tql;
87 taskq_t *tq;
88
89 list_for_each_prev(tql, &tq_list) {
90 tq = list_entry(tql, taskq_t, tq_taskqs);
91 if (strcmp(name, tq->tq_name) == 0)
92 return (tq->tq_instance);
93 }
94 return (-1);
95 }
96
97 /*
98 * NOTE: Must be called with tq->tq_lock held, returns a taskq_ent_t
99 * which is not attached to the free, work, or pending taskq lists.
100 */
101 static taskq_ent_t *
102 task_alloc(taskq_t *tq, uint_t flags, unsigned long *irqflags)
103 {
104 taskq_ent_t *t;
105 int count = 0;
106
107 ASSERT(tq);
108 retry:
109 /* Acquire taskq_ent_t's from free list if available */
110 if (!list_empty(&tq->tq_free_list) && !(flags & TQ_NEW)) {
111 t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);
112
113 ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));
114 ASSERT(!(t->tqent_flags & TQENT_FLAG_CANCEL));
115 ASSERT(!timer_pending(&t->tqent_timer));
116
117 list_del_init(&t->tqent_list);
118 return (t);
119 }
120
121 /* Free list is empty and memory allocations are prohibited */
122 if (flags & TQ_NOALLOC)
123 return (NULL);
124
125 /* Hit maximum taskq_ent_t pool size */
126 if (tq->tq_nalloc >= tq->tq_maxalloc) {
127 if (flags & TQ_NOSLEEP)
128 return (NULL);
129
130 /*
131 * Sleep periodically polling the free list for an available
132 * taskq_ent_t. Dispatching with TQ_SLEEP should always succeed
133 * but we cannot block forever waiting for a taskq_ent_t to
134 * show up in the free list, otherwise a deadlock can happen.
135 *
136 * Therefore, we need to allocate a new task even if the number
137 * of allocated tasks is above tq->tq_maxalloc, but we still
138 * end up delaying the task allocation by one second, thereby
139 * throttling the task dispatch rate.
140 */
141 spin_unlock_irqrestore(&tq->tq_lock, *irqflags);
142 schedule_timeout(HZ / 100);
143 spin_lock_irqsave_nested(&tq->tq_lock, *irqflags,
144 tq->tq_lock_class);
145 if (count < 100) {
146 count++;
147 goto retry;
148 }
149 }
150
151 spin_unlock_irqrestore(&tq->tq_lock, *irqflags);
152 t = kmem_alloc(sizeof (taskq_ent_t), task_km_flags(flags));
153 spin_lock_irqsave_nested(&tq->tq_lock, *irqflags, tq->tq_lock_class);
154
155 if (t) {
156 taskq_init_ent(t);
157 tq->tq_nalloc++;
158 }
159
160 return (t);
161 }
162
163 /*
164 * NOTE: Must be called with tq->tq_lock held, expects the taskq_ent_t
165 * to already be removed from the free, work, or pending taskq lists.
166 */
167 static void
168 task_free(taskq_t *tq, taskq_ent_t *t)
169 {
170 ASSERT(tq);
171 ASSERT(t);
172 ASSERT(list_empty(&t->tqent_list));
173 ASSERT(!timer_pending(&t->tqent_timer));
174
175 kmem_free(t, sizeof (taskq_ent_t));
176 tq->tq_nalloc--;
177 }
178
179 /*
180 * NOTE: Must be called with tq->tq_lock held, either destroys the
181 * taskq_ent_t if too many exist or moves it to the free list for later use.
182 */
183 static void
184 task_done(taskq_t *tq, taskq_ent_t *t)
185 {
186 ASSERT(tq);
187 ASSERT(t);
188
189 /* Wake tasks blocked in taskq_wait_id() */
190 wake_up_all(&t->tqent_waitq);
191
192 list_del_init(&t->tqent_list);
193
194 if (tq->tq_nalloc <= tq->tq_minalloc) {
195 t->tqent_id = TASKQID_INVALID;
196 t->tqent_func = NULL;
197 t->tqent_arg = NULL;
198 t->tqent_flags = 0;
199
200 list_add_tail(&t->tqent_list, &tq->tq_free_list);
201 } else {
202 task_free(tq, t);
203 }
204 }
205
206 /*
207 * When a delayed task timer expires, remove it from the delay list and
208 * add it to the priority list so that it is processed immediately.
209 */
210 static void
211 task_expire_impl(taskq_ent_t *t)
212 {
213 taskq_ent_t *w;
214 taskq_t *tq = t->tqent_taskq;
215 struct list_head *l;
216 unsigned long flags;
217
218 spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
219
220 if (t->tqent_flags & TQENT_FLAG_CANCEL) {
221 ASSERT(list_empty(&t->tqent_list));
222 spin_unlock_irqrestore(&tq->tq_lock, flags);
223 return;
224 }
225
226 t->tqent_birth = jiffies;
227 /*
228 * The priority list must be maintained in strict task id order
229 * from lowest to highest for lowest_id to be easily calculable.
230 */
231 list_del(&t->tqent_list);
232 list_for_each_prev(l, &tq->tq_prio_list) {
233 w = list_entry(l, taskq_ent_t, tqent_list);
234 if (w->tqent_id < t->tqent_id) {
235 list_add(&t->tqent_list, l);
236 break;
237 }
238 }
239 if (l == &tq->tq_prio_list)
240 list_add(&t->tqent_list, &tq->tq_prio_list);
241
242 spin_unlock_irqrestore(&tq->tq_lock, flags);
243
244 wake_up(&tq->tq_work_waitq);
245 }
246
247 static void
248 task_expire(spl_timer_list_t tl)
249 {
250 struct timer_list *tmr = (struct timer_list *)tl;
251 taskq_ent_t *t = from_timer(t, tmr, tqent_timer);
252 task_expire_impl(t);
253 }
254
255 /*
256 * Returns the lowest incomplete taskqid_t. The taskqid_t may
257 * be queued on the pending list, on the priority list, on the
258 * delay list, or on the work list currently being handled, but
259 * it is not 100% complete yet.
260 */
261 static taskqid_t
262 taskq_lowest_id(taskq_t *tq)
263 {
264 taskqid_t lowest_id = tq->tq_next_id;
265 taskq_ent_t *t;
266 taskq_thread_t *tqt;
267
268 ASSERT(tq);
269
270 if (!list_empty(&tq->tq_pend_list)) {
271 t = list_entry(tq->tq_pend_list.next, taskq_ent_t, tqent_list);
272 lowest_id = MIN(lowest_id, t->tqent_id);
273 }
274
275 if (!list_empty(&tq->tq_prio_list)) {
276 t = list_entry(tq->tq_prio_list.next, taskq_ent_t, tqent_list);
277 lowest_id = MIN(lowest_id, t->tqent_id);
278 }
279
280 if (!list_empty(&tq->tq_delay_list)) {
281 t = list_entry(tq->tq_delay_list.next, taskq_ent_t, tqent_list);
282 lowest_id = MIN(lowest_id, t->tqent_id);
283 }
284
285 if (!list_empty(&tq->tq_active_list)) {
286 tqt = list_entry(tq->tq_active_list.next, taskq_thread_t,
287 tqt_active_list);
288 ASSERT(tqt->tqt_id != TASKQID_INVALID);
289 lowest_id = MIN(lowest_id, tqt->tqt_id);
290 }
291
292 return (lowest_id);
293 }
294
295 /*
296 * Insert a task into a list keeping the list sorted by increasing taskqid.
297 */
298 static void
299 taskq_insert_in_order(taskq_t *tq, taskq_thread_t *tqt)
300 {
301 taskq_thread_t *w;
302 struct list_head *l;
303
304 ASSERT(tq);
305 ASSERT(tqt);
306
307 list_for_each_prev(l, &tq->tq_active_list) {
308 w = list_entry(l, taskq_thread_t, tqt_active_list);
309 if (w->tqt_id < tqt->tqt_id) {
310 list_add(&tqt->tqt_active_list, l);
311 break;
312 }
313 }
314 if (l == &tq->tq_active_list)
315 list_add(&tqt->tqt_active_list, &tq->tq_active_list);
316 }
317
318 /*
319 * Find and return a task from the given list if it exists. The list
320 * must be in lowest to highest task id order.
321 */
322 static taskq_ent_t *
323 taskq_find_list(taskq_t *tq, struct list_head *lh, taskqid_t id)
324 {
325 struct list_head *l;
326 taskq_ent_t *t;
327
328 list_for_each(l, lh) {
329 t = list_entry(l, taskq_ent_t, tqent_list);
330
331 if (t->tqent_id == id)
332 return (t);
333
334 if (t->tqent_id > id)
335 break;
336 }
337
338 return (NULL);
339 }
340
341 /*
342 * Find an already dispatched task given the task id regardless of what
343 * state it is in. If a task is still pending it will be returned.
344 * If a task is executing, then -EBUSY will be returned instead.
345 * If the task has already been run then NULL is returned.
346 */
347 static taskq_ent_t *
348 taskq_find(taskq_t *tq, taskqid_t id)
349 {
350 taskq_thread_t *tqt;
351 struct list_head *l;
352 taskq_ent_t *t;
353
354 t = taskq_find_list(tq, &tq->tq_delay_list, id);
355 if (t)
356 return (t);
357
358 t = taskq_find_list(tq, &tq->tq_prio_list, id);
359 if (t)
360 return (t);
361
362 t = taskq_find_list(tq, &tq->tq_pend_list, id);
363 if (t)
364 return (t);
365
366 list_for_each(l, &tq->tq_active_list) {
367 tqt = list_entry(l, taskq_thread_t, tqt_active_list);
368 if (tqt->tqt_id == id) {
369 /*
370 * Instead of returning tqt_task, we just return a non
371 * NULL value to prevent misuse, since tqt_task only
372 * has two valid fields.
373 */
374 return (ERR_PTR(-EBUSY));
375 }
376 }
377
378 return (NULL);
379 }
380
381 /*
382 * Theory for the taskq_wait_id(), taskq_wait_outstanding(), and
383 * taskq_wait() functions below.
384 *
385 * Taskq waiting is accomplished by tracking the lowest outstanding task
386 * id and the next available task id. As tasks are dispatched they are
387 * added to the tail of the pending, priority, or delay lists. As worker
388 * threads become available the tasks are removed from the heads of these
389 * lists and linked to the worker threads. This ensures the lists are
390 * kept sorted by lowest to highest task id.
391 *
392 * Therefore the lowest outstanding task id can be quickly determined by
393 * checking the head item from all of these lists. This value is stored
394 * with the taskq as the lowest id. It only needs to be recalculated when
395 * either the task with the current lowest id completes or is canceled.
396 *
397 * By blocking until the lowest task id exceeds the passed task id the
398 * taskq_wait_outstanding() function can be easily implemented. Similarly,
399 * by blocking until the lowest task id matches the next task id taskq_wait()
400 * can be implemented.
401 *
402 * Callers should be aware that when there are multiple worker threads it
403 * is possible for larger task ids to complete before smaller ones. Also
404 * when the taskq contains delay tasks with small task ids callers may
405 * block for a considerable length of time waiting for them to expire and
406 * execute.
407 */
408 static int
409 taskq_wait_id_check(taskq_t *tq, taskqid_t id)
410 {
411 int rc;
412 unsigned long flags;
413
414 spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
415 rc = (taskq_find(tq, id) == NULL);
416 spin_unlock_irqrestore(&tq->tq_lock, flags);
417
418 return (rc);
419 }
420
421 /*
422 * The taskq_wait_id() function blocks until the passed task id completes.
423 * This does not guarantee that all lower task ids have completed.
424 */
425 void
426 taskq_wait_id(taskq_t *tq, taskqid_t id)
427 {
428 wait_event(tq->tq_wait_waitq, taskq_wait_id_check(tq, id));
429 }
430 EXPORT_SYMBOL(taskq_wait_id);
431
432 static int
433 taskq_wait_outstanding_check(taskq_t *tq, taskqid_t id)
434 {
435 int rc;
436 unsigned long flags;
437
438 spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
439 rc = (id < tq->tq_lowest_id);
440 spin_unlock_irqrestore(&tq->tq_lock, flags);
441
442 return (rc);
443 }
444
445 /*
446 * The taskq_wait_outstanding() function will block until all tasks with a
447 * lower taskqid than the passed 'id' have been completed. Note that all
448 * task id's are assigned monotonically at dispatch time. Zero may be
449 * passed for the id to indicate all tasks dispatch up to this point,
450 * but not after, should be waited for.
451 */
452 void
453 taskq_wait_outstanding(taskq_t *tq, taskqid_t id)
454 {
455 id = id ? id : tq->tq_next_id - 1;
456 wait_event(tq->tq_wait_waitq, taskq_wait_outstanding_check(tq, id));
457 }
458 EXPORT_SYMBOL(taskq_wait_outstanding);
459
460 static int
461 taskq_wait_check(taskq_t *tq)
462 {
463 int rc;
464 unsigned long flags;
465
466 spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
467 rc = (tq->tq_lowest_id == tq->tq_next_id);
468 spin_unlock_irqrestore(&tq->tq_lock, flags);
469
470 return (rc);
471 }
472
473 /*
474 * The taskq_wait() function will block until the taskq is empty.
475 * This means that if a taskq re-dispatches work to itself taskq_wait()
476 * callers will block indefinitely.
477 */
478 void
479 taskq_wait(taskq_t *tq)
480 {
481 wait_event(tq->tq_wait_waitq, taskq_wait_check(tq));
482 }
483 EXPORT_SYMBOL(taskq_wait);
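/*
 * Illustrative sketch contrasting the wait primitives above (my_func and
 * my_arg are hypothetical): taskq_wait_id() waits for one specific task,
 * taskq_wait_outstanding(tq, 0) waits for everything dispatched so far, and
 * taskq_wait() waits until the taskq is completely empty.
 *
 *	taskqid_t id = taskq_dispatch(tq, my_func, my_arg, TQ_SLEEP);
 *
 *	taskq_wait_id(tq, id);
 *	taskq_wait_outstanding(tq, 0);
 *	taskq_wait(tq);
 */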
484
485 int
486 taskq_member(taskq_t *tq, kthread_t *t)
487 {
488 return (tq == (taskq_t *)tsd_get_by_thread(taskq_tsd, t));
489 }
490 EXPORT_SYMBOL(taskq_member);
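/*
 * Illustrative sketch (assumption: curthread is the SPL alias for the
 * current kthread): taskq_member() is commonly used to avoid self-deadlock
 * by refusing to wait on a taskq from within one of its own worker threads.
 *
 *	if (taskq_member(tq, curthread))
 *		return (EWOULDBLOCK);
 *	taskq_wait(tq);
 */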
491
492 /*
493 * Cancel an already dispatched task given the task id. Still pending tasks
494 * will be immediately canceled, and if the task is active the function will
495 * block until it completes. Preallocated tasks which are canceled must be
496 * freed by the caller.
497 */
498 int
499 taskq_cancel_id(taskq_t *tq, taskqid_t id)
500 {
501 taskq_ent_t *t;
502 int rc = ENOENT;
503 unsigned long flags;
504
505 ASSERT(tq);
506
507 spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
508 t = taskq_find(tq, id);
509 if (t && t != ERR_PTR(-EBUSY)) {
510 list_del_init(&t->tqent_list);
511 t->tqent_flags |= TQENT_FLAG_CANCEL;
512
513 /*
514 * When canceling the lowest outstanding task id we
515 * must recalculate the new lowest outstanding id.
516 */
517 if (tq->tq_lowest_id == t->tqent_id) {
518 tq->tq_lowest_id = taskq_lowest_id(tq);
519 ASSERT3S(tq->tq_lowest_id, >, t->tqent_id);
520 }
521
522 /*
523 * The task_expire() function takes the tq->tq_lock so drop the
524 * lock before synchronously cancelling the timer.
525 */
526 if (timer_pending(&t->tqent_timer)) {
527 spin_unlock_irqrestore(&tq->tq_lock, flags);
528 del_timer_sync(&t->tqent_timer);
529 spin_lock_irqsave_nested(&tq->tq_lock, flags,
530 tq->tq_lock_class);
531 }
532
533 if (!(t->tqent_flags & TQENT_FLAG_PREALLOC))
534 task_done(tq, t);
535
536 rc = 0;
537 }
538 spin_unlock_irqrestore(&tq->tq_lock, flags);
539
540 if (t == ERR_PTR(-EBUSY)) {
541 taskq_wait_id(tq, id);
542 rc = EBUSY;
543 }
544
545 return (rc);
546 }
547 EXPORT_SYMBOL(taskq_cancel_id);
548
549 static int taskq_thread_spawn(taskq_t *tq);
550
551 taskqid_t
552 taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
553 {
554 taskq_ent_t *t;
555 taskqid_t rc = TASKQID_INVALID;
556 unsigned long irqflags;
557
558 ASSERT(tq);
559 ASSERT(func);
560
561 spin_lock_irqsave_nested(&tq->tq_lock, irqflags, tq->tq_lock_class);
562
563 /* Taskq being destroyed and all tasks drained */
564 if (!(tq->tq_flags & TASKQ_ACTIVE))
565 goto out;
566
567 /* Do not queue the task unless there is an idle thread for it */
568 ASSERT(tq->tq_nactive <= tq->tq_nthreads);
569 if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads)) {
570 /* Dynamic taskq may be able to spawn another thread */
571 if (!(tq->tq_flags & TASKQ_DYNAMIC) ||
572 taskq_thread_spawn(tq) == 0)
573 goto out;
574 }
575
576 if ((t = task_alloc(tq, flags, &irqflags)) == NULL)
577 goto out;
578
579 spin_lock(&t->tqent_lock);
580
581 /* Queue to the front of the list to enforce TQ_NOQUEUE semantics */
582 if (flags & TQ_NOQUEUE)
583 list_add(&t->tqent_list, &tq->tq_prio_list);
584 /* Queue to the priority list instead of the pending list */
585 else if (flags & TQ_FRONT)
586 list_add_tail(&t->tqent_list, &tq->tq_prio_list);
587 else
588 list_add_tail(&t->tqent_list, &tq->tq_pend_list);
589
590 t->tqent_id = rc = tq->tq_next_id;
591 tq->tq_next_id++;
592 t->tqent_func = func;
593 t->tqent_arg = arg;
594 t->tqent_taskq = tq;
595 t->tqent_timer.function = NULL;
596 t->tqent_timer.expires = 0;
597 t->tqent_birth = jiffies;
598
599 ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));
600
601 spin_unlock(&t->tqent_lock);
602
603 wake_up(&tq->tq_work_waitq);
604 out:
605 /* Spawn additional taskq threads if required. */
606 if (!(flags & TQ_NOQUEUE) && tq->tq_nactive == tq->tq_nthreads)
607 (void) taskq_thread_spawn(tq);
608
609 spin_unlock_irqrestore(&tq->tq_lock, irqflags);
610 return (rc);
611 }
612 EXPORT_SYMBOL(taskq_dispatch);
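/*
 * Illustrative sketch (my_func/my_arg hypothetical): a dispatch may fail and
 * return TASKQID_INVALID, e.g. with TQ_NOSLEEP when no taskq_ent_t can be
 * allocated or with TQ_NOQUEUE when no idle thread is available, so callers
 * should check the returned id.
 *
 *	taskqid_t id = taskq_dispatch(tq, my_func, my_arg, TQ_NOSLEEP);
 *	if (id == TASKQID_INVALID)
 *		id = taskq_dispatch(tq, my_func, my_arg, TQ_SLEEP);
 */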
613
614 taskqid_t
615 taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg,
616 uint_t flags, clock_t expire_time)
617 {
618 taskqid_t rc = TASKQID_INVALID;
619 taskq_ent_t *t;
620 unsigned long irqflags;
621
622 ASSERT(tq);
623 ASSERT(func);
624
625 spin_lock_irqsave_nested(&tq->tq_lock, irqflags, tq->tq_lock_class);
626
627 /* Taskq being destroyed and all tasks drained */
628 if (!(tq->tq_flags & TASKQ_ACTIVE))
629 goto out;
630
631 if ((t = task_alloc(tq, flags, &irqflags)) == NULL)
632 goto out;
633
634 spin_lock(&t->tqent_lock);
635
636 /* Queue to the delay list for subsequent execution */
637 list_add_tail(&t->tqent_list, &tq->tq_delay_list);
638
639 t->tqent_id = rc = tq->tq_next_id;
640 tq->tq_next_id++;
641 t->tqent_func = func;
642 t->tqent_arg = arg;
643 t->tqent_taskq = tq;
644 t->tqent_timer.function = task_expire;
645 t->tqent_timer.expires = (unsigned long)expire_time;
646 add_timer(&t->tqent_timer);
647
648 ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));
649
650 spin_unlock(&t->tqent_lock);
651 out:
652 /* Spawn additional taskq threads if required. */
653 if (tq->tq_nactive == tq->tq_nthreads)
654 (void) taskq_thread_spawn(tq);
655 spin_unlock_irqrestore(&tq->tq_lock, irqflags);
656 return (rc);
657 }
658 EXPORT_SYMBOL(taskq_dispatch_delay);
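/*
 * Illustrative sketch (my_func/my_arg hypothetical; assumes ddi_get_lbolt()
 * and SEC_TO_TICK() from sys/timer.h): dispatch a task roughly ten seconds
 * from now, then cancel it if it has not yet run. taskq_cancel_id() returns
 * 0 if the task was cancelled, ENOENT if it already completed, and EBUSY if
 * it was running (after waiting for it to finish).
 *
 *	taskqid_t id = taskq_dispatch_delay(tq, my_func, my_arg, TQ_SLEEP,
 *	    ddi_get_lbolt() + SEC_TO_TICK(10));
 *	...
 *	error = taskq_cancel_id(tq, id);
 */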
659
660 void
661 taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
662 taskq_ent_t *t)
663 {
664 unsigned long irqflags;
665 ASSERT(tq);
666 ASSERT(func);
667
668 spin_lock_irqsave_nested(&tq->tq_lock, irqflags,
669 tq->tq_lock_class);
670
671 /* Taskq being destroyed and all tasks drained */
672 if (!(tq->tq_flags & TASKQ_ACTIVE)) {
673 t->tqent_id = TASKQID_INVALID;
674 goto out;
675 }
676
677 if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads)) {
678 /* Dynamic taskq may be able to spawn another thread */
679 if (!(tq->tq_flags & TASKQ_DYNAMIC) ||
680 taskq_thread_spawn(tq) == 0)
681 goto out2;
682 flags |= TQ_FRONT;
683 }
684
685 spin_lock(&t->tqent_lock);
686
687 /*
688 * Make sure the entry is not already on another taskq; it is important
689 * to perform this ASSERT() while holding the lock.
690 */
691 ASSERT(taskq_empty_ent(t));
692
693 /*
694 * Mark it as a prealloc'd task. This is important
695 * to ensure that we don't free it later.
696 */
697 t->tqent_flags |= TQENT_FLAG_PREALLOC;
698
699 /* Queue to the priority list instead of the pending list */
700 if (flags & TQ_FRONT)
701 list_add_tail(&t->tqent_list, &tq->tq_prio_list);
702 else
703 list_add_tail(&t->tqent_list, &tq->tq_pend_list);
704
705 t->tqent_id = tq->tq_next_id;
706 tq->tq_next_id++;
707 t->tqent_func = func;
708 t->tqent_arg = arg;
709 t->tqent_taskq = tq;
710 t->tqent_birth = jiffies;
711
712 spin_unlock(&t->tqent_lock);
713
714 wake_up(&tq->tq_work_waitq);
715 out:
716 /* Spawn additional taskq threads if required. */
717 if (tq->tq_nactive == tq->tq_nthreads)
718 (void) taskq_thread_spawn(tq);
719 out2:
720 spin_unlock_irqrestore(&tq->tq_lock, irqflags);
721 }
722 EXPORT_SYMBOL(taskq_dispatch_ent);
723
724 int
725 taskq_empty_ent(taskq_ent_t *t)
726 {
727 return (list_empty(&t->tqent_list));
728 }
729 EXPORT_SYMBOL(taskq_empty_ent);
730
731 void
732 taskq_init_ent(taskq_ent_t *t)
733 {
734 spin_lock_init(&t->tqent_lock);
735 init_waitqueue_head(&t->tqent_waitq);
736 timer_setup(&t->tqent_timer, NULL, 0);
737 INIT_LIST_HEAD(&t->tqent_list);
738 t->tqent_id = 0;
739 t->tqent_func = NULL;
740 t->tqent_arg = NULL;
741 t->tqent_flags = 0;
742 t->tqent_taskq = NULL;
743 }
744 EXPORT_SYMBOL(taskq_init_ent);
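/*
 * Illustrative sketch of the preallocated entry interface (my_obj_t,
 * obj_tqent, and my_func are hypothetical): the caller owns the embedded
 * taskq_ent_t and its lifetime, which is why entries marked
 * TQENT_FLAG_PREALLOC are never freed by the taskq itself.
 *
 *	my_obj_t *obj = kmem_zalloc(sizeof (*obj), KM_SLEEP);
 *	taskq_init_ent(&obj->obj_tqent);
 *	...
 *	taskq_dispatch_ent(tq, my_func, obj, TQ_SLEEP, &obj->obj_tqent);
 */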
745
746 /*
747 * Return the next pending task; preference is given to tasks on the
748 * priority list which were dispatched with TQ_FRONT.
749 */
750 static taskq_ent_t *
751 taskq_next_ent(taskq_t *tq)
752 {
753 struct list_head *list;
754
755 if (!list_empty(&tq->tq_prio_list))
756 list = &tq->tq_prio_list;
757 else if (!list_empty(&tq->tq_pend_list))
758 list = &tq->tq_pend_list;
759 else
760 return (NULL);
761
762 return (list_entry(list->next, taskq_ent_t, tqent_list));
763 }
764
765 /*
766 * Spawns a new thread for the specified taskq.
767 */
768 static void
769 taskq_thread_spawn_task(void *arg)
770 {
771 taskq_t *tq = (taskq_t *)arg;
772 unsigned long flags;
773
774 if (taskq_thread_create(tq) == NULL) {
775 /* restore spawning count if failed */
776 spin_lock_irqsave_nested(&tq->tq_lock, flags,
777 tq->tq_lock_class);
778 tq->tq_nspawn--;
779 spin_unlock_irqrestore(&tq->tq_lock, flags);
780 }
781 }
782
783 /*
784 * Spawn additional threads for dynamic taskqs (TASKQ_DYNAMIC) when the
785 * current number of threads is insufficient to handle pending tasks. These
786 * new threads must be created by the dedicated dynamic_taskq to avoid
787 * deadlocks between thread creation and memory reclaim. The system_taskq
788 * which is also a dynamic taskq cannot be safely used for this.
789 */
790 static int
791 taskq_thread_spawn(taskq_t *tq)
792 {
793 int spawning = 0;
794
795 if (!(tq->tq_flags & TASKQ_DYNAMIC))
796 return (0);
797
798 if ((tq->tq_nthreads + tq->tq_nspawn < tq->tq_maxthreads) &&
799 (tq->tq_flags & TASKQ_ACTIVE)) {
800 spawning = (++tq->tq_nspawn);
801 taskq_dispatch(dynamic_taskq, taskq_thread_spawn_task,
802 tq, TQ_NOSLEEP);
803 }
804
805 return (spawning);
806 }
807
808 /*
809 * Threads in a dynamic taskq should only exit once it has been completely
810 * drained and no other threads are actively servicing tasks. This prevents
811 * threads from being created and destroyed more than is required.
812 *
813 * The first thread in the thread list is treated as the primary thread.
814 * There is nothing special about the primary thread but in order to avoid
815 * all the taskq pids from changing we opt to make it long running.
816 */
817 static int
818 taskq_thread_should_stop(taskq_t *tq, taskq_thread_t *tqt)
819 {
820 if (!(tq->tq_flags & TASKQ_DYNAMIC))
821 return (0);
822
823 if (list_first_entry(&(tq->tq_thread_list), taskq_thread_t,
824 tqt_thread_list) == tqt)
825 return (0);
826
827 return
828 ((tq->tq_nspawn == 0) && /* No threads are being spawned */
829 (tq->tq_nactive == 0) && /* No threads are handling tasks */
830 (tq->tq_nthreads > 1) && /* More than 1 thread is running */
831 (!taskq_next_ent(tq)) && /* There are no pending tasks */
832 (spl_taskq_thread_dynamic)); /* Dynamic taskqs are allowed */
833 }
834
835 static int
836 taskq_thread(void *args)
837 {
838 DECLARE_WAITQUEUE(wait, current);
839 sigset_t blocked;
840 taskq_thread_t *tqt = args;
841 taskq_t *tq;
842 taskq_ent_t *t;
843 int seq_tasks = 0;
844 unsigned long flags;
845 taskq_ent_t dup_task = {};
846
847 ASSERT(tqt);
848 ASSERT(tqt->tqt_tq);
849 tq = tqt->tqt_tq;
850 current->flags |= PF_NOFREEZE;
851
852 (void) spl_fstrans_mark();
853
854 sigfillset(&blocked);
855 sigprocmask(SIG_BLOCK, &blocked, NULL);
856 flush_signals(current);
857 kfpu_initialize();
858
859 tsd_set(taskq_tsd, tq);
860 spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
861 /*
862 * If we are dynamically spawned, decrease spawning count. Note that
863 * we could be created during taskq_create, in which case we shouldn't
864 * do the decrement. But it's fine because taskq_create will reset
865 * tq_nspawn later.
866 */
867 if (tq->tq_flags & TASKQ_DYNAMIC)
868 tq->tq_nspawn--;
869
870 /* Immediately exit if more threads than allowed were created. */
871 if (tq->tq_nthreads >= tq->tq_maxthreads)
872 goto error;
873
874 tq->tq_nthreads++;
875 list_add_tail(&tqt->tqt_thread_list, &tq->tq_thread_list);
876 wake_up(&tq->tq_wait_waitq);
877 set_current_state(TASK_INTERRUPTIBLE);
878
879 while (!kthread_should_stop()) {
880
881 if (list_empty(&tq->tq_pend_list) &&
882 list_empty(&tq->tq_prio_list)) {
883
884 if (taskq_thread_should_stop(tq, tqt)) {
885 wake_up_all(&tq->tq_wait_waitq);
886 break;
887 }
888
889 add_wait_queue_exclusive(&tq->tq_work_waitq, &wait);
890 spin_unlock_irqrestore(&tq->tq_lock, flags);
891
892 schedule();
893 seq_tasks = 0;
894
895 spin_lock_irqsave_nested(&tq->tq_lock, flags,
896 tq->tq_lock_class);
897 remove_wait_queue(&tq->tq_work_waitq, &wait);
898 } else {
899 __set_current_state(TASK_RUNNING);
900 }
901
902 if ((t = taskq_next_ent(tq)) != NULL) {
903 list_del_init(&t->tqent_list);
904
905 /*
906 * A TQENT_FLAG_PREALLOC task may be reused or freed
907 * during the task function call. Store tqent_id and
908 * tqent_flags here.
909 *
910 * Also use an on stack taskq_ent_t for tqt_task
911 * assignment in this case. We only populate the two
912 * fields used by the only user in taskq proc file.
913 */
914 tqt->tqt_id = t->tqent_id;
915 tqt->tqt_flags = t->tqent_flags;
916
917 if (t->tqent_flags & TQENT_FLAG_PREALLOC) {
918 dup_task.tqent_func = t->tqent_func;
919 dup_task.tqent_arg = t->tqent_arg;
920 t = &dup_task;
921 }
922 tqt->tqt_task = t;
923
924 taskq_insert_in_order(tq, tqt);
925 tq->tq_nactive++;
926 spin_unlock_irqrestore(&tq->tq_lock, flags);
927
928 /* Perform the requested task */
929 t->tqent_func(t->tqent_arg);
930
931 spin_lock_irqsave_nested(&tq->tq_lock, flags,
932 tq->tq_lock_class);
933 tq->tq_nactive--;
934 list_del_init(&tqt->tqt_active_list);
935 tqt->tqt_task = NULL;
936
937 /* For prealloc'd tasks, we don't free anything. */
938 if (!(tqt->tqt_flags & TQENT_FLAG_PREALLOC))
939 task_done(tq, t);
940
941 /*
942 * When the current lowest outstanding taskqid is
943 * done, calculate the new lowest outstanding id.
944 */
945 if (tq->tq_lowest_id == tqt->tqt_id) {
946 tq->tq_lowest_id = taskq_lowest_id(tq);
947 ASSERT3S(tq->tq_lowest_id, >, tqt->tqt_id);
948 }
949
950 /* Spawn additional taskq threads if required. */
951 if ((++seq_tasks) > spl_taskq_thread_sequential &&
952 taskq_thread_spawn(tq))
953 seq_tasks = 0;
954
955 tqt->tqt_id = TASKQID_INVALID;
956 tqt->tqt_flags = 0;
957 wake_up_all(&tq->tq_wait_waitq);
958 } else {
959 if (taskq_thread_should_stop(tq, tqt))
960 break;
961 }
962
963 set_current_state(TASK_INTERRUPTIBLE);
964
965 }
966
967 __set_current_state(TASK_RUNNING);
968 tq->tq_nthreads--;
969 list_del_init(&tqt->tqt_thread_list);
970 error:
971 kmem_free(tqt, sizeof (taskq_thread_t));
972 spin_unlock_irqrestore(&tq->tq_lock, flags);
973
974 tsd_set(taskq_tsd, NULL);
975
976 return (0);
977 }
978
979 static taskq_thread_t *
980 taskq_thread_create(taskq_t *tq)
981 {
982 static int last_used_cpu = 0;
983 taskq_thread_t *tqt;
984
985 tqt = kmem_alloc(sizeof (*tqt), KM_PUSHPAGE);
986 INIT_LIST_HEAD(&tqt->tqt_thread_list);
987 INIT_LIST_HEAD(&tqt->tqt_active_list);
988 tqt->tqt_tq = tq;
989 tqt->tqt_id = TASKQID_INVALID;
990
991 tqt->tqt_thread = spl_kthread_create(taskq_thread, tqt,
992 "%s", tq->tq_name);
993 if (tqt->tqt_thread == NULL) {
994 kmem_free(tqt, sizeof (taskq_thread_t));
995 return (NULL);
996 }
997
998 if (spl_taskq_thread_bind) {
999 last_used_cpu = (last_used_cpu + 1) % num_online_cpus();
1000 kthread_bind(tqt->tqt_thread, last_used_cpu);
1001 }
1002
1003 if (spl_taskq_thread_priority)
1004 set_user_nice(tqt->tqt_thread, PRIO_TO_NICE(tq->tq_pri));
1005
1006 wake_up_process(tqt->tqt_thread);
1007
1008 return (tqt);
1009 }
1010
1011 taskq_t *
1012 taskq_create(const char *name, int nthreads, pri_t pri,
1013 int minalloc, int maxalloc, uint_t flags)
1014 {
1015 taskq_t *tq;
1016 taskq_thread_t *tqt;
1017 int count = 0, rc = 0, i;
1018 unsigned long irqflags;
1019
1020 ASSERT(name != NULL);
1021 ASSERT(minalloc >= 0);
1022 ASSERT(maxalloc <= INT_MAX);
1023 ASSERT(!(flags & (TASKQ_CPR_SAFE))); /* Unsupported */
1024
1025 /* Scale the number of threads using nthreads as a percentage */
1026 if (flags & TASKQ_THREADS_CPU_PCT) {
1027 ASSERT(nthreads <= 100);
1028 ASSERT(nthreads >= 0);
1029 nthreads = MIN(nthreads, 100);
1030 nthreads = MAX(nthreads, 0);
1031 nthreads = MAX((num_online_cpus() * nthreads) / 100, 1);
1032 }
1033
1034 tq = kmem_alloc(sizeof (*tq), KM_PUSHPAGE);
1035 if (tq == NULL)
1036 return (NULL);
1037
1038 spin_lock_init(&tq->tq_lock);
1039 INIT_LIST_HEAD(&tq->tq_thread_list);
1040 INIT_LIST_HEAD(&tq->tq_active_list);
1041 tq->tq_name = strdup(name);
1042 tq->tq_nactive = 0;
1043 tq->tq_nthreads = 0;
1044 tq->tq_nspawn = 0;
1045 tq->tq_maxthreads = nthreads;
1046 tq->tq_pri = pri;
1047 tq->tq_minalloc = minalloc;
1048 tq->tq_maxalloc = maxalloc;
1049 tq->tq_nalloc = 0;
1050 tq->tq_flags = (flags | TASKQ_ACTIVE);
1051 tq->tq_next_id = TASKQID_INITIAL;
1052 tq->tq_lowest_id = TASKQID_INITIAL;
1053 INIT_LIST_HEAD(&tq->tq_free_list);
1054 INIT_LIST_HEAD(&tq->tq_pend_list);
1055 INIT_LIST_HEAD(&tq->tq_prio_list);
1056 INIT_LIST_HEAD(&tq->tq_delay_list);
1057 init_waitqueue_head(&tq->tq_work_waitq);
1058 init_waitqueue_head(&tq->tq_wait_waitq);
1059 tq->tq_lock_class = TQ_LOCK_GENERAL;
1060 INIT_LIST_HEAD(&tq->tq_taskqs);
1061
1062 if (flags & TASKQ_PREPOPULATE) {
1063 spin_lock_irqsave_nested(&tq->tq_lock, irqflags,
1064 tq->tq_lock_class);
1065
1066 for (i = 0; i < minalloc; i++)
1067 task_done(tq, task_alloc(tq, TQ_PUSHPAGE | TQ_NEW,
1068 &irqflags));
1069
1070 spin_unlock_irqrestore(&tq->tq_lock, irqflags);
1071 }
1072
1073 if ((flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic)
1074 nthreads = 1;
1075
1076 for (i = 0; i < nthreads; i++) {
1077 tqt = taskq_thread_create(tq);
1078 if (tqt == NULL)
1079 rc = 1;
1080 else
1081 count++;
1082 }
1083
1084 /* Wait for all threads to be started before potential destroy */
1085 wait_event(tq->tq_wait_waitq, tq->tq_nthreads == count);
1086 /*
1087 * The taskq_thread()s started above may have decremented tq_nspawn, but
1088 * they are not dynamically spawned threads, so reset the count to 0.
1089 */
1090 tq->tq_nspawn = 0;
1091
1092 if (rc) {
1093 taskq_destroy(tq);
1094 tq = NULL;
1095 } else {
1096 down_write(&tq_list_sem);
1097 tq->tq_instance = taskq_find_by_name(name) + 1;
1098 list_add_tail(&tq->tq_taskqs, &tq_list);
1099 up_write(&tq_list_sem);
1100 }
1101
1102 return (tq);
1103 }
1104 EXPORT_SYMBOL(taskq_create);
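/*
 * Illustrative sketch (name and sizing are examples only): create a dynamic
 * taskq whose maximum thread count is 75% of the online CPUs, then tear it
 * down with taskq_destroy() once all consumers are finished with it.
 *
 *	taskq_t *tq = taskq_create("my_taskq", 75, maxclsyspri, 0, INT_MAX,
 *	    TASKQ_THREADS_CPU_PCT | TASKQ_DYNAMIC);
 *	...
 *	taskq_destroy(tq);
 */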
1105
1106 void
1107 taskq_destroy(taskq_t *tq)
1108 {
1109 struct task_struct *thread;
1110 taskq_thread_t *tqt;
1111 taskq_ent_t *t;
1112 unsigned long flags;
1113
1114 ASSERT(tq);
1115 spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
1116 tq->tq_flags &= ~TASKQ_ACTIVE;
1117 spin_unlock_irqrestore(&tq->tq_lock, flags);
1118
1119 /*
1120 * When TASKQ_ACTIVE is clear, new tasks may not be added and no new
1121 * worker threads may be spawned for dynamic taskqs.
1122 */
1123 if (dynamic_taskq != NULL)
1124 taskq_wait_outstanding(dynamic_taskq, 0);
1125
1126 taskq_wait(tq);
1127
1128 /* remove taskq from global list used by the kstats */
1129 down_write(&tq_list_sem);
1130 list_del(&tq->tq_taskqs);
1131 up_write(&tq_list_sem);
1132
1133 spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
1134 /* wait for spawning threads to insert themselves into the list */
1135 while (tq->tq_nspawn) {
1136 spin_unlock_irqrestore(&tq->tq_lock, flags);
1137 schedule_timeout_interruptible(1);
1138 spin_lock_irqsave_nested(&tq->tq_lock, flags,
1139 tq->tq_lock_class);
1140 }
1141
1142 /*
1143 * Signal each thread to exit and block until it does. Each thread
1144 * is responsible for removing itself from the list and freeing its
1145 * taskq_thread_t. This allows for idle threads to opt to remove
1146 * themselves from the taskq. They can be recreated as needed.
1147 */
1148 while (!list_empty(&tq->tq_thread_list)) {
1149 tqt = list_entry(tq->tq_thread_list.next,
1150 taskq_thread_t, tqt_thread_list);
1151 thread = tqt->tqt_thread;
1152 spin_unlock_irqrestore(&tq->tq_lock, flags);
1153
1154 kthread_stop(thread);
1155
1156 spin_lock_irqsave_nested(&tq->tq_lock, flags,
1157 tq->tq_lock_class);
1158 }
1159
1160 while (!list_empty(&tq->tq_free_list)) {
1161 t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);
1162
1163 ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));
1164
1165 list_del_init(&t->tqent_list);
1166 task_free(tq, t);
1167 }
1168
1169 ASSERT0(tq->tq_nthreads);
1170 ASSERT0(tq->tq_nalloc);
1171 ASSERT0(tq->tq_nspawn);
1172 ASSERT(list_empty(&tq->tq_thread_list));
1173 ASSERT(list_empty(&tq->tq_active_list));
1174 ASSERT(list_empty(&tq->tq_free_list));
1175 ASSERT(list_empty(&tq->tq_pend_list));
1176 ASSERT(list_empty(&tq->tq_prio_list));
1177 ASSERT(list_empty(&tq->tq_delay_list));
1178
1179 spin_unlock_irqrestore(&tq->tq_lock, flags);
1180
1181 strfree(tq->tq_name);
1182 kmem_free(tq, sizeof (taskq_t));
1183 }
1184 EXPORT_SYMBOL(taskq_destroy);
1185
1186
1187 static unsigned int spl_taskq_kick = 0;
1188
1189 /*
1190 * 2.6.36 API Change
1191 * module_param_cb is introduced to take kernel_param_ops and
1192 * module_param_call is marked as obsolete. Also set and get operations
1193 * were changed to take a 'const struct kernel_param *'.
1194 */
1195 static int
1196 #ifdef module_param_cb
1197 param_set_taskq_kick(const char *val, const struct kernel_param *kp)
1198 #else
1199 param_set_taskq_kick(const char *val, struct kernel_param *kp)
1200 #endif
1201 {
1202 int ret;
1203 taskq_t *tq;
1204 taskq_ent_t *t;
1205 unsigned long flags;
1206
1207 ret = param_set_uint(val, kp);
1208 if (ret < 0 || !spl_taskq_kick)
1209 return (ret);
1210 /* reset value */
1211 spl_taskq_kick = 0;
1212
1213 down_read(&tq_list_sem);
1214 list_for_each_entry(tq, &tq_list, tq_taskqs) {
1215 spin_lock_irqsave_nested(&tq->tq_lock, flags,
1216 tq->tq_lock_class);
1217 /* Check if the first pending is older than 5 seconds */
1218 t = taskq_next_ent(tq);
1219 if (t && time_after(jiffies, t->tqent_birth + 5*HZ)) {
1220 (void) taskq_thread_spawn(tq);
1221 printk(KERN_INFO "spl: Kicked taskq %s/%d\n",
1222 tq->tq_name, tq->tq_instance);
1223 }
1224 spin_unlock_irqrestore(&tq->tq_lock, flags);
1225 }
1226 up_read(&tq_list_sem);
1227 return (ret);
1228 }
1229
1230 #ifdef module_param_cb
1231 static const struct kernel_param_ops param_ops_taskq_kick = {
1232 .set = param_set_taskq_kick,
1233 .get = param_get_uint,
1234 };
1235 module_param_cb(spl_taskq_kick, &param_ops_taskq_kick, &spl_taskq_kick, 0644);
1236 #else
1237 module_param_call(spl_taskq_kick, param_set_taskq_kick, param_get_uint,
1238 &spl_taskq_kick, 0644);
1239 #endif
1240 MODULE_PARM_DESC(spl_taskq_kick,
1241 "Write nonzero to kick stuck taskqs to spawn more threads");
1242
1243 int
1244 spl_taskq_init(void)
1245 {
1246 init_rwsem(&tq_list_sem);
1247 tsd_create(&taskq_tsd, NULL);
1248
1249 system_taskq = taskq_create("spl_system_taskq", MAX(boot_ncpus, 64),
1250 maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE|TASKQ_DYNAMIC);
1251 if (system_taskq == NULL)
1252 return (1);
1253
1254 system_delay_taskq = taskq_create("spl_delay_taskq", MAX(boot_ncpus, 4),
1255 maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE|TASKQ_DYNAMIC);
1256 if (system_delay_taskq == NULL) {
1257 taskq_destroy(system_taskq);
1258 return (1);
1259 }
1260
1261 dynamic_taskq = taskq_create("spl_dynamic_taskq", 1,
1262 maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE);
1263 if (dynamic_taskq == NULL) {
1264 taskq_destroy(system_taskq);
1265 taskq_destroy(system_delay_taskq);
1266 return (1);
1267 }
1268
1269 /*
1270 * This is used to annotate tq_lock, so
1271 * taskq_dispatch -> taskq_thread_spawn -> taskq_dispatch
1272 * does not trigger a lockdep warning re: possible recursive locking
1273 */
1274 dynamic_taskq->tq_lock_class = TQ_LOCK_DYNAMIC;
1275
1276 return (0);
1277 }
1278
1279 void
1280 spl_taskq_fini(void)
1281 {
1282 taskq_destroy(dynamic_taskq);
1283 dynamic_taskq = NULL;
1284
1285 taskq_destroy(system_delay_taskq);
1286 system_delay_taskq = NULL;
1287
1288 taskq_destroy(system_taskq);
1289 system_taskq = NULL;
1290
1291 tsd_destroy(&taskq_tsd);
1292 }