/*
 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2007 The Regents of the University of California.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 * UCRL-CODE-235197
 *
 * This file is part of the SPL, Solaris Porting Layer.
 * For details, see <http://zfsonlinux.org/>.
 *
 * The SPL is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * The SPL is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Solaris Porting Layer (SPL) Task Queue Implementation.
 */

#include <sys/taskq.h>
#include <sys/kmem.h>

int spl_taskq_thread_bind = 0;
module_param(spl_taskq_thread_bind, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_bind, "Bind taskq thread to CPU by default");

int spl_taskq_thread_dynamic = 1;
module_param(spl_taskq_thread_dynamic, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_dynamic, "Allow dynamic taskq threads");

int spl_taskq_thread_priority = 1;
module_param(spl_taskq_thread_priority, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_priority,
	"Allow non-default priority for taskq threads");

int spl_taskq_thread_sequential = 4;
module_param(spl_taskq_thread_sequential, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_sequential,
	"Create new taskq threads after N sequential tasks");
48 | ||
49 | /* Global system-wide dynamic task queue available for all consumers */ | |
50 | taskq_t *system_taskq; | |
51 | EXPORT_SYMBOL(system_taskq); | |
52 | ||
53 | /* Private dedicated taskq for creating new taskq threads on demand. */ | |
54 | static taskq_t *dynamic_taskq; | |
55 | static taskq_thread_t *taskq_thread_create(taskq_t *); | |
56 | ||
57 | /* List of all taskqs */ | |
58 | LIST_HEAD(tq_list); | |
59 | DECLARE_RWSEM(tq_list_sem); | |
60 | ||
61 | static int | |
62 | task_km_flags(uint_t flags) | |
63 | { | |
64 | if (flags & TQ_NOSLEEP) | |
65 | return (KM_NOSLEEP); | |
66 | ||
67 | if (flags & TQ_PUSHPAGE) | |
68 | return (KM_PUSHPAGE); | |
69 | ||
70 | return (KM_SLEEP); | |
71 | } | |
72 | ||
73 | /* | |
74 | * taskq_find_by_name - Find the largest instance number of a named taskq. | |
75 | */ | |
76 | static int | |
77 | taskq_find_by_name(const char *name) | |
78 | { | |
79 | struct list_head *tql; | |
80 | taskq_t *tq; | |
81 | ||
82 | list_for_each_prev(tql, &tq_list) { | |
83 | tq = list_entry(tql, taskq_t, tq_taskqs); | |
84 | if (strcmp(name, tq->tq_name) == 0) | |
85 | return tq->tq_instance; | |
86 | } | |
87 | return (-1); | |
88 | } | |
89 | ||
/*
 * NOTE: Must be called with tq->tq_lock held, returns a taskq_ent_t
 * which is not attached to the free, work, or pending taskq lists.
 */
static taskq_ent_t *
task_alloc(taskq_t *tq, uint_t flags, unsigned long *irqflags)
{
	taskq_ent_t *t;
	int count = 0;

	ASSERT(tq);
	ASSERT(spin_is_locked(&tq->tq_lock));
retry:
	/* Acquire taskq_ent_t's from free list if available */
	if (!list_empty(&tq->tq_free_list) && !(flags & TQ_NEW)) {
		t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);

		ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));
		ASSERT(!(t->tqent_flags & TQENT_FLAG_CANCEL));
		ASSERT(!timer_pending(&t->tqent_timer));

		list_del_init(&t->tqent_list);
		return (t);
	}

	/* Free list is empty and memory allocations are prohibited */
	if (flags & TQ_NOALLOC)
		return (NULL);

	/* Hit maximum taskq_ent_t pool size */
	if (tq->tq_nalloc >= tq->tq_maxalloc) {
		if (flags & TQ_NOSLEEP)
			return (NULL);

		/*
		 * Sleep periodically polling the free list for an available
		 * taskq_ent_t. Dispatching with TQ_SLEEP should always succeed
		 * but we cannot block forever waiting for a taskq_ent_t to
		 * show up in the free list, otherwise a deadlock can happen.
		 *
		 * Therefore, we need to allocate a new task even if the number
		 * of allocated tasks is above tq->tq_maxalloc, but we still
		 * end up delaying the task allocation by up to one second,
		 * thereby throttling the task dispatch rate.
		 */
		spin_unlock_irqrestore(&tq->tq_lock, *irqflags);
		schedule_timeout(HZ / 100);
		spin_lock_irqsave_nested(&tq->tq_lock, *irqflags,
		    tq->tq_lock_class);
		if (count < 100) {
			count++;
			goto retry;
		}
	}

	spin_unlock_irqrestore(&tq->tq_lock, *irqflags);
	t = kmem_alloc(sizeof (taskq_ent_t), task_km_flags(flags));
	spin_lock_irqsave_nested(&tq->tq_lock, *irqflags, tq->tq_lock_class);

	if (t) {
		taskq_init_ent(t);
		tq->tq_nalloc++;
	}

	return (t);
}
156 | ||
157 | /* | |
158 | * NOTE: Must be called with tq->tq_lock held, expects the taskq_ent_t | |
159 | * to already be removed from the free, work, or pending taskq lists. | |
160 | */ | |
161 | static void | |
162 | task_free(taskq_t *tq, taskq_ent_t *t) | |
163 | { | |
164 | ASSERT(tq); | |
165 | ASSERT(t); | |
166 | ASSERT(spin_is_locked(&tq->tq_lock)); | |
167 | ASSERT(list_empty(&t->tqent_list)); | |
168 | ASSERT(!timer_pending(&t->tqent_timer)); | |
169 | ||
170 | kmem_free(t, sizeof (taskq_ent_t)); | |
171 | tq->tq_nalloc--; | |
172 | } | |
173 | ||
174 | /* | |
175 | * NOTE: Must be called with tq->tq_lock held, either destroys the | |
176 | * taskq_ent_t if too many exist or moves it to the free list for later use. | |
177 | */ | |
178 | static void | |
179 | task_done(taskq_t *tq, taskq_ent_t *t) | |
180 | { | |
181 | ASSERT(tq); | |
182 | ASSERT(t); | |
183 | ASSERT(spin_is_locked(&tq->tq_lock)); | |
184 | ||
185 | /* Wake tasks blocked in taskq_wait_id() */ | |
186 | wake_up_all(&t->tqent_waitq); | |
187 | ||
188 | list_del_init(&t->tqent_list); | |
189 | ||
190 | if (tq->tq_nalloc <= tq->tq_minalloc) { | |
191 | t->tqent_id = 0; | |
192 | t->tqent_func = NULL; | |
193 | t->tqent_arg = NULL; | |
194 | t->tqent_flags = 0; | |
195 | ||
196 | list_add_tail(&t->tqent_list, &tq->tq_free_list); | |
197 | } else { | |
198 | task_free(tq, t); | |
199 | } | |
200 | } | |
201 | ||
/*
 * When a delayed task timer expires, remove the task from the delay
 * list and add it to the priority list, in task id order, for
 * immediate processing.
 */
static void
task_expire(unsigned long data)
{
	taskq_ent_t *w, *t = (taskq_ent_t *)data;
	taskq_t *tq = t->tqent_taskq;
	struct list_head *l;
	unsigned long flags;

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);

	if (t->tqent_flags & TQENT_FLAG_CANCEL) {
		ASSERT(list_empty(&t->tqent_list));
		spin_unlock_irqrestore(&tq->tq_lock, flags);
		return;
	}

	/*
	 * The priority list must be maintained in strict task id order
	 * from lowest to highest for lowest_id to be easily calculable.
	 */
	list_del(&t->tqent_list);
	list_for_each_prev(l, &tq->tq_prio_list) {
		w = list_entry(l, taskq_ent_t, tqent_list);
		if (w->tqent_id < t->tqent_id) {
			list_add(&t->tqent_list, l);
			break;
		}
	}
	if (l == &tq->tq_prio_list)
		list_add(&t->tqent_list, &tq->tq_prio_list);

	spin_unlock_irqrestore(&tq->tq_lock, flags);

	wake_up(&tq->tq_work_waitq);
}

/*
 * Returns the lowest incomplete taskqid_t. The taskqid_t may
 * be queued on the pending list, on the priority list, on the
 * delay list, or on the work list currently being handled, but
 * it is not 100% complete yet.
 */
static taskqid_t
taskq_lowest_id(taskq_t *tq)
{
	taskqid_t lowest_id = tq->tq_next_id;
	taskq_ent_t *t;
	taskq_thread_t *tqt;

	ASSERT(tq);
	ASSERT(spin_is_locked(&tq->tq_lock));

	if (!list_empty(&tq->tq_pend_list)) {
		t = list_entry(tq->tq_pend_list.next, taskq_ent_t, tqent_list);
		lowest_id = MIN(lowest_id, t->tqent_id);
	}

	if (!list_empty(&tq->tq_prio_list)) {
		t = list_entry(tq->tq_prio_list.next, taskq_ent_t, tqent_list);
		lowest_id = MIN(lowest_id, t->tqent_id);
	}

	if (!list_empty(&tq->tq_delay_list)) {
		t = list_entry(tq->tq_delay_list.next, taskq_ent_t, tqent_list);
		lowest_id = MIN(lowest_id, t->tqent_id);
	}

	if (!list_empty(&tq->tq_active_list)) {
		tqt = list_entry(tq->tq_active_list.next, taskq_thread_t,
		    tqt_active_list);
		ASSERT(tqt->tqt_id != 0);
		lowest_id = MIN(lowest_id, tqt->tqt_id);
	}

	return (lowest_id);
}

/*
 * Insert a taskq_thread_t into the active list, keeping the list sorted
 * by increasing taskqid.
 */
static void
taskq_insert_in_order(taskq_t *tq, taskq_thread_t *tqt)
{
	taskq_thread_t *w;
	struct list_head *l;

	ASSERT(tq);
	ASSERT(tqt);
	ASSERT(spin_is_locked(&tq->tq_lock));

	list_for_each_prev(l, &tq->tq_active_list) {
		w = list_entry(l, taskq_thread_t, tqt_active_list);
		if (w->tqt_id < tqt->tqt_id) {
			list_add(&tqt->tqt_active_list, l);
			break;
		}
	}
	if (l == &tq->tq_active_list)
		list_add(&tqt->tqt_active_list, &tq->tq_active_list);
}

/*
 * Find and return a task from the given list if it exists. The list
 * must be in lowest to highest task id order.
 */
static taskq_ent_t *
taskq_find_list(taskq_t *tq, struct list_head *lh, taskqid_t id)
{
	struct list_head *l;
	taskq_ent_t *t;

	ASSERT(spin_is_locked(&tq->tq_lock));

	list_for_each(l, lh) {
		t = list_entry(l, taskq_ent_t, tqent_list);

		if (t->tqent_id == id)
			return (t);

		if (t->tqent_id > id)
			break;
	}

	return (NULL);
}

/*
 * Find an already dispatched task given the task id regardless of what
 * state it is in. If a task is still pending or executing it will be
 * returned and 'active' set appropriately. If the task has already
 * been run then NULL is returned.
 */
static taskq_ent_t *
taskq_find(taskq_t *tq, taskqid_t id, int *active)
{
	taskq_thread_t *tqt;
	struct list_head *l;
	taskq_ent_t *t;

	ASSERT(spin_is_locked(&tq->tq_lock));
	*active = 0;

	t = taskq_find_list(tq, &tq->tq_delay_list, id);
	if (t)
		return (t);

	t = taskq_find_list(tq, &tq->tq_prio_list, id);
	if (t)
		return (t);

	t = taskq_find_list(tq, &tq->tq_pend_list, id);
	if (t)
		return (t);

	list_for_each(l, &tq->tq_active_list) {
		tqt = list_entry(l, taskq_thread_t, tqt_active_list);
		if (tqt->tqt_id == id) {
			t = tqt->tqt_task;
			*active = 1;
			return (t);
		}
	}

	return (NULL);
}

/*
 * Theory for the taskq_wait_id(), taskq_wait_outstanding(), and
 * taskq_wait() functions below.
 *
 * Taskq waiting is accomplished by tracking the lowest outstanding task
 * id and the next available task id. As tasks are dispatched they are
 * added to the tail of the pending, priority, or delay lists. As worker
 * threads become available the tasks are removed from the heads of these
 * lists and linked to the worker threads. This ensures the lists are
 * kept sorted by lowest to highest task id.
 *
 * Therefore the lowest outstanding task id can be quickly determined by
 * checking the head item from all of these lists. This value is stored
 * with the taskq as the lowest id. It only needs to be recalculated when
 * either the task with the current lowest id completes or is canceled.
 *
 * By blocking until the lowest task id exceeds the passed task id the
 * taskq_wait_outstanding() function can be easily implemented. Similarly,
 * by blocking until the lowest task id matches the next task id taskq_wait()
 * can be implemented.
 *
 * Callers should be aware that when there are multiple worker threads it
 * is possible for larger task ids to complete before smaller ones. Also
 * when the taskq contains delayed tasks with small task ids callers may
 * block for a considerable length of time waiting for them to expire and
 * execute.
 */
static int
taskq_wait_id_check(taskq_t *tq, taskqid_t id)
{
	int active = 0;
	int rc;
	unsigned long flags;

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	rc = (taskq_find(tq, id, &active) == NULL);
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	return (rc);
}

/*
 * The taskq_wait_id() function blocks until the passed task id completes.
 * This does not guarantee that all lower task ids have completed.
 */
void
taskq_wait_id(taskq_t *tq, taskqid_t id)
{
	wait_event(tq->tq_wait_waitq, taskq_wait_id_check(tq, id));
}
EXPORT_SYMBOL(taskq_wait_id);

static int
taskq_wait_outstanding_check(taskq_t *tq, taskqid_t id)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	rc = (id < tq->tq_lowest_id);
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	return (rc);
}

/*
 * The taskq_wait_outstanding() function will block until all tasks with a
 * lower taskqid than the passed 'id' have been completed. Note that all
 * task ids are assigned monotonically at dispatch time. Zero may be
 * passed for the id to indicate that all tasks dispatched up to this
 * point, but not after, should be waited for.
 */
void
taskq_wait_outstanding(taskq_t *tq, taskqid_t id)
{
	wait_event(tq->tq_wait_waitq,
	    taskq_wait_outstanding_check(tq, id ? id : tq->tq_next_id - 1));
}
EXPORT_SYMBOL(taskq_wait_outstanding);

static int
taskq_wait_check(taskq_t *tq)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	rc = (tq->tq_lowest_id == tq->tq_next_id);
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	return (rc);
}

/*
 * The taskq_wait() function will block until the taskq is empty.
 * This means that if a taskq re-dispatches work to itself taskq_wait()
 * callers will block indefinitely.
 */
void
taskq_wait(taskq_t *tq)
{
	wait_event(tq->tq_wait_waitq, taskq_wait_check(tq));
}
EXPORT_SYMBOL(taskq_wait);
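
/*
 * A minimal usage sketch for the three wait variants above (illustrative
 * only; my_func and my_arg are placeholders for a caller-supplied
 * task_func_t and its argument):
 *
 *	taskqid_t id = taskq_dispatch(tq, my_func, my_arg, TQ_SLEEP);
 *	if (id != 0)
 *		taskq_wait_id(tq, id);		// this task only
 *
 *	taskq_wait_outstanding(tq, 0);		// everything dispatched so far
 *	taskq_wait(tq);				// block until the taskq is empty
 */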
476 | ||
477 | static int | |
478 | taskq_member_impl(taskq_t *tq, void *t) | |
479 | { | |
480 | struct list_head *l; | |
481 | taskq_thread_t *tqt; | |
482 | int found = 0; | |
483 | ||
484 | ASSERT(tq); | |
485 | ASSERT(t); | |
486 | ASSERT(spin_is_locked(&tq->tq_lock)); | |
487 | ||
488 | list_for_each(l, &tq->tq_thread_list) { | |
489 | tqt = list_entry(l, taskq_thread_t, tqt_thread_list); | |
490 | if (tqt->tqt_thread == (struct task_struct *)t) { | |
491 | found = 1; | |
492 | break; | |
493 | } | |
494 | } | |
495 | return (found); | |
496 | } | |
497 | ||
498 | int | |
499 | taskq_member(taskq_t *tq, void *t) | |
500 | { | |
501 | int found; | |
502 | unsigned long flags; | |
503 | ||
504 | spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class); | |
505 | found = taskq_member_impl(tq, t); | |
506 | spin_unlock_irqrestore(&tq->tq_lock, flags); | |
507 | ||
508 | return (found); | |
509 | } | |
510 | EXPORT_SYMBOL(taskq_member); | |
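
/*
 * Illustrative membership check (not from the original source): a task
 * function that could otherwise deadlock may test whether the current
 * thread belongs to the taskq before waiting on it:
 *
 *	if (!taskq_member(tq, current))
 *		taskq_wait(tq);
 */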
511 | ||
512 | /* | |
513 | * Cancel an already dispatched task given the task id. Still pending tasks | |
514 | * will be immediately canceled, and if the task is active the function will | |
515 | * block until it completes. Preallocated tasks which are canceled must be | |
516 | * freed by the caller. | |
517 | */ | |
518 | int | |
519 | taskq_cancel_id(taskq_t *tq, taskqid_t id) | |
520 | { | |
521 | taskq_ent_t *t; | |
522 | int active = 0; | |
523 | int rc = ENOENT; | |
524 | unsigned long flags; | |
525 | ||
526 | ASSERT(tq); | |
527 | ||
528 | spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class); | |
529 | t = taskq_find(tq, id, &active); | |
530 | if (t && !active) { | |
531 | list_del_init(&t->tqent_list); | |
532 | t->tqent_flags |= TQENT_FLAG_CANCEL; | |
533 | ||
534 | /* | |
535 | * When canceling the lowest outstanding task id we | |
536 | * must recalculate the new lowest outstanding id. | |
537 | */ | |
538 | if (tq->tq_lowest_id == t->tqent_id) { | |
539 | tq->tq_lowest_id = taskq_lowest_id(tq); | |
540 | ASSERT3S(tq->tq_lowest_id, >, t->tqent_id); | |
541 | } | |
542 | ||
		/*
		 * The task_expire() function takes the tq->tq_lock so drop
		 * the lock before synchronously cancelling the timer.
		 */
		if (timer_pending(&t->tqent_timer)) {
			spin_unlock_irqrestore(&tq->tq_lock, flags);
			del_timer_sync(&t->tqent_timer);
			spin_lock_irqsave_nested(&tq->tq_lock, flags,
			    tq->tq_lock_class);
		}

		if (!(t->tqent_flags & TQENT_FLAG_PREALLOC))
			task_done(tq, t);

		rc = 0;
	}
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	if (active) {
		taskq_wait_id(tq, id);
		rc = EBUSY;
	}

	return (rc);
}
EXPORT_SYMBOL(taskq_cancel_id);
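
/*
 * Sketch of the possible taskq_cancel_id() outcomes (note the
 * Solaris-style positive error values returned above):
 *
 *	switch (taskq_cancel_id(tq, id)) {
 *	case 0:		// task was pending and has been canceled
 *	case ENOENT:	// task already completed or was never dispatched
 *	case EBUSY:	// task was active; we blocked until it finished
 *	}
 */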
569 | ||
570 | static int taskq_thread_spawn(taskq_t *tq); | |
571 | ||
572 | taskqid_t | |
573 | taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags) | |
574 | { | |
575 | taskq_ent_t *t; | |
576 | taskqid_t rc = 0; | |
577 | unsigned long irqflags; | |
578 | ||
579 | ASSERT(tq); | |
580 | ASSERT(func); | |
581 | ||
582 | spin_lock_irqsave_nested(&tq->tq_lock, irqflags, tq->tq_lock_class); | |
583 | ||
584 | /* Taskq being destroyed and all tasks drained */ | |
585 | if (!(tq->tq_flags & TASKQ_ACTIVE)) | |
586 | goto out; | |
587 | ||
	/* Do not queue the task unless there is an idle thread for it */
	ASSERT(tq->tq_nactive <= tq->tq_nthreads);
	if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads))
		goto out;

	if ((t = task_alloc(tq, flags, &irqflags)) == NULL)
		goto out;

	spin_lock(&t->tqent_lock);

	/* Queue to the priority list instead of the pending list */
	if (flags & TQ_FRONT)
		list_add_tail(&t->tqent_list, &tq->tq_prio_list);
	else
		list_add_tail(&t->tqent_list, &tq->tq_pend_list);

	t->tqent_id = rc = tq->tq_next_id;
	tq->tq_next_id++;
	t->tqent_func = func;
	t->tqent_arg = arg;
	t->tqent_taskq = tq;
	t->tqent_timer.data = 0;
	t->tqent_timer.function = NULL;
	t->tqent_timer.expires = 0;

	ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

	spin_unlock(&t->tqent_lock);

	wake_up(&tq->tq_work_waitq);
out:
	/* Spawn additional taskq threads if required. */
	if (tq->tq_nactive == tq->tq_nthreads)
		(void) taskq_thread_spawn(tq);

	spin_unlock_irqrestore(&tq->tq_lock, irqflags);
	return (rc);
}
EXPORT_SYMBOL(taskq_dispatch);
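
/*
 * Dispatch usage sketch (hypothetical caller).  A return value of 0
 * indicates failure: the taskq is shutting down, TQ_NOQUEUE found no
 * idle thread, or task allocation failed under TQ_NOSLEEP/TQ_NOALLOC:
 *
 *	taskqid_t id = taskq_dispatch(tq, my_func, my_arg, TQ_SLEEP);
 *	if (id == 0)
 *		// handle the dispatch failure
 */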
627 | ||
628 | taskqid_t | |
629 | taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg, | |
630 | uint_t flags, clock_t expire_time) | |
631 | { | |
632 | taskqid_t rc = 0; | |
633 | taskq_ent_t *t; | |
634 | unsigned long irqflags; | |
635 | ||
636 | ASSERT(tq); | |
637 | ASSERT(func); | |
638 | ||
639 | spin_lock_irqsave_nested(&tq->tq_lock, irqflags, tq->tq_lock_class); | |
640 | ||
641 | /* Taskq being destroyed and all tasks drained */ | |
642 | if (!(tq->tq_flags & TASKQ_ACTIVE)) | |
643 | goto out; | |
644 | ||
645 | if ((t = task_alloc(tq, flags, &irqflags)) == NULL) | |
646 | goto out; | |
647 | ||
648 | spin_lock(&t->tqent_lock); | |
649 | ||
650 | /* Queue to the delay list for subsequent execution */ | |
651 | list_add_tail(&t->tqent_list, &tq->tq_delay_list); | |
652 | ||
653 | t->tqent_id = rc = tq->tq_next_id; | |
654 | tq->tq_next_id++; | |
655 | t->tqent_func = func; | |
656 | t->tqent_arg = arg; | |
657 | t->tqent_taskq = tq; | |
658 | t->tqent_timer.data = (unsigned long)t; | |
659 | t->tqent_timer.function = task_expire; | |
660 | t->tqent_timer.expires = (unsigned long)expire_time; | |
661 | add_timer(&t->tqent_timer); | |
662 | ||
663 | ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC)); | |
664 | ||
665 | spin_unlock(&t->tqent_lock); | |
666 | out: | |
667 | /* Spawn additional taskq threads if required. */ | |
668 | if (tq->tq_nactive == tq->tq_nthreads) | |
669 | (void) taskq_thread_spawn(tq); | |
670 | spin_unlock_irqrestore(&tq->tq_lock, irqflags); | |
671 | return (rc); | |
672 | } | |
673 | EXPORT_SYMBOL(taskq_dispatch_delay); | |
674 | ||
675 | void | |
676 | taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags, | |
677 | taskq_ent_t *t) | |
678 | { | |
679 | unsigned long irqflags; | |
680 | ASSERT(tq); | |
681 | ASSERT(func); | |
682 | ||
683 | spin_lock_irqsave_nested(&tq->tq_lock, irqflags, | |
684 | tq->tq_lock_class); | |
685 | ||
686 | /* Taskq being destroyed and all tasks drained */ | |
687 | if (!(tq->tq_flags & TASKQ_ACTIVE)) { | |
688 | t->tqent_id = 0; | |
689 | goto out; | |
690 | } | |
691 | ||
692 | spin_lock(&t->tqent_lock); | |
693 | ||
694 | /* | |
695 | * Mark it as a prealloc'd task. This is important | |
696 | * to ensure that we don't free it later. | |
697 | */ | |
698 | t->tqent_flags |= TQENT_FLAG_PREALLOC; | |
699 | ||
700 | /* Queue to the priority list instead of the pending list */ | |
701 | if (flags & TQ_FRONT) | |
702 | list_add_tail(&t->tqent_list, &tq->tq_prio_list); | |
703 | else | |
704 | list_add_tail(&t->tqent_list, &tq->tq_pend_list); | |
705 | ||
706 | t->tqent_id = tq->tq_next_id; | |
707 | tq->tq_next_id++; | |
708 | t->tqent_func = func; | |
709 | t->tqent_arg = arg; | |
710 | t->tqent_taskq = tq; | |
711 | ||
712 | spin_unlock(&t->tqent_lock); | |
713 | ||
714 | wake_up(&tq->tq_work_waitq); | |
715 | out: | |
716 | /* Spawn additional taskq threads if required. */ | |
717 | if (tq->tq_nactive == tq->tq_nthreads) | |
718 | (void) taskq_thread_spawn(tq); | |
719 | spin_unlock_irqrestore(&tq->tq_lock, irqflags); | |
720 | } | |
721 | EXPORT_SYMBOL(taskq_dispatch_ent); | |
722 | ||
723 | int | |
724 | taskq_empty_ent(taskq_ent_t *t) | |
725 | { | |
726 | return (list_empty(&t->tqent_list)); | |
727 | } | |
728 | EXPORT_SYMBOL(taskq_empty_ent); | |
729 | ||
730 | void | |
731 | taskq_init_ent(taskq_ent_t *t) | |
732 | { | |
733 | spin_lock_init(&t->tqent_lock); | |
734 | init_waitqueue_head(&t->tqent_waitq); | |
735 | init_timer(&t->tqent_timer); | |
736 | INIT_LIST_HEAD(&t->tqent_list); | |
737 | t->tqent_id = 0; | |
738 | t->tqent_func = NULL; | |
739 | t->tqent_arg = NULL; | |
740 | t->tqent_flags = 0; | |
741 | t->tqent_taskq = NULL; | |
742 | } | |
743 | EXPORT_SYMBOL(taskq_init_ent); | |
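
/*
 * Prealloc'd dispatch sketch (illustrative).  The caller owns the
 * taskq_ent_t, often embedding it in a larger structure so dispatch
 * never allocates; it must stay valid until the task has run:
 *
 *	taskq_ent_t *ent = &my_struct->work_ent;	// caller-owned storage
 *
 *	taskq_init_ent(ent);
 *	taskq_dispatch_ent(tq, my_func, my_struct, 0, ent);
 *	if (ent->tqent_id == 0)
 *		// taskq was inactive, the entry was never queued
 */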
744 | ||
745 | /* | |
746 | * Return the next pending task, preference is given to tasks on the | |
747 | * priority list which were dispatched with TQ_FRONT. | |
748 | */ | |
749 | static taskq_ent_t * | |
750 | taskq_next_ent(taskq_t *tq) | |
751 | { | |
752 | struct list_head *list; | |
753 | ||
754 | ASSERT(spin_is_locked(&tq->tq_lock)); | |
755 | ||
756 | if (!list_empty(&tq->tq_prio_list)) | |
757 | list = &tq->tq_prio_list; | |
758 | else if (!list_empty(&tq->tq_pend_list)) | |
759 | list = &tq->tq_pend_list; | |
760 | else | |
761 | return (NULL); | |
762 | ||
763 | return (list_entry(list->next, taskq_ent_t, tqent_list)); | |
764 | } | |
765 | ||
766 | /* | |
767 | * Spawns a new thread for the specified taskq. | |
768 | */ | |
769 | static void | |
770 | taskq_thread_spawn_task(void *arg) | |
771 | { | |
772 | taskq_t *tq = (taskq_t *)arg; | |
773 | unsigned long flags; | |
774 | ||
775 | (void) taskq_thread_create(tq); | |
776 | ||
777 | spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class); | |
778 | tq->tq_nspawn--; | |
779 | spin_unlock_irqrestore(&tq->tq_lock, flags); | |
780 | } | |
781 | ||
/*
 * Spawn additional threads for dynamic taskqs (TASKQ_DYNAMIC) when the
 * current number of threads is insufficient to handle the pending tasks.
 * These new threads must be created by the dedicated dynamic_taskq to
 * avoid deadlocks between thread creation and memory reclaim. The
 * system_taskq, which is also a dynamic taskq, cannot be safely used
 * for this.
 */
static int
taskq_thread_spawn(taskq_t *tq)
{
	int spawning = 0;

	if (!(tq->tq_flags & TASKQ_DYNAMIC))
		return (0);

	if ((tq->tq_nthreads + tq->tq_nspawn < tq->tq_maxthreads) &&
	    (tq->tq_flags & TASKQ_ACTIVE)) {
		spawning = (++tq->tq_nspawn);
		taskq_dispatch(dynamic_taskq, taskq_thread_spawn_task,
		    tq, TQ_NOSLEEP);
	}

	return (spawning);
}

/*
 * Threads in a dynamic taskq should only exit once it has been completely
 * drained and no other threads are actively servicing tasks. This prevents
 * threads from being created and destroyed more than is required.
 *
 * The first thread in the thread list is treated as the primary thread.
 * There is nothing special about the primary thread but in order to avoid
 * all the taskq pids from changing we opt to make it long running.
 */
static int
taskq_thread_should_stop(taskq_t *tq, taskq_thread_t *tqt)
{
	ASSERT(spin_is_locked(&tq->tq_lock));

	if (!(tq->tq_flags & TASKQ_DYNAMIC))
		return (0);

	if (list_first_entry(&(tq->tq_thread_list), taskq_thread_t,
	    tqt_thread_list) == tqt)
		return (0);

	return
	    ((tq->tq_nspawn == 0) &&	/* No threads are being spawned */
	    (tq->tq_nactive == 0) &&	/* No threads are handling tasks */
	    (tq->tq_nthreads > 1) &&	/* More than 1 thread is running */
	    (!taskq_next_ent(tq)) &&	/* There are no pending tasks */
	    (spl_taskq_thread_dynamic));	/* Dynamic taskqs are allowed */
}

static int
taskq_thread(void *args)
{
	DECLARE_WAITQUEUE(wait, current);
	sigset_t blocked;
	taskq_thread_t *tqt = args;
	taskq_t *tq;
	taskq_ent_t *t;
	int seq_tasks = 0;
	unsigned long flags;

	ASSERT(tqt);
	ASSERT(tqt->tqt_tq);
	tq = tqt->tqt_tq;
	current->flags |= PF_NOFREEZE;

	(void) spl_fstrans_mark();

	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);

	/* Immediately exit if more threads than allowed were created. */
	if (tq->tq_nthreads >= tq->tq_maxthreads)
		goto error;

	tq->tq_nthreads++;
	list_add_tail(&tqt->tqt_thread_list, &tq->tq_thread_list);
	wake_up(&tq->tq_wait_waitq);
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {

		if (list_empty(&tq->tq_pend_list) &&
		    list_empty(&tq->tq_prio_list)) {

			if (taskq_thread_should_stop(tq, tqt)) {
				wake_up_all(&tq->tq_wait_waitq);
				break;
			}

			add_wait_queue_exclusive(&tq->tq_work_waitq, &wait);
			spin_unlock_irqrestore(&tq->tq_lock, flags);

			schedule();
			seq_tasks = 0;

			spin_lock_irqsave_nested(&tq->tq_lock, flags,
			    tq->tq_lock_class);
			remove_wait_queue(&tq->tq_work_waitq, &wait);
		} else {
			__set_current_state(TASK_RUNNING);
		}

		if ((t = taskq_next_ent(tq)) != NULL) {
			list_del_init(&t->tqent_list);

			/*
			 * In order to support recursively dispatching a
			 * preallocated taskq_ent_t, tqent_id must be
			 * stored prior to executing tqent_func.
			 */
			tqt->tqt_id = t->tqent_id;
			tqt->tqt_task = t;

			/*
			 * We must store a copy of the flags prior to
			 * servicing the task (servicing a prealloc'd task
			 * returns the ownership of the tqent back to
			 * the caller of taskq_dispatch). Thus,
			 * tqent_flags _may_ change within the call.
			 */
			tqt->tqt_flags = t->tqent_flags;

			taskq_insert_in_order(tq, tqt);
			tq->tq_nactive++;
			spin_unlock_irqrestore(&tq->tq_lock, flags);

			/* Perform the requested task */
			t->tqent_func(t->tqent_arg);

			spin_lock_irqsave_nested(&tq->tq_lock, flags,
			    tq->tq_lock_class);
			tq->tq_nactive--;
			list_del_init(&tqt->tqt_active_list);
			tqt->tqt_task = NULL;

			/* For prealloc'd tasks, we don't free anything. */
			if (!(tqt->tqt_flags & TQENT_FLAG_PREALLOC))
				task_done(tq, t);

			/*
			 * When the current lowest outstanding taskqid is
			 * done, calculate the new lowest outstanding id.
			 */
			if (tq->tq_lowest_id == tqt->tqt_id) {
				tq->tq_lowest_id = taskq_lowest_id(tq);
				ASSERT3S(tq->tq_lowest_id, >, tqt->tqt_id);
			}

			/* Spawn additional taskq threads if required. */
			if ((++seq_tasks) > spl_taskq_thread_sequential &&
			    taskq_thread_spawn(tq))
				seq_tasks = 0;

			tqt->tqt_id = 0;
			tqt->tqt_flags = 0;
			wake_up_all(&tq->tq_wait_waitq);
		} else {
			if (taskq_thread_should_stop(tq, tqt))
				break;
		}

		set_current_state(TASK_INTERRUPTIBLE);
	}

	__set_current_state(TASK_RUNNING);
	tq->tq_nthreads--;
	list_del_init(&tqt->tqt_thread_list);
error:
	kmem_free(tqt, sizeof (taskq_thread_t));
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	return (0);
}

static taskq_thread_t *
taskq_thread_create(taskq_t *tq)
{
	static int last_used_cpu = 0;
	taskq_thread_t *tqt;

	tqt = kmem_alloc(sizeof (*tqt), KM_PUSHPAGE);
	INIT_LIST_HEAD(&tqt->tqt_thread_list);
	INIT_LIST_HEAD(&tqt->tqt_active_list);
	tqt->tqt_tq = tq;
	tqt->tqt_id = 0;

	tqt->tqt_thread = spl_kthread_create(taskq_thread, tqt,
	    "%s", tq->tq_name);
	if (tqt->tqt_thread == NULL) {
		kmem_free(tqt, sizeof (taskq_thread_t));
		return (NULL);
	}

	if (spl_taskq_thread_bind) {
		last_used_cpu = (last_used_cpu + 1) % num_online_cpus();
		kthread_bind(tqt->tqt_thread, last_used_cpu);
	}

	if (spl_taskq_thread_priority)
		set_user_nice(tqt->tqt_thread, PRIO_TO_NICE(tq->tq_pri));

	wake_up_process(tqt->tqt_thread);

	return (tqt);
}

taskq_t *
taskq_create(const char *name, int nthreads, pri_t pri,
    int minalloc, int maxalloc, uint_t flags)
{
	taskq_t *tq;
	taskq_thread_t *tqt;
	int count = 0, rc = 0, i;
	unsigned long irqflags;

	ASSERT(name != NULL);
	ASSERT(minalloc >= 0);
	ASSERT(maxalloc <= INT_MAX);
	ASSERT(!(flags & (TASKQ_CPR_SAFE)));	/* Unsupported */

	/* Scale the number of threads using nthreads as a percentage */
	if (flags & TASKQ_THREADS_CPU_PCT) {
		ASSERT(nthreads <= 100);
		ASSERT(nthreads >= 0);
		nthreads = MIN(nthreads, 100);
		nthreads = MAX(nthreads, 0);
		nthreads = MAX((num_online_cpus() * nthreads) / 100, 1);
	}

	tq = kmem_alloc(sizeof (*tq), KM_PUSHPAGE);
	if (tq == NULL)
		return (NULL);

	spin_lock_init(&tq->tq_lock);
	INIT_LIST_HEAD(&tq->tq_thread_list);
	INIT_LIST_HEAD(&tq->tq_active_list);
	tq->tq_name = strdup(name);
	tq->tq_nactive = 0;
	tq->tq_nthreads = 0;
	tq->tq_nspawn = 0;
	tq->tq_maxthreads = nthreads;
	tq->tq_pri = pri;
	tq->tq_minalloc = minalloc;
	tq->tq_maxalloc = maxalloc;
	tq->tq_nalloc = 0;
	tq->tq_flags = (flags | TASKQ_ACTIVE);
	tq->tq_next_id = 1;
	tq->tq_lowest_id = 1;
	INIT_LIST_HEAD(&tq->tq_free_list);
	INIT_LIST_HEAD(&tq->tq_pend_list);
	INIT_LIST_HEAD(&tq->tq_prio_list);
	INIT_LIST_HEAD(&tq->tq_delay_list);
	init_waitqueue_head(&tq->tq_work_waitq);
	init_waitqueue_head(&tq->tq_wait_waitq);
	tq->tq_lock_class = TQ_LOCK_GENERAL;
	INIT_LIST_HEAD(&tq->tq_taskqs);

	if (flags & TASKQ_PREPOPULATE) {
		spin_lock_irqsave_nested(&tq->tq_lock, irqflags,
		    tq->tq_lock_class);

		for (i = 0; i < minalloc; i++)
			task_done(tq, task_alloc(tq, TQ_PUSHPAGE | TQ_NEW,
			    &irqflags));

		spin_unlock_irqrestore(&tq->tq_lock, irqflags);
	}

	if ((flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic)
		nthreads = 1;

	for (i = 0; i < nthreads; i++) {
		tqt = taskq_thread_create(tq);
		if (tqt == NULL)
			rc = 1;
		else
			count++;
	}

	/* Wait for all threads to be started before potential destroy */
	wait_event(tq->tq_wait_waitq, tq->tq_nthreads == count);

	if (rc) {
		taskq_destroy(tq);
		tq = NULL;
	} else {
		down_write(&tq_list_sem);
		tq->tq_instance = taskq_find_by_name(name) + 1;
		list_add_tail(&tq->tq_taskqs, &tq_list);
		up_write(&tq_list_sem);
	}

	return (tq);
}
EXPORT_SYMBOL(taskq_create);
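
/*
 * Creation sketch with hypothetical values: four threads, four
 * prepopulated entries, and dynamic thread scaling:
 *
 *	taskq_t *tq = taskq_create("my_taskq", 4, maxclsyspri,
 *	    4, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
 *
 * With TASKQ_THREADS_CPU_PCT the nthreads argument is instead a
 * percentage of the online CPUs, e.g. 75 for three quarters of them.
 */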
1086 | ||
1087 | void | |
1088 | taskq_destroy(taskq_t *tq) | |
1089 | { | |
1090 | struct task_struct *thread; | |
1091 | taskq_thread_t *tqt; | |
1092 | taskq_ent_t *t; | |
1093 | unsigned long flags; | |
1094 | ||
1095 | ASSERT(tq); | |
1096 | spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class); | |
1097 | tq->tq_flags &= ~TASKQ_ACTIVE; | |
1098 | spin_unlock_irqrestore(&tq->tq_lock, flags); | |
1099 | ||
	/*
	 * When TASKQ_ACTIVE is clear new tasks may not be added nor may
	 * new worker threads be spawned for a dynamic taskq.
	 */
	if (dynamic_taskq != NULL)
		taskq_wait_outstanding(dynamic_taskq, 0);

	taskq_wait(tq);

	/* remove taskq from global list used by the kstats */
	down_write(&tq_list_sem);
	list_del(&tq->tq_taskqs);
	up_write(&tq_list_sem);

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);

	/*
	 * Signal each thread to exit and block until it does. Each thread
	 * is responsible for removing itself from the list and freeing its
	 * taskq_thread_t. This allows for idle threads to opt to remove
	 * themselves from the taskq. They can be recreated as needed.
	 */
	while (!list_empty(&tq->tq_thread_list)) {
		tqt = list_entry(tq->tq_thread_list.next,
		    taskq_thread_t, tqt_thread_list);
		thread = tqt->tqt_thread;
		spin_unlock_irqrestore(&tq->tq_lock, flags);

		kthread_stop(thread);

		spin_lock_irqsave_nested(&tq->tq_lock, flags,
		    tq->tq_lock_class);
	}

	while (!list_empty(&tq->tq_free_list)) {
		t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);

		ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

		list_del_init(&t->tqent_list);
		task_free(tq, t);
	}

	ASSERT0(tq->tq_nthreads);
	ASSERT0(tq->tq_nalloc);
	ASSERT0(tq->tq_nspawn);
	ASSERT(list_empty(&tq->tq_thread_list));
	ASSERT(list_empty(&tq->tq_active_list));
	ASSERT(list_empty(&tq->tq_free_list));
	ASSERT(list_empty(&tq->tq_pend_list));
	ASSERT(list_empty(&tq->tq_prio_list));
	ASSERT(list_empty(&tq->tq_delay_list));

	spin_unlock_irqrestore(&tq->tq_lock, flags);

	strfree(tq->tq_name);
	kmem_free(tq, sizeof (taskq_t));
}
EXPORT_SYMBOL(taskq_destroy);

int
spl_taskq_init(void)
{
	system_taskq = taskq_create("spl_system_taskq", MAX(boot_ncpus, 64),
	    maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE|TASKQ_DYNAMIC);
	if (system_taskq == NULL)
		return (1);

	dynamic_taskq = taskq_create("spl_dynamic_taskq", 1,
	    maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE);
	if (dynamic_taskq == NULL) {
		taskq_destroy(system_taskq);
		return (1);
	}

	/*
	 * This is used to annotate tq_lock, so
	 *   taskq_dispatch -> taskq_thread_spawn -> taskq_dispatch
	 * does not trigger a lockdep warning re: possible recursive locking
	 */
	dynamic_taskq->tq_lock_class = TQ_LOCK_DYNAMIC;

	return (0);
}

void
spl_taskq_fini(void)
{
	taskq_destroy(dynamic_taskq);
	dynamic_taskq = NULL;

	taskq_destroy(system_taskq);
	system_taskq = NULL;
}