/*
 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2007 The Regents of the University of California.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 * UCRL-CODE-235197
 *
 * This file is part of the SPL, Solaris Porting Layer.
 *
 * The SPL is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * The SPL is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Solaris Porting Layer (SPL) Task Queue Implementation.
 */

#include <sys/timer.h>
#include <sys/taskq.h>
#include <sys/kmem.h>
#include <sys/tsd.h>
#include <sys/trace_spl.h>
#ifdef HAVE_CPU_HOTPLUG
#include <linux/cpuhotplug.h>
#endif

static int spl_taskq_thread_bind = 0;
module_param(spl_taskq_thread_bind, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_bind, "Bind taskq thread to CPU by default");

static int spl_taskq_thread_dynamic = 1;
module_param(spl_taskq_thread_dynamic, int, 0444);
MODULE_PARM_DESC(spl_taskq_thread_dynamic, "Allow dynamic taskq threads");

static int spl_taskq_thread_priority = 1;
module_param(spl_taskq_thread_priority, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_priority,
	"Allow non-default priority for taskq threads");

static uint_t spl_taskq_thread_sequential = 4;
/* BEGIN CSTYLED */
module_param(spl_taskq_thread_sequential, uint, 0644);
/* END CSTYLED */
MODULE_PARM_DESC(spl_taskq_thread_sequential,
	"Create new taskq threads after N sequential tasks");

/*
 * Global system-wide dynamic task queue available for all consumers. This
 * taskq is not intended for long-running tasks; instead, a dedicated taskq
 * should be created.
 */
taskq_t *system_taskq;
EXPORT_SYMBOL(system_taskq);
/* Global dynamic task queue for long delay */
taskq_t *system_delay_taskq;
EXPORT_SYMBOL(system_delay_taskq);
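
/*
 * Illustrative sketch (not part of this file's API surface): a consumer
 * with a short-lived task would typically dispatch it to system_taskq,
 * while work that sleeps for a long interval belongs on system_delay_taskq
 * or a dedicated queue. The callback and argument below are hypothetical.
 *
 *	static void my_cb(void *arg) { ... }
 *	(void) taskq_dispatch(system_taskq, my_cb, arg, TQ_SLEEP);
 */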

/* Private dedicated taskq for creating new taskq threads on demand. */
static taskq_t *dynamic_taskq;
static taskq_thread_t *taskq_thread_create(taskq_t *);

#ifdef HAVE_CPU_HOTPLUG
/* Multi-callback id for cpu hotplugging. */
static int spl_taskq_cpuhp_state;
#endif

/* List of all taskqs */
LIST_HEAD(tq_list);
struct rw_semaphore tq_list_sem;
static uint_t taskq_tsd;

static int
task_km_flags(uint_t flags)
{
	if (flags & TQ_NOSLEEP)
		return (KM_NOSLEEP);

	if (flags & TQ_PUSHPAGE)
		return (KM_PUSHPAGE);

	return (KM_SLEEP);
}

/*
 * taskq_find_by_name - Find the largest instance number of a named taskq.
 */
static int
taskq_find_by_name(const char *name)
{
	struct list_head *tql = NULL;
	taskq_t *tq;

	list_for_each_prev(tql, &tq_list) {
		tq = list_entry(tql, taskq_t, tq_taskqs);
		if (strcmp(name, tq->tq_name) == 0)
			return (tq->tq_instance);
	}
	return (-1);
}

/*
 * NOTE: Must be called with tq->tq_lock held, returns a taskq_ent_t
 * which is not attached to the free, work, or pending taskq lists.
 */
static taskq_ent_t *
task_alloc(taskq_t *tq, uint_t flags, unsigned long *irqflags)
{
	taskq_ent_t *t;
	int count = 0;

	ASSERT(tq);
retry:
	/* Acquire taskq_ent_t's from free list if available */
	if (!list_empty(&tq->tq_free_list) && !(flags & TQ_NEW)) {
		t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);

		ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));
		ASSERT(!(t->tqent_flags & TQENT_FLAG_CANCEL));
		ASSERT(!timer_pending(&t->tqent_timer));

		list_del_init(&t->tqent_list);
		return (t);
	}

	/* Free list is empty and memory allocations are prohibited */
	if (flags & TQ_NOALLOC)
		return (NULL);

	/* Hit maximum taskq_ent_t pool size */
	if (tq->tq_nalloc >= tq->tq_maxalloc) {
		if (flags & TQ_NOSLEEP)
			return (NULL);

		/*
		 * Sleep periodically polling the free list for an available
		 * taskq_ent_t. Dispatching with TQ_SLEEP should always succeed
		 * but we cannot block forever waiting for a taskq_ent_t to
		 * show up in the free list, otherwise a deadlock can happen.
		 *
		 * Therefore, we need to allocate a new task even if the number
		 * of allocated tasks is above tq->tq_maxalloc, but we still
		 * end up delaying the task allocation by one second, thereby
		 * throttling the task dispatch rate.
		 */
		spin_unlock_irqrestore(&tq->tq_lock, *irqflags);
		schedule_timeout(HZ / 100);
		spin_lock_irqsave_nested(&tq->tq_lock, *irqflags,
		    tq->tq_lock_class);
		if (count < 100) {
			count++;
			goto retry;
		}
	}

	spin_unlock_irqrestore(&tq->tq_lock, *irqflags);
	t = kmem_alloc(sizeof (taskq_ent_t), task_km_flags(flags));
	spin_lock_irqsave_nested(&tq->tq_lock, *irqflags, tq->tq_lock_class);

	if (t) {
		taskq_init_ent(t);
		tq->tq_nalloc++;
	}

	return (t);
}

/*
 * NOTE: Must be called with tq->tq_lock held, expects the taskq_ent_t
 * to already be removed from the free, work, or pending taskq lists.
 */
static void
task_free(taskq_t *tq, taskq_ent_t *t)
{
	ASSERT(tq);
	ASSERT(t);
	ASSERT(list_empty(&t->tqent_list));
	ASSERT(!timer_pending(&t->tqent_timer));

	kmem_free(t, sizeof (taskq_ent_t));
	tq->tq_nalloc--;
}

/*
 * NOTE: Must be called with tq->tq_lock held, either destroys the
 * taskq_ent_t if too many exist or moves it to the free list for later use.
 */
static void
task_done(taskq_t *tq, taskq_ent_t *t)
{
	ASSERT(tq);
	ASSERT(t);

	/* Wake tasks blocked in taskq_wait_id() */
	wake_up_all(&t->tqent_waitq);

	list_del_init(&t->tqent_list);

	if (tq->tq_nalloc <= tq->tq_minalloc) {
		t->tqent_id = TASKQID_INVALID;
		t->tqent_func = NULL;
		t->tqent_arg = NULL;
		t->tqent_flags = 0;

		list_add_tail(&t->tqent_list, &tq->tq_free_list);
	} else {
		task_free(tq, t);
	}
}

/*
 * When a delayed task timer expires remove it from the delay list and
 * add it to the priority list in order for immediate processing.
 */
static void
task_expire_impl(taskq_ent_t *t)
{
	taskq_ent_t *w;
	taskq_t *tq = t->tqent_taskq;
	struct list_head *l = NULL;
	unsigned long flags;

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);

	if (t->tqent_flags & TQENT_FLAG_CANCEL) {
		ASSERT(list_empty(&t->tqent_list));
		spin_unlock_irqrestore(&tq->tq_lock, flags);
		return;
	}

	t->tqent_birth = jiffies;
	DTRACE_PROBE1(taskq_ent__birth, taskq_ent_t *, t);

	/*
	 * The priority list must be maintained in strict task id order
	 * from lowest to highest for lowest_id to be easily calculable.
	 */
	list_del(&t->tqent_list);
	list_for_each_prev(l, &tq->tq_prio_list) {
		w = list_entry(l, taskq_ent_t, tqent_list);
		if (w->tqent_id < t->tqent_id) {
			list_add(&t->tqent_list, l);
			break;
		}
	}
	if (l == &tq->tq_prio_list)
		list_add(&t->tqent_list, &tq->tq_prio_list);

	spin_unlock_irqrestore(&tq->tq_lock, flags);

	wake_up(&tq->tq_work_waitq);
}

static void
task_expire(spl_timer_list_t tl)
{
	struct timer_list *tmr = (struct timer_list *)tl;
	taskq_ent_t *t = from_timer(t, tmr, tqent_timer);
	task_expire_impl(t);
}

/*
 * Returns the lowest incomplete taskqid_t. The taskqid_t may
 * be queued on the pending list, on the priority list, on the
 * delay list, or on the work list currently being handled, but
 * it is not 100% complete yet.
 */
static taskqid_t
taskq_lowest_id(taskq_t *tq)
{
	taskqid_t lowest_id = tq->tq_next_id;
	taskq_ent_t *t;
	taskq_thread_t *tqt;

	if (!list_empty(&tq->tq_pend_list)) {
		t = list_entry(tq->tq_pend_list.next, taskq_ent_t, tqent_list);
		lowest_id = MIN(lowest_id, t->tqent_id);
	}

	if (!list_empty(&tq->tq_prio_list)) {
		t = list_entry(tq->tq_prio_list.next, taskq_ent_t, tqent_list);
		lowest_id = MIN(lowest_id, t->tqent_id);
	}

	if (!list_empty(&tq->tq_delay_list)) {
		t = list_entry(tq->tq_delay_list.next, taskq_ent_t, tqent_list);
		lowest_id = MIN(lowest_id, t->tqent_id);
	}

	if (!list_empty(&tq->tq_active_list)) {
		tqt = list_entry(tq->tq_active_list.next, taskq_thread_t,
		    tqt_active_list);
		ASSERT(tqt->tqt_id != TASKQID_INVALID);
		lowest_id = MIN(lowest_id, tqt->tqt_id);
	}

	return (lowest_id);
}

/*
 * Insert a task into a list keeping the list sorted by increasing taskqid.
 */
static void
taskq_insert_in_order(taskq_t *tq, taskq_thread_t *tqt)
{
	taskq_thread_t *w;
	struct list_head *l = NULL;

	ASSERT(tq);
	ASSERT(tqt);

	list_for_each_prev(l, &tq->tq_active_list) {
		w = list_entry(l, taskq_thread_t, tqt_active_list);
		if (w->tqt_id < tqt->tqt_id) {
			list_add(&tqt->tqt_active_list, l);
			break;
		}
	}
	if (l == &tq->tq_active_list)
		list_add(&tqt->tqt_active_list, &tq->tq_active_list);
}

/*
 * Find and return a task from the given list if it exists. The list
 * must be in lowest to highest task id order.
 */
static taskq_ent_t *
taskq_find_list(taskq_t *tq, struct list_head *lh, taskqid_t id)
{
	struct list_head *l = NULL;
	taskq_ent_t *t;

	list_for_each(l, lh) {
		t = list_entry(l, taskq_ent_t, tqent_list);

		if (t->tqent_id == id)
			return (t);

		if (t->tqent_id > id)
			break;
	}

	return (NULL);
}

/*
 * Find an already dispatched task given the task id regardless of what
 * state it is in. If a task is still pending it will be returned.
 * If a task is executing, then -EBUSY will be returned instead.
 * If the task has already been run then NULL is returned.
 */
static taskq_ent_t *
taskq_find(taskq_t *tq, taskqid_t id)
{
	taskq_thread_t *tqt;
	struct list_head *l = NULL;
	taskq_ent_t *t;

	t = taskq_find_list(tq, &tq->tq_delay_list, id);
	if (t)
		return (t);

	t = taskq_find_list(tq, &tq->tq_prio_list, id);
	if (t)
		return (t);

	t = taskq_find_list(tq, &tq->tq_pend_list, id);
	if (t)
		return (t);

	list_for_each(l, &tq->tq_active_list) {
		tqt = list_entry(l, taskq_thread_t, tqt_active_list);
		if (tqt->tqt_id == id) {
			/*
			 * Instead of returning tqt_task, we just return a non
			 * NULL value to prevent misuse, since tqt_task only
			 * has two valid fields.
			 */
			return (ERR_PTR(-EBUSY));
		}
	}

	return (NULL);
}

/*
 * Theory for the taskq_wait_id(), taskq_wait_outstanding(), and
 * taskq_wait() functions below.
 *
 * Taskq waiting is accomplished by tracking the lowest outstanding task
 * id and the next available task id. As tasks are dispatched they are
 * added to the tail of the pending, priority, or delay lists. As worker
 * threads become available the tasks are removed from the heads of these
 * lists and linked to the worker threads. This ensures the lists are
 * kept sorted by lowest to highest task id.
 *
 * Therefore the lowest outstanding task id can be quickly determined by
 * checking the head item from all of these lists. This value is stored
 * with the taskq as the lowest id. It only needs to be recalculated when
 * either the task with the current lowest id completes or is canceled.
 *
 * By blocking until the lowest task id exceeds the passed task id the
 * taskq_wait_outstanding() function can be easily implemented. Similarly,
 * by blocking until the lowest task id matches the next task id taskq_wait()
 * can be implemented.
 *
 * Callers should be aware that when there are multiple worker threads it
 * is possible for larger task ids to complete before smaller ones. Also
 * when the taskq contains delay tasks with small task ids callers may
 * block for a considerable length of time waiting for them to expire and
 * execute.
 */
static int
taskq_wait_id_check(taskq_t *tq, taskqid_t id)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	rc = (taskq_find(tq, id) == NULL);
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	return (rc);
}

/*
 * The taskq_wait_id() function blocks until the passed task id completes.
 * This does not guarantee that all lower task ids have completed.
 */
void
taskq_wait_id(taskq_t *tq, taskqid_t id)
{
	wait_event(tq->tq_wait_waitq, taskq_wait_id_check(tq, id));
}
EXPORT_SYMBOL(taskq_wait_id);

static int
taskq_wait_outstanding_check(taskq_t *tq, taskqid_t id)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	rc = (id < tq->tq_lowest_id);
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	return (rc);
}

/*
 * The taskq_wait_outstanding() function will block until all tasks with a
 * lower taskqid than the passed 'id' have been completed. Note that all
 * task ids are assigned monotonically at dispatch time. Zero may be
 * passed for the id to indicate that all tasks dispatched up to this
 * point, but not after, should be waited for.
 */
void
taskq_wait_outstanding(taskq_t *tq, taskqid_t id)
{
	id = id ? id : tq->tq_next_id - 1;
	wait_event(tq->tq_wait_waitq, taskq_wait_outstanding_check(tq, id));
}
EXPORT_SYMBOL(taskq_wait_outstanding);

static int
taskq_wait_check(taskq_t *tq)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	rc = (tq->tq_lowest_id == tq->tq_next_id);
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	return (rc);
}

/*
 * The taskq_wait() function will block until the taskq is empty.
 * This means that if a taskq re-dispatches work to itself taskq_wait()
 * callers will block indefinitely.
 */
void
taskq_wait(taskq_t *tq)
{
	wait_event(tq->tq_wait_waitq, taskq_wait_check(tq));
}
EXPORT_SYMBOL(taskq_wait);
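
/*
 * Illustrative example of the three wait primitives above (hypothetical
 * consumer code, shown only to make the theory comment concrete):
 *
 *	taskqid_t id = taskq_dispatch(tq, my_func, my_arg, TQ_SLEEP);
 *	if (id != TASKQID_INVALID)
 *		taskq_wait_id(tq, id);		(wait for this task only)
 *	taskq_wait_outstanding(tq, 0);		(all tasks dispatched so far)
 *	taskq_wait(tq);				(block until the taskq is empty)
 */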

int
taskq_member(taskq_t *tq, kthread_t *t)
{
	return (tq == (taskq_t *)tsd_get_by_thread(taskq_tsd, t));
}
EXPORT_SYMBOL(taskq_member);

taskq_t *
taskq_of_curthread(void)
{
	return (tsd_get(taskq_tsd));
}
EXPORT_SYMBOL(taskq_of_curthread);

/*
 * Cancel an already dispatched task given the task id. Still pending tasks
 * will be immediately canceled, and if the task is active the function will
 * block until it completes. Preallocated tasks which are canceled must be
 * freed by the caller.
 */
int
taskq_cancel_id(taskq_t *tq, taskqid_t id)
{
	taskq_ent_t *t;
	int rc = ENOENT;
	unsigned long flags;

	ASSERT(tq);

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	t = taskq_find(tq, id);
	if (t && t != ERR_PTR(-EBUSY)) {
		list_del_init(&t->tqent_list);
		t->tqent_flags |= TQENT_FLAG_CANCEL;

		/*
		 * When canceling the lowest outstanding task id we
		 * must recalculate the new lowest outstanding id.
		 */
		if (tq->tq_lowest_id == t->tqent_id) {
			tq->tq_lowest_id = taskq_lowest_id(tq);
			ASSERT3S(tq->tq_lowest_id, >, t->tqent_id);
		}

		/*
		 * The task_expire() function takes the tq->tq_lock so drop
		 * the lock before synchronously cancelling the timer.
		 */
		if (timer_pending(&t->tqent_timer)) {
			spin_unlock_irqrestore(&tq->tq_lock, flags);
			del_timer_sync(&t->tqent_timer);
			spin_lock_irqsave_nested(&tq->tq_lock, flags,
			    tq->tq_lock_class);
		}

		if (!(t->tqent_flags & TQENT_FLAG_PREALLOC))
			task_done(tq, t);

		rc = 0;
	}
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	if (t == ERR_PTR(-EBUSY)) {
		taskq_wait_id(tq, id);
		rc = EBUSY;
	}

	return (rc);
}
EXPORT_SYMBOL(taskq_cancel_id);
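
/*
 * Illustrative return-value handling for taskq_cancel_id() (hypothetical
 * caller, sketched from the semantics documented above):
 *
 *	switch (taskq_cancel_id(tq, id)) {
 *	case 0:		(task was still pending and has been canceled)
 *	case ENOENT:	(no such incomplete task id was found)
 *	case EBUSY:	(task was executing; we blocked until it finished)
 *	}
 */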
568 | ||
f5f2b87d | 569 | static int taskq_thread_spawn(taskq_t *tq); |
a64e5575 | 570 | |
bcd68186 | 571 | taskqid_t |
aed8671c | 572 | taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags) |
bcd68186 | 573 | { |
472a34ca | 574 | taskq_ent_t *t; |
cbba7146 | 575 | taskqid_t rc = TASKQID_INVALID; |
066b89e6 | 576 | unsigned long irqflags; |
f1ca4da6 | 577 | |
472a34ca BB |
578 | ASSERT(tq); |
579 | ASSERT(func); | |
d05ec4b4 | 580 | |
066b89e6 | 581 | spin_lock_irqsave_nested(&tq->tq_lock, irqflags, tq->tq_lock_class); |
f1ca4da6 | 582 | |
bcd68186 | 583 | /* Taskq being destroyed and all tasks drained */ |
f7a973d9 | 584 | if (!(tq->tq_flags & TASKQ_ACTIVE)) |
8d9a23e8 | 585 | goto out; |
f1ca4da6 | 586 | |
bcd68186 BB |
587 | /* Do not queue the task unless there is idle thread for it */ |
588 | ASSERT(tq->tq_nactive <= tq->tq_nthreads); | |
7bb5d92d TC |
589 | if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads)) { |
590 | /* Dynamic taskq may be able to spawn another thread */ | |
5461eefe BB |
591 | if (!(tq->tq_flags & TASKQ_DYNAMIC) || |
592 | taskq_thread_spawn(tq) == 0) | |
7bb5d92d TC |
593 | goto out; |
594 | } | |
bcd68186 | 595 | |
066b89e6 | 596 | if ((t = task_alloc(tq, flags, &irqflags)) == NULL) |
8d9a23e8 | 597 | goto out; |
f1ca4da6 | 598 | |
046a70c9 | 599 | spin_lock(&t->tqent_lock); |
f0d8bb26 | 600 | |
7bb5d92d TC |
601 | /* Queue to the front of the list to enforce TQ_NOQUEUE semantics */ |
602 | if (flags & TQ_NOQUEUE) | |
603 | list_add(&t->tqent_list, &tq->tq_prio_list); | |
f0d8bb26 | 604 | /* Queue to the priority list instead of the pending list */ |
7bb5d92d | 605 | else if (flags & TQ_FRONT) |
046a70c9 | 606 | list_add_tail(&t->tqent_list, &tq->tq_prio_list); |
f0d8bb26 | 607 | else |
046a70c9 | 608 | list_add_tail(&t->tqent_list, &tq->tq_pend_list); |
f0d8bb26 | 609 | |
046a70c9 | 610 | t->tqent_id = rc = tq->tq_next_id; |
bcd68186 | 611 | tq->tq_next_id++; |
472a34ca BB |
612 | t->tqent_func = func; |
613 | t->tqent_arg = arg; | |
d9acd930 | 614 | t->tqent_taskq = tq; |
d9acd930 BB |
615 | t->tqent_timer.function = NULL; |
616 | t->tqent_timer.expires = 0; | |
ae38e009 | 617 | |
8f3b403a | 618 | t->tqent_birth = jiffies; |
ae38e009 | 619 | DTRACE_PROBE1(taskq_ent__birth, taskq_ent_t *, t); |
44217f7a PS |
620 | |
621 | ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC)); | |
622 | ||
046a70c9 | 623 | spin_unlock(&t->tqent_lock); |
0bb43ca2 NB |
624 | |
625 | wake_up(&tq->tq_work_waitq); | |
bcd68186 | 626 | out: |
a64e5575 | 627 | /* Spawn additional taskq threads if required. */ |
7bb5d92d | 628 | if (!(flags & TQ_NOQUEUE) && tq->tq_nactive == tq->tq_nthreads) |
f5f2b87d | 629 | (void) taskq_thread_spawn(tq); |
a64e5575 | 630 | |
066b89e6 | 631 | spin_unlock_irqrestore(&tq->tq_lock, irqflags); |
8d9a23e8 | 632 | return (rc); |
f1ca4da6 | 633 | } |
aed8671c | 634 | EXPORT_SYMBOL(taskq_dispatch); |
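
/*
 * Illustrative sketch of the dispatch flags (hypothetical caller; the
 * flag semantics are exactly those implemented above):
 *
 *	(void) taskq_dispatch(tq, my_func, arg, TQ_SLEEP);	(may sleep)
 *	(void) taskq_dispatch(tq, my_func, arg, TQ_NOSLEEP);	(may fail)
 *	(void) taskq_dispatch(tq, my_func, arg, TQ_FRONT);	(priority list)
 *	(void) taskq_dispatch(tq, my_func, arg, TQ_NOQUEUE);	(idle/spawnable
 *								 thread only)
 */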

taskqid_t
taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg,
    uint_t flags, clock_t expire_time)
{
	taskqid_t rc = TASKQID_INVALID;
	taskq_ent_t *t;
	unsigned long irqflags;

	ASSERT(tq);
	ASSERT(func);

	spin_lock_irqsave_nested(&tq->tq_lock, irqflags, tq->tq_lock_class);

	/* Taskq being destroyed and all tasks drained */
	if (!(tq->tq_flags & TASKQ_ACTIVE))
		goto out;

	if ((t = task_alloc(tq, flags, &irqflags)) == NULL)
		goto out;

	spin_lock(&t->tqent_lock);

	/* Queue to the delay list for subsequent execution */
	list_add_tail(&t->tqent_list, &tq->tq_delay_list);

	t->tqent_id = rc = tq->tq_next_id;
	tq->tq_next_id++;
	t->tqent_func = func;
	t->tqent_arg = arg;
	t->tqent_taskq = tq;
	t->tqent_timer.function = task_expire;
	t->tqent_timer.expires = (unsigned long)expire_time;
	add_timer(&t->tqent_timer);

	ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

	spin_unlock(&t->tqent_lock);
out:
	/* Spawn additional taskq threads if required. */
	if (tq->tq_nactive == tq->tq_nthreads)
		(void) taskq_thread_spawn(tq);
	spin_unlock_irqrestore(&tq->tq_lock, irqflags);
	return (rc);
}
EXPORT_SYMBOL(taskq_dispatch_delay);
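
/*
 * Illustrative use of taskq_dispatch_delay() (hypothetical caller). The
 * expire_time argument is an absolute time, not a relative delay, so a
 * delay is expressed as "now + ticks"; ddi_get_lbolt() and MSEC_TO_TICK()
 * are assumed here from the broader SPL headers, not this file:
 *
 *	taskqid_t id = taskq_dispatch_delay(tq, my_func, arg, TQ_SLEEP,
 *	    ddi_get_lbolt() + MSEC_TO_TICK(500));
 */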

void
taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
    taskq_ent_t *t)
{
	unsigned long irqflags;
	ASSERT(tq);
	ASSERT(func);

	spin_lock_irqsave_nested(&tq->tq_lock, irqflags,
	    tq->tq_lock_class);

	/* Taskq being destroyed and all tasks drained */
	if (!(tq->tq_flags & TASKQ_ACTIVE)) {
		t->tqent_id = TASKQID_INVALID;
		goto out;
	}

	if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads)) {
		/* Dynamic taskq may be able to spawn another thread */
		if (!(tq->tq_flags & TASKQ_DYNAMIC) ||
		    taskq_thread_spawn(tq) == 0)
			goto out2;
		flags |= TQ_FRONT;
	}

	spin_lock(&t->tqent_lock);

	/*
	 * Make sure the entry is not on some other taskq; it is important to
	 * ASSERT() under lock
	 */
	ASSERT(taskq_empty_ent(t));

	/*
	 * Mark it as a prealloc'd task. This is important
	 * to ensure that we don't free it later.
	 */
	t->tqent_flags |= TQENT_FLAG_PREALLOC;

	/* Queue to the priority list instead of the pending list */
	if (flags & TQ_FRONT)
		list_add_tail(&t->tqent_list, &tq->tq_prio_list);
	else
		list_add_tail(&t->tqent_list, &tq->tq_pend_list);

	t->tqent_id = tq->tq_next_id;
	tq->tq_next_id++;
	t->tqent_func = func;
	t->tqent_arg = arg;
	t->tqent_taskq = tq;

	t->tqent_birth = jiffies;
	DTRACE_PROBE1(taskq_ent__birth, taskq_ent_t *, t);

	spin_unlock(&t->tqent_lock);

	wake_up(&tq->tq_work_waitq);
out:
	/* Spawn additional taskq threads if required. */
	if (tq->tq_nactive == tq->tq_nthreads)
		(void) taskq_thread_spawn(tq);
out2:
	spin_unlock_irqrestore(&tq->tq_lock, irqflags);
}
EXPORT_SYMBOL(taskq_dispatch_ent);

int
taskq_empty_ent(taskq_ent_t *t)
{
	return (list_empty(&t->tqent_list));
}
EXPORT_SYMBOL(taskq_empty_ent);

void
taskq_init_ent(taskq_ent_t *t)
{
	spin_lock_init(&t->tqent_lock);
	init_waitqueue_head(&t->tqent_waitq);
	timer_setup(&t->tqent_timer, NULL, 0);
	INIT_LIST_HEAD(&t->tqent_list);
	t->tqent_id = 0;
	t->tqent_func = NULL;
	t->tqent_arg = NULL;
	t->tqent_flags = 0;
	t->tqent_taskq = NULL;
}
EXPORT_SYMBOL(taskq_init_ent);
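
/*
 * Illustrative preallocated-entry pattern (hypothetical caller). Embedding
 * a taskq_ent_t in a longer-lived structure avoids task_alloc() entirely,
 * which is why TQENT_FLAG_PREALLOC entries are never freed above:
 *
 *	struct my_obj { taskq_ent_t ent; ... };
 *
 *	taskq_init_ent(&obj->ent);		(once, when obj is set up)
 *	taskq_dispatch_ent(tq, my_func, obj, 0, &obj->ent);
 */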

/*
 * Return the next pending task, preference is given to tasks on the
 * priority list which were dispatched with TQ_FRONT.
 */
static taskq_ent_t *
taskq_next_ent(taskq_t *tq)
{
	struct list_head *list;

	if (!list_empty(&tq->tq_prio_list))
		list = &tq->tq_prio_list;
	else if (!list_empty(&tq->tq_pend_list))
		list = &tq->tq_pend_list;
	else
		return (NULL);

	return (list_entry(list->next, taskq_ent_t, tqent_list));
}

/*
 * Spawns a new thread for the specified taskq.
 */
static void
taskq_thread_spawn_task(void *arg)
{
	taskq_t *tq = (taskq_t *)arg;
	unsigned long flags;

	if (taskq_thread_create(tq) == NULL) {
		/* restore spawning count if failed */
		spin_lock_irqsave_nested(&tq->tq_lock, flags,
		    tq->tq_lock_class);
		tq->tq_nspawn--;
		spin_unlock_irqrestore(&tq->tq_lock, flags);
	}
}

/*
 * Spawn additional threads for dynamic taskqs (TASKQ_DYNAMIC) when the
 * current number of threads is insufficient to handle the pending tasks.
 * These new threads must be created by the dedicated dynamic_taskq to
 * avoid deadlocks between thread creation and memory reclaim. The
 * system_taskq which is also a dynamic taskq cannot be safely used for
 * this.
 */
static int
taskq_thread_spawn(taskq_t *tq)
{
	int spawning = 0;

	if (!(tq->tq_flags & TASKQ_DYNAMIC))
		return (0);

	if ((tq->tq_nthreads + tq->tq_nspawn < tq->tq_maxthreads) &&
	    (tq->tq_flags & TASKQ_ACTIVE)) {
		spawning = (++tq->tq_nspawn);
		taskq_dispatch(dynamic_taskq, taskq_thread_spawn_task,
		    tq, TQ_NOSLEEP);
	}

	return (spawning);
}

/*
 * Threads in a dynamic taskq should only exit once it has been completely
 * drained and no other threads are actively servicing tasks. This prevents
 * threads from being created and destroyed more than is required.
 *
 * The first thread in the thread list is treated as the primary thread.
 * There is nothing special about the primary thread but in order to avoid
 * all the taskq pids from changing we opt to make it long running.
 */
static int
taskq_thread_should_stop(taskq_t *tq, taskq_thread_t *tqt)
{
	if (!(tq->tq_flags & TASKQ_DYNAMIC))
		return (0);

	if (list_first_entry(&(tq->tq_thread_list), taskq_thread_t,
	    tqt_thread_list) == tqt)
		return (0);

	return
	    ((tq->tq_nspawn == 0) &&	/* No threads are being spawned */
	    (tq->tq_nactive == 0) &&	/* No threads are handling tasks */
	    (tq->tq_nthreads > 1) &&	/* More than 1 thread is running */
	    (!taskq_next_ent(tq)) &&	/* There are no pending tasks */
	    (spl_taskq_thread_dynamic));	/* Dynamic taskqs are allowed */
}

static int
taskq_thread(void *args)
{
	DECLARE_WAITQUEUE(wait, current);
	sigset_t blocked;
	taskq_thread_t *tqt = args;
	taskq_t *tq;
	taskq_ent_t *t;
	int seq_tasks = 0;
	unsigned long flags;
	taskq_ent_t dup_task = {};

	ASSERT(tqt);
	ASSERT(tqt->tqt_tq);
	tq = tqt->tqt_tq;
	current->flags |= PF_NOFREEZE;

	(void) spl_fstrans_mark();

	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	tsd_set(taskq_tsd, tq);
	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	/*
	 * If we are dynamically spawned, decrease spawning count. Note that
	 * we could be created during taskq_create, in which case we shouldn't
	 * do the decrement. But it's fine because taskq_create will reset
	 * tq_nspawn later.
	 */
	if (tq->tq_flags & TASKQ_DYNAMIC)
		tq->tq_nspawn--;

	/* Immediately exit if more threads than allowed were created. */
	if (tq->tq_nthreads >= tq->tq_maxthreads)
		goto error;

	tq->tq_nthreads++;
	list_add_tail(&tqt->tqt_thread_list, &tq->tq_thread_list);
	wake_up(&tq->tq_wait_waitq);
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {

		if (list_empty(&tq->tq_pend_list) &&
		    list_empty(&tq->tq_prio_list)) {

			if (taskq_thread_should_stop(tq, tqt)) {
				wake_up_all(&tq->tq_wait_waitq);
				break;
			}

			add_wait_queue_exclusive(&tq->tq_work_waitq, &wait);
			spin_unlock_irqrestore(&tq->tq_lock, flags);

			schedule();
			seq_tasks = 0;

			spin_lock_irqsave_nested(&tq->tq_lock, flags,
			    tq->tq_lock_class);
			remove_wait_queue(&tq->tq_work_waitq, &wait);
		} else {
			__set_current_state(TASK_RUNNING);
		}

		if ((t = taskq_next_ent(tq)) != NULL) {
			list_del_init(&t->tqent_list);

			/*
			 * A TQENT_FLAG_PREALLOC task may be reused or freed
			 * during the task function call. Store tqent_id and
			 * tqent_flags here.
			 *
			 * Also use an on stack taskq_ent_t for tqt_task
			 * assignment in this case; we want to make sure
			 * to duplicate all fields, so the values are
			 * correct when it's accessed via DTRACE_PROBE*.
			 */
			tqt->tqt_id = t->tqent_id;
			tqt->tqt_flags = t->tqent_flags;

			if (t->tqent_flags & TQENT_FLAG_PREALLOC) {
				dup_task = *t;
				t = &dup_task;
			}
			tqt->tqt_task = t;

			taskq_insert_in_order(tq, tqt);
			tq->tq_nactive++;
			spin_unlock_irqrestore(&tq->tq_lock, flags);

			DTRACE_PROBE1(taskq_ent__start, taskq_ent_t *, t);

			/* Perform the requested task */
			t->tqent_func(t->tqent_arg);

			DTRACE_PROBE1(taskq_ent__finish, taskq_ent_t *, t);

			spin_lock_irqsave_nested(&tq->tq_lock, flags,
			    tq->tq_lock_class);
			tq->tq_nactive--;
			list_del_init(&tqt->tqt_active_list);
			tqt->tqt_task = NULL;

			/* For prealloc'd tasks, we don't free anything. */
			if (!(tqt->tqt_flags & TQENT_FLAG_PREALLOC))
				task_done(tq, t);

			/*
			 * When the current lowest outstanding taskqid is
			 * done calculate the new lowest outstanding id
			 */
			if (tq->tq_lowest_id == tqt->tqt_id) {
				tq->tq_lowest_id = taskq_lowest_id(tq);
				ASSERT3S(tq->tq_lowest_id, >, tqt->tqt_id);
			}

			/* Spawn additional taskq threads if required. */
			if ((++seq_tasks) > spl_taskq_thread_sequential &&
			    taskq_thread_spawn(tq))
				seq_tasks = 0;

			tqt->tqt_id = TASKQID_INVALID;
			tqt->tqt_flags = 0;
			wake_up_all(&tq->tq_wait_waitq);
		} else {
			if (taskq_thread_should_stop(tq, tqt))
				break;
		}

		set_current_state(TASK_INTERRUPTIBLE);

	}

	__set_current_state(TASK_RUNNING);
	tq->tq_nthreads--;
	list_del_init(&tqt->tqt_thread_list);
error:
	kmem_free(tqt, sizeof (taskq_thread_t));
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	tsd_set(taskq_tsd, NULL);
	thread_exit();

	return (0);
}

static taskq_thread_t *
taskq_thread_create(taskq_t *tq)
{
	static int last_used_cpu = 0;
	taskq_thread_t *tqt;

	tqt = kmem_alloc(sizeof (*tqt), KM_PUSHPAGE);
	INIT_LIST_HEAD(&tqt->tqt_thread_list);
	INIT_LIST_HEAD(&tqt->tqt_active_list);
	tqt->tqt_tq = tq;
	tqt->tqt_id = TASKQID_INVALID;

	tqt->tqt_thread = spl_kthread_create(taskq_thread, tqt,
	    "%s", tq->tq_name);
	if (tqt->tqt_thread == NULL) {
		kmem_free(tqt, sizeof (taskq_thread_t));
		return (NULL);
	}

	if (spl_taskq_thread_bind) {
		last_used_cpu = (last_used_cpu + 1) % num_online_cpus();
		kthread_bind(tqt->tqt_thread, last_used_cpu);
	}

	if (spl_taskq_thread_priority)
		set_user_nice(tqt->tqt_thread, PRIO_TO_NICE(tq->tq_pri));

	wake_up_process(tqt->tqt_thread);

	return (tqt);
}

taskq_t *
taskq_create(const char *name, int threads_arg, pri_t pri,
    int minalloc, int maxalloc, uint_t flags)
{
	taskq_t *tq;
	taskq_thread_t *tqt;
	int count = 0, rc = 0, i;
	unsigned long irqflags;
	int nthreads = threads_arg;

	ASSERT(name != NULL);
	ASSERT(minalloc >= 0);
	ASSERT(!(flags & (TASKQ_CPR_SAFE)));	/* Unsupported */

	/* Scale the number of threads using nthreads as a percentage */
	if (flags & TASKQ_THREADS_CPU_PCT) {
		ASSERT(nthreads <= 100);
		ASSERT(nthreads >= 0);
		nthreads = MIN(threads_arg, 100);
		nthreads = MAX(nthreads, 0);
		nthreads = MAX((num_online_cpus() * nthreads) / 100, 1);
	}

	tq = kmem_alloc(sizeof (*tq), KM_PUSHPAGE);
	if (tq == NULL)
		return (NULL);

	tq->tq_hp_support = B_FALSE;
#ifdef HAVE_CPU_HOTPLUG
	if (flags & TASKQ_THREADS_CPU_PCT) {
		tq->tq_hp_support = B_TRUE;
		if (cpuhp_state_add_instance_nocalls(spl_taskq_cpuhp_state,
		    &tq->tq_hp_cb_node) != 0) {
			kmem_free(tq, sizeof (*tq));
			return (NULL);
		}
	}
#endif

	spin_lock_init(&tq->tq_lock);
	INIT_LIST_HEAD(&tq->tq_thread_list);
	INIT_LIST_HEAD(&tq->tq_active_list);
	tq->tq_name = kmem_strdup(name);
	tq->tq_nactive = 0;
	tq->tq_nthreads = 0;
	tq->tq_nspawn = 0;
	tq->tq_maxthreads = nthreads;
	tq->tq_cpu_pct = threads_arg;
	tq->tq_pri = pri;
	tq->tq_minalloc = minalloc;
	tq->tq_maxalloc = maxalloc;
	tq->tq_nalloc = 0;
	tq->tq_flags = (flags | TASKQ_ACTIVE);
	tq->tq_next_id = TASKQID_INITIAL;
	tq->tq_lowest_id = TASKQID_INITIAL;
	INIT_LIST_HEAD(&tq->tq_free_list);
	INIT_LIST_HEAD(&tq->tq_pend_list);
	INIT_LIST_HEAD(&tq->tq_prio_list);
	INIT_LIST_HEAD(&tq->tq_delay_list);
	init_waitqueue_head(&tq->tq_work_waitq);
	init_waitqueue_head(&tq->tq_wait_waitq);
	tq->tq_lock_class = TQ_LOCK_GENERAL;
	INIT_LIST_HEAD(&tq->tq_taskqs);

	if (flags & TASKQ_PREPOPULATE) {
		spin_lock_irqsave_nested(&tq->tq_lock, irqflags,
		    tq->tq_lock_class);

		for (i = 0; i < minalloc; i++)
			task_done(tq, task_alloc(tq, TQ_PUSHPAGE | TQ_NEW,
			    &irqflags));

		spin_unlock_irqrestore(&tq->tq_lock, irqflags);
	}

	if ((flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic)
		nthreads = 1;

	for (i = 0; i < nthreads; i++) {
		tqt = taskq_thread_create(tq);
		if (tqt == NULL)
			rc = 1;
		else
			count++;
	}

	/* Wait for all threads to be started before potential destroy */
	wait_event(tq->tq_wait_waitq, tq->tq_nthreads == count);
	/*
	 * taskq_thread might have touched nspawn, but these threads are
	 * not dynamically spawned so the count should not include them.
	 * Reset it to 0.
	 */
	tq->tq_nspawn = 0;

	if (rc) {
		taskq_destroy(tq);
		tq = NULL;
	} else {
		down_write(&tq_list_sem);
		tq->tq_instance = taskq_find_by_name(name) + 1;
		list_add_tail(&tq->tq_taskqs, &tq_list);
		up_write(&tq_list_sem);
	}

	return (tq);
}
EXPORT_SYMBOL(taskq_create);
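
/*
 * Illustrative create/destroy pairing (hypothetical consumer; the flag
 * combination mirrors the system taskqs created in spl_taskq_init()):
 *
 *	taskq_t *tq = taskq_create("my_taskq", 4, maxclsyspri,
 *	    4, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
 *	...
 *	taskq_destroy(tq);
 */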

void
taskq_destroy(taskq_t *tq)
{
	struct task_struct *thread;
	taskq_thread_t *tqt;
	taskq_ent_t *t;
	unsigned long flags;

	ASSERT(tq);
	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	tq->tq_flags &= ~TASKQ_ACTIVE;
	spin_unlock_irqrestore(&tq->tq_lock, flags);

#ifdef HAVE_CPU_HOTPLUG
	if (tq->tq_hp_support) {
		VERIFY0(cpuhp_state_remove_instance_nocalls(
		    spl_taskq_cpuhp_state, &tq->tq_hp_cb_node));
	}
#endif
	/*
	 * When TASKQ_ACTIVE is clear new tasks may not be added nor may
	 * new worker threads be spawned for dynamic taskq.
	 */
	if (dynamic_taskq != NULL)
		taskq_wait_outstanding(dynamic_taskq, 0);

	taskq_wait(tq);

	/* remove taskq from global list used by the kstats */
	down_write(&tq_list_sem);
	list_del(&tq->tq_taskqs);
	up_write(&tq_list_sem);

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	/* wait for spawning threads to insert themselves to the list */
	while (tq->tq_nspawn) {
		spin_unlock_irqrestore(&tq->tq_lock, flags);
		schedule_timeout_interruptible(1);
		spin_lock_irqsave_nested(&tq->tq_lock, flags,
		    tq->tq_lock_class);
	}

	/*
	 * Signal each thread to exit and block until it does. Each thread
	 * is responsible for removing itself from the list and freeing its
	 * taskq_thread_t. This allows for idle threads to opt to remove
	 * themselves from the taskq. They can be recreated as needed.
	 */
	while (!list_empty(&tq->tq_thread_list)) {
		tqt = list_entry(tq->tq_thread_list.next,
		    taskq_thread_t, tqt_thread_list);
		thread = tqt->tqt_thread;
		spin_unlock_irqrestore(&tq->tq_lock, flags);

		kthread_stop(thread);

		spin_lock_irqsave_nested(&tq->tq_lock, flags,
		    tq->tq_lock_class);
	}

	while (!list_empty(&tq->tq_free_list)) {
		t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);

		ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

		list_del_init(&t->tqent_list);
		task_free(tq, t);
	}

	ASSERT0(tq->tq_nthreads);
	ASSERT0(tq->tq_nalloc);
	ASSERT0(tq->tq_nspawn);
	ASSERT(list_empty(&tq->tq_thread_list));
	ASSERT(list_empty(&tq->tq_active_list));
	ASSERT(list_empty(&tq->tq_free_list));
	ASSERT(list_empty(&tq->tq_pend_list));
	ASSERT(list_empty(&tq->tq_prio_list));
	ASSERT(list_empty(&tq->tq_delay_list));

	spin_unlock_irqrestore(&tq->tq_lock, flags);

	kmem_strfree(tq->tq_name);
	kmem_free(tq, sizeof (taskq_t));
}
EXPORT_SYMBOL(taskq_destroy);

static unsigned int spl_taskq_kick = 0;

/*
 * 2.6.36 API Change
 * module_param_cb is introduced to take kernel_param_ops and
 * module_param_call is marked as obsolete. Also set and get operations
 * were changed to take a 'const struct kernel_param *'.
 */
static int
#ifdef module_param_cb
param_set_taskq_kick(const char *val, const struct kernel_param *kp)
#else
param_set_taskq_kick(const char *val, struct kernel_param *kp)
#endif
{
	int ret;
	taskq_t *tq = NULL;
	taskq_ent_t *t;
	unsigned long flags;

	ret = param_set_uint(val, kp);
	if (ret < 0 || !spl_taskq_kick)
		return (ret);
	/* reset value */
	spl_taskq_kick = 0;

	down_read(&tq_list_sem);
	list_for_each_entry(tq, &tq_list, tq_taskqs) {
		spin_lock_irqsave_nested(&tq->tq_lock, flags,
		    tq->tq_lock_class);
		/* Check if the first pending task is older than 5 seconds */
		t = taskq_next_ent(tq);
		if (t && time_after(jiffies, t->tqent_birth + 5*HZ)) {
			(void) taskq_thread_spawn(tq);
			printk(KERN_INFO "spl: Kicked taskq %s/%d\n",
			    tq->tq_name, tq->tq_instance);
		}
		spin_unlock_irqrestore(&tq->tq_lock, flags);
	}
	up_read(&tq_list_sem);
	return (ret);
}

#ifdef module_param_cb
static const struct kernel_param_ops param_ops_taskq_kick = {
	.set = param_set_taskq_kick,
	.get = param_get_uint,
};
module_param_cb(spl_taskq_kick, &param_ops_taskq_kick, &spl_taskq_kick, 0644);
#else
module_param_call(spl_taskq_kick, param_set_taskq_kick, param_get_uint,
    &spl_taskq_kick, 0644);
#endif
MODULE_PARM_DESC(spl_taskq_kick,
    "Write nonzero to kick stuck taskqs to spawn more threads");

#ifdef HAVE_CPU_HOTPLUG
/*
 * This callback will be called exactly once for each core that comes online,
 * for each dynamic taskq. We attempt to expand taskqs that have
 * TASKQ_THREADS_CPU_PCT set. We need to redo the percentage calculation every
 * time, to correctly determine whether or not to add a thread.
 */
static int
spl_taskq_expand(unsigned int cpu, struct hlist_node *node)
{
	taskq_t *tq = list_entry(node, taskq_t, tq_hp_cb_node);
	unsigned long flags;
	int err = 0;

	ASSERT(tq);
	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);

	if (!(tq->tq_flags & TASKQ_ACTIVE)) {
		spin_unlock_irqrestore(&tq->tq_lock, flags);
		return (err);
	}

	ASSERT(tq->tq_flags & TASKQ_THREADS_CPU_PCT);
	int nthreads = MIN(tq->tq_cpu_pct, 100);
	nthreads = MAX(((num_online_cpus() + 1) * nthreads) / 100, 1);
	tq->tq_maxthreads = nthreads;

	if (!((tq->tq_flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic) &&
	    tq->tq_maxthreads > tq->tq_nthreads) {
		spin_unlock_irqrestore(&tq->tq_lock, flags);
		taskq_thread_t *tqt = taskq_thread_create(tq);
		if (tqt == NULL)
			err = -1;
		return (err);
	}
	spin_unlock_irqrestore(&tq->tq_lock, flags);
	return (err);
}

/*
 * While we don't support offlining CPUs, it is possible that CPUs will fail
 * to online successfully. We do need to be able to handle this case
 * gracefully.
 */
static int
spl_taskq_prepare_down(unsigned int cpu, struct hlist_node *node)
{
	taskq_t *tq = list_entry(node, taskq_t, tq_hp_cb_node);
	unsigned long flags;

	ASSERT(tq);
	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);

	if (!(tq->tq_flags & TASKQ_ACTIVE))
		goto out;

	ASSERT(tq->tq_flags & TASKQ_THREADS_CPU_PCT);
	int nthreads = MIN(tq->tq_cpu_pct, 100);
	nthreads = MAX(((num_online_cpus()) * nthreads) / 100, 1);
	tq->tq_maxthreads = nthreads;

	if (!((tq->tq_flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic) &&
	    tq->tq_maxthreads < tq->tq_nthreads) {
		ASSERT3U(tq->tq_maxthreads, ==, tq->tq_nthreads - 1);
		taskq_thread_t *tqt = list_entry(tq->tq_thread_list.next,
		    taskq_thread_t, tqt_thread_list);
		struct task_struct *thread = tqt->tqt_thread;
		spin_unlock_irqrestore(&tq->tq_lock, flags);

		kthread_stop(thread);

		return (0);
	}

out:
	spin_unlock_irqrestore(&tq->tq_lock, flags);
	return (0);
}
#endif

int
spl_taskq_init(void)
{
	init_rwsem(&tq_list_sem);
	tsd_create(&taskq_tsd, NULL);

#ifdef HAVE_CPU_HOTPLUG
	spl_taskq_cpuhp_state = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
	    "fs/spl_taskq:online", spl_taskq_expand, spl_taskq_prepare_down);
#endif

	system_taskq = taskq_create("spl_system_taskq", MAX(boot_ncpus, 64),
	    maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE|TASKQ_DYNAMIC);
	if (system_taskq == NULL)
		return (-ENOMEM);

	system_delay_taskq = taskq_create("spl_delay_taskq", MAX(boot_ncpus, 4),
	    maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE|TASKQ_DYNAMIC);
	if (system_delay_taskq == NULL) {
#ifdef HAVE_CPU_HOTPLUG
		cpuhp_remove_multi_state(spl_taskq_cpuhp_state);
#endif
		taskq_destroy(system_taskq);
		return (-ENOMEM);
	}

	dynamic_taskq = taskq_create("spl_dynamic_taskq", 1,
	    maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE);
	if (dynamic_taskq == NULL) {
#ifdef HAVE_CPU_HOTPLUG
		cpuhp_remove_multi_state(spl_taskq_cpuhp_state);
#endif
		taskq_destroy(system_taskq);
		taskq_destroy(system_delay_taskq);
		return (-ENOMEM);
	}

	/*
	 * This is used to annotate tq_lock, so
	 *	taskq_dispatch -> taskq_thread_spawn -> taskq_dispatch
	 * does not trigger a lockdep warning re: possible recursive locking
	 */
	dynamic_taskq->tq_lock_class = TQ_LOCK_DYNAMIC;

	return (0);
}

void
spl_taskq_fini(void)
{
	taskq_destroy(dynamic_taskq);
	dynamic_taskq = NULL;

	taskq_destroy(system_delay_taskq);
	system_delay_taskq = NULL;

	taskq_destroy(system_taskq);
	system_taskq = NULL;

	tsd_destroy(&taskq_tsd);

#ifdef HAVE_CPU_HOTPLUG
	cpuhp_remove_multi_state(spl_taskq_cpuhp_state);
	spl_taskq_cpuhp_state = 0;
#endif
}