/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *  UCRL-CODE-235197
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://zfsonlinux.org/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *****************************************************************************
 *  Solaris Porting Layer (SPL) Task Queue Implementation.
\*****************************************************************************/

#include <sys/taskq.h>
#include <sys/kmem.h>

int spl_taskq_thread_bind = 0;
module_param(spl_taskq_thread_bind, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_bind, "Bind taskq thread to CPU by default");

int spl_taskq_thread_dynamic = 1;
module_param(spl_taskq_thread_dynamic, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_dynamic, "Allow dynamic taskq threads");

int spl_taskq_thread_priority = 1;
module_param(spl_taskq_thread_priority, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_priority,
	"Allow non-default priority for taskq threads");

int spl_taskq_thread_sequential = 4;
module_param(spl_taskq_thread_sequential, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_sequential,
	"Create new taskq threads after N sequential tasks");

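/*
 * Note (added for illustration, not from the original sources): because the
 * parameters above are registered with mode 0644 they may be set at module
 * load time, e.g. "modprobe spl spl_taskq_thread_bind=1", or adjusted later
 * through sysfs under /sys/module/spl/parameters/, the standard location
 * the kernel uses for module_param() entries.
 */
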
/* Global system-wide dynamic task queue available for all consumers */
taskq_t *system_taskq;
EXPORT_SYMBOL(system_taskq);

/* Private dedicated taskq for creating new taskq threads on demand. */
static taskq_t *dynamic_taskq;
static taskq_thread_t *taskq_thread_create(taskq_t *);

static int
task_km_flags(uint_t flags)
{
	if (flags & TQ_NOSLEEP)
		return (KM_NOSLEEP);

	if (flags & TQ_PUSHPAGE)
		return (KM_PUSHPAGE);

	return (KM_SLEEP);
}

/*
 * NOTE: Must be called with tq->tq_lock held, returns a taskq_ent_t
 * which is not attached to the free, work, or pending taskq lists.
 */
static taskq_ent_t *
task_alloc(taskq_t *tq, uint_t flags)
{
	taskq_ent_t *t;
	int count = 0;

	ASSERT(tq);
	ASSERT(spin_is_locked(&tq->tq_lock));
retry:
	/* Acquire taskq_ent_t's from free list if available */
	if (!list_empty(&tq->tq_free_list) && !(flags & TQ_NEW)) {
		t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);

		ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));
		ASSERT(!(t->tqent_flags & TQENT_FLAG_CANCEL));
		ASSERT(!timer_pending(&t->tqent_timer));

		list_del_init(&t->tqent_list);
		return (t);
	}

	/* Free list is empty and memory allocations are prohibited */
	if (flags & TQ_NOALLOC)
		return (NULL);

	/* Hit maximum taskq_ent_t pool size */
	if (tq->tq_nalloc >= tq->tq_maxalloc) {
		if (flags & TQ_NOSLEEP)
			return (NULL);

		/*
		 * Sleep periodically polling the free list for an available
		 * taskq_ent_t. Dispatching with TQ_SLEEP should always succeed
		 * but we cannot block forever waiting for a taskq_ent_t to
		 * show up in the free list, otherwise a deadlock can happen.
		 *
		 * Therefore, we need to allocate a new task even if the number
		 * of allocated tasks is above tq->tq_maxalloc, but we still
		 * end up delaying the task allocation by one second, thereby
		 * throttling the task dispatch rate.
		 */
		spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
		schedule_timeout(HZ / 100);
		spin_lock_irqsave_nested(&tq->tq_lock, tq->tq_lock_flags,
		    tq->tq_lock_class);
		if (count < 100) {
			count++;
			goto retry;
		}
	}

	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
	t = kmem_alloc(sizeof (taskq_ent_t), task_km_flags(flags));
	spin_lock_irqsave_nested(&tq->tq_lock, tq->tq_lock_flags,
	    tq->tq_lock_class);

	if (t) {
		taskq_init_ent(t);
		tq->tq_nalloc++;
	}

	return (t);
}

/*
 * NOTE: Must be called with tq->tq_lock held, expects the taskq_ent_t
 * to already be removed from the free, work, or pending taskq lists.
 */
static void
task_free(taskq_t *tq, taskq_ent_t *t)
{
	ASSERT(tq);
	ASSERT(t);
	ASSERT(spin_is_locked(&tq->tq_lock));
	ASSERT(list_empty(&t->tqent_list));
	ASSERT(!timer_pending(&t->tqent_timer));

	kmem_free(t, sizeof (taskq_ent_t));
	tq->tq_nalloc--;
}

/*
 * NOTE: Must be called with tq->tq_lock held, either destroys the
 * taskq_ent_t if too many exist or moves it to the free list for later use.
 */
static void
task_done(taskq_t *tq, taskq_ent_t *t)
{
	ASSERT(tq);
	ASSERT(t);
	ASSERT(spin_is_locked(&tq->tq_lock));

	/* Wake tasks blocked in taskq_wait_id() */
	wake_up_all(&t->tqent_waitq);

	list_del_init(&t->tqent_list);

	if (tq->tq_nalloc <= tq->tq_minalloc) {
		t->tqent_id = 0;
		t->tqent_func = NULL;
		t->tqent_arg = NULL;
		t->tqent_flags = 0;

		list_add_tail(&t->tqent_list, &tq->tq_free_list);
	} else {
		task_free(tq, t);
	}
}

/*
 * When a delayed task timer expires remove it from the delay list and
 * add it to the priority list for immediate processing.
 */
static void
task_expire(unsigned long data)
{
	taskq_ent_t *w, *t = (taskq_ent_t *)data;
	taskq_t *tq = t->tqent_taskq;
	struct list_head *l;

	spin_lock_irqsave_nested(&tq->tq_lock, tq->tq_lock_flags,
	    tq->tq_lock_class);

	if (t->tqent_flags & TQENT_FLAG_CANCEL) {
		ASSERT(list_empty(&t->tqent_list));
		spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
		return;
	}

	/*
	 * The priority list must be maintained in strict task id order
	 * from lowest to highest for lowest_id to be easily calculable.
	 */
	list_del(&t->tqent_list);
	list_for_each_prev(l, &tq->tq_prio_list) {
		w = list_entry(l, taskq_ent_t, tqent_list);
		if (w->tqent_id < t->tqent_id) {
			list_add(&t->tqent_list, l);
			break;
		}
	}
	if (l == &tq->tq_prio_list)
		list_add(&t->tqent_list, &tq->tq_prio_list);

	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

	wake_up(&tq->tq_work_waitq);
}

/*
 * Returns the lowest incomplete taskqid_t. The taskqid_t may
 * be queued on the pending list, on the priority list, on the
 * delay list, or on the work list currently being handled, but
 * it is not 100% complete yet.
 */
static taskqid_t
taskq_lowest_id(taskq_t *tq)
{
	taskqid_t lowest_id = tq->tq_next_id;
	taskq_ent_t *t;
	taskq_thread_t *tqt;

	ASSERT(tq);
	ASSERT(spin_is_locked(&tq->tq_lock));

	if (!list_empty(&tq->tq_pend_list)) {
		t = list_entry(tq->tq_pend_list.next, taskq_ent_t, tqent_list);
		lowest_id = MIN(lowest_id, t->tqent_id);
	}

	if (!list_empty(&tq->tq_prio_list)) {
		t = list_entry(tq->tq_prio_list.next, taskq_ent_t, tqent_list);
		lowest_id = MIN(lowest_id, t->tqent_id);
	}

	if (!list_empty(&tq->tq_delay_list)) {
		t = list_entry(tq->tq_delay_list.next, taskq_ent_t, tqent_list);
		lowest_id = MIN(lowest_id, t->tqent_id);
	}

	if (!list_empty(&tq->tq_active_list)) {
		tqt = list_entry(tq->tq_active_list.next, taskq_thread_t,
		    tqt_active_list);
		ASSERT(tqt->tqt_id != 0);
		lowest_id = MIN(lowest_id, tqt->tqt_id);
	}

	return (lowest_id);
}

/*
 * Insert a taskq thread into the tq_active_list, keeping the list
 * sorted by increasing task id.
 */
static void
taskq_insert_in_order(taskq_t *tq, taskq_thread_t *tqt)
{
	taskq_thread_t *w;
	struct list_head *l;

	ASSERT(tq);
	ASSERT(tqt);
	ASSERT(spin_is_locked(&tq->tq_lock));

	list_for_each_prev(l, &tq->tq_active_list) {
		w = list_entry(l, taskq_thread_t, tqt_active_list);
		if (w->tqt_id < tqt->tqt_id) {
			list_add(&tqt->tqt_active_list, l);
			break;
		}
	}
	if (l == &tq->tq_active_list)
		list_add(&tqt->tqt_active_list, &tq->tq_active_list);
}

/*
 * Find and return a task from the given list if it exists. The list
 * must be in lowest to highest task id order.
 */
static taskq_ent_t *
taskq_find_list(taskq_t *tq, struct list_head *lh, taskqid_t id)
{
	struct list_head *l;
	taskq_ent_t *t;

	ASSERT(spin_is_locked(&tq->tq_lock));

	list_for_each(l, lh) {
		t = list_entry(l, taskq_ent_t, tqent_list);

		if (t->tqent_id == id)
			return (t);

		if (t->tqent_id > id)
			break;
	}

	return (NULL);
}

/*
 * Find an already dispatched task given the task id regardless of what
 * state it is in. If a task is still pending or executing it will be
 * returned and 'active' set appropriately. If the task has already
 * been run then NULL is returned.
 */
static taskq_ent_t *
taskq_find(taskq_t *tq, taskqid_t id, int *active)
{
	taskq_thread_t *tqt;
	struct list_head *l;
	taskq_ent_t *t;

	ASSERT(spin_is_locked(&tq->tq_lock));
	*active = 0;

	t = taskq_find_list(tq, &tq->tq_delay_list, id);
	if (t)
		return (t);

	t = taskq_find_list(tq, &tq->tq_prio_list, id);
	if (t)
		return (t);

	t = taskq_find_list(tq, &tq->tq_pend_list, id);
	if (t)
		return (t);

	list_for_each(l, &tq->tq_active_list) {
		tqt = list_entry(l, taskq_thread_t, tqt_active_list);
		if (tqt->tqt_id == id) {
			t = tqt->tqt_task;
			*active = 1;
			return (t);
		}
	}

	return (NULL);
}

/*
 * Theory for the taskq_wait_id(), taskq_wait_outstanding(), and
 * taskq_wait() functions below.
 *
 * Taskq waiting is accomplished by tracking the lowest outstanding task
 * id and the next available task id. As tasks are dispatched they are
 * added to the tail of the pending, priority, or delay lists. As worker
 * threads become available the tasks are removed from the heads of these
 * lists and linked to the worker threads. This ensures the lists are
 * kept sorted by lowest to highest task id.
 *
 * Therefore the lowest outstanding task id can be quickly determined by
 * checking the head item from all of these lists. This value is stored
 * with the taskq as the lowest id. It only needs to be recalculated when
 * either the task with the current lowest id completes or is canceled.
 *
 * By blocking until the lowest task id exceeds the passed task id the
 * taskq_wait_outstanding() function can be easily implemented. Similarly,
 * by blocking until the lowest task id matches the next task id taskq_wait()
 * can be implemented.
 *
 * Callers should be aware that when there are multiple worker threads it
 * is possible for larger task ids to complete before smaller ones. Also
 * when the taskq contains delay tasks with small task ids callers may
 * block for a considerable length of time waiting for them to expire and
 * execute.
 */
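/*
 * Illustrative sketch of the semantics described above (not from the
 * original sources): suppose ids 1..5 were dispatched and only id 3 has
 * completed so far. Then:
 *
 *	taskq_wait_id(tq, 3);		returns immediately
 *	taskq_wait_outstanding(tq, 3);	blocks until ids 1 and 2 complete
 *	taskq_wait(tq);			blocks until ids 1, 2, 4, 5 complete
 */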
static int
taskq_wait_id_check(taskq_t *tq, taskqid_t id)
{
	int active = 0;
	int rc;

	spin_lock_irqsave_nested(&tq->tq_lock, tq->tq_lock_flags,
	    tq->tq_lock_class);
	rc = (taskq_find(tq, id, &active) == NULL);
	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

	return (rc);
}

/*
 * The taskq_wait_id() function blocks until the passed task id completes.
 * This does not guarantee that all lower task ids have completed.
 */
void
taskq_wait_id(taskq_t *tq, taskqid_t id)
{
	wait_event(tq->tq_wait_waitq, taskq_wait_id_check(tq, id));
}
EXPORT_SYMBOL(taskq_wait_id);

static int
taskq_wait_outstanding_check(taskq_t *tq, taskqid_t id)
{
	int rc;

	spin_lock_irqsave_nested(&tq->tq_lock, tq->tq_lock_flags,
	    tq->tq_lock_class);
	rc = (id < tq->tq_lowest_id);
	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

	return (rc);
}

/*
 * The taskq_wait_outstanding() function will block until all tasks with a
 * lower taskqid than the passed 'id' have been completed. Note that all
 * task id's are assigned monotonically at dispatch time. Zero may be
 * passed for the id to indicate that all tasks dispatched up to this
 * point, but not after, should be waited for.
 */
void
taskq_wait_outstanding(taskq_t *tq, taskqid_t id)
{
	wait_event(tq->tq_wait_waitq,
	    taskq_wait_outstanding_check(tq, id ? id : tq->tq_next_id - 1));
}
EXPORT_SYMBOL(taskq_wait_outstanding);

static int
taskq_wait_check(taskq_t *tq)
{
	int rc;

	spin_lock_irqsave_nested(&tq->tq_lock, tq->tq_lock_flags,
	    tq->tq_lock_class);
	rc = (tq->tq_lowest_id == tq->tq_next_id);
	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

	return (rc);
}

/*
 * The taskq_wait() function will block until the taskq is empty.
 * This means that if a taskq re-dispatches work to itself taskq_wait()
 * callers will block indefinitely.
 */
void
taskq_wait(taskq_t *tq)
{
	wait_event(tq->tq_wait_waitq, taskq_wait_check(tq));
}
EXPORT_SYMBOL(taskq_wait);

static int
taskq_member_impl(taskq_t *tq, void *t)
{
	struct list_head *l;
	taskq_thread_t *tqt;
	int found = 0;

	ASSERT(tq);
	ASSERT(t);
	ASSERT(spin_is_locked(&tq->tq_lock));

	list_for_each(l, &tq->tq_thread_list) {
		tqt = list_entry(l, taskq_thread_t, tqt_thread_list);
		if (tqt->tqt_thread == (struct task_struct *)t) {
			found = 1;
			break;
		}
	}
	return (found);
}

int
taskq_member(taskq_t *tq, void *t)
{
	int found;

	spin_lock_irqsave_nested(&tq->tq_lock, tq->tq_lock_flags,
	    tq->tq_lock_class);
	found = taskq_member_impl(tq, t);
	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

	return (found);
}
EXPORT_SYMBOL(taskq_member);

/*
 * Cancel an already dispatched task given the task id. Still pending tasks
 * will be immediately canceled, and if the task is active the function will
 * block until it completes. Preallocated tasks which are canceled must be
 * freed by the caller.
 */
int
taskq_cancel_id(taskq_t *tq, taskqid_t id)
{
	taskq_ent_t *t;
	int active = 0;
	int rc = ENOENT;

	ASSERT(tq);

	spin_lock_irqsave_nested(&tq->tq_lock, tq->tq_lock_flags,
	    tq->tq_lock_class);
	t = taskq_find(tq, id, &active);
	if (t && !active) {
		list_del_init(&t->tqent_list);
		t->tqent_flags |= TQENT_FLAG_CANCEL;

		/*
		 * When canceling the lowest outstanding task id we
		 * must recalculate the new lowest outstanding id.
		 */
		if (tq->tq_lowest_id == t->tqent_id) {
			tq->tq_lowest_id = taskq_lowest_id(tq);
			ASSERT3S(tq->tq_lowest_id, >, t->tqent_id);
		}

		/*
		 * The task_expire() function takes the tq->tq_lock so drop
		 * the lock before synchronously cancelling the timer.
		 */
		if (timer_pending(&t->tqent_timer)) {
			spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
			del_timer_sync(&t->tqent_timer);
			spin_lock_irqsave_nested(&tq->tq_lock,
			    tq->tq_lock_flags, tq->tq_lock_class);
		}

		if (!(t->tqent_flags & TQENT_FLAG_PREALLOC))
			task_done(tq, t);

		rc = 0;
	}
	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

	if (active) {
		taskq_wait_id(tq, id);
		rc = EBUSY;
	}

	return (rc);
}
EXPORT_SYMBOL(taskq_cancel_id);

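/*
 * Illustrative sketch of the taskq_cancel_id() return conventions above
 * (not from the original sources):
 *
 *	int rc = taskq_cancel_id(tq, id);
 *
 *	rc == 0       still pending; the task was removed before running
 *	rc == EBUSY   task was executing; the call blocked until it finished
 *	rc == ENOENT  task already completed, or id was never dispatched
 */
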
static int taskq_thread_spawn(taskq_t *tq);

taskqid_t
taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
{
	taskq_ent_t *t;
	taskqid_t rc = 0;

	ASSERT(tq);
	ASSERT(func);

	spin_lock_irqsave_nested(&tq->tq_lock, tq->tq_lock_flags,
	    tq->tq_lock_class);

	/* Taskq being destroyed and all tasks drained */
	if (!(tq->tq_flags & TASKQ_ACTIVE))
		goto out;

	/* Do not queue the task unless there is an idle thread for it */
	ASSERT(tq->tq_nactive <= tq->tq_nthreads);
	if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads))
		goto out;

	if ((t = task_alloc(tq, flags)) == NULL)
		goto out;

	spin_lock(&t->tqent_lock);

	/* Queue to the priority list instead of the pending list */
	if (flags & TQ_FRONT)
		list_add_tail(&t->tqent_list, &tq->tq_prio_list);
	else
		list_add_tail(&t->tqent_list, &tq->tq_pend_list);

	t->tqent_id = rc = tq->tq_next_id;
	tq->tq_next_id++;
	t->tqent_func = func;
	t->tqent_arg = arg;
	t->tqent_taskq = tq;
	t->tqent_timer.data = 0;
	t->tqent_timer.function = NULL;
	t->tqent_timer.expires = 0;

	ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

	spin_unlock(&t->tqent_lock);

	wake_up(&tq->tq_work_waitq);
out:
	/* Spawn additional taskq threads if required. */
	if (tq->tq_nactive == tq->tq_nthreads)
		(void) taskq_thread_spawn(tq);

	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
	return (rc);
}
EXPORT_SYMBOL(taskq_dispatch);

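/*
 * Illustrative usage sketch (not from the original sources; "my_func" and
 * "my_arg" are hypothetical):
 *
 *	taskqid_t id = taskq_dispatch(tq, my_func, my_arg, TQ_SLEEP);
 *	if (id == 0)
 *		...dispatch failed, e.g. the taskq is being destroyed...
 *	else
 *		taskq_wait_id(tq, id);
 */
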
taskqid_t
taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg,
    uint_t flags, clock_t expire_time)
{
	taskqid_t rc = 0;
	taskq_ent_t *t;

	ASSERT(tq);
	ASSERT(func);

	spin_lock_irqsave_nested(&tq->tq_lock, tq->tq_lock_flags,
	    tq->tq_lock_class);

	/* Taskq being destroyed and all tasks drained */
	if (!(tq->tq_flags & TASKQ_ACTIVE))
		goto out;

	if ((t = task_alloc(tq, flags)) == NULL)
		goto out;

	spin_lock(&t->tqent_lock);

	/* Queue to the delay list for subsequent execution */
	list_add_tail(&t->tqent_list, &tq->tq_delay_list);

	t->tqent_id = rc = tq->tq_next_id;
	tq->tq_next_id++;
	t->tqent_func = func;
	t->tqent_arg = arg;
	t->tqent_taskq = tq;
	t->tqent_timer.data = (unsigned long)t;
	t->tqent_timer.function = task_expire;
	t->tqent_timer.expires = (unsigned long)expire_time;
	add_timer(&t->tqent_timer);

	ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

	spin_unlock(&t->tqent_lock);
out:
	/* Spawn additional taskq threads if required. */
	if (tq->tq_nactive == tq->tq_nthreads)
		(void) taskq_thread_spawn(tq);
	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
	return (rc);
}
EXPORT_SYMBOL(taskq_dispatch_delay);

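/*
 * Illustrative sketch (not from the original sources): expire_time is an
 * absolute time in clock ticks, so scheduling the hypothetical "my_func"
 * roughly five seconds from now looks like:
 *
 *	taskqid_t id = taskq_dispatch_delay(tq, my_func, my_arg,
 *	    TQ_SLEEP, ddi_get_lbolt() + 5 * HZ);
 */
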
void
taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
    taskq_ent_t *t)
{
	ASSERT(tq);
	ASSERT(func);

	spin_lock_irqsave_nested(&tq->tq_lock, tq->tq_lock_flags,
	    tq->tq_lock_class);

	/* Taskq being destroyed and all tasks drained */
	if (!(tq->tq_flags & TASKQ_ACTIVE)) {
		t->tqent_id = 0;
		goto out;
	}

	spin_lock(&t->tqent_lock);

	/*
	 * Mark it as a prealloc'd task. This is important
	 * to ensure that we don't free it later.
	 */
	t->tqent_flags |= TQENT_FLAG_PREALLOC;

	/* Queue to the priority list instead of the pending list */
	if (flags & TQ_FRONT)
		list_add_tail(&t->tqent_list, &tq->tq_prio_list);
	else
		list_add_tail(&t->tqent_list, &tq->tq_pend_list);

	t->tqent_id = tq->tq_next_id;
	tq->tq_next_id++;
	t->tqent_func = func;
	t->tqent_arg = arg;
	t->tqent_taskq = tq;

	spin_unlock(&t->tqent_lock);

	wake_up(&tq->tq_work_waitq);
out:
	/* Spawn additional taskq threads if required. */
	if (tq->tq_nactive == tq->tq_nthreads)
		(void) taskq_thread_spawn(tq);
	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
}
EXPORT_SYMBOL(taskq_dispatch_ent);

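/*
 * Illustrative sketch (not from the original sources): a caller that owns
 * the taskq_ent_t storage, e.g. one embedded in its own structure, can
 * dispatch without any allocation in this path:
 *
 *	taskq_ent_t ent;
 *
 *	taskq_init_ent(&ent);
 *	taskq_dispatch_ent(tq, my_func, my_arg, TQ_SLEEP, &ent);
 *	if (ent.tqent_id == 0)
 *		...the taskq was inactive and the dispatch was rejected...
 */
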
int
taskq_empty_ent(taskq_ent_t *t)
{
	return (list_empty(&t->tqent_list));
}
EXPORT_SYMBOL(taskq_empty_ent);

void
taskq_init_ent(taskq_ent_t *t)
{
	spin_lock_init(&t->tqent_lock);
	init_waitqueue_head(&t->tqent_waitq);
	init_timer(&t->tqent_timer);
	INIT_LIST_HEAD(&t->tqent_list);
	t->tqent_id = 0;
	t->tqent_func = NULL;
	t->tqent_arg = NULL;
	t->tqent_flags = 0;
	t->tqent_taskq = NULL;
}
EXPORT_SYMBOL(taskq_init_ent);

/*
 * Return the next pending task, preference is given to tasks on the
 * priority list which were dispatched with TQ_FRONT.
 */
static taskq_ent_t *
taskq_next_ent(taskq_t *tq)
{
	struct list_head *list;

	ASSERT(spin_is_locked(&tq->tq_lock));

	if (!list_empty(&tq->tq_prio_list))
		list = &tq->tq_prio_list;
	else if (!list_empty(&tq->tq_pend_list))
		list = &tq->tq_pend_list;
	else
		return (NULL);

	return (list_entry(list->next, taskq_ent_t, tqent_list));
}

/*
 * Spawns a new thread for the specified taskq.
 */
static void
taskq_thread_spawn_task(void *arg)
{
	taskq_t *tq = (taskq_t *)arg;

	(void) taskq_thread_create(tq);

	spin_lock_irqsave_nested(&tq->tq_lock, tq->tq_lock_flags,
	    tq->tq_lock_class);
	tq->tq_nspawn--;
	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
}

/*
 * Spawn additional threads for dynamic taskqs (TASKQ_DYNAMIC) when the
 * current number of threads is insufficient to handle the pending tasks.
 * These new threads must be created by the dedicated dynamic_taskq to
 * avoid deadlocks between thread creation and memory reclaim. The
 * system_taskq which is also a dynamic taskq cannot be safely used
 * for this.
 */
static int
taskq_thread_spawn(taskq_t *tq)
{
	int spawning = 0;

	if (!(tq->tq_flags & TASKQ_DYNAMIC))
		return (0);

	if ((tq->tq_nthreads + tq->tq_nspawn < tq->tq_maxthreads) &&
	    (tq->tq_flags & TASKQ_ACTIVE)) {
		spawning = (++tq->tq_nspawn);
		taskq_dispatch(dynamic_taskq, taskq_thread_spawn_task,
		    tq, TQ_NOSLEEP);
	}

	return (spawning);
}

/*
 * Threads in a dynamic taskq should only exit once it has been completely
 * drained and no other threads are actively servicing tasks. This prevents
 * threads from being created and destroyed more than is required.
 *
 * The first thread in the thread list is treated as the primary thread.
 * There is nothing special about the primary thread but in order to avoid
 * all the taskq pids from changing we opt to make it long running.
 */
static int
taskq_thread_should_stop(taskq_t *tq, taskq_thread_t *tqt)
{
	ASSERT(spin_is_locked(&tq->tq_lock));

	if (!(tq->tq_flags & TASKQ_DYNAMIC))
		return (0);

	if (list_first_entry(&(tq->tq_thread_list), taskq_thread_t,
	    tqt_thread_list) == tqt)
		return (0);

	return
	    ((tq->tq_nspawn == 0) &&	/* No threads are being spawned */
	    (tq->tq_nactive == 0) &&	/* No threads are handling tasks */
	    (tq->tq_nthreads > 1) &&	/* More than 1 thread is running */
	    (!taskq_next_ent(tq)) &&	/* There are no pending tasks */
	    (spl_taskq_thread_dynamic));/* Dynamic taskqs are allowed */
}

static int
taskq_thread(void *args)
{
	DECLARE_WAITQUEUE(wait, current);
	sigset_t blocked;
	taskq_thread_t *tqt = args;
	taskq_t *tq;
	taskq_ent_t *t;
	int seq_tasks = 0;

	ASSERT(tqt);
	ASSERT(tqt->tqt_tq);
	tq = tqt->tqt_tq;
	current->flags |= PF_NOFREEZE;

#if defined(PF_MEMALLOC_NOIO)
	(void) memalloc_noio_save();
#endif

	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	spin_lock_irqsave_nested(&tq->tq_lock, tq->tq_lock_flags,
	    tq->tq_lock_class);

	/* Immediately exit if more threads than allowed were created. */
	if (tq->tq_nthreads >= tq->tq_maxthreads)
		goto error;

	tq->tq_nthreads++;
	list_add_tail(&tqt->tqt_thread_list, &tq->tq_thread_list);
	wake_up(&tq->tq_wait_waitq);
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {

		if (list_empty(&tq->tq_pend_list) &&
		    list_empty(&tq->tq_prio_list)) {

			if (taskq_thread_should_stop(tq, tqt)) {
				wake_up_all(&tq->tq_wait_waitq);
				break;
			}

			add_wait_queue_exclusive(&tq->tq_work_waitq, &wait);
			spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

			schedule();
			seq_tasks = 0;

			spin_lock_irqsave_nested(&tq->tq_lock,
			    tq->tq_lock_flags, tq->tq_lock_class);
			remove_wait_queue(&tq->tq_work_waitq, &wait);
		} else {
			__set_current_state(TASK_RUNNING);
		}

		if ((t = taskq_next_ent(tq)) != NULL) {
			list_del_init(&t->tqent_list);

			/*
			 * In order to support recursively dispatching a
			 * preallocated taskq_ent_t, tqent_id must be
			 * stored prior to executing tqent_func.
			 */
			tqt->tqt_id = t->tqent_id;
			tqt->tqt_task = t;

			/*
			 * We must store a copy of the flags prior to
			 * servicing the task (servicing a prealloc'd task
			 * returns the ownership of the tqent back to
			 * the caller of taskq_dispatch). Thus,
			 * tqent_flags _may_ change within the call.
			 */
			tqt->tqt_flags = t->tqent_flags;

			taskq_insert_in_order(tq, tqt);
			tq->tq_nactive++;
			spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

			/* Perform the requested task */
			t->tqent_func(t->tqent_arg);

			spin_lock_irqsave_nested(&tq->tq_lock,
			    tq->tq_lock_flags, tq->tq_lock_class);
			tq->tq_nactive--;
			list_del_init(&tqt->tqt_active_list);
			tqt->tqt_task = NULL;

			/* For prealloc'd tasks, we don't free anything. */
			if (!(tqt->tqt_flags & TQENT_FLAG_PREALLOC))
				task_done(tq, t);

			/*
			 * When the current lowest outstanding taskqid is
			 * done calculate the new lowest outstanding id.
			 */
			if (tq->tq_lowest_id == tqt->tqt_id) {
				tq->tq_lowest_id = taskq_lowest_id(tq);
				ASSERT3S(tq->tq_lowest_id, >, tqt->tqt_id);
			}

			/* Spawn additional taskq threads if required. */
			if ((++seq_tasks) > spl_taskq_thread_sequential &&
			    taskq_thread_spawn(tq))
				seq_tasks = 0;

			tqt->tqt_id = 0;
			tqt->tqt_flags = 0;
			wake_up_all(&tq->tq_wait_waitq);
		} else {
			if (taskq_thread_should_stop(tq, tqt))
				break;
		}

		set_current_state(TASK_INTERRUPTIBLE);

	}

	__set_current_state(TASK_RUNNING);
	tq->tq_nthreads--;
	list_del_init(&tqt->tqt_thread_list);
error:
	kmem_free(tqt, sizeof (taskq_thread_t));
	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

	return (0);
}

static taskq_thread_t *
taskq_thread_create(taskq_t *tq)
{
	static int last_used_cpu = 0;
	taskq_thread_t *tqt;

	tqt = kmem_alloc(sizeof (*tqt), KM_PUSHPAGE);
	INIT_LIST_HEAD(&tqt->tqt_thread_list);
	INIT_LIST_HEAD(&tqt->tqt_active_list);
	tqt->tqt_tq = tq;
	tqt->tqt_id = 0;

	tqt->tqt_thread = spl_kthread_create(taskq_thread, tqt,
	    "%s", tq->tq_name);
	if (tqt->tqt_thread == NULL) {
		kmem_free(tqt, sizeof (taskq_thread_t));
		return (NULL);
	}

	if (spl_taskq_thread_bind) {
		last_used_cpu = (last_used_cpu + 1) % num_online_cpus();
		kthread_bind(tqt->tqt_thread, last_used_cpu);
	}

	if (spl_taskq_thread_priority)
		set_user_nice(tqt->tqt_thread, PRIO_TO_NICE(tq->tq_pri));

	wake_up_process(tqt->tqt_thread);

	return (tqt);
}

taskq_t *
taskq_create(const char *name, int nthreads, pri_t pri,
    int minalloc, int maxalloc, uint_t flags)
{
	taskq_t *tq;
	taskq_thread_t *tqt;
	int count = 0, rc = 0, i;

	ASSERT(name != NULL);
	ASSERT(minalloc >= 0);
	ASSERT(maxalloc <= INT_MAX);
	ASSERT(!(flags & (TASKQ_CPR_SAFE)));	/* Unsupported */

	/* Scale the number of threads using nthreads as a percentage */
	if (flags & TASKQ_THREADS_CPU_PCT) {
		ASSERT(nthreads <= 100);
		ASSERT(nthreads >= 0);
		nthreads = MIN(nthreads, 100);
		nthreads = MAX(nthreads, 0);
		nthreads = MAX((num_online_cpus() * nthreads) / 100, 1);
	}

	tq = kmem_alloc(sizeof (*tq), KM_PUSHPAGE);
	if (tq == NULL)
		return (NULL);

	spin_lock_init(&tq->tq_lock);
	INIT_LIST_HEAD(&tq->tq_thread_list);
	INIT_LIST_HEAD(&tq->tq_active_list);
	tq->tq_name = strdup(name);
	tq->tq_nactive = 0;
	tq->tq_nthreads = 0;
	tq->tq_nspawn = 0;
	tq->tq_maxthreads = nthreads;
	tq->tq_pri = pri;
	tq->tq_minalloc = minalloc;
	tq->tq_maxalloc = maxalloc;
	tq->tq_nalloc = 0;
	tq->tq_flags = (flags | TASKQ_ACTIVE);
	tq->tq_next_id = 1;
	tq->tq_lowest_id = 1;
	INIT_LIST_HEAD(&tq->tq_free_list);
	INIT_LIST_HEAD(&tq->tq_pend_list);
	INIT_LIST_HEAD(&tq->tq_prio_list);
	INIT_LIST_HEAD(&tq->tq_delay_list);
	init_waitqueue_head(&tq->tq_work_waitq);
	init_waitqueue_head(&tq->tq_wait_waitq);
	tq->tq_lock_class = TQ_LOCK_GENERAL;

	if (flags & TASKQ_PREPOPULATE) {
		spin_lock_irqsave_nested(&tq->tq_lock, tq->tq_lock_flags,
		    tq->tq_lock_class);

		for (i = 0; i < minalloc; i++)
			task_done(tq, task_alloc(tq, TQ_PUSHPAGE | TQ_NEW));

		spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
	}

	if ((flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic)
		nthreads = 1;

	for (i = 0; i < nthreads; i++) {
		tqt = taskq_thread_create(tq);
		if (tqt == NULL)
			rc = 1;
		else
			count++;
	}

	/* Wait for all threads to be started before potential destroy */
	wait_event(tq->tq_wait_waitq, tq->tq_nthreads == count);

	if (rc) {
		taskq_destroy(tq);
		tq = NULL;
	}

	return (tq);
}
EXPORT_SYMBOL(taskq_create);

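/*
 * Illustrative sketch (not from the original sources): with
 * TASKQ_THREADS_CPU_PCT the nthreads argument is a percentage of the
 * online CPUs rather than an absolute count. On an 8-CPU machine the
 * call below therefore requests MAX((8 * 75) / 100, 1) = 6 threads:
 *
 *	taskq_t *tq = taskq_create("example_tq", 75, maxclsyspri,
 *	    50, INT_MAX, TASKQ_PREPOPULATE | TASKQ_THREADS_CPU_PCT);
 */
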
void
taskq_destroy(taskq_t *tq)
{
	struct task_struct *thread;
	taskq_thread_t *tqt;
	taskq_ent_t *t;

	ASSERT(tq);
	spin_lock_irqsave_nested(&tq->tq_lock, tq->tq_lock_flags,
	    tq->tq_lock_class);
	tq->tq_flags &= ~TASKQ_ACTIVE;
	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

	/*
	 * When TASKQ_ACTIVE is clear new tasks may not be added nor may
	 * new worker threads be spawned for a dynamic taskq.
	 */
	if (dynamic_taskq != NULL)
		taskq_wait_outstanding(dynamic_taskq, 0);

	taskq_wait(tq);

	spin_lock_irqsave_nested(&tq->tq_lock, tq->tq_lock_flags,
	    tq->tq_lock_class);

	/*
	 * Signal each thread to exit and block until it does. Each thread
	 * is responsible for removing itself from the list and freeing its
	 * taskq_thread_t. This allows for idle threads to opt to remove
	 * themselves from the taskq. They can be recreated as needed.
	 */
	while (!list_empty(&tq->tq_thread_list)) {
		tqt = list_entry(tq->tq_thread_list.next,
		    taskq_thread_t, tqt_thread_list);
		thread = tqt->tqt_thread;
		spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

		kthread_stop(thread);

		spin_lock_irqsave_nested(&tq->tq_lock, tq->tq_lock_flags,
		    tq->tq_lock_class);
	}

	while (!list_empty(&tq->tq_free_list)) {
		t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);

		ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

		list_del_init(&t->tqent_list);
		task_free(tq, t);
	}

	ASSERT0(tq->tq_nthreads);
	ASSERT0(tq->tq_nalloc);
	ASSERT0(tq->tq_nspawn);
	ASSERT(list_empty(&tq->tq_thread_list));
	ASSERT(list_empty(&tq->tq_active_list));
	ASSERT(list_empty(&tq->tq_free_list));
	ASSERT(list_empty(&tq->tq_pend_list));
	ASSERT(list_empty(&tq->tq_prio_list));
	ASSERT(list_empty(&tq->tq_delay_list));

	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

	strfree(tq->tq_name);
	kmem_free(tq, sizeof (taskq_t));
}
EXPORT_SYMBOL(taskq_destroy);

int
spl_taskq_init(void)
{
	system_taskq = taskq_create("spl_system_taskq", MAX(boot_ncpus, 64),
	    maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE|TASKQ_DYNAMIC);
	if (system_taskq == NULL)
		return (1);

	dynamic_taskq = taskq_create("spl_dynamic_taskq", 1,
	    maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE);
	if (dynamic_taskq == NULL) {
		taskq_destroy(system_taskq);
		return (1);
	}

	/*
	 * This is used to annotate tq_lock, so
	 *	taskq_dispatch -> taskq_thread_spawn -> taskq_dispatch
	 * does not trigger a lockdep warning about possible recursive
	 * locking.
	 */
	dynamic_taskq->tq_lock_class = TQ_LOCK_DYNAMIC;

	return (0);
}

void
spl_taskq_fini(void)
{
	taskq_destroy(dynamic_taskq);
	dynamic_taskq = NULL;

	taskq_destroy(system_taskq);
	system_taskq = NULL;
}