/*
 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2007 The Regents of the University of California.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 * UCRL-CODE-235197
 *
 * This file is part of the SPL, Solaris Porting Layer.
 *
 * The SPL is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * The SPL is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
 *
 * Solaris Porting Layer (SPL) Task Queue Implementation.
 */

#include <sys/timer.h>
#include <sys/taskq.h>
#include <sys/kmem.h>
#include <sys/tsd.h>
#include <sys/trace_spl.h>
#ifdef HAVE_CPU_HOTPLUG
#include <linux/cpuhotplug.h>
#endif

static int spl_taskq_thread_bind = 0;
module_param(spl_taskq_thread_bind, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_bind, "Bind taskq thread to CPU by default");

static uint_t spl_taskq_thread_timeout_ms = 5000;
/* BEGIN CSTYLED */
module_param(spl_taskq_thread_timeout_ms, uint, 0644);
/* END CSTYLED */
MODULE_PARM_DESC(spl_taskq_thread_timeout_ms,
	"Minimum idle threads exit interval for dynamic taskqs");

static int spl_taskq_thread_dynamic = 1;
module_param(spl_taskq_thread_dynamic, int, 0444);
MODULE_PARM_DESC(spl_taskq_thread_dynamic, "Allow dynamic taskq threads");

static int spl_taskq_thread_priority = 1;
module_param(spl_taskq_thread_priority, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_priority,
	"Allow non-default priority for taskq threads");

static uint_t spl_taskq_thread_sequential = 4;
/* BEGIN CSTYLED */
module_param(spl_taskq_thread_sequential, uint, 0644);
/* END CSTYLED */
MODULE_PARM_DESC(spl_taskq_thread_sequential,
	"Create new taskq threads after N sequential tasks");

/*
 * Global system-wide dynamic task queue available for all consumers. This
 * taskq is not intended for long-running tasks; instead, a dedicated taskq
 * should be created.
 */
taskq_t *system_taskq;
EXPORT_SYMBOL(system_taskq);
/* Global dynamic task queue for long-delay tasks. */
taskq_t *system_delay_taskq;
EXPORT_SYMBOL(system_delay_taskq);

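/*
 * Illustrative sketch (not part of the original file; my_cb, my_arg, and
 * my_tq are hypothetical names): choosing between the shared queues above
 * and a dedicated taskq.
 *
 *	// Short, rarely-blocking work can ride on the shared queue:
 *	taskqid_t id = taskq_dispatch(system_taskq, my_cb, my_arg, TQ_SLEEP);
 *
 *	// Long-running work should get a dedicated queue instead:
 *	taskq_t *my_tq = taskq_create("my_tq", 4, maxclsyspri, 4, INT_MAX,
 *	    TASKQ_PREPOPULATE);
 */
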
/* Private dedicated taskq for creating new taskq threads on demand. */
static taskq_t *dynamic_taskq;
static taskq_thread_t *taskq_thread_create(taskq_t *);

#ifdef HAVE_CPU_HOTPLUG
/* Multi-callback id for cpu hotplugging. */
static int spl_taskq_cpuhp_state;
#endif

/* List of all taskqs */
LIST_HEAD(tq_list);
struct rw_semaphore tq_list_sem;
static uint_t taskq_tsd;

static int
task_km_flags(uint_t flags)
{
	if (flags & TQ_NOSLEEP)
		return (KM_NOSLEEP);

	if (flags & TQ_PUSHPAGE)
		return (KM_PUSHPAGE);

	return (KM_SLEEP);
}

/*
 * taskq_find_by_name - Find the largest instance number of a named taskq.
 */
static int
taskq_find_by_name(const char *name)
{
	struct list_head *tql = NULL;
	taskq_t *tq;

	list_for_each_prev(tql, &tq_list) {
		tq = list_entry(tql, taskq_t, tq_taskqs);
		if (strcmp(name, tq->tq_name) == 0)
			return (tq->tq_instance);
	}
	return (-1);
}

/*
 * NOTE: Must be called with tq->tq_lock held, returns a taskq_ent_t
 * which is not attached to the free, work, or pending taskq lists.
 */
static taskq_ent_t *
task_alloc(taskq_t *tq, uint_t flags, unsigned long *irqflags)
{
	taskq_ent_t *t;
	int count = 0;

	ASSERT(tq);
retry:
	/* Acquire taskq_ent_t's from free list if available */
	if (!list_empty(&tq->tq_free_list) && !(flags & TQ_NEW)) {
		t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);

		ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));
		ASSERT(!(t->tqent_flags & TQENT_FLAG_CANCEL));
		ASSERT(!timer_pending(&t->tqent_timer));

		list_del_init(&t->tqent_list);
		return (t);
	}

	/* Free list is empty and memory allocations are prohibited */
	if (flags & TQ_NOALLOC)
		return (NULL);

	/* Hit maximum taskq_ent_t pool size */
	if (tq->tq_nalloc >= tq->tq_maxalloc) {
		if (flags & TQ_NOSLEEP)
			return (NULL);

		/*
		 * Sleep periodically polling the free list for an available
		 * taskq_ent_t. Dispatching with TQ_SLEEP should always succeed
		 * but we cannot block forever waiting for a taskq_ent_t to
		 * show up in the free list, otherwise a deadlock can happen.
		 *
		 * Therefore, we need to allocate a new task even if the number
		 * of allocated tasks is above tq->tq_maxalloc, but we still
		 * end up delaying the task allocation by up to one second
		 * (100 retries of HZ / 100), thereby throttling the task
		 * dispatch rate.
		 */
		spin_unlock_irqrestore(&tq->tq_lock, *irqflags);
		schedule_timeout(HZ / 100);
		spin_lock_irqsave_nested(&tq->tq_lock, *irqflags,
		    tq->tq_lock_class);
		if (count < 100) {
			count++;
			goto retry;
		}
	}

	spin_unlock_irqrestore(&tq->tq_lock, *irqflags);
	t = kmem_alloc(sizeof (taskq_ent_t), task_km_flags(flags));
	spin_lock_irqsave_nested(&tq->tq_lock, *irqflags, tq->tq_lock_class);

	if (t) {
		taskq_init_ent(t);
		tq->tq_nalloc++;
	}

	return (t);
}

/*
 * NOTE: Must be called with tq->tq_lock held, expects the taskq_ent_t
 * to already be removed from the free, work, or pending taskq lists.
 */
static void
task_free(taskq_t *tq, taskq_ent_t *t)
{
	ASSERT(tq);
	ASSERT(t);
	ASSERT(list_empty(&t->tqent_list));
	ASSERT(!timer_pending(&t->tqent_timer));

	kmem_free(t, sizeof (taskq_ent_t));
	tq->tq_nalloc--;
}

/*
 * NOTE: Must be called with tq->tq_lock held, either destroys the
 * taskq_ent_t if too many exist or moves it to the free list for later use.
 */
static void
task_done(taskq_t *tq, taskq_ent_t *t)
{
	ASSERT(tq);
	ASSERT(t);

	/* Wake tasks blocked in taskq_wait_id() */
	wake_up_all(&t->tqent_waitq);

	list_del_init(&t->tqent_list);

	if (tq->tq_nalloc <= tq->tq_minalloc) {
		t->tqent_id = TASKQID_INVALID;
		t->tqent_func = NULL;
		t->tqent_arg = NULL;
		t->tqent_flags = 0;

		list_add_tail(&t->tqent_list, &tq->tq_free_list);
	} else {
		task_free(tq, t);
	}
}

/*
 * When a delayed task timer expires remove it from the delay list and
 * add it to the priority list for immediate processing.
 */
static void
task_expire_impl(taskq_ent_t *t)
{
	taskq_ent_t *w;
	taskq_t *tq = t->tqent_taskq;
	struct list_head *l = NULL;
	unsigned long flags;

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);

	if (t->tqent_flags & TQENT_FLAG_CANCEL) {
		ASSERT(list_empty(&t->tqent_list));
		spin_unlock_irqrestore(&tq->tq_lock, flags);
		return;
	}

	t->tqent_birth = jiffies;
	DTRACE_PROBE1(taskq_ent__birth, taskq_ent_t *, t);

	/*
	 * The priority list must be maintained in strict task id order
	 * from lowest to highest for lowest_id to be easily calculable.
	 */
	list_del(&t->tqent_list);
	list_for_each_prev(l, &tq->tq_prio_list) {
		w = list_entry(l, taskq_ent_t, tqent_list);
		if (w->tqent_id < t->tqent_id) {
			list_add(&t->tqent_list, l);
			break;
		}
	}
	if (l == &tq->tq_prio_list)
		list_add(&t->tqent_list, &tq->tq_prio_list);

	spin_unlock_irqrestore(&tq->tq_lock, flags);

	wake_up(&tq->tq_work_waitq);
}

static void
task_expire(spl_timer_list_t tl)
{
	struct timer_list *tmr = (struct timer_list *)tl;
	taskq_ent_t *t = from_timer(t, tmr, tqent_timer);
	task_expire_impl(t);
}

/*
 * Returns the lowest incomplete taskqid_t. The taskqid_t may
 * be queued on the pending list, on the priority list, on the
 * delay list, or on the work list currently being handled, but
 * it is not 100% complete yet.
 */
static taskqid_t
taskq_lowest_id(taskq_t *tq)
{
	taskqid_t lowest_id = tq->tq_next_id;
	taskq_ent_t *t;
	taskq_thread_t *tqt;

	if (!list_empty(&tq->tq_pend_list)) {
		t = list_entry(tq->tq_pend_list.next, taskq_ent_t, tqent_list);
		lowest_id = MIN(lowest_id, t->tqent_id);
	}

	if (!list_empty(&tq->tq_prio_list)) {
		t = list_entry(tq->tq_prio_list.next, taskq_ent_t, tqent_list);
		lowest_id = MIN(lowest_id, t->tqent_id);
	}

	if (!list_empty(&tq->tq_delay_list)) {
		t = list_entry(tq->tq_delay_list.next, taskq_ent_t, tqent_list);
		lowest_id = MIN(lowest_id, t->tqent_id);
	}

	if (!list_empty(&tq->tq_active_list)) {
		tqt = list_entry(tq->tq_active_list.next, taskq_thread_t,
		    tqt_active_list);
		ASSERT(tqt->tqt_id != TASKQID_INVALID);
		lowest_id = MIN(lowest_id, tqt->tqt_id);
	}

	return (lowest_id);
}

/*
 * Insert a task into a list keeping the list sorted by increasing taskqid.
 */
static void
taskq_insert_in_order(taskq_t *tq, taskq_thread_t *tqt)
{
	taskq_thread_t *w;
	struct list_head *l = NULL;

	ASSERT(tq);
	ASSERT(tqt);

	list_for_each_prev(l, &tq->tq_active_list) {
		w = list_entry(l, taskq_thread_t, tqt_active_list);
		if (w->tqt_id < tqt->tqt_id) {
			list_add(&tqt->tqt_active_list, l);
			break;
		}
	}
	if (l == &tq->tq_active_list)
		list_add(&tqt->tqt_active_list, &tq->tq_active_list);
}

/*
 * Find and return a task from the given list if it exists. The list
 * must be in lowest to highest task id order.
 */
static taskq_ent_t *
taskq_find_list(taskq_t *tq, struct list_head *lh, taskqid_t id)
{
	struct list_head *l = NULL;
	taskq_ent_t *t;

	list_for_each(l, lh) {
		t = list_entry(l, taskq_ent_t, tqent_list);

		if (t->tqent_id == id)
			return (t);

		if (t->tqent_id > id)
			break;
	}

	return (NULL);
}

/*
 * Find an already dispatched task given the task id regardless of what
 * state it is in. If a task is still pending it will be returned.
 * If a task is executing, then -EBUSY will be returned instead.
 * If the task has already been run then NULL is returned.
 */
static taskq_ent_t *
taskq_find(taskq_t *tq, taskqid_t id)
{
	taskq_thread_t *tqt;
	struct list_head *l = NULL;
	taskq_ent_t *t;

	t = taskq_find_list(tq, &tq->tq_delay_list, id);
	if (t)
		return (t);

	t = taskq_find_list(tq, &tq->tq_prio_list, id);
	if (t)
		return (t);

	t = taskq_find_list(tq, &tq->tq_pend_list, id);
	if (t)
		return (t);

	list_for_each(l, &tq->tq_active_list) {
		tqt = list_entry(l, taskq_thread_t, tqt_active_list);
		if (tqt->tqt_id == id) {
			/*
			 * Instead of returning tqt_task, we just return a non
			 * NULL value to prevent misuse, since tqt_task only
			 * has two valid fields.
			 */
			return (ERR_PTR(-EBUSY));
		}
	}

	return (NULL);
}

/*
 * Theory for the taskq_wait_id(), taskq_wait_outstanding(), and
 * taskq_wait() functions below.
 *
 * Taskq waiting is accomplished by tracking the lowest outstanding task
 * id and the next available task id. As tasks are dispatched they are
 * added to the tail of the pending, priority, or delay lists. As worker
 * threads become available the tasks are removed from the heads of these
 * lists and linked to the worker threads. This ensures the lists are
 * kept sorted by lowest to highest task id.
 *
 * Therefore the lowest outstanding task id can be quickly determined by
 * checking the head item from all of these lists. This value is stored
 * with the taskq as the lowest id. It only needs to be recalculated when
 * either the task with the current lowest id completes or is canceled.
 *
 * By blocking until the lowest task id exceeds the passed task id the
 * taskq_wait_outstanding() function can be easily implemented. Similarly,
 * by blocking until the lowest task id matches the next task id taskq_wait()
 * can be implemented.
 *
 * Callers should be aware that when there are multiple worker threads it
 * is possible for larger task ids to complete before smaller ones. Also
 * when the taskq contains delay tasks with small task ids callers may
 * block for a considerable length of time waiting for them to expire and
 * execute.
 */
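
/*
 * A sketch of the semantics described above (not part of the original
 * file; my_cb and my_arg are hypothetical):
 *
 *	taskqid_t a = taskq_dispatch(tq, my_cb, my_arg, TQ_SLEEP);
 *	taskqid_t b = taskq_dispatch(tq, my_cb, my_arg, TQ_SLEEP);
 *
 *	taskq_wait_id(tq, b);		// b done; a may still be running
 *	taskq_wait_outstanding(tq, a);	// every id <= a done
 *	taskq_wait(tq);			// block until the taskq is empty
 */
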
static int
taskq_wait_id_check(taskq_t *tq, taskqid_t id)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	rc = (taskq_find(tq, id) == NULL);
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	return (rc);
}

/*
 * The taskq_wait_id() function blocks until the passed task id completes.
 * This does not guarantee that all lower task ids have completed.
 */
void
taskq_wait_id(taskq_t *tq, taskqid_t id)
{
	wait_event(tq->tq_wait_waitq, taskq_wait_id_check(tq, id));
}
EXPORT_SYMBOL(taskq_wait_id);

static int
taskq_wait_outstanding_check(taskq_t *tq, taskqid_t id)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	rc = (id < tq->tq_lowest_id);
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	return (rc);
}

/*
 * The taskq_wait_outstanding() function will block until all tasks with a
 * lower taskqid than the passed 'id' have been completed. Note that all
 * task ids are assigned monotonically at dispatch time. Zero may be
 * passed for the id to indicate that all tasks dispatched up to this
 * point, but not after, should be waited for.
 */
void
taskq_wait_outstanding(taskq_t *tq, taskqid_t id)
{
	id = id ? id : tq->tq_next_id - 1;
	wait_event(tq->tq_wait_waitq, taskq_wait_outstanding_check(tq, id));
}
EXPORT_SYMBOL(taskq_wait_outstanding);

static int
taskq_wait_check(taskq_t *tq)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	rc = (tq->tq_lowest_id == tq->tq_next_id);
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	return (rc);
}

/*
 * The taskq_wait() function will block until the taskq is empty.
 * This means that if a taskq re-dispatches work to itself taskq_wait()
 * callers will block indefinitely.
 */
void
taskq_wait(taskq_t *tq)
{
	wait_event(tq->tq_wait_waitq, taskq_wait_check(tq));
}
EXPORT_SYMBOL(taskq_wait);

int
taskq_member(taskq_t *tq, kthread_t *t)
{
	return (tq == (taskq_t *)tsd_get_by_thread(taskq_tsd, t));
}
EXPORT_SYMBOL(taskq_member);

taskq_t *
taskq_of_curthread(void)
{
	return (tsd_get(taskq_tsd));
}
EXPORT_SYMBOL(taskq_of_curthread);

/*
 * Cancel an already dispatched task given the task id. Still pending tasks
 * will be immediately canceled, and if the task is active the function will
 * block until it completes. Preallocated tasks which are canceled must be
 * freed by the caller.
 */
int
taskq_cancel_id(taskq_t *tq, taskqid_t id)
{
	taskq_ent_t *t;
	int rc = ENOENT;
	unsigned long flags;

	ASSERT(tq);

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	t = taskq_find(tq, id);
	if (t && t != ERR_PTR(-EBUSY)) {
		list_del_init(&t->tqent_list);
		t->tqent_flags |= TQENT_FLAG_CANCEL;

		/*
		 * When canceling the lowest outstanding task id we
		 * must recalculate the new lowest outstanding id.
		 */
		if (tq->tq_lowest_id == t->tqent_id) {
			tq->tq_lowest_id = taskq_lowest_id(tq);
			ASSERT3S(tq->tq_lowest_id, >, t->tqent_id);
		}

		/*
		 * The task_expire() function takes the tq->tq_lock so
		 * drop the lock before synchronously cancelling the timer.
		 */
		if (timer_pending(&t->tqent_timer)) {
			spin_unlock_irqrestore(&tq->tq_lock, flags);
			del_timer_sync(&t->tqent_timer);
			spin_lock_irqsave_nested(&tq->tq_lock, flags,
			    tq->tq_lock_class);
		}

		if (!(t->tqent_flags & TQENT_FLAG_PREALLOC))
			task_done(tq, t);

		rc = 0;
	}
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	if (t == ERR_PTR(-EBUSY)) {
		taskq_wait_id(tq, id);
		rc = EBUSY;
	}

	return (rc);
}
EXPORT_SYMBOL(taskq_cancel_id);
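
/*
 * A sketch of the cancellation contract above (illustrative only; my_cb
 * and my_arg are hypothetical):
 *
 *	taskqid_t id = taskq_dispatch(tq, my_cb, my_arg, TQ_SLEEP);
 *	int error = taskq_cancel_id(tq, id);
 *	switch (error) {
 *	case 0:		// canceled before it ran
 *	case EBUSY:	// was running; cancel waited for completion
 *	case ENOENT:	// already finished, or id was never dispatched
 *		break;
 *	}
 */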

static int taskq_thread_spawn(taskq_t *tq);

taskqid_t
taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
{
	taskq_ent_t *t;
	taskqid_t rc = TASKQID_INVALID;
	unsigned long irqflags;

	ASSERT(tq);
	ASSERT(func);

	spin_lock_irqsave_nested(&tq->tq_lock, irqflags, tq->tq_lock_class);

	/* Taskq being destroyed and all tasks drained */
	if (!(tq->tq_flags & TASKQ_ACTIVE))
		goto out;

	/* Do not queue the task unless there is an idle thread for it */
	ASSERT(tq->tq_nactive <= tq->tq_nthreads);
	if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads)) {
		/* Dynamic taskq may be able to spawn another thread */
		if (taskq_thread_spawn(tq) == 0)
			goto out;
	}

	if ((t = task_alloc(tq, flags, &irqflags)) == NULL)
		goto out;

	spin_lock(&t->tqent_lock);

	/* Queue to the front of the list to enforce TQ_NOQUEUE semantics */
	if (flags & TQ_NOQUEUE)
		list_add(&t->tqent_list, &tq->tq_prio_list);
	/* Queue to the priority list instead of the pending list */
	else if (flags & TQ_FRONT)
		list_add_tail(&t->tqent_list, &tq->tq_prio_list);
	else
		list_add_tail(&t->tqent_list, &tq->tq_pend_list);

	t->tqent_id = rc = tq->tq_next_id;
	tq->tq_next_id++;
	t->tqent_func = func;
	t->tqent_arg = arg;
	t->tqent_taskq = tq;
	t->tqent_timer.function = NULL;
	t->tqent_timer.expires = 0;

	t->tqent_birth = jiffies;
	DTRACE_PROBE1(taskq_ent__birth, taskq_ent_t *, t);

	ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

	spin_unlock(&t->tqent_lock);

	wake_up(&tq->tq_work_waitq);

	/* Spawn additional taskq threads if required. */
	if (!(flags & TQ_NOQUEUE) && tq->tq_nactive == tq->tq_nthreads)
		(void) taskq_thread_spawn(tq);
out:
	spin_unlock_irqrestore(&tq->tq_lock, irqflags);
	return (rc);
}
EXPORT_SYMBOL(taskq_dispatch);

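/*
 * Dispatch usage sketch (illustrative; my_cb and my_arg are hypothetical).
 * TASKQID_INVALID signals a failed or refused dispatch, and must be
 * checked when sleeping is not allowed:
 *
 *	taskqid_t id = taskq_dispatch(tq, my_cb, my_arg, TQ_NOSLEEP);
 *	if (id == TASKQID_INVALID) {
 *		// no free taskq_ent_t and the allocation could not sleep
 *	}
 *
 * TQ_FRONT queues on the priority list, which is serviced before the
 * pending list; TQ_NOQUEUE refuses to queue at all unless an idle (or
 * newly spawnable dynamic) thread can take the task immediately.
 */
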
taskqid_t
taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg,
    uint_t flags, clock_t expire_time)
{
	taskqid_t rc = TASKQID_INVALID;
	taskq_ent_t *t;
	unsigned long irqflags;

	ASSERT(tq);
	ASSERT(func);

	spin_lock_irqsave_nested(&tq->tq_lock, irqflags, tq->tq_lock_class);

	/* Taskq being destroyed and all tasks drained */
	if (!(tq->tq_flags & TASKQ_ACTIVE))
		goto out;

	if ((t = task_alloc(tq, flags, &irqflags)) == NULL)
		goto out;

	spin_lock(&t->tqent_lock);

	/* Queue to the delay list for subsequent execution */
	list_add_tail(&t->tqent_list, &tq->tq_delay_list);

	t->tqent_id = rc = tq->tq_next_id;
	tq->tq_next_id++;
	t->tqent_func = func;
	t->tqent_arg = arg;
	t->tqent_taskq = tq;
	t->tqent_timer.function = task_expire;
	t->tqent_timer.expires = (unsigned long)expire_time;
	add_timer(&t->tqent_timer);

	ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

	spin_unlock(&t->tqent_lock);

	/* Spawn additional taskq threads if required. */
	if (tq->tq_nactive == tq->tq_nthreads)
		(void) taskq_thread_spawn(tq);
out:
	spin_unlock_irqrestore(&tq->tq_lock, irqflags);
	return (rc);
}
EXPORT_SYMBOL(taskq_dispatch_delay);

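/*
 * Delayed-dispatch sketch (illustrative; my_cb and my_arg are
 * hypothetical). Note that expire_time is an absolute timer value in
 * jiffies (e.g. derived from ddi_get_lbolt() under the SPL), not a
 * relative delay:
 *
 *	taskqid_t id = taskq_dispatch_delay(tq, my_cb, my_arg, TQ_SLEEP,
 *	    ddi_get_lbolt() + MSEC_TO_TICK(500));
 *
 * Until the timer fires the task sits on tq_delay_list and can still be
 * canceled with taskq_cancel_id().
 */
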
void
taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
    taskq_ent_t *t)
{
	unsigned long irqflags;
	ASSERT(tq);
	ASSERT(func);

	spin_lock_irqsave_nested(&tq->tq_lock, irqflags,
	    tq->tq_lock_class);

	/* Taskq being destroyed and all tasks drained */
	if (!(tq->tq_flags & TASKQ_ACTIVE)) {
		t->tqent_id = TASKQID_INVALID;
		goto out;
	}

	if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads)) {
		/* Dynamic taskq may be able to spawn another thread */
		if (taskq_thread_spawn(tq) == 0)
			goto out;
		flags |= TQ_FRONT;
	}

	spin_lock(&t->tqent_lock);

	/*
	 * Make sure the entry is not on some other taskq; it is important to
	 * ASSERT() under lock
	 */
	ASSERT(taskq_empty_ent(t));

	/*
	 * Mark it as a prealloc'd task. This is important
	 * to ensure that we don't free it later.
	 */
	t->tqent_flags |= TQENT_FLAG_PREALLOC;

	/* Queue to the priority list instead of the pending list */
	if (flags & TQ_FRONT)
		list_add_tail(&t->tqent_list, &tq->tq_prio_list);
	else
		list_add_tail(&t->tqent_list, &tq->tq_pend_list);

	t->tqent_id = tq->tq_next_id;
	tq->tq_next_id++;
	t->tqent_func = func;
	t->tqent_arg = arg;
	t->tqent_taskq = tq;

	t->tqent_birth = jiffies;
	DTRACE_PROBE1(taskq_ent__birth, taskq_ent_t *, t);

	spin_unlock(&t->tqent_lock);

	wake_up(&tq->tq_work_waitq);

	/* Spawn additional taskq threads if required. */
	if (tq->tq_nactive == tq->tq_nthreads)
		(void) taskq_thread_spawn(tq);
out:
	spin_unlock_irqrestore(&tq->tq_lock, irqflags);
}
EXPORT_SYMBOL(taskq_dispatch_ent);

int
taskq_empty_ent(taskq_ent_t *t)
{
	return (list_empty(&t->tqent_list));
}
EXPORT_SYMBOL(taskq_empty_ent);

void
taskq_init_ent(taskq_ent_t *t)
{
	spin_lock_init(&t->tqent_lock);
	init_waitqueue_head(&t->tqent_waitq);
	timer_setup(&t->tqent_timer, NULL, 0);
	INIT_LIST_HEAD(&t->tqent_list);
	t->tqent_id = 0;
	t->tqent_func = NULL;
	t->tqent_arg = NULL;
	t->tqent_flags = 0;
	t->tqent_taskq = NULL;
}
EXPORT_SYMBOL(taskq_init_ent);

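/*
 * Preallocated-entry sketch (illustrative; struct my_obj and my_cb are
 * hypothetical). Embedding a taskq_ent_t lets taskq_dispatch_ent() skip
 * task_alloc() entirely, so no taskq_ent_t allocation can fail or sleep:
 *
 *	struct my_obj {
 *		taskq_ent_t o_tqent;	// must outlive the dispatched task
 *	};
 *
 *	taskq_init_ent(&obj->o_tqent);	// once, when obj is initialized
 *	taskq_dispatch_ent(tq, my_cb, obj, 0, &obj->o_tqent);
 *
 * The entry is marked TQENT_FLAG_PREALLOC so the taskq never frees it;
 * the caller owns its lifetime and may reuse it after the task has run.
 */
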
/*
 * Return the next pending task; preference is given to tasks on the
 * priority list which were dispatched with TQ_FRONT.
 */
static taskq_ent_t *
taskq_next_ent(taskq_t *tq)
{
	struct list_head *list;

	if (!list_empty(&tq->tq_prio_list))
		list = &tq->tq_prio_list;
	else if (!list_empty(&tq->tq_pend_list))
		list = &tq->tq_pend_list;
	else
		return (NULL);

	return (list_entry(list->next, taskq_ent_t, tqent_list));
}

/*
 * Spawns a new thread for the specified taskq.
 */
static void
taskq_thread_spawn_task(void *arg)
{
	taskq_t *tq = (taskq_t *)arg;
	unsigned long flags;

	if (taskq_thread_create(tq) == NULL) {
		/* restore spawning count if failed */
		spin_lock_irqsave_nested(&tq->tq_lock, flags,
		    tq->tq_lock_class);
		tq->tq_nspawn--;
		spin_unlock_irqrestore(&tq->tq_lock, flags);
	}
}

/*
 * Spawn additional threads for dynamic taskqs (TASKQ_DYNAMIC) when the
 * current number of threads is insufficient to handle the pending tasks.
 * These new threads must be created by the dedicated dynamic_taskq to
 * avoid deadlocks between thread creation and memory reclaim. The
 * system_taskq which is also a dynamic taskq cannot be safely used for
 * this.
 */
static int
taskq_thread_spawn(taskq_t *tq)
{
	int spawning = 0;

	if (!(tq->tq_flags & TASKQ_DYNAMIC))
		return (0);

	tq->lastspawnstop = jiffies;
	if ((tq->tq_nthreads + tq->tq_nspawn < tq->tq_maxthreads) &&
	    (tq->tq_flags & TASKQ_ACTIVE)) {
		spawning = (++tq->tq_nspawn);
		taskq_dispatch(dynamic_taskq, taskq_thread_spawn_task,
		    tq, TQ_NOSLEEP);
	}

	return (spawning);
}

/*
 * Threads in a dynamic taskq may exit once there is no more work to do.
 * To prevent threads from being created and destroyed too often limit
 * the exit rate to one per spl_taskq_thread_timeout_ms.
 *
 * The first thread in the thread list is treated as the primary thread.
 * There is nothing special about the primary thread but in order to avoid
 * all the taskq pids from changing we opt to make it long running.
 */
static int
taskq_thread_should_stop(taskq_t *tq, taskq_thread_t *tqt)
{
	ASSERT(!taskq_next_ent(tq));
	if (!(tq->tq_flags & TASKQ_DYNAMIC) || !spl_taskq_thread_dynamic)
		return (0);
	if (!(tq->tq_flags & TASKQ_ACTIVE))
		return (1);
	if (list_first_entry(&(tq->tq_thread_list), taskq_thread_t,
	    tqt_thread_list) == tqt)
		return (0);
	ASSERT3U(tq->tq_nthreads, >, 1);
	if (tq->tq_nspawn != 0)
		return (0);
	if (time_before(jiffies, tq->lastspawnstop +
	    msecs_to_jiffies(spl_taskq_thread_timeout_ms)))
		return (0);
	tq->lastspawnstop = jiffies;
	return (1);
}

static int
taskq_thread(void *args)
{
	DECLARE_WAITQUEUE(wait, current);
	sigset_t blocked;
	taskq_thread_t *tqt = args;
	taskq_t *tq;
	taskq_ent_t *t;
	int seq_tasks = 0;
	unsigned long flags;
	taskq_ent_t dup_task = {};

	ASSERT(tqt);
	ASSERT(tqt->tqt_tq);
	tq = tqt->tqt_tq;
	current->flags |= PF_NOFREEZE;

	(void) spl_fstrans_mark();

	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	tsd_set(taskq_tsd, tq);
	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	/*
	 * If we are dynamically spawned, decrease spawning count. Note that
	 * we could be created during taskq_create, in which case we shouldn't
	 * do the decrement. But it's fine because taskq_create will reset
	 * tq_nspawn later.
	 */
	if (tq->tq_flags & TASKQ_DYNAMIC)
		tq->tq_nspawn--;

	/* Immediately exit if more threads than allowed were created. */
	if (tq->tq_nthreads >= tq->tq_maxthreads)
		goto error;

	tq->tq_nthreads++;
	list_add_tail(&tqt->tqt_thread_list, &tq->tq_thread_list);
	wake_up(&tq->tq_wait_waitq);
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {

		if (list_empty(&tq->tq_pend_list) &&
		    list_empty(&tq->tq_prio_list)) {

			if (taskq_thread_should_stop(tq, tqt))
				break;

			add_wait_queue_exclusive(&tq->tq_work_waitq, &wait);
			spin_unlock_irqrestore(&tq->tq_lock, flags);

			schedule();
			seq_tasks = 0;

			spin_lock_irqsave_nested(&tq->tq_lock, flags,
			    tq->tq_lock_class);
			remove_wait_queue(&tq->tq_work_waitq, &wait);
		} else {
			__set_current_state(TASK_RUNNING);
		}

		if ((t = taskq_next_ent(tq)) != NULL) {
			list_del_init(&t->tqent_list);

			/*
			 * A TQENT_FLAG_PREALLOC task may be reused or freed
			 * during the task function call. Store tqent_id and
			 * tqent_flags here.
			 *
			 * Also use an on stack taskq_ent_t for tqt_task
			 * assignment in this case; we want to make sure
			 * to duplicate all fields, so the values are
			 * correct when it's accessed via DTRACE_PROBE*.
			 */
			tqt->tqt_id = t->tqent_id;
			tqt->tqt_flags = t->tqent_flags;

			if (t->tqent_flags & TQENT_FLAG_PREALLOC) {
				dup_task = *t;
				t = &dup_task;
			}
			tqt->tqt_task = t;

			taskq_insert_in_order(tq, tqt);
			tq->tq_nactive++;
			spin_unlock_irqrestore(&tq->tq_lock, flags);

			DTRACE_PROBE1(taskq_ent__start, taskq_ent_t *, t);

			/* Perform the requested task */
			t->tqent_func(t->tqent_arg);

			DTRACE_PROBE1(taskq_ent__finish, taskq_ent_t *, t);

			spin_lock_irqsave_nested(&tq->tq_lock, flags,
			    tq->tq_lock_class);
			tq->tq_nactive--;
			list_del_init(&tqt->tqt_active_list);
			tqt->tqt_task = NULL;

			/* For prealloc'd tasks, we don't free anything. */
			if (!(tqt->tqt_flags & TQENT_FLAG_PREALLOC))
				task_done(tq, t);

			/*
			 * When the current lowest outstanding taskqid is
			 * done calculate the new lowest outstanding id
			 */
			if (tq->tq_lowest_id == tqt->tqt_id) {
				tq->tq_lowest_id = taskq_lowest_id(tq);
				ASSERT3S(tq->tq_lowest_id, >, tqt->tqt_id);
			}

			/* Spawn additional taskq threads if required. */
			if ((++seq_tasks) > spl_taskq_thread_sequential &&
			    taskq_thread_spawn(tq))
				seq_tasks = 0;

			tqt->tqt_id = TASKQID_INVALID;
			tqt->tqt_flags = 0;
			wake_up_all(&tq->tq_wait_waitq);
		}

		set_current_state(TASK_INTERRUPTIBLE);

	}

	__set_current_state(TASK_RUNNING);
	tq->tq_nthreads--;
	list_del_init(&tqt->tqt_thread_list);
error:
	kmem_free(tqt, sizeof (taskq_thread_t));
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	tsd_set(taskq_tsd, NULL);
	thread_exit();

	return (0);
}

static taskq_thread_t *
taskq_thread_create(taskq_t *tq)
{
	static int last_used_cpu = 0;
	taskq_thread_t *tqt;

	tqt = kmem_alloc(sizeof (*tqt), KM_PUSHPAGE);
	INIT_LIST_HEAD(&tqt->tqt_thread_list);
	INIT_LIST_HEAD(&tqt->tqt_active_list);
	tqt->tqt_tq = tq;
	tqt->tqt_id = TASKQID_INVALID;

	tqt->tqt_thread = spl_kthread_create(taskq_thread, tqt,
	    "%s", tq->tq_name);
	if (tqt->tqt_thread == NULL) {
		kmem_free(tqt, sizeof (taskq_thread_t));
		return (NULL);
	}

	if (spl_taskq_thread_bind) {
		last_used_cpu = (last_used_cpu + 1) % num_online_cpus();
		kthread_bind(tqt->tqt_thread, last_used_cpu);
	}

	if (spl_taskq_thread_priority)
		set_user_nice(tqt->tqt_thread, PRIO_TO_NICE(tq->tq_pri));

	wake_up_process(tqt->tqt_thread);

	return (tqt);
}

taskq_t *
taskq_create(const char *name, int threads_arg, pri_t pri,
    int minalloc, int maxalloc, uint_t flags)
{
	taskq_t *tq;
	taskq_thread_t *tqt;
	int count = 0, rc = 0, i;
	unsigned long irqflags;
	int nthreads = threads_arg;

	ASSERT(name != NULL);
	ASSERT(minalloc >= 0);
	ASSERT(!(flags & (TASKQ_CPR_SAFE))); /* Unsupported */

	/* Scale the number of threads using nthreads as a percentage */
	if (flags & TASKQ_THREADS_CPU_PCT) {
		ASSERT(nthreads <= 100);
		ASSERT(nthreads >= 0);
		nthreads = MIN(threads_arg, 100);
		nthreads = MAX(nthreads, 0);
		nthreads = MAX((num_online_cpus() * nthreads) / 100, 1);
	}

	tq = kmem_alloc(sizeof (*tq), KM_PUSHPAGE);
	if (tq == NULL)
		return (NULL);

	tq->tq_hp_support = B_FALSE;
#ifdef HAVE_CPU_HOTPLUG
	if (flags & TASKQ_THREADS_CPU_PCT) {
		tq->tq_hp_support = B_TRUE;
		if (cpuhp_state_add_instance_nocalls(spl_taskq_cpuhp_state,
		    &tq->tq_hp_cb_node) != 0) {
			kmem_free(tq, sizeof (*tq));
			return (NULL);
		}
	}
#endif

	spin_lock_init(&tq->tq_lock);
	INIT_LIST_HEAD(&tq->tq_thread_list);
	INIT_LIST_HEAD(&tq->tq_active_list);
	tq->tq_name = kmem_strdup(name);
	tq->tq_nactive = 0;
	tq->tq_nthreads = 0;
	tq->tq_nspawn = 0;
	tq->tq_maxthreads = nthreads;
	tq->tq_cpu_pct = threads_arg;
	tq->tq_pri = pri;
	tq->tq_minalloc = minalloc;
	tq->tq_maxalloc = maxalloc;
	tq->tq_nalloc = 0;
	tq->tq_flags = (flags | TASKQ_ACTIVE);
	tq->tq_next_id = TASKQID_INITIAL;
	tq->tq_lowest_id = TASKQID_INITIAL;
	tq->lastspawnstop = jiffies;
	INIT_LIST_HEAD(&tq->tq_free_list);
	INIT_LIST_HEAD(&tq->tq_pend_list);
	INIT_LIST_HEAD(&tq->tq_prio_list);
	INIT_LIST_HEAD(&tq->tq_delay_list);
	init_waitqueue_head(&tq->tq_work_waitq);
	init_waitqueue_head(&tq->tq_wait_waitq);
	tq->tq_lock_class = TQ_LOCK_GENERAL;
	INIT_LIST_HEAD(&tq->tq_taskqs);

	if (flags & TASKQ_PREPOPULATE) {
		spin_lock_irqsave_nested(&tq->tq_lock, irqflags,
		    tq->tq_lock_class);

		for (i = 0; i < minalloc; i++)
			task_done(tq, task_alloc(tq, TQ_PUSHPAGE | TQ_NEW,
			    &irqflags));

		spin_unlock_irqrestore(&tq->tq_lock, irqflags);
	}

	if ((flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic)
		nthreads = 1;

	for (i = 0; i < nthreads; i++) {
		tqt = taskq_thread_create(tq);
		if (tqt == NULL)
			rc = 1;
		else
			count++;
	}

	/* Wait for all threads to be started before potential destroy */
	wait_event(tq->tq_wait_waitq, tq->tq_nthreads == count);
	/*
	 * taskq_thread might have touched nspawn, but we don't want them to
	 * because they're not dynamically spawned. So we reset it to 0
	 */
	tq->tq_nspawn = 0;

	if (rc) {
		taskq_destroy(tq);
		tq = NULL;
	} else {
		down_write(&tq_list_sem);
		tq->tq_instance = taskq_find_by_name(name) + 1;
		list_add_tail(&tq->tq_taskqs, &tq_list);
		up_write(&tq_list_sem);
	}

	return (tq);
}
EXPORT_SYMBOL(taskq_create);

void
taskq_destroy(taskq_t *tq)
{
	struct task_struct *thread;
	taskq_thread_t *tqt;
	taskq_ent_t *t;
	unsigned long flags;

	ASSERT(tq);
	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	tq->tq_flags &= ~TASKQ_ACTIVE;
	spin_unlock_irqrestore(&tq->tq_lock, flags);

#ifdef HAVE_CPU_HOTPLUG
	if (tq->tq_hp_support) {
		VERIFY0(cpuhp_state_remove_instance_nocalls(
		    spl_taskq_cpuhp_state, &tq->tq_hp_cb_node));
	}
#endif
	/*
	 * When TASKQ_ACTIVE is clear new tasks may not be added nor may
	 * new worker threads be spawned for a dynamic taskq.
	 */
	if (dynamic_taskq != NULL)
		taskq_wait_outstanding(dynamic_taskq, 0);

	taskq_wait(tq);

	/* remove taskq from global list used by the kstats */
	down_write(&tq_list_sem);
	list_del(&tq->tq_taskqs);
	up_write(&tq_list_sem);

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	/* wait for spawning threads to insert themselves to the list */
	while (tq->tq_nspawn) {
		spin_unlock_irqrestore(&tq->tq_lock, flags);
		schedule_timeout_interruptible(1);
		spin_lock_irqsave_nested(&tq->tq_lock, flags,
		    tq->tq_lock_class);
	}

	/*
	 * Signal each thread to exit and block until it does. Each thread
	 * is responsible for removing itself from the list and freeing its
	 * taskq_thread_t. This allows for idle threads to opt to remove
	 * themselves from the taskq. They can be recreated as needed.
	 */
	while (!list_empty(&tq->tq_thread_list)) {
		tqt = list_entry(tq->tq_thread_list.next,
		    taskq_thread_t, tqt_thread_list);
		thread = tqt->tqt_thread;
		spin_unlock_irqrestore(&tq->tq_lock, flags);

		kthread_stop(thread);

		spin_lock_irqsave_nested(&tq->tq_lock, flags,
		    tq->tq_lock_class);
	}

	while (!list_empty(&tq->tq_free_list)) {
		t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);

		ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

		list_del_init(&t->tqent_list);
		task_free(tq, t);
	}

	ASSERT0(tq->tq_nthreads);
	ASSERT0(tq->tq_nalloc);
	ASSERT0(tq->tq_nspawn);
	ASSERT(list_empty(&tq->tq_thread_list));
	ASSERT(list_empty(&tq->tq_active_list));
	ASSERT(list_empty(&tq->tq_free_list));
	ASSERT(list_empty(&tq->tq_pend_list));
	ASSERT(list_empty(&tq->tq_prio_list));
	ASSERT(list_empty(&tq->tq_delay_list));

	spin_unlock_irqrestore(&tq->tq_lock, flags);

	kmem_strfree(tq->tq_name);
	kmem_free(tq, sizeof (taskq_t));
}
EXPORT_SYMBOL(taskq_destroy);

/*
 * Create a taskq with a specified number of pool threads. Allocate
 * and return an array of nthreads kthread_t pointers, one for each
 * thread in the pool. The array is not ordered and must be freed
 * by the caller.
 */
taskq_t *
taskq_create_synced(const char *name, int nthreads, pri_t pri,
    int minalloc, int maxalloc, uint_t flags, kthread_t ***ktpp)
{
	taskq_t *tq;
	taskq_thread_t *tqt;
	int i = 0;
	kthread_t **kthreads = kmem_zalloc(sizeof (*kthreads) * nthreads,
	    KM_SLEEP);

	flags &= ~(TASKQ_DYNAMIC | TASKQ_THREADS_CPU_PCT | TASKQ_DC_BATCH);

	/* taskq_create spawns all the threads before returning */
	tq = taskq_create(name, nthreads, minclsyspri, nthreads, INT_MAX,
	    flags | TASKQ_PREPOPULATE);
	VERIFY(tq != NULL);
	VERIFY(tq->tq_nthreads == nthreads);

	list_for_each_entry(tqt, &tq->tq_thread_list, tqt_thread_list) {
		kthreads[i] = tqt->tqt_thread;
		i++;
	}

	ASSERT3S(i, ==, nthreads);
	*ktpp = kthreads;

	return (tq);
}
EXPORT_SYMBOL(taskq_create_synced);

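/*
 * Usage sketch for taskq_create_synced() (illustrative; the names are
 * hypothetical). The caller receives the pool's kthread_t pointers and,
 * as noted above, must free the array:
 *
 *	kthread_t **kthreads;
 *	taskq_t *tq = taskq_create_synced("my_pool", 8, minclsyspri,
 *	    8, INT_MAX, 0, &kthreads);
 *	...
 *	kmem_free(kthreads, sizeof (*kthreads) * 8);
 */
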
static unsigned int spl_taskq_kick = 0;

/*
 * 2.6.36 API Change
 * module_param_cb is introduced to take kernel_param_ops and
 * module_param_call is marked as obsolete. Also set and get operations
 * were changed to take a 'const struct kernel_param *'.
 */
static int
#ifdef module_param_cb
param_set_taskq_kick(const char *val, const struct kernel_param *kp)
#else
param_set_taskq_kick(const char *val, struct kernel_param *kp)
#endif
{
	int ret;
	taskq_t *tq = NULL;
	taskq_ent_t *t;
	unsigned long flags;

	ret = param_set_uint(val, kp);
	if (ret < 0 || !spl_taskq_kick)
		return (ret);
	/* reset value */
	spl_taskq_kick = 0;

	down_read(&tq_list_sem);
	list_for_each_entry(tq, &tq_list, tq_taskqs) {
		spin_lock_irqsave_nested(&tq->tq_lock, flags,
		    tq->tq_lock_class);
		/* Check if the first pending is older than 5 seconds */
		t = taskq_next_ent(tq);
		if (t && time_after(jiffies, t->tqent_birth + 5*HZ)) {
			(void) taskq_thread_spawn(tq);
			printk(KERN_INFO "spl: Kicked taskq %s/%d\n",
			    tq->tq_name, tq->tq_instance);
		}
		spin_unlock_irqrestore(&tq->tq_lock, flags);
	}
	up_read(&tq_list_sem);
	return (ret);
}

#ifdef module_param_cb
static const struct kernel_param_ops param_ops_taskq_kick = {
	.set = param_set_taskq_kick,
	.get = param_get_uint,
};
module_param_cb(spl_taskq_kick, &param_ops_taskq_kick, &spl_taskq_kick, 0644);
#else
module_param_call(spl_taskq_kick, param_set_taskq_kick, param_get_uint,
    &spl_taskq_kick, 0644);
#endif
MODULE_PARM_DESC(spl_taskq_kick,
	"Write nonzero to kick stuck taskqs to spawn more threads");

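/*
 * Presumably reachable as /sys/module/spl/parameters/spl_taskq_kick, e.g.:
 *
 *	echo 1 > /sys/module/spl/parameters/spl_taskq_kick
 *
 * which scans every taskq and spawns an extra thread for any queue whose
 * oldest pending task has been waiting for more than five seconds.
 */
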
#ifdef HAVE_CPU_HOTPLUG
/*
 * This callback will be called exactly once for each core that comes online,
 * for each dynamic taskq. We attempt to expand taskqs that have
 * TASKQ_THREADS_CPU_PCT set. We need to redo the percentage calculation every
 * time, to correctly determine whether or not to add a thread.
 */
static int
spl_taskq_expand(unsigned int cpu, struct hlist_node *node)
{
	taskq_t *tq = list_entry(node, taskq_t, tq_hp_cb_node);
	unsigned long flags;
	int err = 0;

	ASSERT(tq);
	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);

	if (!(tq->tq_flags & TASKQ_ACTIVE)) {
		spin_unlock_irqrestore(&tq->tq_lock, flags);
		return (err);
	}

	ASSERT(tq->tq_flags & TASKQ_THREADS_CPU_PCT);
	int nthreads = MIN(tq->tq_cpu_pct, 100);
	nthreads = MAX(((num_online_cpus() + 1) * nthreads) / 100, 1);
	tq->tq_maxthreads = nthreads;

	if (!((tq->tq_flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic) &&
	    tq->tq_maxthreads > tq->tq_nthreads) {
		spin_unlock_irqrestore(&tq->tq_lock, flags);
		taskq_thread_t *tqt = taskq_thread_create(tq);
		if (tqt == NULL)
			err = -1;
		return (err);
	}
	spin_unlock_irqrestore(&tq->tq_lock, flags);
	return (err);
}

/*
 * While we don't support offlining CPUs, it is possible that CPUs will fail
 * to online successfully. We do need to be able to handle this case
 * gracefully.
 */
static int
spl_taskq_prepare_down(unsigned int cpu, struct hlist_node *node)
{
	taskq_t *tq = list_entry(node, taskq_t, tq_hp_cb_node);
	unsigned long flags;

	ASSERT(tq);
	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);

	if (!(tq->tq_flags & TASKQ_ACTIVE))
		goto out;

	ASSERT(tq->tq_flags & TASKQ_THREADS_CPU_PCT);
	int nthreads = MIN(tq->tq_cpu_pct, 100);
	nthreads = MAX(((num_online_cpus()) * nthreads) / 100, 1);
	tq->tq_maxthreads = nthreads;

	if (!((tq->tq_flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic) &&
	    tq->tq_maxthreads < tq->tq_nthreads) {
		ASSERT3U(tq->tq_maxthreads, ==, tq->tq_nthreads - 1);
		taskq_thread_t *tqt = list_entry(tq->tq_thread_list.next,
		    taskq_thread_t, tqt_thread_list);
		struct task_struct *thread = tqt->tqt_thread;
		spin_unlock_irqrestore(&tq->tq_lock, flags);

		kthread_stop(thread);

		return (0);
	}

out:
	spin_unlock_irqrestore(&tq->tq_lock, flags);
	return (0);
}
#endif

int
spl_taskq_init(void)
{
	init_rwsem(&tq_list_sem);
	tsd_create(&taskq_tsd, NULL);

#ifdef HAVE_CPU_HOTPLUG
	spl_taskq_cpuhp_state = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
	    "fs/spl_taskq:online", spl_taskq_expand, spl_taskq_prepare_down);
#endif

	system_taskq = taskq_create("spl_system_taskq", MAX(boot_ncpus, 64),
	    maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE|TASKQ_DYNAMIC);
	if (system_taskq == NULL)
		return (-ENOMEM);

	system_delay_taskq = taskq_create("spl_delay_taskq", MAX(boot_ncpus, 4),
	    maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE|TASKQ_DYNAMIC);
	if (system_delay_taskq == NULL) {
#ifdef HAVE_CPU_HOTPLUG
		cpuhp_remove_multi_state(spl_taskq_cpuhp_state);
#endif
		taskq_destroy(system_taskq);
		return (-ENOMEM);
	}

	dynamic_taskq = taskq_create("spl_dynamic_taskq", 1,
	    maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE);
	if (dynamic_taskq == NULL) {
#ifdef HAVE_CPU_HOTPLUG
		cpuhp_remove_multi_state(spl_taskq_cpuhp_state);
#endif
		taskq_destroy(system_taskq);
		taskq_destroy(system_delay_taskq);
		return (-ENOMEM);
	}

	/*
	 * This is used to annotate tq_lock, so
	 *	taskq_dispatch -> taskq_thread_spawn -> taskq_dispatch
	 * does not trigger a lockdep warning re: possible recursive locking
	 */
	dynamic_taskq->tq_lock_class = TQ_LOCK_DYNAMIC;

	return (0);
}

void
spl_taskq_fini(void)
{
	taskq_destroy(dynamic_taskq);
	dynamic_taskq = NULL;

	taskq_destroy(system_delay_taskq);
	system_delay_taskq = NULL;

	taskq_destroy(system_taskq);
	system_taskq = NULL;

	tsd_destroy(&taskq_tsd);

#ifdef HAVE_CPU_HOTPLUG
	cpuhp_remove_multi_state(spl_taskq_cpuhp_state);
	spl_taskq_cpuhp_state = 0;
#endif
}