/*****************************************************************************\
 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2007 The Regents of the University of California.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 * UCRL-CODE-235197
 *
 * This file is part of the SPL, Solaris Porting Layer.
 * For details, see <http://zfsonlinux.org/>.
 *
 * The SPL is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * The SPL is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
 *****************************************************************************
 * Solaris Porting Layer (SPL) Task Queue Implementation.
\*****************************************************************************/

#include <sys/taskq.h>
#include <sys/kmem.h>

int spl_taskq_thread_bind = 0;
module_param(spl_taskq_thread_bind, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_bind, "Bind taskq thread to CPU by default");

int spl_taskq_thread_dynamic = 1;
module_param(spl_taskq_thread_dynamic, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_dynamic, "Allow dynamic taskq threads");

int spl_taskq_thread_sequential = 4;
module_param(spl_taskq_thread_sequential, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_sequential,
    "Create new taskq threads after N sequential tasks");

/* Global system-wide dynamic task queue available for all consumers */
taskq_t *system_taskq;
EXPORT_SYMBOL(system_taskq);

/* Private dedicated taskq for creating new taskq threads on demand. */
static taskq_t *dynamic_taskq;
static taskq_thread_t *taskq_thread_create(taskq_t *);

static int
task_km_flags(uint_t flags)
{
        if (flags & TQ_NOSLEEP)
                return (KM_NOSLEEP);

        if (flags & TQ_PUSHPAGE)
                return (KM_PUSHPAGE);

        return (KM_SLEEP);
}

/*
 * NOTE: Must be called with tq->tq_lock held, returns a list_t which
 * is not attached to the free, work, or pending taskq lists.
 */
static taskq_ent_t *
task_alloc(taskq_t *tq, uint_t flags)
{
        taskq_ent_t *t;
        int count = 0;

        ASSERT(tq);
        ASSERT(spin_is_locked(&tq->tq_lock));
retry:
        /* Acquire taskq_ent_t's from free list if available */
        if (!list_empty(&tq->tq_free_list) && !(flags & TQ_NEW)) {
                t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);

                ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));
                ASSERT(!(t->tqent_flags & TQENT_FLAG_CANCEL));
                ASSERT(!timer_pending(&t->tqent_timer));

                list_del_init(&t->tqent_list);
                return (t);
        }

        /* Free list is empty and memory allocations are prohibited */
        if (flags & TQ_NOALLOC)
                return (NULL);

        /* Hit maximum taskq_ent_t pool size */
        if (tq->tq_nalloc >= tq->tq_maxalloc) {
                if (flags & TQ_NOSLEEP)
                        return (NULL);

                /*
                 * Sleep periodically polling the free list for an available
                 * taskq_ent_t. Dispatching with TQ_SLEEP should always succeed
                 * but we cannot block forever waiting for a taskq_ent_t to
                 * show up in the free list, otherwise a deadlock can happen.
                 *
                 * Therefore, we need to allocate a new task even if the number
                 * of allocated tasks is above tq->tq_maxalloc, but we still
                 * end up delaying the task allocation by one second, thereby
                 * throttling the task dispatch rate.
                 */
                spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
                schedule_timeout(HZ / 100);
                spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
                if (count < 100) {
                        count++;
                        goto retry;
                }
        }

        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
        t = kmem_alloc(sizeof (taskq_ent_t), task_km_flags(flags));
        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

        if (t) {
                taskq_init_ent(t);
                tq->tq_nalloc++;
        }

        return (t);
}

/*
 * NOTE: Must be called with tq->tq_lock held, expects the taskq_ent_t
 * to already be removed from the free, work, or pending taskq lists.
 */
static void
task_free(taskq_t *tq, taskq_ent_t *t)
{
        ASSERT(tq);
        ASSERT(t);
        ASSERT(spin_is_locked(&tq->tq_lock));
        ASSERT(list_empty(&t->tqent_list));
        ASSERT(!timer_pending(&t->tqent_timer));

        kmem_free(t, sizeof (taskq_ent_t));
        tq->tq_nalloc--;
}

/*
 * NOTE: Must be called with tq->tq_lock held, either destroys the
 * taskq_ent_t if too many exist or moves it to the free list for later use.
 */
static void
task_done(taskq_t *tq, taskq_ent_t *t)
{
        ASSERT(tq);
        ASSERT(t);
        ASSERT(spin_is_locked(&tq->tq_lock));

        /* Wake tasks blocked in taskq_wait_id() */
        wake_up_all(&t->tqent_waitq);

        list_del_init(&t->tqent_list);

        if (tq->tq_nalloc <= tq->tq_minalloc) {
                t->tqent_id = 0;
                t->tqent_func = NULL;
                t->tqent_arg = NULL;
                t->tqent_flags = 0;

                list_add_tail(&t->tqent_list, &tq->tq_free_list);
        } else {
                task_free(tq, t);
        }
}

/*
 * When a delayed task timer expires remove it from the delay list and
 * add it to the priority list in order for immediate processing.
 */
static void
task_expire(unsigned long data)
{
        taskq_ent_t *w, *t = (taskq_ent_t *)data;
        taskq_t *tq = t->tqent_taskq;
        struct list_head *l;

        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

        if (t->tqent_flags & TQENT_FLAG_CANCEL) {
                ASSERT(list_empty(&t->tqent_list));
                spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
                return;
        }

        /*
         * The priority list must be maintained in strict task id order
         * from lowest to highest for lowest_id to be easily calculable.
         */
        list_del(&t->tqent_list);
        list_for_each_prev(l, &tq->tq_prio_list) {
                w = list_entry(l, taskq_ent_t, tqent_list);
                if (w->tqent_id < t->tqent_id) {
                        list_add(&t->tqent_list, l);
                        break;
                }
        }
        if (l == &tq->tq_prio_list)
                list_add(&t->tqent_list, &tq->tq_prio_list);

        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        wake_up(&tq->tq_work_waitq);
}

/*
 * Returns the lowest incomplete taskqid_t. The taskqid_t may
 * be queued on the pending list, on the priority list, on the
 * delay list, or on the work list currently being handled, but
 * it is not 100% complete yet.
 */
static taskqid_t
taskq_lowest_id(taskq_t *tq)
{
        taskqid_t lowest_id = tq->tq_next_id;
        taskq_ent_t *t;
        taskq_thread_t *tqt;

        ASSERT(tq);
        ASSERT(spin_is_locked(&tq->tq_lock));

        if (!list_empty(&tq->tq_pend_list)) {
                t = list_entry(tq->tq_pend_list.next, taskq_ent_t, tqent_list);
                lowest_id = MIN(lowest_id, t->tqent_id);
        }

        if (!list_empty(&tq->tq_prio_list)) {
                t = list_entry(tq->tq_prio_list.next, taskq_ent_t, tqent_list);
                lowest_id = MIN(lowest_id, t->tqent_id);
        }

        if (!list_empty(&tq->tq_delay_list)) {
                t = list_entry(tq->tq_delay_list.next, taskq_ent_t, tqent_list);
                lowest_id = MIN(lowest_id, t->tqent_id);
        }

        if (!list_empty(&tq->tq_active_list)) {
                tqt = list_entry(tq->tq_active_list.next, taskq_thread_t,
                    tqt_active_list);
                ASSERT(tqt->tqt_id != 0);
                lowest_id = MIN(lowest_id, tqt->tqt_id);
        }

        return (lowest_id);
}

/*
 * Insert a task into a list keeping the list sorted by increasing taskqid.
 */
static void
taskq_insert_in_order(taskq_t *tq, taskq_thread_t *tqt)
{
        taskq_thread_t *w;
        struct list_head *l;

        ASSERT(tq);
        ASSERT(tqt);
        ASSERT(spin_is_locked(&tq->tq_lock));

        list_for_each_prev(l, &tq->tq_active_list) {
                w = list_entry(l, taskq_thread_t, tqt_active_list);
                if (w->tqt_id < tqt->tqt_id) {
                        list_add(&tqt->tqt_active_list, l);
                        break;
                }
        }
        if (l == &tq->tq_active_list)
                list_add(&tqt->tqt_active_list, &tq->tq_active_list);
}

/*
 * Find and return a task from the given list if it exists. The list
 * must be in lowest to highest task id order.
 */
static taskq_ent_t *
taskq_find_list(taskq_t *tq, struct list_head *lh, taskqid_t id)
{
        struct list_head *l;
        taskq_ent_t *t;

        ASSERT(spin_is_locked(&tq->tq_lock));

        list_for_each(l, lh) {
                t = list_entry(l, taskq_ent_t, tqent_list);

                if (t->tqent_id == id)
                        return (t);

                if (t->tqent_id > id)
                        break;
        }

        return (NULL);
}

/*
 * Find an already dispatched task given the task id regardless of what
 * state it is in. If a task is still pending or executing it will be
 * returned and 'active' set appropriately. If the task has already
 * been run then NULL is returned.
 */
static taskq_ent_t *
taskq_find(taskq_t *tq, taskqid_t id, int *active)
{
        taskq_thread_t *tqt;
        struct list_head *l;
        taskq_ent_t *t;

        ASSERT(spin_is_locked(&tq->tq_lock));
        *active = 0;

        t = taskq_find_list(tq, &tq->tq_delay_list, id);
        if (t)
                return (t);

        t = taskq_find_list(tq, &tq->tq_prio_list, id);
        if (t)
                return (t);

        t = taskq_find_list(tq, &tq->tq_pend_list, id);
        if (t)
                return (t);

        list_for_each(l, &tq->tq_active_list) {
                tqt = list_entry(l, taskq_thread_t, tqt_active_list);
                if (tqt->tqt_id == id) {
                        t = tqt->tqt_task;
                        *active = 1;
                        return (t);
                }
        }

        return (NULL);
}

/*
 * Theory for the taskq_wait_id(), taskq_wait_outstanding(), and
 * taskq_wait() functions below.
 *
 * Taskq waiting is accomplished by tracking the lowest outstanding task
 * id and the next available task id. As tasks are dispatched they are
 * added to the tail of the pending, priority, or delay lists. As worker
 * threads become available the tasks are removed from the heads of these
 * lists and linked to the worker threads. This ensures the lists are
 * kept sorted by lowest to highest task id.
 *
 * Therefore the lowest outstanding task id can be quickly determined by
 * checking the head item from all of these lists. This value is stored
 * with the taskq as the lowest id. It only needs to be recalculated when
 * either the task with the current lowest id completes or is canceled.
 *
 * By blocking until the lowest task id exceeds the passed task id the
 * taskq_wait_outstanding() function can be easily implemented. Similarly,
 * by blocking until the lowest task id matches the next task id taskq_wait()
 * can be implemented.
 *
 * Callers should be aware that when there are multiple worker threads it
 * is possible for larger task ids to complete before smaller ones. Also
 * when the taskq contains delay tasks with small task ids callers may
 * block for a considerable length of time waiting for them to expire and
 * execute.
 */
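
/*
 * For example, if tasks 5, 6, and 7 are outstanding and 6 and 7 complete
 * while 5 is still executing, the lowest outstanding id remains 5; a
 * caller blocked in taskq_wait_outstanding(tq, 6) therefore keeps waiting
 * even though task 6 itself has already finished.
 */
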
static int
taskq_wait_id_check(taskq_t *tq, taskqid_t id)
{
        int active = 0;
        int rc;

        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        rc = (taskq_find(tq, id, &active) == NULL);
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        return (rc);
}

/*
 * The taskq_wait_id() function blocks until the passed task id completes.
 * This does not guarantee that all lower task ids have completed.
 */
void
taskq_wait_id(taskq_t *tq, taskqid_t id)
{
        wait_event(tq->tq_wait_waitq, taskq_wait_id_check(tq, id));
}
EXPORT_SYMBOL(taskq_wait_id);

static int
taskq_wait_outstanding_check(taskq_t *tq, taskqid_t id)
{
        int rc;

        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        rc = (id < tq->tq_lowest_id);
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        return (rc);
}

/*
 * The taskq_wait_outstanding() function will block until all tasks with a
 * lower taskqid than the passed 'id' have been completed. Note that all
 * task id's are assigned monotonically at dispatch time. Zero may be
 * passed for the id to indicate that all tasks dispatched up to this
 * point, but not after, should be waited for.
 */
void
taskq_wait_outstanding(taskq_t *tq, taskqid_t id)
{
        wait_event(tq->tq_wait_waitq,
            taskq_wait_outstanding_check(tq, id ? id : tq->tq_next_id - 1));
}
EXPORT_SYMBOL(taskq_wait_outstanding);

static int
taskq_wait_check(taskq_t *tq)
{
        int rc;

        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        rc = (tq->tq_lowest_id == tq->tq_next_id);
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        return (rc);
}

/*
 * The taskq_wait() function will block until the taskq is empty.
 * This means that if a taskq re-dispatches work to itself taskq_wait()
 * callers will block indefinitely.
 */
void
taskq_wait(taskq_t *tq)
{
        wait_event(tq->tq_wait_waitq, taskq_wait_check(tq));
}
EXPORT_SYMBOL(taskq_wait);
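
/*
 * A minimal sketch contrasting the three wait primitives, assuming a
 * hypothetical noop_func() and an existing taskq 'tq' (illustrative
 * only, not part of this file):
 *
 *	taskqid_t id = taskq_dispatch(tq, noop_func, NULL, TQ_SLEEP);
 *
 *	taskq_wait_id(tq, id);		// wait for this one task only
 *	taskq_wait_outstanding(tq, 0);	// wait for everything dispatched
 *					// up to this point
 *	taskq_wait(tq);			// wait until the taskq is empty
 */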

int
taskq_member(taskq_t *tq, void *t)
{
        struct list_head *l;
        taskq_thread_t *tqt;
        int found = 0;

        ASSERT(tq);
        ASSERT(t);

        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        list_for_each(l, &tq->tq_thread_list) {
                tqt = list_entry(l, taskq_thread_t, tqt_thread_list);
                if (tqt->tqt_thread == (struct task_struct *)t) {
                        found = 1;
                        break;
                }
        }
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        return (found);
}
EXPORT_SYMBOL(taskq_member);

/*
 * Cancel an already dispatched task given the task id. Still pending tasks
 * will be immediately canceled, and if the task is active the function will
 * block until it completes. Preallocated tasks which are canceled must be
 * freed by the caller.
 */
int
taskq_cancel_id(taskq_t *tq, taskqid_t id)
{
        taskq_ent_t *t;
        int active = 0;
        int rc = ENOENT;

        ASSERT(tq);

        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        t = taskq_find(tq, id, &active);
        if (t && !active) {
                list_del_init(&t->tqent_list);
                t->tqent_flags |= TQENT_FLAG_CANCEL;

                /*
                 * When canceling the lowest outstanding task id we
                 * must recalculate the new lowest outstanding id.
                 */
                if (tq->tq_lowest_id == t->tqent_id) {
                        tq->tq_lowest_id = taskq_lowest_id(tq);
                        ASSERT3S(tq->tq_lowest_id, >, t->tqent_id);
                }

                /*
                 * The task_expire() function takes the tq->tq_lock so drop
                 * the lock before synchronously cancelling the timer.
                 */
                if (timer_pending(&t->tqent_timer)) {
                        spin_unlock_irqrestore(&tq->tq_lock,
                            tq->tq_lock_flags);
                        del_timer_sync(&t->tqent_timer);
                        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
                }

                if (!(t->tqent_flags & TQENT_FLAG_PREALLOC))
                        task_done(tq, t);

                rc = 0;
        }
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        if (active) {
                taskq_wait_id(tq, id);
                rc = EBUSY;
        }

        return (rc);
}
EXPORT_SYMBOL(taskq_cancel_id);
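
/*
 * Sketch of the taskq_cancel_id() return contract (illustrative only;
 * 'tq' and 'id' are assumed to come from an earlier dispatch). Note the
 * positive errno-style values:
 *
 *	switch (taskq_cancel_id(tq, id)) {
 *	case 0:		// pending task removed; it will never run
 *		break;
 *	case EBUSY:	// task was active; it has since been waited on
 *		break;
 *	case ENOENT:	// no such outstanding task (already completed)
 *		break;
 *	}
 */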

taskqid_t
taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
{
        taskq_ent_t *t;
        taskqid_t rc = 0;

        ASSERT(tq);
        ASSERT(func);

        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

        /* Taskq being destroyed and all tasks drained */
        if (!(tq->tq_flags & TASKQ_ACTIVE))
                goto out;

        /* Do not queue the task unless there is an idle thread for it */
        ASSERT(tq->tq_nactive <= tq->tq_nthreads);
        if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads))
                goto out;

        if ((t = task_alloc(tq, flags)) == NULL)
                goto out;

        spin_lock(&t->tqent_lock);

        /* Queue to the priority list instead of the pending list */
        if (flags & TQ_FRONT)
                list_add_tail(&t->tqent_list, &tq->tq_prio_list);
        else
                list_add_tail(&t->tqent_list, &tq->tq_pend_list);

        t->tqent_id = rc = tq->tq_next_id;
        tq->tq_next_id++;
        t->tqent_func = func;
        t->tqent_arg = arg;
        t->tqent_taskq = tq;
        t->tqent_timer.data = 0;
        t->tqent_timer.function = NULL;
        t->tqent_timer.expires = 0;

        ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

        spin_unlock(&t->tqent_lock);

        wake_up(&tq->tq_work_waitq);
out:
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
        return (rc);
}
EXPORT_SYMBOL(taskq_dispatch);
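
/*
 * A minimal dispatch sketch (illustrative only; my_func() and 'tq' are
 * assumed to exist elsewhere). TQ_SLEEP may block while allocating the
 * entry, TQ_NOSLEEP may instead fail and return 0, and TQ_FRONT queues
 * ahead of normal work via the priority list:
 *
 *	taskqid_t id;
 *
 *	id = taskq_dispatch(tq, my_func, NULL, TQ_SLEEP);
 *	if (id == 0)
 *		;	// dispatch failed; my_func() will never run
 */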

taskqid_t
taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg,
    uint_t flags, clock_t expire_time)
{
        taskqid_t rc = 0;
        taskq_ent_t *t;

        ASSERT(tq);
        ASSERT(func);

        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

        /* Taskq being destroyed and all tasks drained */
        if (!(tq->tq_flags & TASKQ_ACTIVE))
                goto out;

        if ((t = task_alloc(tq, flags)) == NULL)
                goto out;

        spin_lock(&t->tqent_lock);

        /* Queue to the delay list for subsequent execution */
        list_add_tail(&t->tqent_list, &tq->tq_delay_list);

        t->tqent_id = rc = tq->tq_next_id;
        tq->tq_next_id++;
        t->tqent_func = func;
        t->tqent_arg = arg;
        t->tqent_taskq = tq;
        t->tqent_timer.data = (unsigned long)t;
        t->tqent_timer.function = task_expire;
        t->tqent_timer.expires = (unsigned long)expire_time;
        add_timer(&t->tqent_timer);

        ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

        spin_unlock(&t->tqent_lock);
out:
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
        return (rc);
}
EXPORT_SYMBOL(taskq_dispatch_delay);

void
taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
    taskq_ent_t *t)
{
        ASSERT(tq);
        ASSERT(func);

        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

        /* Taskq being destroyed and all tasks drained */
        if (!(tq->tq_flags & TASKQ_ACTIVE)) {
                t->tqent_id = 0;
                goto out;
        }

        spin_lock(&t->tqent_lock);

        /*
         * Mark it as a prealloc'd task. This is important
         * to ensure that we don't free it later.
         */
        t->tqent_flags |= TQENT_FLAG_PREALLOC;

        /* Queue to the priority list instead of the pending list */
        if (flags & TQ_FRONT)
                list_add_tail(&t->tqent_list, &tq->tq_prio_list);
        else
                list_add_tail(&t->tqent_list, &tq->tq_pend_list);

        t->tqent_id = tq->tq_next_id;
        tq->tq_next_id++;
        t->tqent_func = func;
        t->tqent_arg = arg;
        t->tqent_taskq = tq;

        spin_unlock(&t->tqent_lock);

        wake_up(&tq->tq_work_waitq);
out:
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
}
EXPORT_SYMBOL(taskq_dispatch_ent);

int
taskq_empty_ent(taskq_ent_t *t)
{
        return (list_empty(&t->tqent_list));
}
EXPORT_SYMBOL(taskq_empty_ent);

void
taskq_init_ent(taskq_ent_t *t)
{
        spin_lock_init(&t->tqent_lock);
        init_waitqueue_head(&t->tqent_waitq);
        init_timer(&t->tqent_timer);
        INIT_LIST_HEAD(&t->tqent_list);
        t->tqent_id = 0;
        t->tqent_func = NULL;
        t->tqent_arg = NULL;
        t->tqent_flags = 0;
        t->tqent_taskq = NULL;
}
EXPORT_SYMBOL(taskq_init_ent);
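
/*
 * A sketch of the preallocated-entry interface, assuming the caller
 * embeds a taskq_ent_t in its own structure (my_work_t and my_func()
 * are hypothetical):
 *
 *	typedef struct my_work {
 *		taskq_ent_t mw_ent;
 *		int mw_data;
 *	} my_work_t;
 *
 *	my_work_t *w = kmem_alloc(sizeof (my_work_t), KM_SLEEP);
 *	taskq_init_ent(&w->mw_ent);
 *	taskq_dispatch_ent(tq, my_func, w, TQ_SLEEP, &w->mw_ent);
 *
 * Because the entry is marked TQENT_FLAG_PREALLOC the taskq never frees
 * it; my_func() (or a later caller) retains ownership of the memory.
 */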

/*
 * Return the next pending task, preference is given to tasks on the
 * priority list which were dispatched with TQ_FRONT.
 */
static taskq_ent_t *
taskq_next_ent(taskq_t *tq)
{
        struct list_head *list;

        ASSERT(spin_is_locked(&tq->tq_lock));

        if (!list_empty(&tq->tq_prio_list))
                list = &tq->tq_prio_list;
        else if (!list_empty(&tq->tq_pend_list))
                list = &tq->tq_pend_list;
        else
                return (NULL);

        return (list_entry(list->next, taskq_ent_t, tqent_list));
}

/*
 * Spawns a new thread for the specified taskq.
 */
static void
taskq_thread_spawn_task(void *arg)
{
        taskq_t *tq = (taskq_t *)arg;

        (void) taskq_thread_create(tq);

        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        tq->tq_nspawn--;
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
}

/*
 * Spawn additional threads for dynamic taskqs (TASKQ_DYNAMIC) when the
 * current number of threads is insufficient to handle the pending tasks.
 * These new threads must be created by the dedicated dynamic_taskq to
 * avoid deadlocks between thread creation and memory reclaim. The
 * system_taskq, which is also a dynamic taskq, cannot be safely used
 * for this.
 */
static int
taskq_thread_spawn(taskq_t *tq, int seq_tasks)
{
        int spawning = 0;

        if (!(tq->tq_flags & TASKQ_DYNAMIC))
                return (0);

        if ((seq_tasks > spl_taskq_thread_sequential) &&
            (tq->tq_nthreads + tq->tq_nspawn < tq->tq_maxthreads) &&
            (tq->tq_flags & TASKQ_ACTIVE)) {
                spawning = (++tq->tq_nspawn);
                taskq_dispatch(dynamic_taskq, taskq_thread_spawn_task,
                    tq, TQ_NOSLEEP);
        }

        return (spawning);
}

/*
 * Threads in a dynamic taskq should only exit once it has been completely
 * drained and no other threads are actively servicing tasks. This prevents
 * threads from being created and destroyed more than is required.
 *
 * The first thread in the thread list is treated as the primary thread.
 * There is nothing special about the primary thread but in order to avoid
 * all the taskq pids from changing we opt to make it long running.
 */
static int
taskq_thread_should_stop(taskq_t *tq, taskq_thread_t *tqt)
{
        ASSERT(spin_is_locked(&tq->tq_lock));

        if (!(tq->tq_flags & TASKQ_DYNAMIC))
                return (0);

        if (list_first_entry(&(tq->tq_thread_list), taskq_thread_t,
            tqt_thread_list) == tqt)
                return (0);

        return
            ((tq->tq_nspawn == 0) &&    /* No threads are being spawned */
            (tq->tq_nactive == 0) &&    /* No threads are handling tasks */
            (tq->tq_nthreads > 1) &&    /* More than 1 thread is running */
            (!taskq_next_ent(tq)) &&    /* There are no pending tasks */
            (spl_taskq_thread_dynamic));/* Dynamic taskqs are allowed */
}

static int
taskq_thread(void *args)
{
        DECLARE_WAITQUEUE(wait, current);
        sigset_t blocked;
        taskq_thread_t *tqt = args;
        taskq_t *tq;
        taskq_ent_t *t;
        int seq_tasks = 0;

        ASSERT(tqt);
        tq = tqt->tqt_tq;
        current->flags |= PF_NOFREEZE;

        sigfillset(&blocked);
        sigprocmask(SIG_BLOCK, &blocked, NULL);
        flush_signals(current);

        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

        /* Immediately exit if more threads than allowed were created. */
        if (tq->tq_nthreads >= tq->tq_maxthreads)
                goto error;

        tq->tq_nthreads++;
        list_add_tail(&tqt->tqt_thread_list, &tq->tq_thread_list);
        wake_up(&tq->tq_wait_waitq);
        set_current_state(TASK_INTERRUPTIBLE);

        while (!kthread_should_stop()) {

                if (list_empty(&tq->tq_pend_list) &&
                    list_empty(&tq->tq_prio_list)) {

                        if (taskq_thread_should_stop(tq, tqt)) {
                                wake_up_all(&tq->tq_wait_waitq);
                                break;
                        }

                        add_wait_queue_exclusive(&tq->tq_work_waitq, &wait);
                        spin_unlock_irqrestore(&tq->tq_lock,
                            tq->tq_lock_flags);

                        schedule();
                        seq_tasks = 0;

                        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
                        remove_wait_queue(&tq->tq_work_waitq, &wait);
                } else {
                        __set_current_state(TASK_RUNNING);
                }

                if ((t = taskq_next_ent(tq)) != NULL) {
                        list_del_init(&t->tqent_list);

                        /*
                         * In order to support recursively dispatching a
                         * preallocated taskq_ent_t, tqent_id must be
                         * stored prior to executing tqent_func.
                         */
                        tqt->tqt_id = t->tqent_id;
                        tqt->tqt_task = t;

                        /*
                         * We must store a copy of the flags prior to
                         * servicing the task (servicing a prealloc'd task
                         * returns the ownership of the tqent back to
                         * the caller of taskq_dispatch). Thus,
                         * tqent_flags _may_ change within the call.
                         */
                        tqt->tqt_flags = t->tqent_flags;

                        taskq_insert_in_order(tq, tqt);
                        tq->tq_nactive++;
                        spin_unlock_irqrestore(&tq->tq_lock,
                            tq->tq_lock_flags);

                        /* Perform the requested task */
                        t->tqent_func(t->tqent_arg);

                        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
                        tq->tq_nactive--;
                        list_del_init(&tqt->tqt_active_list);
                        tqt->tqt_task = NULL;

                        /* For prealloc'd tasks, we don't free anything. */
                        if (!(tqt->tqt_flags & TQENT_FLAG_PREALLOC))
                                task_done(tq, t);

                        /*
                         * When the current lowest outstanding taskqid is
                         * done calculate the new lowest outstanding id.
                         */
                        if (tq->tq_lowest_id == tqt->tqt_id) {
                                tq->tq_lowest_id = taskq_lowest_id(tq);
                                ASSERT3S(tq->tq_lowest_id, >, tqt->tqt_id);
                        }

                        /* Spawn additional taskq threads if required. */
                        if (taskq_thread_spawn(tq, ++seq_tasks))
                                seq_tasks = 0;

                        tqt->tqt_id = 0;
                        tqt->tqt_flags = 0;
                        wake_up_all(&tq->tq_wait_waitq);
                } else {
                        if (taskq_thread_should_stop(tq, tqt))
                                break;
                }

                set_current_state(TASK_INTERRUPTIBLE);

        }

        __set_current_state(TASK_RUNNING);
        tq->tq_nthreads--;
        list_del_init(&tqt->tqt_thread_list);
error:
        kmem_free(tqt, sizeof (taskq_thread_t));
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        return (0);
}

static taskq_thread_t *
taskq_thread_create(taskq_t *tq)
{
        static int last_used_cpu = 0;
        taskq_thread_t *tqt;

        tqt = kmem_alloc(sizeof (*tqt), KM_PUSHPAGE);
        INIT_LIST_HEAD(&tqt->tqt_thread_list);
        INIT_LIST_HEAD(&tqt->tqt_active_list);
        tqt->tqt_tq = tq;
        tqt->tqt_id = 0;

        tqt->tqt_thread = spl_kthread_create(taskq_thread, tqt,
            "%s", tq->tq_name);
        if (tqt->tqt_thread == NULL) {
                kmem_free(tqt, sizeof (taskq_thread_t));
                return (NULL);
        }

        if (spl_taskq_thread_bind) {
                last_used_cpu = (last_used_cpu + 1) % num_online_cpus();
                kthread_bind(tqt->tqt_thread, last_used_cpu);
        }

        set_user_nice(tqt->tqt_thread, PRIO_TO_NICE(tq->tq_pri));
        wake_up_process(tqt->tqt_thread);

        return (tqt);
}

taskq_t *
taskq_create(const char *name, int nthreads, pri_t pri,
    int minalloc, int maxalloc, uint_t flags)
{
        taskq_t *tq;
        taskq_thread_t *tqt;
        int count = 0, rc = 0, i;

        ASSERT(name != NULL);
        ASSERT(pri <= maxclsyspri);
        ASSERT(minalloc >= 0);
        ASSERT(maxalloc <= INT_MAX);
        ASSERT(!(flags & (TASKQ_CPR_SAFE)));    /* Unsupported */

        /* Scale the number of threads using nthreads as a percentage */
        if (flags & TASKQ_THREADS_CPU_PCT) {
                ASSERT(nthreads <= 100);
                ASSERT(nthreads >= 0);
                nthreads = MIN(nthreads, 100);
                nthreads = MAX(nthreads, 0);
                nthreads = MAX((num_online_cpus() * nthreads) / 100, 1);
        }

        tq = kmem_alloc(sizeof (*tq), KM_PUSHPAGE);
        if (tq == NULL)
                return (NULL);

        spin_lock_init(&tq->tq_lock);
        INIT_LIST_HEAD(&tq->tq_thread_list);
        INIT_LIST_HEAD(&tq->tq_active_list);
        tq->tq_name = strdup(name);
        tq->tq_nactive = 0;
        tq->tq_nthreads = 0;
        tq->tq_nspawn = 0;
        tq->tq_maxthreads = nthreads;
        tq->tq_pri = pri;
        tq->tq_minalloc = minalloc;
        tq->tq_maxalloc = maxalloc;
        tq->tq_nalloc = 0;
        tq->tq_flags = (flags | TASKQ_ACTIVE);
        tq->tq_next_id = 1;
        tq->tq_lowest_id = 1;
        INIT_LIST_HEAD(&tq->tq_free_list);
        INIT_LIST_HEAD(&tq->tq_pend_list);
        INIT_LIST_HEAD(&tq->tq_prio_list);
        INIT_LIST_HEAD(&tq->tq_delay_list);
        init_waitqueue_head(&tq->tq_work_waitq);
        init_waitqueue_head(&tq->tq_wait_waitq);

        if (flags & TASKQ_PREPOPULATE) {
                spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

                for (i = 0; i < minalloc; i++)
                        task_done(tq, task_alloc(tq, TQ_PUSHPAGE | TQ_NEW));

                spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
        }

        if ((flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic)
                nthreads = 1;

        for (i = 0; i < nthreads; i++) {
                tqt = taskq_thread_create(tq);
                if (tqt == NULL)
                        rc = 1;
                else
                        count++;
        }

        /* Wait for all threads to be started before potential destroy */
        wait_event(tq->tq_wait_waitq, tq->tq_nthreads == count);

        if (rc) {
                taskq_destroy(tq);
                tq = NULL;
        }

        return (tq);
}
EXPORT_SYMBOL(taskq_create);
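
/*
 * A minimal end-to-end lifecycle sketch (illustrative only; my_func()
 * is hypothetical):
 *
 *	taskq_t *tq = taskq_create("my_taskq", 2, minclsyspri, 4, 32,
 *	    TASKQ_PREPOPULATE);
 *	if (tq != NULL) {
 *		(void) taskq_dispatch(tq, my_func, NULL, TQ_SLEEP);
 *		taskq_destroy(tq);	// implicitly waits for all tasks
 *	}
 */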

void
taskq_destroy(taskq_t *tq)
{
        struct task_struct *thread;
        taskq_thread_t *tqt;
        taskq_ent_t *t;

        ASSERT(tq);
        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        tq->tq_flags &= ~TASKQ_ACTIVE;
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        /*
         * When TASKQ_ACTIVE is clear new tasks may not be added nor may
         * new worker threads be spawned for dynamic taskqs.
         */
        if (dynamic_taskq != NULL)
                taskq_wait_outstanding(dynamic_taskq, 0);

        taskq_wait(tq);

        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

        /*
         * Signal each thread to exit and block until it does. Each thread
         * is responsible for removing itself from the list and freeing its
         * taskq_thread_t. This allows for idle threads to opt to remove
         * themselves from the taskq. They can be recreated as needed.
         */
        while (!list_empty(&tq->tq_thread_list)) {
                tqt = list_entry(tq->tq_thread_list.next,
                    taskq_thread_t, tqt_thread_list);
                thread = tqt->tqt_thread;
                spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

                kthread_stop(thread);

                spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        }

        while (!list_empty(&tq->tq_free_list)) {
                t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);

                ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

                list_del_init(&t->tqent_list);
                task_free(tq, t);
        }

        ASSERT0(tq->tq_nthreads);
        ASSERT0(tq->tq_nalloc);
        ASSERT0(tq->tq_nspawn);
        ASSERT(list_empty(&tq->tq_thread_list));
        ASSERT(list_empty(&tq->tq_active_list));
        ASSERT(list_empty(&tq->tq_free_list));
        ASSERT(list_empty(&tq->tq_pend_list));
        ASSERT(list_empty(&tq->tq_prio_list));
        ASSERT(list_empty(&tq->tq_delay_list));

        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        strfree(tq->tq_name);
        kmem_free(tq, sizeof (taskq_t));
}
EXPORT_SYMBOL(taskq_destroy);

int
spl_taskq_init(void)
{
        /*
         * Solaris creates a dynamic taskq of up to 64 threads, however in
         * a Linux environment 1 thread per-core is usually about right.
         */
        system_taskq = taskq_create("spl_system_taskq", num_online_cpus(),
            minclsyspri, 4, 512, TASKQ_PREPOPULATE);
        if (system_taskq == NULL)
                return (1);

        dynamic_taskq = taskq_create("spl_dynamic_taskq", 1,
            minclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE);
        if (dynamic_taskq == NULL) {
                taskq_destroy(system_taskq);
                return (1);
        }

        return (0);
}

void
spl_taskq_fini(void)
{
        taskq_destroy(dynamic_taskq);
        dynamic_taskq = NULL;

        taskq_destroy(system_taskq);
        system_taskq = NULL;
}