/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf. DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *  UCRL-CODE-235197
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://github.com/behlendorf/spl/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *****************************************************************************
 *  Solaris Porting Layer (SPL) Task Queue Implementation.
\*****************************************************************************/

#include <sys/taskq.h>
#include <sys/kmem.h>
#include <spl-debug.h>

#ifdef SS_DEBUG_SUBSYS
#undef SS_DEBUG_SUBSYS
#endif

#define SS_DEBUG_SUBSYS SS_TASKQ

/* Global system-wide dynamic task queue available for all consumers */
taskq_t *system_taskq;
EXPORT_SYMBOL(system_taskq);

/*
 * NOTE: Must be called with tq->tq_lock held, returns a taskq_ent_t
 * which is not attached to the free, work, or pending taskq lists.
 */
static taskq_ent_t *
task_alloc(taskq_t *tq, uint_t flags)
{
        taskq_ent_t *t;
        int count = 0;
        SENTRY;

        ASSERT(tq);
        ASSERT(flags & (TQ_SLEEP | TQ_NOSLEEP));                /* One set */
        ASSERT(!((flags & TQ_SLEEP) && (flags & TQ_NOSLEEP)));  /* Not both */
        ASSERT(spin_is_locked(&tq->tq_lock));
retry:
        /* Acquire taskq_ent_t's from free list if available */
        if (!list_empty(&tq->tq_free_list) && !(flags & TQ_NEW)) {
                t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);
                list_del_init(&t->tqent_list);
                SRETURN(t);
        }

        /* Free list is empty and memory allocations are prohibited */
        if (flags & TQ_NOALLOC)
                SRETURN(NULL);

        /* Hit maximum taskq_ent_t pool size */
        if (tq->tq_nalloc >= tq->tq_maxalloc) {
                if (flags & TQ_NOSLEEP)
                        SRETURN(NULL);

                /*
                 * Sleep periodically polling the free list for an available
                 * taskq_ent_t. Dispatching with TQ_SLEEP should always succeed
                 * but we cannot block forever waiting for a taskq_ent_t to
                 * show up in the free list, otherwise a deadlock can happen.
                 *
                 * Therefore, we need to allocate a new task even if the number
                 * of allocated tasks is above tq->tq_maxalloc, but we still
                 * end up delaying the task allocation by up to one second
                 * (100 polls of HZ/100 each), thereby throttling the task
                 * dispatch rate.
                 */
                spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
                /* The *_uninterruptible variant sets the task state for us;
                 * a bare schedule_timeout() called from the TASK_RUNNING
                 * state would return immediately without sleeping. */
                schedule_timeout_uninterruptible(HZ / 100);
                spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
                if (count < 100)
                        SGOTO(retry, count++);
        }

        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
        t = kmem_alloc(sizeof(taskq_ent_t), flags & (TQ_SLEEP | TQ_NOSLEEP));
        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

        if (t) {
                spin_lock_init(&t->tqent_lock);
                INIT_LIST_HEAD(&t->tqent_list);
                t->tqent_id = 0;
                t->tqent_func = NULL;
                t->tqent_arg = NULL;
                tq->tq_nalloc++;
        }

        SRETURN(t);
}

/*
 * NOTE: Must be called with tq->tq_lock held, expects the taskq_ent_t
 * to already be removed from the free, work, or pending taskq lists.
 */
static void
task_free(taskq_t *tq, taskq_ent_t *t)
{
        SENTRY;

        ASSERT(tq);
        ASSERT(t);
        ASSERT(spin_is_locked(&tq->tq_lock));
        ASSERT(list_empty(&t->tqent_list));

        kmem_free(t, sizeof(taskq_ent_t));
        tq->tq_nalloc--;

        SEXIT;
}

/*
 * NOTE: Must be called with tq->tq_lock held, either destroys the
 * taskq_ent_t if too many exist or moves it to the free list for later use.
 */
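/*
 * Illustrative example (not part of the original file): with
 * tq_minalloc = 4, completed entries are recycled onto tq_free_list
 * while four or fewer taskq_ent_t's are allocated in total; once a
 * fifth entry exists, task_done() destroys completed entries via
 * task_free() until the pool shrinks back to tq_minalloc.
 */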
static void
task_done(taskq_t *tq, taskq_ent_t *t)
{
        SENTRY;
        ASSERT(tq);
        ASSERT(t);
        ASSERT(spin_is_locked(&tq->tq_lock));

        list_del_init(&t->tqent_list);

        if (tq->tq_nalloc <= tq->tq_minalloc) {
                t->tqent_id = 0;
                t->tqent_func = NULL;
                t->tqent_arg = NULL;
                list_add_tail(&t->tqent_list, &tq->tq_free_list);
        } else {
                task_free(tq, t);
        }

        SEXIT;
}

/*
 * As tasks are submitted to the task queue they are assigned a
 * monotonically increasing taskqid and added to the tail of the pending
 * list. As worker threads become available the tasks are removed from
 * the head of the pending or priority list, giving preference to the
 * priority list. The tasks are then added to the work list, preserving
 * the ordering by taskqid. Finally, as tasks complete they are removed
 * from the work list. This means that the pending and work lists are
 * always kept sorted by taskqid. Thus the lowest outstanding
 * incomplete taskqid can be determined simply by checking the min
 * taskqid for each head item on the pending, priority, and work list.
 * This value is stored in tq->tq_lowest_id and only updated to the new
 * lowest id when the previous lowest id completes. All taskqids lower
 * than tq->tq_lowest_id must have completed. It is also possible that
 * larger taskqids have completed because they may be processed in
 * parallel by several worker threads. However, this is not a problem
 * because the behavior of taskq_wait_id() is to block until all
 * previously submitted taskqids have completed.
 *
 * XXX: Taskqid_t wrapping is not handled. However, taskqid_t's are
 * 64-bit values so even if a taskq is processing 2^24 (16,777,216)
 * taskqids per second it will still take 2^40 seconds, roughly 34,865
 * years, before the wrap occurs. I can live with that for now.
 */
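/*
 * Worked example (illustrative): suppose taskqids 7, 8, and 9 are
 * outstanding and task 8 completes first. tq_lowest_id remains 7
 * because task 7 still heads one of the lists, so a caller blocked in
 * taskq_wait_id(tq, 8) does not wake. Only when task 7 also completes
 * does tq_lowest_id advance past 8, guaranteeing that every taskqid
 * <= 8 is done when the waiter returns.
 */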
static int
taskq_wait_check(taskq_t *tq, taskqid_t id)
{
        int rc;

        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        rc = (id < tq->tq_lowest_id);
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        SRETURN(rc);
}

void
__taskq_wait_id(taskq_t *tq, taskqid_t id)
{
        SENTRY;
        ASSERT(tq);

        wait_event(tq->tq_wait_waitq, taskq_wait_check(tq, id));

        SEXIT;
}
EXPORT_SYMBOL(__taskq_wait_id);

void
__taskq_wait(taskq_t *tq)
{
        taskqid_t id;
        SENTRY;
        ASSERT(tq);

        /* Wait for the largest outstanding taskqid */
        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        id = tq->tq_next_id - 1;
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        __taskq_wait_id(tq, id);

        SEXIT;
}
EXPORT_SYMBOL(__taskq_wait);

int
__taskq_member(taskq_t *tq, void *t)
{
        struct list_head *l;
        taskq_thread_t *tqt;
        SENTRY;

        ASSERT(tq);
        ASSERT(t);

        list_for_each(l, &tq->tq_thread_list) {
                tqt = list_entry(l, taskq_thread_t, tqt_thread_list);
                if (tqt->tqt_thread == (struct task_struct *)t)
                        SRETURN(1);
        }

        SRETURN(0);
}
EXPORT_SYMBOL(__taskq_member);
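
/*
 * Usage sketch (illustrative only; taskq_member() is the Solaris-style
 * wrapper for __taskq_member(), and curthread is the SPL alias for the
 * current task): a function which can be called both directly and from
 * within a taskq may use the membership test to run inline rather than
 * deadlock by dispatching to, and then waiting on, its own queue:
 *
 *      if (taskq_member(tq, curthread))
 *              my_func(arg);           (already a tq worker: run inline)
 *      else
 *              taskq_wait_id(tq, taskq_dispatch(tq, my_func, arg, TQ_SLEEP));
 */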

taskqid_t
__taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
{
        taskq_ent_t *t;
        taskqid_t rc = 0;
        SENTRY;

        ASSERT(tq);
        ASSERT(func);

        /* Solaris assumes TQ_SLEEP if not passed explicitly */
        if (!(flags & (TQ_SLEEP | TQ_NOSLEEP)))
                flags |= TQ_SLEEP;

        if (unlikely(in_atomic() && (flags & TQ_SLEEP)))
                PANIC("May schedule while atomic: %s/0x%08x/%d\n",
                    current->comm, preempt_count(), current->pid);

        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

        /* Taskq being destroyed and all tasks drained */
        if (!(tq->tq_flags & TQ_ACTIVE))
                SGOTO(out, rc = 0);

        /* Do not queue the task unless there is an idle thread for it */
        ASSERT(tq->tq_nactive <= tq->tq_nthreads);
        if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads))
                SGOTO(out, rc = 0);

        if ((t = task_alloc(tq, flags)) == NULL)
                SGOTO(out, rc = 0);

        spin_lock(&t->tqent_lock);

        /* Queue to the priority list instead of the pending list */
        if (flags & TQ_FRONT)
                list_add_tail(&t->tqent_list, &tq->tq_prio_list);
        else
                list_add_tail(&t->tqent_list, &tq->tq_pend_list);

        t->tqent_id = rc = tq->tq_next_id;
        tq->tq_next_id++;
        t->tqent_func = func;
        t->tqent_arg = arg;
        spin_unlock(&t->tqent_lock);

        wake_up(&tq->tq_work_waitq);
out:
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
        SRETURN(rc);
}
EXPORT_SYMBOL(__taskq_dispatch);
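
/*
 * Dispatch usage sketch (illustrative only, not part of the original
 * file; assumes the Solaris-style taskq_dispatch() and taskq_wait_id()
 * wrappers from <sys/taskq.h>, which map onto the __taskq_* symbols):
 *
 *      taskqid_t id;
 *
 *      id = taskq_dispatch(tq, my_func, my_arg, TQ_SLEEP);
 *      if (id == 0)
 *              (dispatch failed: the taskq is shutting down, or a
 *               TQ_NOSLEEP/TQ_NOALLOC entry allocation was starved)
 *      else
 *              taskq_wait_id(tq, id);  (block until my_func completes)
 *
 * A return of 0 always indicates failure; valid taskqids start at 1.
 */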

/*
 * Returns the lowest incomplete taskqid_t. The task with this id may
 * still be queued on the pending list, queued on the priority list, or
 * on the work list being actively handled, but it is not yet 100%
 * complete.
 */
static taskqid_t
taskq_lowest_id(taskq_t *tq)
{
        taskqid_t lowest_id = tq->tq_next_id;
        taskq_ent_t *t;
        taskq_thread_t *tqt;
        SENTRY;

        ASSERT(tq);
        ASSERT(spin_is_locked(&tq->tq_lock));

        if (!list_empty(&tq->tq_pend_list)) {
                t = list_entry(tq->tq_pend_list.next, taskq_ent_t, tqent_list);
                lowest_id = MIN(lowest_id, t->tqent_id);
        }

        if (!list_empty(&tq->tq_prio_list)) {
                t = list_entry(tq->tq_prio_list.next, taskq_ent_t, tqent_list);
                lowest_id = MIN(lowest_id, t->tqent_id);
        }

        if (!list_empty(&tq->tq_active_list)) {
                tqt = list_entry(tq->tq_active_list.next, taskq_thread_t,
                    tqt_active_list);
                ASSERT(tqt->tqt_ent != NULL);
                lowest_id = MIN(lowest_id, tqt->tqt_ent->tqent_id);
        }

        SRETURN(lowest_id);
}

/*
 * Insert a task into a list keeping the list sorted by increasing
 * taskqid.
 */
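/*
 * For example (illustrative): inserting a thread whose current task has
 * taskqid 5 into an active list holding ids [2, 4, 7] yields
 * [2, 4, 5, 7]; the reverse walk below stops at 4, the first id lower
 * than 5, and links the new entry immediately after it.
 */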
static void
taskq_insert_in_order(taskq_t *tq, taskq_thread_t *tqt)
{
        taskq_thread_t *w;
        struct list_head *l;

        SENTRY;
        ASSERT(tq);
        ASSERT(tqt);
        ASSERT(spin_is_locked(&tq->tq_lock));

        list_for_each_prev(l, &tq->tq_active_list) {
                w = list_entry(l, taskq_thread_t, tqt_active_list);
                if (w->tqt_ent->tqent_id < tqt->tqt_ent->tqent_id) {
                        list_add(&tqt->tqt_active_list, l);
                        break;
                }
        }
        if (l == &tq->tq_active_list)
                list_add(&tqt->tqt_active_list, &tq->tq_active_list);

        SEXIT;
}

static int
taskq_thread(void *args)
{
        DECLARE_WAITQUEUE(wait, current);
        sigset_t blocked;
        taskqid_t id;
        taskq_thread_t *tqt = args;
        taskq_t *tq;
        taskq_ent_t *t;
        struct list_head *pend_list;
        SENTRY;

        ASSERT(tqt);
        tq = tqt->tqt_tq;
        current->flags |= PF_NOFREEZE;

        /* Disable the direct memory reclaim path */
        if (tq->tq_flags & TASKQ_NORECLAIM)
                current->flags |= PF_MEMALLOC;

        sigfillset(&blocked);
        sigprocmask(SIG_BLOCK, &blocked, NULL);
        flush_signals(current);

        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        tq->tq_nthreads++;
        wake_up(&tq->tq_wait_waitq);
        set_current_state(TASK_INTERRUPTIBLE);

        while (!kthread_should_stop()) {

                add_wait_queue(&tq->tq_work_waitq, &wait);
                if (list_empty(&tq->tq_pend_list) &&
                    list_empty(&tq->tq_prio_list)) {
                        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
                        schedule();
                        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
                } else {
                        __set_current_state(TASK_RUNNING);
                }

                remove_wait_queue(&tq->tq_work_waitq, &wait);

                if (!list_empty(&tq->tq_prio_list))
                        pend_list = &tq->tq_prio_list;
                else if (!list_empty(&tq->tq_pend_list))
                        pend_list = &tq->tq_pend_list;
                else
                        pend_list = NULL;

                if (pend_list) {
                        t = list_entry(pend_list->next, taskq_ent_t, tqent_list);
                        list_del_init(&t->tqent_list);
                        tqt->tqt_ent = t;
                        taskq_insert_in_order(tq, tqt);
                        tq->tq_nactive++;
                        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

                        /* Perform the requested task */
                        t->tqent_func(t->tqent_arg);

                        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
                        tq->tq_nactive--;
                        list_del_init(&tqt->tqt_active_list);
                        tqt->tqt_ent = NULL;
                        id = t->tqent_id;
                        task_done(tq, t);

                        /* When the current lowest outstanding taskqid is
                         * done, calculate the new lowest outstanding id */
                        if (tq->tq_lowest_id == id) {
                                tq->tq_lowest_id = taskq_lowest_id(tq);
                                ASSERT(tq->tq_lowest_id > id);
                        }

                        wake_up_all(&tq->tq_wait_waitq);
                }

                set_current_state(TASK_INTERRUPTIBLE);

        }

        __set_current_state(TASK_RUNNING);
        tq->tq_nthreads--;
        list_del_init(&tqt->tqt_thread_list);
        kmem_free(tqt, sizeof(taskq_thread_t));

        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        SRETURN(0);
}

taskq_t *
__taskq_create(const char *name, int nthreads, pri_t pri,
    int minalloc, int maxalloc, uint_t flags)
{
        taskq_t *tq;
        taskq_thread_t *tqt;
        int rc = 0, i, j = 0;
        SENTRY;

        ASSERT(name != NULL);
        ASSERT(pri <= maxclsyspri);
        ASSERT(minalloc >= 0);
        ASSERT(maxalloc <= INT_MAX);
        ASSERT(!(flags & (TASKQ_CPR_SAFE | TASKQ_DYNAMIC))); /* Unsupported */

        /* Scale the number of threads using nthreads as a percentage */
        if (flags & TASKQ_THREADS_CPU_PCT) {
                ASSERT(nthreads <= 100);
                ASSERT(nthreads >= 0);
                nthreads = MIN(nthreads, 100);
                nthreads = MAX(nthreads, 0);
                nthreads = MAX((num_online_cpus() * nthreads) / 100, 1);
        }
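
        /*
         * Worked example (illustrative): with 8 online CPUs and
         * nthreads = 50, the pool is sized to MAX((8 * 50) / 100, 1) = 4
         * threads; the outer MAX() guarantees at least one thread even
         * when the percentage rounds down to zero.
         */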

        tq = kmem_alloc(sizeof(*tq), KM_SLEEP);
        if (tq == NULL)
                SRETURN(NULL);

        spin_lock_init(&tq->tq_lock);
        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        INIT_LIST_HEAD(&tq->tq_thread_list);
        INIT_LIST_HEAD(&tq->tq_active_list);
        tq->tq_name = name;
        tq->tq_nactive = 0;
        tq->tq_nthreads = 0;
        tq->tq_pri = pri;
        tq->tq_minalloc = minalloc;
        tq->tq_maxalloc = maxalloc;
        tq->tq_nalloc = 0;
        tq->tq_flags = (flags | TQ_ACTIVE);
        tq->tq_next_id = 1;
        tq->tq_lowest_id = 1;
        INIT_LIST_HEAD(&tq->tq_free_list);
        INIT_LIST_HEAD(&tq->tq_pend_list);
        INIT_LIST_HEAD(&tq->tq_prio_list);
        init_waitqueue_head(&tq->tq_work_waitq);
        init_waitqueue_head(&tq->tq_wait_waitq);

        if (flags & TASKQ_PREPOPULATE)
                for (i = 0; i < minalloc; i++)
                        task_done(tq, task_alloc(tq, TQ_SLEEP | TQ_NEW));

        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        for (i = 0; i < nthreads; i++) {
                tqt = kmem_alloc(sizeof(*tqt), KM_SLEEP);
                INIT_LIST_HEAD(&tqt->tqt_thread_list);
                INIT_LIST_HEAD(&tqt->tqt_active_list);
                tqt->tqt_tq = tq;
                tqt->tqt_ent = NULL;

                tqt->tqt_thread = kthread_create(taskq_thread, tqt,
                    "%s/%d", name, i);
                if (tqt->tqt_thread) {
                        list_add(&tqt->tqt_thread_list, &tq->tq_thread_list);
                        kthread_bind(tqt->tqt_thread, i % num_online_cpus());
                        set_user_nice(tqt->tqt_thread, PRIO_TO_NICE(pri));
                        wake_up_process(tqt->tqt_thread);
                        j++;
                } else {
                        kmem_free(tqt, sizeof(taskq_thread_t));
                        rc = 1;
                }
        }

        /* Wait for all threads to be started before potential destroy */
        wait_event(tq->tq_wait_waitq, tq->tq_nthreads == j);

        if (rc) {
                __taskq_destroy(tq);
                tq = NULL;
        }

        SRETURN(tq);
}
EXPORT_SYMBOL(__taskq_create);
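
/*
 * Creation usage sketch (illustrative only; the names and the
 * minalloc/maxalloc values are arbitrary, and taskq_create() is the
 * Solaris-style wrapper for __taskq_create()):
 *
 *      (a fixed pool of 4 worker threads with a prepopulated entry cache)
 *      tq = taskq_create("my_taskq", 4, minclsyspri, 50, INT_MAX,
 *          TASKQ_PREPOPULATE);
 *
 *      (or size the pool to 75% of the online CPUs)
 *      tq = taskq_create("my_taskq_pct", 75, minclsyspri, 50, INT_MAX,
 *          TASKQ_PREPOPULATE | TASKQ_THREADS_CPU_PCT);
 */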

void
__taskq_destroy(taskq_t *tq)
{
        struct task_struct *thread;
        taskq_thread_t *tqt;
        taskq_ent_t *t;
        SENTRY;

        ASSERT(tq);
        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        tq->tq_flags &= ~TQ_ACTIVE;
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        /* With TQ_ACTIVE cleared, no new tasks may be added to pending */
        __taskq_wait(tq);

        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

        /*
         * Signal each thread to exit and block until it does. Each thread
         * is responsible for removing itself from the list and freeing its
         * taskq_thread_t. This allows for idle threads to opt to remove
         * themselves from the taskq. They can be recreated as needed.
         */
        while (!list_empty(&tq->tq_thread_list)) {
                tqt = list_entry(tq->tq_thread_list.next,
                    taskq_thread_t, tqt_thread_list);
                thread = tqt->tqt_thread;
                spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

                kthread_stop(thread);

                spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        }

        while (!list_empty(&tq->tq_free_list)) {
                t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);
                list_del_init(&t->tqent_list);
                task_free(tq, t);
        }

        ASSERT(tq->tq_nthreads == 0);
        ASSERT(tq->tq_nalloc == 0);
        ASSERT(list_empty(&tq->tq_thread_list));
        ASSERT(list_empty(&tq->tq_active_list));
        ASSERT(list_empty(&tq->tq_free_list));
        ASSERT(list_empty(&tq->tq_pend_list));
        ASSERT(list_empty(&tq->tq_prio_list));

        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        kmem_free(tq, sizeof(taskq_t));

        SEXIT;
}
EXPORT_SYMBOL(__taskq_destroy);

int
spl_taskq_init(void)
{
        SENTRY;

        /* Solaris creates a dynamic taskq of up to 64 threads; however,
         * in a Linux environment one thread per core is usually about
         * right */
        system_taskq = taskq_create("spl_system_taskq", num_online_cpus(),
            minclsyspri, 4, 512, TASKQ_PREPOPULATE);
        if (system_taskq == NULL)
                SRETURN(1);

        SRETURN(0);
}

void
spl_taskq_fini(void)
{
        SENTRY;
        taskq_destroy(system_taskq);
        SEXIT;
}