1 /*****************************************************************************\
2 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
3 * Copyright (C) 2007 The Regents of the University of California.
4 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
5 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
6 * UCRL-CODE-235197
7 *
8 * This file is part of the SPL, Solaris Porting Layer.
9 * For details, see <http://zfsonlinux.org/>.
10 *
11 * The SPL is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 *
16 * The SPL is distributed in the hope that it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 * for more details.
20 *
21 * You should have received a copy of the GNU General Public License along
22 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
23 *****************************************************************************
24 * Solaris Porting LAyer Tests (SPLAT) Task Queue Tests.
25 \*****************************************************************************/
26
27 #include <sys/kmem.h>
28 #include <sys/vmem.h>
29 #include <sys/random.h>
30 #include <sys/taskq.h>
31 #include <sys/timer.h>
32 #include <linux/delay.h>
33 #include "splat-internal.h"
34
35 #define SPLAT_TASKQ_NAME "taskq"
36 #define SPLAT_TASKQ_DESC "Kernel Task Queue Tests"
37
38 #define SPLAT_TASKQ_TEST1_ID 0x0201
39 #define SPLAT_TASKQ_TEST1_NAME "single"
40 #define SPLAT_TASKQ_TEST1_DESC "Single task queue, single task"
41
42 #define SPLAT_TASKQ_TEST2_ID 0x0202
43 #define SPLAT_TASKQ_TEST2_NAME "multiple"
44 #define SPLAT_TASKQ_TEST2_DESC "Multiple task queues, multiple tasks"
45
46 #define SPLAT_TASKQ_TEST3_ID 0x0203
47 #define SPLAT_TASKQ_TEST3_NAME "system"
48 #define SPLAT_TASKQ_TEST3_DESC "System task queue, multiple tasks"
49
50 #define SPLAT_TASKQ_TEST4_ID 0x0204
51 #define SPLAT_TASKQ_TEST4_NAME "wait"
52 #define SPLAT_TASKQ_TEST4_DESC "Multiple task waiting"
53
54 #define SPLAT_TASKQ_TEST5_ID 0x0205
55 #define SPLAT_TASKQ_TEST5_NAME "order"
56 #define SPLAT_TASKQ_TEST5_DESC "Correct task ordering"
57
58 #define SPLAT_TASKQ_TEST6_ID 0x0206
59 #define SPLAT_TASKQ_TEST6_NAME "front"
60 #define SPLAT_TASKQ_TEST6_DESC "Correct ordering with TQ_FRONT flag"
61
62 #define SPLAT_TASKQ_TEST7_ID 0x0207
63 #define SPLAT_TASKQ_TEST7_NAME "recurse"
64 #define SPLAT_TASKQ_TEST7_DESC "Single task queue, recursive dispatch"
65
66 #define SPLAT_TASKQ_TEST8_ID 0x0208
67 #define SPLAT_TASKQ_TEST8_NAME "contention"
68 #define SPLAT_TASKQ_TEST8_DESC "1 queue, 100 threads, 131072 tasks"
69
70 #define SPLAT_TASKQ_TEST9_ID 0x0209
71 #define SPLAT_TASKQ_TEST9_NAME "delay"
72 #define SPLAT_TASKQ_TEST9_DESC "Delayed task execution"
73
74 #define SPLAT_TASKQ_TEST10_ID 0x020a
75 #define SPLAT_TASKQ_TEST10_NAME "cancel"
76 #define SPLAT_TASKQ_TEST10_DESC "Cancel task execution"
77
78 #define SPLAT_TASKQ_ORDER_MAX 8
79 #define SPLAT_TASKQ_DEPTH_MAX 16
80
81
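/*
 * Shared argument block passed to the dispatched task functions.  The
 * 'flag' and 'order' fields record task completions, 'count' is a shared
 * completion counter, 'depth' tracks recursive dispatches, and 'expire'
 * holds the earliest time a delayed task is expected to run.
 */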
82 typedef struct splat_taskq_arg {
83 int flag;
84 int id;
85 atomic_t *count;
86 int order[SPLAT_TASKQ_ORDER_MAX];
87 unsigned int depth;
88 clock_t expire;
89 taskq_t *tq;
90 taskq_ent_t *tqe;
91 spinlock_t lock;
92 struct file *file;
93 const char *name;
94 } splat_taskq_arg_t;
95
96 typedef struct splat_taskq_id {
97 int id;
98 splat_taskq_arg_t *arg;
99 } splat_taskq_id_t;
100
101 /*
102 * Create a taskq, queue a task, wait until task completes, ensure
103 * task ran properly, cleanup taskq.
104 */
105 static void
106 splat_taskq_test13_func(void *arg)
107 {
108 splat_taskq_arg_t *tq_arg = (splat_taskq_arg_t *)arg;
109
110 ASSERT(tq_arg);
111 splat_vprint(tq_arg->file, SPLAT_TASKQ_TEST1_NAME,
112 "Taskq '%s' function '%s' setting flag\n",
113 tq_arg->name, sym2str(splat_taskq_test13_func));
114 tq_arg->flag = 1;
115 }
116
117 static int
118 splat_taskq_test1_impl(struct file *file, void *arg, boolean_t prealloc)
119 {
120 taskq_t *tq;
121 taskqid_t id;
122 splat_taskq_arg_t tq_arg;
123 taskq_ent_t *tqe;
124
125 tqe = kmem_alloc(sizeof (taskq_ent_t), KM_SLEEP);
126 taskq_init_ent(tqe);
127
128 splat_vprint(file, SPLAT_TASKQ_TEST1_NAME,
129 "Taskq '%s' creating (%s dispatch)\n",
130 SPLAT_TASKQ_TEST1_NAME,
131 prealloc ? "prealloc" : "dynamic");
132 if ((tq = taskq_create(SPLAT_TASKQ_TEST1_NAME, 1, maxclsyspri,
133 50, INT_MAX, TASKQ_PREPOPULATE)) == NULL) {
134 splat_vprint(file, SPLAT_TASKQ_TEST1_NAME,
135 "Taskq '%s' create failed\n",
136 SPLAT_TASKQ_TEST1_NAME);
137 kmem_free(tqe, sizeof (taskq_ent_t));
138 return -EINVAL;
139 }
140
141 tq_arg.flag = 0;
142 tq_arg.id = 0;
143 tq_arg.file = file;
144 tq_arg.name = SPLAT_TASKQ_TEST1_NAME;
145
146 splat_vprint(file, SPLAT_TASKQ_TEST1_NAME,
147 "Taskq '%s' function '%s' dispatching\n",
148 tq_arg.name, sym2str(splat_taskq_test13_func));
149 if (prealloc) {
150 taskq_dispatch_ent(tq, splat_taskq_test13_func,
151 &tq_arg, TQ_SLEEP, tqe);
152 id = tqe->tqent_id;
153 } else {
154 id = taskq_dispatch(tq, splat_taskq_test13_func,
155 &tq_arg, TQ_SLEEP);
156 }
157
158 if (id == 0) {
159 splat_vprint(file, SPLAT_TASKQ_TEST1_NAME,
160 "Taskq '%s' function '%s' dispatch failed\n",
161 tq_arg.name, sym2str(splat_taskq_test13_func));
162 kmem_free(tqe, sizeof (taskq_ent_t));
163 taskq_destroy(tq);
164 return -EINVAL;
165 }
166
167 splat_vprint(file, SPLAT_TASKQ_TEST1_NAME, "Taskq '%s' waiting\n",
168 tq_arg.name);
169 taskq_wait(tq);
170 splat_vprint(file, SPLAT_TASKQ_TEST1_NAME, "Taskq '%s' destroying\n",
171 tq_arg.name);
172
173 kmem_free(tqe, sizeof (taskq_ent_t));
174 taskq_destroy(tq);
175
176 return (tq_arg.flag) ? 0 : -EINVAL;
177 }
178
179 static int
180 splat_taskq_test1(struct file *file, void *arg)
181 {
182 int rc;
183
184 rc = splat_taskq_test1_impl(file, arg, B_FALSE);
185 if (rc)
186 return rc;
187
188 rc = splat_taskq_test1_impl(file, arg, B_TRUE);
189
190 return rc;
191 }
192
193 /*
194  * Create multiple taskqs, each with multiple tasks, wait until
195  * all tasks complete, and ensure all tasks ran properly and in the
196  * correct order. The run order must match the submission order
197  * because we only have 1 thread per taskq. Finally clean up the taskqs.
198 */
199 static void
200 splat_taskq_test2_func1(void *arg)
201 {
202 splat_taskq_arg_t *tq_arg = (splat_taskq_arg_t *)arg;
203
204 ASSERT(tq_arg);
205 splat_vprint(tq_arg->file, SPLAT_TASKQ_TEST2_NAME,
206 "Taskq '%s/%d' function '%s' flag = %d = %d * 2\n",
207 tq_arg->name, tq_arg->id,
208 sym2str(splat_taskq_test2_func1),
209 tq_arg->flag * 2, tq_arg->flag);
210 tq_arg->flag *= 2;
211 }
212
213 static void
214 splat_taskq_test2_func2(void *arg)
215 {
216 splat_taskq_arg_t *tq_arg = (splat_taskq_arg_t *)arg;
217
218 ASSERT(tq_arg);
219 splat_vprint(tq_arg->file, SPLAT_TASKQ_TEST2_NAME,
220 "Taskq '%s/%d' function '%s' flag = %d = %d + 1\n",
221 tq_arg->name, tq_arg->id,
222 sym2str(splat_taskq_test2_func2),
223 tq_arg->flag + 1, tq_arg->flag);
224 tq_arg->flag += 1;
225 }
226
227 #define TEST2_TASKQS 8
228 #define TEST2_THREADS_PER_TASKQ 1
229
230 static int
231 splat_taskq_test2_impl(struct file *file, void *arg, boolean_t prealloc) {
232 taskq_t *tq[TEST2_TASKQS] = { NULL };
233 taskqid_t id;
234 splat_taskq_arg_t *tq_args[TEST2_TASKQS] = { NULL };
235 taskq_ent_t *func1_tqes = NULL;
236 taskq_ent_t *func2_tqes = NULL;
237 int i, rc = 0;
238
239 func1_tqes = kmalloc(sizeof(*func1_tqes) * TEST2_TASKQS, GFP_KERNEL);
240 if (func1_tqes == NULL) {
241 rc = -ENOMEM;
242 goto out;
243 }
244
245 func2_tqes = kmalloc(sizeof(*func2_tqes) * TEST2_TASKQS, GFP_KERNEL);
246 if (func2_tqes == NULL) {
247 rc = -ENOMEM;
248 goto out;
249 }
250
251 for (i = 0; i < TEST2_TASKQS; i++) {
252 taskq_init_ent(&func1_tqes[i]);
253 taskq_init_ent(&func2_tqes[i]);
254
255 tq_args[i] = kmalloc(sizeof (splat_taskq_arg_t), GFP_KERNEL);
256 if (tq_args[i] == NULL) {
257 rc = -ENOMEM;
258 break;
259 }
260
261 splat_vprint(file, SPLAT_TASKQ_TEST2_NAME,
262 "Taskq '%s/%d' creating (%s dispatch)\n",
263 SPLAT_TASKQ_TEST2_NAME, i,
264 prealloc ? "prealloc" : "dynamic");
265 if ((tq[i] = taskq_create(SPLAT_TASKQ_TEST2_NAME,
266 TEST2_THREADS_PER_TASKQ,
267 maxclsyspri, 50, INT_MAX,
268 TASKQ_PREPOPULATE)) == NULL) {
269 splat_vprint(file, SPLAT_TASKQ_TEST2_NAME,
270 "Taskq '%s/%d' create failed\n",
271 SPLAT_TASKQ_TEST2_NAME, i);
272 rc = -EINVAL;
273 break;
274 }
275
276 tq_args[i]->flag = i;
277 tq_args[i]->id = i;
278 tq_args[i]->file = file;
279 tq_args[i]->name = SPLAT_TASKQ_TEST2_NAME;
280
281 splat_vprint(file, SPLAT_TASKQ_TEST2_NAME,
282 "Taskq '%s/%d' function '%s' dispatching\n",
283 tq_args[i]->name, tq_args[i]->id,
284 sym2str(splat_taskq_test2_func1));
285 if (prealloc) {
286 taskq_dispatch_ent(tq[i], splat_taskq_test2_func1,
287 tq_args[i], TQ_SLEEP, &func1_tqes[i]);
288 id = func1_tqes[i].tqent_id;
289 } else {
290 id = taskq_dispatch(tq[i], splat_taskq_test2_func1,
291 tq_args[i], TQ_SLEEP);
292 }
293
294 if (id == 0) {
295 splat_vprint(file, SPLAT_TASKQ_TEST2_NAME,
296 "Taskq '%s/%d' function '%s' dispatch "
297 "failed\n", tq_args[i]->name, tq_args[i]->id,
298 sym2str(splat_taskq_test2_func1));
299 rc = -EINVAL;
300 break;
301 }
302
303 splat_vprint(file, SPLAT_TASKQ_TEST2_NAME,
304 "Taskq '%s/%d' function '%s' dispatching\n",
305 tq_args[i]->name, tq_args[i]->id,
306 sym2str(splat_taskq_test2_func2));
307 if (prealloc) {
308 taskq_dispatch_ent(tq[i], splat_taskq_test2_func2,
309 tq_args[i], TQ_SLEEP, &func2_tqes[i]);
310 id = func2_tqes[i].tqent_id;
311 } else {
312 id = taskq_dispatch(tq[i], splat_taskq_test2_func2,
313 tq_args[i], TQ_SLEEP);
314 }
315
316 if (id == 0) {
317 splat_vprint(file, SPLAT_TASKQ_TEST2_NAME, "Taskq "
318 "'%s/%d' function '%s' dispatch failed\n",
319 tq_args[i]->name, tq_args[i]->id,
320 sym2str(splat_taskq_test2_func2));
321 rc = -EINVAL;
322 break;
323 }
324 }
325
326 /* When rc is set we're effectively just doing cleanup here, so
327 * ignore new errors in that case. They just cause noise. */
328 for (i = 0; i < TEST2_TASKQS; i++) {
329 if (tq_args[i] == NULL)
330 continue;
331
332 if (tq[i] != NULL) {
333 splat_vprint(file, SPLAT_TASKQ_TEST2_NAME,
334 "Taskq '%s/%d' waiting\n",
335 tq_args[i]->name, tq_args[i]->id);
336 taskq_wait(tq[i]);
337 splat_vprint(file, SPLAT_TASKQ_TEST2_NAME,
338 "Taskq '%s/%d' destroying\n",
339 tq_args[i]->name, tq_args[i]->id);
340
341 taskq_destroy(tq[i]);
342
343 if (!rc && tq_args[i]->flag != ((i * 2) + 1)) {
344 splat_vprint(file, SPLAT_TASKQ_TEST2_NAME,
345 "Taskq '%s/%d' processed tasks "
346 "out of order; %d != %d\n",
347 tq_args[i]->name, tq_args[i]->id,
348 tq_args[i]->flag, i * 2 + 1);
349 rc = -EINVAL;
350 } else {
351 splat_vprint(file, SPLAT_TASKQ_TEST2_NAME,
352 "Taskq '%s/%d' processed tasks "
353 "in the correct order; %d == %d\n",
354 tq_args[i]->name, tq_args[i]->id,
355 tq_args[i]->flag, i * 2 + 1);
356 }
357
358 }
359 kfree(tq_args[i]);	/* free even if taskq_create() failed for this index */
360 }
361 out:
362 if (func1_tqes)
363 kfree(func1_tqes);
364
365 if (func2_tqes)
366 kfree(func2_tqes);
367
368 return rc;
369 }
370
371 static int
372 splat_taskq_test2(struct file *file, void *arg) {
373 int rc;
374
375 rc = splat_taskq_test2_impl(file, arg, B_FALSE);
376 if (rc)
377 return rc;
378
379 rc = splat_taskq_test2_impl(file, arg, B_TRUE);
380
381 return rc;
382 }
383
384 /*
385 * Use the global system task queue with a single task, wait until task
386 * completes, ensure task ran properly.
387 */
388 static int
389 splat_taskq_test3_impl(struct file *file, void *arg, boolean_t prealloc)
390 {
391 taskqid_t id;
392 splat_taskq_arg_t *tq_arg;
393 taskq_ent_t *tqe;
394 int error;
395
396 tq_arg = kmem_alloc(sizeof (splat_taskq_arg_t), KM_SLEEP);
397 tqe = kmem_alloc(sizeof (taskq_ent_t), KM_SLEEP);
398 taskq_init_ent(tqe);
399
400 tq_arg->flag = 0;
401 tq_arg->id = 0;
402 tq_arg->file = file;
403 tq_arg->name = SPLAT_TASKQ_TEST3_NAME;
404
405 splat_vprint(file, SPLAT_TASKQ_TEST3_NAME,
406 "Taskq '%s' function '%s' %s dispatch\n",
407 tq_arg->name, sym2str(splat_taskq_test13_func),
408 prealloc ? "prealloc" : "dynamic");
409 if (prealloc) {
410 taskq_dispatch_ent(system_taskq, splat_taskq_test13_func,
411 tq_arg, TQ_SLEEP, tqe);
412 id = tqe->tqent_id;
413 } else {
414 id = taskq_dispatch(system_taskq, splat_taskq_test13_func,
415 tq_arg, TQ_SLEEP);
416 }
417
418 if (id == 0) {
419 splat_vprint(file, SPLAT_TASKQ_TEST3_NAME,
420 "Taskq '%s' function '%s' dispatch failed\n",
421 tq_arg->name, sym2str(splat_taskq_test13_func));
422 kmem_free(tqe, sizeof (taskq_ent_t));
423 kmem_free(tq_arg, sizeof (splat_taskq_arg_t));
424 return -EINVAL;
425 }
426
427 splat_vprint(file, SPLAT_TASKQ_TEST3_NAME, "Taskq '%s' waiting\n",
428 tq_arg->name);
429 taskq_wait(system_taskq);
430
431 error = (tq_arg->flag) ? 0 : -EINVAL;
432
433 kmem_free(tqe, sizeof (taskq_ent_t));
434 kmem_free(tq_arg, sizeof (splat_taskq_arg_t));
435
436 return (error);
437 }
438
439 static int
440 splat_taskq_test3(struct file *file, void *arg)
441 {
442 int rc;
443
444 rc = splat_taskq_test3_impl(file, arg, B_FALSE);
445 if (rc)
446 return rc;
447
448 rc = splat_taskq_test3_impl(file, arg, B_TRUE);
449
450 return rc;
451 }
452
453 /*
454 * Create a taskq and dispatch a large number of tasks to the queue.
455 * Then use taskq_wait() to block until all the tasks complete, then
456 * cross check that all the tasks ran by checking the shared atomic
457 * counter which is incremented in the task function.
458 *
459 * First we try with a large 'maxalloc' value, then we try with a small one.
460 * We should not drop tasks when TQ_SLEEP is used in taskq_dispatch(), even
461 * if the number of pending tasks is above maxalloc.
462 */
463 static void
464 splat_taskq_test4_func(void *arg)
465 {
466 splat_taskq_arg_t *tq_arg = (splat_taskq_arg_t *)arg;
467 ASSERT(tq_arg);
468
469 atomic_inc(tq_arg->count);
470 }
471
472 static int
473 splat_taskq_test4_common(struct file *file, void *arg, int minalloc,
474 int maxalloc, int nr_tasks, boolean_t prealloc)
475 {
476 taskq_t *tq;
477 taskqid_t id;
478 splat_taskq_arg_t tq_arg;
479 taskq_ent_t *tqes;
480 atomic_t count;
481 int i, j, rc = 0;
482
483 tqes = kmalloc(sizeof(*tqes) * nr_tasks, GFP_KERNEL);
484 if (tqes == NULL)
485 return -ENOMEM;
486
487 splat_vprint(file, SPLAT_TASKQ_TEST4_NAME,
488 "Taskq '%s' creating (%s dispatch) (%d/%d/%d)\n",
489 SPLAT_TASKQ_TEST4_NAME,
490 prealloc ? "prealloc" : "dynamic",
491 minalloc, maxalloc, nr_tasks);
492 if ((tq = taskq_create(SPLAT_TASKQ_TEST4_NAME, 1, maxclsyspri,
493 minalloc, maxalloc, TASKQ_PREPOPULATE)) == NULL) {
494 splat_vprint(file, SPLAT_TASKQ_TEST4_NAME,
495 "Taskq '%s' create failed\n",
496 SPLAT_TASKQ_TEST4_NAME);
497 rc = -EINVAL;
498 goto out_free;
499 }
500
501 tq_arg.file = file;
502 tq_arg.name = SPLAT_TASKQ_TEST4_NAME;
503 tq_arg.count = &count;
504
505 for (i = 1; i <= nr_tasks; i *= 2) {
506 atomic_set(tq_arg.count, 0);
507 splat_vprint(file, SPLAT_TASKQ_TEST4_NAME,
508 "Taskq '%s' function '%s' dispatched %d times\n",
509 tq_arg.name, sym2str(splat_taskq_test4_func), i);
510
511 for (j = 0; j < i; j++) {
512 taskq_init_ent(&tqes[j]);
513
514 if (prealloc) {
515 taskq_dispatch_ent(tq, splat_taskq_test4_func,
516 &tq_arg, TQ_SLEEP, &tqes[j]);
517 id = tqes[j].tqent_id;
518 } else {
519 id = taskq_dispatch(tq, splat_taskq_test4_func,
520 &tq_arg, TQ_SLEEP);
521 }
522
523 if (id == 0) {
524 splat_vprint(file, SPLAT_TASKQ_TEST4_NAME,
525 "Taskq '%s' function '%s' dispatch "
526 "%d failed\n", tq_arg.name,
527 sym2str(splat_taskq_test4_func), j);
528 rc = -EINVAL;
529 goto out;
530 }
531 }
532
533 splat_vprint(file, SPLAT_TASKQ_TEST4_NAME, "Taskq '%s' "
534 "waiting for %d dispatches\n", tq_arg.name, i);
535 taskq_wait(tq);
536 splat_vprint(file, SPLAT_TASKQ_TEST4_NAME, "Taskq '%s' "
537 "%d/%d dispatches finished\n", tq_arg.name,
538 atomic_read(&count), i);
539 if (atomic_read(&count) != i) {
540 rc = -ERANGE;
541 goto out;
542
543 }
544 }
545 out:
546 splat_vprint(file, SPLAT_TASKQ_TEST4_NAME, "Taskq '%s' destroying\n",
547 tq_arg.name);
548 taskq_destroy(tq);
549
550 out_free:
551 kfree(tqes);
552
553 return rc;
554 }
555
556 static int
557 splat_taskq_test4_impl(struct file *file, void *arg, boolean_t prealloc)
558 {
559 int rc;
560
561 rc = splat_taskq_test4_common(file, arg, 50, INT_MAX, 1024, prealloc);
562 if (rc)
563 return rc;
564
565 rc = splat_taskq_test4_common(file, arg, 1, 1, 32, prealloc);
566
567 return rc;
568 }
569
570 static int
571 splat_taskq_test4(struct file *file, void *arg)
572 {
573 int rc;
574
575 rc = splat_taskq_test4_impl(file, arg, B_FALSE);
576 if (rc)
577 return rc;
578
579 rc = splat_taskq_test4_impl(file, arg, B_TRUE);
580
581 return rc;
582 }
583
584 /*
585 * Create a taskq and dispatch a specific sequence of tasks carefully
586 * crafted to validate the order in which tasks are processed. When
587 * there are multiple worker threads each thread will process the
588 * next pending task as soon as it completes its current task. This
589  * means that tasks do not strictly complete in the order in which they
590  * were dispatched (increasing task id). This is fine, but we need to
591  * verify that taskq_wait_outstanding() blocks until the passed task id and
592  * all lower task ids complete. We do this by dispatching the following
593  * specific sequence of tasks, each of which blocks for N time units.
594  * We then use taskq_wait_outstanding() to unblock at a specific task id and
595  * verify that only the expected task ids have completed, in the
596  * correct order. The two cases of interest are:
597 *
598 * 1) Task ids larger than the waited for task id can run and
599 * complete as long as there is an available worker thread.
600 * 2) All task ids lower than the waited one must complete before
601 * unblocking even if the waited task id itself has completed.
602 *
603  * The following table shows each task id and how it will be
604  * scheduled. Each row represents one time unit and each column
605  * one of the three worker threads. The places taskq_wait_outstanding()
606 * must unblock for a specific id are identified as well as the
607 * task ids which must have completed and their order.
608 *
609 * +-----+ <--- taskq_wait_outstanding(tq, 8) unblocks
610 * | | Required Completion Order: 1,2,4,5,3,8,6,7
611 * +-----+ |
612 * | | |
613 * | | +-----+
614 * | | | 8 |
615 * | | +-----+ <--- taskq_wait_outstanding(tq, 3) unblocks
616 * | | 7 | | Required Completion Order: 1,2,4,5,3
617 * | +-----+ |
618 * | 6 | | |
619 * +-----+ | |
620 * | | 5 | |
621 * | +-----+ |
622 * | 4 | | |
623 * +-----+ | |
624 * | 1 | 2 | 3 |
625 * +-----+-----+-----+
626 *
627 */
628 static void
629 splat_taskq_test5_func(void *arg)
630 {
631 splat_taskq_id_t *tq_id = (splat_taskq_id_t *)arg;
632 splat_taskq_arg_t *tq_arg = tq_id->arg;
633 int factor;
634
635 /* Delays determined by above table */
636 switch (tq_id->id) {
637 default: factor = 0; break;
638 case 1: case 8: factor = 1; break;
639 case 2: case 4: case 5: factor = 2; break;
640 case 6: case 7: factor = 4; break;
641 case 3: factor = 5; break;
642 }
643
644 msleep(factor * 100);
645 splat_vprint(tq_arg->file, tq_arg->name,
646 "Taskqid %d complete for taskq '%s'\n",
647 tq_id->id, tq_arg->name);
648
649 spin_lock(&tq_arg->lock);
650 tq_arg->order[tq_arg->flag] = tq_id->id;
651 tq_arg->flag++;
652 spin_unlock(&tq_arg->lock);
653 }
654
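/*
 * Compare the recorded completion order against the expected order and
 * log both sequences when they disagree.
 */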
655 static int
656 splat_taskq_test_order(splat_taskq_arg_t *tq_arg, int *order)
657 {
658 int i, j;
659
660 for (i = 0; i < SPLAT_TASKQ_ORDER_MAX; i++) {
661 if (tq_arg->order[i] != order[i]) {
662 splat_vprint(tq_arg->file, tq_arg->name,
663 "Taskq '%s' incorrect completion "
664 "order\n", tq_arg->name);
665 splat_vprint(tq_arg->file, tq_arg->name,
666 "%s", "Expected { ");
667
668 for (j = 0; j < SPLAT_TASKQ_ORDER_MAX; j++)
669 splat_print(tq_arg->file, "%d ", order[j]);
670
671 splat_print(tq_arg->file, "%s", "}\n");
672 splat_vprint(tq_arg->file, tq_arg->name,
673 "%s", "Got { ");
674
675 for (j = 0; j < SPLAT_TASKQ_ORDER_MAX; j++)
676 splat_print(tq_arg->file, "%d ",
677 tq_arg->order[j]);
678
679 splat_print(tq_arg->file, "%s", "}\n");
680 return -EILSEQ;
681 }
682 }
683
684 splat_vprint(tq_arg->file, tq_arg->name,
685 "Taskq '%s' validated correct completion order\n",
686 tq_arg->name);
687
688 return 0;
689 }
690
691 static int
692 splat_taskq_test5_impl(struct file *file, void *arg, boolean_t prealloc)
693 {
694 taskq_t *tq;
695 taskqid_t id;
696 splat_taskq_id_t tq_id[SPLAT_TASKQ_ORDER_MAX];
697 splat_taskq_arg_t tq_arg;
698 int order1[SPLAT_TASKQ_ORDER_MAX] = { 1,2,4,5,3,0,0,0 };
699 int order2[SPLAT_TASKQ_ORDER_MAX] = { 1,2,4,5,3,8,6,7 };
700 taskq_ent_t *tqes;
701 int i, rc = 0;
702
703 tqes = kmem_alloc(sizeof(*tqes) * SPLAT_TASKQ_ORDER_MAX, KM_SLEEP);
704 memset(tqes, 0, sizeof(*tqes) * SPLAT_TASKQ_ORDER_MAX);
705
706 splat_vprint(file, SPLAT_TASKQ_TEST5_NAME,
707 "Taskq '%s' creating (%s dispatch)\n",
708 SPLAT_TASKQ_TEST5_NAME,
709 prealloc ? "prealloc" : "dynamic");
710 if ((tq = taskq_create(SPLAT_TASKQ_TEST5_NAME, 3, maxclsyspri,
711 50, INT_MAX, TASKQ_PREPOPULATE)) == NULL) {
712 splat_vprint(file, SPLAT_TASKQ_TEST5_NAME,
713 "Taskq '%s' create failed\n",
714 SPLAT_TASKQ_TEST5_NAME);
715 return -EINVAL;
716 }
717
718 tq_arg.flag = 0;
719 memset(&tq_arg.order, 0, sizeof(int) * SPLAT_TASKQ_ORDER_MAX);
720 spin_lock_init(&tq_arg.lock);
721 tq_arg.file = file;
722 tq_arg.name = SPLAT_TASKQ_TEST5_NAME;
723
724 for (i = 0; i < SPLAT_TASKQ_ORDER_MAX; i++) {
725 taskq_init_ent(&tqes[i]);
726
727 tq_id[i].id = i + 1;
728 tq_id[i].arg = &tq_arg;
729
730 if (prealloc) {
731 taskq_dispatch_ent(tq, splat_taskq_test5_func,
732 &tq_id[i], TQ_SLEEP, &tqes[i]);
733 id = tqes[i].tqent_id;
734 } else {
735 id = taskq_dispatch(tq, splat_taskq_test5_func,
736 &tq_id[i], TQ_SLEEP);
737 }
738
739 if (id == 0) {
740 splat_vprint(file, SPLAT_TASKQ_TEST5_NAME,
741 "Taskq '%s' function '%s' dispatch failed\n",
742 tq_arg.name, sym2str(splat_taskq_test5_func));
743 rc = -EINVAL;
744 goto out;
745 }
746
747 if (tq_id[i].id != id) {
748 splat_vprint(file, SPLAT_TASKQ_TEST5_NAME,
749 "Taskq '%s' expected taskqid %d got %d\n",
750 tq_arg.name, (int)tq_id[i].id, (int)id);
751 rc = -EINVAL;
752 goto out;
753 }
754 }
755
756 splat_vprint(file, SPLAT_TASKQ_TEST5_NAME, "Taskq '%s' "
757 "waiting for taskqid %d completion\n", tq_arg.name, 3);
758 taskq_wait_outstanding(tq, 3);
759 if ((rc = splat_taskq_test_order(&tq_arg, order1)))
760 goto out;
761
762 splat_vprint(file, SPLAT_TASKQ_TEST5_NAME, "Taskq '%s' "
763 "waiting for taskqid %d completion\n", tq_arg.name, 8);
764 taskq_wait_outstanding(tq, 8);
765 rc = splat_taskq_test_order(&tq_arg, order2);
766
767 out:
768 splat_vprint(file, SPLAT_TASKQ_TEST5_NAME,
769 "Taskq '%s' destroying\n", tq_arg.name);
770 taskq_destroy(tq);
771
772 kmem_free(tqes, sizeof(*tqes) * SPLAT_TASKQ_ORDER_MAX);
773
774 return rc;
775 }
776
777 static int
778 splat_taskq_test5(struct file *file, void *arg)
779 {
780 int rc;
781
782 rc = splat_taskq_test5_impl(file, arg, B_FALSE);
783 if (rc)
784 return rc;
785
786 rc = splat_taskq_test5_impl(file, arg, B_TRUE);
787
788 return rc;
789 }
790
791 /*
792 * Create a single task queue with three threads. Dispatch 8 tasks,
793 * setting TQ_FRONT on only the last three. Sleep after
794 * dispatching tasks 1-3 to ensure they will run and hold the threads
795 * busy while we dispatch the remaining tasks. Verify that tasks 6-8
796  * run before tasks 4 and 5.
797 *
798  * The following table shows each task id and how it will be
799  * scheduled. Each row represents one time unit and each column
800 * one of the three worker threads.
801 *
802 * NB: The Horizontal Line is the LAST Time unit consumed by the Task,
803 * and must be included in the factor calculation.
804 * T
805 * 17-> +-----+
806 * 16 | T6 |
807 * 15-> +-----+ |
808 * 14 | T6 | |
809 * 13-> | | 5 +-----+
810 * 12 | | | T6 |
811 * 11-> | +-----| |
812 * 10 | 4 | T6 | |
813 * 9-> +-----+ | 8 |
814 * 8 | T5 | | |
815 * 7-> | | 7 +-----+
816 * 6 | | | T7 |
817 * 5-> | +-----+ |
818 * 4 | 6 | T5 | |
819 * 3-> +-----+ | |
820 * 2 | T3 | | |
821 * 1 | 1 | 2 | 3 |
822 * 0 +-----+-----+-----+
823 *
824 */
825 static void
826 splat_taskq_test6_func(void *arg)
827 {
828 /* Delays determined by above table */
829 static const int factor[SPLAT_TASKQ_ORDER_MAX+1] = {0,3,5,7,6,6,5,6,6};
830
831 splat_taskq_id_t *tq_id = (splat_taskq_id_t *)arg;
832 splat_taskq_arg_t *tq_arg = tq_id->arg;
833
834 splat_vprint(tq_arg->file, tq_arg->name,
835 "Taskqid %d starting for taskq '%s'\n",
836 tq_id->id, tq_arg->name);
837
838 if (tq_id->id < SPLAT_TASKQ_ORDER_MAX+1) {
839 msleep(factor[tq_id->id] * 50);
840 }
841
842 spin_lock(&tq_arg->lock);
843 tq_arg->order[tq_arg->flag] = tq_id->id;
844 tq_arg->flag++;
845 spin_unlock(&tq_arg->lock);
846
847 splat_vprint(tq_arg->file, tq_arg->name,
848 "Taskqid %d complete for taskq '%s'\n",
849 tq_id->id, tq_arg->name);
850 }
851
852 static int
853 splat_taskq_test6_impl(struct file *file, void *arg, boolean_t prealloc)
854 {
855 taskq_t *tq;
856 taskqid_t id;
857 splat_taskq_id_t tq_id[SPLAT_TASKQ_ORDER_MAX];
858 splat_taskq_arg_t tq_arg;
859 int order[SPLAT_TASKQ_ORDER_MAX] = { 1,2,3,6,7,8,4,5 };
860 taskq_ent_t *tqes;
861 int i, rc = 0;
862 uint_t tflags;
863
864 tqes = kmem_alloc(sizeof(*tqes) * SPLAT_TASKQ_ORDER_MAX, KM_SLEEP);
865 memset(tqes, 0, sizeof(*tqes) * SPLAT_TASKQ_ORDER_MAX);
866
867 splat_vprint(file, SPLAT_TASKQ_TEST6_NAME,
868 "Taskq '%s' creating (%s dispatch)\n",
869 SPLAT_TASKQ_TEST6_NAME,
870 prealloc ? "prealloc" : "dynamic");
871 if ((tq = taskq_create(SPLAT_TASKQ_TEST6_NAME, 3, maxclsyspri,
872 50, INT_MAX, TASKQ_PREPOPULATE)) == NULL) {
873 splat_vprint(file, SPLAT_TASKQ_TEST6_NAME,
874 "Taskq '%s' create failed\n",
875 SPLAT_TASKQ_TEST6_NAME);
876 return -EINVAL;
877 }
878
879 tq_arg.flag = 0;
880 memset(&tq_arg.order, 0, sizeof(int) * SPLAT_TASKQ_ORDER_MAX);
881 spin_lock_init(&tq_arg.lock);
882 tq_arg.file = file;
883 tq_arg.name = SPLAT_TASKQ_TEST6_NAME;
884
885 for (i = 0; i < SPLAT_TASKQ_ORDER_MAX; i++) {
886 taskq_init_ent(&tqes[i]);
887
888 tq_id[i].id = i + 1;
889 tq_id[i].arg = &tq_arg;
890 tflags = TQ_SLEEP;
891 if (i > 4)
892 tflags |= TQ_FRONT;
893
894 if (prealloc) {
895 taskq_dispatch_ent(tq, splat_taskq_test6_func,
896 &tq_id[i], tflags, &tqes[i]);
897 id = tqes[i].tqent_id;
898 } else {
899 id = taskq_dispatch(tq, splat_taskq_test6_func,
900 &tq_id[i], tflags);
901 }
902
903 if (id == 0) {
904 splat_vprint(file, SPLAT_TASKQ_TEST6_NAME,
905 "Taskq '%s' function '%s' dispatch failed\n",
906 tq_arg.name, sym2str(splat_taskq_test6_func));
907 rc = -EINVAL;
908 goto out;
909 }
910
911 if (tq_id[i].id != id) {
912 splat_vprint(file, SPLAT_TASKQ_TEST6_NAME,
913 "Taskq '%s' expected taskqid %d got %d\n",
914 tq_arg.name, (int)tq_id[i].id, (int)id);
915 rc = -EINVAL;
916 goto out;
917 }
918 /* Sleep to let tasks 1-3 start executing. */
919 if (i == 2)
920 msleep(100);
921 }
922
923 splat_vprint(file, SPLAT_TASKQ_TEST6_NAME, "Taskq '%s' "
924 "waiting for taskqid %d completion\n", tq_arg.name,
925 SPLAT_TASKQ_ORDER_MAX);
926 taskq_wait_outstanding(tq, SPLAT_TASKQ_ORDER_MAX);
927 rc = splat_taskq_test_order(&tq_arg, order);
928
929 out:
930 splat_vprint(file, SPLAT_TASKQ_TEST6_NAME,
931 "Taskq '%s' destroying\n", tq_arg.name);
932 taskq_destroy(tq);
933
934 kmem_free(tqes, sizeof(*tqes) * SPLAT_TASKQ_ORDER_MAX);
935
936 return rc;
937 }
938
939 static int
940 splat_taskq_test6(struct file *file, void *arg)
941 {
942 int rc;
943
944 rc = splat_taskq_test6_impl(file, arg, B_FALSE);
945 if (rc)
946 return rc;
947
948 rc = splat_taskq_test6_impl(file, arg, B_TRUE);
949
950 return rc;
951 }
952
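/*
 * Create a taskq with a single thread and have the task function
 * recursively dispatch itself until SPLAT_TASKQ_DEPTH_MAX dispatches
 * have been queued, then wait for the full chain to complete.
 */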
953 static void
954 splat_taskq_test7_func(void *arg)
955 {
956 splat_taskq_arg_t *tq_arg = (splat_taskq_arg_t *)arg;
957 taskqid_t id;
958
959 ASSERT(tq_arg);
960
961 if (tq_arg->depth >= SPLAT_TASKQ_DEPTH_MAX)
962 return;
963
964 tq_arg->depth++;
965
966 splat_vprint(tq_arg->file, SPLAT_TASKQ_TEST7_NAME,
967 "Taskq '%s' function '%s' dispatching (depth = %u)\n",
968 tq_arg->name, sym2str(splat_taskq_test7_func),
969 tq_arg->depth);
970
971 if (tq_arg->tqe) {
972 VERIFY(taskq_empty_ent(tq_arg->tqe));
973 taskq_dispatch_ent(tq_arg->tq, splat_taskq_test7_func,
974 tq_arg, TQ_SLEEP, tq_arg->tqe);
975 id = tq_arg->tqe->tqent_id;
976 } else {
977 id = taskq_dispatch(tq_arg->tq, splat_taskq_test7_func,
978 tq_arg, TQ_SLEEP);
979 }
980
981 if (id == 0) {
982 splat_vprint(tq_arg->file, SPLAT_TASKQ_TEST7_NAME,
983 "Taskq '%s' function '%s' dispatch failed "
984 "(depth = %u)\n", tq_arg->name,
985 sym2str(splat_taskq_test7_func), tq_arg->depth);
986 tq_arg->flag = -EINVAL;
987 return;
988 }
989 }
990
991 static int
992 splat_taskq_test7_impl(struct file *file, void *arg, boolean_t prealloc)
993 {
994 taskq_t *tq;
995 splat_taskq_arg_t *tq_arg;
996 taskq_ent_t *tqe;
997 int error;
998
999 splat_vprint(file, SPLAT_TASKQ_TEST7_NAME,
1000 "Taskq '%s' creating (%s dispatch)\n",
1001 SPLAT_TASKQ_TEST7_NAME,
1002 prealloc ? "prealloc" : "dynamic");
1003 if ((tq = taskq_create(SPLAT_TASKQ_TEST7_NAME, 1, maxclsyspri,
1004 50, INT_MAX, TASKQ_PREPOPULATE)) == NULL) {
1005 splat_vprint(file, SPLAT_TASKQ_TEST7_NAME,
1006 "Taskq '%s' create failed\n",
1007 SPLAT_TASKQ_TEST7_NAME);
1008 return -EINVAL;
1009 }
1010
1011 tq_arg = kmem_alloc(sizeof (splat_taskq_arg_t), KM_SLEEP);
1012 tqe = kmem_alloc(sizeof (taskq_ent_t), KM_SLEEP);
1013
1014 tq_arg->depth = 0;
1015 tq_arg->flag = 0;
1016 tq_arg->id = 0;
1017 tq_arg->file = file;
1018 tq_arg->name = SPLAT_TASKQ_TEST7_NAME;
1019 tq_arg->tq = tq;
1020
1021 if (prealloc) {
1022 taskq_init_ent(tqe);
1023 tq_arg->tqe = tqe;
1024 } else {
1025 tq_arg->tqe = NULL;
1026 }
1027
1028 splat_taskq_test7_func(tq_arg);
1029
1030 if (tq_arg->flag == 0) {
1031 splat_vprint(file, SPLAT_TASKQ_TEST7_NAME,
1032 "Taskq '%s' waiting\n", tq_arg->name);
1033 taskq_wait_outstanding(tq, SPLAT_TASKQ_DEPTH_MAX);
1034 }
1035
1036 error = (tq_arg->depth == SPLAT_TASKQ_DEPTH_MAX ? 0 : -EINVAL);
1037
1038 splat_vprint(file, SPLAT_TASKQ_TEST7_NAME,
1039 "Taskq '%s' destroying\n", tq_arg->name);
1040 
1041 kmem_free(tqe, sizeof (taskq_ent_t));
1042 kmem_free(tq_arg, sizeof (splat_taskq_arg_t));
1043 taskq_destroy(tq);
1044
1045 return (error);
1046 }
1047
1048 static int
1049 splat_taskq_test7(struct file *file, void *arg)
1050 {
1051 int rc;
1052
1053 rc = splat_taskq_test7_impl(file, arg, B_FALSE);
1054 if (rc)
1055 return rc;
1056
1057 rc = splat_taskq_test7_impl(file, arg, B_TRUE);
1058
1059 return rc;
1060 }
1061
1062 /*
1063 * Create a taskq with 100 threads and dispatch a huge number of trivial
1064 * tasks to generate contention on tq->tq_lock. This test should always
1065 * pass. The purpose is to provide a benchmark for measuring the
1066 * effectiveness of taskq optimizations.
1067 */
1068 static void
1069 splat_taskq_test8_func(void *arg)
1070 {
1071 splat_taskq_arg_t *tq_arg = (splat_taskq_arg_t *)arg;
1072 ASSERT(tq_arg);
1073
1074 atomic_inc(tq_arg->count);
1075 }
1076
1077 #define TEST8_NUM_TASKS 0x20000
1078 #define TEST8_THREADS_PER_TASKQ 100
1079
1080 static int
1081 splat_taskq_test8_common(struct file *file, void *arg, int minalloc,
1082 int maxalloc)
1083 {
1084 taskq_t *tq;
1085 taskqid_t id;
1086 splat_taskq_arg_t tq_arg;
1087 taskq_ent_t **tqes;
1088 atomic_t count;
1089 int i, j, rc = 0;
1090
1091 tqes = vmalloc(sizeof(*tqes) * TEST8_NUM_TASKS);
1092 if (tqes == NULL)
1093 return -ENOMEM;
1094 memset(tqes, 0, sizeof(*tqes) * TEST8_NUM_TASKS);
1095
1096 splat_vprint(file, SPLAT_TASKQ_TEST8_NAME,
1097 "Taskq '%s' creating (%d/%d/%d)\n",
1098 SPLAT_TASKQ_TEST8_NAME,
1099 minalloc, maxalloc, TEST8_NUM_TASKS);
1100 if ((tq = taskq_create(SPLAT_TASKQ_TEST8_NAME, TEST8_THREADS_PER_TASKQ,
1101 maxclsyspri, minalloc, maxalloc,
1102 TASKQ_PREPOPULATE)) == NULL) {
1103 splat_vprint(file, SPLAT_TASKQ_TEST8_NAME,
1104 "Taskq '%s' create failed\n",
1105 SPLAT_TASKQ_TEST8_NAME);
1106 rc = -EINVAL;
1107 goto out_free;
1108 }
1109
1110 tq_arg.file = file;
1111 tq_arg.name = SPLAT_TASKQ_TEST8_NAME;
1112 tq_arg.count = &count;
1113 atomic_set(tq_arg.count, 0);
1114
1115 for (i = 0; i < TEST8_NUM_TASKS; i++) {
1116 tqes[i] = kmalloc(sizeof(taskq_ent_t), GFP_KERNEL);
1117 if (tqes[i] == NULL) {
1118 rc = -ENOMEM;
1119 goto out;
1120 }
1121 taskq_init_ent(tqes[i]);
1122
1123 taskq_dispatch_ent(tq, splat_taskq_test8_func,
1124 &tq_arg, TQ_SLEEP, tqes[i]);
1125
1126 id = tqes[i]->tqent_id;
1127
1128 if (id == 0) {
1129 splat_vprint(file, SPLAT_TASKQ_TEST8_NAME,
1130 "Taskq '%s' function '%s' dispatch "
1131 "%d failed\n", tq_arg.name,
1132 sym2str(splat_taskq_test8_func), i);
1133 rc = -EINVAL;
1134 goto out;
1135 }
1136 }
1137
1138 splat_vprint(file, SPLAT_TASKQ_TEST8_NAME, "Taskq '%s' "
1139 "waiting for %d dispatches\n", tq_arg.name,
1140 TEST8_NUM_TASKS);
1141 taskq_wait(tq);
1142 splat_vprint(file, SPLAT_TASKQ_TEST8_NAME, "Taskq '%s' "
1143 "%d/%d dispatches finished\n", tq_arg.name,
1144 atomic_read(tq_arg.count), TEST8_NUM_TASKS);
1145
1146 if (atomic_read(tq_arg.count) != TEST8_NUM_TASKS)
1147 rc = -ERANGE;
1148
1149 out:
1150 splat_vprint(file, SPLAT_TASKQ_TEST8_NAME, "Taskq '%s' destroying\n",
1151 tq_arg.name);
1152 taskq_destroy(tq);
1153 out_free:
1154 for (j = 0; j < TEST8_NUM_TASKS && tqes[j] != NULL; j++)
1155 kfree(tqes[j]);
1156 vfree(tqes);
1157
1158 return rc;
1159 }
1160
1161 static int
1162 splat_taskq_test8(struct file *file, void *arg)
1163 {
1164 int rc;
1165
1166 rc = splat_taskq_test8_common(file, arg, 1, 100);
1167
1168 return rc;
1169 }
1170
1171 /*
1172 * Create a taskq and dispatch a number of delayed tasks to the queue.
1173  * For each task verify that it was not run earlier than requested.
1174 */
1175 static void
1176 splat_taskq_test9_func(void *arg)
1177 {
1178 splat_taskq_arg_t *tq_arg = (splat_taskq_arg_t *)arg;
1179 ASSERT(tq_arg);
1180
1181 if (ddi_time_after_eq(ddi_get_lbolt(), tq_arg->expire))
1182 atomic_inc(tq_arg->count);
1183
1184 kmem_free(tq_arg, sizeof(splat_taskq_arg_t));
1185 }
1186
1187 static int
1188 splat_taskq_test9(struct file *file, void *arg)
1189 {
1190 taskq_t *tq;
1191 atomic_t count;
1192 int i, rc = 0;
1193 int minalloc = 1;
1194 int maxalloc = 10;
1195 int nr_tasks = 100;
1196
1197 splat_vprint(file, SPLAT_TASKQ_TEST9_NAME,
1198 "Taskq '%s' creating (%s dispatch) (%d/%d/%d)\n",
1199 SPLAT_TASKQ_TEST9_NAME, "delay", minalloc, maxalloc, nr_tasks);
1200 if ((tq = taskq_create(SPLAT_TASKQ_TEST9_NAME, 3, maxclsyspri,
1201 minalloc, maxalloc, TASKQ_PREPOPULATE)) == NULL) {
1202 splat_vprint(file, SPLAT_TASKQ_TEST9_NAME,
1203 "Taskq '%s' create failed\n", SPLAT_TASKQ_TEST9_NAME);
1204 return -EINVAL;
1205 }
1206
1207 atomic_set(&count, 0);
1208
1209 for (i = 1; i <= nr_tasks; i++) {
1210 splat_taskq_arg_t *tq_arg;
1211 taskqid_t id;
1212 uint32_t rnd;
1213
1214 /* A random timeout in jiffies of at most 5 seconds */
1215 get_random_bytes((void *)&rnd, 4);
1216 rnd = rnd % (5 * HZ);
1217
1218 tq_arg = kmem_alloc(sizeof(splat_taskq_arg_t), KM_SLEEP);
1219 tq_arg->file = file;
1220 tq_arg->name = SPLAT_TASKQ_TEST9_NAME;
1221 tq_arg->expire = ddi_get_lbolt() + rnd;
1222 tq_arg->count = &count;
1223
1224 splat_vprint(file, SPLAT_TASKQ_TEST9_NAME,
1225 "Taskq '%s' delay dispatch %u jiffies\n",
1226 SPLAT_TASKQ_TEST9_NAME, rnd);
1227
1228 id = taskq_dispatch_delay(tq, splat_taskq_test9_func,
1229 tq_arg, TQ_SLEEP, ddi_get_lbolt() + rnd);
1230
1231 if (id == 0) {
1232 splat_vprint(file, SPLAT_TASKQ_TEST9_NAME,
1233 "Taskq '%s' delay dispatch failed\n",
1234 SPLAT_TASKQ_TEST9_NAME);
1235 kmem_free(tq_arg, sizeof(splat_taskq_arg_t));
1236 taskq_wait(tq);
1237 rc = -EINVAL;
1238 goto out;
1239 }
1240 }
1241
1242 splat_vprint(file, SPLAT_TASKQ_TEST9_NAME, "Taskq '%s' waiting for "
1243 "%d delay dispatches\n", SPLAT_TASKQ_TEST9_NAME, nr_tasks);
1244
1245 taskq_wait(tq);
1246 if (atomic_read(&count) != nr_tasks)
1247 rc = -ERANGE;
1248
1249 splat_vprint(file, SPLAT_TASKQ_TEST9_NAME, "Taskq '%s' %d/%d delay "
1250 "dispatches finished on time\n", SPLAT_TASKQ_TEST9_NAME,
1251 atomic_read(&count), nr_tasks);
1252 splat_vprint(file, SPLAT_TASKQ_TEST9_NAME, "Taskq '%s' destroying\n",
1253 SPLAT_TASKQ_TEST9_NAME);
1254 out:
1255 taskq_destroy(tq);
1256
1257 return rc;
1258 }
1259
1260 /*
1261 * Create a taskq and dispatch then cancel tasks in the queue.
1262 */
1263 static void
1264 splat_taskq_test10_func(void *arg)
1265 {
1266 splat_taskq_arg_t *tq_arg = (splat_taskq_arg_t *)arg;
1267 uint8_t rnd;
1268
1269 if (ddi_time_after_eq(ddi_get_lbolt(), tq_arg->expire))
1270 atomic_inc(tq_arg->count);
1271
1272 /* Randomly sleep to further perturb the system */
1273 get_random_bytes((void *)&rnd, 1);
1274 msleep(1 + (rnd % 9));
1275 }
1276
1277 static int
1278 splat_taskq_test10(struct file *file, void *arg)
1279 {
1280 taskq_t *tq;
1281 splat_taskq_arg_t **tqas;
1282 atomic_t count;
1283 int i, j, rc = 0;
1284 int minalloc = 1;
1285 int maxalloc = 10;
1286 int nr_tasks = 100;
1287 int canceled = 0;
1288 int completed = 0;
1289 int blocked = 0;
1290 clock_t start, cancel;
1291
1292 tqas = vmalloc(sizeof(*tqas) * nr_tasks);
1293 if (tqas == NULL)
1294 return -ENOMEM;
1295 memset(tqas, 0, sizeof(*tqas) * nr_tasks);
1296
1297 splat_vprint(file, SPLAT_TASKQ_TEST10_NAME,
1298 "Taskq '%s' creating (%s dispatch) (%d/%d/%d)\n",
1299 SPLAT_TASKQ_TEST10_NAME, "delay", minalloc, maxalloc, nr_tasks);
1300 if ((tq = taskq_create(SPLAT_TASKQ_TEST10_NAME, 3, maxclsyspri,
1301 minalloc, maxalloc, TASKQ_PREPOPULATE)) == NULL) {
1302 splat_vprint(file, SPLAT_TASKQ_TEST10_NAME,
1303 "Taskq '%s' create failed\n", SPLAT_TASKQ_TEST10_NAME);
1304 rc = -EINVAL;
1305 goto out_free;
1306 }
1307
1308 atomic_set(&count, 0);
1309
1310 for (i = 0; i < nr_tasks; i++) {
1311 splat_taskq_arg_t *tq_arg;
1312 uint32_t rnd;
1313
1314 /* A random timeout in jiffies of at most 5 seconds */
1315 get_random_bytes((void *)&rnd, 4);
1316 rnd = rnd % (5 * HZ);
1317
1318 tq_arg = kmem_alloc(sizeof(splat_taskq_arg_t), KM_SLEEP);
1319 tq_arg->file = file;
1320 tq_arg->name = SPLAT_TASKQ_TEST10_NAME;
1321 tq_arg->count = &count;
1322 tqas[i] = tq_arg;
1323
1324 /*
1325  * Dispatch every third task immediately to mix it up; the cancel
1326  * code is inherently racy and we want to try to provoke any
1327  * subtle concurrency issues.
1328 */
1329 if ((i % 3) == 0) {
1330 tq_arg->expire = ddi_get_lbolt();
1331 tq_arg->id = taskq_dispatch(tq, splat_taskq_test10_func,
1332 tq_arg, TQ_SLEEP);
1333 } else {
1334 tq_arg->expire = ddi_get_lbolt() + rnd;
1335 tq_arg->id = taskq_dispatch_delay(tq,
1336 splat_taskq_test10_func,
1337 tq_arg, TQ_SLEEP, ddi_get_lbolt() + rnd);
1338 }
1339
1340 if (tq_arg->id == 0) {
1341 splat_vprint(file, SPLAT_TASKQ_TEST10_NAME,
1342 "Taskq '%s' dispatch failed\n",
1343 SPLAT_TASKQ_TEST10_NAME);
1344 kmem_free(tq_arg, sizeof(splat_taskq_arg_t));
1345 taskq_wait(tq);
1346 rc = -EINVAL;
1347 goto out;
1348 } else {
1349 splat_vprint(file, SPLAT_TASKQ_TEST10_NAME,
1350 "Taskq '%s' dispatch %lu in %lu jiffies\n",
1351 SPLAT_TASKQ_TEST10_NAME, (unsigned long)tq_arg->id,
1352 !(i % 3) ? 0 : tq_arg->expire - ddi_get_lbolt());
1353 }
1354 }
1355
1356 /*
1357 * Start randomly canceling tasks for the duration of the test. We
1358  * happen to know the valid task ids will be in the range 1..nr_tasks
1359  * because the taskq is private and was just created. However, we
1360  * have no idea whether a particular task has already executed or not.
1361 */
1362 splat_vprint(file, SPLAT_TASKQ_TEST10_NAME, "Taskq '%s' randomly "
1363 "canceling task ids\n", SPLAT_TASKQ_TEST10_NAME);
1364
1365 start = ddi_get_lbolt();
1366 i = 0;
1367
1368 while (ddi_time_before(ddi_get_lbolt(), start + 5 * HZ)) {
1369 taskqid_t id;
1370 uint32_t rnd;
1371
1372 i++;
1373 cancel = ddi_get_lbolt();
1374 get_random_bytes((void *)&rnd, 4);
1375 id = 1 + (rnd % nr_tasks);
1376 rc = taskq_cancel_id(tq, id);
1377
1378 /*
1379 * Keep track of the results of the random cancels.
1380 */
1381 if (rc == 0) {
1382 canceled++;
1383 } else if (rc == ENOENT) {
1384 completed++;
1385 } else if (rc == EBUSY) {
1386 blocked++;
1387 } else {
1388 rc = -EINVAL;
1389 break;
1390 }
1391
1392 /*
1393  * Verify we never get blocked too long in taskq_cancel_id().
1394 * The worst case is 10ms if we happen to cancel the task
1395 * which is currently executing. We allow a factor of 2x.
1396 */
1397 if (ddi_get_lbolt() - cancel > HZ / 50) {
1398 splat_vprint(file, SPLAT_TASKQ_TEST10_NAME,
1399 "Taskq '%s' cancel for %lu took %lu\n",
1400 SPLAT_TASKQ_TEST10_NAME, (unsigned long)id,
1401 ddi_get_lbolt() - cancel);
1402 rc = -ETIMEDOUT;
1403 break;
1404 }
1405
1406 get_random_bytes((void *)&rnd, 4);
1407 msleep(1 + (rnd % 100));
1408 rc = 0;
1409 }
1410
1411 taskq_wait(tq);
1412
1413 /*
1414 * Cross check the results of taskq_cancel_id() with the number of
1415 * times the dispatched function actually ran successfully.
1416 */
1417 if ((rc == 0) && (nr_tasks - canceled != atomic_read(&count)))
1418 rc = -EDOM;
1419
1420 splat_vprint(file, SPLAT_TASKQ_TEST10_NAME, "Taskq '%s' %d attempts, "
1421 "%d canceled, %d completed, %d blocked, %d/%d tasks run\n",
1422 SPLAT_TASKQ_TEST10_NAME, i, canceled, completed, blocked,
1423 atomic_read(&count), nr_tasks);
1424 splat_vprint(file, SPLAT_TASKQ_TEST10_NAME, "Taskq '%s' destroying %d\n",
1425 SPLAT_TASKQ_TEST10_NAME, rc);
1426 out:
1427 taskq_destroy(tq);
1428 out_free:
1429 for (j = 0; j < nr_tasks && tqas[j] != NULL; j++)
1430 kmem_free(tqas[j], sizeof(splat_taskq_arg_t));
1431 vfree(tqas);
1432
1433 return rc;
1434 }
1435
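/*
 * Register the taskq test subsystem and each of the tests above with
 * the SPLAT framework.
 */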
1436 splat_subsystem_t *
1437 splat_taskq_init(void)
1438 {
1439 splat_subsystem_t *sub;
1440
1441 sub = kmalloc(sizeof(*sub), GFP_KERNEL);
1442 if (sub == NULL)
1443 return NULL;
1444
1445 memset(sub, 0, sizeof(*sub));
1446 strncpy(sub->desc.name, SPLAT_TASKQ_NAME, SPLAT_NAME_SIZE);
1447 strncpy(sub->desc.desc, SPLAT_TASKQ_DESC, SPLAT_DESC_SIZE);
1448 INIT_LIST_HEAD(&sub->subsystem_list);
1449 INIT_LIST_HEAD(&sub->test_list);
1450 spin_lock_init(&sub->test_lock);
1451 sub->desc.id = SPLAT_SUBSYSTEM_TASKQ;
1452
1453 SPLAT_TEST_INIT(sub, SPLAT_TASKQ_TEST1_NAME, SPLAT_TASKQ_TEST1_DESC,
1454 SPLAT_TASKQ_TEST1_ID, splat_taskq_test1);
1455 SPLAT_TEST_INIT(sub, SPLAT_TASKQ_TEST2_NAME, SPLAT_TASKQ_TEST2_DESC,
1456 SPLAT_TASKQ_TEST2_ID, splat_taskq_test2);
1457 SPLAT_TEST_INIT(sub, SPLAT_TASKQ_TEST3_NAME, SPLAT_TASKQ_TEST3_DESC,
1458 SPLAT_TASKQ_TEST3_ID, splat_taskq_test3);
1459 SPLAT_TEST_INIT(sub, SPLAT_TASKQ_TEST4_NAME, SPLAT_TASKQ_TEST4_DESC,
1460 SPLAT_TASKQ_TEST4_ID, splat_taskq_test4);
1461 SPLAT_TEST_INIT(sub, SPLAT_TASKQ_TEST5_NAME, SPLAT_TASKQ_TEST5_DESC,
1462 SPLAT_TASKQ_TEST5_ID, splat_taskq_test5);
1463 SPLAT_TEST_INIT(sub, SPLAT_TASKQ_TEST6_NAME, SPLAT_TASKQ_TEST6_DESC,
1464 SPLAT_TASKQ_TEST6_ID, splat_taskq_test6);
1465 SPLAT_TEST_INIT(sub, SPLAT_TASKQ_TEST7_NAME, SPLAT_TASKQ_TEST7_DESC,
1466 SPLAT_TASKQ_TEST7_ID, splat_taskq_test7);
1467 SPLAT_TEST_INIT(sub, SPLAT_TASKQ_TEST8_NAME, SPLAT_TASKQ_TEST8_DESC,
1468 SPLAT_TASKQ_TEST8_ID, splat_taskq_test8);
1469 SPLAT_TEST_INIT(sub, SPLAT_TASKQ_TEST9_NAME, SPLAT_TASKQ_TEST9_DESC,
1470 SPLAT_TASKQ_TEST9_ID, splat_taskq_test9);
1471 SPLAT_TEST_INIT(sub, SPLAT_TASKQ_TEST10_NAME, SPLAT_TASKQ_TEST10_DESC,
1472 SPLAT_TASKQ_TEST10_ID, splat_taskq_test10);
1473
1474 return sub;
1475 }
1476
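/*
 * Unregister the tests in reverse order and free the subsystem.
 */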
1477 void
1478 splat_taskq_fini(splat_subsystem_t *sub)
1479 {
1480 ASSERT(sub);
1481 SPLAT_TEST_FINI(sub, SPLAT_TASKQ_TEST10_ID);
1482 SPLAT_TEST_FINI(sub, SPLAT_TASKQ_TEST9_ID);
1483 SPLAT_TEST_FINI(sub, SPLAT_TASKQ_TEST8_ID);
1484 SPLAT_TEST_FINI(sub, SPLAT_TASKQ_TEST7_ID);
1485 SPLAT_TEST_FINI(sub, SPLAT_TASKQ_TEST6_ID);
1486 SPLAT_TEST_FINI(sub, SPLAT_TASKQ_TEST5_ID);
1487 SPLAT_TEST_FINI(sub, SPLAT_TASKQ_TEST4_ID);
1488 SPLAT_TEST_FINI(sub, SPLAT_TASKQ_TEST3_ID);
1489 SPLAT_TEST_FINI(sub, SPLAT_TASKQ_TEST2_ID);
1490 SPLAT_TEST_FINI(sub, SPLAT_TASKQ_TEST1_ID);
1491
1492 kfree(sub);
1493 }
1494
1495 int
1496 splat_taskq_id(void) {
1497 return SPLAT_SUBSYSTEM_TASKQ;
1498 }