1 /*****************************************************************************\
2 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
3 * Copyright (C) 2007 The Regents of the University of California.
4 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
5 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
8 * This file is part of the SPL, Solaris Porting Layer.
9 * For details, see <http://zfsonlinux.org/>.
11 * The SPL is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
16 * The SPL is distributed in the hope that it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
21 * You should have received a copy of the GNU General Public License along
22 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
23 *****************************************************************************
24 * Solaris Porting LAyer Tests (SPLAT) Task Queue Tests.
25 \*****************************************************************************/
28 #include <sys/random.h>
29 #include <sys/taskq.h>
30 #include <linux/delay.h>
31 #include "splat-internal.h"
33 #define SPLAT_TASKQ_NAME "taskq"
34 #define SPLAT_TASKQ_DESC "Kernel Task Queue Tests"
36 #define SPLAT_TASKQ_TEST1_ID 0x0201
37 #define SPLAT_TASKQ_TEST1_NAME "single"
38 #define SPLAT_TASKQ_TEST1_DESC "Single task queue, single task"
40 #define SPLAT_TASKQ_TEST2_ID 0x0202
41 #define SPLAT_TASKQ_TEST2_NAME "multiple"
42 #define SPLAT_TASKQ_TEST2_DESC "Multiple task queues, multiple tasks"
44 #define SPLAT_TASKQ_TEST3_ID 0x0203
45 #define SPLAT_TASKQ_TEST3_NAME "system"
46 #define SPLAT_TASKQ_TEST3_DESC "System task queue, multiple tasks"
48 #define SPLAT_TASKQ_TEST4_ID 0x0204
49 #define SPLAT_TASKQ_TEST4_NAME "wait"
50 #define SPLAT_TASKQ_TEST4_DESC "Multiple task waiting"
52 #define SPLAT_TASKQ_TEST5_ID 0x0205
53 #define SPLAT_TASKQ_TEST5_NAME "order"
54 #define SPLAT_TASKQ_TEST5_DESC "Correct task ordering"
56 #define SPLAT_TASKQ_TEST6_ID 0x0206
57 #define SPLAT_TASKQ_TEST6_NAME "front"
58 #define SPLAT_TASKQ_TEST6_DESC "Correct ordering with TQ_FRONT flag"
60 #define SPLAT_TASKQ_TEST7_ID 0x0207
61 #define SPLAT_TASKQ_TEST7_NAME "recurse"
62 #define SPLAT_TASKQ_TEST7_DESC "Single task queue, recursive dispatch"
64 #define SPLAT_TASKQ_TEST8_ID 0x0208
65 #define SPLAT_TASKQ_TEST8_NAME "contention"
66 #define SPLAT_TASKQ_TEST8_DESC "1 queue, 100 threads, 131072 tasks"
68 #define SPLAT_TASKQ_TEST9_ID 0x0209
69 #define SPLAT_TASKQ_TEST9_NAME "delay"
70 #define SPLAT_TASKQ_TEST9_DESC "Delayed task execution"
72 #define SPLAT_TASKQ_TEST10_ID 0x020a
73 #define SPLAT_TASKQ_TEST10_NAME "cancel"
74 #define SPLAT_TASKQ_TEST10_DESC "Cancel task execution"
76 #define SPLAT_TASKQ_ORDER_MAX 8
77 #define SPLAT_TASKQ_DEPTH_MAX 16
80 typedef struct splat_taskq_arg
{
84 int order
[SPLAT_TASKQ_ORDER_MAX
];
94 typedef struct splat_taskq_id
{
96 splat_taskq_arg_t
*arg
;
100 * Create a taskq, queue a task, wait until task completes, ensure
101 * task ran properly, cleanup taskq.
104 splat_taskq_test13_func(void *arg
)
106 splat_taskq_arg_t
*tq_arg
= (splat_taskq_arg_t
*)arg
;
109 splat_vprint(tq_arg
->file
, SPLAT_TASKQ_TEST1_NAME
,
110 "Taskq '%s' function '%s' setting flag\n",
111 tq_arg
->name
, sym2str(splat_taskq_test13_func
));
116 splat_taskq_test1_impl(struct file
*file
, void *arg
, boolean_t prealloc
)
120 splat_taskq_arg_t tq_arg
;
123 taskq_init_ent(&tqe
);
125 splat_vprint(file
, SPLAT_TASKQ_TEST1_NAME
,
126 "Taskq '%s' creating (%s dispatch)\n",
127 SPLAT_TASKQ_TEST1_NAME
,
128 prealloc
? "prealloc" : "dynamic");
129 if ((tq
= taskq_create(SPLAT_TASKQ_TEST1_NAME
, 1, maxclsyspri
,
130 50, INT_MAX
, TASKQ_PREPOPULATE
)) == NULL
) {
131 splat_vprint(file
, SPLAT_TASKQ_TEST1_NAME
,
132 "Taskq '%s' create failed\n",
133 SPLAT_TASKQ_TEST1_NAME
);
140 tq_arg
.name
= SPLAT_TASKQ_TEST1_NAME
;
142 splat_vprint(file
, SPLAT_TASKQ_TEST1_NAME
,
143 "Taskq '%s' function '%s' dispatching\n",
144 tq_arg
.name
, sym2str(splat_taskq_test13_func
));
146 taskq_dispatch_ent(tq
, splat_taskq_test13_func
,
147 &tq_arg
, TQ_SLEEP
, &tqe
);
150 id
= taskq_dispatch(tq
, splat_taskq_test13_func
,
155 splat_vprint(file
, SPLAT_TASKQ_TEST1_NAME
,
156 "Taskq '%s' function '%s' dispatch failed\n",
157 tq_arg
.name
, sym2str(splat_taskq_test13_func
));
162 splat_vprint(file
, SPLAT_TASKQ_TEST1_NAME
, "Taskq '%s' waiting\n",
165 splat_vprint(file
, SPLAT_TASKQ_TEST1_NAME
, "Taskq '%s' destroying\n",
170 return (tq_arg
.flag
) ? 0 : -EINVAL
;
174 splat_taskq_test1(struct file
*file
, void *arg
)
178 rc
= splat_taskq_test1_impl(file
, arg
, B_FALSE
);
182 rc
= splat_taskq_test1_impl(file
, arg
, B_TRUE
);
188 * Create multiple taskq's, each with multiple tasks, wait until
189 * all tasks complete, ensure all tasks ran properly and in the
190 * correct order. Run order must be the same as the order submitted
191 * because we only have 1 thread per taskq. Finally cleanup the taskq.
194 splat_taskq_test2_func1(void *arg
)
196 splat_taskq_arg_t
*tq_arg
= (splat_taskq_arg_t
*)arg
;
199 splat_vprint(tq_arg
->file
, SPLAT_TASKQ_TEST2_NAME
,
200 "Taskq '%s/%d' function '%s' flag = %d = %d * 2\n",
201 tq_arg
->name
, tq_arg
->id
,
202 sym2str(splat_taskq_test2_func1
),
203 tq_arg
->flag
* 2, tq_arg
->flag
);
208 splat_taskq_test2_func2(void *arg
)
210 splat_taskq_arg_t
*tq_arg
= (splat_taskq_arg_t
*)arg
;
213 splat_vprint(tq_arg
->file
, SPLAT_TASKQ_TEST2_NAME
,
214 "Taskq '%s/%d' function '%s' flag = %d = %d + 1\n",
215 tq_arg
->name
, tq_arg
->id
,
216 sym2str(splat_taskq_test2_func2
),
217 tq_arg
->flag
+ 1, tq_arg
->flag
);
221 #define TEST2_TASKQS 8
222 #define TEST2_THREADS_PER_TASKQ 1
225 splat_taskq_test2_impl(struct file
*file
, void *arg
, boolean_t prealloc
) {
226 taskq_t
*tq
[TEST2_TASKQS
] = { NULL
};
228 splat_taskq_arg_t tq_args
[TEST2_TASKQS
];
229 taskq_ent_t
*func1_tqes
= NULL
;
230 taskq_ent_t
*func2_tqes
= NULL
;
233 func1_tqes
= kmalloc(sizeof(*func1_tqes
) * TEST2_TASKQS
, GFP_KERNEL
);
234 if (func1_tqes
== NULL
) {
239 func2_tqes
= kmalloc(sizeof(*func2_tqes
) * TEST2_TASKQS
, GFP_KERNEL
);
240 if (func2_tqes
== NULL
) {
245 for (i
= 0; i
< TEST2_TASKQS
; i
++) {
246 taskq_init_ent(&func1_tqes
[i
]);
247 taskq_init_ent(&func2_tqes
[i
]);
249 splat_vprint(file
, SPLAT_TASKQ_TEST2_NAME
,
250 "Taskq '%s/%d' creating (%s dispatch)\n",
251 SPLAT_TASKQ_TEST2_NAME
, i
,
252 prealloc
? "prealloc" : "dynamic");
253 if ((tq
[i
] = taskq_create(SPLAT_TASKQ_TEST2_NAME
,
254 TEST2_THREADS_PER_TASKQ
,
255 maxclsyspri
, 50, INT_MAX
,
256 TASKQ_PREPOPULATE
)) == NULL
) {
257 splat_vprint(file
, SPLAT_TASKQ_TEST2_NAME
,
258 "Taskq '%s/%d' create failed\n",
259 SPLAT_TASKQ_TEST2_NAME
, i
);
266 tq_args
[i
].file
= file
;
267 tq_args
[i
].name
= SPLAT_TASKQ_TEST2_NAME
;
269 splat_vprint(file
, SPLAT_TASKQ_TEST2_NAME
,
270 "Taskq '%s/%d' function '%s' dispatching\n",
271 tq_args
[i
].name
, tq_args
[i
].id
,
272 sym2str(splat_taskq_test2_func1
));
274 taskq_dispatch_ent(tq
[i
], splat_taskq_test2_func1
,
275 &tq_args
[i
], TQ_SLEEP
, &func1_tqes
[i
]);
276 id
= func1_tqes
[i
].tqent_id
;
278 id
= taskq_dispatch(tq
[i
], splat_taskq_test2_func1
,
279 &tq_args
[i
], TQ_SLEEP
);
283 splat_vprint(file
, SPLAT_TASKQ_TEST2_NAME
,
284 "Taskq '%s/%d' function '%s' dispatch "
285 "failed\n", tq_args
[i
].name
, tq_args
[i
].id
,
286 sym2str(splat_taskq_test2_func1
));
291 splat_vprint(file
, SPLAT_TASKQ_TEST2_NAME
,
292 "Taskq '%s/%d' function '%s' dispatching\n",
293 tq_args
[i
].name
, tq_args
[i
].id
,
294 sym2str(splat_taskq_test2_func2
));
296 taskq_dispatch_ent(tq
[i
], splat_taskq_test2_func2
,
297 &tq_args
[i
], TQ_SLEEP
, &func2_tqes
[i
]);
298 id
= func2_tqes
[i
].tqent_id
;
300 id
= taskq_dispatch(tq
[i
], splat_taskq_test2_func2
,
301 &tq_args
[i
], TQ_SLEEP
);
305 splat_vprint(file
, SPLAT_TASKQ_TEST2_NAME
, "Taskq "
306 "'%s/%d' function '%s' dispatch failed\n",
307 tq_args
[i
].name
, tq_args
[i
].id
,
308 sym2str(splat_taskq_test2_func2
));
314 /* When rc is set we're effectively just doing cleanup here, so
315 * ignore new errors in that case. They just cause noise. */
316 for (i
= 0; i
< TEST2_TASKQS
; i
++) {
318 splat_vprint(file
, SPLAT_TASKQ_TEST2_NAME
,
319 "Taskq '%s/%d' waiting\n",
320 tq_args
[i
].name
, tq_args
[i
].id
);
322 splat_vprint(file
, SPLAT_TASKQ_TEST2_NAME
,
323 "Taskq '%s/%d; destroying\n",
324 tq_args
[i
].name
, tq_args
[i
].id
);
326 taskq_destroy(tq
[i
]);
328 if (!rc
&& tq_args
[i
].flag
!= ((i
* 2) + 1)) {
329 splat_vprint(file
, SPLAT_TASKQ_TEST2_NAME
,
330 "Taskq '%s/%d' processed tasks "
331 "out of order; %d != %d\n",
332 tq_args
[i
].name
, tq_args
[i
].id
,
333 tq_args
[i
].flag
, i
* 2 + 1);
336 splat_vprint(file
, SPLAT_TASKQ_TEST2_NAME
,
337 "Taskq '%s/%d' processed tasks "
338 "in the correct order; %d == %d\n",
339 tq_args
[i
].name
, tq_args
[i
].id
,
340 tq_args
[i
].flag
, i
* 2 + 1);
355 splat_taskq_test2(struct file
*file
, void *arg
) {
358 rc
= splat_taskq_test2_impl(file
, arg
, B_FALSE
);
362 rc
= splat_taskq_test2_impl(file
, arg
, B_TRUE
);
368 * Use the global system task queue with a single task, wait until task
369 * completes, ensure task ran properly.
372 splat_taskq_test3_impl(struct file
*file
, void *arg
, boolean_t prealloc
)
375 splat_taskq_arg_t tq_arg
;
378 taskq_init_ent(&tqe
);
383 tq_arg
.name
= SPLAT_TASKQ_TEST3_NAME
;
385 splat_vprint(file
, SPLAT_TASKQ_TEST3_NAME
,
386 "Taskq '%s' function '%s' %s dispatch\n",
387 tq_arg
.name
, sym2str(splat_taskq_test13_func
),
388 prealloc
? "prealloc" : "dynamic");
390 taskq_dispatch_ent(system_taskq
, splat_taskq_test13_func
,
391 &tq_arg
, TQ_SLEEP
, &tqe
);
394 id
= taskq_dispatch(system_taskq
, splat_taskq_test13_func
,
399 splat_vprint(file
, SPLAT_TASKQ_TEST3_NAME
,
400 "Taskq '%s' function '%s' dispatch failed\n",
401 tq_arg
.name
, sym2str(splat_taskq_test13_func
));
405 splat_vprint(file
, SPLAT_TASKQ_TEST3_NAME
, "Taskq '%s' waiting\n",
407 taskq_wait(system_taskq
);
409 return (tq_arg
.flag
) ? 0 : -EINVAL
;
413 splat_taskq_test3(struct file
*file
, void *arg
)
417 rc
= splat_taskq_test3_impl(file
, arg
, B_FALSE
);
421 rc
= splat_taskq_test3_impl(file
, arg
, B_TRUE
);
427 * Create a taskq and dispatch a large number of tasks to the queue.
428 * Then use taskq_wait() to block until all the tasks complete, then
429 * cross check that all the tasks ran by checking the shared atomic
430 * counter which is incremented in the task function.
432 * First we try with a large 'maxalloc' value, then we try with a small one.
433 * We should not drop tasks when TQ_SLEEP is used in taskq_dispatch(), even
434 * if the number of pending tasks is above maxalloc.
437 splat_taskq_test4_func(void *arg
)
439 splat_taskq_arg_t
*tq_arg
= (splat_taskq_arg_t
*)arg
;
442 atomic_inc(tq_arg
->count
);
446 splat_taskq_test4_common(struct file
*file
, void *arg
, int minalloc
,
447 int maxalloc
, int nr_tasks
, boolean_t prealloc
)
451 splat_taskq_arg_t tq_arg
;
456 tqes
= kmalloc(sizeof(*tqes
) * nr_tasks
, GFP_KERNEL
);
460 splat_vprint(file
, SPLAT_TASKQ_TEST4_NAME
,
461 "Taskq '%s' creating (%s dispatch) (%d/%d/%d)\n",
462 SPLAT_TASKQ_TEST4_NAME
,
463 prealloc
? "prealloc" : "dynamic",
464 minalloc
, maxalloc
, nr_tasks
);
465 if ((tq
= taskq_create(SPLAT_TASKQ_TEST4_NAME
, 1, maxclsyspri
,
466 minalloc
, maxalloc
, TASKQ_PREPOPULATE
)) == NULL
) {
467 splat_vprint(file
, SPLAT_TASKQ_TEST4_NAME
,
468 "Taskq '%s' create failed\n",
469 SPLAT_TASKQ_TEST4_NAME
);
475 tq_arg
.name
= SPLAT_TASKQ_TEST4_NAME
;
476 tq_arg
.count
= &count
;
478 for (i
= 1; i
<= nr_tasks
; i
*= 2) {
479 atomic_set(tq_arg
.count
, 0);
480 splat_vprint(file
, SPLAT_TASKQ_TEST4_NAME
,
481 "Taskq '%s' function '%s' dispatched %d times\n",
482 tq_arg
.name
, sym2str(splat_taskq_test4_func
), i
);
484 for (j
= 0; j
< i
; j
++) {
485 taskq_init_ent(&tqes
[j
]);
488 taskq_dispatch_ent(tq
, splat_taskq_test4_func
,
489 &tq_arg
, TQ_SLEEP
, &tqes
[j
]);
490 id
= tqes
[j
].tqent_id
;
492 id
= taskq_dispatch(tq
, splat_taskq_test4_func
,
497 splat_vprint(file
, SPLAT_TASKQ_TEST4_NAME
,
498 "Taskq '%s' function '%s' dispatch "
499 "%d failed\n", tq_arg
.name
,
500 sym2str(splat_taskq_test4_func
), j
);
506 splat_vprint(file
, SPLAT_TASKQ_TEST4_NAME
, "Taskq '%s' "
507 "waiting for %d dispatches\n", tq_arg
.name
, i
);
509 splat_vprint(file
, SPLAT_TASKQ_TEST4_NAME
, "Taskq '%s' "
510 "%d/%d dispatches finished\n", tq_arg
.name
,
511 atomic_read(&count
), i
);
512 if (atomic_read(&count
) != i
) {
519 splat_vprint(file
, SPLAT_TASKQ_TEST4_NAME
, "Taskq '%s' destroying\n",
530 splat_taskq_test4_impl(struct file
*file
, void *arg
, boolean_t prealloc
)
534 rc
= splat_taskq_test4_common(file
, arg
, 50, INT_MAX
, 1024, prealloc
);
538 rc
= splat_taskq_test4_common(file
, arg
, 1, 1, 32, prealloc
);
544 splat_taskq_test4(struct file
*file
, void *arg
)
548 rc
= splat_taskq_test4_impl(file
, arg
, B_FALSE
);
552 rc
= splat_taskq_test4_impl(file
, arg
, B_TRUE
);
558 * Create a taskq and dispatch a specific sequence of tasks carefully
559 * crafted to validate the order in which tasks are processed. When
560 * there are multiple worker threads each thread will process the
561 * next pending task as soon as it completes its current task. This
562 * means that tasks do not strictly complete in order in which they
563 * were dispatched (increasing task id). This is fine but we need to
564 * verify that taskq_wait_all() blocks until the passed task id and all
565 * lower task ids complete. We do this by dispatching the following
566 * specific sequence of tasks each of which block for N time units.
567 * We then use taskq_wait_all() to unblock at specific task id and
568 * verify that only the expected task ids have completed and in the
569 * correct order. The two cases of interest are:
571 * 1) Task ids larger than the waited for task id can run and
572 * complete as long as there is an available worker thread.
573 * 2) All task ids lower than the waited one must complete before
574 * unblocking even if the waited task id itself has completed.
576 * The following table shows each task id and how they will be
577 * scheduled. Each rows represent one time unit and each column
578 * one of the three worker threads. The places taskq_wait_all()
579 * must unblock for a specific id are identified as well as the
580 * task ids which must have completed and their order.
582 * +-----+ <--- taskq_wait_all(tq, 8) unblocks
583 * | | Required Completion Order: 1,2,4,5,3,8,6,7
588 * | | +-----+ <--- taskq_wait_all(tq, 3) unblocks
589 * | | 7 | | Required Completion Order: 1,2,4,5,3
598 * +-----+-----+-----+
602 splat_taskq_test5_func(void *arg
)
604 splat_taskq_id_t
*tq_id
= (splat_taskq_id_t
*)arg
;
605 splat_taskq_arg_t
*tq_arg
= tq_id
->arg
;
608 /* Delays determined by above table */
610 default: factor
= 0; break;
611 case 1: case 8: factor
= 1; break;
612 case 2: case 4: case 5: factor
= 2; break;
613 case 6: case 7: factor
= 4; break;
614 case 3: factor
= 5; break;
617 msleep(factor
* 100);
618 splat_vprint(tq_arg
->file
, tq_arg
->name
,
619 "Taskqid %d complete for taskq '%s'\n",
620 tq_id
->id
, tq_arg
->name
);
622 spin_lock(&tq_arg
->lock
);
623 tq_arg
->order
[tq_arg
->flag
] = tq_id
->id
;
625 spin_unlock(&tq_arg
->lock
);
629 splat_taskq_test_order(splat_taskq_arg_t
*tq_arg
, int *order
)
633 for (i
= 0; i
< SPLAT_TASKQ_ORDER_MAX
; i
++) {
634 if (tq_arg
->order
[i
] != order
[i
]) {
635 splat_vprint(tq_arg
->file
, tq_arg
->name
,
636 "Taskq '%s' incorrect completion "
637 "order\n", tq_arg
->name
);
638 splat_vprint(tq_arg
->file
, tq_arg
->name
,
639 "%s", "Expected { ");
641 for (j
= 0; j
< SPLAT_TASKQ_ORDER_MAX
; j
++)
642 splat_print(tq_arg
->file
, "%d ", order
[j
]);
644 splat_print(tq_arg
->file
, "%s", "}\n");
645 splat_vprint(tq_arg
->file
, tq_arg
->name
,
648 for (j
= 0; j
< SPLAT_TASKQ_ORDER_MAX
; j
++)
649 splat_print(tq_arg
->file
, "%d ",
652 splat_print(tq_arg
->file
, "%s", "}\n");
657 splat_vprint(tq_arg
->file
, tq_arg
->name
,
658 "Taskq '%s' validated correct completion order\n",
665 splat_taskq_test5_impl(struct file
*file
, void *arg
, boolean_t prealloc
)
669 splat_taskq_id_t tq_id
[SPLAT_TASKQ_ORDER_MAX
];
670 splat_taskq_arg_t tq_arg
;
671 int order1
[SPLAT_TASKQ_ORDER_MAX
] = { 1,2,4,5,3,0,0,0 };
672 int order2
[SPLAT_TASKQ_ORDER_MAX
] = { 1,2,4,5,3,8,6,7 };
676 tqes
= kmem_alloc(sizeof(*tqes
) * SPLAT_TASKQ_ORDER_MAX
, KM_SLEEP
);
677 memset(tqes
, 0, sizeof(*tqes
) * SPLAT_TASKQ_ORDER_MAX
);
679 splat_vprint(file
, SPLAT_TASKQ_TEST5_NAME
,
680 "Taskq '%s' creating (%s dispatch)\n",
681 SPLAT_TASKQ_TEST5_NAME
,
682 prealloc
? "prealloc" : "dynamic");
683 if ((tq
= taskq_create(SPLAT_TASKQ_TEST5_NAME
, 3, maxclsyspri
,
684 50, INT_MAX
, TASKQ_PREPOPULATE
)) == NULL
) {
685 splat_vprint(file
, SPLAT_TASKQ_TEST5_NAME
,
686 "Taskq '%s' create failed\n",
687 SPLAT_TASKQ_TEST5_NAME
);
692 memset(&tq_arg
.order
, 0, sizeof(int) * SPLAT_TASKQ_ORDER_MAX
);
693 spin_lock_init(&tq_arg
.lock
);
695 tq_arg
.name
= SPLAT_TASKQ_TEST5_NAME
;
697 for (i
= 0; i
< SPLAT_TASKQ_ORDER_MAX
; i
++) {
698 taskq_init_ent(&tqes
[i
]);
701 tq_id
[i
].arg
= &tq_arg
;
704 taskq_dispatch_ent(tq
, splat_taskq_test5_func
,
705 &tq_id
[i
], TQ_SLEEP
, &tqes
[i
]);
706 id
= tqes
[i
].tqent_id
;
708 id
= taskq_dispatch(tq
, splat_taskq_test5_func
,
709 &tq_id
[i
], TQ_SLEEP
);
713 splat_vprint(file
, SPLAT_TASKQ_TEST5_NAME
,
714 "Taskq '%s' function '%s' dispatch failed\n",
715 tq_arg
.name
, sym2str(splat_taskq_test5_func
));
720 if (tq_id
[i
].id
!= id
) {
721 splat_vprint(file
, SPLAT_TASKQ_TEST5_NAME
,
722 "Taskq '%s' expected taskqid %d got %d\n",
723 tq_arg
.name
, (int)tq_id
[i
].id
, (int)id
);
729 splat_vprint(file
, SPLAT_TASKQ_TEST5_NAME
, "Taskq '%s' "
730 "waiting for taskqid %d completion\n", tq_arg
.name
, 3);
731 taskq_wait_all(tq
, 3);
732 if ((rc
= splat_taskq_test_order(&tq_arg
, order1
)))
735 splat_vprint(file
, SPLAT_TASKQ_TEST5_NAME
, "Taskq '%s' "
736 "waiting for taskqid %d completion\n", tq_arg
.name
, 8);
737 taskq_wait_all(tq
, 8);
738 rc
= splat_taskq_test_order(&tq_arg
, order2
);
741 splat_vprint(file
, SPLAT_TASKQ_TEST5_NAME
,
742 "Taskq '%s' destroying\n", tq_arg
.name
);
745 kmem_free(tqes
, sizeof(*tqes
) * SPLAT_TASKQ_ORDER_MAX
);
751 splat_taskq_test5(struct file
*file
, void *arg
)
755 rc
= splat_taskq_test5_impl(file
, arg
, B_FALSE
);
759 rc
= splat_taskq_test5_impl(file
, arg
, B_TRUE
);
765 * Create a single task queue with three threads. Dispatch 8 tasks,
766 * setting TQ_FRONT on only the last three. Sleep after
767 * dispatching tasks 1-3 to ensure they will run and hold the threads
768 * busy while we dispatch the remaining tasks. Verify that tasks 6-8
769 * run before tasks 4-5.
771 * The following table shows each task id and how they will be
772 * scheduled. Each rows represent one time unit and each column
773 * one of the three worker threads.
775 * NB: The Horizontal Line is the LAST Time unit consumed by the Task,
776 * and must be included in the factor calculation.
795 * 0 +-----+-----+-----+
799 splat_taskq_test6_func(void *arg
)
801 /* Delays determined by above table */
802 static const int factor
[SPLAT_TASKQ_ORDER_MAX
+1] = {0,3,5,7,6,6,5,6,6};
804 splat_taskq_id_t
*tq_id
= (splat_taskq_id_t
*)arg
;
805 splat_taskq_arg_t
*tq_arg
= tq_id
->arg
;
807 splat_vprint(tq_arg
->file
, tq_arg
->name
,
808 "Taskqid %d starting for taskq '%s'\n",
809 tq_id
->id
, tq_arg
->name
);
811 if (tq_id
->id
< SPLAT_TASKQ_ORDER_MAX
+1) {
812 msleep(factor
[tq_id
->id
] * 50);
815 spin_lock(&tq_arg
->lock
);
816 tq_arg
->order
[tq_arg
->flag
] = tq_id
->id
;
818 spin_unlock(&tq_arg
->lock
);
820 splat_vprint(tq_arg
->file
, tq_arg
->name
,
821 "Taskqid %d complete for taskq '%s'\n",
822 tq_id
->id
, tq_arg
->name
);
826 splat_taskq_test6_impl(struct file
*file
, void *arg
, boolean_t prealloc
)
830 splat_taskq_id_t tq_id
[SPLAT_TASKQ_ORDER_MAX
];
831 splat_taskq_arg_t tq_arg
;
832 int order
[SPLAT_TASKQ_ORDER_MAX
] = { 1,2,3,6,7,8,4,5 };
837 tqes
= kmem_alloc(sizeof(*tqes
) * SPLAT_TASKQ_ORDER_MAX
, KM_SLEEP
);
838 memset(tqes
, 0, sizeof(*tqes
) * SPLAT_TASKQ_ORDER_MAX
);
840 splat_vprint(file
, SPLAT_TASKQ_TEST6_NAME
,
841 "Taskq '%s' creating (%s dispatch)\n",
842 SPLAT_TASKQ_TEST6_NAME
,
843 prealloc
? "prealloc" : "dynamic");
844 if ((tq
= taskq_create(SPLAT_TASKQ_TEST6_NAME
, 3, maxclsyspri
,
845 50, INT_MAX
, TASKQ_PREPOPULATE
)) == NULL
) {
846 splat_vprint(file
, SPLAT_TASKQ_TEST6_NAME
,
847 "Taskq '%s' create failed\n",
848 SPLAT_TASKQ_TEST6_NAME
);
853 memset(&tq_arg
.order
, 0, sizeof(int) * SPLAT_TASKQ_ORDER_MAX
);
854 spin_lock_init(&tq_arg
.lock
);
856 tq_arg
.name
= SPLAT_TASKQ_TEST6_NAME
;
858 for (i
= 0; i
< SPLAT_TASKQ_ORDER_MAX
; i
++) {
859 taskq_init_ent(&tqes
[i
]);
862 tq_id
[i
].arg
= &tq_arg
;
868 taskq_dispatch_ent(tq
, splat_taskq_test6_func
,
869 &tq_id
[i
], tflags
, &tqes
[i
]);
870 id
= tqes
[i
].tqent_id
;
872 id
= taskq_dispatch(tq
, splat_taskq_test6_func
,
877 splat_vprint(file
, SPLAT_TASKQ_TEST6_NAME
,
878 "Taskq '%s' function '%s' dispatch failed\n",
879 tq_arg
.name
, sym2str(splat_taskq_test6_func
));
884 if (tq_id
[i
].id
!= id
) {
885 splat_vprint(file
, SPLAT_TASKQ_TEST6_NAME
,
886 "Taskq '%s' expected taskqid %d got %d\n",
887 tq_arg
.name
, (int)tq_id
[i
].id
, (int)id
);
891 /* Sleep to let tasks 1-3 start executing. */
896 splat_vprint(file
, SPLAT_TASKQ_TEST6_NAME
, "Taskq '%s' "
897 "waiting for taskqid %d completion\n", tq_arg
.name
,
898 SPLAT_TASKQ_ORDER_MAX
);
899 taskq_wait_all(tq
, SPLAT_TASKQ_ORDER_MAX
);
900 rc
= splat_taskq_test_order(&tq_arg
, order
);
903 splat_vprint(file
, SPLAT_TASKQ_TEST6_NAME
,
904 "Taskq '%s' destroying\n", tq_arg
.name
);
907 kmem_free(tqes
, sizeof(*tqes
) * SPLAT_TASKQ_ORDER_MAX
);
913 splat_taskq_test6(struct file
*file
, void *arg
)
917 rc
= splat_taskq_test6_impl(file
, arg
, B_FALSE
);
921 rc
= splat_taskq_test6_impl(file
, arg
, B_TRUE
);
927 splat_taskq_test7_func(void *arg
)
929 splat_taskq_arg_t
*tq_arg
= (splat_taskq_arg_t
*)arg
;
934 if (tq_arg
->depth
>= SPLAT_TASKQ_DEPTH_MAX
)
939 splat_vprint(tq_arg
->file
, SPLAT_TASKQ_TEST7_NAME
,
940 "Taskq '%s' function '%s' dispatching (depth = %u)\n",
941 tq_arg
->name
, sym2str(splat_taskq_test7_func
),
945 VERIFY(taskq_empty_ent(tq_arg
->tqe
));
946 taskq_dispatch_ent(tq_arg
->tq
, splat_taskq_test7_func
,
947 tq_arg
, TQ_SLEEP
, tq_arg
->tqe
);
948 id
= tq_arg
->tqe
->tqent_id
;
950 id
= taskq_dispatch(tq_arg
->tq
, splat_taskq_test7_func
,
955 splat_vprint(tq_arg
->file
, SPLAT_TASKQ_TEST7_NAME
,
956 "Taskq '%s' function '%s' dispatch failed "
957 "(depth = %u)\n", tq_arg
->name
,
958 sym2str(splat_taskq_test7_func
), tq_arg
->depth
);
959 tq_arg
->flag
= -EINVAL
;
965 splat_taskq_test7_impl(struct file
*file
, void *arg
, boolean_t prealloc
)
969 splat_taskq_arg_t tq_arg
;
971 splat_vprint(file
, SPLAT_TASKQ_TEST7_NAME
,
972 "Taskq '%s' creating (%s dispatch)\n",
973 SPLAT_TASKQ_TEST7_NAME
,
974 prealloc
? "prealloc" : "dynamic");
975 if ((tq
= taskq_create(SPLAT_TASKQ_TEST7_NAME
, 1, maxclsyspri
,
976 50, INT_MAX
, TASKQ_PREPOPULATE
)) == NULL
) {
977 splat_vprint(file
, SPLAT_TASKQ_TEST7_NAME
,
978 "Taskq '%s' create failed\n",
979 SPLAT_TASKQ_TEST7_NAME
);
987 tq_arg
.name
= SPLAT_TASKQ_TEST7_NAME
;
991 taskq_init_ent(&tqe
);
997 splat_taskq_test7_func(&tq_arg
);
999 if (tq_arg
.flag
== 0) {
1000 splat_vprint(file
, SPLAT_TASKQ_TEST7_NAME
,
1001 "Taskq '%s' waiting\n", tq_arg
.name
);
1002 taskq_wait_all(tq
, SPLAT_TASKQ_DEPTH_MAX
);
1005 splat_vprint(file
, SPLAT_TASKQ_TEST7_NAME
,
1006 "Taskq '%s' destroying\n", tq_arg
.name
);
1009 return tq_arg
.depth
== SPLAT_TASKQ_DEPTH_MAX
? 0 : -EINVAL
;
1013 splat_taskq_test7(struct file
*file
, void *arg
)
1017 rc
= splat_taskq_test7_impl(file
, arg
, B_FALSE
);
1021 rc
= splat_taskq_test7_impl(file
, arg
, B_TRUE
);
1027 * Create a taskq with 100 threads and dispatch a huge number of trivial
1028 * tasks to generate contention on tq->tq_lock. This test should always
1029 * pass. The purpose is to provide a benchmark for measuring the
1030 * effectiveness of taskq optimizations.
1033 splat_taskq_test8_func(void *arg
)
1035 splat_taskq_arg_t
*tq_arg
= (splat_taskq_arg_t
*)arg
;
1038 atomic_inc(tq_arg
->count
);
1041 #define TEST8_NUM_TASKS 0x20000
1042 #define TEST8_THREADS_PER_TASKQ 100
1045 splat_taskq_test8_common(struct file
*file
, void *arg
, int minalloc
,
1050 splat_taskq_arg_t tq_arg
;
1055 tqes
= vmalloc(sizeof(*tqes
) * TEST8_NUM_TASKS
);
1058 memset(tqes
, 0, sizeof(*tqes
) * TEST8_NUM_TASKS
);
1060 splat_vprint(file
, SPLAT_TASKQ_TEST8_NAME
,
1061 "Taskq '%s' creating (%d/%d/%d)\n",
1062 SPLAT_TASKQ_TEST8_NAME
,
1063 minalloc
, maxalloc
, TEST8_NUM_TASKS
);
1064 if ((tq
= taskq_create(SPLAT_TASKQ_TEST8_NAME
, TEST8_THREADS_PER_TASKQ
,
1065 maxclsyspri
, minalloc
, maxalloc
,
1066 TASKQ_PREPOPULATE
)) == NULL
) {
1067 splat_vprint(file
, SPLAT_TASKQ_TEST8_NAME
,
1068 "Taskq '%s' create failed\n",
1069 SPLAT_TASKQ_TEST8_NAME
);
1075 tq_arg
.name
= SPLAT_TASKQ_TEST8_NAME
;
1076 tq_arg
.count
= &count
;
1077 atomic_set(tq_arg
.count
, 0);
1079 for (i
= 0; i
< TEST8_NUM_TASKS
; i
++) {
1080 tqes
[i
] = kmalloc(sizeof(taskq_ent_t
), GFP_KERNEL
);
1081 if (tqes
[i
] == NULL
) {
1085 taskq_init_ent(tqes
[i
]);
1087 taskq_dispatch_ent(tq
, splat_taskq_test8_func
,
1088 &tq_arg
, TQ_SLEEP
, tqes
[i
]);
1090 id
= tqes
[i
]->tqent_id
;
1093 splat_vprint(file
, SPLAT_TASKQ_TEST8_NAME
,
1094 "Taskq '%s' function '%s' dispatch "
1095 "%d failed\n", tq_arg
.name
,
1096 sym2str(splat_taskq_test8_func
), i
);
1102 splat_vprint(file
, SPLAT_TASKQ_TEST8_NAME
, "Taskq '%s' "
1103 "waiting for %d dispatches\n", tq_arg
.name
,
1106 splat_vprint(file
, SPLAT_TASKQ_TEST8_NAME
, "Taskq '%s' "
1107 "%d/%d dispatches finished\n", tq_arg
.name
,
1108 atomic_read(tq_arg
.count
), TEST8_NUM_TASKS
);
1110 if (atomic_read(tq_arg
.count
) != TEST8_NUM_TASKS
)
1114 splat_vprint(file
, SPLAT_TASKQ_TEST8_NAME
, "Taskq '%s' destroying\n",
1118 for (j
= 0; j
< TEST8_NUM_TASKS
&& tqes
[j
] != NULL
; j
++)
1126 splat_taskq_test8(struct file
*file
, void *arg
)
1130 rc
= splat_taskq_test8_common(file
, arg
, 1, 100);
1136 * Create a taskq and dispatch a number of delayed tasks to the queue.
1137 * For each task verify that it was run no earlier than requested.
1140 splat_taskq_test9_func(void *arg
)
1142 splat_taskq_arg_t
*tq_arg
= (splat_taskq_arg_t
*)arg
;
1145 if (ddi_time_after_eq(ddi_get_lbolt(), tq_arg
->expire
))
1146 atomic_inc(tq_arg
->count
);
1148 kmem_free(tq_arg
, sizeof(splat_taskq_arg_t
));
1152 splat_taskq_test9(struct file
*file
, void *arg
)
1161 splat_vprint(file
, SPLAT_TASKQ_TEST9_NAME
,
1162 "Taskq '%s' creating (%s dispatch) (%d/%d/%d)\n",
1163 SPLAT_TASKQ_TEST9_NAME
, "delay", minalloc
, maxalloc
, nr_tasks
);
1164 if ((tq
= taskq_create(SPLAT_TASKQ_TEST9_NAME
, 3, maxclsyspri
,
1165 minalloc
, maxalloc
, TASKQ_PREPOPULATE
)) == NULL
) {
1166 splat_vprint(file
, SPLAT_TASKQ_TEST9_NAME
,
1167 "Taskq '%s' create failed\n", SPLAT_TASKQ_TEST9_NAME
);
1171 atomic_set(&count
, 0);
1173 for (i
= 1; i
<= nr_tasks
; i
++) {
1174 splat_taskq_arg_t
*tq_arg
;
1178 /* A random timeout in jiffies of at most 5 seconds */
1179 get_random_bytes((void *)&rnd
, 4);
1180 rnd
= rnd
% (5 * HZ
);
1182 tq_arg
= kmem_alloc(sizeof(splat_taskq_arg_t
), KM_SLEEP
);
1183 tq_arg
->file
= file
;
1184 tq_arg
->name
= SPLAT_TASKQ_TEST9_NAME
;
1185 tq_arg
->expire
= ddi_get_lbolt() + rnd
;
1186 tq_arg
->count
= &count
;
1188 splat_vprint(file
, SPLAT_TASKQ_TEST9_NAME
,
1189 "Taskq '%s' delay dispatch %u jiffies\n",
1190 SPLAT_TASKQ_TEST9_NAME
, rnd
);
1192 id
= taskq_dispatch_delay(tq
, splat_taskq_test9_func
,
1193 tq_arg
, TQ_SLEEP
, ddi_get_lbolt() + rnd
);
1196 splat_vprint(file
, SPLAT_TASKQ_TEST9_NAME
,
1197 "Taskq '%s' delay dispatch failed\n",
1198 SPLAT_TASKQ_TEST9_NAME
);
1199 kmem_free(tq_arg
, sizeof(splat_taskq_arg_t
));
1206 splat_vprint(file
, SPLAT_TASKQ_TEST9_NAME
, "Taskq '%s' waiting for "
1207 "%d delay dispatches\n", SPLAT_TASKQ_TEST9_NAME
, nr_tasks
);
1210 if (atomic_read(&count
) != nr_tasks
)
1213 splat_vprint(file
, SPLAT_TASKQ_TEST9_NAME
, "Taskq '%s' %d/%d delay "
1214 "dispatches finished on time\n", SPLAT_TASKQ_TEST9_NAME
,
1215 atomic_read(&count
), nr_tasks
);
1216 splat_vprint(file
, SPLAT_TASKQ_TEST9_NAME
, "Taskq '%s' destroying\n",
1217 SPLAT_TASKQ_TEST9_NAME
);
1225 * Create a taskq and dispatch then cancel tasks in the queue.
1228 splat_taskq_test10_func(void *arg
)
1230 splat_taskq_arg_t
*tq_arg
= (splat_taskq_arg_t
*)arg
;
1233 if (ddi_time_after_eq(ddi_get_lbolt(), tq_arg
->expire
))
1234 atomic_inc(tq_arg
->count
);
1236 /* Randomly sleep to further perturb the system */
1237 get_random_bytes((void *)&rnd
, 1);
1238 msleep(1 + (rnd
% 9));
1242 splat_taskq_test10(struct file
*file
, void *arg
)
1245 splat_taskq_arg_t
**tqas
;
1254 clock_t start
, cancel
;
1256 tqas
= vmalloc(sizeof(*tqas
) * nr_tasks
);
1259 memset(tqas
, 0, sizeof(*tqas
) * nr_tasks
);
1261 splat_vprint(file
, SPLAT_TASKQ_TEST10_NAME
,
1262 "Taskq '%s' creating (%s dispatch) (%d/%d/%d)\n",
1263 SPLAT_TASKQ_TEST10_NAME
, "delay", minalloc
, maxalloc
, nr_tasks
);
1264 if ((tq
= taskq_create(SPLAT_TASKQ_TEST10_NAME
, 3, maxclsyspri
,
1265 minalloc
, maxalloc
, TASKQ_PREPOPULATE
)) == NULL
) {
1266 splat_vprint(file
, SPLAT_TASKQ_TEST10_NAME
,
1267 "Taskq '%s' create failed\n", SPLAT_TASKQ_TEST10_NAME
);
1272 atomic_set(&count
, 0);
1274 for (i
= 0; i
< nr_tasks
; i
++) {
1275 splat_taskq_arg_t
*tq_arg
;
1278 /* A random timeout in jiffies of at most 5 seconds */
1279 get_random_bytes((void *)&rnd
, 4);
1280 rnd
= rnd
% (5 * HZ
);
1282 tq_arg
= kmem_alloc(sizeof(splat_taskq_arg_t
), KM_SLEEP
);
1283 tq_arg
->file
= file
;
1284 tq_arg
->name
= SPLAT_TASKQ_TEST10_NAME
;
1285 tq_arg
->count
= &count
;
1289 * Dispatch every 1/3 one immediately to mix it up, the cancel
1290 * code is inherently racy and we want to try and provoke any
1291 * subtle concurrency issues.
1294 tq_arg
->expire
= ddi_get_lbolt();
1295 tq_arg
->id
= taskq_dispatch(tq
, splat_taskq_test10_func
,
1298 tq_arg
->expire
= ddi_get_lbolt() + rnd
;
1299 tq_arg
->id
= taskq_dispatch_delay(tq
,
1300 splat_taskq_test10_func
,
1301 tq_arg
, TQ_SLEEP
, ddi_get_lbolt() + rnd
);
1304 if (tq_arg
->id
== 0) {
1305 splat_vprint(file
, SPLAT_TASKQ_TEST10_NAME
,
1306 "Taskq '%s' dispatch failed\n",
1307 SPLAT_TASKQ_TEST10_NAME
);
1308 kmem_free(tq_arg
, sizeof(splat_taskq_arg_t
));
1313 splat_vprint(file
, SPLAT_TASKQ_TEST10_NAME
,
1314 "Taskq '%s' dispatch %lu in %lu jiffies\n",
1315 SPLAT_TASKQ_TEST10_NAME
, (unsigned long)tq_arg
->id
,
1316 !(i
% 3) ? 0 : tq_arg
->expire
- ddi_get_lbolt());
1321 * Start randomly canceling tasks for the duration of the test. We
1322 * happen to know the valid task id's will be in the range 1..nr_tasks
1323 * because the taskq is private and was just created. However, we
1324 * have no idea if a particular task has already executed or not.
1326 splat_vprint(file
, SPLAT_TASKQ_TEST10_NAME
, "Taskq '%s' randomly "
1327 "canceling task ids\n", SPLAT_TASKQ_TEST10_NAME
);
1329 start
= ddi_get_lbolt();
1332 while (ddi_time_before(ddi_get_lbolt(), start
+ 5 * HZ
)) {
1337 cancel
= ddi_get_lbolt();
1338 get_random_bytes((void *)&rnd
, 4);
1339 id
= 1 + (rnd
% nr_tasks
);
1340 rc
= taskq_cancel_id(tq
, id
);
1343 * Keep track of the results of the random cancels.
1347 } else if (rc
== ENOENT
) {
1349 } else if (rc
== EBUSY
) {
1357 * Verify we never get blocked too long in taskq_cancel_id().
1358 * The worst case is 10ms if we happen to cancel the task
1359 * which is currently executing. We allow a factor of 2x.
1361 if (ddi_get_lbolt() - cancel
> HZ
/ 50) {
1362 splat_vprint(file
, SPLAT_TASKQ_TEST10_NAME
,
1363 "Taskq '%s' cancel for %lu took %lu\n",
1364 SPLAT_TASKQ_TEST10_NAME
, (unsigned long)id
,
1365 ddi_get_lbolt() - cancel
);
1370 get_random_bytes((void *)&rnd
, 4);
1371 msleep(1 + (rnd
% 100));
1378 * Cross check the results of taskq_cancel_id() with the number of
1379 * times the dispatched function actually ran successfully.
1381 if ((rc
== 0) && (nr_tasks
- canceled
!= atomic_read(&count
)))
1384 splat_vprint(file
, SPLAT_TASKQ_TEST10_NAME
, "Taskq '%s' %d attempts, "
1385 "%d canceled, %d completed, %d blocked, %d/%d tasks run\n",
1386 SPLAT_TASKQ_TEST10_NAME
, i
, canceled
, completed
, blocked
,
1387 atomic_read(&count
), nr_tasks
);
1388 splat_vprint(file
, SPLAT_TASKQ_TEST10_NAME
, "Taskq '%s' destroying %d\n",
1389 SPLAT_TASKQ_TEST10_NAME
, rc
);
1393 for (j
= 0; j
< nr_tasks
&& tqas
[j
] != NULL
; j
++)
1394 kmem_free(tqas
[j
], sizeof(splat_taskq_arg_t
));
1401 splat_taskq_init(void)
1403 splat_subsystem_t
*sub
;
1405 sub
= kmalloc(sizeof(*sub
), GFP_KERNEL
);
1409 memset(sub
, 0, sizeof(*sub
));
1410 strncpy(sub
->desc
.name
, SPLAT_TASKQ_NAME
, SPLAT_NAME_SIZE
);
1411 strncpy(sub
->desc
.desc
, SPLAT_TASKQ_DESC
, SPLAT_DESC_SIZE
);
1412 INIT_LIST_HEAD(&sub
->subsystem_list
);
1413 INIT_LIST_HEAD(&sub
->test_list
);
1414 spin_lock_init(&sub
->test_lock
);
1415 sub
->desc
.id
= SPLAT_SUBSYSTEM_TASKQ
;
1417 SPLAT_TEST_INIT(sub
, SPLAT_TASKQ_TEST1_NAME
, SPLAT_TASKQ_TEST1_DESC
,
1418 SPLAT_TASKQ_TEST1_ID
, splat_taskq_test1
);
1419 SPLAT_TEST_INIT(sub
, SPLAT_TASKQ_TEST2_NAME
, SPLAT_TASKQ_TEST2_DESC
,
1420 SPLAT_TASKQ_TEST2_ID
, splat_taskq_test2
);
1421 SPLAT_TEST_INIT(sub
, SPLAT_TASKQ_TEST3_NAME
, SPLAT_TASKQ_TEST3_DESC
,
1422 SPLAT_TASKQ_TEST3_ID
, splat_taskq_test3
);
1423 SPLAT_TEST_INIT(sub
, SPLAT_TASKQ_TEST4_NAME
, SPLAT_TASKQ_TEST4_DESC
,
1424 SPLAT_TASKQ_TEST4_ID
, splat_taskq_test4
);
1425 SPLAT_TEST_INIT(sub
, SPLAT_TASKQ_TEST5_NAME
, SPLAT_TASKQ_TEST5_DESC
,
1426 SPLAT_TASKQ_TEST5_ID
, splat_taskq_test5
);
1427 SPLAT_TEST_INIT(sub
, SPLAT_TASKQ_TEST6_NAME
, SPLAT_TASKQ_TEST6_DESC
,
1428 SPLAT_TASKQ_TEST6_ID
, splat_taskq_test6
);
1429 SPLAT_TEST_INIT(sub
, SPLAT_TASKQ_TEST7_NAME
, SPLAT_TASKQ_TEST7_DESC
,
1430 SPLAT_TASKQ_TEST7_ID
, splat_taskq_test7
);
1431 SPLAT_TEST_INIT(sub
, SPLAT_TASKQ_TEST8_NAME
, SPLAT_TASKQ_TEST8_DESC
,
1432 SPLAT_TASKQ_TEST8_ID
, splat_taskq_test8
);
1433 SPLAT_TEST_INIT(sub
, SPLAT_TASKQ_TEST9_NAME
, SPLAT_TASKQ_TEST9_DESC
,
1434 SPLAT_TASKQ_TEST9_ID
, splat_taskq_test9
);
1435 SPLAT_TEST_INIT(sub
, SPLAT_TASKQ_TEST10_NAME
, SPLAT_TASKQ_TEST10_DESC
,
1436 SPLAT_TASKQ_TEST10_ID
, splat_taskq_test10
);
1442 splat_taskq_fini(splat_subsystem_t
*sub
)
1445 SPLAT_TEST_FINI(sub
, SPLAT_TASKQ_TEST10_ID
);
1446 SPLAT_TEST_FINI(sub
, SPLAT_TASKQ_TEST9_ID
);
1447 SPLAT_TEST_FINI(sub
, SPLAT_TASKQ_TEST8_ID
);
1448 SPLAT_TEST_FINI(sub
, SPLAT_TASKQ_TEST7_ID
);
1449 SPLAT_TEST_FINI(sub
, SPLAT_TASKQ_TEST6_ID
);
1450 SPLAT_TEST_FINI(sub
, SPLAT_TASKQ_TEST5_ID
);
1451 SPLAT_TEST_FINI(sub
, SPLAT_TASKQ_TEST4_ID
);
1452 SPLAT_TEST_FINI(sub
, SPLAT_TASKQ_TEST3_ID
);
1453 SPLAT_TEST_FINI(sub
, SPLAT_TASKQ_TEST2_ID
);
1454 SPLAT_TEST_FINI(sub
, SPLAT_TASKQ_TEST1_ID
);
1460 splat_taskq_id(void) {
1461 return SPLAT_SUBSYSTEM_TASKQ
;