1 /*****************************************************************************\
2 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
3 * Copyright (C) 2007 The Regents of the University of California.
4 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
5 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
8 * This file is part of the SPL, Solaris Porting Layer.
9 * For details, see <http://github.com/behlendorf/spl/>.
11 * The SPL is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
16 * The SPL is distributed in the hope that it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
21 * You should have received a copy of the GNU General Public License along
22 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
23 *****************************************************************************
24 * Solaris Porting LAyer Tests (SPLAT) Task Queue Tests.
25 \*****************************************************************************/
27 #include <sys/taskq.h>
28 #include <sys/random.h>
30 #include "splat-internal.h"
32 #define SPLAT_TASKQ_NAME "taskq"
33 #define SPLAT_TASKQ_DESC "Kernel Task Queue Tests"
35 #define SPLAT_TASKQ_TEST1_ID 0x0201
36 #define SPLAT_TASKQ_TEST1_NAME "single"
37 #define SPLAT_TASKQ_TEST1_DESC "Single task queue, single task"
39 #define SPLAT_TASKQ_TEST2_ID 0x0202
40 #define SPLAT_TASKQ_TEST2_NAME "multiple"
41 #define SPLAT_TASKQ_TEST2_DESC "Multiple task queues, multiple tasks"
43 #define SPLAT_TASKQ_TEST3_ID 0x0203
44 #define SPLAT_TASKQ_TEST3_NAME "system"
45 #define SPLAT_TASKQ_TEST3_DESC "System task queue, multiple tasks"
47 #define SPLAT_TASKQ_TEST4_ID 0x0204
48 #define SPLAT_TASKQ_TEST4_NAME "wait"
49 #define SPLAT_TASKQ_TEST4_DESC "Multiple task waiting"
51 #define SPLAT_TASKQ_TEST5_ID 0x0205
52 #define SPLAT_TASKQ_TEST5_NAME "order"
53 #define SPLAT_TASKQ_TEST5_DESC "Correct task ordering"
55 #define SPLAT_TASKQ_TEST6_ID 0x0206
56 #define SPLAT_TASKQ_TEST6_NAME "front"
57 #define SPLAT_TASKQ_TEST6_DESC "Correct ordering with TQ_FRONT flag"
59 #define SPLAT_TASKQ_TEST7_ID 0x0207
60 #define SPLAT_TASKQ_TEST7_NAME "recurse"
61 #define SPLAT_TASKQ_TEST7_DESC "Single task queue, recursive dispatch"
63 #define SPLAT_TASKQ_TEST8_ID 0x0208
64 #define SPLAT_TASKQ_TEST8_NAME "contention"
65 #define SPLAT_TASKQ_TEST8_DESC "1 queue, 100 threads, 131072 tasks"
67 #define SPLAT_TASKQ_TEST9_ID 0x0209
68 #define SPLAT_TASKQ_TEST9_NAME "delay"
69 #define SPLAT_TASKQ_TEST9_DESC "Delayed task execution"
71 #define SPLAT_TASKQ_TEST10_ID 0x020a
72 #define SPLAT_TASKQ_TEST10_NAME "cancel"
73 #define SPLAT_TASKQ_TEST10_DESC "Cancel task execution"
75 #define SPLAT_TASKQ_ORDER_MAX 8
76 #define SPLAT_TASKQ_DEPTH_MAX 16
79 typedef struct splat_taskq_arg
{
83 int order
[SPLAT_TASKQ_ORDER_MAX
];
93 typedef struct splat_taskq_id
{
95 splat_taskq_arg_t
*arg
;
99 * Create a taskq, queue a task, wait until task completes, ensure
100 * task ran properly, cleanup taskq.
103 splat_taskq_test13_func(void *arg
)
105 splat_taskq_arg_t
*tq_arg
= (splat_taskq_arg_t
*)arg
;
108 splat_vprint(tq_arg
->file
, SPLAT_TASKQ_TEST1_NAME
,
109 "Taskq '%s' function '%s' setting flag\n",
110 tq_arg
->name
, sym2str(splat_taskq_test13_func
));
115 splat_taskq_test1_impl(struct file
*file
, void *arg
, boolean_t prealloc
)
119 splat_taskq_arg_t tq_arg
;
122 taskq_init_ent(&tqe
);
124 splat_vprint(file
, SPLAT_TASKQ_TEST1_NAME
,
125 "Taskq '%s' creating (%s dispatch)\n",
126 SPLAT_TASKQ_TEST1_NAME
,
127 prealloc
? "prealloc" : "dynamic");
128 if ((tq
= taskq_create(SPLAT_TASKQ_TEST1_NAME
, 1, maxclsyspri
,
129 50, INT_MAX
, TASKQ_PREPOPULATE
)) == NULL
) {
130 splat_vprint(file
, SPLAT_TASKQ_TEST1_NAME
,
131 "Taskq '%s' create failed\n",
132 SPLAT_TASKQ_TEST1_NAME
);
139 tq_arg
.name
= SPLAT_TASKQ_TEST1_NAME
;
141 splat_vprint(file
, SPLAT_TASKQ_TEST1_NAME
,
142 "Taskq '%s' function '%s' dispatching\n",
143 tq_arg
.name
, sym2str(splat_taskq_test13_func
));
145 taskq_dispatch_ent(tq
, splat_taskq_test13_func
,
146 &tq_arg
, TQ_SLEEP
, &tqe
);
149 id
= taskq_dispatch(tq
, splat_taskq_test13_func
,
154 splat_vprint(file
, SPLAT_TASKQ_TEST1_NAME
,
155 "Taskq '%s' function '%s' dispatch failed\n",
156 tq_arg
.name
, sym2str(splat_taskq_test13_func
));
161 splat_vprint(file
, SPLAT_TASKQ_TEST1_NAME
, "Taskq '%s' waiting\n",
164 splat_vprint(file
, SPLAT_TASKQ_TEST1_NAME
, "Taskq '%s' destroying\n",
169 return (tq_arg
.flag
) ? 0 : -EINVAL
;
173 splat_taskq_test1(struct file
*file
, void *arg
)
177 rc
= splat_taskq_test1_impl(file
, arg
, B_FALSE
);
181 rc
= splat_taskq_test1_impl(file
, arg
, B_TRUE
);
187 * Create multiple taskq's, each with multiple tasks, wait until
188 * all tasks complete, ensure all tasks ran properly and in the
189 * correct order. Run order must be the same as the order submitted
190 * because we only have 1 thread per taskq. Finally cleanup the taskq.
193 splat_taskq_test2_func1(void *arg
)
195 splat_taskq_arg_t
*tq_arg
= (splat_taskq_arg_t
*)arg
;
198 splat_vprint(tq_arg
->file
, SPLAT_TASKQ_TEST2_NAME
,
199 "Taskq '%s/%d' function '%s' flag = %d = %d * 2\n",
200 tq_arg
->name
, tq_arg
->id
,
201 sym2str(splat_taskq_test2_func1
),
202 tq_arg
->flag
* 2, tq_arg
->flag
);
207 splat_taskq_test2_func2(void *arg
)
209 splat_taskq_arg_t
*tq_arg
= (splat_taskq_arg_t
*)arg
;
212 splat_vprint(tq_arg
->file
, SPLAT_TASKQ_TEST2_NAME
,
213 "Taskq '%s/%d' function '%s' flag = %d = %d + 1\n",
214 tq_arg
->name
, tq_arg
->id
,
215 sym2str(splat_taskq_test2_func2
),
216 tq_arg
->flag
+ 1, tq_arg
->flag
);
220 #define TEST2_TASKQS 8
221 #define TEST2_THREADS_PER_TASKQ 1
224 splat_taskq_test2_impl(struct file
*file
, void *arg
, boolean_t prealloc
) {
225 taskq_t
*tq
[TEST2_TASKQS
] = { NULL
};
227 splat_taskq_arg_t tq_args
[TEST2_TASKQS
];
228 taskq_ent_t
*func1_tqes
= NULL
;
229 taskq_ent_t
*func2_tqes
= NULL
;
232 func1_tqes
= kmalloc(sizeof(*func1_tqes
) * TEST2_TASKQS
, GFP_KERNEL
);
233 if (func1_tqes
== NULL
) {
238 func2_tqes
= kmalloc(sizeof(*func2_tqes
) * TEST2_TASKQS
, GFP_KERNEL
);
239 if (func2_tqes
== NULL
) {
244 for (i
= 0; i
< TEST2_TASKQS
; i
++) {
245 taskq_init_ent(&func1_tqes
[i
]);
246 taskq_init_ent(&func2_tqes
[i
]);
248 splat_vprint(file
, SPLAT_TASKQ_TEST2_NAME
,
249 "Taskq '%s/%d' creating (%s dispatch)\n",
250 SPLAT_TASKQ_TEST2_NAME
, i
,
251 prealloc
? "prealloc" : "dynamic");
252 if ((tq
[i
] = taskq_create(SPLAT_TASKQ_TEST2_NAME
,
253 TEST2_THREADS_PER_TASKQ
,
254 maxclsyspri
, 50, INT_MAX
,
255 TASKQ_PREPOPULATE
)) == NULL
) {
256 splat_vprint(file
, SPLAT_TASKQ_TEST2_NAME
,
257 "Taskq '%s/%d' create failed\n",
258 SPLAT_TASKQ_TEST2_NAME
, i
);
265 tq_args
[i
].file
= file
;
266 tq_args
[i
].name
= SPLAT_TASKQ_TEST2_NAME
;
268 splat_vprint(file
, SPLAT_TASKQ_TEST2_NAME
,
269 "Taskq '%s/%d' function '%s' dispatching\n",
270 tq_args
[i
].name
, tq_args
[i
].id
,
271 sym2str(splat_taskq_test2_func1
));
273 taskq_dispatch_ent(tq
[i
], splat_taskq_test2_func1
,
274 &tq_args
[i
], TQ_SLEEP
, &func1_tqes
[i
]);
275 id
= func1_tqes
[i
].tqent_id
;
277 id
= taskq_dispatch(tq
[i
], splat_taskq_test2_func1
,
278 &tq_args
[i
], TQ_SLEEP
);
282 splat_vprint(file
, SPLAT_TASKQ_TEST2_NAME
,
283 "Taskq '%s/%d' function '%s' dispatch "
284 "failed\n", tq_args
[i
].name
, tq_args
[i
].id
,
285 sym2str(splat_taskq_test2_func1
));
290 splat_vprint(file
, SPLAT_TASKQ_TEST2_NAME
,
291 "Taskq '%s/%d' function '%s' dispatching\n",
292 tq_args
[i
].name
, tq_args
[i
].id
,
293 sym2str(splat_taskq_test2_func2
));
295 taskq_dispatch_ent(tq
[i
], splat_taskq_test2_func2
,
296 &tq_args
[i
], TQ_SLEEP
, &func2_tqes
[i
]);
297 id
= func2_tqes
[i
].tqent_id
;
299 id
= taskq_dispatch(tq
[i
], splat_taskq_test2_func2
,
300 &tq_args
[i
], TQ_SLEEP
);
304 splat_vprint(file
, SPLAT_TASKQ_TEST2_NAME
, "Taskq "
305 "'%s/%d' function '%s' dispatch failed\n",
306 tq_args
[i
].name
, tq_args
[i
].id
,
307 sym2str(splat_taskq_test2_func2
));
313 /* When rc is set we're effectively just doing cleanup here, so
314 * ignore new errors in that case. They just cause noise. */
315 for (i
= 0; i
< TEST2_TASKQS
; i
++) {
317 splat_vprint(file
, SPLAT_TASKQ_TEST2_NAME
,
318 "Taskq '%s/%d' waiting\n",
319 tq_args
[i
].name
, tq_args
[i
].id
);
321 splat_vprint(file
, SPLAT_TASKQ_TEST2_NAME
,
322 "Taskq '%s/%d; destroying\n",
323 tq_args
[i
].name
, tq_args
[i
].id
);
325 taskq_destroy(tq
[i
]);
327 if (!rc
&& tq_args
[i
].flag
!= ((i
* 2) + 1)) {
328 splat_vprint(file
, SPLAT_TASKQ_TEST2_NAME
,
329 "Taskq '%s/%d' processed tasks "
330 "out of order; %d != %d\n",
331 tq_args
[i
].name
, tq_args
[i
].id
,
332 tq_args
[i
].flag
, i
* 2 + 1);
335 splat_vprint(file
, SPLAT_TASKQ_TEST2_NAME
,
336 "Taskq '%s/%d' processed tasks "
337 "in the correct order; %d == %d\n",
338 tq_args
[i
].name
, tq_args
[i
].id
,
339 tq_args
[i
].flag
, i
* 2 + 1);
354 splat_taskq_test2(struct file
*file
, void *arg
) {
357 rc
= splat_taskq_test2_impl(file
, arg
, B_FALSE
);
361 rc
= splat_taskq_test2_impl(file
, arg
, B_TRUE
);
367 * Use the global system task queue with a single task, wait until task
368 * completes, ensure task ran properly.
371 splat_taskq_test3_impl(struct file
*file
, void *arg
, boolean_t prealloc
)
374 splat_taskq_arg_t tq_arg
;
377 taskq_init_ent(&tqe
);
382 tq_arg
.name
= SPLAT_TASKQ_TEST3_NAME
;
384 splat_vprint(file
, SPLAT_TASKQ_TEST3_NAME
,
385 "Taskq '%s' function '%s' %s dispatch\n",
386 tq_arg
.name
, sym2str(splat_taskq_test13_func
),
387 prealloc
? "prealloc" : "dynamic");
389 taskq_dispatch_ent(system_taskq
, splat_taskq_test13_func
,
390 &tq_arg
, TQ_SLEEP
, &tqe
);
393 id
= taskq_dispatch(system_taskq
, splat_taskq_test13_func
,
398 splat_vprint(file
, SPLAT_TASKQ_TEST3_NAME
,
399 "Taskq '%s' function '%s' dispatch failed\n",
400 tq_arg
.name
, sym2str(splat_taskq_test13_func
));
404 splat_vprint(file
, SPLAT_TASKQ_TEST3_NAME
, "Taskq '%s' waiting\n",
406 taskq_wait(system_taskq
);
408 return (tq_arg
.flag
) ? 0 : -EINVAL
;
412 splat_taskq_test3(struct file
*file
, void *arg
)
416 rc
= splat_taskq_test3_impl(file
, arg
, B_FALSE
);
420 rc
= splat_taskq_test3_impl(file
, arg
, B_TRUE
);
426 * Create a taskq and dispatch a large number of tasks to the queue.
427 * Then use taskq_wait() to block until all the tasks complete, then
428 * cross check that all the tasks ran by checking the shared atomic
429 * counter which is incremented in the task function.
431 * First we try with a large 'maxalloc' value, then we try with a small one.
432 * We should not drop tasks when TQ_SLEEP is used in taskq_dispatch(), even
433 * if the number of pending tasks is above maxalloc.
436 splat_taskq_test4_func(void *arg
)
438 splat_taskq_arg_t
*tq_arg
= (splat_taskq_arg_t
*)arg
;
441 atomic_inc(tq_arg
->count
);
445 splat_taskq_test4_common(struct file
*file
, void *arg
, int minalloc
,
446 int maxalloc
, int nr_tasks
, boolean_t prealloc
)
450 splat_taskq_arg_t tq_arg
;
455 tqes
= kmalloc(sizeof(*tqes
) * nr_tasks
, GFP_KERNEL
);
459 splat_vprint(file
, SPLAT_TASKQ_TEST4_NAME
,
460 "Taskq '%s' creating (%s dispatch) (%d/%d/%d)\n",
461 SPLAT_TASKQ_TEST4_NAME
,
462 prealloc
? "prealloc" : "dynamic",
463 minalloc
, maxalloc
, nr_tasks
);
464 if ((tq
= taskq_create(SPLAT_TASKQ_TEST4_NAME
, 1, maxclsyspri
,
465 minalloc
, maxalloc
, TASKQ_PREPOPULATE
)) == NULL
) {
466 splat_vprint(file
, SPLAT_TASKQ_TEST4_NAME
,
467 "Taskq '%s' create failed\n",
468 SPLAT_TASKQ_TEST4_NAME
);
474 tq_arg
.name
= SPLAT_TASKQ_TEST4_NAME
;
475 tq_arg
.count
= &count
;
477 for (i
= 1; i
<= nr_tasks
; i
*= 2) {
478 atomic_set(tq_arg
.count
, 0);
479 splat_vprint(file
, SPLAT_TASKQ_TEST4_NAME
,
480 "Taskq '%s' function '%s' dispatched %d times\n",
481 tq_arg
.name
, sym2str(splat_taskq_test4_func
), i
);
483 for (j
= 0; j
< i
; j
++) {
484 taskq_init_ent(&tqes
[j
]);
487 taskq_dispatch_ent(tq
, splat_taskq_test4_func
,
488 &tq_arg
, TQ_SLEEP
, &tqes
[j
]);
489 id
= tqes
[j
].tqent_id
;
491 id
= taskq_dispatch(tq
, splat_taskq_test4_func
,
496 splat_vprint(file
, SPLAT_TASKQ_TEST4_NAME
,
497 "Taskq '%s' function '%s' dispatch "
498 "%d failed\n", tq_arg
.name
,
499 sym2str(splat_taskq_test4_func
), j
);
505 splat_vprint(file
, SPLAT_TASKQ_TEST4_NAME
, "Taskq '%s' "
506 "waiting for %d dispatches\n", tq_arg
.name
, i
);
508 splat_vprint(file
, SPLAT_TASKQ_TEST4_NAME
, "Taskq '%s' "
509 "%d/%d dispatches finished\n", tq_arg
.name
,
510 atomic_read(&count
), i
);
511 if (atomic_read(&count
) != i
) {
518 splat_vprint(file
, SPLAT_TASKQ_TEST4_NAME
, "Taskq '%s' destroying\n",
529 splat_taskq_test4_impl(struct file
*file
, void *arg
, boolean_t prealloc
)
533 rc
= splat_taskq_test4_common(file
, arg
, 50, INT_MAX
, 1024, prealloc
);
537 rc
= splat_taskq_test4_common(file
, arg
, 1, 1, 32, prealloc
);
543 splat_taskq_test4(struct file
*file
, void *arg
)
547 rc
= splat_taskq_test4_impl(file
, arg
, B_FALSE
);
551 rc
= splat_taskq_test4_impl(file
, arg
, B_TRUE
);
557 * Create a taskq and dispatch a specific sequence of tasks carefully
558 * crafted to validate the order in which tasks are processed. When
559 * there are multiple worker threads each thread will process the
560 * next pending task as soon as it completes its current task. This
561 * means that tasks do not strictly complete in order in which they
562 * were dispatched (increasing task id). This is fine but we need to
563 * verify that taskq_wait_all() blocks until the passed task id and all
564 * lower task ids complete. We do this by dispatching the following
565 * specific sequence of tasks each of which block for N time units.
566 * We then use taskq_wait_all() to unblock at specific task id and
567 * verify the only the expected task ids have completed and in the
568 * correct order. The two cases of interest are:
570 * 1) Task ids larger than the waited for task id can run and
571 * complete as long as there is an available worker thread.
572 * 2) All task ids lower than the waited one must complete before
573 * unblocking even if the waited task id itself has completed.
575 * The following table shows each task id and how they will be
576 * scheduled. Each rows represent one time unit and each column
577 * one of the three worker threads. The places taskq_wait_all()
578 * must unblock for a specific id are identified as well as the
579 * task ids which must have completed and their order.
581 * +-----+ <--- taskq_wait_all(tq, 8) unblocks
582 * | | Required Completion Order: 1,2,4,5,3,8,6,7
587 * | | +-----+ <--- taskq_wait_all(tq, 3) unblocks
588 * | | 7 | | Required Completion Order: 1,2,4,5,3
597 * +-----+-----+-----+
601 splat_taskq_test5_func(void *arg
)
603 splat_taskq_id_t
*tq_id
= (splat_taskq_id_t
*)arg
;
604 splat_taskq_arg_t
*tq_arg
= tq_id
->arg
;
607 /* Delays determined by above table */
609 default: factor
= 0; break;
610 case 1: case 8: factor
= 1; break;
611 case 2: case 4: case 5: factor
= 2; break;
612 case 6: case 7: factor
= 4; break;
613 case 3: factor
= 5; break;
616 msleep(factor
* 100);
617 splat_vprint(tq_arg
->file
, tq_arg
->name
,
618 "Taskqid %d complete for taskq '%s'\n",
619 tq_id
->id
, tq_arg
->name
);
621 spin_lock(&tq_arg
->lock
);
622 tq_arg
->order
[tq_arg
->flag
] = tq_id
->id
;
624 spin_unlock(&tq_arg
->lock
);
628 splat_taskq_test_order(splat_taskq_arg_t
*tq_arg
, int *order
)
632 for (i
= 0; i
< SPLAT_TASKQ_ORDER_MAX
; i
++) {
633 if (tq_arg
->order
[i
] != order
[i
]) {
634 splat_vprint(tq_arg
->file
, tq_arg
->name
,
635 "Taskq '%s' incorrect completion "
636 "order\n", tq_arg
->name
);
637 splat_vprint(tq_arg
->file
, tq_arg
->name
,
638 "%s", "Expected { ");
640 for (j
= 0; j
< SPLAT_TASKQ_ORDER_MAX
; j
++)
641 splat_print(tq_arg
->file
, "%d ", order
[j
]);
643 splat_print(tq_arg
->file
, "%s", "}\n");
644 splat_vprint(tq_arg
->file
, tq_arg
->name
,
647 for (j
= 0; j
< SPLAT_TASKQ_ORDER_MAX
; j
++)
648 splat_print(tq_arg
->file
, "%d ",
651 splat_print(tq_arg
->file
, "%s", "}\n");
656 splat_vprint(tq_arg
->file
, tq_arg
->name
,
657 "Taskq '%s' validated correct completion order\n",
664 splat_taskq_test5_impl(struct file
*file
, void *arg
, boolean_t prealloc
)
668 splat_taskq_id_t tq_id
[SPLAT_TASKQ_ORDER_MAX
];
669 splat_taskq_arg_t tq_arg
;
670 int order1
[SPLAT_TASKQ_ORDER_MAX
] = { 1,2,4,5,3,0,0,0 };
671 int order2
[SPLAT_TASKQ_ORDER_MAX
] = { 1,2,4,5,3,8,6,7 };
675 tqes
= kmem_alloc(sizeof(*tqes
) * SPLAT_TASKQ_ORDER_MAX
, KM_SLEEP
);
676 memset(tqes
, 0, sizeof(*tqes
) * SPLAT_TASKQ_ORDER_MAX
);
678 splat_vprint(file
, SPLAT_TASKQ_TEST5_NAME
,
679 "Taskq '%s' creating (%s dispatch)\n",
680 SPLAT_TASKQ_TEST5_NAME
,
681 prealloc
? "prealloc" : "dynamic");
682 if ((tq
= taskq_create(SPLAT_TASKQ_TEST5_NAME
, 3, maxclsyspri
,
683 50, INT_MAX
, TASKQ_PREPOPULATE
)) == NULL
) {
684 splat_vprint(file
, SPLAT_TASKQ_TEST5_NAME
,
685 "Taskq '%s' create failed\n",
686 SPLAT_TASKQ_TEST5_NAME
);
691 memset(&tq_arg
.order
, 0, sizeof(int) * SPLAT_TASKQ_ORDER_MAX
);
692 spin_lock_init(&tq_arg
.lock
);
694 tq_arg
.name
= SPLAT_TASKQ_TEST5_NAME
;
696 for (i
= 0; i
< SPLAT_TASKQ_ORDER_MAX
; i
++) {
697 taskq_init_ent(&tqes
[i
]);
700 tq_id
[i
].arg
= &tq_arg
;
703 taskq_dispatch_ent(tq
, splat_taskq_test5_func
,
704 &tq_id
[i
], TQ_SLEEP
, &tqes
[i
]);
705 id
= tqes
[i
].tqent_id
;
707 id
= taskq_dispatch(tq
, splat_taskq_test5_func
,
708 &tq_id
[i
], TQ_SLEEP
);
712 splat_vprint(file
, SPLAT_TASKQ_TEST5_NAME
,
713 "Taskq '%s' function '%s' dispatch failed\n",
714 tq_arg
.name
, sym2str(splat_taskq_test5_func
));
719 if (tq_id
[i
].id
!= id
) {
720 splat_vprint(file
, SPLAT_TASKQ_TEST5_NAME
,
721 "Taskq '%s' expected taskqid %d got %d\n",
722 tq_arg
.name
, (int)tq_id
[i
].id
, (int)id
);
728 splat_vprint(file
, SPLAT_TASKQ_TEST5_NAME
, "Taskq '%s' "
729 "waiting for taskqid %d completion\n", tq_arg
.name
, 3);
730 taskq_wait_all(tq
, 3);
731 if ((rc
= splat_taskq_test_order(&tq_arg
, order1
)))
734 splat_vprint(file
, SPLAT_TASKQ_TEST5_NAME
, "Taskq '%s' "
735 "waiting for taskqid %d completion\n", tq_arg
.name
, 8);
736 taskq_wait_all(tq
, 8);
737 rc
= splat_taskq_test_order(&tq_arg
, order2
);
740 splat_vprint(file
, SPLAT_TASKQ_TEST5_NAME
,
741 "Taskq '%s' destroying\n", tq_arg
.name
);
744 kmem_free(tqes
, sizeof(*tqes
) * SPLAT_TASKQ_ORDER_MAX
);
750 splat_taskq_test5(struct file
*file
, void *arg
)
754 rc
= splat_taskq_test5_impl(file
, arg
, B_FALSE
);
758 rc
= splat_taskq_test5_impl(file
, arg
, B_TRUE
);
764 * Create a single task queue with three threads. Dispatch 8 tasks,
765 * setting TQ_FRONT on only the last three. Sleep after
766 * dispatching tasks 1-3 to ensure they will run and hold the threads
767 * busy while we dispatch the remaining tasks. Verify that tasks 6-8
768 * run before task 4-5.
770 * The following table shows each task id and how they will be
771 * scheduled. Each rows represent one time unit and each column
772 * one of the three worker threads.
774 * NB: The Horizontal Line is the LAST Time unit consumed by the Task,
775 * and must be included in the factor calculation.
794 * 0 +-----+-----+-----+
798 splat_taskq_test6_func(void *arg
)
800 /* Delays determined by above table */
801 static const int factor
[SPLAT_TASKQ_ORDER_MAX
+1] = {0,3,5,7,6,6,5,6,6};
803 splat_taskq_id_t
*tq_id
= (splat_taskq_id_t
*)arg
;
804 splat_taskq_arg_t
*tq_arg
= tq_id
->arg
;
806 splat_vprint(tq_arg
->file
, tq_arg
->name
,
807 "Taskqid %d starting for taskq '%s'\n",
808 tq_id
->id
, tq_arg
->name
);
810 if (tq_id
->id
< SPLAT_TASKQ_ORDER_MAX
+1) {
811 msleep(factor
[tq_id
->id
] * 50);
814 spin_lock(&tq_arg
->lock
);
815 tq_arg
->order
[tq_arg
->flag
] = tq_id
->id
;
817 splat_vprint(tq_arg
->file
, tq_arg
->name
,
818 "Taskqid %d complete for taskq '%s'\n",
819 tq_id
->id
, tq_arg
->name
);
820 spin_unlock(&tq_arg
->lock
);
824 splat_taskq_test6_impl(struct file
*file
, void *arg
, boolean_t prealloc
)
828 splat_taskq_id_t tq_id
[SPLAT_TASKQ_ORDER_MAX
];
829 splat_taskq_arg_t tq_arg
;
830 int order
[SPLAT_TASKQ_ORDER_MAX
] = { 1,2,3,6,7,8,4,5 };
835 tqes
= kmem_alloc(sizeof(*tqes
) * SPLAT_TASKQ_ORDER_MAX
, KM_SLEEP
);
836 memset(tqes
, 0, sizeof(*tqes
) * SPLAT_TASKQ_ORDER_MAX
);
838 splat_vprint(file
, SPLAT_TASKQ_TEST6_NAME
,
839 "Taskq '%s' creating (%s dispatch)\n",
840 SPLAT_TASKQ_TEST6_NAME
,
841 prealloc
? "prealloc" : "dynamic");
842 if ((tq
= taskq_create(SPLAT_TASKQ_TEST6_NAME
, 3, maxclsyspri
,
843 50, INT_MAX
, TASKQ_PREPOPULATE
)) == NULL
) {
844 splat_vprint(file
, SPLAT_TASKQ_TEST6_NAME
,
845 "Taskq '%s' create failed\n",
846 SPLAT_TASKQ_TEST6_NAME
);
851 memset(&tq_arg
.order
, 0, sizeof(int) * SPLAT_TASKQ_ORDER_MAX
);
852 spin_lock_init(&tq_arg
.lock
);
854 tq_arg
.name
= SPLAT_TASKQ_TEST6_NAME
;
856 for (i
= 0; i
< SPLAT_TASKQ_ORDER_MAX
; i
++) {
857 taskq_init_ent(&tqes
[i
]);
860 tq_id
[i
].arg
= &tq_arg
;
866 taskq_dispatch_ent(tq
, splat_taskq_test6_func
,
867 &tq_id
[i
], tflags
, &tqes
[i
]);
868 id
= tqes
[i
].tqent_id
;
870 id
= taskq_dispatch(tq
, splat_taskq_test6_func
,
875 splat_vprint(file
, SPLAT_TASKQ_TEST6_NAME
,
876 "Taskq '%s' function '%s' dispatch failed\n",
877 tq_arg
.name
, sym2str(splat_taskq_test6_func
));
882 if (tq_id
[i
].id
!= id
) {
883 splat_vprint(file
, SPLAT_TASKQ_TEST6_NAME
,
884 "Taskq '%s' expected taskqid %d got %d\n",
885 tq_arg
.name
, (int)tq_id
[i
].id
, (int)id
);
889 /* Sleep to let tasks 1-3 start executing. */
894 splat_vprint(file
, SPLAT_TASKQ_TEST6_NAME
, "Taskq '%s' "
895 "waiting for taskqid %d completion\n", tq_arg
.name
,
896 SPLAT_TASKQ_ORDER_MAX
);
897 taskq_wait_all(tq
, SPLAT_TASKQ_ORDER_MAX
);
898 rc
= splat_taskq_test_order(&tq_arg
, order
);
901 splat_vprint(file
, SPLAT_TASKQ_TEST6_NAME
,
902 "Taskq '%s' destroying\n", tq_arg
.name
);
905 kmem_free(tqes
, sizeof(*tqes
) * SPLAT_TASKQ_ORDER_MAX
);
911 splat_taskq_test6(struct file
*file
, void *arg
)
915 rc
= splat_taskq_test6_impl(file
, arg
, B_FALSE
);
919 rc
= splat_taskq_test6_impl(file
, arg
, B_TRUE
);
925 splat_taskq_test7_func(void *arg
)
927 splat_taskq_arg_t
*tq_arg
= (splat_taskq_arg_t
*)arg
;
932 if (tq_arg
->depth
>= SPLAT_TASKQ_DEPTH_MAX
)
937 splat_vprint(tq_arg
->file
, SPLAT_TASKQ_TEST7_NAME
,
938 "Taskq '%s' function '%s' dispatching (depth = %u)\n",
939 tq_arg
->name
, sym2str(splat_taskq_test7_func
),
943 VERIFY(taskq_empty_ent(tq_arg
->tqe
));
944 taskq_dispatch_ent(tq_arg
->tq
, splat_taskq_test7_func
,
945 tq_arg
, TQ_SLEEP
, tq_arg
->tqe
);
946 id
= tq_arg
->tqe
->tqent_id
;
948 id
= taskq_dispatch(tq_arg
->tq
, splat_taskq_test7_func
,
953 splat_vprint(tq_arg
->file
, SPLAT_TASKQ_TEST7_NAME
,
954 "Taskq '%s' function '%s' dispatch failed "
955 "(depth = %u)\n", tq_arg
->name
,
956 sym2str(splat_taskq_test7_func
), tq_arg
->depth
);
957 tq_arg
->flag
= -EINVAL
;
963 splat_taskq_test7_impl(struct file
*file
, void *arg
, boolean_t prealloc
)
967 splat_taskq_arg_t tq_arg
;
969 splat_vprint(file
, SPLAT_TASKQ_TEST7_NAME
,
970 "Taskq '%s' creating (%s dispatch)\n",
971 SPLAT_TASKQ_TEST7_NAME
,
972 prealloc
? "prealloc" : "dynamic");
973 if ((tq
= taskq_create(SPLAT_TASKQ_TEST7_NAME
, 1, maxclsyspri
,
974 50, INT_MAX
, TASKQ_PREPOPULATE
)) == NULL
) {
975 splat_vprint(file
, SPLAT_TASKQ_TEST7_NAME
,
976 "Taskq '%s' create failed\n",
977 SPLAT_TASKQ_TEST7_NAME
);
985 tq_arg
.name
= SPLAT_TASKQ_TEST7_NAME
;
989 taskq_init_ent(&tqe
);
995 splat_taskq_test7_func(&tq_arg
);
997 if (tq_arg
.flag
== 0) {
998 splat_vprint(file
, SPLAT_TASKQ_TEST7_NAME
,
999 "Taskq '%s' waiting\n", tq_arg
.name
);
1000 taskq_wait_all(tq
, SPLAT_TASKQ_DEPTH_MAX
);
1003 splat_vprint(file
, SPLAT_TASKQ_TEST7_NAME
,
1004 "Taskq '%s' destroying\n", tq_arg
.name
);
1007 return tq_arg
.depth
== SPLAT_TASKQ_DEPTH_MAX
? 0 : -EINVAL
;
1011 splat_taskq_test7(struct file
*file
, void *arg
)
1015 rc
= splat_taskq_test7_impl(file
, arg
, B_FALSE
);
1019 rc
= splat_taskq_test7_impl(file
, arg
, B_TRUE
);
1025 * Create a taskq with 100 threads and dispatch a huge number of trivial
1026 * tasks to generate contention on tq->tq_lock. This test should always
1027 * pass. The purpose is to provide a benchmark for measuring the
1028 * effectiveness of taskq optimizations.
1031 splat_taskq_test8_func(void *arg
)
1033 splat_taskq_arg_t
*tq_arg
= (splat_taskq_arg_t
*)arg
;
1036 atomic_inc(tq_arg
->count
);
1039 #define TEST8_NUM_TASKS 0x20000
1040 #define TEST8_THREADS_PER_TASKQ 100
1043 splat_taskq_test8_common(struct file
*file
, void *arg
, int minalloc
,
1048 splat_taskq_arg_t tq_arg
;
1053 tqes
= vmalloc(sizeof(*tqes
) * TEST8_NUM_TASKS
);
1056 memset(tqes
, 0, sizeof(*tqes
) * TEST8_NUM_TASKS
);
1058 splat_vprint(file
, SPLAT_TASKQ_TEST8_NAME
,
1059 "Taskq '%s' creating (%d/%d/%d)\n",
1060 SPLAT_TASKQ_TEST8_NAME
,
1061 minalloc
, maxalloc
, TEST8_NUM_TASKS
);
1062 if ((tq
= taskq_create(SPLAT_TASKQ_TEST8_NAME
, TEST8_THREADS_PER_TASKQ
,
1063 maxclsyspri
, minalloc
, maxalloc
,
1064 TASKQ_PREPOPULATE
)) == NULL
) {
1065 splat_vprint(file
, SPLAT_TASKQ_TEST8_NAME
,
1066 "Taskq '%s' create failed\n",
1067 SPLAT_TASKQ_TEST8_NAME
);
1073 tq_arg
.name
= SPLAT_TASKQ_TEST8_NAME
;
1074 tq_arg
.count
= &count
;
1075 atomic_set(tq_arg
.count
, 0);
1077 for (i
= 0; i
< TEST8_NUM_TASKS
; i
++) {
1078 tqes
[i
] = kmalloc(sizeof(taskq_ent_t
), GFP_KERNEL
);
1079 if (tqes
[i
] == NULL
) {
1083 taskq_init_ent(tqes
[i
]);
1085 taskq_dispatch_ent(tq
, splat_taskq_test8_func
,
1086 &tq_arg
, TQ_SLEEP
, tqes
[i
]);
1088 id
= tqes
[i
]->tqent_id
;
1091 splat_vprint(file
, SPLAT_TASKQ_TEST8_NAME
,
1092 "Taskq '%s' function '%s' dispatch "
1093 "%d failed\n", tq_arg
.name
,
1094 sym2str(splat_taskq_test8_func
), i
);
1100 splat_vprint(file
, SPLAT_TASKQ_TEST8_NAME
, "Taskq '%s' "
1101 "waiting for %d dispatches\n", tq_arg
.name
,
1104 splat_vprint(file
, SPLAT_TASKQ_TEST8_NAME
, "Taskq '%s' "
1105 "%d/%d dispatches finished\n", tq_arg
.name
,
1106 atomic_read(tq_arg
.count
), TEST8_NUM_TASKS
);
1108 if (atomic_read(tq_arg
.count
) != TEST8_NUM_TASKS
)
1112 splat_vprint(file
, SPLAT_TASKQ_TEST8_NAME
, "Taskq '%s' destroying\n",
1116 for (j
= 0; j
< TEST8_NUM_TASKS
&& tqes
[j
] != NULL
; j
++)
/*
 * Test 8 entry point: run the contention benchmark once with
 * minalloc=1, maxalloc=100.
 */
static int
splat_taskq_test8(struct file *file, void *arg)
{
	int rc;

	rc = splat_taskq_test8_common(file, arg, 1, 100);

	return rc;
}
1134 * Create a taskq and dispatch a number of delayed tasks to the queue.
1135 * For each task verify that it was run no early than requested.
1138 splat_taskq_test9_func(void *arg
)
1140 splat_taskq_arg_t
*tq_arg
= (splat_taskq_arg_t
*)arg
;
1143 if (ddi_get_lbolt() >= tq_arg
->expire
)
1144 atomic_inc(tq_arg
->count
);
1146 kmem_free(tq_arg
, sizeof(splat_taskq_arg_t
));
1150 splat_taskq_test9(struct file
*file
, void *arg
)
1159 splat_vprint(file
, SPLAT_TASKQ_TEST9_NAME
,
1160 "Taskq '%s' creating (%s dispatch) (%d/%d/%d)\n",
1161 SPLAT_TASKQ_TEST9_NAME
, "delay", minalloc
, maxalloc
, nr_tasks
);
1162 if ((tq
= taskq_create(SPLAT_TASKQ_TEST9_NAME
, 3, maxclsyspri
,
1163 minalloc
, maxalloc
, TASKQ_PREPOPULATE
)) == NULL
) {
1164 splat_vprint(file
, SPLAT_TASKQ_TEST9_NAME
,
1165 "Taskq '%s' create failed\n", SPLAT_TASKQ_TEST9_NAME
);
1169 atomic_set(&count
, 0);
1171 for (i
= 1; i
<= nr_tasks
; i
++) {
1172 splat_taskq_arg_t
*tq_arg
;
1176 /* A random timeout in jiffies of at most 5 seconds */
1177 get_random_bytes((void *)&rnd
, 4);
1178 rnd
= rnd
% (5 * HZ
);
1180 tq_arg
= kmem_alloc(sizeof(splat_taskq_arg_t
), KM_SLEEP
);
1181 tq_arg
->file
= file
;
1182 tq_arg
->name
= SPLAT_TASKQ_TEST9_NAME
;
1183 tq_arg
->expire
= ddi_get_lbolt() + rnd
;
1184 tq_arg
->count
= &count
;
1186 splat_vprint(file
, SPLAT_TASKQ_TEST9_NAME
,
1187 "Taskq '%s' delay dispatch %u jiffies\n",
1188 SPLAT_TASKQ_TEST9_NAME
, rnd
);
1190 id
= taskq_dispatch_delay(tq
, splat_taskq_test9_func
,
1191 tq_arg
, TQ_SLEEP
, ddi_get_lbolt() + rnd
);
1194 splat_vprint(file
, SPLAT_TASKQ_TEST9_NAME
,
1195 "Taskq '%s' delay dispatch failed\n",
1196 SPLAT_TASKQ_TEST9_NAME
);
1197 kmem_free(tq_arg
, sizeof(splat_taskq_arg_t
));
1204 splat_vprint(file
, SPLAT_TASKQ_TEST9_NAME
, "Taskq '%s' waiting for "
1205 "%d delay dispatches\n", SPLAT_TASKQ_TEST9_NAME
, nr_tasks
);
1208 if (atomic_read(&count
) != nr_tasks
)
1211 splat_vprint(file
, SPLAT_TASKQ_TEST9_NAME
, "Taskq '%s' %d/%d delay "
1212 "dispatches finished on time\n", SPLAT_TASKQ_TEST9_NAME
,
1213 atomic_read(&count
), nr_tasks
);
1214 splat_vprint(file
, SPLAT_TASKQ_TEST9_NAME
, "Taskq '%s' destroying\n",
1215 SPLAT_TASKQ_TEST9_NAME
);
1223 * Create a taskq and dispatch then cancel tasks in the queue.
1226 splat_taskq_test10_func(void *arg
)
1228 splat_taskq_arg_t
*tq_arg
= (splat_taskq_arg_t
*)arg
;
1231 if (ddi_get_lbolt() >= tq_arg
->expire
)
1232 atomic_inc(tq_arg
->count
);
1234 /* Randomly sleep to further perturb the system */
1235 get_random_bytes((void *)&rnd
, 1);
1236 msleep(1 + (rnd
% 9));
1240 splat_taskq_test10(struct file
*file
, void *arg
)
1243 splat_taskq_arg_t
**tqas
;
1252 unsigned long start
, cancel
;
1254 tqas
= vmalloc(sizeof(*tqas
) * nr_tasks
);
1257 memset(tqas
, 0, sizeof(*tqas
) * nr_tasks
);
1259 splat_vprint(file
, SPLAT_TASKQ_TEST10_NAME
,
1260 "Taskq '%s' creating (%s dispatch) (%d/%d/%d)\n",
1261 SPLAT_TASKQ_TEST10_NAME
, "delay", minalloc
, maxalloc
, nr_tasks
);
1262 if ((tq
= taskq_create(SPLAT_TASKQ_TEST10_NAME
, 3, maxclsyspri
,
1263 minalloc
, maxalloc
, TASKQ_PREPOPULATE
)) == NULL
) {
1264 splat_vprint(file
, SPLAT_TASKQ_TEST10_NAME
,
1265 "Taskq '%s' create failed\n", SPLAT_TASKQ_TEST10_NAME
);
1270 atomic_set(&count
, 0);
1272 for (i
= 0; i
< nr_tasks
; i
++) {
1273 splat_taskq_arg_t
*tq_arg
;
1276 /* A random timeout in jiffies of at most 5 seconds */
1277 get_random_bytes((void *)&rnd
, 4);
1278 rnd
= rnd
% (5 * HZ
);
1280 tq_arg
= kmem_alloc(sizeof(splat_taskq_arg_t
), KM_SLEEP
);
1281 tq_arg
->file
= file
;
1282 tq_arg
->name
= SPLAT_TASKQ_TEST10_NAME
;
1283 tq_arg
->count
= &count
;
1287 * Dispatch every 1/3 one immediately to mix it up, the cancel
1288 * code is inherently racy and we want to try and provoke any
1289 * subtle concurrently issues.
1292 tq_arg
->expire
= ddi_get_lbolt();
1293 tq_arg
->id
= taskq_dispatch(tq
, splat_taskq_test10_func
,
1296 tq_arg
->expire
= ddi_get_lbolt() + rnd
;
1297 tq_arg
->id
= taskq_dispatch_delay(tq
,
1298 splat_taskq_test10_func
,
1299 tq_arg
, TQ_SLEEP
, ddi_get_lbolt() + rnd
);
1302 if (tq_arg
->id
== 0) {
1303 splat_vprint(file
, SPLAT_TASKQ_TEST10_NAME
,
1304 "Taskq '%s' dispatch failed\n",
1305 SPLAT_TASKQ_TEST10_NAME
);
1306 kmem_free(tq_arg
, sizeof(splat_taskq_arg_t
));
1311 splat_vprint(file
, SPLAT_TASKQ_TEST10_NAME
,
1312 "Taskq '%s' dispatch %lu in %lu jiffies\n",
1313 SPLAT_TASKQ_TEST10_NAME
, (unsigned long)tq_arg
->id
,
1314 !(i
% 3) ? 0 : tq_arg
->expire
- ddi_get_lbolt());
1319 * Start randomly canceling tasks for the duration of the test. We
1320 * happen to know the valid task id's will be in the range 1..nr_tasks
1321 * because the taskq is private and was just created. However, we
1322 * have no idea of a particular task has already executed or not.
1324 splat_vprint(file
, SPLAT_TASKQ_TEST10_NAME
, "Taskq '%s' randomly "
1325 "canceling task ids\n", SPLAT_TASKQ_TEST10_NAME
);
1327 start
= ddi_get_lbolt();
1330 while (ddi_get_lbolt() < start
+ 5 * HZ
) {
1335 cancel
= ddi_get_lbolt();
1336 get_random_bytes((void *)&rnd
, 4);
1337 id
= 1 + (rnd
% nr_tasks
);
1338 rc
= taskq_cancel_id(tq
, id
);
1341 * Keep track of the results of the random cancels.
1345 } else if (rc
== ENOENT
) {
1347 } else if (rc
== EBUSY
) {
1355 * Verify we never get blocked to long in taskq_cancel_id().
1356 * The worst case is 10ms if we happen to cancel the task
1357 * which is currently executing. We allow a factor of 2x.
1359 if (ddi_get_lbolt() - cancel
> HZ
/ 50) {
1360 splat_vprint(file
, SPLAT_TASKQ_TEST10_NAME
,
1361 "Taskq '%s' cancel for %lu took %lu\n",
1362 SPLAT_TASKQ_TEST10_NAME
, (unsigned long)id
,
1363 ddi_get_lbolt() - cancel
);
1368 get_random_bytes((void *)&rnd
, 4);
1369 msleep(1 + (rnd
% 100));
1376 * Cross check the results of taskq_cancel_id() with the number of
1377 * times the dispatched function actually ran successfully.
1379 if ((rc
== 0) && (nr_tasks
- canceled
!= atomic_read(&count
)))
1382 splat_vprint(file
, SPLAT_TASKQ_TEST10_NAME
, "Taskq '%s' %d attempts, "
1383 "%d canceled, %d completed, %d blocked, %d/%d tasks run\n",
1384 SPLAT_TASKQ_TEST10_NAME
, i
, canceled
, completed
, blocked
,
1385 atomic_read(&count
), nr_tasks
);
1386 splat_vprint(file
, SPLAT_TASKQ_TEST10_NAME
, "Taskq '%s' destroying %d\n",
1387 SPLAT_TASKQ_TEST10_NAME
, rc
);
1391 for (j
= 0; j
< nr_tasks
&& tqas
[j
] != NULL
; j
++)
1392 kmem_free(tqas
[j
], sizeof(splat_taskq_arg_t
));
1399 splat_taskq_init(void)
1401 splat_subsystem_t
*sub
;
1403 sub
= kmalloc(sizeof(*sub
), GFP_KERNEL
);
1407 memset(sub
, 0, sizeof(*sub
));
1408 strncpy(sub
->desc
.name
, SPLAT_TASKQ_NAME
, SPLAT_NAME_SIZE
);
1409 strncpy(sub
->desc
.desc
, SPLAT_TASKQ_DESC
, SPLAT_DESC_SIZE
);
1410 INIT_LIST_HEAD(&sub
->subsystem_list
);
1411 INIT_LIST_HEAD(&sub
->test_list
);
1412 spin_lock_init(&sub
->test_lock
);
1413 sub
->desc
.id
= SPLAT_SUBSYSTEM_TASKQ
;
1415 SPLAT_TEST_INIT(sub
, SPLAT_TASKQ_TEST1_NAME
, SPLAT_TASKQ_TEST1_DESC
,
1416 SPLAT_TASKQ_TEST1_ID
, splat_taskq_test1
);
1417 SPLAT_TEST_INIT(sub
, SPLAT_TASKQ_TEST2_NAME
, SPLAT_TASKQ_TEST2_DESC
,
1418 SPLAT_TASKQ_TEST2_ID
, splat_taskq_test2
);
1419 SPLAT_TEST_INIT(sub
, SPLAT_TASKQ_TEST3_NAME
, SPLAT_TASKQ_TEST3_DESC
,
1420 SPLAT_TASKQ_TEST3_ID
, splat_taskq_test3
);
1421 SPLAT_TEST_INIT(sub
, SPLAT_TASKQ_TEST4_NAME
, SPLAT_TASKQ_TEST4_DESC
,
1422 SPLAT_TASKQ_TEST4_ID
, splat_taskq_test4
);
1423 SPLAT_TEST_INIT(sub
, SPLAT_TASKQ_TEST5_NAME
, SPLAT_TASKQ_TEST5_DESC
,
1424 SPLAT_TASKQ_TEST5_ID
, splat_taskq_test5
);
1425 SPLAT_TEST_INIT(sub
, SPLAT_TASKQ_TEST6_NAME
, SPLAT_TASKQ_TEST6_DESC
,
1426 SPLAT_TASKQ_TEST6_ID
, splat_taskq_test6
);
1427 SPLAT_TEST_INIT(sub
, SPLAT_TASKQ_TEST7_NAME
, SPLAT_TASKQ_TEST7_DESC
,
1428 SPLAT_TASKQ_TEST7_ID
, splat_taskq_test7
);
1429 SPLAT_TEST_INIT(sub
, SPLAT_TASKQ_TEST8_NAME
, SPLAT_TASKQ_TEST8_DESC
,
1430 SPLAT_TASKQ_TEST8_ID
, splat_taskq_test8
);
1431 SPLAT_TEST_INIT(sub
, SPLAT_TASKQ_TEST9_NAME
, SPLAT_TASKQ_TEST9_DESC
,
1432 SPLAT_TASKQ_TEST9_ID
, splat_taskq_test9
);
1433 SPLAT_TEST_INIT(sub
, SPLAT_TASKQ_TEST10_NAME
, SPLAT_TASKQ_TEST10_DESC
,
1434 SPLAT_TASKQ_TEST10_ID
, splat_taskq_test10
);
1440 splat_taskq_fini(splat_subsystem_t
*sub
)
1443 SPLAT_TEST_FINI(sub
, SPLAT_TASKQ_TEST10_ID
);
1444 SPLAT_TEST_FINI(sub
, SPLAT_TASKQ_TEST9_ID
);
1445 SPLAT_TEST_FINI(sub
, SPLAT_TASKQ_TEST8_ID
);
1446 SPLAT_TEST_FINI(sub
, SPLAT_TASKQ_TEST7_ID
);
1447 SPLAT_TEST_FINI(sub
, SPLAT_TASKQ_TEST6_ID
);
1448 SPLAT_TEST_FINI(sub
, SPLAT_TASKQ_TEST5_ID
);
1449 SPLAT_TEST_FINI(sub
, SPLAT_TASKQ_TEST4_ID
);
1450 SPLAT_TEST_FINI(sub
, SPLAT_TASKQ_TEST3_ID
);
1451 SPLAT_TEST_FINI(sub
, SPLAT_TASKQ_TEST2_ID
);
1452 SPLAT_TEST_FINI(sub
, SPLAT_TASKQ_TEST1_ID
);
1458 splat_taskq_id(void) {
1459 return SPLAT_SUBSYSTEM_TASKQ
;