]> git.proxmox.com Git - mirror_spl.git/blob - module/splat/splat-taskq.c
splat taskq:order: Reduce stack frame
[mirror_spl.git] / module / splat / splat-taskq.c
1 /*****************************************************************************\
2 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
3 * Copyright (C) 2007 The Regents of the University of California.
4 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
5 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
6 * UCRL-CODE-235197
7 *
8 * This file is part of the SPL, Solaris Porting Layer.
9 * For details, see <http://github.com/behlendorf/spl/>.
10 *
11 * The SPL is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 *
16 * The SPL is distributed in the hope that it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 * for more details.
20 *
21 * You should have received a copy of the GNU General Public License along
22 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
23 *****************************************************************************
24 * Solaris Porting LAyer Tests (SPLAT) Task Queue Tests.
25 \*****************************************************************************/
26
27 #include <sys/taskq.h>
28 #include <sys/random.h>
29 #include <sys/kmem.h>
30 #include "splat-internal.h"
31
32 #define SPLAT_TASKQ_NAME "taskq"
33 #define SPLAT_TASKQ_DESC "Kernel Task Queue Tests"
34
35 #define SPLAT_TASKQ_TEST1_ID 0x0201
36 #define SPLAT_TASKQ_TEST1_NAME "single"
37 #define SPLAT_TASKQ_TEST1_DESC "Single task queue, single task"
38
39 #define SPLAT_TASKQ_TEST2_ID 0x0202
40 #define SPLAT_TASKQ_TEST2_NAME "multiple"
41 #define SPLAT_TASKQ_TEST2_DESC "Multiple task queues, multiple tasks"
42
43 #define SPLAT_TASKQ_TEST3_ID 0x0203
44 #define SPLAT_TASKQ_TEST3_NAME "system"
45 #define SPLAT_TASKQ_TEST3_DESC "System task queue, multiple tasks"
46
47 #define SPLAT_TASKQ_TEST4_ID 0x0204
48 #define SPLAT_TASKQ_TEST4_NAME "wait"
49 #define SPLAT_TASKQ_TEST4_DESC "Multiple task waiting"
50
51 #define SPLAT_TASKQ_TEST5_ID 0x0205
52 #define SPLAT_TASKQ_TEST5_NAME "order"
53 #define SPLAT_TASKQ_TEST5_DESC "Correct task ordering"
54
55 #define SPLAT_TASKQ_TEST6_ID 0x0206
56 #define SPLAT_TASKQ_TEST6_NAME "front"
57 #define SPLAT_TASKQ_TEST6_DESC "Correct ordering with TQ_FRONT flag"
58
59 #define SPLAT_TASKQ_TEST7_ID 0x0207
60 #define SPLAT_TASKQ_TEST7_NAME "recurse"
61 #define SPLAT_TASKQ_TEST7_DESC "Single task queue, recursive dispatch"
62
63 #define SPLAT_TASKQ_TEST8_ID 0x0208
64 #define SPLAT_TASKQ_TEST8_NAME "contention"
65 #define SPLAT_TASKQ_TEST8_DESC "1 queue, 100 threads, 131072 tasks"
66
67 #define SPLAT_TASKQ_TEST9_ID 0x0209
68 #define SPLAT_TASKQ_TEST9_NAME "delay"
69 #define SPLAT_TASKQ_TEST9_DESC "Delayed task execution"
70
71 #define SPLAT_TASKQ_TEST10_ID 0x020a
72 #define SPLAT_TASKQ_TEST10_NAME "cancel"
73 #define SPLAT_TASKQ_TEST10_DESC "Cancel task execution"
74
75 #define SPLAT_TASKQ_ORDER_MAX 8
76 #define SPLAT_TASKQ_DEPTH_MAX 16
77
78
/*
 * Shared state handed to the taskq test functions.  One instance is
 * typically stack allocated by the dispatching test and referenced by
 * every task it queues.  Not all fields are used by every test.
 */
typedef struct splat_taskq_arg {
	int flag;			/* Completion flag / next order[] slot */
	int id;				/* Queue or task identifier */
	atomic_t *count;		/* Shared completion counter (tests 4/8/9) */
	int order[SPLAT_TASKQ_ORDER_MAX];	/* Observed completion order (tests 5/6) */
	unsigned int depth;		/* Recursion depth (test 7) */
	unsigned long expire;		/* Earliest allowed run time in jiffies (test 9) */
	taskq_t *tq;			/* Taskq to re-dispatch into (test 7) */
	taskq_ent_t *tqe;		/* Preallocated entry for re-dispatch (test 7) */
	spinlock_t lock;		/* Protects order[] and flag */
	struct file *file;		/* Output handle for splat_vprint() */
	const char *name;		/* Test name used in log messages */
} splat_taskq_arg_t;
92
/*
 * Binds the taskqid a dispatch is expected to receive to the shared
 * argument block; used by the ordering tests (5 and 6).
 */
typedef struct splat_taskq_id {
	int id;				/* Expected taskqid for this dispatch */
	splat_taskq_arg_t *arg;		/* Shared test state */
} splat_taskq_id_t;
97
98 /*
99 * Create a taskq, queue a task, wait until task completes, ensure
100 * task ran properly, cleanup taskq.
101 */
102 static void
103 splat_taskq_test13_func(void *arg)
104 {
105 splat_taskq_arg_t *tq_arg = (splat_taskq_arg_t *)arg;
106
107 ASSERT(tq_arg);
108 splat_vprint(tq_arg->file, SPLAT_TASKQ_TEST1_NAME,
109 "Taskq '%s' function '%s' setting flag\n",
110 tq_arg->name, sym2str(splat_taskq_test13_func));
111 tq_arg->flag = 1;
112 }
113
/*
 * Worker for the "single" test: create a single-thread taskq, dispatch
 * splat_taskq_test13_func() once using either dynamic or preallocated
 * entry dispatch, wait for completion, and verify the flag was set.
 * Returns 0 on success or -EINVAL on any failure.
 */
static int
splat_taskq_test1_impl(struct file *file, void *arg, boolean_t prealloc)
{
	taskq_t *tq;
	taskqid_t id;
	splat_taskq_arg_t tq_arg;
	taskq_ent_t tqe;

	taskq_init_ent(&tqe);

	splat_vprint(file, SPLAT_TASKQ_TEST1_NAME,
	    "Taskq '%s' creating (%s dispatch)\n",
	    SPLAT_TASKQ_TEST1_NAME,
	    prealloc ? "prealloc" : "dynamic");
	if ((tq = taskq_create(SPLAT_TASKQ_TEST1_NAME, 1, maxclsyspri,
	    50, INT_MAX, TASKQ_PREPOPULATE)) == NULL) {
		splat_vprint(file, SPLAT_TASKQ_TEST1_NAME,
		    "Taskq '%s' create failed\n",
		    SPLAT_TASKQ_TEST1_NAME);
		return -EINVAL;
	}

	tq_arg.flag = 0;
	tq_arg.id = 0;
	tq_arg.file = file;
	tq_arg.name = SPLAT_TASKQ_TEST1_NAME;

	splat_vprint(file, SPLAT_TASKQ_TEST1_NAME,
	    "Taskq '%s' function '%s' dispatching\n",
	    tq_arg.name, sym2str(splat_taskq_test13_func));
	if (prealloc) {
		/* Caller-supplied entry; the id is read back from it. */
		taskq_dispatch_ent(tq, splat_taskq_test13_func,
		    &tq_arg, TQ_SLEEP, &tqe);
		id = tqe.tqent_id;
	} else {
		id = taskq_dispatch(tq, splat_taskq_test13_func,
		    &tq_arg, TQ_SLEEP);
	}

	/* A zero taskqid indicates the dispatch failed. */
	if (id == 0) {
		splat_vprint(file, SPLAT_TASKQ_TEST1_NAME,
		    "Taskq '%s' function '%s' dispatch failed\n",
		    tq_arg.name, sym2str(splat_taskq_test13_func));
		taskq_destroy(tq);
		return -EINVAL;
	}

	splat_vprint(file, SPLAT_TASKQ_TEST1_NAME, "Taskq '%s' waiting\n",
	    tq_arg.name);
	taskq_wait(tq);
	splat_vprint(file, SPLAT_TASKQ_TEST1_NAME, "Taskq '%s' destroying\n",
	    tq_arg.name);

	taskq_destroy(tq);

	/* The task sets flag = 1; zero means it never ran. */
	return (tq_arg.flag) ? 0 : -EINVAL;
}
171
172 static int
173 splat_taskq_test1(struct file *file, void *arg)
174 {
175 int rc;
176
177 rc = splat_taskq_test1_impl(file, arg, B_FALSE);
178 if (rc)
179 return rc;
180
181 rc = splat_taskq_test1_impl(file, arg, B_TRUE);
182
183 return rc;
184 }
185
186 /*
187 * Create multiple taskq's, each with multiple tasks, wait until
188 * all tasks complete, ensure all tasks ran properly and in the
189 * correct order. Run order must be the same as the order submitted
190 * because we only have 1 thread per taskq. Finally cleanup the taskq.
191 */
192 static void
193 splat_taskq_test2_func1(void *arg)
194 {
195 splat_taskq_arg_t *tq_arg = (splat_taskq_arg_t *)arg;
196
197 ASSERT(tq_arg);
198 splat_vprint(tq_arg->file, SPLAT_TASKQ_TEST2_NAME,
199 "Taskq '%s/%d' function '%s' flag = %d = %d * 2\n",
200 tq_arg->name, tq_arg->id,
201 sym2str(splat_taskq_test2_func1),
202 tq_arg->flag * 2, tq_arg->flag);
203 tq_arg->flag *= 2;
204 }
205
206 static void
207 splat_taskq_test2_func2(void *arg)
208 {
209 splat_taskq_arg_t *tq_arg = (splat_taskq_arg_t *)arg;
210
211 ASSERT(tq_arg);
212 splat_vprint(tq_arg->file, SPLAT_TASKQ_TEST2_NAME,
213 "Taskq '%s/%d' function '%s' flag = %d = %d + 1\n",
214 tq_arg->name, tq_arg->id,
215 sym2str(splat_taskq_test2_func2),
216 tq_arg->flag + 1, tq_arg->flag);
217 tq_arg->flag += 1;
218 }
219
220 #define TEST2_TASKQS 8
221 #define TEST2_THREADS_PER_TASKQ 1
222
223 static int
224 splat_taskq_test2_impl(struct file *file, void *arg, boolean_t prealloc) {
225 taskq_t *tq[TEST2_TASKQS] = { NULL };
226 taskqid_t id;
227 splat_taskq_arg_t tq_args[TEST2_TASKQS];
228 taskq_ent_t *func1_tqes = NULL;
229 taskq_ent_t *func2_tqes = NULL;
230 int i, rc = 0;
231
232 func1_tqes = kmalloc(sizeof(*func1_tqes) * TEST2_TASKQS, GFP_KERNEL);
233 if (func1_tqes == NULL) {
234 rc = -ENOMEM;
235 goto out;
236 }
237
238 func2_tqes = kmalloc(sizeof(*func2_tqes) * TEST2_TASKQS, GFP_KERNEL);
239 if (func2_tqes == NULL) {
240 rc = -ENOMEM;
241 goto out;
242 }
243
244 for (i = 0; i < TEST2_TASKQS; i++) {
245 taskq_init_ent(&func1_tqes[i]);
246 taskq_init_ent(&func2_tqes[i]);
247
248 splat_vprint(file, SPLAT_TASKQ_TEST2_NAME,
249 "Taskq '%s/%d' creating (%s dispatch)\n",
250 SPLAT_TASKQ_TEST2_NAME, i,
251 prealloc ? "prealloc" : "dynamic");
252 if ((tq[i] = taskq_create(SPLAT_TASKQ_TEST2_NAME,
253 TEST2_THREADS_PER_TASKQ,
254 maxclsyspri, 50, INT_MAX,
255 TASKQ_PREPOPULATE)) == NULL) {
256 splat_vprint(file, SPLAT_TASKQ_TEST2_NAME,
257 "Taskq '%s/%d' create failed\n",
258 SPLAT_TASKQ_TEST2_NAME, i);
259 rc = -EINVAL;
260 break;
261 }
262
263 tq_args[i].flag = i;
264 tq_args[i].id = i;
265 tq_args[i].file = file;
266 tq_args[i].name = SPLAT_TASKQ_TEST2_NAME;
267
268 splat_vprint(file, SPLAT_TASKQ_TEST2_NAME,
269 "Taskq '%s/%d' function '%s' dispatching\n",
270 tq_args[i].name, tq_args[i].id,
271 sym2str(splat_taskq_test2_func1));
272 if (prealloc) {
273 taskq_dispatch_ent(tq[i], splat_taskq_test2_func1,
274 &tq_args[i], TQ_SLEEP, &func1_tqes[i]);
275 id = func1_tqes[i].tqent_id;
276 } else {
277 id = taskq_dispatch(tq[i], splat_taskq_test2_func1,
278 &tq_args[i], TQ_SLEEP);
279 }
280
281 if (id == 0) {
282 splat_vprint(file, SPLAT_TASKQ_TEST2_NAME,
283 "Taskq '%s/%d' function '%s' dispatch "
284 "failed\n", tq_args[i].name, tq_args[i].id,
285 sym2str(splat_taskq_test2_func1));
286 rc = -EINVAL;
287 break;
288 }
289
290 splat_vprint(file, SPLAT_TASKQ_TEST2_NAME,
291 "Taskq '%s/%d' function '%s' dispatching\n",
292 tq_args[i].name, tq_args[i].id,
293 sym2str(splat_taskq_test2_func2));
294 if (prealloc) {
295 taskq_dispatch_ent(tq[i], splat_taskq_test2_func2,
296 &tq_args[i], TQ_SLEEP, &func2_tqes[i]);
297 id = func2_tqes[i].tqent_id;
298 } else {
299 id = taskq_dispatch(tq[i], splat_taskq_test2_func2,
300 &tq_args[i], TQ_SLEEP);
301 }
302
303 if (id == 0) {
304 splat_vprint(file, SPLAT_TASKQ_TEST2_NAME, "Taskq "
305 "'%s/%d' function '%s' dispatch failed\n",
306 tq_args[i].name, tq_args[i].id,
307 sym2str(splat_taskq_test2_func2));
308 rc = -EINVAL;
309 break;
310 }
311 }
312
313 /* When rc is set we're effectively just doing cleanup here, so
314 * ignore new errors in that case. They just cause noise. */
315 for (i = 0; i < TEST2_TASKQS; i++) {
316 if (tq[i] != NULL) {
317 splat_vprint(file, SPLAT_TASKQ_TEST2_NAME,
318 "Taskq '%s/%d' waiting\n",
319 tq_args[i].name, tq_args[i].id);
320 taskq_wait(tq[i]);
321 splat_vprint(file, SPLAT_TASKQ_TEST2_NAME,
322 "Taskq '%s/%d; destroying\n",
323 tq_args[i].name, tq_args[i].id);
324
325 taskq_destroy(tq[i]);
326
327 if (!rc && tq_args[i].flag != ((i * 2) + 1)) {
328 splat_vprint(file, SPLAT_TASKQ_TEST2_NAME,
329 "Taskq '%s/%d' processed tasks "
330 "out of order; %d != %d\n",
331 tq_args[i].name, tq_args[i].id,
332 tq_args[i].flag, i * 2 + 1);
333 rc = -EINVAL;
334 } else {
335 splat_vprint(file, SPLAT_TASKQ_TEST2_NAME,
336 "Taskq '%s/%d' processed tasks "
337 "in the correct order; %d == %d\n",
338 tq_args[i].name, tq_args[i].id,
339 tq_args[i].flag, i * 2 + 1);
340 }
341 }
342 }
343 out:
344 if (func1_tqes)
345 kfree(func1_tqes);
346
347 if (func2_tqes)
348 kfree(func2_tqes);
349
350 return rc;
351 }
352
353 static int
354 splat_taskq_test2(struct file *file, void *arg) {
355 int rc;
356
357 rc = splat_taskq_test2_impl(file, arg, B_FALSE);
358 if (rc)
359 return rc;
360
361 rc = splat_taskq_test2_impl(file, arg, B_TRUE);
362
363 return rc;
364 }
365
366 /*
367 * Use the global system task queue with a single task, wait until task
368 * completes, ensure task ran properly.
369 */
/*
 * Worker for the "system" test: dispatch splat_taskq_test13_func() to
 * the global system_taskq (no create/destroy here), wait for it, and
 * verify the flag was set.  Returns 0 on success or -EINVAL.
 */
static int
splat_taskq_test3_impl(struct file *file, void *arg, boolean_t prealloc)
{
	taskqid_t id;
	splat_taskq_arg_t tq_arg;
	taskq_ent_t tqe;

	taskq_init_ent(&tqe);

	tq_arg.flag = 0;
	tq_arg.id = 0;
	tq_arg.file = file;
	tq_arg.name = SPLAT_TASKQ_TEST3_NAME;

	splat_vprint(file, SPLAT_TASKQ_TEST3_NAME,
	    "Taskq '%s' function '%s' %s dispatch\n",
	    tq_arg.name, sym2str(splat_taskq_test13_func),
	    prealloc ? "prealloc" : "dynamic");
	if (prealloc) {
		/* Caller-supplied entry; the id is read back from it. */
		taskq_dispatch_ent(system_taskq, splat_taskq_test13_func,
		    &tq_arg, TQ_SLEEP, &tqe);
		id = tqe.tqent_id;
	} else {
		id = taskq_dispatch(system_taskq, splat_taskq_test13_func,
		    &tq_arg, TQ_SLEEP);
	}

	/* A zero taskqid indicates the dispatch failed. */
	if (id == 0) {
		splat_vprint(file, SPLAT_TASKQ_TEST3_NAME,
		    "Taskq '%s' function '%s' dispatch failed\n",
		    tq_arg.name, sym2str(splat_taskq_test13_func));
		return -EINVAL;
	}

	splat_vprint(file, SPLAT_TASKQ_TEST3_NAME, "Taskq '%s' waiting\n",
	    tq_arg.name);
	taskq_wait(system_taskq);

	/* The task sets flag = 1; zero means it never ran. */
	return (tq_arg.flag) ? 0 : -EINVAL;
}
410
411 static int
412 splat_taskq_test3(struct file *file, void *arg)
413 {
414 int rc;
415
416 rc = splat_taskq_test3_impl(file, arg, B_FALSE);
417 if (rc)
418 return rc;
419
420 rc = splat_taskq_test3_impl(file, arg, B_TRUE);
421
422 return rc;
423 }
424
425 /*
426 * Create a taskq and dispatch a large number of tasks to the queue.
427 * Then use taskq_wait() to block until all the tasks complete, then
428 * cross check that all the tasks ran by checking the shared atomic
429 * counter which is incremented in the task function.
430 *
431 * First we try with a large 'maxalloc' value, then we try with a small one.
432 * We should not drop tasks when TQ_SLEEP is used in taskq_dispatch(), even
433 * if the number of pending tasks is above maxalloc.
434 */
435 static void
436 splat_taskq_test4_func(void *arg)
437 {
438 splat_taskq_arg_t *tq_arg = (splat_taskq_arg_t *)arg;
439 ASSERT(tq_arg);
440
441 atomic_inc(tq_arg->count);
442 }
443
/*
 * Worker for the "wait" test: create a single-thread taskq with the
 * given minalloc/maxalloc, then in rounds of 1, 2, 4, ... nr_tasks
 * dispatch that many trivial counting tasks, taskq_wait(), and check
 * the atomic counter matches the number dispatched.  A small maxalloc
 * verifies TQ_SLEEP dispatches are never dropped when the pending task
 * count exceeds it.  Returns 0 on success or a negative errno.
 */
static int
splat_taskq_test4_common(struct file *file, void *arg, int minalloc,
    int maxalloc, int nr_tasks, boolean_t prealloc)
{
	taskq_t *tq;
	taskqid_t id;
	splat_taskq_arg_t tq_arg;
	taskq_ent_t *tqes;
	atomic_t count;
	int i, j, rc = 0;

	/* One preallocated entry per possible task in the largest round. */
	tqes = kmalloc(sizeof(*tqes) * nr_tasks, GFP_KERNEL);
	if (tqes == NULL)
		return -ENOMEM;

	splat_vprint(file, SPLAT_TASKQ_TEST4_NAME,
	    "Taskq '%s' creating (%s dispatch) (%d/%d/%d)\n",
	    SPLAT_TASKQ_TEST4_NAME,
	    prealloc ? "prealloc" : "dynamic",
	    minalloc, maxalloc, nr_tasks);
	if ((tq = taskq_create(SPLAT_TASKQ_TEST4_NAME, 1, maxclsyspri,
	    minalloc, maxalloc, TASKQ_PREPOPULATE)) == NULL) {
		splat_vprint(file, SPLAT_TASKQ_TEST4_NAME,
		    "Taskq '%s' create failed\n",
		    SPLAT_TASKQ_TEST4_NAME);
		rc = -EINVAL;
		goto out_free;
	}

	tq_arg.file = file;
	tq_arg.name = SPLAT_TASKQ_TEST4_NAME;
	tq_arg.count = &count;

	/* Rounds double in size: 1, 2, 4, ... up to nr_tasks. */
	for (i = 1; i <= nr_tasks; i *= 2) {
		atomic_set(tq_arg.count, 0);
		splat_vprint(file, SPLAT_TASKQ_TEST4_NAME,
		    "Taskq '%s' function '%s' dispatched %d times\n",
		    tq_arg.name, sym2str(splat_taskq_test4_func), i);

		for (j = 0; j < i; j++) {
			taskq_init_ent(&tqes[j]);

			if (prealloc) {
				taskq_dispatch_ent(tq, splat_taskq_test4_func,
				    &tq_arg, TQ_SLEEP, &tqes[j]);
				id = tqes[j].tqent_id;
			} else {
				id = taskq_dispatch(tq, splat_taskq_test4_func,
				    &tq_arg, TQ_SLEEP);
			}

			/* A zero taskqid indicates the dispatch failed. */
			if (id == 0) {
				splat_vprint(file, SPLAT_TASKQ_TEST4_NAME,
				    "Taskq '%s' function '%s' dispatch "
				    "%d failed\n", tq_arg.name,
				    sym2str(splat_taskq_test4_func), j);
				rc = -EINVAL;
				goto out;
			}
		}

		splat_vprint(file, SPLAT_TASKQ_TEST4_NAME, "Taskq '%s' "
		    "waiting for %d dispatches\n", tq_arg.name, i);
		taskq_wait(tq);
		splat_vprint(file, SPLAT_TASKQ_TEST4_NAME, "Taskq '%s' "
		    "%d/%d dispatches finished\n", tq_arg.name,
		    atomic_read(&count), i);

		/* Every dispatched task must have run exactly once. */
		if (atomic_read(&count) != i) {
			rc = -ERANGE;
			goto out;

		}
	}
out:
	splat_vprint(file, SPLAT_TASKQ_TEST4_NAME, "Taskq '%s' destroying\n",
	    tq_arg.name);
	taskq_destroy(tq);

out_free:
	kfree(tqes);

	return rc;
}
527
528 static int
529 splat_taskq_test4_impl(struct file *file, void *arg, boolean_t prealloc)
530 {
531 int rc;
532
533 rc = splat_taskq_test4_common(file, arg, 50, INT_MAX, 1024, prealloc);
534 if (rc)
535 return rc;
536
537 rc = splat_taskq_test4_common(file, arg, 1, 1, 32, prealloc);
538
539 return rc;
540 }
541
542 static int
543 splat_taskq_test4(struct file *file, void *arg)
544 {
545 int rc;
546
547 rc = splat_taskq_test4_impl(file, arg, B_FALSE);
548 if (rc)
549 return rc;
550
551 rc = splat_taskq_test4_impl(file, arg, B_TRUE);
552
553 return rc;
554 }
555
556 /*
557 * Create a taskq and dispatch a specific sequence of tasks carefully
558 * crafted to validate the order in which tasks are processed. When
559 * there are multiple worker threads each thread will process the
560 * next pending task as soon as it completes its current task. This
561 * means that tasks do not strictly complete in order in which they
562 * were dispatched (increasing task id). This is fine but we need to
563 * verify that taskq_wait_all() blocks until the passed task id and all
564 * lower task ids complete. We do this by dispatching the following
565 * specific sequence of tasks each of which block for N time units.
566 * We then use taskq_wait_all() to unblock at specific task id and
567 * verify the only the expected task ids have completed and in the
568 * correct order. The two cases of interest are:
569 *
570 * 1) Task ids larger than the waited for task id can run and
571 * complete as long as there is an available worker thread.
572 * 2) All task ids lower than the waited one must complete before
573 * unblocking even if the waited task id itself has completed.
574 *
575 * The following table shows each task id and how they will be
576 * scheduled. Each rows represent one time unit and each column
577 * one of the three worker threads. The places taskq_wait_all()
578 * must unblock for a specific id are identified as well as the
579 * task ids which must have completed and their order.
580 *
581 * +-----+ <--- taskq_wait_all(tq, 8) unblocks
582 * | | Required Completion Order: 1,2,4,5,3,8,6,7
583 * +-----+ |
584 * | | |
585 * | | +-----+
586 * | | | 8 |
587 * | | +-----+ <--- taskq_wait_all(tq, 3) unblocks
588 * | | 7 | | Required Completion Order: 1,2,4,5,3
589 * | +-----+ |
590 * | 6 | | |
591 * +-----+ | |
592 * | | 5 | |
593 * | +-----+ |
594 * | 4 | | |
595 * +-----+ | |
596 * | 1 | 2 | 3 |
597 * +-----+-----+-----+
598 *
599 */
/*
 * Task body for the "order" test.  Sleeps for a per-task delay taken
 * from the scheduling table in the comment above, then appends its id
 * to the shared order[] array under the lock so the completion order
 * can be validated afterwards.
 */
static void
splat_taskq_test5_func(void *arg)
{
	splat_taskq_id_t *tq_id = (splat_taskq_id_t *)arg;
	splat_taskq_arg_t *tq_arg = tq_id->arg;
	int factor;

	/* Delays determined by above table */
	switch (tq_id->id) {
	default:			factor = 0;	break;
	case 1: case 8:			factor = 1;	break;
	case 2: case 4: case 5:		factor = 2;	break;
	case 6: case 7:			factor = 4;	break;
	case 3:				factor = 5;	break;
	}

	msleep(factor * 100);
	splat_vprint(tq_arg->file, tq_arg->name,
	    "Taskqid %d complete for taskq '%s'\n",
	    tq_id->id, tq_arg->name);

	/* Record completion order; flag doubles as the next free slot. */
	spin_lock(&tq_arg->lock);
	tq_arg->order[tq_arg->flag] = tq_id->id;
	tq_arg->flag++;
	spin_unlock(&tq_arg->lock);
}
626
/*
 * Compare the recorded completion order in tq_arg->order[] against the
 * expected order[] array.  On the first mismatch log both sequences in
 * full and return -EILSEQ; return 0 when they match exactly.
 */
static int
splat_taskq_test_order(splat_taskq_arg_t *tq_arg, int *order)
{
	int i, j;

	for (i = 0; i < SPLAT_TASKQ_ORDER_MAX; i++) {
		if (tq_arg->order[i] != order[i]) {
			splat_vprint(tq_arg->file, tq_arg->name,
			    "Taskq '%s' incorrect completion "
			    "order\n", tq_arg->name);
			splat_vprint(tq_arg->file, tq_arg->name,
			    "%s", "Expected { ");

			for (j = 0; j < SPLAT_TASKQ_ORDER_MAX; j++)
				splat_print(tq_arg->file, "%d ", order[j]);

			splat_print(tq_arg->file, "%s", "}\n");
			splat_vprint(tq_arg->file, tq_arg->name,
			    "%s", "Got { ");

			for (j = 0; j < SPLAT_TASKQ_ORDER_MAX; j++)
				splat_print(tq_arg->file, "%d ",
				    tq_arg->order[j]);

			splat_print(tq_arg->file, "%s", "}\n");
			return -EILSEQ;
		}
	}

	splat_vprint(tq_arg->file, tq_arg->name,
	    "Taskq '%s' validated correct completion order\n",
	    tq_arg->name);

	return 0;
}
662
/*
 * Worker for the "order" test: dispatch 8 carefully timed tasks to a
 * 3-thread taskq, then use taskq_wait_all() at ids 3 and 8 to verify
 * all lower task ids completed, in the order documented in the table
 * above.  Returns 0 on success or a negative errno.
 */
static int
splat_taskq_test5_impl(struct file *file, void *arg, boolean_t prealloc)
{
	taskq_t *tq;
	taskqid_t id;
	splat_taskq_id_t tq_id[SPLAT_TASKQ_ORDER_MAX];
	splat_taskq_arg_t tq_arg;
	int order1[SPLAT_TASKQ_ORDER_MAX] = { 1,2,4,5,3,0,0,0 };
	int order2[SPLAT_TASKQ_ORDER_MAX] = { 1,2,4,5,3,8,6,7 };
	taskq_ent_t *tqes;
	int i, rc = 0;

	/* Entries are heap allocated to keep this stack frame small. */
	tqes = kmem_alloc(sizeof(*tqes) * SPLAT_TASKQ_ORDER_MAX, KM_SLEEP);
	memset(tqes, 0, sizeof(*tqes) * SPLAT_TASKQ_ORDER_MAX);

	splat_vprint(file, SPLAT_TASKQ_TEST5_NAME,
	    "Taskq '%s' creating (%s dispatch)\n",
	    SPLAT_TASKQ_TEST5_NAME,
	    prealloc ? "prealloc" : "dynamic");
	if ((tq = taskq_create(SPLAT_TASKQ_TEST5_NAME, 3, maxclsyspri,
	    50, INT_MAX, TASKQ_PREPOPULATE)) == NULL) {
		splat_vprint(file, SPLAT_TASKQ_TEST5_NAME,
		    "Taskq '%s' create failed\n",
		    SPLAT_TASKQ_TEST5_NAME);
		return -EINVAL;
	}

	tq_arg.flag = 0;
	memset(&tq_arg.order, 0, sizeof(int) * SPLAT_TASKQ_ORDER_MAX);
	spin_lock_init(&tq_arg.lock);
	tq_arg.file = file;
	tq_arg.name = SPLAT_TASKQ_TEST5_NAME;

	for (i = 0; i < SPLAT_TASKQ_ORDER_MAX; i++) {
		taskq_init_ent(&tqes[i]);

		/* Task ids are expected to be assigned 1..8 in order. */
		tq_id[i].id = i + 1;
		tq_id[i].arg = &tq_arg;

		if (prealloc) {
			taskq_dispatch_ent(tq, splat_taskq_test5_func,
			    &tq_id[i], TQ_SLEEP, &tqes[i]);
			id = tqes[i].tqent_id;
		} else {
			id = taskq_dispatch(tq, splat_taskq_test5_func,
			    &tq_id[i], TQ_SLEEP);
		}

		/* A zero taskqid indicates the dispatch failed. */
		if (id == 0) {
			splat_vprint(file, SPLAT_TASKQ_TEST5_NAME,
			    "Taskq '%s' function '%s' dispatch failed\n",
			    tq_arg.name, sym2str(splat_taskq_test5_func));
			rc = -EINVAL;
			goto out;
		}

		/* The assigned id must match the expected sequence. */
		if (tq_id[i].id != id) {
			splat_vprint(file, SPLAT_TASKQ_TEST5_NAME,
			    "Taskq '%s' expected taskqid %d got %d\n",
			    tq_arg.name, (int)tq_id[i].id, (int)id);
			rc = -EINVAL;
			goto out;
		}
	}

	/* Wait for id 3: ids 1,2,4,5,3 must have completed in order. */
	splat_vprint(file, SPLAT_TASKQ_TEST5_NAME, "Taskq '%s' "
	    "waiting for taskqid %d completion\n", tq_arg.name, 3);
	taskq_wait_all(tq, 3);
	if ((rc = splat_taskq_test_order(&tq_arg, order1)))
		goto out;

	/* Wait for id 8: full order 1,2,4,5,3,8,6,7 must be observed. */
	splat_vprint(file, SPLAT_TASKQ_TEST5_NAME, "Taskq '%s' "
	    "waiting for taskqid %d completion\n", tq_arg.name, 8);
	taskq_wait_all(tq, 8);
	rc = splat_taskq_test_order(&tq_arg, order2);

out:
	splat_vprint(file, SPLAT_TASKQ_TEST5_NAME,
	    "Taskq '%s' destroying\n", tq_arg.name);
	taskq_destroy(tq);

	kmem_free(tqes, sizeof(*tqes) * SPLAT_TASKQ_ORDER_MAX);

	return rc;
}
748
749 static int
750 splat_taskq_test5(struct file *file, void *arg)
751 {
752 int rc;
753
754 rc = splat_taskq_test5_impl(file, arg, B_FALSE);
755 if (rc)
756 return rc;
757
758 rc = splat_taskq_test5_impl(file, arg, B_TRUE);
759
760 return rc;
761 }
762
763 /*
764 * Create a single task queue with three threads. Dispatch 8 tasks,
765 * setting TQ_FRONT on only the last three. Sleep after
766 * dispatching tasks 1-3 to ensure they will run and hold the threads
767 * busy while we dispatch the remaining tasks. Verify that tasks 6-8
768 * run before task 4-5.
769 *
770 * The following table shows each task id and how they will be
771 * scheduled. Each rows represent one time unit and each column
772 * one of the three worker threads.
773 *
774 * NB: The Horizontal Line is the LAST Time unit consumed by the Task,
775 * and must be included in the factor calculation.
776 * T
777 * 17-> +-----+
778 * 16 | T6 |
779 * 15-> +-----+ |
780 * 14 | T6 | |
781 * 13-> | | 5 +-----+
782 * 12 | | | T6 |
783 * 11-> | +-----| |
784 * 10 | 4 | T6 | |
785 * 9-> +-----+ | 8 |
786 * 8 | T5 | | |
787 * 7-> | | 7 +-----+
788 * 6 | | | T7 |
789 * 5-> | +-----+ |
790 * 4 | 6 | T5 | |
791 * 3-> +-----+ | |
792 * 2 | T3 | | |
793 * 1 | 1 | 2 | 3 |
794 * 0 +-----+-----+-----+
795 *
796 */
/*
 * Task body for the "front" test.  Sleeps for the per-task delay from
 * the table above, then appends its id to the shared order[] array
 * under the lock for later validation.
 */
static void
splat_taskq_test6_func(void *arg)
{
	/* Delays determined by above table */
	static const int factor[SPLAT_TASKQ_ORDER_MAX+1] = {0,3,5,7,6,6,5,6,6};

	splat_taskq_id_t *tq_id = (splat_taskq_id_t *)arg;
	splat_taskq_arg_t *tq_arg = tq_id->arg;

	splat_vprint(tq_arg->file, tq_arg->name,
	    "Taskqid %d starting for taskq '%s'\n",
	    tq_id->id, tq_arg->name);

	/* Guard against indexing past the delay table. */
	if (tq_id->id < SPLAT_TASKQ_ORDER_MAX+1) {
		msleep(factor[tq_id->id] * 50);
	}

	/* Record completion order; flag doubles as the next free slot. */
	spin_lock(&tq_arg->lock);
	tq_arg->order[tq_arg->flag] = tq_id->id;
	tq_arg->flag++;
	splat_vprint(tq_arg->file, tq_arg->name,
	    "Taskqid %d complete for taskq '%s'\n",
	    tq_id->id, tq_arg->name);
	spin_unlock(&tq_arg->lock);
}
822
/*
 * Worker for the "front" test: dispatch 8 tasks to a 3-thread taskq,
 * marking tasks 6-8 (i > 4) with TQ_FRONT so they jump the queue, and
 * verify the resulting completion order 1,2,3,6,7,8,4,5.  Returns 0
 * on success or a negative errno.
 */
static int
splat_taskq_test6_impl(struct file *file, void *arg, boolean_t prealloc)
{
	taskq_t *tq;
	taskqid_t id;
	splat_taskq_id_t tq_id[SPLAT_TASKQ_ORDER_MAX];
	splat_taskq_arg_t tq_arg;
	int order[SPLAT_TASKQ_ORDER_MAX] = { 1,2,3,6,7,8,4,5 };
	taskq_ent_t tqes[SPLAT_TASKQ_ORDER_MAX];
	int i, rc = 0;
	uint_t tflags;

	splat_vprint(file, SPLAT_TASKQ_TEST6_NAME,
	    "Taskq '%s' creating (%s dispatch)\n",
	    SPLAT_TASKQ_TEST6_NAME,
	    prealloc ? "prealloc" : "dynamic");
	if ((tq = taskq_create(SPLAT_TASKQ_TEST6_NAME, 3, maxclsyspri,
	    50, INT_MAX, TASKQ_PREPOPULATE)) == NULL) {
		splat_vprint(file, SPLAT_TASKQ_TEST6_NAME,
		    "Taskq '%s' create failed\n",
		    SPLAT_TASKQ_TEST6_NAME);
		return -EINVAL;
	}

	tq_arg.flag = 0;
	memset(&tq_arg.order, 0, sizeof(int) * SPLAT_TASKQ_ORDER_MAX);
	spin_lock_init(&tq_arg.lock);
	tq_arg.file = file;
	tq_arg.name = SPLAT_TASKQ_TEST6_NAME;

	for (i = 0; i < SPLAT_TASKQ_ORDER_MAX; i++) {
		taskq_init_ent(&tqes[i]);

		tq_id[i].id = i + 1;
		tq_id[i].arg = &tq_arg;
		/* The last three tasks (6-8) are queued at the front. */
		tflags = TQ_SLEEP;
		if (i > 4)
			tflags |= TQ_FRONT;

		if (prealloc) {
			taskq_dispatch_ent(tq, splat_taskq_test6_func,
			    &tq_id[i], tflags, &tqes[i]);
			id = tqes[i].tqent_id;
		} else {
			id = taskq_dispatch(tq, splat_taskq_test6_func,
			    &tq_id[i], tflags);
		}

		/* A zero taskqid indicates the dispatch failed. */
		if (id == 0) {
			splat_vprint(file, SPLAT_TASKQ_TEST6_NAME,
			    "Taskq '%s' function '%s' dispatch failed\n",
			    tq_arg.name, sym2str(splat_taskq_test6_func));
			rc = -EINVAL;
			goto out;
		}

		/* The assigned id must match the expected sequence. */
		if (tq_id[i].id != id) {
			splat_vprint(file, SPLAT_TASKQ_TEST6_NAME,
			    "Taskq '%s' expected taskqid %d got %d\n",
			    tq_arg.name, (int)tq_id[i].id, (int)id);
			rc = -EINVAL;
			goto out;
		}
		/* Sleep to let tasks 1-3 start executing. */
		if ( i == 2 )
			msleep(100);
	}

	splat_vprint(file, SPLAT_TASKQ_TEST6_NAME, "Taskq '%s' "
	    "waiting for taskqid %d completion\n", tq_arg.name,
	    SPLAT_TASKQ_ORDER_MAX);
	taskq_wait_all(tq, SPLAT_TASKQ_ORDER_MAX);
	rc = splat_taskq_test_order(&tq_arg, order);

out:
	splat_vprint(file, SPLAT_TASKQ_TEST6_NAME,
	    "Taskq '%s' destroying\n", tq_arg.name);
	taskq_destroy(tq);

	return rc;
}
904
905 static int
906 splat_taskq_test6(struct file *file, void *arg)
907 {
908 int rc;
909
910 rc = splat_taskq_test6_impl(file, arg, B_FALSE);
911 if (rc)
912 return rc;
913
914 rc = splat_taskq_test6_impl(file, arg, B_TRUE);
915
916 return rc;
917 }
918
/*
 * Recursive task for the "recurse" test: each invocation increments
 * depth and re-dispatches itself into the same taskq until the depth
 * reaches SPLAT_TASKQ_DEPTH_MAX.  On a dispatch failure it records
 * -EINVAL in tq_arg->flag so the caller can detect the error.
 */
static void
splat_taskq_test7_func(void *arg)
{
	splat_taskq_arg_t *tq_arg = (splat_taskq_arg_t *)arg;
	taskqid_t id;

	ASSERT(tq_arg);

	/* Recursion terminates once the maximum depth is reached. */
	if (tq_arg->depth >= SPLAT_TASKQ_DEPTH_MAX)
		return;

	tq_arg->depth++;

	splat_vprint(tq_arg->file, SPLAT_TASKQ_TEST7_NAME,
	    "Taskq '%s' function '%s' dispatching (depth = %u)\n",
	    tq_arg->name, sym2str(splat_taskq_test7_func),
	    tq_arg->depth);

	if (tq_arg->tqe) {
		/* Prealloc mode: the single entry must be free before
		 * it can be reused for the next level of recursion. */
		VERIFY(taskq_empty_ent(tq_arg->tqe));
		taskq_dispatch_ent(tq_arg->tq, splat_taskq_test7_func,
		    tq_arg, TQ_SLEEP, tq_arg->tqe);
		id = tq_arg->tqe->tqent_id;
	} else {
		id = taskq_dispatch(tq_arg->tq, splat_taskq_test7_func,
		    tq_arg, TQ_SLEEP);
	}

	/* A zero taskqid indicates the dispatch failed. */
	if (id == 0) {
		splat_vprint(tq_arg->file, SPLAT_TASKQ_TEST7_NAME,
		    "Taskq '%s' function '%s' dispatch failed "
		    "(depth = %u)\n", tq_arg->name,
		    sym2str(splat_taskq_test7_func), tq_arg->depth);
		tq_arg->flag = -EINVAL;
		return;
	}
}
956
/*
 * Worker for the "recurse" test: create a single-thread taskq, seed
 * the recursion by calling splat_taskq_test7_func() directly, then
 * wait for all SPLAT_TASKQ_DEPTH_MAX re-dispatches to complete and
 * verify the final depth.  Returns 0 on success or -EINVAL.
 */
static int
splat_taskq_test7_impl(struct file *file, void *arg, boolean_t prealloc)
{
	taskq_t *tq;
	taskq_ent_t tqe;
	splat_taskq_arg_t tq_arg;

	splat_vprint(file, SPLAT_TASKQ_TEST7_NAME,
	    "Taskq '%s' creating (%s dispatch)\n",
	    SPLAT_TASKQ_TEST7_NAME,
	    prealloc ? "prealloc" : "dynamic");
	if ((tq = taskq_create(SPLAT_TASKQ_TEST7_NAME, 1, maxclsyspri,
	    50, INT_MAX, TASKQ_PREPOPULATE)) == NULL) {
		splat_vprint(file, SPLAT_TASKQ_TEST7_NAME,
		    "Taskq '%s' create failed\n",
		    SPLAT_TASKQ_TEST7_NAME);
		return -EINVAL;
	}

	tq_arg.depth = 0;
	tq_arg.flag = 0;
	tq_arg.id = 0;
	tq_arg.file = file;
	tq_arg.name = SPLAT_TASKQ_TEST7_NAME;
	tq_arg.tq = tq;

	if (prealloc) {
		/* A single entry is reused for every recursion level. */
		taskq_init_ent(&tqe);
		tq_arg.tqe = &tqe;
	} else {
		tq_arg.tqe = NULL;
	}

	/* Seed the recursive dispatch chain directly. */
	splat_taskq_test7_func(&tq_arg);

	if (tq_arg.flag == 0) {
		splat_vprint(file, SPLAT_TASKQ_TEST7_NAME,
		    "Taskq '%s' waiting\n", tq_arg.name);
		taskq_wait_all(tq, SPLAT_TASKQ_DEPTH_MAX);
	}

	splat_vprint(file, SPLAT_TASKQ_TEST7_NAME,
	    "Taskq '%s' destroying\n", tq_arg.name);
	taskq_destroy(tq);

	/* Success requires the recursion to have reached full depth. */
	return tq_arg.depth == SPLAT_TASKQ_DEPTH_MAX ? 0 : -EINVAL;
}
1004
1005 static int
1006 splat_taskq_test7(struct file *file, void *arg)
1007 {
1008 int rc;
1009
1010 rc = splat_taskq_test7_impl(file, arg, B_FALSE);
1011 if (rc)
1012 return rc;
1013
1014 rc = splat_taskq_test7_impl(file, arg, B_TRUE);
1015
1016 return rc;
1017 }
1018
1019 /*
1020 * Create a taskq with 100 threads and dispatch a huge number of trivial
1021 * tasks to generate contention on tq->tq_lock. This test should always
1022 * pass. The purpose is to provide a benchmark for measuring the
1023 * effectiveness of taskq optimizations.
1024 */
1025 static void
1026 splat_taskq_test8_func(void *arg)
1027 {
1028 splat_taskq_arg_t *tq_arg = (splat_taskq_arg_t *)arg;
1029 ASSERT(tq_arg);
1030
1031 atomic_inc(tq_arg->count);
1032 }
1033
1034 #define TEST8_NUM_TASKS 0x20000
1035 #define TEST8_THREADS_PER_TASKQ 100
1036
/*
 * Worker for the "contention" benchmark: create a taskq with
 * TEST8_THREADS_PER_TASKQ threads, dispatch TEST8_NUM_TASKS trivial
 * counting tasks via individually allocated preallocated entries,
 * wait for them all, and verify the counter matches.  Returns 0 on
 * success or a negative errno.
 */
static int
splat_taskq_test8_common(struct file *file, void *arg, int minalloc,
    int maxalloc)
{
	taskq_t *tq;
	taskqid_t id;
	splat_taskq_arg_t tq_arg;
	taskq_ent_t **tqes;
	atomic_t count;
	int i, j, rc = 0;

	/* Pointer array is large (128K entries) so use vmalloc(). */
	tqes = vmalloc(sizeof(*tqes) * TEST8_NUM_TASKS);
	if (tqes == NULL)
		return -ENOMEM;
	memset(tqes, 0, sizeof(*tqes) * TEST8_NUM_TASKS);

	splat_vprint(file, SPLAT_TASKQ_TEST8_NAME,
	    "Taskq '%s' creating (%d/%d/%d)\n",
	    SPLAT_TASKQ_TEST8_NAME,
	    minalloc, maxalloc, TEST8_NUM_TASKS);
	if ((tq = taskq_create(SPLAT_TASKQ_TEST8_NAME, TEST8_THREADS_PER_TASKQ,
	    maxclsyspri, minalloc, maxalloc,
	    TASKQ_PREPOPULATE)) == NULL) {
		splat_vprint(file, SPLAT_TASKQ_TEST8_NAME,
		    "Taskq '%s' create failed\n",
		    SPLAT_TASKQ_TEST8_NAME);
		rc = -EINVAL;
		goto out_free;
	}

	tq_arg.file = file;
	tq_arg.name = SPLAT_TASKQ_TEST8_NAME;
	tq_arg.count = &count;
	atomic_set(tq_arg.count, 0);

	for (i = 0; i < TEST8_NUM_TASKS; i++) {
		/* Each task gets its own heap allocated entry. */
		tqes[i] = kmalloc(sizeof(taskq_ent_t), GFP_KERNEL);
		if (tqes[i] == NULL) {
			rc = -ENOMEM;
			goto out;
		}
		taskq_init_ent(tqes[i]);

		taskq_dispatch_ent(tq, splat_taskq_test8_func,
		    &tq_arg, TQ_SLEEP, tqes[i]);

		id = tqes[i]->tqent_id;

		/* A zero taskqid indicates the dispatch failed. */
		if (id == 0) {
			splat_vprint(file, SPLAT_TASKQ_TEST8_NAME,
			    "Taskq '%s' function '%s' dispatch "
			    "%d failed\n", tq_arg.name,
			    sym2str(splat_taskq_test8_func), i);
			rc = -EINVAL;
			goto out;
		}
	}

	splat_vprint(file, SPLAT_TASKQ_TEST8_NAME, "Taskq '%s' "
	    "waiting for %d dispatches\n", tq_arg.name,
	    TEST8_NUM_TASKS);
	taskq_wait(tq);
	splat_vprint(file, SPLAT_TASKQ_TEST8_NAME, "Taskq '%s' "
	    "%d/%d dispatches finished\n", tq_arg.name,
	    atomic_read(tq_arg.count), TEST8_NUM_TASKS);

	/* Every dispatched task must have run exactly once. */
	if (atomic_read(tq_arg.count) != TEST8_NUM_TASKS)
		rc = -ERANGE;

out:
	splat_vprint(file, SPLAT_TASKQ_TEST8_NAME, "Taskq '%s' destroying\n",
	    tq_arg.name);
	taskq_destroy(tq);
out_free:
	/* Entries were allocated in order; stop at the first NULL. */
	for (j = 0; j < TEST8_NUM_TASKS && tqes[j] != NULL; j++)
		kfree(tqes[j]);
	vfree(tqes);

	return rc;
}
1117
/*
 * Entry point for test 8: exercise the prealloc-entry dispatch path
 * with a minimal prealloc pool (1) and a cap of 100 entries.
 */
static int
splat_taskq_test8(struct file *file, void *arg)
{
	return splat_taskq_test8_common(file, arg, 1, 100);
}
1127
1128 /*
1129 * Create a taskq and dispatch a number of delayed tasks to the queue.
1130 * For each task verify that it was run no early than requested.
1131 */
1132 static void
1133 splat_taskq_test9_func(void *arg)
1134 {
1135 splat_taskq_arg_t *tq_arg = (splat_taskq_arg_t *)arg;
1136 ASSERT(tq_arg);
1137
1138 if (ddi_get_lbolt() >= tq_arg->expire)
1139 atomic_inc(tq_arg->count);
1140
1141 kmem_free(tq_arg, sizeof(splat_taskq_arg_t));
1142 }
1143
/*
 * Create a taskq and dispatch nr_tasks tasks, each delayed by a random
 * amount of at most 5 seconds.  Each task increments the shared counter
 * only if it ran no earlier than its requested expiration time; the
 * test passes when all tasks ran on time.  Returns 0 on success or a
 * negative errno.
 */
static int
splat_taskq_test9(struct file *file, void *arg)
{
	taskq_t *tq;
	atomic_t count;		/* tasks which ran no earlier than requested */
	int i, rc = 0;
	int minalloc = 1;
	int maxalloc = 10;
	int nr_tasks = 100;

	splat_vprint(file, SPLAT_TASKQ_TEST9_NAME,
	    "Taskq '%s' creating (%s dispatch) (%d/%d/%d)\n",
	    SPLAT_TASKQ_TEST9_NAME, "delay", minalloc, maxalloc, nr_tasks);
	if ((tq = taskq_create(SPLAT_TASKQ_TEST9_NAME, 3, maxclsyspri,
	    minalloc, maxalloc, TASKQ_PREPOPULATE)) == NULL) {
		splat_vprint(file, SPLAT_TASKQ_TEST9_NAME,
		    "Taskq '%s' create failed\n", SPLAT_TASKQ_TEST9_NAME);
		return -EINVAL;
	}

	atomic_set(&count, 0);

	for (i = 1; i <= nr_tasks; i++) {
		splat_taskq_arg_t *tq_arg;
		taskqid_t id;
		uint32_t rnd;

		/* A random timeout in jiffies of at most 5 seconds */
		get_random_bytes((void *)&rnd, 4);
		rnd = rnd % (5 * HZ);

		/* Ownership of tq_arg passes to the dispatched function
		 * (splat_taskq_test9_func frees it); on dispatch failure
		 * it is freed locally below. */
		tq_arg = kmem_alloc(sizeof(splat_taskq_arg_t), KM_SLEEP);
		tq_arg->file = file;
		tq_arg->name = SPLAT_TASKQ_TEST9_NAME;
		/* expire is sampled before the dispatch deadline below,
		 * so a correctly delayed task can never appear early. */
		tq_arg->expire = ddi_get_lbolt() + rnd;
		tq_arg->count = &count;

		splat_vprint(file, SPLAT_TASKQ_TEST9_NAME,
		    "Taskq '%s' delay dispatch %u jiffies\n",
		    SPLAT_TASKQ_TEST9_NAME, rnd);

		id = taskq_dispatch_delay(tq, splat_taskq_test9_func,
		    tq_arg, TQ_SLEEP, ddi_get_lbolt() + rnd);

		if (id == 0) {
			splat_vprint(file, SPLAT_TASKQ_TEST9_NAME,
			   "Taskq '%s' delay dispatch failed\n",
			   SPLAT_TASKQ_TEST9_NAME);
			kmem_free(tq_arg, sizeof(splat_taskq_arg_t));
			/* Drain tasks already queued before bailing. */
			taskq_wait(tq);
			rc = -EINVAL;
			goto out;
		}
	}

	splat_vprint(file, SPLAT_TASKQ_TEST9_NAME, "Taskq '%s' waiting for "
	    "%d delay dispatches\n", SPLAT_TASKQ_TEST9_NAME, nr_tasks);

	taskq_wait(tq);
	if (atomic_read(&count) != nr_tasks)
		rc = -ERANGE;

	splat_vprint(file, SPLAT_TASKQ_TEST9_NAME, "Taskq '%s' %d/%d delay "
	    "dispatches finished on time\n", SPLAT_TASKQ_TEST9_NAME,
	    atomic_read(&count), nr_tasks);
	splat_vprint(file, SPLAT_TASKQ_TEST9_NAME, "Taskq '%s' destroying\n",
	    SPLAT_TASKQ_TEST9_NAME);
out:
	taskq_destroy(tq);

	return rc;
}
1216
1217 /*
1218 * Create a taskq and dispatch then cancel tasks in the queue.
1219 */
1220 static void
1221 splat_taskq_test10_func(void *arg)
1222 {
1223 splat_taskq_arg_t *tq_arg = (splat_taskq_arg_t *)arg;
1224 uint8_t rnd;
1225
1226 if (ddi_get_lbolt() >= tq_arg->expire)
1227 atomic_inc(tq_arg->count);
1228
1229 /* Randomly sleep to further perturb the system */
1230 get_random_bytes((void *)&rnd, 1);
1231 msleep(1 + (rnd % 9));
1232 }
1233
/*
 * Dispatch a mix of immediate and delayed tasks, then spend 5 seconds
 * randomly canceling task ids while they run.  Verifies that
 * taskq_cancel_id() returns only the expected results (0/ENOENT/EBUSY),
 * never blocks for too long, and that the number of tasks which ran
 * equals nr_tasks minus the number successfully canceled.  Returns 0
 * on success or a negative errno.
 */
static int
splat_taskq_test10(struct file *file, void *arg)
{
	taskq_t *tq;
	splat_taskq_arg_t **tqas;	/* per-task args, owned by us */
	atomic_t count;			/* tasks which actually ran */
	int i, j, rc = 0;
	int minalloc = 1;
	int maxalloc = 10;
	int nr_tasks = 100;
	int canceled = 0;
	int completed = 0;
	int blocked = 0;
	unsigned long start, cancel;

	/* Zeroed so cleanup can stop at the first unallocated slot. */
	tqas = vmalloc(sizeof(*tqas) * nr_tasks);
	if (tqas == NULL)
		return -ENOMEM;
	memset(tqas, 0, sizeof(*tqas) * nr_tasks);

	splat_vprint(file, SPLAT_TASKQ_TEST10_NAME,
	    "Taskq '%s' creating (%s dispatch) (%d/%d/%d)\n",
	    SPLAT_TASKQ_TEST10_NAME, "delay", minalloc, maxalloc, nr_tasks);
	if ((tq = taskq_create(SPLAT_TASKQ_TEST10_NAME, 3, maxclsyspri,
	    minalloc, maxalloc, TASKQ_PREPOPULATE)) == NULL) {
		splat_vprint(file, SPLAT_TASKQ_TEST10_NAME,
		    "Taskq '%s' create failed\n", SPLAT_TASKQ_TEST10_NAME);
		rc = -EINVAL;
		goto out_free;
	}

	atomic_set(&count, 0);

	for (i = 0; i < nr_tasks; i++) {
		splat_taskq_arg_t *tq_arg;
		uint32_t rnd;

		/* A random timeout in jiffies of at most 5 seconds */
		get_random_bytes((void *)&rnd, 4);
		rnd = rnd % (5 * HZ);

		/* Unlike test 9, the task function does not free its
		 * argument; tqas[] retains ownership for cleanup. */
		tq_arg = kmem_alloc(sizeof(splat_taskq_arg_t), KM_SLEEP);
		tq_arg->file = file;
		tq_arg->name = SPLAT_TASKQ_TEST10_NAME;
		tq_arg->count = &count;
		tqas[i] = tq_arg;

		/*
		 * Dispatch every 1/3 one immediately to mix it up, the cancel
		 * code is inherently racy and we want to try and provoke any
		 * subtle concurrently issues.
		 */
		if ((i % 3) == 0) {
			tq_arg->expire = ddi_get_lbolt();
			tq_arg->id = taskq_dispatch(tq, splat_taskq_test10_func,
			    tq_arg, TQ_SLEEP);
		} else {
			tq_arg->expire = ddi_get_lbolt() + rnd;
			tq_arg->id = taskq_dispatch_delay(tq,
			    splat_taskq_test10_func,
			    tq_arg, TQ_SLEEP, ddi_get_lbolt() + rnd);
		}

		if (tq_arg->id == 0) {
			splat_vprint(file, SPLAT_TASKQ_TEST10_NAME,
			    "Taskq '%s' dispatch failed\n",
			    SPLAT_TASKQ_TEST10_NAME);
			/* Freed here since it never made it into the
			 * queue; the tqas[i] slot still points at it,
			 * so clearing is implicit via the early goto
			 * (the cleanup loop frees only earlier slots
			 * after taskq_wait() drains them). */
			kmem_free(tq_arg, sizeof(splat_taskq_arg_t));
			taskq_wait(tq);
			rc = -EINVAL;
			goto out;
		} else {
			splat_vprint(file, SPLAT_TASKQ_TEST10_NAME,
			    "Taskq '%s' dispatch %lu in %lu jiffies\n",
			    SPLAT_TASKQ_TEST10_NAME, (unsigned long)tq_arg->id,
			    !(i % 3) ? 0 : tq_arg->expire - ddi_get_lbolt());
		}
	}

	/*
	 * Start randomly canceling tasks for the duration of the test.  We
	 * happen to know the valid task id's will be in the range 1..nr_tasks
	 * because the taskq is private and was just created.  However, we
	 * have no idea of a particular task has already executed or not.
	 */
	splat_vprint(file, SPLAT_TASKQ_TEST10_NAME, "Taskq '%s' randomly "
	    "canceling task ids\n", SPLAT_TASKQ_TEST10_NAME);

	start = ddi_get_lbolt();
	i = 0;

	/* Cancel loop runs for 5 seconds of wall-clock (jiffy) time. */
	while (ddi_get_lbolt() < start + 5 * HZ) {
		taskqid_t id;
		uint32_t rnd;

		i++;
		cancel = ddi_get_lbolt();
		get_random_bytes((void *)&rnd, 4);
		id = 1 + (rnd % nr_tasks);
		/* NOTE: rc temporarily holds taskq_cancel_id()'s result,
		 * which is a POSITIVE errno (ENOENT/EBUSY) or 0; it is
		 * reset to 0 at the bottom of each iteration. */
		rc = taskq_cancel_id(tq, id);

		/*
		 * Keep track of the results of the random cancels.
		 */
		if (rc == 0) {
			canceled++;
		} else if (rc == ENOENT) {
			completed++;
		} else if (rc == EBUSY) {
			blocked++;
		} else {
			rc = -EINVAL;
			break;
		}

		/*
		 * Verify we never get blocked to long in taskq_cancel_id().
		 * The worst case is 10ms if we happen to cancel the task
		 * which is currently executing.  We allow a factor of 2x.
		 */
		if (ddi_get_lbolt() - cancel > HZ / 50) {
			splat_vprint(file, SPLAT_TASKQ_TEST10_NAME,
			    "Taskq '%s' cancel for %lu took %lu\n",
			    SPLAT_TASKQ_TEST10_NAME, (unsigned long)id,
			    ddi_get_lbolt() - cancel);
			rc = -ETIMEDOUT;
			break;
		}

		get_random_bytes((void *)&rnd, 4);
		msleep(1 + (rnd % 100));
		rc = 0;
	}

	taskq_wait(tq);

	/*
	 * Cross check the results of taskq_cancel_id() with the number of
	 * times the dispatched function actually ran successfully.
	 */
	if ((rc == 0) && (nr_tasks - canceled != atomic_read(&count)))
		rc = -EDOM;

	splat_vprint(file, SPLAT_TASKQ_TEST10_NAME, "Taskq '%s' %d attempts, "
	    "%d canceled, %d completed, %d blocked, %d/%d tasks run\n",
	    SPLAT_TASKQ_TEST10_NAME, i, canceled, completed, blocked,
	    atomic_read(&count), nr_tasks);
	splat_vprint(file, SPLAT_TASKQ_TEST10_NAME, "Taskq '%s' destroying %d\n",
	    SPLAT_TASKQ_TEST10_NAME, rc);
out:
	taskq_destroy(tq);
out_free:
	/* tqas[] is NULL past the last dispatched slot. */
	for (j = 0; j < nr_tasks && tqas[j] != NULL; j++)
		kmem_free(tqas[j], sizeof(splat_taskq_arg_t));
	vfree(tqas);

	return rc;
}
1392
/*
 * Allocate and populate the taskq test subsystem descriptor,
 * registering each individual taskq test.  Returns NULL on allocation
 * failure; the caller releases the subsystem with splat_taskq_fini().
 */
splat_subsystem_t *
splat_taskq_init(void)
{
	splat_subsystem_t *sub;

	sub = kmalloc(sizeof(*sub), GFP_KERNEL);
	if (sub == NULL)
		return NULL;

	memset(sub, 0, sizeof(*sub));
	/* NOTE(review): strncpy() does not NUL-terminate when the source
	 * fills the buffer; safe here only while the name/desc literals
	 * are shorter than SPLAT_NAME_SIZE/SPLAT_DESC_SIZE. */
	strncpy(sub->desc.name, SPLAT_TASKQ_NAME, SPLAT_NAME_SIZE);
	strncpy(sub->desc.desc, SPLAT_TASKQ_DESC, SPLAT_DESC_SIZE);
	INIT_LIST_HEAD(&sub->subsystem_list);
	INIT_LIST_HEAD(&sub->test_list);
	spin_lock_init(&sub->test_lock);
	sub->desc.id = SPLAT_SUBSYSTEM_TASKQ;

	/* Register every test; splat_taskq_fini() removes them in the
	 * reverse order. */
	SPLAT_TEST_INIT(sub, SPLAT_TASKQ_TEST1_NAME, SPLAT_TASKQ_TEST1_DESC,
	              SPLAT_TASKQ_TEST1_ID, splat_taskq_test1);
	SPLAT_TEST_INIT(sub, SPLAT_TASKQ_TEST2_NAME, SPLAT_TASKQ_TEST2_DESC,
	              SPLAT_TASKQ_TEST2_ID, splat_taskq_test2);
	SPLAT_TEST_INIT(sub, SPLAT_TASKQ_TEST3_NAME, SPLAT_TASKQ_TEST3_DESC,
	              SPLAT_TASKQ_TEST3_ID, splat_taskq_test3);
	SPLAT_TEST_INIT(sub, SPLAT_TASKQ_TEST4_NAME, SPLAT_TASKQ_TEST4_DESC,
	              SPLAT_TASKQ_TEST4_ID, splat_taskq_test4);
	SPLAT_TEST_INIT(sub, SPLAT_TASKQ_TEST5_NAME, SPLAT_TASKQ_TEST5_DESC,
	              SPLAT_TASKQ_TEST5_ID, splat_taskq_test5);
	SPLAT_TEST_INIT(sub, SPLAT_TASKQ_TEST6_NAME, SPLAT_TASKQ_TEST6_DESC,
	              SPLAT_TASKQ_TEST6_ID, splat_taskq_test6);
	SPLAT_TEST_INIT(sub, SPLAT_TASKQ_TEST7_NAME, SPLAT_TASKQ_TEST7_DESC,
	              SPLAT_TASKQ_TEST7_ID, splat_taskq_test7);
	SPLAT_TEST_INIT(sub, SPLAT_TASKQ_TEST8_NAME, SPLAT_TASKQ_TEST8_DESC,
	              SPLAT_TASKQ_TEST8_ID, splat_taskq_test8);
	SPLAT_TEST_INIT(sub, SPLAT_TASKQ_TEST9_NAME, SPLAT_TASKQ_TEST9_DESC,
	              SPLAT_TASKQ_TEST9_ID, splat_taskq_test9);
	SPLAT_TEST_INIT(sub, SPLAT_TASKQ_TEST10_NAME, SPLAT_TASKQ_TEST10_DESC,
	              SPLAT_TASKQ_TEST10_ID, splat_taskq_test10);

	return sub;
}
1433
/*
 * Tear down the taskq test subsystem created by splat_taskq_init():
 * remove each registered test in the reverse order of registration,
 * then free the subsystem descriptor itself.
 */
void
splat_taskq_fini(splat_subsystem_t *sub)
{
	ASSERT(sub);
	SPLAT_TEST_FINI(sub, SPLAT_TASKQ_TEST10_ID);
	SPLAT_TEST_FINI(sub, SPLAT_TASKQ_TEST9_ID);
	SPLAT_TEST_FINI(sub, SPLAT_TASKQ_TEST8_ID);
	SPLAT_TEST_FINI(sub, SPLAT_TASKQ_TEST7_ID);
	SPLAT_TEST_FINI(sub, SPLAT_TASKQ_TEST6_ID);
	SPLAT_TEST_FINI(sub, SPLAT_TASKQ_TEST5_ID);
	SPLAT_TEST_FINI(sub, SPLAT_TASKQ_TEST4_ID);
	SPLAT_TEST_FINI(sub, SPLAT_TASKQ_TEST3_ID);
	SPLAT_TEST_FINI(sub, SPLAT_TASKQ_TEST2_ID);
	SPLAT_TEST_FINI(sub, SPLAT_TASKQ_TEST1_ID);

	kfree(sub);
}
1451
1452 int
1453 splat_taskq_id(void) {
1454 return SPLAT_SUBSYSTEM_TASKQ;
1455 }