module/splat/splat-taskq.c
1 /*****************************************************************************\
2 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
3 * Copyright (C) 2007 The Regents of the University of California.
4 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
5 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
6 * UCRL-CODE-235197
7 *
8 * This file is part of the SPL, Solaris Porting Layer.
9 * For details, see <http://zfsonlinux.org/>.
10 *
11 * The SPL is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 *
16 * The SPL is distributed in the hope that it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 * for more details.
20 *
21 * You should have received a copy of the GNU General Public License along
22 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
23 *****************************************************************************
24 * Solaris Porting LAyer Tests (SPLAT) Task Queue Tests.
25 \*****************************************************************************/
26
27 #include <sys/kmem.h>
28 #include <sys/random.h>
29 #include <sys/taskq.h>
30 #include <linux/delay.h>
31 #include "splat-internal.h"
32
33 #define SPLAT_TASKQ_NAME "taskq"
34 #define SPLAT_TASKQ_DESC "Kernel Task Queue Tests"
35
36 #define SPLAT_TASKQ_TEST1_ID 0x0201
37 #define SPLAT_TASKQ_TEST1_NAME "single"
38 #define SPLAT_TASKQ_TEST1_DESC "Single task queue, single task"
39
40 #define SPLAT_TASKQ_TEST2_ID 0x0202
41 #define SPLAT_TASKQ_TEST2_NAME "multiple"
42 #define SPLAT_TASKQ_TEST2_DESC "Multiple task queues, multiple tasks"
43
44 #define SPLAT_TASKQ_TEST3_ID 0x0203
45 #define SPLAT_TASKQ_TEST3_NAME "system"
46 #define SPLAT_TASKQ_TEST3_DESC "System task queue, multiple tasks"
47
48 #define SPLAT_TASKQ_TEST4_ID 0x0204
49 #define SPLAT_TASKQ_TEST4_NAME "wait"
50 #define SPLAT_TASKQ_TEST4_DESC "Multiple task waiting"
51
52 #define SPLAT_TASKQ_TEST5_ID 0x0205
53 #define SPLAT_TASKQ_TEST5_NAME "order"
54 #define SPLAT_TASKQ_TEST5_DESC "Correct task ordering"
55
56 #define SPLAT_TASKQ_TEST6_ID 0x0206
57 #define SPLAT_TASKQ_TEST6_NAME "front"
58 #define SPLAT_TASKQ_TEST6_DESC "Correct ordering with TQ_FRONT flag"
59
60 #define SPLAT_TASKQ_TEST7_ID 0x0207
61 #define SPLAT_TASKQ_TEST7_NAME "recurse"
62 #define SPLAT_TASKQ_TEST7_DESC "Single task queue, recursive dispatch"
63
64 #define SPLAT_TASKQ_TEST8_ID 0x0208
65 #define SPLAT_TASKQ_TEST8_NAME "contention"
66 #define SPLAT_TASKQ_TEST8_DESC "1 queue, 100 threads, 131072 tasks"
67
68 #define SPLAT_TASKQ_TEST9_ID 0x0209
69 #define SPLAT_TASKQ_TEST9_NAME "delay"
70 #define SPLAT_TASKQ_TEST9_DESC "Delayed task execution"
71
72 #define SPLAT_TASKQ_TEST10_ID 0x020a
73 #define SPLAT_TASKQ_TEST10_NAME "cancel"
74 #define SPLAT_TASKQ_TEST10_DESC "Cancel task execution"
75
76 #define SPLAT_TASKQ_ORDER_MAX 8
77 #define SPLAT_TASKQ_DEPTH_MAX 16
78
79
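/*
 * Argument block shared by the test functions below.  Only the fields
 * relevant to a given test are used, e.g. 'flag' for run status, 'count'
 * for a shared atomic completion counter and 'order' for completion order.
 */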
80 typedef struct splat_taskq_arg {
81 int flag;
82 int id;
83 atomic_t *count;
84 int order[SPLAT_TASKQ_ORDER_MAX];
85 unsigned int depth;
86 clock_t expire;
87 taskq_t *tq;
88 taskq_ent_t *tqe;
89 spinlock_t lock;
90 struct file *file;
91 const char *name;
92 } splat_taskq_arg_t;
93
94 typedef struct splat_taskq_id {
95 int id;
96 splat_taskq_arg_t *arg;
97 } splat_taskq_id_t;
98
99 /*
100 * Create a taskq, queue a task, wait until task completes, ensure
101 * task ran properly, cleanup taskq.
102 */
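/* Trivial dispatch function shared by tests 1 and 3; it records that it ran. */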
103 static void
104 splat_taskq_test13_func(void *arg)
105 {
106 splat_taskq_arg_t *tq_arg = (splat_taskq_arg_t *)arg;
107
108 ASSERT(tq_arg);
109 splat_vprint(tq_arg->file, SPLAT_TASKQ_TEST1_NAME,
110 "Taskq '%s' function '%s' setting flag\n",
111 tq_arg->name, sym2str(splat_taskq_test13_func));
112 tq_arg->flag = 1;
113 }
114
115 static int
116 splat_taskq_test1_impl(struct file *file, void *arg, boolean_t prealloc)
117 {
118 taskq_t *tq;
119 taskqid_t id;
120 splat_taskq_arg_t tq_arg;
121 taskq_ent_t tqe;
122
123 taskq_init_ent(&tqe);
124
125 splat_vprint(file, SPLAT_TASKQ_TEST1_NAME,
126 "Taskq '%s' creating (%s dispatch)\n",
127 SPLAT_TASKQ_TEST1_NAME,
128 prealloc ? "prealloc" : "dynamic");
129 if ((tq = taskq_create(SPLAT_TASKQ_TEST1_NAME, 1, maxclsyspri,
130 50, INT_MAX, TASKQ_PREPOPULATE)) == NULL) {
131 splat_vprint(file, SPLAT_TASKQ_TEST1_NAME,
132 "Taskq '%s' create failed\n",
133 SPLAT_TASKQ_TEST1_NAME);
134 return -EINVAL;
135 }
136
137 tq_arg.flag = 0;
138 tq_arg.id = 0;
139 tq_arg.file = file;
140 tq_arg.name = SPLAT_TASKQ_TEST1_NAME;
141
142 splat_vprint(file, SPLAT_TASKQ_TEST1_NAME,
143 "Taskq '%s' function '%s' dispatching\n",
144 tq_arg.name, sym2str(splat_taskq_test13_func));
145 if (prealloc) {
146 taskq_dispatch_ent(tq, splat_taskq_test13_func,
147 &tq_arg, TQ_SLEEP, &tqe);
148 id = tqe.tqent_id;
149 } else {
150 id = taskq_dispatch(tq, splat_taskq_test13_func,
151 &tq_arg, TQ_SLEEP);
152 }
153
154 if (id == 0) {
155 splat_vprint(file, SPLAT_TASKQ_TEST1_NAME,
156 "Taskq '%s' function '%s' dispatch failed\n",
157 tq_arg.name, sym2str(splat_taskq_test13_func));
158 taskq_destroy(tq);
159 return -EINVAL;
160 }
161
162 splat_vprint(file, SPLAT_TASKQ_TEST1_NAME, "Taskq '%s' waiting\n",
163 tq_arg.name);
164 taskq_wait(tq);
165 splat_vprint(file, SPLAT_TASKQ_TEST1_NAME, "Taskq '%s' destroying\n",
166 tq_arg.name);
167
168 taskq_destroy(tq);
169
170 return (tq_arg.flag) ? 0 : -EINVAL;
171 }
172
173 static int
174 splat_taskq_test1(struct file *file, void *arg)
175 {
176 int rc;
177
178 rc = splat_taskq_test1_impl(file, arg, B_FALSE);
179 if (rc)
180 return rc;
181
182 rc = splat_taskq_test1_impl(file, arg, B_TRUE);
183
184 return rc;
185 }
186
187 /*
188  * Create multiple taskqs, each with multiple tasks, wait until
189 * all tasks complete, ensure all tasks ran properly and in the
190 * correct order. Run order must be the same as the order submitted
191  * because we only have 1 thread per taskq. Finally, clean up the taskqs.
192 */
193 static void
194 splat_taskq_test2_func1(void *arg)
195 {
196 splat_taskq_arg_t *tq_arg = (splat_taskq_arg_t *)arg;
197
198 ASSERT(tq_arg);
199 splat_vprint(tq_arg->file, SPLAT_TASKQ_TEST2_NAME,
200 "Taskq '%s/%d' function '%s' flag = %d = %d * 2\n",
201 tq_arg->name, tq_arg->id,
202 sym2str(splat_taskq_test2_func1),
203 tq_arg->flag * 2, tq_arg->flag);
204 tq_arg->flag *= 2;
205 }
206
207 static void
208 splat_taskq_test2_func2(void *arg)
209 {
210 splat_taskq_arg_t *tq_arg = (splat_taskq_arg_t *)arg;
211
212 ASSERT(tq_arg);
213 splat_vprint(tq_arg->file, SPLAT_TASKQ_TEST2_NAME,
214 "Taskq '%s/%d' function '%s' flag = %d = %d + 1\n",
215 tq_arg->name, tq_arg->id,
216 sym2str(splat_taskq_test2_func2),
217 tq_arg->flag + 1, tq_arg->flag);
218 tq_arg->flag += 1;
219 }
220
221 #define TEST2_TASKQS 8
222 #define TEST2_THREADS_PER_TASKQ 1
223
224 static int
225 splat_taskq_test2_impl(struct file *file, void *arg, boolean_t prealloc) {
226 taskq_t *tq[TEST2_TASKQS] = { NULL };
227 taskqid_t id;
228 splat_taskq_arg_t tq_args[TEST2_TASKQS];
229 taskq_ent_t *func1_tqes = NULL;
230 taskq_ent_t *func2_tqes = NULL;
231 int i, rc = 0;
232
233 func1_tqes = kmalloc(sizeof(*func1_tqes) * TEST2_TASKQS, GFP_KERNEL);
234 if (func1_tqes == NULL) {
235 rc = -ENOMEM;
236 goto out;
237 }
238
239 func2_tqes = kmalloc(sizeof(*func2_tqes) * TEST2_TASKQS, GFP_KERNEL);
240 if (func2_tqes == NULL) {
241 rc = -ENOMEM;
242 goto out;
243 }
244
245 for (i = 0; i < TEST2_TASKQS; i++) {
246 taskq_init_ent(&func1_tqes[i]);
247 taskq_init_ent(&func2_tqes[i]);
248
249 splat_vprint(file, SPLAT_TASKQ_TEST2_NAME,
250 "Taskq '%s/%d' creating (%s dispatch)\n",
251 SPLAT_TASKQ_TEST2_NAME, i,
252 prealloc ? "prealloc" : "dynamic");
253 if ((tq[i] = taskq_create(SPLAT_TASKQ_TEST2_NAME,
254 TEST2_THREADS_PER_TASKQ,
255 maxclsyspri, 50, INT_MAX,
256 TASKQ_PREPOPULATE)) == NULL) {
257 splat_vprint(file, SPLAT_TASKQ_TEST2_NAME,
258 "Taskq '%s/%d' create failed\n",
259 SPLAT_TASKQ_TEST2_NAME, i);
260 rc = -EINVAL;
261 break;
262 }
263
264 tq_args[i].flag = i;
265 tq_args[i].id = i;
266 tq_args[i].file = file;
267 tq_args[i].name = SPLAT_TASKQ_TEST2_NAME;
268
269 splat_vprint(file, SPLAT_TASKQ_TEST2_NAME,
270 "Taskq '%s/%d' function '%s' dispatching\n",
271 tq_args[i].name, tq_args[i].id,
272 sym2str(splat_taskq_test2_func1));
273 if (prealloc) {
274 taskq_dispatch_ent(tq[i], splat_taskq_test2_func1,
275 &tq_args[i], TQ_SLEEP, &func1_tqes[i]);
276 id = func1_tqes[i].tqent_id;
277 } else {
278 id = taskq_dispatch(tq[i], splat_taskq_test2_func1,
279 &tq_args[i], TQ_SLEEP);
280 }
281
282 if (id == 0) {
283 splat_vprint(file, SPLAT_TASKQ_TEST2_NAME,
284 "Taskq '%s/%d' function '%s' dispatch "
285 "failed\n", tq_args[i].name, tq_args[i].id,
286 sym2str(splat_taskq_test2_func1));
287 rc = -EINVAL;
288 break;
289 }
290
291 splat_vprint(file, SPLAT_TASKQ_TEST2_NAME,
292 "Taskq '%s/%d' function '%s' dispatching\n",
293 tq_args[i].name, tq_args[i].id,
294 sym2str(splat_taskq_test2_func2));
295 if (prealloc) {
296 taskq_dispatch_ent(tq[i], splat_taskq_test2_func2,
297 &tq_args[i], TQ_SLEEP, &func2_tqes[i]);
298 id = func2_tqes[i].tqent_id;
299 } else {
300 id = taskq_dispatch(tq[i], splat_taskq_test2_func2,
301 &tq_args[i], TQ_SLEEP);
302 }
303
304 if (id == 0) {
305 splat_vprint(file, SPLAT_TASKQ_TEST2_NAME, "Taskq "
306 "'%s/%d' function '%s' dispatch failed\n",
307 tq_args[i].name, tq_args[i].id,
308 sym2str(splat_taskq_test2_func2));
309 rc = -EINVAL;
310 break;
311 }
312 }
313
314 /* When rc is set we're effectively just doing cleanup here, so
315 * ignore new errors in that case. They just cause noise. */
316 for (i = 0; i < TEST2_TASKQS; i++) {
317 if (tq[i] != NULL) {
318 splat_vprint(file, SPLAT_TASKQ_TEST2_NAME,
319 "Taskq '%s/%d' waiting\n",
320 tq_args[i].name, tq_args[i].id);
321 taskq_wait(tq[i]);
322 splat_vprint(file, SPLAT_TASKQ_TEST2_NAME,
323                                      "Taskq '%s/%d' destroying\n",
324 tq_args[i].name, tq_args[i].id);
325
326 taskq_destroy(tq[i]);
327
328 if (!rc && tq_args[i].flag != ((i * 2) + 1)) {
329 splat_vprint(file, SPLAT_TASKQ_TEST2_NAME,
330 "Taskq '%s/%d' processed tasks "
331 "out of order; %d != %d\n",
332 tq_args[i].name, tq_args[i].id,
333 tq_args[i].flag, i * 2 + 1);
334 rc = -EINVAL;
335 } else {
336 splat_vprint(file, SPLAT_TASKQ_TEST2_NAME,
337 "Taskq '%s/%d' processed tasks "
338 "in the correct order; %d == %d\n",
339 tq_args[i].name, tq_args[i].id,
340 tq_args[i].flag, i * 2 + 1);
341 }
342 }
343 }
344 out:
345 if (func1_tqes)
346 kfree(func1_tqes);
347
348 if (func2_tqes)
349 kfree(func2_tqes);
350
351 return rc;
352 }
353
354 static int
355 splat_taskq_test2(struct file *file, void *arg) {
356 int rc;
357
358 rc = splat_taskq_test2_impl(file, arg, B_FALSE);
359 if (rc)
360 return rc;
361
362 rc = splat_taskq_test2_impl(file, arg, B_TRUE);
363
364 return rc;
365 }
366
367 /*
368 * Use the global system task queue with a single task, wait until task
369 * completes, ensure task ran properly.
370 */
371 static int
372 splat_taskq_test3_impl(struct file *file, void *arg, boolean_t prealloc)
373 {
374 taskqid_t id;
375 splat_taskq_arg_t tq_arg;
376 taskq_ent_t tqe;
377
378 taskq_init_ent(&tqe);
379
380 tq_arg.flag = 0;
381 tq_arg.id = 0;
382 tq_arg.file = file;
383 tq_arg.name = SPLAT_TASKQ_TEST3_NAME;
384
385 splat_vprint(file, SPLAT_TASKQ_TEST3_NAME,
386 "Taskq '%s' function '%s' %s dispatch\n",
387 tq_arg.name, sym2str(splat_taskq_test13_func),
388 prealloc ? "prealloc" : "dynamic");
389 if (prealloc) {
390 taskq_dispatch_ent(system_taskq, splat_taskq_test13_func,
391 &tq_arg, TQ_SLEEP, &tqe);
392 id = tqe.tqent_id;
393 } else {
394 id = taskq_dispatch(system_taskq, splat_taskq_test13_func,
395 &tq_arg, TQ_SLEEP);
396 }
397
398 if (id == 0) {
399 splat_vprint(file, SPLAT_TASKQ_TEST3_NAME,
400 "Taskq '%s' function '%s' dispatch failed\n",
401 tq_arg.name, sym2str(splat_taskq_test13_func));
402 return -EINVAL;
403 }
404
405 splat_vprint(file, SPLAT_TASKQ_TEST3_NAME, "Taskq '%s' waiting\n",
406 tq_arg.name);
407 taskq_wait(system_taskq);
408
409 return (tq_arg.flag) ? 0 : -EINVAL;
410 }
411
412 static int
413 splat_taskq_test3(struct file *file, void *arg)
414 {
415 int rc;
416
417 rc = splat_taskq_test3_impl(file, arg, B_FALSE);
418 if (rc)
419 return rc;
420
421 rc = splat_taskq_test3_impl(file, arg, B_TRUE);
422
423 return rc;
424 }
425
426 /*
427 * Create a taskq and dispatch a large number of tasks to the queue.
428 * Then use taskq_wait() to block until all the tasks complete, then
429 * cross check that all the tasks ran by checking the shared atomic
430 * counter which is incremented in the task function.
431 *
432 * First we try with a large 'maxalloc' value, then we try with a small one.
433 * We should not drop tasks when TQ_SLEEP is used in taskq_dispatch(), even
434 * if the number of pending tasks is above maxalloc.
435 */
436 static void
437 splat_taskq_test4_func(void *arg)
438 {
439 splat_taskq_arg_t *tq_arg = (splat_taskq_arg_t *)arg;
440 ASSERT(tq_arg);
441
442 atomic_inc(tq_arg->count);
443 }
444
445 static int
446 splat_taskq_test4_common(struct file *file, void *arg, int minalloc,
447 int maxalloc, int nr_tasks, boolean_t prealloc)
448 {
449 taskq_t *tq;
450 taskqid_t id;
451 splat_taskq_arg_t tq_arg;
452 taskq_ent_t *tqes;
453 atomic_t count;
454 int i, j, rc = 0;
455
456 tqes = kmalloc(sizeof(*tqes) * nr_tasks, GFP_KERNEL);
457 if (tqes == NULL)
458 return -ENOMEM;
459
460 splat_vprint(file, SPLAT_TASKQ_TEST4_NAME,
461 "Taskq '%s' creating (%s dispatch) (%d/%d/%d)\n",
462 SPLAT_TASKQ_TEST4_NAME,
463 prealloc ? "prealloc" : "dynamic",
464 minalloc, maxalloc, nr_tasks);
465 if ((tq = taskq_create(SPLAT_TASKQ_TEST4_NAME, 1, maxclsyspri,
466 minalloc, maxalloc, TASKQ_PREPOPULATE)) == NULL) {
467 splat_vprint(file, SPLAT_TASKQ_TEST4_NAME,
468 "Taskq '%s' create failed\n",
469 SPLAT_TASKQ_TEST4_NAME);
470 rc = -EINVAL;
471 goto out_free;
472 }
473
474 tq_arg.file = file;
475 tq_arg.name = SPLAT_TASKQ_TEST4_NAME;
476 tq_arg.count = &count;
477
478 for (i = 1; i <= nr_tasks; i *= 2) {
479 atomic_set(tq_arg.count, 0);
480 splat_vprint(file, SPLAT_TASKQ_TEST4_NAME,
481 "Taskq '%s' function '%s' dispatched %d times\n",
482 tq_arg.name, sym2str(splat_taskq_test4_func), i);
483
484 for (j = 0; j < i; j++) {
485 taskq_init_ent(&tqes[j]);
486
487 if (prealloc) {
488 taskq_dispatch_ent(tq, splat_taskq_test4_func,
489 &tq_arg, TQ_SLEEP, &tqes[j]);
490 id = tqes[j].tqent_id;
491 } else {
492 id = taskq_dispatch(tq, splat_taskq_test4_func,
493 &tq_arg, TQ_SLEEP);
494 }
495
496 if (id == 0) {
497 splat_vprint(file, SPLAT_TASKQ_TEST4_NAME,
498 "Taskq '%s' function '%s' dispatch "
499 "%d failed\n", tq_arg.name,
500 sym2str(splat_taskq_test4_func), j);
501 rc = -EINVAL;
502 goto out;
503 }
504 }
505
506 splat_vprint(file, SPLAT_TASKQ_TEST4_NAME, "Taskq '%s' "
507 "waiting for %d dispatches\n", tq_arg.name, i);
508 taskq_wait(tq);
509 splat_vprint(file, SPLAT_TASKQ_TEST4_NAME, "Taskq '%s' "
510 "%d/%d dispatches finished\n", tq_arg.name,
511 atomic_read(&count), i);
512 if (atomic_read(&count) != i) {
513 rc = -ERANGE;
514 goto out;
515
516 }
517 }
518 out:
519 splat_vprint(file, SPLAT_TASKQ_TEST4_NAME, "Taskq '%s' destroying\n",
520 tq_arg.name);
521 taskq_destroy(tq);
522
523 out_free:
524 kfree(tqes);
525
526 return rc;
527 }
528
529 static int
530 splat_taskq_test4_impl(struct file *file, void *arg, boolean_t prealloc)
531 {
532 int rc;
533
534 rc = splat_taskq_test4_common(file, arg, 50, INT_MAX, 1024, prealloc);
535 if (rc)
536 return rc;
537
538 rc = splat_taskq_test4_common(file, arg, 1, 1, 32, prealloc);
539
540 return rc;
541 }
542
543 static int
544 splat_taskq_test4(struct file *file, void *arg)
545 {
546 int rc;
547
548 rc = splat_taskq_test4_impl(file, arg, B_FALSE);
549 if (rc)
550 return rc;
551
552 rc = splat_taskq_test4_impl(file, arg, B_TRUE);
553
554 return rc;
555 }
556
557 /*
558 * Create a taskq and dispatch a specific sequence of tasks carefully
559 * crafted to validate the order in which tasks are processed. When
560 * there are multiple worker threads each thread will process the
561 * next pending task as soon as it completes its current task. This
562  * means that tasks do not strictly complete in the order in which they
563 * were dispatched (increasing task id). This is fine but we need to
564 * verify that taskq_wait_all() blocks until the passed task id and all
565 * lower task ids complete. We do this by dispatching the following
566  * specific sequence of tasks, each of which blocks for N time units.
567  * We then use taskq_wait_all() to unblock at a specific task id and
568  * verify that only the expected task ids have completed, and in the
569 * correct order. The two cases of interest are:
570 *
571  *    1) Task ids larger than the waited-for task id can run and
572 * complete as long as there is an available worker thread.
573 * 2) All task ids lower than the waited one must complete before
574 * unblocking even if the waited task id itself has completed.
575 *
576  * The following table shows each task id and how it will be
577  * scheduled. Each row represents one time unit and each column
578 * one of the three worker threads. The places taskq_wait_all()
579 * must unblock for a specific id are identified as well as the
580 * task ids which must have completed and their order.
581 *
582 * +-----+ <--- taskq_wait_all(tq, 8) unblocks
583 * | | Required Completion Order: 1,2,4,5,3,8,6,7
584 * +-----+ |
585 * | | |
586 * | | +-----+
587 * | | | 8 |
588 * | | +-----+ <--- taskq_wait_all(tq, 3) unblocks
589 * | | 7 | | Required Completion Order: 1,2,4,5,3
590 * | +-----+ |
591 * | 6 | | |
592 * +-----+ | |
593 * | | 5 | |
594 * | +-----+ |
595 * | 4 | | |
596 * +-----+ | |
597 * | 1 | 2 | 3 |
598 * +-----+-----+-----+
599 *
600 */
601 static void
602 splat_taskq_test5_func(void *arg)
603 {
604 splat_taskq_id_t *tq_id = (splat_taskq_id_t *)arg;
605 splat_taskq_arg_t *tq_arg = tq_id->arg;
606 int factor;
607
608 /* Delays determined by above table */
609 switch (tq_id->id) {
610 default: factor = 0; break;
611 case 1: case 8: factor = 1; break;
612 case 2: case 4: case 5: factor = 2; break;
613 case 6: case 7: factor = 4; break;
614 case 3: factor = 5; break;
615 }
616
617 msleep(factor * 100);
618 splat_vprint(tq_arg->file, tq_arg->name,
619 "Taskqid %d complete for taskq '%s'\n",
620 tq_id->id, tq_arg->name);
621
622 spin_lock(&tq_arg->lock);
623 tq_arg->order[tq_arg->flag] = tq_id->id;
624 tq_arg->flag++;
625 spin_unlock(&tq_arg->lock);
626 }
627
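/*
 * Helper shared by tests 5 and 6: compare the recorded completion order
 * against the expected order and log both sequences on a mismatch.
 */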
628 static int
629 splat_taskq_test_order(splat_taskq_arg_t *tq_arg, int *order)
630 {
631 int i, j;
632
633 for (i = 0; i < SPLAT_TASKQ_ORDER_MAX; i++) {
634 if (tq_arg->order[i] != order[i]) {
635 splat_vprint(tq_arg->file, tq_arg->name,
636 "Taskq '%s' incorrect completion "
637 "order\n", tq_arg->name);
638 splat_vprint(tq_arg->file, tq_arg->name,
639 "%s", "Expected { ");
640
641 for (j = 0; j < SPLAT_TASKQ_ORDER_MAX; j++)
642 splat_print(tq_arg->file, "%d ", order[j]);
643
644 splat_print(tq_arg->file, "%s", "}\n");
645 splat_vprint(tq_arg->file, tq_arg->name,
646 "%s", "Got { ");
647
648 for (j = 0; j < SPLAT_TASKQ_ORDER_MAX; j++)
649 splat_print(tq_arg->file, "%d ",
650 tq_arg->order[j]);
651
652 splat_print(tq_arg->file, "%s", "}\n");
653 return -EILSEQ;
654 }
655 }
656
657 splat_vprint(tq_arg->file, tq_arg->name,
658 "Taskq '%s' validated correct completion order\n",
659 tq_arg->name);
660
661 return 0;
662 }
663
664 static int
665 splat_taskq_test5_impl(struct file *file, void *arg, boolean_t prealloc)
666 {
667 taskq_t *tq;
668 taskqid_t id;
669 splat_taskq_id_t tq_id[SPLAT_TASKQ_ORDER_MAX];
670 splat_taskq_arg_t tq_arg;
671 int order1[SPLAT_TASKQ_ORDER_MAX] = { 1,2,4,5,3,0,0,0 };
672 int order2[SPLAT_TASKQ_ORDER_MAX] = { 1,2,4,5,3,8,6,7 };
673 taskq_ent_t *tqes;
674 int i, rc = 0;
675
676 tqes = kmem_alloc(sizeof(*tqes) * SPLAT_TASKQ_ORDER_MAX, KM_SLEEP);
677 memset(tqes, 0, sizeof(*tqes) * SPLAT_TASKQ_ORDER_MAX);
678
679 splat_vprint(file, SPLAT_TASKQ_TEST5_NAME,
680 "Taskq '%s' creating (%s dispatch)\n",
681 SPLAT_TASKQ_TEST5_NAME,
682 prealloc ? "prealloc" : "dynamic");
683 if ((tq = taskq_create(SPLAT_TASKQ_TEST5_NAME, 3, maxclsyspri,
684 50, INT_MAX, TASKQ_PREPOPULATE)) == NULL) {
685 splat_vprint(file, SPLAT_TASKQ_TEST5_NAME,
686 "Taskq '%s' create failed\n",
687 SPLAT_TASKQ_TEST5_NAME);
688 return -EINVAL;
689 }
690
691 tq_arg.flag = 0;
692 memset(&tq_arg.order, 0, sizeof(int) * SPLAT_TASKQ_ORDER_MAX);
693 spin_lock_init(&tq_arg.lock);
694 tq_arg.file = file;
695 tq_arg.name = SPLAT_TASKQ_TEST5_NAME;
696
697 for (i = 0; i < SPLAT_TASKQ_ORDER_MAX; i++) {
698 taskq_init_ent(&tqes[i]);
699
700 tq_id[i].id = i + 1;
701 tq_id[i].arg = &tq_arg;
702
703 if (prealloc) {
704 taskq_dispatch_ent(tq, splat_taskq_test5_func,
705 &tq_id[i], TQ_SLEEP, &tqes[i]);
706 id = tqes[i].tqent_id;
707 } else {
708 id = taskq_dispatch(tq, splat_taskq_test5_func,
709 &tq_id[i], TQ_SLEEP);
710 }
711
712 if (id == 0) {
713 splat_vprint(file, SPLAT_TASKQ_TEST5_NAME,
714 "Taskq '%s' function '%s' dispatch failed\n",
715 tq_arg.name, sym2str(splat_taskq_test5_func));
716 rc = -EINVAL;
717 goto out;
718 }
719
720 if (tq_id[i].id != id) {
721 splat_vprint(file, SPLAT_TASKQ_TEST5_NAME,
722 "Taskq '%s' expected taskqid %d got %d\n",
723 tq_arg.name, (int)tq_id[i].id, (int)id);
724 rc = -EINVAL;
725 goto out;
726 }
727 }
728
729 splat_vprint(file, SPLAT_TASKQ_TEST5_NAME, "Taskq '%s' "
730 "waiting for taskqid %d completion\n", tq_arg.name, 3);
731 taskq_wait_all(tq, 3);
732 if ((rc = splat_taskq_test_order(&tq_arg, order1)))
733 goto out;
734
735 splat_vprint(file, SPLAT_TASKQ_TEST5_NAME, "Taskq '%s' "
736 "waiting for taskqid %d completion\n", tq_arg.name, 8);
737 taskq_wait_all(tq, 8);
738 rc = splat_taskq_test_order(&tq_arg, order2);
739
740 out:
741 splat_vprint(file, SPLAT_TASKQ_TEST5_NAME,
742 "Taskq '%s' destroying\n", tq_arg.name);
743 taskq_destroy(tq);
744
745 kmem_free(tqes, sizeof(*tqes) * SPLAT_TASKQ_ORDER_MAX);
746
747 return rc;
748 }
749
750 static int
751 splat_taskq_test5(struct file *file, void *arg)
752 {
753 int rc;
754
755 rc = splat_taskq_test5_impl(file, arg, B_FALSE);
756 if (rc)
757 return rc;
758
759 rc = splat_taskq_test5_impl(file, arg, B_TRUE);
760
761 return rc;
762 }
763
764 /*
765 * Create a single task queue with three threads. Dispatch 8 tasks,
766 * setting TQ_FRONT on only the last three. Sleep after
767 * dispatching tasks 1-3 to ensure they will run and hold the threads
768 * busy while we dispatch the remaining tasks. Verify that tasks 6-8
769  * run before tasks 4-5.
770 *
771  * The following table shows each task id and how it will be
772  * scheduled. Each row represents one time unit and each column
773 * one of the three worker threads.
774 *
775  * NB: the horizontal line marks the LAST time unit consumed by the task,
776 * and must be included in the factor calculation.
777 * T
778 * 17-> +-----+
779 * 16 | T6 |
780 * 15-> +-----+ |
781 * 14 | T6 | |
782 * 13-> | | 5 +-----+
783 * 12 | | | T6 |
784 * 11-> | +-----| |
785 * 10 | 4 | T6 | |
786 * 9-> +-----+ | 8 |
787 * 8 | T5 | | |
788 * 7-> | | 7 +-----+
789 * 6 | | | T7 |
790 * 5-> | +-----+ |
791 * 4 | 6 | T5 | |
792 * 3-> +-----+ | |
793 * 2 | T3 | | |
794 * 1 | 1 | 2 | 3 |
795 * 0 +-----+-----+-----+
796 *
797 */
798 static void
799 splat_taskq_test6_func(void *arg)
800 {
801 /* Delays determined by above table */
802 static const int factor[SPLAT_TASKQ_ORDER_MAX+1] = {0,3,5,7,6,6,5,6,6};
803
804 splat_taskq_id_t *tq_id = (splat_taskq_id_t *)arg;
805 splat_taskq_arg_t *tq_arg = tq_id->arg;
806
807 splat_vprint(tq_arg->file, tq_arg->name,
808 "Taskqid %d starting for taskq '%s'\n",
809 tq_id->id, tq_arg->name);
810
811 if (tq_id->id < SPLAT_TASKQ_ORDER_MAX+1) {
812 msleep(factor[tq_id->id] * 50);
813 }
814
815 spin_lock(&tq_arg->lock);
816 tq_arg->order[tq_arg->flag] = tq_id->id;
817 tq_arg->flag++;
818 spin_unlock(&tq_arg->lock);
819
820 splat_vprint(tq_arg->file, tq_arg->name,
821 "Taskqid %d complete for taskq '%s'\n",
822 tq_id->id, tq_arg->name);
823 }
824
825 static int
826 splat_taskq_test6_impl(struct file *file, void *arg, boolean_t prealloc)
827 {
828 taskq_t *tq;
829 taskqid_t id;
830 splat_taskq_id_t tq_id[SPLAT_TASKQ_ORDER_MAX];
831 splat_taskq_arg_t tq_arg;
832 int order[SPLAT_TASKQ_ORDER_MAX] = { 1,2,3,6,7,8,4,5 };
833 taskq_ent_t *tqes;
834 int i, rc = 0;
835 uint_t tflags;
836
837 tqes = kmem_alloc(sizeof(*tqes) * SPLAT_TASKQ_ORDER_MAX, KM_SLEEP);
838 memset(tqes, 0, sizeof(*tqes) * SPLAT_TASKQ_ORDER_MAX);
839
840 splat_vprint(file, SPLAT_TASKQ_TEST6_NAME,
841 "Taskq '%s' creating (%s dispatch)\n",
842 SPLAT_TASKQ_TEST6_NAME,
843 prealloc ? "prealloc" : "dynamic");
844 if ((tq = taskq_create(SPLAT_TASKQ_TEST6_NAME, 3, maxclsyspri,
845 50, INT_MAX, TASKQ_PREPOPULATE)) == NULL) {
846 splat_vprint(file, SPLAT_TASKQ_TEST6_NAME,
847 "Taskq '%s' create failed\n",
848 SPLAT_TASKQ_TEST6_NAME);
849 return -EINVAL;
850 }
851
852 tq_arg.flag = 0;
853 memset(&tq_arg.order, 0, sizeof(int) * SPLAT_TASKQ_ORDER_MAX);
854 spin_lock_init(&tq_arg.lock);
855 tq_arg.file = file;
856 tq_arg.name = SPLAT_TASKQ_TEST6_NAME;
857
858 for (i = 0; i < SPLAT_TASKQ_ORDER_MAX; i++) {
859 taskq_init_ent(&tqes[i]);
860
861 tq_id[i].id = i + 1;
862 tq_id[i].arg = &tq_arg;
863 tflags = TQ_SLEEP;
864 if (i > 4)
865 tflags |= TQ_FRONT;
866
867 if (prealloc) {
868 taskq_dispatch_ent(tq, splat_taskq_test6_func,
869 &tq_id[i], tflags, &tqes[i]);
870 id = tqes[i].tqent_id;
871 } else {
872 id = taskq_dispatch(tq, splat_taskq_test6_func,
873 &tq_id[i], tflags);
874 }
875
876 if (id == 0) {
877 splat_vprint(file, SPLAT_TASKQ_TEST6_NAME,
878 "Taskq '%s' function '%s' dispatch failed\n",
879 tq_arg.name, sym2str(splat_taskq_test6_func));
880 rc = -EINVAL;
881 goto out;
882 }
883
884 if (tq_id[i].id != id) {
885 splat_vprint(file, SPLAT_TASKQ_TEST6_NAME,
886 "Taskq '%s' expected taskqid %d got %d\n",
887 tq_arg.name, (int)tq_id[i].id, (int)id);
888 rc = -EINVAL;
889 goto out;
890 }
891 /* Sleep to let tasks 1-3 start executing. */
892                 if (i == 2)
893 msleep(100);
894 }
895
896 splat_vprint(file, SPLAT_TASKQ_TEST6_NAME, "Taskq '%s' "
897 "waiting for taskqid %d completion\n", tq_arg.name,
898 SPLAT_TASKQ_ORDER_MAX);
899 taskq_wait_all(tq, SPLAT_TASKQ_ORDER_MAX);
900 rc = splat_taskq_test_order(&tq_arg, order);
901
902 out:
903 splat_vprint(file, SPLAT_TASKQ_TEST6_NAME,
904 "Taskq '%s' destroying\n", tq_arg.name);
905 taskq_destroy(tq);
906
907 kmem_free(tqes, sizeof(*tqes) * SPLAT_TASKQ_ORDER_MAX);
908
909 return rc;
910 }
911
912 static int
913 splat_taskq_test6(struct file *file, void *arg)
914 {
915 int rc;
916
917 rc = splat_taskq_test6_impl(file, arg, B_FALSE);
918 if (rc)
919 return rc;
920
921 rc = splat_taskq_test6_impl(file, arg, B_TRUE);
922
923 return rc;
924 }
925
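/*
 * Create a single threaded taskq and dispatch a task which re-dispatches
 * itself until SPLAT_TASKQ_DEPTH_MAX is reached.  Wait for the full chain
 * of tasks to finish and verify the expected recursion depth was reached.
 */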
926 static void
927 splat_taskq_test7_func(void *arg)
928 {
929 splat_taskq_arg_t *tq_arg = (splat_taskq_arg_t *)arg;
930 taskqid_t id;
931
932 ASSERT(tq_arg);
933
934 if (tq_arg->depth >= SPLAT_TASKQ_DEPTH_MAX)
935 return;
936
937 tq_arg->depth++;
938
939 splat_vprint(tq_arg->file, SPLAT_TASKQ_TEST7_NAME,
940 "Taskq '%s' function '%s' dispatching (depth = %u)\n",
941 tq_arg->name, sym2str(splat_taskq_test7_func),
942 tq_arg->depth);
943
944 if (tq_arg->tqe) {
945 VERIFY(taskq_empty_ent(tq_arg->tqe));
946 taskq_dispatch_ent(tq_arg->tq, splat_taskq_test7_func,
947 tq_arg, TQ_SLEEP, tq_arg->tqe);
948 id = tq_arg->tqe->tqent_id;
949 } else {
950 id = taskq_dispatch(tq_arg->tq, splat_taskq_test7_func,
951 tq_arg, TQ_SLEEP);
952 }
953
954 if (id == 0) {
955 splat_vprint(tq_arg->file, SPLAT_TASKQ_TEST7_NAME,
956 "Taskq '%s' function '%s' dispatch failed "
957 "(depth = %u)\n", tq_arg->name,
958 sym2str(splat_taskq_test7_func), tq_arg->depth);
959 tq_arg->flag = -EINVAL;
960 return;
961 }
962 }
963
964 static int
965 splat_taskq_test7_impl(struct file *file, void *arg, boolean_t prealloc)
966 {
967 taskq_t *tq;
968 taskq_ent_t tqe;
969 splat_taskq_arg_t tq_arg;
970
971 splat_vprint(file, SPLAT_TASKQ_TEST7_NAME,
972 "Taskq '%s' creating (%s dispatch)\n",
973 SPLAT_TASKQ_TEST7_NAME,
974 prealloc ? "prealloc" : "dynamic");
975 if ((tq = taskq_create(SPLAT_TASKQ_TEST7_NAME, 1, maxclsyspri,
976 50, INT_MAX, TASKQ_PREPOPULATE)) == NULL) {
977 splat_vprint(file, SPLAT_TASKQ_TEST7_NAME,
978 "Taskq '%s' create failed\n",
979 SPLAT_TASKQ_TEST7_NAME);
980 return -EINVAL;
981 }
982
983 tq_arg.depth = 0;
984 tq_arg.flag = 0;
985 tq_arg.id = 0;
986 tq_arg.file = file;
987 tq_arg.name = SPLAT_TASKQ_TEST7_NAME;
988 tq_arg.tq = tq;
989
990 if (prealloc) {
991 taskq_init_ent(&tqe);
992 tq_arg.tqe = &tqe;
993 } else {
994 tq_arg.tqe = NULL;
995 }
996
997 splat_taskq_test7_func(&tq_arg);
998
999 if (tq_arg.flag == 0) {
1000 splat_vprint(file, SPLAT_TASKQ_TEST7_NAME,
1001 "Taskq '%s' waiting\n", tq_arg.name);
1002 taskq_wait_all(tq, SPLAT_TASKQ_DEPTH_MAX);
1003 }
1004
1005 splat_vprint(file, SPLAT_TASKQ_TEST7_NAME,
1006 "Taskq '%s' destroying\n", tq_arg.name);
1007 taskq_destroy(tq);
1008
1009 return tq_arg.depth == SPLAT_TASKQ_DEPTH_MAX ? 0 : -EINVAL;
1010 }
1011
1012 static int
1013 splat_taskq_test7(struct file *file, void *arg)
1014 {
1015 int rc;
1016
1017 rc = splat_taskq_test7_impl(file, arg, B_FALSE);
1018 if (rc)
1019 return rc;
1020
1021 rc = splat_taskq_test7_impl(file, arg, B_TRUE);
1022
1023 return rc;
1024 }
1025
1026 /*
1027 * Create a taskq with 100 threads and dispatch a huge number of trivial
1028 * tasks to generate contention on tq->tq_lock. This test should always
1029 * pass. The purpose is to provide a benchmark for measuring the
1030 * effectiveness of taskq optimizations.
1031 */
1032 static void
1033 splat_taskq_test8_func(void *arg)
1034 {
1035 splat_taskq_arg_t *tq_arg = (splat_taskq_arg_t *)arg;
1036 ASSERT(tq_arg);
1037
1038 atomic_inc(tq_arg->count);
1039 }
1040
1041 #define TEST8_NUM_TASKS 0x20000
1042 #define TEST8_THREADS_PER_TASKQ 100
1043
1044 static int
1045 splat_taskq_test8_common(struct file *file, void *arg, int minalloc,
1046 int maxalloc)
1047 {
1048 taskq_t *tq;
1049 taskqid_t id;
1050 splat_taskq_arg_t tq_arg;
1051 taskq_ent_t **tqes;
1052 atomic_t count;
1053 int i, j, rc = 0;
1054
1055 tqes = vmalloc(sizeof(*tqes) * TEST8_NUM_TASKS);
1056 if (tqes == NULL)
1057 return -ENOMEM;
1058 memset(tqes, 0, sizeof(*tqes) * TEST8_NUM_TASKS);
1059
1060 splat_vprint(file, SPLAT_TASKQ_TEST8_NAME,
1061 "Taskq '%s' creating (%d/%d/%d)\n",
1062 SPLAT_TASKQ_TEST8_NAME,
1063 minalloc, maxalloc, TEST8_NUM_TASKS);
1064 if ((tq = taskq_create(SPLAT_TASKQ_TEST8_NAME, TEST8_THREADS_PER_TASKQ,
1065 maxclsyspri, minalloc, maxalloc,
1066 TASKQ_PREPOPULATE)) == NULL) {
1067 splat_vprint(file, SPLAT_TASKQ_TEST8_NAME,
1068 "Taskq '%s' create failed\n",
1069 SPLAT_TASKQ_TEST8_NAME);
1070 rc = -EINVAL;
1071 goto out_free;
1072 }
1073
1074 tq_arg.file = file;
1075 tq_arg.name = SPLAT_TASKQ_TEST8_NAME;
1076 tq_arg.count = &count;
1077 atomic_set(tq_arg.count, 0);
1078
1079 for (i = 0; i < TEST8_NUM_TASKS; i++) {
1080 tqes[i] = kmalloc(sizeof(taskq_ent_t), GFP_KERNEL);
1081 if (tqes[i] == NULL) {
1082 rc = -ENOMEM;
1083 goto out;
1084 }
1085 taskq_init_ent(tqes[i]);
1086
1087 taskq_dispatch_ent(tq, splat_taskq_test8_func,
1088 &tq_arg, TQ_SLEEP, tqes[i]);
1089
1090 id = tqes[i]->tqent_id;
1091
1092 if (id == 0) {
1093 splat_vprint(file, SPLAT_TASKQ_TEST8_NAME,
1094 "Taskq '%s' function '%s' dispatch "
1095 "%d failed\n", tq_arg.name,
1096 sym2str(splat_taskq_test8_func), i);
1097 rc = -EINVAL;
1098 goto out;
1099 }
1100 }
1101
1102 splat_vprint(file, SPLAT_TASKQ_TEST8_NAME, "Taskq '%s' "
1103 "waiting for %d dispatches\n", tq_arg.name,
1104 TEST8_NUM_TASKS);
1105 taskq_wait(tq);
1106 splat_vprint(file, SPLAT_TASKQ_TEST8_NAME, "Taskq '%s' "
1107 "%d/%d dispatches finished\n", tq_arg.name,
1108 atomic_read(tq_arg.count), TEST8_NUM_TASKS);
1109
1110 if (atomic_read(tq_arg.count) != TEST8_NUM_TASKS)
1111 rc = -ERANGE;
1112
1113 out:
1114 splat_vprint(file, SPLAT_TASKQ_TEST8_NAME, "Taskq '%s' destroying\n",
1115 tq_arg.name);
1116 taskq_destroy(tq);
1117 out_free:
1118 for (j = 0; j < TEST8_NUM_TASKS && tqes[j] != NULL; j++)
1119 kfree(tqes[j]);
1120 vfree(tqes);
1121
1122 return rc;
1123 }
1124
1125 static int
1126 splat_taskq_test8(struct file *file, void *arg)
1127 {
1128 int rc;
1129
1130 rc = splat_taskq_test8_common(file, arg, 1, 100);
1131
1132 return rc;
1133 }
1134
1135 /*
1136 * Create a taskq and dispatch a number of delayed tasks to the queue.
1137  * For each task verify that it was run no earlier than requested.
1138 */
1139 static void
1140 splat_taskq_test9_func(void *arg)
1141 {
1142 splat_taskq_arg_t *tq_arg = (splat_taskq_arg_t *)arg;
1143 ASSERT(tq_arg);
1144
1145 if (ddi_time_after_eq(ddi_get_lbolt(), tq_arg->expire))
1146 atomic_inc(tq_arg->count);
1147
1148 kmem_free(tq_arg, sizeof(splat_taskq_arg_t));
1149 }
1150
1151 static int
1152 splat_taskq_test9(struct file *file, void *arg)
1153 {
1154 taskq_t *tq;
1155 atomic_t count;
1156 int i, rc = 0;
1157 int minalloc = 1;
1158 int maxalloc = 10;
1159 int nr_tasks = 100;
1160
1161 splat_vprint(file, SPLAT_TASKQ_TEST9_NAME,
1162 "Taskq '%s' creating (%s dispatch) (%d/%d/%d)\n",
1163 SPLAT_TASKQ_TEST9_NAME, "delay", minalloc, maxalloc, nr_tasks);
1164 if ((tq = taskq_create(SPLAT_TASKQ_TEST9_NAME, 3, maxclsyspri,
1165 minalloc, maxalloc, TASKQ_PREPOPULATE)) == NULL) {
1166 splat_vprint(file, SPLAT_TASKQ_TEST9_NAME,
1167 "Taskq '%s' create failed\n", SPLAT_TASKQ_TEST9_NAME);
1168 return -EINVAL;
1169 }
1170
1171 atomic_set(&count, 0);
1172
1173 for (i = 1; i <= nr_tasks; i++) {
1174 splat_taskq_arg_t *tq_arg;
1175 taskqid_t id;
1176 uint32_t rnd;
1177
1178 /* A random timeout in jiffies of at most 5 seconds */
1179 get_random_bytes((void *)&rnd, 4);
1180 rnd = rnd % (5 * HZ);
1181
1182 tq_arg = kmem_alloc(sizeof(splat_taskq_arg_t), KM_SLEEP);
1183 tq_arg->file = file;
1184 tq_arg->name = SPLAT_TASKQ_TEST9_NAME;
1185 tq_arg->expire = ddi_get_lbolt() + rnd;
1186 tq_arg->count = &count;
1187
1188 splat_vprint(file, SPLAT_TASKQ_TEST9_NAME,
1189 "Taskq '%s' delay dispatch %u jiffies\n",
1190 SPLAT_TASKQ_TEST9_NAME, rnd);
1191
1192 id = taskq_dispatch_delay(tq, splat_taskq_test9_func,
1193 tq_arg, TQ_SLEEP, ddi_get_lbolt() + rnd);
1194
1195 if (id == 0) {
1196 splat_vprint(file, SPLAT_TASKQ_TEST9_NAME,
1197 "Taskq '%s' delay dispatch failed\n",
1198 SPLAT_TASKQ_TEST9_NAME);
1199 kmem_free(tq_arg, sizeof(splat_taskq_arg_t));
1200 taskq_wait(tq);
1201 rc = -EINVAL;
1202 goto out;
1203 }
1204 }
1205
1206 splat_vprint(file, SPLAT_TASKQ_TEST9_NAME, "Taskq '%s' waiting for "
1207 "%d delay dispatches\n", SPLAT_TASKQ_TEST9_NAME, nr_tasks);
1208
1209 taskq_wait(tq);
1210 if (atomic_read(&count) != nr_tasks)
1211 rc = -ERANGE;
1212
1213 splat_vprint(file, SPLAT_TASKQ_TEST9_NAME, "Taskq '%s' %d/%d delay "
1214 "dispatches finished on time\n", SPLAT_TASKQ_TEST9_NAME,
1215 atomic_read(&count), nr_tasks);
1216 splat_vprint(file, SPLAT_TASKQ_TEST9_NAME, "Taskq '%s' destroying\n",
1217 SPLAT_TASKQ_TEST9_NAME);
1218 out:
1219 taskq_destroy(tq);
1220
1221 return rc;
1222 }
1223
1224 /*
1225  * Create a taskq, dispatch tasks to the queue and then cancel them.
1226 */
1227 static void
1228 splat_taskq_test10_func(void *arg)
1229 {
1230 splat_taskq_arg_t *tq_arg = (splat_taskq_arg_t *)arg;
1231 uint8_t rnd;
1232
1233 if (ddi_time_after_eq(ddi_get_lbolt(), tq_arg->expire))
1234 atomic_inc(tq_arg->count);
1235
1236 /* Randomly sleep to further perturb the system */
1237 get_random_bytes((void *)&rnd, 1);
1238 msleep(1 + (rnd % 9));
1239 }
1240
1241 static int
1242 splat_taskq_test10(struct file *file, void *arg)
1243 {
1244 taskq_t *tq;
1245 splat_taskq_arg_t **tqas;
1246 atomic_t count;
1247 int i, j, rc = 0;
1248 int minalloc = 1;
1249 int maxalloc = 10;
1250 int nr_tasks = 100;
1251 int canceled = 0;
1252 int completed = 0;
1253 int blocked = 0;
1254 clock_t start, cancel;
1255
1256 tqas = vmalloc(sizeof(*tqas) * nr_tasks);
1257 if (tqas == NULL)
1258 return -ENOMEM;
1259 memset(tqas, 0, sizeof(*tqas) * nr_tasks);
1260
1261 splat_vprint(file, SPLAT_TASKQ_TEST10_NAME,
1262 "Taskq '%s' creating (%s dispatch) (%d/%d/%d)\n",
1263 SPLAT_TASKQ_TEST10_NAME, "delay", minalloc, maxalloc, nr_tasks);
1264 if ((tq = taskq_create(SPLAT_TASKQ_TEST10_NAME, 3, maxclsyspri,
1265 minalloc, maxalloc, TASKQ_PREPOPULATE)) == NULL) {
1266 splat_vprint(file, SPLAT_TASKQ_TEST10_NAME,
1267 "Taskq '%s' create failed\n", SPLAT_TASKQ_TEST10_NAME);
1268 rc = -EINVAL;
1269 goto out_free;
1270 }
1271
1272 atomic_set(&count, 0);
1273
1274 for (i = 0; i < nr_tasks; i++) {
1275 splat_taskq_arg_t *tq_arg;
1276 uint32_t rnd;
1277
1278 /* A random timeout in jiffies of at most 5 seconds */
1279 get_random_bytes((void *)&rnd, 4);
1280 rnd = rnd % (5 * HZ);
1281
1282 tq_arg = kmem_alloc(sizeof(splat_taskq_arg_t), KM_SLEEP);
1283 tq_arg->file = file;
1284 tq_arg->name = SPLAT_TASKQ_TEST10_NAME;
1285 tq_arg->count = &count;
1286 tqas[i] = tq_arg;
1287
1288 /*
1289               * Dispatch every third task immediately to mix things up; the cancel
1290               * code is inherently racy and we want to try to provoke any
1291               * subtle concurrency issues.
1292 */
1293 if ((i % 3) == 0) {
1294 tq_arg->expire = ddi_get_lbolt();
1295 tq_arg->id = taskq_dispatch(tq, splat_taskq_test10_func,
1296 tq_arg, TQ_SLEEP);
1297 } else {
1298 tq_arg->expire = ddi_get_lbolt() + rnd;
1299 tq_arg->id = taskq_dispatch_delay(tq,
1300 splat_taskq_test10_func,
1301 tq_arg, TQ_SLEEP, ddi_get_lbolt() + rnd);
1302 }
1303
1304 if (tq_arg->id == 0) {
1305 splat_vprint(file, SPLAT_TASKQ_TEST10_NAME,
1306 "Taskq '%s' dispatch failed\n",
1307 SPLAT_TASKQ_TEST10_NAME);
1308 kmem_free(tq_arg, sizeof(splat_taskq_arg_t));
1309 taskq_wait(tq);
1310 rc = -EINVAL;
1311 goto out;
1312 } else {
1313 splat_vprint(file, SPLAT_TASKQ_TEST10_NAME,
1314 "Taskq '%s' dispatch %lu in %lu jiffies\n",
1315 SPLAT_TASKQ_TEST10_NAME, (unsigned long)tq_arg->id,
1316 !(i % 3) ? 0 : tq_arg->expire - ddi_get_lbolt());
1317 }
1318 }
1319
1320 /*
1321 * Start randomly canceling tasks for the duration of the test. We
1322   * happen to know the valid task ids will be in the range 1..nr_tasks
1323   * because the taskq is private and was just created. However, we
1324   * have no idea whether a particular task has already executed or not.
1325 */
1326 splat_vprint(file, SPLAT_TASKQ_TEST10_NAME, "Taskq '%s' randomly "
1327 "canceling task ids\n", SPLAT_TASKQ_TEST10_NAME);
1328
1329 start = ddi_get_lbolt();
1330 i = 0;
1331
1332 while (ddi_time_before(ddi_get_lbolt(), start + 5 * HZ)) {
1333 taskqid_t id;
1334 uint32_t rnd;
1335
1336 i++;
1337 cancel = ddi_get_lbolt();
1338 get_random_bytes((void *)&rnd, 4);
1339 id = 1 + (rnd % nr_tasks);
1340 rc = taskq_cancel_id(tq, id);
1341
1342 /*
1343 * Keep track of the results of the random cancels.
1344 */
1345 if (rc == 0) {
1346 canceled++;
1347 } else if (rc == ENOENT) {
1348 completed++;
1349 } else if (rc == EBUSY) {
1350 blocked++;
1351 } else {
1352 rc = -EINVAL;
1353 break;
1354 }
1355
1356 /*
1357                 * Verify we never get blocked too long in taskq_cancel_id().
1358 * The worst case is 10ms if we happen to cancel the task
1359 * which is currently executing. We allow a factor of 2x.
1360 */
1361 if (ddi_get_lbolt() - cancel > HZ / 50) {
1362 splat_vprint(file, SPLAT_TASKQ_TEST10_NAME,
1363 "Taskq '%s' cancel for %lu took %lu\n",
1364 SPLAT_TASKQ_TEST10_NAME, (unsigned long)id,
1365 ddi_get_lbolt() - cancel);
1366 rc = -ETIMEDOUT;
1367 break;
1368 }
1369
1370 get_random_bytes((void *)&rnd, 4);
1371 msleep(1 + (rnd % 100));
1372 rc = 0;
1373 }
1374
1375 taskq_wait(tq);
1376
1377 /*
1378 * Cross check the results of taskq_cancel_id() with the number of
1379 * times the dispatched function actually ran successfully.
1380 */
1381 if ((rc == 0) && (nr_tasks - canceled != atomic_read(&count)))
1382 rc = -EDOM;
1383
1384 splat_vprint(file, SPLAT_TASKQ_TEST10_NAME, "Taskq '%s' %d attempts, "
1385 "%d canceled, %d completed, %d blocked, %d/%d tasks run\n",
1386 SPLAT_TASKQ_TEST10_NAME, i, canceled, completed, blocked,
1387 atomic_read(&count), nr_tasks);
1388 splat_vprint(file, SPLAT_TASKQ_TEST10_NAME, "Taskq '%s' destroying %d\n",
1389 SPLAT_TASKQ_TEST10_NAME, rc);
1390 out:
1391 taskq_destroy(tq);
1392 out_free:
1393 for (j = 0; j < nr_tasks && tqas[j] != NULL; j++)
1394 kmem_free(tqas[j], sizeof(splat_taskq_arg_t));
1395 vfree(tqas);
1396
1397 return rc;
1398 }
1399
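/*
 * Register the taskq test subsystem and each of the individual tests
 * with the SPLAT framework.
 */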
1400 splat_subsystem_t *
1401 splat_taskq_init(void)
1402 {
1403 splat_subsystem_t *sub;
1404
1405 sub = kmalloc(sizeof(*sub), GFP_KERNEL);
1406 if (sub == NULL)
1407 return NULL;
1408
1409 memset(sub, 0, sizeof(*sub));
1410 strncpy(sub->desc.name, SPLAT_TASKQ_NAME, SPLAT_NAME_SIZE);
1411 strncpy(sub->desc.desc, SPLAT_TASKQ_DESC, SPLAT_DESC_SIZE);
1412 INIT_LIST_HEAD(&sub->subsystem_list);
1413 INIT_LIST_HEAD(&sub->test_list);
1414 spin_lock_init(&sub->test_lock);
1415 sub->desc.id = SPLAT_SUBSYSTEM_TASKQ;
1416
1417 SPLAT_TEST_INIT(sub, SPLAT_TASKQ_TEST1_NAME, SPLAT_TASKQ_TEST1_DESC,
1418 SPLAT_TASKQ_TEST1_ID, splat_taskq_test1);
1419 SPLAT_TEST_INIT(sub, SPLAT_TASKQ_TEST2_NAME, SPLAT_TASKQ_TEST2_DESC,
1420 SPLAT_TASKQ_TEST2_ID, splat_taskq_test2);
1421 SPLAT_TEST_INIT(sub, SPLAT_TASKQ_TEST3_NAME, SPLAT_TASKQ_TEST3_DESC,
1422 SPLAT_TASKQ_TEST3_ID, splat_taskq_test3);
1423 SPLAT_TEST_INIT(sub, SPLAT_TASKQ_TEST4_NAME, SPLAT_TASKQ_TEST4_DESC,
1424 SPLAT_TASKQ_TEST4_ID, splat_taskq_test4);
1425 SPLAT_TEST_INIT(sub, SPLAT_TASKQ_TEST5_NAME, SPLAT_TASKQ_TEST5_DESC,
1426 SPLAT_TASKQ_TEST5_ID, splat_taskq_test5);
1427 SPLAT_TEST_INIT(sub, SPLAT_TASKQ_TEST6_NAME, SPLAT_TASKQ_TEST6_DESC,
1428 SPLAT_TASKQ_TEST6_ID, splat_taskq_test6);
1429 SPLAT_TEST_INIT(sub, SPLAT_TASKQ_TEST7_NAME, SPLAT_TASKQ_TEST7_DESC,
1430 SPLAT_TASKQ_TEST7_ID, splat_taskq_test7);
1431 SPLAT_TEST_INIT(sub, SPLAT_TASKQ_TEST8_NAME, SPLAT_TASKQ_TEST8_DESC,
1432 SPLAT_TASKQ_TEST8_ID, splat_taskq_test8);
1433 SPLAT_TEST_INIT(sub, SPLAT_TASKQ_TEST9_NAME, SPLAT_TASKQ_TEST9_DESC,
1434 SPLAT_TASKQ_TEST9_ID, splat_taskq_test9);
1435 SPLAT_TEST_INIT(sub, SPLAT_TASKQ_TEST10_NAME, SPLAT_TASKQ_TEST10_DESC,
1436 SPLAT_TASKQ_TEST10_ID, splat_taskq_test10);
1437
1438 return sub;
1439 }
1440
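/*
 * Unregister the individual taskq tests and free the subsystem.
 */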
1441 void
1442 splat_taskq_fini(splat_subsystem_t *sub)
1443 {
1444 ASSERT(sub);
1445 SPLAT_TEST_FINI(sub, SPLAT_TASKQ_TEST10_ID);
1446 SPLAT_TEST_FINI(sub, SPLAT_TASKQ_TEST9_ID);
1447 SPLAT_TEST_FINI(sub, SPLAT_TASKQ_TEST8_ID);
1448 SPLAT_TEST_FINI(sub, SPLAT_TASKQ_TEST7_ID);
1449 SPLAT_TEST_FINI(sub, SPLAT_TASKQ_TEST6_ID);
1450 SPLAT_TEST_FINI(sub, SPLAT_TASKQ_TEST5_ID);
1451 SPLAT_TEST_FINI(sub, SPLAT_TASKQ_TEST4_ID);
1452 SPLAT_TEST_FINI(sub, SPLAT_TASKQ_TEST3_ID);
1453 SPLAT_TEST_FINI(sub, SPLAT_TASKQ_TEST2_ID);
1454 SPLAT_TEST_FINI(sub, SPLAT_TASKQ_TEST1_ID);
1455
1456 kfree(sub);
1457 }
1458
1459 int
1460 splat_taskq_id(void) {
1461 return SPLAT_SUBSYSTEM_TASKQ;
1462 }