/* module/splat/splat-ctl.c */
1 /*****************************************************************************\
2 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
3 * Copyright (C) 2007 The Regents of the University of California.
4 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
5 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
6 * UCRL-CODE-235197
7 *
8 * This file is part of the SPL, Solaris Porting Layer.
9 * For details, see <http://github.com/behlendorf/spl/>.
10 *
11 * The SPL is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 *
16 * The SPL is distributed in the hope that it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 * for more details.
20 *
21 * You should have received a copy of the GNU General Public License along
22 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
23 *****************************************************************************
24 * Solaris Porting LAyer Tests (SPLAT) Test Control Interface.
25 *
26 * The 'splat' (Solaris Porting LAyer Tests) module is designed as a
27 * framework which runs various in kernel regression tests to validate
28 * the SPL primitives honor the Solaris ABI.
29 *
30 * The splat module is constructed of various splat_* source files each
31 * of which contain regression tests for a particular subsystem. For
32 * example, the splat_kmem.c file contains all the tests for validating
33 * the kmem interfaces have been implemented correctly. When the splat
34 * module is loaded splat_*_init() will be called for each subsystems
35 * tests. It is the responsibility of splat_*_init() to register all
36 * the tests for this subsystem using the SPLAT_TEST_INIT() macro.
37 * Similarly splat_*_fini() is called when the splat module is removed
38 * and is responsible for unregistering its tests via the SPLAT_TEST_FINI
39 * macro. Once a test is registered it can then be run with an ioctl()
40 * call which specifies the subsystem and test to be run. The provided
41 * splat command line tool can be used to display all available
42 * subsystems and tests. It can also be used to run the full suite
43 * of regression tests or particular tests.
44 \*****************************************************************************/
45
46 #include "splat-internal.h"
47
static spl_class *splat_class;			/* sysfs class for the udev device node */
static spl_device *splat_device;		/* /dev/splat control device */
static struct list_head splat_module_list;	/* list of registered test subsystems */
static spinlock_t splat_module_lock;		/* protects splat_module_list */
52
53 static int
54 splat_open(struct inode *inode, struct file *file)
55 {
56 unsigned int minor = iminor(inode);
57 splat_info_t *info;
58
59 if (minor >= SPLAT_MINORS)
60 return -ENXIO;
61
62 info = (splat_info_t *)kmalloc(sizeof(*info), GFP_KERNEL);
63 if (info == NULL)
64 return -ENOMEM;
65
66 spin_lock_init(&info->info_lock);
67 info->info_size = SPLAT_INFO_BUFFER_SIZE;
68 info->info_buffer = (char *)vmalloc(SPLAT_INFO_BUFFER_SIZE);
69 if (info->info_buffer == NULL) {
70 kfree(info);
71 return -ENOMEM;
72 }
73 memset(info->info_buffer, 0, info->info_size);
74
75 info->info_head = info->info_buffer;
76 file->private_data = (void *)info;
77
78 splat_print(file, "%s\n", spl_version);
79
80 return 0;
81 }
82
83 static int
84 splat_release(struct inode *inode, struct file *file)
85 {
86 unsigned int minor = iminor(inode);
87 splat_info_t *info = (splat_info_t *)file->private_data;
88
89 if (minor >= SPLAT_MINORS)
90 return -ENXIO;
91
92 ASSERT(info);
93 ASSERT(info->info_buffer);
94
95 vfree(info->info_buffer);
96 kfree(info);
97
98 return 0;
99 }
100
101 static int
102 splat_buffer_clear(struct file *file, splat_cfg_t *kcfg, unsigned long arg)
103 {
104 splat_info_t *info = (splat_info_t *)file->private_data;
105
106 ASSERT(info);
107 ASSERT(info->info_buffer);
108
109 spin_lock(&info->info_lock);
110 memset(info->info_buffer, 0, info->info_size);
111 info->info_head = info->info_buffer;
112 spin_unlock(&info->info_lock);
113
114 return 0;
115 }
116
117 static int
118 splat_buffer_size(struct file *file, splat_cfg_t *kcfg, unsigned long arg)
119 {
120 splat_info_t *info = (splat_info_t *)file->private_data;
121 char *buf;
122 int min, size, rc = 0;
123
124 ASSERT(info);
125 ASSERT(info->info_buffer);
126
127 spin_lock(&info->info_lock);
128 if (kcfg->cfg_arg1 > 0) {
129
130 size = kcfg->cfg_arg1;
131 buf = (char *)vmalloc(size);
132 if (buf == NULL) {
133 rc = -ENOMEM;
134 goto out;
135 }
136
137 /* Zero fill and truncate contents when coping buffer */
138 min = ((size < info->info_size) ? size : info->info_size);
139 memset(buf, 0, size);
140 memcpy(buf, info->info_buffer, min);
141 vfree(info->info_buffer);
142 info->info_size = size;
143 info->info_buffer = buf;
144 info->info_head = info->info_buffer;
145 }
146
147 kcfg->cfg_rc1 = info->info_size;
148
149 if (copy_to_user((struct splat_cfg_t __user *)arg, kcfg, sizeof(*kcfg)))
150 rc = -EFAULT;
151 out:
152 spin_unlock(&info->info_lock);
153
154 return rc;
155 }
156
157
158 static splat_subsystem_t *
159 splat_subsystem_find(int id) {
160 splat_subsystem_t *sub;
161
162 spin_lock(&splat_module_lock);
163 list_for_each_entry(sub, &splat_module_list, subsystem_list) {
164 if (id == sub->desc.id) {
165 spin_unlock(&splat_module_lock);
166 return sub;
167 }
168 }
169 spin_unlock(&splat_module_lock);
170
171 return NULL;
172 }
173
174 static int
175 splat_subsystem_count(splat_cfg_t *kcfg, unsigned long arg)
176 {
177 splat_subsystem_t *sub;
178 int i = 0;
179
180 spin_lock(&splat_module_lock);
181 list_for_each_entry(sub, &splat_module_list, subsystem_list)
182 i++;
183
184 spin_unlock(&splat_module_lock);
185 kcfg->cfg_rc1 = i;
186
187 if (copy_to_user((struct splat_cfg_t __user *)arg, kcfg, sizeof(*kcfg)))
188 return -EFAULT;
189
190 return 0;
191 }
192
193 static int
194 splat_subsystem_list(splat_cfg_t *kcfg, unsigned long arg)
195 {
196 splat_subsystem_t *sub;
197 splat_cfg_t *tmp;
198 int size, i = 0;
199
200 /* Structure will be sized large enough for N subsystem entries
201 * which is passed in by the caller. On exit the number of
202 * entries filled in with valid subsystems will be stored in
203 * cfg_rc1. If the caller does not provide enough entries
204 * for all subsystems we will truncate the list to avoid overrun.
205 */
206 size = sizeof(*tmp) + kcfg->cfg_data.splat_subsystems.size *
207 sizeof(splat_user_t);
208 tmp = kmalloc(size, GFP_KERNEL);
209 if (tmp == NULL)
210 return -ENOMEM;
211
212 /* Local 'tmp' is used as the structure copied back to user space */
213 memset(tmp, 0, size);
214 memcpy(tmp, kcfg, sizeof(*kcfg));
215
216 spin_lock(&splat_module_lock);
217 list_for_each_entry(sub, &splat_module_list, subsystem_list) {
218 strncpy(tmp->cfg_data.splat_subsystems.descs[i].name,
219 sub->desc.name, SPLAT_NAME_SIZE);
220 strncpy(tmp->cfg_data.splat_subsystems.descs[i].desc,
221 sub->desc.desc, SPLAT_DESC_SIZE);
222 tmp->cfg_data.splat_subsystems.descs[i].id = sub->desc.id;
223
224 /* Truncate list if we are about to overrun alloc'ed memory */
225 if ((i++) == kcfg->cfg_data.splat_subsystems.size)
226 break;
227 }
228 spin_unlock(&splat_module_lock);
229 tmp->cfg_rc1 = i;
230
231 if (copy_to_user((struct splat_cfg_t __user *)arg, tmp, size)) {
232 kfree(tmp);
233 return -EFAULT;
234 }
235
236 kfree(tmp);
237 return 0;
238 }
239
240 static int
241 splat_test_count(splat_cfg_t *kcfg, unsigned long arg)
242 {
243 splat_subsystem_t *sub;
244 splat_test_t *test;
245 int i = 0;
246
247 /* Subsystem ID passed as arg1 */
248 sub = splat_subsystem_find(kcfg->cfg_arg1);
249 if (sub == NULL)
250 return -EINVAL;
251
252 spin_lock(&(sub->test_lock));
253 list_for_each_entry(test, &(sub->test_list), test_list)
254 i++;
255
256 spin_unlock(&(sub->test_lock));
257 kcfg->cfg_rc1 = i;
258
259 if (copy_to_user((struct splat_cfg_t __user *)arg, kcfg, sizeof(*kcfg)))
260 return -EFAULT;
261
262 return 0;
263 }
264
265 static int
266 splat_test_list(splat_cfg_t *kcfg, unsigned long arg)
267 {
268 splat_subsystem_t *sub;
269 splat_test_t *test;
270 splat_cfg_t *tmp;
271 int size, i = 0;
272
273 /* Subsystem ID passed as arg1 */
274 sub = splat_subsystem_find(kcfg->cfg_arg1);
275 if (sub == NULL)
276 return -EINVAL;
277
278 /* Structure will be sized large enough for N test entries
279 * which is passed in by the caller. On exit the number of
280 * entries filled in with valid tests will be stored in
281 * cfg_rc1. If the caller does not provide enough entries
282 * for all tests we will truncate the list to avoid overrun.
283 */
284 size = sizeof(*tmp)+kcfg->cfg_data.splat_tests.size*sizeof(splat_user_t);
285 tmp = kmalloc(size, GFP_KERNEL);
286 if (tmp == NULL)
287 return -ENOMEM;
288
289 /* Local 'tmp' is used as the structure copied back to user space */
290 memset(tmp, 0, size);
291 memcpy(tmp, kcfg, sizeof(*kcfg));
292
293 spin_lock(&(sub->test_lock));
294 list_for_each_entry(test, &(sub->test_list), test_list) {
295 strncpy(tmp->cfg_data.splat_tests.descs[i].name,
296 test->desc.name, SPLAT_NAME_SIZE);
297 strncpy(tmp->cfg_data.splat_tests.descs[i].desc,
298 test->desc.desc, SPLAT_DESC_SIZE);
299 tmp->cfg_data.splat_tests.descs[i].id = test->desc.id;
300
301 /* Truncate list if we are about to overrun alloc'ed memory */
302 if ((i++) == kcfg->cfg_data.splat_tests.size)
303 break;
304 }
305 spin_unlock(&(sub->test_lock));
306 tmp->cfg_rc1 = i;
307
308 if (copy_to_user((struct splat_cfg_t __user *)arg, tmp, size)) {
309 kfree(tmp);
310 return -EFAULT;
311 }
312
313 kfree(tmp);
314 return 0;
315 }
316
317 static int
318 splat_validate(struct file *file, splat_subsystem_t *sub, int cmd, void *arg)
319 {
320 splat_test_t *test;
321
322 spin_lock(&(sub->test_lock));
323 list_for_each_entry(test, &(sub->test_list), test_list) {
324 if (test->desc.id == cmd) {
325 spin_unlock(&(sub->test_lock));
326 return test->test(file, arg);
327 }
328 }
329 spin_unlock(&(sub->test_lock));
330
331 return -EINVAL;
332 }
333
334 static int
335 splat_ioctl_cfg(struct file *file, unsigned int cmd, unsigned long arg)
336 {
337 splat_cfg_t kcfg;
338 int rc = 0;
339
340 /* User and kernel space agree about arg size */
341 if (_IOC_SIZE(cmd) != sizeof(kcfg))
342 return -EBADMSG;
343
344 if (copy_from_user(&kcfg, (splat_cfg_t *)arg, sizeof(kcfg)))
345 return -EFAULT;
346
347 if (kcfg.cfg_magic != SPLAT_CFG_MAGIC) {
348 splat_print(file, "Bad config magic 0x%x != 0x%x\n",
349 kcfg.cfg_magic, SPLAT_CFG_MAGIC);
350 return -EINVAL;
351 }
352
353 switch (kcfg.cfg_cmd) {
354 case SPLAT_CFG_BUFFER_CLEAR:
355 /* cfg_arg1 - Unused
356 * cfg_rc1 - Unused
357 */
358 rc = splat_buffer_clear(file, &kcfg, arg);
359 break;
360 case SPLAT_CFG_BUFFER_SIZE:
361 /* cfg_arg1 - 0 - query size; >0 resize
362 * cfg_rc1 - Set to current buffer size
363 */
364 rc = splat_buffer_size(file, &kcfg, arg);
365 break;
366 case SPLAT_CFG_SUBSYSTEM_COUNT:
367 /* cfg_arg1 - Unused
368 * cfg_rc1 - Set to number of subsystems
369 */
370 rc = splat_subsystem_count(&kcfg, arg);
371 break;
372 case SPLAT_CFG_SUBSYSTEM_LIST:
373 /* cfg_arg1 - Unused
374 * cfg_rc1 - Set to number of subsystems
375 * cfg_data.splat_subsystems - Set with subsystems
376 */
377 rc = splat_subsystem_list(&kcfg, arg);
378 break;
379 case SPLAT_CFG_TEST_COUNT:
380 /* cfg_arg1 - Set to a target subsystem
381 * cfg_rc1 - Set to number of tests
382 */
383 rc = splat_test_count(&kcfg, arg);
384 break;
385 case SPLAT_CFG_TEST_LIST:
386 /* cfg_arg1 - Set to a target subsystem
387 * cfg_rc1 - Set to number of tests
388 * cfg_data.splat_subsystems - Populated with tests
389 */
390 rc = splat_test_list(&kcfg, arg);
391 break;
392 default:
393 splat_print(file, "Bad config command %d\n",
394 kcfg.cfg_cmd);
395 rc = -EINVAL;
396 break;
397 }
398
399 return rc;
400 }
401
402 static int
403 splat_ioctl_cmd(struct file *file, unsigned int cmd, unsigned long arg)
404 {
405 splat_subsystem_t *sub;
406 splat_cmd_t kcmd;
407 int rc = -EINVAL;
408 void *data = NULL;
409
410 /* User and kernel space agree about arg size */
411 if (_IOC_SIZE(cmd) != sizeof(kcmd))
412 return -EBADMSG;
413
414 if (copy_from_user(&kcmd, (splat_cfg_t *)arg, sizeof(kcmd)))
415 return -EFAULT;
416
417 if (kcmd.cmd_magic != SPLAT_CMD_MAGIC) {
418 splat_print(file, "Bad command magic 0x%x != 0x%x\n",
419 kcmd.cmd_magic, SPLAT_CFG_MAGIC);
420 return -EINVAL;
421 }
422
423 /* Allocate memory for any opaque data the caller needed to pass on */
424 if (kcmd.cmd_data_size > 0) {
425 data = (void *)kmalloc(kcmd.cmd_data_size, GFP_KERNEL);
426 if (data == NULL)
427 return -ENOMEM;
428
429 if (copy_from_user(data, (void *)(arg + offsetof(splat_cmd_t,
430 cmd_data_str)), kcmd.cmd_data_size)) {
431 kfree(data);
432 return -EFAULT;
433 }
434 }
435
436 sub = splat_subsystem_find(kcmd.cmd_subsystem);
437 if (sub != NULL)
438 rc = splat_validate(file, sub, kcmd.cmd_test, data);
439 else
440 rc = -EINVAL;
441
442 if (data != NULL)
443 kfree(data);
444
445 return rc;
446 }
447
448 static long
449 splat_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
450 {
451 unsigned int minor = iminor(file->f_dentry->d_inode);
452 int rc = 0;
453
454 /* Ignore tty ioctls */
455 if ((cmd & 0xffffff00) == ((int)'T') << 8)
456 return -ENOTTY;
457
458 if (minor >= SPLAT_MINORS)
459 return -ENXIO;
460
461 switch (cmd) {
462 case SPLAT_CFG:
463 rc = splat_ioctl_cfg(file, cmd, arg);
464 break;
465 case SPLAT_CMD:
466 rc = splat_ioctl_cmd(file, cmd, arg);
467 break;
468 default:
469 splat_print(file, "Bad ioctl command %d\n", cmd);
470 rc = -EINVAL;
471 break;
472 }
473
474 return rc;
475 }
476
#ifdef CONFIG_COMPAT
/* Compatibility handler for ioctls from 32-bit ELF binaries.  The
 * request is forwarded unchanged to the 64-bit handler -- NOTE(review):
 * this assumes splat_cfg_t/splat_cmd_t have identical layouts on both
 * ABIs; confirm against the structure definitions. */
static long
splat_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	return splat_unlocked_ioctl(file, cmd, arg);
}
#endif /* CONFIG_COMPAT */
485
486 /* I'm not sure why you would want to write in to this buffer from
487 * user space since its principle use is to pass test status info
488 * back to the user space, but I don't see any reason to prevent it.
489 */
490 static ssize_t splat_write(struct file *file, const char __user *buf,
491 size_t count, loff_t *ppos)
492 {
493 unsigned int minor = iminor(file->f_dentry->d_inode);
494 splat_info_t *info = (splat_info_t *)file->private_data;
495 int rc = 0;
496
497 if (minor >= SPLAT_MINORS)
498 return -ENXIO;
499
500 ASSERT(info);
501 ASSERT(info->info_buffer);
502
503 spin_lock(&info->info_lock);
504
505 /* Write beyond EOF */
506 if (*ppos >= info->info_size) {
507 rc = -EFBIG;
508 goto out;
509 }
510
511 /* Resize count if beyond EOF */
512 if (*ppos + count > info->info_size)
513 count = info->info_size - *ppos;
514
515 if (copy_from_user(info->info_buffer, buf, count)) {
516 rc = -EFAULT;
517 goto out;
518 }
519
520 *ppos += count;
521 rc = count;
522 out:
523 spin_unlock(&info->info_lock);
524 return rc;
525 }
526
527 static ssize_t splat_read(struct file *file, char __user *buf,
528 size_t count, loff_t *ppos)
529 {
530 unsigned int minor = iminor(file->f_dentry->d_inode);
531 splat_info_t *info = (splat_info_t *)file->private_data;
532 int rc = 0;
533
534 if (minor >= SPLAT_MINORS)
535 return -ENXIO;
536
537 ASSERT(info);
538 ASSERT(info->info_buffer);
539
540 spin_lock(&info->info_lock);
541
542 /* Read beyond EOF */
543 if (*ppos >= info->info_size)
544 goto out;
545
546 /* Resize count if beyond EOF */
547 if (*ppos + count > info->info_size)
548 count = info->info_size - *ppos;
549
550 if (copy_to_user(buf, info->info_buffer + *ppos, count)) {
551 rc = -EFAULT;
552 goto out;
553 }
554
555 *ppos += count;
556 rc = count;
557 out:
558 spin_unlock(&info->info_lock);
559 return rc;
560 }
561
562 static loff_t splat_seek(struct file *file, loff_t offset, int origin)
563 {
564 unsigned int minor = iminor(file->f_dentry->d_inode);
565 splat_info_t *info = (splat_info_t *)file->private_data;
566 int rc = -EINVAL;
567
568 if (minor >= SPLAT_MINORS)
569 return -ENXIO;
570
571 ASSERT(info);
572 ASSERT(info->info_buffer);
573
574 spin_lock(&info->info_lock);
575
576 switch (origin) {
577 case 0: /* SEEK_SET - No-op just do it */
578 break;
579 case 1: /* SEEK_CUR - Seek from current */
580 offset = file->f_pos + offset;
581 break;
582 case 2: /* SEEK_END - Seek from end */
583 offset = info->info_size + offset;
584 break;
585 }
586
587 if (offset >= 0) {
588 file->f_pos = offset;
589 file->f_version = 0;
590 rc = offset;
591 }
592
593 spin_unlock(&info->info_lock);
594
595 return rc;
596 }
597
/* Character device state and callbacks for /dev/splat */
static struct cdev splat_cdev;
static struct file_operations splat_fops = {
	.owner = THIS_MODULE,
	.open = splat_open,
	.release = splat_release,
	.unlocked_ioctl = splat_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = splat_compat_ioctl,	/* 32-bit callers */
#endif
	.read = splat_read,
	.write = splat_write,
	.llseek = splat_seek,
};
611
612 static int
613 splat_init(void)
614 {
615 dev_t dev;
616 int rc;
617
618 spin_lock_init(&splat_module_lock);
619 INIT_LIST_HEAD(&splat_module_list);
620
621 SPLAT_SUBSYSTEM_INIT(kmem);
622 SPLAT_SUBSYSTEM_INIT(taskq);
623 SPLAT_SUBSYSTEM_INIT(krng);
624 SPLAT_SUBSYSTEM_INIT(mutex);
625 SPLAT_SUBSYSTEM_INIT(condvar);
626 SPLAT_SUBSYSTEM_INIT(thread);
627 SPLAT_SUBSYSTEM_INIT(rwlock);
628 SPLAT_SUBSYSTEM_INIT(time);
629 SPLAT_SUBSYSTEM_INIT(vnode);
630 SPLAT_SUBSYSTEM_INIT(kobj);
631 SPLAT_SUBSYSTEM_INIT(atomic);
632 SPLAT_SUBSYSTEM_INIT(list);
633 SPLAT_SUBSYSTEM_INIT(generic);
634 SPLAT_SUBSYSTEM_INIT(cred);
635 SPLAT_SUBSYSTEM_INIT(zlib);
636
637 dev = MKDEV(SPLAT_MAJOR, 0);
638 if ((rc = register_chrdev_region(dev, SPLAT_MINORS, SPLAT_NAME)))
639 goto error;
640
641 /* Support for registering a character driver */
642 cdev_init(&splat_cdev, &splat_fops);
643 splat_cdev.owner = THIS_MODULE;
644 kobject_set_name(&splat_cdev.kobj, SPLAT_NAME);
645 if ((rc = cdev_add(&splat_cdev, dev, SPLAT_MINORS))) {
646 printk(KERN_ERR "SPLAT: Error adding cdev, %d\n", rc);
647 kobject_put(&splat_cdev.kobj);
648 unregister_chrdev_region(dev, SPLAT_MINORS);
649 goto error;
650 }
651
652 /* Support for udev make driver info available in sysfs */
653 splat_class = spl_class_create(THIS_MODULE, "splat");
654 if (IS_ERR(splat_class)) {
655 rc = PTR_ERR(splat_class);
656 printk(KERN_ERR "SPLAT: Error creating splat class, %d\n", rc);
657 cdev_del(&splat_cdev);
658 unregister_chrdev_region(dev, SPLAT_MINORS);
659 goto error;
660 }
661
662 splat_device = spl_device_create(splat_class, NULL,
663 MKDEV(SPLAT_MAJOR, 0),
664 NULL, SPLAT_NAME);
665
666 printk(KERN_INFO "SPLAT: Loaded Solaris Porting LAyer "
667 "Tests v%s\n", SPL_META_VERSION);
668 return 0;
669 error:
670 printk(KERN_ERR "SPLAT: Error registering splat device, %d\n", rc);
671 return rc;
672 }
673
/*
 * Module unload: tear down the control device, then unregister every
 * test subsystem.  The ordering is deliberate and must not change:
 * the device goes away first so no new ioctls can arrive, and the
 * subsystems are removed in the reverse order of their registration.
 */
static int
splat_fini(void)
{
	dev_t dev = MKDEV(SPLAT_MAJOR, 0);

	/* Remove the udev device node and class before the cdev */
	spl_device_destroy(splat_class, splat_device, dev);
	spl_class_destroy(splat_class);
	cdev_del(&splat_cdev);
	unregister_chrdev_region(dev, SPLAT_MINORS);

	/* Reverse registration order relative to splat_init() */
	SPLAT_SUBSYSTEM_FINI(zlib);
	SPLAT_SUBSYSTEM_FINI(cred);
	SPLAT_SUBSYSTEM_FINI(generic);
	SPLAT_SUBSYSTEM_FINI(list);
	SPLAT_SUBSYSTEM_FINI(atomic);
	SPLAT_SUBSYSTEM_FINI(kobj);
	SPLAT_SUBSYSTEM_FINI(vnode);
	SPLAT_SUBSYSTEM_FINI(time);
	SPLAT_SUBSYSTEM_FINI(rwlock);
	SPLAT_SUBSYSTEM_FINI(thread);
	SPLAT_SUBSYSTEM_FINI(condvar);
	SPLAT_SUBSYSTEM_FINI(mutex);
	SPLAT_SUBSYSTEM_FINI(krng);
	SPLAT_SUBSYSTEM_FINI(taskq);
	SPLAT_SUBSYSTEM_FINI(kmem);

	/* Every subsystem must have removed itself from the list */
	ASSERT(list_empty(&splat_module_list));
	printk(KERN_INFO "SPLAT: Unloaded Solaris Porting LAyer "
	       "Tests v%s\n", SPL_META_VERSION);

	return 0;
}
706
/* Register the module entry/exit points via the SPL wrappers */
spl_module_init(splat_init);
spl_module_exit(splat_fini);

MODULE_AUTHOR("Lawrence Livermore National Labs");
MODULE_DESCRIPTION("Solaris Porting LAyer Tests");
MODULE_LICENSE("GPL");