/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *  UCRL-CODE-235197
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://github.com/behlendorf/spl/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *****************************************************************************
 *  Solaris Porting LAyer Tests (SPLAT) Test Control Interface.
 *
 *  The 'splat' (Solaris Porting LAyer Tests) module is designed as a
 *  framework which runs various in-kernel regression tests to validate
 *  that the SPL primitives honor the Solaris ABI.
 *
 *  The splat module is built from various splat_* source files, each of
 *  which contains the regression tests for a particular subsystem.  For
 *  example, splat_kmem.c contains all the tests validating that the kmem
 *  interfaces have been implemented correctly.  When the splat module is
 *  loaded, splat_*_init() is called for each subsystem's tests.  It is
 *  the responsibility of splat_*_init() to register all the tests for
 *  its subsystem using the SPLAT_TEST_INIT() macro.  Similarly,
 *  splat_*_fini() is called when the splat module is removed and is
 *  responsible for unregistering its tests via the SPLAT_TEST_FINI()
 *  macro.  Once a test is registered it can be run with an ioctl()
 *  call which specifies the subsystem and test to be run.  The provided
 *  splat command line tool can be used to display all available
 *  subsystems and tests, and to run either the full regression suite
 *  or individual tests.
\*****************************************************************************/
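
/*
 * Illustrative sketch only -- the exact macro arguments are defined in
 * splat-internal.h and the per-subsystem splat_*.c files, and the names
 * below (SPLAT_EXAMPLE_TEST1_ID, splat_example_test1) are hypothetical.
 * A subsystem's init hook typically registers each of its tests roughly
 * as follows:
 *
 *   SPLAT_TEST_INIT(sub, "example", "example test description",
 *                   SPLAT_EXAMPLE_TEST1_ID, splat_example_test1);
 *
 * and the matching fini hook unregisters it with
 * SPLAT_TEST_FINI(sub, SPLAT_EXAMPLE_TEST1_ID).  From user space the
 * registered tests are then driven through the SPLAT_CFG and SPLAT_CMD
 * ioctl()s implemented below.
 */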

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <sys/types.h>
#include <sys/debug.h>
#include "splat-internal.h"

static spl_class *splat_class;
static spl_device *splat_device;
static struct list_head splat_module_list;
static spinlock_t splat_module_lock;

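/*
 * Allocate a per-open info buffer used to accumulate test output for
 * this file descriptor; the buffer is later returned to user space via
 * read() and splat_print().
 */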
static int
splat_open(struct inode *inode, struct file *file)
{
	unsigned int minor = iminor(inode);
	splat_info_t *info;

	if (minor >= SPLAT_MINORS)
		return -ENXIO;

	info = (splat_info_t *)kmalloc(sizeof(*info), GFP_KERNEL);
	if (info == NULL)
		return -ENOMEM;

	mutex_init(&info->info_lock);
	info->info_size = SPLAT_INFO_BUFFER_SIZE;
	info->info_buffer = (char *)vmalloc(SPLAT_INFO_BUFFER_SIZE);
	if (info->info_buffer == NULL) {
		kfree(info);
		return -ENOMEM;
	}
	memset(info->info_buffer, 0, info->info_size);

	info->info_head = info->info_buffer;
	file->private_data = (void *)info;

	splat_print(file, "%s\n", spl_version);

	return 0;
}

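/*
 * Release the per-open info buffer allocated in splat_open().
 */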
static int
splat_release(struct inode *inode, struct file *file)
{
	unsigned int minor = iminor(inode);
	splat_info_t *info = (splat_info_t *)file->private_data;

	if (minor >= SPLAT_MINORS)
		return -ENXIO;

	ASSERT(info);
	ASSERT(info->info_buffer);

	mutex_destroy(&info->info_lock);
	vfree(info->info_buffer);
	kfree(info);

	return 0;
}

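/*
 * SPLAT_CFG_BUFFER_CLEAR handler: zero the info buffer and reset the
 * write head to the start of the buffer.
 */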
static int
splat_buffer_clear(struct file *file, splat_cfg_t *kcfg, unsigned long arg)
{
	splat_info_t *info = (splat_info_t *)file->private_data;

	ASSERT(info);
	ASSERT(info->info_buffer);

	mutex_lock(&info->info_lock);
	memset(info->info_buffer, 0, info->info_size);
	info->info_head = info->info_buffer;
	mutex_unlock(&info->info_lock);

	return 0;
}

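/*
 * SPLAT_CFG_BUFFER_SIZE handler: when cfg_arg1 is non-zero the info
 * buffer is resized to that many bytes (existing contents are truncated
 * as needed); in all cases the current size is returned in cfg_rc1.
 */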
static int
splat_buffer_size(struct file *file, splat_cfg_t *kcfg, unsigned long arg)
{
	splat_info_t *info = (splat_info_t *)file->private_data;
	char *buf;
	int min, size, rc = 0;

	ASSERT(info);
	ASSERT(info->info_buffer);

	mutex_lock(&info->info_lock);
	if (kcfg->cfg_arg1 > 0) {

		size = kcfg->cfg_arg1;
		buf = (char *)vmalloc(size);
		if (buf == NULL) {
			rc = -ENOMEM;
			goto out;
		}

		/* Zero fill and truncate contents when copying buffer */
		min = ((size < info->info_size) ? size : info->info_size);
		memset(buf, 0, size);
		memcpy(buf, info->info_buffer, min);
		vfree(info->info_buffer);
		info->info_size = size;
		info->info_buffer = buf;
		info->info_head = info->info_buffer;
	}

	kcfg->cfg_rc1 = info->info_size;

	if (copy_to_user((struct splat_cfg_t __user *)arg, kcfg, sizeof(*kcfg)))
		rc = -EFAULT;
out:
	mutex_unlock(&info->info_lock);

	return rc;
}


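/*
 * Return the registered subsystem matching 'id', or NULL if no such
 * subsystem exists.
 */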
static splat_subsystem_t *
splat_subsystem_find(int id) {
	splat_subsystem_t *sub;

	spin_lock(&splat_module_lock);
	list_for_each_entry(sub, &splat_module_list, subsystem_list) {
		if (id == sub->desc.id) {
			spin_unlock(&splat_module_lock);
			return sub;
		}
	}
	spin_unlock(&splat_module_lock);

	return NULL;
}

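/*
 * SPLAT_CFG_SUBSYSTEM_COUNT handler: return the number of registered
 * subsystems in cfg_rc1.
 */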
static int
splat_subsystem_count(splat_cfg_t *kcfg, unsigned long arg)
{
	splat_subsystem_t *sub;
	int i = 0;

	spin_lock(&splat_module_lock);
	list_for_each_entry(sub, &splat_module_list, subsystem_list)
		i++;

	spin_unlock(&splat_module_lock);
	kcfg->cfg_rc1 = i;

	if (copy_to_user((struct splat_cfg_t __user *)arg, kcfg, sizeof(*kcfg)))
		return -EFAULT;

	return 0;
}

static int
splat_subsystem_list(splat_cfg_t *kcfg, unsigned long arg)
{
	splat_subsystem_t *sub;
	splat_cfg_t *tmp;
	int size, i = 0;

	/* The structure is sized large enough for the N subsystem entries
	 * requested by the caller.  On exit the number of entries filled
	 * in with valid subsystems is stored in cfg_rc1.  If the caller
	 * does not provide enough entries for all subsystems the list is
	 * truncated to avoid an overrun.
	 */
	size = sizeof(*tmp) + kcfg->cfg_data.splat_subsystems.size *
	       sizeof(splat_user_t);
	tmp = kmalloc(size, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	/* Local 'tmp' is used as the structure copied back to user space */
	memset(tmp, 0, size);
	memcpy(tmp, kcfg, sizeof(*kcfg));

	spin_lock(&splat_module_lock);
	list_for_each_entry(sub, &splat_module_list, subsystem_list) {
		/* Truncate list before we overrun the alloc'ed memory */
		if (i == kcfg->cfg_data.splat_subsystems.size)
			break;

		strncpy(tmp->cfg_data.splat_subsystems.descs[i].name,
			sub->desc.name, SPLAT_NAME_SIZE);
		strncpy(tmp->cfg_data.splat_subsystems.descs[i].desc,
			sub->desc.desc, SPLAT_DESC_SIZE);
		tmp->cfg_data.splat_subsystems.descs[i].id = sub->desc.id;
		i++;
	}
	spin_unlock(&splat_module_lock);
	tmp->cfg_rc1 = i;

	if (copy_to_user((struct splat_cfg_t __user *)arg, tmp, size)) {
		kfree(tmp);
		return -EFAULT;
	}

	kfree(tmp);
	return 0;
}

static int
splat_test_count(splat_cfg_t *kcfg, unsigned long arg)
{
	splat_subsystem_t *sub;
	splat_test_t *test;
	int i = 0;

	/* Subsystem ID passed as arg1 */
	sub = splat_subsystem_find(kcfg->cfg_arg1);
	if (sub == NULL)
		return -EINVAL;

	spin_lock(&(sub->test_lock));
	list_for_each_entry(test, &(sub->test_list), test_list)
		i++;

	spin_unlock(&(sub->test_lock));
	kcfg->cfg_rc1 = i;

	if (copy_to_user((struct splat_cfg_t __user *)arg, kcfg, sizeof(*kcfg)))
		return -EFAULT;

	return 0;
}

static int
splat_test_list(splat_cfg_t *kcfg, unsigned long arg)
{
	splat_subsystem_t *sub;
	splat_test_t *test;
	splat_cfg_t *tmp;
	int size, i = 0;

	/* Subsystem ID passed as arg1 */
	sub = splat_subsystem_find(kcfg->cfg_arg1);
	if (sub == NULL)
		return -EINVAL;

	/* The structure is sized large enough for the N test entries
	 * requested by the caller.  On exit the number of entries filled
	 * in with valid tests is stored in cfg_rc1.  If the caller does
	 * not provide enough entries for all tests the list is truncated
	 * to avoid an overrun.
	 */
	size = sizeof(*tmp) + kcfg->cfg_data.splat_tests.size *
	       sizeof(splat_user_t);
	tmp = kmalloc(size, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	/* Local 'tmp' is used as the structure copied back to user space */
	memset(tmp, 0, size);
	memcpy(tmp, kcfg, sizeof(*kcfg));

	spin_lock(&(sub->test_lock));
	list_for_each_entry(test, &(sub->test_list), test_list) {
		/* Truncate list before we overrun the alloc'ed memory */
		if (i == kcfg->cfg_data.splat_tests.size)
			break;

		strncpy(tmp->cfg_data.splat_tests.descs[i].name,
			test->desc.name, SPLAT_NAME_SIZE);
		strncpy(tmp->cfg_data.splat_tests.descs[i].desc,
			test->desc.desc, SPLAT_DESC_SIZE);
		tmp->cfg_data.splat_tests.descs[i].id = test->desc.id;
		i++;
	}
	spin_unlock(&(sub->test_lock));
	tmp->cfg_rc1 = i;

	if (copy_to_user((struct splat_cfg_t __user *)arg, tmp, size)) {
		kfree(tmp);
		return -EFAULT;
	}

	kfree(tmp);
	return 0;
}

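/*
 * Look up test 'cmd' within the given subsystem and, if found, run it
 * and pass through its return code.  Returns -EINVAL if the test is not
 * registered.
 */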
static int
splat_validate(struct file *file, splat_subsystem_t *sub, int cmd, void *arg)
{
	splat_test_t *test;

	spin_lock(&(sub->test_lock));
	list_for_each_entry(test, &(sub->test_list), test_list) {
		if (test->desc.id == cmd) {
			spin_unlock(&(sub->test_lock));
			return test->test(file, arg);
		}
	}
	spin_unlock(&(sub->test_lock));

	return -EINVAL;
}

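/*
 * SPLAT_CFG ioctl handler: copy in the splat_cfg_t argument, verify its
 * magic, and dispatch to the appropriate configuration helper based on
 * cfg_cmd.
 */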
static int
splat_ioctl_cfg(struct file *file, unsigned int cmd, unsigned long arg)
{
	splat_cfg_t kcfg;
	int rc = 0;

	/* User and kernel space agree about arg size */
	if (_IOC_SIZE(cmd) != sizeof(kcfg))
		return -EBADMSG;

	if (copy_from_user(&kcfg, (splat_cfg_t *)arg, sizeof(kcfg)))
		return -EFAULT;

	if (kcfg.cfg_magic != SPLAT_CFG_MAGIC) {
		splat_print(file, "Bad config magic 0x%x != 0x%x\n",
			    kcfg.cfg_magic, SPLAT_CFG_MAGIC);
		return -EINVAL;
	}

	switch (kcfg.cfg_cmd) {
	case SPLAT_CFG_BUFFER_CLEAR:
		/* cfg_arg1 - Unused
		 * cfg_rc1  - Unused
		 */
		rc = splat_buffer_clear(file, &kcfg, arg);
		break;
	case SPLAT_CFG_BUFFER_SIZE:
		/* cfg_arg1 - 0 - query size; >0 resize
		 * cfg_rc1  - Set to current buffer size
		 */
		rc = splat_buffer_size(file, &kcfg, arg);
		break;
	case SPLAT_CFG_SUBSYSTEM_COUNT:
		/* cfg_arg1 - Unused
		 * cfg_rc1  - Set to number of subsystems
		 */
		rc = splat_subsystem_count(&kcfg, arg);
		break;
	case SPLAT_CFG_SUBSYSTEM_LIST:
		/* cfg_arg1 - Unused
		 * cfg_rc1  - Set to number of subsystems
		 * cfg_data.splat_subsystems - Set with subsystems
		 */
		rc = splat_subsystem_list(&kcfg, arg);
		break;
	case SPLAT_CFG_TEST_COUNT:
		/* cfg_arg1 - Set to a target subsystem
		 * cfg_rc1  - Set to number of tests
		 */
		rc = splat_test_count(&kcfg, arg);
		break;
	case SPLAT_CFG_TEST_LIST:
		/* cfg_arg1 - Set to a target subsystem
		 * cfg_rc1  - Set to number of tests
		 * cfg_data.splat_tests - Populated with tests
		 */
		rc = splat_test_list(&kcfg, arg);
		break;
	default:
		splat_print(file, "Bad config command %d\n", kcfg.cfg_cmd);
		rc = -EINVAL;
		break;
	}

	return rc;
}

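/*
 * SPLAT_CMD ioctl handler: copy in the splat_cmd_t argument, verify its
 * magic, copy in any opaque test data, and run the requested test via
 * splat_validate().
 */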
static int
splat_ioctl_cmd(struct file *file, unsigned int cmd, unsigned long arg)
{
	splat_subsystem_t *sub;
	splat_cmd_t kcmd;
	int rc = -EINVAL;
	void *data = NULL;

	/* User and kernel space agree about arg size */
	if (_IOC_SIZE(cmd) != sizeof(kcmd))
		return -EBADMSG;

	if (copy_from_user(&kcmd, (splat_cmd_t *)arg, sizeof(kcmd)))
		return -EFAULT;

	if (kcmd.cmd_magic != SPLAT_CMD_MAGIC) {
		splat_print(file, "Bad command magic 0x%x != 0x%x\n",
			    kcmd.cmd_magic, SPLAT_CMD_MAGIC);
		return -EINVAL;
	}

	/* Allocate memory for any opaque data the caller needed to pass on */
	if (kcmd.cmd_data_size > 0) {
		data = (void *)kmalloc(kcmd.cmd_data_size, GFP_KERNEL);
		if (data == NULL)
			return -ENOMEM;

		if (copy_from_user(data, (void *)(arg + offsetof(splat_cmd_t,
		    cmd_data_str)), kcmd.cmd_data_size)) {
			kfree(data);
			return -EFAULT;
		}
	}

	sub = splat_subsystem_find(kcmd.cmd_subsystem);
	if (sub != NULL)
		rc = splat_validate(file, sub, kcmd.cmd_test, data);
	else
		rc = -EINVAL;

	if (data != NULL)
		kfree(data);

	return rc;
}

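/*
 * Top-level ioctl entry point: reject tty ioctls and invalid minors,
 * then dispatch SPLAT_CFG and SPLAT_CMD requests.
 */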
static long
splat_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	unsigned int minor = iminor(file->f_dentry->d_inode);
	int rc = 0;

	/* Ignore tty ioctls */
	if ((cmd & 0xffffff00) == ((int)'T') << 8)
		return -ENOTTY;

	if (minor >= SPLAT_MINORS)
		return -ENXIO;

	switch (cmd) {
	case SPLAT_CFG:
		rc = splat_ioctl_cfg(file, cmd, arg);
		break;
	case SPLAT_CMD:
		rc = splat_ioctl_cmd(file, cmd, arg);
		break;
	default:
		splat_print(file, "Bad ioctl command %d\n", cmd);
		rc = -EINVAL;
		break;
	}

	return rc;
}

#ifdef CONFIG_COMPAT
/* Compatibility handler for ioctls from 32-bit ELF binaries */
static long
splat_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	return splat_unlocked_ioctl(file, cmd, arg);
}
#endif /* CONFIG_COMPAT */

/* I'm not sure why you would want to write in to this buffer from
 * user space since its principal use is to pass test status info
 * back to user space, but I don't see any reason to prevent it.
 */
static ssize_t splat_write(struct file *file, const char __user *buf,
			   size_t count, loff_t *ppos)
{
	unsigned int minor = iminor(file->f_dentry->d_inode);
	splat_info_t *info = (splat_info_t *)file->private_data;
	int rc = 0;

	if (minor >= SPLAT_MINORS)
		return -ENXIO;

	ASSERT(info);
	ASSERT(info->info_buffer);

	mutex_lock(&info->info_lock);

	/* Write beyond EOF */
	if (*ppos >= info->info_size) {
		rc = -EFBIG;
		goto out;
	}

	/* Resize count if beyond EOF */
	if (*ppos + count > info->info_size)
		count = info->info_size - *ppos;

	/* Honor the file offset so the write lands at *ppos, mirroring splat_read() */
	if (copy_from_user(info->info_buffer + *ppos, buf, count)) {
		rc = -EFAULT;
		goto out;
	}

	*ppos += count;
	rc = count;
out:
	mutex_unlock(&info->info_lock);
	return rc;
}

static ssize_t splat_read(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned int minor = iminor(file->f_dentry->d_inode);
	splat_info_t *info = (splat_info_t *)file->private_data;
	int rc = 0;

	if (minor >= SPLAT_MINORS)
		return -ENXIO;

	ASSERT(info);
	ASSERT(info->info_buffer);

	mutex_lock(&info->info_lock);

	/* Read beyond EOF */
	if (*ppos >= info->info_size)
		goto out;

	/* Resize count if beyond EOF */
	if (*ppos + count > info->info_size)
		count = info->info_size - *ppos;

	if (copy_to_user(buf, info->info_buffer + *ppos, count)) {
		rc = -EFAULT;
		goto out;
	}

	*ppos += count;
	rc = count;
out:
	mutex_unlock(&info->info_lock);
	return rc;
}

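/*
 * llseek handler for the info buffer; supports SEEK_SET, SEEK_CUR and
 * SEEK_END relative to the current buffer size.
 */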
static loff_t splat_seek(struct file *file, loff_t offset, int origin)
{
	unsigned int minor = iminor(file->f_dentry->d_inode);
	splat_info_t *info = (splat_info_t *)file->private_data;
	int rc = -EINVAL;

	if (minor >= SPLAT_MINORS)
		return -ENXIO;

	ASSERT(info);
	ASSERT(info->info_buffer);

	mutex_lock(&info->info_lock);

	switch (origin) {
	case 0: /* SEEK_SET - No-op just do it */
		break;
	case 1: /* SEEK_CUR - Seek from current */
		offset = file->f_pos + offset;
		break;
	case 2: /* SEEK_END - Seek from end */
		offset = info->info_size + offset;
		break;
	}

	if (offset >= 0) {
		file->f_pos = offset;
		file->f_version = 0;
		rc = offset;
	}

	mutex_unlock(&info->info_lock);

	return rc;
}

static struct cdev splat_cdev;
static struct file_operations splat_fops = {
	.owner		= THIS_MODULE,
	.open		= splat_open,
	.release	= splat_release,
	.unlocked_ioctl	= splat_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= splat_compat_ioctl,
#endif
	.read		= splat_read,
	.write		= splat_write,
	.llseek		= splat_seek,
};

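/*
 * Module load: initialize every test subsystem, then register the splat
 * character device and create its class/device nodes for udev.
 */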
static int
splat_init(void)
{
	dev_t dev;
	int rc;

	spin_lock_init(&splat_module_lock);
	INIT_LIST_HEAD(&splat_module_list);

	SPLAT_SUBSYSTEM_INIT(kmem);
	SPLAT_SUBSYSTEM_INIT(taskq);
	SPLAT_SUBSYSTEM_INIT(krng);
	SPLAT_SUBSYSTEM_INIT(mutex);
	SPLAT_SUBSYSTEM_INIT(condvar);
	SPLAT_SUBSYSTEM_INIT(thread);
	SPLAT_SUBSYSTEM_INIT(rwlock);
	SPLAT_SUBSYSTEM_INIT(time);
	SPLAT_SUBSYSTEM_INIT(vnode);
	SPLAT_SUBSYSTEM_INIT(kobj);
	SPLAT_SUBSYSTEM_INIT(atomic);
	SPLAT_SUBSYSTEM_INIT(list);
	SPLAT_SUBSYSTEM_INIT(generic);
	SPLAT_SUBSYSTEM_INIT(cred);
	SPLAT_SUBSYSTEM_INIT(zlib);
	SPLAT_SUBSYSTEM_INIT(linux);

	dev = MKDEV(SPLAT_MAJOR, 0);
	if ((rc = register_chrdev_region(dev, SPLAT_MINORS, SPLAT_NAME)))
		goto error;

	/* Support for registering a character driver */
	cdev_init(&splat_cdev, &splat_fops);
	splat_cdev.owner = THIS_MODULE;
	kobject_set_name(&splat_cdev.kobj, SPLAT_NAME);
	if ((rc = cdev_add(&splat_cdev, dev, SPLAT_MINORS))) {
		printk(KERN_ERR "SPLAT: Error adding cdev, %d\n", rc);
		kobject_put(&splat_cdev.kobj);
		unregister_chrdev_region(dev, SPLAT_MINORS);
		goto error;
	}

	/* Support for udev - makes the driver info available in sysfs */
	splat_class = spl_class_create(THIS_MODULE, "splat");
	if (IS_ERR(splat_class)) {
		rc = PTR_ERR(splat_class);
		printk(KERN_ERR "SPLAT: Error creating splat class, %d\n", rc);
		cdev_del(&splat_cdev);
		unregister_chrdev_region(dev, SPLAT_MINORS);
		goto error;
	}

	splat_device = spl_device_create(splat_class, NULL,
					 MKDEV(SPLAT_MAJOR, 0),
					 NULL, SPLAT_NAME);

	printk(KERN_INFO "SPLAT: Loaded module v%s-%s%s\n",
	       SPL_META_VERSION, SPL_META_RELEASE, SPL_DEBUG_STR);
	return 0;
error:
	printk(KERN_ERR "SPLAT: Error registering splat device, %d\n", rc);
	return rc;
}

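/*
 * Module unload: tear down the device, class, and character device, then
 * unregister every test subsystem in reverse order.
 */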
static int
splat_fini(void)
{
	dev_t dev = MKDEV(SPLAT_MAJOR, 0);

	spl_device_destroy(splat_class, splat_device, dev);
	spl_class_destroy(splat_class);
	cdev_del(&splat_cdev);
	unregister_chrdev_region(dev, SPLAT_MINORS);

	SPLAT_SUBSYSTEM_FINI(linux);
	SPLAT_SUBSYSTEM_FINI(zlib);
	SPLAT_SUBSYSTEM_FINI(cred);
	SPLAT_SUBSYSTEM_FINI(generic);
	SPLAT_SUBSYSTEM_FINI(list);
	SPLAT_SUBSYSTEM_FINI(atomic);
	SPLAT_SUBSYSTEM_FINI(kobj);
	SPLAT_SUBSYSTEM_FINI(vnode);
	SPLAT_SUBSYSTEM_FINI(time);
	SPLAT_SUBSYSTEM_FINI(rwlock);
	SPLAT_SUBSYSTEM_FINI(thread);
	SPLAT_SUBSYSTEM_FINI(condvar);
	SPLAT_SUBSYSTEM_FINI(mutex);
	SPLAT_SUBSYSTEM_FINI(krng);
	SPLAT_SUBSYSTEM_FINI(taskq);
	SPLAT_SUBSYSTEM_FINI(kmem);

	ASSERT(list_empty(&splat_module_list));
	printk(KERN_INFO "SPLAT: Unloaded module v%s-%s%s\n",
	       SPL_META_VERSION, SPL_META_RELEASE, SPL_DEBUG_STR);

	return 0;
}

spl_module_init(splat_init);
spl_module_exit(splat_fini);

MODULE_AUTHOR("Lawrence Livermore National Labs");
MODULE_DESCRIPTION("Solaris Porting LAyer Tests");
MODULE_LICENSE("GPL");