]> git.proxmox.com Git - mirror_spl-debian.git/blame - modules/splat/splat-ctl.c
Initial pass at a file API getf/releasef hooks
[mirror_spl-debian.git] / modules / splat / splat-ctl.c
CommitLineData
f1ca4da6 1/*
7c50328b 2 * My intent is to create a loadable 'splat' (solaris porting layer
3 * aggressive test) module which can be used as an access point to
4 * run in kernel Solaris ABI regression tests. This provides a
 * nice mechanism to validate the shim primitives are working properly.
f1ca4da6 6 *
 * The basic design is that the splat module is constructed of
8 * various splat_* source files each of which contains regression tests.
9 * For example the splat_linux_kmem.c file contains tests for validating
10 * kmem correctness. When the splat module is loaded splat_*_init()
11 * will be called for each subsystems tests, similarly splat_*_fini() is
12 * called when the splat module is removed. Each test can then be
f1ca4da6 13 * run by making an ioctl() call from a userspace control application
14 * to pick the subsystem and test which should be run.
15 *
16 * Author: Brian Behlendorf
17 */
18
7c50328b 19#include "splat-internal.h"
20#include <config.h>
f1ca4da6 21
22#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
23#include <linux/devfs_fs_kernel.h>
24#endif
25
26#include <linux/cdev.h>
27
28
/* Device class used to expose the splatctl node via sysfs/udev; the
 * class_simple API was replaced by struct class in kernel 2.6.13-ish,
 * hence the version split. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
static struct class_simple *splat_class;
#else
static struct class *splat_class;
#endif
/* List of all registered test subsystems, protected by splat_module_lock. */
static struct list_head splat_module_list;
static spinlock_t splat_module_lock;
f1ca4da6 36
37static int
7c50328b 38splat_open(struct inode *inode, struct file *file)
f1ca4da6 39{
40 unsigned int minor = iminor(inode);
7c50328b 41 splat_info_t *info;
f1ca4da6 42
7c50328b 43 if (minor >= SPLAT_MINORS)
f1ca4da6 44 return -ENXIO;
45
7c50328b 46 info = (splat_info_t *)kmalloc(sizeof(*info), GFP_KERNEL);
f1ca4da6 47 if (info == NULL)
48 return -ENOMEM;
49
50 spin_lock_init(&info->info_lock);
7c50328b 51 info->info_size = SPLAT_INFO_BUFFER_SIZE;
52 info->info_buffer = (char *)vmalloc(SPLAT_INFO_BUFFER_SIZE);
f1ca4da6 53 if (info->info_buffer == NULL) {
54 kfree(info);
55 return -ENOMEM;
56 }
57
58 info->info_head = info->info_buffer;
59 file->private_data = (void *)info;
60
f1ca4da6 61 return 0;
62}
63
64static int
7c50328b 65splat_release(struct inode *inode, struct file *file)
f1ca4da6 66{
67 unsigned int minor = iminor(inode);
7c50328b 68 splat_info_t *info = (splat_info_t *)file->private_data;
f1ca4da6 69
7c50328b 70 if (minor >= SPLAT_MINORS)
f1ca4da6 71 return -ENXIO;
72
73 ASSERT(info);
74 ASSERT(info->info_buffer);
75
76 vfree(info->info_buffer);
77 kfree(info);
78
79 return 0;
80}
81
82static int
7c50328b 83splat_buffer_clear(struct file *file, splat_cfg_t *kcfg, unsigned long arg)
f1ca4da6 84{
7c50328b 85 splat_info_t *info = (splat_info_t *)file->private_data;
f1ca4da6 86
87 ASSERT(info);
88 ASSERT(info->info_buffer);
89
90 spin_lock(&info->info_lock);
91 memset(info->info_buffer, 0, info->info_size);
92 info->info_head = info->info_buffer;
93 spin_unlock(&info->info_lock);
94
95 return 0;
96}
97
98static int
7c50328b 99splat_buffer_size(struct file *file, splat_cfg_t *kcfg, unsigned long arg)
f1ca4da6 100{
7c50328b 101 splat_info_t *info = (splat_info_t *)file->private_data;
f1ca4da6 102 char *buf;
103 int min, size, rc = 0;
104
105 ASSERT(info);
106 ASSERT(info->info_buffer);
107
108 spin_lock(&info->info_lock);
109 if (kcfg->cfg_arg1 > 0) {
110
111 size = kcfg->cfg_arg1;
112 buf = (char *)vmalloc(size);
113 if (buf == NULL) {
114 rc = -ENOMEM;
115 goto out;
116 }
117
118 /* Zero fill and truncate contents when coping buffer */
119 min = ((size < info->info_size) ? size : info->info_size);
120 memset(buf, 0, size);
121 memcpy(buf, info->info_buffer, min);
122 vfree(info->info_buffer);
123 info->info_size = size;
124 info->info_buffer = buf;
125 info->info_head = info->info_buffer;
126 }
127
128 kcfg->cfg_rc1 = info->info_size;
129
7c50328b 130 if (copy_to_user((struct splat_cfg_t __user *)arg, kcfg, sizeof(*kcfg)))
f1ca4da6 131 rc = -EFAULT;
132out:
133 spin_unlock(&info->info_lock);
134
135 return rc;
136}
137
138
7c50328b 139static splat_subsystem_t *
140splat_subsystem_find(int id) {
141 splat_subsystem_t *sub;
f1ca4da6 142
7c50328b 143 spin_lock(&splat_module_lock);
144 list_for_each_entry(sub, &splat_module_list, subsystem_list) {
f1ca4da6 145 if (id == sub->desc.id) {
7c50328b 146 spin_unlock(&splat_module_lock);
f1ca4da6 147 return sub;
148 }
149 }
7c50328b 150 spin_unlock(&splat_module_lock);
f1ca4da6 151
152 return NULL;
153}
154
155static int
7c50328b 156splat_subsystem_count(splat_cfg_t *kcfg, unsigned long arg)
f1ca4da6 157{
7c50328b 158 splat_subsystem_t *sub;
f1ca4da6 159 int i = 0;
160
7c50328b 161 spin_lock(&splat_module_lock);
162 list_for_each_entry(sub, &splat_module_list, subsystem_list)
f1ca4da6 163 i++;
164
7c50328b 165 spin_unlock(&splat_module_lock);
f1ca4da6 166 kcfg->cfg_rc1 = i;
167
7c50328b 168 if (copy_to_user((struct splat_cfg_t __user *)arg, kcfg, sizeof(*kcfg)))
f1ca4da6 169 return -EFAULT;
170
171 return 0;
172}
173
174static int
7c50328b 175splat_subsystem_list(splat_cfg_t *kcfg, unsigned long arg)
f1ca4da6 176{
7c50328b 177 splat_subsystem_t *sub;
178 splat_cfg_t *tmp;
f1ca4da6 179 int size, i = 0;
180
181 /* Structure will be sized large enough for N subsystem entries
182 * which is passed in by the caller. On exit the number of
183 * entries filled in with valid subsystems will be stored in
184 * cfg_rc1. If the caller does not provide enough entries
185 * for all subsystems we will truncate the list to avoid overrun.
186 */
7c50328b 187 size = sizeof(*tmp) + kcfg->cfg_data.splat_subsystems.size *
188 sizeof(splat_user_t);
f1ca4da6 189 tmp = kmalloc(size, GFP_KERNEL);
190 if (tmp == NULL)
191 return -ENOMEM;
192
193 /* Local 'tmp' is used as the structure copied back to user space */
194 memset(tmp, 0, size);
195 memcpy(tmp, kcfg, sizeof(*kcfg));
196
7c50328b 197 spin_lock(&splat_module_lock);
198 list_for_each_entry(sub, &splat_module_list, subsystem_list) {
199 strncpy(tmp->cfg_data.splat_subsystems.descs[i].name,
200 sub->desc.name, SPLAT_NAME_SIZE);
201 strncpy(tmp->cfg_data.splat_subsystems.descs[i].desc,
202 sub->desc.desc, SPLAT_DESC_SIZE);
203 tmp->cfg_data.splat_subsystems.descs[i].id = sub->desc.id;
f1ca4da6 204
205 /* Truncate list if we are about to overrun alloc'ed memory */
7c50328b 206 if ((i++) == kcfg->cfg_data.splat_subsystems.size)
f1ca4da6 207 break;
208 }
7c50328b 209 spin_unlock(&splat_module_lock);
f1ca4da6 210 tmp->cfg_rc1 = i;
211
7c50328b 212 if (copy_to_user((struct splat_cfg_t __user *)arg, tmp, size)) {
f1ca4da6 213 kfree(tmp);
214 return -EFAULT;
215 }
216
217 kfree(tmp);
218 return 0;
219}
220
221static int
7c50328b 222splat_test_count(splat_cfg_t *kcfg, unsigned long arg)
f1ca4da6 223{
7c50328b 224 splat_subsystem_t *sub;
225 splat_test_t *test;
f1b59d26 226 int i = 0;
f1ca4da6 227
228 /* Subsystem ID passed as arg1 */
7c50328b 229 sub = splat_subsystem_find(kcfg->cfg_arg1);
f1ca4da6 230 if (sub == NULL)
231 return -EINVAL;
232
233 spin_lock(&(sub->test_lock));
234 list_for_each_entry(test, &(sub->test_list), test_list)
235 i++;
236
237 spin_unlock(&(sub->test_lock));
238 kcfg->cfg_rc1 = i;
239
7c50328b 240 if (copy_to_user((struct splat_cfg_t __user *)arg, kcfg, sizeof(*kcfg)))
f1ca4da6 241 return -EFAULT;
242
243 return 0;
244}
245
246static int
7c50328b 247splat_test_list(splat_cfg_t *kcfg, unsigned long arg)
f1ca4da6 248{
7c50328b 249 splat_subsystem_t *sub;
250 splat_test_t *test;
251 splat_cfg_t *tmp;
f1b59d26 252 int size, i = 0;
f1ca4da6 253
254 /* Subsystem ID passed as arg1 */
7c50328b 255 sub = splat_subsystem_find(kcfg->cfg_arg1);
f1ca4da6 256 if (sub == NULL)
257 return -EINVAL;
258
259 /* Structure will be sized large enough for N test entries
260 * which is passed in by the caller. On exit the number of
261 * entries filled in with valid tests will be stored in
262 * cfg_rc1. If the caller does not provide enough entries
263 * for all tests we will truncate the list to avoid overrun.
264 */
7c50328b 265 size = sizeof(*tmp)+kcfg->cfg_data.splat_tests.size*sizeof(splat_user_t);
f1ca4da6 266 tmp = kmalloc(size, GFP_KERNEL);
267 if (tmp == NULL)
268 return -ENOMEM;
269
270 /* Local 'tmp' is used as the structure copied back to user space */
271 memset(tmp, 0, size);
272 memcpy(tmp, kcfg, sizeof(*kcfg));
273
274 spin_lock(&(sub->test_lock));
275 list_for_each_entry(test, &(sub->test_list), test_list) {
7c50328b 276 strncpy(tmp->cfg_data.splat_tests.descs[i].name,
277 test->desc.name, SPLAT_NAME_SIZE);
278 strncpy(tmp->cfg_data.splat_tests.descs[i].desc,
279 test->desc.desc, SPLAT_DESC_SIZE);
280 tmp->cfg_data.splat_tests.descs[i].id = test->desc.id;
f1ca4da6 281
282 /* Truncate list if we are about to overrun alloc'ed memory */
7c50328b 283 if ((i++) == kcfg->cfg_data.splat_tests.size)
f1ca4da6 284 break;
285 }
286 spin_unlock(&(sub->test_lock));
287 tmp->cfg_rc1 = i;
288
7c50328b 289 if (copy_to_user((struct splat_cfg_t __user *)arg, tmp, size)) {
f1ca4da6 290 kfree(tmp);
291 return -EFAULT;
292 }
293
294 kfree(tmp);
295 return 0;
296}
297
298static int
7c50328b 299splat_validate(struct file *file, splat_subsystem_t *sub, int cmd, void *arg)
f1ca4da6 300{
7c50328b 301 splat_test_t *test;
f1ca4da6 302
303 spin_lock(&(sub->test_lock));
304 list_for_each_entry(test, &(sub->test_list), test_list) {
305 if (test->desc.id == cmd) {
306 spin_unlock(&(sub->test_lock));
307 return test->test(file, arg);
308 }
309 }
310 spin_unlock(&(sub->test_lock));
311
312 return -EINVAL;
313}
314
315static int
7c50328b 316splat_ioctl_cfg(struct file *file, unsigned long arg)
f1ca4da6 317{
7c50328b 318 splat_cfg_t kcfg;
f1ca4da6 319 int rc = 0;
320
7c50328b 321 if (copy_from_user(&kcfg, (splat_cfg_t *)arg, sizeof(kcfg)))
f1ca4da6 322 return -EFAULT;
323
7c50328b 324 if (kcfg.cfg_magic != SPLAT_CFG_MAGIC) {
325 splat_print(file, "Bad config magic 0x%x != 0x%x\n",
326 kcfg.cfg_magic, SPLAT_CFG_MAGIC);
f1ca4da6 327 return -EINVAL;
328 }
329
330 switch (kcfg.cfg_cmd) {
7c50328b 331 case SPLAT_CFG_BUFFER_CLEAR:
f1ca4da6 332 /* cfg_arg1 - Unused
333 * cfg_rc1 - Unused
334 */
7c50328b 335 rc = splat_buffer_clear(file, &kcfg, arg);
f1ca4da6 336 break;
7c50328b 337 case SPLAT_CFG_BUFFER_SIZE:
f1ca4da6 338 /* cfg_arg1 - 0 - query size; >0 resize
339 * cfg_rc1 - Set to current buffer size
340 */
7c50328b 341 rc = splat_buffer_size(file, &kcfg, arg);
f1ca4da6 342 break;
7c50328b 343 case SPLAT_CFG_SUBSYSTEM_COUNT:
f1ca4da6 344 /* cfg_arg1 - Unused
345 * cfg_rc1 - Set to number of subsystems
346 */
7c50328b 347 rc = splat_subsystem_count(&kcfg, arg);
f1ca4da6 348 break;
7c50328b 349 case SPLAT_CFG_SUBSYSTEM_LIST:
f1ca4da6 350 /* cfg_arg1 - Unused
351 * cfg_rc1 - Set to number of subsystems
7c50328b 352 * cfg_data.splat_subsystems - Populated with subsystems
f1ca4da6 353 */
7c50328b 354 rc = splat_subsystem_list(&kcfg, arg);
f1ca4da6 355 break;
7c50328b 356 case SPLAT_CFG_TEST_COUNT:
f1ca4da6 357 /* cfg_arg1 - Set to a target subsystem
358 * cfg_rc1 - Set to number of tests
359 */
7c50328b 360 rc = splat_test_count(&kcfg, arg);
f1ca4da6 361 break;
7c50328b 362 case SPLAT_CFG_TEST_LIST:
f1ca4da6 363 /* cfg_arg1 - Set to a target subsystem
364 * cfg_rc1 - Set to number of tests
7c50328b 365 * cfg_data.splat_subsystems - Populated with tests
f1ca4da6 366 */
7c50328b 367 rc = splat_test_list(&kcfg, arg);
f1ca4da6 368 break;
369 default:
7c50328b 370 splat_print(file, "Bad config command %d\n", kcfg.cfg_cmd);
f1ca4da6 371 rc = -EINVAL;
372 break;
373 }
374
375 return rc;
376}
377
378static int
7c50328b 379splat_ioctl_cmd(struct file *file, unsigned long arg)
f1ca4da6 380{
7c50328b 381 splat_subsystem_t *sub;
382 splat_cmd_t kcmd;
f1ca4da6 383 int rc = -EINVAL;
384 void *data = NULL;
385
7c50328b 386 if (copy_from_user(&kcmd, (splat_cfg_t *)arg, sizeof(kcmd)))
f1ca4da6 387 return -EFAULT;
388
7c50328b 389 if (kcmd.cmd_magic != SPLAT_CMD_MAGIC) {
390 splat_print(file, "Bad command magic 0x%x != 0x%x\n",
391 kcmd.cmd_magic, SPLAT_CFG_MAGIC);
f1ca4da6 392 return -EINVAL;
393 }
394
395 /* Allocate memory for any opaque data the caller needed to pass on */
396 if (kcmd.cmd_data_size > 0) {
397 data = (void *)kmalloc(kcmd.cmd_data_size, GFP_KERNEL);
398 if (data == NULL)
399 return -ENOMEM;
400
7c50328b 401 if (copy_from_user(data, (void *)(arg + offsetof(splat_cmd_t,
f1ca4da6 402 cmd_data_str)), kcmd.cmd_data_size)) {
403 kfree(data);
404 return -EFAULT;
405 }
406 }
407
7c50328b 408 sub = splat_subsystem_find(kcmd.cmd_subsystem);
f1ca4da6 409 if (sub != NULL)
7c50328b 410 rc = splat_validate(file, sub, kcmd.cmd_test, data);
f1ca4da6 411 else
412 rc = -EINVAL;
413
414 if (data != NULL)
415 kfree(data);
416
417 return rc;
418}
419
420static int
7c50328b 421splat_ioctl(struct inode *inode, struct file *file,
f1ca4da6 422 unsigned int cmd, unsigned long arg)
423{
f1b59d26 424 unsigned int minor = iminor(file->f_dentry->d_inode);
425 int rc = 0;
f1ca4da6 426
427 /* Ignore tty ioctls */
428 if ((cmd & 0xffffff00) == ((int)'T') << 8)
429 return -ENOTTY;
430
7c50328b 431 if (minor >= SPLAT_MINORS)
f1ca4da6 432 return -ENXIO;
433
434 switch (cmd) {
7c50328b 435 case SPLAT_CFG:
436 rc = splat_ioctl_cfg(file, arg);
f1ca4da6 437 break;
7c50328b 438 case SPLAT_CMD:
439 rc = splat_ioctl_cmd(file, arg);
f1ca4da6 440 break;
441 default:
7c50328b 442 splat_print(file, "Bad ioctl command %d\n", cmd);
f1ca4da6 443 rc = -EINVAL;
444 break;
445 }
446
447 return rc;
448}
449
450/* I'm not sure why you would want to write in to this buffer from
451 * user space since its principle use is to pass test status info
452 * back to the user space, but I don't see any reason to prevent it.
453 */
7c50328b 454static ssize_t splat_write(struct file *file, const char __user *buf,
f1ca4da6 455 size_t count, loff_t *ppos)
456{
457 unsigned int minor = iminor(file->f_dentry->d_inode);
7c50328b 458 splat_info_t *info = (splat_info_t *)file->private_data;
f1ca4da6 459 int rc = 0;
460
7c50328b 461 if (minor >= SPLAT_MINORS)
f1ca4da6 462 return -ENXIO;
463
464 ASSERT(info);
465 ASSERT(info->info_buffer);
466
467 spin_lock(&info->info_lock);
468
469 /* Write beyond EOF */
470 if (*ppos >= info->info_size) {
471 rc = -EFBIG;
472 goto out;
473 }
474
475 /* Resize count if beyond EOF */
476 if (*ppos + count > info->info_size)
477 count = info->info_size - *ppos;
478
479 if (copy_from_user(info->info_buffer, buf, count)) {
480 rc = -EFAULT;
481 goto out;
482 }
483
484 *ppos += count;
485 rc = count;
486out:
487 spin_unlock(&info->info_lock);
488 return rc;
489}
490
7c50328b 491static ssize_t splat_read(struct file *file, char __user *buf,
f1ca4da6 492 size_t count, loff_t *ppos)
493{
494 unsigned int minor = iminor(file->f_dentry->d_inode);
7c50328b 495 splat_info_t *info = (splat_info_t *)file->private_data;
f1ca4da6 496 int rc = 0;
497
7c50328b 498 if (minor >= SPLAT_MINORS)
f1ca4da6 499 return -ENXIO;
500
501 ASSERT(info);
502 ASSERT(info->info_buffer);
503
504 spin_lock(&info->info_lock);
505
506 /* Read beyond EOF */
507 if (*ppos >= info->info_size)
508 goto out;
509
510 /* Resize count if beyond EOF */
511 if (*ppos + count > info->info_size)
512 count = info->info_size - *ppos;
513
514 if (copy_to_user(buf, info->info_buffer + *ppos, count)) {
515 rc = -EFAULT;
516 goto out;
517 }
518
519 *ppos += count;
520 rc = count;
521out:
522 spin_unlock(&info->info_lock);
523 return rc;
524}
525
7c50328b 526static loff_t splat_seek(struct file *file, loff_t offset, int origin)
f1ca4da6 527{
528 unsigned int minor = iminor(file->f_dentry->d_inode);
7c50328b 529 splat_info_t *info = (splat_info_t *)file->private_data;
f1ca4da6 530 int rc = -EINVAL;
531
7c50328b 532 if (minor >= SPLAT_MINORS)
f1ca4da6 533 return -ENXIO;
534
535 ASSERT(info);
536 ASSERT(info->info_buffer);
537
538 spin_lock(&info->info_lock);
539
540 switch (origin) {
541 case 0: /* SEEK_SET - No-op just do it */
542 break;
543 case 1: /* SEEK_CUR - Seek from current */
544 offset = file->f_pos + offset;
545 break;
546 case 2: /* SEEK_END - Seek from end */
547 offset = info->info_size + offset;
548 break;
549 }
550
551 if (offset >= 0) {
552 file->f_pos = offset;
553 file->f_version = 0;
554 rc = offset;
555 }
556
557 spin_unlock(&info->info_lock);
558
559 return rc;
560}
561
/* VFS operations for the splat control character device. */
static struct file_operations splat_fops = {
	.owner = THIS_MODULE,
	.open = splat_open,
	.release = splat_release,
	.ioctl = splat_ioctl,
	.read = splat_read,
	.write = splat_write,
	.llseek = splat_seek,
};
571
/* Static cdev backing the "splatctl" node; wired up in splat_init(). */
static struct cdev splat_cdev = {
	.owner = THIS_MODULE,
	.kobj = { .name = "splatctl", },
};
576
/*
 * Module init: register every test subsystem, claim the splatctl
 * char device region, add the cdev, and create the device class so
 * udev can publish /dev/splatctl.  On any failure, everything set up
 * so far is torn back down before returning the error.
 */
static int __init
splat_init(void)
{
	dev_t dev;
	int rc;

	spin_lock_init(&splat_module_lock);
	INIT_LIST_HEAD(&splat_module_list);

	/* Register each subsystem's tests; splat_fini() unwinds these
	 * in reverse order. */
	SPLAT_SUBSYSTEM_INIT(kmem);
	SPLAT_SUBSYSTEM_INIT(taskq);
	SPLAT_SUBSYSTEM_INIT(krng);
	SPLAT_SUBSYSTEM_INIT(mutex);
	SPLAT_SUBSYSTEM_INIT(condvar);
	SPLAT_SUBSYSTEM_INIT(thread);
	SPLAT_SUBSYSTEM_INIT(rwlock);
	SPLAT_SUBSYSTEM_INIT(time);
	SPLAT_SUBSYSTEM_INIT(vnode);
	SPLAT_SUBSYSTEM_INIT(kobj);
	SPLAT_SUBSYSTEM_INIT(file);

	dev = MKDEV(SPLAT_MAJOR, 0);
	if ((rc = register_chrdev_region(dev, SPLAT_MINORS, "splatctl")))
		goto error;

	/* Support for registering a character driver */
	cdev_init(&splat_cdev, &splat_fops);
	if ((rc = cdev_add(&splat_cdev, dev, SPLAT_MINORS))) {
		printk(KERN_ERR "splat: Error adding cdev, %d\n", rc);
		kobject_put(&splat_cdev.kobj);
		unregister_chrdev_region(dev, SPLAT_MINORS);
		goto error;
	}

	/* Support for udev make driver info available in sysfs */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
	splat_class = class_simple_create(THIS_MODULE, "splat");
#else
	splat_class = class_create(THIS_MODULE, "splat");
#endif
	if (IS_ERR(splat_class)) {
		rc = PTR_ERR(splat_class);
		printk(KERN_ERR "splat: Error creating splat class, %d\n", rc);
		cdev_del(&splat_cdev);
		unregister_chrdev_region(dev, SPLAT_MINORS);
		goto error;
	}

	/* NOTE(review): the device-create return value is not checked;
	 * a failure here leaves the module loaded without a /dev node —
	 * confirm this is acceptable. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
	class_simple_device_add(splat_class, MKDEV(SPLAT_MAJOR, 0),
	                        NULL, "splatctl");
#else
	class_device_create(splat_class, NULL, MKDEV(SPLAT_MAJOR, 0),
	                    NULL, "splatctl");
#endif

	printk(KERN_INFO "splat: Loaded Solaris Porting Layer "
	       "Aggressive Tests v%s\n", VERSION);
	return 0;
error:
	printk(KERN_ERR "splat: Error registering splat device, %d\n", rc);
	return rc;
}
640
/*
 * Module exit: tear down the device class, cdev and char region, then
 * unregister every test subsystem in the reverse order of splat_init().
 * The module list must be empty once all subsystems are gone.
 */
static void
splat_fini(void)
{
	dev_t dev = MKDEV(SPLAT_MAJOR, 0);

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
	class_simple_device_remove(dev);
	class_simple_destroy(splat_class);
	devfs_remove("splat/splatctl");
	devfs_remove("splat");
#else
	class_device_destroy(splat_class, dev);
	class_destroy(splat_class);
#endif
	cdev_del(&splat_cdev);
	unregister_chrdev_region(dev, SPLAT_MINORS);

	/* Reverse order of SPLAT_SUBSYSTEM_INIT() calls in splat_init(). */
	SPLAT_SUBSYSTEM_FINI(file);
	SPLAT_SUBSYSTEM_FINI(kobj);
	SPLAT_SUBSYSTEM_FINI(vnode);
	SPLAT_SUBSYSTEM_FINI(time);
	SPLAT_SUBSYSTEM_FINI(rwlock);
	SPLAT_SUBSYSTEM_FINI(thread);
	SPLAT_SUBSYSTEM_FINI(condvar);
	SPLAT_SUBSYSTEM_FINI(mutex);
	SPLAT_SUBSYSTEM_FINI(krng);
	SPLAT_SUBSYSTEM_FINI(taskq);
	SPLAT_SUBSYSTEM_FINI(kmem);

	ASSERT(list_empty(&splat_module_list));
	printk(KERN_INFO "splat: Unloaded Solaris Porting Layer "
	       "Aggressive Tests v%s\n", VERSION);
}
674
7c50328b 675module_init(splat_init);
676module_exit(splat_fini);
f1ca4da6 677
678MODULE_AUTHOR("Lawrence Livermore National Labs");
7c50328b 679MODULE_DESCRIPTION("Solaris Porting Layer Aggresive Tests");
f1ca4da6 680MODULE_LICENSE("GPL");