modules/splat/splat-ctl.c
/*
 * This file is part of the SPL: Solaris Porting Layer.
 *
 * Copyright (c) 2008 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory
 * Written by:
 *         Brian Behlendorf <behlendorf1@llnl.gov>,
 *         Herb Wartens <wartens2@llnl.gov>,
 *         Jim Garlick <garlick@llnl.gov>
 * UCRL-CODE-235197
 *
 * This is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

/*
 * My intent is to create a loadable 'splat' (Solaris Porting LAyer
 * Tests) module which can be used as an access point to run
 * in-kernel Solaris ABI regression tests.  This provides a
 * nice mechanism to validate that the shim primitives are working properly.
 *
 * The basic design of the splat module is that it is constructed of
 * various splat_* source files, each of which contains regression tests.
 * For example, the splat_linux_kmem.c file contains tests for validating
 * kmem correctness.  When the splat module is loaded splat_*_init()
 * will be called for each subsystem's tests, and similarly splat_*_fini()
 * is called when the splat module is removed.  Each test can then be
 * run by making an ioctl() call from a userspace control application
 * to pick the subsystem and test which should be run.
 */

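/*
 * Illustrative sketch only (not part of this module): one plausible way a
 * userspace control application could drive the interface described above,
 * assuming the splat_cfg_t/splat_cmd_t definitions and the SPLAT_CFG/SPLAT_CMD
 * ioctl numbers are shared via the splat headers, and that the control node
 * is exposed as /dev/splatctl:
 *
 *      int fd = open("/dev/splatctl", O_RDWR);
 *
 *      splat_cfg_t cfg = { 0 };
 *      cfg.cfg_magic = SPLAT_CFG_MAGIC;
 *      cfg.cfg_cmd   = SPLAT_CFG_SUBSYSTEM_COUNT;
 *      ioctl(fd, SPLAT_CFG, &cfg);      // cfg.cfg_rc1 now holds the count
 *
 *      splat_cmd_t cmd = { 0 };
 *      cmd.cmd_magic     = SPLAT_CMD_MAGIC;
 *      cmd.cmd_subsystem = some_subsystem_id;  // hypothetical values taken
 *      cmd.cmd_test      = some_test_id;       // from a SUBSYSTEM/TEST_LIST query
 *      cmd.cmd_data_size = 0;
 *      ioctl(fd, SPLAT_CMD, &cmd);      // runs the selected test in the kernel
 *
 *      close(fd);
 */
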
#include "splat-internal.h"
#include <config.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
#include <linux/devfs_fs_kernel.h>
#endif

#include <linux/cdev.h>


#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
static struct class_simple *splat_class;
#else
static struct class *splat_class;
#endif
static struct list_head splat_module_list;
static spinlock_t splat_module_lock;

static int
splat_open(struct inode *inode, struct file *file)
{
        unsigned int minor = iminor(inode);
        splat_info_t *info;

        if (minor >= SPLAT_MINORS)
                return -ENXIO;

        info = (splat_info_t *)kmalloc(sizeof(*info), GFP_KERNEL);
        if (info == NULL)
                return -ENOMEM;

        spin_lock_init(&info->info_lock);
        info->info_size = SPLAT_INFO_BUFFER_SIZE;
        info->info_buffer = (char *)vmalloc(SPLAT_INFO_BUFFER_SIZE);
        if (info->info_buffer == NULL) {
                kfree(info);
                return -ENOMEM;
        }

        info->info_head = info->info_buffer;
        file->private_data = (void *)info;

        return 0;
}

static int
splat_release(struct inode *inode, struct file *file)
{
        unsigned int minor = iminor(inode);
        splat_info_t *info = (splat_info_t *)file->private_data;

        if (minor >= SPLAT_MINORS)
                return -ENXIO;

        ASSERT(info);
        ASSERT(info->info_buffer);

        vfree(info->info_buffer);
        kfree(info);

        return 0;
}

static int
splat_buffer_clear(struct file *file, splat_cfg_t *kcfg, unsigned long arg)
{
        splat_info_t *info = (splat_info_t *)file->private_data;

        ASSERT(info);
        ASSERT(info->info_buffer);

        spin_lock(&info->info_lock);
        memset(info->info_buffer, 0, info->info_size);
        info->info_head = info->info_buffer;
        spin_unlock(&info->info_lock);

        return 0;
}

static int
splat_buffer_size(struct file *file, splat_cfg_t *kcfg, unsigned long arg)
{
        splat_info_t *info = (splat_info_t *)file->private_data;
        char *buf;
        int min, size, rc = 0;

        ASSERT(info);
        ASSERT(info->info_buffer);

        spin_lock(&info->info_lock);
        if (kcfg->cfg_arg1 > 0) {

                size = kcfg->cfg_arg1;
                buf = (char *)vmalloc(size);
                if (buf == NULL) {
                        rc = -ENOMEM;
                        goto out;
                }

                /* Zero fill and truncate contents when copying buffer */
                min = ((size < info->info_size) ? size : info->info_size);
                memset(buf, 0, size);
                memcpy(buf, info->info_buffer, min);
                vfree(info->info_buffer);
                info->info_size = size;
                info->info_buffer = buf;
                info->info_head = info->info_buffer;
        }

        kcfg->cfg_rc1 = info->info_size;

        if (copy_to_user((struct splat_cfg_t __user *)arg, kcfg, sizeof(*kcfg)))
                rc = -EFAULT;
out:
        spin_unlock(&info->info_lock);

        return rc;
}


static splat_subsystem_t *
splat_subsystem_find(int id) {
        splat_subsystem_t *sub;

        spin_lock(&splat_module_lock);
        list_for_each_entry(sub, &splat_module_list, subsystem_list) {
                if (id == sub->desc.id) {
                        spin_unlock(&splat_module_lock);
                        return sub;
                }
        }
        spin_unlock(&splat_module_lock);

        return NULL;
}

static int
splat_subsystem_count(splat_cfg_t *kcfg, unsigned long arg)
{
        splat_subsystem_t *sub;
        int i = 0;

        spin_lock(&splat_module_lock);
        list_for_each_entry(sub, &splat_module_list, subsystem_list)
                i++;

        spin_unlock(&splat_module_lock);
        kcfg->cfg_rc1 = i;

        if (copy_to_user((struct splat_cfg_t __user *)arg, kcfg, sizeof(*kcfg)))
                return -EFAULT;

        return 0;
}

static int
splat_subsystem_list(splat_cfg_t *kcfg, unsigned long arg)
{
        splat_subsystem_t *sub;
        splat_cfg_t *tmp;
        int size, i = 0;

        /* Structure will be sized large enough for N subsystem entries,
         * where N is passed in by the caller.  On exit the number of
         * entries filled in with valid subsystems will be stored in
         * cfg_rc1.  If the caller does not provide enough entries
         * for all subsystems we will truncate the list to avoid overrun.
         */
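        /* Illustrative example (hypothetical value): if the caller sets
         * cfg_data.splat_subsystems.size = 4, the allocation below is
         * sizeof(splat_cfg_t) + 4 * sizeof(splat_user_t), i.e. one header
         * followed by four subsystem description slots. */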
        size = sizeof(*tmp) + kcfg->cfg_data.splat_subsystems.size *
               sizeof(splat_user_t);
        tmp = kmalloc(size, GFP_KERNEL);
        if (tmp == NULL)
                return -ENOMEM;

        /* Local 'tmp' is used as the structure copied back to user space */
        memset(tmp, 0, size);
        memcpy(tmp, kcfg, sizeof(*kcfg));

        spin_lock(&splat_module_lock);
        list_for_each_entry(sub, &splat_module_list, subsystem_list) {
                strncpy(tmp->cfg_data.splat_subsystems.descs[i].name,
                        sub->desc.name, SPLAT_NAME_SIZE);
                strncpy(tmp->cfg_data.splat_subsystems.descs[i].desc,
                        sub->desc.desc, SPLAT_DESC_SIZE);
                tmp->cfg_data.splat_subsystems.descs[i].id = sub->desc.id;

                /* Truncate the list once every caller-provided entry is
                 * filled, to avoid overrunning the allocation above */
                if ((++i) == kcfg->cfg_data.splat_subsystems.size)
                        break;
        }
        spin_unlock(&splat_module_lock);
        tmp->cfg_rc1 = i;

        if (copy_to_user((struct splat_cfg_t __user *)arg, tmp, size)) {
                kfree(tmp);
                return -EFAULT;
        }

        kfree(tmp);
        return 0;
}

static int
splat_test_count(splat_cfg_t *kcfg, unsigned long arg)
{
        splat_subsystem_t *sub;
        splat_test_t *test;
        int i = 0;

        /* Subsystem ID passed as arg1 */
        sub = splat_subsystem_find(kcfg->cfg_arg1);
        if (sub == NULL)
                return -EINVAL;

        spin_lock(&(sub->test_lock));
        list_for_each_entry(test, &(sub->test_list), test_list)
                i++;

        spin_unlock(&(sub->test_lock));
        kcfg->cfg_rc1 = i;

        if (copy_to_user((struct splat_cfg_t __user *)arg, kcfg, sizeof(*kcfg)))
                return -EFAULT;

        return 0;
}

static int
splat_test_list(splat_cfg_t *kcfg, unsigned long arg)
{
        splat_subsystem_t *sub;
        splat_test_t *test;
        splat_cfg_t *tmp;
        int size, i = 0;

        /* Subsystem ID passed as arg1 */
        sub = splat_subsystem_find(kcfg->cfg_arg1);
        if (sub == NULL)
                return -EINVAL;

        /* Structure will be sized large enough for N test entries, where
         * N is passed in by the caller.  On exit the number of entries
         * filled in with valid tests will be stored in cfg_rc1.  If the
         * caller does not provide enough entries for all tests we will
         * truncate the list to avoid overrun.
         */
        size = sizeof(*tmp) + kcfg->cfg_data.splat_tests.size *
               sizeof(splat_user_t);
        tmp = kmalloc(size, GFP_KERNEL);
        if (tmp == NULL)
                return -ENOMEM;

        /* Local 'tmp' is used as the structure copied back to user space */
        memset(tmp, 0, size);
        memcpy(tmp, kcfg, sizeof(*kcfg));

        spin_lock(&(sub->test_lock));
        list_for_each_entry(test, &(sub->test_list), test_list) {
                strncpy(tmp->cfg_data.splat_tests.descs[i].name,
                        test->desc.name, SPLAT_NAME_SIZE);
                strncpy(tmp->cfg_data.splat_tests.descs[i].desc,
                        test->desc.desc, SPLAT_DESC_SIZE);
                tmp->cfg_data.splat_tests.descs[i].id = test->desc.id;

                /* Truncate the list once every caller-provided entry is
                 * filled, to avoid overrunning the allocation above */
                if ((++i) == kcfg->cfg_data.splat_tests.size)
                        break;
        }
        spin_unlock(&(sub->test_lock));
        tmp->cfg_rc1 = i;

        if (copy_to_user((struct splat_cfg_t __user *)arg, tmp, size)) {
                kfree(tmp);
                return -EFAULT;
        }

        kfree(tmp);
        return 0;
}

static int
splat_validate(struct file *file, splat_subsystem_t *sub, int cmd, void *arg)
{
        splat_test_t *test;

        spin_lock(&(sub->test_lock));
        list_for_each_entry(test, &(sub->test_list), test_list) {
                if (test->desc.id == cmd) {
                        spin_unlock(&(sub->test_lock));
                        return test->test(file, arg);
                }
        }
        spin_unlock(&(sub->test_lock));

        return -EINVAL;
}

static int
splat_ioctl_cfg(struct file *file, unsigned long arg)
{
        splat_cfg_t kcfg;
        int rc = 0;

        if (copy_from_user(&kcfg, (splat_cfg_t *)arg, sizeof(kcfg)))
                return -EFAULT;

        if (kcfg.cfg_magic != SPLAT_CFG_MAGIC) {
                splat_print(file, "Bad config magic 0x%x != 0x%x\n",
                            kcfg.cfg_magic, SPLAT_CFG_MAGIC);
                return -EINVAL;
        }

        switch (kcfg.cfg_cmd) {
        case SPLAT_CFG_BUFFER_CLEAR:
                /* cfg_arg1 - Unused
                 * cfg_rc1  - Unused
                 */
                rc = splat_buffer_clear(file, &kcfg, arg);
                break;
        case SPLAT_CFG_BUFFER_SIZE:
                /* cfg_arg1 - 0 - query size; >0 resize
                 * cfg_rc1  - Set to current buffer size
                 */
                rc = splat_buffer_size(file, &kcfg, arg);
                break;
        case SPLAT_CFG_SUBSYSTEM_COUNT:
                /* cfg_arg1 - Unused
                 * cfg_rc1  - Set to number of subsystems
                 */
                rc = splat_subsystem_count(&kcfg, arg);
                break;
        case SPLAT_CFG_SUBSYSTEM_LIST:
                /* cfg_arg1 - Unused
                 * cfg_rc1  - Set to number of subsystems
                 * cfg_data.splat_subsystems - Populated with subsystems
                 */
                rc = splat_subsystem_list(&kcfg, arg);
                break;
        case SPLAT_CFG_TEST_COUNT:
                /* cfg_arg1 - Set to a target subsystem
                 * cfg_rc1  - Set to number of tests
                 */
                rc = splat_test_count(&kcfg, arg);
                break;
        case SPLAT_CFG_TEST_LIST:
                /* cfg_arg1 - Set to a target subsystem
                 * cfg_rc1  - Set to number of tests
                 * cfg_data.splat_tests - Populated with tests
                 */
                rc = splat_test_list(&kcfg, arg);
                break;
        default:
                splat_print(file, "Bad config command %d\n", kcfg.cfg_cmd);
                rc = -EINVAL;
                break;
        }

        return rc;
}

static int
splat_ioctl_cmd(struct file *file, unsigned long arg)
{
        splat_subsystem_t *sub;
        splat_cmd_t kcmd;
        int rc = -EINVAL;
        void *data = NULL;

        if (copy_from_user(&kcmd, (splat_cmd_t *)arg, sizeof(kcmd)))
                return -EFAULT;

        if (kcmd.cmd_magic != SPLAT_CMD_MAGIC) {
                splat_print(file, "Bad command magic 0x%x != 0x%x\n",
                            kcmd.cmd_magic, SPLAT_CMD_MAGIC);
                return -EINVAL;
        }

        /* Allocate memory for any opaque data the caller needed to pass on */
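        /* The opaque payload, when present, is expected to sit immediately
         * after the splat_cmd_t header in the caller's buffer, which is why
         * it is copied from arg + offsetof(splat_cmd_t, cmd_data_str) below. */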
        if (kcmd.cmd_data_size > 0) {
                data = (void *)kmalloc(kcmd.cmd_data_size, GFP_KERNEL);
                if (data == NULL)
                        return -ENOMEM;

                if (copy_from_user(data, (void *)(arg + offsetof(splat_cmd_t,
                                   cmd_data_str)), kcmd.cmd_data_size)) {
                        kfree(data);
                        return -EFAULT;
                }
        }

        sub = splat_subsystem_find(kcmd.cmd_subsystem);
        if (sub != NULL)
                rc = splat_validate(file, sub, kcmd.cmd_test, data);
        else
                rc = -EINVAL;

        if (data != NULL)
                kfree(data);

        return rc;
}

static int
splat_ioctl(struct inode *inode, struct file *file,
            unsigned int cmd, unsigned long arg)
{
        unsigned int minor = iminor(file->f_dentry->d_inode);
        int rc = 0;

        /* Ignore tty ioctls */
        if ((cmd & 0xffffff00) == ((int)'T') << 8)
                return -ENOTTY;

        if (minor >= SPLAT_MINORS)
                return -ENXIO;

        switch (cmd) {
        case SPLAT_CFG:
                rc = splat_ioctl_cfg(file, arg);
                break;
        case SPLAT_CMD:
                rc = splat_ioctl_cmd(file, arg);
                break;
        default:
                splat_print(file, "Bad ioctl command %d\n", cmd);
                rc = -EINVAL;
                break;
        }

        return rc;
}

474/* I'm not sure why you would want to write in to this buffer from
475 * user space since its principle use is to pass test status info
476 * back to the user space, but I don't see any reason to prevent it.
477 */
7c50328b 478static ssize_t splat_write(struct file *file, const char __user *buf,
f1ca4da6 479 size_t count, loff_t *ppos)
480{
481 unsigned int minor = iminor(file->f_dentry->d_inode);
7c50328b 482 splat_info_t *info = (splat_info_t *)file->private_data;
f1ca4da6 483 int rc = 0;
484
7c50328b 485 if (minor >= SPLAT_MINORS)
f1ca4da6 486 return -ENXIO;
487
488 ASSERT(info);
489 ASSERT(info->info_buffer);
490
491 spin_lock(&info->info_lock);
492
493 /* Write beyond EOF */
494 if (*ppos >= info->info_size) {
495 rc = -EFBIG;
496 goto out;
497 }
498
499 /* Resize count if beyond EOF */
500 if (*ppos + count > info->info_size)
501 count = info->info_size - *ppos;
502
503 if (copy_from_user(info->info_buffer, buf, count)) {
504 rc = -EFAULT;
505 goto out;
506 }
507
508 *ppos += count;
509 rc = count;
510out:
511 spin_unlock(&info->info_lock);
512 return rc;
513}
514
static ssize_t splat_read(struct file *file, char __user *buf,
                          size_t count, loff_t *ppos)
{
        unsigned int minor = iminor(file->f_dentry->d_inode);
        splat_info_t *info = (splat_info_t *)file->private_data;
        int rc = 0;

        if (minor >= SPLAT_MINORS)
                return -ENXIO;

        ASSERT(info);
        ASSERT(info->info_buffer);

        spin_lock(&info->info_lock);

        /* Read beyond EOF */
        if (*ppos >= info->info_size)
                goto out;

        /* Resize count if beyond EOF */
        if (*ppos + count > info->info_size)
                count = info->info_size - *ppos;

        if (copy_to_user(buf, info->info_buffer + *ppos, count)) {
                rc = -EFAULT;
                goto out;
        }

        *ppos += count;
        rc = count;
out:
        spin_unlock(&info->info_lock);
        return rc;
}

static loff_t splat_seek(struct file *file, loff_t offset, int origin)
{
        unsigned int minor = iminor(file->f_dentry->d_inode);
        splat_info_t *info = (splat_info_t *)file->private_data;
        int rc = -EINVAL;

        if (minor >= SPLAT_MINORS)
                return -ENXIO;

        ASSERT(info);
        ASSERT(info->info_buffer);

        spin_lock(&info->info_lock);

        switch (origin) {
        case 0: /* SEEK_SET - No-op just do it */
                break;
        case 1: /* SEEK_CUR - Seek from current */
                offset = file->f_pos + offset;
                break;
        case 2: /* SEEK_END - Seek from end */
                offset = info->info_size + offset;
                break;
        }

        if (offset >= 0) {
                file->f_pos = offset;
                file->f_version = 0;
                rc = offset;
        }

        spin_unlock(&info->info_lock);

        return rc;
}

static struct file_operations splat_fops = {
        .owner   = THIS_MODULE,
        .open    = splat_open,
        .release = splat_release,
        .ioctl   = splat_ioctl,
        .read    = splat_read,
        .write   = splat_write,
        .llseek  = splat_seek,
};

static struct cdev splat_cdev = {
        .owner = THIS_MODULE,
        .kobj  = { .name = "splatctl", },
};

static int __init
splat_init(void)
{
        dev_t dev;
        int rc;

        spin_lock_init(&splat_module_lock);
        INIT_LIST_HEAD(&splat_module_list);

        SPLAT_SUBSYSTEM_INIT(kmem);
        SPLAT_SUBSYSTEM_INIT(taskq);
        SPLAT_SUBSYSTEM_INIT(krng);
        SPLAT_SUBSYSTEM_INIT(mutex);
        SPLAT_SUBSYSTEM_INIT(condvar);
        SPLAT_SUBSYSTEM_INIT(thread);
        SPLAT_SUBSYSTEM_INIT(rwlock);
        SPLAT_SUBSYSTEM_INIT(time);
        SPLAT_SUBSYSTEM_INIT(vnode);
        SPLAT_SUBSYSTEM_INIT(kobj);
        SPLAT_SUBSYSTEM_INIT(atomic);

        dev = MKDEV(SPLAT_MAJOR, 0);
        if ((rc = register_chrdev_region(dev, SPLAT_MINORS, "splatctl")))
                goto error;

        /* Support for registering a character driver */
        cdev_init(&splat_cdev, &splat_fops);
        if ((rc = cdev_add(&splat_cdev, dev, SPLAT_MINORS))) {
                printk(KERN_ERR "splat: Error adding cdev, %d\n", rc);
                kobject_put(&splat_cdev.kobj);
                unregister_chrdev_region(dev, SPLAT_MINORS);
                goto error;
        }

        /* Support for udev - make driver info available in sysfs */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
        splat_class = class_simple_create(THIS_MODULE, "splat");
#else
        splat_class = class_create(THIS_MODULE, "splat");
#endif
        if (IS_ERR(splat_class)) {
                rc = PTR_ERR(splat_class);
                printk(KERN_ERR "splat: Error creating splat class, %d\n", rc);
                cdev_del(&splat_cdev);
                unregister_chrdev_region(dev, SPLAT_MINORS);
                goto error;
        }

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
        class_simple_device_add(splat_class, MKDEV(SPLAT_MAJOR, 0),
                                NULL, "splatctl");
#else
        class_device_create(splat_class, NULL, MKDEV(SPLAT_MAJOR, 0),
                            NULL, "splatctl");
#endif

        printk(KERN_INFO "splat: Loaded Solaris Porting LAyer "
               "Tests v%s\n", VERSION);
        return 0;
error:
        printk(KERN_ERR "splat: Error registering splat device, %d\n", rc);
        return rc;
}

static void
splat_fini(void)
{
        dev_t dev = MKDEV(SPLAT_MAJOR, 0);

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
        class_simple_device_remove(dev);
        class_simple_destroy(splat_class);
        devfs_remove("splat/splatctl");
        devfs_remove("splat");
#else
        class_device_destroy(splat_class, dev);
        class_destroy(splat_class);
#endif
        cdev_del(&splat_cdev);
        unregister_chrdev_region(dev, SPLAT_MINORS);

        SPLAT_SUBSYSTEM_FINI(atomic);
        SPLAT_SUBSYSTEM_FINI(kobj);
        SPLAT_SUBSYSTEM_FINI(vnode);
        SPLAT_SUBSYSTEM_FINI(time);
        SPLAT_SUBSYSTEM_FINI(rwlock);
        SPLAT_SUBSYSTEM_FINI(thread);
        SPLAT_SUBSYSTEM_FINI(condvar);
        SPLAT_SUBSYSTEM_FINI(mutex);
        SPLAT_SUBSYSTEM_FINI(krng);
        SPLAT_SUBSYSTEM_FINI(taskq);
        SPLAT_SUBSYSTEM_FINI(kmem);

        ASSERT(list_empty(&splat_module_list));
        printk(KERN_INFO "splat: Unloaded Solaris Porting LAyer "
               "Tests v%s\n", VERSION);
}

module_init(splat_init);
module_exit(splat_fini);

MODULE_AUTHOR("Lawrence Livermore National Labs");
MODULE_DESCRIPTION("Solaris Porting LAyer Tests");
MODULE_LICENSE("GPL");