]> git.proxmox.com Git - mirror_spl-debian.git/blame - module/splat/splat-ctl.c
Public Release Prep
[mirror_spl-debian.git] / module / splat / splat-ctl.c
CommitLineData
716154c5
BB
1/*****************************************************************************\
2 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
3 * Copyright (C) 2007 The Regents of the University of California.
4 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
5 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
715f6251 6 * UCRL-CODE-235197
7 *
716154c5
BB
8 * This file is part of the SPL, Solaris Porting Layer.
9 * For details, see <http://github.com/behlendorf/spl/>.
10 *
11 * The SPL is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
715f6251 15 *
716154c5 16 * The SPL is distributed in the hope that it will be useful, but WITHOUT
715f6251 17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 * for more details.
20 *
21 * You should have received a copy of the GNU General Public License along
716154c5
BB
22 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
23 *****************************************************************************
24 * Solaris Porting LAyer Tests (SPLAT) Test Control Interface.
25 *
26 * The 'splat' (Solaris Porting LAyer Tests) module is designed as a
27 * framework which runs various in kernel regression tests to validate
28 * the SPL primitives honor the Solaris ABI.
f1ca4da6 29 *
716154c5
BB
30 * The splat module is constructed of various splat_* source files each
31 * of which contain regression tests for a particular subsystem. For
32 * example, the splat_kmem.c file contains all the tests for validating
33 * the kmem interfaces have been implemented correctly. When the splat
34 * module is loaded splat_*_init() will be called for each subsystems
35 * tests. It is the responsibility of splat_*_init() to register all
36 * the tests for this subsystem using the SPLAT_TEST_INIT() macro.
37 * Similarly splat_*_fini() is called when the splat module is removed
38 * and is responsible for unregistering its tests via the SPLAT_TEST_FINI
39 * macro. Once a test is registered it can then be run with an ioctl()
40 * call which specifies the subsystem and test to be run. The provided
41 * splat command line tool can be used to display all available
42 * subsystems and tests. It can also be used to run the full suite
43 * of regression tests or particular tests.
44\*****************************************************************************/
f1ca4da6 45
7c50328b 46#include "splat-internal.h"
f1ca4da6 47
/* sysfs class and device node backing /dev/splat (created in splat_init) */
static spl_class *splat_class;
static spl_device *splat_device;
/* List of registered test subsystems; guarded by splat_module_lock */
static struct list_head splat_module_list;
static spinlock_t splat_module_lock;
f1ca4da6 52
53static int
7c50328b 54splat_open(struct inode *inode, struct file *file)
f1ca4da6 55{
56 unsigned int minor = iminor(inode);
7c50328b 57 splat_info_t *info;
f1ca4da6 58
7c50328b 59 if (minor >= SPLAT_MINORS)
f1ca4da6 60 return -ENXIO;
61
7c50328b 62 info = (splat_info_t *)kmalloc(sizeof(*info), GFP_KERNEL);
f1ca4da6 63 if (info == NULL)
64 return -ENOMEM;
65
66 spin_lock_init(&info->info_lock);
7c50328b 67 info->info_size = SPLAT_INFO_BUFFER_SIZE;
68 info->info_buffer = (char *)vmalloc(SPLAT_INFO_BUFFER_SIZE);
f1ca4da6 69 if (info->info_buffer == NULL) {
70 kfree(info);
71 return -ENOMEM;
72 }
73
74 info->info_head = info->info_buffer;
75 file->private_data = (void *)info;
76
f1ca4da6 77 return 0;
78}
79
80static int
7c50328b 81splat_release(struct inode *inode, struct file *file)
f1ca4da6 82{
83 unsigned int minor = iminor(inode);
7c50328b 84 splat_info_t *info = (splat_info_t *)file->private_data;
f1ca4da6 85
7c50328b 86 if (minor >= SPLAT_MINORS)
f1ca4da6 87 return -ENXIO;
88
89 ASSERT(info);
90 ASSERT(info->info_buffer);
91
92 vfree(info->info_buffer);
93 kfree(info);
94
95 return 0;
96}
97
98static int
7c50328b 99splat_buffer_clear(struct file *file, splat_cfg_t *kcfg, unsigned long arg)
f1ca4da6 100{
7c50328b 101 splat_info_t *info = (splat_info_t *)file->private_data;
f1ca4da6 102
103 ASSERT(info);
104 ASSERT(info->info_buffer);
105
106 spin_lock(&info->info_lock);
107 memset(info->info_buffer, 0, info->info_size);
108 info->info_head = info->info_buffer;
109 spin_unlock(&info->info_lock);
110
111 return 0;
112}
113
114static int
7c50328b 115splat_buffer_size(struct file *file, splat_cfg_t *kcfg, unsigned long arg)
f1ca4da6 116{
7c50328b 117 splat_info_t *info = (splat_info_t *)file->private_data;
f1ca4da6 118 char *buf;
119 int min, size, rc = 0;
120
121 ASSERT(info);
122 ASSERT(info->info_buffer);
123
124 spin_lock(&info->info_lock);
125 if (kcfg->cfg_arg1 > 0) {
126
127 size = kcfg->cfg_arg1;
128 buf = (char *)vmalloc(size);
129 if (buf == NULL) {
130 rc = -ENOMEM;
131 goto out;
132 }
133
134 /* Zero fill and truncate contents when coping buffer */
135 min = ((size < info->info_size) ? size : info->info_size);
136 memset(buf, 0, size);
137 memcpy(buf, info->info_buffer, min);
138 vfree(info->info_buffer);
139 info->info_size = size;
140 info->info_buffer = buf;
141 info->info_head = info->info_buffer;
142 }
143
144 kcfg->cfg_rc1 = info->info_size;
145
7c50328b 146 if (copy_to_user((struct splat_cfg_t __user *)arg, kcfg, sizeof(*kcfg)))
f1ca4da6 147 rc = -EFAULT;
148out:
149 spin_unlock(&info->info_lock);
150
151 return rc;
152}
153
154
7c50328b 155static splat_subsystem_t *
156splat_subsystem_find(int id) {
157 splat_subsystem_t *sub;
f1ca4da6 158
7c50328b 159 spin_lock(&splat_module_lock);
160 list_for_each_entry(sub, &splat_module_list, subsystem_list) {
f1ca4da6 161 if (id == sub->desc.id) {
7c50328b 162 spin_unlock(&splat_module_lock);
f1ca4da6 163 return sub;
164 }
165 }
7c50328b 166 spin_unlock(&splat_module_lock);
f1ca4da6 167
168 return NULL;
169}
170
171static int
7c50328b 172splat_subsystem_count(splat_cfg_t *kcfg, unsigned long arg)
f1ca4da6 173{
7c50328b 174 splat_subsystem_t *sub;
f1ca4da6 175 int i = 0;
176
7c50328b 177 spin_lock(&splat_module_lock);
178 list_for_each_entry(sub, &splat_module_list, subsystem_list)
f1ca4da6 179 i++;
180
7c50328b 181 spin_unlock(&splat_module_lock);
f1ca4da6 182 kcfg->cfg_rc1 = i;
183
7c50328b 184 if (copy_to_user((struct splat_cfg_t __user *)arg, kcfg, sizeof(*kcfg)))
f1ca4da6 185 return -EFAULT;
186
187 return 0;
188}
189
190static int
7c50328b 191splat_subsystem_list(splat_cfg_t *kcfg, unsigned long arg)
f1ca4da6 192{
7c50328b 193 splat_subsystem_t *sub;
194 splat_cfg_t *tmp;
f1ca4da6 195 int size, i = 0;
196
197 /* Structure will be sized large enough for N subsystem entries
198 * which is passed in by the caller. On exit the number of
199 * entries filled in with valid subsystems will be stored in
200 * cfg_rc1. If the caller does not provide enough entries
201 * for all subsystems we will truncate the list to avoid overrun.
202 */
7c50328b 203 size = sizeof(*tmp) + kcfg->cfg_data.splat_subsystems.size *
204 sizeof(splat_user_t);
f1ca4da6 205 tmp = kmalloc(size, GFP_KERNEL);
206 if (tmp == NULL)
207 return -ENOMEM;
208
209 /* Local 'tmp' is used as the structure copied back to user space */
210 memset(tmp, 0, size);
211 memcpy(tmp, kcfg, sizeof(*kcfg));
212
7c50328b 213 spin_lock(&splat_module_lock);
214 list_for_each_entry(sub, &splat_module_list, subsystem_list) {
215 strncpy(tmp->cfg_data.splat_subsystems.descs[i].name,
216 sub->desc.name, SPLAT_NAME_SIZE);
217 strncpy(tmp->cfg_data.splat_subsystems.descs[i].desc,
218 sub->desc.desc, SPLAT_DESC_SIZE);
219 tmp->cfg_data.splat_subsystems.descs[i].id = sub->desc.id;
f1ca4da6 220
221 /* Truncate list if we are about to overrun alloc'ed memory */
7c50328b 222 if ((i++) == kcfg->cfg_data.splat_subsystems.size)
f1ca4da6 223 break;
224 }
7c50328b 225 spin_unlock(&splat_module_lock);
f1ca4da6 226 tmp->cfg_rc1 = i;
227
7c50328b 228 if (copy_to_user((struct splat_cfg_t __user *)arg, tmp, size)) {
f1ca4da6 229 kfree(tmp);
230 return -EFAULT;
231 }
232
233 kfree(tmp);
234 return 0;
235}
236
237static int
7c50328b 238splat_test_count(splat_cfg_t *kcfg, unsigned long arg)
f1ca4da6 239{
7c50328b 240 splat_subsystem_t *sub;
241 splat_test_t *test;
f1b59d26 242 int i = 0;
f1ca4da6 243
244 /* Subsystem ID passed as arg1 */
7c50328b 245 sub = splat_subsystem_find(kcfg->cfg_arg1);
f1ca4da6 246 if (sub == NULL)
247 return -EINVAL;
248
249 spin_lock(&(sub->test_lock));
250 list_for_each_entry(test, &(sub->test_list), test_list)
251 i++;
252
253 spin_unlock(&(sub->test_lock));
254 kcfg->cfg_rc1 = i;
255
7c50328b 256 if (copy_to_user((struct splat_cfg_t __user *)arg, kcfg, sizeof(*kcfg)))
f1ca4da6 257 return -EFAULT;
258
259 return 0;
260}
261
262static int
7c50328b 263splat_test_list(splat_cfg_t *kcfg, unsigned long arg)
f1ca4da6 264{
7c50328b 265 splat_subsystem_t *sub;
266 splat_test_t *test;
267 splat_cfg_t *tmp;
f1b59d26 268 int size, i = 0;
f1ca4da6 269
270 /* Subsystem ID passed as arg1 */
7c50328b 271 sub = splat_subsystem_find(kcfg->cfg_arg1);
f1ca4da6 272 if (sub == NULL)
273 return -EINVAL;
274
275 /* Structure will be sized large enough for N test entries
276 * which is passed in by the caller. On exit the number of
277 * entries filled in with valid tests will be stored in
278 * cfg_rc1. If the caller does not provide enough entries
279 * for all tests we will truncate the list to avoid overrun.
280 */
7c50328b 281 size = sizeof(*tmp)+kcfg->cfg_data.splat_tests.size*sizeof(splat_user_t);
f1ca4da6 282 tmp = kmalloc(size, GFP_KERNEL);
283 if (tmp == NULL)
284 return -ENOMEM;
285
286 /* Local 'tmp' is used as the structure copied back to user space */
287 memset(tmp, 0, size);
288 memcpy(tmp, kcfg, sizeof(*kcfg));
289
290 spin_lock(&(sub->test_lock));
291 list_for_each_entry(test, &(sub->test_list), test_list) {
7c50328b 292 strncpy(tmp->cfg_data.splat_tests.descs[i].name,
293 test->desc.name, SPLAT_NAME_SIZE);
294 strncpy(tmp->cfg_data.splat_tests.descs[i].desc,
295 test->desc.desc, SPLAT_DESC_SIZE);
296 tmp->cfg_data.splat_tests.descs[i].id = test->desc.id;
f1ca4da6 297
298 /* Truncate list if we are about to overrun alloc'ed memory */
7c50328b 299 if ((i++) == kcfg->cfg_data.splat_tests.size)
f1ca4da6 300 break;
301 }
302 spin_unlock(&(sub->test_lock));
303 tmp->cfg_rc1 = i;
304
7c50328b 305 if (copy_to_user((struct splat_cfg_t __user *)arg, tmp, size)) {
f1ca4da6 306 kfree(tmp);
307 return -EFAULT;
308 }
309
310 kfree(tmp);
311 return 0;
312}
313
314static int
7c50328b 315splat_validate(struct file *file, splat_subsystem_t *sub, int cmd, void *arg)
f1ca4da6 316{
7c50328b 317 splat_test_t *test;
f1ca4da6 318
319 spin_lock(&(sub->test_lock));
320 list_for_each_entry(test, &(sub->test_list), test_list) {
321 if (test->desc.id == cmd) {
322 spin_unlock(&(sub->test_lock));
323 return test->test(file, arg);
324 }
325 }
326 spin_unlock(&(sub->test_lock));
327
328 return -EINVAL;
329}
330
/*
 * Handle the SPLAT_CFG ioctl: copy in the splat_cfg_t request, validate
 * its magic, and dispatch on cfg_cmd to the appropriate helper.  Each
 * helper is responsible for copying results back to 'arg'.
 */
static int
splat_ioctl_cfg(struct file *file, unsigned int cmd, unsigned long arg)
{
	splat_cfg_t kcfg;
	int rc = 0;

	/* User and kernel space agree about arg size */
	if (_IOC_SIZE(cmd) != sizeof(kcfg))
		return -EBADMSG;

	if (copy_from_user(&kcfg, (splat_cfg_t *)arg, sizeof(kcfg)))
		return -EFAULT;

	if (kcfg.cfg_magic != SPLAT_CFG_MAGIC) {
		splat_print(file, "Bad config magic 0x%x != 0x%x\n",
			    kcfg.cfg_magic, SPLAT_CFG_MAGIC);
		return -EINVAL;
	}

	switch (kcfg.cfg_cmd) {
	case SPLAT_CFG_BUFFER_CLEAR:
		/* cfg_arg1 - Unused
		 * cfg_rc1 - Unused
		 */
		rc = splat_buffer_clear(file, &kcfg, arg);
		break;
	case SPLAT_CFG_BUFFER_SIZE:
		/* cfg_arg1 - 0 - query size; >0 resize
		 * cfg_rc1 - Set to current buffer size
		 */
		rc = splat_buffer_size(file, &kcfg, arg);
		break;
	case SPLAT_CFG_SUBSYSTEM_COUNT:
		/* cfg_arg1 - Unused
		 * cfg_rc1 - Set to number of subsystems
		 */
		rc = splat_subsystem_count(&kcfg, arg);
		break;
	case SPLAT_CFG_SUBSYSTEM_LIST:
		/* cfg_arg1 - Unused
		 * cfg_rc1 - Set to number of subsystems
		 * cfg_data.splat_subsystems - Set with subsystems
		 */
		rc = splat_subsystem_list(&kcfg, arg);
		break;
	case SPLAT_CFG_TEST_COUNT:
		/* cfg_arg1 - Set to a target subsystem
		 * cfg_rc1 - Set to number of tests
		 */
		rc = splat_test_count(&kcfg, arg);
		break;
	case SPLAT_CFG_TEST_LIST:
		/* cfg_arg1 - Set to a target subsystem
		 * cfg_rc1 - Set to number of tests
		 * cfg_data.splat_subsystems - Populated with tests
		 */
		rc = splat_test_list(&kcfg, arg);
		break;
	default:
		splat_print(file, "Bad config command %d\n",
			    kcfg.cfg_cmd);
		rc = -EINVAL;
		break;
	}

	return rc;
}
398
399static int
e554dffa 400splat_ioctl_cmd(struct file *file, unsigned int cmd, unsigned long arg)
f1ca4da6 401{
7c50328b 402 splat_subsystem_t *sub;
403 splat_cmd_t kcmd;
f1ca4da6 404 int rc = -EINVAL;
405 void *data = NULL;
406
e554dffa
BB
407 /* User and kernel space agree about arg size */
408 if (_IOC_SIZE(cmd) != sizeof(kcmd))
409 return -EBADMSG;
410
7c50328b 411 if (copy_from_user(&kcmd, (splat_cfg_t *)arg, sizeof(kcmd)))
f1ca4da6 412 return -EFAULT;
413
7c50328b 414 if (kcmd.cmd_magic != SPLAT_CMD_MAGIC) {
415 splat_print(file, "Bad command magic 0x%x != 0x%x\n",
416 kcmd.cmd_magic, SPLAT_CFG_MAGIC);
f1ca4da6 417 return -EINVAL;
418 }
419
420 /* Allocate memory for any opaque data the caller needed to pass on */
421 if (kcmd.cmd_data_size > 0) {
422 data = (void *)kmalloc(kcmd.cmd_data_size, GFP_KERNEL);
423 if (data == NULL)
424 return -ENOMEM;
425
7c50328b 426 if (copy_from_user(data, (void *)(arg + offsetof(splat_cmd_t,
f1ca4da6 427 cmd_data_str)), kcmd.cmd_data_size)) {
428 kfree(data);
429 return -EFAULT;
430 }
431 }
432
7c50328b 433 sub = splat_subsystem_find(kcmd.cmd_subsystem);
f1ca4da6 434 if (sub != NULL)
7c50328b 435 rc = splat_validate(file, sub, kcmd.cmd_test, data);
f1ca4da6 436 else
437 rc = -EINVAL;
438
439 if (data != NULL)
440 kfree(data);
441
442 return rc;
443}
444
445static int
7c50328b 446splat_ioctl(struct inode *inode, struct file *file,
e554dffa 447 unsigned int cmd, unsigned long arg)
f1ca4da6 448{
f1b59d26 449 unsigned int minor = iminor(file->f_dentry->d_inode);
450 int rc = 0;
f1ca4da6 451
452 /* Ignore tty ioctls */
453 if ((cmd & 0xffffff00) == ((int)'T') << 8)
454 return -ENOTTY;
455
7c50328b 456 if (minor >= SPLAT_MINORS)
f1ca4da6 457 return -ENXIO;
458
459 switch (cmd) {
7c50328b 460 case SPLAT_CFG:
e554dffa 461 rc = splat_ioctl_cfg(file, cmd, arg);
f1ca4da6 462 break;
7c50328b 463 case SPLAT_CMD:
e554dffa 464 rc = splat_ioctl_cmd(file, cmd, arg);
f1ca4da6 465 break;
466 default:
7c50328b 467 splat_print(file, "Bad ioctl command %d\n", cmd);
f1ca4da6 468 rc = -EINVAL;
469 break;
470 }
471
472 return rc;
473}
474
9593ef76
BB
#ifdef CONFIG_COMPAT
/* Compatibility handler for ioctls from 32-bit ELF binaries.  Passing a
 * NULL inode is safe because splat_ioctl() derives the minor from
 * file->f_dentry->d_inode rather than the inode argument. */
static long
splat_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	return splat_ioctl(NULL, file, cmd, arg);
}
#endif /* CONFIG_COMPAT */
483
f1ca4da6 484/* I'm not sure why you would want to write in to this buffer from
485 * user space since its principle use is to pass test status info
486 * back to the user space, but I don't see any reason to prevent it.
487 */
7c50328b 488static ssize_t splat_write(struct file *file, const char __user *buf,
f1ca4da6 489 size_t count, loff_t *ppos)
490{
491 unsigned int minor = iminor(file->f_dentry->d_inode);
7c50328b 492 splat_info_t *info = (splat_info_t *)file->private_data;
f1ca4da6 493 int rc = 0;
494
7c50328b 495 if (minor >= SPLAT_MINORS)
f1ca4da6 496 return -ENXIO;
497
498 ASSERT(info);
499 ASSERT(info->info_buffer);
500
501 spin_lock(&info->info_lock);
502
503 /* Write beyond EOF */
504 if (*ppos >= info->info_size) {
505 rc = -EFBIG;
506 goto out;
507 }
508
509 /* Resize count if beyond EOF */
510 if (*ppos + count > info->info_size)
511 count = info->info_size - *ppos;
512
513 if (copy_from_user(info->info_buffer, buf, count)) {
514 rc = -EFAULT;
515 goto out;
516 }
517
518 *ppos += count;
519 rc = count;
520out:
521 spin_unlock(&info->info_lock);
522 return rc;
523}
524
7c50328b 525static ssize_t splat_read(struct file *file, char __user *buf,
f1ca4da6 526 size_t count, loff_t *ppos)
527{
528 unsigned int minor = iminor(file->f_dentry->d_inode);
7c50328b 529 splat_info_t *info = (splat_info_t *)file->private_data;
f1ca4da6 530 int rc = 0;
531
7c50328b 532 if (minor >= SPLAT_MINORS)
f1ca4da6 533 return -ENXIO;
534
535 ASSERT(info);
536 ASSERT(info->info_buffer);
537
538 spin_lock(&info->info_lock);
539
540 /* Read beyond EOF */
541 if (*ppos >= info->info_size)
542 goto out;
543
544 /* Resize count if beyond EOF */
545 if (*ppos + count > info->info_size)
546 count = info->info_size - *ppos;
547
548 if (copy_to_user(buf, info->info_buffer + *ppos, count)) {
549 rc = -EFAULT;
550 goto out;
551 }
552
553 *ppos += count;
554 rc = count;
555out:
556 spin_unlock(&info->info_lock);
557 return rc;
558}
559
7c50328b 560static loff_t splat_seek(struct file *file, loff_t offset, int origin)
f1ca4da6 561{
562 unsigned int minor = iminor(file->f_dentry->d_inode);
7c50328b 563 splat_info_t *info = (splat_info_t *)file->private_data;
f1ca4da6 564 int rc = -EINVAL;
565
7c50328b 566 if (minor >= SPLAT_MINORS)
f1ca4da6 567 return -ENXIO;
568
569 ASSERT(info);
570 ASSERT(info->info_buffer);
571
572 spin_lock(&info->info_lock);
573
574 switch (origin) {
575 case 0: /* SEEK_SET - No-op just do it */
576 break;
577 case 1: /* SEEK_CUR - Seek from current */
578 offset = file->f_pos + offset;
579 break;
580 case 2: /* SEEK_END - Seek from end */
581 offset = info->info_size + offset;
582 break;
583 }
584
585 if (offset >= 0) {
586 file->f_pos = offset;
587 file->f_version = 0;
588 rc = offset;
589 }
590
591 spin_unlock(&info->info_lock);
592
593 return rc;
594}
595
/* Character device instance backing /dev/splat */
static struct cdev splat_cdev;
/* File operations for the splat control device.  compat_ioctl routes
 * ioctls from 32-bit user space through the same handler when
 * CONFIG_COMPAT is enabled. */
static struct file_operations splat_fops = {
	.owner = THIS_MODULE,
	.open = splat_open,
	.release = splat_release,
	.ioctl = splat_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = splat_compat_ioctl,
#endif
	.read = splat_read,
	.write = splat_write,
	.llseek = splat_seek,
};
609
51a727e9 610static int
7c50328b 611splat_init(void)
f1ca4da6 612{
613 dev_t dev;
f1b59d26 614 int rc;
f1ca4da6 615
7c50328b 616 spin_lock_init(&splat_module_lock);
617 INIT_LIST_HEAD(&splat_module_list);
f1ca4da6 618
7c50328b 619 SPLAT_SUBSYSTEM_INIT(kmem);
620 SPLAT_SUBSYSTEM_INIT(taskq);
621 SPLAT_SUBSYSTEM_INIT(krng);
622 SPLAT_SUBSYSTEM_INIT(mutex);
623 SPLAT_SUBSYSTEM_INIT(condvar);
624 SPLAT_SUBSYSTEM_INIT(thread);
625 SPLAT_SUBSYSTEM_INIT(rwlock);
626 SPLAT_SUBSYSTEM_INIT(time);
4b171585 627 SPLAT_SUBSYSTEM_INIT(vnode);
9490c148 628 SPLAT_SUBSYSTEM_INIT(kobj);
9f4c835a 629 SPLAT_SUBSYSTEM_INIT(atomic);
d702c04f 630 SPLAT_SUBSYSTEM_INIT(list);
b871b8cd 631 SPLAT_SUBSYSTEM_INIT(generic);
ec7d53e9 632 SPLAT_SUBSYSTEM_INIT(cred);
f1ca4da6 633
7c50328b 634 dev = MKDEV(SPLAT_MAJOR, 0);
46c685d0 635 if ((rc = register_chrdev_region(dev, SPLAT_MINORS, SPLAT_NAME)))
f1ca4da6 636 goto error;
637
638 /* Support for registering a character driver */
7c50328b 639 cdev_init(&splat_cdev, &splat_fops);
39ab5440
BB
640 splat_cdev.owner = THIS_MODULE;
641 kobject_set_name(&splat_cdev.kobj, SPLAT_NAME);
7c50328b 642 if ((rc = cdev_add(&splat_cdev, dev, SPLAT_MINORS))) {
f6a19c0d 643 printk(KERN_ERR "SPLAT: Error adding cdev, %d\n", rc);
7c50328b 644 kobject_put(&splat_cdev.kobj);
645 unregister_chrdev_region(dev, SPLAT_MINORS);
f1ca4da6 646 goto error;
647 }
648
649 /* Support for udev make driver info available in sysfs */
46c685d0 650 splat_class = spl_class_create(THIS_MODULE, "splat");
7c50328b 651 if (IS_ERR(splat_class)) {
652 rc = PTR_ERR(splat_class);
f6a19c0d 653 printk(KERN_ERR "SPLAT: Error creating splat class, %d\n", rc);
7c50328b 654 cdev_del(&splat_cdev);
655 unregister_chrdev_region(dev, SPLAT_MINORS);
f1ca4da6 656 goto error;
657 }
658
25557fd8 659 splat_device = spl_device_create(splat_class, NULL,
660 MKDEV(SPLAT_MAJOR, 0),
661 NULL, SPLAT_NAME);
f1ca4da6 662
f6a19c0d 663 printk(KERN_INFO "SPLAT: Loaded Solaris Porting LAyer "
0cbaeb11 664 "Tests v%s\n", SPL_META_VERSION);
f1ca4da6 665 return 0;
666error:
f6a19c0d 667 printk(KERN_ERR "SPLAT: Error registering splat device, %d\n", rc);
f1ca4da6 668 return rc;
669}
670
/*
 * Module unload: tear down the device node, class, cdev and chrdev
 * region (reverse of splat_init), then unregister each subsystem's
 * tests in reverse registration order.  All subsystems must have
 * emptied splat_module_list by the time we return.
 */
static int
splat_fini(void)
{
	dev_t dev = MKDEV(SPLAT_MAJOR, 0);

	spl_device_destroy(splat_class, splat_device, dev);
	spl_class_destroy(splat_class);
	cdev_del(&splat_cdev);
	unregister_chrdev_region(dev, SPLAT_MINORS);

	/* Reverse order of SPLAT_SUBSYSTEM_INIT() calls in splat_init() */
	SPLAT_SUBSYSTEM_FINI(cred);
	SPLAT_SUBSYSTEM_FINI(generic);
	SPLAT_SUBSYSTEM_FINI(list);
	SPLAT_SUBSYSTEM_FINI(atomic);
	SPLAT_SUBSYSTEM_FINI(kobj);
	SPLAT_SUBSYSTEM_FINI(vnode);
	SPLAT_SUBSYSTEM_FINI(time);
	SPLAT_SUBSYSTEM_FINI(rwlock);
	SPLAT_SUBSYSTEM_FINI(thread);
	SPLAT_SUBSYSTEM_FINI(condvar);
	SPLAT_SUBSYSTEM_FINI(mutex);
	SPLAT_SUBSYSTEM_FINI(krng);
	SPLAT_SUBSYSTEM_FINI(taskq);
	SPLAT_SUBSYSTEM_FINI(kmem);

	ASSERT(list_empty(&splat_module_list));
	printk(KERN_INFO "SPLAT: Unloaded Solaris Porting LAyer "
	       "Tests v%s\n", SPL_META_VERSION);

	return 0;
}
702
51a727e9
BB
/* Module entry/exit points registered via the SPL wrapper macros */
spl_module_init(splat_init);
spl_module_exit(splat_fini);

MODULE_AUTHOR("Lawrence Livermore National Labs");
MODULE_DESCRIPTION("Solaris Porting LAyer Tests");
MODULE_LICENSE("GPL");