f1ca4da6 1/*
715f6251 2 * This file is part of the SPL: Solaris Porting Layer.
3 *
4 * Copyright (c) 2008 Lawrence Livermore National Security, LLC.
5 * Produced at Lawrence Livermore National Laboratory
6 * Written by:
7 * Brian Behlendorf <behlendorf1@llnl.gov>,
8 * Herb Wartens <wartens2@llnl.gov>,
9 * Jim Garlick <garlick@llnl.gov>
10 * UCRL-CODE-235197
11 *
12 * This is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This is distributed in the hope that it will be useful, but WITHOUT
18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
20 * for more details.
21 *
22 * You should have received a copy of the GNU General Public License along
23 * with this program; if not, write to the Free Software Foundation, Inc.,
24 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
25 */
26
27/*
28 * My intent is to create a loadable 'splat' (Solaris Porting LAyer
29 * Tests) module which can be used as an access point to run
30 * in-kernel Solaris ABI regression tests. This provides a
7c50328b 31 * nice mechanism to validate that the shim primitives are working properly.
f1ca4da6 32 *
7c50328b 33 * The basic design is that the splat module is constructed of
34 * various splat_* source files, each of which contains regression tests.
35 * For example the splat_linux_kmem.c file contains tests for validating
36 * kmem correctness. When the splat module is loaded splat_*_init()
37 * will be called for each subsystem's tests; similarly splat_*_fini() is
38 * called when the splat module is removed. Each test can then be
f1ca4da6 39 * run by making an ioctl() call from a userspace control application
40 * to pick the subsystem and test which should be run.
f1ca4da6 41 */
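
/*
 * Illustrative only (not part of this module): a minimal userspace sketch
 * of the control flow described above. It assumes the shared splat-ctl.h
 * definitions are visible to userspace and that the splat character device
 * node is /dev/splatctl; the node path and the subsystem/test ids below are
 * assumptions for the sketch, not values fixed by this file.
 *
 *   #include <fcntl.h>
 *   #include <stdio.h>
 *   #include <string.h>
 *   #include <unistd.h>
 *   #include <sys/ioctl.h>
 *   #include "splat-ctl.h"
 *
 *   int main(void)
 *   {
 *           splat_cmd_t cmd;
 *           char output[4096];
 *           ssize_t n;
 *           int fd = open("/dev/splatctl", O_RDWR);
 *
 *           if (fd < 0)
 *                   return 1;
 *
 *           memset(&cmd, 0, sizeof(cmd));
 *           cmd.cmd_magic = SPLAT_CMD_MAGIC;
 *           cmd.cmd_subsystem = 1;        // hypothetical subsystem id
 *           cmd.cmd_test = 1;             // hypothetical test id
 *           cmd.cmd_data_size = 0;        // no opaque test data
 *
 *           if (ioctl(fd, SPLAT_CMD, &cmd) < 0)
 *                   perror("SPLAT_CMD");
 *
 *           // Test output accumulates in the per-open info buffer and
 *           // can be retrieved with an ordinary read().
 *           n = read(fd, output, sizeof(output) - 1);
 *           if (n > 0) {
 *                   output[n] = '\0';
 *                   fputs(output, stdout);
 *           }
 *
 *           close(fd);
 *           return 0;
 *   }
 */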
42
7c50328b 43#include "splat-internal.h"
f1ca4da6 44
46c685d0 45static spl_class *splat_class;
25557fd8 46static spl_device *splat_device;
7c50328b 47static struct list_head splat_module_list;
48static spinlock_t splat_module_lock;
f1ca4da6 49
50static int
7c50328b 51splat_open(struct inode *inode, struct file *file)
f1ca4da6 52{
53 unsigned int minor = iminor(inode);
7c50328b 54 splat_info_t *info;
f1ca4da6 55
7c50328b 56 if (minor >= SPLAT_MINORS)
f1ca4da6 57 return -ENXIO;
58
7c50328b 59 info = (splat_info_t *)kmalloc(sizeof(*info), GFP_KERNEL);
f1ca4da6 60 if (info == NULL)
61 return -ENOMEM;
62
63 spin_lock_init(&info->info_lock);
7c50328b 64 info->info_size = SPLAT_INFO_BUFFER_SIZE;
65 info->info_buffer = (char *)vmalloc(SPLAT_INFO_BUFFER_SIZE);
f1ca4da6 66 if (info->info_buffer == NULL) {
67 kfree(info);
68 return -ENOMEM;
69 }
70
71 info->info_head = info->info_buffer;
72 file->private_data = (void *)info;
73
f1ca4da6 74 return 0;
75}
76
77static int
7c50328b 78splat_release(struct inode *inode, struct file *file)
f1ca4da6 79{
80 unsigned int minor = iminor(inode);
7c50328b 81 splat_info_t *info = (splat_info_t *)file->private_data;
f1ca4da6 82
7c50328b 83 if (minor >= SPLAT_MINORS)
f1ca4da6 84 return -ENXIO;
85
86 ASSERT(info);
87 ASSERT(info->info_buffer);
88
89 vfree(info->info_buffer);
90 kfree(info);
91
92 return 0;
93}
94
95static int
7c50328b 96splat_buffer_clear(struct file *file, splat_cfg_t *kcfg, unsigned long arg)
f1ca4da6 97{
7c50328b 98 splat_info_t *info = (splat_info_t *)file->private_data;
f1ca4da6 99
100 ASSERT(info);
101 ASSERT(info->info_buffer);
102
103 spin_lock(&info->info_lock);
104 memset(info->info_buffer, 0, info->info_size);
105 info->info_head = info->info_buffer;
106 spin_unlock(&info->info_lock);
107
108 return 0;
109}
110
111static int
7c50328b 112splat_buffer_size(struct file *file, splat_cfg_t *kcfg, unsigned long arg)
f1ca4da6 113{
7c50328b 114 splat_info_t *info = (splat_info_t *)file->private_data;
f1ca4da6 115 char *buf;
116 int min, size, rc = 0;
117
118 ASSERT(info);
119 ASSERT(info->info_buffer);
120
121 spin_lock(&info->info_lock);
122 if (kcfg->cfg_arg1 > 0) {
123
124 size = kcfg->cfg_arg1;
125 buf = (char *)vmalloc(size);
126 if (buf == NULL) {
127 rc = -ENOMEM;
128 goto out;
129 }
130
131 /* Zero fill and truncate contents when copying the buffer */
132 min = ((size < info->info_size) ? size : info->info_size);
133 memset(buf, 0, size);
134 memcpy(buf, info->info_buffer, min);
135 vfree(info->info_buffer);
136 info->info_size = size;
137 info->info_buffer = buf;
138 info->info_head = info->info_buffer;
139 }
140
141 kcfg->cfg_rc1 = info->info_size;
142
7c50328b 143 if (copy_to_user((struct splat_cfg_t __user *)arg, kcfg, sizeof(*kcfg)))
f1ca4da6 144 rc = -EFAULT;
145out:
146 spin_unlock(&info->info_lock);
147
148 return rc;
149}
150
151
7c50328b 152static splat_subsystem_t *
153splat_subsystem_find(int id) {
154 splat_subsystem_t *sub;
f1ca4da6 155
7c50328b 156 spin_lock(&splat_module_lock);
157 list_for_each_entry(sub, &splat_module_list, subsystem_list) {
f1ca4da6 158 if (id == sub->desc.id) {
7c50328b 159 spin_unlock(&splat_module_lock);
f1ca4da6 160 return sub;
161 }
162 }
7c50328b 163 spin_unlock(&splat_module_lock);
f1ca4da6 164
165 return NULL;
166}
167
168static int
7c50328b 169splat_subsystem_count(splat_cfg_t *kcfg, unsigned long arg)
f1ca4da6 170{
7c50328b 171 splat_subsystem_t *sub;
f1ca4da6 172 int i = 0;
173
7c50328b 174 spin_lock(&splat_module_lock);
175 list_for_each_entry(sub, &splat_module_list, subsystem_list)
f1ca4da6 176 i++;
177
7c50328b 178 spin_unlock(&splat_module_lock);
f1ca4da6 179 kcfg->cfg_rc1 = i;
180
7c50328b 181 if (copy_to_user((struct splat_cfg_t __user *)arg, kcfg, sizeof(*kcfg)))
f1ca4da6 182 return -EFAULT;
183
184 return 0;
185}
186
187static int
7c50328b 188splat_subsystem_list(splat_cfg_t *kcfg, unsigned long arg)
f1ca4da6 189{
7c50328b 190 splat_subsystem_t *sub;
191 splat_cfg_t *tmp;
f1ca4da6 192 int size, i = 0;
193
194 /* Structure will be sized large enough for the N subsystem entries
195 * requested by the caller. On exit the number of entries filled
196 * in with valid subsystems will be stored in cfg_rc1. If the
197 * caller does not provide enough entries for all subsystems the
198 * list will be truncated to avoid overrun (see the illustrative
199 * sketch after this function). */
7c50328b 200 size = sizeof(*tmp) + kcfg->cfg_data.splat_subsystems.size *
201 sizeof(splat_user_t);
f1ca4da6 202 tmp = kmalloc(size, GFP_KERNEL);
203 if (tmp == NULL)
204 return -ENOMEM;
205
206 /* Local 'tmp' is used as the structure copied back to user space */
207 memset(tmp, 0, size);
208 memcpy(tmp, kcfg, sizeof(*kcfg));
209
7c50328b 210 spin_lock(&splat_module_lock);
211 list_for_each_entry(sub, &splat_module_list, subsystem_list) {
212 strncpy(tmp->cfg_data.splat_subsystems.descs[i].name,
213 sub->desc.name, SPLAT_NAME_SIZE);
214 strncpy(tmp->cfg_data.splat_subsystems.descs[i].desc,
215 sub->desc.desc, SPLAT_DESC_SIZE);
216 tmp->cfg_data.splat_subsystems.descs[i].id = sub->desc.id;
f1ca4da6 217
218 /* Truncate list if we are about to overrun alloc'ed memory */
7c50328b 219 if ((++i) == kcfg->cfg_data.splat_subsystems.size)
f1ca4da6 220 break;
221 }
7c50328b 222 spin_unlock(&splat_module_lock);
f1ca4da6 223 tmp->cfg_rc1 = i;
224
7c50328b 225 if (copy_to_user((struct splat_cfg_t __user *)arg, tmp, size)) {
f1ca4da6 226 kfree(tmp);
227 return -EFAULT;
228 }
229
230 kfree(tmp);
231 return 0;
232}
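
/*
 * Illustrative only (not part of this module): a hedged userspace sketch of
 * the two-step pattern described in the sizing comment above, assuming the
 * shared splat-ctl.h definitions and the usual libc headers are visible and
 * that fd is an already opened descriptor for the splat device (all
 * assumptions for the sketch):
 *
 *   splat_cfg_t count_cfg, *list_cfg;
 *   size_t size;
 *   int i, n;
 *
 *   memset(&count_cfg, 0, sizeof(count_cfg));
 *   count_cfg.cfg_magic = SPLAT_CFG_MAGIC;
 *   count_cfg.cfg_cmd = SPLAT_CFG_SUBSYSTEM_COUNT;
 *   if (ioctl(fd, SPLAT_CFG, &count_cfg) < 0)
 *           return -1;
 *
 *   n = count_cfg.cfg_rc1;               // number of registered subsystems
 *   size = sizeof(*list_cfg) + n * sizeof(splat_user_t);
 *   list_cfg = calloc(1, size);
 *   if (list_cfg == NULL)
 *           return -1;
 *
 *   list_cfg->cfg_magic = SPLAT_CFG_MAGIC;
 *   list_cfg->cfg_cmd = SPLAT_CFG_SUBSYSTEM_LIST;
 *   list_cfg->cfg_data.splat_subsystems.size = n;
 *   if (ioctl(fd, SPLAT_CFG, list_cfg) == 0)
 *           for (i = 0; i < list_cfg->cfg_rc1; i++)
 *                   printf("%d: %s - %s\n",
 *                       list_cfg->cfg_data.splat_subsystems.descs[i].id,
 *                       list_cfg->cfg_data.splat_subsystems.descs[i].name,
 *                       list_cfg->cfg_data.splat_subsystems.descs[i].desc);
 *
 *   free(list_cfg);
 */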
233
234static int
7c50328b 235splat_test_count(splat_cfg_t *kcfg, unsigned long arg)
f1ca4da6 236{
7c50328b 237 splat_subsystem_t *sub;
238 splat_test_t *test;
f1b59d26 239 int i = 0;
f1ca4da6 240
241 /* Subsystem ID passed as arg1 */
7c50328b 242 sub = splat_subsystem_find(kcfg->cfg_arg1);
f1ca4da6 243 if (sub == NULL)
244 return -EINVAL;
245
246 spin_lock(&(sub->test_lock));
247 list_for_each_entry(test, &(sub->test_list), test_list)
248 i++;
249
250 spin_unlock(&(sub->test_lock));
251 kcfg->cfg_rc1 = i;
252
7c50328b 253 if (copy_to_user((struct splat_cfg_t __user *)arg, kcfg, sizeof(*kcfg)))
f1ca4da6 254 return -EFAULT;
255
256 return 0;
257}
258
259static int
7c50328b 260splat_test_list(splat_cfg_t *kcfg, unsigned long arg)
f1ca4da6 261{
7c50328b 262 splat_subsystem_t *sub;
263 splat_test_t *test;
264 splat_cfg_t *tmp;
f1b59d26 265 int size, i = 0;
f1ca4da6 266
267 /* Subsystem ID passed as arg1 */
7c50328b 268 sub = splat_subsystem_find(kcfg->cfg_arg1);
f1ca4da6 269 if (sub == NULL)
270 return -EINVAL;
271
272 /* Structure will be sized large enough for the N test entries
273 * requested by the caller. On exit the number of entries filled
274 * in with valid tests will be stored in cfg_rc1. If the
275 * caller does not provide enough entries for all tests the
276 * list will be truncated to avoid overrun (see the illustrative
277 * sketch after this function). */
7c50328b 278 size = sizeof(*tmp)+kcfg->cfg_data.splat_tests.size*sizeof(splat_user_t);
f1ca4da6 279 tmp = kmalloc(size, GFP_KERNEL);
280 if (tmp == NULL)
281 return -ENOMEM;
282
283 /* Local 'tmp' is used as the structure copied back to user space */
284 memset(tmp, 0, size);
285 memcpy(tmp, kcfg, sizeof(*kcfg));
286
287 spin_lock(&(sub->test_lock));
288 list_for_each_entry(test, &(sub->test_list), test_list) {
7c50328b 289 strncpy(tmp->cfg_data.splat_tests.descs[i].name,
290 test->desc.name, SPLAT_NAME_SIZE);
291 strncpy(tmp->cfg_data.splat_tests.descs[i].desc,
292 test->desc.desc, SPLAT_DESC_SIZE);
293 tmp->cfg_data.splat_tests.descs[i].id = test->desc.id;
f1ca4da6 294
295 /* Truncate list if we are about to overrun alloc'ed memory */
7c50328b 296 if ((++i) == kcfg->cfg_data.splat_tests.size)
f1ca4da6 297 break;
298 }
299 spin_unlock(&(sub->test_lock));
300 tmp->cfg_rc1 = i;
301
7c50328b 302 if (copy_to_user((struct splat_cfg_t __user *)arg, tmp, size)) {
f1ca4da6 303 kfree(tmp);
304 return -EFAULT;
305 }
306
307 kfree(tmp);
308 return 0;
309}
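
/*
 * Illustrative only: enumerating the tests of one subsystem from userspace
 * follows the same two-step pattern sketched after splat_subsystem_list()
 * above, except that the target subsystem id is passed in cfg_arg1 and the
 * results come back in cfg_data.splat_tests (fd, cfg and sub_id below are
 * assumptions for the sketch):
 *
 *   cfg.cfg_magic = SPLAT_CFG_MAGIC;
 *   cfg.cfg_cmd   = SPLAT_CFG_TEST_COUNT;
 *   cfg.cfg_arg1  = sub_id;              // subsystem to interrogate
 *   ioctl(fd, SPLAT_CFG, &cfg);
 *
 *   // Then allocate sizeof(splat_cfg_t) + cfg.cfg_rc1 * sizeof(splat_user_t),
 *   // set cfg_cmd = SPLAT_CFG_TEST_LIST, cfg_arg1 = sub_id and
 *   // cfg_data.splat_tests.size = cfg.cfg_rc1, and issue SPLAT_CFG again;
 *   // the test ids, names and descriptions come back in
 *   // cfg_data.splat_tests.descs[0 .. cfg_rc1 - 1].
 */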
310
311static int
7c50328b 312splat_validate(struct file *file, splat_subsystem_t *sub, int cmd, void *arg)
f1ca4da6 313{
7c50328b 314 splat_test_t *test;
f1ca4da6 315
316 spin_lock(&(sub->test_lock));
317 list_for_each_entry(test, &(sub->test_list), test_list) {
318 if (test->desc.id == cmd) {
319 spin_unlock(&(sub->test_lock));
320 return test->test(file, arg);
321 }
322 }
323 spin_unlock(&(sub->test_lock));
324
325 return -EINVAL;
326}
327
328static int
e554dffa 329splat_ioctl_cfg(struct file *file, unsigned int cmd, unsigned long arg)
f1ca4da6 330{
7c50328b 331 splat_cfg_t kcfg;
f1ca4da6 332 int rc = 0;
333
e554dffa 334 /* User and kernel space agree about arg size */
335 if (_IOC_SIZE(cmd) != sizeof(kcfg))
336 return -EBADMSG;
337
7c50328b 338 if (copy_from_user(&kcfg, (splat_cfg_t *)arg, sizeof(kcfg)))
f1ca4da6 339 return -EFAULT;
340
7c50328b 341 if (kcfg.cfg_magic != SPLAT_CFG_MAGIC) {
342 splat_print(file, "Bad config magic 0x%x != 0x%x\n",
343 kcfg.cfg_magic, SPLAT_CFG_MAGIC);
f1ca4da6 344 return -EINVAL;
345 }
346
347 switch (kcfg.cfg_cmd) {
7c50328b 348 case SPLAT_CFG_BUFFER_CLEAR:
f1ca4da6 349 /* cfg_arg1 - Unused
350 * cfg_rc1 - Unused
351 */
7c50328b 352 rc = splat_buffer_clear(file, &kcfg, arg);
f1ca4da6 353 break;
7c50328b 354 case SPLAT_CFG_BUFFER_SIZE:
f1ca4da6 355 /* cfg_arg1 - 0 - query size; >0 resize
356 * cfg_rc1 - Set to current buffer size
357 */
7c50328b 358 rc = splat_buffer_size(file, &kcfg, arg);
f1ca4da6 359 break;
7c50328b 360 case SPLAT_CFG_SUBSYSTEM_COUNT:
f1ca4da6 361 /* cfg_arg1 - Unused
362 * cfg_rc1 - Set to number of subsystems
363 */
7c50328b 364 rc = splat_subsystem_count(&kcfg, arg);
f1ca4da6 365 break;
7c50328b 366 case SPLAT_CFG_SUBSYSTEM_LIST:
f1ca4da6 367 /* cfg_arg1 - Unused
368 * cfg_rc1 - Set to number of subsystems
e554dffa 369 * cfg_data.splat_subsystems - Set with subsystems
f1ca4da6 370 */
7c50328b 371 rc = splat_subsystem_list(&kcfg, arg);
f1ca4da6 372 break;
7c50328b 373 case SPLAT_CFG_TEST_COUNT:
f1ca4da6 374 /* cfg_arg1 - Set to a target subsystem
375 * cfg_rc1 - Set to number of tests
376 */
7c50328b 377 rc = splat_test_count(&kcfg, arg);
f1ca4da6 378 break;
7c50328b 379 case SPLAT_CFG_TEST_LIST:
f1ca4da6 380 /* cfg_arg1 - Set to a target subsystem
381 * cfg_rc1 - Set to number of tests
7c50328b 382 * cfg_data.splat_tests - Populated with tests
f1ca4da6 383 */
7c50328b 384 rc = splat_test_list(&kcfg, arg);
f1ca4da6 385 break;
386 default:
e554dffa 387 splat_print(file, "Bad config command %d\n",
388 kcfg.cfg_cmd);
f1ca4da6 389 rc = -EINVAL;
390 break;
391 }
392
393 return rc;
394}
395
396static int
e554dffa 397splat_ioctl_cmd(struct file *file, unsigned int cmd, unsigned long arg)
f1ca4da6 398{
7c50328b 399 splat_subsystem_t *sub;
400 splat_cmd_t kcmd;
f1ca4da6 401 int rc = -EINVAL;
402 void *data = NULL;
403
e554dffa 404 /* User and kernel space agree about arg size */
405 if (_IOC_SIZE(cmd) != sizeof(kcmd))
406 return -EBADMSG;
407
7c50328b 408 if (copy_from_user(&kcmd, (splat_cmd_t *)arg, sizeof(kcmd)))
f1ca4da6 409 return -EFAULT;
410
7c50328b 411 if (kcmd.cmd_magic != SPLAT_CMD_MAGIC) {
412 splat_print(file, "Bad command magic 0x%x != 0x%x\n",
413 kcmd.cmd_magic, SPLAT_CMD_MAGIC);
f1ca4da6 414 return -EINVAL;
415 }
416
417 /* Allocate memory for any opaque data the caller needed to pass on */
418 if (kcmd.cmd_data_size > 0) {
419 data = (void *)kmalloc(kcmd.cmd_data_size, GFP_KERNEL);
420 if (data == NULL)
421 return -ENOMEM;
422
7c50328b 423 if (copy_from_user(data, (void *)(arg + offsetof(splat_cmd_t,
f1ca4da6 424 cmd_data_str)), kcmd.cmd_data_size)) {
425 kfree(data);
426 return -EFAULT;
427 }
428 }
429
7c50328b 430 sub = splat_subsystem_find(kcmd.cmd_subsystem);
f1ca4da6 431 if (sub != NULL)
7c50328b 432 rc = splat_validate(file, sub, kcmd.cmd_test, data);
f1ca4da6 433 else
434 rc = -EINVAL;
435
436 if (data != NULL)
437 kfree(data);
438
439 return rc;
440}
441
442static int
7c50328b 443splat_ioctl(struct inode *inode, struct file *file,
e554dffa 444 unsigned int cmd, unsigned long arg)
f1ca4da6 445{
f1b59d26 446 unsigned int minor = iminor(file->f_dentry->d_inode);
447 int rc = 0;
f1ca4da6 448
449 /* Ignore tty ioctls */
450 if ((cmd & 0xffffff00) == ((int)'T') << 8)
451 return -ENOTTY;
452
7c50328b 453 if (minor >= SPLAT_MINORS)
f1ca4da6 454 return -ENXIO;
455
456 switch (cmd) {
7c50328b 457 case SPLAT_CFG:
e554dffa 458 rc = splat_ioctl_cfg(file, cmd, arg);
f1ca4da6 459 break;
7c50328b 460 case SPLAT_CMD:
e554dffa 461 rc = splat_ioctl_cmd(file, cmd, arg);
f1ca4da6 462 break;
463 default:
7c50328b 464 splat_print(file, "Bad ioctl command %d\n", cmd);
f1ca4da6 465 rc = -EINVAL;
466 break;
467 }
468
469 return rc;
470}
471
9593ef76 472#ifdef CONFIG_COMPAT
473/* Compatibility handler for ioctls from 32-bit ELF binaries */
474static long
475splat_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
476{
477 return splat_ioctl(NULL, file, cmd, arg);
478}
479#endif /* CONFIG_COMPAT */
480
f1ca4da6 481/* I'm not sure why you would want to write into this buffer from
482 * user space, since its principal use is to pass test status info
483 * back to user space, but I don't see any reason to prevent it.
484 */
7c50328b 485static ssize_t splat_write(struct file *file, const char __user *buf,
f1ca4da6 486 size_t count, loff_t *ppos)
487{
488 unsigned int minor = iminor(file->f_dentry->d_inode);
7c50328b 489 splat_info_t *info = (splat_info_t *)file->private_data;
f1ca4da6 490 int rc = 0;
491
7c50328b 492 if (minor >= SPLAT_MINORS)
f1ca4da6 493 return -ENXIO;
494
495 ASSERT(info);
496 ASSERT(info->info_buffer);
497
498 spin_lock(&info->info_lock);
499
500 /* Write beyond EOF */
501 if (*ppos >= info->info_size) {
502 rc = -EFBIG;
503 goto out;
504 }
505
506 /* Resize count if beyond EOF */
507 if (*ppos + count > info->info_size)
508 count = info->info_size - *ppos;
509
510 if (copy_from_user(info->info_buffer, buf, count)) {
511 rc = -EFAULT;
512 goto out;
513 }
514
515 *ppos += count;
516 rc = count;
517out:
518 spin_unlock(&info->info_lock);
519 return rc;
520}
521
7c50328b 522static ssize_t splat_read(struct file *file, char __user *buf,
f1ca4da6 523 size_t count, loff_t *ppos)
524{
525 unsigned int minor = iminor(file->f_dentry->d_inode);
7c50328b 526 splat_info_t *info = (splat_info_t *)file->private_data;
f1ca4da6 527 int rc = 0;
528
7c50328b 529 if (minor >= SPLAT_MINORS)
f1ca4da6 530 return -ENXIO;
531
532 ASSERT(info);
533 ASSERT(info->info_buffer);
534
535 spin_lock(&info->info_lock);
536
537 /* Read beyond EOF */
538 if (*ppos >= info->info_size)
539 goto out;
540
541 /* Resize count if beyond EOF */
542 if (*ppos + count > info->info_size)
543 count = info->info_size - *ppos;
544
545 if (copy_to_user(buf, info->info_buffer + *ppos, count)) {
546 rc = -EFAULT;
547 goto out;
548 }
549
550 *ppos += count;
551 rc = count;
552out:
553 spin_unlock(&info->info_lock);
554 return rc;
555}
556
7c50328b 557static loff_t splat_seek(struct file *file, loff_t offset, int origin)
f1ca4da6 558{
559 unsigned int minor = iminor(file->f_dentry->d_inode);
7c50328b 560 splat_info_t *info = (splat_info_t *)file->private_data;
f1ca4da6 561 int rc = -EINVAL;
562
7c50328b 563 if (minor >= SPLAT_MINORS)
f1ca4da6 564 return -ENXIO;
565
566 ASSERT(info);
567 ASSERT(info->info_buffer);
568
569 spin_lock(&info->info_lock);
570
571 switch (origin) {
572 case 0: /* SEEK_SET - No-op just do it */
573 break;
574 case 1: /* SEEK_CUR - Seek from current */
575 offset = file->f_pos + offset;
576 break;
577 case 2: /* SEEK_END - Seek from end */
578 offset = info->info_size + offset;
579 break;
580 }
581
582 if (offset >= 0) {
583 file->f_pos = offset;
584 file->f_version = 0;
585 rc = offset;
586 }
587
588 spin_unlock(&info->info_lock);
589
590 return rc;
591}
592
39ab5440 593static struct cdev splat_cdev;
7c50328b 594static struct file_operations splat_fops = {
9593ef76 595 .owner = THIS_MODULE,
596 .open = splat_open,
597 .release = splat_release,
598 .ioctl = splat_ioctl,
599#ifdef CONFIG_COMPAT
600 .compat_ioctl = splat_compat_ioctl,
601#endif
602 .read = splat_read,
603 .write = splat_write,
604 .llseek = splat_seek,
f1ca4da6 605};
606
51a727e9 607static int
7c50328b 608splat_init(void)
f1ca4da6 609{
610 dev_t dev;
f1b59d26 611 int rc;
f1ca4da6 612
7c50328b 613 spin_lock_init(&splat_module_lock);
614 INIT_LIST_HEAD(&splat_module_list);
f1ca4da6 615
7c50328b 616 SPLAT_SUBSYSTEM_INIT(kmem);
617 SPLAT_SUBSYSTEM_INIT(taskq);
618 SPLAT_SUBSYSTEM_INIT(krng);
619 SPLAT_SUBSYSTEM_INIT(mutex);
620 SPLAT_SUBSYSTEM_INIT(condvar);
621 SPLAT_SUBSYSTEM_INIT(thread);
622 SPLAT_SUBSYSTEM_INIT(rwlock);
623 SPLAT_SUBSYSTEM_INIT(time);
4b171585 624 SPLAT_SUBSYSTEM_INIT(vnode);
9490c148 625 SPLAT_SUBSYSTEM_INIT(kobj);
9f4c835a 626 SPLAT_SUBSYSTEM_INIT(atomic);
d702c04f 627 SPLAT_SUBSYSTEM_INIT(list);
b871b8cd 628 SPLAT_SUBSYSTEM_INIT(generic);
ec7d53e9 629 SPLAT_SUBSYSTEM_INIT(cred);
f1ca4da6 630
7c50328b 631 dev = MKDEV(SPLAT_MAJOR, 0);
46c685d0 632 if ((rc = register_chrdev_region(dev, SPLAT_MINORS, SPLAT_NAME)))
f1ca4da6 633 goto error;
634
635 /* Support for registering a character driver */
7c50328b 636 cdev_init(&splat_cdev, &splat_fops);
39ab5440 637 splat_cdev.owner = THIS_MODULE;
638 kobject_set_name(&splat_cdev.kobj, SPLAT_NAME);
7c50328b 639 if ((rc = cdev_add(&splat_cdev, dev, SPLAT_MINORS))) {
f6a19c0d 640 printk(KERN_ERR "SPLAT: Error adding cdev, %d\n", rc);
7c50328b 641 kobject_put(&splat_cdev.kobj);
642 unregister_chrdev_region(dev, SPLAT_MINORS);
f1ca4da6 643 goto error;
644 }
645
646 /* Support for udev - make driver info available in sysfs */
46c685d0 647 splat_class = spl_class_create(THIS_MODULE, "splat");
7c50328b 648 if (IS_ERR(splat_class)) {
649 rc = PTR_ERR(splat_class);
f6a19c0d 650 printk(KERN_ERR "SPLAT: Error creating splat class, %d\n", rc);
7c50328b 651 cdev_del(&splat_cdev);
652 unregister_chrdev_region(dev, SPLAT_MINORS);
f1ca4da6 653 goto error;
654 }
655
25557fd8 656 splat_device = spl_device_create(splat_class, NULL,
657 MKDEV(SPLAT_MAJOR, 0),
658 NULL, SPLAT_NAME);
f1ca4da6 659
f6a19c0d 660 printk(KERN_INFO "SPLAT: Loaded Solaris Porting LAyer "
0cbaeb11 661 "Tests v%s\n", SPL_META_VERSION);
f1ca4da6 662 return 0;
663error:
f6a19c0d 664 printk(KERN_ERR "SPLAT: Error registering splat device, %d\n", rc);
f1ca4da6 665 return rc;
666}
667
51a727e9 668static int
7c50328b 669splat_fini(void)
f1ca4da6 670{
7c50328b 671 dev_t dev = MKDEV(SPLAT_MAJOR, 0);
f1ca4da6 672
25557fd8 673 spl_device_destroy(splat_class, splat_device, dev);
46c685d0 674 spl_class_destroy(splat_class);
7c50328b 675 cdev_del(&splat_cdev);
676 unregister_chrdev_region(dev, SPLAT_MINORS);
677
ec7d53e9 678 SPLAT_SUBSYSTEM_FINI(cred);
b871b8cd 679 SPLAT_SUBSYSTEM_FINI(generic);
d702c04f 680 SPLAT_SUBSYSTEM_FINI(list);
9f4c835a 681 SPLAT_SUBSYSTEM_FINI(atomic);
9490c148 682 SPLAT_SUBSYSTEM_FINI(kobj);
4b171585 683 SPLAT_SUBSYSTEM_FINI(vnode);
7c50328b 684 SPLAT_SUBSYSTEM_FINI(time);
685 SPLAT_SUBSYSTEM_FINI(rwlock);
686 SPLAT_SUBSYSTEM_FINI(thread);
687 SPLAT_SUBSYSTEM_FINI(condvar);
688 SPLAT_SUBSYSTEM_FINI(mutex);
689 SPLAT_SUBSYSTEM_FINI(krng);
690 SPLAT_SUBSYSTEM_FINI(taskq);
691 SPLAT_SUBSYSTEM_FINI(kmem);
692
693 ASSERT(list_empty(&splat_module_list));
f6a19c0d 694 printk(KERN_INFO "SPLAT: Unloaded Solaris Porting LAyer "
0cbaeb11 695 "Tests v%s\n", SPL_META_VERSION);
51a727e9 696
697 return 0;
f1ca4da6 698}
699
51a727e9 700spl_module_init(splat_init);
701spl_module_exit(splat_fini);
f1ca4da6 702
703MODULE_AUTHOR("Lawrence Livermore National Labs");
715f6251 704MODULE_DESCRIPTION("Solaris Porting LAyer Tests");
f1ca4da6 705MODULE_LICENSE("GPL");