1 /*****************************************************************************\
2 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
3 * Copyright (C) 2007 The Regents of the University of California.
4 * Produced at Lawrence Livermore National Laboratory (cf. DISCLAIMER).
5 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
8 * This file is part of the SPL, Solaris Porting Layer.
9 * For details, see <http://github.com/behlendorf/spl/>.
11 * The SPL is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
16 * The SPL is distributed in the hope that it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
21 * You should have received a copy of the GNU General Public License along
22 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
23 *****************************************************************************
24 * Solaris Porting LAyer Tests (SPLAT) Test Control Interface.
26 * The 'splat' (Solaris Porting LAyer Tests) module is designed as a
27 * framework which runs various in kernel regression tests to validate
28 * the SPL primitives honor the Solaris ABI.
30 * The splat module is constructed of various splat_* source files each
31 * of which contain regression tests for a particular subsystem. For
32 * example, the splat_kmem.c file contains all the tests for validating
33 * the kmem interfaces have been implemented correctly. When the splat
34 * module is loaded splat_*_init() will be called for each subsystems
35 * tests. It is the responsibility of splat_*_init() to register all
36 * the tests for this subsystem using the SPLAT_TEST_INIT() macro.
37 * Similarly splat_*_fini() is called when the splat module is removed
38 * and is responsible for unregistering its tests via the SPLAT_TEST_FINI
39 * macro. Once a test is registered it can then be run with an ioctl()
40 * call which specifies the subsystem and test to be run. The provided
41 * splat command line tool can be used to display all available
42 * subsystems and tests. It can also be used to run the full suite
43 * of regression tests or particular tests.
44 \*****************************************************************************/
46 #include "splat-internal.h"
48 static spl_class
*splat_class
;
49 static spl_device
*splat_device
;
50 static struct list_head splat_module_list
;
51 static spinlock_t splat_module_lock
;
54 splat_open(struct inode
*inode
, struct file
*file
)
56 unsigned int minor
= iminor(inode
);
59 if (minor
>= SPLAT_MINORS
)
62 info
= (splat_info_t
*)kmalloc(sizeof(*info
), GFP_KERNEL
);
66 spin_lock_init(&info
->info_lock
);
67 info
->info_size
= SPLAT_INFO_BUFFER_SIZE
;
68 info
->info_buffer
= (char *)vmalloc(SPLAT_INFO_BUFFER_SIZE
);
69 if (info
->info_buffer
== NULL
) {
73 memset(info
->info_buffer
, 0, info
->info_size
);
75 info
->info_head
= info
->info_buffer
;
76 file
->private_data
= (void *)info
;
78 splat_print(file
, "%s\n", spl_version
);
84 splat_release(struct inode
*inode
, struct file
*file
)
86 unsigned int minor
= iminor(inode
);
87 splat_info_t
*info
= (splat_info_t
*)file
->private_data
;
89 if (minor
>= SPLAT_MINORS
)
93 ASSERT(info
->info_buffer
);
95 vfree(info
->info_buffer
);
102 splat_buffer_clear(struct file
*file
, splat_cfg_t
*kcfg
, unsigned long arg
)
104 splat_info_t
*info
= (splat_info_t
*)file
->private_data
;
107 ASSERT(info
->info_buffer
);
109 spin_lock(&info
->info_lock
);
110 memset(info
->info_buffer
, 0, info
->info_size
);
111 info
->info_head
= info
->info_buffer
;
112 spin_unlock(&info
->info_lock
);
118 splat_buffer_size(struct file
*file
, splat_cfg_t
*kcfg
, unsigned long arg
)
120 splat_info_t
*info
= (splat_info_t
*)file
->private_data
;
122 int min
, size
, rc
= 0;
125 ASSERT(info
->info_buffer
);
127 spin_lock(&info
->info_lock
);
128 if (kcfg
->cfg_arg1
> 0) {
130 size
= kcfg
->cfg_arg1
;
131 buf
= (char *)vmalloc(size
);
137 /* Zero fill and truncate contents when coping buffer */
138 min
= ((size
< info
->info_size
) ? size
: info
->info_size
);
139 memset(buf
, 0, size
);
140 memcpy(buf
, info
->info_buffer
, min
);
141 vfree(info
->info_buffer
);
142 info
->info_size
= size
;
143 info
->info_buffer
= buf
;
144 info
->info_head
= info
->info_buffer
;
147 kcfg
->cfg_rc1
= info
->info_size
;
149 if (copy_to_user((struct splat_cfg_t __user
*)arg
, kcfg
, sizeof(*kcfg
)))
152 spin_unlock(&info
->info_lock
);
158 static splat_subsystem_t
*
159 splat_subsystem_find(int id
) {
160 splat_subsystem_t
*sub
;
162 spin_lock(&splat_module_lock
);
163 list_for_each_entry(sub
, &splat_module_list
, subsystem_list
) {
164 if (id
== sub
->desc
.id
) {
165 spin_unlock(&splat_module_lock
);
169 spin_unlock(&splat_module_lock
);
175 splat_subsystem_count(splat_cfg_t
*kcfg
, unsigned long arg
)
177 splat_subsystem_t
*sub
;
180 spin_lock(&splat_module_lock
);
181 list_for_each_entry(sub
, &splat_module_list
, subsystem_list
)
184 spin_unlock(&splat_module_lock
);
187 if (copy_to_user((struct splat_cfg_t __user
*)arg
, kcfg
, sizeof(*kcfg
)))
194 splat_subsystem_list(splat_cfg_t
*kcfg
, unsigned long arg
)
196 splat_subsystem_t
*sub
;
200 /* Structure will be sized large enough for N subsystem entries
201 * which is passed in by the caller. On exit the number of
202 * entries filled in with valid subsystems will be stored in
203 * cfg_rc1. If the caller does not provide enough entries
204 * for all subsystems we will truncate the list to avoid overrun.
206 size
= sizeof(*tmp
) + kcfg
->cfg_data
.splat_subsystems
.size
*
207 sizeof(splat_user_t
);
208 tmp
= kmalloc(size
, GFP_KERNEL
);
212 /* Local 'tmp' is used as the structure copied back to user space */
213 memset(tmp
, 0, size
);
214 memcpy(tmp
, kcfg
, sizeof(*kcfg
));
216 spin_lock(&splat_module_lock
);
217 list_for_each_entry(sub
, &splat_module_list
, subsystem_list
) {
218 strncpy(tmp
->cfg_data
.splat_subsystems
.descs
[i
].name
,
219 sub
->desc
.name
, SPLAT_NAME_SIZE
);
220 strncpy(tmp
->cfg_data
.splat_subsystems
.descs
[i
].desc
,
221 sub
->desc
.desc
, SPLAT_DESC_SIZE
);
222 tmp
->cfg_data
.splat_subsystems
.descs
[i
].id
= sub
->desc
.id
;
224 /* Truncate list if we are about to overrun alloc'ed memory */
225 if ((i
++) == kcfg
->cfg_data
.splat_subsystems
.size
)
228 spin_unlock(&splat_module_lock
);
231 if (copy_to_user((struct splat_cfg_t __user
*)arg
, tmp
, size
)) {
241 splat_test_count(splat_cfg_t
*kcfg
, unsigned long arg
)
243 splat_subsystem_t
*sub
;
247 /* Subsystem ID passed as arg1 */
248 sub
= splat_subsystem_find(kcfg
->cfg_arg1
);
252 spin_lock(&(sub
->test_lock
));
253 list_for_each_entry(test
, &(sub
->test_list
), test_list
)
256 spin_unlock(&(sub
->test_lock
));
259 if (copy_to_user((struct splat_cfg_t __user
*)arg
, kcfg
, sizeof(*kcfg
)))
266 splat_test_list(splat_cfg_t
*kcfg
, unsigned long arg
)
268 splat_subsystem_t
*sub
;
273 /* Subsystem ID passed as arg1 */
274 sub
= splat_subsystem_find(kcfg
->cfg_arg1
);
278 /* Structure will be sized large enough for N test entries
279 * which is passed in by the caller. On exit the number of
280 * entries filled in with valid tests will be stored in
281 * cfg_rc1. If the caller does not provide enough entries
282 * for all tests we will truncate the list to avoid overrun.
284 size
= sizeof(*tmp
)+kcfg
->cfg_data
.splat_tests
.size
*sizeof(splat_user_t
);
285 tmp
= kmalloc(size
, GFP_KERNEL
);
289 /* Local 'tmp' is used as the structure copied back to user space */
290 memset(tmp
, 0, size
);
291 memcpy(tmp
, kcfg
, sizeof(*kcfg
));
293 spin_lock(&(sub
->test_lock
));
294 list_for_each_entry(test
, &(sub
->test_list
), test_list
) {
295 strncpy(tmp
->cfg_data
.splat_tests
.descs
[i
].name
,
296 test
->desc
.name
, SPLAT_NAME_SIZE
);
297 strncpy(tmp
->cfg_data
.splat_tests
.descs
[i
].desc
,
298 test
->desc
.desc
, SPLAT_DESC_SIZE
);
299 tmp
->cfg_data
.splat_tests
.descs
[i
].id
= test
->desc
.id
;
301 /* Truncate list if we are about to overrun alloc'ed memory */
302 if ((i
++) == kcfg
->cfg_data
.splat_tests
.size
)
305 spin_unlock(&(sub
->test_lock
));
308 if (copy_to_user((struct splat_cfg_t __user
*)arg
, tmp
, size
)) {
318 splat_validate(struct file
*file
, splat_subsystem_t
*sub
, int cmd
, void *arg
)
322 spin_lock(&(sub
->test_lock
));
323 list_for_each_entry(test
, &(sub
->test_list
), test_list
) {
324 if (test
->desc
.id
== cmd
) {
325 spin_unlock(&(sub
->test_lock
));
326 return test
->test(file
, arg
);
329 spin_unlock(&(sub
->test_lock
));
335 splat_ioctl_cfg(struct file
*file
, unsigned int cmd
, unsigned long arg
)
340 /* User and kernel space agree about arg size */
341 if (_IOC_SIZE(cmd
) != sizeof(kcfg
))
344 if (copy_from_user(&kcfg
, (splat_cfg_t
*)arg
, sizeof(kcfg
)))
347 if (kcfg
.cfg_magic
!= SPLAT_CFG_MAGIC
) {
348 splat_print(file
, "Bad config magic 0x%x != 0x%x\n",
349 kcfg
.cfg_magic
, SPLAT_CFG_MAGIC
);
353 switch (kcfg
.cfg_cmd
) {
354 case SPLAT_CFG_BUFFER_CLEAR
:
358 rc
= splat_buffer_clear(file
, &kcfg
, arg
);
360 case SPLAT_CFG_BUFFER_SIZE
:
361 /* cfg_arg1 - 0 - query size; >0 resize
362 * cfg_rc1 - Set to current buffer size
364 rc
= splat_buffer_size(file
, &kcfg
, arg
);
366 case SPLAT_CFG_SUBSYSTEM_COUNT
:
368 * cfg_rc1 - Set to number of subsystems
370 rc
= splat_subsystem_count(&kcfg
, arg
);
372 case SPLAT_CFG_SUBSYSTEM_LIST
:
374 * cfg_rc1 - Set to number of subsystems
375 * cfg_data.splat_subsystems - Set with subsystems
377 rc
= splat_subsystem_list(&kcfg
, arg
);
379 case SPLAT_CFG_TEST_COUNT
:
380 /* cfg_arg1 - Set to a target subsystem
381 * cfg_rc1 - Set to number of tests
383 rc
= splat_test_count(&kcfg
, arg
);
385 case SPLAT_CFG_TEST_LIST
:
386 /* cfg_arg1 - Set to a target subsystem
387 * cfg_rc1 - Set to number of tests
388 * cfg_data.splat_subsystems - Populated with tests
390 rc
= splat_test_list(&kcfg
, arg
);
393 splat_print(file
, "Bad config command %d\n",
403 splat_ioctl_cmd(struct file
*file
, unsigned int cmd
, unsigned long arg
)
405 splat_subsystem_t
*sub
;
410 /* User and kernel space agree about arg size */
411 if (_IOC_SIZE(cmd
) != sizeof(kcmd
))
414 if (copy_from_user(&kcmd
, (splat_cfg_t
*)arg
, sizeof(kcmd
)))
417 if (kcmd
.cmd_magic
!= SPLAT_CMD_MAGIC
) {
418 splat_print(file
, "Bad command magic 0x%x != 0x%x\n",
419 kcmd
.cmd_magic
, SPLAT_CFG_MAGIC
);
423 /* Allocate memory for any opaque data the caller needed to pass on */
424 if (kcmd
.cmd_data_size
> 0) {
425 data
= (void *)kmalloc(kcmd
.cmd_data_size
, GFP_KERNEL
);
429 if (copy_from_user(data
, (void *)(arg
+ offsetof(splat_cmd_t
,
430 cmd_data_str
)), kcmd
.cmd_data_size
)) {
436 sub
= splat_subsystem_find(kcmd
.cmd_subsystem
);
438 rc
= splat_validate(file
, sub
, kcmd
.cmd_test
, data
);
449 splat_ioctl(struct inode
*inode
, struct file
*file
,
450 unsigned int cmd
, unsigned long arg
)
452 unsigned int minor
= iminor(file
->f_dentry
->d_inode
);
455 /* Ignore tty ioctls */
456 if ((cmd
& 0xffffff00) == ((int)'T') << 8)
459 if (minor
>= SPLAT_MINORS
)
464 rc
= splat_ioctl_cfg(file
, cmd
, arg
);
467 rc
= splat_ioctl_cmd(file
, cmd
, arg
);
470 splat_print(file
, "Bad ioctl command %d\n", cmd
);
#ifdef CONFIG_COMPAT
/* Compatibility handler for ioctls from 32-bit ELF binaries */
static long
splat_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	/* No inode is available on the compat path; splat_ioctl only
	 * needs the file pointer, so pass NULL. */
	return splat_ioctl(NULL, file, cmd, arg);
}
#endif /* CONFIG_COMPAT */
487 /* I'm not sure why you would want to write in to this buffer from
488 * user space since its principle use is to pass test status info
489 * back to the user space, but I don't see any reason to prevent it.
491 static ssize_t
splat_write(struct file
*file
, const char __user
*buf
,
492 size_t count
, loff_t
*ppos
)
494 unsigned int minor
= iminor(file
->f_dentry
->d_inode
);
495 splat_info_t
*info
= (splat_info_t
*)file
->private_data
;
498 if (minor
>= SPLAT_MINORS
)
502 ASSERT(info
->info_buffer
);
504 spin_lock(&info
->info_lock
);
506 /* Write beyond EOF */
507 if (*ppos
>= info
->info_size
) {
512 /* Resize count if beyond EOF */
513 if (*ppos
+ count
> info
->info_size
)
514 count
= info
->info_size
- *ppos
;
516 if (copy_from_user(info
->info_buffer
, buf
, count
)) {
524 spin_unlock(&info
->info_lock
);
528 static ssize_t
splat_read(struct file
*file
, char __user
*buf
,
529 size_t count
, loff_t
*ppos
)
531 unsigned int minor
= iminor(file
->f_dentry
->d_inode
);
532 splat_info_t
*info
= (splat_info_t
*)file
->private_data
;
535 if (minor
>= SPLAT_MINORS
)
539 ASSERT(info
->info_buffer
);
541 spin_lock(&info
->info_lock
);
543 /* Read beyond EOF */
544 if (*ppos
>= info
->info_size
)
547 /* Resize count if beyond EOF */
548 if (*ppos
+ count
> info
->info_size
)
549 count
= info
->info_size
- *ppos
;
551 if (copy_to_user(buf
, info
->info_buffer
+ *ppos
, count
)) {
559 spin_unlock(&info
->info_lock
);
563 static loff_t
splat_seek(struct file
*file
, loff_t offset
, int origin
)
565 unsigned int minor
= iminor(file
->f_dentry
->d_inode
);
566 splat_info_t
*info
= (splat_info_t
*)file
->private_data
;
569 if (minor
>= SPLAT_MINORS
)
573 ASSERT(info
->info_buffer
);
575 spin_lock(&info
->info_lock
);
578 case 0: /* SEEK_SET - No-op just do it */
580 case 1: /* SEEK_CUR - Seek from current */
581 offset
= file
->f_pos
+ offset
;
583 case 2: /* SEEK_END - Seek from end */
584 offset
= info
->info_size
+ offset
;
589 file
->f_pos
= offset
;
594 spin_unlock(&info
->info_lock
);
599 static struct cdev splat_cdev
;
600 static struct file_operations splat_fops
= {
601 .owner
= THIS_MODULE
,
603 .release
= splat_release
,
604 .ioctl
= splat_ioctl
,
606 .compat_ioctl
= splat_compat_ioctl
,
609 .write
= splat_write
,
610 .llseek
= splat_seek
,
619 spin_lock_init(&splat_module_lock
);
620 INIT_LIST_HEAD(&splat_module_list
);
622 SPLAT_SUBSYSTEM_INIT(kmem
);
623 SPLAT_SUBSYSTEM_INIT(taskq
);
624 SPLAT_SUBSYSTEM_INIT(krng
);
625 SPLAT_SUBSYSTEM_INIT(mutex
);
626 SPLAT_SUBSYSTEM_INIT(condvar
);
627 SPLAT_SUBSYSTEM_INIT(thread
);
628 SPLAT_SUBSYSTEM_INIT(rwlock
);
629 SPLAT_SUBSYSTEM_INIT(time
);
630 SPLAT_SUBSYSTEM_INIT(vnode
);
631 SPLAT_SUBSYSTEM_INIT(kobj
);
632 SPLAT_SUBSYSTEM_INIT(atomic
);
633 SPLAT_SUBSYSTEM_INIT(list
);
634 SPLAT_SUBSYSTEM_INIT(generic
);
635 SPLAT_SUBSYSTEM_INIT(cred
);
637 dev
= MKDEV(SPLAT_MAJOR
, 0);
638 if ((rc
= register_chrdev_region(dev
, SPLAT_MINORS
, SPLAT_NAME
)))
641 /* Support for registering a character driver */
642 cdev_init(&splat_cdev
, &splat_fops
);
643 splat_cdev
.owner
= THIS_MODULE
;
644 kobject_set_name(&splat_cdev
.kobj
, SPLAT_NAME
);
645 if ((rc
= cdev_add(&splat_cdev
, dev
, SPLAT_MINORS
))) {
646 printk(KERN_ERR
"SPLAT: Error adding cdev, %d\n", rc
);
647 kobject_put(&splat_cdev
.kobj
);
648 unregister_chrdev_region(dev
, SPLAT_MINORS
);
652 /* Support for udev make driver info available in sysfs */
653 splat_class
= spl_class_create(THIS_MODULE
, "splat");
654 if (IS_ERR(splat_class
)) {
655 rc
= PTR_ERR(splat_class
);
656 printk(KERN_ERR
"SPLAT: Error creating splat class, %d\n", rc
);
657 cdev_del(&splat_cdev
);
658 unregister_chrdev_region(dev
, SPLAT_MINORS
);
662 splat_device
= spl_device_create(splat_class
, NULL
,
663 MKDEV(SPLAT_MAJOR
, 0),
666 printk(KERN_INFO
"SPLAT: Loaded Solaris Porting LAyer "
667 "Tests v%s\n", SPL_META_VERSION
);
670 printk(KERN_ERR
"SPLAT: Error registering splat device, %d\n", rc
);
677 dev_t dev
= MKDEV(SPLAT_MAJOR
, 0);
679 spl_device_destroy(splat_class
, splat_device
, dev
);
680 spl_class_destroy(splat_class
);
681 cdev_del(&splat_cdev
);
682 unregister_chrdev_region(dev
, SPLAT_MINORS
);
684 SPLAT_SUBSYSTEM_FINI(cred
);
685 SPLAT_SUBSYSTEM_FINI(generic
);
686 SPLAT_SUBSYSTEM_FINI(list
);
687 SPLAT_SUBSYSTEM_FINI(atomic
);
688 SPLAT_SUBSYSTEM_FINI(kobj
);
689 SPLAT_SUBSYSTEM_FINI(vnode
);
690 SPLAT_SUBSYSTEM_FINI(time
);
691 SPLAT_SUBSYSTEM_FINI(rwlock
);
692 SPLAT_SUBSYSTEM_FINI(thread
);
693 SPLAT_SUBSYSTEM_FINI(condvar
);
694 SPLAT_SUBSYSTEM_FINI(mutex
);
695 SPLAT_SUBSYSTEM_FINI(krng
);
696 SPLAT_SUBSYSTEM_FINI(taskq
);
697 SPLAT_SUBSYSTEM_FINI(kmem
);
699 ASSERT(list_empty(&splat_module_list
));
700 printk(KERN_INFO
"SPLAT: Unloaded Solaris Porting LAyer "
701 "Tests v%s\n", SPL_META_VERSION
);
706 spl_module_init(splat_init
);
707 spl_module_exit(splat_fini
);
709 MODULE_AUTHOR("Lawrence Livermore National Labs");
710 MODULE_DESCRIPTION("Solaris Porting LAyer Tests");
711 MODULE_LICENSE("GPL");