1 /*****************************************************************************\
2 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
3 * Copyright (C) 2007 The Regents of the University of California.
4 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
5 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
8 * This file is part of the SPL, Solaris Porting Layer.
9 * For details, see <http://zfsonlinux.org/>.
11 * The SPL is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
16 * The SPL is distributed in the hope that it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
21 * You should have received a copy of the GNU General Public License along
22 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
23 *****************************************************************************
24 * Solaris Porting LAyer Tests (SPLAT) Test Control Interface.
26 * The 'splat' (Solaris Porting LAyer Tests) module is designed as a
27 * framework which runs various in kernel regression tests to validate
28 * the SPL primitives honor the Solaris ABI.
30 * The splat module is constructed of various splat_* source files each
31 * of which contain regression tests for a particular subsystem. For
32 * example, the splat_kmem.c file contains all the tests for validating
33 * the kmem interfaces have been implemented correctly. When the splat
34 * module is loaded splat_*_init() will be called for each subsystems
35 * tests. It is the responsibility of splat_*_init() to register all
36 * the tests for this subsystem using the SPLAT_TEST_INIT() macro.
37 * Similarly splat_*_fini() is called when the splat module is removed
38 * and is responsible for unregistering its tests via the SPLAT_TEST_FINI
39 * macro. Once a test is registered it can then be run with an ioctl()
40 * call which specifies the subsystem and test to be run. The provided
41 * splat command line tool can be used to display all available
42 * subsystems and tests. It can also be used to run the full suite
43 * of regression tests or particular tests.
44 \*****************************************************************************/
46 #include <linux/module.h>
47 #include <linux/slab.h>
48 #include <linux/vmalloc.h>
49 #include <linux/cdev.h>
51 #include <linux/uaccess.h>
52 #include <linux/miscdevice.h>
53 #include <sys/types.h>
54 #include <sys/debug.h>
55 #include <sys/mutex.h>
56 #include "splat-internal.h"
58 static struct list_head splat_module_list
;
59 static spinlock_t splat_module_lock
;
62 splat_open(struct inode
*inode
, struct file
*file
)
66 info
= (splat_info_t
*)kmalloc(sizeof(*info
), GFP_KERNEL
);
70 mutex_init(&info
->info_lock
, SPLAT_NAME
, MUTEX_DEFAULT
, NULL
);
71 info
->info_size
= SPLAT_INFO_BUFFER_SIZE
;
72 info
->info_buffer
= (char *)vmalloc(SPLAT_INFO_BUFFER_SIZE
);
73 if (info
->info_buffer
== NULL
) {
77 memset(info
->info_buffer
, 0, info
->info_size
);
79 info
->info_head
= info
->info_buffer
;
80 file
->private_data
= (void *)info
;
82 splat_print(file
, "%s\n", spl_version
);
88 splat_release(struct inode
*inode
, struct file
*file
)
90 splat_info_t
*info
= (splat_info_t
*)file
->private_data
;
93 ASSERT(info
->info_buffer
);
95 mutex_destroy(&info
->info_lock
);
96 vfree(info
->info_buffer
);
103 splat_buffer_clear(struct file
*file
, splat_cfg_t
*kcfg
, unsigned long arg
)
105 splat_info_t
*info
= (splat_info_t
*)file
->private_data
;
108 ASSERT(info
->info_buffer
);
110 mutex_enter(&info
->info_lock
);
111 memset(info
->info_buffer
, 0, info
->info_size
);
112 info
->info_head
= info
->info_buffer
;
113 mutex_exit(&info
->info_lock
);
119 splat_buffer_size(struct file
*file
, splat_cfg_t
*kcfg
, unsigned long arg
)
121 splat_info_t
*info
= (splat_info_t
*)file
->private_data
;
123 int min
, size
, rc
= 0;
126 ASSERT(info
->info_buffer
);
128 mutex_enter(&info
->info_lock
);
129 if (kcfg
->cfg_arg1
> 0) {
131 size
= kcfg
->cfg_arg1
;
132 buf
= (char *)vmalloc(size
);
138 /* Zero fill and truncate contents when coping buffer */
139 min
= ((size
< info
->info_size
) ? size
: info
->info_size
);
140 memset(buf
, 0, size
);
141 memcpy(buf
, info
->info_buffer
, min
);
142 vfree(info
->info_buffer
);
143 info
->info_size
= size
;
144 info
->info_buffer
= buf
;
145 info
->info_head
= info
->info_buffer
;
148 kcfg
->cfg_rc1
= info
->info_size
;
150 if (copy_to_user((struct splat_cfg_t __user
*)arg
, kcfg
, sizeof(*kcfg
)))
153 mutex_exit(&info
->info_lock
);
159 static splat_subsystem_t
*
160 splat_subsystem_find(int id
) {
161 splat_subsystem_t
*sub
;
163 spin_lock(&splat_module_lock
);
164 list_for_each_entry(sub
, &splat_module_list
, subsystem_list
) {
165 if (id
== sub
->desc
.id
) {
166 spin_unlock(&splat_module_lock
);
170 spin_unlock(&splat_module_lock
);
176 splat_subsystem_count(splat_cfg_t
*kcfg
, unsigned long arg
)
178 splat_subsystem_t
*sub
;
181 spin_lock(&splat_module_lock
);
182 list_for_each_entry(sub
, &splat_module_list
, subsystem_list
)
185 spin_unlock(&splat_module_lock
);
188 if (copy_to_user((struct splat_cfg_t __user
*)arg
, kcfg
, sizeof(*kcfg
)))
195 splat_subsystem_list(splat_cfg_t
*kcfg
, unsigned long arg
)
197 splat_subsystem_t
*sub
;
201 /* Structure will be sized large enough for N subsystem entries
202 * which is passed in by the caller. On exit the number of
203 * entries filled in with valid subsystems will be stored in
204 * cfg_rc1. If the caller does not provide enough entries
205 * for all subsystems we will truncate the list to avoid overrun.
207 size
= sizeof(*tmp
) + kcfg
->cfg_data
.splat_subsystems
.size
*
208 sizeof(splat_user_t
);
209 tmp
= kmalloc(size
, GFP_KERNEL
);
213 /* Local 'tmp' is used as the structure copied back to user space */
214 memset(tmp
, 0, size
);
215 memcpy(tmp
, kcfg
, sizeof(*kcfg
));
217 spin_lock(&splat_module_lock
);
218 list_for_each_entry(sub
, &splat_module_list
, subsystem_list
) {
219 strncpy(tmp
->cfg_data
.splat_subsystems
.descs
[i
].name
,
220 sub
->desc
.name
, SPLAT_NAME_SIZE
);
221 strncpy(tmp
->cfg_data
.splat_subsystems
.descs
[i
].desc
,
222 sub
->desc
.desc
, SPLAT_DESC_SIZE
);
223 tmp
->cfg_data
.splat_subsystems
.descs
[i
].id
= sub
->desc
.id
;
225 /* Truncate list if we are about to overrun alloc'ed memory */
226 if ((i
++) == kcfg
->cfg_data
.splat_subsystems
.size
)
229 spin_unlock(&splat_module_lock
);
232 if (copy_to_user((struct splat_cfg_t __user
*)arg
, tmp
, size
)) {
242 splat_test_count(splat_cfg_t
*kcfg
, unsigned long arg
)
244 splat_subsystem_t
*sub
;
248 /* Subsystem ID passed as arg1 */
249 sub
= splat_subsystem_find(kcfg
->cfg_arg1
);
253 spin_lock(&(sub
->test_lock
));
254 list_for_each_entry(test
, &(sub
->test_list
), test_list
)
257 spin_unlock(&(sub
->test_lock
));
260 if (copy_to_user((struct splat_cfg_t __user
*)arg
, kcfg
, sizeof(*kcfg
)))
267 splat_test_list(splat_cfg_t
*kcfg
, unsigned long arg
)
269 splat_subsystem_t
*sub
;
274 /* Subsystem ID passed as arg1 */
275 sub
= splat_subsystem_find(kcfg
->cfg_arg1
);
279 /* Structure will be sized large enough for N test entries
280 * which is passed in by the caller. On exit the number of
281 * entries filled in with valid tests will be stored in
282 * cfg_rc1. If the caller does not provide enough entries
283 * for all tests we will truncate the list to avoid overrun.
285 size
= sizeof(*tmp
)+kcfg
->cfg_data
.splat_tests
.size
*sizeof(splat_user_t
);
286 tmp
= kmalloc(size
, GFP_KERNEL
);
290 /* Local 'tmp' is used as the structure copied back to user space */
291 memset(tmp
, 0, size
);
292 memcpy(tmp
, kcfg
, sizeof(*kcfg
));
294 spin_lock(&(sub
->test_lock
));
295 list_for_each_entry(test
, &(sub
->test_list
), test_list
) {
296 strncpy(tmp
->cfg_data
.splat_tests
.descs
[i
].name
,
297 test
->desc
.name
, SPLAT_NAME_SIZE
);
298 strncpy(tmp
->cfg_data
.splat_tests
.descs
[i
].desc
,
299 test
->desc
.desc
, SPLAT_DESC_SIZE
);
300 tmp
->cfg_data
.splat_tests
.descs
[i
].id
= test
->desc
.id
;
302 /* Truncate list if we are about to overrun alloc'ed memory */
303 if ((i
++) == kcfg
->cfg_data
.splat_tests
.size
)
306 spin_unlock(&(sub
->test_lock
));
309 if (copy_to_user((struct splat_cfg_t __user
*)arg
, tmp
, size
)) {
319 splat_validate(struct file
*file
, splat_subsystem_t
*sub
, int cmd
, void *arg
)
323 spin_lock(&(sub
->test_lock
));
324 list_for_each_entry(test
, &(sub
->test_list
), test_list
) {
325 if (test
->desc
.id
== cmd
) {
326 spin_unlock(&(sub
->test_lock
));
327 return test
->test(file
, arg
);
330 spin_unlock(&(sub
->test_lock
));
336 splat_ioctl_cfg(struct file
*file
, unsigned int cmd
, unsigned long arg
)
341 /* User and kernel space agree about arg size */
342 if (_IOC_SIZE(cmd
) != sizeof(kcfg
))
345 if (copy_from_user(&kcfg
, (splat_cfg_t
*)arg
, sizeof(kcfg
)))
348 if (kcfg
.cfg_magic
!= SPLAT_CFG_MAGIC
) {
349 splat_print(file
, "Bad config magic 0x%x != 0x%x\n",
350 kcfg
.cfg_magic
, SPLAT_CFG_MAGIC
);
354 switch (kcfg
.cfg_cmd
) {
355 case SPLAT_CFG_BUFFER_CLEAR
:
359 rc
= splat_buffer_clear(file
, &kcfg
, arg
);
361 case SPLAT_CFG_BUFFER_SIZE
:
362 /* cfg_arg1 - 0 - query size; >0 resize
363 * cfg_rc1 - Set to current buffer size
365 rc
= splat_buffer_size(file
, &kcfg
, arg
);
367 case SPLAT_CFG_SUBSYSTEM_COUNT
:
369 * cfg_rc1 - Set to number of subsystems
371 rc
= splat_subsystem_count(&kcfg
, arg
);
373 case SPLAT_CFG_SUBSYSTEM_LIST
:
375 * cfg_rc1 - Set to number of subsystems
376 * cfg_data.splat_subsystems - Set with subsystems
378 rc
= splat_subsystem_list(&kcfg
, arg
);
380 case SPLAT_CFG_TEST_COUNT
:
381 /* cfg_arg1 - Set to a target subsystem
382 * cfg_rc1 - Set to number of tests
384 rc
= splat_test_count(&kcfg
, arg
);
386 case SPLAT_CFG_TEST_LIST
:
387 /* cfg_arg1 - Set to a target subsystem
388 * cfg_rc1 - Set to number of tests
389 * cfg_data.splat_subsystems - Populated with tests
391 rc
= splat_test_list(&kcfg
, arg
);
394 splat_print(file
, "Bad config command %d\n",
404 splat_ioctl_cmd(struct file
*file
, unsigned int cmd
, unsigned long arg
)
406 splat_subsystem_t
*sub
;
411 /* User and kernel space agree about arg size */
412 if (_IOC_SIZE(cmd
) != sizeof(kcmd
))
415 if (copy_from_user(&kcmd
, (splat_cfg_t
*)arg
, sizeof(kcmd
)))
418 if (kcmd
.cmd_magic
!= SPLAT_CMD_MAGIC
) {
419 splat_print(file
, "Bad command magic 0x%x != 0x%x\n",
420 kcmd
.cmd_magic
, SPLAT_CFG_MAGIC
);
424 /* Allocate memory for any opaque data the caller needed to pass on */
425 if (kcmd
.cmd_data_size
> 0) {
426 data
= (void *)kmalloc(kcmd
.cmd_data_size
, GFP_KERNEL
);
430 if (copy_from_user(data
, (void *)(arg
+ offsetof(splat_cmd_t
,
431 cmd_data_str
)), kcmd
.cmd_data_size
)) {
437 sub
= splat_subsystem_find(kcmd
.cmd_subsystem
);
439 rc
= splat_validate(file
, sub
, kcmd
.cmd_test
, data
);
450 splat_unlocked_ioctl(struct file
*file
, unsigned int cmd
, unsigned long arg
)
454 /* Ignore tty ioctls */
455 if ((cmd
& 0xffffff00) == ((int)'T') << 8)
460 rc
= splat_ioctl_cfg(file
, cmd
, arg
);
463 rc
= splat_ioctl_cmd(file
, cmd
, arg
);
466 splat_print(file
, "Bad ioctl command %d\n", cmd
);
#ifdef CONFIG_COMPAT
/* Compatibility handler for ioctls from 32-bit ELF binaries.  The SPLAT
 * structures use fixed layouts, so the 64-bit handler works unchanged. */
static long
splat_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	return splat_unlocked_ioctl(file, cmd, arg);
}
#endif /* CONFIG_COMPAT */
483 /* I'm not sure why you would want to write in to this buffer from
484 * user space since its principle use is to pass test status info
485 * back to the user space, but I don't see any reason to prevent it.
487 static ssize_t
splat_write(struct file
*file
, const char __user
*buf
,
488 size_t count
, loff_t
*ppos
)
490 splat_info_t
*info
= (splat_info_t
*)file
->private_data
;
494 ASSERT(info
->info_buffer
);
496 mutex_enter(&info
->info_lock
);
498 /* Write beyond EOF */
499 if (*ppos
>= info
->info_size
) {
504 /* Resize count if beyond EOF */
505 if (*ppos
+ count
> info
->info_size
)
506 count
= info
->info_size
- *ppos
;
508 if (copy_from_user(info
->info_buffer
, buf
, count
)) {
516 mutex_exit(&info
->info_lock
);
520 static ssize_t
splat_read(struct file
*file
, char __user
*buf
,
521 size_t count
, loff_t
*ppos
)
523 splat_info_t
*info
= (splat_info_t
*)file
->private_data
;
527 ASSERT(info
->info_buffer
);
529 mutex_enter(&info
->info_lock
);
531 /* Read beyond EOF */
532 if (*ppos
>= info
->info_size
)
535 /* Resize count if beyond EOF */
536 if (*ppos
+ count
> info
->info_size
)
537 count
= info
->info_size
- *ppos
;
539 if (copy_to_user(buf
, info
->info_buffer
+ *ppos
, count
)) {
547 mutex_exit(&info
->info_lock
);
551 static loff_t
splat_seek(struct file
*file
, loff_t offset
, int origin
)
553 splat_info_t
*info
= (splat_info_t
*)file
->private_data
;
557 ASSERT(info
->info_buffer
);
559 mutex_enter(&info
->info_lock
);
562 case 0: /* SEEK_SET - No-op just do it */
564 case 1: /* SEEK_CUR - Seek from current */
565 offset
= file
->f_pos
+ offset
;
567 case 2: /* SEEK_END - Seek from end */
568 offset
= info
->info_size
+ offset
;
573 file
->f_pos
= offset
;
578 mutex_exit(&info
->info_lock
);
583 static struct file_operations splat_fops
= {
584 .owner
= THIS_MODULE
,
586 .release
= splat_release
,
587 .unlocked_ioctl
= splat_unlocked_ioctl
,
589 .compat_ioctl
= splat_compat_ioctl
,
592 .write
= splat_write
,
593 .llseek
= splat_seek
,
596 static struct miscdevice splat_misc
= {
597 .minor
= MISC_DYNAMIC_MINOR
,
607 spin_lock_init(&splat_module_lock
);
608 INIT_LIST_HEAD(&splat_module_list
);
610 SPLAT_SUBSYSTEM_INIT(kmem
);
611 SPLAT_SUBSYSTEM_INIT(taskq
);
612 SPLAT_SUBSYSTEM_INIT(krng
);
613 SPLAT_SUBSYSTEM_INIT(mutex
);
614 SPLAT_SUBSYSTEM_INIT(condvar
);
615 SPLAT_SUBSYSTEM_INIT(thread
);
616 SPLAT_SUBSYSTEM_INIT(rwlock
);
617 SPLAT_SUBSYSTEM_INIT(time
);
618 SPLAT_SUBSYSTEM_INIT(vnode
);
619 SPLAT_SUBSYSTEM_INIT(kobj
);
620 SPLAT_SUBSYSTEM_INIT(atomic
);
621 SPLAT_SUBSYSTEM_INIT(list
);
622 SPLAT_SUBSYSTEM_INIT(generic
);
623 SPLAT_SUBSYSTEM_INIT(cred
);
624 SPLAT_SUBSYSTEM_INIT(zlib
);
625 SPLAT_SUBSYSTEM_INIT(linux
);
627 error
= misc_register(&splat_misc
);
629 printk(KERN_INFO
"SPLAT: misc_register() failed %d\n", error
);
631 printk(KERN_INFO
"SPLAT: Loaded module v%s-%s%s\n",
632 SPL_META_VERSION
, SPL_META_RELEASE
, SPL_DEBUG_STR
);
643 error
= misc_deregister(&splat_misc
);
645 printk(KERN_INFO
"SPLAT: misc_deregister() failed %d\n", error
);
647 SPLAT_SUBSYSTEM_FINI(linux
);
648 SPLAT_SUBSYSTEM_FINI(zlib
);
649 SPLAT_SUBSYSTEM_FINI(cred
);
650 SPLAT_SUBSYSTEM_FINI(generic
);
651 SPLAT_SUBSYSTEM_FINI(list
);
652 SPLAT_SUBSYSTEM_FINI(atomic
);
653 SPLAT_SUBSYSTEM_FINI(kobj
);
654 SPLAT_SUBSYSTEM_FINI(vnode
);
655 SPLAT_SUBSYSTEM_FINI(time
);
656 SPLAT_SUBSYSTEM_FINI(rwlock
);
657 SPLAT_SUBSYSTEM_FINI(thread
);
658 SPLAT_SUBSYSTEM_FINI(condvar
);
659 SPLAT_SUBSYSTEM_FINI(mutex
);
660 SPLAT_SUBSYSTEM_FINI(krng
);
661 SPLAT_SUBSYSTEM_FINI(taskq
);
662 SPLAT_SUBSYSTEM_FINI(kmem
);
664 ASSERT(list_empty(&splat_module_list
));
665 printk(KERN_INFO
"SPLAT: Unloaded module v%s-%s%s\n",
666 SPL_META_VERSION
, SPL_META_RELEASE
, SPL_DEBUG_STR
);
671 spl_module_init(splat_init
);
672 spl_module_exit(splat_fini
);
674 MODULE_DESCRIPTION("Solaris Porting LAyer Tests");
675 MODULE_AUTHOR(SPL_META_AUTHOR
);
676 MODULE_LICENSE(SPL_META_LICENSE
);
677 MODULE_VERSION(SPL_META_VERSION
"-" SPL_META_RELEASE
);