2 * My intent is to create a loadable kzt (kernel ZFS test) module
3 * which can be used as an access point to run in kernel ZFS regression
4 * tests. Why do we need this when we have ztest? Well ztest.c only
5 * exercises the ZFS code proper, it cannot be used to validate the
6 * Linux kernel shim primitives. This also provides a nice hook for
7 * any other in kernel regression tests we wish to run such as direct
8 * in-kernel tests against the DMU.
10 * The basic design is the kzt module is that it is constructed of
11 * various kzt_* source files each of which contains regression tests.
12 * For example the kzt_linux_kmem.c file contains tests for validating
13 * kmem correctness. When the kzt module is loaded kzt_*_init()
14 * will be called for each subsystems tests, similarly kzt_*_fini() is
15 * called when the kzt module is removed. Each test can then be
16 * run by making an ioctl() call from a userspace control application
17 * to pick the subsystem and test which should be run.
19 * Author: Brian Behlendorf
22 #include <splat-ctl.h>
24 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
25 #include <linux/devfs_fs_kernel.h>
28 #include <linux/cdev.h>
31 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
32 static struct class_simple
*kzt_class
;
34 static struct class *kzt_class
;
36 static struct list_head kzt_module_list
;
37 static spinlock_t kzt_module_lock
;
40 kzt_open(struct inode
*inode
, struct file
*file
)
42 unsigned int minor
= iminor(inode
);
45 if (minor
>= KZT_MINORS
)
48 info
= (kzt_info_t
*)kmalloc(sizeof(*info
), GFP_KERNEL
);
52 spin_lock_init(&info
->info_lock
);
53 info
->info_size
= KZT_INFO_BUFFER_SIZE
;
54 info
->info_buffer
= (char *)vmalloc(KZT_INFO_BUFFER_SIZE
);
55 if (info
->info_buffer
== NULL
) {
60 info
->info_head
= info
->info_buffer
;
61 file
->private_data
= (void *)info
;
63 kzt_print(file
, "Kernel ZFS Tests %s\n", KZT_VERSION
);
69 kzt_release(struct inode
*inode
, struct file
*file
)
71 unsigned int minor
= iminor(inode
);
72 kzt_info_t
*info
= (kzt_info_t
*)file
->private_data
;
74 if (minor
>= KZT_MINORS
)
78 ASSERT(info
->info_buffer
);
80 vfree(info
->info_buffer
);
87 kzt_buffer_clear(struct file
*file
, kzt_cfg_t
*kcfg
, unsigned long arg
)
89 kzt_info_t
*info
= (kzt_info_t
*)file
->private_data
;
92 ASSERT(info
->info_buffer
);
94 spin_lock(&info
->info_lock
);
95 memset(info
->info_buffer
, 0, info
->info_size
);
96 info
->info_head
= info
->info_buffer
;
97 spin_unlock(&info
->info_lock
);
103 kzt_buffer_size(struct file
*file
, kzt_cfg_t
*kcfg
, unsigned long arg
)
105 kzt_info_t
*info
= (kzt_info_t
*)file
->private_data
;
107 int min
, size
, rc
= 0;
110 ASSERT(info
->info_buffer
);
112 spin_lock(&info
->info_lock
);
113 if (kcfg
->cfg_arg1
> 0) {
115 size
= kcfg
->cfg_arg1
;
116 buf
= (char *)vmalloc(size
);
122 /* Zero fill and truncate contents when coping buffer */
123 min
= ((size
< info
->info_size
) ? size
: info
->info_size
);
124 memset(buf
, 0, size
);
125 memcpy(buf
, info
->info_buffer
, min
);
126 vfree(info
->info_buffer
);
127 info
->info_size
= size
;
128 info
->info_buffer
= buf
;
129 info
->info_head
= info
->info_buffer
;
132 kcfg
->cfg_rc1
= info
->info_size
;
134 if (copy_to_user((struct kzt_cfg_t __user
*)arg
, kcfg
, sizeof(*kcfg
)))
137 spin_unlock(&info
->info_lock
);
143 static kzt_subsystem_t
*
144 kzt_subsystem_find(int id
) {
145 kzt_subsystem_t
*sub
;
147 spin_lock(&kzt_module_lock
);
148 list_for_each_entry(sub
, &kzt_module_list
, subsystem_list
) {
149 if (id
== sub
->desc
.id
) {
150 spin_unlock(&kzt_module_lock
);
154 spin_unlock(&kzt_module_lock
);
160 kzt_subsystem_count(kzt_cfg_t
*kcfg
, unsigned long arg
)
162 kzt_subsystem_t
*sub
;
165 spin_lock(&kzt_module_lock
);
166 list_for_each_entry(sub
, &kzt_module_list
, subsystem_list
)
169 spin_unlock(&kzt_module_lock
);
172 if (copy_to_user((struct kzt_cfg_t __user
*)arg
, kcfg
, sizeof(*kcfg
)))
179 kzt_subsystem_list(kzt_cfg_t
*kcfg
, unsigned long arg
)
181 kzt_subsystem_t
*sub
;
185 /* Structure will be sized large enough for N subsystem entries
186 * which is passed in by the caller. On exit the number of
187 * entries filled in with valid subsystems will be stored in
188 * cfg_rc1. If the caller does not provide enough entries
189 * for all subsystems we will truncate the list to avoid overrun.
191 size
= sizeof(*tmp
) + kcfg
->cfg_data
.kzt_subsystems
.size
*
193 tmp
= kmalloc(size
, GFP_KERNEL
);
197 /* Local 'tmp' is used as the structure copied back to user space */
198 memset(tmp
, 0, size
);
199 memcpy(tmp
, kcfg
, sizeof(*kcfg
));
201 spin_lock(&kzt_module_lock
);
202 list_for_each_entry(sub
, &kzt_module_list
, subsystem_list
) {
203 strncpy(tmp
->cfg_data
.kzt_subsystems
.descs
[i
].name
,
204 sub
->desc
.name
, KZT_NAME_SIZE
);
205 strncpy(tmp
->cfg_data
.kzt_subsystems
.descs
[i
].desc
,
206 sub
->desc
.desc
, KZT_DESC_SIZE
);
207 tmp
->cfg_data
.kzt_subsystems
.descs
[i
].id
= sub
->desc
.id
;
209 /* Truncate list if we are about to overrun alloc'ed memory */
210 if ((i
++) == kcfg
->cfg_data
.kzt_subsystems
.size
)
213 spin_unlock(&kzt_module_lock
);
216 if (copy_to_user((struct kzt_cfg_t __user
*)arg
, tmp
, size
)) {
226 kzt_test_count(kzt_cfg_t
*kcfg
, unsigned long arg
)
228 kzt_subsystem_t
*sub
;
232 /* Subsystem ID passed as arg1 */
233 sub
= kzt_subsystem_find(kcfg
->cfg_arg1
);
237 spin_lock(&(sub
->test_lock
));
238 list_for_each_entry(test
, &(sub
->test_list
), test_list
)
241 spin_unlock(&(sub
->test_lock
));
244 if (copy_to_user((struct kzt_cfg_t __user
*)arg
, kcfg
, sizeof(*kcfg
)))
251 kzt_test_list(kzt_cfg_t
*kcfg
, unsigned long arg
)
253 kzt_subsystem_t
*sub
;
258 /* Subsystem ID passed as arg1 */
259 sub
= kzt_subsystem_find(kcfg
->cfg_arg1
);
263 /* Structure will be sized large enough for N test entries
264 * which is passed in by the caller. On exit the number of
265 * entries filled in with valid tests will be stored in
266 * cfg_rc1. If the caller does not provide enough entries
267 * for all tests we will truncate the list to avoid overrun.
269 size
= sizeof(*tmp
)+kcfg
->cfg_data
.kzt_tests
.size
*sizeof(kzt_user_t
);
270 tmp
= kmalloc(size
, GFP_KERNEL
);
274 /* Local 'tmp' is used as the structure copied back to user space */
275 memset(tmp
, 0, size
);
276 memcpy(tmp
, kcfg
, sizeof(*kcfg
));
278 spin_lock(&(sub
->test_lock
));
279 list_for_each_entry(test
, &(sub
->test_list
), test_list
) {
280 strncpy(tmp
->cfg_data
.kzt_tests
.descs
[i
].name
,
281 test
->desc
.name
, KZT_NAME_SIZE
);
282 strncpy(tmp
->cfg_data
.kzt_tests
.descs
[i
].desc
,
283 test
->desc
.desc
, KZT_DESC_SIZE
);
284 tmp
->cfg_data
.kzt_tests
.descs
[i
].id
= test
->desc
.id
;
286 /* Truncate list if we are about to overrun alloc'ed memory */
287 if ((i
++) == kcfg
->cfg_data
.kzt_tests
.size
)
290 spin_unlock(&(sub
->test_lock
));
293 if (copy_to_user((struct kzt_cfg_t __user
*)arg
, tmp
, size
)) {
303 kzt_validate(struct file
*file
, kzt_subsystem_t
*sub
, int cmd
, void *arg
)
307 spin_lock(&(sub
->test_lock
));
308 list_for_each_entry(test
, &(sub
->test_list
), test_list
) {
309 if (test
->desc
.id
== cmd
) {
310 spin_unlock(&(sub
->test_lock
));
311 return test
->test(file
, arg
);
314 spin_unlock(&(sub
->test_lock
));
320 kzt_ioctl_cfg(struct file
*file
, unsigned long arg
)
325 if (copy_from_user(&kcfg
, (kzt_cfg_t
*)arg
, sizeof(kcfg
)))
328 if (kcfg
.cfg_magic
!= KZT_CFG_MAGIC
) {
329 kzt_print(file
, "Bad config magic 0x%x != 0x%x\n",
330 kcfg
.cfg_magic
, KZT_CFG_MAGIC
);
334 switch (kcfg
.cfg_cmd
) {
335 case KZT_CFG_BUFFER_CLEAR
:
339 rc
= kzt_buffer_clear(file
, &kcfg
, arg
);
341 case KZT_CFG_BUFFER_SIZE
:
342 /* cfg_arg1 - 0 - query size; >0 resize
343 * cfg_rc1 - Set to current buffer size
345 rc
= kzt_buffer_size(file
, &kcfg
, arg
);
347 case KZT_CFG_SUBSYSTEM_COUNT
:
349 * cfg_rc1 - Set to number of subsystems
351 rc
= kzt_subsystem_count(&kcfg
, arg
);
353 case KZT_CFG_SUBSYSTEM_LIST
:
355 * cfg_rc1 - Set to number of subsystems
356 * cfg_data.kzt_subsystems - Populated with subsystems
358 rc
= kzt_subsystem_list(&kcfg
, arg
);
360 case KZT_CFG_TEST_COUNT
:
361 /* cfg_arg1 - Set to a target subsystem
362 * cfg_rc1 - Set to number of tests
364 rc
= kzt_test_count(&kcfg
, arg
);
366 case KZT_CFG_TEST_LIST
:
367 /* cfg_arg1 - Set to a target subsystem
368 * cfg_rc1 - Set to number of tests
369 * cfg_data.kzt_subsystems - Populated with tests
371 rc
= kzt_test_list(&kcfg
, arg
);
374 kzt_print(file
, "Bad config command %d\n", kcfg
.cfg_cmd
);
383 kzt_ioctl_cmd(struct file
*file
, unsigned long arg
)
385 kzt_subsystem_t
*sub
;
390 if (copy_from_user(&kcmd
, (kzt_cfg_t
*)arg
, sizeof(kcmd
)))
393 if (kcmd
.cmd_magic
!= KZT_CMD_MAGIC
) {
394 kzt_print(file
, "Bad command magic 0x%x != 0x%x\n",
395 kcmd
.cmd_magic
, KZT_CFG_MAGIC
);
399 /* Allocate memory for any opaque data the caller needed to pass on */
400 if (kcmd
.cmd_data_size
> 0) {
401 data
= (void *)kmalloc(kcmd
.cmd_data_size
, GFP_KERNEL
);
405 if (copy_from_user(data
, (void *)(arg
+ offsetof(kzt_cmd_t
,
406 cmd_data_str
)), kcmd
.cmd_data_size
)) {
412 sub
= kzt_subsystem_find(kcmd
.cmd_subsystem
);
414 rc
= kzt_validate(file
, sub
, kcmd
.cmd_test
, data
);
425 kzt_ioctl(struct inode
*inode
, struct file
*file
,
426 unsigned int cmd
, unsigned long arg
)
428 unsigned int minor
= iminor(file
->f_dentry
->d_inode
);
431 /* Ignore tty ioctls */
432 if ((cmd
& 0xffffff00) == ((int)'T') << 8)
435 if (minor
>= KZT_MINORS
)
440 rc
= kzt_ioctl_cfg(file
, arg
);
443 rc
= kzt_ioctl_cmd(file
, arg
);
446 kzt_print(file
, "Bad ioctl command %d\n", cmd
);
454 /* I'm not sure why you would want to write in to this buffer from
455 * user space since its principle use is to pass test status info
456 * back to the user space, but I don't see any reason to prevent it.
458 static ssize_t
kzt_write(struct file
*file
, const char __user
*buf
,
459 size_t count
, loff_t
*ppos
)
461 unsigned int minor
= iminor(file
->f_dentry
->d_inode
);
462 kzt_info_t
*info
= (kzt_info_t
*)file
->private_data
;
465 if (minor
>= KZT_MINORS
)
469 ASSERT(info
->info_buffer
);
471 spin_lock(&info
->info_lock
);
473 /* Write beyond EOF */
474 if (*ppos
>= info
->info_size
) {
479 /* Resize count if beyond EOF */
480 if (*ppos
+ count
> info
->info_size
)
481 count
= info
->info_size
- *ppos
;
483 if (copy_from_user(info
->info_buffer
, buf
, count
)) {
491 spin_unlock(&info
->info_lock
);
495 static ssize_t
kzt_read(struct file
*file
, char __user
*buf
,
496 size_t count
, loff_t
*ppos
)
498 unsigned int minor
= iminor(file
->f_dentry
->d_inode
);
499 kzt_info_t
*info
= (kzt_info_t
*)file
->private_data
;
502 if (minor
>= KZT_MINORS
)
506 ASSERT(info
->info_buffer
);
508 spin_lock(&info
->info_lock
);
510 /* Read beyond EOF */
511 if (*ppos
>= info
->info_size
)
514 /* Resize count if beyond EOF */
515 if (*ppos
+ count
> info
->info_size
)
516 count
= info
->info_size
- *ppos
;
518 if (copy_to_user(buf
, info
->info_buffer
+ *ppos
, count
)) {
526 spin_unlock(&info
->info_lock
);
530 static loff_t
kzt_seek(struct file
*file
, loff_t offset
, int origin
)
532 unsigned int minor
= iminor(file
->f_dentry
->d_inode
);
533 kzt_info_t
*info
= (kzt_info_t
*)file
->private_data
;
536 if (minor
>= KZT_MINORS
)
540 ASSERT(info
->info_buffer
);
542 spin_lock(&info
->info_lock
);
545 case 0: /* SEEK_SET - No-op just do it */
547 case 1: /* SEEK_CUR - Seek from current */
548 offset
= file
->f_pos
+ offset
;
550 case 2: /* SEEK_END - Seek from end */
551 offset
= info
->info_size
+ offset
;
556 file
->f_pos
= offset
;
561 spin_unlock(&info
->info_lock
);
566 static struct file_operations kzt_fops
= {
567 .owner
= THIS_MODULE
,
569 .release
= kzt_release
,
576 static struct cdev kzt_cdev
= {
577 .owner
= THIS_MODULE
,
578 .kobj
= { .name
= "kztctl", },
587 spin_lock_init(&kzt_module_lock
);
588 INIT_LIST_HEAD(&kzt_module_list
);
590 KZT_SUBSYSTEM_INIT(kmem
);
591 KZT_SUBSYSTEM_INIT(taskq
);
592 KZT_SUBSYSTEM_INIT(krng
);
593 KZT_SUBSYSTEM_INIT(mutex
);
594 KZT_SUBSYSTEM_INIT(condvar
);
595 KZT_SUBSYSTEM_INIT(thread
);
596 KZT_SUBSYSTEM_INIT(rwlock
);
597 KZT_SUBSYSTEM_INIT(time
);
599 dev
= MKDEV(KZT_MAJOR
, 0);
600 if ((rc
= register_chrdev_region(dev
, KZT_MINORS
, "kztctl")))
603 /* Support for registering a character driver */
604 cdev_init(&kzt_cdev
, &kzt_fops
);
605 if ((rc
= cdev_add(&kzt_cdev
, dev
, KZT_MINORS
))) {
606 printk(KERN_ERR
"kzt: Error adding cdev, %d\n", rc
);
607 kobject_put(&kzt_cdev
.kobj
);
608 unregister_chrdev_region(dev
, KZT_MINORS
);
612 /* Support for udev make driver info available in sysfs */
613 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
614 kzt_class
= class_simple_create(THIS_MODULE
, "kzt");
616 kzt_class
= class_create(THIS_MODULE
, "kzt");
618 if (IS_ERR(kzt_class
)) {
619 rc
= PTR_ERR(kzt_class
);
620 printk(KERN_ERR
"kzt: Error creating kzt class, %d\n", rc
);
622 unregister_chrdev_region(dev
, KZT_MINORS
);
626 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
627 class_simple_device_add(kzt_class
, MKDEV(KZT_MAJOR
, 0),
630 class_device_create(kzt_class
, NULL
, MKDEV(KZT_MAJOR
, 0),
634 printk(KERN_INFO
"kzt: Kernel ZFS Tests %s Loaded\n", KZT_VERSION
);
637 printk(KERN_ERR
"kzt: Error registering kzt device, %d\n", rc
);
644 dev_t dev
= MKDEV(KZT_MAJOR
, 0);
646 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
647 class_simple_device_remove(dev
);
648 class_simple_destroy(kzt_class
);
649 devfs_remove("kzt/kztctl");
652 class_device_destroy(kzt_class
, dev
);
653 class_destroy(kzt_class
);
656 unregister_chrdev_region(dev
, KZT_MINORS
);
658 KZT_SUBSYSTEM_FINI(time
);
659 KZT_SUBSYSTEM_FINI(rwlock
);
660 KZT_SUBSYSTEM_FINI(thread
);
661 KZT_SUBSYSTEM_FINI(condvar
);
662 KZT_SUBSYSTEM_FINI(mutex
);
663 KZT_SUBSYSTEM_FINI(krng
);
664 KZT_SUBSYSTEM_FINI(taskq
);
665 KZT_SUBSYSTEM_FINI(kmem
);
667 ASSERT(list_empty(&kzt_module_list
));
668 printk(KERN_INFO
"kzt: Kernel ZFS Tests %s Unloaded\n", KZT_VERSION
);
671 module_init(kzt_init
);
672 module_exit(kzt_fini
);
674 MODULE_AUTHOR("Lawrence Livermore National Labs");
675 MODULE_DESCRIPTION("Kernel ZFS Test");
676 MODULE_LICENSE("GPL");