 * My intent is to create a loadable kzt (kernel ZFS test) module
3 * which can be used as an access point to run in kernel ZFS regression
4 * tests. Why do we need this when we have ztest? Well ztest.c only
 * exercises the ZFS code proper, it cannot be used to validate
 * the Linux kernel shim primitives. This also provides a nice hook for
7 * any other in kernel regression tests we wish to run such as direct
8 * in-kernel tests against the DMU.
 * The basic design of the kzt module is that it is constructed of
11 * various kzt_* source files each of which contains regression tests.
12 * For example the kzt_linux_kmem.c file contains tests for validating
13 * kmem correctness. When the kzt module is loaded kzt_*_init()
14 * will be called for each subsystems tests, similarly kzt_*_fini() is
15 * called when the kzt module is removed. Each test can then be
16 * run by making an ioctl() call from a userspace control application
17 * to pick the subsystem and test which should be run.
19 * Author: Brian Behlendorf
22 #include <sys/zfs_context.h>
23 #include <sys/splat-ctl.h>
25 #include <linux/version.h>
26 #include <linux/vmalloc.h>
27 #include <linux/module.h>
28 #include <linux/device.h>
30 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
31 #include <linux/devfs_fs_kernel.h>
34 #include <linux/cdev.h>
37 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
38 static struct class_simple
*kzt_class
;
40 static struct class *kzt_class
;
42 static struct list_head kzt_module_list
;
43 static spinlock_t kzt_module_lock
;
46 kzt_open(struct inode
*inode
, struct file
*file
)
48 unsigned int minor
= iminor(inode
);
51 if (minor
>= KZT_MINORS
)
54 info
= (kzt_info_t
*)kmalloc(sizeof(*info
), GFP_KERNEL
);
58 spin_lock_init(&info
->info_lock
);
59 info
->info_size
= KZT_INFO_BUFFER_SIZE
;
60 info
->info_buffer
= (char *)vmalloc(KZT_INFO_BUFFER_SIZE
);
61 if (info
->info_buffer
== NULL
) {
66 info
->info_head
= info
->info_buffer
;
67 file
->private_data
= (void *)info
;
69 kzt_print(file
, "Kernel ZFS Tests %s\n", KZT_VERSION
);
75 kzt_release(struct inode
*inode
, struct file
*file
)
77 unsigned int minor
= iminor(inode
);
78 kzt_info_t
*info
= (kzt_info_t
*)file
->private_data
;
80 if (minor
>= KZT_MINORS
)
84 ASSERT(info
->info_buffer
);
86 vfree(info
->info_buffer
);
93 kzt_buffer_clear(struct file
*file
, kzt_cfg_t
*kcfg
, unsigned long arg
)
95 kzt_info_t
*info
= (kzt_info_t
*)file
->private_data
;
98 ASSERT(info
->info_buffer
);
100 spin_lock(&info
->info_lock
);
101 memset(info
->info_buffer
, 0, info
->info_size
);
102 info
->info_head
= info
->info_buffer
;
103 spin_unlock(&info
->info_lock
);
109 kzt_buffer_size(struct file
*file
, kzt_cfg_t
*kcfg
, unsigned long arg
)
111 kzt_info_t
*info
= (kzt_info_t
*)file
->private_data
;
113 int min
, size
, rc
= 0;
116 ASSERT(info
->info_buffer
);
118 spin_lock(&info
->info_lock
);
119 if (kcfg
->cfg_arg1
> 0) {
121 size
= kcfg
->cfg_arg1
;
122 buf
= (char *)vmalloc(size
);
128 /* Zero fill and truncate contents when coping buffer */
129 min
= ((size
< info
->info_size
) ? size
: info
->info_size
);
130 memset(buf
, 0, size
);
131 memcpy(buf
, info
->info_buffer
, min
);
132 vfree(info
->info_buffer
);
133 info
->info_size
= size
;
134 info
->info_buffer
= buf
;
135 info
->info_head
= info
->info_buffer
;
138 kcfg
->cfg_rc1
= info
->info_size
;
140 if (copy_to_user((struct kzt_cfg_t __user
*)arg
, kcfg
, sizeof(*kcfg
)))
143 spin_unlock(&info
->info_lock
);
149 static kzt_subsystem_t
*
150 kzt_subsystem_find(int id
) {
151 kzt_subsystem_t
*sub
;
153 spin_lock(&kzt_module_lock
);
154 list_for_each_entry(sub
, &kzt_module_list
, subsystem_list
) {
155 if (id
== sub
->desc
.id
) {
156 spin_unlock(&kzt_module_lock
);
160 spin_unlock(&kzt_module_lock
);
166 kzt_subsystem_count(kzt_cfg_t
*kcfg
, unsigned long arg
)
168 kzt_subsystem_t
*sub
;
171 spin_lock(&kzt_module_lock
);
172 list_for_each_entry(sub
, &kzt_module_list
, subsystem_list
)
175 spin_unlock(&kzt_module_lock
);
178 if (copy_to_user((struct kzt_cfg_t __user
*)arg
, kcfg
, sizeof(*kcfg
)))
185 kzt_subsystem_list(kzt_cfg_t
*kcfg
, unsigned long arg
)
187 kzt_subsystem_t
*sub
;
191 /* Structure will be sized large enough for N subsystem entries
192 * which is passed in by the caller. On exit the number of
193 * entries filled in with valid subsystems will be stored in
194 * cfg_rc1. If the caller does not provide enough entries
195 * for all subsystems we will truncate the list to avoid overrun.
197 size
= sizeof(*tmp
) + kcfg
->cfg_data
.kzt_subsystems
.size
*
199 tmp
= kmalloc(size
, GFP_KERNEL
);
203 /* Local 'tmp' is used as the structure copied back to user space */
204 memset(tmp
, 0, size
);
205 memcpy(tmp
, kcfg
, sizeof(*kcfg
));
207 spin_lock(&kzt_module_lock
);
208 list_for_each_entry(sub
, &kzt_module_list
, subsystem_list
) {
209 strncpy(tmp
->cfg_data
.kzt_subsystems
.descs
[i
].name
,
210 sub
->desc
.name
, KZT_NAME_SIZE
);
211 strncpy(tmp
->cfg_data
.kzt_subsystems
.descs
[i
].desc
,
212 sub
->desc
.desc
, KZT_DESC_SIZE
);
213 tmp
->cfg_data
.kzt_subsystems
.descs
[i
].id
= sub
->desc
.id
;
215 /* Truncate list if we are about to overrun alloc'ed memory */
216 if ((i
++) == kcfg
->cfg_data
.kzt_subsystems
.size
)
219 spin_unlock(&kzt_module_lock
);
222 if (copy_to_user((struct kzt_cfg_t __user
*)arg
, tmp
, size
)) {
232 kzt_test_count(kzt_cfg_t
*kcfg
, unsigned long arg
)
234 kzt_subsystem_t
*sub
;
238 /* Subsystem ID passed as arg1 */
239 sub
= kzt_subsystem_find(kcfg
->cfg_arg1
);
243 spin_lock(&(sub
->test_lock
));
244 list_for_each_entry(test
, &(sub
->test_list
), test_list
)
247 spin_unlock(&(sub
->test_lock
));
250 if (copy_to_user((struct kzt_cfg_t __user
*)arg
, kcfg
, sizeof(*kcfg
)))
257 kzt_test_list(kzt_cfg_t
*kcfg
, unsigned long arg
)
259 kzt_subsystem_t
*sub
;
264 /* Subsystem ID passed as arg1 */
265 sub
= kzt_subsystem_find(kcfg
->cfg_arg1
);
269 /* Structure will be sized large enough for N test entries
270 * which is passed in by the caller. On exit the number of
271 * entries filled in with valid tests will be stored in
272 * cfg_rc1. If the caller does not provide enough entries
273 * for all tests we will truncate the list to avoid overrun.
275 size
= sizeof(*tmp
)+kcfg
->cfg_data
.kzt_tests
.size
*sizeof(kzt_user_t
);
276 tmp
= kmalloc(size
, GFP_KERNEL
);
280 /* Local 'tmp' is used as the structure copied back to user space */
281 memset(tmp
, 0, size
);
282 memcpy(tmp
, kcfg
, sizeof(*kcfg
));
284 spin_lock(&(sub
->test_lock
));
285 list_for_each_entry(test
, &(sub
->test_list
), test_list
) {
286 strncpy(tmp
->cfg_data
.kzt_tests
.descs
[i
].name
,
287 test
->desc
.name
, KZT_NAME_SIZE
);
288 strncpy(tmp
->cfg_data
.kzt_tests
.descs
[i
].desc
,
289 test
->desc
.desc
, KZT_DESC_SIZE
);
290 tmp
->cfg_data
.kzt_tests
.descs
[i
].id
= test
->desc
.id
;
292 /* Truncate list if we are about to overrun alloc'ed memory */
293 if ((i
++) == kcfg
->cfg_data
.kzt_tests
.size
)
296 spin_unlock(&(sub
->test_lock
));
299 if (copy_to_user((struct kzt_cfg_t __user
*)arg
, tmp
, size
)) {
309 kzt_validate(struct file
*file
, kzt_subsystem_t
*sub
, int cmd
, void *arg
)
314 spin_lock(&(sub
->test_lock
));
315 list_for_each_entry(test
, &(sub
->test_list
), test_list
) {
316 if (test
->desc
.id
== cmd
) {
317 spin_unlock(&(sub
->test_lock
));
318 return test
->test(file
, arg
);
321 spin_unlock(&(sub
->test_lock
));
327 kzt_ioctl_cfg(struct file
*file
, unsigned long arg
)
332 if (copy_from_user(&kcfg
, (kzt_cfg_t
*)arg
, sizeof(kcfg
)))
335 if (kcfg
.cfg_magic
!= KZT_CFG_MAGIC
) {
336 kzt_print(file
, "Bad config magic 0x%x != 0x%x\n",
337 kcfg
.cfg_magic
, KZT_CFG_MAGIC
);
341 switch (kcfg
.cfg_cmd
) {
342 case KZT_CFG_BUFFER_CLEAR
:
346 rc
= kzt_buffer_clear(file
, &kcfg
, arg
);
348 case KZT_CFG_BUFFER_SIZE
:
349 /* cfg_arg1 - 0 - query size; >0 resize
350 * cfg_rc1 - Set to current buffer size
352 rc
= kzt_buffer_size(file
, &kcfg
, arg
);
354 case KZT_CFG_SUBSYSTEM_COUNT
:
356 * cfg_rc1 - Set to number of subsystems
358 rc
= kzt_subsystem_count(&kcfg
, arg
);
360 case KZT_CFG_SUBSYSTEM_LIST
:
362 * cfg_rc1 - Set to number of subsystems
363 * cfg_data.kzt_subsystems - Populated with subsystems
365 rc
= kzt_subsystem_list(&kcfg
, arg
);
367 case KZT_CFG_TEST_COUNT
:
368 /* cfg_arg1 - Set to a target subsystem
369 * cfg_rc1 - Set to number of tests
371 rc
= kzt_test_count(&kcfg
, arg
);
373 case KZT_CFG_TEST_LIST
:
374 /* cfg_arg1 - Set to a target subsystem
375 * cfg_rc1 - Set to number of tests
376 * cfg_data.kzt_subsystems - Populated with tests
378 rc
= kzt_test_list(&kcfg
, arg
);
381 kzt_print(file
, "Bad config command %d\n", kcfg
.cfg_cmd
);
390 kzt_ioctl_cmd(struct file
*file
, unsigned long arg
)
392 kzt_subsystem_t
*sub
;
397 if (copy_from_user(&kcmd
, (kzt_cfg_t
*)arg
, sizeof(kcmd
)))
400 if (kcmd
.cmd_magic
!= KZT_CMD_MAGIC
) {
401 kzt_print(file
, "Bad command magic 0x%x != 0x%x\n",
402 kcmd
.cmd_magic
, KZT_CFG_MAGIC
);
406 /* Allocate memory for any opaque data the caller needed to pass on */
407 if (kcmd
.cmd_data_size
> 0) {
408 data
= (void *)kmalloc(kcmd
.cmd_data_size
, GFP_KERNEL
);
412 if (copy_from_user(data
, (void *)(arg
+ offsetof(kzt_cmd_t
,
413 cmd_data_str
)), kcmd
.cmd_data_size
)) {
419 sub
= kzt_subsystem_find(kcmd
.cmd_subsystem
);
421 rc
= kzt_validate(file
, sub
, kcmd
.cmd_test
, data
);
432 kzt_ioctl(struct inode
*inode
, struct file
*file
,
433 unsigned int cmd
, unsigned long arg
)
437 /* Ignore tty ioctls */
438 if ((cmd
& 0xffffff00) == ((int)'T') << 8)
441 if (minor
>= KZT_MINORS
)
446 rc
= kzt_ioctl_cfg(file
, arg
);
449 rc
= kzt_ioctl_cmd(file
, arg
);
452 kzt_print(file
, "Bad ioctl command %d\n", cmd
);
460 /* I'm not sure why you would want to write in to this buffer from
461 * user space since its principle use is to pass test status info
462 * back to the user space, but I don't see any reason to prevent it.
464 static ssize_t
kzt_write(struct file
*file
, const char __user
*buf
,
465 size_t count
, loff_t
*ppos
)
467 unsigned int minor
= iminor(file
->f_dentry
->d_inode
);
468 kzt_info_t
*info
= (kzt_info_t
*)file
->private_data
;
471 if (minor
>= KZT_MINORS
)
475 ASSERT(info
->info_buffer
);
477 spin_lock(&info
->info_lock
);
479 /* Write beyond EOF */
480 if (*ppos
>= info
->info_size
) {
485 /* Resize count if beyond EOF */
486 if (*ppos
+ count
> info
->info_size
)
487 count
= info
->info_size
- *ppos
;
489 if (copy_from_user(info
->info_buffer
, buf
, count
)) {
497 spin_unlock(&info
->info_lock
);
501 static ssize_t
kzt_read(struct file
*file
, char __user
*buf
,
502 size_t count
, loff_t
*ppos
)
504 unsigned int minor
= iminor(file
->f_dentry
->d_inode
);
505 kzt_info_t
*info
= (kzt_info_t
*)file
->private_data
;
508 if (minor
>= KZT_MINORS
)
512 ASSERT(info
->info_buffer
);
514 spin_lock(&info
->info_lock
);
516 /* Read beyond EOF */
517 if (*ppos
>= info
->info_size
)
520 /* Resize count if beyond EOF */
521 if (*ppos
+ count
> info
->info_size
)
522 count
= info
->info_size
- *ppos
;
524 if (copy_to_user(buf
, info
->info_buffer
+ *ppos
, count
)) {
532 spin_unlock(&info
->info_lock
);
536 static loff_t
kzt_seek(struct file
*file
, loff_t offset
, int origin
)
538 unsigned int minor
= iminor(file
->f_dentry
->d_inode
);
539 kzt_info_t
*info
= (kzt_info_t
*)file
->private_data
;
542 if (minor
>= KZT_MINORS
)
546 ASSERT(info
->info_buffer
);
548 spin_lock(&info
->info_lock
);
551 case 0: /* SEEK_SET - No-op just do it */
553 case 1: /* SEEK_CUR - Seek from current */
554 offset
= file
->f_pos
+ offset
;
556 case 2: /* SEEK_END - Seek from end */
557 offset
= info
->info_size
+ offset
;
562 file
->f_pos
= offset
;
567 spin_unlock(&info
->info_lock
);
572 static struct file_operations kzt_fops
= {
573 .owner
= THIS_MODULE
,
575 .release
= kzt_release
,
582 static struct cdev kzt_cdev
= {
583 .owner
= THIS_MODULE
,
584 .kobj
= { .name
= "kztctl", },
593 spin_lock_init(&kzt_module_lock
);
594 INIT_LIST_HEAD(&kzt_module_list
);
596 KZT_SUBSYSTEM_INIT(kmem
);
597 KZT_SUBSYSTEM_INIT(taskq
);
598 KZT_SUBSYSTEM_INIT(krng
);
599 KZT_SUBSYSTEM_INIT(mutex
);
600 KZT_SUBSYSTEM_INIT(condvar
);
601 KZT_SUBSYSTEM_INIT(thread
);
602 KZT_SUBSYSTEM_INIT(rwlock
);
603 KZT_SUBSYSTEM_INIT(time
);
605 dev
= MKDEV(KZT_MAJOR
, 0);
606 if (rc
= register_chrdev_region(dev
, KZT_MINORS
, "kztctl"))
609 /* Support for registering a character driver */
610 cdev_init(&kzt_cdev
, &kzt_fops
);
611 if ((rc
= cdev_add(&kzt_cdev
, dev
, KZT_MINORS
))) {
612 printk(KERN_ERR
"kzt: Error adding cdev, %d\n", rc
);
613 kobject_put(&kzt_cdev
.kobj
);
614 unregister_chrdev_region(dev
, KZT_MINORS
);
618 /* Support for udev make driver info available in sysfs */
619 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
620 kzt_class
= class_simple_create(THIS_MODULE
, "kzt");
622 kzt_class
= class_create(THIS_MODULE
, "kzt");
624 if (IS_ERR(kzt_class
)) {
625 rc
= PTR_ERR(kzt_class
);
626 printk(KERN_ERR
"kzt: Error creating kzt class, %d\n", rc
);
628 unregister_chrdev_region(dev
, KZT_MINORS
);
632 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
633 class_simple_device_add(kzt_class
, MKDEV(KZT_MAJOR
, 0),
636 class_device_create(kzt_class
, NULL
, MKDEV(KZT_MAJOR
, 0),
640 printk(KERN_INFO
"kzt: Kernel ZFS Tests %s Loaded\n", KZT_VERSION
);
643 printk(KERN_ERR
"kzt: Error registering kzt device, %d\n", rc
);
650 dev_t dev
= MKDEV(KZT_MAJOR
, 0);
653 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
654 class_simple_device_remove(dev
);
655 class_simple_destroy(kzt_class
);
656 devfs_remove("kzt/kztctl");
659 class_device_destroy(kzt_class
, dev
);
660 class_destroy(kzt_class
);
663 unregister_chrdev_region(dev
, KZT_MINORS
);
665 KZT_SUBSYSTEM_FINI(time
);
666 KZT_SUBSYSTEM_FINI(rwlock
);
667 KZT_SUBSYSTEM_FINI(thread
);
668 KZT_SUBSYSTEM_FINI(condvar
);
669 KZT_SUBSYSTEM_FINI(mutex
);
670 KZT_SUBSYSTEM_FINI(krng
);
671 KZT_SUBSYSTEM_FINI(taskq
);
672 KZT_SUBSYSTEM_FINI(kmem
);
674 ASSERT(list_empty(&kzt_module_list
));
675 printk(KERN_INFO
"kzt: Kernel ZFS Tests %s Unloaded\n", KZT_VERSION
);
678 module_init(kzt_init
);
679 module_exit(kzt_fini
);
681 MODULE_AUTHOR("Lawrence Livermore National Labs");
682 MODULE_DESCRIPTION("Kernel ZFS Test");
683 MODULE_LICENSE("GPL");