/*
 * This file is part of the SPL: Solaris Porting Layer.
 *
 * Copyright (c) 2008 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory
 * Written by:
 *         Brian Behlendorf <behlendorf1@llnl.gov>,
 *         Herb Wartens <wartens2@llnl.gov>,
 *         Jim Garlick <garlick@llnl.gov>
 *
 * This is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 */
/*
 * My intent is to create a loadable 'splat' (Solaris Porting LAyer
 * Tests) module which can be used as an access point to run
 * in kernel Solaris ABI regression tests.  This provides a
 * nice mechanism to validate the shim primitives are working properly.
 *
 * The basic design of the splat module is that it is constructed of
 * various splat_* source files each of which contains regression tests.
 * For example the splat_linux_kmem.c file contains tests for validating
 * kmem correctness.  When the splat module is loaded splat_*_init()
 * will be called for each subsystem's tests, similarly splat_*_fini() is
 * called when the splat module is removed.  Each test can then be
 * run by making an ioctl() call from a userspace control application
 * to pick the subsystem and test which should be run.
 */
43 #include "splat-internal.h"
46 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
47 #include <linux/devfs_fs_kernel.h>
50 #include <linux/cdev.h>
53 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
54 static struct class_simple
*splat_class
;
56 static struct class *splat_class
;
58 static struct list_head splat_module_list
;
59 static spinlock_t splat_module_lock
;
62 splat_open(struct inode
*inode
, struct file
*file
)
64 unsigned int minor
= iminor(inode
);
67 if (minor
>= SPLAT_MINORS
)
70 info
= (splat_info_t
*)kmalloc(sizeof(*info
), GFP_KERNEL
);
74 spin_lock_init(&info
->info_lock
);
75 info
->info_size
= SPLAT_INFO_BUFFER_SIZE
;
76 info
->info_buffer
= (char *)vmalloc(SPLAT_INFO_BUFFER_SIZE
);
77 if (info
->info_buffer
== NULL
) {
82 info
->info_head
= info
->info_buffer
;
83 file
->private_data
= (void *)info
;
89 splat_release(struct inode
*inode
, struct file
*file
)
91 unsigned int minor
= iminor(inode
);
92 splat_info_t
*info
= (splat_info_t
*)file
->private_data
;
94 if (minor
>= SPLAT_MINORS
)
98 ASSERT(info
->info_buffer
);
100 vfree(info
->info_buffer
);
107 splat_buffer_clear(struct file
*file
, splat_cfg_t
*kcfg
, unsigned long arg
)
109 splat_info_t
*info
= (splat_info_t
*)file
->private_data
;
112 ASSERT(info
->info_buffer
);
114 spin_lock(&info
->info_lock
);
115 memset(info
->info_buffer
, 0, info
->info_size
);
116 info
->info_head
= info
->info_buffer
;
117 spin_unlock(&info
->info_lock
);
123 splat_buffer_size(struct file
*file
, splat_cfg_t
*kcfg
, unsigned long arg
)
125 splat_info_t
*info
= (splat_info_t
*)file
->private_data
;
127 int min
, size
, rc
= 0;
130 ASSERT(info
->info_buffer
);
132 spin_lock(&info
->info_lock
);
133 if (kcfg
->cfg_arg1
> 0) {
135 size
= kcfg
->cfg_arg1
;
136 buf
= (char *)vmalloc(size
);
142 /* Zero fill and truncate contents when coping buffer */
143 min
= ((size
< info
->info_size
) ? size
: info
->info_size
);
144 memset(buf
, 0, size
);
145 memcpy(buf
, info
->info_buffer
, min
);
146 vfree(info
->info_buffer
);
147 info
->info_size
= size
;
148 info
->info_buffer
= buf
;
149 info
->info_head
= info
->info_buffer
;
152 kcfg
->cfg_rc1
= info
->info_size
;
154 if (copy_to_user((struct splat_cfg_t __user
*)arg
, kcfg
, sizeof(*kcfg
)))
157 spin_unlock(&info
->info_lock
);
163 static splat_subsystem_t
*
164 splat_subsystem_find(int id
) {
165 splat_subsystem_t
*sub
;
167 spin_lock(&splat_module_lock
);
168 list_for_each_entry(sub
, &splat_module_list
, subsystem_list
) {
169 if (id
== sub
->desc
.id
) {
170 spin_unlock(&splat_module_lock
);
174 spin_unlock(&splat_module_lock
);
180 splat_subsystem_count(splat_cfg_t
*kcfg
, unsigned long arg
)
182 splat_subsystem_t
*sub
;
185 spin_lock(&splat_module_lock
);
186 list_for_each_entry(sub
, &splat_module_list
, subsystem_list
)
189 spin_unlock(&splat_module_lock
);
192 if (copy_to_user((struct splat_cfg_t __user
*)arg
, kcfg
, sizeof(*kcfg
)))
199 splat_subsystem_list(splat_cfg_t
*kcfg
, unsigned long arg
)
201 splat_subsystem_t
*sub
;
205 /* Structure will be sized large enough for N subsystem entries
206 * which is passed in by the caller. On exit the number of
207 * entries filled in with valid subsystems will be stored in
208 * cfg_rc1. If the caller does not provide enough entries
209 * for all subsystems we will truncate the list to avoid overrun.
211 size
= sizeof(*tmp
) + kcfg
->cfg_data
.splat_subsystems
.size
*
212 sizeof(splat_user_t
);
213 tmp
= kmalloc(size
, GFP_KERNEL
);
217 /* Local 'tmp' is used as the structure copied back to user space */
218 memset(tmp
, 0, size
);
219 memcpy(tmp
, kcfg
, sizeof(*kcfg
));
221 spin_lock(&splat_module_lock
);
222 list_for_each_entry(sub
, &splat_module_list
, subsystem_list
) {
223 strncpy(tmp
->cfg_data
.splat_subsystems
.descs
[i
].name
,
224 sub
->desc
.name
, SPLAT_NAME_SIZE
);
225 strncpy(tmp
->cfg_data
.splat_subsystems
.descs
[i
].desc
,
226 sub
->desc
.desc
, SPLAT_DESC_SIZE
);
227 tmp
->cfg_data
.splat_subsystems
.descs
[i
].id
= sub
->desc
.id
;
229 /* Truncate list if we are about to overrun alloc'ed memory */
230 if ((i
++) == kcfg
->cfg_data
.splat_subsystems
.size
)
233 spin_unlock(&splat_module_lock
);
236 if (copy_to_user((struct splat_cfg_t __user
*)arg
, tmp
, size
)) {
246 splat_test_count(splat_cfg_t
*kcfg
, unsigned long arg
)
248 splat_subsystem_t
*sub
;
252 /* Subsystem ID passed as arg1 */
253 sub
= splat_subsystem_find(kcfg
->cfg_arg1
);
257 spin_lock(&(sub
->test_lock
));
258 list_for_each_entry(test
, &(sub
->test_list
), test_list
)
261 spin_unlock(&(sub
->test_lock
));
264 if (copy_to_user((struct splat_cfg_t __user
*)arg
, kcfg
, sizeof(*kcfg
)))
271 splat_test_list(splat_cfg_t
*kcfg
, unsigned long arg
)
273 splat_subsystem_t
*sub
;
278 /* Subsystem ID passed as arg1 */
279 sub
= splat_subsystem_find(kcfg
->cfg_arg1
);
283 /* Structure will be sized large enough for N test entries
284 * which is passed in by the caller. On exit the number of
285 * entries filled in with valid tests will be stored in
286 * cfg_rc1. If the caller does not provide enough entries
287 * for all tests we will truncate the list to avoid overrun.
289 size
= sizeof(*tmp
)+kcfg
->cfg_data
.splat_tests
.size
*sizeof(splat_user_t
);
290 tmp
= kmalloc(size
, GFP_KERNEL
);
294 /* Local 'tmp' is used as the structure copied back to user space */
295 memset(tmp
, 0, size
);
296 memcpy(tmp
, kcfg
, sizeof(*kcfg
));
298 spin_lock(&(sub
->test_lock
));
299 list_for_each_entry(test
, &(sub
->test_list
), test_list
) {
300 strncpy(tmp
->cfg_data
.splat_tests
.descs
[i
].name
,
301 test
->desc
.name
, SPLAT_NAME_SIZE
);
302 strncpy(tmp
->cfg_data
.splat_tests
.descs
[i
].desc
,
303 test
->desc
.desc
, SPLAT_DESC_SIZE
);
304 tmp
->cfg_data
.splat_tests
.descs
[i
].id
= test
->desc
.id
;
306 /* Truncate list if we are about to overrun alloc'ed memory */
307 if ((i
++) == kcfg
->cfg_data
.splat_tests
.size
)
310 spin_unlock(&(sub
->test_lock
));
313 if (copy_to_user((struct splat_cfg_t __user
*)arg
, tmp
, size
)) {
323 splat_validate(struct file
*file
, splat_subsystem_t
*sub
, int cmd
, void *arg
)
327 spin_lock(&(sub
->test_lock
));
328 list_for_each_entry(test
, &(sub
->test_list
), test_list
) {
329 if (test
->desc
.id
== cmd
) {
330 spin_unlock(&(sub
->test_lock
));
331 return test
->test(file
, arg
);
334 spin_unlock(&(sub
->test_lock
));
340 splat_ioctl_cfg(struct file
*file
, unsigned long arg
)
345 if (copy_from_user(&kcfg
, (splat_cfg_t
*)arg
, sizeof(kcfg
)))
348 if (kcfg
.cfg_magic
!= SPLAT_CFG_MAGIC
) {
349 splat_print(file
, "Bad config magic 0x%x != 0x%x\n",
350 kcfg
.cfg_magic
, SPLAT_CFG_MAGIC
);
354 switch (kcfg
.cfg_cmd
) {
355 case SPLAT_CFG_BUFFER_CLEAR
:
359 rc
= splat_buffer_clear(file
, &kcfg
, arg
);
361 case SPLAT_CFG_BUFFER_SIZE
:
362 /* cfg_arg1 - 0 - query size; >0 resize
363 * cfg_rc1 - Set to current buffer size
365 rc
= splat_buffer_size(file
, &kcfg
, arg
);
367 case SPLAT_CFG_SUBSYSTEM_COUNT
:
369 * cfg_rc1 - Set to number of subsystems
371 rc
= splat_subsystem_count(&kcfg
, arg
);
373 case SPLAT_CFG_SUBSYSTEM_LIST
:
375 * cfg_rc1 - Set to number of subsystems
376 * cfg_data.splat_subsystems - Populated with subsystems
378 rc
= splat_subsystem_list(&kcfg
, arg
);
380 case SPLAT_CFG_TEST_COUNT
:
381 /* cfg_arg1 - Set to a target subsystem
382 * cfg_rc1 - Set to number of tests
384 rc
= splat_test_count(&kcfg
, arg
);
386 case SPLAT_CFG_TEST_LIST
:
387 /* cfg_arg1 - Set to a target subsystem
388 * cfg_rc1 - Set to number of tests
389 * cfg_data.splat_subsystems - Populated with tests
391 rc
= splat_test_list(&kcfg
, arg
);
394 splat_print(file
, "Bad config command %d\n", kcfg
.cfg_cmd
);
403 splat_ioctl_cmd(struct file
*file
, unsigned long arg
)
405 splat_subsystem_t
*sub
;
410 if (copy_from_user(&kcmd
, (splat_cfg_t
*)arg
, sizeof(kcmd
)))
413 if (kcmd
.cmd_magic
!= SPLAT_CMD_MAGIC
) {
414 splat_print(file
, "Bad command magic 0x%x != 0x%x\n",
415 kcmd
.cmd_magic
, SPLAT_CFG_MAGIC
);
419 /* Allocate memory for any opaque data the caller needed to pass on */
420 if (kcmd
.cmd_data_size
> 0) {
421 data
= (void *)kmalloc(kcmd
.cmd_data_size
, GFP_KERNEL
);
425 if (copy_from_user(data
, (void *)(arg
+ offsetof(splat_cmd_t
,
426 cmd_data_str
)), kcmd
.cmd_data_size
)) {
432 sub
= splat_subsystem_find(kcmd
.cmd_subsystem
);
434 rc
= splat_validate(file
, sub
, kcmd
.cmd_test
, data
);
445 splat_ioctl(struct inode
*inode
, struct file
*file
,
446 unsigned int cmd
, unsigned long arg
)
448 unsigned int minor
= iminor(file
->f_dentry
->d_inode
);
451 /* Ignore tty ioctls */
452 if ((cmd
& 0xffffff00) == ((int)'T') << 8)
455 if (minor
>= SPLAT_MINORS
)
460 rc
= splat_ioctl_cfg(file
, arg
);
463 rc
= splat_ioctl_cmd(file
, arg
);
466 splat_print(file
, "Bad ioctl command %d\n", cmd
);
474 /* I'm not sure why you would want to write in to this buffer from
475 * user space since its principle use is to pass test status info
476 * back to the user space, but I don't see any reason to prevent it.
478 static ssize_t
splat_write(struct file
*file
, const char __user
*buf
,
479 size_t count
, loff_t
*ppos
)
481 unsigned int minor
= iminor(file
->f_dentry
->d_inode
);
482 splat_info_t
*info
= (splat_info_t
*)file
->private_data
;
485 if (minor
>= SPLAT_MINORS
)
489 ASSERT(info
->info_buffer
);
491 spin_lock(&info
->info_lock
);
493 /* Write beyond EOF */
494 if (*ppos
>= info
->info_size
) {
499 /* Resize count if beyond EOF */
500 if (*ppos
+ count
> info
->info_size
)
501 count
= info
->info_size
- *ppos
;
503 if (copy_from_user(info
->info_buffer
, buf
, count
)) {
511 spin_unlock(&info
->info_lock
);
515 static ssize_t
splat_read(struct file
*file
, char __user
*buf
,
516 size_t count
, loff_t
*ppos
)
518 unsigned int minor
= iminor(file
->f_dentry
->d_inode
);
519 splat_info_t
*info
= (splat_info_t
*)file
->private_data
;
522 if (minor
>= SPLAT_MINORS
)
526 ASSERT(info
->info_buffer
);
528 spin_lock(&info
->info_lock
);
530 /* Read beyond EOF */
531 if (*ppos
>= info
->info_size
)
534 /* Resize count if beyond EOF */
535 if (*ppos
+ count
> info
->info_size
)
536 count
= info
->info_size
- *ppos
;
538 if (copy_to_user(buf
, info
->info_buffer
+ *ppos
, count
)) {
546 spin_unlock(&info
->info_lock
);
550 static loff_t
splat_seek(struct file
*file
, loff_t offset
, int origin
)
552 unsigned int minor
= iminor(file
->f_dentry
->d_inode
);
553 splat_info_t
*info
= (splat_info_t
*)file
->private_data
;
556 if (minor
>= SPLAT_MINORS
)
560 ASSERT(info
->info_buffer
);
562 spin_lock(&info
->info_lock
);
565 case 0: /* SEEK_SET - No-op just do it */
567 case 1: /* SEEK_CUR - Seek from current */
568 offset
= file
->f_pos
+ offset
;
570 case 2: /* SEEK_END - Seek from end */
571 offset
= info
->info_size
+ offset
;
576 file
->f_pos
= offset
;
581 spin_unlock(&info
->info_lock
);
586 static struct file_operations splat_fops
= {
587 .owner
= THIS_MODULE
,
589 .release
= splat_release
,
590 .ioctl
= splat_ioctl
,
592 .write
= splat_write
,
593 .llseek
= splat_seek
,
596 static struct cdev splat_cdev
= {
597 .owner
= THIS_MODULE
,
598 .kobj
= { .name
= "splatctl", },
607 spin_lock_init(&splat_module_lock
);
608 INIT_LIST_HEAD(&splat_module_list
);
610 SPLAT_SUBSYSTEM_INIT(kmem
);
611 SPLAT_SUBSYSTEM_INIT(taskq
);
612 SPLAT_SUBSYSTEM_INIT(krng
);
613 SPLAT_SUBSYSTEM_INIT(mutex
);
614 SPLAT_SUBSYSTEM_INIT(condvar
);
615 SPLAT_SUBSYSTEM_INIT(thread
);
616 SPLAT_SUBSYSTEM_INIT(rwlock
);
617 SPLAT_SUBSYSTEM_INIT(time
);
618 SPLAT_SUBSYSTEM_INIT(vnode
);
619 SPLAT_SUBSYSTEM_INIT(kobj
);
620 SPLAT_SUBSYSTEM_INIT(atomic
);
622 dev
= MKDEV(SPLAT_MAJOR
, 0);
623 if ((rc
= register_chrdev_region(dev
, SPLAT_MINORS
, "splatctl")))
626 /* Support for registering a character driver */
627 cdev_init(&splat_cdev
, &splat_fops
);
628 if ((rc
= cdev_add(&splat_cdev
, dev
, SPLAT_MINORS
))) {
629 printk(KERN_ERR
"splat: Error adding cdev, %d\n", rc
);
630 kobject_put(&splat_cdev
.kobj
);
631 unregister_chrdev_region(dev
, SPLAT_MINORS
);
635 /* Support for udev make driver info available in sysfs */
636 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
637 splat_class
= class_simple_create(THIS_MODULE
, "splat");
639 splat_class
= class_create(THIS_MODULE
, "splat");
641 if (IS_ERR(splat_class
)) {
642 rc
= PTR_ERR(splat_class
);
643 printk(KERN_ERR
"splat: Error creating splat class, %d\n", rc
);
644 cdev_del(&splat_cdev
);
645 unregister_chrdev_region(dev
, SPLAT_MINORS
);
649 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
650 class_simple_device_add(splat_class
, MKDEV(SPLAT_MAJOR
, 0),
653 class_device_create(splat_class
, NULL
, MKDEV(SPLAT_MAJOR
, 0),
657 printk(KERN_INFO
"splat: Loaded Solaris Porting LAyer "
658 "Tests v%s\n", VERSION
);
661 printk(KERN_ERR
"splat: Error registering splat device, %d\n", rc
);
668 dev_t dev
= MKDEV(SPLAT_MAJOR
, 0);
670 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
671 class_simple_device_remove(dev
);
672 class_simple_destroy(splat_class
);
673 devfs_remove("splat/splatctl");
674 devfs_remove("splat");
676 class_device_destroy(splat_class
, dev
);
677 class_destroy(splat_class
);
679 cdev_del(&splat_cdev
);
680 unregister_chrdev_region(dev
, SPLAT_MINORS
);
682 SPLAT_SUBSYSTEM_FINI(atomic
);
683 SPLAT_SUBSYSTEM_FINI(kobj
);
684 SPLAT_SUBSYSTEM_FINI(vnode
);
685 SPLAT_SUBSYSTEM_FINI(time
);
686 SPLAT_SUBSYSTEM_FINI(rwlock
);
687 SPLAT_SUBSYSTEM_FINI(thread
);
688 SPLAT_SUBSYSTEM_FINI(condvar
);
689 SPLAT_SUBSYSTEM_FINI(mutex
);
690 SPLAT_SUBSYSTEM_FINI(krng
);
691 SPLAT_SUBSYSTEM_FINI(taskq
);
692 SPLAT_SUBSYSTEM_FINI(kmem
);
694 ASSERT(list_empty(&splat_module_list
));
695 printk(KERN_INFO
"splat: Unloaded Solaris Porting LAyer "
696 "Tests v%s\n", VERSION
);
699 module_init(splat_init
);
700 module_exit(splat_fini
);
702 MODULE_AUTHOR("Lawrence Livermore National Labs");
703 MODULE_DESCRIPTION("Solaris Porting LAyer Tests");
704 MODULE_LICENSE("GPL");