/*
 * splat/splat-ctl.c
 */
/*
 * My intent is to create a loadable kzt (kernel ZFS test) module
 * which can be used as an access point to run in-kernel ZFS regression
 * tests. Why do we need this when we have ztest? Well, ztest.c only
 * exercises the ZFS code proper; it cannot be used to validate the
 * Linux kernel shim primitives. This also provides a nice hook for
 * any other in-kernel regression tests we wish to run, such as direct
 * in-kernel tests against the DMU.
 *
 * The basic design of the kzt module is that it is constructed of
 * various kzt_* source files, each of which contains regression tests.
 * For example, the kzt_linux_kmem.c file contains tests for validating
 * kmem correctness. When the kzt module is loaded, kzt_*_init()
 * will be called for each subsystem's tests; similarly, kzt_*_fini() is
 * called when the kzt module is removed. Each test can then be
 * run by making an ioctl() call from a userspace control application
 * to pick the subsystem and test which should be run.
 *
 * Author: Brian Behlendorf
 */
21
22 #include <splat-ctl.h>
23
24 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
25 #include <linux/devfs_fs_kernel.h>
26 #endif
27
28 #include <linux/cdev.h>
29
30
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
static struct class_simple *kzt_class;  /* pre-2.6.18 "simple" class API */
#else
static struct class *kzt_class;         /* sysfs class so udev creates the node */
#endif
/* Registered test subsystems; list membership is protected by kzt_module_lock */
static struct list_head kzt_module_list;
static spinlock_t kzt_module_lock;
38
39 static int
40 kzt_open(struct inode *inode, struct file *file)
41 {
42 unsigned int minor = iminor(inode);
43 kzt_info_t *info;
44
45 if (minor >= KZT_MINORS)
46 return -ENXIO;
47
48 info = (kzt_info_t *)kmalloc(sizeof(*info), GFP_KERNEL);
49 if (info == NULL)
50 return -ENOMEM;
51
52 spin_lock_init(&info->info_lock);
53 info->info_size = KZT_INFO_BUFFER_SIZE;
54 info->info_buffer = (char *)vmalloc(KZT_INFO_BUFFER_SIZE);
55 if (info->info_buffer == NULL) {
56 kfree(info);
57 return -ENOMEM;
58 }
59
60 info->info_head = info->info_buffer;
61 file->private_data = (void *)info;
62
63 kzt_print(file, "Kernel ZFS Tests %s\n", KZT_VERSION);
64
65 return 0;
66 }
67
68 static int
69 kzt_release(struct inode *inode, struct file *file)
70 {
71 unsigned int minor = iminor(inode);
72 kzt_info_t *info = (kzt_info_t *)file->private_data;
73
74 if (minor >= KZT_MINORS)
75 return -ENXIO;
76
77 ASSERT(info);
78 ASSERT(info->info_buffer);
79
80 vfree(info->info_buffer);
81 kfree(info);
82
83 return 0;
84 }
85
86 static int
87 kzt_buffer_clear(struct file *file, kzt_cfg_t *kcfg, unsigned long arg)
88 {
89 kzt_info_t *info = (kzt_info_t *)file->private_data;
90
91 ASSERT(info);
92 ASSERT(info->info_buffer);
93
94 spin_lock(&info->info_lock);
95 memset(info->info_buffer, 0, info->info_size);
96 info->info_head = info->info_buffer;
97 spin_unlock(&info->info_lock);
98
99 return 0;
100 }
101
102 static int
103 kzt_buffer_size(struct file *file, kzt_cfg_t *kcfg, unsigned long arg)
104 {
105 kzt_info_t *info = (kzt_info_t *)file->private_data;
106 char *buf;
107 int min, size, rc = 0;
108
109 ASSERT(info);
110 ASSERT(info->info_buffer);
111
112 spin_lock(&info->info_lock);
113 if (kcfg->cfg_arg1 > 0) {
114
115 size = kcfg->cfg_arg1;
116 buf = (char *)vmalloc(size);
117 if (buf == NULL) {
118 rc = -ENOMEM;
119 goto out;
120 }
121
122 /* Zero fill and truncate contents when coping buffer */
123 min = ((size < info->info_size) ? size : info->info_size);
124 memset(buf, 0, size);
125 memcpy(buf, info->info_buffer, min);
126 vfree(info->info_buffer);
127 info->info_size = size;
128 info->info_buffer = buf;
129 info->info_head = info->info_buffer;
130 }
131
132 kcfg->cfg_rc1 = info->info_size;
133
134 if (copy_to_user((struct kzt_cfg_t __user *)arg, kcfg, sizeof(*kcfg)))
135 rc = -EFAULT;
136 out:
137 spin_unlock(&info->info_lock);
138
139 return rc;
140 }
141
142
143 static kzt_subsystem_t *
144 kzt_subsystem_find(int id) {
145 kzt_subsystem_t *sub;
146
147 spin_lock(&kzt_module_lock);
148 list_for_each_entry(sub, &kzt_module_list, subsystem_list) {
149 if (id == sub->desc.id) {
150 spin_unlock(&kzt_module_lock);
151 return sub;
152 }
153 }
154 spin_unlock(&kzt_module_lock);
155
156 return NULL;
157 }
158
159 static int
160 kzt_subsystem_count(kzt_cfg_t *kcfg, unsigned long arg)
161 {
162 kzt_subsystem_t *sub;
163 int i = 0;
164
165 spin_lock(&kzt_module_lock);
166 list_for_each_entry(sub, &kzt_module_list, subsystem_list)
167 i++;
168
169 spin_unlock(&kzt_module_lock);
170 kcfg->cfg_rc1 = i;
171
172 if (copy_to_user((struct kzt_cfg_t __user *)arg, kcfg, sizeof(*kcfg)))
173 return -EFAULT;
174
175 return 0;
176 }
177
178 static int
179 kzt_subsystem_list(kzt_cfg_t *kcfg, unsigned long arg)
180 {
181 kzt_subsystem_t *sub;
182 kzt_cfg_t *tmp;
183 int size, i = 0;
184
185 /* Structure will be sized large enough for N subsystem entries
186 * which is passed in by the caller. On exit the number of
187 * entries filled in with valid subsystems will be stored in
188 * cfg_rc1. If the caller does not provide enough entries
189 * for all subsystems we will truncate the list to avoid overrun.
190 */
191 size = sizeof(*tmp) + kcfg->cfg_data.kzt_subsystems.size *
192 sizeof(kzt_user_t);
193 tmp = kmalloc(size, GFP_KERNEL);
194 if (tmp == NULL)
195 return -ENOMEM;
196
197 /* Local 'tmp' is used as the structure copied back to user space */
198 memset(tmp, 0, size);
199 memcpy(tmp, kcfg, sizeof(*kcfg));
200
201 spin_lock(&kzt_module_lock);
202 list_for_each_entry(sub, &kzt_module_list, subsystem_list) {
203 strncpy(tmp->cfg_data.kzt_subsystems.descs[i].name,
204 sub->desc.name, KZT_NAME_SIZE);
205 strncpy(tmp->cfg_data.kzt_subsystems.descs[i].desc,
206 sub->desc.desc, KZT_DESC_SIZE);
207 tmp->cfg_data.kzt_subsystems.descs[i].id = sub->desc.id;
208
209 /* Truncate list if we are about to overrun alloc'ed memory */
210 if ((i++) == kcfg->cfg_data.kzt_subsystems.size)
211 break;
212 }
213 spin_unlock(&kzt_module_lock);
214 tmp->cfg_rc1 = i;
215
216 if (copy_to_user((struct kzt_cfg_t __user *)arg, tmp, size)) {
217 kfree(tmp);
218 return -EFAULT;
219 }
220
221 kfree(tmp);
222 return 0;
223 }
224
225 static int
226 kzt_test_count(kzt_cfg_t *kcfg, unsigned long arg)
227 {
228 kzt_subsystem_t *sub;
229 kzt_test_t *test;
230 int i = 0;
231
232 /* Subsystem ID passed as arg1 */
233 sub = kzt_subsystem_find(kcfg->cfg_arg1);
234 if (sub == NULL)
235 return -EINVAL;
236
237 spin_lock(&(sub->test_lock));
238 list_for_each_entry(test, &(sub->test_list), test_list)
239 i++;
240
241 spin_unlock(&(sub->test_lock));
242 kcfg->cfg_rc1 = i;
243
244 if (copy_to_user((struct kzt_cfg_t __user *)arg, kcfg, sizeof(*kcfg)))
245 return -EFAULT;
246
247 return 0;
248 }
249
250 static int
251 kzt_test_list(kzt_cfg_t *kcfg, unsigned long arg)
252 {
253 kzt_subsystem_t *sub;
254 kzt_test_t *test;
255 kzt_cfg_t *tmp;
256 int size, i = 0;
257
258 /* Subsystem ID passed as arg1 */
259 sub = kzt_subsystem_find(kcfg->cfg_arg1);
260 if (sub == NULL)
261 return -EINVAL;
262
263 /* Structure will be sized large enough for N test entries
264 * which is passed in by the caller. On exit the number of
265 * entries filled in with valid tests will be stored in
266 * cfg_rc1. If the caller does not provide enough entries
267 * for all tests we will truncate the list to avoid overrun.
268 */
269 size = sizeof(*tmp)+kcfg->cfg_data.kzt_tests.size*sizeof(kzt_user_t);
270 tmp = kmalloc(size, GFP_KERNEL);
271 if (tmp == NULL)
272 return -ENOMEM;
273
274 /* Local 'tmp' is used as the structure copied back to user space */
275 memset(tmp, 0, size);
276 memcpy(tmp, kcfg, sizeof(*kcfg));
277
278 spin_lock(&(sub->test_lock));
279 list_for_each_entry(test, &(sub->test_list), test_list) {
280 strncpy(tmp->cfg_data.kzt_tests.descs[i].name,
281 test->desc.name, KZT_NAME_SIZE);
282 strncpy(tmp->cfg_data.kzt_tests.descs[i].desc,
283 test->desc.desc, KZT_DESC_SIZE);
284 tmp->cfg_data.kzt_tests.descs[i].id = test->desc.id;
285
286 /* Truncate list if we are about to overrun alloc'ed memory */
287 if ((i++) == kcfg->cfg_data.kzt_tests.size)
288 break;
289 }
290 spin_unlock(&(sub->test_lock));
291 tmp->cfg_rc1 = i;
292
293 if (copy_to_user((struct kzt_cfg_t __user *)arg, tmp, size)) {
294 kfree(tmp);
295 return -EFAULT;
296 }
297
298 kfree(tmp);
299 return 0;
300 }
301
302 static int
303 kzt_validate(struct file *file, kzt_subsystem_t *sub, int cmd, void *arg)
304 {
305 kzt_test_t *test;
306
307 spin_lock(&(sub->test_lock));
308 list_for_each_entry(test, &(sub->test_list), test_list) {
309 if (test->desc.id == cmd) {
310 spin_unlock(&(sub->test_lock));
311 return test->test(file, arg);
312 }
313 }
314 spin_unlock(&(sub->test_lock));
315
316 return -EINVAL;
317 }
318
319 static int
320 kzt_ioctl_cfg(struct file *file, unsigned long arg)
321 {
322 kzt_cfg_t kcfg;
323 int rc = 0;
324
325 if (copy_from_user(&kcfg, (kzt_cfg_t *)arg, sizeof(kcfg)))
326 return -EFAULT;
327
328 if (kcfg.cfg_magic != KZT_CFG_MAGIC) {
329 kzt_print(file, "Bad config magic 0x%x != 0x%x\n",
330 kcfg.cfg_magic, KZT_CFG_MAGIC);
331 return -EINVAL;
332 }
333
334 switch (kcfg.cfg_cmd) {
335 case KZT_CFG_BUFFER_CLEAR:
336 /* cfg_arg1 - Unused
337 * cfg_rc1 - Unused
338 */
339 rc = kzt_buffer_clear(file, &kcfg, arg);
340 break;
341 case KZT_CFG_BUFFER_SIZE:
342 /* cfg_arg1 - 0 - query size; >0 resize
343 * cfg_rc1 - Set to current buffer size
344 */
345 rc = kzt_buffer_size(file, &kcfg, arg);
346 break;
347 case KZT_CFG_SUBSYSTEM_COUNT:
348 /* cfg_arg1 - Unused
349 * cfg_rc1 - Set to number of subsystems
350 */
351 rc = kzt_subsystem_count(&kcfg, arg);
352 break;
353 case KZT_CFG_SUBSYSTEM_LIST:
354 /* cfg_arg1 - Unused
355 * cfg_rc1 - Set to number of subsystems
356 * cfg_data.kzt_subsystems - Populated with subsystems
357 */
358 rc = kzt_subsystem_list(&kcfg, arg);
359 break;
360 case KZT_CFG_TEST_COUNT:
361 /* cfg_arg1 - Set to a target subsystem
362 * cfg_rc1 - Set to number of tests
363 */
364 rc = kzt_test_count(&kcfg, arg);
365 break;
366 case KZT_CFG_TEST_LIST:
367 /* cfg_arg1 - Set to a target subsystem
368 * cfg_rc1 - Set to number of tests
369 * cfg_data.kzt_subsystems - Populated with tests
370 */
371 rc = kzt_test_list(&kcfg, arg);
372 break;
373 default:
374 kzt_print(file, "Bad config command %d\n", kcfg.cfg_cmd);
375 rc = -EINVAL;
376 break;
377 }
378
379 return rc;
380 }
381
382 static int
383 kzt_ioctl_cmd(struct file *file, unsigned long arg)
384 {
385 kzt_subsystem_t *sub;
386 kzt_cmd_t kcmd;
387 int rc = -EINVAL;
388 void *data = NULL;
389
390 if (copy_from_user(&kcmd, (kzt_cfg_t *)arg, sizeof(kcmd)))
391 return -EFAULT;
392
393 if (kcmd.cmd_magic != KZT_CMD_MAGIC) {
394 kzt_print(file, "Bad command magic 0x%x != 0x%x\n",
395 kcmd.cmd_magic, KZT_CFG_MAGIC);
396 return -EINVAL;
397 }
398
399 /* Allocate memory for any opaque data the caller needed to pass on */
400 if (kcmd.cmd_data_size > 0) {
401 data = (void *)kmalloc(kcmd.cmd_data_size, GFP_KERNEL);
402 if (data == NULL)
403 return -ENOMEM;
404
405 if (copy_from_user(data, (void *)(arg + offsetof(kzt_cmd_t,
406 cmd_data_str)), kcmd.cmd_data_size)) {
407 kfree(data);
408 return -EFAULT;
409 }
410 }
411
412 sub = kzt_subsystem_find(kcmd.cmd_subsystem);
413 if (sub != NULL)
414 rc = kzt_validate(file, sub, kcmd.cmd_test, data);
415 else
416 rc = -EINVAL;
417
418 if (data != NULL)
419 kfree(data);
420
421 return rc;
422 }
423
424 static int
425 kzt_ioctl(struct inode *inode, struct file *file,
426 unsigned int cmd, unsigned long arg)
427 {
428 unsigned int minor = iminor(file->f_dentry->d_inode);
429 int rc = 0;
430
431 /* Ignore tty ioctls */
432 if ((cmd & 0xffffff00) == ((int)'T') << 8)
433 return -ENOTTY;
434
435 if (minor >= KZT_MINORS)
436 return -ENXIO;
437
438 switch (cmd) {
439 case KZT_CFG:
440 rc = kzt_ioctl_cfg(file, arg);
441 break;
442 case KZT_CMD:
443 rc = kzt_ioctl_cmd(file, arg);
444 break;
445 default:
446 kzt_print(file, "Bad ioctl command %d\n", cmd);
447 rc = -EINVAL;
448 break;
449 }
450
451 return rc;
452 }
453
454 /* I'm not sure why you would want to write in to this buffer from
455 * user space since its principle use is to pass test status info
456 * back to the user space, but I don't see any reason to prevent it.
457 */
458 static ssize_t kzt_write(struct file *file, const char __user *buf,
459 size_t count, loff_t *ppos)
460 {
461 unsigned int minor = iminor(file->f_dentry->d_inode);
462 kzt_info_t *info = (kzt_info_t *)file->private_data;
463 int rc = 0;
464
465 if (minor >= KZT_MINORS)
466 return -ENXIO;
467
468 ASSERT(info);
469 ASSERT(info->info_buffer);
470
471 spin_lock(&info->info_lock);
472
473 /* Write beyond EOF */
474 if (*ppos >= info->info_size) {
475 rc = -EFBIG;
476 goto out;
477 }
478
479 /* Resize count if beyond EOF */
480 if (*ppos + count > info->info_size)
481 count = info->info_size - *ppos;
482
483 if (copy_from_user(info->info_buffer, buf, count)) {
484 rc = -EFAULT;
485 goto out;
486 }
487
488 *ppos += count;
489 rc = count;
490 out:
491 spin_unlock(&info->info_lock);
492 return rc;
493 }
494
495 static ssize_t kzt_read(struct file *file, char __user *buf,
496 size_t count, loff_t *ppos)
497 {
498 unsigned int minor = iminor(file->f_dentry->d_inode);
499 kzt_info_t *info = (kzt_info_t *)file->private_data;
500 int rc = 0;
501
502 if (minor >= KZT_MINORS)
503 return -ENXIO;
504
505 ASSERT(info);
506 ASSERT(info->info_buffer);
507
508 spin_lock(&info->info_lock);
509
510 /* Read beyond EOF */
511 if (*ppos >= info->info_size)
512 goto out;
513
514 /* Resize count if beyond EOF */
515 if (*ppos + count > info->info_size)
516 count = info->info_size - *ppos;
517
518 if (copy_to_user(buf, info->info_buffer + *ppos, count)) {
519 rc = -EFAULT;
520 goto out;
521 }
522
523 *ppos += count;
524 rc = count;
525 out:
526 spin_unlock(&info->info_lock);
527 return rc;
528 }
529
530 static loff_t kzt_seek(struct file *file, loff_t offset, int origin)
531 {
532 unsigned int minor = iminor(file->f_dentry->d_inode);
533 kzt_info_t *info = (kzt_info_t *)file->private_data;
534 int rc = -EINVAL;
535
536 if (minor >= KZT_MINORS)
537 return -ENXIO;
538
539 ASSERT(info);
540 ASSERT(info->info_buffer);
541
542 spin_lock(&info->info_lock);
543
544 switch (origin) {
545 case 0: /* SEEK_SET - No-op just do it */
546 break;
547 case 1: /* SEEK_CUR - Seek from current */
548 offset = file->f_pos + offset;
549 break;
550 case 2: /* SEEK_END - Seek from end */
551 offset = info->info_size + offset;
552 break;
553 }
554
555 if (offset >= 0) {
556 file->f_pos = offset;
557 file->f_version = 0;
558 rc = offset;
559 }
560
561 spin_unlock(&info->info_lock);
562
563 return rc;
564 }
565
566 static struct file_operations kzt_fops = {
567 .owner = THIS_MODULE,
568 .open = kzt_open,
569 .release = kzt_release,
570 .ioctl = kzt_ioctl,
571 .read = kzt_read,
572 .write = kzt_write,
573 .llseek = kzt_seek,
574 };
575
576 static struct cdev kzt_cdev = {
577 .owner = THIS_MODULE,
578 .kobj = { .name = "kztctl", },
579 };
580
581 static int __init
582 kzt_init(void)
583 {
584 dev_t dev;
585 int rc;
586
587 spin_lock_init(&kzt_module_lock);
588 INIT_LIST_HEAD(&kzt_module_list);
589
590 KZT_SUBSYSTEM_INIT(kmem);
591 KZT_SUBSYSTEM_INIT(taskq);
592 KZT_SUBSYSTEM_INIT(krng);
593 KZT_SUBSYSTEM_INIT(mutex);
594 KZT_SUBSYSTEM_INIT(condvar);
595 KZT_SUBSYSTEM_INIT(thread);
596 KZT_SUBSYSTEM_INIT(rwlock);
597 KZT_SUBSYSTEM_INIT(time);
598
599 dev = MKDEV(KZT_MAJOR, 0);
600 if ((rc = register_chrdev_region(dev, KZT_MINORS, "kztctl")))
601 goto error;
602
603 /* Support for registering a character driver */
604 cdev_init(&kzt_cdev, &kzt_fops);
605 if ((rc = cdev_add(&kzt_cdev, dev, KZT_MINORS))) {
606 printk(KERN_ERR "kzt: Error adding cdev, %d\n", rc);
607 kobject_put(&kzt_cdev.kobj);
608 unregister_chrdev_region(dev, KZT_MINORS);
609 goto error;
610 }
611
612 /* Support for udev make driver info available in sysfs */
613 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
614 kzt_class = class_simple_create(THIS_MODULE, "kzt");
615 #else
616 kzt_class = class_create(THIS_MODULE, "kzt");
617 #endif
618 if (IS_ERR(kzt_class)) {
619 rc = PTR_ERR(kzt_class);
620 printk(KERN_ERR "kzt: Error creating kzt class, %d\n", rc);
621 cdev_del(&kzt_cdev);
622 unregister_chrdev_region(dev, KZT_MINORS);
623 goto error;
624 }
625
626 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
627 class_simple_device_add(kzt_class, MKDEV(KZT_MAJOR, 0),
628 NULL, "kztctl");
629 #else
630 class_device_create(kzt_class, NULL, MKDEV(KZT_MAJOR, 0),
631 NULL, "kztctl");
632 #endif
633
634 printk(KERN_INFO "kzt: Kernel ZFS Tests %s Loaded\n", KZT_VERSION);
635 return 0;
636 error:
637 printk(KERN_ERR "kzt: Error registering kzt device, %d\n", rc);
638 return rc;
639 }
640
641 static void
642 kzt_fini(void)
643 {
644 dev_t dev = MKDEV(KZT_MAJOR, 0);
645
646 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
647 class_simple_device_remove(dev);
648 class_simple_destroy(kzt_class);
649 devfs_remove("kzt/kztctl");
650 devfs_remove("kzt");
651 #else
652 class_device_destroy(kzt_class, dev);
653 class_destroy(kzt_class);
654 #endif
655 cdev_del(&kzt_cdev);
656 unregister_chrdev_region(dev, KZT_MINORS);
657
658 KZT_SUBSYSTEM_FINI(time);
659 KZT_SUBSYSTEM_FINI(rwlock);
660 KZT_SUBSYSTEM_FINI(thread);
661 KZT_SUBSYSTEM_FINI(condvar);
662 KZT_SUBSYSTEM_FINI(mutex);
663 KZT_SUBSYSTEM_FINI(krng);
664 KZT_SUBSYSTEM_FINI(taskq);
665 KZT_SUBSYSTEM_FINI(kmem);
666
667 ASSERT(list_empty(&kzt_module_list));
668 printk(KERN_INFO "kzt: Kernel ZFS Tests %s Unloaded\n", KZT_VERSION);
669 }
670
/* Module entry/exit hooks and metadata */
module_init(kzt_init);
module_exit(kzt_fini);

MODULE_AUTHOR("Lawrence Livermore National Labs");
MODULE_DESCRIPTION("Kernel ZFS Test");
MODULE_LICENSE("GPL");
677