]> git.proxmox.com Git - mirror_spl.git/blob - src/splat/splat-ctl.c
Initial commit. All spl source written up to this point wrapped
[mirror_spl.git] / src / splat / splat-ctl.c
/*
 * My intent is to create a loadable kzt (kernel ZFS test) module
 * which can be used as an access point to run in-kernel ZFS regression
 * tests.  Why do we need this when we have ztest?  Well, ztest.c only
 * exercises the ZFS code proper; it cannot be used to validate the
 * Linux kernel shim primitives.  This also provides a nice hook for
 * any other in-kernel regression tests we wish to run, such as direct
 * in-kernel tests against the DMU.
 *
 * The basic design is that the kzt module is constructed of
 * various kzt_* source files, each of which contains regression tests.
 * For example, the kzt_linux_kmem.c file contains tests for validating
 * kmem correctness.  When the kzt module is loaded, kzt_*_init()
 * will be called for each subsystem's tests; similarly, kzt_*_fini()
 * is called when the kzt module is removed.  Each test can then be
 * run by making an ioctl() call from a userspace control application
 * to pick the subsystem and test which should be run.
 *
 * Author: Brian Behlendorf
 */
21
22 #include <sys/zfs_context.h>
23 #include <sys/splat-ctl.h>
24
25 #include <linux/version.h>
26 #include <linux/vmalloc.h>
27 #include <linux/module.h>
28 #include <linux/device.h>
29
30 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
31 #include <linux/devfs_fs_kernel.h>
32 #endif
33
34 #include <linux/cdev.h>
35
36
37 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
38 static struct class_simple *kzt_class;
39 #else
40 static struct class *kzt_class;
41 #endif
42 static struct list_head kzt_module_list;
43 static spinlock_t kzt_module_lock;
44
45 static int
46 kzt_open(struct inode *inode, struct file *file)
47 {
48 unsigned int minor = iminor(inode);
49 kzt_info_t *info;
50
51 if (minor >= KZT_MINORS)
52 return -ENXIO;
53
54 info = (kzt_info_t *)kmalloc(sizeof(*info), GFP_KERNEL);
55 if (info == NULL)
56 return -ENOMEM;
57
58 spin_lock_init(&info->info_lock);
59 info->info_size = KZT_INFO_BUFFER_SIZE;
60 info->info_buffer = (char *)vmalloc(KZT_INFO_BUFFER_SIZE);
61 if (info->info_buffer == NULL) {
62 kfree(info);
63 return -ENOMEM;
64 }
65
66 info->info_head = info->info_buffer;
67 file->private_data = (void *)info;
68
69 kzt_print(file, "Kernel ZFS Tests %s\n", KZT_VERSION);
70
71 return 0;
72 }
73
74 static int
75 kzt_release(struct inode *inode, struct file *file)
76 {
77 unsigned int minor = iminor(inode);
78 kzt_info_t *info = (kzt_info_t *)file->private_data;
79
80 if (minor >= KZT_MINORS)
81 return -ENXIO;
82
83 ASSERT(info);
84 ASSERT(info->info_buffer);
85
86 vfree(info->info_buffer);
87 kfree(info);
88
89 return 0;
90 }
91
92 static int
93 kzt_buffer_clear(struct file *file, kzt_cfg_t *kcfg, unsigned long arg)
94 {
95 kzt_info_t *info = (kzt_info_t *)file->private_data;
96
97 ASSERT(info);
98 ASSERT(info->info_buffer);
99
100 spin_lock(&info->info_lock);
101 memset(info->info_buffer, 0, info->info_size);
102 info->info_head = info->info_buffer;
103 spin_unlock(&info->info_lock);
104
105 return 0;
106 }
107
108 static int
109 kzt_buffer_size(struct file *file, kzt_cfg_t *kcfg, unsigned long arg)
110 {
111 kzt_info_t *info = (kzt_info_t *)file->private_data;
112 char *buf;
113 int min, size, rc = 0;
114
115 ASSERT(info);
116 ASSERT(info->info_buffer);
117
118 spin_lock(&info->info_lock);
119 if (kcfg->cfg_arg1 > 0) {
120
121 size = kcfg->cfg_arg1;
122 buf = (char *)vmalloc(size);
123 if (buf == NULL) {
124 rc = -ENOMEM;
125 goto out;
126 }
127
128 /* Zero fill and truncate contents when coping buffer */
129 min = ((size < info->info_size) ? size : info->info_size);
130 memset(buf, 0, size);
131 memcpy(buf, info->info_buffer, min);
132 vfree(info->info_buffer);
133 info->info_size = size;
134 info->info_buffer = buf;
135 info->info_head = info->info_buffer;
136 }
137
138 kcfg->cfg_rc1 = info->info_size;
139
140 if (copy_to_user((struct kzt_cfg_t __user *)arg, kcfg, sizeof(*kcfg)))
141 rc = -EFAULT;
142 out:
143 spin_unlock(&info->info_lock);
144
145 return rc;
146 }
147
148
149 static kzt_subsystem_t *
150 kzt_subsystem_find(int id) {
151 kzt_subsystem_t *sub;
152
153 spin_lock(&kzt_module_lock);
154 list_for_each_entry(sub, &kzt_module_list, subsystem_list) {
155 if (id == sub->desc.id) {
156 spin_unlock(&kzt_module_lock);
157 return sub;
158 }
159 }
160 spin_unlock(&kzt_module_lock);
161
162 return NULL;
163 }
164
165 static int
166 kzt_subsystem_count(kzt_cfg_t *kcfg, unsigned long arg)
167 {
168 kzt_subsystem_t *sub;
169 int i = 0;
170
171 spin_lock(&kzt_module_lock);
172 list_for_each_entry(sub, &kzt_module_list, subsystem_list)
173 i++;
174
175 spin_unlock(&kzt_module_lock);
176 kcfg->cfg_rc1 = i;
177
178 if (copy_to_user((struct kzt_cfg_t __user *)arg, kcfg, sizeof(*kcfg)))
179 return -EFAULT;
180
181 return 0;
182 }
183
184 static int
185 kzt_subsystem_list(kzt_cfg_t *kcfg, unsigned long arg)
186 {
187 kzt_subsystem_t *sub;
188 kzt_cfg_t *tmp;
189 int size, i = 0;
190
191 /* Structure will be sized large enough for N subsystem entries
192 * which is passed in by the caller. On exit the number of
193 * entries filled in with valid subsystems will be stored in
194 * cfg_rc1. If the caller does not provide enough entries
195 * for all subsystems we will truncate the list to avoid overrun.
196 */
197 size = sizeof(*tmp) + kcfg->cfg_data.kzt_subsystems.size *
198 sizeof(kzt_user_t);
199 tmp = kmalloc(size, GFP_KERNEL);
200 if (tmp == NULL)
201 return -ENOMEM;
202
203 /* Local 'tmp' is used as the structure copied back to user space */
204 memset(tmp, 0, size);
205 memcpy(tmp, kcfg, sizeof(*kcfg));
206
207 spin_lock(&kzt_module_lock);
208 list_for_each_entry(sub, &kzt_module_list, subsystem_list) {
209 strncpy(tmp->cfg_data.kzt_subsystems.descs[i].name,
210 sub->desc.name, KZT_NAME_SIZE);
211 strncpy(tmp->cfg_data.kzt_subsystems.descs[i].desc,
212 sub->desc.desc, KZT_DESC_SIZE);
213 tmp->cfg_data.kzt_subsystems.descs[i].id = sub->desc.id;
214
215 /* Truncate list if we are about to overrun alloc'ed memory */
216 if ((i++) == kcfg->cfg_data.kzt_subsystems.size)
217 break;
218 }
219 spin_unlock(&kzt_module_lock);
220 tmp->cfg_rc1 = i;
221
222 if (copy_to_user((struct kzt_cfg_t __user *)arg, tmp, size)) {
223 kfree(tmp);
224 return -EFAULT;
225 }
226
227 kfree(tmp);
228 return 0;
229 }
230
231 static int
232 kzt_test_count(kzt_cfg_t *kcfg, unsigned long arg)
233 {
234 kzt_subsystem_t *sub;
235 kzt_test_t *test;
236 int rc, i = 0;
237
238 /* Subsystem ID passed as arg1 */
239 sub = kzt_subsystem_find(kcfg->cfg_arg1);
240 if (sub == NULL)
241 return -EINVAL;
242
243 spin_lock(&(sub->test_lock));
244 list_for_each_entry(test, &(sub->test_list), test_list)
245 i++;
246
247 spin_unlock(&(sub->test_lock));
248 kcfg->cfg_rc1 = i;
249
250 if (copy_to_user((struct kzt_cfg_t __user *)arg, kcfg, sizeof(*kcfg)))
251 return -EFAULT;
252
253 return 0;
254 }
255
256 static int
257 kzt_test_list(kzt_cfg_t *kcfg, unsigned long arg)
258 {
259 kzt_subsystem_t *sub;
260 kzt_test_t *test;
261 kzt_cfg_t *tmp;
262 int size, rc, i = 0;
263
264 /* Subsystem ID passed as arg1 */
265 sub = kzt_subsystem_find(kcfg->cfg_arg1);
266 if (sub == NULL)
267 return -EINVAL;
268
269 /* Structure will be sized large enough for N test entries
270 * which is passed in by the caller. On exit the number of
271 * entries filled in with valid tests will be stored in
272 * cfg_rc1. If the caller does not provide enough entries
273 * for all tests we will truncate the list to avoid overrun.
274 */
275 size = sizeof(*tmp)+kcfg->cfg_data.kzt_tests.size*sizeof(kzt_user_t);
276 tmp = kmalloc(size, GFP_KERNEL);
277 if (tmp == NULL)
278 return -ENOMEM;
279
280 /* Local 'tmp' is used as the structure copied back to user space */
281 memset(tmp, 0, size);
282 memcpy(tmp, kcfg, sizeof(*kcfg));
283
284 spin_lock(&(sub->test_lock));
285 list_for_each_entry(test, &(sub->test_list), test_list) {
286 strncpy(tmp->cfg_data.kzt_tests.descs[i].name,
287 test->desc.name, KZT_NAME_SIZE);
288 strncpy(tmp->cfg_data.kzt_tests.descs[i].desc,
289 test->desc.desc, KZT_DESC_SIZE);
290 tmp->cfg_data.kzt_tests.descs[i].id = test->desc.id;
291
292 /* Truncate list if we are about to overrun alloc'ed memory */
293 if ((i++) == kcfg->cfg_data.kzt_tests.size)
294 break;
295 }
296 spin_unlock(&(sub->test_lock));
297 tmp->cfg_rc1 = i;
298
299 if (copy_to_user((struct kzt_cfg_t __user *)arg, tmp, size)) {
300 kfree(tmp);
301 return -EFAULT;
302 }
303
304 kfree(tmp);
305 return 0;
306 }
307
308 static int
309 kzt_validate(struct file *file, kzt_subsystem_t *sub, int cmd, void *arg)
310 {
311 kzt_test_t *test;
312 int rc = 0;
313
314 spin_lock(&(sub->test_lock));
315 list_for_each_entry(test, &(sub->test_list), test_list) {
316 if (test->desc.id == cmd) {
317 spin_unlock(&(sub->test_lock));
318 return test->test(file, arg);
319 }
320 }
321 spin_unlock(&(sub->test_lock));
322
323 return -EINVAL;
324 }
325
326 static int
327 kzt_ioctl_cfg(struct file *file, unsigned long arg)
328 {
329 kzt_cfg_t kcfg;
330 int rc = 0;
331
332 if (copy_from_user(&kcfg, (kzt_cfg_t *)arg, sizeof(kcfg)))
333 return -EFAULT;
334
335 if (kcfg.cfg_magic != KZT_CFG_MAGIC) {
336 kzt_print(file, "Bad config magic 0x%x != 0x%x\n",
337 kcfg.cfg_magic, KZT_CFG_MAGIC);
338 return -EINVAL;
339 }
340
341 switch (kcfg.cfg_cmd) {
342 case KZT_CFG_BUFFER_CLEAR:
343 /* cfg_arg1 - Unused
344 * cfg_rc1 - Unused
345 */
346 rc = kzt_buffer_clear(file, &kcfg, arg);
347 break;
348 case KZT_CFG_BUFFER_SIZE:
349 /* cfg_arg1 - 0 - query size; >0 resize
350 * cfg_rc1 - Set to current buffer size
351 */
352 rc = kzt_buffer_size(file, &kcfg, arg);
353 break;
354 case KZT_CFG_SUBSYSTEM_COUNT:
355 /* cfg_arg1 - Unused
356 * cfg_rc1 - Set to number of subsystems
357 */
358 rc = kzt_subsystem_count(&kcfg, arg);
359 break;
360 case KZT_CFG_SUBSYSTEM_LIST:
361 /* cfg_arg1 - Unused
362 * cfg_rc1 - Set to number of subsystems
363 * cfg_data.kzt_subsystems - Populated with subsystems
364 */
365 rc = kzt_subsystem_list(&kcfg, arg);
366 break;
367 case KZT_CFG_TEST_COUNT:
368 /* cfg_arg1 - Set to a target subsystem
369 * cfg_rc1 - Set to number of tests
370 */
371 rc = kzt_test_count(&kcfg, arg);
372 break;
373 case KZT_CFG_TEST_LIST:
374 /* cfg_arg1 - Set to a target subsystem
375 * cfg_rc1 - Set to number of tests
376 * cfg_data.kzt_subsystems - Populated with tests
377 */
378 rc = kzt_test_list(&kcfg, arg);
379 break;
380 default:
381 kzt_print(file, "Bad config command %d\n", kcfg.cfg_cmd);
382 rc = -EINVAL;
383 break;
384 }
385
386 return rc;
387 }
388
389 static int
390 kzt_ioctl_cmd(struct file *file, unsigned long arg)
391 {
392 kzt_subsystem_t *sub;
393 kzt_cmd_t kcmd;
394 int rc = -EINVAL;
395 void *data = NULL;
396
397 if (copy_from_user(&kcmd, (kzt_cfg_t *)arg, sizeof(kcmd)))
398 return -EFAULT;
399
400 if (kcmd.cmd_magic != KZT_CMD_MAGIC) {
401 kzt_print(file, "Bad command magic 0x%x != 0x%x\n",
402 kcmd.cmd_magic, KZT_CFG_MAGIC);
403 return -EINVAL;
404 }
405
406 /* Allocate memory for any opaque data the caller needed to pass on */
407 if (kcmd.cmd_data_size > 0) {
408 data = (void *)kmalloc(kcmd.cmd_data_size, GFP_KERNEL);
409 if (data == NULL)
410 return -ENOMEM;
411
412 if (copy_from_user(data, (void *)(arg + offsetof(kzt_cmd_t,
413 cmd_data_str)), kcmd.cmd_data_size)) {
414 kfree(data);
415 return -EFAULT;
416 }
417 }
418
419 sub = kzt_subsystem_find(kcmd.cmd_subsystem);
420 if (sub != NULL)
421 rc = kzt_validate(file, sub, kcmd.cmd_test, data);
422 else
423 rc = -EINVAL;
424
425 if (data != NULL)
426 kfree(data);
427
428 return rc;
429 }
430
431 static int
432 kzt_ioctl(struct inode *inode, struct file *file,
433 unsigned int cmd, unsigned long arg)
434 {
435 int minor, rc = 0;
436
437 /* Ignore tty ioctls */
438 if ((cmd & 0xffffff00) == ((int)'T') << 8)
439 return -ENOTTY;
440
441 if (minor >= KZT_MINORS)
442 return -ENXIO;
443
444 switch (cmd) {
445 case KZT_CFG:
446 rc = kzt_ioctl_cfg(file, arg);
447 break;
448 case KZT_CMD:
449 rc = kzt_ioctl_cmd(file, arg);
450 break;
451 default:
452 kzt_print(file, "Bad ioctl command %d\n", cmd);
453 rc = -EINVAL;
454 break;
455 }
456
457 return rc;
458 }
459
460 /* I'm not sure why you would want to write in to this buffer from
461 * user space since its principle use is to pass test status info
462 * back to the user space, but I don't see any reason to prevent it.
463 */
464 static ssize_t kzt_write(struct file *file, const char __user *buf,
465 size_t count, loff_t *ppos)
466 {
467 unsigned int minor = iminor(file->f_dentry->d_inode);
468 kzt_info_t *info = (kzt_info_t *)file->private_data;
469 int rc = 0;
470
471 if (minor >= KZT_MINORS)
472 return -ENXIO;
473
474 ASSERT(info);
475 ASSERT(info->info_buffer);
476
477 spin_lock(&info->info_lock);
478
479 /* Write beyond EOF */
480 if (*ppos >= info->info_size) {
481 rc = -EFBIG;
482 goto out;
483 }
484
485 /* Resize count if beyond EOF */
486 if (*ppos + count > info->info_size)
487 count = info->info_size - *ppos;
488
489 if (copy_from_user(info->info_buffer, buf, count)) {
490 rc = -EFAULT;
491 goto out;
492 }
493
494 *ppos += count;
495 rc = count;
496 out:
497 spin_unlock(&info->info_lock);
498 return rc;
499 }
500
501 static ssize_t kzt_read(struct file *file, char __user *buf,
502 size_t count, loff_t *ppos)
503 {
504 unsigned int minor = iminor(file->f_dentry->d_inode);
505 kzt_info_t *info = (kzt_info_t *)file->private_data;
506 int rc = 0;
507
508 if (minor >= KZT_MINORS)
509 return -ENXIO;
510
511 ASSERT(info);
512 ASSERT(info->info_buffer);
513
514 spin_lock(&info->info_lock);
515
516 /* Read beyond EOF */
517 if (*ppos >= info->info_size)
518 goto out;
519
520 /* Resize count if beyond EOF */
521 if (*ppos + count > info->info_size)
522 count = info->info_size - *ppos;
523
524 if (copy_to_user(buf, info->info_buffer + *ppos, count)) {
525 rc = -EFAULT;
526 goto out;
527 }
528
529 *ppos += count;
530 rc = count;
531 out:
532 spin_unlock(&info->info_lock);
533 return rc;
534 }
535
536 static loff_t kzt_seek(struct file *file, loff_t offset, int origin)
537 {
538 unsigned int minor = iminor(file->f_dentry->d_inode);
539 kzt_info_t *info = (kzt_info_t *)file->private_data;
540 int rc = -EINVAL;
541
542 if (minor >= KZT_MINORS)
543 return -ENXIO;
544
545 ASSERT(info);
546 ASSERT(info->info_buffer);
547
548 spin_lock(&info->info_lock);
549
550 switch (origin) {
551 case 0: /* SEEK_SET - No-op just do it */
552 break;
553 case 1: /* SEEK_CUR - Seek from current */
554 offset = file->f_pos + offset;
555 break;
556 case 2: /* SEEK_END - Seek from end */
557 offset = info->info_size + offset;
558 break;
559 }
560
561 if (offset >= 0) {
562 file->f_pos = offset;
563 file->f_version = 0;
564 rc = offset;
565 }
566
567 spin_unlock(&info->info_lock);
568
569 return rc;
570 }
571
/* File operations for the kztctl character device.  Tests and
 * configuration are driven through ioctl(), while read()/write()/
 * llseek() expose the per-open status buffer set up by kzt_open(). */
static struct file_operations kzt_fops = {
	.owner = THIS_MODULE,
	.open = kzt_open,
	.release = kzt_release,
	.ioctl = kzt_ioctl,
	.read = kzt_read,
	.write = kzt_write,
	.llseek = kzt_seek,
};
581
/* Static cdev backing the kztctl device node; bound to kzt_fops and
 * registered in kzt_init() via cdev_init()/cdev_add(). */
static struct cdev kzt_cdev = {
	.owner = THIS_MODULE,
	.kobj = { .name = "kztctl", },
};
586
587 static int __init
588 kzt_init(void)
589 {
590 dev_t dev;
591 int i, rc;
592
593 spin_lock_init(&kzt_module_lock);
594 INIT_LIST_HEAD(&kzt_module_list);
595
596 KZT_SUBSYSTEM_INIT(kmem);
597 KZT_SUBSYSTEM_INIT(taskq);
598 KZT_SUBSYSTEM_INIT(krng);
599 KZT_SUBSYSTEM_INIT(mutex);
600 KZT_SUBSYSTEM_INIT(condvar);
601 KZT_SUBSYSTEM_INIT(thread);
602 KZT_SUBSYSTEM_INIT(rwlock);
603 KZT_SUBSYSTEM_INIT(time);
604
605 dev = MKDEV(KZT_MAJOR, 0);
606 if (rc = register_chrdev_region(dev, KZT_MINORS, "kztctl"))
607 goto error;
608
609 /* Support for registering a character driver */
610 cdev_init(&kzt_cdev, &kzt_fops);
611 if ((rc = cdev_add(&kzt_cdev, dev, KZT_MINORS))) {
612 printk(KERN_ERR "kzt: Error adding cdev, %d\n", rc);
613 kobject_put(&kzt_cdev.kobj);
614 unregister_chrdev_region(dev, KZT_MINORS);
615 goto error;
616 }
617
618 /* Support for udev make driver info available in sysfs */
619 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
620 kzt_class = class_simple_create(THIS_MODULE, "kzt");
621 #else
622 kzt_class = class_create(THIS_MODULE, "kzt");
623 #endif
624 if (IS_ERR(kzt_class)) {
625 rc = PTR_ERR(kzt_class);
626 printk(KERN_ERR "kzt: Error creating kzt class, %d\n", rc);
627 cdev_del(&kzt_cdev);
628 unregister_chrdev_region(dev, KZT_MINORS);
629 goto error;
630 }
631
632 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
633 class_simple_device_add(kzt_class, MKDEV(KZT_MAJOR, 0),
634 NULL, "kztctl");
635 #else
636 class_device_create(kzt_class, NULL, MKDEV(KZT_MAJOR, 0),
637 NULL, "kztctl");
638 #endif
639
640 printk(KERN_INFO "kzt: Kernel ZFS Tests %s Loaded\n", KZT_VERSION);
641 return 0;
642 error:
643 printk(KERN_ERR "kzt: Error registering kzt device, %d\n", rc);
644 return rc;
645 }
646
647 static void
648 kzt_fini(void)
649 {
650 dev_t dev = MKDEV(KZT_MAJOR, 0);
651 int i;
652
653 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
654 class_simple_device_remove(dev);
655 class_simple_destroy(kzt_class);
656 devfs_remove("kzt/kztctl");
657 devfs_remove("kzt");
658 #else
659 class_device_destroy(kzt_class, dev);
660 class_destroy(kzt_class);
661 #endif
662 cdev_del(&kzt_cdev);
663 unregister_chrdev_region(dev, KZT_MINORS);
664
665 KZT_SUBSYSTEM_FINI(time);
666 KZT_SUBSYSTEM_FINI(rwlock);
667 KZT_SUBSYSTEM_FINI(thread);
668 KZT_SUBSYSTEM_FINI(condvar);
669 KZT_SUBSYSTEM_FINI(mutex);
670 KZT_SUBSYSTEM_FINI(krng);
671 KZT_SUBSYSTEM_FINI(taskq);
672 KZT_SUBSYSTEM_FINI(kmem);
673
674 ASSERT(list_empty(&kzt_module_list));
675 printk(KERN_INFO "kzt: Kernel ZFS Tests %s Unloaded\n", KZT_VERSION);
676 }
677
678 module_init(kzt_init);
679 module_exit(kzt_fini);
680
681 MODULE_AUTHOR("Lawrence Livermore National Labs");
682 MODULE_DESCRIPTION("Kernel ZFS Test");
683 MODULE_LICENSE("GPL");
684