mirror_spl-debian.git: module/splat/splat-ctl.c
/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *  UCRL-CODE-235197
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://zfsonlinux.org/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *****************************************************************************
 *  Solaris Porting LAyer Tests (SPLAT) Test Control Interface.
 *
 *  The 'splat' (Solaris Porting LAyer Tests) module is designed as a
 *  framework which runs various in-kernel regression tests to validate
 *  that the SPL primitives honor the Solaris ABI.
 *
 *  The splat module is constructed of various splat_* source files, each
 *  of which contains the regression tests for a particular subsystem.  For
 *  example, the splat_kmem.c file contains all the tests for validating
 *  that the kmem interfaces have been implemented correctly.  When the
 *  splat module is loaded splat_*_init() will be called for each
 *  subsystem's tests.  It is the responsibility of splat_*_init() to
 *  register all the tests for this subsystem using the SPLAT_TEST_INIT()
 *  macro.  Similarly splat_*_fini() is called when the splat module is
 *  removed and is responsible for unregistering its tests via the
 *  SPLAT_TEST_FINI macro.  Once a test is registered it can then be run
 *  with an ioctl() call which specifies the subsystem and test to be run.
 *  The provided splat command line tool can be used to display all
 *  available subsystems and tests.  It can also be used to run the full
 *  suite of regression tests or particular tests.
\*****************************************************************************/
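
/*
 * Illustration only -- a minimal user space sketch, not part of this file,
 * of driving a registered test through the SPLAT_CMD ioctl() handled below.
 * The device path and the numeric subsystem/test ids are assumptions;
 * splat_cmd_t, its cmd_* fields, SPLAT_CMD, and SPLAT_CMD_MAGIC are the
 * definitions this file uses below, and how user space obtains them (a
 * shared header) is likewise assumed.
 *
 *     char buf[4096];
 *     splat_cmd_t cmd = { 0 };
 *     int fd = open("/dev/splat", O_RDWR);   // device path assumed
 *
 *     cmd.cmd_magic     = SPLAT_CMD_MAGIC;
 *     cmd.cmd_subsystem = 0x0100;            // hypothetical subsystem id
 *     cmd.cmd_test      = 0x0101;            // hypothetical test id
 *     cmd.cmd_data_size = 0;                 // no opaque payload
 *
 *     int rc = ioctl(fd, SPLAT_CMD, &cmd);   // returns the test's result
 *     read(fd, buf, sizeof(buf));            // fetch buffered test output
 */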

#include <sys/debug.h>
#include <sys/mutex.h>
#include <sys/types.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include "splat-internal.h"

static struct list_head splat_module_list;
static spinlock_t splat_module_lock;

static int
splat_open(struct inode *inode, struct file *file)
{
        splat_info_t *info;

        info = (splat_info_t *)kmalloc(sizeof(*info), GFP_KERNEL);
        if (info == NULL)
                return -ENOMEM;

        mutex_init(&info->info_lock, SPLAT_NAME, MUTEX_DEFAULT, NULL);
        info->info_size = SPLAT_INFO_BUFFER_SIZE;
        info->info_buffer = (char *)vmalloc(SPLAT_INFO_BUFFER_SIZE);
        if (info->info_buffer == NULL) {
                kfree(info);
                return -ENOMEM;
        }
        memset(info->info_buffer, 0, info->info_size);

        info->info_head = info->info_buffer;
        file->private_data = (void *)info;

        splat_print(file, "%s\n", spl_version);

        return 0;
}

static int
splat_release(struct inode *inode, struct file *file)
{
        splat_info_t *info = (splat_info_t *)file->private_data;

        ASSERT(info);
        ASSERT(info->info_buffer);

        mutex_destroy(&info->info_lock);
        vfree(info->info_buffer);
        kfree(info);

        return 0;
}

static int
splat_buffer_clear(struct file *file, splat_cfg_t *kcfg, unsigned long arg)
{
        splat_info_t *info = (splat_info_t *)file->private_data;

        ASSERT(info);
        ASSERT(info->info_buffer);

        mutex_enter(&info->info_lock);
        memset(info->info_buffer, 0, info->info_size);
        info->info_head = info->info_buffer;
        mutex_exit(&info->info_lock);

        return 0;
}

static int
splat_buffer_size(struct file *file, splat_cfg_t *kcfg, unsigned long arg)
{
        splat_info_t *info = (splat_info_t *)file->private_data;
        char *buf;
        int min, size, rc = 0;

        ASSERT(info);
        ASSERT(info->info_buffer);

        mutex_enter(&info->info_lock);
        if (kcfg->cfg_arg1 > 0) {

                size = kcfg->cfg_arg1;
                buf = (char *)vmalloc(size);
                if (buf == NULL) {
                        rc = -ENOMEM;
                        goto out;
                }

                /* Zero fill and truncate contents when copying the buffer */
                min = ((size < info->info_size) ? size : info->info_size);
                memset(buf, 0, size);
                memcpy(buf, info->info_buffer, min);
                vfree(info->info_buffer);
                info->info_size = size;
                info->info_buffer = buf;
                info->info_head = info->info_buffer;
        }

        kcfg->cfg_rc1 = info->info_size;

        if (copy_to_user((struct splat_cfg_t __user *)arg, kcfg, sizeof(*kcfg)))
                rc = -EFAULT;
out:
        mutex_exit(&info->info_lock);

        return rc;
}


static splat_subsystem_t *
splat_subsystem_find(int id)
{
        splat_subsystem_t *sub;

        spin_lock(&splat_module_lock);
        list_for_each_entry(sub, &splat_module_list, subsystem_list) {
                if (id == sub->desc.id) {
                        spin_unlock(&splat_module_lock);
                        return sub;
                }
        }
        spin_unlock(&splat_module_lock);

        return NULL;
}

static int
splat_subsystem_count(splat_cfg_t *kcfg, unsigned long arg)
{
        splat_subsystem_t *sub;
        int i = 0;

        spin_lock(&splat_module_lock);
        list_for_each_entry(sub, &splat_module_list, subsystem_list)
                i++;

        spin_unlock(&splat_module_lock);
        kcfg->cfg_rc1 = i;

        if (copy_to_user((struct splat_cfg_t __user *)arg, kcfg, sizeof(*kcfg)))
                return -EFAULT;

        return 0;
}

static int
splat_subsystem_list(splat_cfg_t *kcfg, unsigned long arg)
{
        splat_subsystem_t *sub;
        splat_cfg_t *tmp;
        int size, i = 0;

        /* Structure will be sized large enough for N subsystem entries
         * which is passed in by the caller.  On exit the number of
         * entries filled in with valid subsystems will be stored in
         * cfg_rc1.  If the caller does not provide enough entries
         * for all subsystems we will truncate the list to avoid overrun.
         */
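        /*
         * Worked example (values assumed, not from this file): a caller
         * asking for up to 4 subsystem slots sets
         * cfg_data.splat_subsystems.size = 4, so the buffer allocated below
         * is sizeof(splat_cfg_t) + 4 * sizeof(splat_user_t) bytes: the
         * fixed header followed by room for 4 name/desc/id entries.
         */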
        size = sizeof(*tmp) + kcfg->cfg_data.splat_subsystems.size *
            sizeof(splat_user_t);
        tmp = kmalloc(size, GFP_KERNEL);
        if (tmp == NULL)
                return -ENOMEM;

        /* Local 'tmp' is used as the structure copied back to user space */
        memset(tmp, 0, size);
        memcpy(tmp, kcfg, sizeof(*kcfg));

        spin_lock(&splat_module_lock);
        list_for_each_entry(sub, &splat_module_list, subsystem_list) {
                /* Truncate the list before overrunning the allocated entries */
                if (i == kcfg->cfg_data.splat_subsystems.size)
                        break;

                strncpy(tmp->cfg_data.splat_subsystems.descs[i].name,
                    sub->desc.name, SPLAT_NAME_SIZE);
                strncpy(tmp->cfg_data.splat_subsystems.descs[i].desc,
                    sub->desc.desc, SPLAT_DESC_SIZE);
                tmp->cfg_data.splat_subsystems.descs[i].id = sub->desc.id;
                i++;
        }
        spin_unlock(&splat_module_lock);
        tmp->cfg_rc1 = i;

        if (copy_to_user((struct splat_cfg_t __user *)arg, tmp, size)) {
                kfree(tmp);
                return -EFAULT;
        }

        kfree(tmp);
        return 0;
}

static int
splat_test_count(splat_cfg_t *kcfg, unsigned long arg)
{
        splat_subsystem_t *sub;
        splat_test_t *test;
        int i = 0;

        /* Subsystem ID passed as arg1 */
        sub = splat_subsystem_find(kcfg->cfg_arg1);
        if (sub == NULL)
                return -EINVAL;

        spin_lock(&(sub->test_lock));
        list_for_each_entry(test, &(sub->test_list), test_list)
                i++;

        spin_unlock(&(sub->test_lock));
        kcfg->cfg_rc1 = i;

        if (copy_to_user((struct splat_cfg_t __user *)arg, kcfg, sizeof(*kcfg)))
                return -EFAULT;

        return 0;
}

static int
splat_test_list(splat_cfg_t *kcfg, unsigned long arg)
{
        splat_subsystem_t *sub;
        splat_test_t *test;
        splat_cfg_t *tmp;
        int size, i = 0;

        /* Subsystem ID passed as arg1 */
        sub = splat_subsystem_find(kcfg->cfg_arg1);
        if (sub == NULL)
                return -EINVAL;

        /* Structure will be sized large enough for N test entries
         * which is passed in by the caller.  On exit the number of
         * entries filled in with valid tests will be stored in
         * cfg_rc1.  If the caller does not provide enough entries
         * for all tests we will truncate the list to avoid overrun.
         */
        size = sizeof(*tmp) + kcfg->cfg_data.splat_tests.size *
            sizeof(splat_user_t);
        tmp = kmalloc(size, GFP_KERNEL);
        if (tmp == NULL)
                return -ENOMEM;

        /* Local 'tmp' is used as the structure copied back to user space */
        memset(tmp, 0, size);
        memcpy(tmp, kcfg, sizeof(*kcfg));

        spin_lock(&(sub->test_lock));
        list_for_each_entry(test, &(sub->test_list), test_list) {
                /* Truncate the list before overrunning the allocated entries */
                if (i == kcfg->cfg_data.splat_tests.size)
                        break;

                strncpy(tmp->cfg_data.splat_tests.descs[i].name,
                    test->desc.name, SPLAT_NAME_SIZE);
                strncpy(tmp->cfg_data.splat_tests.descs[i].desc,
                    test->desc.desc, SPLAT_DESC_SIZE);
                tmp->cfg_data.splat_tests.descs[i].id = test->desc.id;
                i++;
        }
        spin_unlock(&(sub->test_lock));
        tmp->cfg_rc1 = i;

        if (copy_to_user((struct splat_cfg_t __user *)arg, tmp, size)) {
                kfree(tmp);
                return -EFAULT;
        }

        kfree(tmp);
        return 0;
}

static int
splat_validate(struct file *file, splat_subsystem_t *sub, int cmd, void *arg)
{
        splat_test_t *test;

        spin_lock(&(sub->test_lock));
        list_for_each_entry(test, &(sub->test_list), test_list) {
                if (test->desc.id == cmd) {
                        spin_unlock(&(sub->test_lock));
                        return test->test(file, arg);
                }
        }
        spin_unlock(&(sub->test_lock));

        return -EINVAL;
}

static int
splat_ioctl_cfg(struct file *file, unsigned int cmd, unsigned long arg)
{
        splat_cfg_t kcfg;
        int rc = 0;

        /* User and kernel space agree about arg size */
        if (_IOC_SIZE(cmd) != sizeof(kcfg))
                return -EBADMSG;

        if (copy_from_user(&kcfg, (splat_cfg_t *)arg, sizeof(kcfg)))
                return -EFAULT;

        if (kcfg.cfg_magic != SPLAT_CFG_MAGIC) {
                splat_print(file, "Bad config magic 0x%x != 0x%x\n",
                    kcfg.cfg_magic, SPLAT_CFG_MAGIC);
                return -EINVAL;
        }

        switch (kcfg.cfg_cmd) {
        case SPLAT_CFG_BUFFER_CLEAR:
                /* cfg_arg1 - Unused
                 * cfg_rc1  - Unused
                 */
                rc = splat_buffer_clear(file, &kcfg, arg);
                break;
        case SPLAT_CFG_BUFFER_SIZE:
                /* cfg_arg1 - 0 - query size; >0 resize
                 * cfg_rc1  - Set to current buffer size
                 */
                rc = splat_buffer_size(file, &kcfg, arg);
                break;
        case SPLAT_CFG_SUBSYSTEM_COUNT:
                /* cfg_arg1 - Unused
                 * cfg_rc1  - Set to number of subsystems
                 */
                rc = splat_subsystem_count(&kcfg, arg);
                break;
        case SPLAT_CFG_SUBSYSTEM_LIST:
                /* cfg_arg1 - Unused
                 * cfg_rc1  - Set to number of subsystems
                 * cfg_data.splat_subsystems - Set with subsystems
                 */
                rc = splat_subsystem_list(&kcfg, arg);
                break;
        case SPLAT_CFG_TEST_COUNT:
                /* cfg_arg1 - Set to a target subsystem
                 * cfg_rc1  - Set to number of tests
                 */
                rc = splat_test_count(&kcfg, arg);
                break;
        case SPLAT_CFG_TEST_LIST:
                /* cfg_arg1 - Set to a target subsystem
                 * cfg_rc1  - Set to number of tests
                 * cfg_data.splat_tests - Populated with tests
                 */
                rc = splat_test_list(&kcfg, arg);
                break;
        default:
                splat_print(file, "Bad config command %d\n",
                    kcfg.cfg_cmd);
                rc = -EINVAL;
                break;
        }

        return rc;
}

static int
splat_ioctl_cmd(struct file *file, unsigned int cmd, unsigned long arg)
{
        splat_subsystem_t *sub;
        splat_cmd_t kcmd;
        int rc = -EINVAL;
        void *data = NULL;

        /* User and kernel space agree about arg size */
        if (_IOC_SIZE(cmd) != sizeof(kcmd))
                return -EBADMSG;

        if (copy_from_user(&kcmd, (splat_cmd_t *)arg, sizeof(kcmd)))
                return -EFAULT;

        if (kcmd.cmd_magic != SPLAT_CMD_MAGIC) {
                splat_print(file, "Bad command magic 0x%x != 0x%x\n",
                    kcmd.cmd_magic, SPLAT_CMD_MAGIC);
                return -EINVAL;
        }

        /* Allocate memory for any opaque data the caller needed to pass on */
        if (kcmd.cmd_data_size > 0) {
                data = (void *)kmalloc(kcmd.cmd_data_size, GFP_KERNEL);
                if (data == NULL)
                        return -ENOMEM;

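                /*
                 * The opaque payload immediately follows the fixed
                 * splat_cmd_t header in the user supplied buffer, so it is
                 * copied from arg + offsetof(splat_cmd_t, cmd_data_str).
                 */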
                if (copy_from_user(data, (void *)(arg + offsetof(splat_cmd_t,
                    cmd_data_str)), kcmd.cmd_data_size)) {
                        kfree(data);
                        return -EFAULT;
                }
        }

        sub = splat_subsystem_find(kcmd.cmd_subsystem);
        if (sub != NULL)
                rc = splat_validate(file, sub, kcmd.cmd_test, data);
        else
                rc = -EINVAL;

        if (data != NULL)
                kfree(data);

        return rc;
}

static long
splat_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        int rc = 0;

        /* Ignore tty ioctls */
        if ((cmd & 0xffffff00) == ((int)'T') << 8)
                return -ENOTTY;

        switch (cmd) {
        case SPLAT_CFG:
                rc = splat_ioctl_cfg(file, cmd, arg);
                break;
        case SPLAT_CMD:
                rc = splat_ioctl_cmd(file, cmd, arg);
                break;
        default:
                splat_print(file, "Bad ioctl command %d\n", cmd);
                rc = -EINVAL;
                break;
        }

        return rc;
}

#ifdef CONFIG_COMPAT
/* Compatibility handler for ioctls from 32-bit ELF binaries */
static long
splat_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        return splat_unlocked_ioctl(file, cmd, arg);
}
#endif /* CONFIG_COMPAT */

/* I'm not sure why you would want to write into this buffer from
 * user space since its principal use is to pass test status info
 * back to user space, but I don't see any reason to prevent it.
 */
static ssize_t splat_write(struct file *file, const char __user *buf,
    size_t count, loff_t *ppos)
{
        splat_info_t *info = (splat_info_t *)file->private_data;
        int rc = 0;

        ASSERT(info);
        ASSERT(info->info_buffer);

        mutex_enter(&info->info_lock);

        /* Write beyond EOF */
        if (*ppos >= info->info_size) {
                rc = -EFBIG;
                goto out;
        }

        /* Resize count if beyond EOF */
        if (*ppos + count > info->info_size)
                count = info->info_size - *ppos;

        if (copy_from_user(info->info_buffer + *ppos, buf, count)) {
                rc = -EFAULT;
                goto out;
        }

        *ppos += count;
        rc = count;
out:
        mutex_exit(&info->info_lock);
        return rc;
}

static ssize_t splat_read(struct file *file, char __user *buf,
    size_t count, loff_t *ppos)
{
        splat_info_t *info = (splat_info_t *)file->private_data;
        int rc = 0;

        ASSERT(info);
        ASSERT(info->info_buffer);

        mutex_enter(&info->info_lock);

        /* Read beyond EOF */
        if (*ppos >= info->info_size)
                goto out;

        /* Resize count if beyond EOF */
        if (*ppos + count > info->info_size)
                count = info->info_size - *ppos;

        if (copy_to_user(buf, info->info_buffer + *ppos, count)) {
                rc = -EFAULT;
                goto out;
        }

        *ppos += count;
        rc = count;
out:
        mutex_exit(&info->info_lock);
        return rc;
}

static loff_t splat_seek(struct file *file, loff_t offset, int origin)
{
        splat_info_t *info = (splat_info_t *)file->private_data;
        int rc = -EINVAL;

        ASSERT(info);
        ASSERT(info->info_buffer);

        mutex_enter(&info->info_lock);

        switch (origin) {
        case 0: /* SEEK_SET - No-op just do it */
                break;
        case 1: /* SEEK_CUR - Seek from current */
                offset = file->f_pos + offset;
                break;
        case 2: /* SEEK_END - Seek from end */
                offset = info->info_size + offset;
                break;
        }

        if (offset >= 0) {
                file->f_pos = offset;
                file->f_version = 0;
                rc = offset;
        }

        mutex_exit(&info->info_lock);

        return rc;
}

static struct file_operations splat_fops = {
        .owner          = THIS_MODULE,
        .open           = splat_open,
        .release        = splat_release,
        .unlocked_ioctl = splat_unlocked_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = splat_compat_ioctl,
#endif
        .read           = splat_read,
        .write          = splat_write,
        .llseek         = splat_seek,
};

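/*
 * Register as a dynamic misc character device.  The resulting node is named
 * after SPLAT_NAME (conventionally /dev/splat, created by udev -- an
 * assumption about the target system) and is what the user space splat
 * utility opens to issue the ioctls handled above.
 */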
static struct miscdevice splat_misc = {
        .minor = MISC_DYNAMIC_MINOR,
        .name  = SPLAT_NAME,
        .fops  = &splat_fops,
};

static int __init
splat_init(void)
{
        int error;

        spin_lock_init(&splat_module_lock);
        INIT_LIST_HEAD(&splat_module_list);

        SPLAT_SUBSYSTEM_INIT(kmem);
        SPLAT_SUBSYSTEM_INIT(taskq);
        SPLAT_SUBSYSTEM_INIT(krng);
        SPLAT_SUBSYSTEM_INIT(mutex);
        SPLAT_SUBSYSTEM_INIT(condvar);
        SPLAT_SUBSYSTEM_INIT(thread);
        SPLAT_SUBSYSTEM_INIT(rwlock);
        SPLAT_SUBSYSTEM_INIT(time);
        SPLAT_SUBSYSTEM_INIT(vnode);
        SPLAT_SUBSYSTEM_INIT(kobj);
        SPLAT_SUBSYSTEM_INIT(atomic);
        SPLAT_SUBSYSTEM_INIT(list);
        SPLAT_SUBSYSTEM_INIT(generic);
        SPLAT_SUBSYSTEM_INIT(cred);
        SPLAT_SUBSYSTEM_INIT(zlib);
        SPLAT_SUBSYSTEM_INIT(linux);

        error = misc_register(&splat_misc);
        if (error) {
                printk(KERN_INFO "SPLAT: misc_register() failed %d\n", error);
        } else {
                printk(KERN_INFO "SPLAT: Loaded module v%s-%s%s\n",
                    SPL_META_VERSION, SPL_META_RELEASE, SPL_DEBUG_STR);
        }

        return (error);
}

static void __exit
splat_fini(void)
{
        misc_deregister(&splat_misc);

        SPLAT_SUBSYSTEM_FINI(linux);
        SPLAT_SUBSYSTEM_FINI(zlib);
        SPLAT_SUBSYSTEM_FINI(cred);
        SPLAT_SUBSYSTEM_FINI(generic);
        SPLAT_SUBSYSTEM_FINI(list);
        SPLAT_SUBSYSTEM_FINI(atomic);
        SPLAT_SUBSYSTEM_FINI(kobj);
        SPLAT_SUBSYSTEM_FINI(vnode);
        SPLAT_SUBSYSTEM_FINI(time);
        SPLAT_SUBSYSTEM_FINI(rwlock);
        SPLAT_SUBSYSTEM_FINI(thread);
        SPLAT_SUBSYSTEM_FINI(condvar);
        SPLAT_SUBSYSTEM_FINI(mutex);
        SPLAT_SUBSYSTEM_FINI(krng);
        SPLAT_SUBSYSTEM_FINI(taskq);
        SPLAT_SUBSYSTEM_FINI(kmem);

        ASSERT(list_empty(&splat_module_list));
        printk(KERN_INFO "SPLAT: Unloaded module v%s-%s%s\n",
            SPL_META_VERSION, SPL_META_RELEASE, SPL_DEBUG_STR);
}

module_init(splat_init);
module_exit(splat_fini);

MODULE_DESCRIPTION("Solaris Porting LAyer Tests");
MODULE_AUTHOR(SPL_META_AUTHOR);
MODULE_LICENSE(SPL_META_LICENSE);
MODULE_VERSION(SPL_META_VERSION "-" SPL_META_RELEASE);