/* modules/splat/splat-ctl.c */
1 /*
2 * This file is part of the SPL: Solaris Porting Layer.
3 *
4 * Copyright (c) 2008 Lawrence Livermore National Security, LLC.
5 * Produced at Lawrence Livermore National Laboratory
6 * Written by:
7 * Brian Behlendorf <behlendorf1@llnl.gov>,
8 * Herb Wartens <wartens2@llnl.gov>,
9 * Jim Garlick <garlick@llnl.gov>
10 * UCRL-CODE-235197
11 *
12 * This is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This is distributed in the hope that it will be useful, but WITHOUT
18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
20 * for more details.
21 *
22 * You should have received a copy of the GNU General Public License along
23 * with this program; if not, write to the Free Software Foundation, Inc.,
24 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
25 */
26
27 /*
28 * My intent is to create a loadable 'splat' (Solaris Porting LAyer
29 * Tests) module which can be used as an access point to run
30 * in kernel Solaris ABI regression tests. This provides a
 * nice mechanism to validate the shim primitives are working properly.
32 *
33 * The basic design is the splat module is that it is constructed of
34 * various splat_* source files each of which contains regression tests.
35 * For example the splat_linux_kmem.c file contains tests for validating
36 * kmem correctness. When the splat module is loaded splat_*_init()
37 * will be called for each subsystems tests, similarly splat_*_fini() is
38 * called when the splat module is removed. Each test can then be
39 * run by making an ioctl() call from a userspace control application
40 * to pick the subsystem and test which should be run.
41 */
42
43 #include "splat-internal.h"
44
45 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
46 #include <linux/devfs_fs_kernel.h>
47 #endif
48
49 #include <linux/cdev.h>
50
51
52 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
53 static struct class_simple *splat_class;
54 #else
55 static struct class *splat_class;
56 #endif
57 static struct list_head splat_module_list;
58 static spinlock_t splat_module_lock;
59
60 static int
61 splat_open(struct inode *inode, struct file *file)
62 {
63 unsigned int minor = iminor(inode);
64 splat_info_t *info;
65
66 if (minor >= SPLAT_MINORS)
67 return -ENXIO;
68
69 info = (splat_info_t *)kmalloc(sizeof(*info), GFP_KERNEL);
70 if (info == NULL)
71 return -ENOMEM;
72
73 spin_lock_init(&info->info_lock);
74 info->info_size = SPLAT_INFO_BUFFER_SIZE;
75 info->info_buffer = (char *)vmalloc(SPLAT_INFO_BUFFER_SIZE);
76 if (info->info_buffer == NULL) {
77 kfree(info);
78 return -ENOMEM;
79 }
80
81 info->info_head = info->info_buffer;
82 file->private_data = (void *)info;
83
84 return 0;
85 }
86
87 static int
88 splat_release(struct inode *inode, struct file *file)
89 {
90 unsigned int minor = iminor(inode);
91 splat_info_t *info = (splat_info_t *)file->private_data;
92
93 if (minor >= SPLAT_MINORS)
94 return -ENXIO;
95
96 ASSERT(info);
97 ASSERT(info->info_buffer);
98
99 vfree(info->info_buffer);
100 kfree(info);
101
102 return 0;
103 }
104
105 static int
106 splat_buffer_clear(struct file *file, splat_cfg_t *kcfg, unsigned long arg)
107 {
108 splat_info_t *info = (splat_info_t *)file->private_data;
109
110 ASSERT(info);
111 ASSERT(info->info_buffer);
112
113 spin_lock(&info->info_lock);
114 memset(info->info_buffer, 0, info->info_size);
115 info->info_head = info->info_buffer;
116 spin_unlock(&info->info_lock);
117
118 return 0;
119 }
120
121 static int
122 splat_buffer_size(struct file *file, splat_cfg_t *kcfg, unsigned long arg)
123 {
124 splat_info_t *info = (splat_info_t *)file->private_data;
125 char *buf;
126 int min, size, rc = 0;
127
128 ASSERT(info);
129 ASSERT(info->info_buffer);
130
131 spin_lock(&info->info_lock);
132 if (kcfg->cfg_arg1 > 0) {
133
134 size = kcfg->cfg_arg1;
135 buf = (char *)vmalloc(size);
136 if (buf == NULL) {
137 rc = -ENOMEM;
138 goto out;
139 }
140
141 /* Zero fill and truncate contents when coping buffer */
142 min = ((size < info->info_size) ? size : info->info_size);
143 memset(buf, 0, size);
144 memcpy(buf, info->info_buffer, min);
145 vfree(info->info_buffer);
146 info->info_size = size;
147 info->info_buffer = buf;
148 info->info_head = info->info_buffer;
149 }
150
151 kcfg->cfg_rc1 = info->info_size;
152
153 if (copy_to_user((struct splat_cfg_t __user *)arg, kcfg, sizeof(*kcfg)))
154 rc = -EFAULT;
155 out:
156 spin_unlock(&info->info_lock);
157
158 return rc;
159 }
160
161
162 static splat_subsystem_t *
163 splat_subsystem_find(int id) {
164 splat_subsystem_t *sub;
165
166 spin_lock(&splat_module_lock);
167 list_for_each_entry(sub, &splat_module_list, subsystem_list) {
168 if (id == sub->desc.id) {
169 spin_unlock(&splat_module_lock);
170 return sub;
171 }
172 }
173 spin_unlock(&splat_module_lock);
174
175 return NULL;
176 }
177
178 static int
179 splat_subsystem_count(splat_cfg_t *kcfg, unsigned long arg)
180 {
181 splat_subsystem_t *sub;
182 int i = 0;
183
184 spin_lock(&splat_module_lock);
185 list_for_each_entry(sub, &splat_module_list, subsystem_list)
186 i++;
187
188 spin_unlock(&splat_module_lock);
189 kcfg->cfg_rc1 = i;
190
191 if (copy_to_user((struct splat_cfg_t __user *)arg, kcfg, sizeof(*kcfg)))
192 return -EFAULT;
193
194 return 0;
195 }
196
197 static int
198 splat_subsystem_list(splat_cfg_t *kcfg, unsigned long arg)
199 {
200 splat_subsystem_t *sub;
201 splat_cfg_t *tmp;
202 int size, i = 0;
203
204 /* Structure will be sized large enough for N subsystem entries
205 * which is passed in by the caller. On exit the number of
206 * entries filled in with valid subsystems will be stored in
207 * cfg_rc1. If the caller does not provide enough entries
208 * for all subsystems we will truncate the list to avoid overrun.
209 */
210 size = sizeof(*tmp) + kcfg->cfg_data.splat_subsystems.size *
211 sizeof(splat_user_t);
212 tmp = kmalloc(size, GFP_KERNEL);
213 if (tmp == NULL)
214 return -ENOMEM;
215
216 /* Local 'tmp' is used as the structure copied back to user space */
217 memset(tmp, 0, size);
218 memcpy(tmp, kcfg, sizeof(*kcfg));
219
220 spin_lock(&splat_module_lock);
221 list_for_each_entry(sub, &splat_module_list, subsystem_list) {
222 strncpy(tmp->cfg_data.splat_subsystems.descs[i].name,
223 sub->desc.name, SPLAT_NAME_SIZE);
224 strncpy(tmp->cfg_data.splat_subsystems.descs[i].desc,
225 sub->desc.desc, SPLAT_DESC_SIZE);
226 tmp->cfg_data.splat_subsystems.descs[i].id = sub->desc.id;
227
228 /* Truncate list if we are about to overrun alloc'ed memory */
229 if ((i++) == kcfg->cfg_data.splat_subsystems.size)
230 break;
231 }
232 spin_unlock(&splat_module_lock);
233 tmp->cfg_rc1 = i;
234
235 if (copy_to_user((struct splat_cfg_t __user *)arg, tmp, size)) {
236 kfree(tmp);
237 return -EFAULT;
238 }
239
240 kfree(tmp);
241 return 0;
242 }
243
244 static int
245 splat_test_count(splat_cfg_t *kcfg, unsigned long arg)
246 {
247 splat_subsystem_t *sub;
248 splat_test_t *test;
249 int i = 0;
250
251 /* Subsystem ID passed as arg1 */
252 sub = splat_subsystem_find(kcfg->cfg_arg1);
253 if (sub == NULL)
254 return -EINVAL;
255
256 spin_lock(&(sub->test_lock));
257 list_for_each_entry(test, &(sub->test_list), test_list)
258 i++;
259
260 spin_unlock(&(sub->test_lock));
261 kcfg->cfg_rc1 = i;
262
263 if (copy_to_user((struct splat_cfg_t __user *)arg, kcfg, sizeof(*kcfg)))
264 return -EFAULT;
265
266 return 0;
267 }
268
269 static int
270 splat_test_list(splat_cfg_t *kcfg, unsigned long arg)
271 {
272 splat_subsystem_t *sub;
273 splat_test_t *test;
274 splat_cfg_t *tmp;
275 int size, i = 0;
276
277 /* Subsystem ID passed as arg1 */
278 sub = splat_subsystem_find(kcfg->cfg_arg1);
279 if (sub == NULL)
280 return -EINVAL;
281
282 /* Structure will be sized large enough for N test entries
283 * which is passed in by the caller. On exit the number of
284 * entries filled in with valid tests will be stored in
285 * cfg_rc1. If the caller does not provide enough entries
286 * for all tests we will truncate the list to avoid overrun.
287 */
288 size = sizeof(*tmp)+kcfg->cfg_data.splat_tests.size*sizeof(splat_user_t);
289 tmp = kmalloc(size, GFP_KERNEL);
290 if (tmp == NULL)
291 return -ENOMEM;
292
293 /* Local 'tmp' is used as the structure copied back to user space */
294 memset(tmp, 0, size);
295 memcpy(tmp, kcfg, sizeof(*kcfg));
296
297 spin_lock(&(sub->test_lock));
298 list_for_each_entry(test, &(sub->test_list), test_list) {
299 strncpy(tmp->cfg_data.splat_tests.descs[i].name,
300 test->desc.name, SPLAT_NAME_SIZE);
301 strncpy(tmp->cfg_data.splat_tests.descs[i].desc,
302 test->desc.desc, SPLAT_DESC_SIZE);
303 tmp->cfg_data.splat_tests.descs[i].id = test->desc.id;
304
305 /* Truncate list if we are about to overrun alloc'ed memory */
306 if ((i++) == kcfg->cfg_data.splat_tests.size)
307 break;
308 }
309 spin_unlock(&(sub->test_lock));
310 tmp->cfg_rc1 = i;
311
312 if (copy_to_user((struct splat_cfg_t __user *)arg, tmp, size)) {
313 kfree(tmp);
314 return -EFAULT;
315 }
316
317 kfree(tmp);
318 return 0;
319 }
320
321 static int
322 splat_validate(struct file *file, splat_subsystem_t *sub, int cmd, void *arg)
323 {
324 splat_test_t *test;
325
326 spin_lock(&(sub->test_lock));
327 list_for_each_entry(test, &(sub->test_list), test_list) {
328 if (test->desc.id == cmd) {
329 spin_unlock(&(sub->test_lock));
330 return test->test(file, arg);
331 }
332 }
333 spin_unlock(&(sub->test_lock));
334
335 return -EINVAL;
336 }
337
/*
 * Handle a SPLAT_CFG ioctl: copy the splat_cfg_t in from user space,
 * validate its magic, and dispatch on cfg_cmd.  Each handler writes
 * its results back to the same user-space structure at 'arg'.
 */
static int
splat_ioctl_cfg(struct file *file, unsigned long arg)
{
	splat_cfg_t kcfg;
	int rc = 0;

	if (copy_from_user(&kcfg, (splat_cfg_t *)arg, sizeof(kcfg)))
		return -EFAULT;

	/* Reject structures that were not built by the splat utility */
	if (kcfg.cfg_magic != SPLAT_CFG_MAGIC) {
		splat_print(file, "Bad config magic 0x%x != 0x%x\n",
			    kcfg.cfg_magic, SPLAT_CFG_MAGIC);
		return -EINVAL;
	}

	switch (kcfg.cfg_cmd) {
		case SPLAT_CFG_BUFFER_CLEAR:
			/* cfg_arg1 - Unused
			 * cfg_rc1 - Unused
			 */
			rc = splat_buffer_clear(file, &kcfg, arg);
			break;
		case SPLAT_CFG_BUFFER_SIZE:
			/* cfg_arg1 - 0 - query size; >0 resize
			 * cfg_rc1 - Set to current buffer size
			 */
			rc = splat_buffer_size(file, &kcfg, arg);
			break;
		case SPLAT_CFG_SUBSYSTEM_COUNT:
			/* cfg_arg1 - Unused
			 * cfg_rc1 - Set to number of subsystems
			 */
			rc = splat_subsystem_count(&kcfg, arg);
			break;
		case SPLAT_CFG_SUBSYSTEM_LIST:
			/* cfg_arg1 - Unused
			 * cfg_rc1 - Set to number of subsystems
			 * cfg_data.splat_subsystems - Populated with subsystems
			 */
			rc = splat_subsystem_list(&kcfg, arg);
			break;
		case SPLAT_CFG_TEST_COUNT:
			/* cfg_arg1 - Set to a target subsystem
			 * cfg_rc1 - Set to number of tests
			 */
			rc = splat_test_count(&kcfg, arg);
			break;
		case SPLAT_CFG_TEST_LIST:
			/* cfg_arg1 - Set to a target subsystem
			 * cfg_rc1 - Set to number of tests
			 * cfg_data.splat_subsystems - Populated with tests
			 */
			rc = splat_test_list(&kcfg, arg);
			break;
		default:
			splat_print(file, "Bad config command %d\n", kcfg.cfg_cmd);
			rc = -EINVAL;
			break;
	}

	return rc;
}
400
401 static int
402 splat_ioctl_cmd(struct file *file, unsigned long arg)
403 {
404 splat_subsystem_t *sub;
405 splat_cmd_t kcmd;
406 int rc = -EINVAL;
407 void *data = NULL;
408
409 if (copy_from_user(&kcmd, (splat_cfg_t *)arg, sizeof(kcmd)))
410 return -EFAULT;
411
412 if (kcmd.cmd_magic != SPLAT_CMD_MAGIC) {
413 splat_print(file, "Bad command magic 0x%x != 0x%x\n",
414 kcmd.cmd_magic, SPLAT_CFG_MAGIC);
415 return -EINVAL;
416 }
417
418 /* Allocate memory for any opaque data the caller needed to pass on */
419 if (kcmd.cmd_data_size > 0) {
420 data = (void *)kmalloc(kcmd.cmd_data_size, GFP_KERNEL);
421 if (data == NULL)
422 return -ENOMEM;
423
424 if (copy_from_user(data, (void *)(arg + offsetof(splat_cmd_t,
425 cmd_data_str)), kcmd.cmd_data_size)) {
426 kfree(data);
427 return -EFAULT;
428 }
429 }
430
431 sub = splat_subsystem_find(kcmd.cmd_subsystem);
432 if (sub != NULL)
433 rc = splat_validate(file, sub, kcmd.cmd_test, data);
434 else
435 rc = -EINVAL;
436
437 if (data != NULL)
438 kfree(data);
439
440 return rc;
441 }
442
443 static int
444 splat_ioctl(struct inode *inode, struct file *file,
445 unsigned int cmd, unsigned long arg)
446 {
447 unsigned int minor = iminor(file->f_dentry->d_inode);
448 int rc = 0;
449
450 /* Ignore tty ioctls */
451 if ((cmd & 0xffffff00) == ((int)'T') << 8)
452 return -ENOTTY;
453
454 if (minor >= SPLAT_MINORS)
455 return -ENXIO;
456
457 switch (cmd) {
458 case SPLAT_CFG:
459 rc = splat_ioctl_cfg(file, arg);
460 break;
461 case SPLAT_CMD:
462 rc = splat_ioctl_cmd(file, arg);
463 break;
464 default:
465 splat_print(file, "Bad ioctl command %d\n", cmd);
466 rc = -EINVAL;
467 break;
468 }
469
470 return rc;
471 }
472
473 /* I'm not sure why you would want to write in to this buffer from
474 * user space since its principle use is to pass test status info
475 * back to the user space, but I don't see any reason to prevent it.
476 */
477 static ssize_t splat_write(struct file *file, const char __user *buf,
478 size_t count, loff_t *ppos)
479 {
480 unsigned int minor = iminor(file->f_dentry->d_inode);
481 splat_info_t *info = (splat_info_t *)file->private_data;
482 int rc = 0;
483
484 if (minor >= SPLAT_MINORS)
485 return -ENXIO;
486
487 ASSERT(info);
488 ASSERT(info->info_buffer);
489
490 spin_lock(&info->info_lock);
491
492 /* Write beyond EOF */
493 if (*ppos >= info->info_size) {
494 rc = -EFBIG;
495 goto out;
496 }
497
498 /* Resize count if beyond EOF */
499 if (*ppos + count > info->info_size)
500 count = info->info_size - *ppos;
501
502 if (copy_from_user(info->info_buffer, buf, count)) {
503 rc = -EFAULT;
504 goto out;
505 }
506
507 *ppos += count;
508 rc = count;
509 out:
510 spin_unlock(&info->info_lock);
511 return rc;
512 }
513
/*
 * Read accumulated test output from the per-open info buffer starting
 * at the current file offset.  Returns the number of bytes copied, 0
 * at EOF, or a negative errno.
 */
static ssize_t splat_read(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned int minor = iminor(file->f_dentry->d_inode);
	splat_info_t *info = (splat_info_t *)file->private_data;
	int rc = 0;

	if (minor >= SPLAT_MINORS)
		return -ENXIO;

	ASSERT(info);
	ASSERT(info->info_buffer);

	spin_lock(&info->info_lock);

	/* Read beyond EOF: rc stays 0 to signal end-of-file */
	if (*ppos >= info->info_size)
		goto out;

	/* Resize count if beyond EOF */
	if (*ppos + count > info->info_size)
		count = info->info_size - *ppos;

	/* NOTE(review): copy_to_user() may fault while info_lock is
	 * held -- confirm this is acceptable for this spinlock. */
	if (copy_to_user(buf, info->info_buffer + *ppos, count)) {
		rc = -EFAULT;
		goto out;
	}

	*ppos += count;
	rc = count;
out:
	spin_unlock(&info->info_lock);
	return rc;
}
548
549 static loff_t splat_seek(struct file *file, loff_t offset, int origin)
550 {
551 unsigned int minor = iminor(file->f_dentry->d_inode);
552 splat_info_t *info = (splat_info_t *)file->private_data;
553 int rc = -EINVAL;
554
555 if (minor >= SPLAT_MINORS)
556 return -ENXIO;
557
558 ASSERT(info);
559 ASSERT(info->info_buffer);
560
561 spin_lock(&info->info_lock);
562
563 switch (origin) {
564 case 0: /* SEEK_SET - No-op just do it */
565 break;
566 case 1: /* SEEK_CUR - Seek from current */
567 offset = file->f_pos + offset;
568 break;
569 case 2: /* SEEK_END - Seek from end */
570 offset = info->info_size + offset;
571 break;
572 }
573
574 if (offset >= 0) {
575 file->f_pos = offset;
576 file->f_version = 0;
577 rc = offset;
578 }
579
580 spin_unlock(&info->info_lock);
581
582 return rc;
583 }
584
/* File operations for the splat control device nodes */
static struct file_operations splat_fops = {
	.owner		= THIS_MODULE,
	.open		= splat_open,
	.release	= splat_release,
	.ioctl		= splat_ioctl,
	.read		= splat_read,
	.write		= splat_write,
	.llseek		= splat_seek,
};
594
/* Character device backing /dev/splatctl; initialized in splat_init() */
static struct cdev splat_cdev = {
	.owner  =	THIS_MODULE,
	.kobj   =	{ .name = "splatctl", },
};
599
/*
 * Module load: register every test subsystem, reserve the splat
 * character device region, add the cdev, and create the device class
 * so udev can create the /dev node.
 *
 * NOTE(review): on the error paths below the SPLAT_SUBSYSTEM_INIT()
 * registrations are not unwound -- confirm those macros cannot fail
 * or leak when the module load aborts.
 */
static int __init
splat_init(void)
{
	dev_t dev;
	int rc;

	spin_lock_init(&splat_module_lock);
	INIT_LIST_HEAD(&splat_module_list);

	/* Register each subsystem's regression tests */
	SPLAT_SUBSYSTEM_INIT(kmem);
	SPLAT_SUBSYSTEM_INIT(taskq);
	SPLAT_SUBSYSTEM_INIT(krng);
	SPLAT_SUBSYSTEM_INIT(mutex);
	SPLAT_SUBSYSTEM_INIT(condvar);
	SPLAT_SUBSYSTEM_INIT(thread);
	SPLAT_SUBSYSTEM_INIT(rwlock);
	SPLAT_SUBSYSTEM_INIT(time);
	SPLAT_SUBSYSTEM_INIT(vnode);
	SPLAT_SUBSYSTEM_INIT(kobj);
	SPLAT_SUBSYSTEM_INIT(atomic);

	dev = MKDEV(SPLAT_MAJOR, 0);
	if ((rc = register_chrdev_region(dev, SPLAT_MINORS, "splatctl")))
		goto error;

	/* Support for registering a character driver */
	cdev_init(&splat_cdev, &splat_fops);
	if ((rc = cdev_add(&splat_cdev, dev, SPLAT_MINORS))) {
		printk(KERN_ERR "splat: Error adding cdev, %d\n", rc);
		kobject_put(&splat_cdev.kobj);
		unregister_chrdev_region(dev, SPLAT_MINORS);
		goto error;
	}

	/* Support for udev make driver info available in sysfs */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
	splat_class = class_simple_create(THIS_MODULE, "splat");
#else
	splat_class = class_create(THIS_MODULE, "splat");
#endif
	if (IS_ERR(splat_class)) {
		rc = PTR_ERR(splat_class);
		printk(KERN_ERR "splat: Error creating splat class, %d\n", rc);
		cdev_del(&splat_cdev);
		unregister_chrdev_region(dev, SPLAT_MINORS);
		goto error;
	}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
	class_simple_device_add(splat_class, MKDEV(SPLAT_MAJOR, 0),
	                        NULL, "splatctl");
#else
	class_device_create(splat_class, NULL, MKDEV(SPLAT_MAJOR, 0),
	                    NULL, "splatctl");
#endif

	printk(KERN_INFO "splat: Loaded Solaris Porting LAyer "
	       "Tests v%s\n", VERSION);
	return 0;
error:
	printk(KERN_ERR "splat: Error registering splat device, %d\n", rc);
	return rc;
}
663
/*
 * Module unload: tear down the device node, class, and cdev, then
 * unregister each subsystem in the reverse of the order splat_init()
 * registered them.  All subsystems must have removed themselves from
 * splat_module_list by the time we finish.
 */
static void
splat_fini(void)
{
	dev_t dev = MKDEV(SPLAT_MAJOR, 0);

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
	class_simple_device_remove(dev);
	class_simple_destroy(splat_class);
	devfs_remove("splat/splatctl");
	devfs_remove("splat");
#else
	class_device_destroy(splat_class, dev);
	class_destroy(splat_class);
#endif
	cdev_del(&splat_cdev);
	unregister_chrdev_region(dev, SPLAT_MINORS);

	/* Reverse order of SPLAT_SUBSYSTEM_INIT() calls in splat_init() */
	SPLAT_SUBSYSTEM_FINI(atomic);
	SPLAT_SUBSYSTEM_FINI(kobj);
	SPLAT_SUBSYSTEM_FINI(vnode);
	SPLAT_SUBSYSTEM_FINI(time);
	SPLAT_SUBSYSTEM_FINI(rwlock);
	SPLAT_SUBSYSTEM_FINI(thread);
	SPLAT_SUBSYSTEM_FINI(condvar);
	SPLAT_SUBSYSTEM_FINI(mutex);
	SPLAT_SUBSYSTEM_FINI(krng);
	SPLAT_SUBSYSTEM_FINI(taskq);
	SPLAT_SUBSYSTEM_FINI(kmem);

	ASSERT(list_empty(&splat_module_list));
	printk(KERN_INFO "splat: Unloaded Solaris Porting LAyer "
	       "Tests v%s\n", VERSION);
}
697
698 module_init(splat_init);
699 module_exit(splat_fini);
700
701 MODULE_AUTHOR("Lawrence Livermore National Labs");
702 MODULE_DESCRIPTION("Solaris Porting LAyer Tests");
703 MODULE_LICENSE("GPL");