/* module/splat/splat-ctl.c — from mirror_spl-debian.git (Solaris Porting Layer) */
1 /*
2 * This file is part of the SPL: Solaris Porting Layer.
3 *
4 * Copyright (c) 2008 Lawrence Livermore National Security, LLC.
5 * Produced at Lawrence Livermore National Laboratory
6 * Written by:
7 * Brian Behlendorf <behlendorf1@llnl.gov>,
8 * Herb Wartens <wartens2@llnl.gov>,
9 * Jim Garlick <garlick@llnl.gov>
10 * UCRL-CODE-235197
11 *
12 * This is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This is distributed in the hope that it will be useful, but WITHOUT
18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
20 * for more details.
21 *
22 * You should have received a copy of the GNU General Public License along
23 * with this program; if not, write to the Free Software Foundation, Inc.,
24 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
25 */
26
27 /*
28 * My intent is to create a loadable 'splat' (Solaris Porting LAyer
29 * Tests) module which can be used as an access point to run
30 * in kernel Solaris ABI regression tests. This provides a
31 * nice mechanism to validate the shim primitives are working properly.
32 *
33 * The basic design is the splat module is that it is constructed of
34 * various splat_* source files each of which contains regression tests.
35 * For example the splat_linux_kmem.c file contains tests for validating
36 * kmem correctness. When the splat module is loaded splat_*_init()
37 * will be called for each subsystems tests, similarly splat_*_fini() is
38 * called when the splat module is removed. Each test can then be
39 * run by making an ioctl() call from a userspace control application
40 * to pick the subsystem and test which should be run.
41 */
42
43 #include "splat-internal.h"
44
45 static spl_class *splat_class;
46 static spl_device *splat_device;
47 static struct list_head splat_module_list;
48 static spinlock_t splat_module_lock;
49
50 static int
51 splat_open(struct inode *inode, struct file *file)
52 {
53 unsigned int minor = iminor(inode);
54 splat_info_t *info;
55
56 if (minor >= SPLAT_MINORS)
57 return -ENXIO;
58
59 info = (splat_info_t *)kmalloc(sizeof(*info), GFP_KERNEL);
60 if (info == NULL)
61 return -ENOMEM;
62
63 spin_lock_init(&info->info_lock);
64 info->info_size = SPLAT_INFO_BUFFER_SIZE;
65 info->info_buffer = (char *)vmalloc(SPLAT_INFO_BUFFER_SIZE);
66 if (info->info_buffer == NULL) {
67 kfree(info);
68 return -ENOMEM;
69 }
70
71 info->info_head = info->info_buffer;
72 file->private_data = (void *)info;
73
74 return 0;
75 }
76
77 static int
78 splat_release(struct inode *inode, struct file *file)
79 {
80 unsigned int minor = iminor(inode);
81 splat_info_t *info = (splat_info_t *)file->private_data;
82
83 if (minor >= SPLAT_MINORS)
84 return -ENXIO;
85
86 ASSERT(info);
87 ASSERT(info->info_buffer);
88
89 vfree(info->info_buffer);
90 kfree(info);
91
92 return 0;
93 }
94
95 static int
96 splat_buffer_clear(struct file *file, splat_cfg_t *kcfg, unsigned long arg)
97 {
98 splat_info_t *info = (splat_info_t *)file->private_data;
99
100 ASSERT(info);
101 ASSERT(info->info_buffer);
102
103 spin_lock(&info->info_lock);
104 memset(info->info_buffer, 0, info->info_size);
105 info->info_head = info->info_buffer;
106 spin_unlock(&info->info_lock);
107
108 return 0;
109 }
110
111 static int
112 splat_buffer_size(struct file *file, splat_cfg_t *kcfg, unsigned long arg)
113 {
114 splat_info_t *info = (splat_info_t *)file->private_data;
115 char *buf;
116 int min, size, rc = 0;
117
118 ASSERT(info);
119 ASSERT(info->info_buffer);
120
121 spin_lock(&info->info_lock);
122 if (kcfg->cfg_arg1 > 0) {
123
124 size = kcfg->cfg_arg1;
125 buf = (char *)vmalloc(size);
126 if (buf == NULL) {
127 rc = -ENOMEM;
128 goto out;
129 }
130
131 /* Zero fill and truncate contents when coping buffer */
132 min = ((size < info->info_size) ? size : info->info_size);
133 memset(buf, 0, size);
134 memcpy(buf, info->info_buffer, min);
135 vfree(info->info_buffer);
136 info->info_size = size;
137 info->info_buffer = buf;
138 info->info_head = info->info_buffer;
139 }
140
141 kcfg->cfg_rc1 = info->info_size;
142
143 if (copy_to_user((struct splat_cfg_t __user *)arg, kcfg, sizeof(*kcfg)))
144 rc = -EFAULT;
145 out:
146 spin_unlock(&info->info_lock);
147
148 return rc;
149 }
150
151
152 static splat_subsystem_t *
153 splat_subsystem_find(int id) {
154 splat_subsystem_t *sub;
155
156 spin_lock(&splat_module_lock);
157 list_for_each_entry(sub, &splat_module_list, subsystem_list) {
158 if (id == sub->desc.id) {
159 spin_unlock(&splat_module_lock);
160 return sub;
161 }
162 }
163 spin_unlock(&splat_module_lock);
164
165 return NULL;
166 }
167
168 static int
169 splat_subsystem_count(splat_cfg_t *kcfg, unsigned long arg)
170 {
171 splat_subsystem_t *sub;
172 int i = 0;
173
174 spin_lock(&splat_module_lock);
175 list_for_each_entry(sub, &splat_module_list, subsystem_list)
176 i++;
177
178 spin_unlock(&splat_module_lock);
179 kcfg->cfg_rc1 = i;
180
181 if (copy_to_user((struct splat_cfg_t __user *)arg, kcfg, sizeof(*kcfg)))
182 return -EFAULT;
183
184 return 0;
185 }
186
187 static int
188 splat_subsystem_list(splat_cfg_t *kcfg, unsigned long arg)
189 {
190 splat_subsystem_t *sub;
191 splat_cfg_t *tmp;
192 int size, i = 0;
193
194 /* Structure will be sized large enough for N subsystem entries
195 * which is passed in by the caller. On exit the number of
196 * entries filled in with valid subsystems will be stored in
197 * cfg_rc1. If the caller does not provide enough entries
198 * for all subsystems we will truncate the list to avoid overrun.
199 */
200 size = sizeof(*tmp) + kcfg->cfg_data.splat_subsystems.size *
201 sizeof(splat_user_t);
202 tmp = kmalloc(size, GFP_KERNEL);
203 if (tmp == NULL)
204 return -ENOMEM;
205
206 /* Local 'tmp' is used as the structure copied back to user space */
207 memset(tmp, 0, size);
208 memcpy(tmp, kcfg, sizeof(*kcfg));
209
210 spin_lock(&splat_module_lock);
211 list_for_each_entry(sub, &splat_module_list, subsystem_list) {
212 strncpy(tmp->cfg_data.splat_subsystems.descs[i].name,
213 sub->desc.name, SPLAT_NAME_SIZE);
214 strncpy(tmp->cfg_data.splat_subsystems.descs[i].desc,
215 sub->desc.desc, SPLAT_DESC_SIZE);
216 tmp->cfg_data.splat_subsystems.descs[i].id = sub->desc.id;
217
218 /* Truncate list if we are about to overrun alloc'ed memory */
219 if ((i++) == kcfg->cfg_data.splat_subsystems.size)
220 break;
221 }
222 spin_unlock(&splat_module_lock);
223 tmp->cfg_rc1 = i;
224
225 if (copy_to_user((struct splat_cfg_t __user *)arg, tmp, size)) {
226 kfree(tmp);
227 return -EFAULT;
228 }
229
230 kfree(tmp);
231 return 0;
232 }
233
234 static int
235 splat_test_count(splat_cfg_t *kcfg, unsigned long arg)
236 {
237 splat_subsystem_t *sub;
238 splat_test_t *test;
239 int i = 0;
240
241 /* Subsystem ID passed as arg1 */
242 sub = splat_subsystem_find(kcfg->cfg_arg1);
243 if (sub == NULL)
244 return -EINVAL;
245
246 spin_lock(&(sub->test_lock));
247 list_for_each_entry(test, &(sub->test_list), test_list)
248 i++;
249
250 spin_unlock(&(sub->test_lock));
251 kcfg->cfg_rc1 = i;
252
253 if (copy_to_user((struct splat_cfg_t __user *)arg, kcfg, sizeof(*kcfg)))
254 return -EFAULT;
255
256 return 0;
257 }
258
259 static int
260 splat_test_list(splat_cfg_t *kcfg, unsigned long arg)
261 {
262 splat_subsystem_t *sub;
263 splat_test_t *test;
264 splat_cfg_t *tmp;
265 int size, i = 0;
266
267 /* Subsystem ID passed as arg1 */
268 sub = splat_subsystem_find(kcfg->cfg_arg1);
269 if (sub == NULL)
270 return -EINVAL;
271
272 /* Structure will be sized large enough for N test entries
273 * which is passed in by the caller. On exit the number of
274 * entries filled in with valid tests will be stored in
275 * cfg_rc1. If the caller does not provide enough entries
276 * for all tests we will truncate the list to avoid overrun.
277 */
278 size = sizeof(*tmp)+kcfg->cfg_data.splat_tests.size*sizeof(splat_user_t);
279 tmp = kmalloc(size, GFP_KERNEL);
280 if (tmp == NULL)
281 return -ENOMEM;
282
283 /* Local 'tmp' is used as the structure copied back to user space */
284 memset(tmp, 0, size);
285 memcpy(tmp, kcfg, sizeof(*kcfg));
286
287 spin_lock(&(sub->test_lock));
288 list_for_each_entry(test, &(sub->test_list), test_list) {
289 strncpy(tmp->cfg_data.splat_tests.descs[i].name,
290 test->desc.name, SPLAT_NAME_SIZE);
291 strncpy(tmp->cfg_data.splat_tests.descs[i].desc,
292 test->desc.desc, SPLAT_DESC_SIZE);
293 tmp->cfg_data.splat_tests.descs[i].id = test->desc.id;
294
295 /* Truncate list if we are about to overrun alloc'ed memory */
296 if ((i++) == kcfg->cfg_data.splat_tests.size)
297 break;
298 }
299 spin_unlock(&(sub->test_lock));
300 tmp->cfg_rc1 = i;
301
302 if (copy_to_user((struct splat_cfg_t __user *)arg, tmp, size)) {
303 kfree(tmp);
304 return -EFAULT;
305 }
306
307 kfree(tmp);
308 return 0;
309 }
310
311 static int
312 splat_validate(struct file *file, splat_subsystem_t *sub, int cmd, void *arg)
313 {
314 splat_test_t *test;
315
316 spin_lock(&(sub->test_lock));
317 list_for_each_entry(test, &(sub->test_list), test_list) {
318 if (test->desc.id == cmd) {
319 spin_unlock(&(sub->test_lock));
320 return test->test(file, arg);
321 }
322 }
323 spin_unlock(&(sub->test_lock));
324
325 return -EINVAL;
326 }
327
/*
 * Handle the SPLAT_CFG ioctl: copy the splat_cfg_t in from user space,
 * verify its magic, and dispatch on cfg_cmd to the appropriate query or
 * buffer-management helper.  Each helper is responsible for copying any
 * results back out to 'arg'.  Returns 0 or a negative errno.
 */
static int
splat_ioctl_cfg(struct file *file, unsigned long arg)
{
	splat_cfg_t kcfg;
	int rc = 0;

	if (copy_from_user(&kcfg, (splat_cfg_t *)arg, sizeof(kcfg)))
		return -EFAULT;

	/* Reject anything that does not carry the config magic. */
	if (kcfg.cfg_magic != SPLAT_CFG_MAGIC) {
		splat_print(file, "Bad config magic 0x%x != 0x%x\n",
		            kcfg.cfg_magic, SPLAT_CFG_MAGIC);
		return -EINVAL;
	}

	switch (kcfg.cfg_cmd) {
	case SPLAT_CFG_BUFFER_CLEAR:
		/* cfg_arg1 - Unused
		 * cfg_rc1 - Unused
		 */
		rc = splat_buffer_clear(file, &kcfg, arg);
		break;
	case SPLAT_CFG_BUFFER_SIZE:
		/* cfg_arg1 - 0 - query size; >0 resize
		 * cfg_rc1 - Set to current buffer size
		 */
		rc = splat_buffer_size(file, &kcfg, arg);
		break;
	case SPLAT_CFG_SUBSYSTEM_COUNT:
		/* cfg_arg1 - Unused
		 * cfg_rc1 - Set to number of subsystems
		 */
		rc = splat_subsystem_count(&kcfg, arg);
		break;
	case SPLAT_CFG_SUBSYSTEM_LIST:
		/* cfg_arg1 - Unused
		 * cfg_rc1 - Set to number of subsystems
		 * cfg_data.splat_subsystems - Populated with subsystems
		 */
		rc = splat_subsystem_list(&kcfg, arg);
		break;
	case SPLAT_CFG_TEST_COUNT:
		/* cfg_arg1 - Set to a target subsystem
		 * cfg_rc1 - Set to number of tests
		 */
		rc = splat_test_count(&kcfg, arg);
		break;
	case SPLAT_CFG_TEST_LIST:
		/* cfg_arg1 - Set to a target subsystem
		 * cfg_rc1 - Set to number of tests
		 * cfg_data.splat_subsystems - Populated with tests
		 */
		rc = splat_test_list(&kcfg, arg);
		break;
	default:
		splat_print(file, "Bad config command %d\n", kcfg.cfg_cmd);
		rc = -EINVAL;
		break;
	}

	return rc;
}
390
391 static int
392 splat_ioctl_cmd(struct file *file, unsigned long arg)
393 {
394 splat_subsystem_t *sub;
395 splat_cmd_t kcmd;
396 int rc = -EINVAL;
397 void *data = NULL;
398
399 if (copy_from_user(&kcmd, (splat_cfg_t *)arg, sizeof(kcmd)))
400 return -EFAULT;
401
402 if (kcmd.cmd_magic != SPLAT_CMD_MAGIC) {
403 splat_print(file, "Bad command magic 0x%x != 0x%x\n",
404 kcmd.cmd_magic, SPLAT_CFG_MAGIC);
405 return -EINVAL;
406 }
407
408 /* Allocate memory for any opaque data the caller needed to pass on */
409 if (kcmd.cmd_data_size > 0) {
410 data = (void *)kmalloc(kcmd.cmd_data_size, GFP_KERNEL);
411 if (data == NULL)
412 return -ENOMEM;
413
414 if (copy_from_user(data, (void *)(arg + offsetof(splat_cmd_t,
415 cmd_data_str)), kcmd.cmd_data_size)) {
416 kfree(data);
417 return -EFAULT;
418 }
419 }
420
421 sub = splat_subsystem_find(kcmd.cmd_subsystem);
422 if (sub != NULL)
423 rc = splat_validate(file, sub, kcmd.cmd_test, data);
424 else
425 rc = -EINVAL;
426
427 if (data != NULL)
428 kfree(data);
429
430 return rc;
431 }
432
433 static int
434 splat_ioctl(struct inode *inode, struct file *file,
435 unsigned int cmd, unsigned long arg)
436 {
437 unsigned int minor = iminor(file->f_dentry->d_inode);
438 int rc = 0;
439
440 /* Ignore tty ioctls */
441 if ((cmd & 0xffffff00) == ((int)'T') << 8)
442 return -ENOTTY;
443
444 if (minor >= SPLAT_MINORS)
445 return -ENXIO;
446
447 switch (cmd) {
448 case SPLAT_CFG:
449 rc = splat_ioctl_cfg(file, arg);
450 break;
451 case SPLAT_CMD:
452 rc = splat_ioctl_cmd(file, arg);
453 break;
454 default:
455 splat_print(file, "Bad ioctl command %d\n", cmd);
456 rc = -EINVAL;
457 break;
458 }
459
460 return rc;
461 }
462
463 /* I'm not sure why you would want to write in to this buffer from
464 * user space since its principle use is to pass test status info
465 * back to the user space, but I don't see any reason to prevent it.
466 */
467 static ssize_t splat_write(struct file *file, const char __user *buf,
468 size_t count, loff_t *ppos)
469 {
470 unsigned int minor = iminor(file->f_dentry->d_inode);
471 splat_info_t *info = (splat_info_t *)file->private_data;
472 int rc = 0;
473
474 if (minor >= SPLAT_MINORS)
475 return -ENXIO;
476
477 ASSERT(info);
478 ASSERT(info->info_buffer);
479
480 spin_lock(&info->info_lock);
481
482 /* Write beyond EOF */
483 if (*ppos >= info->info_size) {
484 rc = -EFBIG;
485 goto out;
486 }
487
488 /* Resize count if beyond EOF */
489 if (*ppos + count > info->info_size)
490 count = info->info_size - *ppos;
491
492 if (copy_from_user(info->info_buffer, buf, count)) {
493 rc = -EFAULT;
494 goto out;
495 }
496
497 *ppos += count;
498 rc = count;
499 out:
500 spin_unlock(&info->info_lock);
501 return rc;
502 }
503
/*
 * Read from the per-open status buffer at the current file offset.
 * Returns the number of bytes copied (0 at/after EOF), advancing *ppos;
 * -ENXIO for a bad minor, -EFAULT on a failed user copy.
 *
 * NOTE(review): copy_to_user() may fault and sleep while info_lock
 * (a spinlock) is held; converting info_lock to a mutex would resolve
 * this -- confirm there are no atomic-context users of the lock first.
 */
static ssize_t splat_read(struct file *file, char __user *buf,
                          size_t count, loff_t *ppos)
{
	unsigned int minor = iminor(file->f_dentry->d_inode);
	splat_info_t *info = (splat_info_t *)file->private_data;
	int rc = 0;

	if (minor >= SPLAT_MINORS)
		return -ENXIO;

	ASSERT(info);
	ASSERT(info->info_buffer);

	spin_lock(&info->info_lock);

	/* Read beyond EOF */
	if (*ppos >= info->info_size)
		goto out;

	/* Resize count if beyond EOF */
	if (*ppos + count > info->info_size)
		count = info->info_size - *ppos;

	if (copy_to_user(buf, info->info_buffer + *ppos, count)) {
		rc = -EFAULT;
		goto out;
	}

	*ppos += count;
	rc = count;
out:
	spin_unlock(&info->info_lock);
	return rc;
}
538
539 static loff_t splat_seek(struct file *file, loff_t offset, int origin)
540 {
541 unsigned int minor = iminor(file->f_dentry->d_inode);
542 splat_info_t *info = (splat_info_t *)file->private_data;
543 int rc = -EINVAL;
544
545 if (minor >= SPLAT_MINORS)
546 return -ENXIO;
547
548 ASSERT(info);
549 ASSERT(info->info_buffer);
550
551 spin_lock(&info->info_lock);
552
553 switch (origin) {
554 case 0: /* SEEK_SET - No-op just do it */
555 break;
556 case 1: /* SEEK_CUR - Seek from current */
557 offset = file->f_pos + offset;
558 break;
559 case 2: /* SEEK_END - Seek from end */
560 offset = info->info_size + offset;
561 break;
562 }
563
564 if (offset >= 0) {
565 file->f_pos = offset;
566 file->f_version = 0;
567 rc = offset;
568 }
569
570 spin_unlock(&info->info_lock);
571
572 return rc;
573 }
574
/* File operations for the splat control device; each open gets its own
 * splat_info_t (see splat_open) backing read/write/seek/ioctl. */
static struct file_operations splat_fops = {
	.owner = THIS_MODULE,
	.open = splat_open,
	.release = splat_release,
	.ioctl = splat_ioctl,
	.read = splat_read,
	.write = splat_write,
	.llseek = splat_seek,
};
584
/* Character device registered in splat_init(); the kobject name shows
 * up in sysfs as SPLAT_NAME. */
static struct cdev splat_cdev = {
	.owner = THIS_MODULE,
	.kobj = { .name = SPLAT_NAME, },
};
589
/*
 * Module init: register every test subsystem, then set up the control
 * character device (chrdev region + cdev) and its udev class/device
 * nodes.  Returns 0 on success or a negative errno; partial device
 * setup is unwound before returning an error.
 *
 * NOTE(review): the error path does not run SPLAT_SUBSYSTEM_FINI for
 * subsystems already initialized above -- presumably leaks them on a
 * failed load; verify against SPLAT_SUBSYSTEM_INIT's semantics.
 */
static int __init
splat_init(void)
{
	dev_t dev;
	int rc;

	spin_lock_init(&splat_module_lock);
	INIT_LIST_HEAD(&splat_module_list);

	/* Register each subsystem's tests on splat_module_list. */
	SPLAT_SUBSYSTEM_INIT(kmem);
	SPLAT_SUBSYSTEM_INIT(taskq);
	SPLAT_SUBSYSTEM_INIT(krng);
	SPLAT_SUBSYSTEM_INIT(mutex);
	SPLAT_SUBSYSTEM_INIT(condvar);
	SPLAT_SUBSYSTEM_INIT(thread);
	SPLAT_SUBSYSTEM_INIT(rwlock);
	SPLAT_SUBSYSTEM_INIT(time);
	SPLAT_SUBSYSTEM_INIT(vnode);
	SPLAT_SUBSYSTEM_INIT(kobj);
	SPLAT_SUBSYSTEM_INIT(atomic);
	SPLAT_SUBSYSTEM_INIT(list);
	SPLAT_SUBSYSTEM_INIT(generic);

	dev = MKDEV(SPLAT_MAJOR, 0);
	if ((rc = register_chrdev_region(dev, SPLAT_MINORS, SPLAT_NAME)))
		goto error;

	/* Support for registering a character driver */
	cdev_init(&splat_cdev, &splat_fops);
	if ((rc = cdev_add(&splat_cdev, dev, SPLAT_MINORS))) {
		printk(KERN_ERR "SPLAT: Error adding cdev, %d\n", rc);
		kobject_put(&splat_cdev.kobj);
		unregister_chrdev_region(dev, SPLAT_MINORS);
		goto error;
	}

	/* Support for udev make driver info available in sysfs */
	splat_class = spl_class_create(THIS_MODULE, "splat");
	if (IS_ERR(splat_class)) {
		rc = PTR_ERR(splat_class);
		printk(KERN_ERR "SPLAT: Error creating splat class, %d\n", rc);
		cdev_del(&splat_cdev);
		unregister_chrdev_region(dev, SPLAT_MINORS);
		goto error;
	}

	/* NOTE(review): return value is not checked with IS_ERR(); a
	 * failed device create is silently ignored -- confirm whether
	 * spl_device_create can fail in the supported kernels. */
	splat_device = spl_device_create(splat_class, NULL,
					 MKDEV(SPLAT_MAJOR, 0),
					 NULL, SPLAT_NAME);

	printk(KERN_INFO "SPLAT: Loaded Solaris Porting LAyer "
	       "Tests v%s\n", SPL_META_VERSION);
	return 0;
error:
	printk(KERN_ERR "SPLAT: Error registering splat device, %d\n", rc);
	return rc;
}
647
/*
 * Module exit: tear down the device nodes and character device, then
 * unregister every subsystem in the reverse order of splat_init().
 * Asserts the module list is empty before the module unloads.
 */
static void
splat_fini(void)
{
	dev_t dev = MKDEV(SPLAT_MAJOR, 0);

	/* Undo device setup in reverse order of creation. */
	spl_device_destroy(splat_class, splat_device, dev);
	spl_class_destroy(splat_class);
	cdev_del(&splat_cdev);
	unregister_chrdev_region(dev, SPLAT_MINORS);

	/* Reverse order of SPLAT_SUBSYSTEM_INIT calls in splat_init(). */
	SPLAT_SUBSYSTEM_FINI(generic);
	SPLAT_SUBSYSTEM_FINI(list);
	SPLAT_SUBSYSTEM_FINI(atomic);
	SPLAT_SUBSYSTEM_FINI(kobj);
	SPLAT_SUBSYSTEM_FINI(vnode);
	SPLAT_SUBSYSTEM_FINI(time);
	SPLAT_SUBSYSTEM_FINI(rwlock);
	SPLAT_SUBSYSTEM_FINI(thread);
	SPLAT_SUBSYSTEM_FINI(condvar);
	SPLAT_SUBSYSTEM_FINI(mutex);
	SPLAT_SUBSYSTEM_FINI(krng);
	SPLAT_SUBSYSTEM_FINI(taskq);
	SPLAT_SUBSYSTEM_FINI(kmem);

	ASSERT(list_empty(&splat_module_list));
	printk(KERN_INFO "SPLAT: Unloaded Solaris Porting LAyer "
	       "Tests v%s\n", SPL_META_VERSION);
}
676
/* Module entry/exit registration and metadata. */
module_init(splat_init);
module_exit(splat_fini);

MODULE_AUTHOR("Lawrence Livermore National Labs");
MODULE_DESCRIPTION("Solaris Porting LAyer Tests");
MODULE_LICENSE("GPL");