]>
Commit | Line | Data |
---|---|---|
1 | /* | |
2 | * This file is part of the SPL: Solaris Porting Layer. | |
3 | * | |
4 | * Copyright (c) 2008 Lawrence Livermore National Security, LLC. | |
5 | * Produced at Lawrence Livermore National Laboratory | |
6 | * Written by: | |
7 | * Brian Behlendorf <behlendorf1@llnl.gov>, | |
8 | * Herb Wartens <wartens2@llnl.gov>, | |
9 | * Jim Garlick <garlick@llnl.gov> | |
10 | * UCRL-CODE-235197 | |
11 | * | |
12 | * This is free software; you can redistribute it and/or modify it | |
13 | * under the terms of the GNU General Public License as published by | |
14 | * the Free Software Foundation; either version 2 of the License, or | |
15 | * (at your option) any later version. | |
16 | * | |
17 | * This is distributed in the hope that it will be useful, but WITHOUT | |
18 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
19 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
20 | * for more details. | |
21 | * | |
22 | * You should have received a copy of the GNU General Public License along | |
23 | * with this program; if not, write to the Free Software Foundation, Inc., | |
24 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | |
25 | */ | |
26 | ||
27 | /* | |
28 | * My intent is to create a loadable 'splat' (Solaris Porting LAyer | |
29 | * Tests) module which can be used as an access point to run | |
30 | * in kernel Solaris ABI regression tests. This provides a | |
31 | * nice mechanism to validate the shim primitives are working properly. | |
32 | * | |
33 | * The basic design of the splat module is that it is constructed of | |
34 | * various splat_* source files each of which contains regression tests. | |
35 | * For example the splat_linux_kmem.c file contains tests for validating | |
36 | * kmem correctness. When the splat module is loaded splat_*_init() | |
37 | * will be called for each subsystems tests, similarly splat_*_fini() is | |
38 | * called when the splat module is removed. Each test can then be | |
39 | * run by making an ioctl() call from a userspace control application | |
40 | * to pick the subsystem and test which should be run. | |
41 | */ | |
42 | ||
43 | #include "splat-internal.h" | |
44 | #include <config.h> | |
45 | ||
46 | #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) | |
47 | #include <linux/devfs_fs_kernel.h> | |
48 | #endif | |
49 | ||
50 | #include <linux/cdev.h> | |
51 | ||
52 | ||
/* Device class used to expose the control node through sysfs/udev.
 * The class_simple API was replaced by struct class in Linux 2.6.18,
 * hence the version split. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
static struct class_simple *splat_class;
#else
static struct class *splat_class;
#endif
/* List of registered test subsystems; protected by splat_module_lock. */
static struct list_head splat_module_list;
static spinlock_t splat_module_lock;
60 | ||
61 | static int | |
62 | splat_open(struct inode *inode, struct file *file) | |
63 | { | |
64 | unsigned int minor = iminor(inode); | |
65 | splat_info_t *info; | |
66 | ||
67 | if (minor >= SPLAT_MINORS) | |
68 | return -ENXIO; | |
69 | ||
70 | info = (splat_info_t *)kmalloc(sizeof(*info), GFP_KERNEL); | |
71 | if (info == NULL) | |
72 | return -ENOMEM; | |
73 | ||
74 | spin_lock_init(&info->info_lock); | |
75 | info->info_size = SPLAT_INFO_BUFFER_SIZE; | |
76 | info->info_buffer = (char *)vmalloc(SPLAT_INFO_BUFFER_SIZE); | |
77 | if (info->info_buffer == NULL) { | |
78 | kfree(info); | |
79 | return -ENOMEM; | |
80 | } | |
81 | ||
82 | info->info_head = info->info_buffer; | |
83 | file->private_data = (void *)info; | |
84 | ||
85 | return 0; | |
86 | } | |
87 | ||
88 | static int | |
89 | splat_release(struct inode *inode, struct file *file) | |
90 | { | |
91 | unsigned int minor = iminor(inode); | |
92 | splat_info_t *info = (splat_info_t *)file->private_data; | |
93 | ||
94 | if (minor >= SPLAT_MINORS) | |
95 | return -ENXIO; | |
96 | ||
97 | ASSERT(info); | |
98 | ASSERT(info->info_buffer); | |
99 | ||
100 | vfree(info->info_buffer); | |
101 | kfree(info); | |
102 | ||
103 | return 0; | |
104 | } | |
105 | ||
106 | static int | |
107 | splat_buffer_clear(struct file *file, splat_cfg_t *kcfg, unsigned long arg) | |
108 | { | |
109 | splat_info_t *info = (splat_info_t *)file->private_data; | |
110 | ||
111 | ASSERT(info); | |
112 | ASSERT(info->info_buffer); | |
113 | ||
114 | spin_lock(&info->info_lock); | |
115 | memset(info->info_buffer, 0, info->info_size); | |
116 | info->info_head = info->info_buffer; | |
117 | spin_unlock(&info->info_lock); | |
118 | ||
119 | return 0; | |
120 | } | |
121 | ||
122 | static int | |
123 | splat_buffer_size(struct file *file, splat_cfg_t *kcfg, unsigned long arg) | |
124 | { | |
125 | splat_info_t *info = (splat_info_t *)file->private_data; | |
126 | char *buf; | |
127 | int min, size, rc = 0; | |
128 | ||
129 | ASSERT(info); | |
130 | ASSERT(info->info_buffer); | |
131 | ||
132 | spin_lock(&info->info_lock); | |
133 | if (kcfg->cfg_arg1 > 0) { | |
134 | ||
135 | size = kcfg->cfg_arg1; | |
136 | buf = (char *)vmalloc(size); | |
137 | if (buf == NULL) { | |
138 | rc = -ENOMEM; | |
139 | goto out; | |
140 | } | |
141 | ||
142 | /* Zero fill and truncate contents when coping buffer */ | |
143 | min = ((size < info->info_size) ? size : info->info_size); | |
144 | memset(buf, 0, size); | |
145 | memcpy(buf, info->info_buffer, min); | |
146 | vfree(info->info_buffer); | |
147 | info->info_size = size; | |
148 | info->info_buffer = buf; | |
149 | info->info_head = info->info_buffer; | |
150 | } | |
151 | ||
152 | kcfg->cfg_rc1 = info->info_size; | |
153 | ||
154 | if (copy_to_user((struct splat_cfg_t __user *)arg, kcfg, sizeof(*kcfg))) | |
155 | rc = -EFAULT; | |
156 | out: | |
157 | spin_unlock(&info->info_lock); | |
158 | ||
159 | return rc; | |
160 | } | |
161 | ||
162 | ||
163 | static splat_subsystem_t * | |
164 | splat_subsystem_find(int id) { | |
165 | splat_subsystem_t *sub; | |
166 | ||
167 | spin_lock(&splat_module_lock); | |
168 | list_for_each_entry(sub, &splat_module_list, subsystem_list) { | |
169 | if (id == sub->desc.id) { | |
170 | spin_unlock(&splat_module_lock); | |
171 | return sub; | |
172 | } | |
173 | } | |
174 | spin_unlock(&splat_module_lock); | |
175 | ||
176 | return NULL; | |
177 | } | |
178 | ||
179 | static int | |
180 | splat_subsystem_count(splat_cfg_t *kcfg, unsigned long arg) | |
181 | { | |
182 | splat_subsystem_t *sub; | |
183 | int i = 0; | |
184 | ||
185 | spin_lock(&splat_module_lock); | |
186 | list_for_each_entry(sub, &splat_module_list, subsystem_list) | |
187 | i++; | |
188 | ||
189 | spin_unlock(&splat_module_lock); | |
190 | kcfg->cfg_rc1 = i; | |
191 | ||
192 | if (copy_to_user((struct splat_cfg_t __user *)arg, kcfg, sizeof(*kcfg))) | |
193 | return -EFAULT; | |
194 | ||
195 | return 0; | |
196 | } | |
197 | ||
198 | static int | |
199 | splat_subsystem_list(splat_cfg_t *kcfg, unsigned long arg) | |
200 | { | |
201 | splat_subsystem_t *sub; | |
202 | splat_cfg_t *tmp; | |
203 | int size, i = 0; | |
204 | ||
205 | /* Structure will be sized large enough for N subsystem entries | |
206 | * which is passed in by the caller. On exit the number of | |
207 | * entries filled in with valid subsystems will be stored in | |
208 | * cfg_rc1. If the caller does not provide enough entries | |
209 | * for all subsystems we will truncate the list to avoid overrun. | |
210 | */ | |
211 | size = sizeof(*tmp) + kcfg->cfg_data.splat_subsystems.size * | |
212 | sizeof(splat_user_t); | |
213 | tmp = kmalloc(size, GFP_KERNEL); | |
214 | if (tmp == NULL) | |
215 | return -ENOMEM; | |
216 | ||
217 | /* Local 'tmp' is used as the structure copied back to user space */ | |
218 | memset(tmp, 0, size); | |
219 | memcpy(tmp, kcfg, sizeof(*kcfg)); | |
220 | ||
221 | spin_lock(&splat_module_lock); | |
222 | list_for_each_entry(sub, &splat_module_list, subsystem_list) { | |
223 | strncpy(tmp->cfg_data.splat_subsystems.descs[i].name, | |
224 | sub->desc.name, SPLAT_NAME_SIZE); | |
225 | strncpy(tmp->cfg_data.splat_subsystems.descs[i].desc, | |
226 | sub->desc.desc, SPLAT_DESC_SIZE); | |
227 | tmp->cfg_data.splat_subsystems.descs[i].id = sub->desc.id; | |
228 | ||
229 | /* Truncate list if we are about to overrun alloc'ed memory */ | |
230 | if ((i++) == kcfg->cfg_data.splat_subsystems.size) | |
231 | break; | |
232 | } | |
233 | spin_unlock(&splat_module_lock); | |
234 | tmp->cfg_rc1 = i; | |
235 | ||
236 | if (copy_to_user((struct splat_cfg_t __user *)arg, tmp, size)) { | |
237 | kfree(tmp); | |
238 | return -EFAULT; | |
239 | } | |
240 | ||
241 | kfree(tmp); | |
242 | return 0; | |
243 | } | |
244 | ||
245 | static int | |
246 | splat_test_count(splat_cfg_t *kcfg, unsigned long arg) | |
247 | { | |
248 | splat_subsystem_t *sub; | |
249 | splat_test_t *test; | |
250 | int i = 0; | |
251 | ||
252 | /* Subsystem ID passed as arg1 */ | |
253 | sub = splat_subsystem_find(kcfg->cfg_arg1); | |
254 | if (sub == NULL) | |
255 | return -EINVAL; | |
256 | ||
257 | spin_lock(&(sub->test_lock)); | |
258 | list_for_each_entry(test, &(sub->test_list), test_list) | |
259 | i++; | |
260 | ||
261 | spin_unlock(&(sub->test_lock)); | |
262 | kcfg->cfg_rc1 = i; | |
263 | ||
264 | if (copy_to_user((struct splat_cfg_t __user *)arg, kcfg, sizeof(*kcfg))) | |
265 | return -EFAULT; | |
266 | ||
267 | return 0; | |
268 | } | |
269 | ||
270 | static int | |
271 | splat_test_list(splat_cfg_t *kcfg, unsigned long arg) | |
272 | { | |
273 | splat_subsystem_t *sub; | |
274 | splat_test_t *test; | |
275 | splat_cfg_t *tmp; | |
276 | int size, i = 0; | |
277 | ||
278 | /* Subsystem ID passed as arg1 */ | |
279 | sub = splat_subsystem_find(kcfg->cfg_arg1); | |
280 | if (sub == NULL) | |
281 | return -EINVAL; | |
282 | ||
283 | /* Structure will be sized large enough for N test entries | |
284 | * which is passed in by the caller. On exit the number of | |
285 | * entries filled in with valid tests will be stored in | |
286 | * cfg_rc1. If the caller does not provide enough entries | |
287 | * for all tests we will truncate the list to avoid overrun. | |
288 | */ | |
289 | size = sizeof(*tmp)+kcfg->cfg_data.splat_tests.size*sizeof(splat_user_t); | |
290 | tmp = kmalloc(size, GFP_KERNEL); | |
291 | if (tmp == NULL) | |
292 | return -ENOMEM; | |
293 | ||
294 | /* Local 'tmp' is used as the structure copied back to user space */ | |
295 | memset(tmp, 0, size); | |
296 | memcpy(tmp, kcfg, sizeof(*kcfg)); | |
297 | ||
298 | spin_lock(&(sub->test_lock)); | |
299 | list_for_each_entry(test, &(sub->test_list), test_list) { | |
300 | strncpy(tmp->cfg_data.splat_tests.descs[i].name, | |
301 | test->desc.name, SPLAT_NAME_SIZE); | |
302 | strncpy(tmp->cfg_data.splat_tests.descs[i].desc, | |
303 | test->desc.desc, SPLAT_DESC_SIZE); | |
304 | tmp->cfg_data.splat_tests.descs[i].id = test->desc.id; | |
305 | ||
306 | /* Truncate list if we are about to overrun alloc'ed memory */ | |
307 | if ((i++) == kcfg->cfg_data.splat_tests.size) | |
308 | break; | |
309 | } | |
310 | spin_unlock(&(sub->test_lock)); | |
311 | tmp->cfg_rc1 = i; | |
312 | ||
313 | if (copy_to_user((struct splat_cfg_t __user *)arg, tmp, size)) { | |
314 | kfree(tmp); | |
315 | return -EFAULT; | |
316 | } | |
317 | ||
318 | kfree(tmp); | |
319 | return 0; | |
320 | } | |
321 | ||
322 | static int | |
323 | splat_validate(struct file *file, splat_subsystem_t *sub, int cmd, void *arg) | |
324 | { | |
325 | splat_test_t *test; | |
326 | ||
327 | spin_lock(&(sub->test_lock)); | |
328 | list_for_each_entry(test, &(sub->test_list), test_list) { | |
329 | if (test->desc.id == cmd) { | |
330 | spin_unlock(&(sub->test_lock)); | |
331 | return test->test(file, arg); | |
332 | } | |
333 | } | |
334 | spin_unlock(&(sub->test_lock)); | |
335 | ||
336 | return -EINVAL; | |
337 | } | |
338 | ||
339 | static int | |
340 | splat_ioctl_cfg(struct file *file, unsigned long arg) | |
341 | { | |
342 | splat_cfg_t kcfg; | |
343 | int rc = 0; | |
344 | ||
345 | if (copy_from_user(&kcfg, (splat_cfg_t *)arg, sizeof(kcfg))) | |
346 | return -EFAULT; | |
347 | ||
348 | if (kcfg.cfg_magic != SPLAT_CFG_MAGIC) { | |
349 | splat_print(file, "Bad config magic 0x%x != 0x%x\n", | |
350 | kcfg.cfg_magic, SPLAT_CFG_MAGIC); | |
351 | return -EINVAL; | |
352 | } | |
353 | ||
354 | switch (kcfg.cfg_cmd) { | |
355 | case SPLAT_CFG_BUFFER_CLEAR: | |
356 | /* cfg_arg1 - Unused | |
357 | * cfg_rc1 - Unused | |
358 | */ | |
359 | rc = splat_buffer_clear(file, &kcfg, arg); | |
360 | break; | |
361 | case SPLAT_CFG_BUFFER_SIZE: | |
362 | /* cfg_arg1 - 0 - query size; >0 resize | |
363 | * cfg_rc1 - Set to current buffer size | |
364 | */ | |
365 | rc = splat_buffer_size(file, &kcfg, arg); | |
366 | break; | |
367 | case SPLAT_CFG_SUBSYSTEM_COUNT: | |
368 | /* cfg_arg1 - Unused | |
369 | * cfg_rc1 - Set to number of subsystems | |
370 | */ | |
371 | rc = splat_subsystem_count(&kcfg, arg); | |
372 | break; | |
373 | case SPLAT_CFG_SUBSYSTEM_LIST: | |
374 | /* cfg_arg1 - Unused | |
375 | * cfg_rc1 - Set to number of subsystems | |
376 | * cfg_data.splat_subsystems - Populated with subsystems | |
377 | */ | |
378 | rc = splat_subsystem_list(&kcfg, arg); | |
379 | break; | |
380 | case SPLAT_CFG_TEST_COUNT: | |
381 | /* cfg_arg1 - Set to a target subsystem | |
382 | * cfg_rc1 - Set to number of tests | |
383 | */ | |
384 | rc = splat_test_count(&kcfg, arg); | |
385 | break; | |
386 | case SPLAT_CFG_TEST_LIST: | |
387 | /* cfg_arg1 - Set to a target subsystem | |
388 | * cfg_rc1 - Set to number of tests | |
389 | * cfg_data.splat_subsystems - Populated with tests | |
390 | */ | |
391 | rc = splat_test_list(&kcfg, arg); | |
392 | break; | |
393 | default: | |
394 | splat_print(file, "Bad config command %d\n", kcfg.cfg_cmd); | |
395 | rc = -EINVAL; | |
396 | break; | |
397 | } | |
398 | ||
399 | return rc; | |
400 | } | |
401 | ||
402 | static int | |
403 | splat_ioctl_cmd(struct file *file, unsigned long arg) | |
404 | { | |
405 | splat_subsystem_t *sub; | |
406 | splat_cmd_t kcmd; | |
407 | int rc = -EINVAL; | |
408 | void *data = NULL; | |
409 | ||
410 | if (copy_from_user(&kcmd, (splat_cfg_t *)arg, sizeof(kcmd))) | |
411 | return -EFAULT; | |
412 | ||
413 | if (kcmd.cmd_magic != SPLAT_CMD_MAGIC) { | |
414 | splat_print(file, "Bad command magic 0x%x != 0x%x\n", | |
415 | kcmd.cmd_magic, SPLAT_CFG_MAGIC); | |
416 | return -EINVAL; | |
417 | } | |
418 | ||
419 | /* Allocate memory for any opaque data the caller needed to pass on */ | |
420 | if (kcmd.cmd_data_size > 0) { | |
421 | data = (void *)kmalloc(kcmd.cmd_data_size, GFP_KERNEL); | |
422 | if (data == NULL) | |
423 | return -ENOMEM; | |
424 | ||
425 | if (copy_from_user(data, (void *)(arg + offsetof(splat_cmd_t, | |
426 | cmd_data_str)), kcmd.cmd_data_size)) { | |
427 | kfree(data); | |
428 | return -EFAULT; | |
429 | } | |
430 | } | |
431 | ||
432 | sub = splat_subsystem_find(kcmd.cmd_subsystem); | |
433 | if (sub != NULL) | |
434 | rc = splat_validate(file, sub, kcmd.cmd_test, data); | |
435 | else | |
436 | rc = -EINVAL; | |
437 | ||
438 | if (data != NULL) | |
439 | kfree(data); | |
440 | ||
441 | return rc; | |
442 | } | |
443 | ||
444 | static int | |
445 | splat_ioctl(struct inode *inode, struct file *file, | |
446 | unsigned int cmd, unsigned long arg) | |
447 | { | |
448 | unsigned int minor = iminor(file->f_dentry->d_inode); | |
449 | int rc = 0; | |
450 | ||
451 | /* Ignore tty ioctls */ | |
452 | if ((cmd & 0xffffff00) == ((int)'T') << 8) | |
453 | return -ENOTTY; | |
454 | ||
455 | if (minor >= SPLAT_MINORS) | |
456 | return -ENXIO; | |
457 | ||
458 | switch (cmd) { | |
459 | case SPLAT_CFG: | |
460 | rc = splat_ioctl_cfg(file, arg); | |
461 | break; | |
462 | case SPLAT_CMD: | |
463 | rc = splat_ioctl_cmd(file, arg); | |
464 | break; | |
465 | default: | |
466 | splat_print(file, "Bad ioctl command %d\n", cmd); | |
467 | rc = -EINVAL; | |
468 | break; | |
469 | } | |
470 | ||
471 | return rc; | |
472 | } | |
473 | ||
474 | /* I'm not sure why you would want to write in to this buffer from | |
475 | * user space since its principle use is to pass test status info | |
476 | * back to the user space, but I don't see any reason to prevent it. | |
477 | */ | |
478 | static ssize_t splat_write(struct file *file, const char __user *buf, | |
479 | size_t count, loff_t *ppos) | |
480 | { | |
481 | unsigned int minor = iminor(file->f_dentry->d_inode); | |
482 | splat_info_t *info = (splat_info_t *)file->private_data; | |
483 | int rc = 0; | |
484 | ||
485 | if (minor >= SPLAT_MINORS) | |
486 | return -ENXIO; | |
487 | ||
488 | ASSERT(info); | |
489 | ASSERT(info->info_buffer); | |
490 | ||
491 | spin_lock(&info->info_lock); | |
492 | ||
493 | /* Write beyond EOF */ | |
494 | if (*ppos >= info->info_size) { | |
495 | rc = -EFBIG; | |
496 | goto out; | |
497 | } | |
498 | ||
499 | /* Resize count if beyond EOF */ | |
500 | if (*ppos + count > info->info_size) | |
501 | count = info->info_size - *ppos; | |
502 | ||
503 | if (copy_from_user(info->info_buffer, buf, count)) { | |
504 | rc = -EFAULT; | |
505 | goto out; | |
506 | } | |
507 | ||
508 | *ppos += count; | |
509 | rc = count; | |
510 | out: | |
511 | spin_unlock(&info->info_lock); | |
512 | return rc; | |
513 | } | |
514 | ||
515 | static ssize_t splat_read(struct file *file, char __user *buf, | |
516 | size_t count, loff_t *ppos) | |
517 | { | |
518 | unsigned int minor = iminor(file->f_dentry->d_inode); | |
519 | splat_info_t *info = (splat_info_t *)file->private_data; | |
520 | int rc = 0; | |
521 | ||
522 | if (minor >= SPLAT_MINORS) | |
523 | return -ENXIO; | |
524 | ||
525 | ASSERT(info); | |
526 | ASSERT(info->info_buffer); | |
527 | ||
528 | spin_lock(&info->info_lock); | |
529 | ||
530 | /* Read beyond EOF */ | |
531 | if (*ppos >= info->info_size) | |
532 | goto out; | |
533 | ||
534 | /* Resize count if beyond EOF */ | |
535 | if (*ppos + count > info->info_size) | |
536 | count = info->info_size - *ppos; | |
537 | ||
538 | if (copy_to_user(buf, info->info_buffer + *ppos, count)) { | |
539 | rc = -EFAULT; | |
540 | goto out; | |
541 | } | |
542 | ||
543 | *ppos += count; | |
544 | rc = count; | |
545 | out: | |
546 | spin_unlock(&info->info_lock); | |
547 | return rc; | |
548 | } | |
549 | ||
550 | static loff_t splat_seek(struct file *file, loff_t offset, int origin) | |
551 | { | |
552 | unsigned int minor = iminor(file->f_dentry->d_inode); | |
553 | splat_info_t *info = (splat_info_t *)file->private_data; | |
554 | int rc = -EINVAL; | |
555 | ||
556 | if (minor >= SPLAT_MINORS) | |
557 | return -ENXIO; | |
558 | ||
559 | ASSERT(info); | |
560 | ASSERT(info->info_buffer); | |
561 | ||
562 | spin_lock(&info->info_lock); | |
563 | ||
564 | switch (origin) { | |
565 | case 0: /* SEEK_SET - No-op just do it */ | |
566 | break; | |
567 | case 1: /* SEEK_CUR - Seek from current */ | |
568 | offset = file->f_pos + offset; | |
569 | break; | |
570 | case 2: /* SEEK_END - Seek from end */ | |
571 | offset = info->info_size + offset; | |
572 | break; | |
573 | } | |
574 | ||
575 | if (offset >= 0) { | |
576 | file->f_pos = offset; | |
577 | file->f_version = 0; | |
578 | rc = offset; | |
579 | } | |
580 | ||
581 | spin_unlock(&info->info_lock); | |
582 | ||
583 | return rc; | |
584 | } | |
585 | ||
/* File operations backing the splatctl character device; attached to
 * splat_cdev in splat_init() via cdev_init(). */
static struct file_operations splat_fops = {
	.owner = THIS_MODULE,
	.open = splat_open,
	.release = splat_release,
	.ioctl = splat_ioctl,
	.read = splat_read,
	.write = splat_write,
	.llseek = splat_seek,
};
595 | ||
/* Character device for /dev/splatctl; initialized with splat_fops and
 * registered in splat_init(). */
static struct cdev splat_cdev = {
	.owner = THIS_MODULE,
	.kobj = { .name = "splatctl", },
};
600 | ||
/*
 * Module load: initialize the subsystem registry, register each test
 * subsystem, then create the splatctl character device plus its sysfs
 * class/device entries so udev can create the /dev node.  On any
 * failure everything registered so far is unwound before returning.
 */
static int __init
splat_init(void)
{
	dev_t dev;
	int rc;

	spin_lock_init(&splat_module_lock);
	INIT_LIST_HEAD(&splat_module_list);

	/* Register every test subsystem; splat_fini() removes them in
	 * reverse order. */
	SPLAT_SUBSYSTEM_INIT(kmem);
	SPLAT_SUBSYSTEM_INIT(taskq);
	SPLAT_SUBSYSTEM_INIT(krng);
	SPLAT_SUBSYSTEM_INIT(mutex);
	SPLAT_SUBSYSTEM_INIT(condvar);
	SPLAT_SUBSYSTEM_INIT(thread);
	SPLAT_SUBSYSTEM_INIT(rwlock);
	SPLAT_SUBSYSTEM_INIT(time);
	SPLAT_SUBSYSTEM_INIT(vnode);
	SPLAT_SUBSYSTEM_INIT(kobj);
	SPLAT_SUBSYSTEM_INIT(atomic);

	dev = MKDEV(SPLAT_MAJOR, 0);
	if ((rc = register_chrdev_region(dev, SPLAT_MINORS, "splatctl")))
		goto error;

	/* Support for registering a character driver */
	cdev_init(&splat_cdev, &splat_fops);
	if ((rc = cdev_add(&splat_cdev, dev, SPLAT_MINORS))) {
		printk(KERN_ERR "splat: Error adding cdev, %d\n", rc);
		kobject_put(&splat_cdev.kobj);
		unregister_chrdev_region(dev, SPLAT_MINORS);
		goto error;
	}

	/* Support for udev make driver info available in sysfs */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
	splat_class = class_simple_create(THIS_MODULE, "splat");
#else
	splat_class = class_create(THIS_MODULE, "splat");
#endif
	if (IS_ERR(splat_class)) {
		rc = PTR_ERR(splat_class);
		printk(KERN_ERR "splat: Error creating splat class, %d\n", rc);
		cdev_del(&splat_cdev);
		unregister_chrdev_region(dev, SPLAT_MINORS);
		goto error;
	}

	/* Create the class device node under the class created above. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
	class_simple_device_add(splat_class, MKDEV(SPLAT_MAJOR, 0),
	                        NULL, "splatctl");
#else
	class_device_create(splat_class, NULL, MKDEV(SPLAT_MAJOR, 0),
	                    NULL, "splatctl");
#endif

	printk(KERN_INFO "splat: Loaded Solaris Porting LAyer "
	       "Tests v%s\n", VERSION);
	return 0;
error:
	printk(KERN_ERR "splat: Error registering splat device, %d\n", rc);
	return rc;
}
664 | ||
/*
 * Module unload: tear down the class/device and character device in
 * reverse order of creation, then remove every test subsystem (in the
 * opposite order of splat_init()).  The subsystem list must be empty
 * when done.
 */
static void
splat_fini(void)
{
	dev_t dev = MKDEV(SPLAT_MAJOR, 0);

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
	class_simple_device_remove(dev);
	class_simple_destroy(splat_class);
	devfs_remove("splat/splatctl");
	devfs_remove("splat");
#else
	class_device_destroy(splat_class, dev);
	class_destroy(splat_class);
#endif
	cdev_del(&splat_cdev);
	unregister_chrdev_region(dev, SPLAT_MINORS);

	/* Remove subsystems in reverse registration order. */
	SPLAT_SUBSYSTEM_FINI(atomic);
	SPLAT_SUBSYSTEM_FINI(kobj);
	SPLAT_SUBSYSTEM_FINI(vnode);
	SPLAT_SUBSYSTEM_FINI(time);
	SPLAT_SUBSYSTEM_FINI(rwlock);
	SPLAT_SUBSYSTEM_FINI(thread);
	SPLAT_SUBSYSTEM_FINI(condvar);
	SPLAT_SUBSYSTEM_FINI(mutex);
	SPLAT_SUBSYSTEM_FINI(krng);
	SPLAT_SUBSYSTEM_FINI(taskq);
	SPLAT_SUBSYSTEM_FINI(kmem);

	ASSERT(list_empty(&splat_module_list));
	printk(KERN_INFO "splat: Unloaded Solaris Porting LAyer "
	       "Tests v%s\n", VERSION);
}
698 | ||
/* Module entry/exit points and metadata. */
module_init(splat_init);
module_exit(splat_fini);

MODULE_AUTHOR("Lawrence Livermore National Labs");
MODULE_DESCRIPTION("Solaris Porting LAyer Tests");
MODULE_LICENSE("GPL");