/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *  UCRL-CODE-235197
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://zfsonlinux.org/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *****************************************************************************
 *  Solaris Porting Layer (SPL) Proc Implementation.
\*****************************************************************************/

#include <sys/systeminfo.h>
#include <sys/kstat.h>
#include <sys/kmem.h>
#include <sys/kmem_cache.h>
#include <sys/vmem.h>
#include <sys/taskq.h>
#include <linux/ctype.h>
#include <linux/kmod.h>
#include <linux/seq_file.h>
#include <linux/proc_compat.h>
#include <linux/uaccess.h>
#include <linux/version.h>

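/*
 * The PaX/grsecurity "constify" GCC plugin makes structures composed of
 * function pointers, struct ctl_table included, read-only.  The __no_const
 * annotation opts out so the local ctl_table copies made by the handlers
 * below remain writable.
 */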
#if defined(CONSTIFY_PLUGIN) && LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)
typedef struct ctl_table __no_const spl_ctl_table;
#else
typedef struct ctl_table spl_ctl_table;
#endif

static unsigned long table_min = 0;
static unsigned long table_max = ~0;

static struct ctl_table_header *spl_header = NULL;
static struct proc_dir_entry *proc_spl = NULL;
static struct proc_dir_entry *proc_spl_kmem = NULL;
static struct proc_dir_entry *proc_spl_kmem_slab = NULL;
static struct proc_dir_entry *proc_spl_taskq_all = NULL;
static struct proc_dir_entry *proc_spl_taskq = NULL;
struct proc_dir_entry *proc_spl_kstat = NULL;

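/*
 * Copy a user space string into a fixed size kernel buffer, strip any
 * trailing whitespace, and NUL terminate the result.  Returns 0 on
 * success, -EOVERFLOW if the string cannot fit, -EFAULT on a failed
 * copy, and -EINVAL if nothing but whitespace was supplied.
 */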
static int
proc_copyin_string(char *kbuffer, int kbuffer_size,
                   const char *ubuffer, int ubuffer_size)
{
        int size;

        if (ubuffer_size > kbuffer_size)
                return -EOVERFLOW;

        if (copy_from_user((void *)kbuffer, (void *)ubuffer, ubuffer_size))
                return -EFAULT;

        /* strip trailing whitespace */
        size = strnlen(kbuffer, ubuffer_size);
        while (--size >= 0)
                if (!isspace(kbuffer[size]))
                        break;

        /* empty string */
        if (size < 0)
                return -EINVAL;

        /* no space to terminate */
        if (size + 1 >= kbuffer_size)
                return -EOVERFLOW;

        kbuffer[size + 1] = 0;
        return 0;
}

static int
proc_copyout_string(char *ubuffer, int ubuffer_size,
                    const char *kbuffer, char *append)
{
        /* NB if 'append' != NULL, it's a single character to append to the
         * copied out string - usually "\n" for /proc entries and
         * "" (i.e. a terminating zero byte) for sysctl entries
         */
        int size = MIN(strlen(kbuffer), ubuffer_size);

        if (copy_to_user(ubuffer, kbuffer, size))
                return -EFAULT;

        if (append != NULL && size < ubuffer_size) {
                if (copy_to_user(ubuffer + size, append, 1))
                        return -EFAULT;

                size++;
        }

        return size;
}

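/*
 * Report the current value of the atomic kmem_alloc_used counter via
 * proc_doulongvec_minmax().  A local spl_ctl_table copy is pointed at a
 * stack variable so the shared table is never modified; the write branch
 * only consumes the buffer, the entry itself is read-only (0444).
 */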
#ifdef DEBUG_KMEM
static int
proc_domemused(struct ctl_table *table, int write,
               void __user *buffer, size_t *lenp, loff_t *ppos)
{
        int rc = 0;
        unsigned long min = 0, max = ~0, val;
        spl_ctl_table dummy = *table;

        dummy.data = &val;
        dummy.proc_handler = &proc_dointvec;
        dummy.extra1 = &min;
        dummy.extra2 = &max;

        if (write) {
                *ppos += *lenp;
        } else {
# ifdef HAVE_ATOMIC64_T
                val = atomic64_read((atomic64_t *)table->data);
# else
                val = atomic_read((atomic_t *)table->data);
# endif /* HAVE_ATOMIC64_T */
                rc = proc_doulongvec_minmax(&dummy, write, buffer, lenp, ppos);
        }

        return (rc);
}
#endif /* DEBUG_KMEM */

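/*
 * Walk spl_kmem_cache_list under spl_kmem_cache_sem and sum one statistic
 * across every cache whose skc_flags match the KMC_KMEM or KMC_VMEM bit
 * encoded in table->data; the same mask selects which field (total,
 * allocated, or maximum bytes) is accumulated.
 */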
static int
proc_doslab(struct ctl_table *table, int write,
            void __user *buffer, size_t *lenp, loff_t *ppos)
{
        int rc = 0;
        unsigned long min = 0, max = ~0, val = 0, mask;
        spl_ctl_table dummy = *table;
        spl_kmem_cache_t *skc;

        dummy.data = &val;
        dummy.proc_handler = &proc_dointvec;
        dummy.extra1 = &min;
        dummy.extra2 = &max;

        if (write) {
                *ppos += *lenp;
        } else {
                down_read(&spl_kmem_cache_sem);
                mask = (unsigned long)table->data;

                list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {

                        /* Only use slabs of the correct kmem/vmem type */
                        if (!(skc->skc_flags & mask))
                                continue;

                        /* Sum the specified field for selected slabs */
                        switch (mask & (KMC_TOTAL | KMC_ALLOC | KMC_MAX)) {
                        case KMC_TOTAL:
                                val += skc->skc_slab_size * skc->skc_slab_total;
                                break;
                        case KMC_ALLOC:
                                val += skc->skc_obj_size * skc->skc_obj_alloc;
                                break;
                        case KMC_MAX:
                                val += skc->skc_obj_size * skc->skc_obj_max;
                                break;
                        }
                }

                up_read(&spl_kmem_cache_sem);
                rc = proc_doulongvec_minmax(&dummy, write, buffer, lenp, ppos);
        }

        return (rc);
}

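/*
 * Custom handler for the spl_hostid tunable.  Reads format the hostid as
 * bare hex; writes parse it the same way, since the missing "0x" prefix
 * rules out the stock proc_doulongvec_minmax() helper.
 */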
static int
proc_dohostid(struct ctl_table *table, int write,
              void __user *buffer, size_t *lenp, loff_t *ppos)
{
        int len, rc = 0;
        char *end, str[32];

        if (write) {
                /* We can't use proc_doulongvec_minmax() in the write
                 * case here because the hostid, while a hex value, has
                 * no leading 0x, which confuses the helper function. */
                rc = proc_copyin_string(str, sizeof(str), buffer, *lenp);
                if (rc < 0)
                        return (rc);

                spl_hostid = simple_strtoul(str, &end, 16);
                if (str == end)
                        return (-EINVAL);

        } else {
                len = snprintf(str, sizeof(str), "%lx", spl_hostid);
                if (*ppos >= len)
                        rc = 0;
                else
                        rc = proc_copyout_string(buffer, *lenp,
                            str + *ppos, "\n");

                if (rc >= 0) {
                        *lenp = rc;
                        *ppos += rc;
                }
        }

        return (rc);
}

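/*
 * Column legend for /proc/spl/taskq{,-all}: act == active threads,
 * nthr == current threads, spwn == threads being spawned, maxt == maximum
 * threads, pri == priority, mina/maxa/cura == minimum/maximum/current
 * task entry allocations, flags == taskq flags (hex).
 */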
static void
taskq_seq_show_headers(struct seq_file *f)
{
        seq_printf(f, "%-25s %5s %5s %5s %5s %5s %5s %12s %5s %10s\n",
            "taskq", "act", "nthr", "spwn", "maxt", "pri",
            "mina", "maxa", "cura", "flags");
}

/* indices into the lheads array below */
#define LHEAD_PEND      0
#define LHEAD_PRIO      1
#define LHEAD_DELAY     2
#define LHEAD_WAIT      3
#define LHEAD_ACTIVE    4
#define LHEAD_SIZE      5

static unsigned int spl_max_show_tasks = 512;
module_param(spl_max_show_tasks, uint, 0644);
MODULE_PARM_DESC(spl_max_show_tasks, "Max number of tasks shown in taskq proc");

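/*
 * Emit one taskq's state: the summary line, the active list, and then the
 * pend, prio, delay, and wait lists, with tq_lock and the waitq lock held
 * as needed.  In non-"all" mode a taskq with nothing queued is skipped
 * entirely, and the pend/prio/delay/wait lists are each truncated after
 * spl_max_show_tasks entries (tunable at runtime through
 * /sys/module/spl/parameters/spl_max_show_tasks) to bound the output.
 */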
static int
taskq_seq_show_impl(struct seq_file *f, void *p, boolean_t allflag)
{
        taskq_t *tq = p;
        taskq_thread_t *tqt;
        wait_queue_t *wq;
        struct task_struct *tsk;
        taskq_ent_t *tqe;
        char name[100];
        struct list_head *lheads[LHEAD_SIZE], *lh;
        static char *list_names[LHEAD_SIZE] =
            {"pend", "prio", "delay", "wait", "active"};
        int i, j, have_lheads = 0;
        unsigned long wflags, flags;

        spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
        spin_lock_irqsave(&tq->tq_wait_waitq.lock, wflags);

        /* get the various lists and check whether they're empty */
        lheads[LHEAD_PEND] = &tq->tq_pend_list;
        lheads[LHEAD_PRIO] = &tq->tq_prio_list;
        lheads[LHEAD_DELAY] = &tq->tq_delay_list;
        lheads[LHEAD_WAIT] = &tq->tq_wait_waitq.task_list;
        lheads[LHEAD_ACTIVE] = &tq->tq_active_list;

        for (i = 0; i < LHEAD_SIZE; ++i) {
                if (list_empty(lheads[i]))
                        lheads[i] = NULL;
                else
                        ++have_lheads;
        }

        /* early return in non-"all" mode if lists are all empty */
        if (!allflag && !have_lheads) {
                spin_unlock_irqrestore(&tq->tq_wait_waitq.lock, wflags);
                spin_unlock_irqrestore(&tq->tq_lock, flags);
                return (0);
        }

        /* unlock the waitq quickly */
        if (!lheads[LHEAD_WAIT])
                spin_unlock_irqrestore(&tq->tq_wait_waitq.lock, wflags);

        /* show the base taskq contents */
        snprintf(name, sizeof(name), "%s/%d", tq->tq_name, tq->tq_instance);
        seq_printf(f, "%-25s ", name);
        seq_printf(f, "%5d %5d %5d %5d %5d %5d %12d %5d %10x\n",
            tq->tq_nactive, tq->tq_nthreads, tq->tq_nspawn,
            tq->tq_maxthreads, tq->tq_pri, tq->tq_minalloc, tq->tq_maxalloc,
            tq->tq_nalloc, tq->tq_flags);

        /* show the active list */
        if (lheads[LHEAD_ACTIVE]) {
                j = 0;
                list_for_each_entry(tqt, &tq->tq_active_list,
                    tqt_active_list) {
                        if (j == 0)
                                seq_printf(f, "\t%s:",
                                    list_names[LHEAD_ACTIVE]);
                        else if (j == 2) {
                                seq_printf(f, "\n\t ");
                                j = 0;
                        }
                        seq_printf(f, " [%d]%pf(%ps)",
                            tqt->tqt_thread->pid,
                            tqt->tqt_task->tqent_func,
                            tqt->tqt_task->tqent_arg);
                        ++j;
                }
                seq_printf(f, "\n");
        }

        for (i = LHEAD_PEND; i <= LHEAD_WAIT; ++i)
                if (lheads[i]) {
                        j = 0;
                        list_for_each(lh, lheads[i]) {
                                if (spl_max_show_tasks != 0 &&
                                    j >= spl_max_show_tasks) {
                                        seq_printf(f, "\n\t(truncated)");
                                        break;
                                }
                                /* show the wait waitq list */
                                if (i == LHEAD_WAIT) {
                                        wq = list_entry(lh, wait_queue_t,
                                            task_list);
                                        if (j == 0)
                                                seq_printf(f, "\t%s:",
                                                    list_names[i]);
                                        else if (j % 8 == 0)
                                                seq_printf(f, "\n\t ");

                                        tsk = wq->private;
                                        seq_printf(f, " %d", tsk->pid);
                                /* pend, prio and delay lists */
                                } else {
                                        tqe = list_entry(lh, taskq_ent_t,
                                            tqent_list);
                                        if (j == 0)
                                                seq_printf(f, "\t%s:",
                                                    list_names[i]);
                                        else if (j % 2 == 0)
                                                seq_printf(f, "\n\t ");

                                        seq_printf(f, " %pf(%ps)",
                                            tqe->tqent_func,
                                            tqe->tqent_arg);
                                }
                                ++j;
                        }
                        seq_printf(f, "\n");
                }
        if (lheads[LHEAD_WAIT])
                spin_unlock_irqrestore(&tq->tq_wait_waitq.lock, wflags);
        spin_unlock_irqrestore(&tq->tq_lock, flags);

        return (0);
}

static int
taskq_all_seq_show(struct seq_file *f, void *p)
{
        return (taskq_seq_show_impl(f, p, B_TRUE));
}

static int
taskq_seq_show(struct seq_file *f, void *p)
{
        return (taskq_seq_show_impl(f, p, B_FALSE));
}

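/*
 * seq_file iterators for the taskq files.  taskq_seq_start() takes
 * tq_list_sem for reading and walks tq_list to the requested offset,
 * printing the header row on the first call; taskq_seq_next() advances
 * to the following taskq or ends the sequence back at the list head.
 * The semaphore is dropped in taskq_seq_stop() below.
 */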
static void *
taskq_seq_start(struct seq_file *f, loff_t *pos)
{
        struct list_head *p;
        loff_t n = *pos;

        down_read(&tq_list_sem);
        if (!n)
                taskq_seq_show_headers(f);

        p = tq_list.next;
        while (n--) {
                p = p->next;
                if (p == &tq_list)
                        return (NULL);
        }

        return (list_entry(p, taskq_t, tq_taskqs));
}

static void *
taskq_seq_next(struct seq_file *f, void *p, loff_t *pos)
{
        taskq_t *tq = p;

        ++*pos;
        return ((tq->tq_taskqs.next == &tq_list) ?
            NULL : list_entry(tq->tq_taskqs.next, taskq_t, tq_taskqs));
}

static void
slab_seq_show_headers(struct seq_file *f)
{
        seq_printf(f,
            "--------------------- cache ----------"
            "--------------------------------------------- "
            "----- slab ------ "
            "---- object ----- "
            "--- emergency ---\n");
        seq_printf(f,
            "name                                  "
            "  flags      size     alloc slabsize  objsize "
            "total alloc   max "
            "total alloc   max "
            "dlock alloc   max\n");
}

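/*
 * Emit one row of /proc/spl/kmem/slab for the given SPL cache, sampling
 * all counters under skc_lock.  Caches backed by the Linux slab are not
 * reported here; see /proc/slabinfo for those.
 */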
static int
slab_seq_show(struct seq_file *f, void *p)
{
        spl_kmem_cache_t *skc = p;

        ASSERT(skc->skc_magic == SKC_MAGIC);

        /*
         * Backed by the Linux slab, see /proc/slabinfo.
         */
        if (skc->skc_flags & KMC_SLAB)
                return (0);

        spin_lock(&skc->skc_lock);
        seq_printf(f, "%-36s ", skc->skc_name);
        seq_printf(f, "0x%05lx %9lu %9lu %8u %8u "
            "%5lu %5lu %5lu %5lu %5lu %5lu %5lu %5lu %5lu\n",
            (long unsigned)skc->skc_flags,
            (long unsigned)(skc->skc_slab_size * skc->skc_slab_total),
            (long unsigned)(skc->skc_obj_size * skc->skc_obj_alloc),
            (unsigned)skc->skc_slab_size,
            (unsigned)skc->skc_obj_size,
            (long unsigned)skc->skc_slab_total,
            (long unsigned)skc->skc_slab_alloc,
            (long unsigned)skc->skc_slab_max,
            (long unsigned)skc->skc_obj_total,
            (long unsigned)skc->skc_obj_alloc,
            (long unsigned)skc->skc_obj_max,
            (long unsigned)skc->skc_obj_deadlock,
            (long unsigned)skc->skc_obj_emergency,
            (long unsigned)skc->skc_obj_emergency_max);

        spin_unlock(&skc->skc_lock);

        return (0);
}
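/*
 * seq_file iterators for /proc/spl/kmem/slab, mirroring the taskq
 * iterators above but walking spl_kmem_cache_list under
 * spl_kmem_cache_sem.
 */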
static void *
slab_seq_start(struct seq_file *f, loff_t *pos)
{
        struct list_head *p;
        loff_t n = *pos;

        down_read(&spl_kmem_cache_sem);
        if (!n)
                slab_seq_show_headers(f);

        p = spl_kmem_cache_list.next;
        while (n--) {
                p = p->next;
                if (p == &spl_kmem_cache_list)
                        return (NULL);
        }

        return (list_entry(p, spl_kmem_cache_t, skc_list));
}

static void *
slab_seq_next(struct seq_file *f, void *p, loff_t *pos)
{
        spl_kmem_cache_t *skc = p;

        ++*pos;
        return ((skc->skc_list.next == &spl_kmem_cache_list) ?
            NULL : list_entry(skc->skc_list.next, spl_kmem_cache_t,
            skc_list));
}

static void
slab_seq_stop(struct seq_file *f, void *v)
{
        up_read(&spl_kmem_cache_sem);
}

static struct seq_operations slab_seq_ops = {
        .show = slab_seq_show,
        .start = slab_seq_start,
        .next = slab_seq_next,
        .stop = slab_seq_stop,
};

static int
proc_slab_open(struct inode *inode, struct file *filp)
{
        return seq_open(filp, &slab_seq_ops);
}

static struct file_operations proc_slab_operations = {
        .open = proc_slab_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
};

static void
taskq_seq_stop(struct seq_file *f, void *v)
{
        up_read(&tq_list_sem);
}

static struct seq_operations taskq_all_seq_ops = {
        .show = taskq_all_seq_show,
        .start = taskq_seq_start,
        .next = taskq_seq_next,
        .stop = taskq_seq_stop,
};

static struct seq_operations taskq_seq_ops = {
        .show = taskq_seq_show,
        .start = taskq_seq_start,
        .next = taskq_seq_next,
        .stop = taskq_seq_stop,
};

static int
proc_taskq_all_open(struct inode *inode, struct file *filp)
{
        return seq_open(filp, &taskq_all_seq_ops);
}

static int
proc_taskq_open(struct inode *inode, struct file *filp)
{
        return seq_open(filp, &taskq_seq_ops);
}

static struct file_operations proc_taskq_all_operations = {
        .open = proc_taskq_all_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
};

static struct file_operations proc_taskq_operations = {
        .open = proc_taskq_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
};

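/*
 * kernel.spl.kmem.* sysctl entries.  All are read-only; the slab_*
 * entries encode their KMC_* selection mask in the otherwise unused
 * .data pointer, which proc_doslab() casts back to an unsigned long.
 */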
static struct ctl_table spl_kmem_table[] = {
#ifdef DEBUG_KMEM
        {
                .procname = "kmem_used",
                .data = &kmem_alloc_used,
# ifdef HAVE_ATOMIC64_T
                .maxlen = sizeof(atomic64_t),
# else
                .maxlen = sizeof(atomic_t),
# endif /* HAVE_ATOMIC64_T */
                .mode = 0444,
                .proc_handler = &proc_domemused,
        },
        {
                .procname = "kmem_max",
                .data = &kmem_alloc_max,
                .maxlen = sizeof(unsigned long),
                .extra1 = &table_min,
                .extra2 = &table_max,
                .mode = 0444,
                .proc_handler = &proc_doulongvec_minmax,
        },
#endif /* DEBUG_KMEM */
        {
                .procname = "slab_kmem_total",
                .data = (void *)(KMC_KMEM | KMC_TOTAL),
                .maxlen = sizeof(unsigned long),
                .extra1 = &table_min,
                .extra2 = &table_max,
                .mode = 0444,
                .proc_handler = &proc_doslab,
        },
        {
                .procname = "slab_kmem_alloc",
                .data = (void *)(KMC_KMEM | KMC_ALLOC),
                .maxlen = sizeof(unsigned long),
                .extra1 = &table_min,
                .extra2 = &table_max,
                .mode = 0444,
                .proc_handler = &proc_doslab,
        },
        {
                .procname = "slab_kmem_max",
                .data = (void *)(KMC_KMEM | KMC_MAX),
                .maxlen = sizeof(unsigned long),
                .extra1 = &table_min,
                .extra2 = &table_max,
                .mode = 0444,
                .proc_handler = &proc_doslab,
        },
        {
                .procname = "slab_vmem_total",
                .data = (void *)(KMC_VMEM | KMC_TOTAL),
                .maxlen = sizeof(unsigned long),
                .extra1 = &table_min,
                .extra2 = &table_max,
                .mode = 0444,
                .proc_handler = &proc_doslab,
        },
        {
                .procname = "slab_vmem_alloc",
                .data = (void *)(KMC_VMEM | KMC_ALLOC),
                .maxlen = sizeof(unsigned long),
                .extra1 = &table_min,
                .extra2 = &table_max,
                .mode = 0444,
                .proc_handler = &proc_doslab,
        },
        {
                .procname = "slab_vmem_max",
                .data = (void *)(KMC_VMEM | KMC_MAX),
                .maxlen = sizeof(unsigned long),
                .extra1 = &table_min,
                .extra2 = &table_max,
                .mode = 0444,
                .proc_handler = &proc_doslab,
        },
        {0},
};

static struct ctl_table spl_kstat_table[] = {
        {0},
};

static struct ctl_table spl_table[] = {
        /* NB No .strategy entries have been provided since
         * sysctl(8) prefers to go via /proc for portability.
         */
        {
                .procname = "version",
                .data = spl_version,
                .maxlen = sizeof(spl_version),
                .mode = 0444,
                .proc_handler = &proc_dostring,
        },
        {
                .procname = "hostid",
                .data = &spl_hostid,
                .maxlen = sizeof(unsigned long),
                .mode = 0644,
                .proc_handler = &proc_dohostid,
        },
        {
                .procname = "kmem",
                .mode = 0555,
                .child = spl_kmem_table,
        },
        {
                .procname = "kstat",
                .mode = 0555,
                .child = spl_kstat_table,
        },
        { 0 },
};

static struct ctl_table spl_dir[] = {
        {
                .procname = "spl",
                .mode = 0555,
                .child = spl_table,
        },
        { 0 }
};

static struct ctl_table spl_root[] = {
        {
#ifdef HAVE_CTL_NAME
                .ctl_name = CTL_KERN,
#endif
                .procname = "kernel",
                .mode = 0555,
                .child = spl_dir,
        },
        { 0 }
};

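/*
 * Register the kernel.spl sysctl tree and build the /proc/spl hierarchy
 * (taskq, taskq-all, kmem/slab, kstat).  Any failure unwinds everything
 * already created and returns -EUNATCH.
 */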
int
spl_proc_init(void)
{
        int rc = 0;

        spl_header = register_sysctl_table(spl_root);
        if (spl_header == NULL)
                return (-EUNATCH);

        proc_spl = proc_mkdir("spl", NULL);
        if (proc_spl == NULL) {
                rc = -EUNATCH;
                goto out;
        }

        proc_spl_taskq_all = proc_create_data("taskq-all", 0444,
            proc_spl, &proc_taskq_all_operations, NULL);
        if (proc_spl_taskq_all == NULL) {
                rc = -EUNATCH;
                goto out;
        }

        proc_spl_taskq = proc_create_data("taskq", 0444,
            proc_spl, &proc_taskq_operations, NULL);
        if (proc_spl_taskq == NULL) {
                rc = -EUNATCH;
                goto out;
        }

        proc_spl_kmem = proc_mkdir("kmem", proc_spl);
        if (proc_spl_kmem == NULL) {
                rc = -EUNATCH;
                goto out;
        }

        proc_spl_kmem_slab = proc_create_data("slab", 0444,
            proc_spl_kmem, &proc_slab_operations, NULL);
        if (proc_spl_kmem_slab == NULL) {
                rc = -EUNATCH;
                goto out;
        }

        proc_spl_kstat = proc_mkdir("kstat", proc_spl);
        if (proc_spl_kstat == NULL) {
                rc = -EUNATCH;
                goto out;
        }
out:
        if (rc) {
                remove_proc_entry("kstat", proc_spl);
                remove_proc_entry("slab", proc_spl_kmem);
                remove_proc_entry("kmem", proc_spl);
                remove_proc_entry("taskq-all", proc_spl);
                remove_proc_entry("taskq", proc_spl);
                remove_proc_entry("spl", NULL);
                unregister_sysctl_table(spl_header);
        }

        return (rc);
}

void
spl_proc_fini(void)
{
        remove_proc_entry("kstat", proc_spl);
        remove_proc_entry("slab", proc_spl_kmem);
        remove_proc_entry("kmem", proc_spl);
        remove_proc_entry("taskq-all", proc_spl);
        remove_proc_entry("taskq", proc_spl);
        remove_proc_entry("spl", NULL);

        ASSERT(spl_header != NULL);
        unregister_sysctl_table(spl_header);
}