/* module/spl/spl-proc.c — "Provide kstat for taskqs" (mirror_spl.git, git.proxmox.com) */
1 /*****************************************************************************\
2 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
3 * Copyright (C) 2007 The Regents of the University of California.
4 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
5 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
6 * UCRL-CODE-235197
7 *
8 * This file is part of the SPL, Solaris Porting Layer.
9 * For details, see <http://zfsonlinux.org/>.
10 *
11 * The SPL is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 *
16 * The SPL is distributed in the hope that it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 * for more details.
20 *
21 * You should have received a copy of the GNU General Public License along
22 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
23 *****************************************************************************
24 * Solaris Porting Layer (SPL) Proc Implementation.
25 \*****************************************************************************/
26
27 #include <sys/systeminfo.h>
28 #include <sys/kstat.h>
29 #include <sys/kmem.h>
30 #include <sys/kmem_cache.h>
31 #include <sys/vmem.h>
32 #include <sys/taskq.h>
33 #include <linux/ctype.h>
34 #include <linux/kmod.h>
35 #include <linux/seq_file.h>
36 #include <linux/proc_compat.h>
37 #include <linux/uaccess.h>
38 #include <linux/version.h>
39
/*
 * With the grsecurity CONSTIFY plugin on kernels >= 3.8, struct ctl_table
 * becomes read-only; __no_const keeps our writable stack copies legal.
 */
#if defined(CONSTIFY_PLUGIN) && LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)
typedef struct ctl_table __no_const spl_ctl_table;
#else
typedef struct ctl_table spl_ctl_table;
#endif

/* Wide-open bounds shared by the proc_doulongvec_minmax() sysctl entries */
static unsigned long table_min = 0;
static unsigned long table_max = ~0;

/* Handles for the sysctl tree and the /proc/spl/ entries created below */
static struct ctl_table_header *spl_header = NULL;
static struct proc_dir_entry *proc_spl = NULL;
static struct proc_dir_entry *proc_spl_kmem = NULL;
static struct proc_dir_entry *proc_spl_kmem_slab = NULL;
static struct proc_dir_entry *proc_spl_taskq_all = NULL;
static struct proc_dir_entry *proc_spl_taskq = NULL;
/* Not static: the kstat code registers its entries under this directory */
struct proc_dir_entry *proc_spl_kstat = NULL;
56
57 static int
58 proc_copyin_string(char *kbuffer, int kbuffer_size,
59 const char *ubuffer, int ubuffer_size)
60 {
61 int size;
62
63 if (ubuffer_size > kbuffer_size)
64 return -EOVERFLOW;
65
66 if (copy_from_user((void *)kbuffer, (void *)ubuffer, ubuffer_size))
67 return -EFAULT;
68
69 /* strip trailing whitespace */
70 size = strnlen(kbuffer, ubuffer_size);
71 while (size-- >= 0)
72 if (!isspace(kbuffer[size]))
73 break;
74
75 /* empty string */
76 if (size < 0)
77 return -EINVAL;
78
79 /* no space to terminate */
80 if (size == kbuffer_size)
81 return -EOVERFLOW;
82
83 kbuffer[size + 1] = 0;
84 return 0;
85 }
86
87 static int
88 proc_copyout_string(char *ubuffer, int ubuffer_size,
89 const char *kbuffer, char *append)
90 {
91 /* NB if 'append' != NULL, it's a single character to append to the
92 * copied out string - usually "\n", for /proc entries and
93 * (i.e. a terminating zero byte) for sysctl entries
94 */
95 int size = MIN(strlen(kbuffer), ubuffer_size);
96
97 if (copy_to_user(ubuffer, kbuffer, size))
98 return -EFAULT;
99
100 if (append != NULL && size < ubuffer_size) {
101 if (copy_to_user(ubuffer + size, append, 1))
102 return -EFAULT;
103
104 size++;
105 }
106
107 return size;
108 }
109
#ifdef DEBUG_KMEM
/*
 * sysctl handler exposing the atomic kmem accounting counter named by
 * table->data as an unsigned long.  Writes are accepted but ignored;
 * the counter is maintained by the allocator, not the user.
 */
static int
proc_domemused(struct ctl_table *table, int write,
    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int rc = 0;
	unsigned long min = 0, max = ~0, val;
	spl_ctl_table dummy = *table;

	/* point the helper at a local snapshot with wide-open bounds */
	dummy.data = &val;
	/* NOTE(review): this handler is never invoked —
	 * proc_doulongvec_minmax() is called directly below */
	dummy.proc_handler = &proc_dointvec;
	dummy.extra1 = &min;
	dummy.extra2 = &max;

	if (write) {
		/* read-only: consume the input and report success */
		*ppos += *lenp;
	} else {
# ifdef HAVE_ATOMIC64_T
		val = atomic64_read((atomic64_t *)table->data);
# else
		val = atomic_read((atomic_t *)table->data);
# endif /* HAVE_ATOMIC64_T */
		rc = proc_doulongvec_minmax(&dummy, write, buffer, lenp, ppos);
	}

	return (rc);
}
#endif /* DEBUG_KMEM */
138
139 static int
140 proc_doslab(struct ctl_table *table, int write,
141 void __user *buffer, size_t *lenp, loff_t *ppos)
142 {
143 int rc = 0;
144 unsigned long min = 0, max = ~0, val = 0, mask;
145 spl_ctl_table dummy = *table;
146 spl_kmem_cache_t *skc;
147
148 dummy.data = &val;
149 dummy.proc_handler = &proc_dointvec;
150 dummy.extra1 = &min;
151 dummy.extra2 = &max;
152
153 if (write) {
154 *ppos += *lenp;
155 } else {
156 down_read(&spl_kmem_cache_sem);
157 mask = (unsigned long)table->data;
158
159 list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {
160
161 /* Only use slabs of the correct kmem/vmem type */
162 if (!(skc->skc_flags & mask))
163 continue;
164
165 /* Sum the specified field for selected slabs */
166 switch (mask & (KMC_TOTAL | KMC_ALLOC | KMC_MAX)) {
167 case KMC_TOTAL:
168 val += skc->skc_slab_size * skc->skc_slab_total;
169 break;
170 case KMC_ALLOC:
171 val += skc->skc_obj_size * skc->skc_obj_alloc;
172 break;
173 case KMC_MAX:
174 val += skc->skc_obj_size * skc->skc_obj_max;
175 break;
176 }
177 }
178
179 up_read(&spl_kmem_cache_sem);
180 rc = proc_doulongvec_minmax(&dummy, write, buffer, lenp, ppos);
181 }
182
183 return (rc);
184 }
185
186 static int
187 proc_dohostid(struct ctl_table *table, int write,
188 void __user *buffer, size_t *lenp, loff_t *ppos)
189 {
190 int len, rc = 0;
191 char *end, str[32];
192
193 if (write) {
194 /* We can't use proc_doulongvec_minmax() in the write
195 * case here because hostid while a hex value has no
196 * leading 0x which confuses the helper function. */
197 rc = proc_copyin_string(str, sizeof(str), buffer, *lenp);
198 if (rc < 0)
199 return (rc);
200
201 spl_hostid = simple_strtoul(str, &end, 16);
202 if (str == end)
203 return (-EINVAL);
204
205 } else {
206 len = snprintf(str, sizeof(str), "%lx", spl_hostid);
207 if (*ppos >= len)
208 rc = 0;
209 else
210 rc = proc_copyout_string(buffer,*lenp,str+*ppos,"\n");
211
212 if (rc >= 0) {
213 *lenp = rc;
214 *ppos += rc;
215 }
216 }
217
218 return (rc);
219 }
220
/*
 * Print the column header row for the /proc/spl/taskq* files; the field
 * order matches the summary seq_printf() in taskq_seq_show_impl().
 */
static void
taskq_seq_show_headers(struct seq_file *f)
{
	seq_printf(f, "%-25s %5s %5s %5s %5s %5s %5s %12s %5s %10s\n",
	    "taskq", "act", "nthr", "spwn", "maxt", "pri",
	    "mina", "maxa", "cura", "flags");
}
228
/* indices into the lheads array below */
#define LHEAD_PEND 0
#define LHEAD_PRIO 1
#define LHEAD_DELAY 2
#define LHEAD_WAIT 3
#define LHEAD_ACTIVE 4
#define LHEAD_SIZE 5

/*
 * Emit one taskq's summary row plus, where non-empty, its pend/prio/
 * delay/wait/active lists.  With allflag == B_FALSE (the /proc/spl/taskq
 * view) taskqs whose lists are all empty are skipped entirely; with
 * B_TRUE (the taskq-all view) every taskq is printed.  Runs with
 * tq_list_sem read-held by the seq_file iterator.  Always returns 0.
 */
static int
taskq_seq_show_impl(struct seq_file *f, void *p, boolean_t allflag)
{
	taskq_t *tq = p;
	taskq_thread_t *tqt;
	wait_queue_t *wq;
	struct task_struct *tsk;
	taskq_ent_t *tqe;
	char name[100];
	struct list_head *lheads[LHEAD_SIZE], *lh;
	static char *list_names[LHEAD_SIZE] =
	    {"pend", "prio", "delay", "wait", "active" };
	int i, j, have_lheads = 0;
	unsigned long wflags, flags;

	/* lock order: tq_lock first, then the waitq's internal spinlock */
	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	spin_lock_irqsave(&tq->tq_wait_waitq.lock, wflags);

	/* get the various lists and check whether they're empty */
	lheads[LHEAD_PEND] = &tq->tq_pend_list;
	lheads[LHEAD_PRIO] = &tq->tq_prio_list;
	lheads[LHEAD_DELAY] = &tq->tq_delay_list;
	lheads[LHEAD_WAIT] = &tq->tq_wait_waitq.task_list;
	lheads[LHEAD_ACTIVE] = &tq->tq_active_list;

	for (i = 0; i < LHEAD_SIZE; ++i) {
		/* NULL marks an empty list so later code can skip it */
		if (list_empty(lheads[i]))
			lheads[i] = NULL;
		else
			++have_lheads;
	}

	/* early return in non-"all" mode if lists are all empty */
	if (!allflag && !have_lheads) {
		spin_unlock_irqrestore(&tq->tq_wait_waitq.lock, wflags);
		spin_unlock_irqrestore(&tq->tq_lock, flags);
		return (0);
	}

	/* unlock the waitq quickly */
	if (!lheads[LHEAD_WAIT])
		spin_unlock_irqrestore(&tq->tq_wait_waitq.lock, wflags);

	/* show the base taskq contents */
	snprintf(name, sizeof(name), "%s/%d", tq->tq_name, tq->tq_instance);
	seq_printf(f, "%-25s ", name);
	seq_printf(f, "%5d %5d %5d %5d %5d %5d %12d %5d %10x\n",
	    tq->tq_nactive, tq->tq_nthreads, tq->tq_nspawn,
	    tq->tq_maxthreads, tq->tq_pri, tq->tq_minalloc, tq->tq_maxalloc,
	    tq->tq_nalloc, tq->tq_flags);

	/* show the active list */
	if (lheads[LHEAD_ACTIVE]) {
		j = 0;
		/* wrap after two "[pid]func(arg)" entries per line */
		list_for_each_entry(tqt, &tq->tq_active_list, tqt_active_list) {
			if (j == 0)
				seq_printf(f, "\t%s:", list_names[LHEAD_ACTIVE]);
			else if (j == 2) {
				seq_printf(f, "\n\t ");
				j = 0;
			}
			seq_printf(f, " [%d]%pf(%ps)",
			    tqt->tqt_thread->pid,
			    tqt->tqt_task->tqent_func,
			    tqt->tqt_task->tqent_arg);
			++j;
		}
		seq_printf(f, "\n");
	}

	for (i = LHEAD_PEND; i <= LHEAD_WAIT; ++i)
		if (lheads[i]) {
			j = 0;
			list_for_each(lh, lheads[i]) {
				/* show the wait waitq list */
				if (i == LHEAD_WAIT) {
					wq = list_entry(lh, wait_queue_t, task_list);
					if (j == 0)
						seq_printf(f, "\t%s:",
						    list_names[i]);
					else if (j == 12) {
						seq_printf(f, "\n\t ");
						j = 0;
					}
					/* NOTE(review): assumes wq->private is
					 * the sleeping task_struct, as for
					 * default waitq entries — confirm no
					 * custom entries are queued here */
					tsk = wq->private;
					seq_printf(f, " %d", tsk->pid);
					/* pend, prio and delay lists */
				} else {
					tqe = list_entry(lh, taskq_ent_t,
					    tqent_list);
					if (j == 0)
						seq_printf(f, "\t%s:",
						    list_names[i]);
					else if (j == 2) {
						seq_printf(f, "\n\t ");
						j = 0;
					}
					seq_printf(f, " %pf(%ps)",
					    tqe->tqent_func,
					    tqe->tqent_arg);
				}
				++j;
			}
			seq_printf(f, "\n");
		}
	if (lheads[LHEAD_WAIT])
		spin_unlock_irqrestore(&tq->tq_wait_waitq.lock, wflags);
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	return (0);
}
348
/* seq_file show callback for /proc/spl/taskq-all: print every taskq */
static int
taskq_all_seq_show(struct seq_file *f, void *p)
{
	return (taskq_seq_show_impl(f, p, B_TRUE));
}

/* seq_file show callback for /proc/spl/taskq: skip idle taskqs */
static int
taskq_seq_show(struct seq_file *f, void *p)
{
	return (taskq_seq_show_impl(f, p, B_FALSE));
}
360
361 static void *
362 taskq_seq_start(struct seq_file *f, loff_t *pos)
363 {
364 struct list_head *p;
365 loff_t n = *pos;
366
367 down_read(&tq_list_sem);
368 if (!n)
369 taskq_seq_show_headers(f);
370
371 p = tq_list.next;
372 while (n--) {
373 p = p->next;
374 if (p == &tq_list)
375 return (NULL);
376 }
377
378 return (list_entry(p, taskq_t, tq_taskqs));
379 }
380
381 static void *
382 taskq_seq_next(struct seq_file *f, void *p, loff_t *pos)
383 {
384 taskq_t *tq = p;
385
386 ++*pos;
387 return ((tq->tq_taskqs.next == &tq_list) ?
388 NULL : list_entry(tq->tq_taskqs.next, taskq_t, tq_taskqs));
389 }
390
/*
 * Print the two header rows for /proc/spl/kmem/slab; the column layout
 * matches the per-cache seq_printf() in slab_seq_show() below.
 */
static void
slab_seq_show_headers(struct seq_file *f)
{
	seq_printf(f,
	    "--------------------- cache ----------"
	    "--------------------------------------------- "
	    "----- slab ------ "
	    "---- object ----- "
	    "--- emergency ---\n");
	seq_printf(f,
	    "name "
	    " flags size alloc slabsize objsize "
	    "total alloc max "
	    "total alloc max "
	    "dlock alloc max\n");
}
407
408 static int
409 slab_seq_show(struct seq_file *f, void *p)
410 {
411 spl_kmem_cache_t *skc = p;
412
413 ASSERT(skc->skc_magic == SKC_MAGIC);
414
415 /*
416 * Backed by Linux slab see /proc/slabinfo.
417 */
418 if (skc->skc_flags & KMC_SLAB)
419 return (0);
420
421 spin_lock(&skc->skc_lock);
422 seq_printf(f, "%-36s ", skc->skc_name);
423 seq_printf(f, "0x%05lx %9lu %9lu %8u %8u "
424 "%5lu %5lu %5lu %5lu %5lu %5lu %5lu %5lu %5lu\n",
425 (long unsigned)skc->skc_flags,
426 (long unsigned)(skc->skc_slab_size * skc->skc_slab_total),
427 (long unsigned)(skc->skc_obj_size * skc->skc_obj_alloc),
428 (unsigned)skc->skc_slab_size,
429 (unsigned)skc->skc_obj_size,
430 (long unsigned)skc->skc_slab_total,
431 (long unsigned)skc->skc_slab_alloc,
432 (long unsigned)skc->skc_slab_max,
433 (long unsigned)skc->skc_obj_total,
434 (long unsigned)skc->skc_obj_alloc,
435 (long unsigned)skc->skc_obj_max,
436 (long unsigned)skc->skc_obj_deadlock,
437 (long unsigned)skc->skc_obj_emergency,
438 (long unsigned)skc->skc_obj_emergency_max);
439
440 spin_unlock(&skc->skc_lock);
441
442 return 0;
443 }
444
445 static void *
446 slab_seq_start(struct seq_file *f, loff_t *pos)
447 {
448 struct list_head *p;
449 loff_t n = *pos;
450
451 down_read(&spl_kmem_cache_sem);
452 if (!n)
453 slab_seq_show_headers(f);
454
455 p = spl_kmem_cache_list.next;
456 while (n--) {
457 p = p->next;
458 if (p == &spl_kmem_cache_list)
459 return (NULL);
460 }
461
462 return (list_entry(p, spl_kmem_cache_t, skc_list));
463 }
464
465 static void *
466 slab_seq_next(struct seq_file *f, void *p, loff_t *pos)
467 {
468 spl_kmem_cache_t *skc = p;
469
470 ++*pos;
471 return ((skc->skc_list.next == &spl_kmem_cache_list) ?
472 NULL : list_entry(skc->skc_list.next,spl_kmem_cache_t,skc_list));
473 }
474
/* seq_file stop callback: drop the lock taken in slab_seq_start() */
static void
slab_seq_stop(struct seq_file *f, void *v)
{
	up_read(&spl_kmem_cache_sem);
}

/* Iterator over spl_kmem_cache_list for /proc/spl/kmem/slab */
static struct seq_operations slab_seq_ops = {
	.show = slab_seq_show,
	.start = slab_seq_start,
	.next = slab_seq_next,
	.stop = slab_seq_stop,
};

/* open(2) handler wiring the slab iterator into the seq_file core */
static int
proc_slab_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &slab_seq_ops);
}

/* file_operations for /proc/spl/kmem/slab (read-only seq_file) */
static struct file_operations proc_slab_operations = {
	.open = proc_slab_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
500
/* seq_file stop callback: drop the lock taken in taskq_seq_start() */
static void
taskq_seq_stop(struct seq_file *f, void *v)
{
	up_read(&tq_list_sem);
}

/* Iterator for /proc/spl/taskq-all (prints every taskq) */
static struct seq_operations taskq_all_seq_ops = {
	.show = taskq_all_seq_show,
	.start = taskq_seq_start,
	.next = taskq_seq_next,
	.stop = taskq_seq_stop,
};

/* Iterator for /proc/spl/taskq (skips taskqs with all lists empty) */
static struct seq_operations taskq_seq_ops = {
	.show = taskq_seq_show,
	.start = taskq_seq_start,
	.next = taskq_seq_next,
	.stop = taskq_seq_stop,
};

/* open(2) handler for /proc/spl/taskq-all */
static int
proc_taskq_all_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &taskq_all_seq_ops);
}

/* open(2) handler for /proc/spl/taskq */
static int
proc_taskq_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &taskq_seq_ops);
}

/* file_operations for /proc/spl/taskq-all (read-only seq_file) */
static struct file_operations proc_taskq_all_operations = {
	.open = proc_taskq_all_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

/* file_operations for /proc/spl/taskq (read-only seq_file) */
static struct file_operations proc_taskq_operations = {
	.open = proc_taskq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
546
/*
 * /proc/sys/kernel/spl/kmem/ entries.  Each slab_* entry encodes the
 * cache type (KMC_KMEM/KMC_VMEM) and statistic (KMC_TOTAL/KMC_ALLOC/
 * KMC_MAX) to report as flag bits stashed in .data; proc_doslab()
 * decodes them.  Table is zero-entry terminated.
 */
static struct ctl_table spl_kmem_table[] = {
#ifdef DEBUG_KMEM
	{
		.procname = "kmem_used",
		.data = &kmem_alloc_used,
# ifdef HAVE_ATOMIC64_T
		.maxlen = sizeof(atomic64_t),
# else
		.maxlen = sizeof(atomic_t),
# endif /* HAVE_ATOMIC64_T */
		.mode = 0444,
		.proc_handler = &proc_domemused,
	},
	{
		.procname = "kmem_max",
		.data = &kmem_alloc_max,
		.maxlen = sizeof(unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doulongvec_minmax,
	},
#endif /* DEBUG_KMEM */
	{
		.procname = "slab_kmem_total",
		.data = (void *)(KMC_KMEM | KMC_TOTAL),
		.maxlen = sizeof(unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doslab,
	},
	{
		.procname = "slab_kmem_alloc",
		.data = (void *)(KMC_KMEM | KMC_ALLOC),
		.maxlen = sizeof(unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doslab,
	},
	{
		.procname = "slab_kmem_max",
		.data = (void *)(KMC_KMEM | KMC_MAX),
		.maxlen = sizeof(unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doslab,
	},
	{
		.procname = "slab_vmem_total",
		.data = (void *)(KMC_VMEM | KMC_TOTAL),
		.maxlen = sizeof(unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doslab,
	},
	{
		.procname = "slab_vmem_alloc",
		.data = (void *)(KMC_VMEM | KMC_ALLOC),
		.maxlen = sizeof(unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doslab,
	},
	{
		.procname = "slab_vmem_max",
		.data = (void *)(KMC_VMEM | KMC_MAX),
		.maxlen = sizeof(unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doslab,
	},
	{0},
};
626
/* Empty table: /proc/sys/kernel/spl/kstat exists as a directory only */
static struct ctl_table spl_kstat_table[] = {
	{0},
};
630
/* Entries directly under /proc/sys/kernel/spl/ */
static struct ctl_table spl_table[] = {
	/* NB No .strategy entries have been provided since
	 * sysctl(8) prefers to go via /proc for portability.
	 */
	{
		.procname = "version",
		.data = spl_version,
		.maxlen = sizeof(spl_version),
		.mode = 0444,
		.proc_handler = &proc_dostring,
	},
	{
		/* hex hostid; custom handler since the value has no 0x prefix */
		.procname = "hostid",
		.data = &spl_hostid,
		.maxlen = sizeof(unsigned long),
		.mode = 0644,
		.proc_handler = &proc_dohostid,
	},
	{
		.procname = "kmem",
		.mode = 0555,
		.child = spl_kmem_table,
	},
	{
		.procname = "kstat",
		.mode = 0555,
		.child = spl_kstat_table,
	},
	{ 0 },
};
661
/* Parent directory entry: /proc/sys/kernel/spl */
static struct ctl_table spl_dir[] = {
	{
		.procname = "spl",
		.mode = 0555,
		.child = spl_table,
	},
	{ 0 }
};

/* Root of the tree, anchored under the existing "kernel" sysctl dir */
static struct ctl_table spl_root[] = {
	{
#ifdef HAVE_CTL_NAME
	.ctl_name = CTL_KERN,
#endif
	.procname = "kernel",
	.mode = 0555,
	.child = spl_dir,
	},
	{ 0 }
};
682
683 int
684 spl_proc_init(void)
685 {
686 int rc = 0;
687
688 spl_header = register_sysctl_table(spl_root);
689 if (spl_header == NULL)
690 return (-EUNATCH);
691
692 proc_spl = proc_mkdir("spl", NULL);
693 if (proc_spl == NULL) {
694 rc = -EUNATCH;
695 goto out;
696 }
697
698 proc_spl_taskq_all = proc_create_data("taskq-all", 0444,
699 proc_spl, &proc_taskq_all_operations, NULL);
700 if (proc_spl_taskq_all == NULL) {
701 rc = -EUNATCH;
702 goto out;
703 }
704
705 proc_spl_taskq = proc_create_data("taskq", 0444,
706 proc_spl, &proc_taskq_operations, NULL);
707 if (proc_spl_taskq == NULL) {
708 rc = -EUNATCH;
709 goto out;
710 }
711
712 proc_spl_kmem = proc_mkdir("kmem", proc_spl);
713 if (proc_spl_kmem == NULL) {
714 rc = -EUNATCH;
715 goto out;
716 }
717
718 proc_spl_kmem_slab = proc_create_data("slab", 0444,
719 proc_spl_kmem, &proc_slab_operations, NULL);
720 if (proc_spl_kmem_slab == NULL) {
721 rc = -EUNATCH;
722 goto out;
723 }
724
725 proc_spl_kstat = proc_mkdir("kstat", proc_spl);
726 if (proc_spl_kstat == NULL) {
727 rc = -EUNATCH;
728 goto out;
729 }
730 out:
731 if (rc) {
732 remove_proc_entry("kstat", proc_spl);
733 remove_proc_entry("slab", proc_spl_kmem);
734 remove_proc_entry("kmem", proc_spl);
735 remove_proc_entry("taskq-all", proc_spl);
736 remove_proc_entry("taskq", proc_spl);
737 remove_proc_entry("spl", NULL);
738 unregister_sysctl_table(spl_header);
739 }
740
741 return (rc);
742 }
743
/*
 * Tear down every /proc/spl/ entry (leaf-first) and unregister the
 * sysctl tree.  Assumes spl_proc_init() previously succeeded, as the
 * ASSERT on spl_header enforces for the sysctl half.
 */
void
spl_proc_fini(void)
{
	remove_proc_entry("kstat", proc_spl);
	remove_proc_entry("slab", proc_spl_kmem);
	remove_proc_entry("kmem", proc_spl);
	remove_proc_entry("taskq-all", proc_spl);
	remove_proc_entry("taskq", proc_spl);
	remove_proc_entry("spl", NULL);

	ASSERT(spl_header != NULL);
	unregister_sysctl_table(spl_header);
}