/*
 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2007 The Regents of the University of California.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 * UCRL-CODE-235197
 *
 * This file is part of the SPL, Solaris Porting Layer.
 * For details, see <http://zfsonlinux.org/>.
 *
 * The SPL is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * The SPL is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Solaris Porting Layer (SPL) Proc Implementation.
 */

#include <sys/systeminfo.h>
#include <sys/kstat.h>
#include <sys/kmem.h>
#include <sys/kmem_cache.h>
#include <sys/vmem.h>
#include <sys/taskq.h>
#include <linux/ctype.h>
#include <linux/kmod.h>
#include <linux/seq_file.h>
#include <linux/proc_compat.h>
#include <linux/uaccess.h>
#include <linux/version.h>

#if defined(CONSTIFY_PLUGIN) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
typedef struct ctl_table __no_const spl_ctl_table;
#else
typedef struct ctl_table spl_ctl_table;
#endif
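
/*
 * With the grsecurity/PaX "constify" gcc plugin, function-pointer-bearing
 * structures such as struct ctl_table are made read-only.  The __no_const
 * typedef keeps the stack copies ("dummy" in the handlers below) writable.
 */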

static unsigned long table_min = 0;
static unsigned long table_max = ~0;

static struct ctl_table_header *spl_header = NULL;
static struct proc_dir_entry *proc_spl = NULL;
static struct proc_dir_entry *proc_spl_kmem = NULL;
static struct proc_dir_entry *proc_spl_kmem_slab = NULL;
static struct proc_dir_entry *proc_spl_taskq_all = NULL;
static struct proc_dir_entry *proc_spl_taskq = NULL;
struct proc_dir_entry *proc_spl_kstat = NULL;

static int
proc_copyin_string(char *kbuffer, int kbuffer_size, const char *ubuffer,
    int ubuffer_size)
{
	int size;

	if (ubuffer_size > kbuffer_size)
		return (-EOVERFLOW);

	if (copy_from_user((void *)kbuffer, (void *)ubuffer, ubuffer_size))
		return (-EFAULT);

	/* strip trailing whitespace */
	size = strnlen(kbuffer, ubuffer_size);
	while (size-- >= 0)
		if (!isspace(kbuffer[size]))
			break;

	/* empty string */
	if (size < 0)
		return (-EINVAL);

	/* no space to terminate */
	if (size == kbuffer_size)
		return (-EOVERFLOW);

	kbuffer[size + 1] = 0;
	return (0);
}
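
/*
 * Illustrative example: copying in the user string "deadbeef\n" leaves
 * "deadbeef\0" in kbuffer and returns 0, while an all-whitespace buffer
 * returns -EINVAL after the trailing-whitespace strip empties it.
 */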

static int
proc_copyout_string(char *ubuffer, int ubuffer_size,
    const char *kbuffer, char *append)
{
	/*
	 * NB if 'append' != NULL, it's a single character to append to the
	 * copied out string - usually "\n", for /proc entries and "" (i.e. a
	 * terminating zero byte) for sysctl entries
	 */
	int size = MIN(strlen(kbuffer), ubuffer_size);

	if (copy_to_user(ubuffer, kbuffer, size))
		return (-EFAULT);

	if (append != NULL && size < ubuffer_size) {
		if (copy_to_user(ubuffer + size, append, 1))
			return (-EFAULT);

		size++;
	}

	return (size);
}

#ifdef DEBUG_KMEM
static int
proc_domemused(struct ctl_table *table, int write,
    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int rc = 0;
	unsigned long min = 0, max = ~0, val;
	spl_ctl_table dummy = *table;

	dummy.data = &val;
	dummy.proc_handler = &proc_dointvec;
	dummy.extra1 = &min;
	dummy.extra2 = &max;

	if (write) {
		*ppos += *lenp;
	} else {
#ifdef HAVE_ATOMIC64_T
		val = atomic64_read((atomic64_t *)table->data);
#else
		val = atomic_read((atomic_t *)table->data);
#endif /* HAVE_ATOMIC64_T */
		rc = proc_doulongvec_minmax(&dummy, write, buffer, lenp, ppos);
	}

	return (rc);
}
#endif /* DEBUG_KMEM */

static int
proc_doslab(struct ctl_table *table, int write,
    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int rc = 0;
	unsigned long min = 0, max = ~0, val = 0, mask;
	spl_ctl_table dummy = *table;
	spl_kmem_cache_t *skc;

	dummy.data = &val;
	dummy.proc_handler = &proc_dointvec;
	dummy.extra1 = &min;
	dummy.extra2 = &max;

	if (write) {
		*ppos += *lenp;
	} else {
		down_read(&spl_kmem_cache_sem);
		mask = (unsigned long)table->data;

		list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {

			/* Only use slabs of the correct kmem/vmem type */
			if (!(skc->skc_flags & mask))
				continue;

			/* Sum the specified field for selected slabs */
			switch (mask & (KMC_TOTAL | KMC_ALLOC | KMC_MAX)) {
			case KMC_TOTAL:
				val += skc->skc_slab_size * skc->skc_slab_total;
				break;
			case KMC_ALLOC:
				val += skc->skc_obj_size * skc->skc_obj_alloc;
				break;
			case KMC_MAX:
				val += skc->skc_obj_size * skc->skc_obj_max;
				break;
			}
		}

		up_read(&spl_kmem_cache_sem);
		rc = proc_doulongvec_minmax(&dummy, write, buffer, lenp, ppos);
	}

	return (rc);
}

static int
proc_dohostid(struct ctl_table *table, int write,
    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int len, rc = 0;
	char *end, str[32];

	if (write) {
		/*
		 * We can't use proc_doulongvec_minmax() in the write
		 * case here because the hostid, while a hex value, has
		 * no leading 0x, which confuses the helper function.
		 */
		rc = proc_copyin_string(str, sizeof (str), buffer, *lenp);
		if (rc < 0)
			return (rc);

		spl_hostid = simple_strtoul(str, &end, 16);
		if (str == end)
			return (-EINVAL);

	} else {
		len = snprintf(str, sizeof (str), "%lx",
		    (unsigned long) zone_get_hostid(NULL));
		if (*ppos >= len)
			rc = 0;
		else
			rc = proc_copyout_string(buffer,
			    *lenp, str + *ppos, "\n");

		if (rc >= 0) {
			*lenp = rc;
			*ppos += rc;
		}
	}

	return (rc);
}
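
/*
 * Usage sketch: given the sysctl tree registered below, the hostid is
 * exposed as /proc/sys/kernel/spl/hostid and is written as bare hex
 * (no 0x prefix), e.g.:
 *
 *   # echo 1a2b3c4d > /proc/sys/kernel/spl/hostid
 *   $ cat /proc/sys/kernel/spl/hostid
 */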

static void
taskq_seq_show_headers(struct seq_file *f)
{
	seq_printf(f, "%-25s %5s %5s %5s %5s %5s %5s %12s %5s %10s\n",
	    "taskq", "act", "nthr", "spwn", "maxt", "pri",
	    "mina", "maxa", "cura", "flags");
}

/* indices into the lheads array below */
#define	LHEAD_PEND	0
#define	LHEAD_PRIO	1
#define	LHEAD_DELAY	2
#define	LHEAD_WAIT	3
#define	LHEAD_ACTIVE	4
#define	LHEAD_SIZE	5

static unsigned int spl_max_show_tasks = 512;
module_param(spl_max_show_tasks, uint, 0644);
MODULE_PARM_DESC(spl_max_show_tasks, "Max number of tasks shown in taskq proc");
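
/*
 * Being a 0644 module parameter, spl_max_show_tasks may be set at module
 * load time or tuned at runtime through sysfs, e.g. (path assumes the
 * module name "spl"):
 *
 *   # echo 1024 > /sys/module/spl/parameters/spl_max_show_tasks
 */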

static int
taskq_seq_show_impl(struct seq_file *f, void *p, boolean_t allflag)
{
	taskq_t *tq = p;
	taskq_thread_t *tqt;
	spl_wait_queue_entry_t *wq;
	struct task_struct *tsk;
	taskq_ent_t *tqe;
	char name[100];
	struct list_head *lheads[LHEAD_SIZE], *lh;
	static char *list_names[LHEAD_SIZE] =
	    {"pend", "prio", "delay", "wait", "active" };
	int i, j, have_lheads = 0;
	unsigned long wflags, flags;

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	spin_lock_irqsave(&tq->tq_wait_waitq.lock, wflags);

	/* get the various lists and check whether they're empty */
	lheads[LHEAD_PEND] = &tq->tq_pend_list;
	lheads[LHEAD_PRIO] = &tq->tq_prio_list;
	lheads[LHEAD_DELAY] = &tq->tq_delay_list;
#ifdef HAVE_WAIT_QUEUE_HEAD_ENTRY
	lheads[LHEAD_WAIT] = &tq->tq_wait_waitq.head;
#else
	lheads[LHEAD_WAIT] = &tq->tq_wait_waitq.task_list;
#endif
	lheads[LHEAD_ACTIVE] = &tq->tq_active_list;

	for (i = 0; i < LHEAD_SIZE; ++i) {
		if (list_empty(lheads[i]))
			lheads[i] = NULL;
		else
			++have_lheads;
	}

	/* early return in non-"all" mode if lists are all empty */
	if (!allflag && !have_lheads) {
		spin_unlock_irqrestore(&tq->tq_wait_waitq.lock, wflags);
		spin_unlock_irqrestore(&tq->tq_lock, flags);
		return (0);
	}

	/* unlock the waitq quickly */
	if (!lheads[LHEAD_WAIT])
		spin_unlock_irqrestore(&tq->tq_wait_waitq.lock, wflags);

	/* show the base taskq contents */
	snprintf(name, sizeof (name), "%s/%d", tq->tq_name, tq->tq_instance);
	seq_printf(f, "%-25s ", name);
	seq_printf(f, "%5d %5d %5d %5d %5d %5d %12d %5d %10x\n",
	    tq->tq_nactive, tq->tq_nthreads, tq->tq_nspawn,
	    tq->tq_maxthreads, tq->tq_pri, tq->tq_minalloc, tq->tq_maxalloc,
	    tq->tq_nalloc, tq->tq_flags);

	/* show the active list */
	if (lheads[LHEAD_ACTIVE]) {
		j = 0;
		list_for_each_entry(tqt, &tq->tq_active_list, tqt_active_list) {
			if (j == 0)
				seq_printf(f, "\t%s:",
				    list_names[LHEAD_ACTIVE]);
			else if (j == 2) {
				seq_printf(f, "\n\t       ");
				j = 0;
			}
			seq_printf(f, " [%d]%pf(%ps)",
			    tqt->tqt_thread->pid,
			    tqt->tqt_task->tqent_func,
			    tqt->tqt_task->tqent_arg);
			++j;
		}
		seq_printf(f, "\n");
	}

	for (i = LHEAD_PEND; i <= LHEAD_WAIT; ++i)
		if (lheads[i]) {
			j = 0;
			list_for_each(lh, lheads[i]) {
				if (spl_max_show_tasks != 0 &&
				    j >= spl_max_show_tasks) {
					seq_printf(f, "\n\t(truncated)");
					break;
				}
				/* show the wait waitq list */
				if (i == LHEAD_WAIT) {
#ifdef HAVE_WAIT_QUEUE_HEAD_ENTRY
					wq = list_entry(lh,
					    spl_wait_queue_entry_t, entry);
#else
					wq = list_entry(lh,
					    spl_wait_queue_entry_t, task_list);
#endif
					if (j == 0)
						seq_printf(f, "\t%s:",
						    list_names[i]);
					else if (j % 8 == 0)
						seq_printf(f, "\n\t     ");

					tsk = wq->private;
					seq_printf(f, " %d", tsk->pid);
				/* pend, prio and delay lists */
				} else {
					tqe = list_entry(lh, taskq_ent_t,
					    tqent_list);
					if (j == 0)
						seq_printf(f, "\t%s:",
						    list_names[i]);
					else if (j % 2 == 0)
						seq_printf(f, "\n\t     ");

					seq_printf(f, " %pf(%ps)",
					    tqe->tqent_func,
					    tqe->tqent_arg);
				}
				++j;
			}
			seq_printf(f, "\n");
		}
	if (lheads[LHEAD_WAIT])
		spin_unlock_irqrestore(&tq->tq_wait_waitq.lock, wflags);
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	return (0);
}
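
/*
 * Illustrative output for one taskq (the column layout follows the
 * seq_printf() calls above; the taskq name, PID, function and counts
 * below are invented):
 *
 * spl_system_taskq/0            0    64     0    64   100     50   2147483647     0        a80
 *	active: [1234]example_func(example_arg)
 *	pend: example_func(example_arg)
 */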

static int
taskq_all_seq_show(struct seq_file *f, void *p)
{
	return (taskq_seq_show_impl(f, p, B_TRUE));
}

static int
taskq_seq_show(struct seq_file *f, void *p)
{
	return (taskq_seq_show_impl(f, p, B_FALSE));
}

static void *
taskq_seq_start(struct seq_file *f, loff_t *pos)
{
	struct list_head *p;
	loff_t n = *pos;

	down_read(&tq_list_sem);
	if (!n)
		taskq_seq_show_headers(f);

	p = tq_list.next;
	while (n--) {
		p = p->next;
		if (p == &tq_list)
			return (NULL);
	}

	return (list_entry(p, taskq_t, tq_taskqs));
}

static void *
taskq_seq_next(struct seq_file *f, void *p, loff_t *pos)
{
	taskq_t *tq = p;

	++*pos;
	return ((tq->tq_taskqs.next == &tq_list) ?
	    NULL : list_entry(tq->tq_taskqs.next, taskq_t, tq_taskqs));
}

static void
slab_seq_show_headers(struct seq_file *f)
{
	seq_printf(f,
	    "--------------------- cache ----------"
	    "---------------------------------------------  "
	    "----- slab ------  "
	    "---- object -----  "
	    "--- emergency ---\n");
	seq_printf(f,
	    "name                                  "
	    "  flags      size     alloc slabsize  objsize  "
	    "total alloc max  "
	    "total alloc max  "
	    "dlock alloc max\n");
}

static int
slab_seq_show(struct seq_file *f, void *p)
{
	spl_kmem_cache_t *skc = p;

	ASSERT(skc->skc_magic == SKC_MAGIC);

	/*
	 * Backed by the Linux slab allocator; see /proc/slabinfo.
	 */
	if (skc->skc_flags & KMC_SLAB)
		return (0);

	spin_lock(&skc->skc_lock);
	seq_printf(f, "%-36s  ", skc->skc_name);
	seq_printf(f, "0x%05lx %9lu %9lu %8u %8u "
	    "%5lu %5lu %5lu %5lu %5lu %5lu %5lu %5lu %5lu\n",
	    (long unsigned)skc->skc_flags,
	    (long unsigned)(skc->skc_slab_size * skc->skc_slab_total),
	    (long unsigned)(skc->skc_obj_size * skc->skc_obj_alloc),
	    (unsigned)skc->skc_slab_size,
	    (unsigned)skc->skc_obj_size,
	    (long unsigned)skc->skc_slab_total,
	    (long unsigned)skc->skc_slab_alloc,
	    (long unsigned)skc->skc_slab_max,
	    (long unsigned)skc->skc_obj_total,
	    (long unsigned)skc->skc_obj_alloc,
	    (long unsigned)skc->skc_obj_max,
	    (long unsigned)skc->skc_obj_deadlock,
	    (long unsigned)skc->skc_obj_emergency,
	    (long unsigned)skc->skc_obj_emergency_max);

	spin_unlock(&skc->skc_lock);

	return (0);
}

static void *
slab_seq_start(struct seq_file *f, loff_t *pos)
{
	struct list_head *p;
	loff_t n = *pos;

	down_read(&spl_kmem_cache_sem);
	if (!n)
		slab_seq_show_headers(f);

	p = spl_kmem_cache_list.next;
	while (n--) {
		p = p->next;
		if (p == &spl_kmem_cache_list)
			return (NULL);
	}

	return (list_entry(p, spl_kmem_cache_t, skc_list));
}

static void *
slab_seq_next(struct seq_file *f, void *p, loff_t *pos)
{
	spl_kmem_cache_t *skc = p;

	++*pos;
	return ((skc->skc_list.next == &spl_kmem_cache_list) ?
	    NULL : list_entry(skc->skc_list.next, spl_kmem_cache_t, skc_list));
}

static void
slab_seq_stop(struct seq_file *f, void *v)
{
	up_read(&spl_kmem_cache_sem);
}

static struct seq_operations slab_seq_ops = {
	.show = slab_seq_show,
	.start = slab_seq_start,
	.next = slab_seq_next,
	.stop = slab_seq_stop,
};
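
/*
 * Locking note: slab_seq_start() takes spl_kmem_cache_sem for reading and
 * slab_seq_stop() releases it, so the cache list cannot change while a
 * sequential read of the slab file is in progress.
 */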

static int
proc_slab_open(struct inode *inode, struct file *filp)
{
	return (seq_open(filp, &slab_seq_ops));
}

static struct file_operations proc_slab_operations = {
	.open = proc_slab_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static void
taskq_seq_stop(struct seq_file *f, void *v)
{
	up_read(&tq_list_sem);
}

static struct seq_operations taskq_all_seq_ops = {
	.show = taskq_all_seq_show,
	.start = taskq_seq_start,
	.next = taskq_seq_next,
	.stop = taskq_seq_stop,
};

static struct seq_operations taskq_seq_ops = {
	.show = taskq_seq_show,
	.start = taskq_seq_start,
	.next = taskq_seq_next,
	.stop = taskq_seq_stop,
};

static int
proc_taskq_all_open(struct inode *inode, struct file *filp)
{
	return (seq_open(filp, &taskq_all_seq_ops));
}

static int
proc_taskq_open(struct inode *inode, struct file *filp)
{
	return (seq_open(filp, &taskq_seq_ops));
}

static struct file_operations proc_taskq_all_operations = {
	.open = proc_taskq_all_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static struct file_operations proc_taskq_operations = {
	.open = proc_taskq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static struct ctl_table spl_kmem_table[] = {
#ifdef DEBUG_KMEM
	{
		.procname = "kmem_used",
		.data = &kmem_alloc_used,
#ifdef HAVE_ATOMIC64_T
		.maxlen = sizeof (atomic64_t),
#else
		.maxlen = sizeof (atomic_t),
#endif /* HAVE_ATOMIC64_T */
		.mode = 0444,
		.proc_handler = &proc_domemused,
	},
	{
		.procname = "kmem_max",
		.data = &kmem_alloc_max,
		.maxlen = sizeof (unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doulongvec_minmax,
	},
#endif /* DEBUG_KMEM */
	{
		.procname = "slab_kmem_total",
		.data = (void *)(KMC_KMEM | KMC_TOTAL),
		.maxlen = sizeof (unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doslab,
	},
	{
		.procname = "slab_kmem_alloc",
		.data = (void *)(KMC_KMEM | KMC_ALLOC),
		.maxlen = sizeof (unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doslab,
	},
	{
		.procname = "slab_kmem_max",
		.data = (void *)(KMC_KMEM | KMC_MAX),
		.maxlen = sizeof (unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doslab,
	},
	{
		.procname = "slab_vmem_total",
		.data = (void *)(KMC_VMEM | KMC_TOTAL),
		.maxlen = sizeof (unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doslab,
	},
	{
		.procname = "slab_vmem_alloc",
		.data = (void *)(KMC_VMEM | KMC_ALLOC),
		.maxlen = sizeof (unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doslab,
	},
	{
		.procname = "slab_vmem_max",
		.data = (void *)(KMC_VMEM | KMC_MAX),
		.maxlen = sizeof (unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doslab,
	},
	{},
};
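
/*
 * Each slab_* entry above encodes its selector in .data: proc_doslab()
 * masks the pointer value with KMC_KMEM/KMC_VMEM to choose the cache type
 * and with KMC_TOTAL/KMC_ALLOC/KMC_MAX to choose the statistic.  For
 * example, reading /proc/sys/kernel/spl/kmem/slab_kmem_total sums
 * skc_slab_size * skc_slab_total over all kmem-backed caches.
 */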

static struct ctl_table spl_kstat_table[] = {
	{},
};

static struct ctl_table spl_table[] = {
	/*
	 * NB No .strategy entries have been provided since
	 * sysctl(8) prefers to go via /proc for portability.
	 */
	{
		.procname = "version",
		.data = spl_version,
		.maxlen = sizeof (spl_version),
		.mode = 0444,
		.proc_handler = &proc_dostring,
	},
	{
		.procname = "hostid",
		.data = &spl_hostid,
		.maxlen = sizeof (unsigned long),
		.mode = 0644,
		.proc_handler = &proc_dohostid,
	},
	{
		.procname = "kmem",
		.mode = 0555,
		.child = spl_kmem_table,
	},
	{
		.procname = "kstat",
		.mode = 0555,
		.child = spl_kstat_table,
	},
	{},
};

static struct ctl_table spl_dir[] = {
	{
		.procname = "spl",
		.mode = 0555,
		.child = spl_table,
	},
	{}
};

static struct ctl_table spl_root[] = {
	{
#ifdef HAVE_CTL_NAME
	.ctl_name = CTL_KERN,
#endif
	.procname = "kernel",
	.mode = 0555,
	.child = spl_dir,
	},
	{}
};
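
/*
 * The nested tables above register the sysctl subtree kernel.spl.*, i.e.
 * /proc/sys/kernel/spl/{version,hostid} plus the kmem/ and kstat/
 * subdirectories populated by spl_kmem_table and spl_kstat_table.
 */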

int
spl_proc_init(void)
{
	int rc = 0;

	spl_header = register_sysctl_table(spl_root);
	if (spl_header == NULL)
		return (-EUNATCH);

	proc_spl = proc_mkdir("spl", NULL);
	if (proc_spl == NULL) {
		rc = -EUNATCH;
		goto out;
	}

	proc_spl_taskq_all = proc_create_data("taskq-all", 0444,
	    proc_spl, &proc_taskq_all_operations, NULL);
	if (proc_spl_taskq_all == NULL) {
		rc = -EUNATCH;
		goto out;
	}

	proc_spl_taskq = proc_create_data("taskq", 0444,
	    proc_spl, &proc_taskq_operations, NULL);
	if (proc_spl_taskq == NULL) {
		rc = -EUNATCH;
		goto out;
	}

	proc_spl_kmem = proc_mkdir("kmem", proc_spl);
	if (proc_spl_kmem == NULL) {
		rc = -EUNATCH;
		goto out;
	}

	proc_spl_kmem_slab = proc_create_data("slab", 0444,
	    proc_spl_kmem, &proc_slab_operations, NULL);
	if (proc_spl_kmem_slab == NULL) {
		rc = -EUNATCH;
		goto out;
	}

	proc_spl_kstat = proc_mkdir("kstat", proc_spl);
	if (proc_spl_kstat == NULL) {
		rc = -EUNATCH;
		goto out;
	}
out:
	if (rc) {
		remove_proc_entry("kstat", proc_spl);
		remove_proc_entry("slab", proc_spl_kmem);
		remove_proc_entry("kmem", proc_spl);
		remove_proc_entry("taskq-all", proc_spl);
		remove_proc_entry("taskq", proc_spl);
		remove_proc_entry("spl", NULL);
		unregister_sysctl_table(spl_header);
	}

	return (rc);
}

void
spl_proc_fini(void)
{
	remove_proc_entry("kstat", proc_spl);
	remove_proc_entry("slab", proc_spl_kmem);
	remove_proc_entry("kmem", proc_spl);
	remove_proc_entry("taskq-all", proc_spl);
	remove_proc_entry("taskq", proc_spl);
	remove_proc_entry("spl", NULL);

	ASSERT(spl_header != NULL);
	unregister_sysctl_table(spl_header);
}