/*
 * Source: git.proxmox.com mirror_spl-debian.git — module/spl/spl-proc.c
 * (imported as upstream version 0.7.2).
 */
1 /*****************************************************************************\
2 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
3 * Copyright (C) 2007 The Regents of the University of California.
4 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
5 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
6 * UCRL-CODE-235197
7 *
8 * This file is part of the SPL, Solaris Porting Layer.
9 * For details, see <http://zfsonlinux.org/>.
10 *
11 * The SPL is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 *
16 * The SPL is distributed in the hope that it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 * for more details.
20 *
21 * You should have received a copy of the GNU General Public License along
22 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
23 *****************************************************************************
24 * Solaris Porting Layer (SPL) Proc Implementation.
25 \*****************************************************************************/
26
27 #include <sys/systeminfo.h>
28 #include <sys/kstat.h>
29 #include <sys/kmem.h>
30 #include <sys/kmem_cache.h>
31 #include <sys/vmem.h>
32 #include <sys/taskq.h>
33 #include <linux/ctype.h>
34 #include <linux/kmod.h>
35 #include <linux/seq_file.h>
36 #include <linux/proc_compat.h>
37 #include <linux/uaccess.h>
38 #include <linux/version.h>
39
#if defined(CONSTIFY_PLUGIN) && LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)
/*
 * The grsecurity/PaX constify plugin makes struct ctl_table read-only;
 * __no_const keeps this alias writable so the handlers below can build
 * a modified local copy ("dummy table") of an entry.
 */
typedef struct ctl_table __no_const spl_ctl_table;
#else
typedef struct ctl_table spl_ctl_table;
#endif
45
/* Range limits shared by the proc_doulongvec_minmax() backed entries. */
static unsigned long table_min = 0;
static unsigned long table_max = ~0;

/* Handle for the registered kernel/spl sysctl tree. */
static struct ctl_table_header *spl_header = NULL;

/* /proc/spl directory hierarchy created by spl_proc_init(). */
static struct proc_dir_entry *proc_spl = NULL;
static struct proc_dir_entry *proc_spl_kmem = NULL;
static struct proc_dir_entry *proc_spl_kmem_slab = NULL;
static struct proc_dir_entry *proc_spl_taskq_all = NULL;
static struct proc_dir_entry *proc_spl_taskq = NULL;
/* Non-static: other parts of the module attach kstat entries here. */
struct proc_dir_entry *proc_spl_kstat = NULL;
56
57 static int
58 proc_copyin_string(char *kbuffer, int kbuffer_size,
59 const char *ubuffer, int ubuffer_size)
60 {
61 int size;
62
63 if (ubuffer_size > kbuffer_size)
64 return -EOVERFLOW;
65
66 if (copy_from_user((void *)kbuffer, (void *)ubuffer, ubuffer_size))
67 return -EFAULT;
68
69 /* strip trailing whitespace */
70 size = strnlen(kbuffer, ubuffer_size);
71 while (size-- >= 0)
72 if (!isspace(kbuffer[size]))
73 break;
74
75 /* empty string */
76 if (size < 0)
77 return -EINVAL;
78
79 /* no space to terminate */
80 if (size == kbuffer_size)
81 return -EOVERFLOW;
82
83 kbuffer[size + 1] = 0;
84 return 0;
85 }
86
87 static int
88 proc_copyout_string(char *ubuffer, int ubuffer_size,
89 const char *kbuffer, char *append)
90 {
91 /* NB if 'append' != NULL, it's a single character to append to the
92 * copied out string - usually "\n", for /proc entries and
93 * (i.e. a terminating zero byte) for sysctl entries
94 */
95 int size = MIN(strlen(kbuffer), ubuffer_size);
96
97 if (copy_to_user(ubuffer, kbuffer, size))
98 return -EFAULT;
99
100 if (append != NULL && size < ubuffer_size) {
101 if (copy_to_user(ubuffer + size, append, 1))
102 return -EFAULT;
103
104 size++;
105 }
106
107 return size;
108 }
109
#ifdef DEBUG_KMEM
/*
 * Sysctl handler for the kmem_used entry.  Reads copy the atomic
 * counter referenced by table->data into a local unsigned long and
 * format it through proc_doulongvec_minmax() using a writable dummy
 * table.  Writes are accepted but ignored (only the file position is
 * advanced) since the counter is maintained by the allocator.
 */
static int
proc_domemused(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int rc = 0;
	unsigned long min = 0, max = ~0, val;
	spl_ctl_table dummy = *table;

	/* Redirect the dummy table at the local snapshot value. */
	dummy.data = &val;
	dummy.proc_handler = &proc_dointvec;
	dummy.extra1 = &min;
	dummy.extra2 = &max;

	if (write) {
		*ppos += *lenp;
	} else {
# ifdef HAVE_ATOMIC64_T
		val = atomic64_read((atomic64_t *)table->data);
# else
		val = atomic_read((atomic_t *)table->data);
# endif /* HAVE_ATOMIC64_T */
		rc = proc_doulongvec_minmax(&dummy, write, buffer, lenp, ppos);
	}

	return (rc);
}
#endif /* DEBUG_KMEM */
138
139 static int
140 proc_doslab(struct ctl_table *table, int write,
141 void __user *buffer, size_t *lenp, loff_t *ppos)
142 {
143 int rc = 0;
144 unsigned long min = 0, max = ~0, val = 0, mask;
145 spl_ctl_table dummy = *table;
146 spl_kmem_cache_t *skc;
147
148 dummy.data = &val;
149 dummy.proc_handler = &proc_dointvec;
150 dummy.extra1 = &min;
151 dummy.extra2 = &max;
152
153 if (write) {
154 *ppos += *lenp;
155 } else {
156 down_read(&spl_kmem_cache_sem);
157 mask = (unsigned long)table->data;
158
159 list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {
160
161 /* Only use slabs of the correct kmem/vmem type */
162 if (!(skc->skc_flags & mask))
163 continue;
164
165 /* Sum the specified field for selected slabs */
166 switch (mask & (KMC_TOTAL | KMC_ALLOC | KMC_MAX)) {
167 case KMC_TOTAL:
168 val += skc->skc_slab_size * skc->skc_slab_total;
169 break;
170 case KMC_ALLOC:
171 val += skc->skc_obj_size * skc->skc_obj_alloc;
172 break;
173 case KMC_MAX:
174 val += skc->skc_obj_size * skc->skc_obj_max;
175 break;
176 }
177 }
178
179 up_read(&spl_kmem_cache_sem);
180 rc = proc_doulongvec_minmax(&dummy, write, buffer, lenp, ppos);
181 }
182
183 return (rc);
184 }
185
186 static int
187 proc_dohostid(struct ctl_table *table, int write,
188 void __user *buffer, size_t *lenp, loff_t *ppos)
189 {
190 int len, rc = 0;
191 char *end, str[32];
192
193 if (write) {
194 /* We can't use proc_doulongvec_minmax() in the write
195 * case here because hostid while a hex value has no
196 * leading 0x which confuses the helper function. */
197 rc = proc_copyin_string(str, sizeof(str), buffer, *lenp);
198 if (rc < 0)
199 return (rc);
200
201 spl_hostid = simple_strtoul(str, &end, 16);
202 if (str == end)
203 return (-EINVAL);
204
205 } else {
206 len = snprintf(str, sizeof(str), "%lx",
207 (unsigned long) zone_get_hostid(NULL));
208 if (*ppos >= len)
209 rc = 0;
210 else
211 rc = proc_copyout_string(buffer,*lenp,str+*ppos,"\n");
212
213 if (rc >= 0) {
214 *lenp = rc;
215 *ppos += rc;
216 }
217 }
218
219 return (rc);
220 }
221
/* Emit the column header line shared by both /proc/spl/taskq files. */
static void
taskq_seq_show_headers(struct seq_file *f)
{
	seq_printf(f, "%-25s %5s %5s %5s %5s %5s %5s %12s %5s %10s\n",
	    "taskq", "act", "nthr", "spwn", "maxt", "pri",
	    "mina", "maxa", "cura", "flags");
}
229
/* indices into the lheads array below */
#define LHEAD_PEND 0
#define LHEAD_PRIO 1
#define LHEAD_DELAY 2
#define LHEAD_WAIT 3
#define LHEAD_ACTIVE 4
#define LHEAD_SIZE 5

/* Cap on entries printed per taskq list; 0 disables truncation. */
static unsigned int spl_max_show_tasks = 512;
module_param(spl_max_show_tasks, uint, 0644);
MODULE_PARM_DESC(spl_max_show_tasks, "Max number of tasks shown in taskq proc");
241
/*
 * Print one taskq's state to the seq_file.  In "all" mode (allflag)
 * the summary line is always emitted; otherwise a taskq whose five
 * task lists are all empty is skipped entirely.  Each non-empty list
 * (pend/prio/delay/wait/active) is then dumped, truncated after
 * spl_max_show_tasks entries (0 means unlimited).
 */
static int
taskq_seq_show_impl(struct seq_file *f, void *p, boolean_t allflag)
{
	taskq_t *tq = p;
	taskq_thread_t *tqt;
	spl_wait_queue_entry_t *wq;
	struct task_struct *tsk;
	taskq_ent_t *tqe;
	char name[100];
	struct list_head *lheads[LHEAD_SIZE], *lh;
	static char *list_names[LHEAD_SIZE] =
	    {"pend", "prio", "delay", "wait", "active" };
	int i, j, have_lheads = 0;
	unsigned long wflags, flags;

	/*
	 * Take the taskq lock and the waitq's internal lock so all five
	 * lists can be examined as one consistent snapshot.
	 */
	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	spin_lock_irqsave(&tq->tq_wait_waitq.lock, wflags);

	/* get the various lists and check whether they're empty */
	lheads[LHEAD_PEND] = &tq->tq_pend_list;
	lheads[LHEAD_PRIO] = &tq->tq_prio_list;
	lheads[LHEAD_DELAY] = &tq->tq_delay_list;
#ifdef HAVE_WAIT_QUEUE_HEAD_ENTRY
	lheads[LHEAD_WAIT] = &tq->tq_wait_waitq.head;
#else
	lheads[LHEAD_WAIT] = &tq->tq_wait_waitq.task_list;
#endif
	lheads[LHEAD_ACTIVE] = &tq->tq_active_list;

	/* NULL out the empty lists so later code can test the pointer */
	for (i = 0; i < LHEAD_SIZE; ++i) {
		if (list_empty(lheads[i]))
			lheads[i] = NULL;
		else
			++have_lheads;
	}

	/* early return in non-"all" mode if lists are all empty */
	if (!allflag && !have_lheads) {
		spin_unlock_irqrestore(&tq->tq_wait_waitq.lock, wflags);
		spin_unlock_irqrestore(&tq->tq_lock, flags);
		return (0);
	}

	/* unlock the waitq quickly when its list will not be shown */
	if (!lheads[LHEAD_WAIT])
		spin_unlock_irqrestore(&tq->tq_wait_waitq.lock, wflags);

	/* show the base taskq contents */
	snprintf(name, sizeof(name), "%s/%d", tq->tq_name, tq->tq_instance);
	seq_printf(f, "%-25s ", name);
	seq_printf(f, "%5d %5d %5d %5d %5d %5d %12d %5d %10x\n",
	    tq->tq_nactive, tq->tq_nthreads, tq->tq_nspawn,
	    tq->tq_maxthreads, tq->tq_pri, tq->tq_minalloc, tq->tq_maxalloc,
	    tq->tq_nalloc, tq->tq_flags);

	/* show the active list: "[pid]func(arg)", wrapped in pairs */
	if (lheads[LHEAD_ACTIVE]) {
		j = 0;
		list_for_each_entry(tqt, &tq->tq_active_list, tqt_active_list) {
			if (j == 0)
				seq_printf(f, "\t%s:", list_names[LHEAD_ACTIVE]);
			else if (j == 2) {
				seq_printf(f, "\n\t ");
				j = 0;
			}
			seq_printf(f, " [%d]%pf(%ps)",
			    tqt->tqt_thread->pid,
			    tqt->tqt_task->tqent_func,
			    tqt->tqt_task->tqent_arg);
			++j;
		}
		seq_printf(f, "\n");
	}

	/* dump the pend, prio, delay and wait lists in turn */
	for (i = LHEAD_PEND; i <= LHEAD_WAIT; ++i)
		if (lheads[i]) {
			j = 0;
			list_for_each(lh, lheads[i]) {
				if (spl_max_show_tasks != 0 &&
				    j >= spl_max_show_tasks) {
					seq_printf(f, "\n\t(truncated)");
					break;
				}
				/* show the wait waitq list */
				if (i == LHEAD_WAIT) {
#ifdef HAVE_WAIT_QUEUE_HEAD_ENTRY
					wq = list_entry(lh,
					    spl_wait_queue_entry_t, entry);
#else
					wq = list_entry(lh,
					    spl_wait_queue_entry_t, task_list);
#endif
					if (j == 0)
						seq_printf(f, "\t%s:",
						    list_names[i]);
					else if (j % 8 == 0)
						seq_printf(f, "\n\t ");

					tsk = wq->private;
					seq_printf(f, " %d", tsk->pid);
				/* pend, prio and delay lists */
				} else {
					tqe = list_entry(lh, taskq_ent_t,
					    tqent_list);
					if (j == 0)
						seq_printf(f, "\t%s:",
						    list_names[i]);
					else if (j % 2 == 0)
						seq_printf(f, "\n\t ");

					seq_printf(f, " %pf(%ps)",
					    tqe->tqent_func,
					    tqe->tqent_arg);
				}
				++j;
			}
			seq_printf(f, "\n");
		}
	/* release the waitq lock held across printing its members */
	if (lheads[LHEAD_WAIT])
		spin_unlock_irqrestore(&tq->tq_wait_waitq.lock, wflags);
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	return (0);
}
366
/* seq_file show callback for /proc/spl/taskq-all (show every taskq). */
static int
taskq_all_seq_show(struct seq_file *f, void *p)
{
	return (taskq_seq_show_impl(f, p, B_TRUE));
}
372
/* seq_file show callback for /proc/spl/taskq (non-empty taskqs only). */
static int
taskq_seq_show(struct seq_file *f, void *p)
{
	return (taskq_seq_show_impl(f, p, B_FALSE));
}
378
379 static void *
380 taskq_seq_start(struct seq_file *f, loff_t *pos)
381 {
382 struct list_head *p;
383 loff_t n = *pos;
384
385 down_read(&tq_list_sem);
386 if (!n)
387 taskq_seq_show_headers(f);
388
389 p = tq_list.next;
390 while (n--) {
391 p = p->next;
392 if (p == &tq_list)
393 return (NULL);
394 }
395
396 return (list_entry(p, taskq_t, tq_taskqs));
397 }
398
399 static void *
400 taskq_seq_next(struct seq_file *f, void *p, loff_t *pos)
401 {
402 taskq_t *tq = p;
403
404 ++*pos;
405 return ((tq->tq_taskqs.next == &tq_list) ?
406 NULL : list_entry(tq->tq_taskqs.next, taskq_t, tq_taskqs));
407 }
408
/* Emit the two-line column header for /proc/spl/kmem/slab. */
static void
slab_seq_show_headers(struct seq_file *f)
{
	seq_printf(f,
	    "--------------------- cache ----------"
	    "--------------------------------------------- "
	    "----- slab ------ "
	    "---- object ----- "
	    "--- emergency ---\n");
	seq_printf(f,
	    "name "
	    " flags size alloc slabsize objsize "
	    "total alloc max "
	    "total alloc max "
	    "dlock alloc max\n");
}
425
/*
 * seq_file show callback for /proc/spl/kmem/slab: print one cache's
 * statistics line.  Caches backed by the Linux slab allocator are
 * skipped since they already appear in /proc/slabinfo.
 */
static int
slab_seq_show(struct seq_file *f, void *p)
{
	spl_kmem_cache_t *skc = p;

	ASSERT(skc->skc_magic == SKC_MAGIC);

	/*
	 * Backed by Linux slab see /proc/slabinfo.
	 */
	if (skc->skc_flags & KMC_SLAB)
		return (0);

	/* hold skc_lock so the counters are mutually consistent */
	spin_lock(&skc->skc_lock);
	seq_printf(f, "%-36s ", skc->skc_name);
	seq_printf(f, "0x%05lx %9lu %9lu %8u %8u "
	    "%5lu %5lu %5lu %5lu %5lu %5lu %5lu %5lu %5lu\n",
	    (long unsigned)skc->skc_flags,
	    (long unsigned)(skc->skc_slab_size * skc->skc_slab_total),
	    (long unsigned)(skc->skc_obj_size * skc->skc_obj_alloc),
	    (unsigned)skc->skc_slab_size,
	    (unsigned)skc->skc_obj_size,
	    (long unsigned)skc->skc_slab_total,
	    (long unsigned)skc->skc_slab_alloc,
	    (long unsigned)skc->skc_slab_max,
	    (long unsigned)skc->skc_obj_total,
	    (long unsigned)skc->skc_obj_alloc,
	    (long unsigned)skc->skc_obj_max,
	    (long unsigned)skc->skc_obj_deadlock,
	    (long unsigned)skc->skc_obj_emergency,
	    (long unsigned)skc->skc_obj_emergency_max);

	spin_unlock(&skc->skc_lock);

	return 0;
}
462
463 static void *
464 slab_seq_start(struct seq_file *f, loff_t *pos)
465 {
466 struct list_head *p;
467 loff_t n = *pos;
468
469 down_read(&spl_kmem_cache_sem);
470 if (!n)
471 slab_seq_show_headers(f);
472
473 p = spl_kmem_cache_list.next;
474 while (n--) {
475 p = p->next;
476 if (p == &spl_kmem_cache_list)
477 return (NULL);
478 }
479
480 return (list_entry(p, spl_kmem_cache_t, skc_list));
481 }
482
483 static void *
484 slab_seq_next(struct seq_file *f, void *p, loff_t *pos)
485 {
486 spl_kmem_cache_t *skc = p;
487
488 ++*pos;
489 return ((skc->skc_list.next == &spl_kmem_cache_list) ?
490 NULL : list_entry(skc->skc_list.next,spl_kmem_cache_t,skc_list));
491 }
492
/* seq_file stop callback: drop the lock taken in slab_seq_start(). */
static void
slab_seq_stop(struct seq_file *f, void *v)
{
	up_read(&spl_kmem_cache_sem);
}
498
/* seq_file iterator bound to /proc/spl/kmem/slab. */
static struct seq_operations slab_seq_ops = {
	.show = slab_seq_show,
	.start = slab_seq_start,
	.next = slab_seq_next,
	.stop = slab_seq_stop,
};
505
/* open() handler: attach the slab seq_file iterator to the file. */
static int
proc_slab_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &slab_seq_ops);
}
511
/* File operations for /proc/spl/kmem/slab (standard seq_file plumbing). */
static struct file_operations proc_slab_operations = {
	.open = proc_slab_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
518
/* seq_file stop callback: drop the lock taken in taskq_seq_start(). */
static void
taskq_seq_stop(struct seq_file *f, void *v)
{
	up_read(&tq_list_sem);
}
524
/* seq_file iterator for /proc/spl/taskq-all (every taskq is shown). */
static struct seq_operations taskq_all_seq_ops = {
	.show = taskq_all_seq_show,
	.start = taskq_seq_start,
	.next = taskq_seq_next,
	.stop = taskq_seq_stop,
};

/* seq_file iterator for /proc/spl/taskq (empty taskqs are skipped). */
static struct seq_operations taskq_seq_ops = {
	.show = taskq_seq_show,
	.start = taskq_seq_start,
	.next = taskq_seq_next,
	.stop = taskq_seq_stop,
};
538
/* open() handler for /proc/spl/taskq-all. */
static int
proc_taskq_all_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &taskq_all_seq_ops);
}

/* open() handler for /proc/spl/taskq. */
static int
proc_taskq_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &taskq_seq_ops);
}
550
/* File operations for /proc/spl/taskq-all (standard seq_file plumbing). */
static struct file_operations proc_taskq_all_operations = {
	.open = proc_taskq_all_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

/* File operations for /proc/spl/taskq (standard seq_file plumbing). */
static struct file_operations proc_taskq_operations = {
	.open = proc_taskq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
564
/*
 * sysctl entries under kernel/spl/kmem.  Note the slab_* entries abuse
 * .data to carry a KMC_* flag mask (decoded by proc_doslab()) rather
 * than a pointer to storage.
 */
static struct ctl_table spl_kmem_table[] = {
#ifdef DEBUG_KMEM
	{
		.procname = "kmem_used",
		.data = &kmem_alloc_used,
# ifdef HAVE_ATOMIC64_T
		.maxlen = sizeof(atomic64_t),
# else
		.maxlen = sizeof(atomic_t),
# endif /* HAVE_ATOMIC64_T */
		.mode = 0444,
		.proc_handler = &proc_domemused,
	},
	{
		.procname = "kmem_max",
		.data = &kmem_alloc_max,
		.maxlen = sizeof(unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doulongvec_minmax,
	},
#endif /* DEBUG_KMEM */
	{
		.procname = "slab_kmem_total",
		.data = (void *)(KMC_KMEM | KMC_TOTAL),
		.maxlen = sizeof(unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doslab,
	},
	{
		.procname = "slab_kmem_alloc",
		.data = (void *)(KMC_KMEM | KMC_ALLOC),
		.maxlen = sizeof(unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doslab,
	},
	{
		.procname = "slab_kmem_max",
		.data = (void *)(KMC_KMEM | KMC_MAX),
		.maxlen = sizeof(unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doslab,
	},
	{
		.procname = "slab_vmem_total",
		.data = (void *)(KMC_VMEM | KMC_TOTAL),
		.maxlen = sizeof(unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doslab,
	},
	{
		.procname = "slab_vmem_alloc",
		.data = (void *)(KMC_VMEM | KMC_ALLOC),
		.maxlen = sizeof(unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doslab,
	},
	{
		.procname = "slab_vmem_max",
		.data = (void *)(KMC_VMEM | KMC_MAX),
		.maxlen = sizeof(unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doslab,
	},
	{},
};
644
/* Empty placeholder; kstat data is exposed under /proc/spl/kstat instead. */
static struct ctl_table spl_kstat_table[] = {
	{},
};
648
/* sysctl entries directly under kernel/spl. */
static struct ctl_table spl_table[] = {
	/* NB No .strategy entries have been provided since
	 * sysctl(8) prefers to go via /proc for portability.
	 */
	{
		.procname = "version",
		.data = spl_version,
		.maxlen = sizeof(spl_version),
		.mode = 0444,
		.proc_handler = &proc_dostring,
	},
	{
		.procname = "hostid",
		.data = &spl_hostid,
		.maxlen = sizeof(unsigned long),
		.mode = 0644,
		.proc_handler = &proc_dohostid,
	},
	{
		.procname = "kmem",
		.mode = 0555,
		.child = spl_kmem_table,
	},
	{
		.procname = "kstat",
		.mode = 0555,
		.child = spl_kstat_table,
	},
	{},
};
679
/* The "spl" directory node under "kernel". */
static struct ctl_table spl_dir[] = {
	{
		.procname = "spl",
		.mode = 0555,
		.child = spl_table,
	},
	{}
};
688
/* Root of the registered tree: kernel/spl/... */
static struct ctl_table spl_root[] = {
	{
#ifdef HAVE_CTL_NAME
	.ctl_name = CTL_KERN,
#endif
	.procname = "kernel",
	.mode = 0555,
	.child = spl_dir,
	},
	{}
};
700
701 int
702 spl_proc_init(void)
703 {
704 int rc = 0;
705
706 spl_header = register_sysctl_table(spl_root);
707 if (spl_header == NULL)
708 return (-EUNATCH);
709
710 proc_spl = proc_mkdir("spl", NULL);
711 if (proc_spl == NULL) {
712 rc = -EUNATCH;
713 goto out;
714 }
715
716 proc_spl_taskq_all = proc_create_data("taskq-all", 0444,
717 proc_spl, &proc_taskq_all_operations, NULL);
718 if (proc_spl_taskq_all == NULL) {
719 rc = -EUNATCH;
720 goto out;
721 }
722
723 proc_spl_taskq = proc_create_data("taskq", 0444,
724 proc_spl, &proc_taskq_operations, NULL);
725 if (proc_spl_taskq == NULL) {
726 rc = -EUNATCH;
727 goto out;
728 }
729
730 proc_spl_kmem = proc_mkdir("kmem", proc_spl);
731 if (proc_spl_kmem == NULL) {
732 rc = -EUNATCH;
733 goto out;
734 }
735
736 proc_spl_kmem_slab = proc_create_data("slab", 0444,
737 proc_spl_kmem, &proc_slab_operations, NULL);
738 if (proc_spl_kmem_slab == NULL) {
739 rc = -EUNATCH;
740 goto out;
741 }
742
743 proc_spl_kstat = proc_mkdir("kstat", proc_spl);
744 if (proc_spl_kstat == NULL) {
745 rc = -EUNATCH;
746 goto out;
747 }
748 out:
749 if (rc) {
750 remove_proc_entry("kstat", proc_spl);
751 remove_proc_entry("slab", proc_spl_kmem);
752 remove_proc_entry("kmem", proc_spl);
753 remove_proc_entry("taskq-all", proc_spl);
754 remove_proc_entry("taskq", proc_spl);
755 remove_proc_entry("spl", NULL);
756 unregister_sysctl_table(spl_header);
757 }
758
759 return (rc);
760 }
761
/*
 * Tear down the /proc/spl hierarchy and unregister the sysctl tree.
 * NOTE(review): assumes spl_proc_init() completed successfully, so
 * every proc entry removed below exists — confirm against module
 * init/exit ordering.
 */
void
spl_proc_fini(void)
{
	remove_proc_entry("kstat", proc_spl);
	remove_proc_entry("slab", proc_spl_kmem);
	remove_proc_entry("kmem", proc_spl);
	remove_proc_entry("taskq-all", proc_spl);
	remove_proc_entry("taskq", proc_spl);
	remove_proc_entry("spl", NULL);

	ASSERT(spl_header != NULL);
	unregister_sysctl_table(spl_header);
}