]> git.proxmox.com Git - mirror_spl.git/blame - module/spl/spl-proc.c
Add support for rw semaphore under PREEMPT_RT_FULL
[mirror_spl.git] / module / spl / spl-proc.c
CommitLineData
716154c5
BB
1/*****************************************************************************\
2 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
3 * Copyright (C) 2007 The Regents of the University of California.
4 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
5 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
715f6251 6 * UCRL-CODE-235197
7 *
716154c5 8 * This file is part of the SPL, Solaris Porting Layer.
3d6af2dd 9 * For details, see <http://zfsonlinux.org/>.
716154c5
BB
10 *
11 * The SPL is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
715f6251 15 *
716154c5 16 * The SPL is distributed in the hope that it will be useful, but WITHOUT
715f6251 17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 * for more details.
20 *
21 * You should have received a copy of the GNU General Public License along
716154c5
BB
22 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
23 *****************************************************************************
24 * Solaris Porting Layer (SPL) Proc Implementation.
25\*****************************************************************************/
715f6251 26
ae4c36ad
BB
27#include <sys/systeminfo.h>
28#include <sys/kstat.h>
e5b9b344
BB
29#include <sys/kmem.h>
30#include <sys/kmem_cache.h>
31#include <sys/vmem.h>
200366f2 32#include <sys/taskq.h>
e5b9b344 33#include <linux/ctype.h>
ae4c36ad
BB
34#include <linux/kmod.h>
35#include <linux/seq_file.h>
36#include <linux/proc_compat.h>
e5b9b344 37#include <linux/uaccess.h>
e3c4d448 38#include <linux/version.h>
57d1b188 39
e3c4d448
RY
40#if defined(CONSTIFY_PLUGIN) && LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)
41typedef struct ctl_table __no_const spl_ctl_table;
42#else
43typedef struct ctl_table spl_ctl_table;
44#endif
45
57d1b188 46static unsigned long table_min = 0;
47static unsigned long table_max = ~0;
404992e3 48
404992e3 49static struct ctl_table_header *spl_header = NULL;
c30df9c8 50static struct proc_dir_entry *proc_spl = NULL;
c30df9c8 51static struct proc_dir_entry *proc_spl_kmem = NULL;
ff449ac4 52static struct proc_dir_entry *proc_spl_kmem_slab = NULL;
200366f2
TC
53static struct proc_dir_entry *proc_spl_taskq_all = NULL;
54static struct proc_dir_entry *proc_spl_taskq = NULL;
c30df9c8 55struct proc_dir_entry *proc_spl_kstat = NULL;
57d1b188 56
57d1b188 57static int
58proc_copyin_string(char *kbuffer, int kbuffer_size,
59 const char *ubuffer, int ubuffer_size)
60{
61 int size;
62
63 if (ubuffer_size > kbuffer_size)
64 return -EOVERFLOW;
65
66 if (copy_from_user((void *)kbuffer, (void *)ubuffer, ubuffer_size))
67 return -EFAULT;
68
69 /* strip trailing whitespace */
70 size = strnlen(kbuffer, ubuffer_size);
71 while (size-- >= 0)
72 if (!isspace(kbuffer[size]))
73 break;
74
75 /* empty string */
76 if (size < 0)
77 return -EINVAL;
78
79 /* no space to terminate */
80 if (size == kbuffer_size)
81 return -EOVERFLOW;
82
83 kbuffer[size + 1] = 0;
84 return 0;
85}
86
87static int
88proc_copyout_string(char *ubuffer, int ubuffer_size,
89 const char *kbuffer, char *append)
90{
91 /* NB if 'append' != NULL, it's a single character to append to the
92 * copied out string - usually "\n", for /proc entries and
93 * (i.e. a terminating zero byte) for sysctl entries
94 */
95 int size = MIN(strlen(kbuffer), ubuffer_size);
96
97 if (copy_to_user(ubuffer, kbuffer, size))
98 return -EFAULT;
99
100 if (append != NULL && size < ubuffer_size) {
101 if (copy_to_user(ubuffer + size, append, 1))
102 return -EFAULT;
103
104 size++;
105 }
106
107 return size;
108}
109
#ifdef DEBUG_KMEM
/*
 * sysctl handler exposing an atomic kmem accounting counter (see
 * kmem_used in spl_kmem_table) as an unsigned long.  The counter is
 * snapshotted into 'val' and reported through a dummy ctl_table via
 * proc_doulongvec_minmax().  Writes only advance the file position,
 * so the entry is effectively read-only.
 */
static int
proc_domemused(struct ctl_table *table, int write,
    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int rc = 0;
	unsigned long min = 0, max = ~0, val;
	/* shadow copy of the table redirected at the local snapshot */
	spl_ctl_table dummy = *table;

	dummy.data = &val;
	dummy.proc_handler = &proc_dointvec;
	dummy.extra1 = &min;
	dummy.extra2 = &max;

	if (write) {
		/* discard the write by consuming the input */
		*ppos += *lenp;
	} else {
# ifdef HAVE_ATOMIC64_T
		val = atomic64_read((atomic64_t *)table->data);
# else
		val = atomic_read((atomic_t *)table->data);
# endif /* HAVE_ATOMIC64_T */
		rc = proc_doulongvec_minmax(&dummy, write, buffer, lenp, ppos);
	}

	return (rc);
}
#endif /* DEBUG_KMEM */
3336e29c 138
0fac9c9e
BB
139static int
140proc_doslab(struct ctl_table *table, int write,
141 void __user *buffer, size_t *lenp, loff_t *ppos)
3336e29c
BB
142{
143 int rc = 0;
144 unsigned long min = 0, max = ~0, val = 0, mask;
e3c4d448 145 spl_ctl_table dummy = *table;
3336e29c 146 spl_kmem_cache_t *skc;
3336e29c
BB
147
148 dummy.data = &val;
149 dummy.proc_handler = &proc_dointvec;
150 dummy.extra1 = &min;
151 dummy.extra2 = &max;
152
153 if (write) {
154 *ppos += *lenp;
155 } else {
156 down_read(&spl_kmem_cache_sem);
157 mask = (unsigned long)table->data;
158
159 list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {
160
161 /* Only use slabs of the correct kmem/vmem type */
162 if (!(skc->skc_flags & mask))
163 continue;
164
165 /* Sum the specified field for selected slabs */
166 switch (mask & (KMC_TOTAL | KMC_ALLOC | KMC_MAX)) {
167 case KMC_TOTAL:
168 val += skc->skc_slab_size * skc->skc_slab_total;
169 break;
170 case KMC_ALLOC:
171 val += skc->skc_obj_size * skc->skc_obj_alloc;
172 break;
173 case KMC_MAX:
174 val += skc->skc_obj_size * skc->skc_obj_max;
175 break;
176 }
177 }
178
179 up_read(&spl_kmem_cache_sem);
0fac9c9e 180 rc = proc_doulongvec_minmax(&dummy, write, buffer, lenp, ppos);
3336e29c
BB
181 }
182
8d9a23e8 183 return (rc);
3336e29c 184}
57d1b188 185
0fac9c9e
BB
186static int
187proc_dohostid(struct ctl_table *table, int write,
188 void __user *buffer, size_t *lenp, loff_t *ppos)
57d1b188 189{
190 int len, rc = 0;
57d1b188 191 char *end, str[32];
57d1b188 192
193 if (write) {
0fac9c9e 194 /* We can't use proc_doulongvec_minmax() in the write
c95b308d 195 * case here because hostid while a hex value has no
a0b5ae8a 196 * leading 0x which confuses the helper function. */
57d1b188 197 rc = proc_copyin_string(str, sizeof(str), buffer, *lenp);
198 if (rc < 0)
8d9a23e8 199 return (rc);
57d1b188 200
fa6f7d8f 201 spl_hostid = simple_strtoul(str, &end, 16);
a0b5ae8a 202 if (str == end)
8d9a23e8 203 return (-EINVAL);
57d1b188 204
57d1b188 205 } else {
c95b308d 206 len = snprintf(str, sizeof(str), "%lx", spl_hostid);
57d1b188 207 if (*ppos >= len)
208 rc = 0;
209 else
3977f837 210 rc = proc_copyout_string(buffer,*lenp,str+*ppos,"\n");
57d1b188 211
212 if (rc >= 0) {
213 *lenp = rc;
214 *ppos += rc;
215 }
216 }
217
8d9a23e8 218 return (rc);
57d1b188 219}
220
200366f2
TC
/* Emit the column header line for the /proc/spl/taskq[-all] files. */
static void
taskq_seq_show_headers(struct seq_file *f)
{
	seq_printf(f, "%-25s %5s %5s %5s %5s %5s %5s %12s %5s %10s\n",
	    "taskq", "act", "nthr", "spwn", "maxt", "pri",
	    "mina", "maxa", "cura", "flags");
}
228
/* indices into the lheads array below */
#define LHEAD_PEND 0
#define LHEAD_PRIO 1
#define LHEAD_DELAY 2
#define LHEAD_WAIT 3
#define LHEAD_ACTIVE 4
#define LHEAD_SIZE 5

/* Cap on tasks listed per taskq list in the proc output; 0 = no cap. */
static unsigned int spl_max_show_tasks = 512;
module_param(spl_max_show_tasks, uint, 0644);
MODULE_PARM_DESC(spl_max_show_tasks, "Max number of tasks shown in taskq proc");
240
200366f2
TC
/*
 * Produce one taskq's worth of output for /proc/spl/taskq[-all]: a
 * summary line followed by its active, pend, prio, delay and wait
 * lists.  With allflag == B_FALSE (the "taskq" file) a taskq whose
 * lists are all empty is skipped entirely.
 *
 * Both tq_lock and the wait waitq's internal lock are held while the
 * lists are walked; the waitq lock is dropped as soon as it is known
 * the wait list will not be printed.
 */
static int
taskq_seq_show_impl(struct seq_file *f, void *p, boolean_t allflag)
{
	taskq_t *tq = p;
	taskq_thread_t *tqt;
	wait_queue_t *wq;
	struct task_struct *tsk;
	taskq_ent_t *tqe;
	char name[100];
	struct list_head *lheads[LHEAD_SIZE], *lh;
	static char *list_names[LHEAD_SIZE] =
	    {"pend", "prio", "delay", "wait", "active" };
	int i, j, have_lheads = 0;
	unsigned long wflags, flags;

	/* _nested avoids false lockdep reports between taskq lock classes */
	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	spin_lock_irqsave(&tq->tq_wait_waitq.lock, wflags);

	/* get the various lists and check whether they're empty */
	lheads[LHEAD_PEND] = &tq->tq_pend_list;
	lheads[LHEAD_PRIO] = &tq->tq_prio_list;
	lheads[LHEAD_DELAY] = &tq->tq_delay_list;
	lheads[LHEAD_WAIT] = &tq->tq_wait_waitq.task_list;
	lheads[LHEAD_ACTIVE] = &tq->tq_active_list;

	for (i = 0; i < LHEAD_SIZE; ++i) {
		if (list_empty(lheads[i]))
			lheads[i] = NULL;	/* NULL marks "do not print" */
		else
			++have_lheads;
	}

	/* early return in non-"all" mode if lists are all empty */
	if (!allflag && !have_lheads) {
		spin_unlock_irqrestore(&tq->tq_wait_waitq.lock, wflags);
		spin_unlock_irqrestore(&tq->tq_lock, flags);
		return (0);
	}

	/* unlock the waitq quickly */
	if (!lheads[LHEAD_WAIT])
		spin_unlock_irqrestore(&tq->tq_wait_waitq.lock, wflags);

	/* show the base taskq contents */
	snprintf(name, sizeof(name), "%s/%d", tq->tq_name, tq->tq_instance);
	seq_printf(f, "%-25s ", name);
	seq_printf(f, "%5d %5d %5d %5d %5d %5d %12d %5d %10x\n",
	    tq->tq_nactive, tq->tq_nthreads, tq->tq_nspawn,
	    tq->tq_maxthreads, tq->tq_pri, tq->tq_minalloc, tq->tq_maxalloc,
	    tq->tq_nalloc, tq->tq_flags);

	/* show the active list: [pid]func(arg) per running entry */
	if (lheads[LHEAD_ACTIVE]) {
		j = 0;
		list_for_each_entry(tqt, &tq->tq_active_list, tqt_active_list) {
			if (j == 0)
				seq_printf(f, "\t%s:", list_names[LHEAD_ACTIVE]);
			else if (j == 2) {
				/* wrap the line after two entries */
				seq_printf(f, "\n\t ");
				j = 0;
			}
			seq_printf(f, " [%d]%pf(%ps)",
			    tqt->tqt_thread->pid,
			    tqt->tqt_task->tqent_func,
			    tqt->tqt_task->tqent_arg);
			++j;
		}
		seq_printf(f, "\n");
	}

	/* pend, prio, delay and wait lists, each capped by
	 * spl_max_show_tasks entries */
	for (i = LHEAD_PEND; i <= LHEAD_WAIT; ++i)
		if (lheads[i]) {
			j = 0;
			list_for_each(lh, lheads[i]) {
				if (spl_max_show_tasks != 0 &&
				    j >= spl_max_show_tasks) {
					seq_printf(f, "\n\t(truncated)");
					break;
				}
				/* show the wait waitq list */
				if (i == LHEAD_WAIT) {
					wq = list_entry(lh, wait_queue_t, task_list);
					if (j == 0)
						seq_printf(f, "\t%s:",
						    list_names[i]);
					else if (j % 8 == 0)
						seq_printf(f, "\n\t ");

					tsk = wq->private;
					seq_printf(f, " %d", tsk->pid);
				/* pend, prio and delay lists */
				} else {
					tqe = list_entry(lh, taskq_ent_t,
					    tqent_list);
					if (j == 0)
						seq_printf(f, "\t%s:",
						    list_names[i]);
					else if (j % 2 == 0)
						seq_printf(f, "\n\t ");

					seq_printf(f, " %pf(%ps)",
					    tqe->tqent_func,
					    tqe->tqent_arg);
				}
				++j;
			}
			seq_printf(f, "\n");
		}
	if (lheads[LHEAD_WAIT])
		spin_unlock_irqrestore(&tq->tq_wait_waitq.lock, wflags);
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	return (0);
}
355
/* seq_file .show for /proc/spl/taskq-all: print every taskq. */
static int
taskq_all_seq_show(struct seq_file *f, void *p)
{
	return (taskq_seq_show_impl(f, p, B_TRUE));
}
361
/* seq_file .show for /proc/spl/taskq: skip taskqs with empty lists. */
static int
taskq_seq_show(struct seq_file *f, void *p)
{
	return (taskq_seq_show_impl(f, p, B_FALSE));
}
367
368static void *
369taskq_seq_start(struct seq_file *f, loff_t *pos)
370{
371 struct list_head *p;
372 loff_t n = *pos;
373
374 down_read(&tq_list_sem);
375 if (!n)
376 taskq_seq_show_headers(f);
377
378 p = tq_list.next;
379 while (n--) {
380 p = p->next;
381 if (p == &tq_list)
382 return (NULL);
383 }
384
385 return (list_entry(p, taskq_t, tq_taskqs));
386}
387
388static void *
389taskq_seq_next(struct seq_file *f, void *p, loff_t *pos)
390{
391 taskq_t *tq = p;
392
393 ++*pos;
394 return ((tq->tq_taskqs.next == &tq_list) ?
395 NULL : list_entry(tq->tq_taskqs.next, taskq_t, tq_taskqs));
396}
397
/* Emit the two-row column header for /proc/spl/kmem/slab. */
static void
slab_seq_show_headers(struct seq_file *f)
{
	seq_printf(f,
	    "--------------------- cache ----------"
	    "--------------------------------------------- "
	    "----- slab ------ "
	    "---- object ----- "
	    "--- emergency ---\n");
	seq_printf(f,
	    "name "
	    " flags size alloc slabsize objsize "
	    "total alloc max "
	    "total alloc max "
	    "dlock alloc max\n");
}
414
/*
 * seq_file .show for /proc/spl/kmem/slab: print one cache's statistics
 * line.  Caches backed by the Linux slab allocator are skipped since
 * they already appear in /proc/slabinfo.
 */
static int
slab_seq_show(struct seq_file *f, void *p)
{
	spl_kmem_cache_t *skc = p;

	ASSERT(skc->skc_magic == SKC_MAGIC);

	/*
	 * Backed by Linux slab see /proc/slabinfo.
	 */
	if (skc->skc_flags & KMC_SLAB)
		return (0);

	/* snapshot the counters consistently under the cache lock */
	spin_lock(&skc->skc_lock);
	seq_printf(f, "%-36s ", skc->skc_name);
	seq_printf(f, "0x%05lx %9lu %9lu %8u %8u "
	    "%5lu %5lu %5lu %5lu %5lu %5lu %5lu %5lu %5lu\n",
	    (long unsigned)skc->skc_flags,
	    (long unsigned)(skc->skc_slab_size * skc->skc_slab_total),
	    (long unsigned)(skc->skc_obj_size * skc->skc_obj_alloc),
	    (unsigned)skc->skc_slab_size,
	    (unsigned)skc->skc_obj_size,
	    (long unsigned)skc->skc_slab_total,
	    (long unsigned)skc->skc_slab_alloc,
	    (long unsigned)skc->skc_slab_max,
	    (long unsigned)skc->skc_obj_total,
	    (long unsigned)skc->skc_obj_alloc,
	    (long unsigned)skc->skc_obj_max,
	    (long unsigned)skc->skc_obj_deadlock,
	    (long unsigned)skc->skc_obj_emergency,
	    (long unsigned)skc->skc_obj_emergency_max);

	spin_unlock(&skc->skc_lock);

	return 0;
}
451
452static void *
453slab_seq_start(struct seq_file *f, loff_t *pos)
454{
455 struct list_head *p;
456 loff_t n = *pos;
ff449ac4 457
458 down_read(&spl_kmem_cache_sem);
459 if (!n)
460 slab_seq_show_headers(f);
461
462 p = spl_kmem_cache_list.next;
463 while (n--) {
464 p = p->next;
465 if (p == &spl_kmem_cache_list)
8d9a23e8 466 return (NULL);
ff449ac4 467 }
468
8d9a23e8 469 return (list_entry(p, spl_kmem_cache_t, skc_list));
ff449ac4 470}
471
472static void *
473slab_seq_next(struct seq_file *f, void *p, loff_t *pos)
474{
475 spl_kmem_cache_t *skc = p;
ff449ac4 476
477 ++*pos;
8d9a23e8 478 return ((skc->skc_list.next == &spl_kmem_cache_list) ?
3977f837 479 NULL : list_entry(skc->skc_list.next,spl_kmem_cache_t,skc_list));
ff449ac4 480}
481
/* seq_file .stop: drop the semaphore taken in slab_seq_start(). */
static void
slab_seq_stop(struct seq_file *f, void *v)
{
	up_read(&spl_kmem_cache_sem);
}
487
/* seq_file iterator over spl_kmem_cache_list for /proc/spl/kmem/slab */
static struct seq_operations slab_seq_ops = {
	.show = slab_seq_show,
	.start = slab_seq_start,
	.next = slab_seq_next,
	.stop = slab_seq_stop,
};
494
/* .open for /proc/spl/kmem/slab: attach the slab seq_file iterator. */
static int
proc_slab_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &slab_seq_ops);
}
500
/* file_operations for /proc/spl/kmem/slab (standard seq_file plumbing) */
static struct file_operations proc_slab_operations = {
	.open = proc_slab_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
ff449ac4 507
200366f2
TC
/* seq_file .stop: drop the semaphore taken in taskq_seq_start(). */
static void
taskq_seq_stop(struct seq_file *f, void *v)
{
	up_read(&tq_list_sem);
}
513
/* seq_file iterator for /proc/spl/taskq-all (shows every taskq) */
static struct seq_operations taskq_all_seq_ops = {
	.show = taskq_all_seq_show,
	.start = taskq_seq_start,
	.next = taskq_seq_next,
	.stop = taskq_seq_stop,
};
520
/* seq_file iterator for /proc/spl/taskq (skips idle taskqs) */
static struct seq_operations taskq_seq_ops = {
	.show = taskq_seq_show,
	.start = taskq_seq_start,
	.next = taskq_seq_next,
	.stop = taskq_seq_stop,
};
527
/* .open for /proc/spl/taskq-all */
static int
proc_taskq_all_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &taskq_all_seq_ops);
}
533
/* .open for /proc/spl/taskq */
static int
proc_taskq_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &taskq_seq_ops);
}
539
/* file_operations for /proc/spl/taskq-all (standard seq_file plumbing) */
static struct file_operations proc_taskq_all_operations = {
	.open = proc_taskq_all_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
546
/* file_operations for /proc/spl/taskq (standard seq_file plumbing) */
static struct file_operations proc_taskq_operations = {
	.open = proc_taskq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
553
/*
 * kernel.spl.kmem.* sysctl entries.  kmem_used/kmem_max are only
 * compiled in for DEBUG_KMEM builds.  The slab_* entries carry no real
 * storage: the .data pointer encodes the KMC_* selector flags that
 * proc_doslab() uses to pick the caches and the statistic to sum.
 */
static struct ctl_table spl_kmem_table[] = {
#ifdef DEBUG_KMEM
	{
		.procname = "kmem_used",
		.data = &kmem_alloc_used,
# ifdef HAVE_ATOMIC64_T
		.maxlen = sizeof(atomic64_t),
# else
		.maxlen = sizeof(atomic_t),
# endif /* HAVE_ATOMIC64_T */
		.mode = 0444,
		.proc_handler = &proc_domemused,
	},
	{
		.procname = "kmem_max",
		.data = &kmem_alloc_max,
		.maxlen = sizeof(unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doulongvec_minmax,
	},
#endif /* DEBUG_KMEM */
	{
		.procname = "slab_kmem_total",
		.data = (void *)(KMC_KMEM | KMC_TOTAL),
		.maxlen = sizeof(unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doslab,
	},
	{
		.procname = "slab_kmem_alloc",
		.data = (void *)(KMC_KMEM | KMC_ALLOC),
		.maxlen = sizeof(unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doslab,
	},
	{
		.procname = "slab_kmem_max",
		.data = (void *)(KMC_KMEM | KMC_MAX),
		.maxlen = sizeof(unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doslab,
	},
	{
		.procname = "slab_vmem_total",
		.data = (void *)(KMC_VMEM | KMC_TOTAL),
		.maxlen = sizeof(unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doslab,
	},
	{
		.procname = "slab_vmem_alloc",
		.data = (void *)(KMC_VMEM | KMC_ALLOC),
		.maxlen = sizeof(unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doslab,
	},
	{
		.procname = "slab_vmem_max",
		.data = (void *)(KMC_VMEM | KMC_MAX),
		.maxlen = sizeof(unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doslab,
	},
	{0},
};
04a479f7 633
/* kernel.spl.kstat.* — intentionally empty; kstats live under /proc/spl */
static struct ctl_table spl_kstat_table[] = {
	{0},
};
9ab1ac14 637
/* kernel.spl.* sysctl entries: version, hostid, and the kmem/kstat
 * sub-tables above. */
static struct ctl_table spl_table[] = {
	/* NB No .strategy entries have been provided since
	 * sysctl(8) prefers to go via /proc for portability.
	 */
	{
		.procname = "version",
		.data = spl_version,
		.maxlen = sizeof(spl_version),
		.mode = 0444,
		.proc_handler = &proc_dostring,
	},
	{
		.procname = "hostid",
		.data = &spl_hostid,
		.maxlen = sizeof(unsigned long),
		.mode = 0644,
		.proc_handler = &proc_dohostid,
	},
	{
		.procname = "kmem",
		.mode = 0555,
		.child = spl_kmem_table,
	},
	{
		.procname = "kstat",
		.mode = 0555,
		.child = spl_kstat_table,
	},
	{ 0 },
};
668
/* "spl" directory node hooking spl_table into the sysctl tree */
static struct ctl_table spl_dir[] = {
	{
		.procname = "spl",
		.mode = 0555,
		.child = spl_table,
	},
	{ 0 }
};
677
/* Root of the registered tree: kernel.spl.* */
static struct ctl_table spl_root[] = {
	{
#ifdef HAVE_CTL_NAME
	.ctl_name = CTL_KERN,
#endif
	.procname = "kernel",
	.mode = 0555,
	.child = spl_dir,
	},
	{ 0 }
};
689
690int
1114ae6a 691spl_proc_init(void)
57d1b188 692{
404992e3 693 int rc = 0;
57d1b188 694
b38bf6a4 695 spl_header = register_sysctl_table(spl_root);
57d1b188 696 if (spl_header == NULL)
8d9a23e8 697 return (-EUNATCH);
9ab1ac14 698
c30df9c8 699 proc_spl = proc_mkdir("spl", NULL);
8d9a23e8
BB
700 if (proc_spl == NULL) {
701 rc = -EUNATCH;
702 goto out;
703 }
404992e3 704
200366f2
TC
705 proc_spl_taskq_all = proc_create_data("taskq-all", 0444,
706 proc_spl, &proc_taskq_all_operations, NULL);
707 if (proc_spl_taskq_all == NULL) {
708 rc = -EUNATCH;
709 goto out;
710 }
711
712 proc_spl_taskq = proc_create_data("taskq", 0444,
713 proc_spl, &proc_taskq_operations, NULL);
714 if (proc_spl_taskq == NULL) {
715 rc = -EUNATCH;
716 goto out;
717 }
718
c30df9c8 719 proc_spl_kmem = proc_mkdir("kmem", proc_spl);
8d9a23e8
BB
720 if (proc_spl_kmem == NULL) {
721 rc = -EUNATCH;
722 goto out;
723 }
ff449ac4 724
f2a745c4
RY
725 proc_spl_kmem_slab = proc_create_data("slab", 0444,
726 proc_spl_kmem, &proc_slab_operations, NULL);
8d9a23e8
BB
727 if (proc_spl_kmem_slab == NULL) {
728 rc = -EUNATCH;
729 goto out;
730 }
ff449ac4 731
c30df9c8 732 proc_spl_kstat = proc_mkdir("kstat", proc_spl);
8d9a23e8
BB
733 if (proc_spl_kstat == NULL) {
734 rc = -EUNATCH;
735 goto out;
736 }
404992e3 737out:
c30df9c8 738 if (rc) {
739 remove_proc_entry("kstat", proc_spl);
ff449ac4 740 remove_proc_entry("slab", proc_spl_kmem);
c30df9c8 741 remove_proc_entry("kmem", proc_spl);
200366f2
TC
742 remove_proc_entry("taskq-all", proc_spl);
743 remove_proc_entry("taskq", proc_spl);
a02118a8 744 remove_proc_entry("spl", NULL);
b38bf6a4 745 unregister_sysctl_table(spl_header);
c30df9c8 746 }
c30df9c8 747
8d9a23e8 748 return (rc);
57d1b188 749}
750
/*
 * Tear down the /proc/spl hierarchy and unregister the sysctl tree.
 * Only called after a successful spl_proc_init(), so every entry is
 * known to exist (hence the ASSERT on spl_header).
 */
void
spl_proc_fini(void)
{
	remove_proc_entry("kstat", proc_spl);
	remove_proc_entry("slab", proc_spl_kmem);
	remove_proc_entry("kmem", proc_spl);
	remove_proc_entry("taskq-all", proc_spl);
	remove_proc_entry("taskq", proc_spl);
	remove_proc_entry("spl", NULL);

	ASSERT(spl_header != NULL);
	unregister_sysctl_table(spl_header);
}