]> git.proxmox.com Git - mirror_spl-debian.git/blame - module/spl/spl-proc.c
New upstream version 0.7.2
[mirror_spl-debian.git] / module / spl / spl-proc.c
CommitLineData
716154c5
BB
1/*****************************************************************************\
2 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
3 * Copyright (C) 2007 The Regents of the University of California.
4 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
5 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
715f6251 6 * UCRL-CODE-235197
7 *
716154c5 8 * This file is part of the SPL, Solaris Porting Layer.
3d6af2dd 9 * For details, see <http://zfsonlinux.org/>.
716154c5
BB
10 *
11 * The SPL is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
715f6251 15 *
716154c5 16 * The SPL is distributed in the hope that it will be useful, but WITHOUT
715f6251 17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 * for more details.
20 *
21 * You should have received a copy of the GNU General Public License along
716154c5
BB
22 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
23 *****************************************************************************
24 * Solaris Porting Layer (SPL) Proc Implementation.
25\*****************************************************************************/
715f6251 26
ae4c36ad
BB
27#include <sys/systeminfo.h>
28#include <sys/kstat.h>
10946b02
AX
29#include <sys/kmem.h>
30#include <sys/kmem_cache.h>
31#include <sys/vmem.h>
ec06701b 32#include <sys/taskq.h>
10946b02 33#include <linux/ctype.h>
ae4c36ad
BB
34#include <linux/kmod.h>
35#include <linux/seq_file.h>
36#include <linux/proc_compat.h>
10946b02 37#include <linux/uaccess.h>
80093b6f 38#include <linux/version.h>
57d1b188 39
80093b6f
AX
40#if defined(CONSTIFY_PLUGIN) && LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)
41typedef struct ctl_table __no_const spl_ctl_table;
42#else
43typedef struct ctl_table spl_ctl_table;
44#endif
45
/* Bounds handed to proc_doulongvec_minmax() via .extra1/.extra2 below. */
57d1b188 46static unsigned long table_min = 0;
47static unsigned long table_max = ~0;
404992e3 48
/* Handles for the sysctl tree and /proc/spl entries created in spl_proc_init(). */
404992e3 49static struct ctl_table_header *spl_header = NULL;
c30df9c8 50static struct proc_dir_entry *proc_spl = NULL;
c30df9c8 51static struct proc_dir_entry *proc_spl_kmem = NULL;
ff449ac4 52static struct proc_dir_entry *proc_spl_kmem_slab = NULL;
ec06701b
AX
53static struct proc_dir_entry *proc_spl_taskq_all = NULL;
54static struct proc_dir_entry *proc_spl_taskq = NULL;
/* Non-static: the kstat code registers entries under this directory. */
c30df9c8 55struct proc_dir_entry *proc_spl_kstat = NULL;
57d1b188 56
57d1b188 57static int
58proc_copyin_string(char *kbuffer, int kbuffer_size,
59 const char *ubuffer, int ubuffer_size)
60{
61 int size;
62
63 if (ubuffer_size > kbuffer_size)
64 return -EOVERFLOW;
65
66 if (copy_from_user((void *)kbuffer, (void *)ubuffer, ubuffer_size))
67 return -EFAULT;
68
69 /* strip trailing whitespace */
70 size = strnlen(kbuffer, ubuffer_size);
71 while (size-- >= 0)
72 if (!isspace(kbuffer[size]))
73 break;
74
75 /* empty string */
76 if (size < 0)
77 return -EINVAL;
78
79 /* no space to terminate */
80 if (size == kbuffer_size)
81 return -EOVERFLOW;
82
83 kbuffer[size + 1] = 0;
84 return 0;
85}
86
87static int
88proc_copyout_string(char *ubuffer, int ubuffer_size,
89 const char *kbuffer, char *append)
90{
91 /* NB if 'append' != NULL, it's a single character to append to the
92 * copied out string - usually "\n", for /proc entries and
93 * (i.e. a terminating zero byte) for sysctl entries
94 */
95 int size = MIN(strlen(kbuffer), ubuffer_size);
96
97 if (copy_to_user(ubuffer, kbuffer, size))
98 return -EFAULT;
99
100 if (append != NULL && size < ubuffer_size) {
101 if (copy_to_user(ubuffer + size, append, 1))
102 return -EFAULT;
103
104 size++;
105 }
106
107 return size;
108}
109
#ifdef DEBUG_KMEM
/*
 * Sysctl handler exposing the outstanding kmem byte counter.  Reads
 * snapshot the atomic into a local unsigned long and delegate the
 * formatting to proc_doulongvec_minmax(); writes simply consume the
 * input (the counter is read-only).
 */
static int
proc_domemused(struct ctl_table *table, int write,
    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	unsigned long floor = 0, ceiling = ~0, snapshot;
	spl_ctl_table shadow = *table;
	int rc = 0;

	shadow.data = &snapshot;
	shadow.proc_handler = &proc_dointvec;
	shadow.extra1 = &floor;
	shadow.extra2 = &ceiling;

	if (write) {
		*ppos += *lenp;
		return (rc);
	}

#ifdef HAVE_ATOMIC64_T
	snapshot = atomic64_read((atomic64_t *)table->data);
#else
	snapshot = atomic_read((atomic_t *)table->data);
#endif /* HAVE_ATOMIC64_T */
	rc = proc_doulongvec_minmax(&shadow, write, buffer, lenp, ppos);

	return (rc);
}
#endif /* DEBUG_KMEM */
3336e29c 138
10946b02
AX
139static int
140proc_doslab(struct ctl_table *table, int write,
141 void __user *buffer, size_t *lenp, loff_t *ppos)
3336e29c
BB
142{
143 int rc = 0;
144 unsigned long min = 0, max = ~0, val = 0, mask;
80093b6f 145 spl_ctl_table dummy = *table;
3336e29c 146 spl_kmem_cache_t *skc;
3336e29c
BB
147
148 dummy.data = &val;
149 dummy.proc_handler = &proc_dointvec;
150 dummy.extra1 = &min;
151 dummy.extra2 = &max;
152
153 if (write) {
154 *ppos += *lenp;
155 } else {
156 down_read(&spl_kmem_cache_sem);
157 mask = (unsigned long)table->data;
158
159 list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {
160
161 /* Only use slabs of the correct kmem/vmem type */
162 if (!(skc->skc_flags & mask))
163 continue;
164
165 /* Sum the specified field for selected slabs */
166 switch (mask & (KMC_TOTAL | KMC_ALLOC | KMC_MAX)) {
167 case KMC_TOTAL:
168 val += skc->skc_slab_size * skc->skc_slab_total;
169 break;
170 case KMC_ALLOC:
171 val += skc->skc_obj_size * skc->skc_obj_alloc;
172 break;
173 case KMC_MAX:
174 val += skc->skc_obj_size * skc->skc_obj_max;
175 break;
176 }
177 }
178
179 up_read(&spl_kmem_cache_sem);
10946b02 180 rc = proc_doulongvec_minmax(&dummy, write, buffer, lenp, ppos);
3336e29c
BB
181 }
182
10946b02 183 return (rc);
3336e29c 184}
57d1b188 185
10946b02
AX
186static int
187proc_dohostid(struct ctl_table *table, int write,
188 void __user *buffer, size_t *lenp, loff_t *ppos)
57d1b188 189{
190 int len, rc = 0;
57d1b188 191 char *end, str[32];
57d1b188 192
193 if (write) {
10946b02 194 /* We can't use proc_doulongvec_minmax() in the write
c95b308d 195 * case here because hostid while a hex value has no
a0b5ae8a 196 * leading 0x which confuses the helper function. */
57d1b188 197 rc = proc_copyin_string(str, sizeof(str), buffer, *lenp);
198 if (rc < 0)
10946b02 199 return (rc);
57d1b188 200
fa6f7d8f 201 spl_hostid = simple_strtoul(str, &end, 16);
a0b5ae8a 202 if (str == end)
10946b02 203 return (-EINVAL);
57d1b188 204
57d1b188 205 } else {
ec06701b
AX
206 len = snprintf(str, sizeof(str), "%lx",
207 (unsigned long) zone_get_hostid(NULL));
57d1b188 208 if (*ppos >= len)
209 rc = 0;
210 else
3977f837 211 rc = proc_copyout_string(buffer,*lenp,str+*ppos,"\n");
57d1b188 212
213 if (rc >= 0) {
214 *lenp = rc;
215 *ppos += rc;
216 }
217 }
218
10946b02 219 return (rc);
4ab13d3b
BB
220}
221
ec06701b
AX
/* Emit the column header row for the /proc/spl/taskq* listings. */
static void
taskq_seq_show_headers(struct seq_file *f)
{
	seq_printf(f, "%-25s %5s %5s %5s %5s %5s %5s %12s %5s %10s\n",
	    "taskq", "act", "nthr", "spwn", "maxt", "pri",
	    "mina", "maxa", "cura", "flags");
}
229
230/* indices into the lheads array below */
231#define LHEAD_PEND 0
232#define LHEAD_PRIO 1
233#define LHEAD_DELAY 2
234#define LHEAD_WAIT 3
235#define LHEAD_ACTIVE 4
236#define LHEAD_SIZE 5
237
/* Tunable cap on entries printed per taskq list; 0 disables truncation. */
238static unsigned int spl_max_show_tasks = 512;
239module_param(spl_max_show_tasks, uint, 0644);
240MODULE_PARM_DESC(spl_max_show_tasks, "Max number of tasks shown in taskq proc");
241
242static int
243taskq_seq_show_impl(struct seq_file *f, void *p, boolean_t allflag)
244{
245 taskq_t *tq = p;
246 taskq_thread_t *tqt;
247 spl_wait_queue_entry_t *wq;
248 struct task_struct *tsk;
249 taskq_ent_t *tqe;
250 char name[100];
251 struct list_head *lheads[LHEAD_SIZE], *lh;
252 static char *list_names[LHEAD_SIZE] =
253 {"pend", "prio", "delay", "wait", "active" };
254 int i, j, have_lheads = 0;
255 unsigned long wflags, flags;
256
257 spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
258 spin_lock_irqsave(&tq->tq_wait_waitq.lock, wflags);
259
260 /* get the various lists and check whether they're empty */
261 lheads[LHEAD_PEND] = &tq->tq_pend_list;
262 lheads[LHEAD_PRIO] = &tq->tq_prio_list;
263 lheads[LHEAD_DELAY] = &tq->tq_delay_list;
264#ifdef HAVE_WAIT_QUEUE_HEAD_ENTRY
265 lheads[LHEAD_WAIT] = &tq->tq_wait_waitq.head;
266#else
267 lheads[LHEAD_WAIT] = &tq->tq_wait_waitq.task_list;
268#endif
269 lheads[LHEAD_ACTIVE] = &tq->tq_active_list;
270
271 for (i = 0; i < LHEAD_SIZE; ++i) {
272 if (list_empty(lheads[i]))
273 lheads[i] = NULL;
274 else
275 ++have_lheads;
276 }
277
278 /* early return in non-"all" mode if lists are all empty */
279 if (!allflag && !have_lheads) {
280 spin_unlock_irqrestore(&tq->tq_wait_waitq.lock, wflags);
281 spin_unlock_irqrestore(&tq->tq_lock, flags);
282 return (0);
283 }
284
285 /* unlock the waitq quickly */
286 if (!lheads[LHEAD_WAIT])
287 spin_unlock_irqrestore(&tq->tq_wait_waitq.lock, wflags);
288
289 /* show the base taskq contents */
290 snprintf(name, sizeof(name), "%s/%d", tq->tq_name, tq->tq_instance);
291 seq_printf(f, "%-25s ", name);
292 seq_printf(f, "%5d %5d %5d %5d %5d %5d %12d %5d %10x\n",
293 tq->tq_nactive, tq->tq_nthreads, tq->tq_nspawn,
294 tq->tq_maxthreads, tq->tq_pri, tq->tq_minalloc, tq->tq_maxalloc,
295 tq->tq_nalloc, tq->tq_flags);
296
297 /* show the active list */
298 if (lheads[LHEAD_ACTIVE]) {
299 j = 0;
300 list_for_each_entry(tqt, &tq->tq_active_list, tqt_active_list) {
301 if (j == 0)
302 seq_printf(f, "\t%s:", list_names[LHEAD_ACTIVE]);
303 else if (j == 2) {
304 seq_printf(f, "\n\t ");
305 j = 0;
306 }
307 seq_printf(f, " [%d]%pf(%ps)",
308 tqt->tqt_thread->pid,
309 tqt->tqt_task->tqent_func,
310 tqt->tqt_task->tqent_arg);
311 ++j;
312 }
313 seq_printf(f, "\n");
314 }
315
316 for (i = LHEAD_PEND; i <= LHEAD_WAIT; ++i)
317 if (lheads[i]) {
318 j = 0;
319 list_for_each(lh, lheads[i]) {
320 if (spl_max_show_tasks != 0 &&
321 j >= spl_max_show_tasks) {
322 seq_printf(f, "\n\t(truncated)");
323 break;
324 }
325 /* show the wait waitq list */
326 if (i == LHEAD_WAIT) {
327#ifdef HAVE_WAIT_QUEUE_HEAD_ENTRY
328 wq = list_entry(lh,
329 spl_wait_queue_entry_t, entry);
330#else
331 wq = list_entry(lh,
332 spl_wait_queue_entry_t, task_list);
333#endif
334 if (j == 0)
335 seq_printf(f, "\t%s:",
336 list_names[i]);
337 else if (j % 8 == 0)
338 seq_printf(f, "\n\t ");
339
340 tsk = wq->private;
341 seq_printf(f, " %d", tsk->pid);
342 /* pend, prio and delay lists */
343 } else {
344 tqe = list_entry(lh, taskq_ent_t,
345 tqent_list);
346 if (j == 0)
347 seq_printf(f, "\t%s:",
348 list_names[i]);
349 else if (j % 2 == 0)
350 seq_printf(f, "\n\t ");
351
352 seq_printf(f, " %pf(%ps)",
353 tqe->tqent_func,
354 tqe->tqent_arg);
355 }
356 ++j;
357 }
358 seq_printf(f, "\n");
359 }
360 if (lheads[LHEAD_WAIT])
361 spin_unlock_irqrestore(&tq->tq_wait_waitq.lock, wflags);
362 spin_unlock_irqrestore(&tq->tq_lock, flags);
363
364 return (0);
365}
366
367static int
368taskq_all_seq_show(struct seq_file *f, void *p)
369{
370 return (taskq_seq_show_impl(f, p, B_TRUE));
371}
372
373static int
374taskq_seq_show(struct seq_file *f, void *p)
375{
376 return (taskq_seq_show_impl(f, p, B_FALSE));
377}
378
379static void *
380taskq_seq_start(struct seq_file *f, loff_t *pos)
381{
382 struct list_head *p;
383 loff_t n = *pos;
384
385 down_read(&tq_list_sem);
386 if (!n)
387 taskq_seq_show_headers(f);
388
389 p = tq_list.next;
390 while (n--) {
391 p = p->next;
392 if (p == &tq_list)
393 return (NULL);
394 }
395
396 return (list_entry(p, taskq_t, tq_taskqs));
397}
398
399static void *
400taskq_seq_next(struct seq_file *f, void *p, loff_t *pos)
401{
402 taskq_t *tq = p;
403
404 ++*pos;
405 return ((tq->tq_taskqs.next == &tq_list) ?
406 NULL : list_entry(tq->tq_taskqs.next, taskq_t, tq_taskqs));
407}
408
/* Emit the two-row column header for /proc/spl/kmem/slab. */
static void
slab_seq_show_headers(struct seq_file *f)
{
	seq_printf(f,
	    "--------------------- cache ----------"
	    "--------------------------------------------- "
	    "----- slab ------ "
	    "---- object ----- "
	    "--- emergency ---\n");
	seq_printf(f,
	    "name "
	    " flags size alloc slabsize objsize "
	    "total alloc max "
	    "total alloc max "
	    "dlock alloc max\n");
}
425
426static int
427slab_seq_show(struct seq_file *f, void *p)
428{
242f539a 429 spl_kmem_cache_t *skc = p;
ff449ac4 430
242f539a 431 ASSERT(skc->skc_magic == SKC_MAGIC);
ff449ac4 432
9e4fb5c2
LG
433 /*
434 * Backed by Linux slab see /proc/slabinfo.
435 */
436 if (skc->skc_flags & KMC_SLAB)
437 return (0);
438
242f539a 439 spin_lock(&skc->skc_lock);
d0a1038f
BB
440 seq_printf(f, "%-36s ", skc->skc_name);
441 seq_printf(f, "0x%05lx %9lu %9lu %8u %8u "
165f13c3 442 "%5lu %5lu %5lu %5lu %5lu %5lu %5lu %5lu %5lu\n",
d0a1038f
BB
443 (long unsigned)skc->skc_flags,
444 (long unsigned)(skc->skc_slab_size * skc->skc_slab_total),
445 (long unsigned)(skc->skc_obj_size * skc->skc_obj_alloc),
446 (unsigned)skc->skc_slab_size,
447 (unsigned)skc->skc_obj_size,
448 (long unsigned)skc->skc_slab_total,
449 (long unsigned)skc->skc_slab_alloc,
450 (long unsigned)skc->skc_slab_max,
451 (long unsigned)skc->skc_obj_total,
452 (long unsigned)skc->skc_obj_alloc,
e2dcc6e2 453 (long unsigned)skc->skc_obj_max,
165f13c3 454 (long unsigned)skc->skc_obj_deadlock,
e2dcc6e2
BB
455 (long unsigned)skc->skc_obj_emergency,
456 (long unsigned)skc->skc_obj_emergency_max);
242f539a
BB
457
458 spin_unlock(&skc->skc_lock);
ff449ac4 459
460 return 0;
461}
462
463static void *
464slab_seq_start(struct seq_file *f, loff_t *pos)
465{
466 struct list_head *p;
467 loff_t n = *pos;
ff449ac4 468
469 down_read(&spl_kmem_cache_sem);
470 if (!n)
471 slab_seq_show_headers(f);
472
473 p = spl_kmem_cache_list.next;
474 while (n--) {
475 p = p->next;
476 if (p == &spl_kmem_cache_list)
10946b02 477 return (NULL);
ff449ac4 478 }
479
10946b02 480 return (list_entry(p, spl_kmem_cache_t, skc_list));
ff449ac4 481}
482
483static void *
484slab_seq_next(struct seq_file *f, void *p, loff_t *pos)
485{
486 spl_kmem_cache_t *skc = p;
ff449ac4 487
488 ++*pos;
10946b02 489 return ((skc->skc_list.next == &spl_kmem_cache_list) ?
3977f837 490 NULL : list_entry(skc->skc_list.next,spl_kmem_cache_t,skc_list));
ff449ac4 491}
492
493static void
494slab_seq_stop(struct seq_file *f, void *v)
495{
496 up_read(&spl_kmem_cache_sem);
497}
498
499static struct seq_operations slab_seq_ops = {
500 .show = slab_seq_show,
501 .start = slab_seq_start,
502 .next = slab_seq_next,
503 .stop = slab_seq_stop,
504};
505
506static int
507proc_slab_open(struct inode *inode, struct file *filp)
508{
509 return seq_open(filp, &slab_seq_ops);
510}
511
512static struct file_operations proc_slab_operations = {
513 .open = proc_slab_open,
514 .read = seq_read,
515 .llseek = seq_lseek,
516 .release = seq_release,
517};
ff449ac4 518
ec06701b
AX
519static void
520taskq_seq_stop(struct seq_file *f, void *v)
521{
522 up_read(&tq_list_sem);
523}
524
525static struct seq_operations taskq_all_seq_ops = {
526 .show = taskq_all_seq_show,
527 .start = taskq_seq_start,
528 .next = taskq_seq_next,
529 .stop = taskq_seq_stop,
530};
531
532static struct seq_operations taskq_seq_ops = {
533 .show = taskq_seq_show,
534 .start = taskq_seq_start,
535 .next = taskq_seq_next,
536 .stop = taskq_seq_stop,
537};
538
539static int
540proc_taskq_all_open(struct inode *inode, struct file *filp)
541{
542 return seq_open(filp, &taskq_all_seq_ops);
543}
544
545static int
546proc_taskq_open(struct inode *inode, struct file *filp)
547{
548 return seq_open(filp, &taskq_seq_ops);
549}
550
551static struct file_operations proc_taskq_all_operations = {
552 .open = proc_taskq_all_open,
553 .read = seq_read,
554 .llseek = seq_lseek,
555 .release = seq_release,
556};
557
558static struct file_operations proc_taskq_operations = {
559 .open = proc_taskq_open,
560 .read = seq_read,
561 .llseek = seq_lseek,
562 .release = seq_release,
563};
564
9ab1ac14 565static struct ctl_table spl_kmem_table[] = {
f6188ddd 566#ifdef DEBUG_KMEM
57d1b188 567 {
57d1b188 568 .procname = "kmem_used",
569 .data = &kmem_alloc_used,
d04c8a56 570# ifdef HAVE_ATOMIC64_T
57d1b188 571 .maxlen = sizeof(atomic64_t),
d04c8a56
BB
572# else
573 .maxlen = sizeof(atomic_t),
574# endif /* HAVE_ATOMIC64_T */
57d1b188 575 .mode = 0444,
d04c8a56 576 .proc_handler = &proc_domemused,
57d1b188 577 },
578 {
57d1b188 579 .procname = "kmem_max",
580 .data = &kmem_alloc_max,
581 .maxlen = sizeof(unsigned long),
582 .extra1 = &table_min,
583 .extra2 = &table_max,
584 .mode = 0444,
585 .proc_handler = &proc_doulongvec_minmax,
586 },
f6188ddd 587#endif /* DEBUG_KMEM */
57d1b188 588 {
3336e29c
BB
589 .procname = "slab_kmem_total",
590 .data = (void *)(KMC_KMEM | KMC_TOTAL),
591 .maxlen = sizeof(unsigned long),
592 .extra1 = &table_min,
593 .extra2 = &table_max,
594 .mode = 0444,
595 .proc_handler = &proc_doslab,
596 },
597 {
3336e29c
BB
598 .procname = "slab_kmem_alloc",
599 .data = (void *)(KMC_KMEM | KMC_ALLOC),
600 .maxlen = sizeof(unsigned long),
601 .extra1 = &table_min,
602 .extra2 = &table_max,
603 .mode = 0444,
604 .proc_handler = &proc_doslab,
605 },
606 {
3336e29c
BB
607 .procname = "slab_kmem_max",
608 .data = (void *)(KMC_KMEM | KMC_MAX),
609 .maxlen = sizeof(unsigned long),
610 .extra1 = &table_min,
611 .extra2 = &table_max,
612 .mode = 0444,
613 .proc_handler = &proc_doslab,
614 },
615 {
3336e29c
BB
616 .procname = "slab_vmem_total",
617 .data = (void *)(KMC_VMEM | KMC_TOTAL),
618 .maxlen = sizeof(unsigned long),
619 .extra1 = &table_min,
620 .extra2 = &table_max,
621 .mode = 0444,
622 .proc_handler = &proc_doslab,
623 },
624 {
3336e29c
BB
625 .procname = "slab_vmem_alloc",
626 .data = (void *)(KMC_VMEM | KMC_ALLOC),
627 .maxlen = sizeof(unsigned long),
628 .extra1 = &table_min,
629 .extra2 = &table_max,
630 .mode = 0444,
631 .proc_handler = &proc_doslab,
632 },
633 {
3336e29c
BB
634 .procname = "slab_vmem_max",
635 .data = (void *)(KMC_VMEM | KMC_MAX),
636 .maxlen = sizeof(unsigned long),
637 .extra1 = &table_min,
638 .extra2 = &table_max,
639 .mode = 0444,
640 .proc_handler = &proc_doslab,
641 },
ec06701b 642 {},
9ab1ac14 643};
04a479f7 644
04a479f7 645static struct ctl_table spl_kstat_table[] = {
ec06701b 646 {},
04a479f7 647};
9ab1ac14 648
649static struct ctl_table spl_table[] = {
650 /* NB No .strategy entries have been provided since
651 * sysctl(8) prefers to go via /proc for portability.
652 */
653 {
9ab1ac14 654 .procname = "version",
655 .data = spl_version,
656 .maxlen = sizeof(spl_version),
657 .mode = 0444,
658 .proc_handler = &proc_dostring,
659 },
57d1b188 660 {
57d1b188 661 .procname = "hostid",
662 .data = &spl_hostid,
663 .maxlen = sizeof(unsigned long),
664 .mode = 0644,
665 .proc_handler = &proc_dohostid,
666 },
9ab1ac14 667 {
9ab1ac14 668 .procname = "kmem",
669 .mode = 0555,
670 .child = spl_kmem_table,
671 },
04a479f7 672 {
04a479f7 673 .procname = "kstat",
674 .mode = 0555,
675 .child = spl_kstat_table,
676 },
ec06701b 677 {},
57d1b188 678};
679
9ab1ac14 680static struct ctl_table spl_dir[] = {
57d1b188 681 {
57d1b188 682 .procname = "spl",
683 .mode = 0555,
684 .child = spl_table,
685 },
ec06701b 686 {}
57d86234 687};
688
689static struct ctl_table spl_root[] = {
690 {
10946b02
AX
691#ifdef HAVE_CTL_NAME
692 .ctl_name = CTL_KERN,
693#endif
57d86234 694 .procname = "kernel",
695 .mode = 0555,
696 .child = spl_dir,
697 },
ec06701b 698 {}
57d1b188 699};
700
701int
1114ae6a 702spl_proc_init(void)
57d1b188 703{
404992e3 704 int rc = 0;
57d1b188 705
10946b02 706 spl_header = register_sysctl_table(spl_root);
57d1b188 707 if (spl_header == NULL)
10946b02 708 return (-EUNATCH);
9ab1ac14 709
c30df9c8 710 proc_spl = proc_mkdir("spl", NULL);
10946b02
AX
711 if (proc_spl == NULL) {
712 rc = -EUNATCH;
713 goto out;
714 }
404992e3 715
ec06701b
AX
716 proc_spl_taskq_all = proc_create_data("taskq-all", 0444,
717 proc_spl, &proc_taskq_all_operations, NULL);
718 if (proc_spl_taskq_all == NULL) {
719 rc = -EUNATCH;
720 goto out;
721 }
722
723 proc_spl_taskq = proc_create_data("taskq", 0444,
724 proc_spl, &proc_taskq_operations, NULL);
725 if (proc_spl_taskq == NULL) {
726 rc = -EUNATCH;
727 goto out;
728 }
729
c30df9c8 730 proc_spl_kmem = proc_mkdir("kmem", proc_spl);
10946b02
AX
731 if (proc_spl_kmem == NULL) {
732 rc = -EUNATCH;
733 goto out;
734 }
ff449ac4 735
80093b6f
AX
736 proc_spl_kmem_slab = proc_create_data("slab", 0444,
737 proc_spl_kmem, &proc_slab_operations, NULL);
10946b02
AX
738 if (proc_spl_kmem_slab == NULL) {
739 rc = -EUNATCH;
740 goto out;
741 }
ff449ac4 742
c30df9c8 743 proc_spl_kstat = proc_mkdir("kstat", proc_spl);
10946b02
AX
744 if (proc_spl_kstat == NULL) {
745 rc = -EUNATCH;
746 goto out;
747 }
404992e3 748out:
c30df9c8 749 if (rc) {
750 remove_proc_entry("kstat", proc_spl);
ff449ac4 751 remove_proc_entry("slab", proc_spl_kmem);
c30df9c8 752 remove_proc_entry("kmem", proc_spl);
ec06701b
AX
753 remove_proc_entry("taskq-all", proc_spl);
754 remove_proc_entry("taskq", proc_spl);
a02118a8 755 remove_proc_entry("spl", NULL);
10946b02 756 unregister_sysctl_table(spl_header);
c30df9c8 757 }
c30df9c8 758
10946b02 759 return (rc);
57d1b188 760}
761
762void
1114ae6a 763spl_proc_fini(void)
57d1b188 764{
c30df9c8 765 remove_proc_entry("kstat", proc_spl);
ff449ac4 766 remove_proc_entry("slab", proc_spl_kmem);
c30df9c8 767 remove_proc_entry("kmem", proc_spl);
ec06701b
AX
768 remove_proc_entry("taskq-all", proc_spl);
769 remove_proc_entry("taskq", proc_spl);
a02118a8 770 remove_proc_entry("spl", NULL);
c30df9c8 771
57d1b188 772 ASSERT(spl_header != NULL);
10946b02 773 unregister_sysctl_table(spl_header);
57d1b188 774}