/*
 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2007 The Regents of the University of California.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 * UCRL-CODE-235197
 *
 * This file is part of the SPL, Solaris Porting Layer.
 * For details, see <http://zfsonlinux.org/>.
 *
 * The SPL is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * The SPL is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Solaris Porting Layer (SPL) Proc Implementation.
 */

#include <sys/systeminfo.h>
#include <sys/kstat.h>
#include <sys/kmem.h>
#include <sys/kmem_cache.h>
#include <sys/vmem.h>
#include <sys/taskq.h>
#include <linux/ctype.h>
#include <linux/kmod.h>
#include <linux/seq_file.h>
#include <linux/proc_compat.h>
#include <linux/uaccess.h>
#include <linux/version.h>

#if defined(CONSTIFY_PLUGIN) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
typedef struct ctl_table __no_const spl_ctl_table;
#else
typedef struct ctl_table spl_ctl_table;
#endif

static unsigned long table_min = 0;
static unsigned long table_max = ~0;

static struct ctl_table_header *spl_header = NULL;
static struct proc_dir_entry *proc_spl = NULL;
static struct proc_dir_entry *proc_spl_kmem = NULL;
static struct proc_dir_entry *proc_spl_kmem_slab = NULL;
static struct proc_dir_entry *proc_spl_taskq_all = NULL;
static struct proc_dir_entry *proc_spl_taskq = NULL;
struct proc_dir_entry *proc_spl_kstat = NULL;

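/*
 * Copy a string from user space into 'kbuffer', strip any trailing
 * whitespace, and NUL terminate the result.  Returns -EOVERFLOW if the
 * string cannot fit or cannot be terminated, -EFAULT on a failed copy,
 * and -EINVAL for an empty string.
 */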
static int
proc_copyin_string(char *kbuffer, int kbuffer_size, const char *ubuffer,
    int ubuffer_size)
{
	int size;

	if (ubuffer_size > kbuffer_size)
		return (-EOVERFLOW);

	if (copy_from_user((void *)kbuffer, (void *)ubuffer, ubuffer_size))
		return (-EFAULT);

	/* strip trailing whitespace */
	size = strnlen(kbuffer, ubuffer_size);
	while (size-- >= 0)
		if (!isspace(kbuffer[size]))
			break;

	/* empty string */
	if (size < 0)
		return (-EINVAL);

	/* no space to terminate */
	if (size == kbuffer_size)
		return (-EOVERFLOW);

	kbuffer[size + 1] = 0;
	return (0);
}

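/*
 * Copy a kernel string out to user space, optionally appending a single
 * character (typically "\n" for /proc reads).  Returns the number of
 * bytes written or -EFAULT on a failed copy.
 */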
static int
proc_copyout_string(char *ubuffer, int ubuffer_size, const char *kbuffer,
    char *append)
{
	/*
	 * NB if 'append' != NULL, it's a single character to append to the
	 * copied out string - usually "\n", for /proc entries and
	 * "" (i.e. a terminating zero byte) for sysctl entries
	 */
	int size = MIN(strlen(kbuffer), ubuffer_size);

	if (copy_to_user(ubuffer, kbuffer, size))
		return (-EFAULT);

	if (append != NULL && size < ubuffer_size) {
		if (copy_to_user(ubuffer + size, append, 1))
			return (-EFAULT);

		size++;
	}

	return (size);
}

#ifdef DEBUG_KMEM
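/*
 * Read handler for the kmem_used sysctl.  The atomic counter referenced
 * by table->data is sampled into a local unsigned long and reported via
 * proc_doulongvec_minmax(); writes are consumed without effect.
 */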
static int
proc_domemused(struct ctl_table *table, int write,
    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int rc = 0;
	unsigned long min = 0, max = ~0, val;
	spl_ctl_table dummy = *table;

	dummy.data = &val;
	dummy.proc_handler = &proc_dointvec;
	dummy.extra1 = &min;
	dummy.extra2 = &max;

	if (write) {
		*ppos += *lenp;
	} else {
#ifdef HAVE_ATOMIC64_T
		val = atomic64_read((atomic64_t *)table->data);
#else
		val = atomic_read((atomic_t *)table->data);
#endif /* HAVE_ATOMIC64_T */
		rc = proc_doulongvec_minmax(&dummy, write, buffer, lenp, ppos);
	}

	return (rc);
}
#endif /* DEBUG_KMEM */

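/*
 * Read handler for the slab_* sysctls.  The KMC_* mask encoded in
 * table->data selects both the slab type (kmem or vmem backed) and the
 * statistic (total, allocated, or maximum) to sum across all caches on
 * spl_kmem_cache_list.
 */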
static int
proc_doslab(struct ctl_table *table, int write,
    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int rc = 0;
	unsigned long min = 0, max = ~0, val = 0, mask;
	spl_ctl_table dummy = *table;
	spl_kmem_cache_t *skc;

	dummy.data = &val;
	dummy.proc_handler = &proc_dointvec;
	dummy.extra1 = &min;
	dummy.extra2 = &max;

	if (write) {
		*ppos += *lenp;
	} else {
		down_read(&spl_kmem_cache_sem);
		mask = (unsigned long)table->data;

		list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {

			/* Only use slabs of the correct kmem/vmem type */
			if (!(skc->skc_flags & mask))
				continue;

			/* Sum the specified field for selected slabs */
			switch (mask & (KMC_TOTAL | KMC_ALLOC | KMC_MAX)) {
			case KMC_TOTAL:
				val += skc->skc_slab_size * skc->skc_slab_total;
				break;
			case KMC_ALLOC:
				val += skc->skc_obj_size * skc->skc_obj_alloc;
				break;
			case KMC_MAX:
				val += skc->skc_obj_size * skc->skc_obj_max;
				break;
			}
		}

		up_read(&spl_kmem_cache_sem);
		rc = proc_doulongvec_minmax(&dummy, write, buffer, lenp, ppos);
	}

	return (rc);
}

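/*
 * Read/write handler for the hostid sysctl.  Writes are parsed as a hex
 * value with no leading "0x"; reads format the current hostid followed
 * by a newline.
 */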
static int
proc_dohostid(struct ctl_table *table, int write,
    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int len, rc = 0;
	char *end, str[32];

	if (write) {
		/*
		 * We can't use proc_doulongvec_minmax() in the write
		 * case here because hostid while a hex value has no
		 * leading 0x which confuses the helper function.
		 */
		rc = proc_copyin_string(str, sizeof (str), buffer, *lenp);
		if (rc < 0)
			return (rc);

		spl_hostid = simple_strtoul(str, &end, 16);
		if (str == end)
			return (-EINVAL);

	} else {
		len = snprintf(str, sizeof (str), "%lx",
		    (unsigned long) zone_get_hostid(NULL));
		if (*ppos >= len)
			rc = 0;
		else
			rc = proc_copyout_string(buffer,
			    *lenp, str + *ppos, "\n");

		if (rc >= 0) {
			*lenp = rc;
			*ppos += rc;
		}
	}

	return (rc);
}

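/*
 * Column headers for /proc/spl/taskq and /proc/spl/taskq-all: taskq name
 * and instance, active threads, total threads, threads being spawned,
 * maximum threads, priority, minimum/maximum/current allocated entries,
 * and the taskq flags.
 */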
static void
taskq_seq_show_headers(struct seq_file *f)
{
	seq_printf(f, "%-25s %5s %5s %5s %5s %5s %5s %12s %5s %10s\n",
	    "taskq", "act", "nthr", "spwn", "maxt", "pri",
	    "mina", "maxa", "cura", "flags");
}

/* indices into the lheads array below */
#define	LHEAD_PEND	0
#define	LHEAD_PRIO	1
#define	LHEAD_DELAY	2
#define	LHEAD_WAIT	3
#define	LHEAD_ACTIVE	4
#define	LHEAD_SIZE	5

/* BEGIN CSTYLED */
static unsigned int spl_max_show_tasks = 512;
module_param(spl_max_show_tasks, uint, 0644);
MODULE_PARM_DESC(spl_max_show_tasks, "Max number of tasks shown in taskq proc");
/* END CSTYLED */

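/*
 * Emit one taskq's state to the seq_file.  With allflag set (taskq-all)
 * every taskq is shown; otherwise taskqs with no pending, priority,
 * delay, wait, or active entries are skipped.  Both the taskq lock and
 * the wait queue lock are taken while the lists are walked.
 */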
static int
taskq_seq_show_impl(struct seq_file *f, void *p, boolean_t allflag)
{
	taskq_t *tq = p;
	taskq_thread_t *tqt;
	spl_wait_queue_entry_t *wq;
	struct task_struct *tsk;
	taskq_ent_t *tqe;
	char name[100];
	struct list_head *lheads[LHEAD_SIZE], *lh;
	static char *list_names[LHEAD_SIZE] =
	    {"pend", "prio", "delay", "wait", "active" };
	int i, j, have_lheads = 0;
	unsigned long wflags, flags;

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	spin_lock_irqsave(&tq->tq_wait_waitq.lock, wflags);

	/* get the various lists and check whether they're empty */
	lheads[LHEAD_PEND] = &tq->tq_pend_list;
	lheads[LHEAD_PRIO] = &tq->tq_prio_list;
	lheads[LHEAD_DELAY] = &tq->tq_delay_list;
#ifdef HAVE_WAIT_QUEUE_HEAD_ENTRY
	lheads[LHEAD_WAIT] = &tq->tq_wait_waitq.head;
#else
	lheads[LHEAD_WAIT] = &tq->tq_wait_waitq.task_list;
#endif
	lheads[LHEAD_ACTIVE] = &tq->tq_active_list;

	for (i = 0; i < LHEAD_SIZE; ++i) {
		if (list_empty(lheads[i]))
			lheads[i] = NULL;
		else
			++have_lheads;
	}

	/* early return in non-"all" mode if lists are all empty */
	if (!allflag && !have_lheads) {
		spin_unlock_irqrestore(&tq->tq_wait_waitq.lock, wflags);
		spin_unlock_irqrestore(&tq->tq_lock, flags);
		return (0);
	}

	/* unlock the waitq quickly */
	if (!lheads[LHEAD_WAIT])
		spin_unlock_irqrestore(&tq->tq_wait_waitq.lock, wflags);

	/* show the base taskq contents */
	snprintf(name, sizeof (name), "%s/%d", tq->tq_name, tq->tq_instance);
	seq_printf(f, "%-25s ", name);
	seq_printf(f, "%5d %5d %5d %5d %5d %5d %12d %5d %10x\n",
	    tq->tq_nactive, tq->tq_nthreads, tq->tq_nspawn,
	    tq->tq_maxthreads, tq->tq_pri, tq->tq_minalloc, tq->tq_maxalloc,
	    tq->tq_nalloc, tq->tq_flags);

	/* show the active list */
	if (lheads[LHEAD_ACTIVE]) {
		j = 0;
		list_for_each_entry(tqt, &tq->tq_active_list, tqt_active_list) {
			if (j == 0)
				seq_printf(f, "\t%s:",
				    list_names[LHEAD_ACTIVE]);
			else if (j == 2) {
				seq_printf(f, "\n\t ");
				j = 0;
			}
			seq_printf(f, " [%d]%pf(%ps)",
			    tqt->tqt_thread->pid,
			    tqt->tqt_task->tqent_func,
			    tqt->tqt_task->tqent_arg);
			++j;
		}
		seq_printf(f, "\n");
	}

	for (i = LHEAD_PEND; i <= LHEAD_WAIT; ++i)
		if (lheads[i]) {
			j = 0;
			list_for_each(lh, lheads[i]) {
				if (spl_max_show_tasks != 0 &&
				    j >= spl_max_show_tasks) {
					seq_printf(f, "\n\t(truncated)");
					break;
				}
				/* show the wait waitq list */
				if (i == LHEAD_WAIT) {
#ifdef HAVE_WAIT_QUEUE_HEAD_ENTRY
					wq = list_entry(lh,
					    spl_wait_queue_entry_t, entry);
#else
					wq = list_entry(lh,
					    spl_wait_queue_entry_t, task_list);
#endif
					if (j == 0)
						seq_printf(f, "\t%s:",
						    list_names[i]);
					else if (j % 8 == 0)
						seq_printf(f, "\n\t ");

					tsk = wq->private;
					seq_printf(f, " %d", tsk->pid);
				/* pend, prio and delay lists */
				} else {
					tqe = list_entry(lh, taskq_ent_t,
					    tqent_list);
					if (j == 0)
						seq_printf(f, "\t%s:",
						    list_names[i]);
					else if (j % 2 == 0)
						seq_printf(f, "\n\t ");

					seq_printf(f, " %pf(%ps)",
					    tqe->tqent_func,
					    tqe->tqent_arg);
				}
				++j;
			}
			seq_printf(f, "\n");
		}
	if (lheads[LHEAD_WAIT])
		spin_unlock_irqrestore(&tq->tq_wait_waitq.lock, wflags);
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	return (0);
}

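/*
 * seq_file glue: taskq-all shows every taskq, taskq only those with
 * queued or active work.  The iterator walks tq_list under tq_list_sem,
 * which is held from ->start until ->stop.
 */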
static int
taskq_all_seq_show(struct seq_file *f, void *p)
{
	return (taskq_seq_show_impl(f, p, B_TRUE));
}

static int
taskq_seq_show(struct seq_file *f, void *p)
{
	return (taskq_seq_show_impl(f, p, B_FALSE));
}

static void *
taskq_seq_start(struct seq_file *f, loff_t *pos)
{
	struct list_head *p;
	loff_t n = *pos;

	down_read(&tq_list_sem);
	if (!n)
		taskq_seq_show_headers(f);

	p = tq_list.next;
	while (n--) {
		p = p->next;
		if (p == &tq_list)
			return (NULL);
	}

	return (list_entry(p, taskq_t, tq_taskqs));
}

static void *
taskq_seq_next(struct seq_file *f, void *p, loff_t *pos)
{
	taskq_t *tq = p;

	++*pos;
	return ((tq->tq_taskqs.next == &tq_list) ?
	    NULL : list_entry(tq->tq_taskqs.next, taskq_t, tq_taskqs));
}

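/*
 * /proc/spl/kmem/slab reports one row per SPL-managed cache: the cache
 * flags, total slab memory, allocated object memory, the slab and object
 * sizes, and the total/alloc/max counters for slabs, objects, and
 * emergency objects.
 */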
static void
slab_seq_show_headers(struct seq_file *f)
{
	seq_printf(f,
	    "--------------------- cache ----------"
	    "--------------------------------------------- "
	    "----- slab ------ "
	    "---- object ----- "
	    "--- emergency ---\n");
	seq_printf(f,
	    "name "
	    " flags size alloc slabsize objsize "
	    "total alloc max "
	    "total alloc max "
	    "dlock alloc max\n");
}

static int
slab_seq_show(struct seq_file *f, void *p)
{
	spl_kmem_cache_t *skc = p;

	ASSERT(skc->skc_magic == SKC_MAGIC);

	/*
	 * Backed by Linux slab see /proc/slabinfo.
	 */
	if (skc->skc_flags & KMC_SLAB)
		return (0);

	spin_lock(&skc->skc_lock);
	seq_printf(f, "%-36s ", skc->skc_name);
	seq_printf(f, "0x%05lx %9lu %9lu %8u %8u "
	    "%5lu %5lu %5lu %5lu %5lu %5lu %5lu %5lu %5lu\n",
	    (long unsigned)skc->skc_flags,
	    (long unsigned)(skc->skc_slab_size * skc->skc_slab_total),
	    (long unsigned)(skc->skc_obj_size * skc->skc_obj_alloc),
	    (unsigned)skc->skc_slab_size,
	    (unsigned)skc->skc_obj_size,
	    (long unsigned)skc->skc_slab_total,
	    (long unsigned)skc->skc_slab_alloc,
	    (long unsigned)skc->skc_slab_max,
	    (long unsigned)skc->skc_obj_total,
	    (long unsigned)skc->skc_obj_alloc,
	    (long unsigned)skc->skc_obj_max,
	    (long unsigned)skc->skc_obj_deadlock,
	    (long unsigned)skc->skc_obj_emergency,
	    (long unsigned)skc->skc_obj_emergency_max);

	spin_unlock(&skc->skc_lock);

	return (0);
}

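/*
 * Iterate over spl_kmem_cache_list for the seq_file interface; the
 * spl_kmem_cache_sem read lock is held from ->start until ->stop so the
 * list cannot change while it is being walked.
 */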
static void *
slab_seq_start(struct seq_file *f, loff_t *pos)
{
	struct list_head *p;
	loff_t n = *pos;

	down_read(&spl_kmem_cache_sem);
	if (!n)
		slab_seq_show_headers(f);

	p = spl_kmem_cache_list.next;
	while (n--) {
		p = p->next;
		if (p == &spl_kmem_cache_list)
			return (NULL);
	}

	return (list_entry(p, spl_kmem_cache_t, skc_list));
}

static void *
slab_seq_next(struct seq_file *f, void *p, loff_t *pos)
{
	spl_kmem_cache_t *skc = p;

	++*pos;
	return ((skc->skc_list.next == &spl_kmem_cache_list) ?
	    NULL : list_entry(skc->skc_list.next, spl_kmem_cache_t, skc_list));
}

static void
slab_seq_stop(struct seq_file *f, void *v)
{
	up_read(&spl_kmem_cache_sem);
}

static struct seq_operations slab_seq_ops = {
	.show = slab_seq_show,
	.start = slab_seq_start,
	.next = slab_seq_next,
	.stop = slab_seq_stop,
};

static int
proc_slab_open(struct inode *inode, struct file *filp)
{
	return (seq_open(filp, &slab_seq_ops));
}

static struct file_operations proc_slab_operations = {
	.open = proc_slab_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static void
taskq_seq_stop(struct seq_file *f, void *v)
{
	up_read(&tq_list_sem);
}

static struct seq_operations taskq_all_seq_ops = {
	.show = taskq_all_seq_show,
	.start = taskq_seq_start,
	.next = taskq_seq_next,
	.stop = taskq_seq_stop,
};

static struct seq_operations taskq_seq_ops = {
	.show = taskq_seq_show,
	.start = taskq_seq_start,
	.next = taskq_seq_next,
	.stop = taskq_seq_stop,
};

static int
proc_taskq_all_open(struct inode *inode, struct file *filp)
{
	return (seq_open(filp, &taskq_all_seq_ops));
}

static int
proc_taskq_open(struct inode *inode, struct file *filp)
{
	return (seq_open(filp, &taskq_seq_ops));
}

static struct file_operations proc_taskq_all_operations = {
	.open = proc_taskq_all_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static struct file_operations proc_taskq_operations = {
	.open = proc_taskq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

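/*
 * sysctl entries published under kernel.spl.kmem.  The slab_* entries
 * pass a KMC_* mask through .data so proc_doslab() knows which slab type
 * and statistic to report; kmem_used/kmem_max are only built when
 * DEBUG_KMEM accounting is enabled.
 */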
static struct ctl_table spl_kmem_table[] = {
#ifdef DEBUG_KMEM
	{
		.procname = "kmem_used",
		.data = &kmem_alloc_used,
#ifdef HAVE_ATOMIC64_T
		.maxlen = sizeof (atomic64_t),
#else
		.maxlen = sizeof (atomic_t),
#endif /* HAVE_ATOMIC64_T */
		.mode = 0444,
		.proc_handler = &proc_domemused,
	},
	{
		.procname = "kmem_max",
		.data = &kmem_alloc_max,
		.maxlen = sizeof (unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doulongvec_minmax,
	},
#endif /* DEBUG_KMEM */
	{
		.procname = "slab_kmem_total",
		.data = (void *)(KMC_KMEM | KMC_TOTAL),
		.maxlen = sizeof (unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doslab,
	},
	{
		.procname = "slab_kmem_alloc",
		.data = (void *)(KMC_KMEM | KMC_ALLOC),
		.maxlen = sizeof (unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doslab,
	},
	{
		.procname = "slab_kmem_max",
		.data = (void *)(KMC_KMEM | KMC_MAX),
		.maxlen = sizeof (unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doslab,
	},
	{
		.procname = "slab_vmem_total",
		.data = (void *)(KMC_VMEM | KMC_TOTAL),
		.maxlen = sizeof (unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doslab,
	},
	{
		.procname = "slab_vmem_alloc",
		.data = (void *)(KMC_VMEM | KMC_ALLOC),
		.maxlen = sizeof (unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doslab,
	},
	{
		.procname = "slab_vmem_max",
		.data = (void *)(KMC_VMEM | KMC_MAX),
		.maxlen = sizeof (unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doslab,
	},
	{},
};

static struct ctl_table spl_kstat_table[] = {
	{},
};

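/*
 * Top level kernel.spl sysctl entries: the module version string, the
 * hostid, and the kmem and kstat sub-tables above.
 */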
static struct ctl_table spl_table[] = {
	/*
	 * NB No .strategy entries have been provided since
	 * sysctl(8) prefers to go via /proc for portability.
	 */
	{
		.procname = "version",
		.data = spl_version,
		.maxlen = sizeof (spl_version),
		.mode = 0444,
		.proc_handler = &proc_dostring,
	},
	{
		.procname = "hostid",
		.data = &spl_hostid,
		.maxlen = sizeof (unsigned long),
		.mode = 0644,
		.proc_handler = &proc_dohostid,
	},
	{
		.procname = "kmem",
		.mode = 0555,
		.child = spl_kmem_table,
	},
	{
		.procname = "kstat",
		.mode = 0555,
		.child = spl_kstat_table,
	},
	{},
};

static struct ctl_table spl_dir[] = {
	{
		.procname = "spl",
		.mode = 0555,
		.child = spl_table,
	},
	{}
};

static struct ctl_table spl_root[] = {
	{
#ifdef HAVE_CTL_NAME
	.ctl_name = CTL_KERN,
#endif
	.procname = "kernel",
	.mode = 0555,
	.child = spl_dir,
	},
	{}
};

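/*
 * Register the kernel.spl sysctl tree and create the /proc/spl hierarchy:
 * taskq-all, taskq, kmem/slab, and the kstat directory used by the SPL
 * kstat implementation.  On any failure everything already created is
 * torn down and -EUNATCH is returned.
 *
 * Once registered the entries can be read in the usual way, for example:
 *   cat /proc/spl/taskq        (taskqs with queued or active work)
 *   cat /proc/spl/kmem/slab    (SPL slab cache statistics)
 */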
int
spl_proc_init(void)
{
	int rc = 0;

	spl_header = register_sysctl_table(spl_root);
	if (spl_header == NULL)
		return (-EUNATCH);

	proc_spl = proc_mkdir("spl", NULL);
	if (proc_spl == NULL) {
		rc = -EUNATCH;
		goto out;
	}

	proc_spl_taskq_all = proc_create_data("taskq-all", 0444, proc_spl,
	    &proc_taskq_all_operations, NULL);
	if (proc_spl_taskq_all == NULL) {
		rc = -EUNATCH;
		goto out;
	}

	proc_spl_taskq = proc_create_data("taskq", 0444, proc_spl,
	    &proc_taskq_operations, NULL);
	if (proc_spl_taskq == NULL) {
		rc = -EUNATCH;
		goto out;
	}

	proc_spl_kmem = proc_mkdir("kmem", proc_spl);
	if (proc_spl_kmem == NULL) {
		rc = -EUNATCH;
		goto out;
	}

	proc_spl_kmem_slab = proc_create_data("slab", 0444, proc_spl_kmem,
	    &proc_slab_operations, NULL);
	if (proc_spl_kmem_slab == NULL) {
		rc = -EUNATCH;
		goto out;
	}

	proc_spl_kstat = proc_mkdir("kstat", proc_spl);
	if (proc_spl_kstat == NULL) {
		rc = -EUNATCH;
		goto out;
	}
out:
	if (rc) {
		remove_proc_entry("kstat", proc_spl);
		remove_proc_entry("slab", proc_spl_kmem);
		remove_proc_entry("kmem", proc_spl);
		remove_proc_entry("taskq-all", proc_spl);
		remove_proc_entry("taskq", proc_spl);
		remove_proc_entry("spl", NULL);
		unregister_sysctl_table(spl_header);
	}

	return (rc);
}

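/*
 * Tear down everything created by spl_proc_init(): remove the /proc/spl
 * hierarchy and unregister the kernel.spl sysctl table.
 */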
void
spl_proc_fini(void)
{
	remove_proc_entry("kstat", proc_spl);
	remove_proc_entry("slab", proc_spl_kmem);
	remove_proc_entry("kmem", proc_spl);
	remove_proc_entry("taskq-all", proc_spl);
	remove_proc_entry("taskq", proc_spl);
	remove_proc_entry("spl", NULL);

	ASSERT(spl_header != NULL);
	unregister_sysctl_table(spl_header);
}
57d1b188 782}