/*
 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2007 The Regents of the University of California.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 * UCRL-CODE-235197
 *
 * This file is part of the SPL, Solaris Porting Layer.
 *
 * The SPL is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * The SPL is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
 *
 * Solaris Porting Layer (SPL) Proc Implementation.
 */

#include <sys/systeminfo.h>
#include <sys/kstat.h>
#include <sys/kmem.h>
#include <sys/kmem_cache.h>
#include <sys/vmem.h>
#include <sys/taskq.h>
#include <sys/proc.h>
#include <linux/ctype.h>
#include <linux/kmod.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/version.h>
#include "zfs_gitrev.h"

#if defined(CONSTIFY_PLUGIN) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
typedef struct ctl_table __no_const spl_ctl_table;
#else
typedef struct ctl_table spl_ctl_table;
#endif

static unsigned long table_min = 0;
static unsigned long table_max = ~0;

static struct ctl_table_header *spl_header = NULL;
static struct proc_dir_entry *proc_spl = NULL;
static struct proc_dir_entry *proc_spl_kmem = NULL;
static struct proc_dir_entry *proc_spl_kmem_slab = NULL;
static struct proc_dir_entry *proc_spl_taskq_all = NULL;
static struct proc_dir_entry *proc_spl_taskq = NULL;
struct proc_dir_entry *proc_spl_kstat = NULL;

#ifdef DEBUG_KMEM
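/*
 * Handler for the kmem_used sysctl: snapshot the atomic allocation
 * counter referenced by table->data into a local unsigned long and
 * report it through proc_doulongvec_minmax().  Writes are ignored.
 */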
static int
proc_domemused(struct ctl_table *table, int write,
    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int rc = 0;
	unsigned long val;
	spl_ctl_table dummy = *table;

	dummy.data = &val;
	dummy.proc_handler = &proc_dointvec;
	dummy.extra1 = &table_min;
	dummy.extra2 = &table_max;

	if (write) {
		*ppos += *lenp;
	} else {
#ifdef HAVE_ATOMIC64_T
		val = atomic64_read((atomic64_t *)table->data);
#else
		val = atomic_read((atomic_t *)table->data);
#endif /* HAVE_ATOMIC64_T */
		rc = proc_doulongvec_minmax(&dummy, write, buffer, lenp, ppos);
	}

	return (rc);
}
#endif /* DEBUG_KMEM */

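/*
 * Handler for the slab_kvmem_* sysctls: walk spl_kmem_cache_list under
 * spl_kmem_cache_sem and sum the slab or object sizes selected by the
 * KMC_* mask encoded in table->data.  Writes are ignored.
 */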
static int
proc_doslab(struct ctl_table *table, int write,
    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int rc = 0;
	unsigned long val = 0, mask;
	spl_ctl_table dummy = *table;
	spl_kmem_cache_t *skc = NULL;

	dummy.data = &val;
	dummy.proc_handler = &proc_dointvec;
	dummy.extra1 = &table_min;
	dummy.extra2 = &table_max;

	if (write) {
		*ppos += *lenp;
	} else {
		down_read(&spl_kmem_cache_sem);
		mask = (unsigned long)table->data;

		list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {

			/* Only use slabs of the correct kmem/vmem type */
			if (!(skc->skc_flags & mask))
				continue;

			/* Sum the specified field for selected slabs */
			switch (mask & (KMC_TOTAL | KMC_ALLOC | KMC_MAX)) {
			case KMC_TOTAL:
				val += skc->skc_slab_size * skc->skc_slab_total;
				break;
			case KMC_ALLOC:
				val += skc->skc_obj_size * skc->skc_obj_alloc;
				break;
			case KMC_MAX:
				val += skc->skc_obj_size * skc->skc_obj_max;
				break;
			}
		}

		up_read(&spl_kmem_cache_sem);
		rc = proc_doulongvec_minmax(&dummy, write, buffer, lenp, ppos);
	}

	return (rc);
}

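/*
 * Handler for the hostid sysctl: print the current hostid as unprefixed
 * hex on reads, and parse the same format back into spl_hostid on writes.
 */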
static int
proc_dohostid(struct ctl_table *table, int write,
    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	char *end, str[32];
	unsigned long hid;
	spl_ctl_table dummy = *table;

	dummy.data = str;
	dummy.maxlen = sizeof (str) - 1;

	if (!write)
		snprintf(str, sizeof (str), "%lx",
		    (unsigned long) zone_get_hostid(NULL));

	/* always returns 0 */
	proc_dostring(&dummy, write, buffer, lenp, ppos);

	if (write) {
		/*
		 * We can't use proc_doulongvec_minmax() in the write
		 * case here because hostid, while a hex value, has no
		 * leading 0x, which confuses the helper function.
		 */

		hid = simple_strtoul(str, &end, 16);
		if (str == end)
			return (-EINVAL);
		spl_hostid = hid;
	}

	return (0);
}

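/*
 * /proc/spl/taskq and /proc/spl/taskq-all support: print the fixed
 * column header that precedes the per-taskq lines generated below.
 */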
static void
taskq_seq_show_headers(struct seq_file *f)
{
	seq_printf(f, "%-25s %5s %5s %5s %5s %5s %5s %12s %5s %10s\n",
	    "taskq", "act", "nthr", "spwn", "maxt", "pri",
	    "mina", "maxa", "cura", "flags");
}

/* indices into the lheads array below */
#define	LHEAD_PEND	0
#define	LHEAD_PRIO	1
#define	LHEAD_DELAY	2
#define	LHEAD_WAIT	3
#define	LHEAD_ACTIVE	4
#define	LHEAD_SIZE	5

static unsigned int spl_max_show_tasks = 512;
/* CSTYLED */
module_param(spl_max_show_tasks, uint, 0644);
MODULE_PARM_DESC(spl_max_show_tasks, "Max number of tasks shown in taskq proc");

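/*
 * Print a single taskq.  With allflag set (the taskq-all file) every
 * taskq is shown; otherwise taskqs whose pend/prio/delay/wait/active
 * lists are all empty are skipped.  The taskq lock and the wait queue
 * lock are held while the lists are walked, and long lists are truncated
 * after spl_max_show_tasks entries.
 */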
static int
taskq_seq_show_impl(struct seq_file *f, void *p, boolean_t allflag)
{
	taskq_t *tq = p;
	taskq_thread_t *tqt = NULL;
	spl_wait_queue_entry_t *wq;
	struct task_struct *tsk;
	taskq_ent_t *tqe;
	char name[100];
	struct list_head *lheads[LHEAD_SIZE], *lh;
	static char *list_names[LHEAD_SIZE] =
	    {"pend", "prio", "delay", "wait", "active" };
	int i, j, have_lheads = 0;
	unsigned long wflags, flags;

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	spin_lock_irqsave(&tq->tq_wait_waitq.lock, wflags);

	/* get the various lists and check whether they're empty */
	lheads[LHEAD_PEND] = &tq->tq_pend_list;
	lheads[LHEAD_PRIO] = &tq->tq_prio_list;
	lheads[LHEAD_DELAY] = &tq->tq_delay_list;
#ifdef HAVE_WAIT_QUEUE_HEAD_ENTRY
	lheads[LHEAD_WAIT] = &tq->tq_wait_waitq.head;
#else
	lheads[LHEAD_WAIT] = &tq->tq_wait_waitq.task_list;
#endif
	lheads[LHEAD_ACTIVE] = &tq->tq_active_list;

	for (i = 0; i < LHEAD_SIZE; ++i) {
		if (list_empty(lheads[i]))
			lheads[i] = NULL;
		else
			++have_lheads;
	}

	/* early return in non-"all" mode if lists are all empty */
	if (!allflag && !have_lheads) {
		spin_unlock_irqrestore(&tq->tq_wait_waitq.lock, wflags);
		spin_unlock_irqrestore(&tq->tq_lock, flags);
		return (0);
	}

	/* unlock the waitq quickly */
	if (!lheads[LHEAD_WAIT])
		spin_unlock_irqrestore(&tq->tq_wait_waitq.lock, wflags);

	/* show the base taskq contents */
	snprintf(name, sizeof (name), "%s/%d", tq->tq_name, tq->tq_instance);
	seq_printf(f, "%-25s ", name);
	seq_printf(f, "%5d %5d %5d %5d %5d %5d %12d %5d %10x\n",
	    tq->tq_nactive, tq->tq_nthreads, tq->tq_nspawn,
	    tq->tq_maxthreads, tq->tq_pri, tq->tq_minalloc, tq->tq_maxalloc,
	    tq->tq_nalloc, tq->tq_flags);

	/* show the active list */
	if (lheads[LHEAD_ACTIVE]) {
		j = 0;
		list_for_each_entry(tqt, &tq->tq_active_list, tqt_active_list) {
			if (j == 0)
				seq_printf(f, "\t%s:",
				    list_names[LHEAD_ACTIVE]);
			else if (j == 2) {
				seq_printf(f, "\n\t ");
				j = 0;
			}
			seq_printf(f, " [%d]%pf(%ps)",
			    tqt->tqt_thread->pid,
			    tqt->tqt_task->tqent_func,
			    tqt->tqt_task->tqent_arg);
			++j;
		}
		seq_printf(f, "\n");
	}

	for (i = LHEAD_PEND; i <= LHEAD_WAIT; ++i)
		if (lheads[i]) {
			j = 0;
			list_for_each(lh, lheads[i]) {
				if (spl_max_show_tasks != 0 &&
				    j >= spl_max_show_tasks) {
					seq_printf(f, "\n\t(truncated)");
					break;
				}
				/* show the wait waitq list */
				if (i == LHEAD_WAIT) {
#ifdef HAVE_WAIT_QUEUE_HEAD_ENTRY
					wq = list_entry(lh,
					    spl_wait_queue_entry_t, entry);
#else
					wq = list_entry(lh,
					    spl_wait_queue_entry_t, task_list);
#endif
					if (j == 0)
						seq_printf(f, "\t%s:",
						    list_names[i]);
					else if (j % 8 == 0)
						seq_printf(f, "\n\t ");

					tsk = wq->private;
					seq_printf(f, " %d", tsk->pid);
				/* pend, prio and delay lists */
				} else {
					tqe = list_entry(lh, taskq_ent_t,
					    tqent_list);
					if (j == 0)
						seq_printf(f, "\t%s:",
						    list_names[i]);
					else if (j % 2 == 0)
						seq_printf(f, "\n\t ");

					seq_printf(f, " %pf(%ps)",
					    tqe->tqent_func,
					    tqe->tqent_arg);
				}
				++j;
			}
			seq_printf(f, "\n");
		}
	if (lheads[LHEAD_WAIT])
		spin_unlock_irqrestore(&tq->tq_wait_waitq.lock, wflags);
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	return (0);
}

static int
taskq_all_seq_show(struct seq_file *f, void *p)
{
	return (taskq_seq_show_impl(f, p, B_TRUE));
}

static int
taskq_seq_show(struct seq_file *f, void *p)
{
	return (taskq_seq_show_impl(f, p, B_FALSE));
}

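/*
 * seq_file iterators for the taskq files: start() takes tq_list_sem and
 * advances to the requested position in the global tq_list; the
 * semaphore is dropped again in taskq_seq_stop().
 */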
static void *
taskq_seq_start(struct seq_file *f, loff_t *pos)
{
	struct list_head *p;
	loff_t n = *pos;

	down_read(&tq_list_sem);
	if (!n)
		taskq_seq_show_headers(f);

	p = tq_list.next;
	while (n--) {
		p = p->next;
		if (p == &tq_list)
			return (NULL);
	}

	return (list_entry(p, taskq_t, tq_taskqs));
}

static void *
taskq_seq_next(struct seq_file *f, void *p, loff_t *pos)
{
	taskq_t *tq = p;

	++*pos;
	return ((tq->tq_taskqs.next == &tq_list) ?
	    NULL : list_entry(tq->tq_taskqs.next, taskq_t, tq_taskqs));
}

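/*
 * /proc/spl/kmem/slab support: one line of statistics per SPL kmem
 * cache.  Caches backed by a generic Linux slab (KMC_SLAB) only report
 * the objects allocated through the SPL wrapper; see /proc/slabinfo for
 * the underlying cache statistics.
 */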
static void
slab_seq_show_headers(struct seq_file *f)
{
	seq_printf(f,
	    "--------------------- cache ----------"
	    "--------------------------------------------- "
	    "----- slab ------ "
	    "---- object ----- "
	    "--- emergency ---\n");
	seq_printf(f,
	    "name "
	    " flags size alloc slabsize objsize "
	    "total alloc max "
	    "total alloc max "
	    "dlock alloc max\n");
}

static int
slab_seq_show(struct seq_file *f, void *p)
{
	spl_kmem_cache_t *skc = p;

	ASSERT(skc->skc_magic == SKC_MAGIC);

	if (skc->skc_flags & KMC_SLAB) {
		/*
		 * This cache is backed by a generic Linux kmem cache which
		 * has its own accounting. For these caches we only track
		 * the number of active allocated objects that exist within
		 * the underlying Linux slabs. For the overall statistics of
		 * the underlying Linux cache please refer to /proc/slabinfo.
		 */
		spin_lock(&skc->skc_lock);
		uint64_t objs_allocated =
		    percpu_counter_sum(&skc->skc_linux_alloc);
		seq_printf(f, "%-36s ", skc->skc_name);
		seq_printf(f, "0x%05lx %9s %9lu %8s %8u "
		    "%5s %5s %5s %5s %5lu %5s %5s %5s %5s\n",
		    (long unsigned)skc->skc_flags,
		    "-",
		    (long unsigned)(skc->skc_obj_size * objs_allocated),
		    "-",
		    (unsigned)skc->skc_obj_size,
		    "-", "-", "-", "-",
		    (long unsigned)objs_allocated,
		    "-", "-", "-", "-");
		spin_unlock(&skc->skc_lock);
		return (0);
	}

	spin_lock(&skc->skc_lock);
	seq_printf(f, "%-36s ", skc->skc_name);
	seq_printf(f, "0x%05lx %9lu %9lu %8u %8u "
	    "%5lu %5lu %5lu %5lu %5lu %5lu %5lu %5lu %5lu\n",
	    (long unsigned)skc->skc_flags,
	    (long unsigned)(skc->skc_slab_size * skc->skc_slab_total),
	    (long unsigned)(skc->skc_obj_size * skc->skc_obj_alloc),
	    (unsigned)skc->skc_slab_size,
	    (unsigned)skc->skc_obj_size,
	    (long unsigned)skc->skc_slab_total,
	    (long unsigned)skc->skc_slab_alloc,
	    (long unsigned)skc->skc_slab_max,
	    (long unsigned)skc->skc_obj_total,
	    (long unsigned)skc->skc_obj_alloc,
	    (long unsigned)skc->skc_obj_max,
	    (long unsigned)skc->skc_obj_deadlock,
	    (long unsigned)skc->skc_obj_emergency,
	    (long unsigned)skc->skc_obj_emergency_max);
	spin_unlock(&skc->skc_lock);
	return (0);
}

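/*
 * seq_file iterators for /proc/spl/kmem/slab: start() takes
 * spl_kmem_cache_sem and walks spl_kmem_cache_list; the semaphore is
 * released in slab_seq_stop().
 */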
static void *
slab_seq_start(struct seq_file *f, loff_t *pos)
{
	struct list_head *p;
	loff_t n = *pos;

	down_read(&spl_kmem_cache_sem);
	if (!n)
		slab_seq_show_headers(f);

	p = spl_kmem_cache_list.next;
	while (n--) {
		p = p->next;
		if (p == &spl_kmem_cache_list)
			return (NULL);
	}

	return (list_entry(p, spl_kmem_cache_t, skc_list));
}

static void *
slab_seq_next(struct seq_file *f, void *p, loff_t *pos)
{
	spl_kmem_cache_t *skc = p;

	++*pos;
	return ((skc->skc_list.next == &spl_kmem_cache_list) ?
	    NULL : list_entry(skc->skc_list.next, spl_kmem_cache_t, skc_list));
}

static void
slab_seq_stop(struct seq_file *f, void *v)
{
	up_read(&spl_kmem_cache_sem);
}

static const struct seq_operations slab_seq_ops = {
	.show = slab_seq_show,
	.start = slab_seq_start,
	.next = slab_seq_next,
	.stop = slab_seq_stop,
};

static int
proc_slab_open(struct inode *inode, struct file *filp)
{
	return (seq_open(filp, &slab_seq_ops));
}

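/*
 * kstat_proc_op_t resolves to struct proc_ops on kernels that provide it
 * (HAVE_PROC_OPS_STRUCT) and to struct file_operations otherwise, so the
 * tables below are populated for whichever layout was detected.
 */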
static const kstat_proc_op_t proc_slab_operations = {
#ifdef HAVE_PROC_OPS_STRUCT
	.proc_open = proc_slab_open,
	.proc_read = seq_read,
	.proc_lseek = seq_lseek,
	.proc_release = seq_release,
#else
	.open = proc_slab_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
#endif
};

static void
taskq_seq_stop(struct seq_file *f, void *v)
{
	up_read(&tq_list_sem);
}

static const struct seq_operations taskq_all_seq_ops = {
	.show = taskq_all_seq_show,
	.start = taskq_seq_start,
	.next = taskq_seq_next,
	.stop = taskq_seq_stop,
};

static const struct seq_operations taskq_seq_ops = {
	.show = taskq_seq_show,
	.start = taskq_seq_start,
	.next = taskq_seq_next,
	.stop = taskq_seq_stop,
};

static int
proc_taskq_all_open(struct inode *inode, struct file *filp)
{
	return (seq_open(filp, &taskq_all_seq_ops));
}

static int
proc_taskq_open(struct inode *inode, struct file *filp)
{
	return (seq_open(filp, &taskq_seq_ops));
}

static const kstat_proc_op_t proc_taskq_all_operations = {
#ifdef HAVE_PROC_OPS_STRUCT
	.proc_open = proc_taskq_all_open,
	.proc_read = seq_read,
	.proc_lseek = seq_lseek,
	.proc_release = seq_release,
#else
	.open = proc_taskq_all_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
#endif
};

static const kstat_proc_op_t proc_taskq_operations = {
#ifdef HAVE_PROC_OPS_STRUCT
	.proc_open = proc_taskq_open,
	.proc_read = seq_read,
	.proc_lseek = seq_lseek,
	.proc_release = seq_release,
#else
	.open = proc_taskq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
#endif
};

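/*
 * sysctl tables registered below as kernel.spl.*: the kernel.spl.kmem.*
 * allocation and slab totals, the (empty) kernel.spl.kstat directory,
 * and the top-level gitrev and hostid entries.
 */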
static struct ctl_table spl_kmem_table[] = {
#ifdef DEBUG_KMEM
	{
		.procname = "kmem_used",
		.data = &kmem_alloc_used,
#ifdef HAVE_ATOMIC64_T
		.maxlen = sizeof (atomic64_t),
#else
		.maxlen = sizeof (atomic_t),
#endif /* HAVE_ATOMIC64_T */
		.mode = 0444,
		.proc_handler = &proc_domemused,
	},
	{
		.procname = "kmem_max",
		.data = &kmem_alloc_max,
		.maxlen = sizeof (unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doulongvec_minmax,
	},
#endif /* DEBUG_KMEM */
	{
		.procname = "slab_kvmem_total",
		.data = (void *)(KMC_KVMEM | KMC_TOTAL),
		.maxlen = sizeof (unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doslab,
	},
	{
		.procname = "slab_kvmem_alloc",
		.data = (void *)(KMC_KVMEM | KMC_ALLOC),
		.maxlen = sizeof (unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doslab,
	},
	{
		.procname = "slab_kvmem_max",
		.data = (void *)(KMC_KVMEM | KMC_MAX),
		.maxlen = sizeof (unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doslab,
	},
	{},
};

static struct ctl_table spl_kstat_table[] = {
	{},
};

static struct ctl_table spl_table[] = {
	/*
	 * NB No .strategy entries have been provided since
	 * sysctl(8) prefers to go via /proc for portability.
	 */
	{
		.procname = "gitrev",
		.data = (char *)ZFS_META_GITREV,
		.maxlen = sizeof (ZFS_META_GITREV),
		.mode = 0444,
		.proc_handler = &proc_dostring,
	},
	{
		.procname = "hostid",
		.data = &spl_hostid,
		.maxlen = sizeof (unsigned long),
		.mode = 0644,
		.proc_handler = &proc_dohostid,
	},
	{
		.procname = "kmem",
		.mode = 0555,
		.child = spl_kmem_table,
	},
	{
		.procname = "kstat",
		.mode = 0555,
		.child = spl_kstat_table,
	},
	{},
};

static struct ctl_table spl_dir[] = {
	{
		.procname = "spl",
		.mode = 0555,
		.child = spl_table,
	},
	{}
};

static struct ctl_table spl_root[] = {
	{
		.procname = "kernel",
		.mode = 0555,
		.child = spl_dir,
	},
	{}
};

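/*
 * Register the kernel.spl.* sysctl tree and create the /proc/spl
 * hierarchy: taskq-all, taskq, kmem/slab and the kstat directory.  If
 * any step fails, everything created so far is torn down again.
 */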
int
spl_proc_init(void)
{
	int rc = 0;

	spl_header = register_sysctl_table(spl_root);
	if (spl_header == NULL)
		return (-EUNATCH);

	proc_spl = proc_mkdir("spl", NULL);
	if (proc_spl == NULL) {
		rc = -EUNATCH;
		goto out;
	}

	proc_spl_taskq_all = proc_create_data("taskq-all", 0444, proc_spl,
	    &proc_taskq_all_operations, NULL);
	if (proc_spl_taskq_all == NULL) {
		rc = -EUNATCH;
		goto out;
	}

	proc_spl_taskq = proc_create_data("taskq", 0444, proc_spl,
	    &proc_taskq_operations, NULL);
	if (proc_spl_taskq == NULL) {
		rc = -EUNATCH;
		goto out;
	}

	proc_spl_kmem = proc_mkdir("kmem", proc_spl);
	if (proc_spl_kmem == NULL) {
		rc = -EUNATCH;
		goto out;
	}

	proc_spl_kmem_slab = proc_create_data("slab", 0444, proc_spl_kmem,
	    &proc_slab_operations, NULL);
	if (proc_spl_kmem_slab == NULL) {
		rc = -EUNATCH;
		goto out;
	}

	proc_spl_kstat = proc_mkdir("kstat", proc_spl);
	if (proc_spl_kstat == NULL) {
		rc = -EUNATCH;
		goto out;
	}
out:
	if (rc) {
		remove_proc_entry("kstat", proc_spl);
		remove_proc_entry("slab", proc_spl_kmem);
		remove_proc_entry("kmem", proc_spl);
		remove_proc_entry("taskq-all", proc_spl);
		remove_proc_entry("taskq", proc_spl);
		remove_proc_entry("spl", NULL);
		unregister_sysctl_table(spl_header);
	}

	return (rc);
}

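/*
 * Remove the /proc/spl entries and unregister the kernel.spl.* sysctl
 * tree created by spl_proc_init().
 */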
void
spl_proc_fini(void)
{
	remove_proc_entry("kstat", proc_spl);
	remove_proc_entry("slab", proc_spl_kmem);
	remove_proc_entry("kmem", proc_spl);
	remove_proc_entry("taskq-all", proc_spl);
	remove_proc_entry("taskq", proc_spl);
	remove_proc_entry("spl", NULL);

	ASSERT(spl_header != NULL);
	unregister_sysctl_table(spl_header);
}