/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://zfsonlinux.org/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *****************************************************************************
 *  Solaris Porting Layer (SPL) Proc Implementation.
\*****************************************************************************/
#include <sys/systeminfo.h>
#include <sys/kstat.h>
#include <sys/kmem.h>
#include <sys/kmem_cache.h>
#include <sys/vmem.h>
#include <sys/taskq.h>
#include <linux/ctype.h>
#include <linux/kmod.h>
#include <linux/seq_file.h>
#include <linux/proc_compat.h>
#include <linux/uaccess.h>
#include <linux/version.h>
#if defined(CONSTIFY_PLUGIN) && LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)
typedef struct ctl_table __no_const spl_ctl_table;
#else
typedef struct ctl_table spl_ctl_table;
#endif
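/*
 * Note: the __no_const variant matters only when building with the
 * grsecurity "constify" GCC plugin, which would otherwise make every
 * struct ctl_table read-only.  The handlers below copy a table to the
 * stack and patch its fields, so the type must remain writable.
 */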
static unsigned long table_min = 0;
static unsigned long table_max = ~0;

static struct ctl_table_header *spl_header = NULL;
static struct proc_dir_entry *proc_spl = NULL;
static struct proc_dir_entry *proc_spl_kmem = NULL;
static struct proc_dir_entry *proc_spl_kmem_slab = NULL;
static struct proc_dir_entry *proc_spl_taskq_all = NULL;
static struct proc_dir_entry *proc_spl_taskq = NULL;
struct proc_dir_entry *proc_spl_kstat = NULL;
static int
proc_copyin_string(char *kbuffer, int kbuffer_size,
    const char *ubuffer, int ubuffer_size)
{
	int size;

	if (ubuffer_size > kbuffer_size)
		return (-EOVERFLOW);

	if (copy_from_user((void *)kbuffer, (void *)ubuffer, ubuffer_size))
		return (-EFAULT);

	/* strip trailing whitespace */
	size = strnlen(kbuffer, ubuffer_size);
	while (size-- >= 0)
		if (!isspace(kbuffer[size]))
			break;

	/* empty string */
	if (size < 0)
		return (-EINVAL);

	/* no space to terminate */
	if (size == kbuffer_size)
		return (-EOVERFLOW);

	kbuffer[size + 1] = 0;
	return (0);
}
static int
proc_copyout_string(char *ubuffer, int ubuffer_size,
    const char *kbuffer, char *append)
{
	/*
	 * NB if 'append' != NULL, it's a single character to append to the
	 * copied out string - usually "\n", for /proc entries and ""
	 * (i.e. a terminating zero byte) for sysctl entries
	 */
	int size = MIN(strlen(kbuffer), ubuffer_size);

	if (copy_to_user(ubuffer, kbuffer, size))
		return (-EFAULT);

	if (append != NULL && size < ubuffer_size) {
		if (copy_to_user(ubuffer + size, append, 1))
			return (-EFAULT);
		size++;
	}

	return (size);
}
#ifdef DEBUG_KMEM
static int
proc_domemused(struct ctl_table *table, int write,
    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int rc = 0;
	unsigned long min = 0, max = ~0, val;
	spl_ctl_table dummy = *table;

	dummy.data = &val;
	dummy.proc_handler = &proc_dointvec;
	dummy.extra1 = &min;
	dummy.extra2 = &max;

	if (write) {
		*ppos += *lenp;
	} else {
# ifdef HAVE_ATOMIC64_T
		val = atomic64_read((atomic64_t *)table->data);
# else
		val = atomic_read((atomic_t *)table->data);
# endif /* HAVE_ATOMIC64_T */
		rc = proc_doulongvec_minmax(&dummy, write, buffer, lenp, ppos);
	}

	return (rc);
}
#endif /* DEBUG_KMEM */
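/*
 * A sketch of the "dummy table" trick used above: the atomic counter in
 * table->data cannot be handed to proc_doulongvec_minmax() directly, so a
 * stack copy of the table is pointed at a local unsigned long, the atomic
 * is sampled into it, and the stock helper does all of the formatting.
 */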
static int
proc_doslab(struct ctl_table *table, int write,
    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int rc = 0;
	unsigned long min = 0, max = ~0, val = 0, mask;
	spl_ctl_table dummy = *table;
	spl_kmem_cache_t *skc;

	dummy.data = &val;
	dummy.proc_handler = &proc_dointvec;
	dummy.extra1 = &min;
	dummy.extra2 = &max;

	if (write) {
		*ppos += *lenp;
	} else {
		down_read(&spl_kmem_cache_sem);
		mask = (unsigned long)table->data;

		list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {

			/* Only use slabs of the correct kmem/vmem type */
			if (!(skc->skc_flags & mask))
				continue;

			/* Sum the specified field for selected slabs */
			switch (mask & (KMC_TOTAL | KMC_ALLOC | KMC_MAX)) {
			case KMC_TOTAL:
				val += skc->skc_slab_size *
				    skc->skc_slab_total;
				break;
			case KMC_ALLOC:
				val += skc->skc_obj_size *
				    skc->skc_obj_alloc;
				break;
			case KMC_MAX:
				val += skc->skc_obj_size *
				    skc->skc_obj_max;
				break;
			}
		}

		up_read(&spl_kmem_cache_sem);
		rc = proc_doulongvec_minmax(&dummy, write, buffer, lenp, ppos);
	}

	return (rc);
}
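/*
 * The mask smuggled through table->data selects both the cache type
 * (KMC_KMEM or KMC_VMEM) and the statistic (KMC_TOTAL, KMC_ALLOC or
 * KMC_MAX).  For example, the slab_kmem_total entry below passes
 * (KMC_KMEM | KMC_TOTAL), which sums skc_slab_size * skc_slab_total
 * across every kmem-backed cache.
 */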
static int
proc_dohostid(struct ctl_table *table, int write,
    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int len, rc = 0;
	char *end, str[32];

	if (write) {
		/*
		 * We can't use proc_doulongvec_minmax() in the write case
		 * here because the hostid, while a hex value, has no
		 * leading 0x, which confuses the helper function.
		 */
		rc = proc_copyin_string(str, sizeof(str), buffer, *lenp);
		if (rc < 0)
			return (rc);

		spl_hostid = simple_strtoul(str, &end, 16);
		if (str == end)
			return (-EINVAL);
	} else {
		len = snprintf(str, sizeof(str), "%lx", spl_hostid);
		rc = proc_copyout_string(buffer, *lenp, str + *ppos, "\n");
		if (rc >= 0) {
			*lenp = rc;
			*ppos += rc;
		}
	}

	return (rc);
}
static void
taskq_seq_show_headers(struct seq_file *f)
{
	seq_printf(f, "%-25s %5s %5s %5s %5s %5s %5s %12s %5s %10s\n",
	    "taskq", "act", "nthr", "spwn", "maxt", "pri",
	    "mina", "maxa", "cura", "flags");
}
/* indices into the lheads array below */
#define LHEAD_PEND	0
#define LHEAD_PRIO	1
#define LHEAD_DELAY	2
#define LHEAD_WAIT	3
#define LHEAD_ACTIVE	4
#define LHEAD_SIZE	5
static unsigned int spl_max_show_tasks = 512;
module_param(spl_max_show_tasks, uint, 0644);
MODULE_PARM_DESC(spl_max_show_tasks, "Max number of tasks shown in taskq proc");
static int
taskq_seq_show_impl(struct seq_file *f, void *p, boolean_t allflag)
{
	taskq_t *tq = p;
	taskq_thread_t *tqt;
	wait_queue_t *wq;
	struct task_struct *tsk;
	taskq_ent_t *tqe;
	char name[100];
	struct list_head *lheads[LHEAD_SIZE], *lh;
	static char *list_names[LHEAD_SIZE] =
	    {"pend", "prio", "delay", "wait", "active" };
	int i, j, have_lheads = 0;
	unsigned long wflags, flags;

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	spin_lock_irqsave(&tq->tq_wait_waitq.lock, wflags);

	/* get the various lists and check whether they're empty */
	lheads[LHEAD_PEND] = &tq->tq_pend_list;
	lheads[LHEAD_PRIO] = &tq->tq_prio_list;
	lheads[LHEAD_DELAY] = &tq->tq_delay_list;
	lheads[LHEAD_WAIT] = &tq->tq_wait_waitq.task_list;
	lheads[LHEAD_ACTIVE] = &tq->tq_active_list;

	for (i = 0; i < LHEAD_SIZE; ++i) {
		if (list_empty(lheads[i]))
			lheads[i] = NULL;
		else
			++have_lheads;
	}

	/* early return in non-"all" mode if lists are all empty */
	if (!allflag && !have_lheads) {
		spin_unlock_irqrestore(&tq->tq_wait_waitq.lock, wflags);
		spin_unlock_irqrestore(&tq->tq_lock, flags);
		return (0);
	}

	/* unlock the waitq quickly */
	if (!lheads[LHEAD_WAIT])
		spin_unlock_irqrestore(&tq->tq_wait_waitq.lock, wflags);

	/* show the base taskq contents */
	snprintf(name, sizeof(name), "%s/%d", tq->tq_name, tq->tq_instance);
	seq_printf(f, "%-25s ", name);
	seq_printf(f, "%5d %5d %5d %5d %5d %5d %12d %5d %10x\n",
	    tq->tq_nactive, tq->tq_nthreads, tq->tq_nspawn,
	    tq->tq_maxthreads, tq->tq_pri, tq->tq_minalloc, tq->tq_maxalloc,
	    tq->tq_nalloc, tq->tq_flags);

	/* show the active list */
	if (lheads[LHEAD_ACTIVE]) {
		j = 0;
		list_for_each_entry(tqt, &tq->tq_active_list,
		    tqt_active_list) {
			if (j == 0)
				seq_printf(f, "\t%s:",
				    list_names[LHEAD_ACTIVE]);
			else if (j == 2) {
				seq_printf(f, "\n\t ");
				j = 0;
			}
			seq_printf(f, " [%d]%pf(%ps)",
			    tqt->tqt_thread->pid,
			    tqt->tqt_task->tqent_func,
			    tqt->tqt_task->tqent_arg);
			++j;
		}
		seq_printf(f, "\n");
	}

	for (i = LHEAD_PEND; i <= LHEAD_WAIT; ++i)
		if (lheads[i]) {
			j = 0;
			list_for_each(lh, lheads[i]) {
				if (spl_max_show_tasks != 0 &&
				    j >= spl_max_show_tasks) {
					seq_printf(f, "\n\t(truncated)");
					break;
				}
				/* show the wait waitq list */
				if (i == LHEAD_WAIT) {
					wq = list_entry(lh,
					    wait_queue_t, task_list);
					if (j == 0)
						seq_printf(f, "\t%s:",
						    list_names[i]);
					else if (j % 8 == 0)
						seq_printf(f, "\n\t ");

					tsk = wq->private;
					seq_printf(f, " %d", tsk->pid);
				/* pend, prio and delay lists */
				} else {
					tqe = list_entry(lh, taskq_ent_t,
					    tqent_list);
					if (j == 0)
						seq_printf(f, "\t%s:",
						    list_names[i]);
					else if (j % 2 == 0)
						seq_printf(f, "\n\t ");

					seq_printf(f, " %pf(%ps)",
					    tqe->tqent_func,
					    tqe->tqent_arg);
				}
				++j;
			}
			seq_printf(f, "\n");
		}

	if (lheads[LHEAD_WAIT])
		spin_unlock_irqrestore(&tq->tq_wait_waitq.lock, wflags);
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	return (0);
}
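/*
 * Each taskq is rendered as one summary row (matching the headers printed
 * by taskq_seq_show_headers()) followed by indented "pend", "prio",
 * "delay", "wait" and "active" lines, each listing the queued functions
 * or waiting PIDs for that list whenever it is non-empty.
 */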
static int
taskq_all_seq_show(struct seq_file *f, void *p)
{
	return (taskq_seq_show_impl(f, p, B_TRUE));
}

static int
taskq_seq_show(struct seq_file *f, void *p)
{
	return (taskq_seq_show_impl(f, p, B_FALSE));
}
static void *
taskq_seq_start(struct seq_file *f, loff_t *pos)
{
	struct list_head *p;
	loff_t n = *pos;

	down_read(&tq_list_sem);
	if (!n)
		taskq_seq_show_headers(f);

	p = tq_list.next;
	while (n--) {
		p = p->next;
		if (p == &tq_list)
			return (NULL);
	}

	return (list_entry(p, taskq_t, tq_taskqs));
}
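/*
 * The header row is printed only when *pos == 0 so that a reader issuing
 * multiple read(2) calls on /proc/spl/taskq sees it exactly once at the
 * top of the file.
 */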
static void *
taskq_seq_next(struct seq_file *f, void *p, loff_t *pos)
{
	taskq_t *tq = p;

	++*pos;
	return ((tq->tq_taskqs.next == &tq_list) ?
	    NULL : list_entry(tq->tq_taskqs.next, taskq_t, tq_taskqs));
}
static void
slab_seq_show_headers(struct seq_file *f)
{
	seq_printf(f,
	    "--------------------- cache ----------"
	    "--------------------------------------------- "
	    "----- slab ------ ---- object ----- "
	    "--- emergency ---\n");
	seq_printf(f,
	    "name "
	    " flags size alloc slabsize objsize "
	    "total alloc max total alloc max "
	    "dlock alloc max\n");
}
static int
slab_seq_show(struct seq_file *f, void *p)
{
	spl_kmem_cache_t *skc = p;

	ASSERT(skc->skc_magic == SKC_MAGIC);

	/*
	 * Backed by a Linux slab; see /proc/slabinfo.
	 */
	if (skc->skc_flags & KMC_SLAB)
		return (0);

	spin_lock(&skc->skc_lock);
	seq_printf(f, "%-36s ", skc->skc_name);
	seq_printf(f, "0x%05lx %9lu %9lu %8u %8u "
	    "%5lu %5lu %5lu %5lu %5lu %5lu %5lu %5lu %5lu\n",
	    (long unsigned)skc->skc_flags,
	    (long unsigned)(skc->skc_slab_size * skc->skc_slab_total),
	    (long unsigned)(skc->skc_obj_size * skc->skc_obj_alloc),
	    (unsigned)skc->skc_slab_size,
	    (unsigned)skc->skc_obj_size,
	    (long unsigned)skc->skc_slab_total,
	    (long unsigned)skc->skc_slab_alloc,
	    (long unsigned)skc->skc_slab_max,
	    (long unsigned)skc->skc_obj_total,
	    (long unsigned)skc->skc_obj_alloc,
	    (long unsigned)skc->skc_obj_max,
	    (long unsigned)skc->skc_obj_deadlock,
	    (long unsigned)skc->skc_obj_emergency,
	    (long unsigned)skc->skc_obj_emergency_max);
	spin_unlock(&skc->skc_lock);

	return (0);
}
static void *
slab_seq_start(struct seq_file *f, loff_t *pos)
{
	struct list_head *p;
	loff_t n = *pos;

	down_read(&spl_kmem_cache_sem);
	if (!n)
		slab_seq_show_headers(f);

	p = spl_kmem_cache_list.next;
	while (n--) {
		p = p->next;
		if (p == &spl_kmem_cache_list)
			return (NULL);
	}

	return (list_entry(p, spl_kmem_cache_t, skc_list));
}
static void *
slab_seq_next(struct seq_file *f, void *p, loff_t *pos)
{
	spl_kmem_cache_t *skc = p;

	++*pos;
	return ((skc->skc_list.next == &spl_kmem_cache_list) ?
	    NULL : list_entry(skc->skc_list.next, spl_kmem_cache_t,
	    skc_list));
}
static void
slab_seq_stop(struct seq_file *f, void *v)
{
	up_read(&spl_kmem_cache_sem);
}
static struct seq_operations slab_seq_ops = {
	.show = slab_seq_show,
	.start = slab_seq_start,
	.next = slab_seq_next,
	.stop = slab_seq_stop,
};
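/*
 * seq_file protocol recap: the kernel calls .start (which takes
 * spl_kmem_cache_sem and seeks to *pos), then .show/.next for each cache,
 * then .stop to drop the semaphore.  Holding the semaphore for the whole
 * traversal keeps the cache list stable while /proc/spl/kmem/slab is
 * being read.
 */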
static int
proc_slab_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &slab_seq_ops);
}
static struct file_operations proc_slab_operations = {
	.open = proc_slab_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
static void
taskq_seq_stop(struct seq_file *f, void *v)
{
	up_read(&tq_list_sem);
}
static struct seq_operations taskq_all_seq_ops = {
	.show = taskq_all_seq_show,
	.start = taskq_seq_start,
	.next = taskq_seq_next,
	.stop = taskq_seq_stop,
};
static struct seq_operations taskq_seq_ops = {
	.show = taskq_seq_show,
	.start = taskq_seq_start,
	.next = taskq_seq_next,
	.stop = taskq_seq_stop,
};
static int
proc_taskq_all_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &taskq_all_seq_ops);
}

static int
proc_taskq_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &taskq_seq_ops);
}
static struct file_operations proc_taskq_all_operations = {
	.open = proc_taskq_all_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
static struct file_operations proc_taskq_operations = {
	.open = proc_taskq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
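/*
 * The "taskq" and "taskq-all" files share the same iterators and differ
 * only in the show callback: taskq_seq_show() skips taskqs whose lists
 * are all empty, while taskq_all_seq_show() prints every taskq.
 */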
static struct ctl_table spl_kmem_table[] = {
#ifdef DEBUG_KMEM
	{
		.procname = "kmem_used",
		.data = &kmem_alloc_used,
# ifdef HAVE_ATOMIC64_T
		.maxlen = sizeof(atomic64_t),
# else
		.maxlen = sizeof(atomic_t),
# endif /* HAVE_ATOMIC64_T */
		.mode = 0444,
		.proc_handler = &proc_domemused,
	},
	{
		.procname = "kmem_max",
		.data = &kmem_alloc_max,
		.maxlen = sizeof(unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doulongvec_minmax,
	},
#endif /* DEBUG_KMEM */
	{
		.procname = "slab_kmem_total",
		.data = (void *)(KMC_KMEM | KMC_TOTAL),
		.maxlen = sizeof(unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doslab,
	},
	{
		.procname = "slab_kmem_alloc",
		.data = (void *)(KMC_KMEM | KMC_ALLOC),
		.maxlen = sizeof(unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doslab,
	},
	{
		.procname = "slab_kmem_max",
		.data = (void *)(KMC_KMEM | KMC_MAX),
		.maxlen = sizeof(unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doslab,
	},
	{
		.procname = "slab_vmem_total",
		.data = (void *)(KMC_VMEM | KMC_TOTAL),
		.maxlen = sizeof(unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doslab,
	},
	{
		.procname = "slab_vmem_alloc",
		.data = (void *)(KMC_VMEM | KMC_ALLOC),
		.maxlen = sizeof(unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doslab,
	},
	{
		.procname = "slab_vmem_max",
		.data = (void *)(KMC_VMEM | KMC_MAX),
		.maxlen = sizeof(unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doslab,
	},
	{0},
};
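/*
 * Each entry above surfaces as a read-only file such as
 * /proc/sys/kernel/spl/kmem/slab_kmem_total, computed on demand by
 * proc_doslab() rather than maintained as a running counter.
 */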
static struct ctl_table spl_kstat_table[] = {
	{0},
};
static struct ctl_table spl_table[] = {
	/*
	 * NB No .strategy entries have been provided since
	 * sysctl(8) prefers to go via /proc for portability.
	 */
	{
		.procname = "version",
		.data = spl_version,
		.maxlen = sizeof(spl_version),
		.mode = 0444,
		.proc_handler = &proc_dostring,
	},
	{
		.procname = "hostid",
		.data = &spl_hostid,
		.maxlen = sizeof(unsigned long),
		.mode = 0644,
		.proc_handler = &proc_dohostid,
	},
	{
		.procname = "kmem",
		.mode = 0555,
		.child = spl_kmem_table,
	},
	{
		.procname = "kstat",
		.mode = 0555,
		.child = spl_kstat_table,
	},
	{0},
};
static struct ctl_table spl_dir[] = {
	{
		.procname = "spl",
		.mode = 0555,
		.child = spl_table,
	},
	{0},
};
static struct ctl_table spl_root[] = {
	{
#ifdef HAVE_CTL_NAME
	.ctl_name = CTL_KERN,
#endif
	.procname = "kernel",
	.mode = 0555,
	.child = spl_dir,
	},
	{0},
};
int
spl_proc_init(void)
{
	int rc = 0;

	spl_header = register_sysctl_table(spl_root);
	if (spl_header == NULL)
		return (-EUNATCH);

	proc_spl = proc_mkdir("spl", NULL);
	if (proc_spl == NULL) {
		rc = -EUNATCH;
		goto out;
	}

	proc_spl_taskq_all = proc_create_data("taskq-all", 0444,
	    proc_spl, &proc_taskq_all_operations, NULL);
	if (proc_spl_taskq_all == NULL) {
		rc = -EUNATCH;
		goto out;
	}

	proc_spl_taskq = proc_create_data("taskq", 0444,
	    proc_spl, &proc_taskq_operations, NULL);
	if (proc_spl_taskq == NULL) {
		rc = -EUNATCH;
		goto out;
	}

	proc_spl_kmem = proc_mkdir("kmem", proc_spl);
	if (proc_spl_kmem == NULL) {
		rc = -EUNATCH;
		goto out;
	}

	proc_spl_kmem_slab = proc_create_data("slab", 0444,
	    proc_spl_kmem, &proc_slab_operations, NULL);
	if (proc_spl_kmem_slab == NULL) {
		rc = -EUNATCH;
		goto out;
	}

	proc_spl_kstat = proc_mkdir("kstat", proc_spl);
	if (proc_spl_kstat == NULL) {
		rc = -EUNATCH;
		goto out;
	}
out:
	if (rc) {
		remove_proc_entry("kstat", proc_spl);
		remove_proc_entry("slab", proc_spl_kmem);
		remove_proc_entry("kmem", proc_spl);
		remove_proc_entry("taskq-all", proc_spl);
		remove_proc_entry("taskq", proc_spl);
		remove_proc_entry("spl", NULL);
		unregister_sysctl_table(spl_header);
	}

	return (rc);
}
void
spl_proc_fini(void)
{
	remove_proc_entry("kstat", proc_spl);
	remove_proc_entry("slab", proc_spl_kmem);
	remove_proc_entry("kmem", proc_spl);
	remove_proc_entry("taskq-all", proc_spl);
	remove_proc_entry("taskq", proc_spl);
	remove_proc_entry("spl", NULL);

	ASSERT(spl_header != NULL);
	unregister_sysctl_table(spl_header);
}