1 /*****************************************************************************\
2 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
3 * Copyright (C) 2007 The Regents of the University of California.
4 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
5 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
8 * This file is part of the SPL, Solaris Porting Layer.
9 * For details, see <http://zfsonlinux.org/>.
11 * The SPL is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
16 * The SPL is distributed in the hope that it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
21 * You should have received a copy of the GNU General Public License along
22 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
23 *****************************************************************************
24 * Solaris Porting Layer (SPL) Proc Implementation.
25 \*****************************************************************************/
27 #include <sys/systeminfo.h>
28 #include <sys/kstat.h>
30 #include <sys/kmem_cache.h>
32 #include <sys/taskq.h>
33 #include <linux/ctype.h>
34 #include <linux/kmod.h>
35 #include <linux/seq_file.h>
36 #include <linux/proc_compat.h>
37 #include <linux/uaccess.h>
38 #include <linux/version.h>
40 #if defined(CONSTIFY_PLUGIN) && LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)
41 typedef struct ctl_table __no_const spl_ctl_table
;
43 typedef struct ctl_table spl_ctl_table
;
46 static unsigned long table_min
= 0;
47 static unsigned long table_max
= ~0;
49 static struct ctl_table_header
*spl_header
= NULL
;
50 static struct proc_dir_entry
*proc_spl
= NULL
;
51 static struct proc_dir_entry
*proc_spl_kmem
= NULL
;
52 static struct proc_dir_entry
*proc_spl_kmem_slab
= NULL
;
53 static struct proc_dir_entry
*proc_spl_taskq_all
= NULL
;
54 static struct proc_dir_entry
*proc_spl_taskq
= NULL
;
55 struct proc_dir_entry
*proc_spl_kstat
= NULL
;
58 proc_copyin_string(char *kbuffer
, int kbuffer_size
,
59 const char *ubuffer
, int ubuffer_size
)
63 if (ubuffer_size
> kbuffer_size
)
66 if (copy_from_user((void *)kbuffer
, (void *)ubuffer
, ubuffer_size
))
69 /* strip trailing whitespace */
70 size
= strnlen(kbuffer
, ubuffer_size
);
72 if (!isspace(kbuffer
[size
]))
79 /* no space to terminate */
80 if (size
== kbuffer_size
)
83 kbuffer
[size
+ 1] = 0;
88 proc_copyout_string(char *ubuffer
, int ubuffer_size
,
89 const char *kbuffer
, char *append
)
91 /* NB if 'append' != NULL, it's a single character to append to the
92 * copied out string - usually "\n", for /proc entries and
93 * (i.e. a terminating zero byte) for sysctl entries
95 int size
= MIN(strlen(kbuffer
), ubuffer_size
);
97 if (copy_to_user(ubuffer
, kbuffer
, size
))
100 if (append
!= NULL
&& size
< ubuffer_size
) {
101 if (copy_to_user(ubuffer
+ size
, append
, 1))
#ifdef DEBUG_KMEM
/*
 * sysctl handler for the read-only kmem_used counter.  The atomic counter
 * is snapshotted into a local and rendered through a dummy table so
 * proc_doulongvec_minmax() does the formatting.  Writes are swallowed by
 * advancing *ppos (the counter is not user-settable).
 */
static int
proc_domemused(struct ctl_table *table, int write,
    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int rc = 0;
	unsigned long min = 0, max = ~0, val;
	spl_ctl_table dummy = *table;

	dummy.data = &val;
	dummy.proc_handler = &proc_dointvec;
	dummy.extra1 = &min;
	dummy.extra2 = &max;

	if (write) {
		*ppos += *lenp;
	} else {
# ifdef HAVE_ATOMIC64_T
		val = atomic64_read((atomic64_t *)table->data);
# else
		val = atomic_read((atomic_t *)table->data);
# endif /* HAVE_ATOMIC64_T */
		rc = proc_doulongvec_minmax(&dummy, write, buffer, lenp, ppos);
	}

	return (rc);
}
#endif /* DEBUG_KMEM */
140 proc_doslab(struct ctl_table
*table
, int write
,
141 void __user
*buffer
, size_t *lenp
, loff_t
*ppos
)
144 unsigned long min
= 0, max
= ~0, val
= 0, mask
;
145 spl_ctl_table dummy
= *table
;
146 spl_kmem_cache_t
*skc
;
149 dummy
.proc_handler
= &proc_dointvec
;
156 down_read(&spl_kmem_cache_sem
);
157 mask
= (unsigned long)table
->data
;
159 list_for_each_entry(skc
, &spl_kmem_cache_list
, skc_list
) {
161 /* Only use slabs of the correct kmem/vmem type */
162 if (!(skc
->skc_flags
& mask
))
165 /* Sum the specified field for selected slabs */
166 switch (mask
& (KMC_TOTAL
| KMC_ALLOC
| KMC_MAX
)) {
168 val
+= skc
->skc_slab_size
* skc
->skc_slab_total
;
171 val
+= skc
->skc_obj_size
* skc
->skc_obj_alloc
;
174 val
+= skc
->skc_obj_size
* skc
->skc_obj_max
;
179 up_read(&spl_kmem_cache_sem
);
180 rc
= proc_doulongvec_minmax(&dummy
, write
, buffer
, lenp
, ppos
);
187 proc_dohostid(struct ctl_table
*table
, int write
,
188 void __user
*buffer
, size_t *lenp
, loff_t
*ppos
)
194 /* We can't use proc_doulongvec_minmax() in the write
195 * case here because hostid while a hex value has no
196 * leading 0x which confuses the helper function. */
197 rc
= proc_copyin_string(str
, sizeof(str
), buffer
, *lenp
);
201 spl_hostid
= simple_strtoul(str
, &end
, 16);
206 len
= snprintf(str
, sizeof(str
), "%lx", spl_hostid
);
210 rc
= proc_copyout_string(buffer
,*lenp
,str
+*ppos
,"\n");
/* Emit the column header line for /proc/spl/taskq[-all]. */
static int
taskq_seq_show_headers(struct seq_file *f)
{
	seq_printf(f, "%-25s %5s %5s %5s %5s %5s %5s %12s %5s %10s\n",
	    "taskq", "act", "nthr", "spwn", "maxt", "pri",
	    "mina", "maxa", "cura", "flags");
	return (0);
}
/*
 * Indices into the lheads array below.  Order matches the list_names
 * table in taskq_seq_show_impl(): pend, prio, delay, wait, active.
 * Only DELAY and ACTIVE survived the fragment; the rest are restored
 * to the values implied by that ordering.
 */
#define	LHEAD_PEND	0
#define	LHEAD_PRIO	1
#define	LHEAD_DELAY	2
#define	LHEAD_WAIT	3
#define	LHEAD_ACTIVE	4
#define	LHEAD_SIZE	5
238 taskq_seq_show_impl(struct seq_file
*f
, void *p
, boolean_t allflag
)
243 struct task_struct
*tsk
;
246 struct list_head
*lheads
[LHEAD_SIZE
], *lh
;
247 static char *list_names
[LHEAD_SIZE
] =
248 {"pend", "prio", "delay", "wait", "active" };
249 int i
, j
, have_lheads
= 0;
250 unsigned long wflags
, flags
;
252 spin_lock_irqsave_nested(&tq
->tq_lock
, flags
, tq
->tq_lock_class
);
253 spin_lock_irqsave(&tq
->tq_wait_waitq
.lock
, wflags
);
255 /* get the various lists and check whether they're empty */
256 lheads
[LHEAD_PEND
] = &tq
->tq_pend_list
;
257 lheads
[LHEAD_PRIO
] = &tq
->tq_prio_list
;
258 lheads
[LHEAD_DELAY
] = &tq
->tq_delay_list
;
259 lheads
[LHEAD_WAIT
] = &tq
->tq_wait_waitq
.task_list
;
260 lheads
[LHEAD_ACTIVE
] = &tq
->tq_active_list
;
262 for (i
= 0; i
< LHEAD_SIZE
; ++i
) {
263 if (list_empty(lheads
[i
]))
269 /* early return in non-"all" mode if lists are all empty */
270 if (!allflag
&& !have_lheads
) {
271 spin_unlock_irqrestore(&tq
->tq_wait_waitq
.lock
, wflags
);
272 spin_unlock_irqrestore(&tq
->tq_lock
, flags
);
276 /* unlock the waitq quickly */
277 if (!lheads
[LHEAD_WAIT
])
278 spin_unlock_irqrestore(&tq
->tq_wait_waitq
.lock
, wflags
);
280 /* show the base taskq contents */
281 snprintf(name
, sizeof(name
), "%s/%d", tq
->tq_name
, tq
->tq_instance
);
282 seq_printf(f
, "%-25s ", name
);
283 seq_printf(f
, "%5d %5d %5d %5d %5d %5d %12d %5d %10x\n",
284 tq
->tq_nactive
, tq
->tq_nthreads
, tq
->tq_nspawn
,
285 tq
->tq_maxthreads
, tq
->tq_pri
, tq
->tq_minalloc
, tq
->tq_maxalloc
,
286 tq
->tq_nalloc
, tq
->tq_flags
);
288 /* show the active list */
289 if (lheads
[LHEAD_ACTIVE
]) {
291 list_for_each_entry(tqt
, &tq
->tq_active_list
, tqt_active_list
) {
293 seq_printf(f
, "\t%s:", list_names
[LHEAD_ACTIVE
]);
295 seq_printf(f
, "\n\t ");
298 seq_printf(f
, " [%d]%pf(%ps)",
299 tqt
->tqt_thread
->pid
,
300 tqt
->tqt_task
->tqent_func
,
301 tqt
->tqt_task
->tqent_arg
);
307 for (i
= LHEAD_PEND
; i
<= LHEAD_WAIT
; ++i
)
310 list_for_each(lh
, lheads
[i
]) {
311 /* show the wait waitq list */
312 if (i
== LHEAD_WAIT
) {
313 wq
= list_entry(lh
, wait_queue_t
, task_list
);
315 seq_printf(f
, "\t%s:",
318 seq_printf(f
, "\n\t ");
322 seq_printf(f
, " %d", tsk
->pid
);
323 /* pend, prio and delay lists */
325 tqe
= list_entry(lh
, taskq_ent_t
,
328 seq_printf(f
, "\t%s:",
331 seq_printf(f
, "\n\t ");
334 seq_printf(f
, " %pf(%ps)",
342 if (lheads
[LHEAD_WAIT
])
343 spin_unlock_irqrestore(&tq
->tq_wait_waitq
.lock
, wflags
);
344 spin_unlock_irqrestore(&tq
->tq_lock
, flags
);
350 taskq_all_seq_show(struct seq_file
*f
, void *p
)
352 return (taskq_seq_show_impl(f
, p
, B_TRUE
));
356 taskq_seq_show(struct seq_file
*f
, void *p
)
358 return (taskq_seq_show_impl(f
, p
, B_FALSE
));
362 taskq_seq_start(struct seq_file
*f
, loff_t
*pos
)
367 down_read(&tq_list_sem
);
369 taskq_seq_show_headers(f
);
378 return (list_entry(p
, taskq_t
, tq_taskqs
));
382 taskq_seq_next(struct seq_file
*f
, void *p
, loff_t
*pos
)
387 return ((tq
->tq_taskqs
.next
== &tq_list
) ?
388 NULL
: list_entry(tq
->tq_taskqs
.next
, taskq_t
, tq_taskqs
));
/*
 * Emit the two header lines for /proc/spl/kmem/slab.
 * NOTE(review): exact spacing of the header strings was lost in the
 * fragment and is reconstructed — confirm column alignment against
 * upstream spl-proc.c before relying on it.
 */
static int
slab_seq_show_headers(struct seq_file *f)
{
	seq_printf(f,
	    "--------------------- cache ----------"
	    "---------------------------------------------  "
	    "----- slab ------  "
	    "---- object -----  "
	    "--- emergency ---\n");
	seq_printf(f,
	    "name                                  "
	    "  flags      size     alloc slabsize  objsize  "
	    "total alloc   max  "
	    "total alloc   max  "
	    "dlock alloc   max\n");
	return (0);
}
409 slab_seq_show(struct seq_file
*f
, void *p
)
411 spl_kmem_cache_t
*skc
= p
;
413 ASSERT(skc
->skc_magic
== SKC_MAGIC
);
416 * Backed by Linux slab see /proc/slabinfo.
418 if (skc
->skc_flags
& KMC_SLAB
)
421 spin_lock(&skc
->skc_lock
);
422 seq_printf(f
, "%-36s ", skc
->skc_name
);
423 seq_printf(f
, "0x%05lx %9lu %9lu %8u %8u "
424 "%5lu %5lu %5lu %5lu %5lu %5lu %5lu %5lu %5lu\n",
425 (long unsigned)skc
->skc_flags
,
426 (long unsigned)(skc
->skc_slab_size
* skc
->skc_slab_total
),
427 (long unsigned)(skc
->skc_obj_size
* skc
->skc_obj_alloc
),
428 (unsigned)skc
->skc_slab_size
,
429 (unsigned)skc
->skc_obj_size
,
430 (long unsigned)skc
->skc_slab_total
,
431 (long unsigned)skc
->skc_slab_alloc
,
432 (long unsigned)skc
->skc_slab_max
,
433 (long unsigned)skc
->skc_obj_total
,
434 (long unsigned)skc
->skc_obj_alloc
,
435 (long unsigned)skc
->skc_obj_max
,
436 (long unsigned)skc
->skc_obj_deadlock
,
437 (long unsigned)skc
->skc_obj_emergency
,
438 (long unsigned)skc
->skc_obj_emergency_max
);
440 spin_unlock(&skc
->skc_lock
);
446 slab_seq_start(struct seq_file
*f
, loff_t
*pos
)
451 down_read(&spl_kmem_cache_sem
);
453 slab_seq_show_headers(f
);
455 p
= spl_kmem_cache_list
.next
;
458 if (p
== &spl_kmem_cache_list
)
462 return (list_entry(p
, spl_kmem_cache_t
, skc_list
));
466 slab_seq_next(struct seq_file
*f
, void *p
, loff_t
*pos
)
468 spl_kmem_cache_t
*skc
= p
;
471 return ((skc
->skc_list
.next
== &spl_kmem_cache_list
) ?
472 NULL
: list_entry(skc
->skc_list
.next
,spl_kmem_cache_t
,skc_list
));
476 slab_seq_stop(struct seq_file
*f
, void *v
)
478 up_read(&spl_kmem_cache_sem
);
481 static struct seq_operations slab_seq_ops
= {
482 .show
= slab_seq_show
,
483 .start
= slab_seq_start
,
484 .next
= slab_seq_next
,
485 .stop
= slab_seq_stop
,
489 proc_slab_open(struct inode
*inode
, struct file
*filp
)
491 return seq_open(filp
, &slab_seq_ops
);
494 static struct file_operations proc_slab_operations
= {
495 .open
= proc_slab_open
,
498 .release
= seq_release
,
502 taskq_seq_stop(struct seq_file
*f
, void *v
)
504 up_read(&tq_list_sem
);
507 static struct seq_operations taskq_all_seq_ops
= {
508 .show
= taskq_all_seq_show
,
509 .start
= taskq_seq_start
,
510 .next
= taskq_seq_next
,
511 .stop
= taskq_seq_stop
,
514 static struct seq_operations taskq_seq_ops
= {
515 .show
= taskq_seq_show
,
516 .start
= taskq_seq_start
,
517 .next
= taskq_seq_next
,
518 .stop
= taskq_seq_stop
,
522 proc_taskq_all_open(struct inode
*inode
, struct file
*filp
)
524 return seq_open(filp
, &taskq_all_seq_ops
);
528 proc_taskq_open(struct inode
*inode
, struct file
*filp
)
530 return seq_open(filp
, &taskq_seq_ops
);
533 static struct file_operations proc_taskq_all_operations
= {
534 .open
= proc_taskq_all_open
,
537 .release
= seq_release
,
540 static struct file_operations proc_taskq_operations
= {
541 .open
= proc_taskq_open
,
544 .release
= seq_release
,
547 static struct ctl_table spl_kmem_table
[] = {
550 .procname
= "kmem_used",
551 .data
= &kmem_alloc_used
,
552 # ifdef HAVE_ATOMIC64_T
553 .maxlen
= sizeof(atomic64_t
),
555 .maxlen
= sizeof(atomic_t
),
556 # endif /* HAVE_ATOMIC64_T */
558 .proc_handler
= &proc_domemused
,
561 .procname
= "kmem_max",
562 .data
= &kmem_alloc_max
,
563 .maxlen
= sizeof(unsigned long),
564 .extra1
= &table_min
,
565 .extra2
= &table_max
,
567 .proc_handler
= &proc_doulongvec_minmax
,
569 #endif /* DEBUG_KMEM */
571 .procname
= "slab_kmem_total",
572 .data
= (void *)(KMC_KMEM
| KMC_TOTAL
),
573 .maxlen
= sizeof(unsigned long),
574 .extra1
= &table_min
,
575 .extra2
= &table_max
,
577 .proc_handler
= &proc_doslab
,
580 .procname
= "slab_kmem_alloc",
581 .data
= (void *)(KMC_KMEM
| KMC_ALLOC
),
582 .maxlen
= sizeof(unsigned long),
583 .extra1
= &table_min
,
584 .extra2
= &table_max
,
586 .proc_handler
= &proc_doslab
,
589 .procname
= "slab_kmem_max",
590 .data
= (void *)(KMC_KMEM
| KMC_MAX
),
591 .maxlen
= sizeof(unsigned long),
592 .extra1
= &table_min
,
593 .extra2
= &table_max
,
595 .proc_handler
= &proc_doslab
,
598 .procname
= "slab_vmem_total",
599 .data
= (void *)(KMC_VMEM
| KMC_TOTAL
),
600 .maxlen
= sizeof(unsigned long),
601 .extra1
= &table_min
,
602 .extra2
= &table_max
,
604 .proc_handler
= &proc_doslab
,
607 .procname
= "slab_vmem_alloc",
608 .data
= (void *)(KMC_VMEM
| KMC_ALLOC
),
609 .maxlen
= sizeof(unsigned long),
610 .extra1
= &table_min
,
611 .extra2
= &table_max
,
613 .proc_handler
= &proc_doslab
,
616 .procname
= "slab_vmem_max",
617 .data
= (void *)(KMC_VMEM
| KMC_MAX
),
618 .maxlen
= sizeof(unsigned long),
619 .extra1
= &table_min
,
620 .extra2
= &table_max
,
622 .proc_handler
= &proc_doslab
,
627 static struct ctl_table spl_kstat_table
[] = {
631 static struct ctl_table spl_table
[] = {
632 /* NB No .strategy entries have been provided since
633 * sysctl(8) prefers to go via /proc for portability.
636 .procname
= "version",
638 .maxlen
= sizeof(spl_version
),
640 .proc_handler
= &proc_dostring
,
643 .procname
= "hostid",
645 .maxlen
= sizeof(unsigned long),
647 .proc_handler
= &proc_dohostid
,
652 .child
= spl_kmem_table
,
657 .child
= spl_kstat_table
,
662 static struct ctl_table spl_dir
[] = {
671 static struct ctl_table spl_root
[] = {
674 .ctl_name
= CTL_KERN
,
676 .procname
= "kernel",
688 spl_header
= register_sysctl_table(spl_root
);
689 if (spl_header
== NULL
)
692 proc_spl
= proc_mkdir("spl", NULL
);
693 if (proc_spl
== NULL
) {
698 proc_spl_taskq_all
= proc_create_data("taskq-all", 0444,
699 proc_spl
, &proc_taskq_all_operations
, NULL
);
700 if (proc_spl_taskq_all
== NULL
) {
705 proc_spl_taskq
= proc_create_data("taskq", 0444,
706 proc_spl
, &proc_taskq_operations
, NULL
);
707 if (proc_spl_taskq
== NULL
) {
712 proc_spl_kmem
= proc_mkdir("kmem", proc_spl
);
713 if (proc_spl_kmem
== NULL
) {
718 proc_spl_kmem_slab
= proc_create_data("slab", 0444,
719 proc_spl_kmem
, &proc_slab_operations
, NULL
);
720 if (proc_spl_kmem_slab
== NULL
) {
725 proc_spl_kstat
= proc_mkdir("kstat", proc_spl
);
726 if (proc_spl_kstat
== NULL
) {
732 remove_proc_entry("kstat", proc_spl
);
733 remove_proc_entry("slab", proc_spl_kmem
);
734 remove_proc_entry("kmem", proc_spl
);
735 remove_proc_entry("taskq-all", proc_spl
);
736 remove_proc_entry("taskq", proc_spl
);
737 remove_proc_entry("spl", NULL
);
738 unregister_sysctl_table(spl_header
);
747 remove_proc_entry("kstat", proc_spl
);
748 remove_proc_entry("slab", proc_spl_kmem
);
749 remove_proc_entry("kmem", proc_spl
);
750 remove_proc_entry("taskq-all", proc_spl
);
751 remove_proc_entry("taskq", proc_spl
);
752 remove_proc_entry("spl", NULL
);
754 ASSERT(spl_header
!= NULL
);
755 unregister_sysctl_table(spl_header
);