/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *  UCRL-CODE-235197
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://zfsonlinux.org/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL. If not, see <http://www.gnu.org/licenses/>.
 *****************************************************************************
 *  Solaris Porting Layer (SPL) Proc Implementation.
\*****************************************************************************/

#include <sys/systeminfo.h>
#include <sys/kstat.h>
#include <sys/kmem.h>
#include <sys/kmem_cache.h>
#include <sys/vmem.h>
#include <linux/ctype.h>
#include <linux/kmod.h>
#include <linux/seq_file.h>
#include <linux/proc_compat.h>
#include <linux/uaccess.h>
#include <linux/version.h>

#if defined(CONSTIFY_PLUGIN) && LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)
typedef struct ctl_table __no_const spl_ctl_table;
#else
typedef struct ctl_table spl_ctl_table;
#endif

static unsigned long table_min = 0;
static unsigned long table_max = ~0;

static struct ctl_table_header *spl_header = NULL;
static struct proc_dir_entry *proc_spl = NULL;
static struct proc_dir_entry *proc_spl_kmem = NULL;
static struct proc_dir_entry *proc_spl_kmem_slab = NULL;
struct proc_dir_entry *proc_spl_kstat = NULL;

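/*
 * Copy a string from user space into a kernel buffer, strip trailing
 * whitespace, and NUL terminate the result.  Returns 0 on success or a
 * negative errno if the buffer is too small, the copy faults, or the
 * resulting string is empty.
 */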
static int
proc_copyin_string(char *kbuffer, int kbuffer_size,
    const char *ubuffer, int ubuffer_size)
{
	int size;

	if (ubuffer_size > kbuffer_size)
		return -EOVERFLOW;

	if (copy_from_user((void *)kbuffer, (void *)ubuffer, ubuffer_size))
		return -EFAULT;

	/* strip trailing whitespace */
	size = strnlen(kbuffer, ubuffer_size);
	while (size-- >= 0)
		if (!isspace(kbuffer[size]))
			break;

	/* empty string */
	if (size < 0)
		return -EINVAL;

	/* no space to terminate */
	if (size == kbuffer_size)
		return -EOVERFLOW;

	kbuffer[size + 1] = 0;
	return 0;
}

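/*
 * Copy a kernel string out to a user buffer, optionally appending a
 * single extra character (see the note below).  Returns the number of
 * bytes written or a negative errno on fault.
 */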
static int
proc_copyout_string(char *ubuffer, int ubuffer_size,
    const char *kbuffer, char *append)
{
	/* NB if 'append' != NULL, it's a single character to append to the
	 * copied out string - usually "\n" for /proc entries and "" (i.e. a
	 * terminating zero byte) for sysctl entries
	 */
	int size = MIN(strlen(kbuffer), ubuffer_size);

	if (copy_to_user(ubuffer, kbuffer, size))
		return -EFAULT;

	if (append != NULL && size < ubuffer_size) {
		if (copy_to_user(ubuffer + size, append, 1))
			return -EFAULT;

		size++;
	}

	return size;
}

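/*
 * When built with DEBUG_KMEM, report the atomic allocation counter
 * referenced by table->data.  The value is copied into a stack variable
 * and formatted via proc_doulongvec_minmax(); writes are ignored since
 * the entries using this handler are read-only.
 */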
#ifdef DEBUG_KMEM
static int
proc_domemused(struct ctl_table *table, int write,
    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int rc = 0;
	unsigned long min = 0, max = ~0, val;
	spl_ctl_table dummy = *table;

	dummy.data = &val;
	dummy.proc_handler = &proc_dointvec;
	dummy.extra1 = &min;
	dummy.extra2 = &max;

	if (write) {
		*ppos += *lenp;
	} else {
# ifdef HAVE_ATOMIC64_T
		val = atomic64_read((atomic64_t *)table->data);
# else
		val = atomic_read((atomic_t *)table->data);
# endif /* HAVE_ATOMIC64_T */
		rc = proc_doulongvec_minmax(&dummy, write, buffer, lenp, ppos);
	}

	return (rc);
}
#endif /* DEBUG_KMEM */

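/*
 * Sum a per-cache statistic over all registered SPL slab caches.  The
 * ctl_table 'data' field encodes which caches to include (KMC_KMEM or
 * KMC_VMEM) and which field to sum (KMC_TOTAL, KMC_ALLOC, or KMC_MAX).
 * The entries using this handler are read-only.
 */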
static int
proc_doslab(struct ctl_table *table, int write,
    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int rc = 0;
	unsigned long min = 0, max = ~0, val = 0, mask;
	spl_ctl_table dummy = *table;
	spl_kmem_cache_t *skc;

	dummy.data = &val;
	dummy.proc_handler = &proc_dointvec;
	dummy.extra1 = &min;
	dummy.extra2 = &max;

	if (write) {
		*ppos += *lenp;
	} else {
		down_read(&spl_kmem_cache_sem);
		mask = (unsigned long)table->data;

		list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {

			/* Only use slabs of the correct kmem/vmem type */
			if (!(skc->skc_flags & mask))
				continue;

			/* Sum the specified field for selected slabs */
			switch (mask & (KMC_TOTAL | KMC_ALLOC | KMC_MAX)) {
			case KMC_TOTAL:
				val += skc->skc_slab_size * skc->skc_slab_total;
				break;
			case KMC_ALLOC:
				val += skc->skc_obj_size * skc->skc_obj_alloc;
				break;
			case KMC_MAX:
				val += skc->skc_obj_size * skc->skc_obj_max;
				break;
			}
		}

		up_read(&spl_kmem_cache_sem);
		rc = proc_doulongvec_minmax(&dummy, write, buffer, lenp, ppos);
	}

	return (rc);
}

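/*
 * Handler for /proc/sys/kernel/spl/hostid.  Reads format spl_hostid as a
 * hexadecimal string; writes parse a hexadecimal string (without any
 * leading "0x") back into spl_hostid.
 */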
static int
proc_dohostid(struct ctl_table *table, int write,
    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int len, rc = 0;
	char *end, str[32];

	if (write) {
		/* We can't use proc_doulongvec_minmax() in the write case
		 * here because the hostid, while a hex value, has no
		 * leading 0x, which confuses the helper function. */
		rc = proc_copyin_string(str, sizeof(str), buffer, *lenp);
		if (rc < 0)
			return (rc);

		spl_hostid = simple_strtoul(str, &end, 16);
		if (str == end)
			return (-EINVAL);

	} else {
		len = snprintf(str, sizeof(str), "%lx", spl_hostid);
		if (*ppos >= len)
			rc = 0;
		else
			rc = proc_copyout_string(buffer, *lenp, str + *ppos, "\n");

		if (rc >= 0) {
			*lenp = rc;
			*ppos += rc;
		}
	}

	return (rc);
}

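/*
 * seq_file output for /proc/spl/kmem/slab: slab_seq_show_headers() prints
 * the column banner and slab_seq_show() emits one line of statistics per
 * registered cache.  Caches backed by the Linux slab are reported in
 * /proc/slabinfo instead and are skipped here.
 */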
static void
slab_seq_show_headers(struct seq_file *f)
{
	seq_printf(f,
	    "--------------------- cache ----------"
	    "--------------------------------------------- "
	    "----- slab ------ "
	    "---- object ----- "
	    "--- emergency ---\n");
	seq_printf(f,
	    "name                                 "
	    "  flags      size     alloc slabsize  objsize "
	    "total alloc   max "
	    "total alloc   max "
	    "dlock alloc   max\n");
}

static int
slab_seq_show(struct seq_file *f, void *p)
{
	spl_kmem_cache_t *skc = p;

	ASSERT(skc->skc_magic == SKC_MAGIC);

	/*
	 * Backed by Linux slab see /proc/slabinfo.
	 */
	if (skc->skc_flags & KMC_SLAB)
		return (0);

	spin_lock(&skc->skc_lock);
	seq_printf(f, "%-36s ", skc->skc_name);
	seq_printf(f, "0x%05lx %9lu %9lu %8u %8u "
	    "%5lu %5lu %5lu %5lu %5lu %5lu %5lu %5lu %5lu\n",
	    (long unsigned)skc->skc_flags,
	    (long unsigned)(skc->skc_slab_size * skc->skc_slab_total),
	    (long unsigned)(skc->skc_obj_size * skc->skc_obj_alloc),
	    (unsigned)skc->skc_slab_size,
	    (unsigned)skc->skc_obj_size,
	    (long unsigned)skc->skc_slab_total,
	    (long unsigned)skc->skc_slab_alloc,
	    (long unsigned)skc->skc_slab_max,
	    (long unsigned)skc->skc_obj_total,
	    (long unsigned)skc->skc_obj_alloc,
	    (long unsigned)skc->skc_obj_max,
	    (long unsigned)skc->skc_obj_deadlock,
	    (long unsigned)skc->skc_obj_emergency,
	    (long unsigned)skc->skc_obj_emergency_max);

	spin_unlock(&skc->skc_lock);

	return 0;
}

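/*
 * seq_file iterator callbacks.  slab_seq_start() takes spl_kmem_cache_sem
 * for reading and walks the cache list to the requested position,
 * slab_seq_next() advances to the following cache, and slab_seq_stop()
 * drops the semaphore.
 */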
static void *
slab_seq_start(struct seq_file *f, loff_t *pos)
{
	struct list_head *p;
	loff_t n = *pos;

	down_read(&spl_kmem_cache_sem);
	if (!n)
		slab_seq_show_headers(f);

	p = spl_kmem_cache_list.next;
	while (n--) {
		p = p->next;
		if (p == &spl_kmem_cache_list)
			return (NULL);
	}

	return (list_entry(p, spl_kmem_cache_t, skc_list));
}

static void *
slab_seq_next(struct seq_file *f, void *p, loff_t *pos)
{
	spl_kmem_cache_t *skc = p;

	++*pos;
	return ((skc->skc_list.next == &spl_kmem_cache_list) ?
	    NULL : list_entry(skc->skc_list.next, spl_kmem_cache_t, skc_list));
}

static void
slab_seq_stop(struct seq_file *f, void *v)
{
	up_read(&spl_kmem_cache_sem);
}

static struct seq_operations slab_seq_ops = {
	.show  = slab_seq_show,
	.start = slab_seq_start,
	.next  = slab_seq_next,
	.stop  = slab_seq_stop,
};

static int
proc_slab_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &slab_seq_ops);
}

static struct file_operations proc_slab_operations = {
	.open    = proc_slab_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

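/*
 * sysctl entries exposed under kernel/spl/kmem.  For the slab_* entries
 * the 'data' field does not point at storage; it carries the KMC_* mask
 * which tells proc_doslab() which caches and which statistic to sum.
 */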
static struct ctl_table spl_kmem_table[] = {
#ifdef DEBUG_KMEM
	{
		.procname = "kmem_used",
		.data = &kmem_alloc_used,
# ifdef HAVE_ATOMIC64_T
		.maxlen = sizeof(atomic64_t),
# else
		.maxlen = sizeof(atomic_t),
# endif /* HAVE_ATOMIC64_T */
		.mode = 0444,
		.proc_handler = &proc_domemused,
	},
	{
		.procname = "kmem_max",
		.data = &kmem_alloc_max,
		.maxlen = sizeof(unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doulongvec_minmax,
	},
#endif /* DEBUG_KMEM */
	{
		.procname = "slab_kmem_total",
		.data = (void *)(KMC_KMEM | KMC_TOTAL),
		.maxlen = sizeof(unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doslab,
	},
	{
		.procname = "slab_kmem_alloc",
		.data = (void *)(KMC_KMEM | KMC_ALLOC),
		.maxlen = sizeof(unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doslab,
	},
	{
		.procname = "slab_kmem_max",
		.data = (void *)(KMC_KMEM | KMC_MAX),
		.maxlen = sizeof(unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doslab,
	},
	{
		.procname = "slab_vmem_total",
		.data = (void *)(KMC_VMEM | KMC_TOTAL),
		.maxlen = sizeof(unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doslab,
	},
	{
		.procname = "slab_vmem_alloc",
		.data = (void *)(KMC_VMEM | KMC_ALLOC),
		.maxlen = sizeof(unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doslab,
	},
	{
		.procname = "slab_vmem_max",
		.data = (void *)(KMC_VMEM | KMC_MAX),
		.maxlen = sizeof(unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doslab,
	},
	{0},
};

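/*
 * Placeholder table for kernel/spl/kstat.  It contains only a terminator;
 * individual kstat entries are expected to be created dynamically under
 * /proc/spl/kstat (proc_spl_kstat above) rather than through sysctl.
 */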
static struct ctl_table spl_kstat_table[] = {
	{0},
};

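/*
 * Top level kernel/spl sysctl table: the read-only module version string,
 * the writable hostid, and the kmem and kstat subdirectories defined
 * above.  spl_dir and spl_root below nest this table under "kernel".
 */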
static struct ctl_table spl_table[] = {
	/* NB No .strategy entries have been provided since
	 * sysctl(8) prefers to go via /proc for portability.
	 */
	{
		.procname = "version",
		.data = spl_version,
		.maxlen = sizeof(spl_version),
		.mode = 0444,
		.proc_handler = &proc_dostring,
	},
	{
		.procname = "hostid",
		.data = &spl_hostid,
		.maxlen = sizeof(unsigned long),
		.mode = 0644,
		.proc_handler = &proc_dohostid,
	},
	{
		.procname = "kmem",
		.mode = 0555,
		.child = spl_kmem_table,
	},
	{
		.procname = "kstat",
		.mode = 0555,
		.child = spl_kstat_table,
	},
	{ 0 },
};

static struct ctl_table spl_dir[] = {
	{
		.procname = "spl",
		.mode = 0555,
		.child = spl_table,
	},
	{ 0 }
};

static struct ctl_table spl_root[] = {
	{
#ifdef HAVE_CTL_NAME
		.ctl_name = CTL_KERN,
#endif
		.procname = "kernel",
		.mode = 0555,
		.child = spl_dir,
	},
	{ 0 }
};

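/*
 * Register the kernel.spl sysctl tree and create the /proc/spl,
 * /proc/spl/kmem, /proc/spl/kmem/slab, and /proc/spl/kstat entries.  If
 * any step fails, everything created so far is torn down and -EUNATCH
 * is returned.
 */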
int
spl_proc_init(void)
{
	int rc = 0;

	spl_header = register_sysctl_table(spl_root);
	if (spl_header == NULL)
		return (-EUNATCH);

	proc_spl = proc_mkdir("spl", NULL);
	if (proc_spl == NULL) {
		rc = -EUNATCH;
		goto out;
	}

	proc_spl_kmem = proc_mkdir("kmem", proc_spl);
	if (proc_spl_kmem == NULL) {
		rc = -EUNATCH;
		goto out;
	}

	proc_spl_kmem_slab = proc_create_data("slab", 0444,
	    proc_spl_kmem, &proc_slab_operations, NULL);
	if (proc_spl_kmem_slab == NULL) {
		rc = -EUNATCH;
		goto out;
	}

	proc_spl_kstat = proc_mkdir("kstat", proc_spl);
	if (proc_spl_kstat == NULL) {
		rc = -EUNATCH;
		goto out;
	}
out:
	if (rc) {
		remove_proc_entry("kstat", proc_spl);
		remove_proc_entry("slab", proc_spl_kmem);
		remove_proc_entry("kmem", proc_spl);
		remove_proc_entry("spl", NULL);
		unregister_sysctl_table(spl_header);
	}

	return (rc);
}

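/*
 * Remove the /proc/spl hierarchy and unregister the sysctl table
 * registered by spl_proc_init().
 */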
void
spl_proc_fini(void)
{
	remove_proc_entry("kstat", proc_spl);
	remove_proc_entry("slab", proc_spl_kmem);
	remove_proc_entry("kmem", proc_spl);
	remove_proc_entry("spl", NULL);

	ASSERT(spl_header != NULL);
	unregister_sysctl_table(spl_header);
}