/*
 * Source: git.proxmox.com mirror_spl.git, module/spl/spl-proc.c
 * (blob 137af7188a035ea3144700110f85c5ac3dce3d26)
 */
1 /*****************************************************************************\
2 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
3 * Copyright (C) 2007 The Regents of the University of California.
4 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
5 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
6 * UCRL-CODE-235197
7 *
8 * This file is part of the SPL, Solaris Porting Layer.
9 * For details, see <http://zfsonlinux.org/>.
10 *
11 * The SPL is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 *
16 * The SPL is distributed in the hope that it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 * for more details.
20 *
21 * You should have received a copy of the GNU General Public License along
22 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
23 *****************************************************************************
24 * Solaris Porting Layer (SPL) Proc Implementation.
25 \*****************************************************************************/
26
27 #include <sys/systeminfo.h>
28 #include <sys/kstat.h>
29 #include <linux/kmod.h>
30 #include <linux/seq_file.h>
31 #include <linux/proc_compat.h>
32 #include <linux/version.h>
33
#if defined(CONSTIFY_PLUGIN) && LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)
/* The grsecurity constify plugin makes ctl_table const; __no_const opts
 * these tables out so local 'dummy' copies may be modified at runtime. */
typedef struct ctl_table __no_const spl_ctl_table;
#else
typedef struct ctl_table spl_ctl_table;
#endif

#ifdef DEBUG_KMEM
/* Bounds handed to proc_doulongvec_minmax() for the kmem/vmem entries */
static unsigned long table_min = 0;
static unsigned long table_max = ~0;
#endif

/* Handle for the sysctl tree registered from spl_root (kernel.spl.*) */
static struct ctl_table_header *spl_header = NULL;
/* /proc/spl directory hierarchy created by spl_proc_init() */
static struct proc_dir_entry *proc_spl = NULL;
#ifdef DEBUG_KMEM
static struct proc_dir_entry *proc_spl_kmem = NULL;
static struct proc_dir_entry *proc_spl_kmem_slab = NULL;
#endif /* DEBUG_KMEM */
/* Non-static: kstat registration code elsewhere hangs entries off this */
struct proc_dir_entry *proc_spl_kstat = NULL;
52
53 static int
54 proc_copyin_string(char *kbuffer, int kbuffer_size,
55 const char *ubuffer, int ubuffer_size)
56 {
57 int size;
58
59 if (ubuffer_size > kbuffer_size)
60 return -EOVERFLOW;
61
62 if (copy_from_user((void *)kbuffer, (void *)ubuffer, ubuffer_size))
63 return -EFAULT;
64
65 /* strip trailing whitespace */
66 size = strnlen(kbuffer, ubuffer_size);
67 while (size-- >= 0)
68 if (!isspace(kbuffer[size]))
69 break;
70
71 /* empty string */
72 if (size < 0)
73 return -EINVAL;
74
75 /* no space to terminate */
76 if (size == kbuffer_size)
77 return -EOVERFLOW;
78
79 kbuffer[size + 1] = 0;
80 return 0;
81 }
82
83 static int
84 proc_copyout_string(char *ubuffer, int ubuffer_size,
85 const char *kbuffer, char *append)
86 {
87 /* NB if 'append' != NULL, it's a single character to append to the
88 * copied out string - usually "\n", for /proc entries and
89 * (i.e. a terminating zero byte) for sysctl entries
90 */
91 int size = MIN(strlen(kbuffer), ubuffer_size);
92
93 if (copy_to_user(ubuffer, kbuffer, size))
94 return -EFAULT;
95
96 if (append != NULL && size < ubuffer_size) {
97 if (copy_to_user(ubuffer + size, append, 1))
98 return -EFAULT;
99
100 size++;
101 }
102
103 return size;
104 }
105
106 #ifdef DEBUG_KMEM
107 static int
108 proc_domemused(struct ctl_table *table, int write,
109 void __user *buffer, size_t *lenp, loff_t *ppos)
110 {
111 int rc = 0;
112 unsigned long min = 0, max = ~0, val;
113 spl_ctl_table dummy = *table;
114
115 dummy.data = &val;
116 dummy.proc_handler = &proc_dointvec;
117 dummy.extra1 = &min;
118 dummy.extra2 = &max;
119
120 if (write) {
121 *ppos += *lenp;
122 } else {
123 # ifdef HAVE_ATOMIC64_T
124 val = atomic64_read((atomic64_t *)table->data);
125 # else
126 val = atomic_read((atomic_t *)table->data);
127 # endif /* HAVE_ATOMIC64_T */
128 rc = proc_doulongvec_minmax(&dummy, write, buffer, lenp, ppos);
129 }
130
131 return (rc);
132 }
133
134 static int
135 proc_doslab(struct ctl_table *table, int write,
136 void __user *buffer, size_t *lenp, loff_t *ppos)
137 {
138 int rc = 0;
139 unsigned long min = 0, max = ~0, val = 0, mask;
140 spl_ctl_table dummy = *table;
141 spl_kmem_cache_t *skc;
142
143 dummy.data = &val;
144 dummy.proc_handler = &proc_dointvec;
145 dummy.extra1 = &min;
146 dummy.extra2 = &max;
147
148 if (write) {
149 *ppos += *lenp;
150 } else {
151 down_read(&spl_kmem_cache_sem);
152 mask = (unsigned long)table->data;
153
154 list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {
155
156 /* Only use slabs of the correct kmem/vmem type */
157 if (!(skc->skc_flags & mask))
158 continue;
159
160 /* Sum the specified field for selected slabs */
161 switch (mask & (KMC_TOTAL | KMC_ALLOC | KMC_MAX)) {
162 case KMC_TOTAL:
163 val += skc->skc_slab_size * skc->skc_slab_total;
164 break;
165 case KMC_ALLOC:
166 val += skc->skc_obj_size * skc->skc_obj_alloc;
167 break;
168 case KMC_MAX:
169 val += skc->skc_obj_size * skc->skc_obj_max;
170 break;
171 }
172 }
173
174 up_read(&spl_kmem_cache_sem);
175 rc = proc_doulongvec_minmax(&dummy, write, buffer, lenp, ppos);
176 }
177
178 return (rc);
179 }
180 #endif /* DEBUG_KMEM */
181
182 static int
183 proc_dohostid(struct ctl_table *table, int write,
184 void __user *buffer, size_t *lenp, loff_t *ppos)
185 {
186 int len, rc = 0;
187 char *end, str[32];
188
189 if (write) {
190 /* We can't use proc_doulongvec_minmax() in the write
191 * case here because hostid while a hex value has no
192 * leading 0x which confuses the helper function. */
193 rc = proc_copyin_string(str, sizeof(str), buffer, *lenp);
194 if (rc < 0)
195 return (rc);
196
197 spl_hostid = simple_strtoul(str, &end, 16);
198 if (str == end)
199 return (-EINVAL);
200
201 } else {
202 len = snprintf(str, sizeof(str), "%lx", spl_hostid);
203 if (*ppos >= len)
204 rc = 0;
205 else
206 rc = proc_copyout_string(buffer,*lenp,str+*ppos,"\n");
207
208 if (rc >= 0) {
209 *lenp = rc;
210 *ppos += rc;
211 }
212 }
213
214 return (rc);
215 }
216
217 #ifdef DEBUG_KMEM
/*
 * Emit the two-line column header for /proc/spl/kmem/slab; the columns
 * line up with the fields printed per-cache by slab_seq_show().
 */
static void
slab_seq_show_headers(struct seq_file *f)
{
	seq_printf(f,
	    "--------------------- cache ----------"
	    "--------------------------------------------- "
	    "----- slab ------ "
	    "---- object ----- "
	    "--- emergency ---\n");
	seq_printf(f,
	    "name "
	    " flags size alloc slabsize objsize "
	    "total alloc max "
	    "total alloc max "
	    "dlock alloc max\n");
}
234
/*
 * Print one /proc/spl/kmem/slab row for the given cache: flags, byte
 * totals, then slab/object/emergency counters.  Caches backed by the
 * Linux slab allocator are skipped since /proc/slabinfo already
 * reports them.  Holds skc_lock so the counters form a consistent
 * snapshot.
 */
static int
slab_seq_show(struct seq_file *f, void *p)
{
	spl_kmem_cache_t *skc = p;

	ASSERT(skc->skc_magic == SKC_MAGIC);

	/*
	 * Backed by Linux slab see /proc/slabinfo.
	 */
	if (skc->skc_flags & KMC_SLAB)
		return (0);

	spin_lock(&skc->skc_lock);
	seq_printf(f, "%-36s ", skc->skc_name);
	seq_printf(f, "0x%05lx %9lu %9lu %8u %8u "
	    "%5lu %5lu %5lu %5lu %5lu %5lu %5lu %5lu %5lu\n",
	    (long unsigned)skc->skc_flags,
	    (long unsigned)(skc->skc_slab_size * skc->skc_slab_total),
	    (long unsigned)(skc->skc_obj_size * skc->skc_obj_alloc),
	    (unsigned)skc->skc_slab_size,
	    (unsigned)skc->skc_obj_size,
	    (long unsigned)skc->skc_slab_total,
	    (long unsigned)skc->skc_slab_alloc,
	    (long unsigned)skc->skc_slab_max,
	    (long unsigned)skc->skc_obj_total,
	    (long unsigned)skc->skc_obj_alloc,
	    (long unsigned)skc->skc_obj_max,
	    (long unsigned)skc->skc_obj_deadlock,
	    (long unsigned)skc->skc_obj_emergency,
	    (long unsigned)skc->skc_obj_emergency_max);

	spin_unlock(&skc->skc_lock);

	return 0;
}
271
272 static void *
273 slab_seq_start(struct seq_file *f, loff_t *pos)
274 {
275 struct list_head *p;
276 loff_t n = *pos;
277
278 down_read(&spl_kmem_cache_sem);
279 if (!n)
280 slab_seq_show_headers(f);
281
282 p = spl_kmem_cache_list.next;
283 while (n--) {
284 p = p->next;
285 if (p == &spl_kmem_cache_list)
286 return (NULL);
287 }
288
289 return (list_entry(p, spl_kmem_cache_t, skc_list));
290 }
291
292 static void *
293 slab_seq_next(struct seq_file *f, void *p, loff_t *pos)
294 {
295 spl_kmem_cache_t *skc = p;
296
297 ++*pos;
298 return ((skc->skc_list.next == &spl_kmem_cache_list) ?
299 NULL : list_entry(skc->skc_list.next,spl_kmem_cache_t,skc_list));
300 }
301
/* seq_file stop callback: drop the read lock taken in slab_seq_start(). */
static void
slab_seq_stop(struct seq_file *f, void *v)
{
	up_read(&spl_kmem_cache_sem);
}
307
308 static struct seq_operations slab_seq_ops = {
309 .show = slab_seq_show,
310 .start = slab_seq_start,
311 .next = slab_seq_next,
312 .stop = slab_seq_stop,
313 };
314
315 static int
316 proc_slab_open(struct inode *inode, struct file *filp)
317 {
318 return seq_open(filp, &slab_seq_ops);
319 }
320
/* file_operations for /proc/spl/kmem/slab; reads stream via seq_file */
static struct file_operations proc_slab_operations = {
	.open = proc_slab_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
327 #endif /* DEBUG_KMEM */
328
329 #ifdef DEBUG_KMEM
/*
 * Entries under /proc/sys/kernel/spl/kmem/.  All are read-only (0444).
 * The kmem/vmem counters are serviced by proc_domemused(); the slab_*
 * entries smuggle a KMC_{KMEM,VMEM} | KMC_{TOTAL,ALLOC,MAX} selector
 * through .data (cast to void *) which proc_doslab() decodes rather
 * than dereferences.
 */
static struct ctl_table spl_kmem_table[] = {
	{
		.procname = "kmem_used",
		.data = &kmem_alloc_used,
# ifdef HAVE_ATOMIC64_T
		.maxlen = sizeof(atomic64_t),
# else
		.maxlen = sizeof(atomic_t),
# endif /* HAVE_ATOMIC64_T */
		.mode = 0444,
		.proc_handler = &proc_domemused,
	},
	{
		.procname = "kmem_max",
		.data = &kmem_alloc_max,
		.maxlen = sizeof(unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doulongvec_minmax,
	},
	{
		.procname = "vmem_used",
		.data = &vmem_alloc_used,
# ifdef HAVE_ATOMIC64_T
		.maxlen = sizeof(atomic64_t),
# else
		.maxlen = sizeof(atomic_t),
# endif /* HAVE_ATOMIC64_T */
		.mode = 0444,
		.proc_handler = &proc_domemused,
	},
	{
		.procname = "vmem_max",
		.data = &vmem_alloc_max,
		.maxlen = sizeof(unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doulongvec_minmax,
	},
	{
		.procname = "slab_kmem_total",
		.data = (void *)(KMC_KMEM | KMC_TOTAL),
		.maxlen = sizeof(unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doslab,
	},
	{
		.procname = "slab_kmem_alloc",
		.data = (void *)(KMC_KMEM | KMC_ALLOC),
		.maxlen = sizeof(unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doslab,
	},
	{
		.procname = "slab_kmem_max",
		.data = (void *)(KMC_KMEM | KMC_MAX),
		.maxlen = sizeof(unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doslab,
	},
	{
		.procname = "slab_vmem_total",
		.data = (void *)(KMC_VMEM | KMC_TOTAL),
		.maxlen = sizeof(unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doslab,
	},
	{
		.procname = "slab_vmem_alloc",
		.data = (void *)(KMC_VMEM | KMC_ALLOC),
		.maxlen = sizeof(unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doslab,
	},
	{
		.procname = "slab_vmem_max",
		.data = (void *)(KMC_VMEM | KMC_MAX),
		.maxlen = sizeof(unsigned long),
		.extra1 = &table_min,
		.extra2 = &table_max,
		.mode = 0444,
		.proc_handler = &proc_doslab,
	},
	{0},	/* sentinel */
};
427 #endif /* DEBUG_KMEM */
428
/* /proc/sys/kernel/spl/kstat/ is a directory only; no sysctl entries */
static struct ctl_table spl_kstat_table[] = {
	{0},	/* sentinel */
};
432
/* Entries under /proc/sys/kernel/spl/ */
static struct ctl_table spl_table[] = {
	/* NB No .strategy entries have been provided since
	 * sysctl(8) prefers to go via /proc for portability.
	 */
	{
		/* read-only SPL version string */
		.procname = "version",
		.data = spl_version,
		.maxlen = sizeof(spl_version),
		.mode = 0444,
		.proc_handler = &proc_dostring,
	},
	{
		/* read/write hostid, custom hex handler (no 0x prefix) */
		.procname = "hostid",
		.data = &spl_hostid,
		.maxlen = sizeof(unsigned long),
		.mode = 0644,
		.proc_handler = &proc_dohostid,
	},
#ifdef DEBUG_KMEM
	{
		.procname = "kmem",
		.mode = 0555,
		.child = spl_kmem_table,
	},
#endif
	{
		.procname = "kstat",
		.mode = 0555,
		.child = spl_kstat_table,
	},
	{ 0 },	/* sentinel */
};
465
/* "spl" directory node nesting spl_table under kernel.* */
static struct ctl_table spl_dir[] = {
	{
		.procname = "spl",
		.mode = 0555,
		.child = spl_table,
	},
	{ 0 }	/* sentinel */
};
474
/* Root of the registered sysctl tree: kernel.spl.* */
static struct ctl_table spl_root[] = {
	{
#ifdef HAVE_CTL_NAME
	.ctl_name = CTL_KERN,
#endif
	.procname = "kernel",
	.mode = 0555,
	.child = spl_dir,
	},
	{ 0 }	/* sentinel */
};
486
487 int
488 spl_proc_init(void)
489 {
490 int rc = 0;
491
492 spl_header = register_sysctl_table(spl_root);
493 if (spl_header == NULL)
494 return (-EUNATCH);
495
496 proc_spl = proc_mkdir("spl", NULL);
497 if (proc_spl == NULL) {
498 rc = -EUNATCH;
499 goto out;
500 }
501
502 #ifdef DEBUG_KMEM
503 proc_spl_kmem = proc_mkdir("kmem", proc_spl);
504 if (proc_spl_kmem == NULL) {
505 rc = -EUNATCH;
506 goto out;
507 }
508
509 proc_spl_kmem_slab = proc_create_data("slab", 0444,
510 proc_spl_kmem, &proc_slab_operations, NULL);
511 if (proc_spl_kmem_slab == NULL) {
512 rc = -EUNATCH;
513 goto out;
514 }
515
516 #endif /* DEBUG_KMEM */
517
518 proc_spl_kstat = proc_mkdir("kstat", proc_spl);
519 if (proc_spl_kstat == NULL) {
520 rc = -EUNATCH;
521 goto out;
522 }
523 out:
524 if (rc) {
525 remove_proc_entry("kstat", proc_spl);
526 #ifdef DEBUG_KMEM
527 remove_proc_entry("slab", proc_spl_kmem);
528 remove_proc_entry("kmem", proc_spl);
529 #endif
530 remove_proc_entry("spl", NULL);
531 unregister_sysctl_table(spl_header);
532 }
533
534 return (rc);
535 }
536
/*
 * Tear down everything created by spl_proc_init(): remove the
 * /proc/spl tree child-first, then unregister the sysctl table.
 * Assumes spl_proc_init() succeeded (spl_header non-NULL).
 */
void
spl_proc_fini(void)
{
	remove_proc_entry("kstat", proc_spl);
#ifdef DEBUG_KMEM
	remove_proc_entry("slab", proc_spl_kmem);
	remove_proc_entry("kmem", proc_spl);
#endif
	remove_proc_entry("spl", NULL);

	ASSERT(spl_header != NULL);
	unregister_sysctl_table(spl_header);
}