/*
 * linux/fs/proc/proc_misc.c
 *
 * linux/fs/proc/array.c
 * Copyright (C) 1992 by Linus Torvalds
 * based on ideas by Darren Senn
 *
 * This used to be the part of array.c. See the rest of history and credits
 * there. I took this into a separate file and switched the thing to generic
 * proc_file_inode_operations, leaving in array.c only per-process stuff.
 * Inumbers allocation made dynamic (via create_proc_entry()). AV, May 1999.
 *
 * Changes:
 * Fulton Green : Encapsulated position metric calculations.
 *                <kernel@FultonGreen.com>
 */

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/string.h>
#include <linux/mman.h>
#include <linux/proc_fs.h>
#include <linux/ioport.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/signal.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/times.h>
#include <linux/profile.h>
#include <linux/utsname.h>
#include <linux/blkdev.h>
#include <linux/hugetlb.h>
#include <linux/jiffies.h>
#include <linux/sysrq.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
#include <linux/pid_namespace.h>
#include <linux/bootmem.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/tlb.h>
#include <asm/div64.h>
#include "internal.h"

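/*
 * avenrun[] holds the load averages as fixed-point values with FSHIFT
 * fractional bits.  LOAD_INT() extracts the integer part and LOAD_FRAC()
 * the first two decimal places for display.
 */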
#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
/*
 * Warning: stuff below (imported functions) assumes that its output will fit
 * into one page. For some of those functions it may be wrong. Moreover, we
 * have a way to deal with that gracefully. Right now I used straightforward
 * wrappers, but this needs further analysis wrt potential overflows.
 */
extern int get_hardware_list(char *);
extern int get_stram_list(char *);
extern int get_filesystem_list(char *);
extern int get_exec_domain_list(char *);
extern int get_dma_list(char *);

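/*
 * Helper shared by the legacy read_proc handlers below: the caller has
 * formatted 'len' bytes into 'page'; trim that to the (off, count) window
 * requested by the reader, point *start at it, and set *eof once the end
 * of the formatted data has been reached.
 */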
static int proc_calc_metrics(char *page, char **start, off_t off,
				 int count, int *eof, int len)
{
	if (len <= off+count) *eof = 1;
	*start = page + off;
	len -= off;
	if (len>count) len = count;
	if (len<0) len = 0;
	return len;
}

static int loadavg_read_proc(char *page, char **start, off_t off,
				 int count, int *eof, void *data)
{
	int a, b, c;
	int len;

	a = avenrun[0] + (FIXED_1/200);
	b = avenrun[1] + (FIXED_1/200);
	c = avenrun[2] + (FIXED_1/200);
	len = sprintf(page,"%d.%02d %d.%02d %d.%02d %ld/%d %d\n",
		LOAD_INT(a), LOAD_FRAC(a),
		LOAD_INT(b), LOAD_FRAC(b),
		LOAD_INT(c), LOAD_FRAC(c),
		nr_running(), nr_threads,
		task_active_pid_ns(current)->last_pid);
	return proc_calc_metrics(page, start, off, count, eof, len);
}

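/*
 * /proc/uptime: uptime and idle time, both in seconds with two decimal
 * places.  The idle figure is the accumulated user+system time of
 * init_task, the boot idle task.
 */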
static int uptime_read_proc(char *page, char **start, off_t off,
				 int count, int *eof, void *data)
{
	struct timespec uptime;
	struct timespec idle;
	int len;
	cputime_t idletime = cputime_add(init_task.utime, init_task.stime);

	do_posix_clock_monotonic_gettime(&uptime);
	monotonic_to_bootbased(&uptime);
	cputime_to_timespec(idletime, &idle);
	len = sprintf(page,"%lu.%02lu %lu.%02lu\n",
			(unsigned long) uptime.tv_sec,
			(uptime.tv_nsec / (NSEC_PER_SEC / 100)),
			(unsigned long) idle.tv_sec,
			(idle.tv_nsec / (NSEC_PER_SEC / 100)));

	return proc_calc_metrics(page, start, off, count, eof, len);
}

static int meminfo_read_proc(char *page, char **start, off_t off,
				 int count, int *eof, void *data)
{
	struct sysinfo i;
	int len;
	unsigned long committed;
	unsigned long allowed;
	struct vmalloc_info vmi;
	long cached;
/*
 * display in kilobytes.
 */
#define K(x) ((x) << (PAGE_SHIFT - 10))
	si_meminfo(&i);
	si_swapinfo(&i);
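	/*
	 * Committed_AS is the total address space committed so far;
	 * CommitLimit is the strict-overcommit threshold:
	 * (RAM - hugetlb pages) scaled by vm.overcommit_ratio percent,
	 * plus swap.
	 */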
	committed = atomic_read(&vm_committed_space);
	allowed = ((totalram_pages - hugetlb_total_pages())
		* sysctl_overcommit_ratio / 100) + total_swap_pages;

	cached = global_page_state(NR_FILE_PAGES) -
			total_swapcache_pages - i.bufferram;
	if (cached < 0)
		cached = 0;

	get_vmalloc_info(&vmi);

	/*
	 * Tagged format, for easy grepping and expansion.
	 */
	len = sprintf(page,
		"MemTotal:     %8lu kB\n"
		"MemFree:      %8lu kB\n"
		"Buffers:      %8lu kB\n"
		"Cached:       %8lu kB\n"
		"SwapCached:   %8lu kB\n"
		"Active:       %8lu kB\n"
		"Inactive:     %8lu kB\n"
#ifdef CONFIG_HIGHMEM
		"HighTotal:    %8lu kB\n"
		"HighFree:     %8lu kB\n"
		"LowTotal:     %8lu kB\n"
		"LowFree:      %8lu kB\n"
#endif
		"SwapTotal:    %8lu kB\n"
		"SwapFree:     %8lu kB\n"
		"Dirty:        %8lu kB\n"
		"Writeback:    %8lu kB\n"
		"AnonPages:    %8lu kB\n"
		"Mapped:       %8lu kB\n"
		"Slab:         %8lu kB\n"
		"SReclaimable: %8lu kB\n"
		"SUnreclaim:   %8lu kB\n"
		"PageTables:   %8lu kB\n"
		"NFS_Unstable: %8lu kB\n"
		"Bounce:       %8lu kB\n"
		"CommitLimit:  %8lu kB\n"
		"Committed_AS: %8lu kB\n"
		"VmallocTotal: %8lu kB\n"
		"VmallocUsed:  %8lu kB\n"
		"VmallocChunk: %8lu kB\n",
		K(i.totalram),
		K(i.freeram),
		K(i.bufferram),
		K(cached),
		K(total_swapcache_pages),
		K(global_page_state(NR_ACTIVE)),
		K(global_page_state(NR_INACTIVE)),
#ifdef CONFIG_HIGHMEM
		K(i.totalhigh),
		K(i.freehigh),
		K(i.totalram-i.totalhigh),
		K(i.freeram-i.freehigh),
#endif
		K(i.totalswap),
		K(i.freeswap),
		K(global_page_state(NR_FILE_DIRTY)),
		K(global_page_state(NR_WRITEBACK)),
		K(global_page_state(NR_ANON_PAGES)),
		K(global_page_state(NR_FILE_MAPPED)),
		K(global_page_state(NR_SLAB_RECLAIMABLE) +
				global_page_state(NR_SLAB_UNRECLAIMABLE)),
		K(global_page_state(NR_SLAB_RECLAIMABLE)),
		K(global_page_state(NR_SLAB_UNRECLAIMABLE)),
		K(global_page_state(NR_PAGETABLE)),
		K(global_page_state(NR_UNSTABLE_NFS)),
		K(global_page_state(NR_BOUNCE)),
		K(allowed),
		K(committed),
		(unsigned long)VMALLOC_TOTAL >> 10,
		vmi.used >> 10,
		vmi.largest_chunk >> 10
		);

	len += hugetlb_report_meminfo(page + len);

	return proc_calc_metrics(page, start, off, count, eof, len);
#undef K
}

extern struct seq_operations fragmentation_op;
static int fragmentation_open(struct inode *inode, struct file *file)
{
	(void)inode;
	return seq_open(file, &fragmentation_op);
}

static const struct file_operations fragmentation_file_operations = {
	.open = fragmentation_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

extern struct seq_operations pagetypeinfo_op;
static int pagetypeinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &pagetypeinfo_op);
}

static const struct file_operations pagetypeinfo_file_ops = {
	.open = pagetypeinfo_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

extern struct seq_operations zoneinfo_op;
static int zoneinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &zoneinfo_op);
}

static const struct file_operations proc_zoneinfo_file_operations = {
	.open = zoneinfo_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static int version_read_proc(char *page, char **start, off_t off,
				 int count, int *eof, void *data)
{
	int len;

	len = snprintf(page, PAGE_SIZE, linux_proc_banner,
		utsname()->sysname,
		utsname()->release,
		utsname()->version);
	return proc_calc_metrics(page, start, off, count, eof, len);
}

extern struct seq_operations cpuinfo_op;
static int cpuinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &cpuinfo_op);
}

static const struct file_operations proc_cpuinfo_operations = {
	.open = cpuinfo_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static int devinfo_show(struct seq_file *f, void *v)
{
	int i = *(loff_t *) v;

	if (i < CHRDEV_MAJOR_HASH_SIZE) {
		if (i == 0)
			seq_printf(f, "Character devices:\n");
		chrdev_show(f, i);
	}
#ifdef CONFIG_BLOCK
	else {
		i -= CHRDEV_MAJOR_HASH_SIZE;
		if (i == 0)
			seq_printf(f, "\nBlock devices:\n");
		blkdev_show(f, i);
	}
#endif
	return 0;
}

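/*
 * /proc/devices iterator: positions 0..CHRDEV_MAJOR_HASH_SIZE-1 walk the
 * character device major hash, the remaining positions walk the block
 * device major hash (when CONFIG_BLOCK is enabled).
 */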
static void *devinfo_start(struct seq_file *f, loff_t *pos)
{
	if (*pos < (BLKDEV_MAJOR_HASH_SIZE + CHRDEV_MAJOR_HASH_SIZE))
		return pos;
	return NULL;
}

static void *devinfo_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= (BLKDEV_MAJOR_HASH_SIZE + CHRDEV_MAJOR_HASH_SIZE))
		return NULL;
	return pos;
}

static void devinfo_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}

static struct seq_operations devinfo_ops = {
	.start = devinfo_start,
	.next = devinfo_next,
	.stop = devinfo_stop,
	.show = devinfo_show
};

static int devinfo_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &devinfo_ops);
}

static const struct file_operations proc_devinfo_operations = {
	.open = devinfo_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

extern struct seq_operations vmstat_op;
static int vmstat_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &vmstat_op);
}
static const struct file_operations proc_vmstat_file_operations = {
	.open = vmstat_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

#ifdef CONFIG_PROC_HARDWARE
static int hardware_read_proc(char *page, char **start, off_t off,
				 int count, int *eof, void *data)
{
	int len = get_hardware_list(page);
	return proc_calc_metrics(page, start, off, count, eof, len);
}
#endif

#ifdef CONFIG_STRAM_PROC
static int stram_read_proc(char *page, char **start, off_t off,
				 int count, int *eof, void *data)
{
	int len = get_stram_list(page);
	return proc_calc_metrics(page, start, off, count, eof, len);
}
#endif

#ifdef CONFIG_BLOCK
extern struct seq_operations partitions_op;
static int partitions_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &partitions_op);
}
static const struct file_operations proc_partitions_operations = {
	.open = partitions_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

extern struct seq_operations diskstats_op;
static int diskstats_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &diskstats_op);
}
static const struct file_operations proc_diskstats_operations = {
	.open = diskstats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
#endif

#ifdef CONFIG_MODULES
extern struct seq_operations modules_op;
static int modules_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &modules_op);
}
static const struct file_operations proc_modules_operations = {
	.open = modules_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
#endif

#ifdef CONFIG_SLABINFO
static int slabinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &slabinfo_op);
}
static const struct file_operations proc_slabinfo_operations = {
	.open = slabinfo_open,
	.read = seq_read,
	.write = slabinfo_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

#ifdef CONFIG_DEBUG_SLAB_LEAK
extern struct seq_operations slabstats_op;
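/*
 * /proc/slab_allocators: the page allocated here is handed to the seq_file
 * as m->private (its first slot holds the number of entries it can hold)
 * and is freed by seq_release_private() on close.
 */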
static int slabstats_open(struct inode *inode, struct file *file)
{
	unsigned long *n = kzalloc(PAGE_SIZE, GFP_KERNEL);
	int ret = -ENOMEM;
	if (n) {
		ret = seq_open(file, &slabstats_op);
		if (!ret) {
			struct seq_file *m = file->private_data;
			*n = PAGE_SIZE / (2 * sizeof(unsigned long));
			m->private = n;
			n = NULL;
		}
		kfree(n);
	}
	return ret;
}

static const struct file_operations proc_slabstats_operations = {
	.open = slabstats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};
#endif
#endif

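/*
 * /proc/stat: system-wide counters.  CPU times are summed over all possible
 * CPUs and printed in USER_HZ ticks; "intr" is the total interrupt count
 * followed by one count per IRQ line; "btime" is the boot time in seconds
 * since the Epoch.
 */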
static int show_stat(struct seq_file *p, void *v)
{
	int i;
	unsigned long jif;
	cputime64_t user, nice, system, idle, iowait, irq, softirq, steal;
	cputime64_t guest;
	u64 sum = 0;
	struct timespec boottime;
	unsigned int *per_irq_sum;

	per_irq_sum = kzalloc(sizeof(unsigned int)*NR_IRQS, GFP_KERNEL);
	if (!per_irq_sum)
		return -ENOMEM;

	user = nice = system = idle = iowait =
		irq = softirq = steal = cputime64_zero;
	guest = cputime64_zero;
	getboottime(&boottime);
	jif = boottime.tv_sec;

	for_each_possible_cpu(i) {
		int j;

		user = cputime64_add(user, kstat_cpu(i).cpustat.user);
		nice = cputime64_add(nice, kstat_cpu(i).cpustat.nice);
		system = cputime64_add(system, kstat_cpu(i).cpustat.system);
		idle = cputime64_add(idle, kstat_cpu(i).cpustat.idle);
		iowait = cputime64_add(iowait, kstat_cpu(i).cpustat.iowait);
		irq = cputime64_add(irq, kstat_cpu(i).cpustat.irq);
		softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq);
		steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal);
		guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest);
		for (j = 0; j < NR_IRQS; j++) {
			unsigned int temp = kstat_cpu(i).irqs[j];
			sum += temp;
			per_irq_sum[j] += temp;
		}
	}

	seq_printf(p, "cpu  %llu %llu %llu %llu %llu %llu %llu %llu %llu\n",
		(unsigned long long)cputime64_to_clock_t(user),
		(unsigned long long)cputime64_to_clock_t(nice),
		(unsigned long long)cputime64_to_clock_t(system),
		(unsigned long long)cputime64_to_clock_t(idle),
		(unsigned long long)cputime64_to_clock_t(iowait),
		(unsigned long long)cputime64_to_clock_t(irq),
		(unsigned long long)cputime64_to_clock_t(softirq),
		(unsigned long long)cputime64_to_clock_t(steal),
		(unsigned long long)cputime64_to_clock_t(guest));
	for_each_online_cpu(i) {

		/* Copy values here to work around gcc-2.95.3, gcc-2.96 */
		user = kstat_cpu(i).cpustat.user;
		nice = kstat_cpu(i).cpustat.nice;
		system = kstat_cpu(i).cpustat.system;
		idle = kstat_cpu(i).cpustat.idle;
		iowait = kstat_cpu(i).cpustat.iowait;
		irq = kstat_cpu(i).cpustat.irq;
		softirq = kstat_cpu(i).cpustat.softirq;
		steal = kstat_cpu(i).cpustat.steal;
		guest = kstat_cpu(i).cpustat.guest;
		seq_printf(p,
			"cpu%d %llu %llu %llu %llu %llu %llu %llu %llu %llu\n",
			i,
			(unsigned long long)cputime64_to_clock_t(user),
			(unsigned long long)cputime64_to_clock_t(nice),
			(unsigned long long)cputime64_to_clock_t(system),
			(unsigned long long)cputime64_to_clock_t(idle),
			(unsigned long long)cputime64_to_clock_t(iowait),
			(unsigned long long)cputime64_to_clock_t(irq),
			(unsigned long long)cputime64_to_clock_t(softirq),
			(unsigned long long)cputime64_to_clock_t(steal),
			(unsigned long long)cputime64_to_clock_t(guest));
	}
	seq_printf(p, "intr %llu", (unsigned long long)sum);

	for (i = 0; i < NR_IRQS; i++)
		seq_printf(p, " %u", per_irq_sum[i]);

	seq_printf(p,
		"\nctxt %llu\n"
		"btime %lu\n"
		"processes %lu\n"
		"procs_running %lu\n"
		"procs_blocked %lu\n",
		nr_context_switches(),
		(unsigned long)jif,
		total_forks,
		nr_running(),
		nr_iowait());

	kfree(per_irq_sum);
	return 0;
}

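/*
 * Pre-size the seq_file buffer so that a machine with many CPUs can
 * produce its /proc/stat output in one pass instead of going through the
 * default 4 kB buffer's grow-and-retry path.
 */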
static int stat_open(struct inode *inode, struct file *file)
{
	unsigned size = 4096 * (1 + num_possible_cpus() / 32);
	char *buf;
	struct seq_file *m;
	int res;

	/* don't ask for more than the kmalloc() max size, currently 128 KB */
	if (size > 128 * 1024)
		size = 128 * 1024;
	buf = kmalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	res = single_open(file, show_stat, NULL);
	if (!res) {
		m = file->private_data;
		m->buf = buf;
		m->size = size;
	} else
		kfree(buf);
	return res;
}
static const struct file_operations proc_stat_operations = {
	.open = stat_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/*
 * /proc/interrupts
 */
static void *int_seq_start(struct seq_file *f, loff_t *pos)
{
	return (*pos <= NR_IRQS) ? pos : NULL;
}

static void *int_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos > NR_IRQS)
		return NULL;
	return pos;
}

static void int_seq_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}


extern int show_interrupts(struct seq_file *f, void *v); /* In arch code */
static struct seq_operations int_seq_ops = {
	.start = int_seq_start,
	.next = int_seq_next,
	.stop = int_seq_stop,
	.show = show_interrupts
};

static int interrupts_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &int_seq_ops);
}

static const struct file_operations proc_interrupts_operations = {
	.open = interrupts_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static int filesystems_read_proc(char *page, char **start, off_t off,
				 int count, int *eof, void *data)
{
	int len = get_filesystem_list(page);
	return proc_calc_metrics(page, start, off, count, eof, len);
}

static int cmdline_read_proc(char *page, char **start, off_t off,
				 int count, int *eof, void *data)
{
	int len;

	len = sprintf(page, "%s\n", saved_command_line);
	return proc_calc_metrics(page, start, off, count, eof, len);
}

static int locks_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &locks_seq_operations);
}

static const struct file_operations proc_locks_operations = {
	.open = locks_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static int execdomains_read_proc(char *page, char **start, off_t off,
				 int count, int *eof, void *data)
{
	int len = get_exec_domain_list(page);
	return proc_calc_metrics(page, start, off, count, eof, len);
}

#ifdef CONFIG_MAGIC_SYSRQ
/*
 * writing 'C' to /proc/sysrq-trigger is like sysrq-C
 */
static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
				   size_t count, loff_t *ppos)
{
	if (count) {
		char c;

		if (get_user(c, buf))
			return -EFAULT;
		__handle_sysrq(c, NULL, 0);
	}
	return count;
}

static const struct file_operations proc_sysrq_trigger_operations = {
	.write = write_sysrq_trigger,
};
#endif

#ifdef CONFIG_PROC_PAGE_MONITOR
#define KPMSIZE sizeof(u64)
#define KPMMASK (KPMSIZE - 1)
/* /proc/kpagecount - an array exposing page counts
 *
 * Each entry is a u64 representing the corresponding
 * physical page count.
 */
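/* The file is indexed by pfn: the entry for page frame N lives at byte
 * offset N * KPMSIZE, which is why offset and count must be u64-aligned. */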
static ssize_t kpagecount_read(struct file *file, char __user *buf,
			     size_t count, loff_t *ppos)
{
	u64 __user *out = (u64 __user *)buf;
	struct page *ppage;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;
	u64 pcount;

	pfn = src / KPMSIZE;
	count = min_t(size_t, count, (max_pfn * KPMSIZE) - src);
	if (src & KPMMASK || count & KPMMASK)
		return -EIO;

	while (count > 0) {
		ppage = NULL;
		if (pfn_valid(pfn))
			ppage = pfn_to_page(pfn);
		pfn++;
		if (!ppage)
			pcount = 0;
		else
			pcount = atomic_read(&ppage->_count);

		if (put_user(pcount, out++)) {
			ret = -EFAULT;
			break;
		}

		count -= KPMSIZE;
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}

static struct file_operations proc_kpagecount_operations = {
	.llseek = mem_lseek,
	.read = kpagecount_read,
};

/* /proc/kpageflags - an array exposing page flags
 *
 * Each entry is a u64 representing the corresponding
 * physical page flags.
 */

/* These macros are used to decouple internal flags from exported ones */

#define KPF_LOCKED     0
#define KPF_ERROR      1
#define KPF_REFERENCED 2
#define KPF_UPTODATE   3
#define KPF_DIRTY      4
#define KPF_LRU        5
#define KPF_ACTIVE     6
#define KPF_SLAB       7
#define KPF_WRITEBACK  8
#define KPF_RECLAIM    9
#define KPF_BUDDY     10

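/* Copy bit 'srcpos' of 'flags' into bit position 'dstpos' of the result,
 * mapping a kernel PG_* bit onto its stable exported KPF_* bit. */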
#define kpf_copy_bit(flags, dstpos, srcpos) (((flags >> srcpos) & 1) << dstpos)

static ssize_t kpageflags_read(struct file *file, char __user *buf,
			     size_t count, loff_t *ppos)
{
	u64 __user *out = (u64 __user *)buf;
	struct page *ppage;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;
	u64 kflags, uflags;

	pfn = src / KPMSIZE;
	count = min_t(unsigned long, count, (max_pfn * KPMSIZE) - src);
	if (src & KPMMASK || count & KPMMASK)
		return -EIO;

	while (count > 0) {
		ppage = NULL;
		if (pfn_valid(pfn))
			ppage = pfn_to_page(pfn);
		pfn++;
		if (!ppage)
			kflags = 0;
		else
			kflags = ppage->flags;

		uflags = kpf_copy_bit(kflags, KPF_LOCKED, PG_locked) |
			kpf_copy_bit(kflags, KPF_ERROR, PG_error) |
			kpf_copy_bit(kflags, KPF_REFERENCED, PG_referenced) |
			kpf_copy_bit(kflags, KPF_UPTODATE, PG_uptodate) |
			kpf_copy_bit(kflags, KPF_DIRTY, PG_dirty) |
			kpf_copy_bit(kflags, KPF_LRU, PG_lru) |
			kpf_copy_bit(kflags, KPF_ACTIVE, PG_active) |
			kpf_copy_bit(kflags, KPF_SLAB, PG_slab) |
			kpf_copy_bit(kflags, KPF_WRITEBACK, PG_writeback) |
			kpf_copy_bit(kflags, KPF_RECLAIM, PG_reclaim) |
			kpf_copy_bit(kflags, KPF_BUDDY, PG_buddy);

		if (put_user(uflags, out++)) {
			ret = -EFAULT;
			break;
		}

		count -= KPMSIZE;
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}

static struct file_operations proc_kpageflags_operations = {
	.llseek = mem_lseek,
	.read = kpageflags_read,
};
#endif /* CONFIG_PROC_PAGE_MONITOR */

struct proc_dir_entry *proc_root_kcore;

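/* Create a /proc entry with the given mode and attach the supplied
 * file_operations. */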
void create_seq_entry(char *name, mode_t mode, const struct file_operations *f)
{
	struct proc_dir_entry *entry;
	entry = create_proc_entry(name, mode, NULL);
	if (entry)
		entry->proc_fops = f;
}

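/*
 * Register all of the top-level /proc files implemented in this file;
 * called once during early boot.
 */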
void __init proc_misc_init(void)
{
	static struct {
		char *name;
		int (*read_proc)(char*,char**,off_t,int,int*,void*);
	} *p, simple_ones[] = {
		{"loadavg", loadavg_read_proc},
		{"uptime", uptime_read_proc},
		{"meminfo", meminfo_read_proc},
		{"version", version_read_proc},
#ifdef CONFIG_PROC_HARDWARE
		{"hardware", hardware_read_proc},
#endif
#ifdef CONFIG_STRAM_PROC
		{"stram", stram_read_proc},
#endif
		{"filesystems", filesystems_read_proc},
		{"cmdline", cmdline_read_proc},
		{"execdomains", execdomains_read_proc},
		{NULL,}
	};
	for (p = simple_ones; p->name; p++)
		create_proc_read_entry(p->name, 0, NULL, p->read_proc, NULL);

	proc_symlink("mounts", NULL, "self/mounts");

	/* And now for trickier ones */
#ifdef CONFIG_PRINTK
	{
		struct proc_dir_entry *entry;
		entry = create_proc_entry("kmsg", S_IRUSR, &proc_root);
		if (entry)
			entry->proc_fops = &proc_kmsg_operations;
	}
#endif
	create_seq_entry("locks", 0, &proc_locks_operations);
	create_seq_entry("devices", 0, &proc_devinfo_operations);
	create_seq_entry("cpuinfo", 0, &proc_cpuinfo_operations);
#ifdef CONFIG_BLOCK
	create_seq_entry("partitions", 0, &proc_partitions_operations);
#endif
	create_seq_entry("stat", 0, &proc_stat_operations);
	create_seq_entry("interrupts", 0, &proc_interrupts_operations);
#ifdef CONFIG_SLABINFO
	create_seq_entry("slabinfo", S_IWUSR|S_IRUGO, &proc_slabinfo_operations);
#ifdef CONFIG_DEBUG_SLAB_LEAK
	create_seq_entry("slab_allocators", 0, &proc_slabstats_operations);
#endif
#endif
	create_seq_entry("buddyinfo", S_IRUGO, &fragmentation_file_operations);
	create_seq_entry("pagetypeinfo", S_IRUGO, &pagetypeinfo_file_ops);
	create_seq_entry("vmstat", S_IRUGO, &proc_vmstat_file_operations);
	create_seq_entry("zoneinfo", S_IRUGO, &proc_zoneinfo_file_operations);
#ifdef CONFIG_BLOCK
	create_seq_entry("diskstats", 0, &proc_diskstats_operations);
#endif
#ifdef CONFIG_MODULES
	create_seq_entry("modules", 0, &proc_modules_operations);
#endif
#ifdef CONFIG_SCHEDSTATS
	create_seq_entry("schedstat", 0, &proc_schedstat_operations);
#endif
#ifdef CONFIG_PROC_KCORE
	proc_root_kcore = create_proc_entry("kcore", S_IRUSR, NULL);
	if (proc_root_kcore) {
		proc_root_kcore->proc_fops = &proc_kcore_operations;
		proc_root_kcore->size =
				(size_t)high_memory - PAGE_OFFSET + PAGE_SIZE;
	}
#endif
#ifdef CONFIG_PROC_PAGE_MONITOR
	create_seq_entry("kpagecount", S_IRUSR, &proc_kpagecount_operations);
	create_seq_entry("kpageflags", S_IRUSR, &proc_kpageflags_operations);
#endif
#ifdef CONFIG_PROC_VMCORE
	proc_vmcore = create_proc_entry("vmcore", S_IRUSR, NULL);
	if (proc_vmcore)
		proc_vmcore->proc_fops = &proc_vmcore_operations;
#endif
#ifdef CONFIG_MAGIC_SYSRQ
	{
		struct proc_dir_entry *entry;
		entry = create_proc_entry("sysrq-trigger", S_IWUSR, NULL);
		if (entry)
			entry->proc_fops = &proc_sysrq_trigger_operations;
	}
#endif
}
907 }