/*
 *  linux/mm/vmstat.c
 *
 *  Manages VM statistics
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  zoned VM statistics
 *  Copyright (C) 2006 Silicon Graphics, Inc.,
 *		Christoph Lameter <christoph@lameter.com>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/cpu.h>

void __get_zone_counts(unsigned long *active, unsigned long *inactive,
			unsigned long *free, struct pglist_data *pgdat)
{
	struct zone *zones = pgdat->node_zones;
	int i;

	*active = node_page_state(pgdat->node_id, NR_ACTIVE);
	*inactive = node_page_state(pgdat->node_id, NR_INACTIVE);
	*free = 0;
	for (i = 0; i < MAX_NR_ZONES; i++) {
		*free += zones[i].free_pages;
	}
}

void get_zone_counts(unsigned long *active,
		unsigned long *inactive, unsigned long *free)
{
	struct pglist_data *pgdat;

	*active = global_page_state(NR_ACTIVE);
	*inactive = global_page_state(NR_INACTIVE);
	*free = 0;
	for_each_online_pgdat(pgdat) {
		unsigned long l, m, n;
		__get_zone_counts(&l, &m, &n, pgdat);
		*free += n;
	}
}

#ifdef CONFIG_VM_EVENT_COUNTERS
DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
EXPORT_PER_CPU_SYMBOL(vm_event_states);

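/*
 * Sum the event counters of all CPUs in *cpumask into ret[]. While one
 * CPU's counters are being added, the next CPU's per-cpu area is
 * prefetched, so the remote cache misses overlap with the summation.
 */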
static void sum_vm_events(unsigned long *ret, cpumask_t *cpumask)
{
	int cpu = 0;
	int i;

	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));

	cpu = first_cpu(*cpumask);
	while (cpu < NR_CPUS) {
		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);

		cpu = next_cpu(cpu, *cpumask);

		if (cpu < NR_CPUS)
			prefetch(&per_cpu(vm_event_states, cpu));

		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
			ret[i] += this->event[i];
	}
}

/*
 * Accumulate the vm event counters across all CPUs.
 * The result is unavoidably approximate - it can change
 * during and after execution of this function.
 */
void all_vm_events(unsigned long *ret)
{
	sum_vm_events(ret, &cpu_online_map);
}
EXPORT_SYMBOL_GPL(all_vm_events);

#ifdef CONFIG_HOTPLUG
/*
 * Fold the foreign cpu events into our own.
 *
 * This is adding to the events on one processor
 * but keeps the global counts constant.
 */
void vm_events_fold_cpu(int cpu)
{
	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
	int i;

	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
		count_vm_events(i, fold_state->event[i]);
		fold_state->event[i] = 0;
	}
}
#endif /* CONFIG_HOTPLUG */

#endif /* CONFIG_VM_EVENT_COUNTERS */

/*
 * Manage combined zone based / global counters
 *
 * vm_stat contains the global counters
 */
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
EXPORT_SYMBOL(vm_stat);

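/*
 * Each zone additionally keeps small per-cpu differentials (the s8
 * array vm_stat_diff[] in struct per_cpu_pageset) that absorb
 * individual updates; a differential is folded into the zone and
 * global atomics only once it crosses the pageset's stat_threshold.
 * Readers of vm_stat therefore see a value that may lag the true
 * count by up to the sum of the outstanding per-cpu differentials.
 */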
#ifdef CONFIG_SMP

static int calculate_threshold(struct zone *zone)
{
	int threshold;
	int mem;	/* memory in 128 MB units */

	/*
	 * The threshold scales with the number of processors and the amount
	 * of memory per zone. More memory means that we can defer updates for
	 * longer, more processors could lead to more contention.
	 * fls() is used to have a cheap way of logarithmic scaling.
	 *
	 * Some sample thresholds:
	 *
	 * Threshold	Processors	(fls)	Zonesize	fls(mem+1)
	 * ------------------------------------------------------------------
	 * 8		1		1	0.9-1 GB	4
	 * 16		2		2	0.9-1 GB	4
	 * 20		2		2	1-2 GB		5
	 * 24		2		2	2-4 GB		6
	 * 28		2		2	4-8 GB		7
	 * 32		2		2	8-16 GB		8
	 * 4		2		2	<128M		1
	 * 30		4		3	2-4 GB		5
	 * 48		4		3	8-16 GB		8
	 * 32		8		4	1-2 GB		4
	 * 32		8		4	0.9-1GB		4
	 * 10		16		5	<128M		1
	 * 40		16		5	900M		4
	 * 70		64		7	2-4 GB		5
	 * 84		64		7	4-8 GB		6
	 * 108		512		9	4-8 GB		6
	 * 125		1024		10	8-16 GB		8
	 * 125		1024		10	16-32 GB	9
	 */

	mem = zone->present_pages >> (27 - PAGE_SHIFT);

	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));

	/*
	 * Maximum threshold is 125
	 */
	threshold = min(125, threshold);

	return threshold;
}
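
/*
 * Worked example: with 2 online CPUs and a zone of roughly 1.9 GB,
 * mem = 15 (in 128 MB units), fls(2) = 2 and fls(15) = 4, so
 * threshold = 2 * 2 * (1 + 4) = 20, matching the 1-2 GB row in the
 * table above.
 */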

/*
 * Refresh the thresholds for each zone.
 */
static void refresh_zone_stat_thresholds(void)
{
	struct zone *zone;
	int cpu;
	int threshold;

	for_each_zone(zone) {

		if (!zone->present_pages)
			continue;

		threshold = calculate_threshold(zone);

		for_each_online_cpu(cpu)
			zone_pcp(zone, cpu)->stat_threshold = threshold;
	}
}

/*
 * For use when we know that interrupts are disabled.
 */
void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
				int delta)
{
	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
	s8 *p = pcp->vm_stat_diff + item;
	long x;

	x = delta + *p;

	if (unlikely(x > pcp->stat_threshold || x < -pcp->stat_threshold)) {
		zone_page_state_add(x, zone, item);
		x = 0;
	}
	*p = x;
}
EXPORT_SYMBOL(__mod_zone_page_state);

/*
 * For an unknown interrupt state
 */
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
					int delta)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_zone_page_state(zone, item, delta);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_zone_page_state);
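
/*
 * Illustrative use (hypothetical caller and values): a path that
 * cleans nr_pages dirty pages in one go could issue
 *
 *	mod_zone_page_state(zone, NR_FILE_DIRTY, -nr_pages);
 *
 * and the whole delta would normally be absorbed by the per-cpu
 * differential; the atomic counters are touched only when the
 * differential crosses stat_threshold.
 */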

/*
 * Optimized increment and decrement functions.
 *
 * These are only for a single page and therefore can take a struct page *
 * argument instead of struct zone *. This allows the inclusion of the code
 * generated for page_zone(page) into the optimized functions.
 *
 * No overflow check is necessary and therefore the differential can be
 * incremented or decremented in place which may allow the compilers to
 * generate better code.
 * The increment or decrement is known and therefore one boundary check can
 * be omitted.
 *
 * NOTE: These functions are very performance sensitive. Change only
 * with care.
 *
 * Some processors have inc/dec instructions that are atomic vs an interrupt.
 * However, the code must first determine the differential location in a zone
 * based on the processor number and then inc/dec the counter. There is no
 * guarantee without disabling preemption that the processor will not change
 * in between and therefore the atomicity vs. interrupt cannot be exploited
 * in a useful way here.
 */
void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
	s8 *p = pcp->vm_stat_diff + item;

	(*p)++;

	if (unlikely(*p > pcp->stat_threshold)) {
		int overstep = pcp->stat_threshold / 2;

		zone_page_state_add(*p + overstep, zone, item);
		*p = -overstep;
	}
}
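
/*
 * Note the overstep above: when the differential exceeds the
 * threshold, half a threshold extra is folded into the global count
 * and the differential restarts at -overstep. A counter that keeps
 * increasing then has 1.5 * stat_threshold of headroom before the
 * next fold, reducing the rate of expensive global updates.
 */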

void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__inc_zone_page_state);

void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
	s8 *p = pcp->vm_stat_diff + item;

	(*p)--;

	if (unlikely(*p < -pcp->stat_threshold)) {
		int overstep = pcp->stat_threshold / 2;

		zone_page_state_add(*p - overstep, zone, item);
		*p = overstep;
	}
}

void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__dec_zone_page_state);

void inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
}

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;
	struct zone *zone;

	zone = page_zone(page);
	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_zone_page_state(page, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(dec_zone_page_state);

/*
 * Update the zone counters for one cpu.
 */
void refresh_cpu_vm_stats(int cpu)
{
	struct zone *zone;
	int i;
	unsigned long flags;

	for_each_zone(zone) {
		struct per_cpu_pageset *pcp;

		if (!populated_zone(zone))
			continue;

		pcp = zone_pcp(zone, cpu);

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
			if (pcp->vm_stat_diff[i]) {
				local_irq_save(flags);
				zone_page_state_add(pcp->vm_stat_diff[i],
					zone, i);
				pcp->vm_stat_diff[i] = 0;
				local_irq_restore(flags);
			}
	}
}

static void __refresh_cpu_vm_stats(void *dummy)
{
	refresh_cpu_vm_stats(smp_processor_id());
}

/*
 * Consolidate all counters.
 *
 * Note that the result remains approximate if other
 * processes are allowed to run concurrently.
 */
void refresh_vm_stats(void)
{
	on_each_cpu(__refresh_cpu_vm_stats, NULL, 0, 1);
}
EXPORT_SYMBOL(refresh_vm_stats);

#endif

#ifdef CONFIG_NUMA
/*
 * zonelist = the list of zones passed to the allocator
 * z	    = the zone from which the allocation occurred.
 *
 * Must be called with interrupts disabled.
 */
void zone_statistics(struct zonelist *zonelist, struct zone *z)
{
	if (z->zone_pgdat == zonelist->zones[0]->zone_pgdat) {
		__inc_zone_state(z, NUMA_HIT);
	} else {
		__inc_zone_state(z, NUMA_MISS);
		__inc_zone_state(zonelist->zones[0], NUMA_FOREIGN);
	}
	if (z->node == numa_node_id())
		__inc_zone_state(z, NUMA_LOCAL);
	else
		__inc_zone_state(z, NUMA_OTHER);
}
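
/*
 * Example: a task running on node 0 whose allocation falls back to a
 * node 1 zone bumps NUMA_MISS on the node 1 zone, NUMA_FOREIGN on
 * node 0's preferred zone, and NUMA_OTHER on the node 1 zone (the
 * page comes from a node other than the one the CPU is on).
 */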
#endif

#ifdef CONFIG_PROC_FS

#include <linux/seq_file.h>

static void *frag_start(struct seq_file *m, loff_t *pos)
{
	pg_data_t *pgdat;
	loff_t node = *pos;
	for (pgdat = first_online_pgdat();
	     pgdat && node;
	     pgdat = next_online_pgdat(pgdat))
		--node;

	return pgdat;
}

static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	(*pos)++;
	return next_online_pgdat(pgdat);
}

static void frag_stop(struct seq_file *m, void *arg)
{
}

/*
 * This walks the free areas for each zone.
 */
static int frag_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;
	int order;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
		for (order = 0; order < MAX_ORDER; ++order)
			seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
		spin_unlock_irqrestore(&zone->lock, flags);
		seq_putc(m, '\n');
	}
	return 0;
}
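
/*
 * frag_show() backs /proc/buddyinfo: one line per zone listing the
 * number of free blocks of each order. Illustrative output only
 * (values made up):
 *
 *	Node 0, zone   Normal    217     41     23      9      4 ...
 */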

const struct seq_operations fragmentation_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= frag_show,
};

#ifdef CONFIG_ZONE_DMA32
#define TEXT_FOR_DMA32(xx) xx "_dma32",
#else
#define TEXT_FOR_DMA32(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define TEXT_FOR_HIGHMEM(xx) xx "_high",
#else
#define TEXT_FOR_HIGHMEM(xx)
#endif

#define TEXTS_FOR_ZONES(xx) xx "_dma", TEXT_FOR_DMA32(xx) xx "_normal", \
					TEXT_FOR_HIGHMEM(xx)

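/*
 * With both CONFIG_ZONE_DMA32 and CONFIG_HIGHMEM enabled,
 * TEXTS_FOR_ZONES("pgalloc") expands to the four strings
 * "pgalloc_dma", "pgalloc_dma32", "pgalloc_normal", "pgalloc_high",
 * keeping the per-zone event names in step with the zone order.
 */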
static const char * const vmstat_text[] = {
	/* Zoned VM counters */
	"nr_active",
	"nr_inactive",
	"nr_anon_pages",
	"nr_mapped",
	"nr_file_pages",
	"nr_slab_reclaimable",
	"nr_slab_unreclaimable",
	"nr_page_table_pages",
	"nr_dirty",
	"nr_writeback",
	"nr_unstable",
	"nr_bounce",
	"nr_vmscan_write",

#ifdef CONFIG_NUMA
	"numa_hit",
	"numa_miss",
	"numa_foreign",
	"numa_interleave",
	"numa_local",
	"numa_other",
#endif

#ifdef CONFIG_VM_EVENT_COUNTERS
	"pgpgin",
	"pgpgout",
	"pswpin",
	"pswpout",

	TEXTS_FOR_ZONES("pgalloc")

	"pgfree",
	"pgactivate",
	"pgdeactivate",

	"pgfault",
	"pgmajfault",

	TEXTS_FOR_ZONES("pgrefill")
	TEXTS_FOR_ZONES("pgsteal")
	TEXTS_FOR_ZONES("pgscan_kswapd")
	TEXTS_FOR_ZONES("pgscan_direct")

	"pginodesteal",
	"slabs_scanned",
	"kswapd_steal",
	"kswapd_inodesteal",
	"pageoutrun",
	"allocstall",

	"pgrotated",
#endif
};

/*
 * Output information about zones in @pgdat.
 */
static int zoneinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = arg;
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; zone++) {
		int i;

		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
		seq_printf(m,
			   "\n  pages free     %lu"
			   "\n        min      %lu"
			   "\n        low      %lu"
			   "\n        high     %lu"
			   "\n        scanned  %lu (a: %lu i: %lu)"
			   "\n        spanned  %lu"
			   "\n        present  %lu",
			   zone->free_pages,
			   zone->pages_min,
			   zone->pages_low,
			   zone->pages_high,
			   zone->pages_scanned,
			   zone->nr_scan_active, zone->nr_scan_inactive,
			   zone->spanned_pages,
			   zone->present_pages);

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
			seq_printf(m, "\n    %-12s %lu", vmstat_text[i],
					zone_page_state(zone, i));

		seq_printf(m,
			   "\n        protection: (%lu",
			   zone->lowmem_reserve[0]);
		for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
			seq_printf(m, ", %lu", zone->lowmem_reserve[i]);
		seq_printf(m,
			   ")"
			   "\n  pagesets");
		for_each_online_cpu(i) {
			struct per_cpu_pageset *pageset;
			int j;

			pageset = zone_pcp(zone, i);
			for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
				seq_printf(m,
					   "\n    cpu: %i pcp: %i"
					   "\n              count: %i"
					   "\n              high:  %i"
					   "\n              batch: %i",
					   i, j,
					   pageset->pcp[j].count,
					   pageset->pcp[j].high,
					   pageset->pcp[j].batch);
			}
#ifdef CONFIG_SMP
			seq_printf(m, "\n  vm stats threshold: %d",
					pageset->stat_threshold);
#endif
		}
		seq_printf(m,
			   "\n  all_unreclaimable: %u"
			   "\n  prev_priority:     %i"
			   "\n  start_pfn:         %lu",
			   zone->all_unreclaimable,
			   zone->prev_priority,
			   zone->zone_start_pfn);
		spin_unlock_irqrestore(&zone->lock, flags);
		seq_putc(m, '\n');
	}
	return 0;
}

const struct seq_operations zoneinfo_op = {
	.start	= frag_start, /* iterate over all zones. The same as in
			       * fragmentation. */
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= zoneinfo_show,
};

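/*
 * /proc/vmstat output: vmstat_start() fills one flat buffer with the
 * NR_VM_ZONE_STAT_ITEMS zone counters followed (when configured) by
 * the event counters, so *pos indexes it in step with vmstat_text
 * above.
 */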
static void *vmstat_start(struct seq_file *m, loff_t *pos)
{
	unsigned long *v;
#ifdef CONFIG_VM_EVENT_COUNTERS
	unsigned long *e;
#endif
	int i;

	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;

#ifdef CONFIG_VM_EVENT_COUNTERS
	v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long)
			+ sizeof(struct vm_event_state), GFP_KERNEL);
#else
	v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long),
			GFP_KERNEL);
#endif
	m->private = v;
	if (!v)
		return ERR_PTR(-ENOMEM);
	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		v[i] = global_page_state(i);
#ifdef CONFIG_VM_EVENT_COUNTERS
	e = v + NR_VM_ZONE_STAT_ITEMS;
	all_vm_events(e);
	e[PGPGIN] /= 2;		/* sectors -> kbytes */
	e[PGPGOUT] /= 2;
#endif
	return v + *pos;
}

static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
{
	(*pos)++;
	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;
	return (unsigned long *)m->private + *pos;
}

static int vmstat_show(struct seq_file *m, void *arg)
{
	unsigned long *l = arg;
	unsigned long off = l - (unsigned long *)m->private;

	seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
	return 0;
}

static void vmstat_stop(struct seq_file *m, void *arg)
{
	kfree(m->private);
	m->private = NULL;
}

const struct seq_operations vmstat_op = {
	.start	= vmstat_start,
	.next	= vmstat_next,
	.stop	= vmstat_stop,
	.show	= vmstat_show,
};

#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_SMP
/*
 * Use the cpu notifier to ensure that the thresholds are recalculated
 * when necessary.
 */
static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
		unsigned long action,
		void *hcpu)
{
	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_CANCELED:
	case CPU_DEAD:
		refresh_zone_stat_thresholds();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata vmstat_notifier =
	{ &vmstat_cpuup_callback, NULL, 0 };

int __init setup_vmstat(void)
{
	refresh_zone_stat_thresholds();
	register_cpu_notifier(&vmstat_notifier);
	return 0;
}
module_init(setup_vmstat)
#endif
695 | #endif |