/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>
#include <linux/static_key.h>
#include <linux/mmdebug.h>
extern int sysctl_stat_interval;

#ifdef CONFIG_NUMA
#define ENABLE_NUMA_STAT	1
#define DISABLE_NUMA_STAT	0
extern int sysctl_vm_numa_stat;
DECLARE_STATIC_KEY_TRUE(vm_numa_stat_key);
int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos);
#endif
struct reclaim_stat {
	unsigned nr_dirty;
	unsigned nr_unqueued_dirty;
	unsigned nr_congested;
	unsigned nr_writeback;
	unsigned nr_immediate;
	unsigned nr_pageout;
	unsigned nr_activate[ANON_AND_FILE];
	unsigned nr_ref_keep;
	unsigned nr_unmap_fail;
	unsigned nr_lazyfree_fail;
};
enum writeback_stat_item {
	NR_DIRTY_THRESHOLD,
	NR_DIRTY_BG_THRESHOLD,
	NR_VM_WRITEBACK_STAT_ITEMS,
};
#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Light weight per cpu counter implementation.
 *
 * Counters should only be incremented and no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
/*
 * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the
 * local_irq_disable overhead.
 */
static inline void __count_vm_event(enum vm_event_item item)
{
	raw_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	raw_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}

extern void all_vm_events(unsigned long *);

extern void vm_events_fold_cpu(int cpu);
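
/*
 * Illustrative usage (a sketch, not part of the original header): event
 * names come from <linux/vm_event_item.h>. Code that can run in any
 * context uses the preemption-safe variants; code that already runs
 * serialized (e.g. with interrupts disabled) can use the cheaper __ forms:
 *
 *	count_vm_event(PGFAULT);		// safe in any context
 *	__count_vm_events(PGACTIVATE, nr);	// caller handles serialization
 */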
#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */
#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x)     count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */
#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x)	   count_vm_event(x)
#define count_vm_tlb_events(x, y)  count_vm_events(x, y)
#else
#define count_vm_tlb_event(x)     do {} while (0)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif
#ifdef CONFIG_DEBUG_VM_VMACACHE
#define count_vm_vmacache_event(x) count_vm_event(x)
#else
#define count_vm_vmacache_event(x) do {} while (0)
#endif
#define __count_zid_vm_events(item, zid, delta) \
	__count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta)
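
/*
 * Illustrative expansion (assuming the usual FOR_ALL_ZONES layout of
 * per-zone events in <linux/vm_event_item.h>): for an event like PGALLOC,
 *
 *	__count_zid_vm_events(PGALLOC, zid, 1 << order);
 *
 * expands to __count_vm_events(PGALLOC_NORMAL - ZONE_NORMAL + zid, ...),
 * i.e. token pasting plus the zone-id offset selects the PGALLOC_<zone>
 * counter matching zone id "zid".
 */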
/*
 * Zone and node-based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS];
extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS];
extern atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];
#ifdef CONFIG_NUMA
static inline void zone_numa_event_add(long x, struct zone *zone,
				enum numa_stat_item item)
{
	atomic_long_add(x, &zone->vm_numa_event[item]);
	atomic_long_add(x, &vm_numa_event[item]);
}

static inline unsigned long zone_numa_event_state(struct zone *zone,
					enum numa_stat_item item)
{
	return atomic_long_read(&zone->vm_numa_event[item]);
}

static inline unsigned long
global_numa_event_state(enum numa_stat_item item)
{
	return atomic_long_read(&vm_numa_event[item]);
}
#endif /* CONFIG_NUMA */
static inline void zone_page_state_add(long x, struct zone *zone,
				enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_zone_stat[item]);
}

static inline void node_page_state_add(long x, struct pglist_data *pgdat,
				enum node_stat_item item)
{
	atomic_long_add(x, &pgdat->vm_stat[item]);
	atomic_long_add(x, &vm_node_stat[item]);
}
static inline unsigned long global_zone_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_zone_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline
unsigned long global_node_page_state_pages(enum node_stat_item item)
{
	long x = atomic_long_read(&vm_node_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long global_node_page_state(enum node_stat_item item)
{
	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));

	return global_node_page_state_pages(item);
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
/*
 * More accurate version that also considers the currently pending
 * deltas. For that we need to loop over all cpus to find the current
 * deltas. There is no synchronization so the result cannot be
 * exactly accurate either.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;

	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->per_cpu_zonestats, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}
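
/*
 * Illustrative contrast (a sketch, not part of the original header):
 *
 *	fast = zone_page_state(zone, NR_FREE_PAGES);		// cheap, may lag
 *	exact = zone_page_state_snapshot(zone, NR_FREE_PAGES);	// folds deltas
 *
 * The snapshot walks every online cpu, so it is reserved for paths where
 * per-cpu drift actually matters, such as careful watermark checks.
 */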
#ifdef CONFIG_NUMA
/* See __count_vm_event comment on why raw_cpu_inc is used. */
static inline void
__count_numa_event(struct zone *zone, enum numa_stat_item item)
{
	struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;

	raw_cpu_inc(pzstats->vm_numa_event[item]);
}

static inline void
__count_numa_events(struct zone *zone, enum numa_stat_item item, long delta)
{
	struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;

	raw_cpu_add(pzstats->vm_numa_event[item], delta);
}

extern unsigned long sum_zone_node_page_state(int node,
					      enum zone_stat_item item);
extern unsigned long sum_zone_numa_event_state(int node, enum numa_stat_item item);
extern unsigned long node_page_state(struct pglist_data *pgdat,
						enum node_stat_item item);
extern unsigned long node_page_state_pages(struct pglist_data *pgdat,
					   enum node_stat_item item);
extern void fold_vm_numa_events(void);
#else
#define sum_zone_node_page_state(node, item) global_zone_page_state(item)
#define node_page_state(node, item) global_node_page_state(item)
#define node_page_state_pages(node, item) global_node_page_state_pages(item)
static inline void fold_vm_numa_events(void)
{
}
#endif /* CONFIG_NUMA */
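
/*
 * Illustrative note: with a single node, per-node and global counters
 * coincide, so e.g. node_page_state(pgdat, NR_FILE_PAGES) above simply
 * reads global_node_page_state(NR_FILE_PAGES).
 */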
#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void __mod_node_page_state(struct pglist_data *, enum node_stat_item item, long);
void __inc_node_page_state(struct page *, enum node_stat_item);
void __dec_node_page_state(struct page *, enum node_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_node_page_state(struct pglist_data *, enum node_stat_item, long);
void inc_node_page_state(struct page *, enum node_stat_item);
void dec_node_page_state(struct page *, enum node_stat_item);

extern void inc_node_state(struct pglist_data *, enum node_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_node_state(struct pglist_data *, enum node_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_node_state(struct pglist_data *, enum node_stat_item);
void quiet_vmstat(void);
void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

struct ctl_table;
int vmstat_refresh(struct ctl_table *, int write, void *buffer, size_t *lenp,
		loff_t *ppos);

void drain_zonestat(struct zone *zone, struct per_cpu_zonestat *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, long delta)
{
	zone_page_state_add(delta, zone, item);
}
static inline void __mod_node_page_state(struct pglist_data *pgdat,
			enum node_stat_item item, int delta)
{
	if (vmstat_item_in_bytes(item)) {
		/*
		 * Only cgroups use subpage accounting right now; at
		 * the global level, these items still change in
		 * multiples of whole pages. Store them as pages
		 * internally to keep the per-cpu counters compact.
		 */
		VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
		delta >>= PAGE_SHIFT;
	}

	node_page_state_add(delta, pgdat, item);
}
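
/*
 * Illustrative example (assuming a byte-counted item such as
 * NR_SLAB_RECLAIMABLE_B): deltas are passed in bytes but must be whole
 * pages, and are stored internally in pages:
 *
 *	__mod_node_page_state(pgdat, NR_SLAB_RECLAIMABLE_B, PAGE_SIZE);
 *
 * accounts exactly one page; an unaligned delta would trip the warning.
 */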
static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_zone_stat[item]);
}

static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_inc(&pgdat->vm_stat[item]);
	atomic_long_inc(&vm_node_stat[item]);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_zone_stat[item]);
}

static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_dec(&pgdat->vm_stat[item]);
	atomic_long_dec(&vm_node_stat[item]);
}
static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __inc_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__inc_node_state(page_pgdat(page), item);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

static inline void __dec_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__dec_node_state(page_pgdat(page), item);
}
/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define inc_node_page_state __inc_node_page_state
#define dec_node_page_state __dec_node_page_state
#define mod_node_page_state __mod_node_page_state

#define inc_zone_state __inc_zone_state
#define inc_node_state __inc_node_state
#define dec_zone_state __dec_zone_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }
static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }
static inline void quiet_vmstat(void) { }

static inline void drain_zonestat(struct zone *zone,
			struct per_cpu_zonestat *pzstats) { }
#endif	/* CONFIG_SMP */
static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
					     int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}
extern const char * const vmstat_text[];

static inline const char *zone_stat_name(enum zone_stat_item item)
{
	return vmstat_text[item];
}
#ifdef CONFIG_NUMA
static inline const char *numa_stat_name(enum numa_stat_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   item];
}
#endif /* CONFIG_NUMA */
static inline const char *node_stat_name(enum node_stat_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_EVENT_ITEMS +
			   item];
}

static inline const char *lru_list_name(enum lru_list lru)
{
	return node_stat_name(NR_LRU_BASE + lru) + 3; // skip "nr_"
}
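
/*
 * Illustrative example: node_stat_name(NR_LRU_BASE + LRU_ACTIVE_ANON)
 * is "nr_active_anon", so lru_list_name(LRU_ACTIVE_ANON) yields
 * "active_anon" after skipping the three-character "nr_" prefix.
 */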
static inline const char *writeback_stat_name(enum writeback_stat_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_EVENT_ITEMS +
			   NR_VM_NODE_STAT_ITEMS +
			   item];
}
#if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG)
static inline const char *vm_event_name(enum vm_event_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_EVENT_ITEMS +
			   NR_VM_NODE_STAT_ITEMS +
			   NR_VM_WRITEBACK_STAT_ITEMS +
			   item];
}
#endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */
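
/*
 * Layout note (inferred from the helpers above): vmstat_text[] is one flat
 * array ordered as zone items, NUMA event items, node items, writeback
 * items, then VM events, which is why each *_name() helper offsets its
 * item by the sizes of all preceding groups.
 */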
#ifdef CONFIG_MEMCG

void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			int val);

static inline void mod_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_state(lruvec, idx, val);
	local_irq_restore(flags);
}

void __mod_lruvec_page_state(struct page *page,
			     enum node_stat_item idx, int val);

static inline void mod_lruvec_page_state(struct page *page,
					 enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_page_state(page, idx, val);
	local_irq_restore(flags);
}
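
/*
 * Illustrative note (a sketch, not part of the original header): the bare
 * __mod_lruvec_*() variants require the caller to have interrupts
 * disabled; the wrappers above make an update safe from any context:
 *
 *	mod_lruvec_page_state(page, NR_FILE_PAGES, 1);	// irq-safe wrapper
 */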
#else

static inline void __mod_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx, int val)
{
	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void mod_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx, int val)
{
	mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void __mod_lruvec_page_state(struct page *page,
					   enum node_stat_item idx, int val)
{
	__mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void mod_lruvec_page_state(struct page *page,
					 enum node_stat_item idx, int val)
{
	mod_node_page_state(page_pgdat(page), idx, val);
}

#endif /* CONFIG_MEMCG */
static inline void inc_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx)
{
	mod_lruvec_state(lruvec, idx, 1);
}

static inline void __inc_lruvec_page_state(struct page *page,
					   enum node_stat_item idx)
{
	__mod_lruvec_page_state(page, idx, 1);
}

static inline void __dec_lruvec_page_state(struct page *page,
					   enum node_stat_item idx)
{
	__mod_lruvec_page_state(page, idx, -1);
}

static inline void inc_lruvec_page_state(struct page *page,
					 enum node_stat_item idx)
{
	mod_lruvec_page_state(page, idx, 1);
}

static inline void dec_lruvec_page_state(struct page *page,
					 enum node_stat_item idx)
{
	mod_lruvec_page_state(page, idx, -1);
}

#endif /* _LINUX_VMSTAT_H */