/* include/linux/vmstat.h */
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-cpu counter implementation.
 *
 * Counters should only be incremented; no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

/*
 * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the
 * local_irq_disable overhead.
 */
static inline void __count_vm_event(enum vm_event_item item)
{
	raw_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	raw_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}

extern void all_vm_events(unsigned long *);

extern void vm_events_fold_cpu(int cpu);

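/*
 * Illustrative usage, a sketch only (PGFAULT is a real vm_event_item;
 * these call sites are not part of this header): the plain form is
 * safe from any context, while the __-prefixed form uses raw_cpu_ops
 * and accepts the extra raciness, as the comment above explains.
 *
 *	count_vm_event(PGFAULT);
 *	__count_vm_event(PGFAULT);
 */
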
#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x) count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */

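/*
 * Illustrative usage, a sketch only (NUMA_HINT_FAULTS is a real event
 * item; this call site is not part of this header): the NUMA balancing
 * fault path can count hinting faults with
 *
 *	count_vm_numa_event(NUMA_HINT_FAULTS);
 *
 * which compiles to nothing when CONFIG_NUMA_BALANCING is off.
 */
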
#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x) count_vm_event(x)
#define count_vm_tlb_events(x, y) count_vm_events(x, y)
#else
#define count_vm_tlb_event(x) do {} while (0)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif

#ifdef CONFIG_DEBUG_VM_VMACACHE
#define count_vm_vmacache_event(x) count_vm_event(x)
#else
#define count_vm_vmacache_event(x) do {} while (0)
#endif

#define __count_zone_vm_events(item, zone, delta) \
	__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
	zone_idx(zone), delta)

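/*
 * Expansion sketch (illustration only): the per-zone event items are
 * laid out consecutively in enum vm_event_item, one slot per zone, so
 * for example
 *
 *	__count_zone_vm_events(PGALLOC, zone, 1 << order)
 *
 * becomes
 *
 *	__count_vm_events(PGALLOC_NORMAL - ZONE_NORMAL + zone_idx(zone),
 *			  1 << order)
 *
 * i.e. the PGALLOC slot that corresponds to this zone's index.
 */
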
/*
 * Zone and node-based page accounting with per-cpu differentials.
 */
extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS];
extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_zone_stat[item]);
}

static inline void node_page_state_add(long x, struct pglist_data *pgdat,
				 enum node_stat_item item)
{
	atomic_long_add(x, &pgdat->vm_stat[item]);
	atomic_long_add(x, &vm_node_stat[item]);
}

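/*
 * Both helpers above update two counters in step: the per-zone or
 * per-node atomic and the matching global array. There is no locking,
 * so the two views are only racily consistent.
 */
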
static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_zone_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long global_node_page_state(enum node_stat_item item)
{
	long x = atomic_long_read(&vm_node_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

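/*
 * The clamp to zero in the readers above is needed because, on SMP,
 * per-cpu deltas that have not yet been folded back can drive the
 * global atomics transiently negative.
 */
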
/*
 * More accurate version that also considers the currently pending
 * deltas. For that we need to loop over all cpus to find the current
 * deltas. There is no synchronization, so the result is still not
 * exact.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long node_page_state_snapshot(pg_data_t *pgdat,
					enum node_stat_item item)
{
	long x = atomic_long_read(&pgdat->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->vm_node_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}

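/*
 * Illustrative contrast, a sketch only (this call site is not part of
 * this header): zone_page_state() reads just the folded atomic, while
 * the snapshot variants also sum the unfolded per-cpu deltas, so a
 * check that must not miss very recent updates would use:
 *
 *	unsigned long free = zone_page_state_snapshot(zone, NR_FREE_PAGES);
 */
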
#ifdef CONFIG_NUMA
extern unsigned long sum_zone_node_page_state(int node,
						enum zone_stat_item item);
extern unsigned long node_page_state(struct pglist_data *pgdat,
						enum node_stat_item item);
#else
#define sum_zone_node_page_state(node, item) global_page_state(item)
#define node_page_state(node, item) global_node_page_state(item)
#endif /* CONFIG_NUMA */

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
#define add_node_page_state(__p, __i, __d) mod_node_page_state(__p, __i, __d)
#define sub_node_page_state(__p, __i, __d) mod_node_page_state(__p, __i, -(__d))

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void __mod_node_page_state(struct pglist_data *, enum node_stat_item item, long);
void __inc_node_page_state(struct page *, enum node_stat_item);
void __dec_node_page_state(struct page *, enum node_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_node_page_state(struct pglist_data *, enum node_stat_item, long);
void inc_node_page_state(struct page *, enum node_stat_item);
void dec_node_page_state(struct page *, enum node_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);
extern void inc_node_state(struct pglist_data *, enum node_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_node_state(struct pglist_data *, enum node_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_node_state(struct pglist_data *, enum node_stat_item);

void quiet_vmstat(void);
void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

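/*
 * Convention: the __-prefixed variants assume the caller already has
 * interrupts disabled or is otherwise serialized; the plain variants
 * are safe from any context. An allocator-side caller might look like
 * this (illustration only, not part of this header):
 *
 *	__mod_zone_page_state(zone, NR_FREE_PAGES, -(1L << order));
 */
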
struct ctl_table;
int vmstat_refresh(struct ctl_table *, int write,
		   void __user *buffer, size_t *lenp, loff_t *ppos);

void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
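/*
 * Illustration only (these call sites are not part of this header):
 * reclaim can temporarily tighten the per-cpu drift thresholds while
 * memory is low, and relax them again afterwards:
 *
 *	set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
 *	...
 *	set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
 */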
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, long delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __mod_node_page_state(struct pglist_data *pgdat,
			enum node_stat_item item, long delta)
{
	node_page_state_add(delta, pgdat, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_zone_stat[item]);
}

static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_inc(&pgdat->vm_stat[item]);
	atomic_long_inc(&vm_node_stat[item]);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_zone_stat[item]);
}

static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_dec(&pgdat->vm_stat[item]);
	atomic_long_dec(&vm_node_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __inc_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__inc_node_state(page_pgdat(page), item);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

static inline void __dec_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__dec_node_state(page_pgdat(page), item);
}

/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define inc_node_page_state __inc_node_page_state
#define dec_node_page_state __dec_node_page_state
#define mod_node_page_state __mod_node_page_state

#define inc_zone_state __inc_zone_state
#define inc_node_state __inc_node_state
#define dec_zone_state __dec_zone_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }
static inline void quiet_vmstat(void) { }

static inline void drain_zonestat(struct zone *zone,
			struct per_cpu_pageset *pset) { }
#endif /* CONFIG_SMP */

static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
					     int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}

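/*
 * Illustrative caller, a sketch only (zone, order and migratetype are
 * hypothetical locals): freeing 2^order pages back to the buddy
 * allocator keeps NR_FREE_CMA_PAGES in step for CMA pageblocks:
 *
 *	__mod_zone_freepage_state(zone, 1 << order, migratetype);
 */
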
extern const char * const vmstat_text[];

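/*
 * vmstat_text holds the printable names for the counters and events
 * declared above, in the order userspace sees them (e.g. /proc/vmstat).
 */
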
#endif /* _LINUX_VMSTAT_H */