#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <asm/atomic.h>

#ifdef CONFIG_ZONE_DMA
#define DMA_ZONE(xx) xx##_DMA,
#else
#define DMA_ZONE(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define DMA32_ZONE(xx) xx##_DMA32,
#else
#define DMA32_ZONE(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define HIGHMEM_ZONE(xx) , xx##_HIGH
#else
#define HIGHMEM_ZONE(xx)
#endif

#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE

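/*
 * For example, with CONFIG_ZONE_DMA and CONFIG_HIGHMEM set and
 * CONFIG_ZONE_DMA32 unset, FOR_ALL_ZONES(PGALLOC) expands to
 *
 *	PGALLOC_DMA, PGALLOC_NORMAL, PGALLOC_HIGH, PGALLOC_MOVABLE
 *
 * so the per-zone items are emitted in the same order as the zone
 * indices (see __count_zone_vm_events below, which relies on this).
 */
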
enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
		FOR_ALL_ZONES(PGALLOC),
		PGFREE, PGACTIVATE, PGDEACTIVATE,
		PGFAULT, PGMAJFAULT,
		FOR_ALL_ZONES(PGREFILL),
		FOR_ALL_ZONES(PGSTEAL),
		FOR_ALL_ZONES(PGSCAN_KSWAPD),
		FOR_ALL_ZONES(PGSCAN_DIRECT),
#ifdef CONFIG_NUMA
		PGSCAN_ZONE_RECLAIM_FAILED,
#endif
		PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
		KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
		KSWAPD_SKIP_CONGESTION_WAIT,
		PAGEOUTRUN, ALLOCSTALL, PGROTATED,
#ifdef CONFIG_COMPACTION
		COMPACTBLOCKS, COMPACTPAGES, COMPACTPAGEFAILED,
		COMPACTSTALL, COMPACTFAIL, COMPACTSUCCESS,
#endif
#ifdef CONFIG_HUGETLB_PAGE
		HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
#endif
		UNEVICTABLE_PGCULLED,	/* culled to noreclaim list */
		UNEVICTABLE_PGSCANNED,	/* scanned for reclaimability */
		UNEVICTABLE_PGRESCUED,	/* rescued from noreclaim list */
		UNEVICTABLE_PGMLOCKED,
		UNEVICTABLE_PGMUNLOCKED,
		UNEVICTABLE_PGCLEARED,	/* on COW, page truncate */
		UNEVICTABLE_PGSTRANDED,	/* unable to isolate on unlock */
		UNEVICTABLE_MLOCKFREED,
		NR_VM_EVENT_ITEMS
};

extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per cpu counter implementation.
 *
 * Counters should only be incremented and no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

static inline void __count_vm_event(enum vm_event_item item)
{
	__this_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	__this_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}
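
/*
 * Usage sketch (illustrative call sites, not part of this header): the
 * double-underscore variants assume the caller already runs with
 * preemption disabled, e.g. inside an interrupts-off section of the page
 * allocator; the plain variants are safe from any context.
 *
 *	count_vm_event(PGFAULT);		single event
 *	__count_vm_events(PGFREE, 1 << order);	batch of events
 */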

extern void all_vm_events(unsigned long *);
#ifdef CONFIG_HOTPLUG
extern void vm_events_fold_cpu(int cpu);
#else
static inline void vm_events_fold_cpu(int cpu)
{
}
#endif

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#define __count_zone_vm_events(item, zone, delta) \
		__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
		zone_idx(zone), delta)
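
/*
 * For example, __count_zone_vm_events(PGALLOC, zone, 1 << order) on a
 * ZONE_HIGHMEM zone resolves the item as
 *
 *	PGALLOC_NORMAL - ZONE_NORMAL + ZONE_HIGHMEM == PGALLOC_HIGH
 *
 * which works because FOR_ALL_ZONES() lays the per-zone event items out
 * in the same order as the zone indices.
 */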

/*
 * Zone based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}
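
/*
 * Note: on SMP each counter is updated through a small per cpu
 * differential that is only periodically folded back into the
 * atomic_long_t, so a reader may observe a transiently negative sum.
 * The accessors below therefore clamp negative values to 0.
 */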

static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

extern unsigned long global_reclaimable_pages(void);
extern unsigned long zone_reclaimable_pages(struct zone *zone);

#ifdef CONFIG_NUMA
/*
 * Determine the per node value of a stat item. This function
 * is called frequently in a NUMA machine, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
				 enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifdef CONFIG_ZONE_DMA
		zone_page_state(&zones[ZONE_DMA], item) +
#endif
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], item) +
#endif
#ifdef CONFIG_HIGHMEM
		zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], item) +
		zone_page_state(&zones[ZONE_MOVABLE], item);
}
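
/*
 * Usage sketch (illustrative): sum a stat over all zones of one node,
 * e.g. the free pages on node 0:
 *
 *	unsigned long free = node_page_state(0, NR_FREE_PAGES);
 *
 * Each zone_page_state() call above already clamps its zone's value on
 * SMP, so the per node sum never goes negative either.
 */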

extern void zone_statistics(struct zone *, struct zone *);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z) do { } while (0)

#endif /* CONFIG_NUMA */

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

static inline void zap_zone_vm_stats(struct zone *zone)
{
	memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
}

extern void inc_zone_state(struct zone *, enum zone_stat_item);

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

void refresh_cpu_vm_stats(int);
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state
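
/*
 * Illustration (hypothetical call site): on UP the locked and unlocked
 * variants are therefore interchangeable, e.g.
 *
 *	inc_zone_page_state(page, NR_FILE_PAGES);
 *
 * compiles to the same pair of atomic_long_inc() operations as
 * __inc_zone_page_state(page, NR_FILE_PAGES).
 */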

static inline void refresh_cpu_vm_stats(int cpu) { }
#endif /* CONFIG_SMP */

#endif /* _LINUX_VMSTAT_H */