#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Light weight per cpu counter implementation.
 *
 * Counters should only be incremented and no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

/*
 * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the
 * local_irq_disable overhead.
 */
static inline void __count_vm_event(enum vm_event_item item)
{
	raw_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	raw_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}

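/*
 * Usage sketch (illustrative, not part of the original header): the
 * __count_* variants rely on raw_cpu ops and assume the caller can
 * tolerate (or has already excluded) races with preemption/interrupts,
 * while count_vm_event()/count_vm_events() use this_cpu ops and are the
 * general-purpose form. PGFAULT and PGMAJFAULT are only familiar
 * examples of vm_event_item values.
 *
 *	void example_fault_accounting(bool major)
 *	{
 *		count_vm_event(PGFAULT);
 *		if (major)
 *			count_vm_events(PGMAJFAULT, 1);
 *	}
 */
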
extern void all_vm_events(unsigned long *);

extern void vm_events_fold_cpu(int cpu);

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x) count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x) count_vm_event(x)
#define count_vm_tlb_events(x, y) count_vm_events(x, y)
#else
#define count_vm_tlb_event(x) do {} while (0)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif

#define __count_zone_vm_events(item, zone, delta) \
		__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
		zone_idx(zone), delta)

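/*
 * Expansion sketch (illustrative): the item##_NORMAL token pasting plus
 * the zone index offset picks the per-zone variant of an event item,
 * assuming the per-zone items (e.g. PGALLOC_DMA ... PGALLOC_MOVABLE)
 * are laid out in zone order. For example,
 *
 *	__count_zone_vm_events(PGALLOC, zone, 1 << order);
 *
 * expands to
 *
 *	__count_vm_events(PGALLOC_NORMAL - ZONE_NORMAL + zone_idx(zone),
 *			  1 << order);
 */
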
/*
 * Zone based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}

static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/*
 * More accurate version that also considers the currently pending
 * deltas. For that we need to loop over all cpus to find the current
 * deltas. There is no synchronization so the result cannot be
 * exactly accurate either.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}

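/*
 * Usage sketch (illustrative): zone_page_state() is the cheap read that
 * can lag behind by whatever sits in the per-cpu vm_stat_diff arrays,
 * while zone_page_state_snapshot() folds those deltas in at the cost of
 * an all-cpu loop. A caller making a watermark-style decision might only
 * pay for the snapshot when the fast value looks critically low:
 *
 *	unsigned long free = zone_page_state(zone, NR_FREE_PAGES);
 *
 *	if (free <= min_wmark_pages(zone))
 *		free = zone_page_state_snapshot(zone, NR_FREE_PAGES);
 */
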
#ifdef CONFIG_NUMA
/*
 * Determine the per node value of a stat item. This function
 * is called frequently in a NUMA machine, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
				 enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifdef CONFIG_ZONE_DMA
		zone_page_state(&zones[ZONE_DMA], item) +
#endif
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], item) +
#endif
#ifdef CONFIG_HIGHMEM
		zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], item) +
		zone_page_state(&zones[ZONE_MOVABLE], item);
}

extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z, gfp) do { } while (0)

#endif /* CONFIG_NUMA */

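/*
 * Usage sketch (illustrative): the same stat item can be read per node
 * or system wide; on !CONFIG_NUMA builds node_page_state() simply falls
 * back to global_page_state().
 *
 *	unsigned long node_free   = node_page_state(nid, NR_FREE_PAGES);
 *	unsigned long global_free = global_page_state(NR_FREE_PAGES);
 */
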
#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define inc_zone_state __inc_zone_state
#define dec_zone_state __dec_zone_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_cpu_vm_stats(int cpu) { }
static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }

static inline void drain_zonestat(struct zone *zone,
			struct per_cpu_pageset *pset) { }
#endif /* CONFIG_SMP */

static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
					     int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}

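/*
 * Usage sketch (illustrative): a positive or negative nr_pages keeps
 * NR_FREE_PAGES in step as pages enter or leave the free lists, and for
 * CMA pageblocks NR_FREE_CMA_PAGES as well.
 *
 *	__mod_zone_freepage_state(zone, 1 << order,
 *				  get_pageblock_migratetype(page));
 *	__mod_zone_freepage_state(zone, -(1 << order), migratetype);
 */
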
extern const char * const vmstat_text[];

#endif /* _LINUX_VMSTAT_H */