#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-cpu counter implementation.
 *
 * Counters should only be incremented; no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

/*
 * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the
 * local_irq_disable overhead.
 */
static inline void __count_vm_event(enum vm_event_item item)
{
	raw_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	raw_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}
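
/*
 * Illustrative usage (a sketch, not part of this header; PGFAULT and
 * PGMAJFAULT are existing vm_event_item values):
 *
 *	__count_vm_event(PGFAULT);	- raw_cpu_inc(), cheapest form,
 *					  may lose a rare update
 *	count_vm_event(PGMAJFAULT);	- this_cpu_inc(), safe against
 *					  preemption and interrupts
 */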

extern void all_vm_events(unsigned long *);

extern void vm_events_fold_cpu(int cpu);

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x)     count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x)	   count_vm_event(x)
#define count_vm_tlb_events(x, y)  count_vm_events(x, y)
#else
#define count_vm_tlb_event(x) do {} while (0)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif

#ifdef CONFIG_DEBUG_VM_VMACACHE
#define count_vm_vmacache_event(x) count_vm_event(x)
#else
#define count_vm_vmacache_event(x) do {} while (0)
#endif

#define __count_zid_vm_events(item, zid, delta) \
	__count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta)
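
/*
 * The per-zone event items generated by FOR_ALL_ZONES() in
 * vm_event_item.h follow the enum zone_type layout, which is what the
 * item##_NORMAL - ZONE_NORMAL + zid arithmetic relies on. For example:
 *
 *	__count_zid_vm_events(PGALLOC, ZONE_DMA, 1 << order)
 *
 * expands to
 *
 *	__count_vm_events(PGALLOC_NORMAL - ZONE_NORMAL + ZONE_DMA, 1 << order)
 *
 * and therefore counts the allocation against PGALLOC_DMA.
 */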

/*
 * Zone and node-based page accounting with per-cpu differentials.
 */
extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS];
extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_zone_stat[item]);
}

static inline void node_page_state_add(long x, struct pglist_data *pgdat,
				enum node_stat_item item)
{
	atomic_long_add(x, &pgdat->vm_stat[item]);
	atomic_long_add(x, &vm_node_stat[item]);
}

static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_zone_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long global_node_page_state(enum node_stat_item item)
{
	long x = atomic_long_read(&vm_node_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
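
/*
 * Why the SMP clamps above: per-cpu deltas that have not yet been
 * folded into the atomic counters can transiently drive a sum below
 * zero (a folded decrement racing with a still-pending increment), so
 * negative readings are reported as zero. The _snapshot variants below
 * add the pending deltas back in to shrink that error window.
 */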

/*
 * More accurate version that also considers the currently pending
 * deltas. For that we need to loop over all cpus to find the current
 * deltas. There is no synchronization, so even this result is not
 * exactly accurate.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long node_page_state_snapshot(pg_data_t *pgdat,
					enum node_stat_item item)
{
	long x = atomic_long_read(&pgdat->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->vm_node_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}
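
/*
 * Both snapshot variants walk every online cpu and are therefore much
 * more expensive than the plain readers above; callers reserve them
 * for paths where acting on a stale count would hurt, e.g. the
 * close-to-watermark check in zone_watermark_ok_safe().
 */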

#ifdef CONFIG_NUMA
extern unsigned long sum_zone_node_page_state(int node,
				enum zone_stat_item item);
extern unsigned long node_page_state(struct pglist_data *pgdat,
				enum node_stat_item item);
#else
#define sum_zone_node_page_state(node, item) global_page_state(item)
#define node_page_state(node, item) global_node_page_state(item)
#endif /* CONFIG_NUMA */

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
#define add_node_page_state(__p, __i, __d) mod_node_page_state(__p, __i, __d)
#define sub_node_page_state(__p, __i, __d) mod_node_page_state(__p, __i, -(__d))

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void __mod_node_page_state(struct pglist_data *, enum node_stat_item item, long);
void __inc_node_page_state(struct page *, enum node_stat_item);
void __dec_node_page_state(struct page *, enum node_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_node_page_state(struct pglist_data *, enum node_stat_item, long);
void inc_node_page_state(struct page *, enum node_stat_item);
void dec_node_page_state(struct page *, enum node_stat_item);

extern void inc_node_state(struct pglist_data *, enum node_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_node_state(struct pglist_data *, enum node_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_node_state(struct pglist_data *, enum node_stat_item);

void quiet_vmstat(void);
void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

struct ctl_table;
int vmstat_refresh(struct ctl_table *, int write,
		   void __user *buffer, size_t *lenp, loff_t *ppos);

void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
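
/*
 * Simplified sketch of what mm/vmstat.c does behind these declarations
 * (for orientation only): each cpu batches small changes in a per-cpu
 * differential and folds them into the atomic counters only once a
 * zone-size-derived threshold is crossed, roughly:
 *
 *	s8 *p = this_cpu_ptr(zone->pageset)->vm_stat_diff + item;
 *	long x = delta + *p;
 *
 *	if (unlikely(x > threshold || x < -threshold)) {
 *		zone_page_state_add(x, zone, item);
 *		x = 0;
 *	}
 *	*p = x;
 */
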
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, long delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __mod_node_page_state(struct pglist_data *pgdat,
			enum node_stat_item item, long delta)
{
	node_page_state_add(delta, pgdat, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_zone_stat[item]);
}

static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_inc(&pgdat->vm_stat[item]);
	atomic_long_inc(&vm_node_stat[item]);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_zone_stat[item]);
}

static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_dec(&pgdat->vm_stat[item]);
	atomic_long_dec(&vm_node_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __inc_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__inc_node_state(page_pgdat(page), item);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

static inline void __dec_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__dec_node_state(page_pgdat(page), item);
}

/*
 * We only use atomic operations to update counters, so there is no need
 * to disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define inc_node_page_state __inc_node_page_state
#define dec_node_page_state __dec_node_page_state
#define mod_node_page_state __mod_node_page_state

#define inc_zone_state __inc_zone_state
#define inc_node_state __inc_node_state
#define dec_zone_state __dec_zone_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }
static inline void quiet_vmstat(void) { }

static inline void drain_zonestat(struct zone *zone,
			struct per_cpu_pageset *pset) { }
#endif /* CONFIG_SMP */

static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
					     int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}
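
/*
 * Example (illustrative): freeing 1 << order pages from a CMA
 * pageblock moves both free-page counters together:
 *
 *	__mod_zone_freepage_state(zone, 1 << order, MIGRATE_CMA);
 *
 * raises NR_FREE_PAGES by 1 << order and, since is_migrate_cma() is
 * true for MIGRATE_CMA (with CONFIG_CMA enabled), NR_FREE_CMA_PAGES
 * by the same amount.
 */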

extern const char * const vmstat_text[];

#endif /* _LINUX_VMSTAT_H */