[PATCH] zoned vm counters: convert nr_mapped to per zone counter
include/linux/vmstat.h
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/config.h>
#include <linux/mmzone.h>
#include <asm/atomic.h>

/*
 * Global page accounting. One instance per CPU. Only unsigned longs are
 * allowed.
 *
 * - Fields can be modified with xxx_page_state and xxx_page_state_zone at
 *   any time safely (these protect the instance from modification by
 *   interrupts).
 * - The __xxx_page_state variants can be used safely when interrupts are
 *   disabled.
 * - The __xxx_page_state variants can also be used if the field is only
 *   modified from process context and protected from preemption, or only
 *   modified from interrupt context. In this case, the field should be
 *   commented here.
 */
struct page_state {
	unsigned long nr_dirty;		/* Dirty writeable pages */
	unsigned long nr_writeback;	/* Pages under writeback */
	unsigned long nr_unstable;	/* NFS unstable pages */
	unsigned long nr_page_table_pages;/* Pages used for pagetables */
	unsigned long nr_slab;		/* In slab */
#define GET_PAGE_STATE_LAST nr_slab

	/*
	 * The below are zeroed by get_page_state(). Use get_full_page_state()
	 * to add up all these.
	 */
	unsigned long pgpgin;		/* Disk reads */
	unsigned long pgpgout;		/* Disk writes */
	unsigned long pswpin;		/* swap reads */
	unsigned long pswpout;		/* swap writes */

	unsigned long pgalloc_high;	/* page allocations */
	unsigned long pgalloc_normal;
	unsigned long pgalloc_dma32;
	unsigned long pgalloc_dma;

	unsigned long pgfree;		/* page freeings */
	unsigned long pgactivate;	/* pages moved inactive->active */
	unsigned long pgdeactivate;	/* pages moved active->inactive */

	unsigned long pgfault;		/* faults (major+minor) */
	unsigned long pgmajfault;	/* faults (major only) */

	unsigned long pgrefill_high;	/* inspected in refill_inactive_zone */
	unsigned long pgrefill_normal;
	unsigned long pgrefill_dma32;
	unsigned long pgrefill_dma;

	unsigned long pgsteal_high;	/* total highmem pages reclaimed */
	unsigned long pgsteal_normal;
	unsigned long pgsteal_dma32;
	unsigned long pgsteal_dma;

	unsigned long pgscan_kswapd_high;/* highmem pages scanned by kswapd */
	unsigned long pgscan_kswapd_normal;
	unsigned long pgscan_kswapd_dma32;
	unsigned long pgscan_kswapd_dma;

	unsigned long pgscan_direct_high;/* highmem pages scanned by direct reclaim */
	unsigned long pgscan_direct_normal;
	unsigned long pgscan_direct_dma32;
	unsigned long pgscan_direct_dma;

	unsigned long pginodesteal;	/* pages reclaimed via inode freeing */
	unsigned long slabs_scanned;	/* slab objects scanned */
	unsigned long kswapd_steal;	/* pages reclaimed by kswapd */
	unsigned long kswapd_inodesteal;/* reclaimed via kswapd inode freeing */
	unsigned long pageoutrun;	/* kswapd's calls to page reclaim */
	unsigned long allocstall;	/* direct reclaim calls */

	unsigned long pgrotated;	/* pages rotated to tail of the LRU */
	unsigned long nr_bounce;	/* pages for bounce buffers */
};
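
/*
 * Usage sketch (hypothetical helper, not an actual kernel call site):
 * readers snapshot the summed per-CPU instances. get_page_state() sums
 * only the fields up to GET_PAGE_STATE_LAST and zeroes the rest;
 * get_full_page_state() sums every field.
 */
static inline unsigned long example_dirty_pages(void)
{
	struct page_state ps;

	get_page_state(&ps);
	return ps.nr_dirty;
}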

extern void get_page_state(struct page_state *ret);
extern void get_page_state_node(struct page_state *ret, int node);
extern void get_full_page_state(struct page_state *ret);
extern unsigned long read_page_state_offset(unsigned long offset);
extern void mod_page_state_offset(unsigned long offset, unsigned long delta);
extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);

#define read_page_state(member)	\
	read_page_state_offset(offsetof(struct page_state, member))

#define mod_page_state(member, delta)	\
	mod_page_state_offset(offsetof(struct page_state, member), (delta))

#define __mod_page_state(member, delta)	\
	__mod_page_state_offset(offsetof(struct page_state, member), (delta))

#define inc_page_state(member)		mod_page_state(member, 1UL)
#define dec_page_state(member)		mod_page_state(member, 0UL - 1)
#define add_page_state(member,delta)	mod_page_state(member, (delta))
#define sub_page_state(member,delta)	mod_page_state(member, 0UL - (delta))

#define __inc_page_state(member)	__mod_page_state(member, 1UL)
#define __dec_page_state(member)	__mod_page_state(member, 0UL - 1)
#define __add_page_state(member,delta)	__mod_page_state(member, (delta))
#define __sub_page_state(member,delta)	__mod_page_state(member, 0UL - (delta))
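
/*
 * Usage sketch (hypothetical helper, not an actual kernel call site):
 * the fault path bumps the event counters declared above.
 * inc_page_state() is interrupt safe; the __inc_page_state() form may
 * only be used where interrupts are already disabled or the field is
 * only touched from one context (see the comment at the top).
 */
static inline void example_account_fault(int major)
{
	inc_page_state(pgfault);	/* all faults, major and minor */
	if (major)
		inc_page_state(pgmajfault);
}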

#define page_state(member) (*__page_state(offsetof(struct page_state, member)))

#define state_zone_offset(zone, member)					\
({									\
	unsigned offset;						\
	if (is_highmem(zone))						\
		offset = offsetof(struct page_state, member##_high);	\
	else if (is_normal(zone))					\
		offset = offsetof(struct page_state, member##_normal);	\
	else if (is_dma32(zone))					\
		offset = offsetof(struct page_state, member##_dma32);	\
	else								\
		offset = offsetof(struct page_state, member##_dma);	\
	offset;								\
})

#define __mod_page_state_zone(zone, member, delta)			\
do {									\
	__mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
} while (0)

#define mod_page_state_zone(zone, member, delta)			\
do {									\
	mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
} while (0)
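
/*
 * Usage sketch (hypothetical helper, loosely modeled on reclaim code):
 * the _zone variants let state_zone_offset() paste the zone suffix onto
 * the member name, so a ZONE_NORMAL zone updates pgsteal_normal, a
 * highmem zone updates pgsteal_high, and so on.
 */
static inline void example_note_reclaimed(struct zone *zone,
					  unsigned long nr_freed)
{
	mod_page_state_zone(zone, pgsteal, nr_freed);
}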

DECLARE_PER_CPU(struct page_state, page_states);

/*
 * Zone based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

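/*
 * Every update is applied to both the per-zone counter and the global
 * vm_stat[] array, so zone and system-wide totals stay consistent.
 */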
static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}

static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
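
/*
 * Why the SMP clamps above: on SMP, updates are accumulated in small
 * per-CPU differentials and folded into the atomics only when a
 * threshold is crossed, so a reader can see a value that lags the true
 * sum and may even be transiently negative. A hedged sketch of the
 * fold step follows; the real per-CPU bookkeeping lives in mm/vmstat.c
 * and the helper name and threshold here are illustrative only.
 */
static inline void example_mod_state(struct zone *zone,
				     enum zone_stat_item item,
				     int delta, s8 *pcpu_diff)
{
	long x = delta + *pcpu_diff;

	if (x > 32 || x < -32) {		/* illustrative threshold */
		zone_page_state_add(x, zone, item);	/* fold into atomics */
		x = 0;
	}
	*pcpu_diff = x;
}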

#ifdef CONFIG_NUMA
/*
 * Determine the per node value of a stat item. This function
 * is called frequently on NUMA machines, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
				 enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifndef CONFIG_DMA_IS_NORMAL
#if !defined(CONFIG_DMA_IS_DMA32) && BITS_PER_LONG >= 64
		zone_page_state(&zones[ZONE_DMA32], item) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], item) +
#endif
#ifdef CONFIG_HIGHMEM
		zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
		zone_page_state(&zones[ZONE_DMA], item);
}
#else
#define node_page_state(node, item) global_page_state(item)
#endif
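
/*
 * Usage sketch (hypothetical helper): the patch this file comes from
 * converts nr_mapped to a zoned counter, so a NUMA-aware reader can sum
 * mapped file pages per node. NR_FILE_MAPPED is assumed to be one of
 * the zone_stat_items defined in linux/mmzone.h.
 */
static inline unsigned long example_node_mapped(int node)
{
	return node_page_state(node, NR_FILE_MAPPED);
}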

#define __add_zone_page_state(__z, __i, __d)	\
		__mod_zone_page_state(__z, __i, __d)
#define __sub_zone_page_state(__z, __i, __d)	\
		__mod_zone_page_state(__z, __i, -(__d))

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

static inline void zap_zone_vm_stats(struct zone *zone)
{
	memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
}

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);

void refresh_cpu_vm_stats(int);
void refresh_vm_stats(void);

#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	atomic_long_inc(&page_zone(page)->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	atomic_long_dec(&page_zone(page)->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

static inline void refresh_cpu_vm_stats(int cpu) { }
static inline void refresh_vm_stats(void) { }
#endif
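
/*
 * Usage sketch of the conversion this patch performs (hypothetical
 * helper, not a verbatim mm/rmap.c hunk): mapping a file page now bumps
 * the zoned NR_FILE_MAPPED counter instead of the old global nr_mapped
 * field. The __ form is valid here on the assumption that the caller
 * already runs with interrupts disabled (SMP) or relies on the atomic
 * UP implementation above.
 */
static inline void example_account_file_mapped(struct page *page)
{
	__inc_zone_page_state(page, NR_FILE_MAPPED);
}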

#endif /* _LINUX_VMSTAT_H */