/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>

struct mem_cgroup;
struct page_cgroup;
struct page;
struct mm_struct;

/* Stats that can be updated by the kernel. */
enum mem_cgroup_page_stat_item {
	MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */
};

extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
					struct list_head *dst,
					unsigned long *scanned, int order,
					isolate_mode_t mode,
					struct zone *z,
					struct mem_cgroup *mem_cont,
					int active, int file);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
/*
 * All "charge" functions with gfp_mask should use GFP_KERNEL or
 * (gfp_mask & GFP_RECLAIM_MASK). In the current implementation, memcg doesn't
 * allocate memory but reclaims memory from all available zones, so the
 * "where do I want memory from" bits of gfp_mask have no meaning. Any bits of
 * that field would therefore work, but having a rule avoids ambiguity: a
 * charge function's gfp_mask should be set to GFP_KERNEL or
 * (gfp_mask & GFP_RECLAIM_MASK).
 * (Of course, if memcg does memory allocation in the future, GFP_KERNEL is sane.)
 */
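
/*
 * Illustrative sketch, not part of this header: a caller charging a newly
 * faulted anonymous page would follow the rule above by passing GFP_KERNEL,
 * or by masking an incoming gfp_mask down to its reclaim bits:
 *
 *	if (mem_cgroup_newpage_charge(page, mm, gfp_mask & GFP_RECLAIM_MASK))
 *		goto oom;
 */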

extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask);
/* for swap handling */
extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t mask, struct mem_cgroup **ptr);
extern void mem_cgroup_commit_charge_swapin(struct page *page,
		struct mem_cgroup *ptr);
extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr);
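
/*
 * Illustrative sketch of the two-phase swap-in charge (assumed caller; the
 * local variable and the failure test are hypothetical): try the charge
 * first, then either commit it once the page is mapped or cancel it:
 *
 *	struct mem_cgroup *ptr;
 *
 *	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr))
 *		return -ENOMEM;
 *	if (mapping_failed)
 *		mem_cgroup_cancel_charge_swapin(ptr);
 *	else
 *		mem_cgroup_commit_charge_swapin(page, ptr);
 */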

extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask);
extern void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_rotate_reclaimable_page(struct page *page);
extern void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_del_lru(struct page *page);
extern void mem_cgroup_move_lists(struct page *page,
				  enum lru_list from, enum lru_list to);

/* For coalescing uncharge, to reduce memcg's overhead */
extern void mem_cgroup_uncharge_start(void);
extern void mem_cgroup_uncharge_end(void);
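
/*
 * Illustrative sketch, not part of this header: a caller freeing a batch of
 * pages brackets the loop so memcg can coalesce the underlying uncharges
 * (the list and iterator names are hypothetical):
 *
 *	mem_cgroup_uncharge_start();
 *	list_for_each_entry_safe(page, next, &pages_to_free, lru)
 *		mem_cgroup_uncharge_page(page);
 *	mem_cgroup_uncharge_end();
 */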

extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);

extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask);
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem);

extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm);

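/*
 * Test whether @mm's owner task is charged to @cgroup. The owner is
 * dereferenced under rcu_read_lock(), so the result is only a snapshot
 * of a moving target.
 */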
static inline
int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
{
	struct mem_cgroup *mem;

	rcu_read_lock();
	mem = mem_cgroup_from_task(rcu_dereference((mm)->owner));
	rcu_read_unlock();
	return cgroup == mem;
}

extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem);

extern int
mem_cgroup_prepare_migration(struct page *page,
	struct page *newpage, struct mem_cgroup **ptr, gfp_t gfp_mask);
extern void mem_cgroup_end_migration(struct mem_cgroup *mem,
	struct page *oldpage, struct page *newpage, bool migration_ok);
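
/*
 * Illustrative sketch of the migration charge protocol (assumed caller;
 * "migration_ok" mirrors the parameter above): the charge for the new page
 * is prepared up front, then settled once the outcome is known:
 *
 *	struct mem_cgroup *memcg = NULL;
 *
 *	if (mem_cgroup_prepare_migration(page, newpage, &memcg, GFP_KERNEL))
 *		return -ENOMEM;
 *	(... copy the page and switch its users over ...)
 *	mem_cgroup_end_migration(memcg, page, newpage, migration_ok);
 */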

/*
 * For memory reclaim.
 */
int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg);
int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
unsigned long mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg,
					int nid, int zid, unsigned int lru_mask);
struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
						      struct zone *zone);
struct zone_reclaim_stat *
mem_cgroup_get_reclaim_stat_from_page(struct page *page);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
					struct task_struct *p);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
extern int do_swap_account;
#endif

static inline bool mem_cgroup_disabled(void)
{
	if (mem_cgroup_subsys.disabled)
		return true;
	return false;
}

void mem_cgroup_update_page_stat(struct page *page,
				 enum mem_cgroup_page_stat_item idx,
				 int val);

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, 1);
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, -1);
}
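
/*
 * Illustrative sketch, not part of this header: rmap would bump the
 * file-mapped statistic when a file page gains a mapping and drop it when
 * the last mapping goes away:
 *
 *	mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
 *	...
 *	mem_cgroup_dec_page_stat(page, MEMCG_NR_FILE_MAPPED);
 */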

unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
						gfp_t gfp_mask,
						unsigned long *total_scanned);
u64 mem_cgroup_get_limit(struct mem_cgroup *mem);

void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail);
#endif

#ifdef CONFIG_DEBUG_VM
bool mem_cgroup_bad_page_check(struct page *page);
void mem_cgroup_print_bad_page(struct page *page);
#endif
#else /* CONFIG_CGROUP_MEM_RES_CTLR */
struct mem_cgroup;

static inline int mem_cgroup_newpage_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_cache_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t gfp_mask, struct mem_cgroup **ptr)
{
	return 0;
}

static inline void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *ptr)
{
}

static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr)
{
}

static inline void mem_cgroup_uncharge_start(void)
{
}

static inline void mem_cgroup_uncharge_end(void)
{
}

static inline void mem_cgroup_uncharge_page(struct page *page)
{
}

static inline void mem_cgroup_uncharge_cache_page(struct page *page)
{
}

static inline void mem_cgroup_add_lru_list(struct page *page, int lru)
{
}

static inline void mem_cgroup_del_lru_list(struct page *page, int lru)
{
}

static inline void mem_cgroup_rotate_reclaimable_page(struct page *page)
{
}

static inline void mem_cgroup_rotate_lru_list(struct page *page, int lru)
{
}

static inline void mem_cgroup_del_lru(struct page *page)
{
}

static inline void
mem_cgroup_move_lists(struct page *page, enum lru_list from, enum lru_list to)
{
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline int mm_match_cgroup(struct mm_struct *mm, struct mem_cgroup *mem)
{
	return 1;
}

static inline int task_in_mem_cgroup(struct task_struct *task,
				     const struct mem_cgroup *mem)
{
	return 1;
}

static inline struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem)
{
	return NULL;
}

static inline int
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
		struct mem_cgroup **ptr, gfp_t gfp_mask)
{
	return 0;
}

static inline void mem_cgroup_end_migration(struct mem_cgroup *mem,
		struct page *oldpage, struct page *newpage, bool migration_ok)
{
}

static inline int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
{
	return 0;
}

static inline void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem,
						int priority)
{
}

static inline void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
						int priority)
{
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline int
mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
{
	return 1;
}

static inline int
mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
{
	return 1;
}

static inline unsigned long
mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
			     unsigned int lru_mask)
{
	return 0;
}

static inline struct zone_reclaim_stat *
mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, struct zone *zone)
{
	return NULL;
}

static inline struct zone_reclaim_stat *
mem_cgroup_get_reclaim_stat_from_page(struct page *page)
{
	return NULL;
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}

static inline
u64 mem_cgroup_get_limit(struct mem_cgroup *mem)
{
	return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head,
					       struct page *tail)
{
}

static inline
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
}
#endif /* CONFIG_CGROUP_MEM_RES_CTLR */

#if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
static inline bool
mem_cgroup_bad_page_check(struct page *page)
{
	return false;
}

static inline void
mem_cgroup_print_bad_page(struct page *page)
{
}
#endif

#endif /* _LINUX_MEMCONTROL_H */