git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blame - include/linux/memcontrol.h
memcg: remove PCG_FILE_MAPPED
[mirror_ubuntu-zesty-kernel.git] / include / linux / memcontrol.h
/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>

struct mem_cgroup;
struct page_cgroup;
struct page;
struct mm_struct;

/* Stats that can be updated by the kernel. */
enum mem_cgroup_page_stat_item {
	MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */
};

struct mem_cgroup_reclaim_cookie {
	struct zone *zone;
	int priority;
	unsigned int generation;
};

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
/*
 * All "charge" functions with gfp_mask should use GFP_KERNEL or
 * (gfp_mask & GFP_RECLAIM_MASK). In the current implementation, memcg doesn't
 * allocate memory but reclaims memory from all available zones. So the "where
 * I want memory from" bits of gfp_mask have no meaning; any bits of that field
 * are available, but having a rule is better. Charge functions' gfp_mask should
 * be set to GFP_KERNEL or gfp_mask & GFP_RECLAIM_MASK to avoid ambiguous code.
 * (Of course, if memcg does memory allocation in the future, GFP_KERNEL is sane.)
 */
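
/*
 * Illustrative sketch only (not part of this header's API): a call site
 * that follows the rule above either passes plain GFP_KERNEL or filters
 * its own mask down to the reclaim bits before charging. The error
 * labels are hypothetical:
 *
 *	if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
 *		return VM_FAULT_OOM;
 *
 *	if (mem_cgroup_cache_charge(page, mm, gfp_mask & GFP_RECLAIM_MASK))
 *		goto out_release;
 */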

extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask);
/* for swap handling */
extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t mask, struct mem_cgroup **memcgp);
extern void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *memcg);
extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg);
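
/*
 * Illustrative sketch only: swapin charging is a two-phase protocol, so
 * a fault path that cannot complete the mapping must cancel the charge
 * it reserved. The surrounding fault logic and map_page() are
 * hypothetical stand-ins:
 *
 *	struct mem_cgroup *memcg = NULL;
 *
 *	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &memcg))
 *		goto out_oom;
 *	if (map_page(page))
 *		mem_cgroup_cancel_charge_swapin(memcg);
 *	else
 *		mem_cgroup_commit_charge_swapin(page, memcg);
 */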

extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask);

struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
struct lruvec *mem_cgroup_lru_add_list(struct zone *, struct page *,
				       enum lru_list);
void mem_cgroup_lru_del_list(struct page *, enum lru_list);
void mem_cgroup_lru_del(struct page *);
struct lruvec *mem_cgroup_lru_move_lists(struct zone *, struct page *,
					 enum lru_list, enum lru_list);

/* For coalescing uncharges, to reduce memcg overhead */
extern void mem_cgroup_uncharge_start(void);
extern void mem_cgroup_uncharge_end(void);
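
/*
 * Illustrative sketch only: a bulk-release loop (the loop itself is
 * hypothetical, e.g. truncation of a mapping) brackets its uncharges so
 * memcg can coalesce the per-page accounting updates:
 *
 *	mem_cgroup_uncharge_start();
 *	for (i = 0; i < nr; i++)
 *		mem_cgroup_uncharge_cache_page(pages[i]);
 *	mem_cgroup_uncharge_end();
 */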

extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);

extern void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
				     int order);
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg);

extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm);

extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
extern struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont);

static inline
int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
{
	struct mem_cgroup *memcg;
	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference((mm)->owner));
	rcu_read_unlock();
	return cgroup == memcg;
}

extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);

extern int
mem_cgroup_prepare_migration(struct page *page,
	struct page *newpage, struct mem_cgroup **memcgp, gfp_t gfp_mask);
extern void mem_cgroup_end_migration(struct mem_cgroup *memcg,
	struct page *oldpage, struct page *newpage, bool migration_ok);
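
/*
 * Illustrative sketch only: page migration moves the charge in two
 * phases around the actual copy. move_page() and its return-code
 * handling are hypothetical, and prepare's error handling is omitted:
 *
 *	struct mem_cgroup *memcg = NULL;
 *	int rc;
 *
 *	mem_cgroup_prepare_migration(page, newpage, &memcg, GFP_KERNEL);
 *	rc = move_page(page, newpage);
 *	mem_cgroup_end_migration(memcg, page, newpage, rc == 0);
 */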

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
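
/*
 * Illustrative sketch only: the usual way to walk a hierarchy is to
 * seed mem_cgroup_iter() with NULL and feed the previous position back
 * in; mem_cgroup_iter_break() releases the reference when bailing out
 * early. "done" stands for a hypothetical termination condition:
 *
 *	struct mem_cgroup_reclaim_cookie reclaim = {
 *		.zone = zone,
 *		.priority = priority,
 *	};
 *	struct mem_cgroup *memcg;
 *
 *	for (memcg = mem_cgroup_iter(root, NULL, &reclaim); memcg;
 *	     memcg = mem_cgroup_iter(root, memcg, &reclaim)) {
 *		if (done) {
 *			mem_cgroup_iter_break(root, memcg);
 *			break;
 *		}
 *	}
 */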

/*
 * For memory reclaim.
 */
int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg,
				    struct zone *zone);
int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg,
				    struct zone *zone);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
unsigned long mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg,
					int nid, int zid, unsigned int lrumask);
struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
						      struct zone *zone);
struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat_from_page(struct page *page);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
					struct task_struct *p);
extern void mem_cgroup_replace_page_cache(struct page *oldpage,
					struct page *newpage);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
extern int do_swap_account;
#endif

static inline bool mem_cgroup_disabled(void)
{
	if (mem_cgroup_subsys.disabled)
		return true;
	return false;
}

void __mem_cgroup_begin_update_page_stat(struct page *page, bool *locked,
					 unsigned long *flags);

static inline void mem_cgroup_begin_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
	if (mem_cgroup_disabled())
		return;
	rcu_read_lock();
	*locked = false;
	return __mem_cgroup_begin_update_page_stat(page, locked, flags);
}

void __mem_cgroup_end_update_page_stat(struct page *page,
				       unsigned long *flags);
static inline void mem_cgroup_end_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
	if (mem_cgroup_disabled())
		return;
	if (*locked)
		__mem_cgroup_end_update_page_stat(page, flags);
	rcu_read_unlock();
}

void mem_cgroup_update_page_stat(struct page *page,
				 enum mem_cgroup_page_stat_item idx,
				 int val);

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, 1);
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, -1);
}
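
/*
 * Illustrative sketch only (modelled on a file-rmap style caller; the
 * page-state test is hypothetical): a statistics update that must stay
 * coherent with a page state change is bracketed by the begin/end pair,
 * which stabilizes the page's memcg binding against concurrent task
 * moves:
 *
 *	bool locked;
 *	unsigned long flags;
 *
 *	mem_cgroup_begin_update_page_stat(page, &locked, &flags);
 *	if (page_becomes_file_mapped)
 *		mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
 *	mem_cgroup_end_update_page_stat(page, &locked, &flags);
 */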

unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
						gfp_t gfp_mask,
						unsigned long *total_scanned);
u64 mem_cgroup_get_limit(struct mem_cgroup *memcg);

void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif

#ifdef CONFIG_DEBUG_VM
bool mem_cgroup_bad_page_check(struct page *page);
void mem_cgroup_print_bad_page(struct page *page);
#endif
#else /* CONFIG_CGROUP_MEM_RES_CTLR */
struct mem_cgroup;

static inline int mem_cgroup_newpage_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_cache_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t gfp_mask, struct mem_cgroup **memcgp)
{
	return 0;
}

static inline void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_uncharge_start(void)
{
}

static inline void mem_cgroup_uncharge_end(void)
{
}

static inline void mem_cgroup_uncharge_page(struct page *page)
{
}

static inline void mem_cgroup_uncharge_cache_page(struct page *page)
{
}

static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
						    struct mem_cgroup *memcg)
{
	return &zone->lruvec;
}

static inline struct lruvec *mem_cgroup_lru_add_list(struct zone *zone,
						     struct page *page,
						     enum lru_list lru)
{
	return &zone->lruvec;
}

static inline void mem_cgroup_lru_del_list(struct page *page, enum lru_list lru)
{
}

static inline void mem_cgroup_lru_del(struct page *page)
{
}

static inline struct lruvec *mem_cgroup_lru_move_lists(struct zone *zone,
						       struct page *page,
						       enum lru_list from,
						       enum lru_list to)
{
	return &zone->lruvec;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline int mm_match_cgroup(struct mm_struct *mm,
		struct mem_cgroup *memcg)
{
	return 1;
}

static inline int task_in_mem_cgroup(struct task_struct *task,
				     const struct mem_cgroup *memcg)
{
	return 1;
}

static inline struct cgroup_subsys_state
		*mem_cgroup_css(struct mem_cgroup *memcg)
{
	return NULL;
}

static inline int
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
	struct mem_cgroup **memcgp, gfp_t gfp_mask)
{
	return 0;
}

static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg,
		struct page *oldpage, struct page *newpage, bool migration_ok)
{
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline int
mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone)
{
	return 1;
}

static inline int
mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg, struct zone *zone)
{
	return 1;
}

static inline unsigned long
mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
			     unsigned int lru_mask)
{
	return 0;
}

static inline struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, struct zone *zone)
{
	return NULL;
}

static inline struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat_from_page(struct page *page)
{
	return NULL;
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void mem_cgroup_begin_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
}

static inline void mem_cgroup_end_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
}

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}

static inline
u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
{
	return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}

static inline
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
}

static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
				struct page *newpage)
{
}
#endif /* CONFIG_CGROUP_MEM_RES_CTLR */

#if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
static inline bool
mem_cgroup_bad_page_check(struct page *page)
{
	return false;
}

static inline void
mem_cgroup_print_bad_page(struct page *page)
{
}
#endif

enum {
	UNDER_LIMIT,
	SOFT_LIMIT,
	OVER_LIMIT,
};

struct sock;
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
void sock_update_memcg(struct sock *sk);
void sock_release_memcg(struct sock *sk);
#else
static inline void sock_update_memcg(struct sock *sk)
{
}
static inline void sock_release_memcg(struct sock *sk)
{
}
#endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */
#endif /* _LINUX_MEMCONTROL_H */