/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>

struct mem_cgroup;
struct page_cgroup;
struct page;
struct mm_struct;

/* Stats that can be updated by kernel. */
enum mem_cgroup_page_stat_item {
	MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */
};

struct mem_cgroup_reclaim_cookie {
	struct zone *zone;
	int priority;
	unsigned int generation;
};

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
/*
 * All "charge" functions with a gfp_mask argument should use GFP_KERNEL or
 * (gfp_mask & GFP_RECLAIM_MASK). In the current implementation, memcg doesn't
 * allocate memory itself but reclaims memory from all available zones, so the
 * "where do I want memory from" bits of gfp_mask have no meaning. Any bits of
 * that field would do, but having a rule avoids ambiguous code: a charge
 * function's gfp_mask should be set to GFP_KERNEL or
 * gfp_mask & GFP_RECLAIM_MASK.
 * (Of course, if memcg does memory allocation in the future, GFP_KERNEL is sane.)
 */

extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask);
/* for swap handling */
extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t mask, struct mem_cgroup **memcgp);
extern void mem_cgroup_commit_charge_swapin(struct page *page,
				struct mem_cgroup *memcg);
extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg);

extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask);
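
/*
 * Minimal usage sketch of the gfp_mask rule above (illustrative only, not
 * part of this header's API; the "error" variable and "out" label are
 * hypothetical): a caller holding an arbitrary gfp_mask strips the placement
 * bits before charging, e.g.
 *
 *	error = mem_cgroup_cache_charge(page, current->mm,
 *					gfp_mask & GFP_RECLAIM_MASK);
 *	if (error)
 *		goto out;
 */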

struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
struct lruvec *mem_cgroup_lru_add_list(struct zone *, struct page *,
				       enum lru_list);
void mem_cgroup_lru_del_list(struct page *, enum lru_list);
void mem_cgroup_lru_del(struct page *);
struct lruvec *mem_cgroup_lru_move_lists(struct zone *, struct page *,
					 enum lru_list, enum lru_list);

/* For coalescing uncharge, to reduce memcg overhead */
extern void mem_cgroup_uncharge_start(void);
extern void mem_cgroup_uncharge_end(void);

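/*
 * Illustrative sketch only (page_list and release_page are hypothetical
 * names): batching several uncharges between start/end lets memcg coalesce
 * the accounting updates instead of paying per-page overhead.
 *
 *	mem_cgroup_uncharge_start();
 *	list_for_each_entry_safe(page, next, &page_list, lru)
 *		release_page(page);
 *	mem_cgroup_uncharge_end();
 */
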
extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);

extern void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
				     int order);
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg);

extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm);

extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
extern struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont);

static inline
int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
{
	struct mem_cgroup *memcg;
	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference((mm)->owner));
	rcu_read_unlock();
	return cgroup == memcg;
}

extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);

extern int
mem_cgroup_prepare_migration(struct page *page,
	struct page *newpage, struct mem_cgroup **memcgp, gfp_t gfp_mask);
extern void mem_cgroup_end_migration(struct mem_cgroup *memcg,
	struct page *oldpage, struct page *newpage, bool migration_ok);

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);

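/*
 * Illustrative sketch, not a declaration: a typical hierarchy walk visits
 * root and each descendant in turn, and may bail out early with
 * mem_cgroup_iter_break() so the reference held by the iterator is dropped.
 * Here "reclaim" is an optional struct mem_cgroup_reclaim_cookie (may be
 * NULL) and visit_memcg() is a hypothetical helper.
 *
 *	struct mem_cgroup *memcg = mem_cgroup_iter(root, NULL, &reclaim);
 *	while (memcg) {
 *		if (visit_memcg(memcg)) {
 *			mem_cgroup_iter_break(root, memcg);
 *			break;
 *		}
 *		memcg = mem_cgroup_iter(root, memcg, &reclaim);
 *	}
 */
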
/*
 * For memory reclaim.
 */
int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg,
				    struct zone *zone);
int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg,
				    struct zone *zone);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
unsigned long mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg,
					   int nid, int zid, unsigned int lrumask);
struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
						      struct zone *zone);
struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat_from_page(struct page *page);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
				      struct task_struct *p);
extern void mem_cgroup_replace_page_cache(struct page *oldpage,
					  struct page *newpage);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
extern int do_swap_account;
#endif

static inline bool mem_cgroup_disabled(void)
{
	if (mem_cgroup_subsys.disabled)
		return true;
	return false;
}

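/*
 * Illustrative sketch only: memcg hooks in the MM code typically bail out
 * early when the controller is disabled, e.g. a hypothetical hook would
 * start with
 *
 *	if (mem_cgroup_disabled())
 *		return 0;
 */
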
void mem_cgroup_update_page_stat(struct page *page,
				 enum mem_cgroup_page_stat_item idx,
				 int val);

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, 1);
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, -1);
}

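/*
 * Illustrative sketch only: the rmap code keeps MEMCG_NR_FILE_MAPPED in sync
 * by bumping the counter when a file page gains its first mapping and
 * dropping it when the last mapping goes away, roughly
 *
 *	mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
 *	...
 *	mem_cgroup_dec_page_stat(page, MEMCG_NR_FILE_MAPPED);
 */
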
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned);
u64 mem_cgroup_get_limit(struct mem_cgroup *memcg);

void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif

#ifdef CONFIG_DEBUG_VM
bool mem_cgroup_bad_page_check(struct page *page);
void mem_cgroup_print_bad_page(struct page *page);
#endif
#else /* CONFIG_CGROUP_MEM_RES_CTLR */
struct mem_cgroup;

static inline int mem_cgroup_newpage_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_cache_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t gfp_mask, struct mem_cgroup **memcgp)
{
	return 0;
}

static inline void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_uncharge_start(void)
{
}

static inline void mem_cgroup_uncharge_end(void)
{
}

static inline void mem_cgroup_uncharge_page(struct page *page)
{
}

static inline void mem_cgroup_uncharge_cache_page(struct page *page)
{
}

static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
						    struct mem_cgroup *memcg)
{
	return &zone->lruvec;
}

static inline struct lruvec *mem_cgroup_lru_add_list(struct zone *zone,
						     struct page *page,
						     enum lru_list lru)
{
	return &zone->lruvec;
}

static inline void mem_cgroup_lru_del_list(struct page *page, enum lru_list lru)
{
}

static inline void mem_cgroup_lru_del(struct page *page)
{
}

static inline struct lruvec *mem_cgroup_lru_move_lists(struct zone *zone,
						       struct page *page,
						       enum lru_list from,
						       enum lru_list to)
{
	return &zone->lruvec;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline int mm_match_cgroup(struct mm_struct *mm,
				  struct mem_cgroup *memcg)
{
	return 1;
}

static inline int task_in_mem_cgroup(struct task_struct *task,
				     const struct mem_cgroup *memcg)
{
	return 1;
}

static inline struct cgroup_subsys_state
		*mem_cgroup_css(struct mem_cgroup *memcg)
{
	return NULL;
}

static inline int
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
	struct mem_cgroup **memcgp, gfp_t gfp_mask)
{
	return 0;
}

static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg,
	struct page *oldpage, struct page *newpage, bool migration_ok)
{
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline int
mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone)
{
	return 1;
}

static inline int
mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg, struct zone *zone)
{
	return 1;
}

static inline unsigned long
mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
			     unsigned int lru_mask)
{
	return 0;
}

static inline struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, struct zone *zone)
{
	return NULL;
}

static inline struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat_from_page(struct page *page)
{
	return NULL;
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}

static inline
u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
{
	return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}

static inline
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
}

static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
						 struct page *newpage)
{
}
#endif /* CONFIG_CGROUP_MEM_RES_CTLR */

#if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
static inline bool
mem_cgroup_bad_page_check(struct page *page)
{
	return false;
}

static inline void
mem_cgroup_print_bad_page(struct page *page)
{
}
#endif

enum {
	UNDER_LIMIT,
	SOFT_LIMIT,
	OVER_LIMIT,
};

struct sock;
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
void sock_update_memcg(struct sock *sk);
void sock_release_memcg(struct sock *sk);
#else
static inline void sock_update_memcg(struct sock *sk)
{
}
static inline void sock_release_memcg(struct sock *sk)
{
}
#endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */
#endif /* _LINUX_MEMCONTROL_H */