/*
 * memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>

struct mem_cgroup;
struct page_cgroup;
struct page;
struct mm_struct;
/* Stats that can be updated by kernel. */
enum mem_cgroup_page_stat_item {
	MEMCG_NR_FILE_MAPPED,	/* # of pages charged as file rss */
};
extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
					struct list_head *dst,
					unsigned long *scanned, int order,
					int mode, struct zone *z,
					struct mem_cgroup *mem_cont,
					int active, int file);
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
/*
 * All "charge" functions with a gfp_mask should use GFP_KERNEL or
 * (gfp_mask & GFP_RECLAIM_MASK). In the current implementation, memcg does
 * not allocate memory itself but reclaims memory from all available zones,
 * so the "where do I want memory from" bits of gfp_mask have no meaning.
 * Any bits could therefore be passed, but having a rule avoids ambiguity:
 * a charge function's gfp_mask should be set to GFP_KERNEL or to
 * (gfp_mask & GFP_RECLAIM_MASK).
 * (Of course, if memcg ever allocates memory itself, GFP_KERNEL is the sane
 * choice.)
 */
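
/*
 * Illustrative sketch only (not a declaration from this header): a caller
 * that already holds an allocation gfp_mask would charge a page-cache page
 * following the rule above as
 *
 *	error = mem_cgroup_cache_charge(page, current->mm,
 *					gfp_mask & GFP_RECLAIM_MASK);
 *	if (error)
 *		return error;
 *
 * i.e. only the reclaim-relevant bits of gfp_mask are passed down.
 */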
extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask);
/* for swap handling */
extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t mask, struct mem_cgroup **ptr);
extern void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *ptr);
extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr);
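
/*
 * Sketch of the try/commit/cancel swap-in protocol (hypothetical caller,
 * loosely modelled on the swap fault path; install_pte_somehow() is an
 * invented placeholder): charge before mapping the page, then commit or
 * cancel depending on whether the fault completes.
 *
 *	struct mem_cgroup *memcg = NULL;
 *
 *	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &memcg))
 *		goto out_fail;
 *	if (install_pte_somehow() == 0)
 *		mem_cgroup_commit_charge_swapin(page, memcg);
 *	else
 *		mem_cgroup_cancel_charge_swapin(memcg);
 */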
extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask);
extern void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_rotate_reclaimable_page(struct page *page);
extern void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_del_lru(struct page *page);
extern void mem_cgroup_move_lists(struct page *page,
				  enum lru_list from, enum lru_list to);
/* For coalescing uncharges, to reduce memcg overhead. */
extern void mem_cgroup_uncharge_start(void);
extern void mem_cgroup_uncharge_end(void);
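
/*
 * Intended batching pattern (sketch; the caller, page list, and loop below
 * are hypothetical): bracket a run of uncharges so memcg can coalesce its
 * counter updates instead of adjusting them once per page.
 *
 *	mem_cgroup_uncharge_start();
 *	list_for_each_entry_safe(page, next, &pages_to_free, lru)
 *		mem_cgroup_uncharge_page(page);
 *	mem_cgroup_uncharge_end();
 */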
extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);

extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask);
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem);

extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm);
static inline
int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
{
	struct mem_cgroup *mem;

	/* mm->owner is RCU-protected; resolve it to its memcg and compare. */
	rcu_read_lock();
	mem = mem_cgroup_from_task(rcu_dereference((mm)->owner));
	rcu_read_unlock();
	return cgroup == mem;
}
extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem);
extern int
mem_cgroup_prepare_migration(struct page *page,
	struct page *newpage, struct mem_cgroup **ptr, gfp_t gfp_mask);
extern void mem_cgroup_end_migration(struct mem_cgroup *mem,
	struct page *oldpage, struct page *newpage, bool migration_ok);
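
/*
 * Sketch of the migration charge hand-off (hypothetical caller; the real
 * user is the page migration path, and copy_and_remap() is an invented
 * placeholder): charge the new page up front, migrate, then let memcg fix
 * up the charge depending on whether migration succeeded.
 *
 *	struct mem_cgroup *memcg = NULL;
 *	int rc;
 *
 *	if (mem_cgroup_prepare_migration(page, newpage, &memcg, GFP_KERNEL))
 *		return -ENOMEM;
 *	rc = copy_and_remap(page, newpage);
 *	mem_cgroup_end_migration(memcg, page, newpage, rc == 0);
 */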
/*
 * For memory reclaim.
 */
int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg);
int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
unsigned long mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg,
					int nid, int zid, unsigned int lru_mask);
struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
						      struct zone *zone);
struct zone_reclaim_stat *
mem_cgroup_get_reclaim_stat_from_page(struct page *page);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
					struct task_struct *p);
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
extern int do_swap_account;
#endif

static inline bool mem_cgroup_disabled(void)
{
	if (mem_cgroup_subsys.disabled)
		return true;
	return false;
}
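
/*
 * Typical guard pattern (sketch, not a call site from this header): hot
 * paths bail out early when the controller is compiled in but disabled on
 * the kernel command line, e.g.
 *
 *	if (mem_cgroup_disabled())
 *		return;
 */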
void mem_cgroup_update_page_stat(struct page *page,
				 enum mem_cgroup_page_stat_item idx,
				 int val);
static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, 1);
}
static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, -1);
}
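
/*
 * Illustrative use of the stat helpers above: rmap-style code accounting a
 * newly mapped file page would do
 *
 *	mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
 *
 * and call mem_cgroup_dec_page_stat() with the same item when the mapping
 * goes away.
 */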
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned);
u64 mem_cgroup_get_limit(struct mem_cgroup *mem);

void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail);
#endif
#ifdef CONFIG_DEBUG_VM
bool mem_cgroup_bad_page_check(struct page *page);
void mem_cgroup_print_bad_page(struct page *page);
#endif
#else /* CONFIG_CGROUP_MEM_RES_CTLR */
static inline int mem_cgroup_newpage_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_cache_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}
static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t gfp_mask, struct mem_cgroup **ptr)
{
	return 0;
}

static inline void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *ptr)
{
}

static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr)
{
}
static inline void mem_cgroup_uncharge_start(void)
{
}

static inline void mem_cgroup_uncharge_end(void)
{
}

static inline void mem_cgroup_uncharge_page(struct page *page)
{
}

static inline void mem_cgroup_uncharge_cache_page(struct page *page)
{
}

static inline void mem_cgroup_add_lru_list(struct page *page, int lru)
{
}

static inline void mem_cgroup_del_lru_list(struct page *page, int lru)
{
}

static inline void mem_cgroup_rotate_reclaimable_page(struct page *page)
{
}

static inline void mem_cgroup_rotate_lru_list(struct page *page, int lru)
{
}

static inline void mem_cgroup_del_lru(struct page *page)
{
}

static inline void
mem_cgroup_move_lists(struct page *page, enum lru_list from, enum lru_list to)
{
}
static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline int mm_match_cgroup(struct mm_struct *mm, struct mem_cgroup *mem)
{
	return 1;
}

static inline int task_in_mem_cgroup(struct task_struct *task,
				     const struct mem_cgroup *mem)
{
	return 1;
}

static inline struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem)
{
	return NULL;
}
static inline int
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
			     struct mem_cgroup **ptr, gfp_t gfp_mask)
{
	return 0;
}

static inline void mem_cgroup_end_migration(struct mem_cgroup *mem,
		struct page *oldpage, struct page *newpage, bool migration_ok)
{
}
static inline int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
{
	return 0;
}

static inline void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem,
						    int priority)
{
}

static inline void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
						       int priority)
{
}
static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline int
mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
{
	return 1;
}

static inline int
mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
{
	return 1;
}
static inline unsigned long
mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
			     unsigned int lru_mask)
{
	return 0;
}

static inline struct zone_reclaim_stat *
mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, struct zone *zone)
{
	return NULL;
}

static inline struct zone_reclaim_stat *
mem_cgroup_get_reclaim_stat_from_page(struct page *page)
{
	return NULL;
}
static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}
static inline
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}

static inline u64 mem_cgroup_get_limit(struct mem_cgroup *mem)
{
	return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head,
					       struct page *tail)
{
}

static inline void
mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
}
#endif /* CONFIG_CGROUP_MEM_RES_CTLR */
#if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
static inline bool
mem_cgroup_bad_page_check(struct page *page)
{
	return false;
}

static inline void
mem_cgroup_print_bad_page(struct page *page)
{
}
#endif

#endif /* _LINUX_MEMCONTROL_H */