From 58ae83db2a40dea15d4277d499a11dadc823c388 Mon Sep 17 00:00:00 2001
From: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Date: Thu, 7 Feb 2008 00:14:32 -0800
Subject: [PATCH] per-zone and reclaim enhancements for memory controller:
 calculate mapper_ratio per cgroup

Define function for calculating mapped_ratio in memory cgroup.

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Herbert Poetzl <herbert@13thfloor.at>
Cc: Kirill Korotaev <dev@sw.ru>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Paul Menage <menage@google.com>
Cc: Pavel Emelianov <xemul@openvz.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
---
 include/linux/memcontrol.h | 11 ++++++++++-
 mm/memcontrol.c            | 17 +++++++++++++++++
 2 files changed, 27 insertions(+), 1 deletion(-)

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 4ec712967f7c..085cdcd817b0 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -64,6 +64,12 @@ extern int mem_cgroup_prepare_migration(struct page *page);
 extern void mem_cgroup_end_migration(struct page *page);
 extern void mem_cgroup_page_migration(struct page *page, struct page *newpage);
 
+/*
+ * For memory reclaim.
+ */
+extern int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem);
+
+
 #else /* CONFIG_CGROUP_MEM_CONT */
 static inline void mm_init_cgroup(struct mm_struct *mm,
 					struct task_struct *p)
@@ -135,7 +141,10 @@ mem_cgroup_page_migration(struct page *page, struct page *newpage)
 {
 }
 
-
+static inline int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
+{
+	return 0;
+}
 #endif /* CONFIG_CGROUP_MEM_CONT */
 
 #endif /* _LINUX_MEMCONTROL_H */
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 1637575d3339..2ef214ed5cf8 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -420,6 +420,23 @@ void mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
 	spin_unlock(&mem->lru_lock);
 }
 
+/*
+ * Calculate mapped_ratio under memory controller. This will be used in
+ * vmscan.c for deteremining we have to reclaim mapped pages.
+ */
+int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
+{
+	long total, rss;
+
+	/*
+	 * usage is recorded in bytes. But, here, we assume the number of
+	 * physical pages can be represented by "long" on any arch.
+	 */
+	total = (long) (mem->res.usage >> PAGE_SHIFT) + 1L;
+	rss = (long)mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
+	return (int)((rss * 100L) / total);
+}
+
 unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 					struct list_head *dst,
 					unsigned long *scanned, int order,
-- 
2.39.5
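
Note (not part of the commit): the new helper reports what percentage of a cgroup's charged pages are mapped (RSS). A minimal standalone sketch of the same arithmetic follows; it assumes a 4 KiB page size (PAGE_SHIFT == 12) and uses made-up sample numbers, since outside the kernel there is no res_counter or mem_cgroup_stat to read from.

#include <stdio.h>

/* Illustrative only: mirrors the arithmetic of mem_cgroup_calc_mapped_ratio().
 * usage_bytes stands in for mem->res.usage and rss_pages for the
 * MEM_CGROUP_STAT_RSS counter; both sample values below are hypothetical.
 */
#define PAGE_SHIFT	12

static int calc_mapped_ratio(unsigned long long usage_bytes, long rss_pages)
{
	/* Convert byte usage to pages; the +1 guards against division by zero. */
	long total = (long)(usage_bytes >> PAGE_SHIFT) + 1L;

	/* Mapped (RSS) pages as a percentage of all charged pages. */
	return (int)((rss_pages * 100L) / total);
}

int main(void)
{
	unsigned long long usage = 256ULL << 20;	/* 256 MiB charged       */
	long rss = 40000L;				/* ~156 MiB of it mapped */

	printf("mapped ratio: %d%%\n", calc_mapped_ratio(usage, rss));	/* prints 61% */
	return 0;
}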