hugetlb/cgroup: migrate hugetlb cgroup info from oldpage to new page during migration
author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Tue, 31 Jul 2012 23:42:27 +0000 (16:42 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 1 Aug 2012 01:42:41 +0000 (18:42 -0700)
With HugeTLB pages, the hugetlb cgroup is uncharged in the compound page
destructor.  Since we are holding a hugepage reference, we can be sure
that the old page won't get uncharged till the last put_page().

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: David Rientjes <rientjes@google.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hillf Danton <dhillf@gmail.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
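
For illustration only, a toy userspace model of the charge hand-off this patch
adds: the cgroup charge is a single pointer carried by the hugepage, and it is
moved from the old page to the new page under one lock, so the charge is
neither dropped nor counted twice before the old page's final put_page().  The
struct page / struct cgroup stand-ins, migrate_charge() and the pthread mutex
are invented substitutes for the kernel's types, hugetlb_cgroup_migrate() and
hugetlb_lock; this is not kernel code.

/*
 * Toy model of the migration-time charge transfer (build with -pthread).
 * All names here are illustrative stand-ins, not kernel symbols.
 */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>

struct cgroup { int usage; };          /* stands in for struct hugetlb_cgroup */
struct page   { struct cgroup *cg; };  /* the cgroup pointer a hugepage carries */

static pthread_mutex_t hugetlb_lock = PTHREAD_MUTEX_INITIALIZER;

/* mirrors the shape of hugetlb_cgroup_migrate() in the diff below */
static void migrate_charge(struct page *oldp, struct page *newp)
{
	struct cgroup *cg;

	pthread_mutex_lock(&hugetlb_lock);
	cg = oldp->cg;          /* hugetlb_cgroup_from_page(oldhpage) */
	oldp->cg = NULL;        /* set_hugetlb_cgroup(oldhpage, NULL) */
	newp->cg = cg;          /* set_hugetlb_cgroup(newhpage, h_cg) */
	pthread_mutex_unlock(&hugetlb_lock);
}

int main(void)
{
	struct cgroup cg = { .usage = 1 };
	struct page oldp = { .cg = &cg }, newp = { .cg = NULL };

	migrate_charge(&oldp, &newp);

	/* the charge moved; it was never dropped and never doubled */
	assert(oldp.cg == NULL && newp.cg == &cg && cg.usage == 1);
	printf("usage after migration: %d\n", cg.usage);
	return 0;
}

The cgroup_exclude_rmdir()/cgroup_release_and_wakeup_rmdir() pair in the real
function is not modeled here; it keeps removal of the cgroup from racing with
the charge transfer.
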
include/linux/hugetlb_cgroup.h
mm/hugetlb_cgroup.c
mm/migrate.c

diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h
index 73f1e600fc1217d7f843de46df3a1dc49b9d3468..d73878c694b3d43e85fc8b2d62f29107ed61d157 100644
--- a/include/linux/hugetlb_cgroup.h
+++ b/include/linux/hugetlb_cgroup.h
@@ -63,6 +63,8 @@ extern void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
 extern void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
                                           struct hugetlb_cgroup *h_cg);
 extern int hugetlb_cgroup_file_init(int idx) __init;
+extern void hugetlb_cgroup_migrate(struct page *oldhpage,
+                                  struct page *newhpage);
 
 #else
 static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
@@ -114,5 +116,11 @@ static inline int __init hugetlb_cgroup_file_init(int idx)
        return 0;
 }
 
+static inline void hugetlb_cgroup_migrate(struct page *oldhpage,
+                                         struct page *newhpage)
+{
+       return;
+}
+
 #endif  /* CONFIG_MEM_RES_CTLR_HUGETLB */
 #endif
diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c
index d1ca1196e62ffd577057a04b5ab02cabc3264634..680e4819e0775ed50bdb7e928b62cf44a9d005aa 100644
--- a/mm/hugetlb_cgroup.c
+++ b/mm/hugetlb_cgroup.c
@@ -386,6 +386,26 @@ int __init hugetlb_cgroup_file_init(int idx)
        return 0;
 }
 
+void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
+{
+       struct hugetlb_cgroup *h_cg;
+
+       if (hugetlb_cgroup_disabled())
+               return;
+
+       VM_BUG_ON(!PageHuge(oldhpage));
+       spin_lock(&hugetlb_lock);
+       h_cg = hugetlb_cgroup_from_page(oldhpage);
+       set_hugetlb_cgroup(oldhpage, NULL);
+       cgroup_exclude_rmdir(&h_cg->css);
+
+       /* move the h_cg details to new cgroup */
+       set_hugetlb_cgroup(newhpage, h_cg);
+       spin_unlock(&hugetlb_lock);
+       cgroup_release_and_wakeup_rmdir(&h_cg->css);
+       return;
+}
+
 struct cgroup_subsys hugetlb_subsys = {
        .name = "hugetlb",
        .create     = hugetlb_cgroup_create,
diff --git a/mm/migrate.c b/mm/migrate.c
index fdce3a29fc4c2261a73d5f4201f9bcbb4b0b1b40..6c37c51565e5190ca4d716617817aa361268175e 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -33,6 +33,7 @@
 #include <linux/memcontrol.h>
 #include <linux/syscalls.h>
 #include <linux/hugetlb.h>
+#include <linux/hugetlb_cgroup.h>
 #include <linux/gfp.h>
 
 #include <asm/tlbflush.h>
@@ -931,6 +932,10 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 
        if (anon_vma)
                put_anon_vma(anon_vma);
+
+       if (!rc)
+               hugetlb_cgroup_migrate(hpage, new_hpage);
+
        unlock_page(hpage);
 out:
        put_page(new_hpage);