memcg: refactor mem_cgroup_resize_limit()
author     Yu Zhao <yuzhao@google.com>
           Thu, 1 Feb 2018 00:20:02 +0000 (16:20 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Thu, 1 Feb 2018 01:18:39 +0000 (17:18 -0800)
mem_cgroup_resize_limit() and mem_cgroup_resize_memsw_limit() have
identical logic.  Refactor the code so we don't need to keep two pieces
of code that do the same thing.

Link: http://lkml.kernel.org/r/20180108224238.14583-1-yuzhao@google.com
Signed-off-by: Yu Zhao <yuzhao@google.com>
Acked-by: Vladimir Davydov <vdavydov.dev@gmail.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
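
The core of the refactor is deriving both the target counter and the
invariant check from a single memsw flag.  Below is a minimal userspace
sketch of that idea; the simplified structs and the resize_limit() helper
are stand-ins for illustration, not the kernel code (no locking, reclaim,
or retry loop):

#include <stdbool.h>
#include <stdio.h>

struct page_counter {
	unsigned long usage;
	unsigned long limit;
};

struct mem_cgroup {
	struct page_counter memory;
	struct page_counter memsw;
};

/* Returns 0 on success, -1 if the new limit would break the invariant. */
static int resize_limit(struct mem_cgroup *memcg, unsigned long limit,
			bool memsw)
{
	/* The flag picks which counter we are resizing... */
	struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;
	bool limits_invariant;

	/*
	 * ...and which side of the basic invariant
	 * memory.limit <= memsw.limit must be checked: a new memsw limit
	 * is checked against memory.limit, a new memory limit against
	 * memsw.limit.
	 */
	limits_invariant = memsw ? limit >= memcg->memory.limit :
				   limit <= memcg->memsw.limit;
	if (!limits_invariant)
		return -1;

	counter->limit = limit;
	return 0;
}

int main(void)
{
	struct mem_cgroup cg = {
		.memory = { .usage = 0, .limit = 100 },
		.memsw  = { .usage = 0, .limit = 200 },
	};

	/* Lowering memsw below memory.limit violates the invariant. */
	printf("set memsw=50:   %d (expect -1)\n", resize_limit(&cg, 50, true));
	/* Raising the plain memory limit within memsw.limit is fine. */
	printf("set memory=150: %d (expect 0)\n", resize_limit(&cg, 150, false));
	return 0;
}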
mm/memcontrol.c

index 51d398f1363c46495c4cc1e9711ccf70159667f0..695d9f10906ee5656247c0c11430a5be7621050b 100644
@@ -2461,13 +2461,15 @@ static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
 static DEFINE_MUTEX(memcg_limit_mutex);
 
 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
-                                  unsigned long limit)
+                                  unsigned long limit, bool memsw)
 {
        unsigned long curusage;
        unsigned long oldusage;
        bool enlarge = false;
        int retry_count;
        int ret;
+       bool limits_invariant;
+       struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;
 
        /*
         * For keeping hierarchical_reclaim simple, how long we should retry
@@ -2477,7 +2479,7 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
        retry_count = MEM_CGROUP_RECLAIM_RETRIES *
                      mem_cgroup_count_children(memcg);
 
-       oldusage = page_counter_read(&memcg->memory);
+       oldusage = page_counter_read(counter);
 
        do {
                if (signal_pending(current)) {
@@ -2486,73 +2488,28 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
                }
 
                mutex_lock(&memcg_limit_mutex);
-               if (limit > memcg->memsw.limit) {
-                       mutex_unlock(&memcg_limit_mutex);
-                       ret = -EINVAL;
-                       break;
-               }
-               if (limit > memcg->memory.limit)
-                       enlarge = true;
-               ret = page_counter_limit(&memcg->memory, limit);
-               mutex_unlock(&memcg_limit_mutex);
-
-               if (!ret)
-                       break;
-
-               try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true);
-
-               curusage = page_counter_read(&memcg->memory);
-               /* Usage is reduced ? */
-               if (curusage >= oldusage)
-                       retry_count--;
-               else
-                       oldusage = curusage;
-       } while (retry_count);
-
-       if (!ret && enlarge)
-               memcg_oom_recover(memcg);
-
-       return ret;
-}
-
-static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
-                                        unsigned long limit)
-{
-       unsigned long curusage;
-       unsigned long oldusage;
-       bool enlarge = false;
-       int retry_count;
-       int ret;
-
-       /* see mem_cgroup_resize_res_limit */
-       retry_count = MEM_CGROUP_RECLAIM_RETRIES *
-                     mem_cgroup_count_children(memcg);
-
-       oldusage = page_counter_read(&memcg->memsw);
-
-       do {
-               if (signal_pending(current)) {
-                       ret = -EINTR;
-                       break;
-               }
-
-               mutex_lock(&memcg_limit_mutex);
-               if (limit < memcg->memory.limit) {
+               /*
+                * Make sure that the new limit (memsw or memory limit) doesn't
+                * break our basic invariant rule memory.limit <= memsw.limit.
+                */
+               limits_invariant = memsw ? limit >= memcg->memory.limit :
+                                          limit <= memcg->memsw.limit;
+               if (!limits_invariant) {
                        mutex_unlock(&memcg_limit_mutex);
                        ret = -EINVAL;
                        break;
                }
-               if (limit > memcg->memsw.limit)
+               if (limit > counter->limit)
                        enlarge = true;
-               ret = page_counter_limit(&memcg->memsw, limit);
+               ret = page_counter_limit(counter, limit);
                mutex_unlock(&memcg_limit_mutex);
 
                if (!ret)
                        break;
 
-               try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, false);
+               try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, !memsw);
 
-               curusage = page_counter_read(&memcg->memsw);
+               curusage = page_counter_read(counter);
                /* Usage is reduced ? */
                if (curusage >= oldusage)
                        retry_count--;
@@ -3014,10 +2971,10 @@ static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
                }
                switch (MEMFILE_TYPE(of_cft(of)->private)) {
                case _MEM:
-                       ret = mem_cgroup_resize_limit(memcg, nr_pages);
+                       ret = mem_cgroup_resize_limit(memcg, nr_pages, false);
                        break;
                case _MEMSWAP:
-                       ret = mem_cgroup_resize_memsw_limit(memcg, nr_pages);
+                       ret = mem_cgroup_resize_limit(memcg, nr_pages, true);
                        break;
                case _KMEM:
                        ret = memcg_update_kmem_limit(memcg, nr_pages);
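
In cgroup v1, the _MEM and _MEMSWAP cases above are reached by writes to
memory.limit_in_bytes and memory.memsw.limit_in_bytes respectively; after
this patch both control files funnel into the single
mem_cgroup_resize_limit() helper and differ only in the memsw flag passed
to it.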