mm, memcontrol: move swap charge handling into get_swap_page()
author    Tejun Heo <tj@kernel.org>
          Fri, 8 Jun 2018 00:05:31 +0000 (17:05 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Fri, 8 Jun 2018 00:34:34 +0000 (17:34 -0700)
Patch series "mm, memcontrol: Implement memory.swap.events", v2.

This patchset implements memory.swap.events which contains max and fail
events so that userland can monitor and respond to swap running out.

This patch (of 2):

get_swap_page() is always followed by mem_cgroup_try_charge_swap().
This patch moves the mem_cgroup_try_charge_swap() call into get_swap_page()
and makes get_swap_page() invoke it even when swap allocation fails.

This simplifies the callers, consolidates the memcg-related logic, and will
ease adding swap-related memcg events.
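
For readers skimming the hunks below, here is a condensed sketch of the
resulting get_swap_page() control flow (simplified, not the verbatim kernel
code):

	swp_entry_t get_swap_page(struct page *page)
	{
		swp_entry_t entry = { .val = 0 };

		/* ... THP / per-cpu cache / get_swap_pages() allocation as before ... */

		/*
		 * The memcg charge is attempted unconditionally, even when
		 * allocation failed (entry.val == 0); mem_cgroup_try_charge_swap()
		 * now bails out early on a zero entry.  Keeping a single call site
		 * here is what the follow-up memory.swap.events patch builds on.
		 */
		if (mem_cgroup_try_charge_swap(page, entry)) {
			put_swap_page(page, entry);
			entry.val = 0;
		}
		return entry;
	}

Callers such as add_to_swap() and shmem_writepage() then treat a zero
entry.val as the only failure signal and no longer call
mem_cgroup_try_charge_swap() themselves.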

Link: http://lkml.kernel.org/r/20180416230934.GH1911913@devbig577.frc2.facebook.com
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Roman Gushchin <guro@fb.com>
Cc: Rik van Riel <riel@surriel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/memcontrol.c
mm/shmem.c
mm/swap_slots.c
mm/swap_state.c

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 1695f38630f111c367c72815f6fdd86cd4501ebb..e8166521a4745b58935adb09dc5c379c3f0750c6 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -6012,6 +6012,9 @@ int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
        if (!memcg)
                return 0;
 
+       if (!entry.val)
+               return 0;
+
        memcg = mem_cgroup_id_get_online(memcg);
 
        if (!mem_cgroup_is_root(memcg) &&
diff --git a/mm/shmem.c b/mm/shmem.c
index 9d6c7e5954153b6b678ff4660b0dfddd143c21fb..8c43e207cd3b83ae3ab0d0ef8ead31bf7c9b6cfb 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1322,9 +1322,6 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
        if (!swap.val)
                goto redirty;
 
-       if (mem_cgroup_try_charge_swap(page, swap))
-               goto free_swap;
-
        /*
         * Add inode to shmem_unuse()'s list of swapped-out inodes,
         * if it's not already there.  Do it now before the page is
@@ -1353,7 +1350,6 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
        }
 
        mutex_unlock(&shmem_swaplist_mutex);
-free_swap:
        put_swap_page(page, swap);
 redirty:
        set_page_dirty(page);
diff --git a/mm/swap_slots.c b/mm/swap_slots.c
index f2641894f4409ad27fcc5319dbbcb336eae14a7d..f51ac051c0c9ede270c925081ba4be6428f9fe4b 100644
--- a/mm/swap_slots.c
+++ b/mm/swap_slots.c
@@ -317,7 +317,7 @@ swp_entry_t get_swap_page(struct page *page)
        if (PageTransHuge(page)) {
                if (IS_ENABLED(CONFIG_THP_SWAP))
                        get_swap_pages(1, true, &entry);
-               return entry;
+               goto out;
        }
 
        /*
@@ -347,10 +347,14 @@ repeat:
                }
                mutex_unlock(&cache->alloc_lock);
                if (entry.val)
-                       return entry;
+                       goto out;
        }
 
        get_swap_pages(1, false, &entry);
-
+out:
+       if (mem_cgroup_try_charge_swap(page, entry)) {
+               put_swap_page(page, entry);
+               entry.val = 0;
+       }
        return entry;
 }
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 07f9aa2340c3a4b5c0b1138feffa1d901aa7ccae..ab8e59cd18ea09df428b3eb6b19e58eb7592564c 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -216,9 +216,6 @@ int add_to_swap(struct page *page)
        if (!entry.val)
                return 0;
 
-       if (mem_cgroup_try_charge_swap(page, entry))
-               goto fail;
-
        /*
         * Radix-tree node allocations from PF_MEMALLOC contexts could
         * completely exhaust the page allocator. __GFP_NOMEMALLOC