git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blobdiff - mm/vmscan.c
memcg: reclaim shouldn't change zone->recent_rotated statistics
[mirror_ubuntu-jammy-kernel.git] / mm / vmscan.c
index d196f46c8808ea56734f12501d0e53968cc3f4eb..da7c3a2304a7d45d2434ef2aa209dc88b5c5cb4c 100644 (file)
@@ -617,7 +617,6 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                                        referenced && page_mapping_inuse(page))
                        goto activate_locked;
 
-#ifdef CONFIG_SWAP
                /*
                 * Anonymous process memory has backing store?
                 * Try to allocate it some swap space here.
@@ -625,20 +624,10 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                if (PageAnon(page) && !PageSwapCache(page)) {
                        if (!(sc->gfp_mask & __GFP_IO))
                                goto keep_locked;
-                       switch (try_to_munlock(page)) {
-                       case SWAP_FAIL:         /* shouldn't happen */
-                       case SWAP_AGAIN:
-                               goto keep_locked;
-                       case SWAP_MLOCK:
-                               goto cull_mlocked;
-                       case SWAP_SUCCESS:
-                               ; /* fall thru'; add to swap cache */
-                       }
-                       if (!add_to_swap(page, GFP_ATOMIC))
+                       if (!add_to_swap(page))
                                goto activate_locked;
                        may_enter_fs = 1;
                }
-#endif /* CONFIG_SWAP */
 
                mapping = page_mapping(page);
 
@@ -752,6 +741,8 @@ free_it:
                continue;
 
 cull_mlocked:
+               if (PageSwapCache(page))
+                       try_to_free_swap(page);
                unlock_page(page);
                putback_lru_page(page);
                continue;
@@ -759,7 +750,7 @@ cull_mlocked:
 activate_locked:
                /* Not a candidate for swapping, so reclaim swap space. */
                if (PageSwapCache(page) && vm_swap_full())
-                       remove_exclusive_swap_page_ref(page);
+                       try_to_free_swap(page);
                VM_BUG_ON(PageActive(page));
                SetPageActive(page);
                pgactivate++;
@@ -1255,7 +1246,8 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
         * This helps balance scan pressure between file and anonymous
         * pages in get_scan_ratio.
         */
-       zone->recent_rotated[!!file] += pgmoved;
+       if (scan_global_lru(sc))
+               zone->recent_rotated[!!file] += pgmoved;
 
        /*
         * Move the pages to the [file or anon] inactive list.
@@ -1336,12 +1328,6 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
        unsigned long anon_prio, file_prio;
        unsigned long ap, fp;
 
-       anon  = zone_page_state(zone, NR_ACTIVE_ANON) +
-               zone_page_state(zone, NR_INACTIVE_ANON);
-       file  = zone_page_state(zone, NR_ACTIVE_FILE) +
-               zone_page_state(zone, NR_INACTIVE_FILE);
-       free  = zone_page_state(zone, NR_FREE_PAGES);
-
        /* If we have no swap space, do not bother scanning anon pages. */
        if (nr_swap_pages <= 0) {
                percent[0] = 0;
@@ -1349,6 +1335,12 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
                return;
        }
 
+       anon  = zone_page_state(zone, NR_ACTIVE_ANON) +
+               zone_page_state(zone, NR_INACTIVE_ANON);
+       file  = zone_page_state(zone, NR_ACTIVE_FILE) +
+               zone_page_state(zone, NR_INACTIVE_FILE);
+       free  = zone_page_state(zone, NR_FREE_PAGES);
+
        /* If we have very few page cache pages, force-scan anon pages. */
        if (unlikely(file + free <= zone->pages_high)) {
                percent[0] = 100;