mm: shrink_inactive_list() nr_scan accounting fix fix
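
This blobdiff folds several vmscan changes into one view: shrink_page_list()
no longer activates VM_LOCKED pages, so try_to_unmap() can move them to the
unevictable list; shrink_inactive_list() accounts nr_scan (zone->pages_scanned
and the PGSCAN_KSWAPD/PGSCAN_DIRECT events) immediately after isolation and
bails out early when nothing was isolated; shrink_active_list() splits its
overloaded pgmoved variable into nr_taken and nr_rotated; and
do_try_to_free_pages() calls wakeup_flusher_threads() in place of the removed
wakeup_pdflush().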
diff --git a/mm/vmscan.c b/mm/vmscan.c
index dea7abd310980daea1fa6c5a0c850a972fe37a21..d86a91f8c16bab842e1abf5d458d4ef4227e5f33 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -630,9 +630,14 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 
                referenced = page_referenced(page, 1,
                                                sc->mem_cgroup, &vm_flags);
-               /* In active use or really unfreeable?  Activate it. */
+               /*
+                * In active use or really unfreeable?  Activate it.
+                * If a page with PG_mlocked set lost the isolation race,
+                * try_to_unmap() moves it to the unevictable list.
+                */
                if (sc->order <= PAGE_ALLOC_COSTLY_ORDER &&
-                                       referenced && page_mapping_inuse(page))
+                                       referenced && page_mapping_inuse(page)
+                                       && !(vm_flags & VM_LOCKED))
                        goto activate_locked;
 
                /*
@@ -1071,6 +1076,20 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                nr_taken = sc->isolate_pages(sc->swap_cluster_max,
                             &page_list, &nr_scan, sc->order, mode,
                                zone, sc->mem_cgroup, 0, file);
+
+               if (scanning_global_lru(sc)) {
+                       zone->pages_scanned += nr_scan;
+                       if (current_is_kswapd())
+                               __count_zone_vm_events(PGSCAN_KSWAPD, zone,
+                                                      nr_scan);
+                       else
+                               __count_zone_vm_events(PGSCAN_DIRECT, zone,
+                                                      nr_scan);
+               }
+
+               if (nr_taken == 0)
+                       goto done;
+
                nr_active = clear_active_flags(&page_list, count);
                __count_vm_events(PGDEACTIVATE, nr_active);
 
@@ -1083,8 +1102,6 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                __mod_zone_page_state(zone, NR_INACTIVE_ANON,
                                                -count[LRU_INACTIVE_ANON]);
 
-               if (scanning_global_lru(sc))
-                       zone->pages_scanned += nr_scan;
 
                reclaim_stat->recent_scanned[0] += count[LRU_INACTIVE_ANON];
                reclaim_stat->recent_scanned[0] += count[LRU_ACTIVE_ANON];
@@ -1118,18 +1135,12 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                }
 
                nr_reclaimed += nr_freed;
+
                local_irq_disable();
-               if (current_is_kswapd()) {
-                       __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scan);
+               if (current_is_kswapd())
                        __count_vm_events(KSWAPD_STEAL, nr_freed);
-               } else if (scanning_global_lru(sc))
-                       __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scan);
-
                __count_zone_vm_events(PGSTEAL, zone, nr_freed);
 
-               if (nr_taken == 0)
-                       goto done;
-
                spin_lock(&zone->lru_lock);
                /*
                 * Put back any unfreeable pages.
@@ -1159,9 +1170,9 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                        }
                }
        } while (nr_scanned < max_scan);
-       spin_unlock(&zone->lru_lock);
+
 done:
-       local_irq_enable();
+       spin_unlock_irq(&zone->lru_lock);
        pagevec_release(&pvec);
        return nr_reclaimed;
 }
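
Read together, the shrink_inactive_list() hunks reorder the per-iteration
bookkeeping: scan events are counted right after isolation, so they are
recorded even when nothing is taken, and an empty isolation round now skips
shrink_page_list() entirely. A condensed sketch of the resulting loop shape,
with the unchanged middle elided (not the verbatim kernel code):

	do {
		nr_taken = sc->isolate_pages(sc->swap_cluster_max,
					     &page_list, &nr_scan, sc->order,
					     mode, zone, sc->mem_cgroup, 0, file);

		/* Account the scan whether or not anything was isolated. */
		if (scanning_global_lru(sc)) {
			zone->pages_scanned += nr_scan;
			if (current_is_kswapd())
				__count_zone_vm_events(PGSCAN_KSWAPD, zone,
						       nr_scan);
			else
				__count_zone_vm_events(PGSCAN_DIRECT, zone,
						       nr_scan);
		}

		/* Nothing isolated: no point running shrink_page_list(). */
		if (nr_taken == 0)
			goto done;

		/* ... shrink_page_list(), steal accounting, putback ... */
	} while (nr_scanned < max_scan);

done:
	spin_unlock_irq(&zone->lru_lock);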
@@ -1239,7 +1250,7 @@ static void move_active_pages_to_lru(struct zone *zone,
 static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                        struct scan_control *sc, int priority, int file)
 {
-       unsigned long pgmoved;
+       unsigned long nr_taken;
        unsigned long pgscanned;
        unsigned long vm_flags;
        LIST_HEAD(l_hold);      /* The pages which were snipped off */
@@ -1247,10 +1258,11 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
        LIST_HEAD(l_inactive);
        struct page *page;
        struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
+       unsigned long nr_rotated = 0;
 
        lru_add_drain();
        spin_lock_irq(&zone->lru_lock);
-       pgmoved = sc->isolate_pages(nr_pages, &l_hold, &pgscanned, sc->order,
+       nr_taken = sc->isolate_pages(nr_pages, &l_hold, &pgscanned, sc->order,
                                        ISOLATE_ACTIVE, zone,
                                        sc->mem_cgroup, 1, file);
        /*
@@ -1260,16 +1272,15 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
        if (scanning_global_lru(sc)) {
                zone->pages_scanned += pgscanned;
        }
-       reclaim_stat->recent_scanned[!!file] += pgmoved;
+       reclaim_stat->recent_scanned[!!file] += nr_taken;
 
        __count_zone_vm_events(PGREFILL, zone, pgscanned);
        if (file)
-               __mod_zone_page_state(zone, NR_ACTIVE_FILE, -pgmoved);
+               __mod_zone_page_state(zone, NR_ACTIVE_FILE, -nr_taken);
        else
-               __mod_zone_page_state(zone, NR_ACTIVE_ANON, -pgmoved);
+               __mod_zone_page_state(zone, NR_ACTIVE_ANON, -nr_taken);
        spin_unlock_irq(&zone->lru_lock);
 
-       pgmoved = 0;  /* count referenced (mapping) mapped pages */
        while (!list_empty(&l_hold)) {
                cond_resched();
                page = lru_to_page(&l_hold);
@@ -1283,7 +1294,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                /* page_referenced clears PageReferenced */
                if (page_mapping_inuse(page) &&
                    page_referenced(page, 0, sc->mem_cgroup, &vm_flags)) {
-                       pgmoved++;
+                       nr_rotated++;
                        /*
                         * Identify referenced, file-backed active pages and
                         * give them one more trip around the active list. So
@@ -1312,7 +1323,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
         * helps balance scan pressure between file and anonymous pages in
         * get_scan_ratio.
         */
-       reclaim_stat->recent_rotated[!!file] += pgmoved;
+       reclaim_stat->recent_rotated[!!file] += nr_rotated;
 
        move_active_pages_to_lru(zone, &l_active,
                                                LRU_ACTIVE + file * LRU_FILE);
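
In shrink_active_list(), pgmoved previously did double duty: first as the
number of pages isolated off the active list, then (after being reset to 0)
as the count of referenced pages. The rename splits those roles so each
statistic keeps its own name:

	nr_taken   -> reclaim_stat->recent_scanned[!!file] and the
	              NR_ACTIVE_FILE/NR_ACTIVE_ANON adjustments
	nr_rotated -> reclaim_stat->recent_rotated[!!file]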
@@ -1715,7 +1726,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
                 */
                if (total_scanned > sc->swap_cluster_max +
                                        sc->swap_cluster_max / 2) {
-                       wakeup_pdflush(laptop_mode ? 0 : total_scanned);
+                       wakeup_flusher_threads(laptop_mode ? 0 : total_scanned);
                        sc->may_writepage = 1;
                }
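
The last hunk tracks an API change rather than a reclaim-logic change:
pdflush was retired in favor of per-BDI flusher threads, and
wakeup_flusher_threads() is the replacement entry point. As with the old
wakeup_pdflush(), the argument is the number of pages to write back, with
laptop_mode passing 0 to request writeback of everything while the disk is
already spinning.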