mm: push lru index into shrink_[in]active_list()
author Konstantin Khlebnikov <khlebnikov@openvz.org>
Tue, 29 May 2012 22:06:53 +0000 (15:06 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 29 May 2012 23:22:25 +0000 (16:22 -0700)
Let's pass the lru list index down the call stack to isolate_lru_pages(); this is
better than reconstructing it there from the individual active/file bits
(a standalone sketch of the index encoding follows the sign-offs below).

[akpm@linux-foundation.org: fix kerneldoc, per Minchan]
Signed-off-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Hugh Dickins <hughd@google.com>
Cc: Glauber Costa <glommer@parallels.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
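
For reference, here is a minimal userspace sketch of the LRU index encoding this
patch relies on. The enum values and the is_file_lru()/is_active_lru() helpers are
intended to mirror the kernel's include/linux/mmzone.h and include/linux/mm_inline.h
of this era; the main() harness and its assertions are purely illustrative and are
not part of the patch.

/*
 * Standalone sketch (not kernel code) of the LRU list index encoding.
 * The enum and helpers are meant to match the kernel definitions of
 * this era; the test harness below is illustrative only.
 */
#include <assert.h>
#include <stdio.h>

#define LRU_BASE   0
#define LRU_ACTIVE 1
#define LRU_FILE   2

enum lru_list {
	LRU_INACTIVE_ANON = LRU_BASE,
	LRU_ACTIVE_ANON   = LRU_BASE + LRU_ACTIVE,
	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
	LRU_ACTIVE_FILE   = LRU_BASE + LRU_FILE + LRU_ACTIVE,
	LRU_UNEVICTABLE,
	NR_LRU_LISTS
};

static int is_file_lru(enum lru_list lru)
{
	return lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE;
}

static int is_active_lru(enum lru_list lru)
{
	return lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE;
}

int main(void)
{
	enum lru_list lru;

	for (lru = 0; lru < LRU_UNEVICTABLE; lru++) {
		int file   = is_file_lru(lru);
		int active = is_active_lru(lru);

		/* Old scheme: callers passed (active, file) and the index
		 * was rebuilt from those two bits. */
		enum lru_list rebuilt = LRU_BASE +
			(active ? LRU_ACTIVE : 0) + (file ? LRU_FILE : 0);
		assert(rebuilt == lru);

		/* New scheme: derive the bits from the index instead, and
		 * find the inactive twin of an active list by subtracting
		 * LRU_ACTIVE, as shrink_active_list() does below. */
		if (active)
			assert(lru - LRU_ACTIVE == (file ? LRU_INACTIVE_FILE
							 : LRU_INACTIVE_ANON));

		printf("lru=%d file=%d active=%d\n", lru, file, active);
	}
	return 0;
}

The same ordering assumption is what lets the patched shrink_active_list() use
NR_LRU_BASE + lru for the zone stat update and lru - LRU_ACTIVE when moving
pages back onto the corresponding inactive list.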
mm/vmscan.c

index e234ada187473dd66d8cc1923c9d951412839fb9..987be819fad62196b6c426604b450fe9e6bcf76b 100644
@@ -1044,27 +1044,22 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file)
  * @nr_scanned:        The number of pages that were scanned.
  * @sc:                The scan_control struct for this reclaim session
  * @mode:      One of the LRU isolation modes
- * @active:    True [1] if isolating active pages
- * @file:      True [1] if isolating file [!anon] pages
+ * @lru:       LRU list id for isolating
  *
  * returns how many pages were moved onto *@dst.
  */
 static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
                struct mem_cgroup_zone *mz, struct list_head *dst,
                unsigned long *nr_scanned, struct scan_control *sc,
-               isolate_mode_t mode, int active, int file)
+               isolate_mode_t mode, enum lru_list lru)
 {
        struct lruvec *lruvec;
        struct list_head *src;
        unsigned long nr_taken = 0;
        unsigned long scan;
-       int lru = LRU_BASE;
+       int file = is_file_lru(lru);
 
        lruvec = mem_cgroup_zone_lruvec(mz->zone, mz->mem_cgroup);
-       if (active)
-               lru += LRU_ACTIVE;
-       if (file)
-               lru += LRU_FILE;
        src = &lruvec->lists[lru];
 
        for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
@@ -1277,7 +1272,7 @@ update_isolated_counts(struct mem_cgroup_zone *mz,
  */
 static noinline_for_stack unsigned long
 shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
-                    struct scan_control *sc, int priority, int file)
+                    struct scan_control *sc, int priority, enum lru_list lru)
 {
        LIST_HEAD(page_list);
        unsigned long nr_scanned;
@@ -1288,6 +1283,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
        unsigned long nr_dirty = 0;
        unsigned long nr_writeback = 0;
        isolate_mode_t isolate_mode = ISOLATE_INACTIVE;
+       int file = is_file_lru(lru);
        struct zone *zone = mz->zone;
        struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
 
@@ -1309,7 +1305,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
        spin_lock_irq(&zone->lru_lock);
 
        nr_taken = isolate_lru_pages(nr_to_scan, mz, &page_list, &nr_scanned,
-                                    sc, isolate_mode, 0, file);
+                                    sc, isolate_mode, lru);
        if (global_reclaim(sc)) {
                zone->pages_scanned += nr_scanned;
                if (current_is_kswapd())
@@ -1445,7 +1441,7 @@ static void move_active_pages_to_lru(struct zone *zone,
 static void shrink_active_list(unsigned long nr_to_scan,
                               struct mem_cgroup_zone *mz,
                               struct scan_control *sc,
-                              int priority, int file)
+                              int priority, enum lru_list lru)
 {
        unsigned long nr_taken;
        unsigned long nr_scanned;
@@ -1457,6 +1453,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
        struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
        unsigned long nr_rotated = 0;
        isolate_mode_t isolate_mode = ISOLATE_ACTIVE;
+       int file = is_file_lru(lru);
        struct zone *zone = mz->zone;
 
        lru_add_drain();
@@ -1469,17 +1466,14 @@ static void shrink_active_list(unsigned long nr_to_scan,
        spin_lock_irq(&zone->lru_lock);
 
        nr_taken = isolate_lru_pages(nr_to_scan, mz, &l_hold, &nr_scanned, sc,
-                                    isolate_mode, 1, file);
+                                    isolate_mode, lru);
        if (global_reclaim(sc))
                zone->pages_scanned += nr_scanned;
 
        reclaim_stat->recent_scanned[file] += nr_taken;
 
        __count_zone_vm_events(PGREFILL, zone, nr_scanned);
-       if (file)
-               __mod_zone_page_state(zone, NR_ACTIVE_FILE, -nr_taken);
-       else
-               __mod_zone_page_state(zone, NR_ACTIVE_ANON, -nr_taken);
+       __mod_zone_page_state(zone, NR_LRU_BASE + lru, -nr_taken);
        __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
        spin_unlock_irq(&zone->lru_lock);
 
@@ -1535,10 +1529,8 @@ static void shrink_active_list(unsigned long nr_to_scan,
         */
        reclaim_stat->recent_rotated[file] += nr_rotated;
 
-       move_active_pages_to_lru(zone, &l_active, &l_hold,
-                                               LRU_ACTIVE + file * LRU_FILE);
-       move_active_pages_to_lru(zone, &l_inactive, &l_hold,
-                                               LRU_BASE   + file * LRU_FILE);
+       move_active_pages_to_lru(zone, &l_active, &l_hold, lru);
+       move_active_pages_to_lru(zone, &l_inactive, &l_hold, lru - LRU_ACTIVE);
        __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
        spin_unlock_irq(&zone->lru_lock);
 
@@ -1638,11 +1630,11 @@ static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
 
        if (is_active_lru(lru)) {
                if (inactive_list_is_low(mz, file))
-                       shrink_active_list(nr_to_scan, mz, sc, priority, file);
+                       shrink_active_list(nr_to_scan, mz, sc, priority, lru);
                return 0;
        }
 
-       return shrink_inactive_list(nr_to_scan, mz, sc, priority, file);
+       return shrink_inactive_list(nr_to_scan, mz, sc, priority, lru);
 }
 
 static int vmscan_swappiness(struct mem_cgroup_zone *mz,
@@ -1900,7 +1892,8 @@ restart:
         * rebalance the anon lru active/inactive ratio.
         */
        if (inactive_anon_is_low(mz))
-               shrink_active_list(SWAP_CLUSTER_MAX, mz, sc, priority, 0);
+               shrink_active_list(SWAP_CLUSTER_MAX, mz,
+                                  sc, priority, LRU_ACTIVE_ANON);
 
        /* reclaim/compaction might need reclaim to continue */
        if (should_continue_reclaim(mz, nr_reclaimed,
@@ -2339,7 +2332,7 @@ static void age_active_anon(struct zone *zone, struct scan_control *sc,
 
                if (inactive_anon_is_low(&mz))
                        shrink_active_list(SWAP_CLUSTER_MAX, &mz,
-                                          sc, priority, 0);
+                                          sc, priority, LRU_ACTIVE_ANON);
 
                memcg = mem_cgroup_iter(NULL, memcg, NULL);
        } while (memcg);