vmscan: fix do_try_to_free_pages() return value when priority==0 reclaim failure
author KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Fri, 4 Jun 2010 21:15:05 +0000 (14:15 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 4 Jun 2010 22:21:45 +0000 (15:21 -0700)
Greg Thelen reported that Johannes's recent stack diet patch makes the
kernel hang.  His test case is as follows:

  mount -t cgroup none /cgroups -o memory
  mkdir /cgroups/cg1
  echo $$ > /cgroups/cg1/tasks
  dd bs=1024 count=1024 if=/dev/null of=/data/foo
  echo $$ > /cgroups/tasks
  echo 1 > /cgroups/cg1/memory.force_empty

Actually, this "try hard before OOM" logic has been broken since the
following two-year-old patch:

commit a41f24ea9fd6169b147c53c2392e2887cc1d9247
Author: Nishanth Aravamudan <nacc@us.ibm.com>
Date:   Tue Apr 29 00:58:25 2008 -0700

    page allocator: smarter retry of costly-order allocations

The original intention was "return success if the system has shrinkable
zones even though priority==0 reclaim failed".  But the above patch
changed it to "return nr_reclaimed if .....", forgetting that
nr_reclaimed may be 0 when priority==0 reclaim fails.
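
As a minimal sketch (simplified; the names mirror the pre-fix
mm/vmscan.c, everything else is elided), the old return path looked
roughly like this, which is where the zero return sneaks in:

  for (priority = DEF_PRIORITY; priority >= 0; priority--) {
          ret = shrink_zones(priority, zonelist, sc);     /* 0 or 1 */
          if (sc->nr_reclaimed >= sc->nr_to_reclaim) {
                  ret = sc->nr_reclaimed;
                  goto out;
          }
  }
  /* top priority shrink_zones still had more to do? don't OOM, then */
  if (ret && scanning_global_lru(sc))
          ret = sc->nr_reclaimed;         /* oops: may still be 0 */
  out:
  return ret;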

Johannes's patch 0aeb2339e54e ("vmscan: remove all_unreclaimable scan
control") then made things worse.  Originally, a priority==0 reclaim
failure on memcg returned 0, but that patch changed it to return 1,
which completely confused memcg.
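
To see why that hangs Greg's test, here is a simplified sketch of the
memory.force_empty retry loop (the real loop is in mm/memcontrol.c;
mem_cgroup_usage() stands in for the actual usage-counter read, and the
try_to_free_mem_cgroup_pages() arguments are abbreviated):

  /* Only give up after reclaim reports no progress a few times. */
  while (nr_retries && mem_cgroup_usage(memcg) > 0) {
          int progress;

          progress = try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL);
          if (!progress) {
                  nr_retries--;
                  /* maybe some writeback is necessary */
                  congestion_wait(BLK_RW_ASYNC, HZ/10);
          }
  }

If memcg reclaim can no longer free anything but keeps returning 1,
nr_retries is never decremented and this loop spins forever.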

This patch fixes it completely.
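
After the change, do_try_to_free_pages() returns sc->nr_reclaimed when
anything was reclaimed, 1 when global reclaim reclaimed nothing but some
zone was still shrinkable (so the allocator does not OOM prematurely),
and 0 otherwise (so memcg callers such as the loop above can give up).
A hedged sketch of the global consumer, simplified from the allocator
slow path of this era:

  did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
  if (!did_some_progress) {
          /* reclaim reported total failure: the OOM killer may run */
  }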

Reported-by: Greg Thelen <gthelen@google.com>
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Tested-by: Greg Thelen <gthelen@google.com>
Acked-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 915dceb487c11b1f1783df86908ba5e6e7317769..9c7e57cc63a34f7231b77a7d8b395d3157a34ba6 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1724,13 +1724,13 @@ static void shrink_zone(int priority, struct zone *zone,
  * If a zone is deemed to be full of pinned pages then just give it a light
  * scan then give up on it.
  */
-static int shrink_zones(int priority, struct zonelist *zonelist,
+static bool shrink_zones(int priority, struct zonelist *zonelist,
                                        struct scan_control *sc)
 {
        enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
        struct zoneref *z;
        struct zone *zone;
-       int progress = 0;
+       bool all_unreclaimable = true;
 
        for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
                                        sc->nodemask) {
@@ -1757,9 +1757,9 @@ static int shrink_zones(int priority, struct zonelist *zonelist,
                }
 
                shrink_zone(priority, zone, sc);
-               progress = 1;
+               all_unreclaimable = false;
        }
-       return progress;
+       return all_unreclaimable;
 }
 
 /*
@@ -1782,7 +1782,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
                                        struct scan_control *sc)
 {
        int priority;
-       unsigned long ret = 0;
+       bool all_unreclaimable;
        unsigned long total_scanned = 0;
        struct reclaim_state *reclaim_state = current->reclaim_state;
        unsigned long lru_pages = 0;
@@ -1813,7 +1813,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
                sc->nr_scanned = 0;
                if (!priority)
                        disable_swap_token();
-               ret = shrink_zones(priority, zonelist, sc);
+               all_unreclaimable = shrink_zones(priority, zonelist, sc);
                /*
                 * Don't shrink slabs when reclaiming memory from
                 * over limit cgroups
@@ -1826,10 +1826,8 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
                        }
                }
                total_scanned += sc->nr_scanned;
-               if (sc->nr_reclaimed >= sc->nr_to_reclaim) {
-                       ret = sc->nr_reclaimed;
+               if (sc->nr_reclaimed >= sc->nr_to_reclaim)
                        goto out;
-               }
 
                /*
                 * Try to write back as many pages as we just scanned.  This
@@ -1849,9 +1847,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
                    priority < DEF_PRIORITY - 2)
                        congestion_wait(BLK_RW_ASYNC, HZ/10);
        }
-       /* top priority shrink_zones still had more to do? don't OOM, then */
-       if (ret && scanning_global_lru(sc))
-               ret = sc->nr_reclaimed;
+
 out:
        /*
         * Now that we've scanned all the zones at this priority level, note
@@ -1877,7 +1873,14 @@ out:
        delayacct_freepages_end();
        put_mems_allowed();
 
-       return ret;
+       if (sc->nr_reclaimed)
+               return sc->nr_reclaimed;
+
+       /* top priority shrink_zones still had more to do? don't OOM, then */
+       if (scanning_global_lru(sc) && !all_unreclaimable)
+               return 1;
+
+       return 0;
 }
 
 unsigned long try_to_free_pages(struct zonelist *zonelist, int order,