git.proxmox.com Git - mirror_ubuntu-disco-kernel.git/commitdiff
mm: have order > 0 compaction start off where it left
author: Rik van Riel <riel@redhat.com>
Tue, 31 Jul 2012 23:43:12 +0000 (16:43 -0700)
committer: Linus Torvalds <torvalds@linux-foundation.org>
Wed, 1 Aug 2012 01:42:43 +0000 (18:42 -0700)
Order > 0 compaction stops when enough free pages of the correct page
order have been coalesced.  When doing subsequent higher order
allocations, it is possible for compaction to be invoked many times.

However, the compaction code always starts out looking for things to
compact at the start of the zone, and for free pages to compact things to
at the end of the zone.

This can cause quadratic behaviour, with isolate_freepages starting at the
end of the zone each time, even though previous invocations of the
compaction code already filled up all free memory on that end of the zone.

This can cause isolate_freepages to take enormous amounts of CPU with
certain workloads on larger memory systems.

The obvious solution is to have isolate_freepages remember where it left
off last time, and continue at that point the next time it gets invoked
for an order > 0 compaction.  This could cause compaction to fail if
cc->free_pfn and cc->migrate_pfn are close together initially; in that
case we restart from the end of the zone and try once more.

Forced full (order == -1) compactions are left alone.

[akpm@linux-foundation.org: checkpatch fixes]
[akpm@linux-foundation.org: s/laste/last/, use 80 cols]
Signed-off-by: Rik van Riel <riel@redhat.com>
Reported-by: Jim Schutt <jaschut@sandia.gov>
Tested-by: Jim Schutt <jaschut@sandia.gov>
Cc: Minchan Kim <minchan.kim@gmail.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/mmzone.h
mm/compaction.c
mm/internal.h
mm/page_alloc.c

index 1495d952e64153ef41ac04c2e50a94216a17b47b..1aeadce4d56eb5d747f740dddf4d27368c37a8ab 100644 (file)
@@ -368,6 +368,10 @@ struct zone {
         */
        spinlock_t              lock;
        int                     all_unreclaimable; /* All pages pinned */
+#if defined CONFIG_COMPACTION || defined CONFIG_CMA
+       /* pfn where the last incremental compaction isolated free pages */
+       unsigned long           compact_cached_free_pfn;
+#endif
 #ifdef CONFIG_MEMORY_HOTPLUG
        /* see spanned/present_pages for more description */
        seqlock_t               span_seqlock;
index 2f42d952853970b5dd1f95a149d8750e7d570357..e78cb968842163ff65154346e6721aac91070319 100644 (file)
@@ -422,6 +422,17 @@ static void isolate_freepages(struct zone *zone,
                                        pfn -= pageblock_nr_pages) {
                unsigned long isolated;
 
+               /*
+                * Skip ahead if another thread is compacting in the area
+                * simultaneously. If we wrapped around, we can only skip
+                * ahead if zone->compact_cached_free_pfn also wrapped to
+                * above our starting point.
+                */
+               if (cc->order > 0 && (!cc->wrapped ||
+                                     zone->compact_cached_free_pfn >
+                                     cc->start_free_pfn))
+                       pfn = min(pfn, zone->compact_cached_free_pfn);
+
                if (!pfn_valid(pfn))
                        continue;
 
@@ -461,8 +472,11 @@ static void isolate_freepages(struct zone *zone,
                 * looking for free pages, the search will restart here as
                 * page migration may have returned some pages to the allocator
                 */
-               if (isolated)
+               if (isolated) {
                        high_pfn = max(high_pfn, pfn);
+                       if (cc->order > 0)
+                               zone->compact_cached_free_pfn = high_pfn;
+               }
        }
 
        /* split_free_page does not map the pages */
@@ -556,6 +570,20 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
        return ISOLATE_SUCCESS;
 }
 
+/*
+ * Returns the start pfn of the last page block in a zone.  This is the starting
+ * point for full compaction of a zone.  Compaction searches for free pages from
+ * the end of each zone, while isolate_freepages_block scans forward inside each
+ * page block.
+ */
+static unsigned long start_free_pfn(struct zone *zone)
+{
+       unsigned long free_pfn;
+       free_pfn = zone->zone_start_pfn + zone->spanned_pages;
+       free_pfn &= ~(pageblock_nr_pages-1);
+       return free_pfn;
+}
+
 static int compact_finished(struct zone *zone,
                            struct compact_control *cc)
 {
@@ -565,8 +593,26 @@ static int compact_finished(struct zone *zone,
        if (fatal_signal_pending(current))
                return COMPACT_PARTIAL;
 
-       /* Compaction run completes if the migrate and free scanner meet */
-       if (cc->free_pfn <= cc->migrate_pfn)
+       /*
+        * A full (order == -1) compaction run starts at the beginning and
+        * end of a zone; it completes when the migrate and free scanner meet.
+        * A partial (order > 0) compaction can start with the free scanner
+        * at a random point in the zone, and may have to restart.
+        */
+       if (cc->free_pfn <= cc->migrate_pfn) {
+               if (cc->order > 0 && !cc->wrapped) {
+                       /* We started partway through; restart at the end. */
+                       unsigned long free_pfn = start_free_pfn(zone);
+                       zone->compact_cached_free_pfn = free_pfn;
+                       cc->free_pfn = free_pfn;
+                       cc->wrapped = 1;
+                       return COMPACT_CONTINUE;
+               }
+               return COMPACT_COMPLETE;
+       }
+
+       /* We wrapped around and ended up where we started. */
+       if (cc->wrapped && cc->free_pfn <= cc->start_free_pfn)
                return COMPACT_COMPLETE;
 
        /*
@@ -664,8 +710,15 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 
        /* Setup to move all movable pages to the end of the zone */
        cc->migrate_pfn = zone->zone_start_pfn;
-       cc->free_pfn = cc->migrate_pfn + zone->spanned_pages;
-       cc->free_pfn &= ~(pageblock_nr_pages-1);
+
+       if (cc->order > 0) {
+               /* Incremental compaction. Start where the last one stopped. */
+               cc->free_pfn = zone->compact_cached_free_pfn;
+               cc->start_free_pfn = cc->free_pfn;
+       } else {
+               /* Order == -1 starts at the end of the zone. */
+               cc->free_pfn = start_free_pfn(zone);
+       }
 
        migrate_prep_local();
 
index 2ba87fbfb75b9755e279d39af93359693afe66fd..da6b9b2ed3fc429443add0dc2349098b83ee9ea8 100644 (file)
@@ -118,8 +118,14 @@ struct compact_control {
        unsigned long nr_freepages;     /* Number of isolated free pages */
        unsigned long nr_migratepages;  /* Number of pages to migrate */
        unsigned long free_pfn;         /* isolate_freepages search base */
+       unsigned long start_free_pfn;   /* where we started the search */
        unsigned long migrate_pfn;      /* isolate_migratepages search base */
        bool sync;                      /* Synchronous migration */
+       bool wrapped;                   /* Order > 0 compactions are
+                                          incremental, once free_pfn
+                                          and migrate_pfn meet, we restart
+                                          from the top of the zone;
+                                          remember we wrapped around. */
 
        int order;                      /* order a direct compactor needs */
        int migratetype;                /* MOVABLE, RECLAIMABLE etc */
index fba2a1223f149ee88190fc367d8bb3817bafc5ae..94fc475c3f94ec72e116e2e2b327a5b1ecd1330d 100644 (file)
@@ -4397,6 +4397,11 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 
                zone->spanned_pages = size;
                zone->present_pages = realsize;
+#if defined CONFIG_COMPACTION || defined CONFIG_CMA
+               zone->compact_cached_free_pfn = zone->zone_start_pfn +
+                                               zone->spanned_pages;
+               zone->compact_cached_free_pfn &= ~(pageblock_nr_pages-1);
+#endif
 #ifdef CONFIG_NUMA
                zone->node = nid;
                zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)