diff --git a/mm/compaction.c b/mm/compaction.c
index b68736c8a1ce0d5c74bd8ebfa78b502c5be37cf6..8c0d9459b54a02042dd2caf9489566188d9ea908 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -16,6 +16,7 @@
 #include <linux/sysfs.h>
 #include <linux/balloon_compaction.h>
 #include <linux/page-isolation.h>
+#include <linux/kasan.h>
 #include "internal.h"
 
 #ifdef CONFIG_COMPACTION
@@ -72,6 +73,7 @@ static void map_pages(struct list_head *list)
        list_for_each_entry(page, list, lru) {
                arch_alloc_page(page, 0);
                kernel_map_pages(page, 1, 1);
+               kasan_alloc_pages(page, 0);
        }
 }
 
@@ -490,6 +492,13 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 
                /* If a page was split, advance to the end of it */
                if (isolated) {
+                       cc->nr_freepages += isolated;
+                       if (!strict &&
+                               cc->nr_migratepages <= cc->nr_freepages) {
+                               blockpfn += isolated;
+                               break;
+                       }
+
                        blockpfn += isolated - 1;
                        cursor += isolated - 1;
                        continue;
@@ -899,7 +908,6 @@ static void isolate_freepages(struct compact_control *cc)
        unsigned long isolate_start_pfn; /* exact pfn we start at */
        unsigned long block_end_pfn;    /* end of current pageblock */
        unsigned long low_pfn;       /* lowest pfn scanner is able to scan */
-       int nr_freepages = cc->nr_freepages;
        struct list_head *freelist = &cc->freepages;
 
        /*
@@ -924,11 +932,11 @@ static void isolate_freepages(struct compact_control *cc)
         * pages on cc->migratepages. We stop searching if the migrate
         * and free page scanners meet or enough free pages are isolated.
         */
-       for (; block_start_pfn >= low_pfn && cc->nr_migratepages > nr_freepages;
+       for (; block_start_pfn >= low_pfn &&
+                       cc->nr_migratepages > cc->nr_freepages;
                                block_end_pfn = block_start_pfn,
                                block_start_pfn -= pageblock_nr_pages,
                                isolate_start_pfn = block_start_pfn) {
-               unsigned long isolated;
 
                /*
                 * This can iterate a massively long zone without finding any
@@ -953,9 +961,8 @@ static void isolate_freepages(struct compact_control *cc)
                        continue;
 
                /* Found a block suitable for isolating free pages from. */
-               isolated = isolate_freepages_block(cc, &isolate_start_pfn,
+               isolate_freepages_block(cc, &isolate_start_pfn,
                                        block_end_pfn, freelist, false);
-               nr_freepages += isolated;
 
                /*
                 * Remember where the free scanner should restart next time,
@@ -987,8 +994,6 @@ static void isolate_freepages(struct compact_control *cc)
         */
        if (block_start_pfn < low_pfn)
                cc->free_pfn = cc->migrate_pfn;
-
-       cc->nr_freepages = nr_freepages;
 }
 
 /*
@@ -1100,8 +1105,10 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
                low_pfn = isolate_migratepages_block(cc, low_pfn, end_pfn,
                                                                isolate_mode);
 
-               if (!low_pfn || cc->contended)
+               if (!low_pfn || cc->contended) {
+                       acct_isolated(zone, cc);
                        return ISOLATE_ABORT;
+               }
 
                /*
                 * Either we isolated something and proceed with migration. Or
@@ -1173,7 +1180,7 @@ static int __compact_finished(struct zone *zone, struct compact_control *cc,
                        return COMPACT_PARTIAL;
 
                /* Job done if allocation would set block type */
-               if (cc->order >= pageblock_order && area->nr_free)
+               if (order >= pageblock_order && area->nr_free)
                        return COMPACT_PARTIAL;
        }