mm: meminit: reduce number of times pageblocks are set during struct page init
author Mel Gorman <mgorman@suse.de>
Tue, 30 Jun 2015 21:57:20 +0000 (14:57 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 1 Jul 2015 02:44:56 +0000 (19:44 -0700)
During parallel struct page initialisation, ranges are checked for every
PFN unnecessarily, which increases boot times.  This patch alters when the
ranges are checked.
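
In outline, the change moves the pageblock-boundary work out of the
per-page helper and into the callers.  A minimal user-space sketch of the
restructuring (the helper names and the simplified pageblock constants
below are stand-ins for illustration, not the kernel's own):

#include <stdio.h>

#define PAGEBLOCK_ORDER     9
#define PAGEBLOCK_NR_PAGES  (1UL << PAGEBLOCK_ORDER)

static void init_one_page(unsigned long pfn)
{
	/* stand-in for __init_single_page(): per-page init only, with
	 * no zone-range or pageblock test left in the hot path */
	(void)pfn;
}

static void mark_pageblock_movable(unsigned long pfn)
{
	/* stand-in for set_pageblock_migratetype(page, MIGRATE_MOVABLE) */
	printf("pageblock starting at pfn %lu marked movable\n", pfn);
}

static void init_zone_range(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
		init_one_page(pfn);
		/* the caller's loop already covers only the zone's valid
		 * PFNs, so one cheap alignment test per page replaces the
		 * old per-PFN zone-range check; it fires once per
		 * pageblock */
		if ((pfn & (PAGEBLOCK_NR_PAGES - 1)) == 0)
			mark_pageblock_movable(pfn);
	}
}

int main(void)
{
	init_zone_range(0, 2 * PAGEBLOCK_NR_PAGES);
	return 0;
}

As the diff below shows, the per-PFN zone-range test can be dropped
because memmap_init_zone() already walks the zone's valid PFN range.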

Signed-off-by: Mel Gorman <mgorman@suse.de>
Tested-by: Nate Zimmer <nzimmer@sgi.com>
Tested-by: Waiman Long <waiman.long@hp.com>
Tested-by: Daniel J Blueman <daniel@numascale.com>
Acked-by: Pekka Enberg <penberg@kernel.org>
Cc: Robin Holt <robinmholt@gmail.com>
Cc: Nate Zimmer <nzimmer@sgi.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Waiman Long <waiman.long@hp.com>
Cc: Scott Norton <scott.norton@hp.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/page_alloc.c

index e3f00f622f28e400a7a578314e1eef603b86388e..f1f455a69cefc25e94d5db7e44b61d13866e21cf 100644
@@ -838,33 +838,12 @@ static int free_tail_pages_check(struct page *head_page, struct page *page)
 static void __meminit __init_single_page(struct page *page, unsigned long pfn,
                                unsigned long zone, int nid)
 {
-       struct zone *z = &NODE_DATA(nid)->node_zones[zone];
-
        set_page_links(page, zone, nid, pfn);
        mminit_verify_page_links(page, zone, nid, pfn);
        init_page_count(page);
        page_mapcount_reset(page);
        page_cpupid_reset_last(page);
 
-       /*
-        * Mark the block movable so that blocks are reserved for
-        * movable at startup. This will force kernel allocations
-        * to reserve their blocks rather than leaking throughout
-        * the address space during boot when many long-lived
-        * kernel allocations are made. Later some blocks near
-        * the start are marked MIGRATE_RESERVE by
-        * setup_zone_migrate_reserve()
-        *
-        * bitmap is created for zone's valid pfn range. but memmap
-        * can be created for invalid pages (for alignment)
-        * check here not to call set_pageblock_migratetype() against
-        * pfn out of zone.
-        */
-       if ((z->zone_start_pfn <= pfn)
-           && (pfn < zone_end_pfn(z))
-           && !(pfn & (pageblock_nr_pages - 1)))
-               set_pageblock_migratetype(page, MIGRATE_MOVABLE);
-
        INIT_LIST_HEAD(&page->lru);
 #ifdef WANT_PAGE_VIRTUAL
        /* The shift won't overflow because ZONE_NORMAL is below 4G. */
@@ -1073,6 +1052,7 @@ static void __defermem_init deferred_free_range(struct page *page,
        /* Free a large naturally-aligned chunk if possible */
        if (nr_pages == MAX_ORDER_NR_PAGES &&
            (pfn & (MAX_ORDER_NR_PAGES-1)) == 0) {
+               set_pageblock_migratetype(page, MIGRATE_MOVABLE);
                __free_pages_boot_core(page, pfn, MAX_ORDER-1);
                return;
        }
@@ -4593,7 +4573,29 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
                                                &nr_initialised))
                                break;
                }
-               __init_single_pfn(pfn, zone, nid);
+
+               /*
+                * Mark the block movable so that blocks are reserved for
+                * movable at startup. This will force kernel allocations
+                * to reserve their blocks rather than leaking throughout
+                * the address space during boot when many long-lived
+                * kernel allocations are made. Later some blocks near
+                * the start are marked MIGRATE_RESERVE by
+                * setup_zone_migrate_reserve()
+                *
+                * bitmap is created for zone's valid pfn range. but memmap
+                * can be created for invalid pages (for alignment)
+                * check here not to call set_pageblock_migratetype() against
+                * pfn out of zone.
+                */
+               if (!(pfn & (pageblock_nr_pages - 1))) {
+                       struct page *page = pfn_to_page(pfn);
+
+                       __init_single_page(page, pfn, zone, nid);
+                       set_pageblock_migratetype(page, MIGRATE_MOVABLE);
+               } else {
+                       __init_single_pfn(pfn, zone, nid);
+               }
        }
 }
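
In the deferred path, the migratetype is now set at the point where a
naturally aligned MAX_ORDER chunk is freed.  A small stand-alone sketch of
that alignment test (MAX_ORDER is set to 11 here purely for illustration;
the real value is configuration-dependent):

#include <stdbool.h>
#include <stdio.h>

#define MAX_ORDER           11
#define MAX_ORDER_NR_PAGES  (1UL << (MAX_ORDER - 1))

static bool is_natural_max_order_chunk(unsigned long pfn,
				       unsigned long nr_pages)
{
	/* a range qualifies for the bulk free-and-mark path only when
	 * it is exactly MAX_ORDER_NR_PAGES long and its start pfn is
	 * naturally aligned to that size */
	return nr_pages == MAX_ORDER_NR_PAGES &&
	       (pfn & (MAX_ORDER_NR_PAGES - 1)) == 0;
}

int main(void)
{
	printf("%d\n", is_natural_max_order_chunk(1024, 1024)); /* 1: aligned, full size */
	printf("%d\n", is_natural_max_order_chunk(512, 1024));  /* 0: misaligned start */
	printf("%d\n", is_natural_max_order_chunk(1024, 512));  /* 0: partial chunk */
	return 0;
}

Per the comment in the deferred_free_range() hunk above, ranges that fail
this test fall back to finer-grained freeing rather than the large
naturally-aligned chunk path.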