diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 73f5d4556b3d0b7218bea0cb9bb0fdd1f1cb3cdd..d8ee1effa4a6c2325811bd6d88d8f6fc790ebfab 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -46,6 +46,7 @@
 #include <linux/stop_machine.h>
 #include <linux/sort.h>
 #include <linux/pfn.h>
+#include <xen/xen.h>
 #include <linux/backing-dev.h>
 #include <linux/fault-inject.h>
 #include <linux/page-isolation.h>
@@ -347,6 +348,9 @@ static inline bool update_defer_init(pg_data_t *pgdat,
        /* Always populate low zones for address-constrained allocations */
        if (zone_end < pgdat_end_pfn(pgdat))
                return true;
+       /* Xen PV domains need page structures early */
+       if (xen_pv_domain())
+               return true;
        (*nr_initialised)++;
        if ((*nr_initialised > pgdat->static_init_pgcnt) &&
            (pfn & (PAGES_PER_SECTION - 1)) == 0) {
@@ -1177,9 +1181,10 @@ static void free_one_page(struct zone *zone,
 }
 
 static void __meminit __init_single_page(struct page *page, unsigned long pfn,
-                               unsigned long zone, int nid)
+                               unsigned long zone, int nid, bool zero)
 {
-       mm_zero_struct_page(page);
+       if (zero)
+               mm_zero_struct_page(page);
        set_page_links(page, zone, nid, pfn);
        init_page_count(page);
        page_mapcount_reset(page);
@@ -1194,9 +1199,9 @@ static void __meminit __init_single_page(struct page *page, unsigned long pfn,
 }
 
 static void __meminit __init_single_pfn(unsigned long pfn, unsigned long zone,
-                                       int nid)
+                                       int nid, bool zero)
 {
-       return __init_single_page(pfn_to_page(pfn), pfn, zone, nid);
+       return __init_single_page(pfn_to_page(pfn), pfn, zone, nid, zero);
 }
 
 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
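
The two hunks above thread a new "zero" flag through the per-page init helpers so that callers which know the memmap is already zeroed can skip the redundant mm_zero_struct_page() call (the memory-hotplug path in memmap_init_zone below relies on this). A minimal, self-contained sketch of the same pattern, using hypothetical names rather than the kernel helpers:

#include <stdbool.h>
#include <string.h>

/* Hypothetical stand-in for struct page. */
struct item {
	unsigned long index;
	int refcount;
};

/*
 * Zero the object only when the caller cannot guarantee that the backing
 * memory is already clear, then initialize the individual fields; the
 * same shape as __init_single_page() taking a 'zero' argument.
 */
static void init_single_item(struct item *it, unsigned long index, bool zero)
{
	if (zero)
		memset(it, 0, sizeof(*it));
	it->index = index;
	it->refcount = 1;
}
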
@@ -1217,7 +1222,7 @@ static void __meminit init_reserved_page(unsigned long pfn)
                if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
                        break;
        }
-       __init_single_pfn(pfn, zid, nid);
+       __init_single_pfn(pfn, zid, nid, true);
 }
 #else
 static inline void init_reserved_page(unsigned long pfn)
@@ -1514,7 +1519,7 @@ static unsigned long __init deferred_init_range(int nid, int zid,
                                        page++;
                                else
                                        page = pfn_to_page(pfn);
-                               __init_single_page(page, pfn, zid, nid);
+                               __init_single_page(page, pfn, zid, nid, true);
                                cond_resched();
                        }
                }
@@ -2684,6 +2689,7 @@ void free_unref_page_list(struct list_head *list)
 {
        struct page *page, *next;
        unsigned long flags, pfn;
+       int batch_count = 0;
 
        /* Prepare pages for freeing */
        list_for_each_entry_safe(page, next, list, lru) {
@@ -2700,6 +2706,16 @@ void free_unref_page_list(struct list_head *list)
                set_page_private(page, 0);
                trace_mm_page_free_batched(page);
                free_unref_page_commit(page, pfn);
+
+               /*
+                * Guard against excessive IRQ disabled times when we get
+                * a large list of pages to free.
+                */
+               if (++batch_count == SWAP_CLUSTER_MAX) {
+                       local_irq_restore(flags);
+                       batch_count = 0;
+                       local_irq_save(flags);
+               }
        }
        local_irq_restore(flags);
 }
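
The hunk above bounds how long interrupts stay disabled while draining a long free list: after every SWAP_CLUSTER_MAX pages the IRQ flags are restored and immediately saved again, giving pending interrupts a chance to run. A minimal sketch of the same batching idea outside the kernel, with hypothetical names and a pthread mutex standing in for the IRQ-disabled section:

#include <pthread.h>

#define BATCH_LIMIT 32			/* plays the role of SWAP_CLUSTER_MAX */

/* Hypothetical work item; a singly linked list protected by queue_lock. */
struct work {
	struct work *next;
};

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

static void process_one(struct work *w)
{
	(void)w;			/* placeholder for the real work */
}

/*
 * Walk the whole list under the lock, but drop and re-acquire it every
 * BATCH_LIMIT items so other contenders (interrupt handlers, in the
 * kernel case) are not locked out for the entire traversal.
 */
static void process_list(struct work *head)
{
	int batch_count = 0;
	struct work *w;

	pthread_mutex_lock(&queue_lock);
	for (w = head; w; w = w->next) {
		process_one(w);
		if (++batch_count == BATCH_LIMIT) {
			pthread_mutex_unlock(&queue_lock);
			batch_count = 0;
			pthread_mutex_lock(&queue_lock);
		}
	}
	pthread_mutex_unlock(&queue_lock);
}
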
@@ -3572,7 +3588,7 @@ static bool __need_fs_reclaim(gfp_t gfp_mask)
                return false;
 
        /* this guy won't enter reclaim */
-       if ((current->flags & PF_MEMALLOC) && !(gfp_mask & __GFP_NOMEMALLOC))
+       if (current->flags & PF_MEMALLOC)
                return false;
 
        /* We're only interested __GFP_FS allocations for now */
@@ -5332,17 +5348,8 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
                if (context != MEMMAP_EARLY)
                        goto not_early;
 
-               if (!early_pfn_valid(pfn)) {
-#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
-                       /*
-                        * Skip to the pfn preceding the next valid one (or
-                        * end_pfn), such that we hit a valid pfn (or end_pfn)
-                        * on our next iteration of the loop.
-                        */
-                       pfn = memblock_next_valid_pfn(pfn, end_pfn) - 1;
-#endif
+               if (!early_pfn_valid(pfn))
                        continue;
-               }
                if (!early_pfn_in_nid(pfn, nid))
                        continue;
                if (!update_defer_init(pgdat, pfn, end_pfn, &nr_initialised))
@@ -5382,15 +5389,20 @@ not_early:
                 * can be created for invalid pages (for alignment)
                 * check here not to call set_pageblock_migratetype() against
                 * pfn out of zone.
+                *
+                * Please note that the MEMMAP_HOTPLUG path doesn't clear the
+                * memmap because this is done early in sparse_add_one_section.
                 */
                if (!(pfn & (pageblock_nr_pages - 1))) {
                        struct page *page = pfn_to_page(pfn);
 
-                       __init_single_page(page, pfn, zone, nid);
+                       __init_single_page(page, pfn, zone, nid,
+                                       context != MEMMAP_HOTPLUG);
                        set_pageblock_migratetype(page, MIGRATE_MOVABLE);
                        cond_resched();
                } else {
-                       __init_single_pfn(pfn, zone, nid);
+                       __init_single_pfn(pfn, zone, nid,
+                                       context != MEMMAP_HOTPLUG);
                }
        }
 }
@@ -6249,6 +6261,8 @@ void __paginginit zero_resv_unavail(void)
        pgcnt = 0;
        for_each_resv_unavail_range(i, &start, &end) {
                for (pfn = PFN_DOWN(start); pfn < PFN_UP(end); pfn++) {
+                       if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages)))
+                               continue;
                        mm_zero_struct_page(pfn_to_page(pfn));
                        pgcnt++;
                }