diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 73f5d4556b3d0b7218bea0cb9bb0fdd1f1cb3cdd..9f927497f2f5d275c37fc6d7e551e821200fb6e5 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -46,6 +46,7 @@
 #include <linux/stop_machine.h>
 #include <linux/sort.h>
 #include <linux/pfn.h>
+#include <xen/xen.h>
 #include <linux/backing-dev.h>
 #include <linux/fault-inject.h>
 #include <linux/page-isolation.h>
@@ -347,6 +348,9 @@ static inline bool update_defer_init(pg_data_t *pgdat,
        /* Always populate low zones for address-constrained allocations */
        if (zone_end < pgdat_end_pfn(pgdat))
                return true;
+       /* Xen PV domains need page structures early */
+       if (xen_pv_domain())
+               return true;
        (*nr_initialised)++;
        if ((*nr_initialised > pgdat->static_init_pgcnt) &&
            (pfn & (PAGES_PER_SECTION - 1)) == 0) {
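
The hunk above and the new <xen/xen.h> include belong to one change: xen_pv_domain() comes from that header, and the extra check makes update_defer_init() report that every struct page must be populated immediately when running as a Xen PV guest, since such domains need the page structures before the deferred-init threads have run. A minimal userspace sketch of this gating logic, with xen_pv_domain() stubbed out and the pgdat bookkeeping reduced to plain parameters (every name not in the hunk is made up for illustration):

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGES_PER_SECTION 32768UL

    /* Stub: the real helper queries the hypervisor type. */
    static bool xen_pv_domain(void) { return true; }

    /*
     * Decide whether the struct page for @pfn must be initialised now
     * (true) or may be left to a deferred-init thread (false); mirrors
     * the update_defer_init() logic in the hunk above.
     */
    static bool init_page_now(unsigned long pfn, unsigned long zone_end,
                              unsigned long pgdat_end,
                              unsigned long *nr_initialised,
                              unsigned long static_init_pgcnt)
    {
            /* Always populate low zones for address-constrained allocations. */
            if (zone_end < pgdat_end)
                    return true;
            /* Xen PV domains need page structures early. */
            if (xen_pv_domain())
                    return true;
            (*nr_initialised)++;
            if (*nr_initialised > static_init_pgcnt &&
                (pfn & (PAGES_PER_SECTION - 1)) == 0)
                    return false;
            return true;
    }

    int main(void)
    {
            unsigned long nr_initialised = 0;

            /* With the PV stub returning true, nothing is ever deferred. */
            printf("%d\n", init_page_now(PAGES_PER_SECTION, 4096, 2048,
                                         &nr_initialised, 1));
            return 0;
    }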
@@ -1177,9 +1181,10 @@ static void free_one_page(struct zone *zone,
 }
 
 static void __meminit __init_single_page(struct page *page, unsigned long pfn,
-                               unsigned long zone, int nid)
+                               unsigned long zone, int nid, bool zero)
 {
-       mm_zero_struct_page(page);
+       if (zero)
+               mm_zero_struct_page(page);
        set_page_links(page, zone, nid, pfn);
        init_page_count(page);
        page_mapcount_reset(page);
@@ -1194,9 +1199,9 @@ static void __meminit __init_single_page(struct page *page, unsigned long pfn,
 }
 
 static void __meminit __init_single_pfn(unsigned long pfn, unsigned long zone,
-                                       int nid)
+                                       int nid, bool zero)
 {
-       return __init_single_page(pfn_to_page(pfn), pfn, zone, nid);
+       return __init_single_page(pfn_to_page(pfn), pfn, zone, nid, zero);
 }
 
 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
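
Taken together, the two hunks above thread a new zero flag from every caller of __init_single_page()/__init_single_pfn() down to mm_zero_struct_page(), so a path that knows its memmap is already zeroed (the memory-hotplug case, handled in the memmap_init_zone() hunk further down) can skip clearing each struct page a second time. A sketch of the pattern in plain C, using hypothetical names (struct page_meta, init_meta) rather than the kernel's:

    #include <stdbool.h>
    #include <stdlib.h>
    #include <string.h>

    struct page_meta {
            unsigned long flags;
            unsigned long refcount;
    };

    /* Initialise one descriptor; clear it first only when asked to. */
    static void init_meta(struct page_meta *m, bool zero)
    {
            if (zero)
                    memset(m, 0, sizeof(*m)); /* mm_zero_struct_page() analogue */
            m->refcount = 1;
    }

    int main(void)
    {
            /* Cold-boot analogue: contents unknown, so zero first. */
            struct page_meta *boot = malloc(sizeof(*boot));
            init_meta(boot, true);

            /* Hotplug analogue: backing memory already arrived zeroed. */
            struct page_meta *hot = calloc(1, sizeof(*hot));
            init_meta(hot, false);

            free(boot);
            free(hot);
            return 0;
    }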
@@ -1217,7 +1222,7 @@ static void __meminit init_reserved_page(unsigned long pfn)
                if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
                        break;
        }
-       __init_single_pfn(pfn, zid, nid);
+       __init_single_pfn(pfn, zid, nid, true);
 }
 #else
 static inline void init_reserved_page(unsigned long pfn)
@@ -1514,7 +1519,7 @@ static unsigned long __init deferred_init_range(int nid, int zid,
                                        page++;
                                else
                                        page = pfn_to_page(pfn);
-                               __init_single_page(page, pfn, zid, nid);
+                               __init_single_page(page, pfn, zid, nid, true);
                                cond_resched();
                        }
                }
@@ -2684,6 +2689,7 @@ void free_unref_page_list(struct list_head *list)
 {
        struct page *page, *next;
        unsigned long flags, pfn;
+       int batch_count = 0;
 
        /* Prepare pages for freeing */
        list_for_each_entry_safe(page, next, list, lru) {
@@ -2700,6 +2706,16 @@ void free_unref_page_list(struct list_head *list)
                set_page_private(page, 0);
                trace_mm_page_free_batched(page);
                free_unref_page_commit(page, pfn);
+
+               /*
+                * Guard against excessive IRQ disabled times when we get
+                * a large list of pages to free.
+                */
+               if (++batch_count == SWAP_CLUSTER_MAX) {
+                       local_irq_restore(flags);
+                       batch_count = 0;
+                       local_irq_save(flags);
+               }
        }
        local_irq_restore(flags);
 }
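
The two hunks above bound the IRQ-off window in free_unref_page_list(): instead of keeping interrupts disabled for the whole list, the loop now briefly re-enables them after every SWAP_CLUSTER_MAX pages so that a large batch of frees cannot delay interrupt delivery indefinitely. The same latency-capping pattern, sketched in userspace C with a pthread mutex standing in for local_irq_save()/local_irq_restore() and BATCH_MAX standing in for SWAP_CLUSTER_MAX:

    #include <pthread.h>
    #include <stdio.h>

    #define BATCH_MAX 32 /* stand-in for SWAP_CLUSTER_MAX */

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static void process_items(int *items, int n)
    {
            int batch_count = 0;

            pthread_mutex_lock(&lock);
            for (int i = 0; i < n; i++) {
                    items[i] = 0; /* the per-item work */

                    /*
                     * Guard against holding the lock for too long when
                     * given a large list: drop and immediately retake it
                     * once per batch so that waiters get a chance to run.
                     */
                    if (++batch_count == BATCH_MAX) {
                            pthread_mutex_unlock(&lock);
                            batch_count = 0;
                            pthread_mutex_lock(&lock);
                    }
            }
            pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
            int items[100];

            process_items(items, 100);
            puts("done");
            return 0;
    }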
@@ -5382,15 +5398,20 @@ not_early:
                 * can be created for invalid pages (for alignment)
                 * check here so that set_pageblock_migratetype() is not
                 * called for a pfn out of the zone.
+                *
+                * Please note that the MEMMAP_HOTPLUG path doesn't clear the
+                * memmap, because that is done early in sparse_add_one_section().
                 */
                if (!(pfn & (pageblock_nr_pages - 1))) {
                        struct page *page = pfn_to_page(pfn);
 
-                       __init_single_page(page, pfn, zone, nid);
+                       __init_single_page(page, pfn, zone, nid,
+                                       context != MEMMAP_HOTPLUG);
                        set_pageblock_migratetype(page, MIGRATE_MOVABLE);
                        cond_resched();
                } else {
-                       __init_single_pfn(pfn, zone, nid);
+                       __init_single_pfn(pfn, zone, nid,
+                                       context != MEMMAP_HOTPLUG);
                }
        }
 }
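
This hunk is where the zero flag introduced earlier gets its value: boot-time initialisation (context == MEMMAP_EARLY) still zeroes each struct page, while the hotplug path passes false because, per the comment above, sparse_add_one_section() has already cleared the new section's memmap. Reduced to the bare decision, assuming the memmap_context enum that this file already uses:

    #include <stdbool.h>
    #include <stdio.h>

    enum memmap_context { MEMMAP_EARLY, MEMMAP_HOTPLUG };

    /* Boot-time init must zero; hotplugged memmap arrives pre-zeroed. */
    static bool need_zeroing(enum memmap_context context)
    {
            return context != MEMMAP_HOTPLUG;
    }

    int main(void)
    {
            printf("early: %d, hotplug: %d\n",
                   need_zeroing(MEMMAP_EARLY), need_zeroing(MEMMAP_HOTPLUG));
            return 0;
    }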
@@ -6249,6 +6270,8 @@ void __paginginit zero_resv_unavail(void)
        pgcnt = 0;
        for_each_resv_unavail_range(i, &start, &end) {
                for (pfn = PFN_DOWN(start); pfn < PFN_UP(end); pfn++) {
+                       if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages)))
+                               continue;
                        mm_zero_struct_page(pfn_to_page(pfn));
                        pgcnt++;
                }
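
The added guard skips any pfn whose memmap was never allocated: it tests pfn_valid() on the pageblock-aligned base pfn before dereferencing pfn_to_page(), the assumption being that a memmap chunk, where it exists, covers at least a whole pageblock, so one check per block suffices. A quick standalone look at what ALIGN_DOWN() does for a power-of-two alignment (512 is a typical x86-64 pageblock_nr_pages; the macro below is a simplified stand-in for the kernel's):

    #include <stdio.h>

    /* Round x down to a multiple of a; a must be a power of two. */
    #define ALIGN_DOWN(x, a) ((x) & ~((unsigned long)(a) - 1))

    int main(void)
    {
            unsigned long pageblock_nr_pages = 512;

            printf("%lu\n", ALIGN_DOWN(1000UL, pageblock_nr_pages)); /* 512 */
            printf("%lu\n", ALIGN_DOWN(511UL, pageblock_nr_pages));  /* 0 */
            return 0;
    }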