diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 76c9688b6a0a75fc1c28e90920a8c9498c5e6d06..326c63b153133c863dec1c83068bb7ffe9f9fb80 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -46,6 +46,7 @@
 #include <linux/stop_machine.h>
 #include <linux/sort.h>
 #include <linux/pfn.h>
+#include <xen/xen.h>
 #include <linux/backing-dev.h>
 #include <linux/fault-inject.h>
 #include <linux/page-isolation.h>
@@ -347,6 +348,9 @@ static inline bool update_defer_init(pg_data_t *pgdat,
        /* Always populate low zones for address-constrained allocations */
        if (zone_end < pgdat_end_pfn(pgdat))
                return true;
+       /* Xen PV domains need page structures early */
+       if (xen_pv_domain())
+               return true;
        (*nr_initialised)++;
        if ((*nr_initialised > pgdat->static_init_pgcnt) &&
            (pfn & (PAGES_PER_SECTION - 1)) == 0) {
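The new check disables deferred struct page initialization whenever the kernel boots as a Xen PV guest: PV domains touch struct pages early in boot, before the deferred-init worker threads would have populated them, so every page structure has to exist up front. For context, this is roughly how update_defer_init() reads after the hunk; the closing lines are reconstructed from the surrounding kernel and shown only to make the control flow visible:

static inline bool update_defer_init(pg_data_t *pgdat,
                                unsigned long pfn, unsigned long zone_end,
                                unsigned long *nr_initialised)
{
        /* Always populate low zones for address-constrained allocations */
        if (zone_end < pgdat_end_pfn(pgdat))
                return true;
        /* Xen PV domains need page structures early */
        if (xen_pv_domain())
                return true;
        (*nr_initialised)++;
        if ((*nr_initialised > pgdat->static_init_pgcnt) &&
            (pfn & (PAGES_PER_SECTION - 1)) == 0) {
                /* tail reconstructed: defer everything from this pfn on */
                pgdat->first_deferred_pfn = pfn;
                return false;
        }
        return true;
}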
@@ -1177,9 +1181,10 @@ static void free_one_page(struct zone *zone,
 }
 
 static void __meminit __init_single_page(struct page *page, unsigned long pfn,
-                               unsigned long zone, int nid)
+                               unsigned long zone, int nid, bool zero)
 {
-       mm_zero_struct_page(page);
+       if (zero)
+               mm_zero_struct_page(page);
        set_page_links(page, zone, nid, pfn);
        init_page_count(page);
        page_mapcount_reset(page);
@@ -1194,9 +1199,9 @@ static void __meminit __init_single_page(struct page *page, unsigned long pfn,
 }
 
 static void __meminit __init_single_pfn(unsigned long pfn, unsigned long zone,
-                                       int nid)
+                                       int nid, bool zero)
 {
-       return __init_single_page(pfn_to_page(pfn), pfn, zone, nid);
+       return __init_single_page(pfn_to_page(pfn), pfn, zone, nid, zero);
 }
 
 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
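Threading the new 'zero' flag through __init_single_page() and its __init_single_pfn() wrapper lets callers whose memmap is already zero-filled skip a redundant per-page clear; every caller updated below passes true except the memory-hotplug path in memmap_init_zone(). What the flag gates is small but hot, since it runs once per struct page in the system. The generic definition in include/linux/mm.h at this point looks roughly like this (architectures may override it with a cheaper open-coded version):

/* Sketch of the generic fallback; arch headers may redefine it. */
#ifndef mm_zero_struct_page
#define mm_zero_struct_page(pp)  ((void)memset((pp), 0, sizeof(struct page)))
#endif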
@@ -1217,7 +1222,7 @@ static void __meminit init_reserved_page(unsigned long pfn)
                if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
                        break;
        }
-       __init_single_pfn(pfn, zid, nid);
+       __init_single_pfn(pfn, zid, nid, true);
 }
 #else
 static inline void init_reserved_page(unsigned long pfn)
@@ -1514,7 +1519,7 @@ static unsigned long __init deferred_init_range(int nid, int zid,
                                        page++;
                                else
                                        page = pfn_to_page(pfn);
-                               __init_single_page(page, pfn, zid, nid);
+                               __init_single_page(page, pfn, zid, nid, true);
                                cond_resched();
                        }
                }
@@ -3583,7 +3588,7 @@ static bool __need_fs_reclaim(gfp_t gfp_mask)
                return false;
 
        /* this guy won't enter reclaim */
-       if ((current->flags & PF_MEMALLOC) && !(gfp_mask & __GFP_NOMEMALLOC))
+       if (current->flags & PF_MEMALLOC)
                return false;
 
        /* We're only interested in __GFP_FS allocations for now */
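This hunk widens the PF_MEMALLOC exemption: a task that has set PF_MEMALLOC never recurses into direct reclaim, so the fs_reclaim lockdep annotation can be skipped for all of its allocations, not only those whose mask also carries __GFP_NOMEMALLOC. A hedged sketch of how a caller typically enters that state, using the real memalloc_noreclaim_save()/memalloc_noreclaim_restore() helpers from <linux/sched/mm.h>; the function name is made up for illustration:

#include <linux/gfp.h>
#include <linux/sched/mm.h>

/* Hypothetical example caller, for illustration only. */
static struct page *alloc_from_reclaim_context(void)
{
        unsigned int noreclaim_flag;
        struct page *page;

        noreclaim_flag = memalloc_noreclaim_save(); /* sets PF_MEMALLOC */
        /*
         * This allocation cannot recurse into direct reclaim, so after
         * this hunk __need_fs_reclaim() returns false for it even when
         * the mask lacks __GFP_NOMEMALLOC.
         */
        page = alloc_page(GFP_NOFS);
        memalloc_noreclaim_restore(noreclaim_flag);
        return page;
}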
@@ -4030,7 +4035,6 @@ retry:
         * orientated.
         */
        if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) {
-               ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
                ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
                                        ac->high_zoneidx, ac->nodemask);
        }
@@ -5343,17 +5347,8 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
                if (context != MEMMAP_EARLY)
                        goto not_early;
 
-               if (!early_pfn_valid(pfn)) {
-#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
-                       /*
-                        * Skip to the pfn preceding the next valid one (or
-                        * end_pfn), such that we hit a valid pfn (or end_pfn)
-                        * on our next iteration of the loop.
-                        */
-                       pfn = memblock_next_valid_pfn(pfn, end_pfn) - 1;
-#endif
+               if (!early_pfn_valid(pfn))
                        continue;
-               }
                if (!early_pfn_in_nid(pfn, nid))
                        continue;
                if (!update_defer_init(pgdat, pfn, end_pfn, &nr_initialised))
@@ -5393,15 +5388,20 @@ not_early:
                 * can be created for invalid pages (for alignment)
                 * check here not to call set_pageblock_migratetype() against
                 * pfn out of zone.
+                *
+                * Please note that MEMMAP_HOTPLUG path doesn't clear memmap
+                * because this is done early in sparse_add_one_section
                 */
                if (!(pfn & (pageblock_nr_pages - 1))) {
                        struct page *page = pfn_to_page(pfn);
 
-                       __init_single_page(page, pfn, zone, nid);
+                       __init_single_page(page, pfn, zone, nid,
+                                       context != MEMMAP_HOTPLUG);
                        set_pageblock_migratetype(page, MIGRATE_MOVABLE);
                        cond_resched();
                } else {
-                       __init_single_pfn(pfn, zone, nid);
+                       __init_single_pfn(pfn, zone, nid,
+                                       context != MEMMAP_HOTPLUG);
                }
        }
 }
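The zero argument is keyed off the init context: boot-time (MEMMAP_EARLY) struct pages come from memblock allocations with undefined contents and must be cleared, while the hotplug path gets a memmap that was already zeroed when the section was added, as the new comment above notes. Paraphrasing the relevant part of sparse_add_one_section() in this kernel's mm/sparse.c (error handling and usemap setup elided):

        /* paraphrased from mm/sparse.c in this tree */
        memmap = kmalloc_section_memmap(section_nr, pgdat->node_id);
        if (!memmap)
                return -ENOMEM;
        /* ... usemap allocation and section checks elided ... */
        memset(memmap, 0, sizeof(struct page) * PAGES_PER_SECTION);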
@@ -6239,7 +6239,7 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
        free_area_init_core(pgdat);
 }
 
-#ifdef CONFIG_HAVE_MEMBLOCK
+#if defined(CONFIG_HAVE_MEMBLOCK) && !defined(CONFIG_FLAT_NODE_MEM_MAP)
 /*
  * Only struct pages that are backed by physical memory are zeroed and
  * initialized by going through __init_single_page(). But, there are some
@@ -6277,7 +6277,7 @@ void __paginginit zero_resv_unavail(void)
        if (pgcnt)
                pr_info("Reserved but unavailable: %lld pages", pgcnt);
 }
-#endif /* CONFIG_HAVE_MEMBLOCK */
+#endif /* CONFIG_HAVE_MEMBLOCK && !CONFIG_FLAT_NODE_MEM_MAP */
 
 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 
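The narrowed guard accounts for the reordering made below: zero_resv_unavail() now runs before free_area_init_node(), but with CONFIG_FLAT_NODE_MEM_MAP the mem_map array is only allocated inside free_area_init_node(), so calling pfn_to_page() that early would dereference an unallocated memmap. Compiling the function out on flat-memmap configurations avoids that; the matching declaration in include/linux/mm.h presumably pairs it with an empty stub so callers stay unconditional, along these lines:

/* sketch of the paired declaration in include/linux/mm.h */
#if defined(CONFIG_HAVE_MEMBLOCK) && !defined(CONFIG_FLAT_NODE_MEM_MAP)
extern void zero_resv_unavail(void);
#else
static inline void zero_resv_unavail(void) {}
#endif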
@@ -6692,6 +6692,7 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
        /* Initialise every node */
        mminit_verify_pageflags_layout();
        setup_nr_node_ids();
+       zero_resv_unavail();
        for_each_online_node(nid) {
                pg_data_t *pgdat = NODE_DATA(nid);
                free_area_init_node(nid, NULL,
@@ -6702,7 +6703,6 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
                        node_set_state(nid, N_MEMORY);
                check_for_memory(pgdat, nid);
        }
-       zero_resv_unavail();
 }
 
 static int __init cmdline_parse_core(char *p, unsigned long *core)
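This hunk and the free_area_init() hunk below are the same logical move: zero_resv_unavail() is hoisted from after node initialization to before it. The ordering matters because the function unconditionally clears the struct page of every reserved pfn that has no backing memblock memory; run after free_area_init_node() it could wipe pages that memmap_init_zone() had already initialized, whereas run first it is purely a pre-initialization step. Its core loop, reconstructed from this kernel for reference (declarations elided):

        /* reconstructed sketch of the zero_resv_unavail() inner loop */
        for_each_resv_unavail_range(i, &start, &end) {
                for (pfn = PFN_DOWN(start); pfn < PFN_UP(end); pfn++) {
                        mm_zero_struct_page(pfn_to_page(pfn));
                        pgcnt++;
                }
        }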
@@ -6770,9 +6770,21 @@ unsigned long free_reserved_area(void *start, void *end, int poison, char *s)
        start = (void *)PAGE_ALIGN((unsigned long)start);
        end = (void *)((unsigned long)end & PAGE_MASK);
        for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
+               struct page *page = virt_to_page(pos);
+               void *direct_map_addr;
+
+               /*
+                * 'direct_map_addr' might be different from 'pos'
+                * because some architectures' virt_to_page()
+                * work with aliases.  Getting the direct map
+                * address ensures that we get a _writeable_
+                * alias for the memset().
+                */
+               direct_map_addr = page_address(page);
                if ((unsigned int)poison <= 0xFF)
-                       memset(pos, poison, PAGE_SIZE);
-               free_reserved_page(virt_to_page(pos));
+                       memset(direct_map_addr, poison, PAGE_SIZE);
+
+               free_reserved_page(page);
        }
 
        if (pages && s)
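The comment in the hunk explains the subtlety: on some architectures the 'pos' passed in is an alias (for example a read-only kernel-image mapping), and only the direct-map address returned by page_address() is guaranteed writeable for the poison memset(). The canonical caller shows where such aliases come from; this is free_initmem_default() from include/linux/mm.h (sketch), which feeds the init section's link-time symbol addresses straight into free_reserved_area():

/* sketch of free_initmem_default() from include/linux/mm.h */
static inline unsigned long free_initmem_default(int poison)
{
        extern char __init_begin[], __init_end[];

        return free_reserved_area(&__init_begin, &__init_end,
                                  poison, "unused kernel");
}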
@@ -6864,9 +6876,9 @@ void __init set_dma_reserve(unsigned long new_dma_reserve)
 
 void __init free_area_init(unsigned long *zones_size)
 {
+       zero_resv_unavail();
        free_area_init_node(0, zones_size,
                        __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
-       zero_resv_unavail();
 }
 
 static int page_alloc_cpu_dead(unsigned int cpu)