git.proxmox.com Git - mirror_ubuntu-kernels.git/commitdiff
mm/page_alloc: refactor memmap_init_zone_device() page init
author: Joao Martins <joao.m.martins@oracle.com>
Fri, 14 Jan 2022 22:04:18 +0000 (14:04 -0800)
committer: Linus Torvalds <torvalds@linux-foundation.org>
Sat, 15 Jan 2022 14:30:25 +0000 (16:30 +0200)
Move struct page init to a helper function __init_zone_device_page().

This is in preparation for sharing the storage for compound page
metadata.

Link: https://lkml.kernel.org/r/20211202204422.26777-4-joao.m.martins@oracle.com
Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Dave Jiang <dave.jiang@intel.com>
Cc: Jane Chu <jane.chu@oracle.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
Cc: Vishal Verma <vishal.l.verma@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/page_alloc.c

index 20b9db0cf97ce6994a6bb6cc2a27595ca93dc6d8..23045a2a1339b62c2ff6d1f8b0204bd1edb02a3d 100644 (file)
@@ -6572,6 +6572,46 @@ void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone
 }
 
 #ifdef CONFIG_ZONE_DEVICE
+static void __ref __init_zone_device_page(struct page *page, unsigned long pfn,
+                                         unsigned long zone_idx, int nid,
+                                         struct dev_pagemap *pgmap)
+{
+
+       __init_single_page(page, pfn, zone_idx, nid);
+
+       /*
+        * Mark page reserved as it will need to wait for onlining
+        * phase for it to be fully associated with a zone.
+        *
+        * We can use the non-atomic __set_bit operation for setting
+        * the flag as we are still initializing the pages.
+        */
+       __SetPageReserved(page);
+
+       /*
+        * ZONE_DEVICE pages union ->lru with a ->pgmap back pointer
+        * and zone_device_data.  It is a bug if a ZONE_DEVICE page is
+        * ever freed or placed on a driver-private list.
+        */
+       page->pgmap = pgmap;
+       page->zone_device_data = NULL;
+
+       /*
+        * Mark the block movable so that blocks are reserved for
+        * movable at startup. This will force kernel allocations
+        * to reserve their blocks rather than leaking throughout
+        * the address space during boot when many long-lived
+        * kernel allocations are made.
+        *
+        * Please note that MEMINIT_HOTPLUG path doesn't clear memmap
+        * because this is done early in section_activate()
+        */
+       if (IS_ALIGNED(pfn, pageblock_nr_pages)) {
+               set_pageblock_migratetype(page, MIGRATE_MOVABLE);
+               cond_resched();
+       }
+}
+
 void __ref memmap_init_zone_device(struct zone *zone,
                                   unsigned long start_pfn,
                                   unsigned long nr_pages,
@@ -6600,39 +6640,7 @@ void __ref memmap_init_zone_device(struct zone *zone,
        for (pfn = start_pfn; pfn < end_pfn; pfn++) {
                struct page *page = pfn_to_page(pfn);
 
-               __init_single_page(page, pfn, zone_idx, nid);
-
-               /*
-                * Mark page reserved as it will need to wait for onlining
-                * phase for it to be fully associated with a zone.
-                *
-                * We can use the non-atomic __set_bit operation for setting
-                * the flag as we are still initializing the pages.
-                */
-               __SetPageReserved(page);
-
-               /*
-                * ZONE_DEVICE pages union ->lru with a ->pgmap back pointer
-                * and zone_device_data.  It is a bug if a ZONE_DEVICE page is
-                * ever freed or placed on a driver-private list.
-                */
-               page->pgmap = pgmap;
-               page->zone_device_data = NULL;
-
-               /*
-                * Mark the block movable so that blocks are reserved for
-                * movable at startup. This will force kernel allocations
-                * to reserve their blocks rather than leaking throughout
-                * the address space during boot when many long-lived
-                * kernel allocations are made.
-                *
-                * Please note that MEMINIT_HOTPLUG path doesn't clear memmap
-                * because this is done early in section_activate()
-                */
-               if (IS_ALIGNED(pfn, pageblock_nr_pages)) {
-                       set_pageblock_migratetype(page, MIGRATE_MOVABLE);
-                       cond_resched();
-               }
+               __init_zone_device_page(page, pfn, zone_idx, nid, pgmap);
        }
 
        pr_info("%s initialised %lu pages in %ums\n", __func__,