mm/memory_hotplug: don't access uninitialized memmaps in shrink_pgdat_span()
author David Hildenbrand <david@redhat.com>
Sat, 19 Oct 2019 03:19:33 +0000 (20:19 -0700)
committer Khalid Elmously <khalid.elmously@canonical.com>
Thu, 28 Nov 2019 04:59:27 +0000 (23:59 -0500)
BugLink: https://bugs.launchpad.net/bugs/1854216
commit 00d6c019b5bc175cee3770e0e659f2b5f4804ea5 upstream.

We might use the nid of memmaps that were never initialized.  For
example, if the memmap was poisoned, we will crash the kernel in
pfn_to_nid() right now.  Let's use the calculated boundaries of the
separate zones instead.  This now also avoids having to iterate over a
whole bunch of subsections again, after shrinking one zone.

Before commit d0dc12e86b31 ("mm/memory_hotplug: optimize memory
hotplug"), the memmap was initialized to 0 and the node was set to the
right value.  After that commit, the node might be garbage.

We'll have to fix shrink_zone_span() next.

Link: http://lkml.kernel.org/r/20191006085646.5768-4-david@redhat.com
Fixes: f1dd2cd13c4b ("mm, memory_hotplug: do not associate hotadded memory to zones until online") [visible after d0dc12e86b319]
Signed-off-by: David Hildenbrand <david@redhat.com>
Reported-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: David Hildenbrand <david@redhat.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Pavel Tatashin <pasha.tatashin@soleen.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Wei Yang <richardw.yang@linux.intel.com>
Cc: Alexander Duyck <alexander.h.duyck@linux.intel.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Christophe Leroy <christophe.leroy@c-s.fr>
Cc: Damian Tometzki <damian.tometzki@gmail.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Halil Pasic <pasic@linux.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Jun Yao <yaojun8558363@gmail.com>
Cc: Logan Gunthorpe <logang@deltatee.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Masahiro Yamada <yamada.masahiro@socionext.com>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Mike Rapoport <rppt@linux.ibm.com>
Cc: Pankaj Gupta <pagupta@redhat.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Pavel Tatashin <pavel.tatashin@microsoft.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Qian Cai <cai@lca.pw>
Cc: Rich Felker <dalias@libc.org>
Cc: Robin Murphy <robin.murphy@arm.com>
Cc: Steve Capper <steve.capper@arm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Wei Yang <richard.weiyang@gmail.com>
Cc: Will Deacon <will@kernel.org>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Cc: Yu Zhao <yuzhao@google.com>
Cc: <stable@vger.kernel.org> [4.13+]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Kamal Mostafa <kamal@canonical.com>
Signed-off-by: Khalid Elmously <khalid.elmously@canonical.com>
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 1a34adb0fe59bc3d5b8382cb45756845fd037ea9..a2466fc1534e561a510f5fc24a2aaf28795c629e 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -461,70 +461,25 @@ static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
        zone_span_writeunlock(zone);
 }
 
-static void shrink_pgdat_span(struct pglist_data *pgdat,
-                             unsigned long start_pfn, unsigned long end_pfn)
+static void update_pgdat_span(struct pglist_data *pgdat)
 {
-       unsigned long pgdat_start_pfn = pgdat->node_start_pfn;
-       unsigned long p = pgdat_end_pfn(pgdat); /* pgdat_end_pfn namespace clash */
-       unsigned long pgdat_end_pfn = p;
-       unsigned long pfn;
-       struct mem_section *ms;
-       int nid = pgdat->node_id;
-
-       if (pgdat_start_pfn == start_pfn) {
-               /*
-                * If the section is smallest section in the pgdat, it need
-                * shrink pgdat->node_start_pfn and pgdat->node_spanned_pages.
-                * In this case, we find second smallest valid mem_section
-                * for shrinking zone.
-                */
-               pfn = find_smallest_section_pfn(nid, NULL, end_pfn,
-                                               pgdat_end_pfn);
-               if (pfn) {
-                       pgdat->node_start_pfn = pfn;
-                       pgdat->node_spanned_pages = pgdat_end_pfn - pfn;
-               }
-       } else if (pgdat_end_pfn == end_pfn) {
-               /*
-                * If the section is biggest section in the pgdat, it need
-                * shrink pgdat->node_spanned_pages.
-                * In this case, we find second biggest valid mem_section for
-                * shrinking zone.
-                */
-               pfn = find_biggest_section_pfn(nid, NULL, pgdat_start_pfn,
-                                              start_pfn);
-               if (pfn)
-                       pgdat->node_spanned_pages = pfn - pgdat_start_pfn + 1;
-       }
-
-       /*
-        * If the section is not biggest or smallest mem_section in the pgdat,
-        * it only creates a hole in the pgdat. So in this case, we need not
-        * change the pgdat.
-        * But perhaps, the pgdat has only hole data. Thus it check the pgdat
-        * has only hole or not.
-        */
-       pfn = pgdat_start_pfn;
-       for (; pfn < pgdat_end_pfn; pfn += PAGES_PER_SECTION) {
-               ms = __pfn_to_section(pfn);
-
-               if (unlikely(!valid_section(ms)))
-                       continue;
-
-               if (pfn_to_nid(pfn) != nid)
-                       continue;
+       unsigned long node_start_pfn = 0, node_end_pfn = 0;
+       struct zone *zone;
 
-                /* If the section is current section, it continues the loop */
-               if (start_pfn == pfn)
-                       continue;
+       for (zone = pgdat->node_zones;
+            zone < pgdat->node_zones + MAX_NR_ZONES; zone++) {
+               unsigned long zone_end_pfn = zone->zone_start_pfn +
+                                            zone->spanned_pages;
 
-               /* If we find valid section, we have nothing to do */
-               return;
+               /* No need to lock the zones, they can't change. */
+               if (zone_end_pfn > node_end_pfn)
+                       node_end_pfn = zone_end_pfn;
+               if (zone->zone_start_pfn < node_start_pfn)
+                       node_start_pfn = zone->zone_start_pfn;
        }
 
-       /* The pgdat has no valid section */
-       pgdat->node_start_pfn = 0;
-       pgdat->node_spanned_pages = 0;
+       pgdat->node_start_pfn = node_start_pfn;
+       pgdat->node_spanned_pages = node_end_pfn - node_start_pfn;
 }
 
 static void __remove_zone(struct zone *zone, unsigned long start_pfn)
@@ -535,7 +490,7 @@ static void __remove_zone(struct zone *zone, unsigned long start_pfn)
 
        pgdat_resize_lock(zone->zone_pgdat, &flags);
        shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
-       shrink_pgdat_span(pgdat, start_pfn, start_pfn + nr_pages);
+       update_pgdat_span(pgdat);
        pgdat_resize_unlock(zone->zone_pgdat, &flags);
 }