mm, sparse: drop pgdat_resize_lock in sparse_add/remove_one_section()
author    Wei Yang <richard.weiyang@gmail.com>
          Tue, 28 Jan 2020 09:49:59 +0000 (10:49 +0100)
committer Khalid Elmously <khalid.elmously@canonical.com>
          Fri, 6 Mar 2020 07:13:20 +0000 (02:13 -0500)
BugLink: https://bugs.launchpad.net/bugs/1864261
commit 83af658898cb292a32d8b6cd9b51266d7cfc4b6a upstream.

pgdat_resize_lock is used to protect pgdat's memory region information,
like node_start_pfn, node_present_pages, etc.  In
sparse_add/remove_one_section(), however, pgdat_resize_lock is taken to
protect the initialization/release of one mem_section.  That is not
what this lock is meant for.
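
For reference, the kind of update the lock is actually meant to
serialize looks roughly like the following sketch (modelled on
resize_pgdat_range()/move_pfn_range_to_zone() in mm/memory_hotplug.c,
not quoted verbatim from this tree):

    static void resize_pgdat_range(struct pglist_data *pgdat,
                                   unsigned long start_pfn,
                                   unsigned long nr_pages)
    {
            unsigned long old_end_pfn = pgdat_end_pfn(pgdat);

            /* node_start_pfn/node_spanned_pages are what the lock guards */
            if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
                    pgdat->node_start_pfn = start_pfn;
            pgdat->node_spanned_pages = max(start_pfn + nr_pages, old_end_pfn) -
                                        pgdat->node_start_pfn;
    }

    /* caller side */
    pgdat_resize_lock(pgdat, &flags);
    resize_pgdat_range(pgdat, start_pfn, nr_pages);
    pgdat_resize_unlock(pgdat, &flags);

sparse_add/remove_one_section() touch none of these fields; they only
set up or tear down a single mem_section.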

These code paths are already protected by mem_hotplug_lock; should
there ever be any reason for locking at the sparse layer, a dedicated
lock should be introduced.

Following are the current call traces of sparse_add/remove_one_section():

    mem_hotplug_begin()
    arch_add_memory()
       add_pages()
           __add_pages()
               __add_section()
                   sparse_add_one_section()
    mem_hotplug_done()

    mem_hotplug_begin()
    arch_remove_memory()
        __remove_pages()
            __remove_section()
                sparse_remove_one_section()
    mem_hotplug_done()
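
Both paths are bracketed by mem_hotplug_begin()/mem_hotplug_done(),
i.e. they already run under mem_hotplug_lock.  A rough sketch of the
caller side (modelled on add_memory_resource() in mm/memory_hotplug.c;
error handling is omitted and the exact arch_add_memory() argument
list varies between kernel versions):

    mem_hotplug_begin();            /* takes mem_hotplug_lock for write */

    rc = arch_add_memory(nid, start, size, true);
    /* -> __add_pages() -> __add_section() -> sparse_add_one_section() */

    mem_hotplug_done();             /* drops mem_hotplug_lock */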

The comment above pgdat_resize_lock also mentions "Holding this will
also guarantee that any pfn_valid() stays that way."  That guarantee
holds with the current implementation but would no longer hold after
this patch.  However, there aren't any pfn walkers that take the lock
to rely on it, so this looks like a relic from the past.  This patch
removes that part of the comment as well.
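
For context, pgdat_resize_lock()/pgdat_resize_unlock() are thin
wrappers around the node's node_size_lock spinlock, roughly as defined
in include/linux/memory_hotplug.h:

    static inline
    void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
    {
            spin_lock_irqsave(&pgdat->node_size_lock, *flags);
    }

    static inline
    void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
    {
            spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
    }

So, as a side effect, the patch also stops sparse_add/remove_one_section()
from holding an IRQ-disabling spinlock around per-section setup and
teardown.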

[richard.weiyang@gmail.com: v4]
Link: http://lkml.kernel.org/r/20181204085657.20472-1-richard.weiyang@gmail.com
[mhocko@suse.com: changelog suggestion]
Link: http://lkml.kernel.org/r/20181128091243.19249-1-richard.weiyang@gmail.com
Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Oscar Salvador <osalvador@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Kamal Mostafa <kamal@canonical.com>
Signed-off-by: Khalid Elmously <khalid.elmously@canonical.com>
include/linux/mmzone.h
mm/sparse.c

index 953af0232023fff4c357500894e3bde31d7a8769..9df03d1afcbf0aaeced6ebf9ffd3fc54289e7da6 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -637,8 +637,7 @@ typedef struct pglist_data {
 #ifdef CONFIG_MEMORY_HOTPLUG
        /*
         * Must be held any time you expect node_start_pfn, node_present_pages
-        * or node_spanned_pages stay constant.  Holding this will also
-        * guarantee that any pfn_valid() stays that way.
+        * or node_spanned_pages stay constant.
         *
         * pgdat_resize_lock() and pgdat_resize_unlock() are provided to
         * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG.
index f4b9087b2d4b9caa051b10ee9cf7fbd1d4159471..02d03325767da1d58f7d09b35b67c47cffdc6f80 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -779,7 +779,6 @@ int __meminit sparse_add_one_section(struct pglist_data *pgdat, unsigned long st
        struct mem_section *ms;
        struct page *memmap;
        unsigned long *usemap;
-       unsigned long flags;
        int ret;
 
        /*
@@ -798,8 +797,6 @@ int __meminit sparse_add_one_section(struct pglist_data *pgdat, unsigned long st
                return -ENOMEM;
        }
 
-       pgdat_resize_lock(pgdat, &flags);
-
        ms = __pfn_to_section(start_pfn);
        if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
                ret = -EEXIST;
@@ -813,7 +810,6 @@ int __meminit sparse_add_one_section(struct pglist_data *pgdat, unsigned long st
        ret = sparse_init_one_section(ms, section_nr, memmap, usemap);
 
 out:
-       pgdat_resize_unlock(pgdat, &flags);
        if (ret <= 0) {
                kfree(usemap);
                __kfree_section_memmap(memmap);
@@ -874,10 +870,8 @@ void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
                unsigned long map_offset)
 {
        struct page *memmap = NULL;
-       unsigned long *usemap = NULL, flags;
-       struct pglist_data *pgdat = zone->zone_pgdat;
+       unsigned long *usemap = NULL;
 
-       pgdat_resize_lock(pgdat, &flags);
        if (ms->section_mem_map) {
                usemap = ms->pageblock_flags;
                memmap = sparse_decode_mem_map(ms->section_mem_map,
@@ -885,7 +879,6 @@ void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
                ms->section_mem_map = 0;
                ms->pageblock_flags = NULL;
        }
-       pgdat_resize_unlock(pgdat, &flags);
 
        clear_hwpoisoned_pages(memmap + map_offset,
                        PAGES_PER_SECTION - map_offset);