mm: make deferred init's max threads arch-specific
author     Daniel Jordan <daniel.m.jordan@oracle.com>
           Wed, 3 Jun 2020 22:59:55 +0000 (15:59 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Thu, 4 Jun 2020 03:09:45 +0000 (20:09 -0700)
Using padata during deferred init has only been tested on x86, so for now
limit it to this architecture.

If another arch wants this, it can find the max thread limit that's best
for it and override deferred_page_init_max_threads().
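
[Editor's note, not part of this patch: a minimal sketch of what such an override could look like for a hypothetical architecture. The clamp to at most 4 threads is an invented tuning value; only the prototype in include/linux/memblock.h and the __weak fallback in mm/page_alloc.c come from this commit.]

#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memblock.h>

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/* Strong definition in the arch's mm init code overrides the __weak default. */
int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask)
{
	/*
	 * Hypothetical policy: assume memory bandwidth saturates beyond a
	 * few threads on this platform, but always allow at least one.
	 */
	return clamp_t(int, cpumask_weight(node_cpumask), 1, 4);
}
#endif

Because the generic definition in mm/page_alloc.c is marked __weak, the linker automatically prefers an arch-provided strong symbol; the architecture still has to support CONFIG_DEFERRED_STRUCT_PAGE_INIT for the deferred path to run at all.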

Signed-off-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Tested-by: Josh Triplett <josh@joshtriplett.org>
Cc: Alexander Duyck <alexander.h.duyck@linux.intel.com>
Cc: Alex Williamson <alex.williamson@redhat.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Kirill Tkhai <ktkhai@virtuozzo.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Pavel Machek <pavel@ucw.cz>
Cc: Pavel Tatashin <pasha.tatashin@soleen.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Robert Elliott <elliott@hpe.com>
Cc: Shile Zhang <shile.zhang@linux.alibaba.com>
Cc: Steffen Klassert <steffen.klassert@secunet.com>
Cc: Steven Sistare <steven.sistare@oracle.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Zi Yan <ziy@nvidia.com>
Link: http://lkml.kernel.org/r/20200527173608.2885243-8-daniel.m.jordan@oracle.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
arch/x86/mm/init_64.c
include/linux/memblock.h
mm/page_alloc.c

diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 96274a90c5ff7ea8ba20a8787a6fb86b1db43a1b..e08f1007f776170fb5a7d6efbf3da24b27447b47 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1265,6 +1265,18 @@ void __init mem_init(void)
        mem_init_print_info(NULL);
 }
 
+#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
+int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask)
+{
+       /*
+        * More CPUs always led to greater speedups on tested systems, up to
+        * all the nodes' CPUs.  Use all since the system is otherwise idle
+        * now.
+        */
+       return max_t(int, cpumask_weight(node_cpumask), 1);
+}
+#endif
+
 int kernel_set_to_readonly;
 
 void mark_rodata_ro(void)
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 45abfc54da37f9c7f5d98b1db57cc991195a2989..807ab9daf0cd2c3b95a8dd445ea0ee41a557dabf 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -273,6 +273,9 @@ void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
 #define for_each_free_mem_pfn_range_in_zone_from(i, zone, p_start, p_end) \
        for (; i != U64_MAX;                                      \
             __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))
+
+int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask);
+
 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
 
 /**
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 27ec5dc4db33676bb34d93ee2d97c10d100959e0..fb9dec1c19768389024769c4b36760c6ea5eef11 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1836,6 +1836,13 @@ deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn,
        }
 }
 
+/* An arch may override for more concurrency. */
+__weak int __init
+deferred_page_init_max_threads(const struct cpumask *node_cpumask)
+{
+       return 1;
+}
+
 /* Initialise remaining memory on a node */
 static int __init deferred_init_memmap(void *data)
 {
@@ -1884,11 +1891,7 @@ static int __init deferred_init_memmap(void *data)
                                                 first_init_pfn))
                goto zone_empty;
 
-       /*
-        * More CPUs always led to greater speedups on tested systems, up to
-        * all the nodes' CPUs.  Use all since the system is otherwise idle now.
-        */
-       max_threads = max(cpumask_weight(cpumask), 1u);
+       max_threads = deferred_page_init_max_threads(cpumask);
 
        while (spfn < epfn) {
                unsigned long epfn_align = ALIGN(epfn, PAGES_PER_SECTION);