arm64/numa: support HAVE_SETUP_PER_CPU_AREA
Author:     Zhen Lei <thunder.leizhen@huawei.com>
AuthorDate: Thu, 1 Sep 2016 06:55:00 +0000 (14:55 +0800)
Commit:     Will Deacon <will.deacon@arm.com>
CommitDate: Fri, 9 Sep 2016 13:59:09 +0000 (14:59 +0100)
Allocate each percpu area from its local NUMA node. Without this
patch, all percpu areas are allocated from the node that cpu0 belongs
to.

Signed-off-by: Zhen Lei <thunder.leizhen@huawei.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
arch/arm64/Kconfig
arch/arm64/mm/numa.c
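
For orientation: the new setup_per_cpu_areas() in the numa.c hunk below boils
down to one offset per CPU. pcpu_embed_first_chunk() lays out one per-CPU unit
per CPU inside a chunk, and __per_cpu_offset[cpu] is the distance from the
static .data..percpu image at __per_cpu_start to that CPU's unit. Below is a
minimal userspace sketch of just that arithmetic, with malloc() standing in
for the NUMA-aware memblock allocator; NR_CPUS, UNIT_SIZE and demo_counter
are made-up demo values, not the kernel's.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NR_CPUS   4
#define UNIT_SIZE 4096	/* per-CPU unit size, invented for the demo */

static char __per_cpu_start[UNIT_SIZE];	/* stand-in for the .data..percpu image */
static unsigned long __per_cpu_offset[NR_CPUS];

/* A "per-CPU variable": it lives at offset 0 inside the static image. */
static int *demo_counter = (int *)__per_cpu_start;

int main(void)
{
	char *pcpu_base_addr = malloc((size_t)NR_CPUS * UNIT_SIZE);
	unsigned long pcpu_unit_offsets[NR_CPUS];
	unsigned long delta;
	int cpu;

	if (!pcpu_base_addr)
		return 1;

	/* One unit per CPU in a single chunk, each starting out as a copy
	 * of the static image: the layout pcpu_embed_first_chunk() builds. */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		pcpu_unit_offsets[cpu] = (unsigned long)cpu * UNIT_SIZE;
		memcpy(pcpu_base_addr + pcpu_unit_offsets[cpu],
		       __per_cpu_start, UNIT_SIZE);
	}

	/* The same two lines as in the patch below. */
	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];

	/* per_cpu_ptr(p, cpu) then reduces to p + __per_cpu_offset[cpu]. */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		int *p = (int *)((char *)demo_counter + __per_cpu_offset[cpu]);
		*p = cpu;	/* each CPU writes its own copy */
		printf("cpu%d: demo_counter copy at %p = %d\n",
		       cpu, (void *)p, *p);
	}

	free(pcpu_base_addr);
	return 0;
}

The offset math is unchanged by this patch; what the patch changes is where
each unit's backing memory comes from, since pcpu_fc_alloc() passes the owning
CPU's node id to memblock_virt_alloc_try_nid().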

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index e072033b27b20e894c6a4a5152c3171dea5e964e..0e11c8a2aec1bb9190597f507f19552cb8a9ba6f 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -600,6 +600,14 @@ config USE_PERCPU_NUMA_NODE_ID
        def_bool y
        depends on NUMA
 
+config HAVE_SETUP_PER_CPU_AREA
+       def_bool y
+       depends on NUMA
+
+config NEED_PER_CPU_EMBED_FIRST_CHUNK
+       def_bool y
+       depends on NUMA
+
 source kernel/Kconfig.preempt
 source kernel/Kconfig.hz
 
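Both new symbols are hooks consumed by the generic percpu code in mm/percpu.c:
NEED_PER_CPU_EMBED_FIRST_CHUNK causes pcpu_embed_first_chunk() to be built,
and HAVE_SETUP_PER_CPU_AREA compiles out the core's generic
setup_per_cpu_areas() so the arch-specific version added below takes over.
The fallback that stands down is essentially the same routine minus NUMA
awareness; roughly (a simplified sketch from memory of the mm/percpu.c
fallback of this era, not a verbatim excerpt):

/* mm/percpu.c, simplified: built only when the architecture does not
 * select HAVE_SETUP_PER_CPU_AREA. */
#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
void __init setup_per_cpu_areas(void)
{
	...
	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
				    NULL,               /* no cpu_distance_fn */
				    pcpu_dfl_fc_alloc,  /* allocates with no node preference */
				    pcpu_dfl_fc_free);
	...
}
#endif
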
diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c
index 0e75b537de0cce405124ca8d124630cfa5d9342f..087064d5dcc1ba649dc567ca010b69c7053149b0 100644
--- a/arch/arm64/mm/numa.c
+++ b/arch/arm64/mm/numa.c
@@ -26,6 +26,7 @@
 #include <linux/of.h>
 
 #include <asm/acpi.h>
+#include <asm/sections.h>
 
 struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
 EXPORT_SYMBOL(node_data);
@@ -131,6 +132,57 @@ void __init early_map_cpu_to_node(unsigned int cpu, int nid)
        cpu_to_node_map[cpu] = nid;
 }
 
+#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
+unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(__per_cpu_offset);
+
+static int __init early_cpu_to_node(int cpu)
+{
+       return cpu_to_node_map[cpu];
+}
+
+static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
+{
+       return node_distance(from, to);
+}
+
+static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size,
+                                      size_t align)
+{
+       int nid = early_cpu_to_node(cpu);
+
+       return  memblock_virt_alloc_try_nid(size, align,
+                       __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid);
+}
+
+static void __init pcpu_fc_free(void *ptr, size_t size)
+{
+       memblock_free_early(__pa(ptr), size);
+}
+
+void __init setup_per_cpu_areas(void)
+{
+       unsigned long delta;
+       unsigned int cpu;
+       int rc;
+
+       /*
+        * Always reserve area for module percpu variables.  That's
+        * what the legacy allocator did.
+        */
+       rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
+                                   PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
+                                   pcpu_cpu_distance,
+                                   pcpu_fc_alloc, pcpu_fc_free);
+       if (rc < 0)
+               panic("Failed to initialize percpu areas.");
+
+       delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
+       for_each_possible_cpu(cpu)
+               __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
+}
+#endif
+
 /**
  * numa_add_memblk - Set node id to memblk
  * @nid: NUMA node ID of the new memblk