/*
 * NUMA support, based on the x86 implementation.
 *
 * Copyright (C) 2015 Cavium Inc.
 * Author: Ganapatrao Kulkarni <gkulkarni@cavium.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) "NUMA: " fmt

#include <linux/acpi.h>
#include <linux/memblock.h>
#include <linux/module.h>
#include <linux/of.h>

#include <asm/acpi.h>
#include <asm/sections.h>

struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);
nodemask_t numa_nodes_parsed __initdata;
static int cpu_to_node_map[NR_CPUS] = { [0 ... NR_CPUS-1] = NUMA_NO_NODE };

static int numa_distance_cnt;
static u8 *numa_distance;
bool numa_off;

static __init int numa_parse_early_param(char *opt)
{
	if (!opt)
		return -EINVAL;
	if (!strncmp(opt, "off", 3))
		numa_off = true;

	return 0;
}
early_param("numa", numa_parse_early_param);
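
/*
 * Note: passing "numa=off" on the kernel command line sets numa_off, which
 * makes early_map_cpu_to_node() below force every CPU onto node 0 and makes
 * arm64_numa_init() skip the ACPI/DT parsers and fall straight through to
 * dummy_numa_init(), i.e. a single fake node covering all of memory.
 */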

cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
EXPORT_SYMBOL(node_to_cpumask_map);

#ifdef CONFIG_DEBUG_PER_CPU_MAPS

/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
const struct cpumask *cpumask_of_node(int node)
{
	if (WARN_ON(node >= nr_node_ids))
		return cpu_none_mask;

	if (WARN_ON(node_to_cpumask_map[node] == NULL))
		return cpu_online_mask;

	return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(cpumask_of_node);

#endif

static void numa_update_cpu(unsigned int cpu, bool remove)
{
	int nid = cpu_to_node(cpu);

	if (nid == NUMA_NO_NODE)
		return;

	if (remove)
		cpumask_clear_cpu(cpu, node_to_cpumask_map[nid]);
	else
		cpumask_set_cpu(cpu, node_to_cpumask_map[nid]);
}

void numa_add_cpu(unsigned int cpu)
{
	numa_update_cpu(cpu, false);
}

void numa_remove_cpu(unsigned int cpu)
{
	numa_update_cpu(cpu, true);
}

void numa_clear_node(unsigned int cpu)
{
	numa_remove_cpu(cpu);
	set_cpu_numa_node(cpu, NUMA_NO_NODE);
}

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
 */
static void __init setup_node_to_cpumask_map(void)
{
	int node;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES)
		setup_nr_node_ids();

	/* allocate and clear the mapping */
	for (node = 0; node < nr_node_ids; node++) {
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);
		cpumask_clear(node_to_cpumask_map[node]);
	}

	/* cpumask_of_node() will now work */
	pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids);
}
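
/*
 * The per-node cpumasks allocated above start out empty; numa_add_cpu() and
 * numa_remove_cpu() keep them in sync as CPUs are brought online and taken
 * offline, so cpumask_of_node() only reflects CPUs that have been added.
 */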

/*
 * Set the cpu to node and mem mapping
 */
void numa_store_cpu_info(unsigned int cpu)
{
	set_cpu_numa_node(cpu, cpu_to_node_map[cpu]);
}

void __init early_map_cpu_to_node(unsigned int cpu, int nid)
{
	/* fallback to node 0 */
	if (nid < 0 || nid >= MAX_NUMNODES || numa_off)
		nid = 0;

	cpu_to_node_map[cpu] = nid;

	/*
	 * We should set the numa node of cpu0 as soon as possible, because it
	 * has already been set up online before. cpu_to_node(0) will soon be
	 * called.
	 */
	if (!cpu)
		set_cpu_numa_node(cpu, nid);
}

#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

static int __init early_cpu_to_node(int cpu)
{
	return cpu_to_node_map[cpu];
}

static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	return node_distance(early_cpu_to_node(from), early_cpu_to_node(to));
}

static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size,
				   size_t align)
{
	int nid = early_cpu_to_node(cpu);

	return memblock_alloc_try_nid(size, align,
			__pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}

static void __init pcpu_fc_free(void *ptr, size_t size)
{
	memblock_free_early(__pa(ptr), size);
}

void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Always reserve area for module percpu variables.  That's
	 * what the legacy allocator did.
	 */
	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
				    pcpu_cpu_distance,
				    pcpu_fc_alloc, pcpu_fc_free);
	if (rc < 0)
		panic("Failed to initialize percpu areas.");

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}
#endif
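
/*
 * Rough sketch of what the offsets computed above are for: the generic
 * per-CPU accessors resolve per_cpu(var, cpu) to approximately
 * *(typeof(var) *)((unsigned long)&var + __per_cpu_offset[cpu]), so each
 * possible CPU ends up with its own copy of the static per-CPU data inside
 * the chunk placed by pcpu_embed_first_chunk(), ideally on its local node
 * thanks to pcpu_fc_alloc() passing the node id to memblock.
 */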

/**
 * numa_add_memblk() - Set node id to memblk
 * @nid: NUMA node ID of the new memblk
 * @start: Start address of the new memblk
 * @end:  End address of the new memblk
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init numa_add_memblk(int nid, u64 start, u64 end)
{
	int ret;

	ret = memblock_set_node(start, (end - start), &memblock.memory, nid);
	if (ret < 0) {
		pr_err("memblock [0x%llx - 0x%llx] failed to add on node %d\n",
			start, (end - 1), nid);
		return ret;
	}

	node_set(nid, numa_nodes_parsed);
	return ret;
}
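
/*
 * Note: numa_add_memblk() is the hook the firmware parsers use to hand
 * memory ranges to this code; on arm64 that is typically the ACPI SRAT
 * parser or the devicetree "numa-node-id" parsing done by of_numa_init().
 */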

/*
 * Initialize NODE_DATA for a node on the local memory
 */
static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
{
	const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES);
	u64 nd_pa;
	void *nd;
	int tnid;

	if (start_pfn >= end_pfn)
		pr_info("Initmem setup node %d [<memory-less node>]\n", nid);

	nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
	if (!nd_pa)
		panic("Cannot allocate %zu bytes for node %d data\n",
		      nd_size, nid);
	nd = __va(nd_pa);

	/* report and initialize */
	pr_info("NODE_DATA [mem %#010Lx-%#010Lx]\n",
		nd_pa, nd_pa + nd_size - 1);
	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
	if (tnid != nid)
		pr_info("NODE_DATA(%d) on node %d\n", nid, tnid);

	node_data[nid] = nd;
	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
	NODE_DATA(nid)->node_id = nid;
	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
}

/*
 * The current distance table is freed.
 */
void __init numa_free_distance(void)
{
	size_t size;

	if (!numa_distance)
		return;

	size = numa_distance_cnt * numa_distance_cnt *
		sizeof(numa_distance[0]);

	memblock_free(__pa(numa_distance), size);
	numa_distance_cnt = 0;
	numa_distance = NULL;
}

/*
 * Create a new NUMA distance table.
 */
static int __init numa_alloc_distance(void)
{
	size_t size;
	u64 phys;
	int i, j;

	size = nr_node_ids * nr_node_ids * sizeof(numa_distance[0]);
	phys = memblock_find_in_range(0, PFN_PHYS(max_pfn),
				      size, PAGE_SIZE);
	if (WARN_ON(!phys))
		return -ENOMEM;

	memblock_reserve(phys, size);

	numa_distance = __va(phys);
	numa_distance_cnt = nr_node_ids;

	/* fill with the default distances */
	for (i = 0; i < numa_distance_cnt; i++)
		for (j = 0; j < numa_distance_cnt; j++)
			numa_distance[i * numa_distance_cnt + j] = i == j ?
				LOCAL_DISTANCE : REMOTE_DISTANCE;

	pr_debug("Initialized distance table, cnt=%d\n", numa_distance_cnt);

	return 0;
}

/**
 * numa_set_distance() - Set inter node NUMA distance from node to node.
 * @from: the 'from' node to set distance
 * @to: the 'to' node to set distance
 * @distance: NUMA distance
 *
 * Set the distance from node @from to @to to @distance.
 * If distance table doesn't exist, a warning is printed.
 *
 * If @from or @to is higher than the highest known node or lower than zero
 * or @distance doesn't make sense, the call is ignored.
 */
void __init numa_set_distance(int from, int to, int distance)
{
	if (!numa_distance) {
		pr_warn_once("Warning: distance table not allocated yet\n");
		return;
	}

	if (from >= numa_distance_cnt || to >= numa_distance_cnt ||
			from < 0 || to < 0) {
		pr_warn_once("Warning: node ids are out of bound, from=%d to=%d distance=%d\n",
			     from, to, distance);
		return;
	}

	if ((u8)distance != distance ||
	    (from == to && distance != LOCAL_DISTANCE)) {
		pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
			     from, to, distance);
		return;
	}

	numa_distance[from * numa_distance_cnt + to] = distance;
}
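
/*
 * In practice the distances stored here come from firmware: the ACPI SLIT
 * parser or the devicetree "distance-map" parsing calls numa_set_distance()
 * for each (from, to) pair while numa_init() runs the platform init_func,
 * overriding the LOCAL_DISTANCE/REMOTE_DISTANCE defaults set above.
 */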

/*
 * Return NUMA distance @from to @to
 */
int __node_distance(int from, int to)
{
	if (from >= numa_distance_cnt || to >= numa_distance_cnt)
		return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
	return numa_distance[from * numa_distance_cnt + to];
}
EXPORT_SYMBOL(__node_distance);
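
/*
 * Worked example (illustrative only): on a two-node system left at the
 * defaults, LOCAL_DISTANCE/REMOTE_DISTANCE are normally 10/20, so the
 * flattened row-major numa_distance[] table is { 10, 20, 20, 10 } and
 * __node_distance(0, 1) reads entry 0 * 2 + 1, i.e. 20.
 */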

static int __init numa_register_nodes(void)
{
	int nid;
	struct memblock_region *mblk;

	/* Check that valid nid is set to memblks */
	for_each_memblock(memory, mblk)
		if (mblk->nid == NUMA_NO_NODE || mblk->nid >= MAX_NUMNODES) {
			pr_warn("Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n",
				mblk->nid, mblk->base,
				mblk->base + mblk->size - 1);
			return -EINVAL;
		}

	/* Finally register nodes. */
	for_each_node_mask(nid, numa_nodes_parsed) {
		unsigned long start_pfn, end_pfn;

		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
		setup_node_data(nid, start_pfn, end_pfn);
		node_set_online(nid);
	}

	/* Setup online nodes to actual nodes */
	node_possible_map = numa_nodes_parsed;

	return 0;
}

static int __init numa_init(int (*init_func)(void))
{
	int ret;

	nodes_clear(numa_nodes_parsed);
	nodes_clear(node_possible_map);
	nodes_clear(node_online_map);

	ret = numa_alloc_distance();
	if (ret < 0)
		return ret;

	ret = init_func();
	if (ret < 0)
		goto out_free_distance;

	if (nodes_empty(numa_nodes_parsed)) {
		pr_info("No NUMA configuration found\n");
		ret = -EINVAL;
		goto out_free_distance;
	}

	ret = numa_register_nodes();
	if (ret < 0)
		goto out_free_distance;

	setup_node_to_cpumask_map();

	return 0;

out_free_distance:
	numa_free_distance();
	return ret;
}

/**
 * dummy_numa_init() - Fallback dummy NUMA init
 *
 * Used if there's no underlying NUMA architecture, NUMA initialization
 * fails, or NUMA is disabled on the command line.
 *
 * Must online at least one node (node 0) and add memory blocks that cover all
 * allowed memory. It is unlikely that this function fails.
 *
 * Return: 0 on success, -errno on failure.
 */
static int __init dummy_numa_init(void)
{
	int ret;
	struct memblock_region *mblk;

	if (numa_off)
		pr_info("NUMA disabled\n"); /* Forced off on command line. */
	pr_info("Faking a node at [mem %#018Lx-%#018Lx]\n",
		memblock_start_of_DRAM(), memblock_end_of_DRAM() - 1);

	for_each_memblock(memory, mblk) {
		ret = numa_add_memblk(0, mblk->base, mblk->base + mblk->size);
		if (!ret)
			continue;

		pr_err("NUMA init failed\n");
		return ret;
	}

	numa_off = true;
	return 0;
}

/**
 * arm64_numa_init() - Initialize NUMA
 *
 * Try each configured NUMA initialization method until one succeeds. The
 * last fallback is dummy single node config encompassing whole memory.
 */
void __init arm64_numa_init(void)
{
	if (!numa_off) {
		if (!acpi_disabled && !numa_init(arm64_acpi_numa_init))
			return;
		if (acpi_disabled && !numa_init(of_numa_init))
			return;
	}

	numa_init(dummy_numa_init);
}

/*
 * We hope that we will be hotplugging memory on nodes we already know about,
 * such that acpi_get_node() succeeds and we never fall back to this...
 */
int memory_add_physaddr_to_nid(u64 addr)
{
	pr_warn("Unknown node for memory at 0x%llx, assuming node 0\n", addr);
	return 0;
}