// SPDX-License-Identifier: GPL-2.0-only
/* Common code for 32 and 64-bit NUMA */
#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/nodemask.h>
#include <linux/sched.h>
#include <linux/topology.h>

#include <asm/e820/api.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/amd_nb.h>

#include "numa_internal.h"

int numa_off;
nodemask_t numa_nodes_parsed __initdata;

struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);

static struct numa_meminfo numa_meminfo __initdata_or_meminfo;
static struct numa_meminfo numa_reserved_meminfo __initdata_or_meminfo;

static int numa_distance_cnt;
static u8 *numa_distance;

static __init int numa_setup(char *opt)
{
	if (!opt)
		return -EINVAL;
	if (!strncmp(opt, "off", 3))
		numa_off = 1;
	if (!strncmp(opt, "fake=", 5))
		return numa_emu_cmdline(opt + 5);
	if (!strncmp(opt, "noacpi", 6))
		disable_srat();
	if (!strncmp(opt, "nohmat", 6))
		disable_hmat();
	return 0;
}
early_param("numa", numa_setup);
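
/*
 * Example (illustrative, not in the original file): the parser above
 * handles kernel command-line options such as
 *
 *	numa=off	disable NUMA handling entirely
 *	numa=fake=4	split memory into 4 emulated nodes
 *	numa=noacpi	ignore the ACPI SRAT table
 *	numa=nohmat	ignore the ACPI HMAT table
 *
 * Note that strncmp() only matches prefixes, so e.g. "numa=offish"
 * would also set numa_off.
 */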

/*
 * apicid, cpu, node mappings
 */
s16 __apicid_to_node[MAX_LOCAL_APIC] = {
	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};

int numa_cpu_node(int cpu)
{
	int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);

	if (apicid != BAD_APICID)
		return __apicid_to_node[apicid];
	return NUMA_NO_NODE;
}

cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
EXPORT_SYMBOL(node_to_cpumask_map);

/*
 * Map cpu index to node index
 */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

void numa_set_node(int cpu, int node)
{
	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	/* early setting, no percpu area yet */
	if (cpu_to_node_map) {
		cpu_to_node_map[cpu] = node;
		return;
	}

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
		printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
		dump_stack();
		return;
	}
#endif
	per_cpu(x86_cpu_to_node_map, cpu) = node;

	set_cpu_numa_node(cpu, node);
}

void numa_clear_node(int cpu)
{
	numa_set_node(cpu, NUMA_NO_NODE);
}

/*
 * Allocate node_to_cpumask_map based on number of available nodes.
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
 */
void __init setup_node_to_cpumask_map(void)
{
	unsigned int node;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES)
		setup_nr_node_ids();

	/* allocate the map */
	for (node = 0; node < nr_node_ids; node++)
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

	/* cpumask_of_node() will now work */
	pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids);
}

static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
				     struct numa_meminfo *mi)
{
	/* ignore zero length blks */
	if (start == end)
		return 0;

	/* whine about and ignore invalid blks */
	if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
		pr_warn("Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n",
			nid, start, end - 1);
		return 0;
	}

	if (mi->nr_blks >= NR_NODE_MEMBLKS) {
		pr_err("too many memblk ranges\n");
		return -EINVAL;
	}

	mi->blk[mi->nr_blks].start = start;
	mi->blk[mi->nr_blks].end = end;
	mi->blk[mi->nr_blks].nid = nid;
	mi->nr_blks++;
	return 0;
}

/**
 * numa_remove_memblk_from - Remove one numa_memblk from a numa_meminfo
 * @idx: Index of memblk to remove
 * @mi: numa_meminfo to remove memblk from
 *
 * Remove @idx'th numa_memblk from @mi by shifting @mi->blk[] and
 * decrementing @mi->nr_blks.
 */
void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi)
{
	mi->nr_blks--;
	memmove(&mi->blk[idx], &mi->blk[idx + 1],
		(mi->nr_blks - idx) * sizeof(mi->blk[0]));
}

/**
 * numa_move_tail_memblk - Move a numa_memblk from one numa_meminfo to another
 * @dst: numa_meminfo to append block to
 * @idx: Index of memblk to remove
 * @src: numa_meminfo to remove memblk from
 */
static void __init numa_move_tail_memblk(struct numa_meminfo *dst, int idx,
					 struct numa_meminfo *src)
{
	dst->blk[dst->nr_blks++] = src->blk[idx];
	numa_remove_memblk_from(idx, src);
}

/**
 * numa_add_memblk - Add one numa_memblk to numa_meminfo
 * @nid: NUMA node ID of the new memblk
 * @start: Start address of the new memblk
 * @end: End address of the new memblk
 *
 * Add a new memblk to the default numa_meminfo.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init numa_add_memblk(int nid, u64 start, u64 end)
{
	return numa_add_memblk_to(nid, start, end, &numa_meminfo);
}
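
/*
 * Illustration (not in the original file): platform code registers
 * ranges with exclusive end addresses.  A hypothetical firmware parser
 * that discovered 2GB on node 0 followed by 2GB on node 1 would do:
 *
 *	numa_add_memblk(0, 0x000000000, 0x080000000);
 *	numa_add_memblk(1, 0x080000000, 0x100000000);
 *
 * Overlaps and holes are sorted out later by numa_cleanup_meminfo().
 */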

/* Allocate NODE_DATA for a node on the local memory */
static void __init alloc_node_data(int nid)
{
	const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
	u64 nd_pa;
	void *nd;
	int tnid;

	/*
	 * Allocate node data.  Try node-local memory and then any node.
	 * Never allocate in DMA zone.
	 */
	nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
	if (!nd_pa) {
		pr_err("Cannot find %zu bytes in any node (initial node: %d)\n",
		       nd_size, nid);
		return;
	}
	nd = __va(nd_pa);

	/* report and initialize */
	printk(KERN_INFO "NODE_DATA(%d) allocated [mem %#010Lx-%#010Lx]\n", nid,
	       nd_pa, nd_pa + nd_size - 1);
	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
	if (tnid != nid)
		printk(KERN_INFO "    NODE_DATA(%d) on node %d\n", nid, tnid);

	node_data[nid] = nd;
	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));

	node_set_online(nid);
}

/**
 * numa_cleanup_meminfo - Cleanup a numa_meminfo
 * @mi: numa_meminfo to clean up
 *
 * Sanitize @mi by merging and removing unnecessary memblks.  Also check for
 * conflicts and clear unused memblks.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
{
	const u64 low = 0;
	const u64 high = PFN_PHYS(max_pfn);
	int i, j, k;

	/* first, trim all entries */
	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *bi = &mi->blk[i];

		/* move / save reserved memory ranges */
		if (!memblock_overlaps_region(&memblock.memory,
					      bi->start, bi->end - bi->start)) {
			numa_move_tail_memblk(&numa_reserved_meminfo, i--, mi);
			continue;
		}

		/* make sure all non-reserved blocks are inside the limits */
		bi->start = max(bi->start, low);
		bi->end = min(bi->end, high);

		/* and there's no empty block */
		if (bi->start >= bi->end)
			numa_remove_memblk_from(i--, mi);
	}

	/* merge neighboring / overlapping entries */
	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *bi = &mi->blk[i];

		for (j = i + 1; j < mi->nr_blks; j++) {
			struct numa_memblk *bj = &mi->blk[j];
			u64 start, end;

			/*
			 * See whether there are overlapping blocks.  Whine
			 * about but allow overlaps of the same nid.  They
			 * will be merged below.
			 */
			if (bi->end > bj->start && bi->start < bj->end) {
				if (bi->nid != bj->nid) {
					pr_err("node %d [mem %#010Lx-%#010Lx] overlaps with node %d [mem %#010Lx-%#010Lx]\n",
					       bi->nid, bi->start, bi->end - 1,
					       bj->nid, bj->start, bj->end - 1);
					return -EINVAL;
				}
				pr_warn("Warning: node %d [mem %#010Lx-%#010Lx] overlaps with itself [mem %#010Lx-%#010Lx]\n",
					bi->nid, bi->start, bi->end - 1,
					bj->start, bj->end - 1);
			}

			/*
			 * Join together blocks on the same node, holes
			 * between which don't overlap with memory on other
			 * nodes.
			 */
			if (bi->nid != bj->nid)
				continue;
			start = min(bi->start, bj->start);
			end = max(bi->end, bj->end);
			for (k = 0; k < mi->nr_blks; k++) {
				struct numa_memblk *bk = &mi->blk[k];

				if (bi->nid == bk->nid)
					continue;
				if (start < bk->end && end > bk->start)
					break;
			}
			if (k < mi->nr_blks)
				continue;
			printk(KERN_INFO "NUMA: Node %d [mem %#010Lx-%#010Lx] + [mem %#010Lx-%#010Lx] -> [mem %#010Lx-%#010Lx]\n",
			       bi->nid, bi->start, bi->end - 1, bj->start,
			       bj->end - 1, start, end - 1);
			bi->start = start;
			bi->end = end;
			numa_remove_memblk_from(j--, mi);
		}
	}

	/* clear unused ones */
	for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) {
		mi->blk[i].start = mi->blk[i].end = 0;
		mi->blk[i].nid = NUMA_NO_NODE;
	}

	return 0;
}
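
/*
 * Worked example (illustrative): given two same-node entries
 * { nid=0: [0x0, 0x8000) } and { nid=0: [0x8000, 0x10000) }, the merge
 * pass above joins them into { nid=0: [0x0, 0x10000) } because no other
 * node's memory falls inside the combined span.  The same two ranges on
 * different nids would be left alone, and a partial overlap across nids
 * would make the function fail with -EINVAL.
 */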

/*
 * Mark the nodes that have memory in @mi in *@nodemask.
 */
static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask,
					      const struct numa_meminfo *mi)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mi->blk); i++)
		if (mi->blk[i].start != mi->blk[i].end &&
		    mi->blk[i].nid != NUMA_NO_NODE)
			node_set(mi->blk[i].nid, *nodemask);
}

/**
 * numa_reset_distance - Reset NUMA distance table
 *
 * The current table is freed.  The next numa_set_distance() call will
 * create a new one.
 */
void __init numa_reset_distance(void)
{
	size_t size = numa_distance_cnt * numa_distance_cnt * sizeof(numa_distance[0]);

	/* numa_distance could be 1LU marking allocation failure, test cnt */
	if (numa_distance_cnt)
		memblock_free(__pa(numa_distance), size);
	numa_distance_cnt = 0;
	numa_distance = NULL;	/* enable table creation */
}

static int __init numa_alloc_distance(void)
{
	nodemask_t nodes_parsed;
	size_t size;
	int i, j, cnt = 0;
	u64 phys;

	/* size the new table and allocate it */
	nodes_parsed = numa_nodes_parsed;
	numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo);

	for_each_node_mask(i, nodes_parsed)
		cnt = i;
	cnt++;
	size = cnt * cnt * sizeof(numa_distance[0]);

	phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
				      size, PAGE_SIZE);
	if (!phys) {
		pr_warn("Warning: can't allocate distance table!\n");
		/* don't retry until explicitly reset */
		numa_distance = (void *)1LU;
		return -ENOMEM;
	}
	memblock_reserve(phys, size);

	numa_distance = __va(phys);
	numa_distance_cnt = cnt;

	/* fill with the default distances */
	for (i = 0; i < cnt; i++)
		for (j = 0; j < cnt; j++)
			numa_distance[i * cnt + j] = i == j ?
				LOCAL_DISTANCE : REMOTE_DISTANCE;
	printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt);

	return 0;
}
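
/*
 * Layout note (illustrative): the table is a flat row-major cnt x cnt
 * matrix of u8, indexed as numa_distance[from * cnt + to].  With two
 * nodes and the defaults above it would look like:
 *
 *	{ LOCAL_DISTANCE,  REMOTE_DISTANCE,	// 10, 20
 *	  REMOTE_DISTANCE, LOCAL_DISTANCE }	// 20, 10
 */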

/**
 * numa_set_distance - Set NUMA distance from one NUMA node to another
 * @from: the 'from' node to set distance
 * @to: the 'to' node to set distance
 * @distance: NUMA distance
 *
 * Set the distance from node @from to @to to @distance.  If the distance
 * table doesn't exist, one which is large enough to accommodate all the
 * currently known nodes will be created.
 *
 * If such a table cannot be allocated, a warning is printed and further
 * calls are ignored until the distance table is reset with
 * numa_reset_distance().
 *
 * If @from or @to is higher than the highest known node or lower than zero
 * at the time of table creation or @distance doesn't make sense, the call
 * is ignored.
 * This is to allow simplification of specific NUMA config implementations.
 */
void __init numa_set_distance(int from, int to, int distance)
{
	if (!numa_distance && numa_alloc_distance() < 0)
		return;

	if (from >= numa_distance_cnt || to >= numa_distance_cnt ||
	    from < 0 || to < 0) {
		pr_warn_once("Warning: node ids are out of bound, from=%d to=%d distance=%d\n",
			     from, to, distance);
		return;
	}

	if ((u8)distance != distance ||
	    (from == to && distance != LOCAL_DISTANCE)) {
		pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
			     from, to, distance);
		return;
	}

	numa_distance[from * numa_distance_cnt + to] = distance;
}
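
/*
 * Example (illustrative): an ACPI SLIT parser would feed entries
 * through here, e.g. numa_set_distance(0, 1, 21) for a remote access
 * cost of 2.1x the local cost (SLIT values are relative to a local
 * cost of 10).  Values must fit in a u8, and a node's distance to
 * itself must stay LOCAL_DISTANCE, or the call is dropped with a
 * warning.
 */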

int __node_distance(int from, int to)
{
	if (from >= numa_distance_cnt || to >= numa_distance_cnt)
		return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
	return numa_distance[from * numa_distance_cnt + to];
}
EXPORT_SYMBOL(__node_distance);

/*
 * Sanity check to catch more bad NUMA configurations (they are amazingly
 * common).  Make sure the nodes cover all memory.
 */
static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
{
	u64 numaram, e820ram;
	int i;

	numaram = 0;
	for (i = 0; i < mi->nr_blks; i++) {
		u64 s = mi->blk[i].start >> PAGE_SHIFT;
		u64 e = mi->blk[i].end >> PAGE_SHIFT;
		numaram += e - s;
		numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e);
		if ((s64)numaram < 0)
			numaram = 0;
	}

	e820ram = max_pfn - absent_pages_in_range(0, max_pfn);

	/* We seem to lose 3 pages somewhere.  Allow 1M of slack. */
	if ((s64)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) {
		printk(KERN_ERR "NUMA: nodes only cover %LuMB of your %LuMB e820 RAM. Not used.\n",
		       (numaram << PAGE_SHIFT) >> 20,
		       (e820ram << PAGE_SHIFT) >> 20);
		return false;
	}
	return true;
}
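
/*
 * Arithmetic note (illustrative): with 4K pages (PAGE_SHIFT == 12),
 * the slack threshold 1 << (20 - PAGE_SHIFT) is 256 pages, i.e. 1MB.
 * The NUMA config is rejected only when e820 reports at least 1MB more
 * usable RAM than the node map accounts for.
 */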

/*
 * Mark all currently memblock-reserved physical memory (which covers the
 * kernel's own memory ranges) as hot-unswappable.
 */
static void __init numa_clear_kernel_node_hotplug(void)
{
	nodemask_t reserved_nodemask = NODE_MASK_NONE;
	struct memblock_region *mb_region;
	int i;

	/*
	 * We have to do some preprocessing of memblock regions, to
	 * make them suitable for reservation.
	 *
	 * At this time, all memory regions reserved by memblock are
	 * used by the kernel, but those regions are not split up
	 * along node boundaries yet, and don't necessarily have their
	 * node ID set yet either.
	 *
	 * So iterate over all memory known to the x86 architecture,
	 * and use those ranges to set the nid in memblock.reserved.
	 * This will split up the memblock regions along node
	 * boundaries and will set the node IDs as well.
	 */
	for (i = 0; i < numa_meminfo.nr_blks; i++) {
		struct numa_memblk *mb = numa_meminfo.blk + i;
		int ret;

		ret = memblock_set_node(mb->start, mb->end - mb->start, &memblock.reserved, mb->nid);
		WARN_ON_ONCE(ret);
	}

	/*
	 * Now go over all reserved memblock regions, to construct a
	 * node mask of all kernel reserved memory areas.
	 *
	 * [ Note, when booting with mem=nn[kMG] or in a kdump kernel,
	 *   numa_meminfo might not include all memblock.reserved
	 *   memory ranges, because quirks such as trim_snb_memory()
	 *   reserve specific pages for Sandy Bridge graphics. ]
	 */
	for_each_reserved_mem_region(mb_region) {
		int nid = memblock_get_region_node(mb_region);

		if (nid != MAX_NUMNODES)
			node_set(nid, reserved_nodemask);
	}

	/*
	 * Finally, clear the MEMBLOCK_HOTPLUG flag for all memory
	 * belonging to the reserved node mask.
	 *
	 * Note that this will include memory regions that reside
	 * on nodes that contain kernel memory - entire nodes
	 * become hot-unpluggable:
	 */
	for (i = 0; i < numa_meminfo.nr_blks; i++) {
		struct numa_memblk *mb = numa_meminfo.blk + i;

		if (!node_isset(mb->nid, reserved_nodemask))
			continue;

		memblock_clear_hotplug(mb->start, mb->end - mb->start);
	}
}

static int __init numa_register_memblks(struct numa_meminfo *mi)
{
	int i, nid;

	/* Account for nodes with cpus and no memory */
	node_possible_map = numa_nodes_parsed;
	numa_nodemask_from_meminfo(&node_possible_map, mi);
	if (WARN_ON(nodes_empty(node_possible_map)))
		return -EINVAL;

	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *mb = &mi->blk[i];
		memblock_set_node(mb->start, mb->end - mb->start,
				  &memblock.memory, mb->nid);
	}

	/*
	 * Very early on, the kernel has to use some memory, for example
	 * to load the kernel image.  We cannot prevent this anyway, so
	 * any node the kernel resides in should be un-hotpluggable.
	 *
	 * By the time we get here, allocating node data won't fail.
	 */
	numa_clear_kernel_node_hotplug();

	/*
	 * If the sections array is going to be used for pfn -> nid
	 * mapping, check whether its granularity is fine enough.
	 */
	if (IS_ENABLED(NODE_NOT_IN_PAGE_FLAGS)) {
		unsigned long pfn_align = node_map_pfn_alignment();

		if (pfn_align && pfn_align < PAGES_PER_SECTION) {
			pr_warn("Node alignment %LuMB < min %LuMB, rejecting NUMA config\n",
				PFN_PHYS(pfn_align) >> 20,
				PFN_PHYS(PAGES_PER_SECTION) >> 20);
			return -EINVAL;
		}
	}
	if (!numa_meminfo_cover_memory(mi))
		return -EINVAL;

	/* Finally register nodes. */
	for_each_node_mask(nid, node_possible_map) {
		u64 start = PFN_PHYS(max_pfn);
		u64 end = 0;

		for (i = 0; i < mi->nr_blks; i++) {
			if (nid != mi->blk[i].nid)
				continue;
			start = min(mi->blk[i].start, start);
			end = max(mi->blk[i].end, end);
		}

		if (start >= end)
			continue;

		/*
		 * Don't confuse VM with a node that doesn't have the
		 * minimum amount of memory:
		 */
		if (end && (end - start) < NODE_MIN_SIZE)
			continue;

		alloc_node_data(nid);
	}

	/* Dump memblock with node info and return. */
	memblock_dump_all();
	return 0;
}

/*
 * There are unfortunately some poorly designed mainboards around that
 * only connect memory to a single CPU.  This breaks the 1:1 cpu->node
 * mapping.  To avoid this, fill in the mapping for all possible CPUs,
 * as the number of CPUs is not known yet.  We round robin the existing
 * nodes.
 */
static void __init numa_init_array(void)
{
	int rr, i;

	rr = first_node(node_online_map);
	for (i = 0; i < nr_cpu_ids; i++) {
		if (early_cpu_to_node(i) != NUMA_NO_NODE)
			continue;
		numa_set_node(i, rr);
		rr = next_node_in(rr, node_online_map);
	}
}
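
/*
 * Illustration (not in the original file): with online nodes {0, 1}
 * and four CPUs that have no node assigned yet, the loop above sets
 * cpu0->node0, cpu1->node1, cpu2->node0, cpu3->node1, since
 * next_node_in() wraps around the online node mask.
 */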

static int __init numa_init(int (*init_func)(void))
{
	int i;
	int ret;

	for (i = 0; i < MAX_LOCAL_APIC; i++)
		set_apicid_to_node(i, NUMA_NO_NODE);

	nodes_clear(numa_nodes_parsed);
	nodes_clear(node_possible_map);
	nodes_clear(node_online_map);
	memset(&numa_meminfo, 0, sizeof(numa_meminfo));
	WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.memory,
				  MAX_NUMNODES));
	WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.reserved,
				  MAX_NUMNODES));
	/* In case parsing SRAT failed. */
	WARN_ON(memblock_clear_hotplug(0, ULLONG_MAX));
	numa_reset_distance();

	ret = init_func();
	if (ret < 0)
		return ret;

	/*
	 * We reset memblock back to the top-down direction here because
	 * if we configured ACPI_NUMA, we have parsed SRAT in
	 * init_func().  It is OK to do the reset here even if we didn't
	 * configure ACPI_NUMA, or if ACPI NUMA init fails and falls
	 * back to dummy NUMA init.
	 */
	memblock_set_bottom_up(false);

	ret = numa_cleanup_meminfo(&numa_meminfo);
	if (ret < 0)
		return ret;

	numa_emulation(&numa_meminfo, numa_distance_cnt);

	ret = numa_register_memblks(&numa_meminfo);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_cpu_ids; i++) {
		int nid = early_cpu_to_node(i);

		if (nid == NUMA_NO_NODE)
			continue;
		if (!node_online(nid))
			numa_clear_node(i);
	}
	numa_init_array();

	return 0;
}

/**
 * dummy_numa_init - Fallback dummy NUMA init
 *
 * Used if there's no underlying NUMA architecture, NUMA initialization
 * fails, or NUMA is disabled on the command line.
 *
 * Must online at least one node and add memory blocks that cover all
 * allowed memory.  This function must not fail.
 */
static int __init dummy_numa_init(void)
{
	printk(KERN_INFO "%s\n",
	       numa_off ? "NUMA turned off" : "No NUMA configuration found");
	printk(KERN_INFO "Faking a node at [mem %#018Lx-%#018Lx]\n",
	       0LLU, PFN_PHYS(max_pfn) - 1);

	node_set(0, numa_nodes_parsed);
	numa_add_memblk(0, 0, PFN_PHYS(max_pfn));

	return 0;
}

/**
 * x86_numa_init - Initialize NUMA
 *
 * Try each configured NUMA initialization method until one succeeds.  The
 * last fallback is a dummy single-node config encompassing the whole
 * memory, which never fails.
 */
void __init x86_numa_init(void)
{
	if (!numa_off) {
#ifdef CONFIG_ACPI_NUMA
		if (!numa_init(x86_acpi_numa_init))
			return;
#endif
#ifdef CONFIG_AMD_NUMA
		if (!numa_init(amd_numa_init))
			return;
#endif
	}

	numa_init(dummy_numa_init);
}

static void __init init_memory_less_node(int nid)
{
	/* Allocate and initialize node data.  Memory-less node is now online. */
	alloc_node_data(nid);
	free_area_init_memoryless_node(nid);

	/*
	 * All zonelists will be built later in start_kernel() after per cpu
	 * areas are initialized.
	 */
}

/*
 * A node may exist which has one or more Generic Initiators but no CPUs and no
 * memory.
 *
 * This function must be called after init_cpu_to_node(), to ensure that any
 * memoryless CPU nodes have already been brought online, and before the
 * node_data[nid] is needed for zone list setup in build_all_zonelists().
 *
 * When this function is called, any nodes containing either memory and/or CPUs
 * will already be online and there is no need to do anything extra, even if
 * they also contain one or more Generic Initiators.
 */
void __init init_gi_nodes(void)
{
	int nid;

	for_each_node_state(nid, N_GENERIC_INITIATOR)
		if (!node_online(nid))
			init_memory_less_node(nid);
}

/*
 * Set up early cpu_to_node.
 *
 * Populate cpu_to_node[] only if the x86_cpu_to_apicid[] and
 * apicid_to_node[] tables have valid entries for a CPU.
 * This means we skip cpu_to_node[] initialisation for NUMA
 * emulation and the faked node case (when running a kernel compiled
 * for NUMA on a non-NUMA box), which is OK as cpu_to_node[]
 * is already initialized in a round robin manner in numa_init_array(),
 * prior to this call, and this initialization is good enough
 * for the fake NUMA cases.
 *
 * Called before the per_cpu areas are setup.
 */
void __init init_cpu_to_node(void)
{
	int cpu;
	u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);

	BUG_ON(cpu_to_apicid == NULL);

	for_each_possible_cpu(cpu) {
		int node = numa_cpu_node(cpu);

		if (node == NUMA_NO_NODE)
			continue;

		if (!node_online(node))
			init_memory_less_node(node);

		numa_set_node(cpu, node);
	}
}

#ifndef CONFIG_DEBUG_PER_CPU_MAPS

# ifndef CONFIG_NUMA_EMU
void numa_add_cpu(int cpu)
{
	cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void numa_remove_cpu(int cpu)
{
	cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}
# endif /* !CONFIG_NUMA_EMU */

#else /* !CONFIG_DEBUG_PER_CPU_MAPS */

int __cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
		printk(KERN_WARNING
		       "cpu_to_node(%d): usage too early!\n", cpu);
		dump_stack();
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(__cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are setup.
 */
int early_cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map))
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

	if (!cpu_possible(cpu)) {
		printk(KERN_WARNING
		       "early_cpu_to_node(%d): no per_cpu area!\n", cpu);
		dump_stack();
		return NUMA_NO_NODE;
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}

void debug_cpumask_set_cpu(int cpu, int node, bool enable)
{
	struct cpumask *mask;

	if (node == NUMA_NO_NODE) {
		/* early_cpu_to_node() already emits a warning and trace */
		return;
	}
	mask = node_to_cpumask_map[node];
	if (!mask) {
		pr_err("node_to_cpumask_map[%i] NULL\n", node);
		dump_stack();
		return;
	}

	if (enable)
		cpumask_set_cpu(cpu, mask);
	else
		cpumask_clear_cpu(cpu, mask);

	printk(KERN_DEBUG "%s cpu %d node %d: mask now %*pbl\n",
	       enable ? "numa_add_cpu" : "numa_remove_cpu",
	       cpu, node, cpumask_pr_args(mask));
}

# ifndef CONFIG_NUMA_EMU
static void numa_set_cpumask(int cpu, bool enable)
{
	debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable);
}

void numa_add_cpu(int cpu)
{
	numa_set_cpumask(cpu, true);
}

void numa_remove_cpu(int cpu)
{
	numa_set_cpumask(cpu, false);
}
# endif /* !CONFIG_NUMA_EMU */

/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
const struct cpumask *cpumask_of_node(int node)
{
	if ((unsigned)node >= nr_node_ids) {
		printk(KERN_WARNING
		       "cpumask_of_node(%d): (unsigned)node >= nr_node_ids(%u)\n",
		       node, nr_node_ids);
		dump_stack();
		return cpu_none_mask;
	}
	if (node_to_cpumask_map[node] == NULL) {
		printk(KERN_WARNING
		       "cpumask_of_node(%d): no node_to_cpumask_map!\n",
		       node);
		dump_stack();
		return cpu_online_mask;
	}
	return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(cpumask_of_node);

#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */

#ifdef CONFIG_NUMA_KEEP_MEMINFO
static int meminfo_to_nid(struct numa_meminfo *mi, u64 start)
{
	int i;

	for (i = 0; i < mi->nr_blks; i++)
		if (mi->blk[i].start <= start && mi->blk[i].end > start)
			return mi->blk[i].nid;
	return NUMA_NO_NODE;
}

int phys_to_target_node(phys_addr_t start)
{
	int nid = meminfo_to_nid(&numa_meminfo, start);

	/*
	 * Prefer online nodes, but if reserved memory might be
	 * hot-added continue the search with reserved ranges.
	 */
	if (nid != NUMA_NO_NODE)
		return nid;

	return meminfo_to_nid(&numa_reserved_meminfo, start);
}
EXPORT_SYMBOL_GPL(phys_to_target_node);

int memory_add_physaddr_to_nid(u64 start)
{
	int nid = meminfo_to_nid(&numa_meminfo, start);

	if (nid == NUMA_NO_NODE)
		nid = numa_meminfo.blk[0].nid;
	return nid;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif
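
/*
 * Usage note (illustrative): memory hotplug drivers use these helpers
 * to turn a physical address into a target node, e.g. a call such as
 * memory_add_physaddr_to_nid(res->start) when onlining new memory.
 * Falling back to numa_meminfo.blk[0].nid means an unmatched address
 * is attributed to the first known node rather than failing.
 */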