/* Common code for 32 and 64-bit NUMA */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/sched.h>
#include <linux/topology.h>

#include <asm/e820.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/acpi.h>
#include <asm/amd_nb.h>

#include "numa_internal.h"

int __initdata numa_off;
nodemask_t numa_nodes_parsed __initdata;

struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);

static struct numa_meminfo numa_meminfo
#ifndef CONFIG_MEMORY_HOTPLUG
__initdata
#endif
;

static int numa_distance_cnt;
static u8 *numa_distance;
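/*
 * Handle the "numa=" early boot parameter: "numa=off" disables NUMA,
 * "numa=fake=<spec>" hands the spec to the NUMA emulation code, and
 * "numa=noacpi" disables SRAT-based ACPI NUMA detection.
 */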
static __init int numa_setup(char *opt)
{
	if (!opt)
		return -EINVAL;
	if (!strncmp(opt, "off", 3))
		numa_off = 1;
#ifdef CONFIG_NUMA_EMU
	if (!strncmp(opt, "fake=", 5))
		numa_emu_cmdline(opt + 5);
#endif
#ifdef CONFIG_ACPI_NUMA
	if (!strncmp(opt, "noacpi", 6))
		acpi_numa = -1;
#endif
	return 0;
}
early_param("numa", numa_setup);

/*
 * apicid, cpu, node mappings
 */
s16 __apicid_to_node[MAX_LOCAL_APIC] = {
	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};

int numa_cpu_node(int cpu)
{
	int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);

	if (apicid != BAD_APICID)
		return __apicid_to_node[apicid];
	return NUMA_NO_NODE;
}

cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
EXPORT_SYMBOL(node_to_cpumask_map);

/*
 * Map cpu index to node index
 */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);
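/*
 * Record the node of a CPU.  Works both before and after the per-cpu
 * areas are set up: early on it writes the static early map, later it
 * updates the x86_cpu_to_node_map per-cpu variable directly.
 */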
void numa_set_node(int cpu, int node)
{
	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	/* early setting, no percpu area yet */
	if (cpu_to_node_map) {
		cpu_to_node_map[cpu] = node;
		return;
	}

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
		printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
		dump_stack();
		return;
	}
#endif
	per_cpu(x86_cpu_to_node_map, cpu) = node;

	set_cpu_numa_node(cpu, node);
}

void numa_clear_node(int cpu)
{
	numa_set_node(cpu, NUMA_NO_NODE);
}

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
 */
void __init setup_node_to_cpumask_map(void)
{
	unsigned int node;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES)
		setup_nr_node_ids();

	/* allocate the map */
	for (node = 0; node < nr_node_ids; node++)
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

	/* cpumask_of_node() will now work */
	pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids);
}
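/*
 * Append one memblk to @mi after basic validation: zero-length blocks
 * are silently ignored, blocks with a bogus range or node id are
 * dropped with a warning, and overflowing NR_NODE_MEMBLKS is an error.
 */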
static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
				     struct numa_meminfo *mi)
{
	/* ignore zero length blks */
	if (start == end)
		return 0;

	/* whine about and ignore invalid blks */
	if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
		pr_warning("NUMA: Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n",
			   nid, start, end - 1);
		return 0;
	}

	if (mi->nr_blks >= NR_NODE_MEMBLKS) {
		pr_err("NUMA: too many memblk ranges\n");
		return -EINVAL;
	}

	mi->blk[mi->nr_blks].start = start;
	mi->blk[mi->nr_blks].end = end;
	mi->blk[mi->nr_blks].nid = nid;
	mi->nr_blks++;
	return 0;
}

/**
 * numa_remove_memblk_from - Remove one numa_memblk from a numa_meminfo
 * @idx: Index of memblk to remove
 * @mi: numa_meminfo to remove memblk from
 *
 * Remove @idx'th numa_memblk from @mi by shifting @mi->blk[] and
 * decrementing @mi->nr_blks.
 */
void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi)
{
	mi->nr_blks--;
	memmove(&mi->blk[idx], &mi->blk[idx + 1],
		(mi->nr_blks - idx) * sizeof(mi->blk[0]));
}

/**
 * numa_add_memblk - Add one numa_memblk to numa_meminfo
 * @nid: NUMA node ID of the new memblk
 * @start: Start address of the new memblk
 * @end: End address of the new memblk
 *
 * Add a new memblk to the default numa_meminfo.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init numa_add_memblk(int nid, u64 start, u64 end)
{
	return numa_add_memblk_to(nid, start, end, &numa_meminfo);
}

/* Allocate NODE_DATA for a node on the local memory */
static void __init alloc_node_data(int nid)
{
	const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
	u64 nd_pa;
	void *nd;
	int tnid;

	/*
	 * Allocate node data. Try node-local memory and then any node.
	 * Never allocate in DMA zone.
	 */
	nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid);
	if (!nd_pa) {
		nd_pa = __memblock_alloc_base(nd_size, SMP_CACHE_BYTES,
					      MEMBLOCK_ALLOC_ACCESSIBLE);
		if (!nd_pa) {
			pr_err("Cannot find %zu bytes in node %d\n",
			       nd_size, nid);
			return;
		}
	}
	nd = __va(nd_pa);

	/* report and initialize */
	printk(KERN_INFO "NODE_DATA(%d) allocated [mem %#010Lx-%#010Lx]\n", nid,
	       nd_pa, nd_pa + nd_size - 1);
	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
	if (tnid != nid)
		printk(KERN_INFO " NODE_DATA(%d) on node %d\n", nid, tnid);

	node_data[nid] = nd;
	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));

	node_set_online(nid);
}
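/*
 * Cleanup example: two blocks of the same node such as [0, 2G) and
 * [2G, 4G) are merged into a single [0, 4G) block below, while an
 * overlap between blocks of different nodes rejects the whole meminfo.
 */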
/**
 * numa_cleanup_meminfo - Cleanup a numa_meminfo
 * @mi: numa_meminfo to clean up
 *
 * Sanitize @mi by merging and removing unnecessary memblks. Also check for
 * conflicts and clear unused memblks.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
{
	const u64 low = 0;
	const u64 high = PFN_PHYS(max_pfn);
	int i, j, k;

	/* first, trim all entries */
	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *bi = &mi->blk[i];

		/* make sure all blocks are inside the limits */
		bi->start = max(bi->start, low);
		bi->end = min(bi->end, high);

		/* and there's no empty block */
		if (bi->start >= bi->end)
			numa_remove_memblk_from(i--, mi);
	}

	/* merge neighboring / overlapping entries */
	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *bi = &mi->blk[i];

		for (j = i + 1; j < mi->nr_blks; j++) {
			struct numa_memblk *bj = &mi->blk[j];
			u64 start, end;

			/*
			 * See whether there are overlapping blocks. Whine
			 * about but allow overlaps of the same nid. They
			 * will be merged below.
			 */
			if (bi->end > bj->start && bi->start < bj->end) {
				if (bi->nid != bj->nid) {
					pr_err("NUMA: node %d [mem %#010Lx-%#010Lx] overlaps with node %d [mem %#010Lx-%#010Lx]\n",
					       bi->nid, bi->start, bi->end - 1,
					       bj->nid, bj->start, bj->end - 1);
					return -EINVAL;
				}
				pr_warning("NUMA: Warning: node %d [mem %#010Lx-%#010Lx] overlaps with itself [mem %#010Lx-%#010Lx]\n",
					   bi->nid, bi->start, bi->end - 1,
					   bj->start, bj->end - 1);
			}

			/*
			 * Join together blocks on the same node, holes
			 * between which don't overlap with memory on other
			 * nodes.
			 */
			if (bi->nid != bj->nid)
				continue;
			start = min(bi->start, bj->start);
			end = max(bi->end, bj->end);
			for (k = 0; k < mi->nr_blks; k++) {
				struct numa_memblk *bk = &mi->blk[k];

				if (bi->nid == bk->nid)
					continue;
				if (start < bk->end && end > bk->start)
					break;
			}
			if (k < mi->nr_blks)
				continue;
			printk(KERN_INFO "NUMA: Node %d [mem %#010Lx-%#010Lx] + [mem %#010Lx-%#010Lx] -> [mem %#010Lx-%#010Lx]\n",
			       bi->nid, bi->start, bi->end - 1, bj->start,
			       bj->end - 1, start, end - 1);
			bi->start = start;
			bi->end = end;
			numa_remove_memblk_from(j--, mi);
		}
	}

	/* clear unused ones */
	for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) {
		mi->blk[i].start = mi->blk[i].end = 0;
		mi->blk[i].nid = NUMA_NO_NODE;
	}

	return 0;
}

/*
 * Set nodes, which have memory in @mi, in *@nodemask.
 */
static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask,
					      const struct numa_meminfo *mi)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mi->blk); i++)
		if (mi->blk[i].start != mi->blk[i].end &&
		    mi->blk[i].nid != NUMA_NO_NODE)
			node_set(mi->blk[i].nid, *nodemask);
}

/**
 * numa_reset_distance - Reset NUMA distance table
 *
 * The current table is freed. The next numa_set_distance() call will
 * create a new one.
 */
void __init numa_reset_distance(void)
{
	size_t size = numa_distance_cnt * numa_distance_cnt * sizeof(numa_distance[0]);

	/* numa_distance could be 1LU marking allocation failure, test cnt */
	if (numa_distance_cnt)
		memblock_free(__pa(numa_distance), size);
	numa_distance_cnt = 0;
	numa_distance = NULL;		/* enable table creation */
}
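/*
 * The distance table is a flat cnt x cnt array of u8, indexed as
 * numa_distance[from * cnt + to].  It defaults to LOCAL_DISTANCE on the
 * diagonal and REMOTE_DISTANCE everywhere else until firmware (e.g. the
 * ACPI SLIT) supplies real distances via numa_set_distance().
 */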
static int __init numa_alloc_distance(void)
{
	nodemask_t nodes_parsed;
	size_t size;
	int i, j, cnt = 0;
	u64 phys;

	/* size the new table and allocate it */
	nodes_parsed = numa_nodes_parsed;
	numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo);

	for_each_node_mask(i, nodes_parsed)
		cnt = i;
	cnt++;
	size = cnt * cnt * sizeof(numa_distance[0]);

	phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
				      size, PAGE_SIZE);
	if (!phys) {
		pr_warning("NUMA: Warning: can't allocate distance table!\n");
		/* don't retry until explicitly reset */
		numa_distance = (void *)1LU;
		return -ENOMEM;
	}
	memblock_reserve(phys, size);

	numa_distance = __va(phys);
	numa_distance_cnt = cnt;

	/* fill with the default distances */
	for (i = 0; i < cnt; i++)
		for (j = 0; j < cnt; j++)
			numa_distance[i * cnt + j] = i == j ?
				LOCAL_DISTANCE : REMOTE_DISTANCE;
	printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt);

	return 0;
}

/**
 * numa_set_distance - Set NUMA distance from one NUMA to another
 * @from: the 'from' node to set distance
 * @to: the 'to' node to set distance
 * @distance: NUMA distance
 *
 * Set the distance from node @from to @to to @distance. If distance table
 * doesn't exist, one which is large enough to accommodate all the currently
 * known nodes will be created.
 *
 * If such table cannot be allocated, a warning is printed and further
 * calls are ignored until the distance table is reset with
 * numa_reset_distance().
 *
 * If @from or @to is higher than the highest known node or lower than zero
 * at the time of table creation or @distance doesn't make sense, the call
 * is ignored.
 * This is to allow simplification of specific NUMA config implementations.
 */
void __init numa_set_distance(int from, int to, int distance)
{
	if (!numa_distance && numa_alloc_distance() < 0)
		return;

	if (from >= numa_distance_cnt || to >= numa_distance_cnt ||
	    from < 0 || to < 0) {
		pr_warn_once("NUMA: Warning: node ids are out of bound, from=%d to=%d distance=%d\n",
			     from, to, distance);
		return;
	}

	if ((u8)distance != distance ||
	    (from == to && distance != LOCAL_DISTANCE)) {
		pr_warn_once("NUMA: Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
			     from, to, distance);
		return;
	}

	numa_distance[from * numa_distance_cnt + to] = distance;
}

int __node_distance(int from, int to)
{
	if (from >= numa_distance_cnt || to >= numa_distance_cnt)
		return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
	return numa_distance[from * numa_distance_cnt + to];
}
EXPORT_SYMBOL(__node_distance);

/*
 * Sanity check to catch more bad NUMA configurations (they are amazingly
 * common). Make sure the nodes cover all memory.
 */
static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
{
	u64 numaram, e820ram;
	int i;

	numaram = 0;
	for (i = 0; i < mi->nr_blks; i++) {
		u64 s = mi->blk[i].start >> PAGE_SHIFT;
		u64 e = mi->blk[i].end >> PAGE_SHIFT;
		numaram += e - s;
		numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e);
		if ((s64)numaram < 0)
			numaram = 0;
	}

	e820ram = max_pfn - absent_pages_in_range(0, max_pfn);

	/* We seem to lose 3 pages somewhere. Allow 1M of slack. */
	if ((s64)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) {
		printk(KERN_ERR "NUMA: nodes only cover %LuMB of your %LuMB e820 RAM. Not used.\n",
		       (numaram << PAGE_SHIFT) >> 20,
		       (e820ram << PAGE_SHIFT) >> 20);
		return false;
	}
	return true;
}
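/*
 * Clear the MEMBLOCK_HOTPLUG flag on all memory of the nodes the kernel
 * resides in (i.e. every node that owns a memblock.reserved range), so
 * that kernel memory is never treated as hotpluggable.
 */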
static void __init numa_clear_kernel_node_hotplug(void)
{
	int i, nid;
	nodemask_t numa_kernel_nodes = NODE_MASK_NONE;
	unsigned long start, end;
	struct memblock_region *r;

	/*
	 * At this time, all memory regions reserved by memblock are
	 * used by the kernel. Setting the nid in memblock.reserved
	 * will mark all the nodes the kernel resides in.
	 */
	for (i = 0; i < numa_meminfo.nr_blks; i++) {
		struct numa_memblk *mb = &numa_meminfo.blk[i];

		memblock_set_node(mb->start, mb->end - mb->start,
				  &memblock.reserved, mb->nid);
	}

	/*
	 * Mark all kernel nodes.
	 *
	 * When booting with mem=nn[kMG] or in a kdump kernel, numa_meminfo
	 * may not include all the memblock.reserved memory ranges because
	 * trim_snb_memory() reserves specific pages for Sandy Bridge graphics.
	 */
	for_each_memblock(reserved, r)
		if (r->nid != MAX_NUMNODES)
			node_set(r->nid, numa_kernel_nodes);

	/* Clear MEMBLOCK_HOTPLUG flag for memory in kernel nodes. */
	for (i = 0; i < numa_meminfo.nr_blks; i++) {
		nid = numa_meminfo.blk[i].nid;
		if (!node_isset(nid, numa_kernel_nodes))
			continue;

		start = numa_meminfo.blk[i].start;
		end = numa_meminfo.blk[i].end;

		memblock_clear_hotplug(start, end - start);
	}
}
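/*
 * Turn a sanitized numa_meminfo into live state: fill node_possible_map,
 * push the node ids into memblock.memory, validate coverage and section
 * alignment, and finally allocate NODE_DATA() for every node that has
 * enough memory.
 */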
static int __init numa_register_memblks(struct numa_meminfo *mi)
{
	unsigned long uninitialized_var(pfn_align);
	int i, nid;

	/* Account for nodes with cpus and no memory */
	node_possible_map = numa_nodes_parsed;
	numa_nodemask_from_meminfo(&node_possible_map, mi);
	if (WARN_ON(nodes_empty(node_possible_map)))
		return -EINVAL;

	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *mb = &mi->blk[i];
		memblock_set_node(mb->start, mb->end - mb->start,
				  &memblock.memory, mb->nid);
	}

	/*
	 * Very early on, the kernel has to use some memory, e.g. for
	 * loading the kernel image. We cannot prevent this anyway, so
	 * any node the kernel resides in must be un-hotpluggable.
	 *
	 * And by the time we get here, allocating node data won't fail.
	 */
	numa_clear_kernel_node_hotplug();

	/*
	 * If the sections array is going to be used for pfn -> nid
	 * mapping, check whether its granularity is fine enough.
	 */
#ifdef NODE_NOT_IN_PAGE_FLAGS
	pfn_align = node_map_pfn_alignment();
	if (pfn_align && pfn_align < PAGES_PER_SECTION) {
		printk(KERN_WARNING "Node alignment %LuMB < min %LuMB, rejecting NUMA config\n",
		       PFN_PHYS(pfn_align) >> 20,
		       PFN_PHYS(PAGES_PER_SECTION) >> 20);
		return -EINVAL;
	}
#endif
	if (!numa_meminfo_cover_memory(mi))
		return -EINVAL;

	/* Finally register nodes. */
	for_each_node_mask(nid, node_possible_map) {
		u64 start = PFN_PHYS(max_pfn);
		u64 end = 0;

		for (i = 0; i < mi->nr_blks; i++) {
			if (nid != mi->blk[i].nid)
				continue;
			start = min(mi->blk[i].start, start);
			end = max(mi->blk[i].end, end);
		}

		if (start >= end)
			continue;

		/*
		 * Don't confuse VM with a node that doesn't have the
		 * minimum amount of memory:
		 */
		if (end && (end - start) < NODE_MIN_SIZE)
			continue;

		alloc_node_data(nid);
	}

	/* Dump memblock with node info and return. */
	memblock_dump_all();
	return 0;
}
/*
 * There are unfortunately some poorly designed mainboards around that
 * only connect memory to a single CPU. This breaks the 1:1 cpu->node
 * mapping. To avoid this fill in the mapping for all possible CPUs,
 * as the number of CPUs is not known yet. We round robin the existing
 * nodes.
 */
static void __init numa_init_array(void)
{
	int rr, i;

	rr = first_node(node_online_map);
	for (i = 0; i < nr_cpu_ids; i++) {
		if (early_cpu_to_node(i) != NUMA_NO_NODE)
			continue;
		numa_set_node(i, rr);
		rr = next_node(rr, node_online_map);
		if (rr == MAX_NUMNODES)
			rr = first_node(node_online_map);
	}
}
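/*
 * Generic NUMA init: reset all NUMA state, run the platform specific
 * @init_func (SRAT, AMD northbridge or the dummy fallback), clean up the
 * resulting meminfo, apply NUMA emulation if requested and register the
 * result.  CPUs whose parsed node ended up offline get their mapping
 * cleared so that numa_init_array() can round-robin them.
 */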
static int __init numa_init(int (*init_func)(void))
{
	int i;
	int ret;

	for (i = 0; i < MAX_LOCAL_APIC; i++)
		set_apicid_to_node(i, NUMA_NO_NODE);

	nodes_clear(numa_nodes_parsed);
	nodes_clear(node_possible_map);
	nodes_clear(node_online_map);
	memset(&numa_meminfo, 0, sizeof(numa_meminfo));
	WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.memory,
				  MAX_NUMNODES));
	WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.reserved,
				  MAX_NUMNODES));
	/* In case that parsing SRAT failed. */
	WARN_ON(memblock_clear_hotplug(0, ULLONG_MAX));
	numa_reset_distance();

	ret = init_func();
	if (ret < 0)
		return ret;

	/*
	 * We reset memblock back to the top-down allocation direction
	 * here because if ACPI_NUMA is configured, we have parsed SRAT
	 * in init_func(). It is OK to do the reset here even if
	 * ACPI_NUMA isn't configured, or if the ACPI NUMA init fails
	 * and falls back to dummy NUMA init.
	 */
	memblock_set_bottom_up(false);

	ret = numa_cleanup_meminfo(&numa_meminfo);
	if (ret < 0)
		return ret;

	numa_emulation(&numa_meminfo, numa_distance_cnt);

	ret = numa_register_memblks(&numa_meminfo);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_cpu_ids; i++) {
		int nid = early_cpu_to_node(i);

		if (nid == NUMA_NO_NODE)
			continue;
		if (!node_online(nid))
			numa_clear_node(i);
	}
	numa_init_array();

	return 0;
}
/**
 * dummy_numa_init - Fallback dummy NUMA init
 *
 * Used if there's no underlying NUMA architecture, NUMA initialization
 * fails, or NUMA is disabled on the command line.
 *
 * Must online at least one node and add memory blocks that cover all
 * allowed memory. This function must not fail.
 */
static int __init dummy_numa_init(void)
{
	printk(KERN_INFO "%s\n",
	       numa_off ? "NUMA turned off" : "No NUMA configuration found");
	printk(KERN_INFO "Faking a node at [mem %#018Lx-%#018Lx]\n",
	       0LLU, PFN_PHYS(max_pfn) - 1);

	node_set(0, numa_nodes_parsed);
	numa_add_memblk(0, 0, PFN_PHYS(max_pfn));

	return 0;
}
/**
 * x86_numa_init - Initialize NUMA
 *
 * Try each configured NUMA initialization method until one succeeds. The
 * last fallback is a dummy single-node config encompassing the whole
 * memory, which never fails.
 */
void __init x86_numa_init(void)
{
	if (!numa_off) {
#ifdef CONFIG_ACPI_NUMA
		if (!numa_init(x86_acpi_numa_init))
			return;
#endif
#ifdef CONFIG_AMD_NUMA
		if (!numa_init(amd_numa_init))
			return;
#endif
	}

	numa_init(dummy_numa_init);
}
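/*
 * Pick the online node closest to @node according to node_distance().
 * Used by init_cpu_to_node() when a CPU's parsed node has no memory and
 * is therefore not online.
 */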
static __init int find_near_online_node(int node)
{
	int n, val;
	int min_val = INT_MAX;
	int best_node = -1;

	for_each_online_node(n) {
		val = node_distance(node, n);

		if (val < min_val) {
			min_val = val;
			best_node = n;
		}
	}

	return best_node;
}
/*
 * Setup early cpu_to_node.
 *
 * Populate cpu_to_node[] only if the x86_cpu_to_apicid[] and
 * apicid_to_node[] tables have valid entries for a CPU.
 * This means we skip cpu_to_node[] initialisation for NUMA
 * emulation and the fake-node case (running a kernel compiled
 * for NUMA on a non-NUMA box), which is OK as cpu_to_node[]
 * is already initialized in a round-robin manner in
 * numa_init_array(), prior to this call, and that initialization
 * is good enough for the fake NUMA cases.
 *
 * Called before the per_cpu areas are set up.
 */
void __init init_cpu_to_node(void)
{
	int cpu;
	u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);

	BUG_ON(cpu_to_apicid == NULL);

	for_each_possible_cpu(cpu) {
		int node = numa_cpu_node(cpu);

		if (node == NUMA_NO_NODE)
			continue;
		if (!node_online(node))
			node = find_near_online_node(node);
		numa_set_node(cpu, node);
	}
}
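/*
 * Without CONFIG_DEBUG_PER_CPU_MAPS the cpu<->node helpers below are
 * plain accessors; with it, the alternative implementations further down
 * add range checks and stack dumps for early or invalid usage.
 */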
#ifndef CONFIG_DEBUG_PER_CPU_MAPS

# ifndef CONFIG_NUMA_EMU
void numa_add_cpu(int cpu)
{
	cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void numa_remove_cpu(int cpu)
{
	cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}
# endif	/* !CONFIG_NUMA_EMU */

#else	/* !CONFIG_DEBUG_PER_CPU_MAPS */

int __cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
		printk(KERN_WARNING
		       "cpu_to_node(%d): usage too early!\n", cpu);
		dump_stack();
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(__cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are setup.
 */
int early_cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map))
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

	if (!cpu_possible(cpu)) {
		printk(KERN_WARNING
		       "early_cpu_to_node(%d): no per_cpu area!\n", cpu);
		dump_stack();
		return NUMA_NO_NODE;
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}

void debug_cpumask_set_cpu(int cpu, int node, bool enable)
{
	struct cpumask *mask;

	if (node == NUMA_NO_NODE) {
		/* early_cpu_to_node() already emits a warning and trace */
		return;
	}
	mask = node_to_cpumask_map[node];
	if (!mask) {
		pr_err("node_to_cpumask_map[%i] NULL\n", node);
		dump_stack();
		return;
	}

	if (enable)
		cpumask_set_cpu(cpu, mask);
	else
		cpumask_clear_cpu(cpu, mask);

	printk(KERN_DEBUG "%s cpu %d node %d: mask now %*pbl\n",
	       enable ? "numa_add_cpu" : "numa_remove_cpu",
	       cpu, node, cpumask_pr_args(mask));
	return;
}

# ifndef CONFIG_NUMA_EMU
static void numa_set_cpumask(int cpu, bool enable)
{
	debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable);
}

void numa_add_cpu(int cpu)
{
	numa_set_cpumask(cpu, true);
}

void numa_remove_cpu(int cpu)
{
	numa_set_cpumask(cpu, false);
}
# endif	/* !CONFIG_NUMA_EMU */

/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
const struct cpumask *cpumask_of_node(int node)
{
	if (node >= nr_node_ids) {
		printk(KERN_WARNING
		       "cpumask_of_node(%d): node > nr_node_ids(%d)\n",
		       node, nr_node_ids);
		dump_stack();
		return cpu_none_mask;
	}
	if (node_to_cpumask_map[node] == NULL) {
		printk(KERN_WARNING
		       "cpumask_of_node(%d): no node_to_cpumask_map!\n",
		       node);
		dump_stack();
		return cpu_online_mask;
	}
	return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(cpumask_of_node);

#endif	/* !CONFIG_DEBUG_PER_CPU_MAPS */

#ifdef CONFIG_MEMORY_HOTPLUG
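/*
 * For memory hotplug: return the node whose memblk contains @start,
 * falling back to the node of the first memblk if no range matches.
 */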
int memory_add_physaddr_to_nid(u64 start)
{
	struct numa_meminfo *mi = &numa_meminfo;
	int nid = mi->blk[0].nid;
	int i;

	for (i = 0; i < mi->nr_blks; i++)
		if (mi->blk[i].start <= start && mi->blk[i].end > start)
			nid = mi->blk[i].nid;
	return nid;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif