/*
 * Generic VM initialization for x86-64 NUMA setups.
 * Copyright 2002,2003 Andi Kleen, SuSE Labs.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/nodemask.h>

#include <asm/e820.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/numa.h>
#include <asm/acpi.h>

#ifndef Dprintk
#define Dprintk(x...)
#endif

struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
bootmem_data_t plat_node_bdata[MAX_NUMNODES];

struct memnode memnode;

unsigned char cpu_to_node[NR_CPUS] __read_mostly = {
	[0 ... NR_CPUS-1] = NUMA_NO_NODE
};
unsigned char apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};
cpumask_t node_to_cpumask[MAX_NUMNODES] __read_mostly;

int numa_off __initdata;
unsigned long __initdata nodemap_addr;
unsigned long __initdata nodemap_size;


/*
 * Given a shift value, try to populate memnodemap[].
 * Returns:
 * 1 if OK
 * 0 if memnodemap[] too small (or shift too small)
 * -1 if node overlap or lost RAM (shift too big)
 */
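/*
 * Worked example (illustrative): with shift = 24, a node spanning
 * 0x2000000-0x4000000 fills memnodemap[2] and memnodemap[3]; each
 * byte-sized entry covers one 16MB (1UL << 24) chunk of the physical
 * address space.
 */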
static int __init
populate_memnodemap(const struct bootnode *nodes, int numnodes, int shift)
{
	int i;
	int res = -1;
	unsigned long addr, end;

	memset(memnodemap, 0xff, memnodemapsize);
	for (i = 0; i < numnodes; i++) {
		addr = nodes[i].start;
		end = nodes[i].end;
		if (addr >= end)
			continue;
		if ((end >> shift) >= memnodemapsize)
			return 0;
		do {
			if (memnodemap[addr >> shift] != 0xff)
				return -1;
			memnodemap[addr >> shift] = i;
			addr += (1UL << shift);
		} while (addr < end);
		res = 1;
	}
	return res;
}

static int __init allocate_cachealigned_memnodemap(void)
{
	unsigned long pad, pad_addr;

	memnodemap = memnode.embedded_map;
	if (memnodemapsize <= 48)
		return 0;

	pad = L1_CACHE_BYTES - 1;
	pad_addr = 0x8000;
	nodemap_size = pad + memnodemapsize;
	nodemap_addr = find_e820_area(pad_addr, end_pfn<<PAGE_SHIFT,
				      nodemap_size);
	if (nodemap_addr == -1UL) {
		printk(KERN_ERR
		       "NUMA: Unable to allocate Memory to Node hash map\n");
		nodemap_addr = nodemap_size = 0;
		return -1;
	}
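	/* Round the start of the allocation up to an L1 cache line. */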
	pad_addr = (nodemap_addr + pad) & ~pad;
	memnodemap = phys_to_virt(pad_addr);

	printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n",
	       nodemap_addr, nodemap_addr + nodemap_size);
	return 0;
}

/*
 * The lowest set bit among all node start addresses in the node map
 * determines the maximum possible hash shift.
 */
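/*
 * Illustrative example: two nodes starting at 0x0 and 0x40000000 give
 * bitfield = 0x40000000, so find_first_bit() returns 30 and each
 * memnodemap[] entry then covers a 1GB chunk.
 */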
static int __init
extract_lsb_from_nodes(const struct bootnode *nodes, int numnodes)
{
	int i, nodes_used = 0;
	unsigned long start, end;
	unsigned long bitfield = 0, memtop = 0;

	for (i = 0; i < numnodes; i++) {
		start = nodes[i].start;
		end = nodes[i].end;
		if (start >= end)
			continue;
		bitfield |= start;
		nodes_used++;
		if (end > memtop)
			memtop = end;
	}
	if (nodes_used <= 1)
		i = 63;
	else
		i = find_first_bit(&bitfield, sizeof(unsigned long)*8);
	memnodemapsize = (memtop >> i)+1;
	return i;
}

int __init compute_hash_shift(struct bootnode *nodes, int numnodes)
{
	int shift;

	shift = extract_lsb_from_nodes(nodes, numnodes);
	if (allocate_cachealigned_memnodemap())
		return -1;
	printk(KERN_DEBUG "NUMA: Using %d for the hash shift.\n",
	       shift);

	if (populate_memnodemap(nodes, numnodes, shift) != 1) {
		printk(KERN_INFO
		       "Your memory is not aligned; you need to rebuild your "
		       "kernel with a bigger NODEMAPSIZE, shift=%d\n",
		       shift);
		return -1;
	}
	return shift;
}
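
/*
 * A physical address can then be resolved with phys_to_nid(), which
 * indexes memnodemap[addr >> memnode_shift] (see asm/mmzone.h).
 */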

#ifdef CONFIG_SPARSEMEM
int early_pfn_to_nid(unsigned long pfn)
{
	return phys_to_nid(pfn << PAGE_SHIFT);
}
#endif

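/*
 * Find size bytes of memory for node nodeid in [start, end) via the e820
 * map; if that fails, fall back to a generic bootmem allocation,
 * preferring memory above MAX_DMA_ADDRESS.
 */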
static void * __init
early_node_mem(int nodeid, unsigned long start, unsigned long end,
	       unsigned long size)
{
	unsigned long mem = find_e820_area(start, end, size);
	void *ptr;

	if (mem != -1L)
		return __va(mem);
	ptr = __alloc_bootmem_nopanic(size,
				      SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS));
	if (ptr == NULL) {
		printk(KERN_ERR "Cannot find %lu bytes in node %d\n",
		       size, nodeid);
		return NULL;
	}
	return ptr;
}

/* Initialize bootmem allocator for a node */
void __init setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
{
	unsigned long start_pfn, end_pfn, bootmap_pages, bootmap_size, bootmap_start;
	unsigned long nodedata_phys;
	void *bootmap;
	const int pgdat_size = round_up(sizeof(pg_data_t), PAGE_SIZE);

	start = round_up(start, ZONE_ALIGN);

	printk(KERN_INFO "Bootmem setup node %d %016lx-%016lx\n", nodeid, start, end);

	start_pfn = start >> PAGE_SHIFT;
	end_pfn = end >> PAGE_SHIFT;

	node_data[nodeid] = early_node_mem(nodeid, start, end, pgdat_size);
	if (node_data[nodeid] == NULL)
		return;
	nodedata_phys = __pa(node_data[nodeid]);

	memset(NODE_DATA(nodeid), 0, sizeof(pg_data_t));
	NODE_DATA(nodeid)->bdata = &plat_node_bdata[nodeid];
	NODE_DATA(nodeid)->node_start_pfn = start_pfn;
	NODE_DATA(nodeid)->node_spanned_pages = end_pfn - start_pfn;

	/* Find a place for the bootmem map */
	bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
	bootmap_start = round_up(nodedata_phys + pgdat_size, PAGE_SIZE);
	bootmap = early_node_mem(nodeid, bootmap_start, end,
				 bootmap_pages<<PAGE_SHIFT);
	if (bootmap == NULL) {
		if (nodedata_phys < start || nodedata_phys >= end)
			free_bootmem((unsigned long)node_data[nodeid], pgdat_size);
		node_data[nodeid] = NULL;
		return;
	}
	bootmap_start = __pa(bootmap);
	Dprintk("bootmap start %lu pages %lu\n", bootmap_start, bootmap_pages);

	bootmap_size = init_bootmem_node(NODE_DATA(nodeid),
					 bootmap_start >> PAGE_SHIFT,
					 start_pfn, end_pfn);

	free_bootmem_with_active_regions(nodeid, end);

	reserve_bootmem_node(NODE_DATA(nodeid), nodedata_phys, pgdat_size);
	reserve_bootmem_node(NODE_DATA(nodeid), bootmap_start, bootmap_pages<<PAGE_SHIFT);
#ifdef CONFIG_ACPI_NUMA
	srat_reserve_add_area(nodeid);
#endif
	node_set_online(nodeid);
}

/* Initialize final allocator for a zone */
void __init setup_node_zones(int nodeid)
{
	unsigned long start_pfn, end_pfn, memmapsize, limit;

	start_pfn = node_start_pfn(nodeid);
	end_pfn = node_end_pfn(nodeid);

	Dprintk(KERN_INFO "Setting up memmap for node %d %lx-%lx\n",
		nodeid, start_pfn, end_pfn);

	/* Try to allocate mem_map at the end to not fill up precious <4GB
	   memory. */
	memmapsize = sizeof(struct page) * (end_pfn-start_pfn);
	limit = end_pfn << PAGE_SHIFT;
#ifdef CONFIG_FLAT_NODE_MEM_MAP
	NODE_DATA(nodeid)->node_mem_map =
		__alloc_bootmem_core(NODE_DATA(nodeid)->bdata,
				     memmapsize, SMP_CACHE_BYTES,
				     round_down(limit - memmapsize, PAGE_SIZE),
				     limit);
#endif
}

void __init numa_init_array(void)
{
	int rr, i;
	/* There are unfortunately some poorly designed mainboards around
	   that only connect memory to a single CPU. This breaks the 1:1
	   cpu->node mapping. To avoid this, fill in the mapping for all
	   possible CPUs, as the number of CPUs is not known yet. We
	   round robin over the existing nodes. */
	rr = first_node(node_online_map);
	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_to_node[i] != NUMA_NO_NODE)
			continue;
		numa_set_node(i, rr);
		rr = next_node(rr, node_online_map);
		if (rr == MAX_NUMNODES)
			rr = first_node(node_online_map);
	}

}

#ifdef CONFIG_NUMA_EMU
/* NUMA emulation */
#define E820_ADDR_HOLE_SIZE(start, end) \
	(e820_hole_size((start) >> PAGE_SHIFT, (end) >> PAGE_SHIFT) << \
	PAGE_SHIFT)
char *cmdline __initdata;

/*
 * Sets up nid to cover the range from addr to addr + size. If the end
 * boundary is greater than max_addr, then max_addr is used instead.
 * The return value is 0 if there is additional memory left for
 * allocation past addr and -1 otherwise. addr is adjusted to be at
 * the end of the node.
 */
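/*
 * Worked example (illustrative): with *addr = 0, size = 512MB and
 * max_addr = 4GB, the node covers 0x0-0x20000000, *addr is left at
 * 0x20000000 for the next node, and 0 is returned since memory
 * remains past *addr.
 */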
static int __init setup_node_range(int nid, struct bootnode *nodes, u64 *addr,
				   u64 size, u64 max_addr)
{
	int ret = 0;

	nodes[nid].start = *addr;
	*addr += size;
	if (*addr >= max_addr) {
		*addr = max_addr;
		ret = -1;
	}
	nodes[nid].end = *addr;
	node_set(nid, node_possible_map);
	printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n", nid,
	       nodes[nid].start, nodes[nid].end,
	       (nodes[nid].end - nodes[nid].start) >> 20);
	return ret;
}

/*
 * Splits the available memory up equally among num_nodes nodes starting at
 * node_start. The return value is the number of nodes split up, and addr
 * is adjusted to be at the end of the last node allocated.
 */
static int __init split_nodes_equally(struct bootnode *nodes, u64 *addr,
				      u64 max_addr, int node_start,
				      int num_nodes)
{
	unsigned int big;
	u64 size;
	int i;

	if (num_nodes <= 0)
		return -1;
	if (num_nodes > MAX_NUMNODES)
		num_nodes = MAX_NUMNODES;
	size = (max_addr - *addr - E820_ADDR_HOLE_SIZE(*addr, max_addr)) /
	       num_nodes;
	/*
	 * Calculate the number of big nodes that can be allocated as a result
	 * of consolidating the leftovers.
	 */
	big = ((size & ~FAKE_NODE_MIN_HASH_MASK) * num_nodes) /
	      FAKE_NODE_MIN_SIZE;

	/* Round down to nearest FAKE_NODE_MIN_SIZE. */
	size &= FAKE_NODE_MIN_HASH_MASK;
	if (!size) {
		printk(KERN_ERR "Not enough memory for each node. "
		       "NUMA emulation disabled.\n");
		return -1;
	}

	for (i = node_start; i < num_nodes + node_start; i++) {
		u64 end = *addr + size;

		if (i < big)
			end += FAKE_NODE_MIN_SIZE;
		/*
		 * The final node can have the remaining system RAM. Other
		 * nodes receive roughly the same amount of available pages.
		 */
		if (i == num_nodes + node_start - 1)
			end = max_addr;
		else
			while (end - *addr - E820_ADDR_HOLE_SIZE(*addr, end) <
			       size) {
				end += FAKE_NODE_MIN_SIZE;
				if (end > max_addr) {
					end = max_addr;
					break;
				}
			}
		if (setup_node_range(i, nodes, addr, end - *addr, max_addr) < 0)
			break;
	}
	return i - node_start + 1;
}

/*
 * Splits the remaining system RAM into chunks of the given size. Any
 * leftover memory is assigned to a final node and can be asymmetric.
 * Returns the number of nodes split.
 */
static int __init split_nodes_by_size(struct bootnode *nodes, u64 *addr,
				      u64 max_addr, int node_start, u64 size)
{
	int i = node_start;

	size = (size << 20) & FAKE_NODE_MIN_HASH_MASK;
	while (!setup_node_range(i++, nodes, addr, size, max_addr))
		;
	return i - node_start;
}

/*
 * Sets up the system RAM area from start_pfn to end_pfn according to the
 * numa=fake command-line option.
 */
static int __init numa_emulation(unsigned long start_pfn, unsigned long end_pfn)
{
	struct bootnode nodes[MAX_NUMNODES];
	u64 addr = start_pfn << PAGE_SHIFT;
	u64 max_addr = end_pfn << PAGE_SHIFT;
	int num_nodes = 0;
	int coeff_flag;
	int coeff = -1;
	int num = 0;
	u64 size;
	int i;

	memset(&nodes, 0, sizeof(nodes));
	/*
	 * If the numa=fake command-line is just a single number N, split the
	 * system RAM into N fake nodes.
	 */
	if (!strchr(cmdline, '*') && !strchr(cmdline, ',')) {
		num_nodes = split_nodes_equally(nodes, &addr, max_addr, 0,
						simple_strtol(cmdline, NULL, 0));
		if (num_nodes < 0)
			return num_nodes;
		goto out;
	}

	/* Parse the command line. */
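	/*
	 * Illustrative note: the string is otherwise a comma-separated list
	 * of node sizes in megabytes, where "N*S" stands for N nodes of S
	 * megabytes each; e.g. numa=fake=2*512,1024 creates two 512MB nodes
	 * followed by one 1GB node.
	 */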
	for (coeff_flag = 0; ; cmdline++) {
		if (*cmdline && isdigit(*cmdline)) {
			num = num * 10 + *cmdline - '0';
			continue;
		}
		if (*cmdline == '*') {
			if (num > 0)
				coeff = num;
			coeff_flag = 1;
		}
		if (!*cmdline || *cmdline == ',') {
			if (!coeff_flag)
				coeff = 1;
			/*
			 * Round down to the nearest FAKE_NODE_MIN_SIZE.
			 * Command-line coefficients are in megabytes.
			 */
			size = ((u64)num << 20) & FAKE_NODE_MIN_HASH_MASK;
			if (size)
				for (i = 0; i < coeff; i++, num_nodes++)
					if (setup_node_range(num_nodes, nodes,
						&addr, size, max_addr) < 0)
						goto done;
			if (!*cmdline)
				break;
			coeff_flag = 0;
			coeff = -1;
		}
		num = 0;
	}
done:
	if (!num_nodes)
		return -1;
	/* Fill remainder of system RAM, if appropriate. */
	if (addr < max_addr) {
		if (coeff_flag && coeff < 0) {
			/* Split remaining nodes into num-sized chunks */
			num_nodes += split_nodes_by_size(nodes, &addr, max_addr,
							 num_nodes, num);
			goto out;
		}
		switch (*(cmdline - 1)) {
		case '*':
			/* Split remaining nodes into coeff chunks */
			if (coeff <= 0)
				break;
			num_nodes += split_nodes_equally(nodes, &addr, max_addr,
							 num_nodes, coeff);
			break;
		case ',':
			/* Do not allocate remaining system RAM */
			break;
		default:
			/* Give one final node */
			setup_node_range(num_nodes, nodes, &addr,
					 max_addr - addr, max_addr);
			num_nodes++;
		}
	}
out:
	memnode_shift = compute_hash_shift(nodes, num_nodes);
	if (memnode_shift < 0) {
		memnode_shift = 0;
		printk(KERN_ERR "No NUMA hash function found. NUMA emulation "
		       "disabled.\n");
		return -1;
	}

	/*
	 * We need to vacate all active ranges that may have been registered by
	 * SRAT.
	 */
	remove_all_active_ranges();
	for_each_node_mask(i, node_possible_map) {
		e820_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
					     nodes[i].end >> PAGE_SHIFT);
		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
	}
	numa_init_array();
	return 0;
}
#undef E820_ADDR_HOLE_SIZE
#endif /* CONFIG_NUMA_EMU */

void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
	int i;

	nodes_clear(node_possible_map);

#ifdef CONFIG_NUMA_EMU
	if (cmdline && !numa_emulation(start_pfn, end_pfn))
		return;
	nodes_clear(node_possible_map);
#endif

#ifdef CONFIG_ACPI_NUMA
	if (!numa_off && !acpi_scan_nodes(start_pfn << PAGE_SHIFT,
					  end_pfn << PAGE_SHIFT))
		return;
	nodes_clear(node_possible_map);
#endif

#ifdef CONFIG_K8_NUMA
	if (!numa_off && !k8_scan_nodes(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT))
		return;
	nodes_clear(node_possible_map);
#endif
	printk(KERN_INFO "%s\n",
	       numa_off ? "NUMA turned off" : "No NUMA configuration found");

	printk(KERN_INFO "Faking a node at %016lx-%016lx\n",
	       start_pfn << PAGE_SHIFT,
	       end_pfn << PAGE_SHIFT);
	/* setup dummy node covering all memory */
	memnode_shift = 63;
	memnodemap = memnode.embedded_map;
	memnodemap[0] = 0;
	nodes_clear(node_online_map);
	node_set_online(0);
	node_set(0, node_possible_map);
	for (i = 0; i < NR_CPUS; i++)
		numa_set_node(i, 0);
	node_to_cpumask[0] = cpumask_of_cpu(0);
	e820_register_active_regions(0, start_pfn, end_pfn);
	setup_node_bootmem(0, start_pfn << PAGE_SHIFT, end_pfn << PAGE_SHIFT);
}

__cpuinit void numa_add_cpu(int cpu)
{
	set_bit(cpu, &node_to_cpumask[cpu_to_node(cpu)]);
}

void __cpuinit numa_set_node(int cpu, int node)
{
	cpu_pda(cpu)->nodenumber = node;
	cpu_to_node[cpu] = node;
}

unsigned long __init numa_free_all_bootmem(void)
{
	int i;
	unsigned long pages = 0;

	for_each_online_node(i) {
		pages += free_all_bootmem_node(NODE_DATA(i));
	}
	return pages;
}

void __init paging_init(void)
{
	int i;
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
	max_zone_pfns[ZONE_NORMAL] = end_pfn;

	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();

	for_each_online_node(i) {
		setup_node_zones(i);
	}

	free_area_init_nodes(max_zone_pfns);
}

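/*
 * Recognized early parameters (summarized from the handler below):
 *   numa=off             disable NUMA for this boot
 *   numa=fake=<cmdline>  fake a NUMA topology (CONFIG_NUMA_EMU)
 *   numa=noacpi          ignore the ACPI SRAT table (CONFIG_ACPI_NUMA)
 *   numa=hotadd=<pct>    limit hotadd areas to <pct> percent of memory,
 *                        handled in srat.c (CONFIG_ACPI_NUMA)
 */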
static __init int numa_setup(char *opt)
{
	if (!opt)
		return -EINVAL;
	if (!strncmp(opt, "off", 3))
		numa_off = 1;
#ifdef CONFIG_NUMA_EMU
	if (!strncmp(opt, "fake=", 5))
		cmdline = opt + 5;
#endif
#ifdef CONFIG_ACPI_NUMA
	if (!strncmp(opt, "noacpi", 6))
		acpi_numa = -1;
	if (!strncmp(opt, "hotadd=", 7))
		hotadd_percent = simple_strtoul(opt+7, NULL, 10);
#endif
	return 0;
}

early_param("numa", numa_setup);

/*
 * Set up early cpu_to_node.
 *
 * Populate cpu_to_node[] only if the x86_cpu_to_apicid[] and
 * apicid_to_node[] tables have valid entries for a CPU.
 * This means we skip cpu_to_node[] initialisation for NUMA
 * emulation and the fake-node case (when running a kernel compiled
 * for NUMA on a non-NUMA box), which is OK as cpu_to_node[]
 * is already initialized in a round robin manner at numa_init_array,
 * prior to this call, and this initialization is good enough
 * for the fake NUMA cases.
 */
void __init init_cpu_to_node(void)
{
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		u8 apicid = x86_cpu_to_apicid[i];

		if (apicid == BAD_APICID)
			continue;
		if (apicid_to_node[apicid] == NUMA_NO_NODE)
			continue;
		numa_set_node(i, apicid_to_node[apicid]);
	}
}

EXPORT_SYMBOL(cpu_to_node);
EXPORT_SYMBOL(node_to_cpumask);
EXPORT_SYMBOL(memnode);
EXPORT_SYMBOL(node_data);

#ifdef CONFIG_DISCONTIGMEM
/*
 * Functions to convert PFNs from/to per-node page addresses.
 * These are out of line because they are quite big.
 * They could all be tuned by pre-caching more state.
 * Should do that.
 */

int pfn_valid(unsigned long pfn)
{
	unsigned nid;

	if (pfn >= num_physpages)
		return 0;
	nid = pfn_to_nid(pfn);
	if (nid == 0xff)
		return 0;
	return pfn >= node_start_pfn(nid) && pfn < node_end_pfn(nid);
}
EXPORT_SYMBOL(pfn_valid);
#endif