/*
 * Generic VM initialization for x86-64 NUMA setups.
 * Copyright 2002,2003 Andi Kleen, SuSE Labs.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/sched.h>

#include <asm/e820.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/numa.h>
#include <asm/acpi.h>
#include <asm/amd_nb.h>

struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);

struct memnode memnode;

static unsigned long __initdata nodemap_addr;
static unsigned long __initdata nodemap_size;

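/*
 * memnodemap[] is a compact physical-address-to-node lookup table:
 * a physical address is assigned to a node by indexing the table
 * with (addr >> memnode_shift).  The shift is chosen so that each
 * table entry covers memory belonging to at most one node.
 */
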
/*
 * Given a shift value, try to populate memnodemap[]
 * Returns:
 * 1 if OK
 * 0 if memnodemap[] too small (or shift too small)
 * -1 if node overlap or lost ram (shift too big)
 */
static int __init populate_memnodemap(const struct bootnode *nodes,
				      int numnodes, int shift, int *nodeids)
{
	unsigned long addr, end;
	int i, res = -1;

	memset(memnodemap, 0xff, sizeof(s16)*memnodemapsize);
	for (i = 0; i < numnodes; i++) {
		addr = nodes[i].start;
		end = nodes[i].end;
		if (addr >= end)
			continue;
		if ((end >> shift) >= memnodemapsize)
			return 0;
		do {
			if (memnodemap[addr >> shift] != NUMA_NO_NODE)
				return -1;

			if (!nodeids)
				memnodemap[addr >> shift] = i;
			else
				memnodemap[addr >> shift] = nodeids[i];

			addr += (1UL << shift);
		} while (addr < end);
		res = 1;
	}
	return res;
}

static int __init allocate_cachealigned_memnodemap(void)
{
	unsigned long addr;

	memnodemap = memnode.embedded_map;
	if (memnodemapsize <= ARRAY_SIZE(memnode.embedded_map))
		return 0;

	addr = 0x8000;
	nodemap_size = roundup(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES);
	nodemap_addr = memblock_find_in_range(addr, max_pfn<<PAGE_SHIFT,
					      nodemap_size, L1_CACHE_BYTES);
	if (nodemap_addr == MEMBLOCK_ERROR) {
		printk(KERN_ERR
		       "NUMA: Unable to allocate Memory to Node hash map\n");
		nodemap_addr = nodemap_size = 0;
		return -1;
	}
	memnodemap = phys_to_virt(nodemap_addr);
	memblock_x86_reserve_range(nodemap_addr, nodemap_addr + nodemap_size, "MEMNODEMAP");

	printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n",
	       nodemap_addr, nodemap_addr + nodemap_size);
	return 0;
}
94 | ||
95 | /* | |
96 | * The LSB of all start and end addresses in the node map is the value of the | |
97 | * maximum possible shift. | |
98 | */ | |
e3cfe529 TG |
99 | static int __init extract_lsb_from_nodes(const struct bootnode *nodes, |
100 | int numnodes) | |
529a3404 | 101 | { |
54413927 | 102 | int i, nodes_used = 0; |
076422d2 AS |
103 | unsigned long start, end; |
104 | unsigned long bitfield = 0, memtop = 0; | |
105 | ||
106 | for (i = 0; i < numnodes; i++) { | |
107 | start = nodes[i].start; | |
108 | end = nodes[i].end; | |
109 | if (start >= end) | |
110 | continue; | |
54413927 AS |
111 | bitfield |= start; |
112 | nodes_used++; | |
076422d2 AS |
113 | if (end > memtop) |
114 | memtop = end; | |
115 | } | |
54413927 AS |
116 | if (nodes_used <= 1) |
117 | i = 63; | |
118 | else | |
119 | i = find_first_bit(&bitfield, sizeof(unsigned long)*8); | |
076422d2 AS |
120 | memnodemapsize = (memtop >> i)+1; |
121 | return i; | |
122 | } | |
529a3404 | 123 | |
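/*
 * Example: for two nodes covering [0, 0x40000000) and
 * [0x40000000, 0x80000000), the lowest set bit over all start
 * addresses is bit 30, so the shift is 30 and memnodemap[] needs
 * (0x80000000 >> 30) + 1 = 3 entries.
 */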
int __init compute_hash_shift(struct bootnode *nodes, int numnodes,
			      int *nodeids)
{
	int shift;

	shift = extract_lsb_from_nodes(nodes, numnodes);
	if (allocate_cachealigned_memnodemap())
		return -1;
	printk(KERN_DEBUG "NUMA: Using %d for the hash shift.\n",
		shift);

	if (populate_memnodemap(nodes, numnodes, shift, nodeids) != 1) {
		printk(KERN_INFO "Your memory is not aligned you need to "
		       "rebuild your kernel with a bigger NODEMAPSIZE "
		       "shift=%d\n", shift);
		return -1;
	}
	return shift;
}
143 | ||
f2dbcfa7 | 144 | int __meminit __early_pfn_to_nid(unsigned long pfn) |
bbfceef4 MT |
145 | { |
146 | return phys_to_nid(pfn << PAGE_SHIFT); | |
147 | } | |
bbfceef4 | 148 | |
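/*
 * Find "size" bytes of early memory for node "nodeid", preferring
 * ranges that actually live on that node and falling back to any
 * mapped memory above the DMA zone.
 */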
static void * __init early_node_mem(int nodeid, unsigned long start,
				    unsigned long end, unsigned long size,
				    unsigned long align)
{
	unsigned long mem;

	/*
	 * Put the allocation as high as possible; other early data
	 * will be placed alongside NODE_DATA.
	 */
	if (start < (MAX_DMA_PFN<<PAGE_SHIFT))
		start = MAX_DMA_PFN<<PAGE_SHIFT;
	if (start < (MAX_DMA32_PFN<<PAGE_SHIFT) &&
	    end > (MAX_DMA32_PFN<<PAGE_SHIFT))
		start = MAX_DMA32_PFN<<PAGE_SHIFT;
	mem = memblock_x86_find_in_range_node(nodeid, start, end, size, align);
	if (mem != MEMBLOCK_ERROR)
		return __va(mem);

	/* extend the search scope */
	end = max_pfn_mapped << PAGE_SHIFT;
	start = MAX_DMA_PFN << PAGE_SHIFT;
	mem = memblock_find_in_range(start, end, size, align);
	if (mem != MEMBLOCK_ERROR)
		return __va(mem);

	printk(KERN_ERR "Cannot find %lu bytes in node %d\n",
	       size, nodeid);

	return NULL;
}
180 | ||
1da177e4 | 181 | /* Initialize bootmem allocator for a node */ |
7c43769a YL |
182 | void __init |
183 | setup_node_bootmem(int nodeid, unsigned long start, unsigned long end) | |
e3cfe529 | 184 | { |
08677214 | 185 | unsigned long start_pfn, last_pfn, nodedata_phys; |
7c43769a | 186 | const int pgdat_size = roundup(sizeof(pg_data_t), PAGE_SIZE); |
1a27fc0a | 187 | int nid; |
1da177e4 | 188 | |
4c31e92b YL |
189 | if (!end) |
190 | return; | |
191 | ||
7c43769a YL |
192 | /* |
193 | * Don't confuse VM with a node that doesn't have the | |
194 | * minimum amount of memory: | |
195 | */ | |
196 | if (end && (end - start) < NODE_MIN_SIZE) | |
197 | return; | |
198 | ||
be3e89ee | 199 | start = roundup(start, ZONE_ALIGN); |
1da177e4 | 200 | |
08677214 | 201 | printk(KERN_INFO "Initmem setup node %d %016lx-%016lx\n", nodeid, |
e3cfe529 | 202 | start, end); |
1da177e4 LT |
203 | |
204 | start_pfn = start >> PAGE_SHIFT; | |
886533a3 | 205 | last_pfn = end >> PAGE_SHIFT; |
1da177e4 | 206 | |
24a5da73 YL |
207 | node_data[nodeid] = early_node_mem(nodeid, start, end, pgdat_size, |
208 | SMP_CACHE_BYTES); | |
a8062231 AK |
209 | if (node_data[nodeid] == NULL) |
210 | return; | |
211 | nodedata_phys = __pa(node_data[nodeid]); | |
a9ce6bc1 | 212 | memblock_x86_reserve_range(nodedata_phys, nodedata_phys + pgdat_size, "NODE_DATA"); |
6118f76f YL |
213 | printk(KERN_INFO " NODE_DATA [%016lx - %016lx]\n", nodedata_phys, |
214 | nodedata_phys + pgdat_size - 1); | |
1842f90c YL |
215 | nid = phys_to_nid(nodedata_phys); |
216 | if (nid != nodeid) | |
217 | printk(KERN_INFO " NODE_DATA(%d) on node %d\n", nodeid, nid); | |
1da177e4 | 218 | |
1da177e4 | 219 | memset(NODE_DATA(nodeid), 0, sizeof(pg_data_t)); |
08677214 | 220 | NODE_DATA(nodeid)->node_id = nodeid; |
1da177e4 | 221 | NODE_DATA(nodeid)->node_start_pfn = start_pfn; |
886533a3 | 222 | NODE_DATA(nodeid)->node_spanned_pages = last_pfn - start_pfn; |
1da177e4 | 223 | |
1da177e4 | 224 | node_set_online(nodeid); |
e3cfe529 | 225 | } |

/*
 * There are unfortunately some poorly designed mainboards around that
 * only connect memory to a single CPU. This breaks the 1:1 cpu->node
 * mapping. To avoid this fill in the mapping for all possible CPUs,
 * as the number of CPUs is not known yet. We round robin the existing
 * nodes.
 */
void __init numa_init_array(void)
{
	int rr, i;

	rr = first_node(node_online_map);
	for (i = 0; i < nr_cpu_ids; i++) {
		if (early_cpu_to_node(i) != NUMA_NO_NODE)
			continue;
		numa_set_node(i, rr);
		rr = next_node(rr, node_online_map);
		if (rr == MAX_NUMNODES)
			rr = first_node(node_online_map);
	}
}
248 | ||
249 | #ifdef CONFIG_NUMA_EMU | |
53fee04f | 250 | /* Numa emulation */ |
adc19389 | 251 | static struct bootnode nodes[MAX_NUMNODES] __initdata; |
c1c3443c | 252 | static struct bootnode physnodes[MAX_NUMNODES] __cpuinitdata; |
864fc31e | 253 | static char *cmdline __initdata; |
1da177e4 | 254 | |
90321602 JB |
255 | void __init numa_emu_cmdline(char *str) |
256 | { | |
257 | cmdline = str; | |
258 | } | |
259 | ||
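/*
 * Build physnodes[] from the ACPI or AMD topology, clamping every node
 * to [start, end).  Returns the number of usable physical nodes; if
 * none was detected, a single node spanning the whole range is faked.
 */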
static int __init setup_physnodes(unsigned long start, unsigned long end,
				  int acpi, int amd)
{
	int ret = 0;
	int i;

	memset(physnodes, 0, sizeof(physnodes));
#ifdef CONFIG_ACPI_NUMA
	if (acpi)
		acpi_get_nodes(physnodes, start, end);
#endif
#ifdef CONFIG_AMD_NUMA
	if (amd)
		amd_get_nodes(physnodes);
#endif
	/*
	 * Basic sanity checking on the physical node map: there may be errors
	 * if the SRAT or AMD code incorrectly reported the topology or the mem=
	 * kernel parameter is used.
	 */
	for (i = 0; i < MAX_NUMNODES; i++) {
		if (physnodes[i].start == physnodes[i].end)
			continue;
		if (physnodes[i].start > end) {
			physnodes[i].end = physnodes[i].start;
			continue;
		}
		if (physnodes[i].end < start) {
			physnodes[i].start = physnodes[i].end;
			continue;
		}
		if (physnodes[i].start < start)
			physnodes[i].start = start;
		if (physnodes[i].end > end)
			physnodes[i].end = end;
		ret++;
	}

	/*
	 * If no physical topology was detected, a single node is faked to cover
	 * the entire address space.
	 */
	if (!ret) {
		physnodes[ret].start = start;
		physnodes[ret].end = end;
		ret = 1;
	}
	return ret;
}

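/*
 * Let the ACPI or AMD code adapt its tables to the emulated node
 * layout; when neither provided a topology, every CPU is simply
 * mapped to node 0.
 */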
static void __init fake_physnodes(int acpi, int amd, int nr_nodes)
{
	int i;

	BUG_ON(acpi && amd);
#ifdef CONFIG_ACPI_NUMA
	if (acpi)
		acpi_fake_nodes(nodes, nr_nodes);
#endif
#ifdef CONFIG_AMD_NUMA
	if (amd)
		amd_fake_nodes(nodes, nr_nodes);
#endif
	if (!acpi && !amd)
		for (i = 0; i < nr_cpu_ids; i++)
			numa_set_node(i, 0);
}

/*
 * Sets up nid to range from addr to addr + size. If the end
 * boundary is greater than max_addr, then max_addr is used instead.
 * The return value is 0 if there is additional memory left for
 * allocation past addr and -1 otherwise. addr is adjusted to be at
 * the end of the node.
 */
static int __init setup_node_range(int nid, u64 *addr, u64 size, u64 max_addr)
{
	int ret = 0;
	nodes[nid].start = *addr;
	*addr += size;
	if (*addr >= max_addr) {
		*addr = max_addr;
		ret = -1;
	}
	nodes[nid].end = *addr;
	node_set(nid, node_possible_map);
	printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n", nid,
	       nodes[nid].start, nodes[nid].end,
	       (nodes[nid].end - nodes[nid].start) >> 20);
	return ret;
}

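/*
 * Example: "numa=fake=8" on a 4GB machine with no significant memory
 * holes yields eight fake nodes of roughly 512MB each, handed out
 * round-robin across the detected physical nodes (sizes are rounded
 * down to a multiple of FAKE_NODE_MIN_SIZE and the remainder is
 * consolidated into "big" nodes).
 */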
/*
 * Sets up nr_nodes fake nodes interleaved over physical nodes ranging from addr
 * to max_addr. The return value is the number of nodes allocated.
 */
static int __init split_nodes_interleave(u64 addr, u64 max_addr, int nr_nodes)
{
	nodemask_t physnode_mask = NODE_MASK_NONE;
	u64 size;
	int big;
	int ret = 0;
	int i;

	if (nr_nodes <= 0)
		return -1;
	if (nr_nodes > MAX_NUMNODES) {
		pr_info("numa=fake=%d too large, reducing to %d\n",
			nr_nodes, MAX_NUMNODES);
		nr_nodes = MAX_NUMNODES;
	}

	size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) / nr_nodes;
	/*
	 * Calculate the number of big nodes that can be allocated as a result
	 * of consolidating the remainder.
	 */
	big = ((size & ~FAKE_NODE_MIN_HASH_MASK) * nr_nodes) /
		FAKE_NODE_MIN_SIZE;

	size &= FAKE_NODE_MIN_HASH_MASK;
	if (!size) {
		pr_err("Not enough memory for each node.  "
			"NUMA emulation disabled.\n");
		return -1;
	}

	for (i = 0; i < MAX_NUMNODES; i++)
		if (physnodes[i].start != physnodes[i].end)
			node_set(i, physnode_mask);

	/*
	 * Continue to fill physical nodes with fake nodes until there is no
	 * memory left on any of them.
	 */
	while (nodes_weight(physnode_mask)) {
		for_each_node_mask(i, physnode_mask) {
			u64 end = physnodes[i].start + size;
			u64 dma32_end = PFN_PHYS(MAX_DMA32_PFN);

			if (ret < big)
				end += FAKE_NODE_MIN_SIZE;

			/*
			 * Continue to add memory to this fake node if its
			 * non-reserved memory is less than the per-node size.
			 */
			while (end - physnodes[i].start -
				memblock_x86_hole_size(physnodes[i].start, end) < size) {
				end += FAKE_NODE_MIN_SIZE;
				if (end > physnodes[i].end) {
					end = physnodes[i].end;
					break;
				}
			}

			/*
			 * If there won't be at least FAKE_NODE_MIN_SIZE of
			 * non-reserved memory in ZONE_DMA32 for the next node,
			 * this one must extend to the boundary.
			 */
			if (end < dma32_end && dma32_end - end -
			    memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
				end = dma32_end;

			/*
			 * If there won't be enough non-reserved memory for the
			 * next node, this one must extend to the end of the
			 * physical node.
			 */
			if (physnodes[i].end - end -
			    memblock_x86_hole_size(end, physnodes[i].end) < size)
				end = physnodes[i].end;

			/*
			 * Avoid allocating more nodes than requested, which can
			 * happen as a result of rounding down each node's size
			 * to FAKE_NODE_MIN_SIZE.
			 */
			if (nodes_weight(physnode_mask) + ret >= nr_nodes)
				end = physnodes[i].end;

			if (setup_node_range(ret++, &physnodes[i].start,
						end - physnodes[i].start,
						physnodes[i].end) < 0)
				node_clear(i, physnode_mask);
		}
	}
	return ret;
}

/*
 * Returns the end address of a node so that there is at least `size' amount of
 * non-reserved memory or `max_addr' is reached.
 */
static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size)
{
	u64 end = start + size;

	while (end - start - memblock_x86_hole_size(start, end) < size) {
		end += FAKE_NODE_MIN_SIZE;
		if (end > max_addr) {
			end = max_addr;
			break;
		}
	}
	return end;
}

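/*
 * Example: "numa=fake=512M" creates as many 512MB fake nodes as fit
 * between addr and max_addr; the size is raised if it would otherwise
 * produce more than MAX_NUMNODES nodes.
 */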
/*
 * Sets up fake nodes of `size' interleaved over physical nodes ranging from
 * `addr' to `max_addr'. The return value is the number of nodes allocated.
 */
static int __init split_nodes_size_interleave(u64 addr, u64 max_addr, u64 size)
{
	nodemask_t physnode_mask = NODE_MASK_NONE;
	u64 min_size;
	int ret = 0;
	int i;

	if (!size)
		return -1;
	/*
	 * The limit on emulated nodes is MAX_NUMNODES, so the size per node is
	 * increased accordingly if the requested size is too small.  This
	 * creates a uniform distribution of node sizes across the entire
	 * machine (but not necessarily over physical nodes).
	 */
	min_size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) /
						MAX_NUMNODES;
	min_size = max(min_size, FAKE_NODE_MIN_SIZE);
	if ((min_size & FAKE_NODE_MIN_HASH_MASK) < min_size)
		min_size = (min_size + FAKE_NODE_MIN_SIZE) &
						FAKE_NODE_MIN_HASH_MASK;
	if (size < min_size) {
		pr_err("Fake node size %LuMB too small, increasing to %LuMB\n",
			size >> 20, min_size >> 20);
		size = min_size;
	}
	size &= FAKE_NODE_MIN_HASH_MASK;

	for (i = 0; i < MAX_NUMNODES; i++)
		if (physnodes[i].start != physnodes[i].end)
			node_set(i, physnode_mask);
	/*
	 * Fill physical nodes with fake nodes of size until there is no memory
	 * left on any of them.
	 */
	while (nodes_weight(physnode_mask)) {
		for_each_node_mask(i, physnode_mask) {
			u64 dma32_end = MAX_DMA32_PFN << PAGE_SHIFT;
			u64 end;

			end = find_end_of_node(physnodes[i].start,
						physnodes[i].end, size);
			/*
			 * If there won't be at least FAKE_NODE_MIN_SIZE of
			 * non-reserved memory in ZONE_DMA32 for the next node,
			 * this one must extend to the boundary.
			 */
			if (end < dma32_end && dma32_end - end -
			    memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
				end = dma32_end;

			/*
			 * If there won't be enough non-reserved memory for the
			 * next node, this one must extend to the end of the
			 * physical node.
			 */
			if (physnodes[i].end - end -
			    memblock_x86_hole_size(end, physnodes[i].end) < size)
				end = physnodes[i].end;

			/*
			 * Setup the fake node that will be allocated as bootmem
			 * later.  If setup_node_range() returns non-zero, there
			 * is no more memory available on this physical node.
			 */
			if (setup_node_range(ret++, &physnodes[i].start,
						end - physnodes[i].start,
						physnodes[i].end) < 0)
				node_clear(i, physnode_mask);
		}
	}
	return ret;
}

/*
 * Sets up the system RAM area from start_pfn to last_pfn according to the
 * numa=fake command-line option.
 */
static int __init numa_emulation(unsigned long start_pfn,
				 unsigned long last_pfn, int acpi, int amd)
{
	u64 addr = start_pfn << PAGE_SHIFT;
	u64 max_addr = last_pfn << PAGE_SHIFT;
	int num_nodes;
	int i;

	/*
	 * If the numa=fake command-line contains a 'M' or 'G', it represents
	 * the fixed node size.  Otherwise, if it is just a single number N,
	 * split the system RAM into N fake nodes.
	 */
	if (strchr(cmdline, 'M') || strchr(cmdline, 'G')) {
		u64 size;

		size = memparse(cmdline, &cmdline);
		num_nodes = split_nodes_size_interleave(addr, max_addr, size);
	} else {
		unsigned long n;

		n = simple_strtoul(cmdline, NULL, 0);
		num_nodes = split_nodes_interleave(addr, max_addr, n);
	}

	if (num_nodes < 0)
		return num_nodes;
	memnode_shift = compute_hash_shift(nodes, num_nodes, NULL);
	if (memnode_shift < 0) {
		memnode_shift = 0;
		printk(KERN_ERR "No NUMA hash function found.  NUMA emulation "
		       "disabled.\n");
		return -1;
	}

	/*
	 * We need to vacate all active ranges that may have been registered for
	 * the e820 memory map.
	 */
	remove_all_active_ranges();
	for_each_node_mask(i, node_possible_map) {
		memblock_x86_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
						nodes[i].end >> PAGE_SHIFT);
		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
	}
	setup_physnodes(addr, max_addr, acpi, amd);
	fake_physnodes(acpi, amd, num_nodes);
	numa_init_array();
	return 0;
}
#endif /* CONFIG_NUMA_EMU */

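/*
 * Node setup is attempted in order of decreasing detail: NUMA
 * emulation (if "numa=fake" was given), the ACPI SRAT, the AMD
 * northbridge registers, and finally a single dummy node covering
 * all memory.
 */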
void __init initmem_init(unsigned long start_pfn, unsigned long last_pfn,
			 int acpi, int amd)
{
	int i;

	nodes_clear(node_possible_map);
	nodes_clear(node_online_map);

#ifdef CONFIG_NUMA_EMU
	setup_physnodes(start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT,
			acpi, amd);
	if (cmdline && !numa_emulation(start_pfn, last_pfn, acpi, amd))
		return;
	setup_physnodes(start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT,
			acpi, amd);
	nodes_clear(node_possible_map);
	nodes_clear(node_online_map);
#endif

#ifdef CONFIG_ACPI_NUMA
	if (!numa_off && acpi && !acpi_scan_nodes(start_pfn << PAGE_SHIFT,
						  last_pfn << PAGE_SHIFT))
		return;
	nodes_clear(node_possible_map);
	nodes_clear(node_online_map);
#endif

#ifdef CONFIG_AMD_NUMA
	if (!numa_off && amd && !amd_scan_nodes())
		return;
	nodes_clear(node_possible_map);
	nodes_clear(node_online_map);
#endif
	printk(KERN_INFO "%s\n",
	       numa_off ? "NUMA turned off" : "No NUMA configuration found");

	printk(KERN_INFO "Faking a node at %016lx-%016lx\n",
	       start_pfn << PAGE_SHIFT,
	       last_pfn << PAGE_SHIFT);
	/* setup dummy node covering all memory */
	memnode_shift = 63;
	memnodemap = memnode.embedded_map;
	memnodemap[0] = 0;
	node_set_online(0);
	node_set(0, node_possible_map);
	for (i = 0; i < nr_cpu_ids; i++)
		numa_set_node(i, 0);
	memblock_x86_register_active_regions(0, start_pfn, last_pfn);
	setup_node_bootmem(0, start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT);
}

unsigned long __init numa_free_all_bootmem(void)
{
	unsigned long pages = 0;
	int i;

	for_each_online_node(i)
		pages += free_all_bootmem_node(NODE_DATA(i));

	pages += free_all_memory_core_early(MAX_NUMNODES);

	return pages;
}

#ifdef CONFIG_NUMA

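/*
 * Return the online node closest (by node_distance()) to @node; used
 * below as a fallback when a CPU's home node is not online.
 */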
static __init int find_near_online_node(int node)
{
	int n, val;
	int min_val = INT_MAX;
	int best_node = -1;

	for_each_online_node(n) {
		val = node_distance(node, n);

		if (val < min_val) {
			min_val = val;
			best_node = n;
		}
	}

	return best_node;
}

/*
 * Setup early cpu_to_node.
 *
 * Populate cpu_to_node[] only if x86_cpu_to_apicid[],
 * and apicid_to_node[] tables have valid entries for a CPU.
 * This means we skip cpu_to_node[] initialisation for NUMA
 * emulation and faking node case (when running a kernel compiled
 * for NUMA on a non NUMA box), which is OK as cpu_to_node[]
 * is already initialized in a round robin manner at numa_init_array,
 * prior to this call, and this initialization is good enough
 * for the fake NUMA cases.
 *
 * Called before the per_cpu areas are setup.
 */
void __init init_cpu_to_node(void)
{
	int cpu;
	u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);

	BUG_ON(cpu_to_apicid == NULL);

	for_each_possible_cpu(cpu) {
		int node = numa_cpu_node(cpu);

		if (node == NUMA_NO_NODE)
			continue;
		if (!node_online(node))
			node = find_near_online_node(node);
		numa_set_node(cpu, node);
	}
}
#endif

int __cpuinit numa_cpu_node(int cpu)
{
	int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);

	if (apicid != BAD_APICID)
		return __apicid_to_node[apicid];
	return NUMA_NO_NODE;
}

#ifndef CONFIG_DEBUG_PER_CPU_MAPS

#ifndef CONFIG_NUMA_EMU
void __cpuinit numa_add_cpu(int cpu)
{
	cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}
#else
void __cpuinit numa_add_cpu(int cpu)
{
	unsigned long addr;
	int physnid, nid;

	nid = numa_cpu_node(cpu);
	if (nid == NUMA_NO_NODE)
		nid = early_cpu_to_node(cpu);
	BUG_ON(nid == NUMA_NO_NODE || !node_online(nid));

	/*
	 * Use the starting address of the emulated node to find which physical
	 * node it is allocated on.
	 */
	addr = node_start_pfn(nid) << PAGE_SHIFT;
	for (physnid = 0; physnid < MAX_NUMNODES; physnid++)
		if (addr >= physnodes[physnid].start &&
		    addr < physnodes[physnid].end)
			break;

	/*
	 * Map the cpu to each emulated node that is allocated on the physical
	 * node of the cpu's apic id.
	 */
	for_each_online_node(nid) {
		addr = node_start_pfn(nid) << PAGE_SHIFT;
		if (addr >= physnodes[physnid].start &&
		    addr < physnodes[physnid].end)
			cpumask_set_cpu(cpu, node_to_cpumask_map[nid]);
	}
}

void __cpuinit numa_remove_cpu(int cpu)
{
	int i;

	for_each_online_node(i)
		cpumask_clear_cpu(cpu, node_to_cpumask_map[i]);
}
#endif /* !CONFIG_NUMA_EMU */

#else /* CONFIG_DEBUG_PER_CPU_MAPS */
static struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable)
{
	int node = early_cpu_to_node(cpu);
	struct cpumask *mask;
	char buf[64];

	mask = node_to_cpumask_map[node];
	if (!mask) {
		pr_err("node_to_cpumask_map[%i] NULL\n", node);
		dump_stack();
		return NULL;
	}

	cpulist_scnprintf(buf, sizeof(buf), mask);
	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
		enable ? "numa_add_cpu" : "numa_remove_cpu",
		cpu, node, buf);
	return mask;
}

/*
 * --------- debug versions of the numa functions ---------
 */
#ifndef CONFIG_NUMA_EMU
static void __cpuinit numa_set_cpumask(int cpu, int enable)
{
	struct cpumask *mask;

	mask = debug_cpumask_set_cpu(cpu, enable);
	if (!mask)
		return;

	if (enable)
		cpumask_set_cpu(cpu, mask);
	else
		cpumask_clear_cpu(cpu, mask);
}
#else
static void __cpuinit numa_set_cpumask(int cpu, int enable)
{
	int node = early_cpu_to_node(cpu);
	struct cpumask *mask;
	int i;

	for_each_online_node(i) {
		unsigned long addr;

		addr = node_start_pfn(i) << PAGE_SHIFT;
		if (addr < physnodes[node].start ||
		    addr >= physnodes[node].end)
			continue;
		mask = debug_cpumask_set_cpu(cpu, enable);
		if (!mask)
			return;

		if (enable)
			cpumask_set_cpu(cpu, mask);
		else
			cpumask_clear_cpu(cpu, mask);
	}
}
#endif /* CONFIG_NUMA_EMU */

void __cpuinit numa_add_cpu(int cpu)
{
	numa_set_cpumask(cpu, 1);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	numa_set_cpumask(cpu, 0);
}
/*
 * --------- end of debug versions of the numa functions ---------
 */

#endif /* CONFIG_DEBUG_PER_CPU_MAPS */