/*
 * pSeries NUMA support
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/threads.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <asm/sparsemem.h>
#include <asm/lmb.h>
#include <asm/system.h>
#include <asm/smp.h>

static int numa_enabled = 1;

static int numa_debug;
/* Wrapped in do/while (0) so dbg() is safe inside an unbraced if/else. */
#define dbg(args...) do { if (numa_debug) printk(KERN_INFO args); } while (0)

int numa_cpu_lookup_table[NR_CPUS];
cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(numa_cpumask_lookup_table);
EXPORT_SYMBOL(node_data);

static bootmem_data_t __initdata plat_node_bdata[MAX_NUMNODES];
static int min_common_depth;

/*
 * We need somewhere to store start/end/node for each region until we have
 * allocated the real node_data structures.
 */
#define MAX_REGIONS	(MAX_LMB_REGIONS*2)
static struct {
	unsigned long start_pfn;
	unsigned long end_pfn;
	int nid;
} init_node_data[MAX_REGIONS] __initdata;

int __init early_pfn_to_nid(unsigned long pfn)
{
	unsigned int i;

	for (i = 0; init_node_data[i].end_pfn; i++) {
		unsigned long start_pfn = init_node_data[i].start_pfn;
		unsigned long end_pfn = init_node_data[i].end_pfn;

		if ((start_pfn <= pfn) && (pfn < end_pfn))
			return init_node_data[i].nid;
	}

	return -1;
}
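
/*
 * Illustration (hypothetical values): with init_node_data[] holding
 * { 0x000, 0x100, nid 0 } and { 0x100, 0x200, nid 1 },
 * early_pfn_to_nid(0x180) returns 1, while early_pfn_to_nid(0x300)
 * walks off the populated entries and returns -1.
 */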

void __init add_region(unsigned int nid, unsigned long start_pfn,
		       unsigned long pages)
{
	unsigned int i;

	dbg("add_region nid %d start_pfn 0x%lx pages 0x%lx\n",
	    nid, start_pfn, pages);

	for (i = 0; init_node_data[i].end_pfn; i++) {
		if (init_node_data[i].nid != nid)
			continue;
		if (init_node_data[i].end_pfn == start_pfn) {
			init_node_data[i].end_pfn += pages;
			return;
		}
		if (init_node_data[i].start_pfn == (start_pfn + pages)) {
			init_node_data[i].start_pfn -= pages;
			return;
		}
	}

	/*
	 * Leave the last entry empty so we don't iterate off the end (we
	 * use entry.end_pfn to terminate the walk).
	 */
	if (i >= (MAX_REGIONS - 1)) {
		printk(KERN_ERR "WARNING: too many memory regions in "
				"numa code, truncating\n");
		return;
	}

	init_node_data[i].start_pfn = start_pfn;
	init_node_data[i].end_pfn = start_pfn + pages;
	init_node_data[i].nid = nid;
}
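
/*
 * For illustration (hypothetical values): add_region(0, 0x000, 0x100)
 * followed by add_region(0, 0x100, 0x100) coalesces into one entry
 * spanning pfns 0x000-0x200, because the second call finds an existing
 * entry whose end_pfn matches its start_pfn and extends it in place.
 */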

/* We assume init_node_data has no overlapping regions */
void __init get_region(unsigned int nid, unsigned long *start_pfn,
		       unsigned long *end_pfn, unsigned long *pages_present)
{
	unsigned int i;

	*start_pfn = -1UL;
	*end_pfn = *pages_present = 0;

	for (i = 0; init_node_data[i].end_pfn; i++) {
		if (init_node_data[i].nid != nid)
			continue;

		*pages_present += init_node_data[i].end_pfn -
				  init_node_data[i].start_pfn;

		if (init_node_data[i].start_pfn < *start_pfn)
			*start_pfn = init_node_data[i].start_pfn;

		if (init_node_data[i].end_pfn > *end_pfn)
			*end_pfn = init_node_data[i].end_pfn;
	}

	/* We didn't find a matching region, return start/end as 0 */
	if (*start_pfn == -1UL)
		*start_pfn = 0;
}
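
/*
 * For illustration (hypothetical values): if node 0 owns regions
 * [0x000, 0x100) and [0x200, 0x300), get_region() returns
 * start_pfn = 0x000, end_pfn = 0x300 and pages_present = 0x200, so the
 * node spans 0x300 pages of which 0x100 form a hole.
 */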

static inline void map_cpu_to_node(int cpu, int node)
{
	numa_cpu_lookup_table[cpu] = node;

	if (!(cpu_isset(cpu, numa_cpumask_lookup_table[node])))
		cpu_set(cpu, numa_cpumask_lookup_table[node]);
}

#ifdef CONFIG_HOTPLUG_CPU
static void unmap_cpu_from_node(unsigned long cpu)
{
	int node = numa_cpu_lookup_table[cpu];

	dbg("removing cpu %lu from node %d\n", cpu, node);

	if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) {
		cpu_clear(cpu, numa_cpumask_lookup_table[node]);
	} else {
		printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
		       cpu, node);
	}
}
#endif /* CONFIG_HOTPLUG_CPU */

static struct device_node *find_cpu_node(unsigned int cpu)
{
	unsigned int hw_cpuid = get_hard_smp_processor_id(cpu);
	struct device_node *cpu_node = NULL;
	unsigned int *interrupt_server, *reg;
	int len;

	while ((cpu_node = of_find_node_by_type(cpu_node, "cpu")) != NULL) {
		/* Try interrupt server first */
		interrupt_server = (unsigned int *)get_property(cpu_node,
					"ibm,ppc-interrupt-server#s", &len);

		len = len / sizeof(u32);

		if (interrupt_server && (len > 0)) {
			while (len--) {
				if (interrupt_server[len] == hw_cpuid)
					return cpu_node;
			}
		} else {
			reg = (unsigned int *)get_property(cpu_node,
							   "reg", &len);
			if (reg && (len > 0) && (reg[0] == hw_cpuid))
				return cpu_node;
		}
	}

	return NULL;
}

/* must hold reference to node during call */
static unsigned int *of_get_associativity(struct device_node *dev)
{
	return (unsigned int *)get_property(dev, "ibm,associativity", NULL);
}

static int of_node_numa_domain(struct device_node *device)
{
	int numa_domain;
	unsigned int *tmp;

	if (min_common_depth == -1)
		return 0;

	tmp = of_get_associativity(device);
	if (tmp && (tmp[0] >= min_common_depth)) {
		numa_domain = tmp[min_common_depth];
	} else {
		dbg("WARNING: no NUMA information for %s\n",
		    device->full_name);
		numa_domain = 0;
	}
	return numa_domain;
}
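
/*
 * Worked example (hypothetical values): a device with
 * "ibm,associativity" = <4 0 0 1 5>, where the first cell is the entry
 * count, and min_common_depth == 3 passes the tmp[0] >= 3 check and
 * yields tmp[3], i.e. NUMA domain 1.
 */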

/*
 * In theory, the "ibm,associativity" property may contain multiple
 * associativity lists because a resource may be multiply connected
 * into the machine. This resource then has different associativity
 * characteristics relative to its multiple connections. We ignore
 * this for now. We also assume that all cpu and memory sets have
 * their distances represented at a common level. This won't be
 * true for hierarchical NUMA.
 *
 * In any case the ibm,associativity-reference-points should give
 * the correct depth for a normal NUMA system.
 *
 * - Dave Hansen <haveblue@us.ibm.com>
 */
static int __init find_min_common_depth(void)
{
	int depth;
	unsigned int *ref_points;
	struct device_node *rtas_root;
	unsigned int len;

	rtas_root = of_find_node_by_path("/rtas");

	if (!rtas_root)
		return -1;

	/*
	 * This property is 2 32-bit integers, each representing a level of
	 * depth in the associativity nodes. The first is for an SMP
	 * configuration (should be all 0's) and the second is for a normal
	 * NUMA configuration.
	 */
	ref_points = (unsigned int *)get_property(rtas_root,
			"ibm,associativity-reference-points", &len);

	/* len is in bytes; we dereference the second 32-bit cell below */
	if (ref_points && (len >= 2 * sizeof(unsigned int))) {
		depth = ref_points[1];
	} else {
		dbg("WARNING: could not find NUMA "
		    "associativity reference point\n");
		depth = -1;
	}
	of_node_put(rtas_root);

	return depth;
}

static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
	struct device_node *memory = NULL;

	memory = of_find_node_by_type(memory, "memory");
	if (!memory)
		panic("numa.c: No memory nodes found!");

	*n_addr_cells = prom_n_addr_cells(memory);
	*n_size_cells = prom_n_size_cells(memory);
	of_node_put(memory);
}

static unsigned long __init read_n_cells(int n, unsigned int **buf)
{
	unsigned long result = 0;

	while (n--) {
		result = (result << 32) | **buf;
		(*buf)++;
	}
	return result;
}
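
/*
 * For illustration (hypothetical values): given two address cells
 * <0x1 0x00000000>, read_n_cells(2, &buf) returns 0x100000000 and
 * leaves *buf pointing just past the two cells consumed.
 */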

/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int numa_setup_cpu(unsigned long lcpu)
{
	int numa_domain = 0;
	struct device_node *cpu = find_cpu_node(lcpu);

	if (!cpu) {
		WARN_ON(1);
		goto out;
	}

	numa_domain = of_node_numa_domain(cpu);

	if (numa_domain >= num_online_nodes()) {
		/*
		 * POWER4 LPAR uses 0xffff as an invalid node;
		 * don't warn in this case.
		 */
		if (numa_domain != 0xffff)
			printk(KERN_ERR "WARNING: cpu %ld "
			       "maps to invalid NUMA node %d\n",
			       lcpu, numa_domain);
		numa_domain = 0;
	}
out:
	node_set_online(numa_domain);

	map_cpu_to_node(lcpu, numa_domain);

	of_node_put(cpu);

	return numa_domain;
}

static int cpu_numa_callback(struct notifier_block *nfb,
			     unsigned long action,
			     void *hcpu)
{
	unsigned long lcpu = (unsigned long)hcpu;
	int ret = NOTIFY_DONE;

	switch (action) {
	case CPU_UP_PREPARE:
		if (min_common_depth == -1 || !numa_enabled)
			map_cpu_to_node(lcpu, 0);
		else
			numa_setup_cpu(lcpu);
		ret = NOTIFY_OK;
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_UP_CANCELED:
		unmap_cpu_from_node(lcpu);
		ret = NOTIFY_OK;
		break;
#endif
	}
	return ret;
}

/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
						      unsigned long size)
{
	/*
	 * We use lmb_end_of_DRAM() in here instead of memory_limit because
	 * we've already adjusted it for the limit and it takes care of
	 * having memory holes below the limit.
	 */

	if (!memory_limit)
		return size;

	if (start + size <= lmb_end_of_DRAM())
		return size;

	if (start >= lmb_end_of_DRAM())
		return 0;

	return lmb_end_of_DRAM() - start;
}
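
/*
 * For illustration (hypothetical values, memory_limit set): with
 * lmb_end_of_DRAM() at 0x40000000, a region at 0x30000000 of size
 * 0x20000000 is truncated to 0x10000000, while a region starting at or
 * above 0x40000000 comes back as size 0 and is discarded.
 */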

static int __init parse_numa_properties(void)
{
	struct device_node *cpu = NULL;
	struct device_node *memory = NULL;
	int n_addr_cells, n_size_cells;
	int max_domain;
	unsigned long i;

	if (numa_enabled == 0) {
		printk(KERN_WARNING "NUMA disabled by user\n");
		return -1;
	}

	min_common_depth = find_min_common_depth();

	dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);
	if (min_common_depth < 0)
		return min_common_depth;

	max_domain = numa_setup_cpu(boot_cpuid);

	/*
	 * Even though we connect cpus to numa domains later in SMP init,
	 * we need to know the maximum node id now. This is because each
	 * node id must have NODE_DATA etc backing it.
	 * As a result of hotplug we could still have cpus appear later on
	 * with larger node ids. In that case we force the cpu into node 0.
	 */
	for_each_cpu(i) {
		int numa_domain;

		cpu = find_cpu_node(i);

		if (cpu) {
			numa_domain = of_node_numa_domain(cpu);
			of_node_put(cpu);

			if (numa_domain < MAX_NUMNODES &&
			    max_domain < numa_domain)
				max_domain = numa_domain;
		}
	}

	get_n_mem_cells(&n_addr_cells, &n_size_cells);
	memory = NULL;
	while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
		unsigned long start;
		unsigned long size;
		int numa_domain;
		int ranges;
		unsigned int *memcell_buf;
		unsigned int len;

		memcell_buf = (unsigned int *)get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		ranges = memory->n_addrs;
new_range:
		/* these are order-sensitive, and modify the buffer pointer */
		start = read_n_cells(n_addr_cells, &memcell_buf);
		size = read_n_cells(n_size_cells, &memcell_buf);

		numa_domain = of_node_numa_domain(memory);

		if (numa_domain >= MAX_NUMNODES) {
			if (numa_domain != 0xffff)
				printk(KERN_ERR "WARNING: memory at %lx maps "
				       "to invalid NUMA node %d\n", start,
				       numa_domain);
			numa_domain = 0;
		}

		if (max_domain < numa_domain)
			max_domain = numa_domain;

		if (!(size = numa_enforce_memory_limit(start, size))) {
			if (--ranges)
				goto new_range;
			else
				continue;
		}

		add_region(numa_domain, start >> PAGE_SHIFT,
			   size >> PAGE_SHIFT);

		if (--ranges)
			goto new_range;
	}

	for (i = 0; i <= max_domain; i++)
		node_set_online(i);

	return 0;
}

static void __init setup_nonnuma(void)
{
	unsigned long top_of_ram = lmb_end_of_DRAM();
	unsigned long total_ram = lmb_phys_mem_size();
	unsigned int i;

	printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_INFO "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	map_cpu_to_node(boot_cpuid, 0);
	for (i = 0; i < lmb.memory.cnt; ++i)
		add_region(0, lmb.memory.region[i].base >> PAGE_SHIFT,
			   lmb_size_pages(&lmb.memory, i));
	node_set_online(0);
}
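
/*
 * Print the per-node memory map, e.g. (illustrative output):
 *
 *   Node 0 Memory: 0x0-0x20000000
 *   Node 1 Memory: 0x20000000-0x40000000
 *
 * Memory is scanned in SECTION_SIZE_BITS-sized steps, so range
 * boundaries are reported at section granularity.
 */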
static void __init dump_numa_topology(void)
{
	unsigned int node;
	unsigned int count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		unsigned long i;

		printk(KERN_INFO "Node %d Memory:", node);

		count = 0;

		for (i = 0; i < lmb_end_of_DRAM();
		     i += (1 << SECTION_SIZE_BITS)) {
			if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
				if (count == 0)
					printk(" 0x%lx", i);
				++count;
			} else {
				if (count > 0)
					printk("-0x%lx", i);
				count = 0;
			}
		}

		if (count > 0)
			printk("-0x%lx", i);
		printk("\n");
	}
}

/*
 * Allocate some memory, satisfying the lmb or bootmem allocator where
 * required. nid is the preferred node and end is the physical address of
 * the highest address in the node.
 *
 * Returns the physical address of the memory.
 */
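/*
 * For illustration (hypothetical scenario): a request for node 1 whose
 * lmb allocation lands in memory that a lower-numbered node already
 * manages via bootmem is retried with __alloc_bootmem_node(), so the
 * two early allocators don't hand out the same pages twice.
 */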
static void __init *careful_allocation(int nid, unsigned long size,
				       unsigned long align,
				       unsigned long end_pfn)
{
	int new_nid;
	unsigned long ret = lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);

	/* retry over all memory */
	if (!ret)
		ret = lmb_alloc_base(size, align, lmb_end_of_DRAM());

	if (!ret)
		panic("numa.c: cannot allocate %lu bytes on node %d",
		      size, nid);

	/*
	 * If the memory came from a previously allocated node, we must
	 * retry with the bootmem allocator.
	 */
	new_nid = early_pfn_to_nid(ret >> PAGE_SHIFT);
	if (new_nid < nid) {
		ret = (unsigned long)__alloc_bootmem_node(NODE_DATA(new_nid),
						size, align, 0);

		if (!ret)
			panic("numa.c: cannot allocate %lu bytes on node %d",
			      size, new_nid);

		ret = __pa(ret);

		dbg("alloc_bootmem %lx %lx\n", ret, size);
	}

	return (void *)ret;
}

void __init do_init_bootmem(void)
{
	int nid;
	unsigned int i;
	static struct notifier_block ppc64_numa_nb = {
		.notifier_call = cpu_numa_callback,
		.priority = 1 /* Must run before sched domains notifier. */
	};

	min_low_pfn = 0;
	max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn;

	if (parse_numa_properties())
		setup_nonnuma();
	else
		dump_numa_topology();

	register_cpu_notifier(&ppc64_numa_nb);

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn, pages_present;
		unsigned long bootmem_paddr;
		unsigned long bootmap_pages;

		get_region(nid, &start_pfn, &end_pfn, &pages_present);

		/* Allocate the node structure node local if possible */
		NODE_DATA(nid) = careful_allocation(nid,
					sizeof(struct pglist_data),
					SMP_CACHE_BYTES, end_pfn);
		NODE_DATA(nid) = __va(NODE_DATA(nid));
		memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

		dbg("node %d\n", nid);
		dbg("NODE_DATA() = %p\n", NODE_DATA(nid));

		NODE_DATA(nid)->bdata = &plat_node_bdata[nid];
		NODE_DATA(nid)->node_start_pfn = start_pfn;
		NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;

		if (NODE_DATA(nid)->node_spanned_pages == 0)
			continue;

		dbg("start_paddr = %lx\n", start_pfn << PAGE_SHIFT);
		dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);

		bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
		bootmem_paddr = (unsigned long)careful_allocation(nid,
					bootmap_pages << PAGE_SHIFT,
					PAGE_SIZE, end_pfn);
		memset(__va(bootmem_paddr), 0, bootmap_pages << PAGE_SHIFT);

		dbg("bootmap_paddr = %lx\n", bootmem_paddr);

		init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT,
				  start_pfn, end_pfn);

		/* Add free regions on this node */
		for (i = 0; init_node_data[i].end_pfn; i++) {
			unsigned long start, end;

			if (init_node_data[i].nid != nid)
				continue;

			start = init_node_data[i].start_pfn << PAGE_SHIFT;
			end = init_node_data[i].end_pfn << PAGE_SHIFT;

			dbg("free_bootmem %lx %lx\n", start, end - start);
			free_bootmem_node(NODE_DATA(nid), start, end - start);
		}
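
		/*
		 * A reserved lmb region may straddle this node's
		 * boundaries; the loop below clamps each region to
		 * [start_paddr, end_paddr) so only the overlapping part
		 * is reserved here (illustration: a hypothetical region
		 * beginning below start_paddr has the low part trimmed
		 * off before reserve_bootmem_node()).
		 */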
		/* Mark reserved regions on this node */
		for (i = 0; i < lmb.reserved.cnt; i++) {
			unsigned long physbase = lmb.reserved.region[i].base;
			unsigned long size = lmb.reserved.region[i].size;
			unsigned long start_paddr = start_pfn << PAGE_SHIFT;
			unsigned long end_paddr = end_pfn << PAGE_SHIFT;

			if (early_pfn_to_nid(physbase >> PAGE_SHIFT) != nid &&
			    early_pfn_to_nid((physbase+size-1) >> PAGE_SHIFT) != nid)
				continue;

			if (physbase < end_paddr &&
			    (physbase+size) > start_paddr) {
				/* overlaps */
				if (physbase < start_paddr) {
					size -= start_paddr - physbase;
					physbase = start_paddr;
				}

				if (size > end_paddr - physbase)
					size = end_paddr - physbase;

				dbg("reserve_bootmem %lx %lx\n", physbase,
				    size);
				reserve_bootmem_node(NODE_DATA(nid), physbase,
						     size);
			}
		}

		/* Add regions into sparsemem */
		for (i = 0; init_node_data[i].end_pfn; i++) {
			unsigned long start, end;

			if (init_node_data[i].nid != nid)
				continue;

			start = init_node_data[i].start_pfn;
			end = init_node_data[i].end_pfn;

			memory_present(nid, start, end);
		}
	}
}

void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES];
	unsigned long zholes_size[MAX_NR_ZONES];
	int nid;

	memset(zones_size, 0, sizeof(zones_size));
	memset(zholes_size, 0, sizeof(zholes_size));

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn, pages_present;

		get_region(nid, &start_pfn, &end_pfn, &pages_present);

		zones_size[ZONE_DMA] = end_pfn - start_pfn;
		zholes_size[ZONE_DMA] = zones_size[ZONE_DMA] - pages_present;

		dbg("free_area_init node %d %lx %lx (hole: %lx)\n", nid,
		    zones_size[ZONE_DMA], start_pfn, zholes_size[ZONE_DMA]);

		free_area_init_node(nid, NODE_DATA(nid), zones_size, start_pfn,
				    zholes_size);
	}
}
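
/*
 * Parse the "numa=" boot option: "numa=off" disables NUMA setup and
 * "numa=debug" enables the dbg() messages above.  Matching is by
 * strstr(), so (illustrative) both flags could be given in one string.
 */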
static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	return 0;
}
early_param("numa", early_numa);