/*
 * pSeries NUMA support
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/threads.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <asm/sparsemem.h>
#include <asm/lmb.h>
#include <asm/system.h>
#include <asm/smp.h>

static int numa_enabled = 1;

static int numa_debug;
#define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }
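
/*
 * numa_debug, and hence dbg(), is switched on by booting with "numa=debug";
 * see early_numa() at the bottom of this file.
 */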

int numa_cpu_lookup_table[NR_CPUS];
cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(numa_cpumask_lookup_table);
EXPORT_SYMBOL(node_data);

static bootmem_data_t __initdata plat_node_bdata[MAX_NUMNODES];
static int min_common_depth;

/*
 * We need somewhere to store start/end/node for each region until we have
 * allocated the real node_data structures.
 */
#define MAX_REGIONS (MAX_LMB_REGIONS*2)
static struct {
        unsigned long start_pfn;
        unsigned long end_pfn;
        int nid;
} init_node_data[MAX_REGIONS] __initdata;
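
/*
 * Look up the node for a pfn using the init_node_data table, before the
 * real node_data structures are available.
 */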
int __init early_pfn_to_nid(unsigned long pfn)
{
        unsigned int i;

        for (i = 0; init_node_data[i].end_pfn; i++) {
                unsigned long start_pfn = init_node_data[i].start_pfn;
                unsigned long end_pfn = init_node_data[i].end_pfn;

                if ((start_pfn <= pfn) && (pfn < end_pfn))
                        return init_node_data[i].nid;
        }

        return -1;
}
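
/*
 * Record a memory region for a node in init_node_data, merging it with an
 * adjacent region of the same node when possible.
 */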
void __init add_region(unsigned int nid, unsigned long start_pfn,
                       unsigned long pages)
{
        unsigned int i;

        dbg("add_region nid %d start_pfn 0x%lx pages 0x%lx\n",
                nid, start_pfn, pages);

        for (i = 0; init_node_data[i].end_pfn; i++) {
                if (init_node_data[i].nid != nid)
                        continue;
                if (init_node_data[i].end_pfn == start_pfn) {
                        init_node_data[i].end_pfn += pages;
                        return;
                }
                if (init_node_data[i].start_pfn == (start_pfn + pages)) {
                        init_node_data[i].start_pfn -= pages;
                        return;
                }
        }

        /*
         * Leave last entry NULL so we don't iterate off the end (we use
         * entry.end_pfn to terminate the walk).
         */
        if (i >= (MAX_REGIONS - 1)) {
                printk(KERN_ERR "WARNING: too many memory regions in "
                                "numa code, truncating\n");
                i = MAX_REGIONS - 1;
        }

        init_node_data[i].start_pfn = start_pfn;
        init_node_data[i].end_pfn = start_pfn + pages;
        init_node_data[i].nid = nid;
}

/* We assume init_node_data has no overlapping regions */
void __init get_region(unsigned int nid, unsigned long *start_pfn,
                       unsigned long *end_pfn, unsigned long *pages_present)
{
        unsigned int i;

        *start_pfn = -1UL;
        *end_pfn = *pages_present = 0;

        for (i = 0; init_node_data[i].end_pfn; i++) {
                if (init_node_data[i].nid != nid)
                        continue;

                *pages_present += init_node_data[i].end_pfn -
                        init_node_data[i].start_pfn;

                if (init_node_data[i].start_pfn < *start_pfn)
                        *start_pfn = init_node_data[i].start_pfn;

                if (init_node_data[i].end_pfn > *end_pfn)
                        *end_pfn = init_node_data[i].end_pfn;
        }

        /* We didn't find a matching region, return start/end as 0 */
        if (*start_pfn == -1UL)
                *start_pfn = 0;
}

static inline void map_cpu_to_node(int cpu, int node)
{
        numa_cpu_lookup_table[cpu] = node;

        if (!(cpu_isset(cpu, numa_cpumask_lookup_table[node])))
                cpu_set(cpu, numa_cpumask_lookup_table[node]);
}

#ifdef CONFIG_HOTPLUG_CPU
static void unmap_cpu_from_node(unsigned long cpu)
{
        int node = numa_cpu_lookup_table[cpu];

        dbg("removing cpu %lu from node %d\n", cpu, node);

        if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) {
                cpu_clear(cpu, numa_cpumask_lookup_table[node]);
        } else {
                printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
                       cpu, node);
        }
}
#endif /* CONFIG_HOTPLUG_CPU */
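
/*
 * Find the device tree node for a logical cpu by matching its hardware id
 * against either the "ibm,ppc-interrupt-server#s" or the "reg" property of
 * each cpu node.
 */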
static struct device_node *find_cpu_node(unsigned int cpu)
{
        unsigned int hw_cpuid = get_hard_smp_processor_id(cpu);
        struct device_node *cpu_node = NULL;
        unsigned int *interrupt_server, *reg;
        int len;

        while ((cpu_node = of_find_node_by_type(cpu_node, "cpu")) != NULL) {
                /* Try interrupt server first */
                interrupt_server = (unsigned int *)get_property(cpu_node,
                                        "ibm,ppc-interrupt-server#s", &len);

                len = len / sizeof(u32);

                if (interrupt_server && (len > 0)) {
                        while (len--) {
                                if (interrupt_server[len] == hw_cpuid)
                                        return cpu_node;
                        }
                } else {
                        reg = (unsigned int *)get_property(cpu_node,
                                                           "reg", &len);
                        if (reg && (len > 0) && (reg[0] == hw_cpuid))
                                return cpu_node;
                }
        }

        return NULL;
}

/* must hold reference to node during call */
static int *of_get_associativity(struct device_node *dev)
{
        return (unsigned int *)get_property(dev, "ibm,associativity", NULL);
}

static int of_node_numa_domain(struct device_node *device)
{
        int numa_domain;
        unsigned int *tmp;

        if (min_common_depth == -1)
                return 0;

        tmp = of_get_associativity(device);
        if (tmp && (tmp[0] >= min_common_depth)) {
                numa_domain = tmp[min_common_depth];
        } else {
                dbg("WARNING: no NUMA information for %s\n",
                    device->full_name);
                numa_domain = 0;
        }
        return numa_domain;
}

/*
 * In theory, the "ibm,associativity" property may contain multiple
 * associativity lists because a resource may be multiply connected
 * into the machine. This resource then has different associativity
 * characteristics relative to its multiple connections. We ignore
 * this for now. We also assume that all cpu and memory sets have
 * their distances represented at a common level. This won't be
 * true for hierarchical NUMA.
 *
 * In any case the ibm,associativity-reference-points should give
 * the correct depth for a normal NUMA system.
 *
 * - Dave Hansen <haveblue@us.ibm.com>
 */
static int __init find_min_common_depth(void)
{
        int depth;
        unsigned int *ref_points;
        struct device_node *rtas_root;
        int len;

        rtas_root = of_find_node_by_path("/rtas");

        if (!rtas_root)
                return -1;

        /*
         * this property is 2 32-bit integers, each representing a level of
         * depth in the associativity nodes. The first is for an SMP
         * configuration (should be all 0's) and the second is for a normal
         * NUMA configuration.
         */
        ref_points = (unsigned int *)get_property(rtas_root,
                        "ibm,associativity-reference-points", &len);

        if ((len >= 1) && ref_points) {
                depth = ref_points[1];
        } else {
                dbg("WARNING: could not find NUMA "
                    "associativity reference point\n");
                depth = -1;
        }
        of_node_put(rtas_root);

        return depth;
}

static int __init get_mem_addr_cells(void)
{
        struct device_node *memory = NULL;
        int rc;

        memory = of_find_node_by_type(memory, "memory");
        if (!memory)
                return 0; /* it won't matter */

        rc = prom_n_addr_cells(memory);
        return rc;
}

static int __init get_mem_size_cells(void)
{
        struct device_node *memory = NULL;
        int rc;

        memory = of_find_node_by_type(memory, "memory");
        if (!memory)
                return 0; /* it won't matter */
        rc = prom_n_size_cells(memory);
        return rc;
}
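
/*
 * Read an n-cell (32 bits per cell) big-endian value from a device tree
 * property buffer and advance the buffer pointer past it.
 */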
static unsigned long __init read_n_cells(int n, unsigned int **buf)
{
        unsigned long result = 0;

        while (n--) {
                result = (result << 32) | **buf;
                (*buf)++;
        }
        return result;
}

/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int numa_setup_cpu(unsigned long lcpu)
{
        int numa_domain = 0;
        struct device_node *cpu = find_cpu_node(lcpu);

        if (!cpu) {
                WARN_ON(1);
                goto out;
        }

        numa_domain = of_node_numa_domain(cpu);

        if (numa_domain >= num_online_nodes()) {
                /*
                 * POWER4 LPAR uses 0xffff as invalid node,
                 * don't warn in this case.
                 */
                if (numa_domain != 0xffff)
                        printk(KERN_ERR "WARNING: cpu %ld "
                               "maps to invalid NUMA node %d\n",
                               lcpu, numa_domain);
                numa_domain = 0;
        }
out:
        node_set_online(numa_domain);

        map_cpu_to_node(lcpu, numa_domain);

        of_node_put(cpu);

        return numa_domain;
}

static int cpu_numa_callback(struct notifier_block *nfb,
                             unsigned long action,
                             void *hcpu)
{
        unsigned long lcpu = (unsigned long)hcpu;
        int ret = NOTIFY_DONE;

        switch (action) {
        case CPU_UP_PREPARE:
                if (min_common_depth == -1 || !numa_enabled)
                        map_cpu_to_node(lcpu, 0);
                else
                        numa_setup_cpu(lcpu);
                ret = NOTIFY_OK;
                break;
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_DEAD:
        case CPU_UP_CANCELED:
                unmap_cpu_from_node(lcpu);
                break;
#endif
        }
        return ret;
}

/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
                                                      unsigned long size)
{
        /*
         * We use lmb_end_of_DRAM() in here instead of memory_limit because
         * we've already adjusted it for the limit and it takes care of
         * having memory holes below the limit.
         */

        if (!memory_limit)
                return size;

        if (start + size <= lmb_end_of_DRAM())
                return size;

        if (start >= lmb_end_of_DRAM())
                return 0;

        return lmb_end_of_DRAM() - start;
}

static int __init parse_numa_properties(void)
{
        struct device_node *cpu = NULL;
        struct device_node *memory = NULL;
        int addr_cells, size_cells;
        int max_domain;
        unsigned long i;

        if (numa_enabled == 0) {
                printk(KERN_WARNING "NUMA disabled by user\n");
                return -1;
        }

        min_common_depth = find_min_common_depth();

        dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);
        if (min_common_depth < 0)
                return min_common_depth;

        max_domain = numa_setup_cpu(boot_cpuid);

        /*
         * Even though we connect cpus to numa domains later in SMP init,
         * we need to know the maximum node id now. This is because each
         * node id must have NODE_DATA etc backing it.
         * As a result of hotplug we could still have cpus appear later on
         * with larger node ids. In that case we force the cpu into node 0.
         */
        for_each_cpu(i) {
                int numa_domain;

                cpu = find_cpu_node(i);

                if (cpu) {
                        numa_domain = of_node_numa_domain(cpu);
                        of_node_put(cpu);

                        if (numa_domain < MAX_NUMNODES &&
                            max_domain < numa_domain)
                                max_domain = numa_domain;
                }
        }
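
        /*
         * Walk all device tree "memory" nodes and record a region for each
         * address range they report, respecting any memory limit.
         */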
        addr_cells = get_mem_addr_cells();
        size_cells = get_mem_size_cells();

        while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
                unsigned long start;
                unsigned long size;
                int numa_domain;
                int ranges;
                unsigned int *memcell_buf;
                unsigned int len;

                memcell_buf = (unsigned int *)get_property(memory, "reg", &len);
                if (!memcell_buf || len <= 0)
                        continue;

                ranges = memory->n_addrs;
new_range:
                /* these are order-sensitive, and modify the buffer pointer */
                start = read_n_cells(addr_cells, &memcell_buf);
                size = read_n_cells(size_cells, &memcell_buf);

                numa_domain = of_node_numa_domain(memory);

                if (numa_domain >= MAX_NUMNODES) {
                        if (numa_domain != 0xffff)
                                printk(KERN_ERR "WARNING: memory at %lx maps "
                                       "to invalid NUMA node %d\n", start,
                                       numa_domain);
                        numa_domain = 0;
                }

                if (max_domain < numa_domain)
                        max_domain = numa_domain;

                if (!(size = numa_enforce_memory_limit(start, size))) {
                        if (--ranges)
                                goto new_range;
                        else
                                continue;
                }

                add_region(numa_domain, start >> PAGE_SHIFT,
                           size >> PAGE_SHIFT);

                if (--ranges)
                        goto new_range;
        }

        for (i = 0; i <= max_domain; i++)
                node_set_online(i);

        return 0;
}

static void __init setup_nonnuma(void)
{
        unsigned long top_of_ram = lmb_end_of_DRAM();
        unsigned long total_ram = lmb_phys_mem_size();
        unsigned int i;

        printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
               top_of_ram, total_ram);
        printk(KERN_INFO "Memory hole size: %ldMB\n",
               (top_of_ram - total_ram) >> 20);

        map_cpu_to_node(boot_cpuid, 0);
        for (i = 0; i < lmb.memory.cnt; ++i)
                add_region(0, lmb.memory.region[i].base >> PAGE_SHIFT,
                           lmb_size_pages(&lmb.memory, i));
        node_set_online(0);
}

static void __init dump_numa_topology(void)
{
        unsigned int node;
        unsigned int count;

        if (min_common_depth == -1 || !numa_enabled)
                return;

        for_each_online_node(node) {
                unsigned long i;

                printk(KERN_INFO "Node %d Memory:", node);

                count = 0;

                for (i = 0; i < lmb_end_of_DRAM();
                     i += (1 << SECTION_SIZE_BITS)) {
                        if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
                                if (count == 0)
                                        printk(" 0x%lx", i);
                                ++count;
                        } else {
                                if (count > 0)
                                        printk("-0x%lx", i);
                                count = 0;
                        }
                }

                if (count > 0)
                        printk("-0x%lx", i);
                printk("\n");
        }
}

/*
 * Allocate some memory, satisfying the lmb or bootmem allocator where
 * required. nid is the preferred node and end is the physical address of
 * the highest address in the node.
 *
 * Returns the physical address of the memory.
 */
static void __init *careful_allocation(int nid, unsigned long size,
                                       unsigned long align,
                                       unsigned long end_pfn)
{
        int new_nid;
        unsigned long ret = lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);

        /* retry over all memory */
        if (!ret)
                ret = lmb_alloc_base(size, align, lmb_end_of_DRAM());

        if (!ret)
                panic("numa.c: cannot allocate %lu bytes on node %d",
                      size, nid);

        /*
         * If the memory came from a previously allocated node, we must
         * retry with the bootmem allocator.
         */
        new_nid = early_pfn_to_nid(ret >> PAGE_SHIFT);
        if (new_nid < nid) {
                ret = (unsigned long)__alloc_bootmem_node(NODE_DATA(new_nid),
                                size, align, 0);

                if (!ret)
                        panic("numa.c: cannot allocate %lu bytes on node %d",
                              size, new_nid);

                ret = __pa(ret);

                dbg("alloc_bootmem %lx %lx\n", ret, size);
        }

        return (void *)ret;
}
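
/*
 * Discover the NUMA topology (falling back to a single node when none is
 * found) and set up a node-local bootmem allocator for each online node.
 */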
void __init do_init_bootmem(void)
{
        int nid;
        unsigned int i;
        static struct notifier_block ppc64_numa_nb = {
                .notifier_call = cpu_numa_callback,
                .priority = 1 /* Must run before sched domains notifier. */
        };

        min_low_pfn = 0;
        max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
        max_pfn = max_low_pfn;

        if (parse_numa_properties())
                setup_nonnuma();
        else
                dump_numa_topology();

        register_cpu_notifier(&ppc64_numa_nb);
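
        /*
         * Give each online node its pglist_data, a zeroed bootmem bitmap
         * covering its pfn range, and its free/reserved region information.
         */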
        for_each_online_node(nid) {
                unsigned long start_pfn, end_pfn, pages_present;
                unsigned long bootmem_paddr;
                unsigned long bootmap_pages;

                get_region(nid, &start_pfn, &end_pfn, &pages_present);

                /* Allocate the node structure node local if possible */
                NODE_DATA(nid) = careful_allocation(nid,
                                        sizeof(struct pglist_data),
                                        SMP_CACHE_BYTES, end_pfn);
                NODE_DATA(nid) = __va(NODE_DATA(nid));
                memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

                dbg("node %d\n", nid);
                dbg("NODE_DATA() = %p\n", NODE_DATA(nid));

                NODE_DATA(nid)->bdata = &plat_node_bdata[nid];
                NODE_DATA(nid)->node_start_pfn = start_pfn;
                NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;

                if (NODE_DATA(nid)->node_spanned_pages == 0)
                        continue;

                dbg("start_paddr = %lx\n", start_pfn << PAGE_SHIFT);
                dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);

                bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
                bootmem_paddr = (unsigned long)careful_allocation(nid,
                                        bootmap_pages << PAGE_SHIFT,
                                        PAGE_SIZE, end_pfn);
                memset(__va(bootmem_paddr), 0, bootmap_pages << PAGE_SHIFT);

                dbg("bootmap_paddr = %lx\n", bootmem_paddr);

                init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT,
                                  start_pfn, end_pfn);

                /* Add free regions on this node */
                for (i = 0; init_node_data[i].end_pfn; i++) {
                        unsigned long start, end;

                        if (init_node_data[i].nid != nid)
                                continue;

                        start = init_node_data[i].start_pfn << PAGE_SHIFT;
                        end = init_node_data[i].end_pfn << PAGE_SHIFT;

                        dbg("free_bootmem %lx %lx\n", start, end - start);
                        free_bootmem_node(NODE_DATA(nid), start, end - start);
                }

                /* Mark reserved regions on this node */
                for (i = 0; i < lmb.reserved.cnt; i++) {
                        unsigned long physbase = lmb.reserved.region[i].base;
                        unsigned long size = lmb.reserved.region[i].size;
                        unsigned long start_paddr = start_pfn << PAGE_SHIFT;
                        unsigned long end_paddr = end_pfn << PAGE_SHIFT;

                        if (early_pfn_to_nid(physbase >> PAGE_SHIFT) != nid &&
                            early_pfn_to_nid((physbase+size-1) >> PAGE_SHIFT) != nid)
                                continue;

                        if (physbase < end_paddr &&
                            (physbase+size) > start_paddr) {
                                /* overlaps */
                                if (physbase < start_paddr) {
                                        size -= start_paddr - physbase;
                                        physbase = start_paddr;
                                }

                                if (size > end_paddr - physbase)
                                        size = end_paddr - physbase;

                                dbg("reserve_bootmem %lx %lx\n", physbase,
                                    size);
                                reserve_bootmem_node(NODE_DATA(nid), physbase,
                                                     size);
                        }
                }

                /* Add regions into sparsemem */
                for (i = 0; init_node_data[i].end_pfn; i++) {
                        unsigned long start, end;

                        if (init_node_data[i].nid != nid)
                                continue;

                        start = init_node_data[i].start_pfn;
                        end = init_node_data[i].end_pfn;

                        memory_present(nid, start, end);
                }
        }
}

void __init paging_init(void)
{
        unsigned long zones_size[MAX_NR_ZONES];
        unsigned long zholes_size[MAX_NR_ZONES];
        int nid;

        memset(zones_size, 0, sizeof(zones_size));
        memset(zholes_size, 0, sizeof(zholes_size));

        for_each_online_node(nid) {
                unsigned long start_pfn, end_pfn, pages_present;

                get_region(nid, &start_pfn, &end_pfn, &pages_present);
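
                /*
                 * All of a node's memory is placed in ZONE_DMA here; the
                 * difference between the spanned range and pages_present is
                 * reported as the zone's holes.
                 */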
                zones_size[ZONE_DMA] = end_pfn - start_pfn;
                zholes_size[ZONE_DMA] = zones_size[ZONE_DMA] - pages_present;

                dbg("free_area_init node %d %lx %lx (hole: %lx)\n", nid,
                    zones_size[ZONE_DMA], start_pfn, zholes_size[ZONE_DMA]);

                free_area_init_node(nid, NODE_DATA(nid), zones_size, start_pfn,
                                    zholes_size);
        }
}
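
/*
 * Parse the "numa=" early boot parameter: "off" disables NUMA setup and
 * "debug" enables the dbg() messages above.
 */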
static int __init early_numa(char *p)
{
        if (!p)
                return 0;

        if (strstr(p, "off"))
                numa_enabled = 0;

        if (strstr(p, "debug"))
                numa_debug = 1;

        return 0;
}
early_param("numa", early_numa);