/*
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/threads.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <asm/sparsemem.h>
#include <asm/lmb.h>
#include <asm/system.h>
#include <asm/smp.h>

static int numa_enabled = 1;

static int numa_debug;
#define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }

int numa_cpu_lookup_table[NR_CPUS];
cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(numa_cpumask_lookup_table);
EXPORT_SYMBOL(node_data);

static bootmem_data_t __initdata plat_node_bdata[MAX_NUMNODES];
static int min_common_depth;
static int n_mem_addr_cells, n_mem_size_cells;

/*
 * We need somewhere to store start/end/node for each region until we have
 * allocated the real node_data structures.
 */
#define MAX_REGIONS	(MAX_LMB_REGIONS*2)
static struct {
        unsigned long start_pfn;
        unsigned long end_pfn;
        int nid;
} init_node_data[MAX_REGIONS] __initdata;

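/*
 * Look up the node for a pfn by scanning the boot-time region table.
 * Returns -1 if the pfn is not covered by any recorded region.
 */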
int __init early_pfn_to_nid(unsigned long pfn)
{
        unsigned int i;

        for (i = 0; init_node_data[i].end_pfn; i++) {
                unsigned long start_pfn = init_node_data[i].start_pfn;
                unsigned long end_pfn = init_node_data[i].end_pfn;

                if ((start_pfn <= pfn) && (pfn < end_pfn))
                        return init_node_data[i].nid;
        }

        return -1;
}

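/*
 * Record a memory region in init_node_data, merging it with an
 * existing entry for the same node when the new range directly
 * adjoins it (extending either its end or its start).
 */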
void __init add_region(unsigned int nid, unsigned long start_pfn,
                       unsigned long pages)
{
        unsigned int i;

        dbg("add_region nid %d start_pfn 0x%lx pages 0x%lx\n",
            nid, start_pfn, pages);

        for (i = 0; init_node_data[i].end_pfn; i++) {
                if (init_node_data[i].nid != nid)
                        continue;
                if (init_node_data[i].end_pfn == start_pfn) {
                        init_node_data[i].end_pfn += pages;
                        return;
                }
                if (init_node_data[i].start_pfn == (start_pfn + pages)) {
                        init_node_data[i].start_pfn -= pages;
                        return;
                }
        }

        /*
         * Leave last entry NULL so we don't iterate off the end (we use
         * entry.end_pfn to terminate the walk).
         */
        if (i >= (MAX_REGIONS - 1)) {
                printk(KERN_ERR "WARNING: too many memory regions in "
                       "numa code, truncating\n");
                return;
        }

        init_node_data[i].start_pfn = start_pfn;
        init_node_data[i].end_pfn = start_pfn + pages;
        init_node_data[i].nid = nid;
}

/* We assume init_node_data has no overlapping regions */
void __init get_region(unsigned int nid, unsigned long *start_pfn,
                       unsigned long *end_pfn, unsigned long *pages_present)
{
        unsigned int i;

        *start_pfn = -1UL;
        *end_pfn = *pages_present = 0;

        for (i = 0; init_node_data[i].end_pfn; i++) {
                if (init_node_data[i].nid != nid)
                        continue;

                *pages_present += init_node_data[i].end_pfn -
                                  init_node_data[i].start_pfn;

                if (init_node_data[i].start_pfn < *start_pfn)
                        *start_pfn = init_node_data[i].start_pfn;

                if (init_node_data[i].end_pfn > *end_pfn)
                        *end_pfn = init_node_data[i].end_pfn;
        }

        /* We didn't find a matching region, return start/end as 0 */
        if (*start_pfn == -1UL)
                *start_pfn = 0;
}

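/*
 * Maintain both directions of the cpu<->node mapping: the per-cpu
 * node lookup table and the per-node cpumask.
 */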
static void __cpuinit map_cpu_to_node(int cpu, int node)
{
        numa_cpu_lookup_table[cpu] = node;

        dbg("adding cpu %d to node %d\n", cpu, node);

        if (!(cpu_isset(cpu, numa_cpumask_lookup_table[node])))
                cpu_set(cpu, numa_cpumask_lookup_table[node]);
}

#ifdef CONFIG_HOTPLUG_CPU
static void unmap_cpu_from_node(unsigned long cpu)
{
        int node = numa_cpu_lookup_table[cpu];

        dbg("removing cpu %lu from node %d\n", cpu, node);

        if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) {
                cpu_clear(cpu, numa_cpumask_lookup_table[node]);
        } else {
                printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
                       cpu, node);
        }
}
#endif /* CONFIG_HOTPLUG_CPU */

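/*
 * Find the device tree node for a logical cpu by matching its hardware
 * id against each cpu node's "ibm,ppc-interrupt-server#s" list, falling
 * back to the "reg" property when that list is absent.
 */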
static struct device_node * __cpuinit find_cpu_node(unsigned int cpu)
{
        unsigned int hw_cpuid = get_hard_smp_processor_id(cpu);
        struct device_node *cpu_node = NULL;
        unsigned int *interrupt_server, *reg;
        int len;

        while ((cpu_node = of_find_node_by_type(cpu_node, "cpu")) != NULL) {
                /* Try interrupt server first */
                interrupt_server = (unsigned int *)get_property(cpu_node,
                                        "ibm,ppc-interrupt-server#s", &len);

                len = len / sizeof(u32);

                if (interrupt_server && (len > 0)) {
                        while (len--) {
                                if (interrupt_server[len] == hw_cpuid)
                                        return cpu_node;
                        }
                } else {
                        reg = (unsigned int *)get_property(cpu_node,
                                                           "reg", &len);
                        if (reg && (len > 0) && (reg[0] == hw_cpuid))
                                return cpu_node;
                }
        }

        return NULL;
}

/* must hold reference to node during call */
static int *of_get_associativity(struct device_node *dev)
{
        return (unsigned int *)get_property(dev, "ibm,associativity", NULL);
}

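/*
 * Derive a node id from a device node's "ibm,associativity" property:
 * entry 0 holds the list length (hence the bounds check below), and the
 * entry at min_common_depth is the domain the resource belongs to.
 */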
static int of_node_to_nid(struct device_node *device)
{
        int nid = -1;
        unsigned int *tmp;

        if (min_common_depth == -1)
                goto out;

        tmp = of_get_associativity(device);
        if (tmp && (tmp[0] >= min_common_depth)) {
                nid = tmp[min_common_depth];
        } else {
                dbg("WARNING: no NUMA information for %s\n",
                    device->full_name);
        }
out:
        return nid;
}

/*
 * In theory, the "ibm,associativity" property may contain multiple
 * associativity lists because a resource may be multiply connected
 * into the machine. This resource then has different associativity
 * characteristics relative to its multiple connections. We ignore
 * this for now. We also assume that all cpu and memory sets have
 * their distances represented at a common level. This won't be
 * true for hierarchical NUMA.
 *
 * In any case the ibm,associativity-reference-points should give
 * the correct depth for a normal NUMA system.
 *
 * - Dave Hansen <haveblue@us.ibm.com>
 */
static int __init find_min_common_depth(void)
{
        int depth;
        unsigned int *ref_points;
        struct device_node *rtas_root;
        unsigned int len;

        rtas_root = of_find_node_by_path("/rtas");

        if (!rtas_root)
                return -1;

        /*
         * this property is 2 32-bit integers, each representing a level of
         * depth in the associativity nodes. The first is for an SMP
         * configuration (should be all 0's) and the second is for a normal
         * NUMA configuration.
         */
        ref_points = (unsigned int *)get_property(rtas_root,
                        "ibm,associativity-reference-points", &len);

        if ((len >= 1) && ref_points) {
                depth = ref_points[1];
        } else {
                dbg("NUMA: ibm,associativity-reference-points not found.\n");
                depth = -1;
        }

        of_node_put(rtas_root);
        return depth;
}

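/*
 * Read how many 32-bit cells encode an address and a size under the
 * memory nodes, so the "reg" properties below can be decoded.
 */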
static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
        struct device_node *memory = NULL;

        memory = of_find_node_by_type(memory, "memory");
        if (!memory)
                panic("numa.c: No memory nodes found!");

        *n_addr_cells = prom_n_addr_cells(memory);
        *n_size_cells = prom_n_size_cells(memory);
        of_node_put(memory);
}

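/*
 * Concatenate n 32-bit cells (most significant first) into a single
 * unsigned long, advancing the caller's buffer pointer past them.
 */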
static unsigned long __devinit read_n_cells(int n, unsigned int **buf)
{
        unsigned long result = 0;

        while (n--) {
                result = (result << 32) | **buf;
                (*buf)++;
        }

        return result;
}

/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int __cpuinit numa_setup_cpu(unsigned long lcpu)
{
        int nid = 0;
        struct device_node *cpu = find_cpu_node(lcpu);

        if (!cpu) {
                WARN_ON(1);
                goto out;
        }

        nid = of_node_to_nid(cpu);

        if (nid >= num_online_nodes()) {
                /*
                 * POWER4 LPAR uses 0xffff as invalid node,
                 * don't warn in this case.
                 */
                if (nid != 0xffff)
                        printk(KERN_ERR "WARNING: cpu %ld "
                               "maps to invalid NUMA node %d\n",
                               lcpu, nid);
                nid = 0;
        }
out:
        node_set_online(nid);

        map_cpu_to_node(lcpu, nid);

        of_node_put(cpu);

        return nid;
}

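/*
 * Hotplug notifier: map a cpu to its node as it is brought up and,
 * with CONFIG_HOTPLUG_CPU, unmap it again when it dies or its bringup
 * is cancelled.
 */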
static int cpu_numa_callback(struct notifier_block *nfb,
                             unsigned long action,
                             void *hcpu)
{
        unsigned long lcpu = (unsigned long)hcpu;
        int ret = NOTIFY_DONE;

        switch (action) {
        case CPU_UP_PREPARE:
                if (min_common_depth == -1 || !numa_enabled)
                        map_cpu_to_node(lcpu, 0);
                else
                        numa_setup_cpu(lcpu);
                ret = NOTIFY_OK;
                break;
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_DEAD:
        case CPU_UP_CANCELED:
                unmap_cpu_from_node(lcpu);
                ret = NOTIFY_OK;
                break;
#endif
        }
        return ret;
}

/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
                                                      unsigned long size)
{
        /*
         * We use lmb_end_of_DRAM() in here instead of memory_limit because
         * we've already adjusted it for the limit and it takes care of
         * having memory holes below the limit.
         */

        if (!memory_limit)
                return size;

        if (start + size <= lmb_end_of_DRAM())
                return size;

        if (start >= lmb_end_of_DRAM())
                return 0;

        return lmb_end_of_DRAM() - start;
}

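/*
 * Walk the device tree, recording one region per memory range and
 * tracking the maximum domain id seen for cpus and memory, then mark
 * those nodes online. Returns 0 on success, negative if NUMA is
 * disabled or no usable associativity information was found.
 */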
static int __init parse_numa_properties(void)
{
        struct device_node *cpu = NULL;
        struct device_node *memory = NULL;
        int max_domain = 0;
        unsigned long i;

        if (numa_enabled == 0) {
                printk(KERN_WARNING "NUMA disabled by user\n");
                return -1;
        }

        min_common_depth = find_min_common_depth();

        if (min_common_depth < 0)
                return min_common_depth;

        dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);

        /*
         * Even though we connect cpus to numa domains later in SMP init,
         * we need to know the maximum node id now. This is because each
         * node id must have NODE_DATA etc backing it.
         * As a result of hotplug we could still have cpus appear later on
         * with larger node ids. In that case we force the cpu into node 0.
         */
        for_each_cpu(i) {
                int nid;

                cpu = find_cpu_node(i);

                if (cpu) {
                        nid = of_node_to_nid(cpu);
                        of_node_put(cpu);

                        if (nid < MAX_NUMNODES &&
                            max_domain < nid)
                                max_domain = nid;
                }
        }

        get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);
        memory = NULL;
        while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
                unsigned long start;
                unsigned long size;
                int nid;
                int ranges;
                unsigned int *memcell_buf;
                unsigned int len;

                memcell_buf = (unsigned int *)get_property(memory,
                        "linux,usable-memory", &len);
                if (!memcell_buf || len <= 0)
                        memcell_buf =
                                (unsigned int *)get_property(memory, "reg",
                                        &len);
                if (!memcell_buf || len <= 0)
                        continue;

                /* ranges in cell */
                ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
                /* these are order-sensitive, and modify the buffer pointer */
                start = read_n_cells(n_mem_addr_cells, &memcell_buf);
                size = read_n_cells(n_mem_size_cells, &memcell_buf);

                nid = of_node_to_nid(memory);

                if (nid >= MAX_NUMNODES) {
                        if (nid != 0xffff)
                                printk(KERN_ERR "WARNING: memory at %lx maps "
                                       "to invalid NUMA node %d\n", start,
                                       nid);
                        nid = 0;
                }

                if (max_domain < nid)
                        max_domain = nid;

                if (!(size = numa_enforce_memory_limit(start, size))) {
                        if (--ranges)
                                goto new_range;
                        else
                                continue;
                }

                add_region(nid, start >> PAGE_SHIFT,
                           size >> PAGE_SHIFT);

                if (--ranges)
                        goto new_range;
        }

        for (i = 0; i <= max_domain; i++)
                node_set_online(i);

        max_domain = numa_setup_cpu(boot_cpuid);

        return 0;
}

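/*
 * Fallback when no NUMA information is available: put the boot cpu and
 * every lmb memory region on node 0.
 */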
static void __init setup_nonnuma(void)
{
        unsigned long top_of_ram = lmb_end_of_DRAM();
        unsigned long total_ram = lmb_phys_mem_size();
        unsigned int i;

        printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
               top_of_ram, total_ram);
        printk(KERN_INFO "Memory hole size: %ldMB\n",
               (top_of_ram - total_ram) >> 20);

        map_cpu_to_node(boot_cpuid, 0);
        for (i = 0; i < lmb.memory.cnt; ++i)
                add_region(0, lmb.memory.region[i].base >> PAGE_SHIFT,
                           lmb_size_pages(&lmb.memory, i));
        node_set_online(0);
}

void __init dump_numa_cpu_topology(void)
{
        unsigned int node;
        unsigned int cpu, count;

        if (min_common_depth == -1 || !numa_enabled)
                return;

        for_each_online_node(node) {
                printk(KERN_INFO "Node %d CPUs:", node);

                count = 0;
                /*
                 * If we used a CPU iterator here we would miss printing
                 * the holes in the cpumap.
                 */
                for (cpu = 0; cpu < NR_CPUS; cpu++) {
                        if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) {
                                if (count == 0)
                                        printk(" %u", cpu);
                                ++count;
                        } else {
                                if (count > 1)
                                        printk("-%u", cpu - 1);
                                count = 0;
                        }
                }

                if (count > 1)
                        printk("-%u", NR_CPUS - 1);
                printk("\n");
        }
}

static void __init dump_numa_memory_topology(void)
{
        unsigned int node;
        unsigned int count;

        if (min_common_depth == -1 || !numa_enabled)
                return;

        for_each_online_node(node) {
                unsigned long i;

                printk(KERN_INFO "Node %d Memory:", node);

                count = 0;

                for (i = 0; i < lmb_end_of_DRAM();
                     i += (1 << SECTION_SIZE_BITS)) {
                        if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
                                if (count == 0)
                                        printk(" 0x%lx", i);
                                ++count;
                        } else {
                                if (count > 0)
                                        printk("-0x%lx", i);
                                count = 0;
                        }
                }

                if (count > 0)
                        printk("-0x%lx", i);
                printk("\n");
        }
}

/*
 * Allocate some memory, satisfying the lmb or bootmem allocator where
 * required. nid is the preferred node and end is the physical address of
 * the highest address in the node.
 *
 * Returns the physical address of the memory.
 */
static void __init *careful_allocation(int nid, unsigned long size,
                                       unsigned long align,
                                       unsigned long end_pfn)
{
        int new_nid;
        unsigned long ret = __lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);

        /* retry over all memory */
        if (!ret)
                ret = __lmb_alloc_base(size, align, lmb_end_of_DRAM());

        if (!ret)
                panic("numa.c: cannot allocate %lu bytes on node %d",
                      size, nid);

        /*
         * If the memory came from a previously allocated node, we must
         * retry with the bootmem allocator.
         */
        new_nid = early_pfn_to_nid(ret >> PAGE_SHIFT);
        if (new_nid < nid) {
                ret = (unsigned long)__alloc_bootmem_node(NODE_DATA(new_nid),
                                size, align, 0);

                if (!ret)
                        panic("numa.c: cannot allocate %lu bytes on node %d",
                              size, new_nid);

                ret = __pa(ret);

                dbg("alloc_bootmem %lx %lx\n", ret, size);
        }

        return (void *)ret;
}

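/*
 * For each online node: allocate the pglist_data and bootmem bitmap
 * (node-local if possible), free the node's regions into bootmem, mark
 * lmb reserved ranges that intersect the node, and register the
 * regions with sparsemem.
 */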
void __init do_init_bootmem(void)
{
        int nid;
        unsigned int i;
        static struct notifier_block ppc64_numa_nb = {
                .notifier_call = cpu_numa_callback,
                .priority = 1 /* Must run before sched domains notifier. */
        };

        max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
        max_pfn = max_low_pfn;

        if (parse_numa_properties())
                setup_nonnuma();
        else
                dump_numa_memory_topology();

        register_cpu_notifier(&ppc64_numa_nb);

        for_each_online_node(nid) {
                unsigned long start_pfn, end_pfn, pages_present;
                unsigned long bootmem_paddr;
                unsigned long bootmap_pages;

                get_region(nid, &start_pfn, &end_pfn, &pages_present);

                /* Allocate the node structure node local if possible */
                NODE_DATA(nid) = careful_allocation(nid,
                                        sizeof(struct pglist_data),
                                        SMP_CACHE_BYTES, end_pfn);
                NODE_DATA(nid) = __va(NODE_DATA(nid));
                memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

                dbg("node %d\n", nid);
                dbg("NODE_DATA() = %p\n", NODE_DATA(nid));

                NODE_DATA(nid)->bdata = &plat_node_bdata[nid];
                NODE_DATA(nid)->node_start_pfn = start_pfn;
                NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;

                if (NODE_DATA(nid)->node_spanned_pages == 0)
                        continue;

                dbg("start_paddr = %lx\n", start_pfn << PAGE_SHIFT);
                dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);

                bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
                bootmem_paddr = (unsigned long)careful_allocation(nid,
                                        bootmap_pages << PAGE_SHIFT,
                                        PAGE_SIZE, end_pfn);
                memset(__va(bootmem_paddr), 0, bootmap_pages << PAGE_SHIFT);

                dbg("bootmap_paddr = %lx\n", bootmem_paddr);

                init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT,
                                  start_pfn, end_pfn);

                /* Add free regions on this node */
                for (i = 0; init_node_data[i].end_pfn; i++) {
                        unsigned long start, end;

                        if (init_node_data[i].nid != nid)
                                continue;

                        start = init_node_data[i].start_pfn << PAGE_SHIFT;
                        end = init_node_data[i].end_pfn << PAGE_SHIFT;

                        dbg("free_bootmem %lx %lx\n", start, end - start);
                        free_bootmem_node(NODE_DATA(nid), start, end - start);
                }

                /* Mark reserved regions on this node */
                for (i = 0; i < lmb.reserved.cnt; i++) {
                        unsigned long physbase = lmb.reserved.region[i].base;
                        unsigned long size = lmb.reserved.region[i].size;
                        unsigned long start_paddr = start_pfn << PAGE_SHIFT;
                        unsigned long end_paddr = end_pfn << PAGE_SHIFT;

                        if (early_pfn_to_nid(physbase >> PAGE_SHIFT) != nid &&
                            early_pfn_to_nid((physbase+size-1) >> PAGE_SHIFT) != nid)
                                continue;

                        if (physbase < end_paddr &&
                            (physbase+size) > start_paddr) {
                                /* overlaps */
                                if (physbase < start_paddr) {
                                        size -= start_paddr - physbase;
                                        physbase = start_paddr;
                                }

                                if (size > end_paddr - physbase)
                                        size = end_paddr - physbase;

                                dbg("reserve_bootmem %lx %lx\n", physbase,
                                    size);
                                reserve_bootmem_node(NODE_DATA(nid), physbase,
                                                     size);
                        }
                }

                /* Add regions into sparsemem */
                for (i = 0; init_node_data[i].end_pfn; i++) {
                        unsigned long start, end;

                        if (init_node_data[i].nid != nid)
                                continue;

                        start = init_node_data[i].start_pfn;
                        end = init_node_data[i].end_pfn;

                        memory_present(nid, start, end);
                }
        }
}

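/*
 * All memory here goes into ZONE_DMA; each node's hole size is its
 * spanned range minus the pages actually present in its regions.
 */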
void __init paging_init(void)
{
        unsigned long zones_size[MAX_NR_ZONES];
        unsigned long zholes_size[MAX_NR_ZONES];
        int nid;

        memset(zones_size, 0, sizeof(zones_size));
        memset(zholes_size, 0, sizeof(zholes_size));

        for_each_online_node(nid) {
                unsigned long start_pfn, end_pfn, pages_present;

                get_region(nid, &start_pfn, &end_pfn, &pages_present);

                zones_size[ZONE_DMA] = end_pfn - start_pfn;
                zholes_size[ZONE_DMA] = zones_size[ZONE_DMA] - pages_present;

                dbg("free_area_init node %d %lx %lx (hole: %lx)\n", nid,
                    zones_size[ZONE_DMA], start_pfn, zholes_size[ZONE_DMA]);

                free_area_init_node(nid, NODE_DATA(nid), zones_size, start_pfn,
                                    zholes_size);
        }
}

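/* Handle the early "numa=" boot option: "off" and "debug" are recognized. */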
static int __init early_numa(char *p)
{
        if (!p)
                return 0;

        if (strstr(p, "off"))
                numa_enabled = 0;

        if (strstr(p, "debug"))
                numa_debug = 1;

        return 0;
}
early_param("numa", early_numa);

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Find the node associated with a hot added memory section. Section
 * corresponds to a SPARSEMEM section, not an LMB. It is assumed that
 * sections are fully contained within a single LMB.
 */
int hot_add_scn_to_nid(unsigned long scn_addr)
{
        struct device_node *memory = NULL;
        nodemask_t nodes;
        int nid = 0;

        if (!numa_enabled || (min_common_depth < 0))
                return nid;

        while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
                unsigned long start, size;
                int ranges;
                unsigned int *memcell_buf;
                unsigned int len;

                memcell_buf = (unsigned int *)get_property(memory, "reg", &len);
                if (!memcell_buf || len <= 0)
                        continue;

                /* ranges in cell */
                ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
ha_new_range:
                start = read_n_cells(n_mem_addr_cells, &memcell_buf);
                size = read_n_cells(n_mem_size_cells, &memcell_buf);
                nid = of_node_to_nid(memory);

                /* Domains not present at boot default to 0 */
                if (!node_online(nid))
                        nid = any_online_node(NODE_MASK_ALL);

                if ((scn_addr >= start) && (scn_addr < (start + size))) {
                        of_node_put(memory);
                        goto got_nid;
                }

                if (--ranges)           /* process all ranges in cell */
                        goto ha_new_range;
        }
        BUG();  /* section address should be found above */

        /* Temporary code to ensure that returned node is not empty */
got_nid:
        nodes_setall(nodes);
        while (NODE_DATA(nid)->node_spanned_pages == 0) {
                node_clear(nid, nodes);
                nid = any_online_node(nodes);
        }
        return nid;
}
#endif /* CONFIG_MEMORY_HOTPLUG */