/*
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/threads.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/cpuset.h>
#include <linux/node.h>
#include <asm/sparsemem.h>
#include <asm/system.h>
#include <asm/firmware.h>
#include <asm/hvcall.h>

static int numa_enabled = 1;

static char *cmdline __initdata;

static int numa_debug;
#define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }

int numa_cpu_lookup_table[NR_CPUS];
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(node_to_cpumask_map);
EXPORT_SYMBOL(node_data);

static int min_common_depth;
static int n_mem_addr_cells, n_mem_size_cells;
static int form1_affinity;

#define MAX_DISTANCE_REF_POINTS 4
static int distance_ref_points_depth;
static const unsigned int *distance_ref_points;
static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: node_to_cpumask() is not valid until after this is done.
 */
static void __init setup_node_to_cpumask_map(void)
{
	unsigned int node, num = 0;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES) {
		for_each_node_mask(node, node_possible_map)
			num = node;
		nr_node_ids = num + 1;
	}

	/* allocate the map */
	for (node = 0; node < nr_node_ids; node++)
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

	/* cpumask_of_node() will now work */
	dbg("Node to cpumask map for %d nodes\n", nr_node_ids);
}

static int __cpuinit fake_numa_create_new_node(unsigned long end_pfn,
						unsigned int *nid)
{
	unsigned long long mem;
	char *p = cmdline;
	static unsigned int fake_nid;
	static unsigned long long curr_boundary;

	/*
	 * Modify node id, iff we started creating NUMA nodes
	 * We want to continue from where we left off the last time
	 */
	if (fake_nid)
		*nid = fake_nid;
	/*
	 * In case there are no more arguments to parse, the
	 * node_id should be the same as the last fake node id
	 * (we've handled this above).
	 */
	if (!p)
		return 0;

	mem = memparse(p, &p);
	if (!mem)
		return 0;

	if (mem < curr_boundary)
		return 0;

	curr_boundary = mem;

	if ((end_pfn << PAGE_SHIFT) > mem) {
		/*
		 * Skip commas and spaces
		 */
		while (*p == ',' || *p == ' ' || *p == '\t')
			p++;

		cmdline = p;
		fake_nid++;
		*nid = fake_nid;
		dbg("created new fake_node with id %d\n", fake_nid);
		return 1;
	}
	return 0;
}

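/*
 * Illustrative note (added for clarity, values assumed rather than taken from
 * any real configuration): with a command line such as "numa=fake=1G,2G,4G",
 * the boundaries parsed above would be 1G, 2G and 4G, so memory below 1G
 * lands in fake node 0, [1G, 2G) in fake node 1 and [2G, 4G) in fake node 2,
 * assuming the machine's memory actually spans those ranges.
 */
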
/*
 * get_active_region_work_fn - A helper function for get_node_active_region
 *	Returns datax set to the start_pfn and end_pfn if they contain
 *	the initial value of datax->start_pfn between them
 * @start_pfn: start page(inclusive) of region to check
 * @end_pfn: end page(exclusive) of region to check
 * @datax: comes in with ->start_pfn set to value to search for and
 *	goes out with active range if it contains it
 * Returns 1 if search value is in range else 0
 */
static int __init get_active_region_work_fn(unsigned long start_pfn,
					unsigned long end_pfn, void *datax)
{
	struct node_active_region *data;

	data = (struct node_active_region *)datax;

	if (start_pfn <= data->start_pfn && end_pfn > data->start_pfn) {
		data->start_pfn = start_pfn;
		data->end_pfn = end_pfn;
		return 1;
	}
	return 0;
}

/*
 * get_node_active_region - Return active region containing start_pfn
 * Active range returned is empty if none found.
 * @start_pfn: The page to return the region for.
 * @node_ar: Returned set to the active region containing start_pfn
 */
static void __init get_node_active_region(unsigned long start_pfn,
					  struct node_active_region *node_ar)
{
	int nid = early_pfn_to_nid(start_pfn);

	node_ar->nid = nid;
	node_ar->start_pfn = start_pfn;
	node_ar->end_pfn = start_pfn;
	work_with_active_regions(nid, get_active_region_work_fn, node_ar);
}

static void map_cpu_to_node(int cpu, int node)
{
	numa_cpu_lookup_table[cpu] = node;

	dbg("adding cpu %d to node %d\n", cpu, node);

	if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node])))
		cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
}

#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
static void unmap_cpu_from_node(unsigned long cpu)
{
	int node = numa_cpu_lookup_table[cpu];

	dbg("removing cpu %lu from node %d\n", cpu, node);

	if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
		cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
	} else {
		printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
		       cpu, node);
	}
}
#endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */

/* must hold reference to node during call */
static const int *of_get_associativity(struct device_node *dev)
{
	return of_get_property(dev, "ibm,associativity", NULL);
}

/*
 * Returns the property linux,drconf-usable-memory if
 * it exists (the property exists only in kexec/kdump kernels,
 * added by kexec-tools)
 */
static const u32 *of_get_usable_memory(struct device_node *memory)
{
	const u32 *prop;
	u32 len;

	prop = of_get_property(memory, "linux,drconf-usable-memory", &len);
	if (!prop || len < sizeof(unsigned int))
		return 0;
	return prop;
}

int __node_distance(int a, int b)
{
	int i;
	int distance = LOCAL_DISTANCE;

	if (!form1_affinity)
		return distance;

	for (i = 0; i < distance_ref_points_depth; i++) {
		if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
			break;

		/* Double the distance for each NUMA level */
		distance *= 2;
	}

	return distance;
}

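/*
 * Worked example (illustrative only, not derived from any firmware
 * documentation): if two nodes differ at the first reference point but share
 * the value at the second, the loop above doubles once and then breaks,
 * reporting 2 * LOCAL_DISTANCE; nodes that differ at every one of the
 * distance_ref_points_depth levels end up at
 * LOCAL_DISTANCE << distance_ref_points_depth.
 */
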
static void initialize_distance_lookup_table(int nid,
		const unsigned int *associativity)
{
	int i;

	if (!form1_affinity)
		return;

	for (i = 0; i < distance_ref_points_depth; i++) {
		distance_lookup_table[nid][i] =
			associativity[distance_ref_points[i]];
	}
}

/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
 * info is found.
 */
static int associativity_to_nid(const unsigned int *associativity)
{
	int nid = -1;

	if (min_common_depth == -1)
		goto out;

	if (associativity[0] >= min_common_depth)
		nid = associativity[min_common_depth];

	/* POWER4 LPAR uses 0xffff as invalid node */
	if (nid == 0xffff || nid >= MAX_NUMNODES)
		nid = -1;

	if (nid > 0 && associativity[0] >= distance_ref_points_depth)
		initialize_distance_lookup_table(nid, associativity);

out:
	return nid;
}

/* Returns the nid associated with the given device tree node,
 * or -1 if not found.
 */
static int of_node_to_nid_single(struct device_node *device)
{
	int nid = -1;
	const unsigned int *tmp;

	tmp = of_get_associativity(device);
	if (tmp)
		nid = associativity_to_nid(tmp);
	return nid;
}

/* Walk the device tree upwards, looking for an associativity id */
int of_node_to_nid(struct device_node *device)
{
	struct device_node *tmp;
	int nid = -1;

	of_node_get(device);
	while (device) {
		nid = of_node_to_nid_single(device);
		if (nid != -1)
			break;

		tmp = device;
		device = of_get_parent(tmp);
		of_node_put(tmp);
	}
	of_node_put(device);

	return nid;
}
EXPORT_SYMBOL_GPL(of_node_to_nid);

static int __init find_min_common_depth(void)
{
	int depth;
	struct device_node *rtas_root;
	struct device_node *chosen;
	const char *vec5;

	rtas_root = of_find_node_by_path("/rtas");

	if (!rtas_root)
		return -1;

	/*
	 * This property is a set of 32-bit integers, each representing
	 * an index into the ibm,associativity nodes.
	 *
	 * With form 0 affinity the first integer is for an SMP configuration
	 * (should be all 0's) and the second is for a normal NUMA
	 * configuration. We have only one level of NUMA.
	 *
	 * With form 1 affinity the first integer is the most significant
	 * NUMA boundary and the following are progressively less significant
	 * boundaries. There can be more than one level of NUMA.
	 */
	distance_ref_points = of_get_property(rtas_root,
					"ibm,associativity-reference-points",
					&distance_ref_points_depth);

	if (!distance_ref_points) {
		dbg("NUMA: ibm,associativity-reference-points not found.\n");
		goto err;
	}

	distance_ref_points_depth /= sizeof(int);

#define VEC5_AFFINITY_BYTE	5
#define VEC5_AFFINITY		0x80
	chosen = of_find_node_by_path("/chosen");
	if (chosen) {
		vec5 = of_get_property(chosen, "ibm,architecture-vec-5", NULL);
		if (vec5 && (vec5[VEC5_AFFINITY_BYTE] & VEC5_AFFINITY)) {
			dbg("Using form 1 affinity\n");
			form1_affinity = 1;
		}
	}

	if (form1_affinity) {
		depth = distance_ref_points[0];
	} else {
		if (distance_ref_points_depth < 2) {
			printk(KERN_WARNING "NUMA: "
				"short ibm,associativity-reference-points\n");
			goto err;
		}

		depth = distance_ref_points[1];
	}

	/*
	 * Warn and cap if the hardware supports more than
	 * MAX_DISTANCE_REF_POINTS domains.
	 */
	if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) {
		printk(KERN_WARNING "NUMA: distance array capped at "
			"%d entries\n", MAX_DISTANCE_REF_POINTS);
		distance_ref_points_depth = MAX_DISTANCE_REF_POINTS;
	}

	of_node_put(rtas_root);
	return depth;

err:
	of_node_put(rtas_root);
	return -1;
}

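/*
 * Illustrative example (assumed values, not taken from any particular
 * machine): with form 1 affinity an ibm,associativity-reference-points
 * property of <4 3 2> makes depth = distance_ref_points[0] = 4, i.e. the
 * node id is read from ibm,associativity[4] (the property's first cell is
 * its length).  With form 0 affinity the second entry of the property is
 * used instead.
 */
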
static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
	struct device_node *memory = NULL;

	memory = of_find_node_by_type(memory, "memory");
	if (!memory)
		panic("numa.c: No memory nodes found!");

	*n_addr_cells = of_n_addr_cells(memory);
	*n_size_cells = of_n_size_cells(memory);
	of_node_put(memory);
}

static unsigned long __devinit read_n_cells(int n, const unsigned int **buf)
{
	unsigned long result = 0;

	while (n--) {
		result = (result << 32) | **buf;
		(*buf)++;
	}
	return result;
}

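/*
 * For example (illustrative): with n == 2 and a buffer holding the cells
 * 0x00000001 0x00000000, read_n_cells() returns 0x100000000 and leaves
 * *buf pointing past the two cells just consumed.
 */
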
struct of_drconf_cell {
	u64	base_addr;
	u32	drc_index;
	u32	reserved;
	u32	aa_index;
	u32	flags;
};

#define DRCONF_MEM_ASSIGNED	0x00000008
#define DRCONF_MEM_AI_INVALID	0x00000040
#define DRCONF_MEM_RESERVED	0x00000080

/*
 * Read the next memblock list entry from the ibm,dynamic-memory property
 * and return the information in the provided of_drconf_cell structure.
 */
static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp)
{
	const u32 *cp;

	drmem->base_addr = read_n_cells(n_mem_addr_cells, cellp);

	cp = *cellp;
	drmem->drc_index = cp[0];
	drmem->reserved = cp[1];
	drmem->aa_index = cp[2];
	drmem->flags = cp[3];

	*cellp = cp + 4;
}

/*
 * Retrieve and validate the ibm,dynamic-memory property of the device tree.
 *
 * The layout of the ibm,dynamic-memory property is a number N of memblock
 * list entries followed by N memblock list entries.  Each memblock list entry
 * contains information as laid out in the of_drconf_cell struct above.
 */
static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
{
	const u32 *prop;
	u32 len, entries;

	prop = of_get_property(memory, "ibm,dynamic-memory", &len);
	if (!prop || len < sizeof(unsigned int))
		return 0;

	entries = *prop++;

	/* Now that we know the number of entries, revalidate the size
	 * of the property read in to ensure we have everything
	 */
	if (len < (entries * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int))
		return 0;

	*dm = prop;
	return entries;
}

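/*
 * Layout sketch (illustrative, assuming n_mem_addr_cells == 2): the property
 * then reads
 *   <N  base_hi base_lo drc_index reserved aa_index flags  ... repeated N times>
 * so each entry is n_mem_addr_cells + 4 cells long, which is what the size
 * check above revalidates after reading N.
 */
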
/*
 * Retrieve and validate the ibm,lmb-size property for drconf memory
 * from the device tree.
 */
static u64 of_get_lmb_size(struct device_node *memory)
{
	const u32 *prop;
	u32 len;

	prop = of_get_property(memory, "ibm,lmb-size", &len);
	if (!prop || len < sizeof(unsigned int))
		return 0;

	return read_n_cells(n_mem_size_cells, &prop);
}

struct assoc_arrays {
	u32	n_arrays;
	u32	array_sz;
	const u32 *arrays;
};

/*
 * Retrieve and validate the list of associativity arrays for drconf
 * memory from the ibm,associativity-lookup-arrays property of the
 * device tree.
 *
 * The layout of the ibm,associativity-lookup-arrays property is a number N
 * indicating the number of associativity arrays, followed by a number M
 * indicating the size of each associativity array, followed by a list
 * of N associativity arrays.
 */
static int of_get_assoc_arrays(struct device_node *memory,
			       struct assoc_arrays *aa)
{
	const u32 *prop;
	u32 len;

	prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
	if (!prop || len < 2 * sizeof(unsigned int))
		return -1;

	aa->n_arrays = *prop++;
	aa->array_sz = *prop++;

	/* Now that we know the number of arrays and size of each array,
	 * revalidate the size of the property read in.
	 */
	if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
		return -1;

	aa->arrays = prop;
	return 0;
}

/*
 * This is like of_node_to_nid_single() for memory represented in the
 * ibm,dynamic-reconfiguration-memory node.
 */
static int of_drconf_to_nid_single(struct of_drconf_cell *drmem,
				   struct assoc_arrays *aa)
{
	int default_nid = 0;
	int nid = default_nid;
	int index;

	if (min_common_depth > 0 && min_common_depth <= aa->array_sz &&
	    !(drmem->flags & DRCONF_MEM_AI_INVALID) &&
	    drmem->aa_index < aa->n_arrays) {
		index = drmem->aa_index * aa->array_sz + min_common_depth - 1;
		nid = aa->arrays[index];

		if (nid == 0xffff || nid >= MAX_NUMNODES)
			nid = default_nid;
	}

	return nid;
}

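/*
 * Worked example (assumed values, for illustration only): with
 * min_common_depth == 4 and aa->array_sz == 5, an LMB whose aa_index is 2
 * reads its node id from aa->arrays[2 * 5 + 4 - 1] == aa->arrays[13], i.e.
 * the fourth element of the third associativity array.
 */
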
/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int __cpuinit numa_setup_cpu(unsigned long lcpu)
{
	int nid = 0;
	struct device_node *cpu = of_get_cpu_node(lcpu, NULL);

	if (!cpu) {
		WARN_ON(1);
		goto out;
	}

	nid = of_node_to_nid_single(cpu);

	if (nid < 0 || !node_online(nid))
		nid = first_online_node;
out:
	map_cpu_to_node(lcpu, nid);

	of_node_put(cpu);

	return nid;
}

static int __cpuinit cpu_numa_callback(struct notifier_block *nfb,
				       unsigned long action,
				       void *hcpu)
{
	unsigned long lcpu = (unsigned long)hcpu;
	int ret = NOTIFY_DONE;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		numa_setup_cpu(lcpu);
		ret = NOTIFY_OK;
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		unmap_cpu_from_node(lcpu);
		ret = NOTIFY_OK;
		break;
#endif
	}
	return ret;
}

/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
						      unsigned long size)
{
	/*
	 * We use memblock_end_of_DRAM() in here instead of memory_limit because
	 * we've already adjusted it for the limit and it takes care of
	 * having memory holes below the limit.  Also, in the case of
	 * iommu_is_off, memory_limit is not set but is implicitly enforced.
	 */

	if (start + size <= memblock_end_of_DRAM())
		return size;

	if (start >= memblock_end_of_DRAM())
		return 0;

	return memblock_end_of_DRAM() - start;
}

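/*
 * For example (illustrative): with memblock_end_of_DRAM() at 4GB, a region
 * starting at 3GB with size 2GB is truncated to 1GB, while a region starting
 * at 5GB is dropped entirely (size 0 is returned).
 */
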
/*
 * Reads the counter for a given entry in
 * linux,drconf-usable-memory property
 */
static inline int __init read_usm_ranges(const u32 **usm)
{
	/*
	 * For each lmb in ibm,dynamic-memory a corresponding
	 * entry in linux,drconf-usable-memory property contains
	 * a counter followed by that many (base, size) duples.
	 * read the counter from linux,drconf-usable-memory
	 */
	return read_n_cells(n_mem_size_cells, usm);
}

/*
 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 * node.  This assumes n_mem_{addr,size}_cells have been set.
 */
static void __init parse_drconf_memory(struct device_node *memory)
{
	const u32 *dm, *usm;
	unsigned int n, rc, ranges, is_kexec_kdump = 0;
	unsigned long lmb_size, base, size, sz;
	int nid;
	struct assoc_arrays aa;

	n = of_get_drconf_memory(memory, &dm);
	if (!n)
		return;

	lmb_size = of_get_lmb_size(memory);
	if (!lmb_size)
		return;

	rc = of_get_assoc_arrays(memory, &aa);
	if (rc)
		return;

	/* check if this is a kexec/kdump kernel */
	usm = of_get_usable_memory(memory);
	if (usm != NULL)
		is_kexec_kdump = 1;

	for (; n != 0; --n) {
		struct of_drconf_cell drmem;

		read_drconf_cell(&drmem, &dm);

		/* skip this block if the reserved bit is set in flags (0x80)
		   or if the block is not assigned to this partition (0x8) */
		if ((drmem.flags & DRCONF_MEM_RESERVED)
		    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
			continue;

		base = drmem.base_addr;
		size = lmb_size;
		ranges = 1;

		if (is_kexec_kdump) {
			ranges = read_usm_ranges(&usm);
			if (!ranges) /* there are no (base, size) duples */
				continue;
		}
		do {
			if (is_kexec_kdump) {
				base = read_n_cells(n_mem_addr_cells, &usm);
				size = read_n_cells(n_mem_size_cells, &usm);
			}
			nid = of_drconf_to_nid_single(&drmem, &aa);
			fake_numa_create_new_node(
						((base + size) >> PAGE_SHIFT),
						&nid);
			node_set_online(nid);
			sz = numa_enforce_memory_limit(base, size);
			if (sz)
				add_active_range(nid, base >> PAGE_SHIFT,
						 (base >> PAGE_SHIFT)
						 + (sz >> PAGE_SHIFT));
		} while (--ranges);
	}
}

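/*
 * Note on the kexec/kdump path above (descriptive, added for clarity): when
 * linux,drconf-usable-memory is present, each LMB is replaced by the
 * (base, size) pairs listed for it, so a partially usable LMB can contribute
 * several smaller active ranges instead of one lmb_size-sized range.
 */
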
static int __init parse_numa_properties(void)
{
	struct device_node *cpu = NULL;
	struct device_node *memory = NULL;
	int default_nid = 0;
	unsigned long i;

	if (numa_enabled == 0) {
		printk(KERN_WARNING "NUMA disabled by user\n");
		return -1;
	}

	min_common_depth = find_min_common_depth();

	if (min_common_depth < 0)
		return min_common_depth;

	dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);

	/*
	 * Even though we connect cpus to numa domains later in SMP
	 * init, we need to know the node ids now. This is because
	 * each node to be onlined must have NODE_DATA etc backing it.
	 */
	for_each_present_cpu(i) {
		int nid;

		cpu = of_get_cpu_node(i, NULL);
		BUG_ON(!cpu);
		nid = of_node_to_nid_single(cpu);
		of_node_put(cpu);

		/*
		 * Don't fall back to default_nid yet -- we will plug
		 * cpus into nodes once the memory scan has discovered
		 * the topology.
		 */
		if (nid < 0)
			continue;
		node_set_online(nid);
	}

	get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);

	while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
		unsigned long start;
		unsigned long size;
		int nid;
		int ranges;
		const unsigned int *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory,
			"linux,usable-memory", &len);
		if (!memcell_buf || len <= 0)
			memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
		/* these are order-sensitive, and modify the buffer pointer */
		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
		size = read_n_cells(n_mem_size_cells, &memcell_buf);

		/*
		 * Assumption: either all memory nodes or none will
		 * have associativity properties.  If none, then
		 * everything goes to default_nid.
		 */
		nid = of_node_to_nid_single(memory);
		if (nid < 0)
			nid = default_nid;

		fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
		node_set_online(nid);

		if (!(size = numa_enforce_memory_limit(start, size))) {
			if (--ranges)
				goto new_range;
			else
				continue;
		}

		add_active_range(nid, start >> PAGE_SHIFT,
				 (start >> PAGE_SHIFT) + (size >> PAGE_SHIFT));

		if (--ranges)
			goto new_range;
	}

	/*
	 * Now do the same thing for each MEMBLOCK listed in the ibm,dynamic-memory
	 * property in the ibm,dynamic-reconfiguration-memory node.
	 */
	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory)
		parse_drconf_memory(memory);

	return 0;
}

static void __init setup_nonnuma(void)
{
	unsigned long top_of_ram = memblock_end_of_DRAM();
	unsigned long total_ram = memblock_phys_mem_size();
	unsigned long start_pfn, end_pfn;
	unsigned int nid = 0;
	struct memblock_region *reg;

	printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	for_each_memblock(memory, reg) {
		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);

		fake_numa_create_new_node(end_pfn, &nid);
		add_active_range(nid, start_pfn, end_pfn);
		node_set_online(nid);
	}
}

void __init dump_numa_cpu_topology(void)
{
	unsigned int node;
	unsigned int cpu, count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		printk(KERN_DEBUG "Node %d CPUs:", node);

		count = 0;
		/*
		 * If we used a CPU iterator here we would miss printing
		 * the holes in the cpumap.
		 */
		for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
			if (cpumask_test_cpu(cpu,
					node_to_cpumask_map[node])) {
				if (count == 0)
					printk(" %u", cpu);
				++count;
			} else {
				if (count > 1)
					printk("-%u", cpu - 1);
				count = 0;
			}
		}

		if (count > 1)
			printk("-%u", nr_cpu_ids - 1);
		printk("\n");
	}
}

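/*
 * Example output (illustrative only): a two node machine with eight cpus per
 * node would print something like
 *   Node 0 CPUs: 0-7
 *   Node 1 CPUs: 8-15
 */
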
static void __init dump_numa_memory_topology(void)
{
	unsigned int node;
	unsigned int count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		unsigned long i;

		printk(KERN_DEBUG "Node %d Memory:", node);

		count = 0;

		for (i = 0; i < memblock_end_of_DRAM();
		     i += (1 << SECTION_SIZE_BITS)) {
			if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
				if (count == 0)
					printk(" 0x%lx", i);
				++count;
			} else {
				if (count > 0)
					printk("-0x%lx", i);
				count = 0;
			}
		}

		if (count > 0)
			printk("-0x%lx", i);
		printk("\n");
	}
}

/*
 * Allocate some memory, satisfying the memblock or bootmem allocator where
 * required. nid is the preferred node and end is the physical address of
 * the highest address in the node.
 *
 * Returns the virtual address of the memory.
 */
static void __init *careful_zallocation(int nid, unsigned long size,
					unsigned long align,
					unsigned long end_pfn)
{
	void *ret;
	int new_nid;
	unsigned long ret_paddr;

	ret_paddr = __memblock_alloc_base(size, align, end_pfn << PAGE_SHIFT);

	/* retry over all memory */
	if (!ret_paddr)
		ret_paddr = __memblock_alloc_base(size, align, memblock_end_of_DRAM());

	if (!ret_paddr)
		panic("numa.c: cannot allocate %lu bytes for node %d",
		      size, nid);

	ret = __va(ret_paddr);

	/*
	 * We initialize the nodes in numeric order: 0, 1, 2...
	 * and hand over control from the MEMBLOCK allocator to the
	 * bootmem allocator.  If this function is called for
	 * node 5, then we know that all nodes <5 are using the
	 * bootmem allocator instead of the MEMBLOCK allocator.
	 *
	 * So, check the nid from which this allocation came
	 * and double check to see if we need to use bootmem
	 * instead of the MEMBLOCK.  We don't free the MEMBLOCK memory
	 * since it would be useless.
	 */
	new_nid = early_pfn_to_nid(ret_paddr >> PAGE_SHIFT);
	if (new_nid < nid) {
		ret = __alloc_bootmem_node(NODE_DATA(new_nid),
				size, align, 0);

		dbg("alloc_bootmem %p %lx\n", ret, size);
	}

	memset(ret, 0, size);
	return ret;
}

static struct notifier_block __cpuinitdata ppc64_numa_nb = {
	.notifier_call = cpu_numa_callback,
	.priority = 1 /* Must run before sched domains notifier. */
};

static void mark_reserved_regions_for_nid(int nid)
{
	struct pglist_data *node = NODE_DATA(nid);
	struct memblock_region *reg;

	for_each_memblock(reserved, reg) {
		unsigned long physbase = reg->base;
		unsigned long size = reg->size;
		unsigned long start_pfn = physbase >> PAGE_SHIFT;
		unsigned long end_pfn = PFN_UP(physbase + size);
		struct node_active_region node_ar;
		unsigned long node_end_pfn = node->node_start_pfn +
					     node->node_spanned_pages;

		/*
		 * Check to make sure that this memblock.reserved area is
		 * within the bounds of the node that we care about.
		 * Checking the nid of the start and end points is not
		 * sufficient because the reserved area could span the
		 * entire node.
		 */
		if (end_pfn <= node->node_start_pfn ||
		    start_pfn >= node_end_pfn)
			continue;

		get_node_active_region(start_pfn, &node_ar);
		while (start_pfn < end_pfn &&
			node_ar.start_pfn < node_ar.end_pfn) {
			unsigned long reserve_size = size;
			/*
			 * if reserved region extends past active region
			 * then trim size to active region
			 */
			if (end_pfn > node_ar.end_pfn)
				reserve_size = (node_ar.end_pfn << PAGE_SHIFT)
					- physbase;
			/*
			 * Only worry about *this* node, others may not
			 * yet have valid NODE_DATA().
			 */
			if (node_ar.nid == nid) {
				dbg("reserve_bootmem %lx %lx nid=%d\n",
					physbase, reserve_size, node_ar.nid);
				reserve_bootmem_node(NODE_DATA(node_ar.nid),
						physbase, reserve_size,
						BOOTMEM_DEFAULT);
			}
			/*
			 * if reserved region is contained in the active region
			 * then done.
			 */
			if (end_pfn <= node_ar.end_pfn)
				break;

			/*
			 * reserved region extends past the active region
			 *   get next active region that contains this
			 *   reserved region
			 */
			start_pfn = node_ar.end_pfn;
			physbase = start_pfn << PAGE_SHIFT;
			size = size - reserve_size;
			get_node_active_region(start_pfn, &node_ar);
		}
	}
}

void __init do_init_bootmem(void)
{
	int nid;

	max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn;

	if (parse_numa_properties())
		setup_nonnuma();
	else
		dump_numa_memory_topology();

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;
		void *bootmem_vaddr;
		unsigned long bootmap_pages;

		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

		/*
		 * Allocate the node structure node local if possible
		 *
		 * Be careful moving this around, as it relies on all
		 * previous nodes' bootmem to be initialized and have
		 * all reserved areas marked.
		 */
		NODE_DATA(nid) = careful_zallocation(nid,
					sizeof(struct pglist_data),
					SMP_CACHE_BYTES, end_pfn);

		dbg("node %d\n", nid);
		dbg("NODE_DATA() = %p\n", NODE_DATA(nid));

		NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
		NODE_DATA(nid)->node_start_pfn = start_pfn;
		NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;

		if (NODE_DATA(nid)->node_spanned_pages == 0)
			continue;

		dbg("start_paddr = %lx\n", start_pfn << PAGE_SHIFT);
		dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);

		bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
		bootmem_vaddr = careful_zallocation(nid,
					bootmap_pages << PAGE_SHIFT,
					PAGE_SIZE, end_pfn);

		dbg("bootmap_vaddr = %p\n", bootmem_vaddr);

		init_bootmem_node(NODE_DATA(nid),
				  __pa(bootmem_vaddr) >> PAGE_SHIFT,
				  start_pfn, end_pfn);

		free_bootmem_with_active_regions(nid, end_pfn);
		/*
		 * Be very careful about moving this around.  Future
		 * calls to careful_zallocation() depend on this getting
		 * done correctly.
		 */
		mark_reserved_regions_for_nid(nid);
		sparse_memory_present_with_active_regions(nid);
	}

	init_bootmem_done = 1;

	/*
	 * Now bootmem is initialised we can create the node to cpumask
	 * lookup tables and setup the cpu callback to populate them.
	 */
	setup_node_to_cpumask_map();

	register_cpu_notifier(&ppc64_numa_nb);
	cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE,
			  (void *)(unsigned long)boot_cpuid);
}

void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = memblock_end_of_DRAM() >> PAGE_SHIFT;
	free_area_init_nodes(max_zone_pfns);
}

static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	p = strstr(p, "fake=");
	if (p)
		cmdline = p + strlen("fake=");

	return 0;
}
early_param("numa", early_numa);

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Find the node associated with a hot added memory section for
 * memory represented in the device tree by the property
 * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory.
 */
static int hot_add_drconf_scn_to_nid(struct device_node *memory,
				     unsigned long scn_addr)
{
	const u32 *dm;
	unsigned int drconf_cell_cnt, rc;
	unsigned long lmb_size;
	struct assoc_arrays aa;
	int nid = -1;

	drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
	if (!drconf_cell_cnt)
		return -1;

	lmb_size = of_get_lmb_size(memory);
	if (!lmb_size)
		return -1;

	rc = of_get_assoc_arrays(memory, &aa);
	if (rc)
		return -1;

	for (; drconf_cell_cnt != 0; --drconf_cell_cnt) {
		struct of_drconf_cell drmem;

		read_drconf_cell(&drmem, &dm);

		/* skip this block if it is reserved or not assigned to
		 * this partition */
		if ((drmem.flags & DRCONF_MEM_RESERVED)
		    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
			continue;

		if ((scn_addr < drmem.base_addr)
		    || (scn_addr >= (drmem.base_addr + lmb_size)))
			continue;

		nid = of_drconf_to_nid_single(&drmem, &aa);
		break;
	}

	return nid;
}

/*
 * Find the node associated with a hot added memory section for memory
 * represented in the device tree as a node (i.e. memory@XXXX) for
 * each memblock.
 */
int hot_add_node_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory = NULL;
	int nid = -1;

	while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
		unsigned long start, size;
		int ranges;
		const unsigned int *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);

		while (ranges--) {
			start = read_n_cells(n_mem_addr_cells, &memcell_buf);
			size = read_n_cells(n_mem_size_cells, &memcell_buf);

			if ((scn_addr < start) || (scn_addr >= (start + size)))
				continue;

			nid = of_node_to_nid_single(memory);
			break;
		}

		of_node_put(memory);
		if (nid >= 0)
			break;
	}

	return nid;
}

/*
 * Find the node associated with a hot added memory section.  Section
 * corresponds to a SPARSEMEM section, not an MEMBLOCK.  It is assumed that
 * sections are fully contained within a single MEMBLOCK.
 */
int hot_add_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory = NULL;
	int nid, found = 0;

	if (!numa_enabled || (min_common_depth < 0))
		return first_online_node;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		nid = hot_add_drconf_scn_to_nid(memory, scn_addr);
		of_node_put(memory);
	} else {
		nid = hot_add_node_scn_to_nid(scn_addr);
	}

	if (nid < 0 || !node_online(nid))
		nid = first_online_node;

	if (NODE_DATA(nid)->node_spanned_pages)
		return nid;

	for_each_online_node(nid) {
		if (NODE_DATA(nid)->node_spanned_pages) {
			found = 1;
			break;
		}
	}

	BUG_ON(!found);
	return nid;
}

static u64 hot_add_drconf_memory_max(void)
{
	struct device_node *memory = NULL;
	unsigned int drconf_cell_cnt = 0;
	u64 lmb_size = 0;
	const u32 *dm = 0;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
		lmb_size = of_get_lmb_size(memory);
		of_node_put(memory);
	}
	return lmb_size * drconf_cell_cnt;
}

/*
 * memory_hotplug_max - return max address of memory that may be added
 *
 * This is currently only used on systems that support drconfig memory
 * hotplug.
 */
u64 memory_hotplug_max(void)
{
	return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM());
}
#endif /* CONFIG_MEMORY_HOTPLUG */

/* Virtual Processor Home Node (VPHN) support */
#ifdef CONFIG_PPC_SPLPAR
static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS];
static cpumask_t cpu_associativity_changes_mask;
static int vphn_enabled;
static void set_topology_timer(void);

/*
 * Store the current values of the associativity change counters in the
 * hypervisor.
 */
static void setup_cpu_associativity_change_counters(void)
{
	int cpu;

	/* The VPHN feature supports a maximum of 8 reference points */
	BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8);

	for_each_possible_cpu(cpu) {
		int i;
		u8 *counts = vphn_cpu_change_counts[cpu];
		volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;

		for (i = 0; i < distance_ref_points_depth; i++)
			counts[i] = hypervisor_counts[i];
	}
}

/*
 * The hypervisor maintains a set of 8 associativity change counters in
 * the VPA of each cpu that correspond to the associativity levels in the
 * ibm,associativity-reference-points property. When an associativity
 * level changes, the corresponding counter is incremented.
 *
 * Set a bit in cpu_associativity_changes_mask for each cpu whose home
 * node associativity levels have changed.
 *
 * Returns the number of cpus with unhandled associativity changes.
 */
static int update_cpu_associativity_changes_mask(void)
{
	int cpu, nr_cpus = 0;
	cpumask_t *changes = &cpu_associativity_changes_mask;

	cpumask_clear(changes);

	for_each_possible_cpu(cpu) {
		int i, changed = 0;
		u8 *counts = vphn_cpu_change_counts[cpu];
		volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;

		for (i = 0; i < distance_ref_points_depth; i++) {
			if (hypervisor_counts[i] != counts[i]) {
				counts[i] = hypervisor_counts[i];
				changed = 1;
			}
		}
		if (changed) {
			cpumask_set_cpu(cpu, changes);
			nr_cpus++;
		}
	}

	return nr_cpus;
}

/*
 * 6 64-bit registers unpacked into 12 32-bit associativity values. To form
 * the complete property we have to add the length in the first cell.
 */
#define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32) + 1)

/*
 * Convert the associativity domain numbers returned from the hypervisor
 * to the sequence they would appear in the ibm,associativity property.
 */
static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked)
{
	int i, nr_assoc_doms = 0;
	const u16 *field = (const u16 *) packed;

#define VPHN_FIELD_UNUSED	(0xffff)
#define VPHN_FIELD_MSB		(0x8000)
#define VPHN_FIELD_MASK		(~VPHN_FIELD_MSB)

	for (i = 1; i < VPHN_ASSOC_BUFSIZE; i++) {
		if (*field == VPHN_FIELD_UNUSED) {
			/* All significant fields processed, and remaining
			 * fields contain the reserved value of all 1's.
			 * Just store them.
			 */
			unpacked[i] = *((u32 *)field);
			field += 2;
		} else if (*field & VPHN_FIELD_MSB) {
			/* Data is in the lower 15 bits of this field */
			unpacked[i] = *field & VPHN_FIELD_MASK;
			field++;
			nr_assoc_doms++;
		} else {
			/* Data is in the lower 15 bits of this field
			 * concatenated with the next 16 bit field
			 */
			unpacked[i] = *((u32 *)field);
			field += 2;
			nr_assoc_doms++;
		}
	}

	/* The first cell contains the length of the property */
	unpacked[0] = nr_assoc_doms;

	return nr_assoc_doms;
}

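/*
 * Sizing note with a worked example (added for clarity): the hypervisor
 * returns 6 64-bit registers, i.e. 12 32-bit cells, so VPHN_ASSOC_BUFSIZE is
 * 6*8/4 + 1 == 13, leaving room for the length written into unpacked[0].
 * A 16-bit field of 0x8002, for instance, has the MSB set and unpacks to the
 * single domain number 2.
 */
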
/*
 * Retrieve the new associativity information for a virtual processor's
 * home node.
 */
static long hcall_vphn(unsigned long cpu, unsigned int *associativity)
{
	long rc;
	long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
	u64 flags = 1;
	int hwcpu = get_hard_smp_processor_id(cpu);

	rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu);
	vphn_unpack_associativity(retbuf, associativity);

	return rc;
}

static long vphn_get_associativity(unsigned long cpu,
				   unsigned int *associativity)
{
	long rc;

	rc = hcall_vphn(cpu, associativity);

	switch (rc) {
	case H_FUNCTION:
		printk(KERN_INFO
			"VPHN is not supported. Disabling polling...\n");
		stop_topology_update();
		break;
	case H_HARDWARE:
		printk(KERN_ERR
			"hcall_vphn() experienced a hardware fault "
			"preventing VPHN. Disabling polling...\n");
		stop_topology_update();
	}

	return rc;
}

/*
 * Update the node maps and sysfs entries for each cpu whose home node
 * has changed.
 */
int arch_update_cpu_topology(void)
{
	int cpu, nid, old_nid;
	unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
	struct sys_device *sysdev;

	for_each_cpu_mask(cpu, cpu_associativity_changes_mask) {
		vphn_get_associativity(cpu, associativity);
		nid = associativity_to_nid(associativity);

		if (nid < 0 || !node_online(nid))
			nid = first_online_node;

		old_nid = numa_cpu_lookup_table[cpu];

		/* Disable hotplug while we update the cpu
		 * masks and sysfs.
		 */
		get_online_cpus();
		unregister_cpu_under_node(cpu, old_nid);
		unmap_cpu_from_node(cpu);
		map_cpu_to_node(cpu, nid);
		register_cpu_under_node(cpu, nid);
		put_online_cpus();

		sysdev = get_cpu_sysdev(cpu);
		if (sysdev)
			kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
	}

	return 1;
}

static void topology_work_fn(struct work_struct *work)
{
	rebuild_sched_domains();
}
static DECLARE_WORK(topology_work, topology_work_fn);

void topology_schedule_update(void)
{
	schedule_work(&topology_work);
}

static void topology_timer_fn(unsigned long ignored)
{
	if (!vphn_enabled)
		return;
	if (update_cpu_associativity_changes_mask() > 0)
		topology_schedule_update();
	set_topology_timer();
}
static struct timer_list topology_timer =
	TIMER_INITIALIZER(topology_timer_fn, 0, 0);

static void set_topology_timer(void)
{
	topology_timer.data = 0;
	topology_timer.expires = jiffies + 60 * HZ;
	add_timer(&topology_timer);
}

/*
 * Start polling for VPHN associativity changes.
 */
int start_topology_update(void)
{
	int rc = 0;

	if (firmware_has_feature(FW_FEATURE_VPHN) &&
	    get_lppaca()->shared_proc) {
		vphn_enabled = 1;
		setup_cpu_associativity_change_counters();
		init_timer_deferrable(&topology_timer);
		set_topology_timer();
		rc = 1;
	}

	return rc;
}
__initcall(start_topology_update);

/*
 * Disable polling for VPHN associativity changes.
 */
int stop_topology_update(void)
{
	vphn_enabled = 0;
	return del_timer_sync(&topology_timer);
}
#endif /* CONFIG_PPC_SPLPAR */