Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * pSeries NUMA support | |
3 | * | |
4 | * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or | |
7 | * modify it under the terms of the GNU General Public License | |
8 | * as published by the Free Software Foundation; either version | |
9 | * 2 of the License, or (at your option) any later version. | |
10 | */ | |
11 | #include <linux/threads.h> | |
12 | #include <linux/bootmem.h> | |
13 | #include <linux/init.h> | |
14 | #include <linux/mm.h> | |
15 | #include <linux/mmzone.h> | |
4b16f8e2 | 16 | #include <linux/export.h> |
1da177e4 LT |
17 | #include <linux/nodemask.h> |
18 | #include <linux/cpu.h> | |
19 | #include <linux/notifier.h> | |
95f72d1e | 20 | #include <linux/memblock.h> |
6df1646e | 21 | #include <linux/of.h> |
06eccea6 | 22 | #include <linux/pfn.h> |
9eff1a38 JL |
23 | #include <linux/cpuset.h> |
24 | #include <linux/node.h> | |
45fb6cea | 25 | #include <asm/sparsemem.h> |
d9b2b2a2 | 26 | #include <asm/prom.h> |
cf00a8d1 | 27 | #include <asm/system.h> |
2249ca9d | 28 | #include <asm/smp.h> |
9eff1a38 JL |
29 | #include <asm/firmware.h> |
30 | #include <asm/paca.h> | |
39bf990e | 31 | #include <asm/hvcall.h> |
1da177e4 LT |
32 | |
33 | static int numa_enabled = 1; | |
34 | ||
1daa6d08 BS |
35 | static char *cmdline __initdata; |
36 | ||
1da177e4 LT |
37 | static int numa_debug; |
38 | #define dbg(args...) if (numa_debug) { printk(KERN_INFO args); } | |
39 | ||
45fb6cea | 40 | int numa_cpu_lookup_table[NR_CPUS]; |
25863de0 | 41 | cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; |
1da177e4 | 42 | struct pglist_data *node_data[MAX_NUMNODES]; |
45fb6cea AB |
43 | |
44 | EXPORT_SYMBOL(numa_cpu_lookup_table); | |
25863de0 | 45 | EXPORT_SYMBOL(node_to_cpumask_map); |
45fb6cea AB |
46 | EXPORT_SYMBOL(node_data); |
47 | ||
1da177e4 | 48 | static int min_common_depth; |
237a0989 | 49 | static int n_mem_addr_cells, n_mem_size_cells; |
41eab6f8 AB |
50 | static int form1_affinity; |
51 | ||
52 | #define MAX_DISTANCE_REF_POINTS 4 | |
53 | static int distance_ref_points_depth; | |
54 | static const unsigned int *distance_ref_points; | |
55 | static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS]; | |
1da177e4 | 56 | |
25863de0 AB |
57 | /* |
58 | * Allocate node_to_cpumask_map based on number of available nodes | |
59 | * Requires node_possible_map to be valid. | |
60 | * | |
61 | * Note: node_to_cpumask() is not valid until after this is done. | |
62 | */ | |
63 | static void __init setup_node_to_cpumask_map(void) | |
64 | { | |
65 | unsigned int node, num = 0; | |
66 | ||
67 | /* setup nr_node_ids if not done yet */ | |
68 | if (nr_node_ids == MAX_NUMNODES) { | |
69 | for_each_node_mask(node, node_possible_map) | |
70 | num = node; | |
71 | nr_node_ids = num + 1; | |
72 | } | |
73 | ||
74 | /* allocate the map */ | |
75 | for (node = 0; node < nr_node_ids; node++) | |
76 | alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]); | |
77 | ||
78 | /* cpumask_of_node() will now work */ | |
79 | dbg("Node to cpumask map for %d nodes\n", nr_node_ids); | |
80 | } | |
81 | ||
1daa6d08 BS |
82 | static int __cpuinit fake_numa_create_new_node(unsigned long end_pfn, |
83 | unsigned int *nid) | |
84 | { | |
85 | unsigned long long mem; | |
86 | char *p = cmdline; | |
87 | static unsigned int fake_nid; | |
88 | static unsigned long long curr_boundary; | |
89 | ||
90 | /* | |
91 | * Modify node id, iff we started creating NUMA nodes | |
92 | * We want to continue from where we left off the last time | |
93 | */ | |
94 | if (fake_nid) | |
95 | *nid = fake_nid; | |
96 | /* | |
97 | * In case there are no more arguments to parse, the | |
98 | * node_id should be the same as the last fake node id | |
99 | * (we've handled this above). | |
100 | */ | |
101 | if (!p) | |
102 | return 0; | |
103 | ||
104 | mem = memparse(p, &p); | |
105 | if (!mem) | |
106 | return 0; | |
107 | ||
108 | if (mem < curr_boundary) | |
109 | return 0; | |
110 | ||
111 | curr_boundary = mem; | |
112 | ||
113 | if ((end_pfn << PAGE_SHIFT) > mem) { | |
114 | /* | |
115 | * Skip commas and spaces | |
116 | */ | |
117 | while (*p == ',' || *p == ' ' || *p == '\t') | |
118 | p++; | |
119 | ||
120 | cmdline = p; | |
121 | fake_nid++; | |
122 | *nid = fake_nid; | |
123 | dbg("created new fake_node with id %d\n", fake_nid); | |
124 | return 1; | |
125 | } | |
126 | return 0; | |
127 | } | |
128 | ||
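For illustration, the net effect of the fake-node boundaries handled above can be modelled in a few lines of user-space C. This is only a sketch, not kernel code; the boundary list stands in for a hypothetical `numa=fake=1G,4G` command line and the test addresses are invented:

```c
/*
 * Sketch of how fake-node boundaries partition physical memory.
 * Hypothetical command line: numa=fake=1G,4G
 *   node 0: [0, 1G), node 1: [1G, 4G), node 2: [4G, ...)
 */
#include <stdio.h>

int main(void)
{
	const unsigned long long boundary[] = { 1ULL << 30, 4ULL << 30 };
	const unsigned long long addr[] = { 512ULL << 20, 2ULL << 30, 8ULL << 30 };
	unsigned int i, b;

	for (i = 0; i < 3; i++) {
		unsigned int nid = 0;

		/* each boundary that addr has passed bumps the fake nid */
		for (b = 0; b < 2; b++)
			if (addr[i] >= boundary[b])
				nid = b + 1;
		printf("addr 0x%llx -> fake node %u\n", addr[i], nid);
	}
	return 0;
}
```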
8f64e1f2 JT |
129 | /* |
130 | * get_active_region_work_fn - A helper function for get_node_active_region | |
131 | * Returns datax set to the start_pfn and end_pfn if they contain | |
132 | * the initial value of datax->start_pfn between them | |
133 | * @start_pfn: start page(inclusive) of region to check | |
134 | * @end_pfn: end page(exclusive) of region to check | |
135 | * @datax: comes in with ->start_pfn set to value to search for and | |
136 | * goes out with active range if it contains it | |
137 | * Returns 1 if search value is in range else 0 | |
138 | */ | |
139 | static int __init get_active_region_work_fn(unsigned long start_pfn, | |
140 | unsigned long end_pfn, void *datax) | |
141 | { | |
142 | struct node_active_region *data; | |
143 | data = (struct node_active_region *)datax; | |
144 | ||
145 | if (start_pfn <= data->start_pfn && end_pfn > data->start_pfn) { | |
146 | data->start_pfn = start_pfn; | |
147 | data->end_pfn = end_pfn; | |
148 | return 1; | |
149 | } | |
150 | return 0; | |
151 | ||
152 | } | |
153 | ||
154 | /* | |
155 | * get_node_active_region - Return active region containing start_pfn | |
e8170372 | 156 | * Active range returned is empty if none found. |
8f64e1f2 JT |
157 | * @start_pfn: The page to return the region for. |
158 | * @node_ar: Returned set to the active region containing start_pfn | |
159 | */ | |
160 | static void __init get_node_active_region(unsigned long start_pfn, | |
161 | struct node_active_region *node_ar) | |
162 | { | |
163 | int nid = early_pfn_to_nid(start_pfn); | |
164 | ||
165 | node_ar->nid = nid; | |
166 | node_ar->start_pfn = start_pfn; | |
e8170372 | 167 | node_ar->end_pfn = start_pfn; |
8f64e1f2 JT |
168 | work_with_active_regions(nid, get_active_region_work_fn, node_ar); |
169 | } | |
170 | ||
39bf990e | 171 | static void map_cpu_to_node(int cpu, int node) |
1da177e4 LT |
172 | { |
173 | numa_cpu_lookup_table[cpu] = node; | |
45fb6cea | 174 | |
bf4b85b0 NL |
175 | dbg("adding cpu %d to node %d\n", cpu, node); |
176 | ||
25863de0 AB |
177 | if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node]))) |
178 | cpumask_set_cpu(cpu, node_to_cpumask_map[node]); | |
1da177e4 LT |
179 | } |
180 | ||
39bf990e | 181 | #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR) |
1da177e4 LT |
182 | static void unmap_cpu_from_node(unsigned long cpu) |
183 | { | |
184 | int node = numa_cpu_lookup_table[cpu]; | |
185 | ||
186 | dbg("removing cpu %lu from node %d\n", cpu, node); | |
187 | ||
25863de0 | 188 | if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) { |
429f4d8d | 189 | cpumask_clear_cpu(cpu, node_to_cpumask_map[node]); |
1da177e4 LT |
190 | } else { |
191 | printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n", | |
192 | cpu, node); | |
193 | } | |
194 | } | |
39bf990e | 195 | #endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */ |
1da177e4 | 196 | |
1da177e4 | 197 | /* must hold reference to node during call */ |
a7f67bdf | 198 | static const int *of_get_associativity(struct device_node *dev) |
1da177e4 | 199 | { |
e2eb6392 | 200 | return of_get_property(dev, "ibm,associativity", NULL); |
1da177e4 LT |
201 | } |
202 | ||
cf00085d C |
203 | /* |
204 | * Returns the property linux,drconf-usable-memory if | |
205 | * it exists (the property exists only in kexec/kdump kernels, | |
206 | * added by kexec-tools) | |
207 | */ | |
208 | static const u32 *of_get_usable_memory(struct device_node *memory) | |
209 | { | |
210 | const u32 *prop; | |
211 | u32 len; | |
212 | prop = of_get_property(memory, "linux,drconf-usable-memory", &len); | |
213 | if (!prop || len < sizeof(unsigned int)) | |
214 | return 0; | |
215 | return prop; | |
216 | } | |
217 | ||
41eab6f8 AB |
218 | int __node_distance(int a, int b) |
219 | { | |
220 | int i; | |
221 | int distance = LOCAL_DISTANCE; | |
222 | ||
223 | if (!form1_affinity) | |
224 | return distance; | |
225 | ||
226 | for (i = 0; i < distance_ref_points_depth; i++) { | |
227 | if (distance_lookup_table[a][i] == distance_lookup_table[b][i]) | |
228 | break; | |
229 | ||
230 | /* Double the distance for each NUMA level */ | |
231 | distance *= 2; | |
232 | } | |
233 | ||
234 | return distance; | |
235 | } | |
236 | ||
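A minimal user-space sketch of the form 1 distance rule implemented by __node_distance() above; not kernel code, and the lookup-table contents are invented example values:

```c
/*
 * Model of the form 1 distance rule: start at LOCAL_DISTANCE and double
 * for each leading reference-point level at which the two nodes'
 * associativity domains differ; stop at the first level that matches.
 * The lookup values are made-up example data.
 */
#include <stdio.h>

#define LOCAL_DISTANCE	10
#define DEPTH		4

static const unsigned int lookup[2][DEPTH] = {
	{ 1, 3, 7, 7 },		/* node 0 */
	{ 2, 4, 7, 7 },		/* node 1: differs at levels 0 and 1 */
};

static int node_distance(int a, int b)
{
	int i, distance = LOCAL_DISTANCE;

	for (i = 0; i < DEPTH; i++) {
		if (lookup[a][i] == lookup[b][i])
			break;
		distance *= 2;	/* double per differing NUMA level */
	}
	return distance;
}

int main(void)
{
	printf("distance(0, 0) = %d\n", node_distance(0, 0));	/* 10 */
	printf("distance(0, 1) = %d\n", node_distance(0, 1));	/* 40 */
	return 0;
}
```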
237 | static void initialize_distance_lookup_table(int nid, | |
238 | const unsigned int *associativity) | |
239 | { | |
240 | int i; | |
241 | ||
242 | if (!form1_affinity) | |
243 | return; | |
244 | ||
245 | for (i = 0; i < distance_ref_points_depth; i++) { | |
246 | distance_lookup_table[nid][i] = | |
247 | associativity[distance_ref_points[i]]; | |
248 | } | |
249 | } | |
250 | ||
482ec7c4 NL |
251 | /* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa |
252 | * info is found. | |
253 | */ | |
9eff1a38 | 254 | static int associativity_to_nid(const unsigned int *associativity) |
1da177e4 | 255 | { |
482ec7c4 | 256 | int nid = -1; |
1da177e4 LT |
257 | |
258 | if (min_common_depth == -1) | |
482ec7c4 | 259 | goto out; |
1da177e4 | 260 | |
9eff1a38 JL |
261 | if (associativity[0] >= min_common_depth) |
262 | nid = associativity[min_common_depth]; | |
bc16a759 NL |
263 | |
264 | /* POWER4 LPAR uses 0xffff as invalid node */ | |
482ec7c4 NL |
265 | if (nid == 0xffff || nid >= MAX_NUMNODES) |
266 | nid = -1; | |
41eab6f8 | 267 | |
9eff1a38 JL |
268 | if (nid > 0 && associativity[0] >= distance_ref_points_depth) |
269 | initialize_distance_lookup_table(nid, associativity); | |
41eab6f8 | 270 | |
482ec7c4 | 271 | out: |
cf950b7a | 272 | return nid; |
1da177e4 LT |
273 | } |
274 | ||
9eff1a38 JL |
275 | /* Returns the nid associated with the given device tree node, |
276 | * or -1 if not found. | |
277 | */ | |
278 | static int of_node_to_nid_single(struct device_node *device) | |
279 | { | |
280 | int nid = -1; | |
281 | const unsigned int *tmp; | |
282 | ||
283 | tmp = of_get_associativity(device); | |
284 | if (tmp) | |
285 | nid = associativity_to_nid(tmp); | |
286 | return nid; | |
287 | } | |
288 | ||
953039c8 JK |
289 | /* Walk the device tree upwards, looking for an associativity id */ |
290 | int of_node_to_nid(struct device_node *device) | |
291 | { | |
292 | struct device_node *tmp; | |
293 | int nid = -1; | |
294 | ||
295 | of_node_get(device); | |
296 | while (device) { | |
297 | nid = of_node_to_nid_single(device); | |
298 | if (nid != -1) | |
299 | break; | |
300 | ||
301 | tmp = device; | |
302 | device = of_get_parent(tmp); | |
303 | of_node_put(tmp); | |
304 | } | |
305 | of_node_put(device); | |
306 | ||
307 | return nid; | |
308 | } | |
309 | EXPORT_SYMBOL_GPL(of_node_to_nid); | |
310 | ||
1da177e4 LT |
311 | static int __init find_min_common_depth(void) |
312 | { | |
41eab6f8 | 313 | int depth; |
bc8449cc | 314 | struct device_node *chosen; |
e70606eb | 315 | struct device_node *root; |
bc8449cc | 316 | const char *vec5; |
1da177e4 | 317 | |
1c8ee733 DS |
318 | if (firmware_has_feature(FW_FEATURE_OPAL)) |
319 | root = of_find_node_by_path("/ibm,opal"); | |
320 | else | |
321 | root = of_find_node_by_path("/rtas"); | |
e70606eb ME |
322 | if (!root) |
323 | root = of_find_node_by_path("/"); | |
1da177e4 LT |
324 | |
325 | /* | |
41eab6f8 AB |
326 | * This property is a set of 32-bit integers, each representing |
327 | * an index into the ibm,associativity nodes. | |
328 | * | |
329 | * With form 0 affinity the first integer is for an SMP configuration | |
330 | * (should be all 0's) and the second is for a normal NUMA | |
331 | * configuration. We have only one level of NUMA. | |
332 | * | |
333 | * With form 1 affinity the first integer is the most significant | |
334 | * NUMA boundary and the following are progressively less significant | |
335 | * boundaries. There can be more than one level of NUMA. | |
1da177e4 | 336 | */ |
e70606eb | 337 | distance_ref_points = of_get_property(root, |
41eab6f8 AB |
338 | "ibm,associativity-reference-points", |
339 | &distance_ref_points_depth); | |
340 | ||
341 | if (!distance_ref_points) { | |
342 | dbg("NUMA: ibm,associativity-reference-points not found.\n"); | |
343 | goto err; | |
344 | } | |
345 | ||
346 | distance_ref_points_depth /= sizeof(int); | |
1da177e4 | 347 | |
bc8449cc AB |
348 | #define VEC5_AFFINITY_BYTE 5 |
349 | #define VEC5_AFFINITY 0x80 | |
1c8ee733 DS |
350 | |
351 | if (firmware_has_feature(FW_FEATURE_OPAL)) | |
352 | form1_affinity = 1; | |
353 | else { | |
354 | chosen = of_find_node_by_path("/chosen"); | |
355 | if (chosen) { | |
356 | vec5 = of_get_property(chosen, | |
357 | "ibm,architecture-vec-5", NULL); | |
358 | if (vec5 && (vec5[VEC5_AFFINITY_BYTE] & | |
359 | VEC5_AFFINITY)) { | |
360 | dbg("Using form 1 affinity\n"); | |
361 | form1_affinity = 1; | |
362 | } | |
bc8449cc | 363 | } |
4b83c330 AB |
364 | } |
365 | ||
41eab6f8 AB |
366 | if (form1_affinity) { |
367 | depth = distance_ref_points[0]; | |
1da177e4 | 368 | } else { |
41eab6f8 AB |
369 | if (distance_ref_points_depth < 2) { |
370 | printk(KERN_WARNING "NUMA: " | |
371 | "short ibm,associativity-reference-points\n"); | |
372 | goto err; | |
373 | } | |
374 | ||
375 | depth = distance_ref_points[1]; | |
1da177e4 | 376 | } |
1da177e4 | 377 | |
41eab6f8 AB |
378 | /* |
379 | * Warn and cap if the hardware supports more than | |
380 | * MAX_DISTANCE_REF_POINTS domains. | |
381 | */ | |
382 | if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) { | |
383 | printk(KERN_WARNING "NUMA: distance array capped at " | |
384 | "%d entries\n", MAX_DISTANCE_REF_POINTS); | |
385 | distance_ref_points_depth = MAX_DISTANCE_REF_POINTS; | |
386 | } | |
387 | ||
e70606eb | 388 | of_node_put(root); |
1da177e4 | 389 | return depth; |
41eab6f8 AB |
390 | |
391 | err: | |
e70606eb | 392 | of_node_put(root); |
41eab6f8 | 393 | return -1; |
1da177e4 LT |
394 | } |
395 | ||
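To see how the reference points picked by find_min_common_depth() are later used, here is a minimal user-space sketch (not kernel code; both property contents below are invented) of the depth selection and the nid lookup performed by associativity_to_nid():

```c
/*
 * Model of depth selection and nid lookup.  Hypothetical properties:
 *   ibm,associativity-reference-points = <4 6>
 *   ibm,associativity = <6 1 2 3 7 5 6>  (length followed by domain ids)
 */
#include <stdio.h>

int main(void)
{
	const unsigned int ref_points[] = { 4, 6 };
	const unsigned int assoc[] = { 6, 1, 2, 3, 7, 5, 6 };
	const int form1_affinity = 1;
	unsigned int depth = form1_affinity ? ref_points[0] : ref_points[1];
	int nid = -1;

	/* same check as associativity_to_nid(): enough entries present? */
	if (assoc[0] >= depth)
		nid = assoc[depth];

	printf("min_common_depth = %u, nid = %d\n", depth, nid);
	return 0;
}
```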
84c9fdd1 | 396 | static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells) |
1da177e4 LT |
397 | { |
398 | struct device_node *memory = NULL; | |
1da177e4 LT |
399 | |
400 | memory = of_find_node_by_type(memory, "memory"); | |
54c23310 | 401 | if (!memory) |
84c9fdd1 | 402 | panic("numa.c: No memory nodes found!"); |
54c23310 | 403 | |
a8bda5dd | 404 | *n_addr_cells = of_n_addr_cells(memory); |
9213feea | 405 | *n_size_cells = of_n_size_cells(memory); |
84c9fdd1 | 406 | of_node_put(memory); |
1da177e4 LT |
407 | } |
408 | ||
a7f67bdf | 409 | static unsigned long __devinit read_n_cells(int n, const unsigned int **buf) |
1da177e4 LT |
410 | { |
411 | unsigned long result = 0; | |
412 | ||
413 | while (n--) { | |
414 | result = (result << 32) | **buf; | |
415 | (*buf)++; | |
416 | } | |
417 | return result; | |
418 | } | |
419 | ||
8342681d NF |
420 | struct of_drconf_cell { |
421 | u64 base_addr; | |
422 | u32 drc_index; | |
423 | u32 reserved; | |
424 | u32 aa_index; | |
425 | u32 flags; | |
426 | }; | |
427 | ||
428 | #define DRCONF_MEM_ASSIGNED 0x00000008 | |
429 | #define DRCONF_MEM_AI_INVALID 0x00000040 | |
430 | #define DRCONF_MEM_RESERVED 0x00000080 | |
431 | ||
432 | /* | |
95f72d1e | 433 | * Read the next memblock list entry from the ibm,dynamic-memory property |
8342681d NF |
434 | * and return the information in the provided of_drconf_cell structure. |
435 | */ | |
436 | static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp) | |
437 | { | |
438 | const u32 *cp; | |
439 | ||
440 | drmem->base_addr = read_n_cells(n_mem_addr_cells, cellp); | |
441 | ||
442 | cp = *cellp; | |
443 | drmem->drc_index = cp[0]; | |
444 | drmem->reserved = cp[1]; | |
445 | drmem->aa_index = cp[2]; | |
446 | drmem->flags = cp[3]; | |
447 | ||
448 | *cellp = cp + 4; | |
449 | } | |
450 | ||
451 | /* | |
25985edc | 452 | * Retrieve and validate the ibm,dynamic-memory property of the device tree. |
8342681d | 453 | * |
95f72d1e YL |
454 | * The layout of the ibm,dynamic-memory property is a count N of memblock |
455 | * list entries, followed by the N entries themselves. Each memblock list entry | |
25985edc | 456 | * contains information as laid out in the of_drconf_cell struct above. |
8342681d NF |
457 | */ |
458 | static int of_get_drconf_memory(struct device_node *memory, const u32 **dm) | |
459 | { | |
460 | const u32 *prop; | |
461 | u32 len, entries; | |
462 | ||
463 | prop = of_get_property(memory, "ibm,dynamic-memory", &len); | |
464 | if (!prop || len < sizeof(unsigned int)) | |
465 | return 0; | |
466 | ||
467 | entries = *prop++; | |
468 | ||
469 | /* Now that we know the number of entries, revalidate the size | |
470 | * of the property read in to ensure we have everything | |
471 | */ | |
472 | if (len < (entries * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int)) | |
473 | return 0; | |
474 | ||
475 | *dm = prop; | |
476 | return entries; | |
477 | } | |
478 | ||
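A user-space sketch of walking the flattened ibm,dynamic-memory layout described above, mirroring read_drconf_cell() and of_get_drconf_memory(); not kernel code, and the cell buffer plus the assumption of two address cells are invented:

```c
/*
 * Walk a mock flattened ibm,dynamic-memory buffer: a u32 entry count,
 * then per entry a base address (two cells here), drc_index, reserved,
 * aa_index and flags.  All values are invented.
 */
#include <stdio.h>
#include <stdint.h>

#define N_MEM_ADDR_CELLS 2

static uint64_t read_n_cells(int n, const uint32_t **buf)
{
	uint64_t result = 0;

	while (n--) {
		result = (result << 32) | **buf;
		(*buf)++;
	}
	return result;
}

int main(void)
{
	static const uint32_t prop[] = {
		2,					/* two LMB entries */
		0x0, 0x00000000, 0x80000001, 0, 0, 0x8,	/* base 0x00000000 */
		0x0, 0x10000000, 0x80000002, 0, 1, 0x8,	/* base 0x10000000 */
	};
	const uint32_t *p = prop;
	uint32_t i, entries = *p++;

	for (i = 0; i < entries; i++) {
		uint64_t base = read_n_cells(N_MEM_ADDR_CELLS, &p);
		uint32_t drc_index = *p++, reserved = *p++;
		uint32_t aa_index = *p++, flags = *p++;

		printf("lmb %u: base=0x%llx drc=0x%x aa=%u flags=0x%x\n",
		       i, (unsigned long long)base, drc_index, aa_index, flags);
		(void)reserved;
	}
	return 0;
}
```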
479 | /* | |
25985edc | 480 | * Retrieve and validate the ibm,lmb-size property for drconf memory |
8342681d NF |
481 | * from the device tree. |
482 | */ | |
3fdfd990 | 483 | static u64 of_get_lmb_size(struct device_node *memory) |
8342681d NF |
484 | { |
485 | const u32 *prop; | |
486 | u32 len; | |
487 | ||
3fdfd990 | 488 | prop = of_get_property(memory, "ibm,lmb-size", &len); |
8342681d NF |
489 | if (!prop || len < sizeof(unsigned int)) |
490 | return 0; | |
491 | ||
492 | return read_n_cells(n_mem_size_cells, &prop); | |
493 | } | |
494 | ||
495 | struct assoc_arrays { | |
496 | u32 n_arrays; | |
497 | u32 array_sz; | |
498 | const u32 *arrays; | |
499 | }; | |
500 | ||
501 | /* | |
25985edc | 502 | * Retrieve and validate the list of associativity arrays for drconf |
8342681d NF |
503 | * memory from the ibm,associativity-lookup-arrays property of the |
504 | * device tree.. | |
505 | * | |
506 | * The layout of the ibm,associativity-lookup-arrays property is a number N | |
507 | * indicating the number of associativity arrays, followed by a number M | |
508 | * indicating the size of each associativity array, followed by a list | |
509 | * of N associativity arrays. | |
510 | */ | |
511 | static int of_get_assoc_arrays(struct device_node *memory, | |
512 | struct assoc_arrays *aa) | |
513 | { | |
514 | const u32 *prop; | |
515 | u32 len; | |
516 | ||
517 | prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len); | |
518 | if (!prop || len < 2 * sizeof(unsigned int)) | |
519 | return -1; | |
520 | ||
521 | aa->n_arrays = *prop++; | |
522 | aa->array_sz = *prop++; | |
523 | ||
524 | /* Now that we know the number of arrays and size of each array, | |
525 | * revalidate the size of the property read in. | |
526 | */ | |
527 | if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int)) | |
528 | return -1; | |
529 | ||
530 | aa->arrays = prop; | |
531 | return 0; | |
532 | } | |
533 | ||
534 | /* | |
535 | * This is like of_node_to_nid_single() for memory represented in the | |
536 | * ibm,dynamic-reconfiguration-memory node. | |
537 | */ | |
538 | static int of_drconf_to_nid_single(struct of_drconf_cell *drmem, | |
539 | struct assoc_arrays *aa) | |
540 | { | |
541 | int default_nid = 0; | |
542 | int nid = default_nid; | |
543 | int index; | |
544 | ||
545 | if (min_common_depth > 0 && min_common_depth <= aa->array_sz && | |
546 | !(drmem->flags & DRCONF_MEM_AI_INVALID) && | |
547 | drmem->aa_index < aa->n_arrays) { | |
548 | index = drmem->aa_index * aa->array_sz + min_common_depth - 1; | |
549 | nid = aa->arrays[index]; | |
550 | ||
551 | if (nid == 0xffff || nid >= MAX_NUMNODES) | |
552 | nid = default_nid; | |
553 | } | |
554 | ||
555 | return nid; | |
556 | } | |
557 | ||
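The aa_index lookup done by of_drconf_to_nid_single() above is plain flat-array indexing; a minimal user-space sketch (not kernel code, with invented array contents and an assumed min_common_depth of 2):

```c
/*
 * The ibm,associativity-lookup-arrays property holds N flat arrays of
 * M values each; an LMB's node id is the value at
 * aa_index * M + min_common_depth - 1.  All numbers are invented.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint32_t n_arrays = 2, array_sz = 4;	/* N and M */
	const uint32_t arrays[] = {
		0, 0, 0, 0,	/* aa_index 0 */
		0, 1, 1, 1,	/* aa_index 1 */
	};
	const uint32_t min_common_depth = 2;		/* assumed */
	uint32_t aa_index;

	for (aa_index = 0; aa_index < n_arrays; aa_index++) {
		uint32_t index = aa_index * array_sz + min_common_depth - 1;

		printf("aa_index %u -> nid %u\n", aa_index, arrays[index]);
	}
	return 0;
}
```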
1da177e4 LT |
558 | /* |
559 | * Figure out to which domain a cpu belongs and stick it there. | |
560 | * Return the id of the domain used. | |
561 | */ | |
2e5ce39d | 562 | static int __cpuinit numa_setup_cpu(unsigned long lcpu) |
1da177e4 | 563 | { |
cf950b7a | 564 | int nid = 0; |
8b16cd23 | 565 | struct device_node *cpu = of_get_cpu_node(lcpu, NULL); |
1da177e4 LT |
566 | |
567 | if (!cpu) { | |
568 | WARN_ON(1); | |
569 | goto out; | |
570 | } | |
571 | ||
953039c8 | 572 | nid = of_node_to_nid_single(cpu); |
1da177e4 | 573 | |
482ec7c4 | 574 | if (nid < 0 || !node_online(nid)) |
72c33688 | 575 | nid = first_online_node; |
1da177e4 | 576 | out: |
cf950b7a | 577 | map_cpu_to_node(lcpu, nid); |
1da177e4 LT |
578 | |
579 | of_node_put(cpu); | |
580 | ||
cf950b7a | 581 | return nid; |
1da177e4 LT |
582 | } |
583 | ||
74b85f37 | 584 | static int __cpuinit cpu_numa_callback(struct notifier_block *nfb, |
1da177e4 LT |
585 | unsigned long action, |
586 | void *hcpu) | |
587 | { | |
588 | unsigned long lcpu = (unsigned long)hcpu; | |
589 | int ret = NOTIFY_DONE; | |
590 | ||
591 | switch (action) { | |
592 | case CPU_UP_PREPARE: | |
8bb78442 | 593 | case CPU_UP_PREPARE_FROZEN: |
2b261227 | 594 | numa_setup_cpu(lcpu); |
1da177e4 LT |
595 | ret = NOTIFY_OK; |
596 | break; | |
597 | #ifdef CONFIG_HOTPLUG_CPU | |
598 | case CPU_DEAD: | |
8bb78442 | 599 | case CPU_DEAD_FROZEN: |
1da177e4 | 600 | case CPU_UP_CANCELED: |
8bb78442 | 601 | case CPU_UP_CANCELED_FROZEN: |
1da177e4 LT |
602 | unmap_cpu_from_node(lcpu); |
603 | ret = NOTIFY_OK; | |
604 | break; | |
605 | #endif | |
606 | } | |
607 | return ret; | |
608 | } | |
609 | ||
610 | /* | |
611 | * Check and possibly modify a memory region to enforce the memory limit. | |
612 | * | |
613 | * Returns the size the region should have to enforce the memory limit. | |
614 | * This will either be the original value of size, a truncated value, | |
615 | * or zero. If the returned value of size is 0 the region should be | |
25985edc | 616 | * discarded as it lies wholly above the memory limit. |
1da177e4 | 617 | */ |
45fb6cea AB |
618 | static unsigned long __init numa_enforce_memory_limit(unsigned long start, |
619 | unsigned long size) | |
1da177e4 LT |
620 | { |
621 | /* | |
95f72d1e | 622 | * We use memblock_end_of_DRAM() in here instead of memory_limit because |
1da177e4 | 623 | * we've already adjusted it for the limit and it takes care of |
fe55249d MM |
624 | * having memory holes below the limit. Also, in the case of |
625 | * iommu_is_off, memory_limit is not set but is implicitly enforced. | |
1da177e4 | 626 | */ |
1da177e4 | 627 | |
95f72d1e | 628 | if (start + size <= memblock_end_of_DRAM()) |
1da177e4 LT |
629 | return size; |
630 | ||
95f72d1e | 631 | if (start >= memblock_end_of_DRAM()) |
1da177e4 LT |
632 | return 0; |
633 | ||
95f72d1e | 634 | return memblock_end_of_DRAM() - start; |
1da177e4 LT |
635 | } |
636 | ||
cf00085d C |
637 | /* |
638 | * Reads the counter for a given entry in | |
639 | * linux,drconf-usable-memory property | |
640 | */ | |
641 | static inline int __init read_usm_ranges(const u32 **usm) | |
642 | { | |
643 | /* | |
3fdfd990 | 644 | * For each lmb in ibm,dynamic-memory a corresponding |
cf00085d C |
645 | * entry in linux,drconf-usable-memory property contains |
646 | * a counter followed by that many (base, size) pairs. | |
647 | * Read the counter from linux,drconf-usable-memory. | |
648 | */ | |
649 | return read_n_cells(n_mem_size_cells, usm); | |
650 | } | |
651 | ||
0204568a PM |
652 | /* |
653 | * Extract NUMA information from the ibm,dynamic-reconfiguration-memory | |
654 | * node. This assumes n_mem_{addr,size}_cells have been set. | |
655 | */ | |
656 | static void __init parse_drconf_memory(struct device_node *memory) | |
657 | { | |
cf00085d C |
658 | const u32 *dm, *usm; |
659 | unsigned int n, rc, ranges, is_kexec_kdump = 0; | |
3fdfd990 | 660 | unsigned long lmb_size, base, size, sz; |
8342681d NF |
661 | int nid; |
662 | struct assoc_arrays aa; | |
663 | ||
664 | n = of_get_drconf_memory(memory, &dm); | |
665 | if (!n) | |
0204568a PM |
666 | return; |
667 | ||
3fdfd990 BH |
668 | lmb_size = of_get_lmb_size(memory); |
669 | if (!lmb_size) | |
8342681d NF |
670 | return; |
671 | ||
672 | rc = of_get_assoc_arrays(memory, &aa); | |
673 | if (rc) | |
0204568a PM |
674 | return; |
675 | ||
cf00085d C |
676 | /* check if this is a kexec/kdump kernel */ |
677 | usm = of_get_usable_memory(memory); | |
678 | if (usm != NULL) | |
679 | is_kexec_kdump = 1; | |
680 | ||
0204568a | 681 | for (; n != 0; --n) { |
8342681d NF |
682 | struct of_drconf_cell drmem; |
683 | ||
684 | read_drconf_cell(&drmem, &dm); | |
685 | ||
686 | /* skip this block if the reserved bit is set in flags (0x80) | |
687 | or if the block is not assigned to this partition (0x8) */ | |
688 | if ((drmem.flags & DRCONF_MEM_RESERVED) | |
689 | || !(drmem.flags & DRCONF_MEM_ASSIGNED)) | |
0204568a | 690 | continue; |
1daa6d08 | 691 | |
cf00085d | 692 | base = drmem.base_addr; |
3fdfd990 | 693 | size = lmb_size; |
cf00085d | 694 | ranges = 1; |
8342681d | 695 | |
cf00085d C |
696 | if (is_kexec_kdump) { |
697 | ranges = read_usm_ranges(&usm); | |
698 | if (!ranges) /* there are no (base, size) pairs */ | |
699 | continue; | |
700 | } | |
701 | do { | |
702 | if (is_kexec_kdump) { | |
703 | base = read_n_cells(n_mem_addr_cells, &usm); | |
704 | size = read_n_cells(n_mem_size_cells, &usm); | |
705 | } | |
706 | nid = of_drconf_to_nid_single(&drmem, &aa); | |
707 | fake_numa_create_new_node( | |
708 | ((base + size) >> PAGE_SHIFT), | |
8342681d | 709 | &nid); |
cf00085d C |
710 | node_set_online(nid); |
711 | sz = numa_enforce_memory_limit(base, size); | |
712 | if (sz) | |
713 | add_active_range(nid, base >> PAGE_SHIFT, | |
714 | (base >> PAGE_SHIFT) | |
715 | + (sz >> PAGE_SHIFT)); | |
716 | } while (--ranges); | |
0204568a PM |
717 | } |
718 | } | |
719 | ||
1da177e4 LT |
720 | static int __init parse_numa_properties(void) |
721 | { | |
94db7c5e | 722 | struct device_node *memory; |
482ec7c4 | 723 | int default_nid = 0; |
1da177e4 LT |
724 | unsigned long i; |
725 | ||
726 | if (numa_enabled == 0) { | |
727 | printk(KERN_WARNING "NUMA disabled by user\n"); | |
728 | return -1; | |
729 | } | |
730 | ||
1da177e4 LT |
731 | min_common_depth = find_min_common_depth(); |
732 | ||
1da177e4 LT |
733 | if (min_common_depth < 0) |
734 | return min_common_depth; | |
735 | ||
bf4b85b0 NL |
736 | dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth); |
737 | ||
1da177e4 | 738 | /* |
482ec7c4 NL |
739 | * Even though we connect cpus to numa domains later in SMP |
740 | * init, we need to know the node ids now. This is because | |
741 | * each node to be onlined must have NODE_DATA etc backing it. | |
1da177e4 | 742 | */ |
482ec7c4 | 743 | for_each_present_cpu(i) { |
dfbe93a2 | 744 | struct device_node *cpu; |
cf950b7a | 745 | int nid; |
1da177e4 | 746 | |
8b16cd23 | 747 | cpu = of_get_cpu_node(i, NULL); |
482ec7c4 | 748 | BUG_ON(!cpu); |
953039c8 | 749 | nid = of_node_to_nid_single(cpu); |
482ec7c4 | 750 | of_node_put(cpu); |
1da177e4 | 751 | |
482ec7c4 NL |
752 | /* |
753 | * Don't fall back to default_nid yet -- we will plug | |
754 | * cpus into nodes once the memory scan has discovered | |
755 | * the topology. | |
756 | */ | |
757 | if (nid < 0) | |
758 | continue; | |
759 | node_set_online(nid); | |
1da177e4 LT |
760 | } |
761 | ||
237a0989 | 762 | get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells); |
94db7c5e AB |
763 | |
764 | for_each_node_by_type(memory, "memory") { | |
1da177e4 LT |
765 | unsigned long start; |
766 | unsigned long size; | |
cf950b7a | 767 | int nid; |
1da177e4 | 768 | int ranges; |
a7f67bdf | 769 | const unsigned int *memcell_buf; |
1da177e4 LT |
770 | unsigned int len; |
771 | ||
e2eb6392 | 772 | memcell_buf = of_get_property(memory, |
ba759485 ME |
773 | "linux,usable-memory", &len); |
774 | if (!memcell_buf || len <= 0) | |
e2eb6392 | 775 | memcell_buf = of_get_property(memory, "reg", &len); |
1da177e4 LT |
776 | if (!memcell_buf || len <= 0) |
777 | continue; | |
778 | ||
cc5d0189 BH |
779 | /* ranges in cell */ |
780 | ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells); | |
1da177e4 LT |
781 | new_range: |
782 | /* these are order-sensitive, and modify the buffer pointer */ | |
237a0989 MK |
783 | start = read_n_cells(n_mem_addr_cells, &memcell_buf); |
784 | size = read_n_cells(n_mem_size_cells, &memcell_buf); | |
1da177e4 | 785 | |
482ec7c4 NL |
786 | /* |
787 | * Assumption: either all memory nodes or none will | |
788 | * have associativity properties. If none, then | |
789 | * everything goes to default_nid. | |
790 | */ | |
953039c8 | 791 | nid = of_node_to_nid_single(memory); |
482ec7c4 NL |
792 | if (nid < 0) |
793 | nid = default_nid; | |
1daa6d08 BS |
794 | |
795 | fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid); | |
482ec7c4 | 796 | node_set_online(nid); |
1da177e4 | 797 | |
45fb6cea | 798 | if (!(size = numa_enforce_memory_limit(start, size))) { |
1da177e4 LT |
799 | if (--ranges) |
800 | goto new_range; | |
801 | else | |
802 | continue; | |
803 | } | |
804 | ||
c67c3cb4 MG |
805 | add_active_range(nid, start >> PAGE_SHIFT, |
806 | (start >> PAGE_SHIFT) + (size >> PAGE_SHIFT)); | |
1da177e4 LT |
807 | |
808 | if (--ranges) | |
809 | goto new_range; | |
810 | } | |
811 | ||
0204568a | 812 | /* |
dfbe93a2 AB |
813 | * Now do the same thing for each MEMBLOCK listed in the |
814 | * ibm,dynamic-memory property in the | |
815 | * ibm,dynamic-reconfiguration-memory node. | |
0204568a PM |
816 | */ |
817 | memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); | |
818 | if (memory) | |
819 | parse_drconf_memory(memory); | |
820 | ||
1da177e4 LT |
821 | return 0; |
822 | } | |
823 | ||
824 | static void __init setup_nonnuma(void) | |
825 | { | |
95f72d1e YL |
826 | unsigned long top_of_ram = memblock_end_of_DRAM(); |
827 | unsigned long total_ram = memblock_phys_mem_size(); | |
c67c3cb4 | 828 | unsigned long start_pfn, end_pfn; |
28be7072 BH |
829 | unsigned int nid = 0; |
830 | struct memblock_region *reg; | |
1da177e4 | 831 | |
e110b281 | 832 | printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n", |
1da177e4 | 833 | top_of_ram, total_ram); |
e110b281 | 834 | printk(KERN_DEBUG "Memory hole size: %ldMB\n", |
1da177e4 LT |
835 | (top_of_ram - total_ram) >> 20); |
836 | ||
28be7072 | 837 | for_each_memblock(memory, reg) { |
c7fc2de0 YL |
838 | start_pfn = memblock_region_memory_base_pfn(reg); |
839 | end_pfn = memblock_region_memory_end_pfn(reg); | |
1daa6d08 BS |
840 | |
841 | fake_numa_create_new_node(end_pfn, &nid); | |
842 | add_active_range(nid, start_pfn, end_pfn); | |
843 | node_set_online(nid); | |
c67c3cb4 | 844 | } |
1da177e4 LT |
845 | } |
846 | ||
4b703a23 AB |
847 | void __init dump_numa_cpu_topology(void) |
848 | { | |
849 | unsigned int node; | |
850 | unsigned int cpu, count; | |
851 | ||
852 | if (min_common_depth == -1 || !numa_enabled) | |
853 | return; | |
854 | ||
855 | for_each_online_node(node) { | |
e110b281 | 856 | printk(KERN_DEBUG "Node %d CPUs:", node); |
4b703a23 AB |
857 | |
858 | count = 0; | |
859 | /* | |
860 | * If we used a CPU iterator here we would miss printing | |
861 | * the holes in the cpumap. | |
862 | */ | |
25863de0 AB |
863 | for (cpu = 0; cpu < nr_cpu_ids; cpu++) { |
864 | if (cpumask_test_cpu(cpu, | |
865 | node_to_cpumask_map[node])) { | |
4b703a23 AB |
866 | if (count == 0) |
867 | printk(" %u", cpu); | |
868 | ++count; | |
869 | } else { | |
870 | if (count > 1) | |
871 | printk("-%u", cpu - 1); | |
872 | count = 0; | |
873 | } | |
874 | } | |
875 | ||
876 | if (count > 1) | |
25863de0 | 877 | printk("-%u", nr_cpu_ids - 1); |
4b703a23 AB |
878 | printk("\n"); |
879 | } | |
880 | } | |
881 | ||
882 | static void __init dump_numa_memory_topology(void) | |
1da177e4 LT |
883 | { |
884 | unsigned int node; | |
885 | unsigned int count; | |
886 | ||
887 | if (min_common_depth == -1 || !numa_enabled) | |
888 | return; | |
889 | ||
890 | for_each_online_node(node) { | |
891 | unsigned long i; | |
892 | ||
e110b281 | 893 | printk(KERN_DEBUG "Node %d Memory:", node); |
1da177e4 LT |
894 | |
895 | count = 0; | |
896 | ||
95f72d1e | 897 | for (i = 0; i < memblock_end_of_DRAM(); |
45fb6cea AB |
898 | i += (1 << SECTION_SIZE_BITS)) { |
899 | if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) { | |
1da177e4 LT |
900 | if (count == 0) |
901 | printk(" 0x%lx", i); | |
902 | ++count; | |
903 | } else { | |
904 | if (count > 0) | |
905 | printk("-0x%lx", i); | |
906 | count = 0; | |
907 | } | |
908 | } | |
909 | ||
910 | if (count > 0) | |
911 | printk("-0x%lx", i); | |
912 | printk("\n"); | |
913 | } | |
1da177e4 LT |
914 | } |
915 | ||
916 | /* | |
95f72d1e | 917 | * Allocate some memory, using the memblock or bootmem allocator as |
1da177e4 LT |
918 | * required. nid is the preferred node and end is the physical address of |
919 | * the highest address in the node. | |
920 | * | |
0be210fd | 921 | * Returns the virtual address of the memory. |
1da177e4 | 922 | */ |
893473df | 923 | static void __init *careful_zallocation(int nid, unsigned long size, |
45fb6cea AB |
924 | unsigned long align, |
925 | unsigned long end_pfn) | |
1da177e4 | 926 | { |
0be210fd | 927 | void *ret; |
45fb6cea | 928 | int new_nid; |
0be210fd DH |
929 | unsigned long ret_paddr; |
930 | ||
95f72d1e | 931 | ret_paddr = __memblock_alloc_base(size, align, end_pfn << PAGE_SHIFT); |
1da177e4 LT |
932 | |
933 | /* retry over all memory */ | |
0be210fd | 934 | if (!ret_paddr) |
95f72d1e | 935 | ret_paddr = __memblock_alloc_base(size, align, memblock_end_of_DRAM()); |
1da177e4 | 936 | |
0be210fd | 937 | if (!ret_paddr) |
5d21ea2b | 938 | panic("numa.c: cannot allocate %lu bytes for node %d", |
1da177e4 LT |
939 | size, nid); |
940 | ||
0be210fd DH |
941 | ret = __va(ret_paddr); |
942 | ||
1da177e4 | 943 | /* |
c555e520 | 944 | * We initialize the nodes in numeric order: 0, 1, 2... |
95f72d1e | 945 | * and hand over control from the MEMBLOCK allocator to the |
c555e520 DH |
946 | * bootmem allocator. If this function is called for |
947 | * node 5, then we know that all nodes <5 are using the | |
95f72d1e | 948 | * bootmem allocator instead of the MEMBLOCK allocator. |
c555e520 DH |
949 | * |
950 | * So, check the nid from which this allocation came | |
951 | * and double check to see if we need to use bootmem | |
95f72d1e | 952 | * instead of the MEMBLOCK. We don't free the MEMBLOCK memory |
c555e520 | 953 | * since it would be useless. |
1da177e4 | 954 | */ |
0be210fd | 955 | new_nid = early_pfn_to_nid(ret_paddr >> PAGE_SHIFT); |
45fb6cea | 956 | if (new_nid < nid) { |
0be210fd | 957 | ret = __alloc_bootmem_node(NODE_DATA(new_nid), |
1da177e4 LT |
958 | size, align, 0); |
959 | ||
0be210fd | 960 | dbg("alloc_bootmem %p %lx\n", ret, size); |
1da177e4 LT |
961 | } |
962 | ||
893473df | 963 | memset(ret, 0, size); |
0be210fd | 964 | return ret; |
1da177e4 LT |
965 | } |
966 | ||
74b85f37 CS |
967 | static struct notifier_block __cpuinitdata ppc64_numa_nb = { |
968 | .notifier_call = cpu_numa_callback, | |
969 | .priority = 1 /* Must run before sched domains notifier. */ | |
970 | }; | |
971 | ||
4a618669 DH |
972 | static void mark_reserved_regions_for_nid(int nid) |
973 | { | |
974 | struct pglist_data *node = NODE_DATA(nid); | |
28be7072 | 975 | struct memblock_region *reg; |
4a618669 | 976 | |
28be7072 BH |
977 | for_each_memblock(reserved, reg) { |
978 | unsigned long physbase = reg->base; | |
979 | unsigned long size = reg->size; | |
4a618669 | 980 | unsigned long start_pfn = physbase >> PAGE_SHIFT; |
06eccea6 | 981 | unsigned long end_pfn = PFN_UP(physbase + size); |
4a618669 DH |
982 | struct node_active_region node_ar; |
983 | unsigned long node_end_pfn = node->node_start_pfn + | |
984 | node->node_spanned_pages; | |
985 | ||
986 | /* | |
95f72d1e | 987 | * Check to make sure that this memblock.reserved area is |
4a618669 DH |
988 | * within the bounds of the node that we care about. |
989 | * Checking the nid of the start and end points is not | |
990 | * sufficient because the reserved area could span the | |
991 | * entire node. | |
992 | */ | |
993 | if (end_pfn <= node->node_start_pfn || | |
994 | start_pfn >= node_end_pfn) | |
995 | continue; | |
996 | ||
997 | get_node_active_region(start_pfn, &node_ar); | |
998 | while (start_pfn < end_pfn && | |
999 | node_ar.start_pfn < node_ar.end_pfn) { | |
1000 | unsigned long reserve_size = size; | |
1001 | /* | |
1002 | * if reserved region extends past active region | |
1003 | * then trim size to active region | |
1004 | */ | |
1005 | if (end_pfn > node_ar.end_pfn) | |
1006 | reserve_size = (node_ar.end_pfn << PAGE_SHIFT) | |
06eccea6 | 1007 | - physbase; |
a4c74ddd DH |
1008 | /* |
1009 | * Only worry about *this* node, others may not | |
1010 | * yet have valid NODE_DATA(). | |
1011 | */ | |
1012 | if (node_ar.nid == nid) { | |
1013 | dbg("reserve_bootmem %lx %lx nid=%d\n", | |
1014 | physbase, reserve_size, node_ar.nid); | |
1015 | reserve_bootmem_node(NODE_DATA(node_ar.nid), | |
1016 | physbase, reserve_size, | |
1017 | BOOTMEM_DEFAULT); | |
1018 | } | |
4a618669 DH |
1019 | /* |
1020 | * if reserved region is contained in the active region | |
1021 | * then done. | |
1022 | */ | |
1023 | if (end_pfn <= node_ar.end_pfn) | |
1024 | break; | |
1025 | ||
1026 | /* | |
1027 | * reserved region extends past the active region | |
1028 | * get next active region that contains this | |
1029 | * reserved region | |
1030 | */ | |
1031 | start_pfn = node_ar.end_pfn; | |
1032 | physbase = start_pfn << PAGE_SHIFT; | |
1033 | size = size - reserve_size; | |
1034 | get_node_active_region(start_pfn, &node_ar); | |
1035 | } | |
1036 | } | |
1037 | } | |
1038 | ||
1039 | ||
1da177e4 LT |
1040 | void __init do_init_bootmem(void) |
1041 | { | |
1042 | int nid; | |
1da177e4 LT |
1043 | |
1044 | min_low_pfn = 0; | |
95f72d1e | 1045 | max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT; |
1da177e4 LT |
1046 | max_pfn = max_low_pfn; |
1047 | ||
1048 | if (parse_numa_properties()) | |
1049 | setup_nonnuma(); | |
1050 | else | |
4b703a23 | 1051 | dump_numa_memory_topology(); |
1da177e4 | 1052 | |
1da177e4 | 1053 | for_each_online_node(nid) { |
c67c3cb4 | 1054 | unsigned long start_pfn, end_pfn; |
0be210fd | 1055 | void *bootmem_vaddr; |
1da177e4 LT |
1056 | unsigned long bootmap_pages; |
1057 | ||
c67c3cb4 | 1058 | get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); |
1da177e4 | 1059 | |
4a618669 DH |
1060 | /* |
1061 | * Allocate the node structure node local if possible | |
1062 | * | |
1063 | * Be careful moving this around, as it relies on all | |
1064 | * previous nodes' bootmem to be initialized and have | |
1065 | * all reserved areas marked. | |
1066 | */ | |
893473df | 1067 | NODE_DATA(nid) = careful_zallocation(nid, |
1da177e4 | 1068 | sizeof(struct pglist_data), |
45fb6cea | 1069 | SMP_CACHE_BYTES, end_pfn); |
1da177e4 LT |
1070 | |
1071 | dbg("node %d\n", nid); | |
1072 | dbg("NODE_DATA() = %p\n", NODE_DATA(nid)); | |
1073 | ||
b61bfa3c | 1074 | NODE_DATA(nid)->bdata = &bootmem_node_data[nid]; |
45fb6cea AB |
1075 | NODE_DATA(nid)->node_start_pfn = start_pfn; |
1076 | NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn; | |
1da177e4 LT |
1077 | |
1078 | if (NODE_DATA(nid)->node_spanned_pages == 0) | |
1079 | continue; | |
1080 | ||
45fb6cea AB |
1081 | dbg("start_paddr = %lx\n", start_pfn << PAGE_SHIFT); |
1082 | dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT); | |
1da177e4 | 1083 | |
45fb6cea | 1084 | bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn); |
893473df | 1085 | bootmem_vaddr = careful_zallocation(nid, |
45fb6cea AB |
1086 | bootmap_pages << PAGE_SHIFT, |
1087 | PAGE_SIZE, end_pfn); | |
1da177e4 | 1088 | |
0be210fd | 1089 | dbg("bootmap_vaddr = %p\n", bootmem_vaddr); |
1da177e4 | 1090 | |
0be210fd DH |
1091 | init_bootmem_node(NODE_DATA(nid), |
1092 | __pa(bootmem_vaddr) >> PAGE_SHIFT, | |
45fb6cea | 1093 | start_pfn, end_pfn); |
1da177e4 | 1094 | |
c67c3cb4 | 1095 | free_bootmem_with_active_regions(nid, end_pfn); |
4a618669 DH |
1096 | /* |
1097 | * Be very careful about moving this around. Future | |
893473df | 1098 | * calls to careful_zallocation() depend on this getting |
4a618669 DH |
1099 | * done correctly. |
1100 | */ | |
1101 | mark_reserved_regions_for_nid(nid); | |
8f64e1f2 | 1102 | sparse_memory_present_with_active_regions(nid); |
4a618669 | 1103 | } |
d3f6204a BH |
1104 | |
1105 | init_bootmem_done = 1; | |
25863de0 AB |
1106 | |
1107 | /* | |
1108 | * Now bootmem is initialised we can create the node to cpumask | |
1109 | * lookup tables and setup the cpu callback to populate them. | |
1110 | */ | |
1111 | setup_node_to_cpumask_map(); | |
1112 | ||
1113 | register_cpu_notifier(&ppc64_numa_nb); | |
1114 | cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE, | |
1115 | (void *)(unsigned long)boot_cpuid); | |
1da177e4 LT |
1116 | } |
1117 | ||
1118 | void __init paging_init(void) | |
1119 | { | |
6391af17 MG |
1120 | unsigned long max_zone_pfns[MAX_NR_ZONES]; |
1121 | memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); | |
95f72d1e | 1122 | max_zone_pfns[ZONE_DMA] = memblock_end_of_DRAM() >> PAGE_SHIFT; |
c67c3cb4 | 1123 | free_area_init_nodes(max_zone_pfns); |
1da177e4 LT |
1124 | } |
1125 | ||
1126 | static int __init early_numa(char *p) | |
1127 | { | |
1128 | if (!p) | |
1129 | return 0; | |
1130 | ||
1131 | if (strstr(p, "off")) | |
1132 | numa_enabled = 0; | |
1133 | ||
1134 | if (strstr(p, "debug")) | |
1135 | numa_debug = 1; | |
1136 | ||
1daa6d08 BS |
1137 | p = strstr(p, "fake="); |
1138 | if (p) | |
1139 | cmdline = p + strlen("fake="); | |
1140 | ||
1da177e4 LT |
1141 | return 0; |
1142 | } | |
1143 | early_param("numa", early_numa); | |
237a0989 MK |
1144 | |
1145 | #ifdef CONFIG_MEMORY_HOTPLUG | |
0db9360a | 1146 | /* |
0f16ef7f NF |
1147 | * Find the node associated with a hot added memory section for |
1148 | * memory represented in the device tree by the property | |
1149 | * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory. | |
0db9360a NF |
1150 | */ |
1151 | static int hot_add_drconf_scn_to_nid(struct device_node *memory, | |
1152 | unsigned long scn_addr) | |
1153 | { | |
1154 | const u32 *dm; | |
0f16ef7f | 1155 | unsigned int drconf_cell_cnt, rc; |
3fdfd990 | 1156 | unsigned long lmb_size; |
0db9360a | 1157 | struct assoc_arrays aa; |
0f16ef7f | 1158 | int nid = -1; |
0db9360a | 1159 | |
0f16ef7f NF |
1160 | drconf_cell_cnt = of_get_drconf_memory(memory, &dm); |
1161 | if (!drconf_cell_cnt) | |
1162 | return -1; | |
0db9360a | 1163 | |
3fdfd990 BH |
1164 | lmb_size = of_get_lmb_size(memory); |
1165 | if (!lmb_size) | |
0f16ef7f | 1166 | return -1; |
0db9360a NF |
1167 | |
1168 | rc = of_get_assoc_arrays(memory, &aa); | |
1169 | if (rc) | |
0f16ef7f | 1170 | return -1; |
0db9360a | 1171 | |
0f16ef7f | 1172 | for (; drconf_cell_cnt != 0; --drconf_cell_cnt) { |
0db9360a NF |
1173 | struct of_drconf_cell drmem; |
1174 | ||
1175 | read_drconf_cell(&drmem, &dm); | |
1176 | ||
1177 | /* skip this block if it is reserved or not assigned to | |
1178 | * this partition */ | |
1179 | if ((drmem.flags & DRCONF_MEM_RESERVED) | |
1180 | || !(drmem.flags & DRCONF_MEM_ASSIGNED)) | |
1181 | continue; | |
1182 | ||
0f16ef7f | 1183 | if ((scn_addr < drmem.base_addr) |
3fdfd990 | 1184 | || (scn_addr >= (drmem.base_addr + lmb_size))) |
0f16ef7f NF |
1185 | continue; |
1186 | ||
0db9360a | 1187 | nid = of_drconf_to_nid_single(&drmem, &aa); |
0f16ef7f NF |
1188 | break; |
1189 | } | |
1190 | ||
1191 | return nid; | |
1192 | } | |
1193 | ||
1194 | /* | |
1195 | * Find the node associated with a hot added memory section for memory | |
1196 | * represented in the device tree as a node (i.e. memory@XXXX) for | |
95f72d1e | 1197 | * each memblock. |
0f16ef7f NF |
1198 | */ |
1199 | int hot_add_node_scn_to_nid(unsigned long scn_addr) | |
1200 | { | |
94db7c5e | 1201 | struct device_node *memory; |
0f16ef7f NF |
1202 | int nid = -1; |
1203 | ||
94db7c5e | 1204 | for_each_node_by_type(memory, "memory") { |
0f16ef7f NF |
1205 | unsigned long start, size; |
1206 | int ranges; | |
1207 | const unsigned int *memcell_buf; | |
1208 | unsigned int len; | |
1209 | ||
1210 | memcell_buf = of_get_property(memory, "reg", &len); | |
1211 | if (!memcell_buf || len <= 0) | |
1212 | continue; | |
1213 | ||
1214 | /* ranges in cell */ | |
1215 | ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells); | |
1216 | ||
1217 | while (ranges--) { | |
1218 | start = read_n_cells(n_mem_addr_cells, &memcell_buf); | |
1219 | size = read_n_cells(n_mem_size_cells, &memcell_buf); | |
1220 | ||
1221 | if ((scn_addr < start) || (scn_addr >= (start + size))) | |
1222 | continue; | |
1223 | ||
1224 | nid = of_node_to_nid_single(memory); | |
1225 | break; | |
1226 | } | |
0db9360a | 1227 | |
0f16ef7f NF |
1228 | if (nid >= 0) |
1229 | break; | |
0db9360a NF |
1230 | } |
1231 | ||
60831842 AB |
1232 | of_node_put(memory); |
1233 | ||
0f16ef7f | 1234 | return nid; |
0db9360a NF |
1235 | } |
1236 | ||
237a0989 MK |
1237 | /* |
1238 | * Find the node associated with a hot added memory section. Section | |
95f72d1e YL |
1239 | * corresponds to a SPARSEMEM section, not an MEMBLOCK. It is assumed that |
1240 | * sections are fully contained within a single MEMBLOCK. | |
237a0989 MK |
1241 | */ |
1242 | int hot_add_scn_to_nid(unsigned long scn_addr) | |
1243 | { | |
1244 | struct device_node *memory = NULL; | |
0f16ef7f | 1245 | int nid, found = 0; |
237a0989 MK |
1246 | |
1247 | if (!numa_enabled || (min_common_depth < 0)) | |
72c33688 | 1248 | return first_online_node; |
0db9360a NF |
1249 | |
1250 | memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); | |
1251 | if (memory) { | |
1252 | nid = hot_add_drconf_scn_to_nid(memory, scn_addr); | |
1253 | of_node_put(memory); | |
0f16ef7f NF |
1254 | } else { |
1255 | nid = hot_add_node_scn_to_nid(scn_addr); | |
0db9360a | 1256 | } |
237a0989 | 1257 | |
0f16ef7f | 1258 | if (nid < 0 || !node_online(nid)) |
72c33688 | 1259 | nid = first_online_node; |
237a0989 | 1260 | |
0f16ef7f NF |
1261 | if (NODE_DATA(nid)->node_spanned_pages) |
1262 | return nid; | |
237a0989 | 1263 | |
0f16ef7f NF |
1264 | for_each_online_node(nid) { |
1265 | if (NODE_DATA(nid)->node_spanned_pages) { | |
1266 | found = 1; | |
1267 | break; | |
237a0989 | 1268 | } |
237a0989 | 1269 | } |
0f16ef7f NF |
1270 | |
1271 | BUG_ON(!found); | |
1272 | return nid; | |
237a0989 | 1273 | } |
0f16ef7f | 1274 | |
cd34206e NA |
1275 | static u64 hot_add_drconf_memory_max(void) |
1276 | { | |
1277 | struct device_node *memory = NULL; | |
1278 | unsigned int drconf_cell_cnt = 0; | |
1279 | u64 lmb_size = 0; | |
1280 | const u32 *dm = 0; | |
1281 | ||
1282 | memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); | |
1283 | if (memory) { | |
1284 | drconf_cell_cnt = of_get_drconf_memory(memory, &dm); | |
1285 | lmb_size = of_get_lmb_size(memory); | |
1286 | of_node_put(memory); | |
1287 | } | |
1288 | return lmb_size * drconf_cell_cnt; | |
1289 | } | |
1290 | ||
1291 | /* | |
1292 | * memory_hotplug_max - return max address of memory that may be added | |
1293 | * | |
1294 | * This is currently only used on systems that support drconfig memory | |
1295 | * hotplug. | |
1296 | */ | |
1297 | u64 memory_hotplug_max(void) | |
1298 | { | |
1299 | return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM()); | |
1300 | } | |
237a0989 | 1301 | #endif /* CONFIG_MEMORY_HOTPLUG */ |
9eff1a38 | 1302 | |
bd03403a | 1303 | /* Virtual Processor Home Node (VPHN) support */ |
39bf990e | 1304 | #ifdef CONFIG_PPC_SPLPAR |
5de16699 | 1305 | static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS]; |
9eff1a38 JL |
1306 | static cpumask_t cpu_associativity_changes_mask; |
1307 | static int vphn_enabled; | |
1308 | static void set_topology_timer(void); | |
9eff1a38 JL |
1309 | |
1310 | /* | |
1311 | * Snapshot the current values of the associativity change counters that | |
1312 | * the hypervisor maintains in each cpu's VPA. | |
1313 | */ | |
1314 | static void setup_cpu_associativity_change_counters(void) | |
1315 | { | |
cd9d6cc7 | 1316 | int cpu; |
9eff1a38 | 1317 | |
5de16699 AB |
1318 | /* The VPHN feature supports a maximum of 8 reference points */ |
1319 | BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8); | |
1320 | ||
9eff1a38 | 1321 | for_each_possible_cpu(cpu) { |
cd9d6cc7 | 1322 | int i; |
9eff1a38 JL |
1323 | u8 *counts = vphn_cpu_change_counts[cpu]; |
1324 | volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts; | |
1325 | ||
5de16699 | 1326 | for (i = 0; i < distance_ref_points_depth; i++) |
9eff1a38 | 1327 | counts[i] = hypervisor_counts[i]; |
9eff1a38 JL |
1328 | } |
1329 | } | |
1330 | ||
1331 | /* | |
1332 | * The hypervisor maintains a set of 8 associativity change counters in | |
1333 | * the VPA of each cpu that correspond to the associativity levels in the | |
1334 | * ibm,associativity-reference-points property. When an associativity | |
1335 | * level changes, the corresponding counter is incremented. | |
1336 | * | |
1337 | * Set a bit in cpu_associativity_changes_mask for each cpu whose home | |
1338 | * node associativity levels have changed. | |
1339 | * | |
1340 | * Returns the number of cpus with unhandled associativity changes. | |
1341 | */ | |
1342 | static int update_cpu_associativity_changes_mask(void) | |
1343 | { | |
cd9d6cc7 | 1344 | int cpu, nr_cpus = 0; |
9eff1a38 JL |
1345 | cpumask_t *changes = &cpu_associativity_changes_mask; |
1346 | ||
1347 | cpumask_clear(changes); | |
1348 | ||
1349 | for_each_possible_cpu(cpu) { | |
1350 | int i, changed = 0; | |
1351 | u8 *counts = vphn_cpu_change_counts[cpu]; | |
1352 | volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts; | |
1353 | ||
5de16699 | 1354 | for (i = 0; i < distance_ref_points_depth; i++) { |
d69043e8 | 1355 | if (hypervisor_counts[i] != counts[i]) { |
9eff1a38 JL |
1356 | counts[i] = hypervisor_counts[i]; |
1357 | changed = 1; | |
1358 | } | |
1359 | } | |
1360 | if (changed) { | |
1361 | cpumask_set_cpu(cpu, changes); | |
1362 | nr_cpus++; | |
1363 | } | |
1364 | } | |
1365 | ||
1366 | return nr_cpus; | |
1367 | } | |
1368 | ||
c0e5e46f AB |
1369 | /* |
1370 | * 6 64-bit registers unpacked into 12 32-bit associativity values. To form | |
1371 | * the complete property we have to add the length in the first cell. | |
1372 | */ | |
1373 | #define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32) + 1) | |
9eff1a38 JL |
1374 | |
1375 | /* | |
1376 | * Convert the associativity domain numbers returned from the hypervisor | |
1377 | * to the sequence they would appear in the ibm,associativity property. | |
1378 | */ | |
1379 | static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked) | |
1380 | { | |
cd9d6cc7 | 1381 | int i, nr_assoc_doms = 0; |
9eff1a38 JL |
1382 | const u16 *field = (const u16*) packed; |
1383 | ||
1384 | #define VPHN_FIELD_UNUSED (0xffff) | |
1385 | #define VPHN_FIELD_MSB (0x8000) | |
1386 | #define VPHN_FIELD_MASK (~VPHN_FIELD_MSB) | |
1387 | ||
c0e5e46f | 1388 | for (i = 1; i < VPHN_ASSOC_BUFSIZE; i++) { |
9eff1a38 JL |
1389 | if (*field == VPHN_FIELD_UNUSED) { |
1390 | /* All significant fields processed, and remaining | |
1391 | * fields contain the reserved value of all 1's. | |
1392 | * Just store them. | |
1393 | */ | |
1394 | unpacked[i] = *((u32*)field); | |
1395 | field += 2; | |
7639adaa | 1396 | } else if (*field & VPHN_FIELD_MSB) { |
9eff1a38 JL |
1397 | /* Data is in the lower 15 bits of this field */ |
1398 | unpacked[i] = *field & VPHN_FIELD_MASK; | |
1399 | field++; | |
1400 | nr_assoc_doms++; | |
7639adaa | 1401 | } else { |
9eff1a38 JL |
1402 | /* Data is in the lower 15 bits of this field |
1403 | * concatenated with the next 16 bit field | |
1404 | */ | |
1405 | unpacked[i] = *((u32*)field); | |
1406 | field += 2; | |
1407 | nr_assoc_doms++; | |
1408 | } | |
1409 | } | |
1410 | ||
c0e5e46f AB |
1411 | /* The first cell contains the length of the property */ |
1412 | unpacked[0] = nr_assoc_doms; | |
1413 | ||
9eff1a38 JL |
1414 | return nr_assoc_doms; |
1415 | } | |
1416 | ||
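A standalone user-space model of the field decoding performed by vphn_unpack_associativity() above; not kernel code: the packed values are invented, the loop is shortened, and the 32-bit case is reassembled with explicit shifts instead of the raw pointer cast used in the kernel:

```c
/*
 * Model of the VPHN 16-bit field decoding: MSB set -> a 15-bit domain
 * number; MSB clear -> upper half of a 32-bit domain number; 0xffff ->
 * unused/reserved.  unpacked[0] ends up holding the domain count.
 */
#include <stdio.h>
#include <stdint.h>

#define VPHN_FIELD_UNUSED	0xffff
#define VPHN_FIELD_MSB		0x8000
#define VPHN_FIELD_MASK		0x7fff

int main(void)
{
	/* invented: domains 5, 9 and one 32-bit domain 0x00012345 */
	static const uint16_t packed[12] = {
		0x8005, 0x8009, 0x0001, 0x2345,
		0xffff, 0xffff, 0xffff, 0xffff,
		0xffff, 0xffff, 0xffff, 0xffff,
	};
	const uint16_t *field = packed;
	unsigned int unpacked[8];
	int i, nr = 0;

	for (i = 1; i < 7; i++) {
		if (*field == VPHN_FIELD_UNUSED) {
			unpacked[i] = 0xffffffff;	/* reserved, stored as-is */
			field += 2;
		} else if (*field & VPHN_FIELD_MSB) {
			unpacked[i] = *field & VPHN_FIELD_MASK;	/* 15-bit value */
			field++;
			nr++;
		} else {
			unpacked[i] = ((uint32_t)field[0] << 16) | field[1];
			field += 2;
			nr++;
		}
	}
	unpacked[0] = nr;	/* first cell is the number of domains */

	for (i = 0; i <= nr; i++)
		printf("unpacked[%d] = 0x%x\n", i, unpacked[i]);
	return 0;
}
```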
1417 | /* | |
1418 | * Retrieve the new associativity information for a virtual processor's | |
1419 | * home node. | |
1420 | */ | |
1421 | static long hcall_vphn(unsigned long cpu, unsigned int *associativity) | |
1422 | { | |
cd9d6cc7 | 1423 | long rc; |
9eff1a38 JL |
1424 | long retbuf[PLPAR_HCALL9_BUFSIZE] = {0}; |
1425 | u64 flags = 1; | |
1426 | int hwcpu = get_hard_smp_processor_id(cpu); | |
1427 | ||
1428 | rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu); | |
1429 | vphn_unpack_associativity(retbuf, associativity); | |
1430 | ||
1431 | return rc; | |
1432 | } | |
1433 | ||
1434 | static long vphn_get_associativity(unsigned long cpu, | |
1435 | unsigned int *associativity) | |
1436 | { | |
cd9d6cc7 | 1437 | long rc; |
9eff1a38 JL |
1438 | |
1439 | rc = hcall_vphn(cpu, associativity); | |
1440 | ||
1441 | switch (rc) { | |
1442 | case H_FUNCTION: | |
1443 | printk(KERN_INFO | |
1444 | "VPHN is not supported. Disabling polling...\n"); | |
1445 | stop_topology_update(); | |
1446 | break; | |
1447 | case H_HARDWARE: | |
1448 | printk(KERN_ERR | |
1449 | "hcall_vphn() experienced a hardware fault " | |
1450 | "preventing VPHN. Disabling polling...\n"); | |
1451 | stop_topology_update(); | |
1452 | } | |
1453 | ||
1454 | return rc; | |
1455 | } | |
1456 | ||
1457 | /* | |
1458 | * Update the node maps and sysfs entries for each cpu whose home node | |
1459 | * has changed. | |
1460 | */ | |
1461 | int arch_update_cpu_topology(void) | |
1462 | { | |
cd9d6cc7 | 1463 | int cpu, nid, old_nid; |
9eff1a38 | 1464 | unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0}; |
cd9d6cc7 | 1465 | struct sys_device *sysdev; |
9eff1a38 | 1466 | |
104699c0 | 1467 | for_each_cpu(cpu,&cpu_associativity_changes_mask) { |
9eff1a38 JL |
1468 | vphn_get_associativity(cpu, associativity); |
1469 | nid = associativity_to_nid(associativity); | |
1470 | ||
1471 | if (nid < 0 || !node_online(nid)) | |
1472 | nid = first_online_node; | |
1473 | ||
1474 | old_nid = numa_cpu_lookup_table[cpu]; | |
1475 | ||
1476 | /* Disable hotplug while we update the cpu | |
1477 | * masks and sysfs. | |
1478 | */ | |
1479 | get_online_cpus(); | |
1480 | unregister_cpu_under_node(cpu, old_nid); | |
1481 | unmap_cpu_from_node(cpu); | |
1482 | map_cpu_to_node(cpu, nid); | |
1483 | register_cpu_under_node(cpu, nid); | |
1484 | put_online_cpus(); | |
1485 | ||
1486 | sysdev = get_cpu_sysdev(cpu); | |
1487 | if (sysdev) | |
1488 | kobject_uevent(&sysdev->kobj, KOBJ_CHANGE); | |
1489 | } | |
1490 | ||
1491 | return 1; | |
1492 | } | |
1493 | ||
1494 | static void topology_work_fn(struct work_struct *work) | |
1495 | { | |
1496 | rebuild_sched_domains(); | |
1497 | } | |
1498 | static DECLARE_WORK(topology_work, topology_work_fn); | |
1499 | ||
1500 | void topology_schedule_update(void) | |
1501 | { | |
1502 | schedule_work(&topology_work); | |
1503 | } | |
1504 | ||
1505 | static void topology_timer_fn(unsigned long ignored) | |
1506 | { | |
1507 | if (!vphn_enabled) | |
1508 | return; | |
1509 | if (update_cpu_associativity_changes_mask() > 0) | |
1510 | topology_schedule_update(); | |
1511 | set_topology_timer(); | |
1512 | } | |
1513 | static struct timer_list topology_timer = | |
1514 | TIMER_INITIALIZER(topology_timer_fn, 0, 0); | |
1515 | ||
1516 | static void set_topology_timer(void) | |
1517 | { | |
1518 | topology_timer.data = 0; | |
1519 | topology_timer.expires = jiffies + 60 * HZ; | |
1520 | add_timer(&topology_timer); | |
1521 | } | |
1522 | ||
1523 | /* | |
1524 | * Start polling for VPHN associativity changes. | |
1525 | */ | |
1526 | int start_topology_update(void) | |
1527 | { | |
1528 | int rc = 0; | |
1529 | ||
36e8695c BH |
1530 | /* Disabled until races with load balancing are fixed */ |
1531 | if (0 && firmware_has_feature(FW_FEATURE_VPHN) && | |
fe5cfd63 | 1532 | get_lppaca()->shared_proc) { |
9eff1a38 JL |
1533 | vphn_enabled = 1; |
1534 | setup_cpu_associativity_change_counters(); | |
1535 | init_timer_deferrable(&topology_timer); | |
1536 | set_topology_timer(); | |
1537 | rc = 1; | |
1538 | } | |
1539 | ||
1540 | return rc; | |
1541 | } | |
1542 | __initcall(start_topology_update); | |
1543 | ||
1544 | /* | |
1545 | * Disable polling for VPHN associativity changes. | |
1546 | */ | |
1547 | int stop_topology_update(void) | |
1548 | { | |
1549 | vphn_enabled = 0; | |
1550 | return del_timer_sync(&topology_timer); | |
1551 | } | |
39bf990e | 1552 | #endif /* CONFIG_PPC_SPLPAR */ |