/*
 * pSeries NUMA support
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/threads.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/lmb.h>
#include <linux/of.h>
#include <asm/sparsemem.h>
#include <asm/prom.h>
#include <asm/system.h>
#include <asm/smp.h>

static int numa_enabled = 1;

static char *cmdline __initdata;

static int numa_debug;
#define dbg(args...) do { if (numa_debug) printk(KERN_INFO args); } while (0)

int numa_cpu_lookup_table[NR_CPUS];
cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(numa_cpumask_lookup_table);
EXPORT_SYMBOL(node_data);

static int min_common_depth;
static int n_mem_addr_cells, n_mem_size_cells;

static int __cpuinit fake_numa_create_new_node(unsigned long end_pfn,
						unsigned int *nid)
{
	unsigned long long mem;
	char *p = cmdline;
	static unsigned int fake_nid;
	static unsigned long long curr_boundary;

	/*
	 * Modify node id iff we started creating NUMA nodes.
	 * We want to continue from where we left off last time.
	 */
	if (fake_nid)
		*nid = fake_nid;
	/*
	 * In case there are no more arguments to parse, the
	 * node_id should be the same as the last fake node id
	 * (we've handled this above).
	 */
	if (!p)
		return 0;

	mem = memparse(p, &p);
	if (!mem)
		return 0;

	if (mem < curr_boundary)
		return 0;

	curr_boundary = mem;

	if ((end_pfn << PAGE_SHIFT) > mem) {
		/*
		 * Skip commas and spaces
		 */
		while (*p == ',' || *p == ' ' || *p == '\t')
			p++;

		cmdline = p;
		fake_nid++;
		*nid = fake_nid;
		dbg("created new fake_node with id %d\n", fake_nid);
		return 1;
	}
	return 0;
}
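
/*
 * Illustrative note (an editor's example, values hypothetical): the
 * boundaries consumed above come from the "numa=fake=" kernel command line
 * handled by early_numa() below, e.g.
 *
 *	numa=fake=1G,3G
 *
 * Each entry is a memparse() size (K/M/G suffixes allowed). Whenever the end
 * of the region being scanned crosses the next unconsumed boundary, a new
 * fake node id is created and assigned, so memory is split into fake NUMA
 * nodes at roughly those addresses (subject to lmb/region granularity).
 */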

/*
 * get_active_region_work_fn - A helper function for get_node_active_region
 *	Sets datax to the start_pfn and end_pfn if they contain
 *	the initial value of datax->start_pfn between them.
 * @start_pfn: start page (inclusive) of region to check
 * @end_pfn: end page (exclusive) of region to check
 * @datax: comes in with ->start_pfn set to the value to search for and
 *	goes out with the active range if it contains it
 * Returns 1 if the search value is in range, else 0.
 */
static int __init get_active_region_work_fn(unsigned long start_pfn,
					unsigned long end_pfn, void *datax)
{
	struct node_active_region *data = datax;

	if (start_pfn <= data->start_pfn && end_pfn > data->start_pfn) {
		data->start_pfn = start_pfn;
		data->end_pfn = end_pfn;
		return 1;
	}
	return 0;
}

/*
 * get_node_active_region - Return active region containing start_pfn
 * Active range returned is empty if none found.
 * @start_pfn: The page to return the region for.
 * @node_ar: Returned set to the active region containing start_pfn
 */
static void __init get_node_active_region(unsigned long start_pfn,
			struct node_active_region *node_ar)
{
	int nid = early_pfn_to_nid(start_pfn);

	node_ar->nid = nid;
	node_ar->start_pfn = start_pfn;
	node_ar->end_pfn = start_pfn;
	work_with_active_regions(nid, get_active_region_work_fn, node_ar);
}

static void __cpuinit map_cpu_to_node(int cpu, int node)
{
	numa_cpu_lookup_table[cpu] = node;

	dbg("adding cpu %d to node %d\n", cpu, node);

	if (!(cpu_isset(cpu, numa_cpumask_lookup_table[node])))
		cpu_set(cpu, numa_cpumask_lookup_table[node]);
}

#ifdef CONFIG_HOTPLUG_CPU
static void unmap_cpu_from_node(unsigned long cpu)
{
	int node = numa_cpu_lookup_table[cpu];

	dbg("removing cpu %lu from node %d\n", cpu, node);

	if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) {
		cpu_clear(cpu, numa_cpumask_lookup_table[node]);
	} else {
		printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
		       cpu, node);
	}
}
#endif /* CONFIG_HOTPLUG_CPU */

static struct device_node * __cpuinit find_cpu_node(unsigned int cpu)
{
	unsigned int hw_cpuid = get_hard_smp_processor_id(cpu);
	struct device_node *cpu_node = NULL;
	const unsigned int *interrupt_server, *reg;
	int len;

	while ((cpu_node = of_find_node_by_type(cpu_node, "cpu")) != NULL) {
		/* Try interrupt server first */
		interrupt_server = of_get_property(cpu_node,
					"ibm,ppc-interrupt-server#s", &len);

		if (interrupt_server && (len > 0)) {
			/* only divide len once we know the property exists */
			len = len / sizeof(u32);
			while (len--) {
				if (interrupt_server[len] == hw_cpuid)
					return cpu_node;
			}
		} else {
			reg = of_get_property(cpu_node, "reg", &len);
			if (reg && (len > 0) && (reg[0] == hw_cpuid))
				return cpu_node;
		}
	}

	return NULL;
}

/* must hold reference to node during call */
static const int *of_get_associativity(struct device_node *dev)
{
	return of_get_property(dev, "ibm,associativity", NULL);
}

/*
 * Returns the property linux,drconf-usable-memory if
 * it exists (the property exists only in kexec/kdump kernels,
 * added by kexec-tools)
 */
static const u32 *of_get_usable_memory(struct device_node *memory)
{
	const u32 *prop;
	u32 len;

	prop = of_get_property(memory, "linux,drconf-usable-memory", &len);
	if (!prop || len < sizeof(unsigned int))
		return NULL;
	return prop;
}

/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
 * info is found.
 */
static int of_node_to_nid_single(struct device_node *device)
{
	int nid = -1;
	const unsigned int *tmp;

	if (min_common_depth == -1)
		goto out;

	tmp = of_get_associativity(device);
	if (!tmp)
		goto out;

	if (tmp[0] >= min_common_depth)
		nid = tmp[min_common_depth];

	/* POWER4 LPAR uses 0xffff as invalid node */
	if (nid == 0xffff || nid >= MAX_NUMNODES)
		nid = -1;
out:
	return nid;
}
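
/*
 * Illustrative note (an editor's example, values hypothetical): an
 * "ibm,associativity" property is a counted list of domain ids, e.g.
 *
 *	ibm,associativity = <4 0 0 0 1>
 *
 * Here tmp[0] == 4 is the number of levels that follow. With
 * min_common_depth == 4, the nid above would be tmp[4] == 1.
 */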

/* Walk the device tree upwards, looking for an associativity id */
int of_node_to_nid(struct device_node *device)
{
	struct device_node *tmp;
	int nid = -1;

	of_node_get(device);
	while (device) {
		nid = of_node_to_nid_single(device);
		if (nid != -1)
			break;

		tmp = device;
		device = of_get_parent(tmp);
		of_node_put(tmp);
	}
	of_node_put(device);

	return nid;
}
EXPORT_SYMBOL_GPL(of_node_to_nid);

/*
 * In theory, the "ibm,associativity" property may contain multiple
 * associativity lists because a resource may be multiply connected
 * into the machine.  This resource then has different associativity
 * characteristics relative to its multiple connections.  We ignore
 * this for now.  We also assume that all cpu and memory sets have
 * their distances represented at a common level.  This won't be
 * true for hierarchical NUMA.
 *
 * In any case the ibm,associativity-reference-points should give
 * the correct depth for a normal NUMA system.
 *
 * - Dave Hansen <haveblue@us.ibm.com>
 */
static int __init find_min_common_depth(void)
{
	int depth;
	const unsigned int *ref_points;
	struct device_node *rtas_root;
	unsigned int len;

	rtas_root = of_find_node_by_path("/rtas");

	if (!rtas_root)
		return -1;

	/*
	 * This property is 2 32-bit integers, each representing a level of
	 * depth in the associativity nodes.  The first is for an SMP
	 * configuration (should be all 0's) and the second is for a normal
	 * NUMA configuration.  We need at least two cells (8 bytes) of
	 * property data before dereferencing ref_points[1].
	 */
	ref_points = of_get_property(rtas_root,
			"ibm,associativity-reference-points", &len);

	if (ref_points && (len >= 2 * sizeof(unsigned int))) {
		depth = ref_points[1];
	} else {
		dbg("NUMA: ibm,associativity-reference-points not found.\n");
		depth = -1;
	}
	of_node_put(rtas_root);

	return depth;
}
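
/*
 * Illustrative note (an editor's example, values hypothetical): on a
 * two-level machine the property might read
 *
 *	ibm,associativity-reference-points = <0x4 0x4>
 *
 * in which case find_min_common_depth() returns ref_points[1] == 4, and
 * of_node_to_nid_single() above then reads index 4 of each
 * "ibm,associativity" list.
 */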

static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
	struct device_node *memory = NULL;

	memory = of_find_node_by_type(memory, "memory");
	if (!memory)
		panic("numa.c: No memory nodes found!");

	*n_addr_cells = of_n_addr_cells(memory);
	*n_size_cells = of_n_size_cells(memory);
	of_node_put(memory);
}

static unsigned long __devinit read_n_cells(int n, const unsigned int **buf)
{
	unsigned long result = 0;

	while (n--) {
		result = (result << 32) | **buf;
		(*buf)++;
	}
	return result;
}
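
/*
 * Illustrative note (an editor's example): with n == 2 and *buf pointing at
 * the cells { 0x00000001, 0x00000000 }, read_n_cells() returns 0x100000000
 * (4GB) and leaves *buf advanced past both cells.  Device tree cells are
 * 32 bits, so 64-bit addresses and sizes span two cells.
 */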

struct of_drconf_cell {
	u64 base_addr;
	u32 drc_index;
	u32 reserved;
	u32 aa_index;
	u32 flags;
};

#define DRCONF_MEM_ASSIGNED	0x00000008
#define DRCONF_MEM_AI_INVALID	0x00000040
#define DRCONF_MEM_RESERVED	0x00000080

/*
 * Read the next lmb list entry from the ibm,dynamic-memory property
 * and return the information in the provided of_drconf_cell structure.
 */
static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp)
{
	const u32 *cp;

	drmem->base_addr = read_n_cells(n_mem_addr_cells, cellp);

	cp = *cellp;
	drmem->drc_index = cp[0];
	drmem->reserved = cp[1];
	drmem->aa_index = cp[2];
	drmem->flags = cp[3];

	*cellp = cp + 4;
}

/*
 * Retrieve and validate the ibm,dynamic-memory property of the device tree.
 *
 * The layout of the ibm,dynamic-memory property is a number N of lmb
 * list entries followed by N lmb list entries.  Each lmb list entry
 * contains information as laid out in the of_drconf_cell struct above.
 */
static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
{
	const u32 *prop;
	u32 len, entries;

	prop = of_get_property(memory, "ibm,dynamic-memory", &len);
	if (!prop || len < sizeof(unsigned int))
		return 0;

	entries = *prop++;

	/* Now that we know the number of entries, revalidate the size
	 * of the property read in to ensure we have everything
	 */
	if (len < (entries * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int))
		return 0;

	*dm = prop;
	return entries;
}
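
/*
 * Illustrative note (an editor's example, values hypothetical): with
 * n_mem_addr_cells == 2, each ibm,dynamic-memory entry is 6 cells, so a
 * property describing N == 2 lmbs looks like
 *
 *	<2  base_hi base_lo drc_index reserved aa_index flags
 *	    base_hi base_lo drc_index reserved aa_index flags>
 *
 * which is exactly what the length check above revalidates:
 * (2 * (2 + 4) + 1) == 13 cells in total.
 */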

/*
 * Retrieve and validate the ibm,lmb-size property for drconf memory
 * from the device tree.
 */
static u64 of_get_lmb_size(struct device_node *memory)
{
	const u32 *prop;
	u32 len;

	prop = of_get_property(memory, "ibm,lmb-size", &len);
	if (!prop || len < sizeof(unsigned int))
		return 0;

	return read_n_cells(n_mem_size_cells, &prop);
}

struct assoc_arrays {
	u32 n_arrays;
	u32 array_sz;
	const u32 *arrays;
};

/*
 * Retrieve and validate the list of associativity arrays for drconf
 * memory from the ibm,associativity-lookup-arrays property of the
 * device tree.
 *
 * The layout of the ibm,associativity-lookup-arrays property is a number N
 * indicating the number of associativity arrays, followed by a number M
 * indicating the size of each associativity array, followed by a list
 * of N associativity arrays.
 */
static int of_get_assoc_arrays(struct device_node *memory,
			       struct assoc_arrays *aa)
{
	const u32 *prop;
	u32 len;

	prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
	if (!prop || len < 2 * sizeof(unsigned int))
		return -1;

	aa->n_arrays = *prop++;
	aa->array_sz = *prop++;

	/* Now that we know the number of arrays and size of each array,
	 * revalidate the size of the property read in.
	 */
	if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
		return -1;

	aa->arrays = prop;
	return 0;
}
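
/*
 * Illustrative note (an editor's example, values hypothetical): a property
 * with N == 2 arrays of M == 4 entries each would look like
 *
 *	ibm,associativity-lookup-arrays = <2 4  0 0 0 0  1 1 1 1>
 *
 * An lmb with aa_index i then takes its associativity from entries
 * [i * M .. i * M + M - 1]; of_drconf_to_nid_single() below picks entry
 * (min_common_depth - 1) within that slice.
 */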

/*
 * This is like of_node_to_nid_single() for memory represented in the
 * ibm,dynamic-reconfiguration-memory node.
 */
static int of_drconf_to_nid_single(struct of_drconf_cell *drmem,
				   struct assoc_arrays *aa)
{
	int default_nid = 0;
	int nid = default_nid;
	int index;

	if (min_common_depth > 0 && min_common_depth <= aa->array_sz &&
	    !(drmem->flags & DRCONF_MEM_AI_INVALID) &&
	    drmem->aa_index < aa->n_arrays) {
		index = drmem->aa_index * aa->array_sz + min_common_depth - 1;
		nid = aa->arrays[index];

		if (nid == 0xffff || nid >= MAX_NUMNODES)
			nid = default_nid;
	}

	return nid;
}

/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int __cpuinit numa_setup_cpu(unsigned long lcpu)
{
	int nid = 0;
	struct device_node *cpu = find_cpu_node(lcpu);

	if (!cpu) {
		WARN_ON(1);
		goto out;
	}

	nid = of_node_to_nid_single(cpu);

	if (nid < 0 || !node_online(nid))
		nid = any_online_node(NODE_MASK_ALL);
out:
	map_cpu_to_node(lcpu, nid);

	of_node_put(cpu);

	return nid;
}

static int __cpuinit cpu_numa_callback(struct notifier_block *nfb,
			     unsigned long action,
			     void *hcpu)
{
	unsigned long lcpu = (unsigned long)hcpu;
	int ret = NOTIFY_DONE;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		numa_setup_cpu(lcpu);
		ret = NOTIFY_OK;
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		unmap_cpu_from_node(lcpu);
		ret = NOTIFY_OK;
		break;
#endif
	}
	return ret;
}

/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero.  If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
						      unsigned long size)
{
	/*
	 * We use lmb_end_of_DRAM() in here instead of memory_limit because
	 * we've already adjusted it for the limit and it takes care of
	 * having memory holes below the limit.  Also, in the case of
	 * iommu_is_off, memory_limit is not set but is implicitly enforced.
	 */

	if (start + size <= lmb_end_of_DRAM())
		return size;

	if (start >= lmb_end_of_DRAM())
		return 0;

	return lmb_end_of_DRAM() - start;
}

/*
 * Reads the counter for a given entry in the
 * linux,drconf-usable-memory property
 */
static inline int __init read_usm_ranges(const u32 **usm)
{
	/*
	 * For each lmb in ibm,dynamic-memory a corresponding
	 * entry in the linux,drconf-usable-memory property contains
	 * a counter followed by that many (base, size) tuples.
	 * Read the counter from linux,drconf-usable-memory.
	 */
	return read_n_cells(n_mem_size_cells, usm);
}
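
/*
 * Illustrative note (an editor's example, values hypothetical): for an lmb
 * of which only one 256MB range starting at 0x10000000 is usable, the
 * corresponding linux,drconf-usable-memory entry would read
 *
 *	<0x0 0x1  0x0 0x10000000  0x0 0x10000000>
 *
 * i.e. a count of 1 followed by one (base, size) tuple, with each value
 * spanning two cells here (n_mem_addr_cells == n_mem_size_cells == 2).
 * A count of 0 makes the caller skip the lmb entirely.
 */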

/*
 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 * node.  This assumes n_mem_{addr,size}_cells have been set.
 */
static void __init parse_drconf_memory(struct device_node *memory)
{
	const u32 *dm, *usm;
	unsigned int n, rc, ranges, is_kexec_kdump = 0;
	unsigned long lmb_size, base, size, sz;
	int nid;
	struct assoc_arrays aa;

	n = of_get_drconf_memory(memory, &dm);
	if (!n)
		return;

	lmb_size = of_get_lmb_size(memory);
	if (!lmb_size)
		return;

	rc = of_get_assoc_arrays(memory, &aa);
	if (rc)
		return;

	/* check if this is a kexec/kdump kernel */
	usm = of_get_usable_memory(memory);
	if (usm != NULL)
		is_kexec_kdump = 1;

	for (; n != 0; --n) {
		struct of_drconf_cell drmem;

		read_drconf_cell(&drmem, &dm);

		/* skip this block if the reserved bit is set in flags (0x80)
		   or if the block is not assigned to this partition (0x8) */
		if ((drmem.flags & DRCONF_MEM_RESERVED)
		    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
			continue;

		base = drmem.base_addr;
		size = lmb_size;
		ranges = 1;

		if (is_kexec_kdump) {
			ranges = read_usm_ranges(&usm);
			if (!ranges) /* there are no (base, size) tuples */
				continue;
		}
		do {
			if (is_kexec_kdump) {
				base = read_n_cells(n_mem_addr_cells, &usm);
				size = read_n_cells(n_mem_size_cells, &usm);
			}
			nid = of_drconf_to_nid_single(&drmem, &aa);
			fake_numa_create_new_node((base + size) >> PAGE_SHIFT,
						  &nid);
			node_set_online(nid);
			sz = numa_enforce_memory_limit(base, size);
			if (sz)
				add_active_range(nid, base >> PAGE_SHIFT,
						 (base >> PAGE_SHIFT)
						 + (sz >> PAGE_SHIFT));
		} while (--ranges);
	}
}

static int __init parse_numa_properties(void)
{
	struct device_node *cpu = NULL;
	struct device_node *memory = NULL;
	int default_nid = 0;
	unsigned long i;

	if (numa_enabled == 0) {
		printk(KERN_WARNING "NUMA disabled by user\n");
		return -1;
	}

	min_common_depth = find_min_common_depth();

	if (min_common_depth < 0)
		return min_common_depth;

	dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);

	/*
	 * Even though we connect cpus to numa domains later in SMP
	 * init, we need to know the node ids now.  This is because
	 * each node to be onlined must have NODE_DATA etc backing it.
	 */
	for_each_present_cpu(i) {
		int nid;

		cpu = find_cpu_node(i);
		BUG_ON(!cpu);
		nid = of_node_to_nid_single(cpu);
		of_node_put(cpu);

		/*
		 * Don't fall back to default_nid yet -- we will plug
		 * cpus into nodes once the memory scan has discovered
		 * the topology.
		 */
		if (nid < 0)
			continue;
		node_set_online(nid);
	}

	get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);
	memory = NULL;
	while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
		unsigned long start;
		unsigned long size;
		int nid;
		int ranges;
		const unsigned int *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory,
				"linux,usable-memory", &len);
		if (!memcell_buf || len <= 0)
			memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
		/* these are order-sensitive, and modify the buffer pointer */
		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
		size = read_n_cells(n_mem_size_cells, &memcell_buf);

		/*
		 * Assumption: either all memory nodes or none will
		 * have associativity properties.  If none, then
		 * everything goes to default_nid.
		 */
		nid = of_node_to_nid_single(memory);
		if (nid < 0)
			nid = default_nid;

		fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
		node_set_online(nid);

		if (!(size = numa_enforce_memory_limit(start, size))) {
			if (--ranges)
				goto new_range;
			else
				continue;
		}

		add_active_range(nid, start >> PAGE_SHIFT,
				(start >> PAGE_SHIFT) + (size >> PAGE_SHIFT));

		if (--ranges)
			goto new_range;
	}

	/*
	 * Now do the same thing for each LMB listed in the ibm,dynamic-memory
	 * property in the ibm,dynamic-reconfiguration-memory node.
	 */
	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory)
		parse_drconf_memory(memory);

	return 0;
}

static void __init setup_nonnuma(void)
{
	unsigned long top_of_ram = lmb_end_of_DRAM();
	unsigned long total_ram = lmb_phys_mem_size();
	unsigned long start_pfn, end_pfn;
	unsigned int i, nid = 0;

	printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	for (i = 0; i < lmb.memory.cnt; ++i) {
		start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
		end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);

		fake_numa_create_new_node(end_pfn, &nid);
		add_active_range(nid, start_pfn, end_pfn);
		node_set_online(nid);
	}
}

void __init dump_numa_cpu_topology(void)
{
	unsigned int node;
	unsigned int cpu, count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		printk(KERN_DEBUG "Node %d CPUs:", node);

		count = 0;
		/*
		 * If we used a CPU iterator here we would miss printing
		 * the holes in the cpumap.
		 */
		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) {
				if (count == 0)
					printk(" %u", cpu);
				++count;
			} else {
				if (count > 1)
					printk("-%u", cpu - 1);
				count = 0;
			}
		}

		if (count > 1)
			printk("-%u", NR_CPUS - 1);
		printk("\n");
	}
}
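
/*
 * Illustrative note (an editor's example): the loop above prints contiguous
 * runs as ranges, so a node with CPUs 0-3 and 6 would be dumped as
 *
 *	Node 0 CPUs: 0-3 6
 *
 * A lone CPU prints only its number; a run prints "first-last".
 */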

static void __init dump_numa_memory_topology(void)
{
	unsigned int node;
	unsigned int count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		unsigned long i;

		printk(KERN_DEBUG "Node %d Memory:", node);

		count = 0;

		for (i = 0; i < lmb_end_of_DRAM();
		     i += (1 << SECTION_SIZE_BITS)) {
			if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
				if (count == 0)
					printk(" 0x%lx", i);
				++count;
			} else {
				if (count > 0)
					printk("-0x%lx", i);
				count = 0;
			}
		}

		if (count > 0)
			printk("-0x%lx", i);
		printk("\n");
	}
}

/*
 * Allocate some memory, using the lmb or bootmem allocator as required.
 * nid is the preferred node and end is the physical address of the
 * highest address in the node.
 *
 * Returns the physical address of the memory.
 */
static void __init *careful_allocation(int nid, unsigned long size,
				       unsigned long align,
				       unsigned long end_pfn)
{
	int new_nid;
	unsigned long ret = __lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);

	/* retry over all memory */
	if (!ret)
		ret = __lmb_alloc_base(size, align, lmb_end_of_DRAM());

	if (!ret)
		panic("numa.c: cannot allocate %lu bytes on node %d",
		      size, nid);

	/*
	 * If the memory came from a previously allocated node, we must
	 * retry with the bootmem allocator.
	 */
	new_nid = early_pfn_to_nid(ret >> PAGE_SHIFT);
	if (new_nid < nid) {
		ret = (unsigned long)__alloc_bootmem_node(NODE_DATA(new_nid),
				size, align, 0);

		if (!ret)
			panic("numa.c: cannot allocate %lu bytes on node %d",
			      size, new_nid);

		ret = __pa(ret);

		dbg("alloc_bootmem %lx %lx\n", ret, size);
	}

	return (void *)ret;
}

static struct notifier_block __cpuinitdata ppc64_numa_nb = {
	.notifier_call = cpu_numa_callback,
	.priority = 1 /* Must run before sched domains notifier. */
};

static void mark_reserved_regions_for_nid(int nid)
{
	struct pglist_data *node = NODE_DATA(nid);
	int i;

	for (i = 0; i < lmb.reserved.cnt; i++) {
		unsigned long physbase = lmb.reserved.region[i].base;
		unsigned long size = lmb.reserved.region[i].size;
		unsigned long start_pfn = physbase >> PAGE_SHIFT;
		unsigned long end_pfn = ((physbase + size) >> PAGE_SHIFT);
		struct node_active_region node_ar;
		unsigned long node_end_pfn = node->node_start_pfn +
					     node->node_spanned_pages;

		/*
		 * Check to make sure that this lmb.reserved area is
		 * within the bounds of the node that we care about.
		 * Checking the nid of the start and end points is not
		 * sufficient because the reserved area could span the
		 * entire node.
		 */
		if (end_pfn <= node->node_start_pfn ||
		    start_pfn >= node_end_pfn)
			continue;

		get_node_active_region(start_pfn, &node_ar);
		while (start_pfn < end_pfn &&
		       node_ar.start_pfn < node_ar.end_pfn) {
			unsigned long reserve_size = size;
			/*
			 * if reserved region extends past active region
			 * then trim size to active region
			 */
			if (end_pfn > node_ar.end_pfn)
				reserve_size = (node_ar.end_pfn << PAGE_SHIFT)
					- (start_pfn << PAGE_SHIFT);
			dbg("reserve_bootmem %lx %lx nid=%d\n", physbase,
			    reserve_size, node_ar.nid);
			reserve_bootmem_node(NODE_DATA(node_ar.nid), physbase,
					     reserve_size, BOOTMEM_DEFAULT);
			/*
			 * if reserved region is contained in the active region
			 * then done.
			 */
			if (end_pfn <= node_ar.end_pfn)
				break;

			/*
			 * reserved region extends past the active region
			 * get next active region that contains this
			 * reserved region
			 */
			start_pfn = node_ar.end_pfn;
			physbase = start_pfn << PAGE_SHIFT;
			size = size - reserve_size;
			get_node_active_region(start_pfn, &node_ar);
		}
	}
}

void __init do_init_bootmem(void)
{
	int nid;
	unsigned int i;

	min_low_pfn = 0;
	max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn;

	if (parse_numa_properties())
		setup_nonnuma();
	else
		dump_numa_memory_topology();

	register_cpu_notifier(&ppc64_numa_nb);
	cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE,
			  (void *)(unsigned long)boot_cpuid);

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;
		unsigned long bootmem_paddr;
		unsigned long bootmap_pages;

		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

		/*
		 * Allocate the node structure node local if possible
		 *
		 * Be careful moving this around, as it relies on all
		 * previous nodes' bootmem to be initialized and have
		 * all reserved areas marked.
		 */
		NODE_DATA(nid) = careful_allocation(nid,
					sizeof(struct pglist_data),
					SMP_CACHE_BYTES, end_pfn);
		NODE_DATA(nid) = __va(NODE_DATA(nid));
		memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

		dbg("node %d\n", nid);
		dbg("NODE_DATA() = %p\n", NODE_DATA(nid));

		NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
		NODE_DATA(nid)->node_start_pfn = start_pfn;
		NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;

		if (NODE_DATA(nid)->node_spanned_pages == 0)
			continue;

		dbg("start_paddr = %lx\n", start_pfn << PAGE_SHIFT);
		dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);

		bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
		bootmem_paddr = (unsigned long)careful_allocation(nid,
					bootmap_pages << PAGE_SHIFT,
					PAGE_SIZE, end_pfn);
		memset(__va(bootmem_paddr), 0, bootmap_pages << PAGE_SHIFT);

		dbg("bootmap_paddr = %lx\n", bootmem_paddr);

		init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT,
				  start_pfn, end_pfn);

		free_bootmem_with_active_regions(nid, end_pfn);
		/*
		 * Be very careful about moving this around.  Future
		 * calls to careful_allocation() depend on this getting
		 * done correctly.
		 */
		mark_reserved_regions_for_nid(nid);
		sparse_memory_present_with_active_regions(nid);
	}
}

void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = lmb_end_of_DRAM() >> PAGE_SHIFT;
	free_area_init_nodes(max_zone_pfns);
}

static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	p = strstr(p, "fake=");
	if (p)
		cmdline = p + strlen("fake=");

	return 0;
}
early_param("numa", early_numa);
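
/*
 * Illustrative note (an editor's summary of the parser above): the "numa="
 * option accepts any combination of
 *
 *	numa=off		disable NUMA
 *	numa=debug		enable dbg() output
 *	numa=fake=<size>[,...]	fake node boundaries, e.g. numa=fake=1G,3G
 *
 * Note strstr() is used, so the sub-options may appear anywhere in the
 * option string.
 */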

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Validate the node associated with the memory section we are
 * trying to add.
 */
int valid_hot_add_scn(int *nid, unsigned long start, u32 lmb_size,
		      unsigned long scn_addr)
{
	nodemask_t nodes;

	if (*nid < 0 || !node_online(*nid))
		*nid = any_online_node(NODE_MASK_ALL);

	if ((scn_addr >= start) && (scn_addr < (start + lmb_size))) {
		nodes_setall(nodes);
		while (NODE_DATA(*nid)->node_spanned_pages == 0) {
			node_clear(*nid, nodes);
			*nid = any_online_node(nodes);
		}

		return 1;
	}

	return 0;
}

/*
 * Find the node associated with a hot added memory section represented
 * by the ibm,dynamic-reconfiguration-memory node.
 */
static int hot_add_drconf_scn_to_nid(struct device_node *memory,
				     unsigned long scn_addr)
{
	const u32 *dm;
	unsigned int n, rc;
	unsigned long lmb_size;
	int default_nid = any_online_node(NODE_MASK_ALL);
	int nid;
	struct assoc_arrays aa;

	n = of_get_drconf_memory(memory, &dm);
	if (!n)
		return default_nid;

	lmb_size = of_get_lmb_size(memory);
	if (!lmb_size)
		return default_nid;

	rc = of_get_assoc_arrays(memory, &aa);
	if (rc)
		return default_nid;

	for (; n != 0; --n) {
		struct of_drconf_cell drmem;

		read_drconf_cell(&drmem, &dm);

		/* skip this block if it is reserved or not assigned to
		 * this partition */
		if ((drmem.flags & DRCONF_MEM_RESERVED)
		    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
			continue;

		nid = of_drconf_to_nid_single(&drmem, &aa);

		if (valid_hot_add_scn(&nid, drmem.base_addr, lmb_size,
				      scn_addr))
			return nid;
	}

	BUG();	/* section address should be found above */
	return 0;
}

/*
 * Find the node associated with a hot added memory section.  Section
 * corresponds to a SPARSEMEM section, not an LMB.  It is assumed that
 * sections are fully contained within a single LMB.
 */
int hot_add_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory = NULL;
	int nid;

	if (!numa_enabled || (min_common_depth < 0))
		return any_online_node(NODE_MASK_ALL);

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		nid = hot_add_drconf_scn_to_nid(memory, scn_addr);
		of_node_put(memory);
		return nid;
	}

	while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
		unsigned long start, size;
		int ranges;
		const unsigned int *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
ha_new_range:
		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
		size = read_n_cells(n_mem_size_cells, &memcell_buf);
		nid = of_node_to_nid_single(memory);

		if (valid_hot_add_scn(&nid, start, size, scn_addr)) {
			of_node_put(memory);
			return nid;
		}

		if (--ranges)	/* process all ranges in cell */
			goto ha_new_range;
	}
	BUG();	/* section address should be found above */
	return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG */