mirror_ubuntu-artful-kernel.git: arch/powerpc/mm/numa.c
1 /*
2 * pSeries NUMA support
3 *
4 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11 #include <linux/threads.h>
12 #include <linux/bootmem.h>
13 #include <linux/init.h>
14 #include <linux/mm.h>
15 #include <linux/mmzone.h>
16 #include <linux/export.h>
17 #include <linux/nodemask.h>
18 #include <linux/cpu.h>
19 #include <linux/notifier.h>
20 #include <linux/memblock.h>
21 #include <linux/of.h>
22 #include <linux/pfn.h>
23 #include <linux/cpuset.h>
24 #include <linux/node.h>
25 #include <linux/stop_machine.h>
26 #include <linux/proc_fs.h>
27 #include <linux/seq_file.h>
28 #include <linux/uaccess.h>
29 #include <linux/slab.h>
30 #include <asm/cputhreads.h>
31 #include <asm/sparsemem.h>
32 #include <asm/prom.h>
33 #include <asm/smp.h>
35 #include <asm/topology.h>
36 #include <asm/firmware.h>
37 #include <asm/paca.h>
38 #include <asm/hvcall.h>
39 #include <asm/setup.h>
40 #include <asm/vdso.h>
41
42 static int numa_enabled = 1;
43
44 static char *cmdline __initdata;
45
46 static int numa_debug;
47 #define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }
48
49 int numa_cpu_lookup_table[NR_CPUS];
50 cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
51 struct pglist_data *node_data[MAX_NUMNODES];
52
53 EXPORT_SYMBOL(numa_cpu_lookup_table);
54 EXPORT_SYMBOL(node_to_cpumask_map);
55 EXPORT_SYMBOL(node_data);
56
57 static int min_common_depth;
58 static int n_mem_addr_cells, n_mem_size_cells;
59 static int form1_affinity;
60
61 #define MAX_DISTANCE_REF_POINTS 4
62 static int distance_ref_points_depth;
63 static const __be32 *distance_ref_points;
64 static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];
65
66 /*
67 * Allocate node_to_cpumask_map based on number of available nodes
68 * Requires node_possible_map to be valid.
69 *
70 * Note: cpumask_of_node() is not valid until after this is done.
71 */
72 static void __init setup_node_to_cpumask_map(void)
73 {
74 unsigned int node;
75
76 /* setup nr_node_ids if not done yet */
77 if (nr_node_ids == MAX_NUMNODES)
78 setup_nr_node_ids();
79
80 /* allocate the map */
81 for (node = 0; node < nr_node_ids; node++)
82 alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);
83
84 /* cpumask_of_node() will now work */
85 dbg("Node to cpumask map for %d nodes\n", nr_node_ids);
86 }
87
88 static int __init fake_numa_create_new_node(unsigned long end_pfn,
89 unsigned int *nid)
90 {
91 unsigned long long mem;
92 char *p = cmdline;
93 static unsigned int fake_nid;
94 static unsigned long long curr_boundary;
95
96 /*
97 * Modify the node id only if we have already started creating NUMA nodes;
98 * we want to continue from where we left off the last time.
99 */
100 if (fake_nid)
101 *nid = fake_nid;
102 /*
103 * In case there are no more arguments to parse, the
104 * node_id should be the same as the last fake node id
105 * (we've handled this above).
106 */
107 if (!p)
108 return 0;
109
110 mem = memparse(p, &p);
111 if (!mem)
112 return 0;
113
114 if (mem < curr_boundary)
115 return 0;
116
117 curr_boundary = mem;
118
119 if ((end_pfn << PAGE_SHIFT) > mem) {
120 /*
121 * Skip commas and spaces
122 */
123 while (*p == ',' || *p == ' ' || *p == '\t')
124 p++;
125
126 cmdline = p;
127 fake_nid++;
128 *nid = fake_nid;
129 dbg("created new fake_node with id %d\n", fake_nid);
130 return 1;
131 }
132 return 0;
133 }
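/*
 * Illustrative sketch of the command line this parser consumes (the values
 * are hypothetical): booting with "numa=fake=1G,3G" makes early_numa() point
 * cmdline at "1G,3G", so on a machine with a single firmware node the memory
 * below 1G stays in node 0, memory between 1G and 3G moves to fake node 1,
 * and the remainder goes to fake node 2. Boundaries are only honoured at
 * memblock-region granularity.
 */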
134
135 /*
136 * get_node_active_region - Return active region containing pfn
137 * Active range returned is empty if none found.
138 * @pfn: The page to return the region for
139 * @node_ar: Returned set to the active region containing @pfn
140 */
141 static void __init get_node_active_region(unsigned long pfn,
142 struct node_active_region *node_ar)
143 {
144 unsigned long start_pfn, end_pfn;
145 int i, nid;
146
147 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
148 if (pfn >= start_pfn && pfn < end_pfn) {
149 node_ar->nid = nid;
150 node_ar->start_pfn = start_pfn;
151 node_ar->end_pfn = end_pfn;
152 break;
153 }
154 }
155 }
156
157 static void reset_numa_cpu_lookup_table(void)
158 {
159 unsigned int cpu;
160
161 for_each_possible_cpu(cpu)
162 numa_cpu_lookup_table[cpu] = -1;
163 }
164
165 static void update_numa_cpu_lookup_table(unsigned int cpu, int node)
166 {
167 numa_cpu_lookup_table[cpu] = node;
168 }
169
170 static void map_cpu_to_node(int cpu, int node)
171 {
172 update_numa_cpu_lookup_table(cpu, node);
173
174 dbg("adding cpu %d to node %d\n", cpu, node);
175
176 if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node])))
177 cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
178 }
179
180 #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
181 static void unmap_cpu_from_node(unsigned long cpu)
182 {
183 int node = numa_cpu_lookup_table[cpu];
184
185 dbg("removing cpu %lu from node %d\n", cpu, node);
186
187 if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
188 cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
189 } else {
190 printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
191 cpu, node);
192 }
193 }
194 #endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */
195
196 /* must hold reference to node during call */
197 static const __be32 *of_get_associativity(struct device_node *dev)
198 {
199 return of_get_property(dev, "ibm,associativity", NULL);
200 }
201
202 /*
203 * Returns the property linux,drconf-usable-memory if
204 * it exists (the property exists only in kexec/kdump kernels,
205 * added by kexec-tools)
206 */
207 static const __be32 *of_get_usable_memory(struct device_node *memory)
208 {
209 const __be32 *prop;
210 u32 len;
211 prop = of_get_property(memory, "linux,drconf-usable-memory", &len);
212 if (!prop || len < sizeof(unsigned int))
213 return NULL;
214 return prop;
215 }
216
217 int __node_distance(int a, int b)
218 {
219 int i;
220 int distance = LOCAL_DISTANCE;
221
222 if (!form1_affinity)
223 return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);
224
225 for (i = 0; i < distance_ref_points_depth; i++) {
226 if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
227 break;
228
229 /* Double the distance for each NUMA level */
230 distance *= 2;
231 }
232
233 return distance;
234 }
235 EXPORT_SYMBOL(__node_distance);
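/*
 * Worked example of the doubling above (domain values hypothetical): with
 * distance_ref_points_depth == 2, two nodes whose entries differ at level 0
 * but match at level 1 report 2 * LOCAL_DISTANCE (20); nodes that differ at
 * both levels report 4 * LOCAL_DISTANCE (40); nodes matching at level 0
 * report LOCAL_DISTANCE (10).
 */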
236
237 static void initialize_distance_lookup_table(int nid,
238 const __be32 *associativity)
239 {
240 int i;
241
242 if (!form1_affinity)
243 return;
244
245 for (i = 0; i < distance_ref_points_depth; i++) {
246 const __be32 *entry;
247
248 entry = &associativity[be32_to_cpu(distance_ref_points[i])];
249 distance_lookup_table[nid][i] = of_read_number(entry, 1);
250 }
251 }
252
253 /* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
254 * info is found.
255 */
256 static int associativity_to_nid(const __be32 *associativity)
257 {
258 int nid = -1;
259
260 if (min_common_depth == -1)
261 goto out;
262
263 if (of_read_number(associativity, 1) >= min_common_depth)
264 nid = of_read_number(&associativity[min_common_depth], 1);
265
266 /* POWER4 LPAR uses 0xffff as invalid node */
267 if (nid == 0xffff || nid >= MAX_NUMNODES)
268 nid = -1;
269
270 if (nid > 0 &&
271 of_read_number(associativity, 1) >= distance_ref_points_depth)
272 initialize_distance_lookup_table(nid, associativity);
273
274 out:
275 return nid;
276 }
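/*
 * Hypothetical example of the lookup above: with min_common_depth == 4 and
 * an ibm,associativity value of <5 0 1 2 9 3> (the first cell is the number
 * of entries that follow), the count 5 >= 4, so the node id is the cell at
 * index 4, i.e. 9.
 */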
277
278 /* Returns the nid associated with the given device tree node,
279 * or -1 if not found.
280 */
281 static int of_node_to_nid_single(struct device_node *device)
282 {
283 int nid = -1;
284 const __be32 *tmp;
285
286 tmp = of_get_associativity(device);
287 if (tmp)
288 nid = associativity_to_nid(tmp);
289 return nid;
290 }
291
292 /* Walk the device tree upwards, looking for an associativity id */
293 int of_node_to_nid(struct device_node *device)
294 {
295 struct device_node *tmp;
296 int nid = -1;
297
298 of_node_get(device);
299 while (device) {
300 nid = of_node_to_nid_single(device);
301 if (nid != -1)
302 break;
303
304 tmp = device;
305 device = of_get_parent(tmp);
306 of_node_put(tmp);
307 }
308 of_node_put(device);
309
310 return nid;
311 }
312 EXPORT_SYMBOL_GPL(of_node_to_nid);
313
314 static int __init find_min_common_depth(void)
315 {
316 int depth;
317 struct device_node *root;
318
319 if (firmware_has_feature(FW_FEATURE_OPAL))
320 root = of_find_node_by_path("/ibm,opal");
321 else
322 root = of_find_node_by_path("/rtas");
323 if (!root)
324 root = of_find_node_by_path("/");
325
326 /*
327 * This property is a set of 32-bit integers, each representing
328 * an index into the ibm,associativity nodes.
329 *
330 * With form 0 affinity the first integer is for an SMP configuration
331 * (should be all 0's) and the second is for a normal NUMA
332 * configuration. We have only one level of NUMA.
333 *
334 * With form 1 affinity the first integer is the most significant
335 * NUMA boundary and the following are progressively less significant
336 * boundaries. There can be more than one level of NUMA.
337 */
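/*
 * Hypothetical example: a form 1 property of <0x4 0x2> yields a depth of 4
 * (the first cell), whereas under form 0 the second cell would be used and
 * the depth would be 2.
 */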
338 distance_ref_points = of_get_property(root,
339 "ibm,associativity-reference-points",
340 &distance_ref_points_depth);
341
342 if (!distance_ref_points) {
343 dbg("NUMA: ibm,associativity-reference-points not found.\n");
344 goto err;
345 }
346
347 distance_ref_points_depth /= sizeof(int);
348
349 if (firmware_has_feature(FW_FEATURE_OPAL) ||
350 firmware_has_feature(FW_FEATURE_TYPE1_AFFINITY)) {
351 dbg("Using form 1 affinity\n");
352 form1_affinity = 1;
353 }
354
355 if (form1_affinity) {
356 depth = of_read_number(distance_ref_points, 1);
357 } else {
358 if (distance_ref_points_depth < 2) {
359 printk(KERN_WARNING "NUMA: "
360 "short ibm,associativity-reference-points\n");
361 goto err;
362 }
363
364 depth = of_read_number(&distance_ref_points[1], 1);
365 }
366
367 /*
368 * Warn and cap if the hardware supports more than
369 * MAX_DISTANCE_REF_POINTS domains.
370 */
371 if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) {
372 printk(KERN_WARNING "NUMA: distance array capped at "
373 "%d entries\n", MAX_DISTANCE_REF_POINTS);
374 distance_ref_points_depth = MAX_DISTANCE_REF_POINTS;
375 }
376
377 of_node_put(root);
378 return depth;
379
380 err:
381 of_node_put(root);
382 return -1;
383 }
384
385 static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
386 {
387 struct device_node *memory = NULL;
388
389 memory = of_find_node_by_type(memory, "memory");
390 if (!memory)
391 panic("numa.c: No memory nodes found!");
392
393 *n_addr_cells = of_n_addr_cells(memory);
394 *n_size_cells = of_n_size_cells(memory);
395 of_node_put(memory);
396 }
397
398 static unsigned long read_n_cells(int n, const __be32 **buf)
399 {
400 unsigned long result = 0;
401
402 while (n--) {
403 result = (result << 32) | of_read_number(*buf, 1);
404 (*buf)++;
405 }
406 return result;
407 }
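/*
 * Example of the accumulation above: with n == 2 and the cells
 * <0x00000001 0x00000000>, read_n_cells() returns 0x100000000.
 */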
408
409 /*
410 * Read the next memblock list entry from the ibm,dynamic-memory property
411 * and return the information in the provided of_drconf_cell structure.
412 */
413 static void read_drconf_cell(struct of_drconf_cell *drmem, const __be32 **cellp)
414 {
415 const __be32 *cp;
416
417 drmem->base_addr = read_n_cells(n_mem_addr_cells, cellp);
418
419 cp = *cellp;
420 drmem->drc_index = of_read_number(cp, 1);
421 drmem->reserved = of_read_number(&cp[1], 1);
422 drmem->aa_index = of_read_number(&cp[2], 1);
423 drmem->flags = of_read_number(&cp[3], 1);
424
425 *cellp = cp + 4;
426 }
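/*
 * Sketch of one ibm,dynamic-memory entry as consumed above (the cell count
 * is an assumption for illustration): with n_mem_addr_cells == 2 an entry is
 * 6 cells -- <base_hi base_lo drc_index reserved aa_index flags> -- so the
 * property as a whole is one count cell followed by N such 6-cell entries.
 */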
427
428 /*
429 * Retrieve and validate the ibm,dynamic-memory property of the device tree.
430 *
431 * The layout of the ibm,dynamic-memory property is a cell holding the number N
432 * of memblock list entries, followed by the N entries themselves. Each entry
433 * contains information as laid out in the of_drconf_cell struct above.
434 */
435 static int of_get_drconf_memory(struct device_node *memory, const __be32 **dm)
436 {
437 const __be32 *prop;
438 u32 len, entries;
439
440 prop = of_get_property(memory, "ibm,dynamic-memory", &len);
441 if (!prop || len < sizeof(unsigned int))
442 return 0;
443
444 entries = of_read_number(prop++, 1);
445
446 /* Now that we know the number of entries, revalidate the size
447 * of the property read in to ensure we have everything
448 */
449 if (len < (entries * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int))
450 return 0;
451
452 *dm = prop;
453 return entries;
454 }
455
456 /*
457 * Retrieve and validate the ibm,lmb-size property for drconf memory
458 * from the device tree.
459 */
460 static u64 of_get_lmb_size(struct device_node *memory)
461 {
462 const __be32 *prop;
463 u32 len;
464
465 prop = of_get_property(memory, "ibm,lmb-size", &len);
466 if (!prop || len < sizeof(unsigned int))
467 return 0;
468
469 return read_n_cells(n_mem_size_cells, &prop);
470 }
471
472 struct assoc_arrays {
473 u32 n_arrays;
474 u32 array_sz;
475 const __be32 *arrays;
476 };
477
478 /*
479 * Retrieve and validate the list of associativity arrays for drconf
480 * memory from the ibm,associativity-lookup-arrays property of the
481 * device tree.
482 *
483 * The layout of the ibm,associativity-lookup-arrays property is a number N
484 * indicating the number of associativity arrays, followed by a number M
485 * indicating the size of each associativity array, followed by a list
486 * of N associativity arrays.
487 */
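/*
 * Hypothetical instance of that layout: <2 4 a0 a1 a2 a3 b0 b1 b2 b3>
 * describes N = 2 associativity arrays of M = 4 cells each.
 */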
488 static int of_get_assoc_arrays(struct device_node *memory,
489 struct assoc_arrays *aa)
490 {
491 const __be32 *prop;
492 u32 len;
493
494 prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
495 if (!prop || len < 2 * sizeof(unsigned int))
496 return -1;
497
498 aa->n_arrays = of_read_number(prop++, 1);
499 aa->array_sz = of_read_number(prop++, 1);
500
501 /* Now that we know the number of arrays and size of each array,
502 * revalidate the size of the property read in.
503 */
504 if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
505 return -1;
506
507 aa->arrays = prop;
508 return 0;
509 }
510
511 /*
512 * This is like of_node_to_nid_single() for memory represented in the
513 * ibm,dynamic-reconfiguration-memory node.
514 */
515 static int of_drconf_to_nid_single(struct of_drconf_cell *drmem,
516 struct assoc_arrays *aa)
517 {
518 int default_nid = 0;
519 int nid = default_nid;
520 int index;
521
522 if (min_common_depth > 0 && min_common_depth <= aa->array_sz &&
523 !(drmem->flags & DRCONF_MEM_AI_INVALID) &&
524 drmem->aa_index < aa->n_arrays) {
525 index = drmem->aa_index * aa->array_sz + min_common_depth - 1;
526 nid = of_read_number(&aa->arrays[index], 1);
527
528 if (nid == 0xffff || nid >= MAX_NUMNODES)
529 nid = default_nid;
530 }
531
532 return nid;
533 }
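/*
 * Worked example of the index computation above (values are hypothetical):
 * with array_sz == 4, min_common_depth == 2 and aa_index == 3, the node id
 * is read from aa->arrays[3 * 4 + 2 - 1], i.e. cell 13 of the lookup-array
 * body.
 */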
534
535 /*
536 * Figure out to which domain a cpu belongs and stick it there.
537 * Return the id of the domain used.
538 */
539 static int numa_setup_cpu(unsigned long lcpu)
540 {
541 int nid = -1;
542 struct device_node *cpu;
543
544 /*
545 * If a valid cpu-to-node mapping is already available, use it
546 * directly instead of querying the firmware, since it represents
547 * the most recent mapping notified to us by the platform (eg: VPHN).
548 */
549 if ((nid = numa_cpu_lookup_table[lcpu]) >= 0) {
550 map_cpu_to_node(lcpu, nid);
551 return nid;
552 }
553
554 cpu = of_get_cpu_node(lcpu, NULL);
555
556 if (!cpu) {
557 WARN_ON(1);
558 if (cpu_present(lcpu))
559 goto out_present;
560 else
561 goto out;
562 }
563
564 nid = of_node_to_nid_single(cpu);
565
566 out_present:
567 if (nid < 0 || !node_online(nid))
568 nid = first_online_node;
569
570 map_cpu_to_node(lcpu, nid);
571 of_node_put(cpu);
572 out:
573 return nid;
574 }
575
576 static void verify_cpu_node_mapping(int cpu, int node)
577 {
578 int base, sibling, i;
579
580 /* Verify that all the threads in the core belong to the same node */
581 base = cpu_first_thread_sibling(cpu);
582
583 for (i = 0; i < threads_per_core; i++) {
584 sibling = base + i;
585
586 if (sibling == cpu || cpu_is_offline(sibling))
587 continue;
588
589 if (cpu_to_node(sibling) != node) {
590 WARN(1, "CPU thread siblings %d and %d don't belong"
591 " to the same node!\n", cpu, sibling);
592 break;
593 }
594 }
595 }
596
597 static int cpu_numa_callback(struct notifier_block *nfb, unsigned long action,
598 void *hcpu)
599 {
600 unsigned long lcpu = (unsigned long)hcpu;
601 int ret = NOTIFY_DONE, nid;
602
603 switch (action) {
604 case CPU_UP_PREPARE:
605 case CPU_UP_PREPARE_FROZEN:
606 nid = numa_setup_cpu(lcpu);
607 verify_cpu_node_mapping((int)lcpu, nid);
608 ret = NOTIFY_OK;
609 break;
610 #ifdef CONFIG_HOTPLUG_CPU
611 case CPU_DEAD:
612 case CPU_DEAD_FROZEN:
613 case CPU_UP_CANCELED:
614 case CPU_UP_CANCELED_FROZEN:
615 unmap_cpu_from_node(lcpu);
616 ret = NOTIFY_OK;
617 break;
618 #endif
619 }
620 return ret;
621 }
622
623 /*
624 * Check and possibly modify a memory region to enforce the memory limit.
625 *
626 * Returns the size the region should have to enforce the memory limit.
627 * This will either be the original value of size, a truncated value,
628 * or zero. If the returned value of size is 0 the region should be
629 * discarded as it lies wholly above the memory limit.
630 */
631 static unsigned long __init numa_enforce_memory_limit(unsigned long start,
632 unsigned long size)
633 {
634 /*
635 * We use memblock_end_of_DRAM() in here instead of memory_limit because
636 * we've already adjusted it for the limit and it takes care of
637 * having memory holes below the limit. Also, in the case of
638 * iommu_is_off, memory_limit is not set but is implicitly enforced.
639 */
640
641 if (start + size <= memblock_end_of_DRAM())
642 return size;
643
644 if (start >= memblock_end_of_DRAM())
645 return 0;
646
647 return memblock_end_of_DRAM() - start;
648 }
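/*
 * Example of the clamping above (addresses are hypothetical): if
 * memblock_end_of_DRAM() is 4GB, a region starting at 3GB with a size of
 * 2GB is trimmed to 1GB, and a region starting at or above 4GB is dropped
 * (size 0).
 */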
649
650 /*
651 * Reads the counter for a given entry in
652 * linux,drconf-usable-memory property
653 */
654 static inline int __init read_usm_ranges(const __be32 **usm)
655 {
656 /*
657 * For each lmb in ibm,dynamic-memory a corresponding
658 * entry in linux,drconf-usable-memory property contains
659 * a counter followed by that many (base, size) pairs.
660 * Read the counter from linux,drconf-usable-memory.
661 */
662 return read_n_cells(n_mem_size_cells, usm);
663 }
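/*
 * Hypothetical usable-memory entry for one LMB: <2 base0 size0 base1 size1>
 * means two usable (base, size) pairs follow the counter; a counter of 0
 * means no part of that LMB is usable and the LMB is skipped.
 */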
664
665 /*
666 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
667 * node. This assumes n_mem_{addr,size}_cells have been set.
668 */
669 static void __init parse_drconf_memory(struct device_node *memory)
670 {
671 const __be32 *uninitialized_var(dm), *usm;
672 unsigned int n, rc, ranges, is_kexec_kdump = 0;
673 unsigned long lmb_size, base, size, sz;
674 int nid;
675 struct assoc_arrays aa = { .arrays = NULL };
676
677 n = of_get_drconf_memory(memory, &dm);
678 if (!n)
679 return;
680
681 lmb_size = of_get_lmb_size(memory);
682 if (!lmb_size)
683 return;
684
685 rc = of_get_assoc_arrays(memory, &aa);
686 if (rc)
687 return;
688
689 /* check if this is a kexec/kdump kernel */
690 usm = of_get_usable_memory(memory);
691 if (usm != NULL)
692 is_kexec_kdump = 1;
693
694 for (; n != 0; --n) {
695 struct of_drconf_cell drmem;
696
697 read_drconf_cell(&drmem, &dm);
698
699 /* skip this block if the reserved bit is set in flags (0x80)
700 or if the block is not assigned to this partition (0x8) */
701 if ((drmem.flags & DRCONF_MEM_RESERVED)
702 || !(drmem.flags & DRCONF_MEM_ASSIGNED))
703 continue;
704
705 base = drmem.base_addr;
706 size = lmb_size;
707 ranges = 1;
708
709 if (is_kexec_kdump) {
710 ranges = read_usm_ranges(&usm);
711 if (!ranges) /* there are no (base, size) pairs */
712 continue;
713 }
714 do {
715 if (is_kexec_kdump) {
716 base = read_n_cells(n_mem_addr_cells, &usm);
717 size = read_n_cells(n_mem_size_cells, &usm);
718 }
719 nid = of_drconf_to_nid_single(&drmem, &aa);
720 fake_numa_create_new_node(
721 ((base + size) >> PAGE_SHIFT),
722 &nid);
723 node_set_online(nid);
724 sz = numa_enforce_memory_limit(base, size);
725 if (sz)
726 memblock_set_node(base, sz,
727 &memblock.memory, nid);
728 } while (--ranges);
729 }
730 }
731
732 static int __init parse_numa_properties(void)
733 {
734 struct device_node *memory;
735 int default_nid = 0;
736 unsigned long i;
737
738 if (numa_enabled == 0) {
739 printk(KERN_WARNING "NUMA disabled by user\n");
740 return -1;
741 }
742
743 min_common_depth = find_min_common_depth();
744
745 if (min_common_depth < 0)
746 return min_common_depth;
747
748 dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);
749
750 /*
751 * Even though we connect cpus to numa domains later in SMP
752 * init, we need to know the node ids now. This is because
753 * each node to be onlined must have NODE_DATA etc backing it.
754 */
755 for_each_present_cpu(i) {
756 struct device_node *cpu;
757 int nid;
758
759 cpu = of_get_cpu_node(i, NULL);
760 BUG_ON(!cpu);
761 nid = of_node_to_nid_single(cpu);
762 of_node_put(cpu);
763
764 /*
765 * Don't fall back to default_nid yet -- we will plug
766 * cpus into nodes once the memory scan has discovered
767 * the topology.
768 */
769 if (nid < 0)
770 continue;
771 node_set_online(nid);
772 }
773
774 get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);
775
776 for_each_node_by_type(memory, "memory") {
777 unsigned long start;
778 unsigned long size;
779 int nid;
780 int ranges;
781 const __be32 *memcell_buf;
782 unsigned int len;
783
784 memcell_buf = of_get_property(memory,
785 "linux,usable-memory", &len);
786 if (!memcell_buf || len <= 0)
787 memcell_buf = of_get_property(memory, "reg", &len);
788 if (!memcell_buf || len <= 0)
789 continue;
790
791 /* ranges in cell */
792 ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
793 new_range:
794 /* these are order-sensitive, and modify the buffer pointer */
795 start = read_n_cells(n_mem_addr_cells, &memcell_buf);
796 size = read_n_cells(n_mem_size_cells, &memcell_buf);
797
798 /*
799 * Assumption: either all memory nodes or none will
800 * have associativity properties. If none, then
801 * everything goes to default_nid.
802 */
803 nid = of_node_to_nid_single(memory);
804 if (nid < 0)
805 nid = default_nid;
806
807 fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
808 node_set_online(nid);
809
810 if (!(size = numa_enforce_memory_limit(start, size))) {
811 if (--ranges)
812 goto new_range;
813 else
814 continue;
815 }
816
817 memblock_set_node(start, size, &memblock.memory, nid);
818
819 if (--ranges)
820 goto new_range;
821 }
822
823 /*
824 * Now do the same thing for each MEMBLOCK listed in the
825 * ibm,dynamic-memory property in the
826 * ibm,dynamic-reconfiguration-memory node.
827 */
828 memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
829 if (memory)
830 parse_drconf_memory(memory);
831
832 return 0;
833 }
834
835 static void __init setup_nonnuma(void)
836 {
837 unsigned long top_of_ram = memblock_end_of_DRAM();
838 unsigned long total_ram = memblock_phys_mem_size();
839 unsigned long start_pfn, end_pfn;
840 unsigned int nid = 0;
841 struct memblock_region *reg;
842
843 printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
844 top_of_ram, total_ram);
845 printk(KERN_DEBUG "Memory hole size: %ldMB\n",
846 (top_of_ram - total_ram) >> 20);
847
848 for_each_memblock(memory, reg) {
849 start_pfn = memblock_region_memory_base_pfn(reg);
850 end_pfn = memblock_region_memory_end_pfn(reg);
851
852 fake_numa_create_new_node(end_pfn, &nid);
853 memblock_set_node(PFN_PHYS(start_pfn),
854 PFN_PHYS(end_pfn - start_pfn),
855 &memblock.memory, nid);
856 node_set_online(nid);
857 }
858 }
859
860 void __init dump_numa_cpu_topology(void)
861 {
862 unsigned int node;
863 unsigned int cpu, count;
864
865 if (min_common_depth == -1 || !numa_enabled)
866 return;
867
868 for_each_online_node(node) {
869 printk(KERN_DEBUG "Node %d CPUs:", node);
870
871 count = 0;
872 /*
873 * If we used a CPU iterator here we would miss printing
874 * the holes in the cpumap.
875 */
876 for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
877 if (cpumask_test_cpu(cpu,
878 node_to_cpumask_map[node])) {
879 if (count == 0)
880 printk(" %u", cpu);
881 ++count;
882 } else {
883 if (count > 1)
884 printk("-%u", cpu - 1);
885 count = 0;
886 }
887 }
888
889 if (count > 1)
890 printk("-%u", nr_cpu_ids - 1);
891 printk("\n");
892 }
893 }
894
895 static void __init dump_numa_memory_topology(void)
896 {
897 unsigned int node;
898 unsigned int count;
899
900 if (min_common_depth == -1 || !numa_enabled)
901 return;
902
903 for_each_online_node(node) {
904 unsigned long i;
905
906 printk(KERN_DEBUG "Node %d Memory:", node);
907
908 count = 0;
909
910 for (i = 0; i < memblock_end_of_DRAM();
911 i += (1 << SECTION_SIZE_BITS)) {
912 if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
913 if (count == 0)
914 printk(" 0x%lx", i);
915 ++count;
916 } else {
917 if (count > 0)
918 printk("-0x%lx", i);
919 count = 0;
920 }
921 }
922
923 if (count > 0)
924 printk("-0x%lx", i);
925 printk("\n");
926 }
927 }
928
929 /*
930 * Allocate some memory, using the memblock or bootmem allocator as
931 * required. nid is the preferred node and end_pfn is the highest page
932 * frame number in the node.
933 *
934 * Returns the virtual address of the memory.
935 */
936 static void __init *careful_zallocation(int nid, unsigned long size,
937 unsigned long align,
938 unsigned long end_pfn)
939 {
940 void *ret;
941 int new_nid;
942 unsigned long ret_paddr;
943
944 ret_paddr = __memblock_alloc_base(size, align, end_pfn << PAGE_SHIFT);
945
946 /* retry over all memory */
947 if (!ret_paddr)
948 ret_paddr = __memblock_alloc_base(size, align, memblock_end_of_DRAM());
949
950 if (!ret_paddr)
951 panic("numa.c: cannot allocate %lu bytes for node %d",
952 size, nid);
953
954 ret = __va(ret_paddr);
955
956 /*
957 * We initialize the nodes in numeric order: 0, 1, 2...
958 * and hand over control from the MEMBLOCK allocator to the
959 * bootmem allocator. If this function is called for
960 * node 5, then we know that all nodes <5 are using the
961 * bootmem allocator instead of the MEMBLOCK allocator.
962 *
963 * So, check the nid from which this allocation came
964 * and double check to see if we need to use bootmem
965 * instead of the MEMBLOCK. We don't free the MEMBLOCK memory
966 * since it would be useless.
967 */
968 new_nid = early_pfn_to_nid(ret_paddr >> PAGE_SHIFT);
969 if (new_nid < nid) {
970 ret = __alloc_bootmem_node(NODE_DATA(new_nid),
971 size, align, 0);
972
973 dbg("alloc_bootmem %p %lx\n", ret, size);
974 }
975
976 memset(ret, 0, size);
977 return ret;
978 }
979
980 static struct notifier_block ppc64_numa_nb = {
981 .notifier_call = cpu_numa_callback,
982 .priority = 1 /* Must run before sched domains notifier. */
983 };
984
985 static void __init mark_reserved_regions_for_nid(int nid)
986 {
987 struct pglist_data *node = NODE_DATA(nid);
988 struct memblock_region *reg;
989
990 for_each_memblock(reserved, reg) {
991 unsigned long physbase = reg->base;
992 unsigned long size = reg->size;
993 unsigned long start_pfn = physbase >> PAGE_SHIFT;
994 unsigned long end_pfn = PFN_UP(physbase + size);
995 struct node_active_region node_ar;
996 unsigned long node_end_pfn = pgdat_end_pfn(node);
997
998 /*
999 * Check to make sure that this memblock.reserved area is
1000 * within the bounds of the node that we care about.
1001 * Checking the nid of the start and end points is not
1002 * sufficient because the reserved area could span the
1003 * entire node.
1004 */
1005 if (end_pfn <= node->node_start_pfn ||
1006 start_pfn >= node_end_pfn)
1007 continue;
1008
1009 get_node_active_region(start_pfn, &node_ar);
1010 while (start_pfn < end_pfn &&
1011 node_ar.start_pfn < node_ar.end_pfn) {
1012 unsigned long reserve_size = size;
1013 /*
1014 * if reserved region extends past active region
1015 * then trim size to active region
1016 */
1017 if (end_pfn > node_ar.end_pfn)
1018 reserve_size = (node_ar.end_pfn << PAGE_SHIFT)
1019 - physbase;
1020 /*
1021 * Only worry about *this* node, others may not
1022 * yet have valid NODE_DATA().
1023 */
1024 if (node_ar.nid == nid) {
1025 dbg("reserve_bootmem %lx %lx nid=%d\n",
1026 physbase, reserve_size, node_ar.nid);
1027 reserve_bootmem_node(NODE_DATA(node_ar.nid),
1028 physbase, reserve_size,
1029 BOOTMEM_DEFAULT);
1030 }
1031 /*
1032 * if reserved region is contained in the active region
1033 * then done.
1034 */
1035 if (end_pfn <= node_ar.end_pfn)
1036 break;
1037
1038 /*
1039 * reserved region extends past the active region
1040 * get next active region that contains this
1041 * reserved region
1042 */
1043 start_pfn = node_ar.end_pfn;
1044 physbase = start_pfn << PAGE_SHIFT;
1045 size = size - reserve_size;
1046 get_node_active_region(start_pfn, &node_ar);
1047 }
1048 }
1049 }
1050
1051
1052 void __init do_init_bootmem(void)
1053 {
1054 int nid, cpu;
1055
1056 min_low_pfn = 0;
1057 max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
1058 max_pfn = max_low_pfn;
1059
1060 if (parse_numa_properties())
1061 setup_nonnuma();
1062 else
1063 dump_numa_memory_topology();
1064
1065 for_each_online_node(nid) {
1066 unsigned long start_pfn, end_pfn;
1067 void *bootmem_vaddr;
1068 unsigned long bootmap_pages;
1069
1070 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
1071
1072 /*
1073 * Allocate the node structure node local if possible
1074 *
1075 * Be careful moving this around, as it relies on all
1076 * previous nodes' bootmem to be initialized and have
1077 * all reserved areas marked.
1078 */
1079 NODE_DATA(nid) = careful_zallocation(nid,
1080 sizeof(struct pglist_data),
1081 SMP_CACHE_BYTES, end_pfn);
1082
1083 dbg("node %d\n", nid);
1084 dbg("NODE_DATA() = %p\n", NODE_DATA(nid));
1085
1086 NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
1087 NODE_DATA(nid)->node_start_pfn = start_pfn;
1088 NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
1089
1090 if (NODE_DATA(nid)->node_spanned_pages == 0)
1091 continue;
1092
1093 dbg("start_paddr = %lx\n", start_pfn << PAGE_SHIFT);
1094 dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);
1095
1096 bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
1097 bootmem_vaddr = careful_zallocation(nid,
1098 bootmap_pages << PAGE_SHIFT,
1099 PAGE_SIZE, end_pfn);
1100
1101 dbg("bootmap_vaddr = %p\n", bootmem_vaddr);
1102
1103 init_bootmem_node(NODE_DATA(nid),
1104 __pa(bootmem_vaddr) >> PAGE_SHIFT,
1105 start_pfn, end_pfn);
1106
1107 free_bootmem_with_active_regions(nid, end_pfn);
1108 /*
1109 * Be very careful about moving this around. Future
1110 * calls to careful_zallocation() depend on this getting
1111 * done correctly.
1112 */
1113 mark_reserved_regions_for_nid(nid);
1114 sparse_memory_present_with_active_regions(nid);
1115 }
1116
1117 init_bootmem_done = 1;
1118
1119 /*
1120 * Now that bootmem is initialised we can create the node-to-cpumask
1121 * lookup tables and set up the cpu callback to populate them.
1122 */
1123 setup_node_to_cpumask_map();
1124
1125 reset_numa_cpu_lookup_table();
1126 register_cpu_notifier(&ppc64_numa_nb);
1127 /*
1128 * We need the numa_cpu_lookup_table to be accurate for all CPUs,
1129 * even before we online them, so that we can use cpu_to_{node,mem}
1130 * early in boot, cf. smp_prepare_cpus().
1131 */
1132 for_each_present_cpu(cpu) {
1133 numa_setup_cpu((unsigned long)cpu);
1134 }
1135 }
1136
1137 void __init paging_init(void)
1138 {
1139 unsigned long max_zone_pfns[MAX_NR_ZONES];
1140 memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
1141 max_zone_pfns[ZONE_DMA] = memblock_end_of_DRAM() >> PAGE_SHIFT;
1142 free_area_init_nodes(max_zone_pfns);
1143 }
1144
1145 static int __init early_numa(char *p)
1146 {
1147 if (!p)
1148 return 0;
1149
1150 if (strstr(p, "off"))
1151 numa_enabled = 0;
1152
1153 if (strstr(p, "debug"))
1154 numa_debug = 1;
1155
1156 p = strstr(p, "fake=");
1157 if (p)
1158 cmdline = p + strlen("fake=");
1159
1160 return 0;
1161 }
1162 early_param("numa", early_numa);
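/*
 * Examples of the parameter handled above: "numa=off" disables NUMA,
 * "numa=debug" enables the dbg() messages, and "numa=fake=1G,3G" (values
 * hypothetical) hands the boundary list to fake_numa_create_new_node().
 */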
1163
1164 #ifdef CONFIG_MEMORY_HOTPLUG
1165 /*
1166 * Find the node associated with a hot added memory section for
1167 * memory represented in the device tree by the property
1168 * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory.
1169 */
1170 static int hot_add_drconf_scn_to_nid(struct device_node *memory,
1171 unsigned long scn_addr)
1172 {
1173 const __be32 *dm;
1174 unsigned int drconf_cell_cnt, rc;
1175 unsigned long lmb_size;
1176 struct assoc_arrays aa;
1177 int nid = -1;
1178
1179 drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
1180 if (!drconf_cell_cnt)
1181 return -1;
1182
1183 lmb_size = of_get_lmb_size(memory);
1184 if (!lmb_size)
1185 return -1;
1186
1187 rc = of_get_assoc_arrays(memory, &aa);
1188 if (rc)
1189 return -1;
1190
1191 for (; drconf_cell_cnt != 0; --drconf_cell_cnt) {
1192 struct of_drconf_cell drmem;
1193
1194 read_drconf_cell(&drmem, &dm);
1195
1196 /* skip this block if it is reserved or not assigned to
1197 * this partition */
1198 if ((drmem.flags & DRCONF_MEM_RESERVED)
1199 || !(drmem.flags & DRCONF_MEM_ASSIGNED))
1200 continue;
1201
1202 if ((scn_addr < drmem.base_addr)
1203 || (scn_addr >= (drmem.base_addr + lmb_size)))
1204 continue;
1205
1206 nid = of_drconf_to_nid_single(&drmem, &aa);
1207 break;
1208 }
1209
1210 return nid;
1211 }
1212
1213 /*
1214 * Find the node associated with a hot added memory section for memory
1215 * represented in the device tree as a node (i.e. memory@XXXX) for
1216 * each memblock.
1217 */
1218 static int hot_add_node_scn_to_nid(unsigned long scn_addr)
1219 {
1220 struct device_node *memory;
1221 int nid = -1;
1222
1223 for_each_node_by_type(memory, "memory") {
1224 unsigned long start, size;
1225 int ranges;
1226 const __be32 *memcell_buf;
1227 unsigned int len;
1228
1229 memcell_buf = of_get_property(memory, "reg", &len);
1230 if (!memcell_buf || len <= 0)
1231 continue;
1232
1233 /* ranges in cell */
1234 ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
1235
1236 while (ranges--) {
1237 start = read_n_cells(n_mem_addr_cells, &memcell_buf);
1238 size = read_n_cells(n_mem_size_cells, &memcell_buf);
1239
1240 if ((scn_addr < start) || (scn_addr >= (start + size)))
1241 continue;
1242
1243 nid = of_node_to_nid_single(memory);
1244 break;
1245 }
1246
1247 if (nid >= 0)
1248 break;
1249 }
1250
1251 of_node_put(memory);
1252
1253 return nid;
1254 }
1255
1256 /*
1257 * Find the node associated with a hot added memory section. Section
1258 * corresponds to a SPARSEMEM section, not a MEMBLOCK. It is assumed that
1259 * sections are fully contained within a single MEMBLOCK.
1260 */
1261 int hot_add_scn_to_nid(unsigned long scn_addr)
1262 {
1263 struct device_node *memory = NULL;
1264 int nid, found = 0;
1265
1266 if (!numa_enabled || (min_common_depth < 0))
1267 return first_online_node;
1268
1269 memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
1270 if (memory) {
1271 nid = hot_add_drconf_scn_to_nid(memory, scn_addr);
1272 of_node_put(memory);
1273 } else {
1274 nid = hot_add_node_scn_to_nid(scn_addr);
1275 }
1276
1277 if (nid < 0 || !node_online(nid))
1278 nid = first_online_node;
1279
1280 if (NODE_DATA(nid)->node_spanned_pages)
1281 return nid;
1282
1283 for_each_online_node(nid) {
1284 if (NODE_DATA(nid)->node_spanned_pages) {
1285 found = 1;
1286 break;
1287 }
1288 }
1289
1290 BUG_ON(!found);
1291 return nid;
1292 }
1293
1294 static u64 hot_add_drconf_memory_max(void)
1295 {
1296 struct device_node *memory = NULL;
1297 unsigned int drconf_cell_cnt = 0;
1298 u64 lmb_size = 0;
1299 const __be32 *dm = NULL;
1300
1301 memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
1302 if (memory) {
1303 drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
1304 lmb_size = of_get_lmb_size(memory);
1305 of_node_put(memory);
1306 }
1307 return lmb_size * drconf_cell_cnt;
1308 }
1309
1310 /*
1311 * memory_hotplug_max - return max address of memory that may be added
1312 *
1313 * This is currently only used on systems that support drconfig memory
1314 * hotplug.
1315 */
1316 u64 memory_hotplug_max(void)
1317 {
1318 return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM());
1319 }
1320 #endif /* CONFIG_MEMORY_HOTPLUG */
1321
1322 /* Virtual Processor Home Node (VPHN) support */
1323 #ifdef CONFIG_PPC_SPLPAR
1324 struct topology_update_data {
1325 struct topology_update_data *next;
1326 unsigned int cpu;
1327 int old_nid;
1328 int new_nid;
1329 };
1330
1331 static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS];
1332 static cpumask_t cpu_associativity_changes_mask;
1333 static int vphn_enabled;
1334 static int prrn_enabled;
1335 static void reset_topology_timer(void);
1336
1337 /*
1338 * Store the current values of the associativity change counters
1339 * provided by the hypervisor in each cpu's VPA.
1340 */
1341 static void setup_cpu_associativity_change_counters(void)
1342 {
1343 int cpu;
1344
1345 /* The VPHN feature supports a maximum of 8 reference points */
1346 BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8);
1347
1348 for_each_possible_cpu(cpu) {
1349 int i;
1350 u8 *counts = vphn_cpu_change_counts[cpu];
1351 volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;
1352
1353 for (i = 0; i < distance_ref_points_depth; i++)
1354 counts[i] = hypervisor_counts[i];
1355 }
1356 }
1357
1358 /*
1359 * The hypervisor maintains a set of 8 associativity change counters in
1360 * the VPA of each cpu that correspond to the associativity levels in the
1361 * ibm,associativity-reference-points property. When an associativity
1362 * level changes, the corresponding counter is incremented.
1363 *
1364 * Set a bit in cpu_associativity_changes_mask for each cpu whose home
1365 * node associativity levels have changed.
1366 *
1367 * Returns the number of cpus with unhandled associativity changes.
1368 */
1369 static int update_cpu_associativity_changes_mask(void)
1370 {
1371 int cpu;
1372 cpumask_t *changes = &cpu_associativity_changes_mask;
1373
1374 for_each_possible_cpu(cpu) {
1375 int i, changed = 0;
1376 u8 *counts = vphn_cpu_change_counts[cpu];
1377 volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;
1378
1379 for (i = 0; i < distance_ref_points_depth; i++) {
1380 if (hypervisor_counts[i] != counts[i]) {
1381 counts[i] = hypervisor_counts[i];
1382 changed = 1;
1383 }
1384 }
1385 if (changed) {
1386 cpumask_or(changes, changes, cpu_sibling_mask(cpu));
1387 cpu = cpu_last_thread_sibling(cpu);
1388 }
1389 }
1390
1391 return cpumask_weight(changes);
1392 }
1393
1394 /*
1395 * 6 64-bit registers unpacked into 12 32-bit associativity values. To form
1396 * the complete property we have to add the length in the first cell.
1397 */
1398 #define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32) + 1)
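/*
 * That works out to 6 * 8 / 4 + 1 = 13 cells: twelve unpacked 32-bit
 * associativity values plus the leading length cell.
 */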
1399
1400 /*
1401 * Convert the associativity domain numbers returned from the hypervisor
1402 * to the sequence they would appear in the ibm,associativity property.
1403 */
1404 static int vphn_unpack_associativity(const long *packed, __be32 *unpacked)
1405 {
1406 int i, nr_assoc_doms = 0;
1407 const __be16 *field = (const __be16 *) packed;
1408
1409 #define VPHN_FIELD_UNUSED (0xffff)
1410 #define VPHN_FIELD_MSB (0x8000)
1411 #define VPHN_FIELD_MASK (~VPHN_FIELD_MSB)
1412
1413 for (i = 1; i < VPHN_ASSOC_BUFSIZE; i++) {
1414 if (be16_to_cpup(field) == VPHN_FIELD_UNUSED) {
1415 /* All significant fields processed, and remaining
1416 * fields contain the reserved value of all 1's.
1417 * Just store them.
1418 */
1419 unpacked[i] = *((__be32 *)field);
1420 field += 2;
1421 } else if (be16_to_cpup(field) & VPHN_FIELD_MSB) {
1422 /* Data is in the lower 15 bits of this field */
1423 unpacked[i] = cpu_to_be32(
1424 be16_to_cpup(field) & VPHN_FIELD_MASK);
1425 field++;
1426 nr_assoc_doms++;
1427 } else {
1428 /* Data is in the lower 15 bits of this field
1429 * concatenated with the next 16 bit field
1430 */
1431 unpacked[i] = *((__be32 *)field);
1432 field += 2;
1433 nr_assoc_doms++;
1434 }
1435 }
1436
1437 /* The first cell contains the length of the property */
1438 unpacked[0] = cpu_to_be32(nr_assoc_doms);
1439
1440 return nr_assoc_doms;
1441 }
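/*
 * Sketch of the packed encoding handled above (values hypothetical): a
 * 16-bit field of 0x8002 has the MSB set, so it unpacks to the single
 * 32-bit value 2; a field of 0xffff marks the remaining fields as unused;
 * any other field is combined with the following 16-bit field to form one
 * full 32-bit value.
 */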
1442
1443 /*
1444 * Retrieve the new associativity information for a virtual processor's
1445 * home node.
1446 */
1447 static long hcall_vphn(unsigned long cpu, __be32 *associativity)
1448 {
1449 long rc;
1450 long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
1451 u64 flags = 1;
1452 int hwcpu = get_hard_smp_processor_id(cpu);
1453
1454 rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu);
1455 vphn_unpack_associativity(retbuf, associativity);
1456
1457 return rc;
1458 }
1459
1460 static long vphn_get_associativity(unsigned long cpu,
1461 __be32 *associativity)
1462 {
1463 long rc;
1464
1465 rc = hcall_vphn(cpu, associativity);
1466
1467 switch (rc) {
1468 case H_FUNCTION:
1469 printk(KERN_INFO
1470 "VPHN is not supported. Disabling polling...\n");
1471 stop_topology_update();
1472 break;
1473 case H_HARDWARE:
1474 printk(KERN_ERR
1475 "hcall_vphn() experienced a hardware fault "
1476 "preventing VPHN. Disabling polling...\n");
1477 stop_topology_update();
1478 }
1479
1480 return rc;
1481 }
1482
1483 /*
1484 * Update the CPU maps and sysfs entries for a single CPU when its NUMA
1485 * characteristics change. This function doesn't perform any locking and is
1486 * only safe to call from stop_machine().
1487 */
1488 static int update_cpu_topology(void *data)
1489 {
1490 struct topology_update_data *update;
1491 unsigned long cpu;
1492
1493 if (!data)
1494 return -EINVAL;
1495
1496 cpu = smp_processor_id();
1497
1498 for (update = data; update; update = update->next) {
1499 if (cpu != update->cpu)
1500 continue;
1501
1502 unmap_cpu_from_node(update->cpu);
1503 map_cpu_to_node(update->cpu, update->new_nid);
1504 vdso_getcpu_init();
1505 }
1506
1507 return 0;
1508 }
1509
1510 static int update_lookup_table(void *data)
1511 {
1512 struct topology_update_data *update;
1513
1514 if (!data)
1515 return -EINVAL;
1516
1517 /*
1518 * Upon topology update, the numa-cpu lookup table needs to be updated
1519 * for all threads in the core, including offline CPUs, to ensure that
1520 * future hotplug operations respect the cpu-to-node associativity
1521 * properly.
1522 */
1523 for (update = data; update; update = update->next) {
1524 int nid, base, j;
1525
1526 nid = update->new_nid;
1527 base = cpu_first_thread_sibling(update->cpu);
1528
1529 for (j = 0; j < threads_per_core; j++) {
1530 update_numa_cpu_lookup_table(base + j, nid);
1531 }
1532 }
1533
1534 return 0;
1535 }
1536
1537 /*
1538 * Update the node maps and sysfs entries for each cpu whose home node
1539 * has changed. Returns 1 when the topology has changed, and 0 otherwise.
1540 */
1541 int arch_update_cpu_topology(void)
1542 {
1543 unsigned int cpu, sibling, changed = 0;
1544 struct topology_update_data *updates, *ud;
1545 __be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
1546 cpumask_t updated_cpus;
1547 struct device *dev;
1548 int weight, new_nid, i = 0;
1549
1550 weight = cpumask_weight(&cpu_associativity_changes_mask);
1551 if (!weight)
1552 return 0;
1553
1554 updates = kzalloc(weight * (sizeof(*updates)), GFP_KERNEL);
1555 if (!updates)
1556 return 0;
1557
1558 cpumask_clear(&updated_cpus);
1559
1560 for_each_cpu(cpu, &cpu_associativity_changes_mask) {
1561 /*
1562 * If siblings aren't flagged for changes, updates list
1563 * will be too short. Skip on this update and set for next
1564 * update.
1565 */
1566 if (!cpumask_subset(cpu_sibling_mask(cpu),
1567 &cpu_associativity_changes_mask)) {
1568 pr_info("Sibling bits not set for associativity "
1569 "change, cpu%d\n", cpu);
1570 cpumask_or(&cpu_associativity_changes_mask,
1571 &cpu_associativity_changes_mask,
1572 cpu_sibling_mask(cpu));
1573 cpu = cpu_last_thread_sibling(cpu);
1574 continue;
1575 }
1576
1577 /* Use associativity from first thread for all siblings */
1578 vphn_get_associativity(cpu, associativity);
1579 new_nid = associativity_to_nid(associativity);
1580 if (new_nid < 0 || !node_online(new_nid))
1581 new_nid = first_online_node;
1582
1583 if (new_nid == numa_cpu_lookup_table[cpu]) {
1584 cpumask_andnot(&cpu_associativity_changes_mask,
1585 &cpu_associativity_changes_mask,
1586 cpu_sibling_mask(cpu));
1587 cpu = cpu_last_thread_sibling(cpu);
1588 continue;
1589 }
1590
1591 for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
1592 ud = &updates[i++];
1593 ud->cpu = sibling;
1594 ud->new_nid = new_nid;
1595 ud->old_nid = numa_cpu_lookup_table[sibling];
1596 cpumask_set_cpu(sibling, &updated_cpus);
1597 if (i < weight)
1598 ud->next = &updates[i];
1599 }
1600 cpu = cpu_last_thread_sibling(cpu);
1601 }
1602
1603 /*
1604 * In cases where we have nothing to update (because the updates list
1605 * is too short or because the new topology is same as the old one),
1606 * skip invoking update_cpu_topology() via stop-machine(). This is
1607 * necessary (and not just a fast-path optimization) since stop-machine
1608 * can end up electing a random CPU to run update_cpu_topology(), and
1609 * thus trick us into setting up incorrect cpu-node mappings (since
1610 * 'updates' is kzalloc()'ed).
1611 *
1612 * For the same reason, we also skip all of the updates below.
1613 */
1614 if (!cpumask_weight(&updated_cpus))
1615 goto out;
1616
1617 stop_machine(update_cpu_topology, &updates[0], &updated_cpus);
1618
1619 /*
1620 * Update the numa-cpu lookup table with the new mappings, even for
1621 * offline CPUs. It is best to perform this update from the stop-
1622 * machine context.
1623 */
1624 stop_machine(update_lookup_table, &updates[0],
1625 cpumask_of(raw_smp_processor_id()));
1626
1627 for (ud = &updates[0]; ud; ud = ud->next) {
1628 unregister_cpu_under_node(ud->cpu, ud->old_nid);
1629 register_cpu_under_node(ud->cpu, ud->new_nid);
1630
1631 dev = get_cpu_device(ud->cpu);
1632 if (dev)
1633 kobject_uevent(&dev->kobj, KOBJ_CHANGE);
1634 cpumask_clear_cpu(ud->cpu, &cpu_associativity_changes_mask);
1635 changed = 1;
1636 }
1637
1638 out:
1639 kfree(updates);
1640 return changed;
1641 }
1642
1643 static void topology_work_fn(struct work_struct *work)
1644 {
1645 rebuild_sched_domains();
1646 }
1647 static DECLARE_WORK(topology_work, topology_work_fn);
1648
1649 static void topology_schedule_update(void)
1650 {
1651 schedule_work(&topology_work);
1652 }
1653
1654 static void topology_timer_fn(unsigned long ignored)
1655 {
1656 if (prrn_enabled && cpumask_weight(&cpu_associativity_changes_mask))
1657 topology_schedule_update();
1658 else if (vphn_enabled) {
1659 if (update_cpu_associativity_changes_mask() > 0)
1660 topology_schedule_update();
1661 reset_topology_timer();
1662 }
1663 }
1664 static struct timer_list topology_timer =
1665 TIMER_INITIALIZER(topology_timer_fn, 0, 0);
1666
1667 static void reset_topology_timer(void)
1668 {
1669 topology_timer.data = 0;
1670 topology_timer.expires = jiffies + 60 * HZ;
1671 mod_timer(&topology_timer, topology_timer.expires);
1672 }
1673
1674 #ifdef CONFIG_SMP
1675
1676 static void stage_topology_update(int core_id)
1677 {
1678 cpumask_or(&cpu_associativity_changes_mask,
1679 &cpu_associativity_changes_mask, cpu_sibling_mask(core_id));
1680 reset_topology_timer();
1681 }
1682
1683 static int dt_update_callback(struct notifier_block *nb,
1684 unsigned long action, void *data)
1685 {
1686 struct of_prop_reconfig *update;
1687 int rc = NOTIFY_DONE;
1688
1689 switch (action) {
1690 case OF_RECONFIG_UPDATE_PROPERTY:
1691 update = (struct of_prop_reconfig *)data;
1692 if (!of_prop_cmp(update->dn->type, "cpu") &&
1693 !of_prop_cmp(update->prop->name, "ibm,associativity")) {
1694 u32 core_id;
1695 of_property_read_u32(update->dn, "reg", &core_id);
1696 stage_topology_update(core_id);
1697 rc = NOTIFY_OK;
1698 }
1699 break;
1700 }
1701
1702 return rc;
1703 }
1704
1705 static struct notifier_block dt_update_nb = {
1706 .notifier_call = dt_update_callback,
1707 };
1708
1709 #endif
1710
1711 /*
1712 * Start polling for associativity changes.
1713 */
1714 int start_topology_update(void)
1715 {
1716 int rc = 0;
1717
1718 if (firmware_has_feature(FW_FEATURE_PRRN)) {
1719 if (!prrn_enabled) {
1720 prrn_enabled = 1;
1721 vphn_enabled = 0;
1722 #ifdef CONFIG_SMP
1723 rc = of_reconfig_notifier_register(&dt_update_nb);
1724 #endif
1725 }
1726 } else if (firmware_has_feature(FW_FEATURE_VPHN) &&
1727 lppaca_shared_proc(get_lppaca())) {
1728 if (!vphn_enabled) {
1729 prrn_enabled = 0;
1730 vphn_enabled = 1;
1731 setup_cpu_associativity_change_counters();
1732 init_timer_deferrable(&topology_timer);
1733 reset_topology_timer();
1734 }
1735 }
1736
1737 return rc;
1738 }
1739
1740 /*
1741 * Disable polling for VPHN associativity changes.
1742 */
1743 int stop_topology_update(void)
1744 {
1745 int rc = 0;
1746
1747 if (prrn_enabled) {
1748 prrn_enabled = 0;
1749 #ifdef CONFIG_SMP
1750 rc = of_reconfig_notifier_unregister(&dt_update_nb);
1751 #endif
1752 } else if (vphn_enabled) {
1753 vphn_enabled = 0;
1754 rc = del_timer_sync(&topology_timer);
1755 }
1756
1757 return rc;
1758 }
1759
1760 int prrn_is_enabled(void)
1761 {
1762 return prrn_enabled;
1763 }
1764
1765 static int topology_read(struct seq_file *file, void *v)
1766 {
1767 if (vphn_enabled || prrn_enabled)
1768 seq_puts(file, "on\n");
1769 else
1770 seq_puts(file, "off\n");
1771
1772 return 0;
1773 }
1774
1775 static int topology_open(struct inode *inode, struct file *file)
1776 {
1777 return single_open(file, topology_read, NULL);
1778 }
1779
1780 static ssize_t topology_write(struct file *file, const char __user *buf,
1781 size_t count, loff_t *off)
1782 {
1783 char kbuf[4]; /* "on" or "off" plus null. */
1784 int read_len;
1785
1786 read_len = count < 3 ? count : 3;
1787 if (copy_from_user(kbuf, buf, read_len))
1788 return -EINVAL;
1789
1790 kbuf[read_len] = '\0';
1791
1792 if (!strncmp(kbuf, "on", 2))
1793 start_topology_update();
1794 else if (!strncmp(kbuf, "off", 3))
1795 stop_topology_update();
1796 else
1797 return -EINVAL;
1798
1799 return count;
1800 }
1801
1802 static const struct file_operations topology_ops = {
1803 .read = seq_read,
1804 .write = topology_write,
1805 .open = topology_open,
1806 .release = single_release
1807 };
1808
1809 static int topology_update_init(void)
1810 {
1811 start_topology_update();
1812 proc_create("powerpc/topology_updates", 0644, NULL, &topology_ops);
1813
1814 return 0;
1815 }
1816 device_initcall(topology_update_init);
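/*
 * The proc file registered above can be used to toggle polling at run time,
 * e.g. "echo off > /proc/powerpc/topology_updates" calls
 * stop_topology_update(), and "echo on" re-enables it.
 */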
1817 #endif /* CONFIG_PPC_SPLPAR */