/*
 * arch/arm64/kernel/topology.c
 *
 * Copyright (C) 2011,2013,2014 Linaro Limited.
 *
 * Based on the arm32 version written by Vincent Guittot in turn based on
 * arch/sh/kernel/topology.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/node.h>
#include <linux/nodemask.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/cpufreq.h>

#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/topology.h>

static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
static DEFINE_MUTEX(cpu_scale_mutex);

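/*
 * Scheduler hook: return the current capacity of @cpu, in units of
 * SCHED_CAPACITY_SCALE.
 */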
unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
{
	return per_cpu(cpu_scale, cpu);
}

static void set_capacity_scale(unsigned int cpu, unsigned long capacity)
{
	per_cpu(cpu_scale, cpu) = capacity;
}

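/*
 * sysfs interface: expose cpu_capacity under each CPU device
 * (/sys/devices/system/cpu/cpuN/cpu_capacity). A write applies the
 * new capacity to the target CPU and all of its core siblings.
 */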
static ssize_t cpu_capacity_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);

	return sprintf(buf, "%lu\n",
		       arch_scale_cpu_capacity(NULL, cpu->dev.id));
}

static ssize_t cpu_capacity_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t count)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	int this_cpu = cpu->dev.id, i;
	unsigned long new_capacity;
	ssize_t ret;

	if (count) {
		ret = kstrtoul(buf, 0, &new_capacity);
		if (ret)
			return ret;
		if (new_capacity > SCHED_CAPACITY_SCALE)
			return -EINVAL;

		mutex_lock(&cpu_scale_mutex);
		for_each_cpu(i, &cpu_topology[this_cpu].core_sibling)
			set_capacity_scale(i, new_capacity);
		mutex_unlock(&cpu_scale_mutex);
	}

	return count;
}

static DEVICE_ATTR_RW(cpu_capacity);

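/* Create the cpu_capacity attribute for every possible CPU device. */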
static int register_cpu_capacity_sysctl(void)
{
	int i;
	struct device *cpu;

	for_each_possible_cpu(i) {
		cpu = get_cpu_device(i);
		if (!cpu) {
			pr_err("%s: too early to get CPU%d device!\n",
			       __func__, i);
			continue;
		}
		device_create_file(cpu, &dev_attr_cpu_capacity);
	}

	return 0;
}
subsys_initcall(register_cpu_capacity_sysctl);

static u32 capacity_scale;
static u32 *raw_capacity;
static bool cap_parsing_failed;

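/*
 * Read the optional "capacity-dmips-mhz" property of a CPU node into
 * raw_capacity[]. If any CPU lacks the property, the partial data is
 * discarded and every CPU keeps the default capacity.
 */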
static void __init parse_cpu_capacity(struct device_node *cpu_node, int cpu)
{
	int ret;
	u32 cpu_capacity;

	if (cap_parsing_failed)
		return;

	ret = of_property_read_u32(cpu_node,
				   "capacity-dmips-mhz",
				   &cpu_capacity);
	if (!ret) {
		if (!raw_capacity) {
			raw_capacity = kcalloc(num_possible_cpus(),
					       sizeof(*raw_capacity),
					       GFP_KERNEL);
			if (!raw_capacity) {
				pr_err("cpu_capacity: failed to allocate memory for raw capacities\n");
				cap_parsing_failed = true;
				return;
			}
		}
		capacity_scale = max(cpu_capacity, capacity_scale);
		raw_capacity[cpu] = cpu_capacity;
		pr_debug("cpu_capacity: %s cpu_capacity=%u (raw)\n",
			 cpu_node->full_name, raw_capacity[cpu]);
	} else {
		if (raw_capacity) {
			pr_err("cpu_capacity: missing %s raw capacity\n",
			       cpu_node->full_name);
			pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
		}
		cap_parsing_failed = true;
		kfree(raw_capacity);
	}
}

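/*
 * Normalize the raw capacities so that the most capable CPU ends up
 * at exactly SCHED_CAPACITY_SCALE:
 *
 *	cpu_scale = (raw_capacity << SCHED_CAPACITY_SHIFT) / capacity_scale
 */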
static void normalize_cpu_capacity(void)
{
	u64 capacity;
	int cpu;

	if (!raw_capacity || cap_parsing_failed)
		return;

	pr_debug("cpu_capacity: capacity_scale=%u\n", capacity_scale);
	mutex_lock(&cpu_scale_mutex);
	for_each_possible_cpu(cpu) {
		pr_debug("cpu_capacity: cpu=%d raw_capacity=%u\n",
			 cpu, raw_capacity[cpu]);
		capacity = (raw_capacity[cpu] << SCHED_CAPACITY_SHIFT)
			/ capacity_scale;
		set_capacity_scale(cpu, capacity);
		pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
			 cpu, arch_scale_cpu_capacity(NULL, cpu));
	}
	mutex_unlock(&cpu_scale_mutex);
}

#ifdef CONFIG_CPU_FREQ
static cpumask_var_t cpus_to_visit;
static bool cap_parsing_done;
static void parsing_done_workfn(struct work_struct *work);
static DECLARE_WORK(parsing_done_work, parsing_done_workfn);

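/*
 * cpufreq policy notifier: when a policy becomes known for a set of
 * CPUs, weight their capacities by the maximum frequency so that
 * capacity reflects peak achievable performance. Once every possible
 * CPU has been visited, re-normalize and unregister the notifier.
 */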
static int
init_cpu_capacity_callback(struct notifier_block *nb,
			   unsigned long val,
			   void *data)
{
	struct cpufreq_policy *policy = data;
	int cpu;

	if (cap_parsing_failed || cap_parsing_done)
		return 0;

	switch (val) {
	case CPUFREQ_NOTIFY:
		pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
			 cpumask_pr_args(policy->related_cpus),
			 cpumask_pr_args(cpus_to_visit));
		cpumask_andnot(cpus_to_visit,
			       cpus_to_visit,
			       policy->related_cpus);
		for_each_cpu(cpu, policy->related_cpus) {
			raw_capacity[cpu] = arch_scale_cpu_capacity(NULL, cpu) *
					    policy->cpuinfo.max_freq / 1000UL;
			capacity_scale = max(raw_capacity[cpu], capacity_scale);
		}
		if (cpumask_empty(cpus_to_visit)) {
			normalize_cpu_capacity();
			kfree(raw_capacity);
			pr_debug("cpu_capacity: parsing done\n");
			cap_parsing_done = true;
			schedule_work(&parsing_done_work);
		}
	}
	return 0;
}

static struct notifier_block init_cpu_capacity_notifier = {
	.notifier_call = init_cpu_capacity_callback,
};

static int __init register_cpufreq_notifier(void)
{
	/*
	 * on ACPI-based systems we need to use the default cpu capacity
	 * until we have the necessary code to parse the cpu capacity, so
	 * skip registering cpufreq notifier.
	 */
	if (!acpi_disabled || cap_parsing_failed)
		return -EINVAL;

	if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL)) {
		pr_err("cpu_capacity: failed to allocate memory for cpus_to_visit\n");
		return -ENOMEM;
	}
	cpumask_copy(cpus_to_visit, cpu_possible_mask);

	return cpufreq_register_notifier(&init_cpu_capacity_notifier,
					 CPUFREQ_POLICY_NOTIFIER);
}
core_initcall(register_cpufreq_notifier);

static void parsing_done_workfn(struct work_struct *work)
{
	cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
				    CPUFREQ_POLICY_NOTIFIER);
}

#else
static int __init free_raw_capacity(void)
{
	kfree(raw_capacity);

	return 0;
}
core_initcall(free_raw_capacity);
#endif

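/*
 * Resolve a cpu-map node to a logical CPU number via its "cpu"
 * phandle, parsing the CPU's capacity along the way. Returns -1 if
 * the node does not match any possible CPU.
 */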
static int __init get_cpu_for_node(struct device_node *node)
{
	struct device_node *cpu_node;
	int cpu;

	cpu_node = of_parse_phandle(node, "cpu", 0);
	if (!cpu_node)
		return -1;

	for_each_possible_cpu(cpu) {
		if (of_get_cpu_node(cpu, NULL) == cpu_node) {
			parse_cpu_capacity(cpu_node, cpu);
			of_node_put(cpu_node);
			return cpu;
		}
	}

	pr_crit("Unable to find CPU node for %s\n", cpu_node->full_name);

	of_node_put(cpu_node);
	return -1;
}

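/*
 * Parse a coreN node: either a leaf describing a single CPU, or a
 * container of threadN nodes on SMT systems.
 */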
static int __init parse_core(struct device_node *core, int cluster_id,
			     int core_id)
{
	char name[10];
	bool leaf = true;
	int i = 0;
	int cpu;
	struct device_node *t;

	do {
		snprintf(name, sizeof(name), "thread%d", i);
		t = of_get_child_by_name(core, name);
		if (t) {
			leaf = false;
			cpu = get_cpu_for_node(t);
			if (cpu >= 0) {
				cpu_topology[cpu].cluster_id = cluster_id;
				cpu_topology[cpu].core_id = core_id;
				cpu_topology[cpu].thread_id = i;
			} else {
				pr_err("%s: Can't get CPU for thread\n",
				       t->full_name);
				of_node_put(t);
				return -EINVAL;
			}
			of_node_put(t);
		}
		i++;
	} while (t);

	cpu = get_cpu_for_node(core);
	if (cpu >= 0) {
		if (!leaf) {
			pr_err("%s: Core has both threads and CPU\n",
			       core->full_name);
			return -EINVAL;
		}

		cpu_topology[cpu].cluster_id = cluster_id;
		cpu_topology[cpu].core_id = core_id;
	} else if (leaf) {
		pr_err("%s: Can't get CPU for leaf core\n", core->full_name);
		return -EINVAL;
	}

	return 0;
}

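/*
 * Parse one level of the cpu-map hierarchy: a cluster may contain
 * child clusters and/or leaf cores. For example, a two-cluster
 * big.LITTLE system is typically described as:
 *
 *	cpu-map {
 *		cluster0 {
 *			core0 { cpu = <&CPU0>; };
 *			core1 { cpu = <&CPU1>; };
 *		};
 *		cluster1 {
 *			core0 { cpu = <&CPU2>; };
 *			core1 { cpu = <&CPU3>; };
 *		};
 *	};
 */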
static int __init parse_cluster(struct device_node *cluster, int depth)
{
	char name[10];
	bool leaf = true;
	bool has_cores = false;
	struct device_node *c;
	static int cluster_id __initdata;
	int core_id = 0;
	int i, ret;

	/*
	 * First check for child clusters; we currently ignore any
	 * information about the nesting of clusters and present the
	 * scheduler with a flat list of them.
	 */
	i = 0;
	do {
		snprintf(name, sizeof(name), "cluster%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			leaf = false;
			ret = parse_cluster(c, depth + 1);
			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	/* Now check for cores */
	i = 0;
	do {
		snprintf(name, sizeof(name), "core%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			has_cores = true;

			if (depth == 0) {
				pr_err("%s: cpu-map children should be clusters\n",
				       c->full_name);
				of_node_put(c);
				return -EINVAL;
			}

			if (leaf) {
				ret = parse_core(c, cluster_id, core_id++);
			} else {
				pr_err("%s: Non-leaf cluster with core %s\n",
				       cluster->full_name, name);
				ret = -EINVAL;
			}

			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	if (leaf && !has_cores)
		pr_warn("%s: empty cluster\n", cluster->full_name);

	if (leaf)
		cluster_id++;

	return 0;
}

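/*
 * Build the topology from the /cpus/cpu-map device tree node, if one
 * is present; otherwise the topology is later derived from MPIDR in
 * store_cpu_topology().
 */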
static int __init parse_dt_topology(void)
{
	struct device_node *cn, *map;
	int ret = 0;
	int cpu;

	cn = of_find_node_by_path("/cpus");
	if (!cn) {
		pr_err("No CPU information found in DT\n");
		return 0;
	}

	/*
	 * When topology is provided cpu-map is essentially a root
	 * cluster with restricted subnodes.
	 */
	map = of_get_child_by_name(cn, "cpu-map");
	if (!map) {
		cap_parsing_failed = true;
		goto out;
	}

	ret = parse_cluster(map, 0);
	if (ret != 0)
		goto out_map;

	normalize_cpu_capacity();

	/*
	 * Check that all cores are in the topology; the SMP code will
	 * only mark cores described in the DT as possible.
	 */
	for_each_possible_cpu(cpu)
		if (cpu_topology[cpu].cluster_id == -1)
			ret = -EINVAL;

out_map:
	of_node_put(map);
out:
	of_node_put(cn);
	return ret;
}

/*
 * cpu topology table
 */
struct cpu_topology cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

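/* All CPUs sharing a cluster with @cpu. */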
const struct cpumask *cpu_coregroup_mask(int cpu)
{
	return &cpu_topology[cpu].core_sibling;
}

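/*
 * Propagate @cpuid into the core and thread sibling masks of every
 * CPU in the same cluster (and core).
 */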
static void update_siblings_masks(unsigned int cpuid)
{
	struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
	int cpu;

	/* update core and thread sibling masks */
	for_each_possible_cpu(cpu) {
		cpu_topo = &cpu_topology[cpu];

		if (cpuid_topo->cluster_id != cpu_topo->cluster_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
		if (cpu != cpuid)
			cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

		if (cpuid_topo->core_id != cpu_topo->core_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
		if (cpu != cpuid)
			cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
	}
}

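/*
 * Record the topology of @cpuid, deriving it from the MPIDR affinity
 * fields unless the device tree has already populated it.
 */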
void store_cpu_topology(unsigned int cpuid)
{
	struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
	u64 mpidr;

	if (cpuid_topo->cluster_id != -1)
		goto topology_populated;

	mpidr = read_cpuid_mpidr();

	/* Uniprocessor systems can rely on default topology values */
	if (mpidr & MPIDR_UP_BITMASK)
		return;

	/* Create cpu topology mapping based on MPIDR. */
	if (mpidr & MPIDR_MT_BITMASK) {
		/* Multiprocessor system : Multi-threads per core */
		cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
		cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
		cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 2) |
					 MPIDR_AFFINITY_LEVEL(mpidr, 3) << 8;
	} else {
		/* Multiprocessor system : Single-thread per core */
		cpuid_topo->thread_id = -1;
		cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
		cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 1) |
					 MPIDR_AFFINITY_LEVEL(mpidr, 2) << 8 |
					 MPIDR_AFFINITY_LEVEL(mpidr, 3) << 16;
	}

	pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n",
		 cpuid, cpuid_topo->cluster_id, cpuid_topo->core_id,
		 cpuid_topo->thread_id, mpidr);

topology_populated:
	update_siblings_masks(cpuid);
}

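/* Reset every possible CPU to the default single-threaded topology. */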
static void __init reset_cpu_topology(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_topology *cpu_topo = &cpu_topology[cpu];

		cpu_topo->thread_id = -1;
		cpu_topo->core_id = 0;
		cpu_topo->cluster_id = -1;

		cpumask_clear(&cpu_topo->core_sibling);
		cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
		cpumask_clear(&cpu_topo->thread_sibling);
		cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
	}
}

void __init init_cpu_topology(void)
{
	reset_cpu_topology();

	/*
	 * Discard anything that was parsed if we hit an error so we
	 * don't use partial information.
	 */
	if (of_have_populated_dt() && parse_dt_topology())
		reset_cpu_topology();
}
530 | } |