/*
 * arch/arm/kernel/topology.c
 *
 * Copyright (C) 2011 Linaro Limited.
 * Written by: Vincent Guittot
 *
 * based on arch/sh/kernel/topology.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/node.h>
#include <linux/nodemask.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/topology.h>

/*
 * cpu capacity scale management
 */

/*
 * cpu capacity table
 * This per-cpu data structure describes the relative capacity of each core.
 * On a heterogeneous system, cores don't have the same computation capacity
 * and we reflect that difference in the cpu_capacity field so the scheduler
 * can take this difference into account during load balancing. A per-cpu
 * structure is preferred because each CPU updates its own cpu_capacity field
 * during load balancing, except for idle cores. One idle core is selected
 * to run rebalance_domains for all idle cores, and the cpu_capacity can be
 * updated during this sequence.
 */
static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
static DEFINE_MUTEX(cpu_scale_mutex);

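/*
 * arch_scale_cpu_capacity() is the architecture hook the scheduler uses to
 * read a CPU's relative capacity during load tracking and balancing; here
 * it simply reports the per-cpu cpu_scale value maintained in this file.
 */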
unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
{
        return per_cpu(cpu_scale, cpu);
}

static void set_capacity_scale(unsigned int cpu, unsigned long capacity)
{
        per_cpu(cpu_scale, cpu) = capacity;
}

#ifdef CONFIG_PROC_SYSCTL
static ssize_t cpu_capacity_show(struct device *dev,
                                 struct device_attribute *attr,
                                 char *buf)
{
        struct cpu *cpu = container_of(dev, struct cpu, dev);

        return sprintf(buf, "%lu\n",
                       arch_scale_cpu_capacity(NULL, cpu->dev.id));
}

static ssize_t cpu_capacity_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf,
                                  size_t count)
{
        struct cpu *cpu = container_of(dev, struct cpu, dev);
        int this_cpu = cpu->dev.id, i;
        unsigned long new_capacity;
        ssize_t ret;

        if (count) {
                ret = kstrtoul(buf, 0, &new_capacity);
                if (ret)
                        return ret;
                if (new_capacity > SCHED_CAPACITY_SCALE)
                        return -EINVAL;

                mutex_lock(&cpu_scale_mutex);
                for_each_cpu(i, &cpu_topology[this_cpu].core_sibling)
                        set_capacity_scale(i, new_capacity);
                mutex_unlock(&cpu_scale_mutex);
        }

        return count;
}

static DEVICE_ATTR_RW(cpu_capacity);
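/*
 * The attribute above is exposed as /sys/devices/system/cpu/cpuN/cpu_capacity
 * once register_cpu_capacity_sysctl() below has run. An illustrative shell
 * session (paths as created by this file) might be:
 *
 *   cat /sys/devices/system/cpu/cpu0/cpu_capacity
 *   echo 512 > /sys/devices/system/cpu/cpu0/cpu_capacity
 *
 * Writes above SCHED_CAPACITY_SCALE (1024) are rejected with -EINVAL, and an
 * accepted value is applied to all core siblings of the written CPU under
 * cpu_scale_mutex.
 */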

static int register_cpu_capacity_sysctl(void)
{
        int i;
        struct device *cpu;

        for_each_possible_cpu(i) {
                cpu = get_cpu_device(i);
                if (!cpu) {
                        pr_err("%s: too early to get CPU%d device!\n",
                               __func__, i);
                        continue;
                }
                device_create_file(cpu, &dev_attr_cpu_capacity);
        }

        return 0;
}
subsys_initcall(register_cpu_capacity_sysctl);
#endif

#ifdef CONFIG_OF
struct cpu_efficiency {
        const char *compatible;
        unsigned long efficiency;
};

/*
 * Table of the relative efficiency of each processor.
 * The efficiency value must fit in 20 bits and the final
 * cpu_scale value must be in the range
 *   0 < cpu_scale < 3*SCHED_CAPACITY_SCALE/2
 * in order to return at most 1 when DIV_ROUND_CLOSEST
 * is used to compute the capacity of a CPU.
 * Processors that are not defined in the table use the
 * default SCHED_CAPACITY_SCALE value for cpu_scale.
 */
static const struct cpu_efficiency table_efficiency[] = {
        {"arm,cortex-a15", 3891},
        {"arm,cortex-a7",  2048},
        {NULL, },
};
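
/*
 * The values above roughly track the published DMIPS/MHz figures of the two
 * cores (a Cortex-A15 is commonly quoted at about 1.9x the per-MHz
 * throughput of a Cortex-A7, matching 3891/2048); only their ratio matters,
 * since update_cpu_capacity() divides by the middle_capacity factor that
 * parse_dt_topology() derives from the same numbers.
 */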

static unsigned long *__cpu_capacity;
#define cpu_capacity(cpu)       __cpu_capacity[cpu]

static unsigned long middle_capacity = 1;
static bool cap_from_dt = true;
static u32 *raw_capacity;
static bool cap_parsing_failed;
static u32 capacity_scale;

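/*
 * Note on the return convention below: of_property_read_u32() returns 0 on
 * success, so "return !ret" yields 1 when a capacity-dmips-mhz value was
 * found (including the allocation-failure path, which sets
 * cap_parsing_failed instead) and 0 when the property is missing or parsing
 * has already failed.
 */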
static int __init parse_cpu_capacity(struct device_node *cpu_node, int cpu)
{
        int ret = 1;
        u32 cpu_capacity;

        if (cap_parsing_failed)
                return !ret;

        ret = of_property_read_u32(cpu_node,
                                   "capacity-dmips-mhz",
                                   &cpu_capacity);
        if (!ret) {
                if (!raw_capacity) {
                        raw_capacity = kcalloc(num_possible_cpus(),
                                               sizeof(*raw_capacity),
                                               GFP_KERNEL);
                        if (!raw_capacity) {
                                pr_err("cpu_capacity: failed to allocate memory for raw capacities\n");
                                cap_parsing_failed = true;
                                return !ret;
                        }
                }
                capacity_scale = max(cpu_capacity, capacity_scale);
                raw_capacity[cpu] = cpu_capacity;
                pr_debug("cpu_capacity: %s cpu_capacity=%u (raw)\n",
                         cpu_node->full_name, raw_capacity[cpu]);
        } else {
                if (raw_capacity) {
                        pr_err("cpu_capacity: missing %s raw capacity\n",
                               cpu_node->full_name);
                        pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
                }
                cap_parsing_failed = true;
                kfree(raw_capacity);
        }

        return !ret;
}

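/*
 * Illustrative example (DT values assumed, not from a real board): with two
 * clusters declaring capacity-dmips-mhz = <1024> and <512>, capacity_scale
 * ends up as 1024 and normalize_cpu_capacity() computes
 * (1024 << 10) / 1024 = 1024 and (512 << 10) / 1024 = 512, i.e. the big
 * cores get the full SCHED_CAPACITY_SCALE and the little cores half of it.
 */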
static void normalize_cpu_capacity(void)
{
        u64 capacity;
        int cpu;

        if (!raw_capacity || cap_parsing_failed)
                return;

        pr_debug("cpu_capacity: capacity_scale=%u\n", capacity_scale);
        mutex_lock(&cpu_scale_mutex);
        for_each_possible_cpu(cpu) {
                capacity = (raw_capacity[cpu] << SCHED_CAPACITY_SHIFT)
                        / capacity_scale;
                set_capacity_scale(cpu, capacity);
                pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
                         cpu, arch_scale_cpu_capacity(NULL, cpu));
        }
        mutex_unlock(&cpu_scale_mutex);
}

#ifdef CONFIG_CPU_FREQ
static cpumask_var_t cpus_to_visit;
static bool cap_parsing_done;
static void parsing_done_workfn(struct work_struct *work);
static DECLARE_WORK(parsing_done_work, parsing_done_workfn);

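/*
 * The DT capacity-dmips-mhz values are per-MHz figures, so they only become
 * comparable once each CPU's maximum frequency is known. The callback below
 * waits until a cpufreq policy has been seen for every possible CPU,
 * rescales each capacity by cpuinfo.max_freq (kHz / 1000 = MHz) and then
 * renormalizes, making the final capacity proportional to
 * dmips-per-MHz * max-frequency.
 */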
static int
init_cpu_capacity_callback(struct notifier_block *nb,
                           unsigned long val,
                           void *data)
{
        struct cpufreq_policy *policy = data;
        int cpu;

        if (cap_parsing_failed || cap_parsing_done)
                return 0;

        switch (val) {
        case CPUFREQ_NOTIFY:
                pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
                         cpumask_pr_args(policy->related_cpus),
                         cpumask_pr_args(cpus_to_visit));
                cpumask_andnot(cpus_to_visit,
                               cpus_to_visit,
                               policy->related_cpus);
                for_each_cpu(cpu, policy->related_cpus) {
                        raw_capacity[cpu] = arch_scale_cpu_capacity(NULL, cpu) *
                                            policy->cpuinfo.max_freq / 1000UL;
                        capacity_scale = max(raw_capacity[cpu], capacity_scale);
                }
                if (cpumask_empty(cpus_to_visit)) {
                        normalize_cpu_capacity();
                        kfree(raw_capacity);
                        pr_debug("cpu_capacity: parsing done\n");
                        cap_parsing_done = true;
                        schedule_work(&parsing_done_work);
                }
        }
        return 0;
}

static struct notifier_block init_cpu_capacity_notifier = {
        .notifier_call = init_cpu_capacity_callback,
};

static int __init register_cpufreq_notifier(void)
{
        if (cap_parsing_failed)
                return -EINVAL;

        if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL)) {
                pr_err("cpu_capacity: failed to allocate memory for cpus_to_visit\n");
                return -ENOMEM;
        }
        cpumask_copy(cpus_to_visit, cpu_possible_mask);

        return cpufreq_register_notifier(&init_cpu_capacity_notifier,
                                         CPUFREQ_POLICY_NOTIFIER);
}
core_initcall(register_cpufreq_notifier);

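/*
 * Unregistering is deferred to a workqueue because doing it from within
 * init_cpu_capacity_callback() itself would deadlock: the policy notifier
 * chain is held for reading while callbacks run, and unregistering needs
 * the same chain for writing.
 */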
static void parsing_done_workfn(struct work_struct *work)
{
        cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
                                    CPUFREQ_POLICY_NOTIFIER);
}

#else
static int __init free_raw_capacity(void)
{
        kfree(raw_capacity);

        return 0;
}
core_initcall(free_raw_capacity);
#endif

/*
 * Iterate all CPUs' descriptors in the DT and compute the efficiency
 * (as per table_efficiency). Also calculate a middle efficiency
 * as close as possible to (max{eff_i} + min{eff_i}) / 2.
 * This is later used to scale the cpu_capacity field such that an
 * 'average' CPU is of middle capacity. Also see the comments near
 * table_efficiency[] and update_cpu_capacity().
 */
static void __init parse_dt_topology(void)
{
        const struct cpu_efficiency *cpu_eff;
        struct device_node *cn = NULL;
        unsigned long min_capacity = ULONG_MAX;
        unsigned long max_capacity = 0;
        unsigned long capacity = 0;
        int cpu = 0;

        __cpu_capacity = kcalloc(nr_cpu_ids, sizeof(*__cpu_capacity),
                                 GFP_NOWAIT);

        cn = of_find_node_by_path("/cpus");
        if (!cn) {
                pr_err("No CPU information found in DT\n");
                return;
        }

        for_each_possible_cpu(cpu) {
                const u32 *rate;
                int len;

                /* too early to use cpu->of_node */
                cn = of_get_cpu_node(cpu, NULL);
                if (!cn) {
                        pr_err("missing device node for CPU %d\n", cpu);
                        continue;
                }

                if (parse_cpu_capacity(cn, cpu)) {
                        of_node_put(cn);
                        continue;
                }

                cap_from_dt = false;

                for (cpu_eff = table_efficiency; cpu_eff->compatible; cpu_eff++)
                        if (of_device_is_compatible(cn, cpu_eff->compatible))
                                break;

                if (cpu_eff->compatible == NULL)
                        continue;

                rate = of_get_property(cn, "clock-frequency", &len);
                if (!rate || len != 4) {
                        pr_err("%s missing clock-frequency property\n",
                               cn->full_name);
                        continue;
                }

                capacity = ((be32_to_cpup(rate)) >> 20) * cpu_eff->efficiency;

                /* Save min capacity of the system */
                if (capacity < min_capacity)
                        min_capacity = capacity;

                /* Save max capacity of the system */
                if (capacity > max_capacity)
                        max_capacity = capacity;

                cpu_capacity(cpu) = capacity;
        }

        /* If the min and max capacities are equal, we bypass the update of
         * cpu_scale because all CPUs have the same capacity. Otherwise, we
         * compute a middle_capacity factor that will ensure that the capacity
         * of an 'average' CPU of the system will be as close as possible to
         * SCHED_CAPACITY_SCALE, which is the default value, but with the
         * constraint explained near table_efficiency[].
         */
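        /*
         * A sketch of why the two formulas below work: the first branch sets
         * middle_capacity = ((min + max) / 2) / 1024, so an 'average' CPU of
         * capacity (min + max) / 2 ends up with cpu_scale ~= 1024. The guard
         * 4*max < 3*(max + min) is exactly the condition that the fastest
         * CPU then stays below 3*SCHED_CAPACITY_SCALE/2. When the spread is
         * larger, the fallback instead pins the fastest CPU just under that
         * ~1536 bound.
         */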
        if (4*max_capacity < (3*(max_capacity + min_capacity)))
                middle_capacity = (min_capacity + max_capacity)
                                >> (SCHED_CAPACITY_SHIFT+1);
        else
                middle_capacity = ((max_capacity / 3)
                                >> (SCHED_CAPACITY_SHIFT-1)) + 1;

        if (cap_from_dt && !cap_parsing_failed)
                normalize_cpu_capacity();
}
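
/*
 * Worked example (clock rates assumed, not from a real board): a Cortex-A15
 * and a Cortex-A7 both clocked at 1 GHz give capacities of
 * (10^9 >> 20) * 3891 = 953 * 3891 = 3708123 and 953 * 2048 = 1951744.
 * Here 4*max < 3*(max + min), so middle_capacity = 5659867 >> 11 = 2763,
 * and update_cpu_capacity() then yields cpu_scale values of roughly 1342
 * and 706, whose average is close to SCHED_CAPACITY_SCALE (1024).
 */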

/*
 * Look for a custom capacity of a CPU in the cpu_capacity table during
 * boot. The update of all CPUs is in O(n^2) for a heterogeneous system,
 * but the function returns directly on an SMP system.
 */
static void update_cpu_capacity(unsigned int cpu)
{
        if (!cpu_capacity(cpu) || cap_from_dt)
                return;

        set_capacity_scale(cpu, cpu_capacity(cpu) / middle_capacity);

        pr_info("CPU%u: update cpu_capacity %lu\n",
                cpu, arch_scale_cpu_capacity(NULL, cpu));
}

#else
static inline void parse_dt_topology(void) {}
static inline void update_cpu_capacity(unsigned int cpuid) {}
#endif

/*
 * cpu topology table
 */
struct cputopo_arm cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

const struct cpumask *cpu_coregroup_mask(int cpu)
{
        return &cpu_topology[cpu].core_sibling;
}

/*
 * The current assumption is that we can power gate each core independently.
 * This will be superseded by DT binding once available.
 */
const struct cpumask *cpu_corepower_mask(int cpu)
{
        return &cpu_topology[cpu].thread_sibling;
}

static void update_siblings_masks(unsigned int cpuid)
{
        struct cputopo_arm *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
        int cpu;

        /* update core and thread sibling masks */
        for_each_possible_cpu(cpu) {
                cpu_topo = &cpu_topology[cpu];

                if (cpuid_topo->socket_id != cpu_topo->socket_id)
                        continue;

                cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
                if (cpu != cpuid)
                        cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

                if (cpuid_topo->core_id != cpu_topo->core_id)
                        continue;

                cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
                if (cpu != cpuid)
                        cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
        }
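        /*
         * Order the sibling-mask updates above before any subsequent writes
         * by this CPU, so that once another CPU observes those later writes
         * it is expected to see a consistent topology.
         */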
        smp_wmb();
}

/*
 * store_cpu_topology is called at boot, when only one cpu is running, and
 * with the cpu_hotplug.lock mutex held once several cpus have booted,
 * which prevents simultaneous write access to the cpu_topology array.
 */
void store_cpu_topology(unsigned int cpuid)
{
        struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid];
        unsigned int mpidr;

        /* If the cpu topology has already been set, just return */
        if (cpuid_topo->core_id != -1)
                return;

        mpidr = read_cpuid_mpidr();

        /* create cpu topology mapping */
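        /*
         * Rough ARMv7 MPIDR layout for reference (see the ARM ARM for the
         * authoritative definition): bit 31 selects the multiprocessor
         * format, bit 30 flags a uniprocessor system, bit 24 (MT) indicates
         * multithreading, and Aff2/Aff1/Aff0 occupy bits [23:16], [15:8]
         * and [7:0] respectively.
         */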
        if ((mpidr & MPIDR_SMP_BITMASK) == MPIDR_SMP_VALUE) {
                /*
                 * This is a multiprocessor system:
                 * the multiprocessor format and multiprocessor mode fields are set
                 */

                if (mpidr & MPIDR_MT_BITMASK) {
                        /* core performance interdependency */
                        cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
                        cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
                        cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
                } else {
                        /* largely independent cores */
                        cpuid_topo->thread_id = -1;
                        cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
                        cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
                }
        } else {
                /*
                 * This is a uniprocessor system:
                 * either we are in multiprocessor format but on a uniprocessor
                 * system, or in the old uniprocessor format
                 */
                cpuid_topo->thread_id = -1;
                cpuid_topo->core_id = 0;
                cpuid_topo->socket_id = -1;
        }

        update_siblings_masks(cpuid);

        update_cpu_capacity(cpuid);

        pr_info("CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n",
                cpuid, cpu_topology[cpuid].thread_id,
                cpu_topology[cpuid].core_id,
                cpu_topology[cpuid].socket_id, mpidr);
}

static inline int cpu_corepower_flags(void)
{
        return SD_SHARE_PKG_RESOURCES | SD_SHARE_POWERDOMAIN;
}

static struct sched_domain_topology_level arm_topology[] = {
#ifdef CONFIG_SCHED_MC
        { cpu_corepower_mask, cpu_corepower_flags, SD_INIT_NAME(GMC) },
        { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
#endif
        { cpu_cpu_mask, SD_INIT_NAME(DIE) },
        { NULL, },
};
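
/*
 * Reading the table bottom-up: DIE spans all CPUs, MC groups the cores of a
 * cluster (core_sibling) and GMC groups cores assumed to share a power
 * domain (cpu_corepower_mask, currently the thread siblings), which is what
 * the SD_SHARE_POWERDOMAIN flag above advertises to the scheduler.
 */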

/*
 * init_cpu_topology is called at boot when only one cpu is running,
 * which prevents simultaneous write access to the cpu_topology array
 */
void __init init_cpu_topology(void)
{
        unsigned int cpu;

        /* init core mask and capacity */
        for_each_possible_cpu(cpu) {
                struct cputopo_arm *cpu_topo = &(cpu_topology[cpu]);

                cpu_topo->thread_id = -1;
                cpu_topo->core_id = -1;
                cpu_topo->socket_id = -1;
                cpumask_clear(&cpu_topo->core_sibling);
                cpumask_clear(&cpu_topo->thread_sibling);
        }
        smp_wmb();

        parse_dt_topology();

        /* Set scheduler topology descriptor */
        set_sched_topology(arm_topology);
}