/*
 * arch/arm/kernel/topology.c
 *
 * Copyright (C) 2011 Linaro Limited.
 * Written by: Vincent Guittot
 *
 * based on arch/sh/kernel/topology.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/arch_topology.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/node.h>
#include <linux/nodemask.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/topology.h>

/*
 * cpu capacity scale management
 */

/*
 * cpu capacity table
 * This per-cpu data structure describes the relative capacity of each core.
 * On a heterogeneous system, cores don't have the same computation capacity
 * and we reflect that difference in the cpu_capacity field so the scheduler
 * can take this difference into account during load balancing. A per-cpu
 * structure is preferred because each CPU updates its own cpu_capacity field
 * during load balancing, except for idle cores: one idle core is selected
 * to run rebalance_domains for all idle cores, and the cpu_capacity fields
 * can be updated during this sequence.
 */

#ifdef CONFIG_OF
struct cpu_efficiency {
        const char *compatible;
        unsigned long efficiency;
};

/*
 * Table of the relative efficiency of each processor.
 * The efficiency value must fit in 20 bits and the final
 * cpu_scale value must be in the range
 * 0 < cpu_scale < 3*SCHED_CAPACITY_SCALE/2
 * in order to return at most 1 when DIV_ROUND_CLOSEST
 * is used to compute the capacity of a CPU.
 * Processors that are not listed in the table use the
 * default SCHED_CAPACITY_SCALE value for cpu_scale.
 */
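/*
 * Note: these efficiency values appear to track the relative per-MHz
 * throughput of the two cores (roughly DMIPS/MHz, scaled so that the
 * Cortex-A7 maps to 2048); treat the exact figures as calibration
 * constants rather than benchmark results.
 */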
static const struct cpu_efficiency table_efficiency[] = {
        {"arm,cortex-a15", 3891},
        {"arm,cortex-a7",  2048},
        {NULL, },
};

static unsigned long *__cpu_capacity;
#define cpu_capacity(cpu)       __cpu_capacity[cpu]

static unsigned long middle_capacity = 1;
static bool cap_from_dt = true;

/*
 * Iterate all CPUs' descriptors in DT and compute the efficiency
 * (as per table_efficiency). Also calculate a middle efficiency
 * as close as possible to (max{eff_i} + min{eff_i}) / 2.
 * This is later used to scale the cpu_capacity field such that an
 * 'average' CPU is of middle capacity. Also see the comments near
 * table_efficiency[] and update_cpu_capacity().
 */
static void __init parse_dt_topology(void)
{
        const struct cpu_efficiency *cpu_eff;
        struct device_node *cn = NULL;
        unsigned long min_capacity = ULONG_MAX;
        unsigned long max_capacity = 0;
        unsigned long capacity = 0;
        int cpu = 0;

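        /*
         * GFP_NOWAIT: this runs during early boot, before it is safe
         * for an allocation to sleep.
         */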
        __cpu_capacity = kcalloc(nr_cpu_ids, sizeof(*__cpu_capacity),
                                 GFP_NOWAIT);

        cn = of_find_node_by_path("/cpus");
        if (!cn) {
                pr_err("No CPU information found in DT\n");
                return;
        }

        for_each_possible_cpu(cpu) {
                const u32 *rate;
                int len;

                /* too early to use cpu->of_node */
                cn = of_get_cpu_node(cpu, NULL);
                if (!cn) {
                        pr_err("missing device node for CPU %d\n", cpu);
                        continue;
                }

                if (topology_parse_cpu_capacity(cn, cpu)) {
                        of_node_put(cn);
                        continue;
                }

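                /*
                 * No capacity-dmips-mhz property for this CPU: disable
                 * DT-provided capacities for the whole system and fall
                 * back to the static efficiency table below.
                 */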
                cap_from_dt = false;

                for (cpu_eff = table_efficiency; cpu_eff->compatible; cpu_eff++)
                        if (of_device_is_compatible(cn, cpu_eff->compatible))
                                break;

                if (cpu_eff->compatible == NULL)
                        continue;

                rate = of_get_property(cn, "clock-frequency", &len);
                if (!rate || len != 4) {
                        pr_err("%s missing clock-frequency property\n",
                               cn->full_name);
                        continue;
                }

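                /*
                 * Raw capacity ~= (clock in "binary MHz", i.e. Hz >> 20)
                 * * efficiency. Illustrative figures: a Cortex-A15 at
                 * 1 GHz gives (1000000000 >> 20) * 3891 = 953 * 3891,
                 * while a Cortex-A7 at 1 GHz gives 953 * 2048.
                 */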
                capacity = ((be32_to_cpup(rate)) >> 20) * cpu_eff->efficiency;

                /* Save min capacity of the system */
                if (capacity < min_capacity)
                        min_capacity = capacity;

                /* Save max capacity of the system */
                if (capacity > max_capacity)
                        max_capacity = capacity;

                cpu_capacity(cpu) = capacity;
        }

        /*
         * If the min and max capacities are equal, we bypass the update of
         * the cpu_scale because all CPUs have the same capacity. Otherwise,
         * we compute a middle_capacity factor that will ensure that the
         * capacity of an 'average' CPU of the system will be as close as
         * possible to SCHED_CAPACITY_SCALE, which is the default value, but
         * with the constraint explained near table_efficiency[].
         */
        if (4*max_capacity < (3*(max_capacity + min_capacity)))
                middle_capacity = (min_capacity + max_capacity)
                                >> (SCHED_CAPACITY_SHIFT+1);
        else
                middle_capacity = ((max_capacity / 3)
                                >> (SCHED_CAPACITY_SHIFT-1)) + 1;
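
        /*
         * Worked example (illustrative figures): with the A15/A7 values
         * above at 1 GHz, min = 953 * 2048 and max = 953 * 3891, so
         * 4*max < 3*(max + min) and middle_capacity = (min + max) >> 11.
         * The 'average' core (min + max) / 2 then scales to
         * ((min + max) / 2) / middle_capacity ~= 1024, which is
         * SCHED_CAPACITY_SCALE.
         */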

        if (cap_from_dt)
                topology_normalize_cpu_scale();
}

/*
 * Look up a custom capacity for a CPU in the cpu_capacity table during
 * boot. The update of all CPUs is in O(n^2) for a heterogeneous system
 * but the function returns immediately for an SMP system.
 */
static void update_cpu_capacity(unsigned int cpu)
{
        if (!cpu_capacity(cpu) || cap_from_dt)
                return;

        topology_set_cpu_scale(cpu, cpu_capacity(cpu) / middle_capacity);

        pr_info("CPU%u: update cpu_capacity %lu\n",
                cpu, topology_get_cpu_scale(NULL, cpu));
}

#else
static inline void parse_dt_topology(void) {}
static inline void update_cpu_capacity(unsigned int cpuid) {}
#endif

/*
 * cpu topology table
 */
struct cputopo_arm cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

const struct cpumask *cpu_coregroup_mask(int cpu)
{
        return &cpu_topology[cpu].core_sibling;
}

/*
 * The current assumption is that we can power gate each core independently.
 * This will be superseded by a DT binding once one is available.
 */
const struct cpumask *cpu_corepower_mask(int cpu)
{
        return &cpu_topology[cpu].thread_sibling;
}

static void update_siblings_masks(unsigned int cpuid)
{
        struct cputopo_arm *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
        int cpu;

        /* update core and thread sibling masks */
        for_each_possible_cpu(cpu) {
                cpu_topo = &cpu_topology[cpu];

                if (cpuid_topo->socket_id != cpu_topo->socket_id)
                        continue;

                cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
                if (cpu != cpuid)
                        cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

                if (cpuid_topo->core_id != cpu_topo->core_id)
                        continue;

                cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
                if (cpu != cpuid)
                        cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
        }
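        /* Make the updated sibling masks visible to the other CPUs. */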
        smp_wmb();
}

/*
 * store_cpu_topology is called at boot when only one CPU is running, and
 * with the mutex cpu_hotplug.lock held once several CPUs have booted,
 * which prevents simultaneous write access to the cpu_topology array.
 */
void store_cpu_topology(unsigned int cpuid)
{
        struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid];
        unsigned int mpidr;

        /* If the cpu topology has already been set, just return */
        if (cpuid_topo->core_id != -1)
                return;

        mpidr = read_cpuid_mpidr();

        /* create cpu topology mapping */
        if ((mpidr & MPIDR_SMP_BITMASK) == MPIDR_SMP_VALUE) {
                /*
                 * This is a multiprocessor system:
                 * the multiprocessor format and the multiprocessor
                 * mode field are set.
                 */

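                /*
                 * ARMv7 MPIDR affinity fields: Aff0 is bits [7:0], Aff1
                 * is bits [15:8], Aff2 is bits [23:16]. With the MT bit
                 * set, Aff0 numbers hardware threads within a core;
                 * otherwise Aff0 numbers cores within a cluster.
                 */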
                if (mpidr & MPIDR_MT_BITMASK) {
                        /* core performance interdependency */
                        cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
                        cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
                        cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
                } else {
                        /* largely independent cores */
                        cpuid_topo->thread_id = -1;
                        cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
                        cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
                }
        } else {
                /*
                 * This is a uniprocessor system:
                 * either the multiprocessor format on a uniprocessor
                 * system, or the old uniprocessor format.
                 */
                cpuid_topo->thread_id = -1;
                cpuid_topo->core_id = 0;
                cpuid_topo->socket_id = -1;
        }

        update_siblings_masks(cpuid);

        update_cpu_capacity(cpuid);

        pr_info("CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n",
                cpuid, cpu_topology[cpuid].thread_id,
                cpu_topology[cpuid].core_id,
                cpu_topology[cpuid].socket_id, mpidr);
}

static inline int cpu_corepower_flags(void)
{
        return SD_SHARE_PKG_RESOURCES | SD_SHARE_POWERDOMAIN;
}

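/*
 * Scheduler topology levels, innermost first: GMC groups cores that
 * share a power domain (cpu_corepower_mask), MC groups cores in the
 * same cluster (cpu_coregroup_mask), and DIE spans all CPUs.
 */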
static struct sched_domain_topology_level arm_topology[] = {
#ifdef CONFIG_SCHED_MC
        { cpu_corepower_mask, cpu_corepower_flags, SD_INIT_NAME(GMC) },
        { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
#endif
        { cpu_cpu_mask, SD_INIT_NAME(DIE) },
        { NULL, },
};

/*
 * init_cpu_topology is called at boot when only one CPU is running,
 * which prevents simultaneous write access to the cpu_topology array.
 */
void __init init_cpu_topology(void)
{
        unsigned int cpu;

        /* init core mask and capacity */
        for_each_possible_cpu(cpu) {
                struct cputopo_arm *cpu_topo = &(cpu_topology[cpu]);

                cpu_topo->thread_id = -1;
                cpu_topo->core_id = -1;
                cpu_topo->socket_id = -1;
                cpumask_clear(&cpu_topo->core_sibling);
                cpumask_clear(&cpu_topo->thread_sibling);
        }
        smp_wmb();

        parse_dt_topology();

        /* Set scheduler topology descriptor */
        set_sched_topology(arm_topology);
}