// SPDX-License-Identifier: GPL-2.0
/*
 * Arch specific cpu topology information
 *
 * Copyright (C) 2016, ARM Ltd.
 * Written by: Juri Lelli, ARM Ltd.
 */

#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sched/topology.h>
#include <linux/cpuset.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data);
static struct cpumask scale_freq_counters_mask;
static bool scale_freq_invariant;

static bool supports_scale_freq_counters(const struct cpumask *cpus)
{
	return cpumask_subset(cpus, &scale_freq_counters_mask);
}

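/*
 * The system is considered frequency invariant if either cpufreq can
 * provide the current-to-maximum frequency ratio itself, or every online
 * CPU has a counter based frequency scale source registered for it.
 */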
bool topology_scale_freq_invariant(void)
{
	return cpufreq_supports_freq_invariance() ||
	       supports_scale_freq_counters(cpu_online_mask);
}

static void update_scale_freq_invariant(bool status)
{
	if (scale_freq_invariant == status)
		return;

	/*
	 * Task scheduler behavior depends on frequency invariance support,
	 * either cpufreq or counter driven. If the support status changes as
	 * a result of counter initialisation and use, retrigger the build of
	 * scheduling domains to ensure the information is propagated properly.
	 */
	if (topology_scale_freq_invariant() == status) {
		scale_freq_invariant = status;
		rebuild_sched_domains_energy();
	}
}

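/*
 * topology_set_scale_freq_source() - register a counter based frequency
 * scale source for a set of CPUs. Registration is per CPU under RCU, and
 * an already installed SCALE_FREQ_SOURCE_ARCH source is never replaced,
 * so arch provided counters keep priority over other sources.
 */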
void topology_set_scale_freq_source(struct scale_freq_data *data,
				    const struct cpumask *cpus)
{
	struct scale_freq_data *sfd;
	int cpu;

	/*
	 * Avoid calling rebuild_sched_domains() unnecessarily if FIE is
	 * supported by cpufreq.
	 */
	if (cpumask_empty(&scale_freq_counters_mask))
		scale_freq_invariant = topology_scale_freq_invariant();

	rcu_read_lock();

	for_each_cpu(cpu, cpus) {
		sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));

		/* Use ARCH provided counters whenever possible */
		if (!sfd || sfd->source != SCALE_FREQ_SOURCE_ARCH) {
			rcu_assign_pointer(per_cpu(sft_data, cpu), data);
			cpumask_set_cpu(cpu, &scale_freq_counters_mask);
		}
	}

	rcu_read_unlock();

	update_scale_freq_invariant(true);
}
EXPORT_SYMBOL_GPL(topology_set_scale_freq_source);

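/*
 * topology_clear_scale_freq_source() - unregister a frequency scale source.
 * Only per-CPU entries whose ->source matches @source are cleared, so one
 * driver cannot accidentally drop a source registered by another.
 */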
void topology_clear_scale_freq_source(enum scale_freq_source source,
				      const struct cpumask *cpus)
{
	struct scale_freq_data *sfd;
	int cpu;

	rcu_read_lock();

	for_each_cpu(cpu, cpus) {
		sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));

		if (sfd && sfd->source == source) {
			rcu_assign_pointer(per_cpu(sft_data, cpu), NULL);
			cpumask_clear_cpu(cpu, &scale_freq_counters_mask);
		}
	}

	rcu_read_unlock();

	/*
	 * Make sure all references to previous sft_data are dropped to avoid
	 * use-after-free races.
	 */
	synchronize_rcu();

	update_scale_freq_invariant(false);
}
EXPORT_SYMBOL_GPL(topology_clear_scale_freq_source);

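/*
 * Called from the scheduler tick path with preemption disabled, which is
 * why the RCU sched flavor dereference below is sufficient.
 */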
void topology_scale_freq_tick(void)
{
	struct scale_freq_data *sfd = rcu_dereference_sched(*this_cpu_ptr(&sft_data));

	if (sfd)
		sfd->set_freq_scale();
}

DEFINE_PER_CPU(unsigned long, arch_freq_scale) = SCHED_CAPACITY_SCALE;
EXPORT_PER_CPU_SYMBOL_GPL(arch_freq_scale);

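/*
 * Record the current/maximum frequency ratio, scaled to
 * SCHED_CAPACITY_SCALE (1024), for each CPU in @cpus. For example, CPUs
 * running at 1500 MHz out of a 2000 MHz maximum get a scale factor of
 * (1500 << 10) / 2000 = 768.
 */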
void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
			     unsigned long max_freq)
{
	unsigned long scale;
	int i;

	if (WARN_ON_ONCE(!cur_freq || !max_freq))
		return;

	/*
	 * If the use of counters for FIE is enabled, just return as we don't
	 * want to update the scale factor with information from CPUFREQ.
	 * Instead the scale factor will be updated from arch_scale_freq_tick.
	 */
	if (supports_scale_freq_counters(cpus))
		return;

	scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;

	for_each_cpu(i, cpus)
		per_cpu(arch_freq_scale, i) = scale;
}

DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
EXPORT_PER_CPU_SYMBOL_GPL(cpu_scale);

void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
{
	per_cpu(cpu_scale, cpu) = capacity;
}

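/*
 * Thermal pressure updates are lockless, hence the WRITE_ONCE() below;
 * readers on the scheduler side may sample the value concurrently.
 */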
DEFINE_PER_CPU(unsigned long, thermal_pressure);

void topology_set_thermal_pressure(const struct cpumask *cpus,
				   unsigned long th_pressure)
{
	int cpu;

	for_each_cpu(cpu, cpus)
		WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure);
}
EXPORT_SYMBOL_GPL(topology_set_thermal_pressure);

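/*
 * Each CPU's capacity is exposed read-only through sysfs, typically as
 * /sys/devices/system/cpu/cpuN/cpu_capacity.
 */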
static ssize_t cpu_capacity_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);

	return sysfs_emit(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id));
}

static void update_topology_flags_workfn(struct work_struct *work);
static DECLARE_WORK(update_topology_flags_work, update_topology_flags_workfn);

static DEVICE_ATTR_RO(cpu_capacity);

static int register_cpu_capacity_sysctl(void)
{
	int i;
	struct device *cpu;

	for_each_possible_cpu(i) {
		cpu = get_cpu_device(i);
		if (!cpu) {
			pr_err("%s: too early to get CPU%d device!\n",
			       __func__, i);
			continue;
		}
		device_create_file(cpu, &dev_attr_cpu_capacity);
	}

	return 0;
}
subsys_initcall(register_cpu_capacity_sysctl);

static int update_topology;

int topology_update_cpu_topology(void)
{
	return update_topology;
}

/*
 * Updating the sched_domains can't be done directly from cpufreq callbacks
 * due to locking, so queue the work for later.
 */
static void update_topology_flags_workfn(struct work_struct *work)
{
	update_topology = 1;
	rebuild_sched_domains();
	pr_debug("sched_domain hierarchy rebuilt, flags updated\n");
	update_topology = 0;
}

static DEFINE_PER_CPU(u32, freq_factor) = 1;
static u32 *raw_capacity;

static int free_raw_capacity(void)
{
	kfree(raw_capacity);
	raw_capacity = NULL;

	return 0;
}

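/*
 * Normalization example (illustrative numbers): with capacity-dmips-mhz
 * values of 1024 (big) and 512 (little), and freq_factor values of 2000
 * and 1500 (MHz), the products are 2048000 and 768000; scaling against
 * the maximum leaves the CPUs with capacities 1024 and 384.
 */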
void topology_normalize_cpu_scale(void)
{
	u64 capacity;
	u64 capacity_scale;
	int cpu;

	if (!raw_capacity)
		return;

	capacity_scale = 1;
	for_each_possible_cpu(cpu) {
		capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
		capacity_scale = max(capacity, capacity_scale);
	}

	pr_debug("cpu_capacity: capacity_scale=%llu\n", capacity_scale);
	for_each_possible_cpu(cpu) {
		capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
		capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,
				     capacity_scale);
		topology_set_cpu_scale(cpu, capacity);
		pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
			 cpu, topology_get_cpu_scale(cpu));
	}
}

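/*
 * Capacity is described in DT with the optional capacity-dmips-mhz
 * property, e.g. (illustrative value):
 *
 *	cpu@0 {
 *		capacity-dmips-mhz = <578>;
 *	};
 *
 * Parsing is all-or-nothing: if any CPU node lacks the property, the
 * partial data is discarded and all CPUs fall back to the default
 * capacity of 1024.
 */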
bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
{
	struct clk *cpu_clk;
	static bool cap_parsing_failed;
	int ret;
	u32 cpu_capacity;

	if (cap_parsing_failed)
		return false;

	ret = of_property_read_u32(cpu_node, "capacity-dmips-mhz",
				   &cpu_capacity);
	if (!ret) {
		if (!raw_capacity) {
			raw_capacity = kcalloc(num_possible_cpus(),
					       sizeof(*raw_capacity),
					       GFP_KERNEL);
			if (!raw_capacity) {
				cap_parsing_failed = true;
				return false;
			}
		}
		raw_capacity[cpu] = cpu_capacity;
		pr_debug("cpu_capacity: %pOF cpu_capacity=%u (raw)\n",
			 cpu_node, raw_capacity[cpu]);

		/*
		 * Update freq_factor for calculating early boot CPU capacities.
		 * For non-clk CPU DVFS mechanisms there is no way to get the
		 * frequency value now, so assume such CPUs are running at the
		 * same frequency (by keeping the initial freq_factor value).
		 */
		cpu_clk = of_clk_get(cpu_node, 0);
		if (!PTR_ERR_OR_ZERO(cpu_clk)) {
			per_cpu(freq_factor, cpu) =
				clk_get_rate(cpu_clk) / 1000;
			clk_put(cpu_clk);
		}
	} else {
		if (raw_capacity) {
			pr_err("cpu_capacity: missing %pOF raw capacity\n",
			       cpu_node);
			pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
		}
		cap_parsing_failed = true;
		free_raw_capacity();
	}

	return !ret;
}

#ifdef CONFIG_CPU_FREQ
static cpumask_var_t cpus_to_visit;
static void parsing_done_workfn(struct work_struct *work);
static DECLARE_WORK(parsing_done_work, parsing_done_workfn);

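/*
 * Each CPUFREQ_CREATE_POLICY notification below reveals the maximum
 * frequency of one policy's CPUs. Once every possible CPU has been
 * visited, capacities are normalized against the real maximum frequencies
 * and the one-shot notifier schedules its own removal.
 */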
static int
init_cpu_capacity_callback(struct notifier_block *nb,
			   unsigned long val,
			   void *data)
{
	struct cpufreq_policy *policy = data;
	int cpu;

	if (!raw_capacity)
		return 0;

	if (val != CPUFREQ_CREATE_POLICY)
		return 0;

	pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
		 cpumask_pr_args(policy->related_cpus),
		 cpumask_pr_args(cpus_to_visit));

	cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus);

	for_each_cpu(cpu, policy->related_cpus)
		per_cpu(freq_factor, cpu) = policy->cpuinfo.max_freq / 1000;

	if (cpumask_empty(cpus_to_visit)) {
		topology_normalize_cpu_scale();
		schedule_work(&update_topology_flags_work);
		free_raw_capacity();
		pr_debug("cpu_capacity: parsing done\n");
		schedule_work(&parsing_done_work);
	}

	return 0;
}

static struct notifier_block init_cpu_capacity_notifier = {
	.notifier_call = init_cpu_capacity_callback,
};

static int __init register_cpufreq_notifier(void)
{
	int ret;

	/*
	 * On ACPI-based systems we need to use the default cpu capacity
	 * until we have the necessary code to parse the cpu capacity, so
	 * skip registering the cpufreq notifier.
	 */
	if (!acpi_disabled || !raw_capacity)
		return -EINVAL;

	if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(cpus_to_visit, cpu_possible_mask);

	ret = cpufreq_register_notifier(&init_cpu_capacity_notifier,
					CPUFREQ_POLICY_NOTIFIER);

	if (ret)
		free_cpumask_var(cpus_to_visit);

	return ret;
}
core_initcall(register_cpufreq_notifier);

static void parsing_done_workfn(struct work_struct *work)
{
	cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
				    CPUFREQ_POLICY_NOTIFIER);
	free_cpumask_var(cpus_to_visit);
}

#else
core_initcall(free_raw_capacity);
#endif

#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
/*
 * This function returns the logical cpu number of the node.
 * There are basically three kinds of return values:
 * (1) logical cpu number which is >= 0.
 * (2) -ENODEV when the device tree (DT) node is valid and found in the DT
 * but there is no possible logical CPU in the kernel to match. This happens
 * when CONFIG_NR_CPUS is configured to be smaller than the number of
 * CPU nodes in the DT. We need to just ignore this case.
 * (3) -1 if the node does not exist in the device tree.
 */
static int __init get_cpu_for_node(struct device_node *node)
{
	struct device_node *cpu_node;
	int cpu;

	cpu_node = of_parse_phandle(node, "cpu", 0);
	if (!cpu_node)
		return -1;

	cpu = of_cpu_node_to_id(cpu_node);
	if (cpu >= 0)
		topology_parse_cpu_capacity(cpu_node, cpu);
	else
		pr_info("CPU node for %pOF exists but the possible cpu range is: %*pbl\n",
			cpu_node, cpumask_pr_args(cpu_possible_mask));

	of_node_put(cpu_node);
	return cpu;
}

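/*
 * An illustrative cpu-map fragment of the topology parsed by parse_core()
 * and parse_cluster() below (node names and phandles are examples only):
 *
 *	cpu-map {
 *		cluster0 {
 *			core0 {
 *				cpu = <&cpu0>;
 *			};
 *			core1 {
 *				cpu = <&cpu1>;
 *			};
 *		};
 *	};
 */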
static int __init parse_core(struct device_node *core, int package_id,
			     int core_id)
{
	char name[20];
	bool leaf = true;
	int i = 0;
	int cpu;
	struct device_node *t;

	do {
		snprintf(name, sizeof(name), "thread%d", i);
		t = of_get_child_by_name(core, name);
		if (t) {
			leaf = false;
			cpu = get_cpu_for_node(t);
			if (cpu >= 0) {
				cpu_topology[cpu].package_id = package_id;
				cpu_topology[cpu].core_id = core_id;
				cpu_topology[cpu].thread_id = i;
			} else if (cpu != -ENODEV) {
				pr_err("%pOF: Can't get CPU for thread\n", t);
				of_node_put(t);
				return -EINVAL;
			}
			of_node_put(t);
		}
		i++;
	} while (t);

	cpu = get_cpu_for_node(core);
	if (cpu >= 0) {
		if (!leaf) {
			pr_err("%pOF: Core has both threads and CPU\n",
			       core);
			return -EINVAL;
		}

		cpu_topology[cpu].package_id = package_id;
		cpu_topology[cpu].core_id = core_id;
	} else if (leaf && cpu != -ENODEV) {
		pr_err("%pOF: Can't get CPU for leaf core\n", core);
		return -EINVAL;
	}

	return 0;
}

static int __init parse_cluster(struct device_node *cluster, int depth)
{
	char name[20];
	bool leaf = true;
	bool has_cores = false;
	struct device_node *c;
	static int package_id __initdata;
	int core_id = 0;
	int i, ret;

	/*
	 * First check for child clusters; we currently ignore any
	 * information about the nesting of clusters and present the
	 * scheduler with a flat list of them.
	 */
	i = 0;
	do {
		snprintf(name, sizeof(name), "cluster%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			leaf = false;
			ret = parse_cluster(c, depth + 1);
			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	/* Now check for cores */
	i = 0;
	do {
		snprintf(name, sizeof(name), "core%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			has_cores = true;

			if (depth == 0) {
				pr_err("%pOF: cpu-map children should be clusters\n",
				       c);
				of_node_put(c);
				return -EINVAL;
			}

			if (leaf) {
				ret = parse_core(c, package_id, core_id++);
			} else {
				pr_err("%pOF: Non-leaf cluster with core %s\n",
				       cluster, name);
				ret = -EINVAL;
			}

			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	if (leaf && !has_cores)
		pr_warn("%pOF: empty cluster\n", cluster);

	if (leaf)
		package_id++;

	return 0;
}

static int __init parse_dt_topology(void)
{
	struct device_node *cn, *map;
	int ret = 0;
	int cpu;

	cn = of_find_node_by_path("/cpus");
	if (!cn) {
		pr_err("No CPU information found in DT\n");
		return 0;
	}

	/*
	 * When topology is provided cpu-map is essentially a root
	 * cluster with restricted subnodes.
	 */
	map = of_get_child_by_name(cn, "cpu-map");
	if (!map)
		goto out;

	ret = parse_cluster(map, 0);
	if (ret != 0)
		goto out_map;

	topology_normalize_cpu_scale();

	/*
	 * Check that all cores are in the topology; the SMP code will
	 * only mark cores described in the DT as possible.
	 */
	for_each_possible_cpu(cpu)
		if (cpu_topology[cpu].package_id == -1)
			ret = -EINVAL;

out_map:
	of_node_put(map);
out:
	of_node_put(cn);
	return ret;
}
#endif

/*
 * cpu topology table
 */
struct cpu_topology cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

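/*
 * cpu_coregroup_mask() - return the narrowest of the NUMA node span, the
 * package siblings and the LLC siblings of @cpu; the scheduler uses this
 * when building the MC sched domain level.
 */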
const struct cpumask *cpu_coregroup_mask(int cpu)
{
	const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu));

	/* Find the smaller of NUMA, core or LLC siblings */
	if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) {
		/* not NUMA in package, let's use the package siblings */
		core_mask = &cpu_topology[cpu].core_sibling;
	}
	if (cpu_topology[cpu].llc_id != -1) {
		if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask))
			core_mask = &cpu_topology[cpu].llc_sibling;
	}

	return core_mask;
}

void update_siblings_masks(unsigned int cpuid)
{
	struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
	int cpu;

	/* update core and thread sibling masks */
	for_each_online_cpu(cpu) {
		cpu_topo = &cpu_topology[cpu];

		if (cpu_topo->llc_id != -1 && cpuid_topo->llc_id == cpu_topo->llc_id) {
			cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
			cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling);
		}

		if (cpuid_topo->package_id != cpu_topo->package_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
		cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

		if (cpuid_topo->core_id != cpu_topo->core_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
		cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
	}
}

static void clear_cpu_topology(int cpu)
{
	struct cpu_topology *cpu_topo = &cpu_topology[cpu];

	cpumask_clear(&cpu_topo->llc_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->llc_sibling);

	cpumask_clear(&cpu_topo->core_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
	cpumask_clear(&cpu_topo->thread_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
}

void __init reset_cpu_topology(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_topology *cpu_topo = &cpu_topology[cpu];

		cpu_topo->thread_id = -1;
		cpu_topo->core_id = -1;
		cpu_topo->package_id = -1;
		cpu_topo->llc_id = -1;

		clear_cpu_topology(cpu);
	}
}

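/*
 * Called when a CPU goes offline: drop it from every sibling mask it
 * appears in, then reset its own masks back to just itself.
 */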
void remove_cpu_topology(unsigned int cpu)
{
	int sibling;

	for_each_cpu(sibling, topology_core_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
	for_each_cpu(sibling, topology_sibling_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
	for_each_cpu(sibling, topology_llc_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling));

	clear_cpu_topology(cpu);
}

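/*
 * Weak stub; architectures that can discover topology from ACPI (for
 * example arm64 using the PPTT table) provide their own implementation.
 */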
__weak int __init parse_acpi_topology(void)
{
	return 0;
}

#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
void __init init_cpu_topology(void)
{
	reset_cpu_topology();

	/*
	 * Discard anything that was parsed if we hit an error so we
	 * don't use partial information.
	 */
	if (parse_acpi_topology())
		reset_cpu_topology();
	else if (of_have_populated_dt() && parse_dt_topology())
		reset_cpu_topology();
}
#endif