// SPDX-License-Identifier: GPL-2.0
/*
 * Arch specific cpu topology information
 *
 * Copyright (C) 2016, ARM Ltd.
 * Written by: Juri Lelli, ARM Ltd.
 */

#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sched/topology.h>
#include <linux/cpuset.h>

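/*
 * Per-CPU frequency scale factor: the CPU's current frequency as a
 * fraction of its maximum, expressed on the SCHED_CAPACITY_SCALE
 * (1024) range. Defaults to full scale until cpufreq reports in.
 */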
DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE;

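/*
 * Invoked on frequency changes: recompute cur_freq/max_freq on the
 * capacity scale and apply it to every CPU sharing the policy.
 */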
void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq,
			 unsigned long max_freq)
{
	unsigned long scale;
	int i;

	scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;

	for_each_cpu(i, cpus)
		per_cpu(freq_scale, i) = scale;
}

DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;

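/* Record the normalized capacity of @cpu in the per-CPU cpu_scale. */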
void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
{
	per_cpu(cpu_scale, cpu) = capacity;
}

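/* sysfs show handler backing each CPU device's cpu_capacity attribute. */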
static ssize_t cpu_capacity_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);

	return sprintf(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id));
}

static void update_topology_flags_workfn(struct work_struct *work);
static DECLARE_WORK(update_topology_flags_work, update_topology_flags_workfn);

static DEVICE_ATTR_RO(cpu_capacity);

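/*
 * Create the cpu_capacity attribute on every possible CPU device;
 * CPUs whose devices are not yet registered are reported and skipped.
 */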
static int register_cpu_capacity_sysctl(void)
{
	int i;
	struct device *cpu;

	for_each_possible_cpu(i) {
		cpu = get_cpu_device(i);
		if (!cpu) {
			pr_err("%s: too early to get CPU%d device!\n",
			       __func__, i);
			continue;
		}
		device_create_file(cpu, &dev_attr_cpu_capacity);
	}

	return 0;
}
subsys_initcall(register_cpu_capacity_sysctl);

static int update_topology;

int topology_update_cpu_topology(void)
{
	return update_topology;
}

/*
 * Updating the sched_domains can't be done directly from cpufreq callbacks
 * due to locking, so queue the work for later.
 */
static void update_topology_flags_workfn(struct work_struct *work)
{
	update_topology = 1;
	rebuild_sched_domains();
	pr_debug("sched_domain hierarchy rebuilt, flags updated\n");
	update_topology = 0;
}

static u32 capacity_scale;
static u32 *raw_capacity;

static int free_raw_capacity(void)
{
	kfree(raw_capacity);
	raw_capacity = NULL;

	return 0;
}

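/*
 * Normalize the raw capacities so the biggest CPU ends up at
 * SCHED_CAPACITY_SCALE and the others are scaled proportionally.
 */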
void topology_normalize_cpu_scale(void)
{
	u64 capacity;
	int cpu;

	if (!raw_capacity)
		return;

	pr_debug("cpu_capacity: capacity_scale=%u\n", capacity_scale);
	for_each_possible_cpu(cpu) {
		pr_debug("cpu_capacity: cpu=%d raw_capacity=%u\n",
			 cpu, raw_capacity[cpu]);
		capacity = (raw_capacity[cpu] << SCHED_CAPACITY_SHIFT)
			/ capacity_scale;
		topology_set_cpu_scale(cpu, capacity);
		pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
			 cpu, topology_get_cpu_scale(cpu));
	}
}

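/*
 * Parse the capacity-dmips-mhz property of @cpu_node into
 * raw_capacity[cpu], tracking the largest value seen so far in
 * capacity_scale. A missing property or failed allocation aborts
 * parsing for good: all CPUs then keep the default capacity.
 */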
bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
{
	static bool cap_parsing_failed;
	int ret;
	u32 cpu_capacity;

	if (cap_parsing_failed)
		return false;

	ret = of_property_read_u32(cpu_node, "capacity-dmips-mhz",
				   &cpu_capacity);
	if (!ret) {
		if (!raw_capacity) {
			raw_capacity = kcalloc(num_possible_cpus(),
					       sizeof(*raw_capacity),
					       GFP_KERNEL);
			if (!raw_capacity) {
				cap_parsing_failed = true;
				return false;
			}
		}
		capacity_scale = max(cpu_capacity, capacity_scale);
		raw_capacity[cpu] = cpu_capacity;
		pr_debug("cpu_capacity: %pOF cpu_capacity=%u (raw)\n",
			 cpu_node, raw_capacity[cpu]);
	} else {
		if (raw_capacity) {
			pr_err("cpu_capacity: missing %pOF raw capacity\n",
			       cpu_node);
			pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
		}
		cap_parsing_failed = true;
		free_raw_capacity();
	}

	return !ret;
}

#ifdef CONFIG_CPU_FREQ
static cpumask_var_t cpus_to_visit;
static void parsing_done_workfn(struct work_struct *work);
static DECLARE_WORK(parsing_done_work, parsing_done_workfn);

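/*
 * cpufreq policy notifier: weight each CPU's raw DMIPS/MHz capacity by
 * its maximum frequency and, once every possible CPU has been visited,
 * normalize the results and schedule the notifier's teardown.
 */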
static int
init_cpu_capacity_callback(struct notifier_block *nb,
			   unsigned long val,
			   void *data)
{
	struct cpufreq_policy *policy = data;
	int cpu;

	if (!raw_capacity)
		return 0;

	if (val != CPUFREQ_NOTIFY)
		return 0;

	pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
		 cpumask_pr_args(policy->related_cpus),
		 cpumask_pr_args(cpus_to_visit));

	cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus);

	for_each_cpu(cpu, policy->related_cpus) {
		raw_capacity[cpu] = topology_get_cpu_scale(cpu) *
				    policy->cpuinfo.max_freq / 1000UL;
		capacity_scale = max(raw_capacity[cpu], capacity_scale);
	}

	if (cpumask_empty(cpus_to_visit)) {
		topology_normalize_cpu_scale();
		schedule_work(&update_topology_flags_work);
		free_raw_capacity();
		pr_debug("cpu_capacity: parsing done\n");
		schedule_work(&parsing_done_work);
	}

	return 0;
}

static struct notifier_block init_cpu_capacity_notifier = {
	.notifier_call = init_cpu_capacity_callback,
};

static int __init register_cpufreq_notifier(void)
{
	int ret;

	/*
	 * on ACPI-based systems we need to use the default cpu capacity
	 * until we have the necessary code to parse the cpu capacity, so
	 * skip registering cpufreq notifier.
	 */
	if (!acpi_disabled || !raw_capacity)
		return -EINVAL;

	if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(cpus_to_visit, cpu_possible_mask);

	ret = cpufreq_register_notifier(&init_cpu_capacity_notifier,
					CPUFREQ_POLICY_NOTIFIER);

	if (ret)
		free_cpumask_var(cpus_to_visit);

	return ret;
}
core_initcall(register_cpufreq_notifier);

static void parsing_done_workfn(struct work_struct *work)
{
	cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
				    CPUFREQ_POLICY_NOTIFIER);
	free_cpumask_var(cpus_to_visit);
}

#else
core_initcall(free_raw_capacity);
#endif