]>
Commit | Line | Data |
---|---|---|
95ceafd4 SG |
1 | /* |
2 | * Copyright (C) 2012 Freescale Semiconductor, Inc. | |
3 | * | |
748c8766 VK |
4 | * Copyright (C) 2014 Linaro. |
5 | * Viresh Kumar <viresh.kumar@linaro.org> | |
6 | * | |
bbcf0719 | 7 | * The OPP code in function set_target() is reused from |
95ceafd4 SG |
8 | * drivers/cpufreq/omap-cpufreq.c |
9 | * | |
10 | * This program is free software; you can redistribute it and/or modify | |
11 | * it under the terms of the GNU General Public License version 2 as | |
12 | * published by the Free Software Foundation. | |
13 | */ | |
14 | ||
15 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
16 | ||
17 | #include <linux/clk.h> | |
e1825b25 | 18 | #include <linux/cpu.h> |
77cff592 | 19 | #include <linux/cpu_cooling.h> |
95ceafd4 | 20 | #include <linux/cpufreq.h> |
34e5a527 | 21 | #include <linux/cpufreq-dt.h> |
77cff592 | 22 | #include <linux/cpumask.h> |
95ceafd4 SG |
23 | #include <linux/err.h> |
24 | #include <linux/module.h> | |
25 | #include <linux/of.h> | |
e4db1c74 | 26 | #include <linux/pm_opp.h> |
5553f9e2 | 27 | #include <linux/platform_device.h> |
95ceafd4 SG |
28 | #include <linux/regulator/consumer.h> |
29 | #include <linux/slab.h> | |
77cff592 | 30 | #include <linux/thermal.h> |
95ceafd4 | 31 | |
d2f31f1d VK |
/*
 * Per-policy driver state, allocated in cpufreq_init() and stored in
 * policy->driver_data; freed in cpufreq_exit().
 */
struct private_data {
	struct device *cpu_dev;		/* device of the policy's CPU */
	struct regulator *cpu_reg;	/* CPU supply; may hold an ERR_PTR when
					 * no regulator is available */
	struct thermal_cooling_device *cdev; /* cooling device registered in
					      * cpufreq_ready(), may be NULL */
	unsigned int voltage_tolerance; /* in percentage */
};
95ceafd4 | 38 | |
21c36d35 BZ |
/*
 * sysfs attributes exported for each policy.  Slot [1] is deliberately left
 * NULL so cpufreq_init() can plug in the scaling_boost_freqs attribute when
 * the platform supports boost frequencies.
 */
static struct freq_attr *cpufreq_dt_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,	/* Extra space for boost-attr if required */
	NULL,
};
44 | ||
/*
 * set_target() - switch the policy's CPUs to the frequency at @index in the
 * frequency table, adjusting the CPU supply voltage around the clock change.
 *
 * Voltage is raised before the clock when scaling up and lowered after the
 * clock when scaling down, so the CPU is never clocked faster than its
 * current voltage supports.  Returns 0 on success or a negative errno.
 */
static int set_target(struct cpufreq_policy *policy, unsigned int index)
{
	struct dev_pm_opp *opp;
	struct cpufreq_frequency_table *freq_table = policy->freq_table;
	struct clk *cpu_clk = policy->clk;
	struct private_data *priv = policy->driver_data;
	struct device *cpu_dev = priv->cpu_dev;
	struct regulator *cpu_reg = priv->cpu_reg;
	unsigned long volt = 0, volt_old = 0, tol = 0;
	unsigned int old_freq, new_freq;
	long freq_Hz, freq_exact;
	int ret;

	/*
	 * Let the clock framework round the requested rate; fall back to the
	 * raw table value if clk_round_rate() fails or returns 0.
	 */
	freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000);
	if (freq_Hz <= 0)
		freq_Hz = freq_table[index].frequency * 1000;

	freq_exact = freq_Hz;
	new_freq = freq_Hz / 1000;
	old_freq = clk_get_rate(cpu_clk) / 1000;

	/* Look up the target voltage only when a regulator is present. */
	if (!IS_ERR(cpu_reg)) {
		unsigned long opp_freq;

		/* OPP lookups must be done under RCU read lock. */
		rcu_read_lock();
		opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_Hz);
		if (IS_ERR(opp)) {
			rcu_read_unlock();
			dev_err(cpu_dev, "failed to find OPP for %ld\n",
				freq_Hz);
			return PTR_ERR(opp);
		}
		volt = dev_pm_opp_get_voltage(opp);
		opp_freq = dev_pm_opp_get_freq(opp);
		rcu_read_unlock();
		tol = volt * priv->voltage_tolerance / 100;
		volt_old = regulator_get_voltage(cpu_reg);
		dev_dbg(cpu_dev, "Found OPP: %ld kHz, %ld uV\n",
			opp_freq / 1000, volt);
	}

	dev_dbg(cpu_dev, "%u MHz, %ld mV --> %u MHz, %ld mV\n",
		old_freq / 1000, (volt_old > 0) ? volt_old / 1000 : -1,
		new_freq / 1000, volt ? volt / 1000 : -1);

	/* scaling up?  scale voltage before frequency */
	if (!IS_ERR(cpu_reg) && new_freq > old_freq) {
		ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
		if (ret) {
			dev_err(cpu_dev, "failed to scale voltage up: %d\n",
				ret);
			return ret;
		}
	}

	ret = clk_set_rate(cpu_clk, freq_exact);
	if (ret) {
		dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
		/* Roll the voltage back if we had already raised it. */
		if (!IS_ERR(cpu_reg) && volt_old > 0)
			regulator_set_voltage_tol(cpu_reg, volt_old, tol);
		return ret;
	}

	/* scaling down?  scale voltage after frequency */
	if (!IS_ERR(cpu_reg) && new_freq < old_freq) {
		ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
		if (ret) {
			dev_err(cpu_dev, "failed to scale voltage down: %d\n",
				ret);
			/* Best effort: restore the old clock rate. */
			clk_set_rate(cpu_clk, old_freq * 1000);
		}
	}

	return ret;
}
120 | ||
/*
 * allocate_resources() - look up the device, supply regulator and clock for
 * @cpu, returning them through @cdev, @creg and @cclk on success.
 *
 * The regulator is optional: on failure other than -EPROBE_DEFER, an ERR_PTR
 * is passed back in *creg and the caller must test it with IS_ERR() before
 * use.  The clock is mandatory.  Returns 0 on success, -EPROBE_DEFER when a
 * resource is not yet registered, or another negative errno.  On success the
 * caller owns the regulator (regulator_put()) and clock (clk_put()).
 */
static int allocate_resources(int cpu, struct device **cdev,
			      struct regulator **creg, struct clk **cclk)
{
	struct device *cpu_dev;
	struct regulator *cpu_reg;
	struct clk *cpu_clk;
	int ret = 0;
	char *reg_cpu0 = "cpu0", *reg_cpu = "cpu", *reg;

	cpu_dev = get_cpu_device(cpu);
	if (!cpu_dev) {
		pr_err("failed to get cpu%d device\n", cpu);
		return -ENODEV;
	}

	/* Try "cpu0" for older DTs */
	if (!cpu)
		reg = reg_cpu0;
	else
		reg = reg_cpu;

try_again:
	cpu_reg = regulator_get_optional(cpu_dev, reg);
	if (IS_ERR(cpu_reg)) {
		/*
		 * If cpu's regulator supply node is present, but regulator is
		 * not yet registered, we should try deferring probe.
		 */
		if (PTR_ERR(cpu_reg) == -EPROBE_DEFER) {
			dev_dbg(cpu_dev, "cpu%d regulator not ready, retry\n",
				cpu);
			return -EPROBE_DEFER;
		}

		/* Try with "cpu-supply" */
		if (reg == reg_cpu0) {
			reg = reg_cpu;
			goto try_again;
		}

		/* Regulator is optional: carry on without one. */
		dev_dbg(cpu_dev, "no regulator for cpu%d: %ld\n",
			cpu, PTR_ERR(cpu_reg));
	}

	cpu_clk = clk_get(cpu_dev, NULL);
	if (IS_ERR(cpu_clk)) {
		/* put regulator */
		if (!IS_ERR(cpu_reg))
			regulator_put(cpu_reg);

		ret = PTR_ERR(cpu_clk);

		/*
		 * If cpu's clk node is present, but clock is not yet
		 * registered, we should try deferring probe.
		 */
		if (ret == -EPROBE_DEFER)
			dev_dbg(cpu_dev, "cpu%d clock not ready, retry\n", cpu);
		else
			dev_err(cpu_dev, "failed to get cpu%d clock: %d\n", cpu,
				ret);
	} else {
		/* All resources acquired: hand them to the caller. */
		*cdev = cpu_dev;
		*creg = cpu_reg;
		*cclk = cpu_clk;
	}

	return ret;
}
190 | ||
bbcf0719 | 191 | static int cpufreq_init(struct cpufreq_policy *policy) |
d2f31f1d VK |
192 | { |
193 | struct cpufreq_frequency_table *freq_table; | |
d2f31f1d VK |
194 | struct device_node *np; |
195 | struct private_data *priv; | |
196 | struct device *cpu_dev; | |
197 | struct regulator *cpu_reg; | |
198 | struct clk *cpu_clk; | |
045ee45c | 199 | unsigned long min_uV = ~0, max_uV = 0; |
d2f31f1d | 200 | unsigned int transition_latency; |
2e02d872 | 201 | bool need_update = false; |
d2f31f1d VK |
202 | int ret; |
203 | ||
95b61058 | 204 | ret = allocate_resources(policy->cpu, &cpu_dev, &cpu_reg, &cpu_clk); |
d2f31f1d | 205 | if (ret) { |
edd52b1c | 206 | pr_err("%s: Failed to allocate resources: %d\n", __func__, ret); |
d2f31f1d VK |
207 | return ret; |
208 | } | |
48a8624b | 209 | |
d2f31f1d VK |
210 | np = of_node_get(cpu_dev->of_node); |
211 | if (!np) { | |
212 | dev_err(cpu_dev, "failed to find cpu%d node\n", policy->cpu); | |
213 | ret = -ENOENT; | |
214 | goto out_put_reg_clk; | |
95ceafd4 SG |
215 | } |
216 | ||
2e02d872 VK |
217 | /* Get OPP-sharing information from "operating-points-v2" bindings */ |
218 | ret = of_get_cpus_sharing_opps(cpu_dev, policy->cpus); | |
219 | if (ret) { | |
220 | /* | |
221 | * operating-points-v2 not supported, fallback to old method of | |
222 | * finding shared-OPPs for backward compatibility. | |
223 | */ | |
224 | if (ret == -ENOENT) | |
225 | need_update = true; | |
226 | else | |
227 | goto out_node_put; | |
228 | } | |
229 | ||
230 | /* | |
231 | * Initialize OPP tables for all policy->cpus. They will be shared by | |
232 | * all CPUs which have marked their CPUs shared with OPP bindings. | |
233 | * | |
234 | * For platforms not using operating-points-v2 bindings, we do this | |
235 | * before updating policy->cpus. Otherwise, we will end up creating | |
236 | * duplicate OPPs for policy->cpus. | |
237 | * | |
238 | * OPPs might be populated at runtime, don't check for error here | |
239 | */ | |
240 | of_cpumask_init_opp_table(policy->cpus); | |
241 | ||
7d5d0c8b VK |
242 | /* |
243 | * But we need OPP table to function so if it is not there let's | |
244 | * give platform code chance to provide it for us. | |
245 | */ | |
246 | ret = dev_pm_opp_get_opp_count(cpu_dev); | |
247 | if (ret <= 0) { | |
248 | pr_debug("OPP table is not ready, deferring probe\n"); | |
249 | ret = -EPROBE_DEFER; | |
250 | goto out_free_opp; | |
251 | } | |
252 | ||
2e02d872 VK |
253 | if (need_update) { |
254 | struct cpufreq_dt_platform_data *pd = cpufreq_get_driver_data(); | |
255 | ||
256 | if (!pd || !pd->independent_clocks) | |
257 | cpumask_setall(policy->cpus); | |
258 | ||
259 | /* | |
260 | * OPP tables are initialized only for policy->cpu, do it for | |
261 | * others as well. | |
262 | */ | |
263 | set_cpus_sharing_opps(cpu_dev, policy->cpus); | |
264 | ||
265 | of_property_read_u32(np, "clock-latency", &transition_latency); | |
266 | } else { | |
267 | transition_latency = dev_pm_opp_get_max_clock_latency(cpu_dev); | |
268 | } | |
95ceafd4 | 269 | |
d2f31f1d VK |
270 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); |
271 | if (!priv) { | |
272 | ret = -ENOMEM; | |
2f0f609f | 273 | goto out_free_opp; |
95ceafd4 SG |
274 | } |
275 | ||
d2f31f1d | 276 | of_property_read_u32(np, "voltage-tolerance", &priv->voltage_tolerance); |
95ceafd4 | 277 | |
2e02d872 | 278 | if (!transition_latency) |
95ceafd4 SG |
279 | transition_latency = CPUFREQ_ETERNAL; |
280 | ||
43c638e3 | 281 | if (!IS_ERR(cpu_reg)) { |
045ee45c | 282 | unsigned long opp_freq = 0; |
95ceafd4 SG |
283 | |
284 | /* | |
045ee45c LS |
285 | * Disable any OPPs where the connected regulator isn't able to |
286 | * provide the specified voltage and record minimum and maximum | |
287 | * voltage levels. | |
95ceafd4 | 288 | */ |
045ee45c LS |
289 | while (1) { |
290 | struct dev_pm_opp *opp; | |
291 | unsigned long opp_uV, tol_uV; | |
292 | ||
293 | rcu_read_lock(); | |
294 | opp = dev_pm_opp_find_freq_ceil(cpu_dev, &opp_freq); | |
295 | if (IS_ERR(opp)) { | |
296 | rcu_read_unlock(); | |
297 | break; | |
298 | } | |
299 | opp_uV = dev_pm_opp_get_voltage(opp); | |
300 | rcu_read_unlock(); | |
301 | ||
302 | tol_uV = opp_uV * priv->voltage_tolerance / 100; | |
303 | if (regulator_is_supported_voltage(cpu_reg, opp_uV, | |
304 | opp_uV + tol_uV)) { | |
305 | if (opp_uV < min_uV) | |
306 | min_uV = opp_uV; | |
307 | if (opp_uV > max_uV) | |
308 | max_uV = opp_uV; | |
309 | } else { | |
310 | dev_pm_opp_disable(cpu_dev, opp_freq); | |
311 | } | |
312 | ||
313 | opp_freq++; | |
314 | } | |
315 | ||
95ceafd4 SG |
316 | ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV); |
317 | if (ret > 0) | |
318 | transition_latency += ret * 1000; | |
319 | } | |
320 | ||
045ee45c LS |
321 | ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table); |
322 | if (ret) { | |
323 | pr_err("failed to init cpufreq table: %d\n", ret); | |
324 | goto out_free_priv; | |
325 | } | |
326 | ||
d2f31f1d VK |
327 | priv->cpu_dev = cpu_dev; |
328 | priv->cpu_reg = cpu_reg; | |
329 | policy->driver_data = priv; | |
330 | ||
331 | policy->clk = cpu_clk; | |
34e5a527 TP |
332 | ret = cpufreq_table_validate_and_show(policy, freq_table); |
333 | if (ret) { | |
334 | dev_err(cpu_dev, "%s: invalid frequency table: %d\n", __func__, | |
335 | ret); | |
9a004428 | 336 | goto out_free_cpufreq_table; |
d15fa862 VK |
337 | } |
338 | ||
339 | /* Support turbo/boost mode */ | |
340 | if (policy_has_boost_freq(policy)) { | |
341 | /* This gets disabled by core on driver unregister */ | |
342 | ret = cpufreq_enable_boost_support(); | |
343 | if (ret) | |
344 | goto out_free_cpufreq_table; | |
21c36d35 | 345 | cpufreq_dt_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs; |
34e5a527 TP |
346 | } |
347 | ||
348 | policy->cpuinfo.transition_latency = transition_latency; | |
349 | ||
f9739d27 LS |
350 | of_node_put(np); |
351 | ||
95ceafd4 SG |
352 | return 0; |
353 | ||
9a004428 | 354 | out_free_cpufreq_table: |
5d4879cd | 355 | dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); |
045ee45c LS |
356 | out_free_priv: |
357 | kfree(priv); | |
2f0f609f | 358 | out_free_opp: |
2e02d872 VK |
359 | of_cpumask_free_opp_table(policy->cpus); |
360 | out_node_put: | |
d2f31f1d VK |
361 | of_node_put(np); |
362 | out_put_reg_clk: | |
ed4b053c | 363 | clk_put(cpu_clk); |
e3beb0ac LS |
364 | if (!IS_ERR(cpu_reg)) |
365 | regulator_put(cpu_reg); | |
d2f31f1d VK |
366 | |
367 | return ret; | |
368 | } | |
369 | ||
/*
 * cpufreq_exit() - cpufreq core ->exit() callback; tear down a policy.
 *
 * Releases everything cpufreq_init() and cpufreq_ready() set up: the
 * cooling device, the frequency table, the OPP tables of all related CPUs,
 * the clock, the (optional) regulator, and the private data itself.
 */
static int cpufreq_exit(struct cpufreq_policy *policy)
{
	struct private_data *priv = policy->driver_data;

	cpufreq_cooling_unregister(priv->cdev);
	dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
	of_cpumask_free_opp_table(policy->related_cpus);
	clk_put(policy->clk);
	/* cpu_reg may hold an ERR_PTR when no regulator was found. */
	if (!IS_ERR(priv->cpu_reg))
		regulator_put(priv->cpu_reg);
	kfree(priv);

	return 0;
}
384 | ||
9a004428 VK |
/*
 * cpufreq_ready() - cpufreq core ->ready() callback, invoked once the
 * policy is fully initialized.
 *
 * Registers a cpufreq cooling device when the CPU's DT node declares
 * "#cooling-cells".  Registration failure is logged but not fatal: the
 * driver keeps running without thermal throttling.
 */
static void cpufreq_ready(struct cpufreq_policy *policy)
{
	struct private_data *priv = policy->driver_data;
	struct device_node *np = of_node_get(priv->cpu_dev->of_node);

	if (WARN_ON(!np))
		return;

	/*
	 * For now, just loading the cooling device;
	 * thermal DT code takes care of matching them.
	 */
	if (of_find_property(np, "#cooling-cells", NULL)) {
		priv->cdev = of_cpufreq_cooling_register(np,
							 policy->related_cpus);
		if (IS_ERR(priv->cdev)) {
			dev_err(priv->cpu_dev,
				"running cpufreq without cooling device: %ld\n",
				PTR_ERR(priv->cdev));

			/* NULL is safe for cpufreq_cooling_unregister(). */
			priv->cdev = NULL;
		}
	}

	of_node_put(np);
}
411 | ||
/* cpufreq driver description wiring the callbacks defined above. */
static struct cpufreq_driver dt_cpufreq_driver = {
	.flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
	.verify = cpufreq_generic_frequency_table_verify,
	.target_index = set_target,
	.get = cpufreq_generic_get,
	.init = cpufreq_init,
	.exit = cpufreq_exit,
	.ready = cpufreq_ready,
	.name = "cpufreq-dt",
	.attr = cpufreq_dt_attr,
};
423 | ||
/*
 * dt_cpufreq_probe() - platform driver probe.
 *
 * Verifies that CPU0's clock and regulator are available (deferring probe
 * otherwise), stashes the platform data for cpufreq_init(), and registers
 * the cpufreq driver.  Per-cluster initialization happens later in
 * cpufreq_init().  Returns 0 on success or a negative errno.
 */
static int dt_cpufreq_probe(struct platform_device *pdev)
{
	struct device *cpu_dev;
	struct regulator *cpu_reg;
	struct clk *cpu_clk;
	int ret;

	/*
	 * All per-cluster (CPUs sharing clock/voltages) initialization is done
	 * from ->init(). In probe(), we just need to make sure that clk and
	 * regulators are available. Else defer probe and retry.
	 *
	 * FIXME: Is checking this only for CPU0 sufficient ?
	 */
	ret = allocate_resources(0, &cpu_dev, &cpu_reg, &cpu_clk);
	if (ret)
		return ret;

	/* Only an availability check: drop the references right away. */
	clk_put(cpu_clk);
	if (!IS_ERR(cpu_reg))
		regulator_put(cpu_reg);

	dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev);

	ret = cpufreq_register_driver(&dt_cpufreq_driver);
	if (ret)
		dev_err(cpu_dev, "failed register driver: %d\n", ret);

	return ret;
}
5553f9e2 | 454 | |
/*
 * dt_cpufreq_remove() - platform driver remove; unregister the cpufreq
 * driver.  The cpufreq core calls cpufreq_exit() for each policy.
 */
static int dt_cpufreq_remove(struct platform_device *pdev)
{
	cpufreq_unregister_driver(&dt_cpufreq_driver);
	return 0;
}
460 | ||
/*
 * Platform driver glue: the "cpufreq-dt" platform device is created by
 * platform code, triggering probe here.
 */
static struct platform_driver dt_cpufreq_platdrv = {
	.driver = {
		.name	= "cpufreq-dt",
	},
	.probe		= dt_cpufreq_probe,
	.remove		= dt_cpufreq_remove,
};
module_platform_driver(dt_cpufreq_platdrv);
95ceafd4 | 469 | |
07949bf9 | 470 | MODULE_ALIAS("platform:cpufreq-dt"); |
748c8766 | 471 | MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>"); |
95ceafd4 | 472 | MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>"); |
bbcf0719 | 473 | MODULE_DESCRIPTION("Generic cpufreq driver"); |
95ceafd4 | 474 | MODULE_LICENSE("GPL"); |