/*
 * Generic OPP Interface
 *
 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
 *	Nishanth Menon
 *	Romit Dasgupta
 *	Kevin Hilman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/regulator/consumer.h>

#include "opp.h"

/*
 * The root of the list of all opp-tables. All opp_table structures branch off
 * from here, with each opp_table containing the list of opps it supports in
 * various states of availability.
 */
LIST_HEAD(opp_tables);
/* Lock to allow exclusive modification to the device and opp lists */
DEFINE_MUTEX(opp_table_lock);

#define opp_rcu_lockdep_assert()					\
do {									\
	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
			 !lockdep_is_held(&opp_table_lock),		\
			 "Missing rcu_read_lock() or "			\
			 "opp_table_lock protection");			\
} while (0)

static struct opp_device *_find_opp_dev(const struct device *dev,
					struct opp_table *opp_table)
{
	struct opp_device *opp_dev;

	list_for_each_entry(opp_dev, &opp_table->dev_list, node)
		if (opp_dev->dev == dev)
			return opp_dev;

	return NULL;
}

/**
 * _find_opp_table() - find opp_table struct using device pointer
 * @dev: device pointer used to lookup OPP table
 *
 * Search the list of OPP tables for the one containing the matching device.
 * Does an RCU reader operation to grab the pointer needed.
 *
 * Return: pointer to 'struct opp_table' if found, otherwise -ENODEV or
 * -EINVAL based on type of error.
 *
 * Locking: For readers, this function must be called under rcu_read_lock().
 * opp_table is an RCU protected pointer, which means that opp_table is valid
 * as long as we are under RCU lock.
 *
 * For writers, this function must be called with opp_table_lock held.
 */
struct opp_table *_find_opp_table(struct device *dev)
{
	struct opp_table *opp_table;

	opp_rcu_lockdep_assert();

	if (IS_ERR_OR_NULL(dev)) {
		pr_err("%s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	list_for_each_entry_rcu(opp_table, &opp_tables, node)
		if (_find_opp_dev(dev, opp_table))
			return opp_table;

	return ERR_PTR(-ENODEV);
}

/**
 * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
 * @opp: opp for which voltage has to be returned
 *
 * Return: voltage in micro volt corresponding to the opp, else
 * return 0
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. This means that opp which could have been fetched by
 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
 * under RCU lock. The pointer returned by the opp_find_freq family must be
 * used in the same section as the usage of this function with the pointer
 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
 * pointer.
 */
unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
{
	struct dev_pm_opp *tmp_opp;
	unsigned long v = 0;

	opp_rcu_lockdep_assert();

	tmp_opp = rcu_dereference(opp);
	if (IS_ERR_OR_NULL(tmp_opp))
		pr_err("%s: Invalid parameters\n", __func__);
	else
		v = tmp_opp->u_volt;

	return v;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);

/**
 * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
 * @opp: opp for which frequency has to be returned
 *
 * Return: frequency in hertz corresponding to the opp, else
 * return 0
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. This means that opp which could have been fetched by
 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
 * under RCU lock. The pointer returned by the opp_find_freq family must be
 * used in the same section as the usage of this function with the pointer
 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
 * pointer.
 */
unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
{
	struct dev_pm_opp *tmp_opp;
	unsigned long f = 0;

	opp_rcu_lockdep_assert();

	tmp_opp = rcu_dereference(opp);
	if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available)
		pr_err("%s: Invalid parameters\n", __func__);
	else
		f = tmp_opp->rate;

	return f;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);

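/*
 * Example (hypothetical driver code, not part of this file): a consumer is
 * expected to look up an OPP and read its frequency/voltage inside a single
 * RCU read-side critical section, roughly like this:
 *
 *	unsigned long freq = target_hz, volt;
 *	struct dev_pm_opp *opp;
 *
 *	rcu_read_lock();
 *	opp = dev_pm_opp_find_freq_ceil(dev, &freq);
 *	if (!IS_ERR(opp))
 *		volt = dev_pm_opp_get_voltage(opp);
 *	rcu_read_unlock();
 *
 * 'dev' and 'target_hz' are placeholders; the opp pointer must not be used
 * after rcu_read_unlock().
 */
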
/**
 * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
 * @opp: opp for which turbo mode is being verified
 *
 * Turbo OPPs are not for normal use, and can be enabled (under certain
 * conditions) for short durations of time to finish high throughput work
 * quickly. Running on them for longer times may overheat the chip.
 *
 * Return: true if opp is turbo opp, else false.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. This means that opp which could have been fetched by
 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
 * under RCU lock. The pointer returned by the opp_find_freq family must be
 * used in the same section as the usage of this function with the pointer
 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
 * pointer.
 */
bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
{
	struct dev_pm_opp *tmp_opp;

	opp_rcu_lockdep_assert();

	tmp_opp = rcu_dereference(opp);
	if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) {
		pr_err("%s: Invalid parameters\n", __func__);
		return false;
	}

	return tmp_opp->turbo;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);

/**
 * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds
 * @dev: device for which we do this operation
 *
 * Return: This function returns the max clock latency in nanoseconds.
 *
 * Locking: This function takes rcu_read_lock().
 */
unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
{
	struct opp_table *opp_table;
	unsigned long clock_latency_ns;

	rcu_read_lock();

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		clock_latency_ns = 0;
	else
		clock_latency_ns = opp_table->clock_latency_ns_max;

	rcu_read_unlock();
	return clock_latency_ns;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);

/**
 * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds
 * @dev: device for which we do this operation
 *
 * Return: This function returns the max voltage latency in nanoseconds.
 *
 * Locking: This function takes rcu_read_lock().
 */
unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *opp;
	struct regulator *reg;
	unsigned long latency_ns = 0;
	unsigned long min_uV = ~0, max_uV = 0;
	int ret;

	rcu_read_lock();

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		rcu_read_unlock();
		return 0;
	}

	reg = opp_table->regulator;
	if (IS_ERR(reg)) {
		/* Regulator may not be required for device */
		rcu_read_unlock();
		return 0;
	}

	list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
		if (!opp->available)
			continue;

		if (opp->u_volt_min < min_uV)
			min_uV = opp->u_volt_min;
		if (opp->u_volt_max > max_uV)
			max_uV = opp->u_volt_max;
	}

	rcu_read_unlock();

	/*
	 * The caller needs to ensure that opp_table (and hence the regulator)
	 * isn't freed while we are executing this routine.
	 */
	ret = regulator_set_voltage_time(reg, min_uV, max_uV);
	if (ret > 0)
		latency_ns = ret * 1000;

	return latency_ns;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_volt_latency);

/**
 * dev_pm_opp_get_max_transition_latency() - Get max transition latency in
 *					     nanoseconds
 * @dev: device for which we do this operation
 *
 * Return: This function returns the max transition latency, in nanoseconds, to
 * switch from one OPP to another.
 *
 * Locking: This function takes rcu_read_lock().
 */
unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
{
	return dev_pm_opp_get_max_volt_latency(dev) +
		dev_pm_opp_get_max_clock_latency(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency);

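/*
 * Example (hypothetical, not part of this file): a cpufreq driver could use
 * the value above to fill in its advertised transition latency, e.g.:
 *
 *	policy->cpuinfo.transition_latency =
 *			dev_pm_opp_get_max_transition_latency(cpu_dev);
 *
 * where 'policy' is the driver's struct cpufreq_policy and 'cpu_dev' is the
 * CPU device.
 */
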
/**
 * dev_pm_opp_get_suspend_opp() - Get suspend opp
 * @dev: device for which we do this operation
 *
 * Return: This function returns a pointer to the suspend opp if it is
 * defined and available, otherwise it returns NULL.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
{
	struct opp_table *opp_table;

	opp_rcu_lockdep_assert();

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table) || !opp_table->suspend_opp ||
	    !opp_table->suspend_opp->available)
		return NULL;

	return opp_table->suspend_opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp);

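/*
 * Example (hypothetical, not part of this file): a driver preparing for
 * system suspend could switch to the suspend OPP roughly like this:
 *
 *	unsigned long suspend_freq = 0;
 *
 *	rcu_read_lock();
 *	opp = dev_pm_opp_get_suspend_opp(dev);
 *	if (opp)
 *		suspend_freq = dev_pm_opp_get_freq(opp);
 *	rcu_read_unlock();
 *
 *	if (suspend_freq)
 *		dev_pm_opp_set_rate(dev, suspend_freq);
 */
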
/**
 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp table
 * @dev: device for which we do this operation
 *
 * Return: This function returns the number of available opps if there are any,
 * 0 if none are available, or the corresponding error value otherwise.
 *
 * Locking: This function takes rcu_read_lock().
 */
int dev_pm_opp_get_opp_count(struct device *dev)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *temp_opp;
	int count = 0;

	rcu_read_lock();

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		count = PTR_ERR(opp_table);
		dev_err(dev, "%s: OPP table not found (%d)\n",
			__func__, count);
		goto out_unlock;
	}

	list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
		if (temp_opp->available)
			count++;
	}

out_unlock:
	rcu_read_unlock();
	return count;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);

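/*
 * Example (hypothetical, not part of this file): callers typically use the
 * count to size a frequency table before walking the OPPs, e.g.:
 *
 *	int num = dev_pm_opp_get_opp_count(dev);
 *
 *	if (num <= 0)
 *		return num ? num : -ENODEV;
 *	table = kcalloc(num + 1, sizeof(*table), GFP_KERNEL);
 *
 * 'table' is a hypothetical driver-private array; the extra slot is a common
 * convention for a terminating entry.
 */
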
/**
 * dev_pm_opp_find_freq_exact() - search for an exact frequency
 * @dev: device for which we do this operation
 * @freq: frequency to search for
 * @available: true/false - match for available opp
 *
 * Return: Searches for an exact match in the opp table and returns a pointer
 * to the matching opp if found, else returns ERR_PTR in case of error and
 * should be handled using IS_ERR. Error return values can be:
 * EINVAL: for bad pointer
 * ERANGE: no match found for search
 * ENODEV: if device not found in list of registered devices
 *
 * Note: available is a modifier for the search. If available=true, then the
 * match is for an exact matching frequency which is available in the stored
 * OPP table. If false, the match is for an exact frequency which is not
 * available.
 *
 * This provides a mechanism to enable an opp which is not available currently
 * or the opposite as well.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
					      unsigned long freq,
					      bool available)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	opp_rcu_lockdep_assert();

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		int r = PTR_ERR(opp_table);

		dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
		return ERR_PTR(r);
	}

	list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
		if (temp_opp->available == available &&
		    temp_opp->rate == freq) {
			opp = temp_opp;
			break;
		}
	}

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);

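/*
 * Example (hypothetical, not part of this file): checking whether a currently
 * unavailable OPP exists and re-enabling it could look roughly like this:
 *
 *	rcu_read_lock();
 *	opp = dev_pm_opp_find_freq_exact(dev, 800000000, false);
 *	rcu_read_unlock();
 *
 *	if (!IS_ERR(opp))
 *		dev_pm_opp_enable(dev, 800000000);
 *
 * dev_pm_opp_enable() acquires the opp_table mutex internally and must not be
 * called under rcu_read_lock(), hence the lookup and the enable are split.
 */
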
static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table,
						   unsigned long *freq)
{
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
		if (temp_opp->available && temp_opp->rate >= *freq) {
			opp = temp_opp;
			*freq = opp->rate;
			break;
		}
	}

	return opp;
}

/**
 * dev_pm_opp_find_freq_ceil() - Search for a rounded ceil freq
 * @dev: device for which we do this operation
 * @freq: Start frequency
 *
 * Search for the matching ceil *available* OPP from a starting freq
 * for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL: for bad pointer
 * ERANGE: no match found for search
 * ENODEV: if device not found in list of registered devices
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
					     unsigned long *freq)
{
	struct opp_table *opp_table;

	opp_rcu_lockdep_assert();

	if (!dev || !freq) {
		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
		return ERR_PTR(-EINVAL);
	}

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return ERR_CAST(opp_table);

	return _find_freq_ceil(opp_table, freq);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);

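/*
 * Example (hypothetical, not part of this file): because *freq is updated to
 * the matched rate, the ceil lookup can be used to walk all available OPPs in
 * ascending order:
 *
 *	unsigned long freq = 0;
 *	struct dev_pm_opp *opp;
 *
 *	rcu_read_lock();
 *	while (!IS_ERR(opp = dev_pm_opp_find_freq_ceil(dev, &freq))) {
 *		do_something_with(opp, freq);	(driver-specific work)
 *		freq++;
 *	}
 *	rcu_read_unlock();
 *
 * 'do_something_with' is a placeholder for whatever the driver needs to do
 * with each OPP while still under the RCU lock.
 */
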
/**
 * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
 * @dev: device for which we do this operation
 * @freq: Start frequency
 *
 * Search for the matching floor *available* OPP from a starting freq
 * for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL: for bad pointer
 * ERANGE: no match found for search
 * ENODEV: if device not found in list of registered devices
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
					      unsigned long *freq)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	opp_rcu_lockdep_assert();

	if (!dev || !freq) {
		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
		return ERR_PTR(-EINVAL);
	}

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return ERR_CAST(opp_table);

	list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
		if (temp_opp->available) {
			/* go to the next node, before choosing prev */
			if (temp_opp->rate > *freq)
				break;
			else
				opp = temp_opp;
		}
	}
	if (!IS_ERR(opp))
		*freq = opp->rate;

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);

/*
 * The caller needs to ensure that opp_table (and hence the clk) isn't freed
 * while the clk returned here is used.
 */
static struct clk *_get_opp_clk(struct device *dev)
{
	struct opp_table *opp_table;
	struct clk *clk;

	rcu_read_lock();

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		dev_err(dev, "%s: device opp doesn't exist\n", __func__);
		clk = ERR_CAST(opp_table);
		goto unlock;
	}

	clk = opp_table->clk;
	if (IS_ERR(clk))
		dev_err(dev, "%s: No clock available for the device\n",
			__func__);

unlock:
	rcu_read_unlock();
	return clk;
}

static int _set_opp_voltage(struct device *dev, struct regulator *reg,
			    unsigned long u_volt, unsigned long u_volt_min,
			    unsigned long u_volt_max)
{
	int ret;

	/* Regulator not available for device */
	if (IS_ERR(reg)) {
		dev_dbg(dev, "%s: regulator not available: %ld\n", __func__,
			PTR_ERR(reg));
		return 0;
	}

	dev_dbg(dev, "%s: voltages (uV): %lu %lu %lu\n", __func__, u_volt_min,
		u_volt, u_volt_max);

	ret = regulator_set_voltage_triplet(reg, u_volt_min, u_volt,
					    u_volt_max);
	if (ret)
		dev_err(dev, "%s: failed to set voltage (%lu %lu %lu uV): %d\n",
			__func__, u_volt_min, u_volt, u_volt_max, ret);

	return ret;
}

/**
 * dev_pm_opp_set_rate() - Configure new OPP based on frequency
 * @dev: device for which we do this operation
 * @target_freq: frequency to achieve
 *
 * This configures the power-supplies and clock source to the levels specified
 * by the OPP corresponding to the target_freq.
 *
 * Locking: This function takes rcu_read_lock().
 */
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *old_opp, *opp;
	struct regulator *reg;
	struct clk *clk;
	unsigned long freq, old_freq;
	unsigned long u_volt, u_volt_min, u_volt_max;
	int ret;

	if (unlikely(!target_freq)) {
		dev_err(dev, "%s: Invalid target frequency %lu\n", __func__,
			target_freq);
		return -EINVAL;
	}

	clk = _get_opp_clk(dev);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	freq = clk_round_rate(clk, target_freq);
	if ((long)freq <= 0)
		freq = target_freq;

	old_freq = clk_get_rate(clk);

	/* Return early if nothing to do */
	if (old_freq == freq) {
		dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
			__func__, freq);
		return 0;
	}

	rcu_read_lock();

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		dev_err(dev, "%s: device opp doesn't exist\n", __func__);
		rcu_read_unlock();
		return PTR_ERR(opp_table);
	}

	old_opp = _find_freq_ceil(opp_table, &old_freq);
	if (IS_ERR(old_opp)) {
		dev_err(dev, "%s: failed to find current OPP for freq %lu (%ld)\n",
			__func__, old_freq, PTR_ERR(old_opp));
	}

	opp = _find_freq_ceil(opp_table, &freq);
	if (IS_ERR(opp)) {
		ret = PTR_ERR(opp);
		dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
			__func__, freq, ret);
		rcu_read_unlock();
		return ret;
	}

	u_volt = opp->u_volt;
	u_volt_min = opp->u_volt_min;
	u_volt_max = opp->u_volt_max;

	reg = opp_table->regulator;

	rcu_read_unlock();

	/* Scaling up? Scale voltage before frequency */
	if (freq > old_freq) {
		ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min,
				       u_volt_max);
		if (ret)
			goto restore_voltage;
	}

	/* Change frequency */

	dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n",
		__func__, old_freq, freq);

	ret = clk_set_rate(clk, freq);
	if (ret) {
		dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
			ret);
		goto restore_voltage;
	}

	/* Scaling down? Scale voltage after frequency */
	if (freq < old_freq) {
		ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min,
				       u_volt_max);
		if (ret)
			goto restore_freq;
	}

	return 0;

restore_freq:
	if (clk_set_rate(clk, old_freq))
		dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
			__func__, old_freq);
restore_voltage:
	/* This shouldn't harm even if the voltages weren't updated earlier */
	if (!IS_ERR(old_opp))
		_set_opp_voltage(dev, reg, old_opp->u_volt,
				 old_opp->u_volt_min, old_opp->u_volt_max);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);

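/*
 * Example (hypothetical driver code, not part of this file): a typical DVFS
 * path only has to pass the target frequency; voltage ordering, clk rounding
 * and error recovery are handled above:
 *
 *	static int my_devfreq_target(struct device *dev, unsigned long hz)
 *	{
 *		return dev_pm_opp_set_rate(dev, hz);
 *	}
 *
 * 'my_devfreq_target' is a placeholder name; for voltage scaling to happen,
 * the regulator must have been registered earlier with
 * dev_pm_opp_set_regulator().
 */
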
2c2709dc
VK
688/* OPP-dev Helpers */
689static void _kfree_opp_dev_rcu(struct rcu_head *head)
06441658 690{
2c2709dc 691 struct opp_device *opp_dev;
06441658 692
2c2709dc
VK
693 opp_dev = container_of(head, struct opp_device, rcu_head);
694 kfree_rcu(opp_dev, rcu_head);
06441658
VK
695}
696
2c2709dc
VK
697static void _remove_opp_dev(struct opp_device *opp_dev,
698 struct opp_table *opp_table)
06441658 699{
2c2709dc
VK
700 opp_debug_unregister(opp_dev, opp_table);
701 list_del(&opp_dev->node);
702 call_srcu(&opp_table->srcu_head.srcu, &opp_dev->rcu_head,
703 _kfree_opp_dev_rcu);
06441658
VK
704}
705
2c2709dc
VK
706struct opp_device *_add_opp_dev(const struct device *dev,
707 struct opp_table *opp_table)
06441658 708{
2c2709dc 709 struct opp_device *opp_dev;
deaa5146 710 int ret;
06441658 711
2c2709dc
VK
712 opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL);
713 if (!opp_dev)
06441658
VK
714 return NULL;
715
2c2709dc
VK
716 /* Initialize opp-dev */
717 opp_dev->dev = dev;
718 list_add_rcu(&opp_dev->node, &opp_table->dev_list);
06441658 719
2c2709dc
VK
720 /* Create debugfs entries for the opp_table */
721 ret = opp_debug_register(opp_dev, opp_table);
deaa5146
VK
722 if (ret)
723 dev_err(dev, "%s: Failed to register opp debugfs (%d)\n",
724 __func__, ret);
725
2c2709dc 726 return opp_dev;
06441658
VK
727}
728
984f16c8 729/**
2c2709dc 730 * _add_opp_table() - Find OPP table or allocate a new one
984f16c8
NM
731 * @dev: device for which we do this operation
732 *
aa5f2f85
VK
733 * It tries to find an existing table first, if it couldn't find one, it
734 * allocates a new OPP table and returns that.
984f16c8 735 *
2c2709dc 736 * Return: valid opp_table pointer if success, else NULL.
984f16c8 737 */
2c2709dc 738static struct opp_table *_add_opp_table(struct device *dev)
07cce74a 739{
2c2709dc
VK
740 struct opp_table *opp_table;
741 struct opp_device *opp_dev;
d54974c2 742 int ret;
07cce74a 743
2c2709dc
VK
744 /* Check for existing table for 'dev' first */
745 opp_table = _find_opp_table(dev);
746 if (!IS_ERR(opp_table))
747 return opp_table;
07cce74a
VK
748
749 /*
2c2709dc 750 * Allocate a new OPP table. In the infrequent case where a new
07cce74a
VK
 * device needs to be added, we pay this penalty.
752 */
2c2709dc
VK
753 opp_table = kzalloc(sizeof(*opp_table), GFP_KERNEL);
754 if (!opp_table)
07cce74a
VK
755 return NULL;
756
2c2709dc 757 INIT_LIST_HEAD(&opp_table->dev_list);
06441658 758
2c2709dc
VK
759 opp_dev = _add_opp_dev(dev, opp_table);
760 if (!opp_dev) {
761 kfree(opp_table);
06441658
VK
762 return NULL;
763 }
764
f47b72a1 765 _of_init_opp_table(opp_table, dev);
50f8cfbd 766
0c717d0f 767 /* Set regulator to a non-NULL error value */
2c2709dc 768 opp_table->regulator = ERR_PTR(-ENXIO);
0c717d0f 769
d54974c2 770 /* Find clk for the device */
2c2709dc
VK
771 opp_table->clk = clk_get(dev, NULL);
772 if (IS_ERR(opp_table->clk)) {
773 ret = PTR_ERR(opp_table->clk);
d54974c2
VK
774 if (ret != -EPROBE_DEFER)
775 dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__,
776 ret);
777 }
778
2c2709dc
VK
779 srcu_init_notifier_head(&opp_table->srcu_head);
780 INIT_LIST_HEAD(&opp_table->opp_list);
07cce74a 781
2c2709dc
VK
782 /* Secure the device table modification */
783 list_add_rcu(&opp_table->node, &opp_tables);
784 return opp_table;
07cce74a
VK
785}
786
984f16c8 787/**
2c2709dc 788 * _kfree_device_rcu() - Free opp_table RCU handler
737002b5 789 * @head: RCU head
984f16c8 790 */
737002b5 791static void _kfree_device_rcu(struct rcu_head *head)
e1f60b29 792{
2c2709dc
VK
793 struct opp_table *opp_table = container_of(head, struct opp_table,
794 rcu_head);
6ce4184d 795
2c2709dc 796 kfree_rcu(opp_table, rcu_head);
e1f60b29 797}
38393409
VK
798
799/**
2c2709dc
VK
800 * _remove_opp_table() - Removes a OPP table
801 * @opp_table: OPP table to be removed.
38393409 802 *
2c2709dc 803 * Removes/frees OPP table if it doesn't contain any OPPs.
38393409 804 */
2c2709dc 805static void _remove_opp_table(struct opp_table *opp_table)
38393409 806{
2c2709dc 807 struct opp_device *opp_dev;
06441658 808
2c2709dc 809 if (!list_empty(&opp_table->opp_list))
3bac42ca
VK
810 return;
811
2c2709dc 812 if (opp_table->supported_hw)
7de36b0a
VK
813 return;
814
2c2709dc 815 if (opp_table->prop_name)
01fb4d3c
VK
816 return;
817
2c2709dc 818 if (!IS_ERR(opp_table->regulator))
9f8ea969
VK
819 return;
820
d54974c2 821 /* Release clk */
2c2709dc
VK
822 if (!IS_ERR(opp_table->clk))
823 clk_put(opp_table->clk);
d54974c2 824
2c2709dc
VK
825 opp_dev = list_first_entry(&opp_table->dev_list, struct opp_device,
826 node);
06441658 827
2c2709dc 828 _remove_opp_dev(opp_dev, opp_table);
06441658
VK
829
830 /* dev_list must be empty now */
2c2709dc 831 WARN_ON(!list_empty(&opp_table->dev_list));
06441658 832
2c2709dc
VK
833 list_del_rcu(&opp_table->node);
834 call_srcu(&opp_table->srcu_head.srcu, &opp_table->rcu_head,
3bac42ca 835 _kfree_device_rcu);
38393409 836}
e1f60b29 837
984f16c8
NM
838/**
839 * _kfree_opp_rcu() - Free OPP RCU handler
840 * @head: RCU head
841 */
327854c8 842static void _kfree_opp_rcu(struct rcu_head *head)
129eec55
VK
843{
844 struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head);
845
846 kfree_rcu(opp, rcu_head);
847}
848
984f16c8
NM
849/**
850 * _opp_remove() - Remove an OPP from a table definition
2c2709dc 851 * @opp_table: points back to the opp_table struct this opp belongs to
984f16c8 852 * @opp: pointer to the OPP to remove
23dacf6d 853 * @notify: OPP_EVENT_REMOVE notification should be sent or not
984f16c8 854 *
2c2709dc 855 * This function removes an opp definition from the opp table.
984f16c8 856 *
2c2709dc 857 * Locking: The internal opp_table and opp structures are RCU protected.
984f16c8
NM
858 * It is assumed that the caller holds required mutex for an RCU updater
859 * strategy.
860 */
f47b72a1
VK
861void _opp_remove(struct opp_table *opp_table, struct dev_pm_opp *opp,
862 bool notify)
129eec55
VK
863{
864 /*
865 * Notify the changes in the availability of the operable
866 * frequency/voltage list.
867 */
23dacf6d 868 if (notify)
2c2709dc
VK
869 srcu_notifier_call_chain(&opp_table->srcu_head,
870 OPP_EVENT_REMOVE, opp);
deaa5146 871 opp_debug_remove_one(opp);
129eec55 872 list_del_rcu(&opp->node);
2c2709dc 873 call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
129eec55 874
2c2709dc 875 _remove_opp_table(opp_table);
129eec55
VK
876}
877
878/**
2c2709dc 879 * dev_pm_opp_remove() - Remove an OPP from OPP table
129eec55
VK
880 * @dev: device for which we do this operation
881 * @freq: OPP to remove with matching 'freq'
882 *
2c2709dc 883 * This function removes an opp from the opp table.
984f16c8 884 *
2c2709dc 885 * Locking: The internal opp_table and opp structures are RCU protected.
984f16c8
NM
886 * Hence this function internally uses RCU updater strategy with mutex locks
887 * to keep the integrity of the internal data structures. Callers should ensure
888 * that this function is *NOT* called under RCU protection or in contexts where
889 * mutex cannot be locked.
129eec55
VK
890 */
891void dev_pm_opp_remove(struct device *dev, unsigned long freq)
892{
893 struct dev_pm_opp *opp;
2c2709dc 894 struct opp_table *opp_table;
129eec55
VK
895 bool found = false;
896
2c2709dc
VK
897 /* Hold our table modification lock here */
898 mutex_lock(&opp_table_lock);
129eec55 899
2c2709dc
VK
900 opp_table = _find_opp_table(dev);
901 if (IS_ERR(opp_table))
129eec55
VK
902 goto unlock;
903
2c2709dc 904 list_for_each_entry(opp, &opp_table->opp_list, node) {
129eec55
VK
905 if (opp->rate == freq) {
906 found = true;
907 break;
908 }
909 }
910
911 if (!found) {
912 dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
913 __func__, freq);
914 goto unlock;
915 }
916
2c2709dc 917 _opp_remove(opp_table, opp, true);
129eec55 918unlock:
2c2709dc 919 mutex_unlock(&opp_table_lock);
129eec55
VK
920}
921EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
922
f47b72a1
VK
923struct dev_pm_opp *_allocate_opp(struct device *dev,
924 struct opp_table **opp_table)
e1f60b29 925{
23dacf6d 926 struct dev_pm_opp *opp;
e1f60b29 927
23dacf6d
VK
928 /* allocate new OPP node */
929 opp = kzalloc(sizeof(*opp), GFP_KERNEL);
930 if (!opp)
931 return NULL;
e1f60b29 932
23dacf6d 933 INIT_LIST_HEAD(&opp->node);
e1f60b29 934
2c2709dc
VK
935 *opp_table = _add_opp_table(dev);
936 if (!*opp_table) {
23dacf6d
VK
937 kfree(opp);
938 return NULL;
939 }
940
941 return opp;
942}
943
7d34d56e 944static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
2c2709dc 945 struct opp_table *opp_table)
7d34d56e 946{
2c2709dc 947 struct regulator *reg = opp_table->regulator;
7d34d56e 948
0c717d0f 949 if (!IS_ERR(reg) &&
7d34d56e
VK
950 !regulator_is_supported_voltage(reg, opp->u_volt_min,
951 opp->u_volt_max)) {
952 pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
953 __func__, opp->u_volt_min, opp->u_volt_max);
954 return false;
955 }
956
957 return true;
958}
959
f47b72a1
VK
960int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
961 struct opp_table *opp_table)
23dacf6d
VK
962{
963 struct dev_pm_opp *opp;
2c2709dc 964 struct list_head *head = &opp_table->opp_list;
deaa5146 965 int ret;
23dacf6d
VK
966
967 /*
968 * Insert new OPP in order of increasing frequency and discard if
969 * already present.
970 *
2c2709dc 971 * Need to use &opp_table->opp_list in the condition part of the 'for'
23dacf6d
VK
972 * loop, don't replace it with head otherwise it will become an infinite
973 * loop.
974 */
2c2709dc 975 list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
23dacf6d
VK
976 if (new_opp->rate > opp->rate) {
977 head = &opp->node;
978 continue;
979 }
980
981 if (new_opp->rate < opp->rate)
982 break;
983
984 /* Duplicate OPPs */
06441658 985 dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
23dacf6d
VK
986 __func__, opp->rate, opp->u_volt, opp->available,
987 new_opp->rate, new_opp->u_volt, new_opp->available);
988
989 return opp->available && new_opp->u_volt == opp->u_volt ?
990 0 : -EEXIST;
991 }
992
2c2709dc 993 new_opp->opp_table = opp_table;
23dacf6d
VK
994 list_add_rcu(&new_opp->node, head);
995
2c2709dc 996 ret = opp_debug_create_one(new_opp, opp_table);
deaa5146
VK
997 if (ret)
998 dev_err(dev, "%s: Failed to register opp to debugfs (%d)\n",
999 __func__, ret);
1000
2c2709dc 1001 if (!_opp_supported_by_regulators(new_opp, opp_table)) {
7d34d56e
VK
1002 new_opp->available = false;
1003 dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n",
1004 __func__, new_opp->rate);
1005 }
1006
23dacf6d
VK
1007 return 0;
1008}
1009
984f16c8 1010/**
 * _opp_add_v1() - Allocate an OPP based on v1 bindings.
984f16c8
NM
1012 * @dev: device for which we do this operation
1013 * @freq: Frequency in Hz for this OPP
1014 * @u_volt: Voltage in uVolts for this OPP
1015 * @dynamic: Dynamically added OPPs.
1016 *
2c2709dc 1017 * This function adds an opp definition to the opp table and returns status.
984f16c8
NM
1018 * The opp is made available by default and it can be controlled using
1019 * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
1020 *
8f8d37b2
VK
1021 * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table
1022 * and freed by dev_pm_opp_of_remove_table.
984f16c8 1023 *
2c2709dc 1024 * Locking: The internal opp_table and opp structures are RCU protected.
984f16c8
NM
1025 * Hence this function internally uses RCU updater strategy with mutex locks
1026 * to keep the integrity of the internal data structures. Callers should ensure
1027 * that this function is *NOT* called under RCU protection or in contexts where
1028 * mutex cannot be locked.
1029 *
1030 * Return:
1031 * 0 On success OR
1032 * Duplicate OPPs (both freq and volt are same) and opp->available
1033 * -EEXIST Freq are same and volt are different OR
1034 * Duplicate OPPs (both freq and volt are same) and !opp->available
1035 * -ENOMEM Memory allocation failure
1036 */
f47b72a1
VK
1037int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
1038 bool dynamic)
e1f60b29 1039{
2c2709dc 1040 struct opp_table *opp_table;
23dacf6d 1041 struct dev_pm_opp *new_opp;
50f8cfbd 1042 unsigned long tol;
6ce4184d 1043 int ret;
e1f60b29 1044
2c2709dc
VK
1045 /* Hold our table modification lock here */
1046 mutex_lock(&opp_table_lock);
e1f60b29 1047
2c2709dc 1048 new_opp = _allocate_opp(dev, &opp_table);
23dacf6d
VK
1049 if (!new_opp) {
1050 ret = -ENOMEM;
1051 goto unlock;
1052 }
1053
a7470db6 1054 /* populate the opp table */
a7470db6 1055 new_opp->rate = freq;
2c2709dc 1056 tol = u_volt * opp_table->voltage_tolerance_v1 / 100;
a7470db6 1057 new_opp->u_volt = u_volt;
50f8cfbd
VK
1058 new_opp->u_volt_min = u_volt - tol;
1059 new_opp->u_volt_max = u_volt + tol;
a7470db6 1060 new_opp->available = true;
23dacf6d 1061 new_opp->dynamic = dynamic;
a7470db6 1062
2c2709dc 1063 ret = _opp_add(dev, new_opp, opp_table);
23dacf6d 1064 if (ret)
6ce4184d 1065 goto free_opp;
64ce8545 1066
2c2709dc 1067 mutex_unlock(&opp_table_lock);
e1f60b29 1068
03ca370f
MH
1069 /*
1070 * Notify the changes in the availability of the operable
1071 * frequency/voltage list.
1072 */
2c2709dc 1073 srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp);
e1f60b29 1074 return 0;
6ce4184d
VK
1075
1076free_opp:
2c2709dc 1077 _opp_remove(opp_table, new_opp, false);
23dacf6d 1078unlock:
2c2709dc 1079 mutex_unlock(&opp_table_lock);
6ce4184d 1080 return ret;
e1f60b29 1081}
38393409 1082
7de36b0a
VK
1083/**
1084 * dev_pm_opp_set_supported_hw() - Set supported platforms
1085 * @dev: Device for which supported-hw has to be set.
1086 * @versions: Array of hierarchy of versions to match.
1087 * @count: Number of elements in the array.
1088 *
1089 * This is required only for the V2 bindings, and it enables a platform to
 * specify the hierarchy of versions it supports. The OPP layer will then
 * enable only those OPPs which are available for these versions, based on the
 * 'opp-supported-hw' property.
1093 *
2c2709dc 1094 * Locking: The internal opp_table and opp structures are RCU protected.
7de36b0a
VK
1095 * Hence this function internally uses RCU updater strategy with mutex locks
1096 * to keep the integrity of the internal data structures. Callers should ensure
1097 * that this function is *NOT* called under RCU protection or in contexts where
1098 * mutex cannot be locked.
1099 */
1100int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions,
1101 unsigned int count)
1102{
2c2709dc 1103 struct opp_table *opp_table;
7de36b0a
VK
1104 int ret = 0;
1105
2c2709dc
VK
1106 /* Hold our table modification lock here */
1107 mutex_lock(&opp_table_lock);
7de36b0a 1108
2c2709dc
VK
1109 opp_table = _add_opp_table(dev);
1110 if (!opp_table) {
7de36b0a
VK
1111 ret = -ENOMEM;
1112 goto unlock;
1113 }
1114
2c2709dc
VK
1115 /* Make sure there are no concurrent readers while updating opp_table */
1116 WARN_ON(!list_empty(&opp_table->opp_list));
7de36b0a 1117
2c2709dc
VK
1118 /* Do we already have a version hierarchy associated with opp_table? */
1119 if (opp_table->supported_hw) {
7de36b0a
VK
1120 dev_err(dev, "%s: Already have supported hardware list\n",
1121 __func__);
1122 ret = -EBUSY;
1123 goto err;
1124 }
1125
2c2709dc 1126 opp_table->supported_hw = kmemdup(versions, count * sizeof(*versions),
7de36b0a 1127 GFP_KERNEL);
2c2709dc 1128 if (!opp_table->supported_hw) {
7de36b0a
VK
1129 ret = -ENOMEM;
1130 goto err;
1131 }
1132
2c2709dc
VK
1133 opp_table->supported_hw_count = count;
1134 mutex_unlock(&opp_table_lock);
7de36b0a
VK
1135 return 0;
1136
1137err:
2c2709dc 1138 _remove_opp_table(opp_table);
7de36b0a 1139unlock:
2c2709dc 1140 mutex_unlock(&opp_table_lock);
7de36b0a
VK
1141
1142 return ret;
1143}
1144EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw);
1145
1146/**
1147 * dev_pm_opp_put_supported_hw() - Releases resources blocked for supported hw
a5da6447 1148 * @dev: Device for which supported-hw has to be put.
7de36b0a
VK
1149 *
1150 * This is required only for the V2 bindings, and is called for a matching
2c2709dc 1151 * dev_pm_opp_set_supported_hw(). Until this is called, the opp_table structure
7de36b0a
VK
1152 * will not be freed.
1153 *
2c2709dc 1154 * Locking: The internal opp_table and opp structures are RCU protected.
7de36b0a
VK
1155 * Hence this function internally uses RCU updater strategy with mutex locks
1156 * to keep the integrity of the internal data structures. Callers should ensure
1157 * that this function is *NOT* called under RCU protection or in contexts where
1158 * mutex cannot be locked.
1159 */
1160void dev_pm_opp_put_supported_hw(struct device *dev)
1161{
2c2709dc 1162 struct opp_table *opp_table;
7de36b0a 1163
2c2709dc
VK
1164 /* Hold our table modification lock here */
1165 mutex_lock(&opp_table_lock);
7de36b0a 1166
2c2709dc
VK
1167 /* Check for existing table for 'dev' first */
1168 opp_table = _find_opp_table(dev);
1169 if (IS_ERR(opp_table)) {
1170 dev_err(dev, "Failed to find opp_table: %ld\n",
1171 PTR_ERR(opp_table));
7de36b0a
VK
1172 goto unlock;
1173 }
1174
2c2709dc
VK
1175 /* Make sure there are no concurrent readers while updating opp_table */
1176 WARN_ON(!list_empty(&opp_table->opp_list));
7de36b0a 1177
2c2709dc 1178 if (!opp_table->supported_hw) {
7de36b0a
VK
1179 dev_err(dev, "%s: Doesn't have supported hardware list\n",
1180 __func__);
1181 goto unlock;
1182 }
1183
2c2709dc
VK
1184 kfree(opp_table->supported_hw);
1185 opp_table->supported_hw = NULL;
1186 opp_table->supported_hw_count = 0;
7de36b0a 1187
2c2709dc
VK
1188 /* Try freeing opp_table if this was the last blocking resource */
1189 _remove_opp_table(opp_table);
7de36b0a
VK
1190
1191unlock:
2c2709dc 1192 mutex_unlock(&opp_table_lock);
7de36b0a
VK
1193}
1194EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw);
1195
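/*
 * Example (hypothetical, not part of this file): a platform that encodes its
 * silicon revision in two fuse-derived words might do:
 *
 *	u32 versions[2] = { cuts, substrate };	(platform-specific values)
 *
 *	ret = dev_pm_opp_set_supported_hw(cpu_dev, versions,
 *					  ARRAY_SIZE(versions));
 *	...
 *	dev_pm_opp_put_supported_hw(cpu_dev);
 *
 * Both calls expect the OPP list to be empty: set the versions before any
 * OPPs are added and release them only after the OPPs have been removed.
 */
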
01fb4d3c
VK
1196/**
1197 * dev_pm_opp_set_prop_name() - Set prop-extn name
a5da6447 1198 * @dev: Device for which the prop-name has to be set.
01fb4d3c
VK
1199 * @name: name to postfix to properties.
1200 *
1201 * This is required only for the V2 bindings, and it enables a platform to
1202 * specify the extn to be used for certain property names. The properties to
1203 * which the extension will apply are opp-microvolt and opp-microamp. OPP core
1204 * should postfix the property name with -<name> while looking for them.
1205 *
2c2709dc 1206 * Locking: The internal opp_table and opp structures are RCU protected.
01fb4d3c
VK
1207 * Hence this function internally uses RCU updater strategy with mutex locks
1208 * to keep the integrity of the internal data structures. Callers should ensure
1209 * that this function is *NOT* called under RCU protection or in contexts where
1210 * mutex cannot be locked.
1211 */
1212int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
1213{
2c2709dc 1214 struct opp_table *opp_table;
01fb4d3c
VK
1215 int ret = 0;
1216
2c2709dc
VK
1217 /* Hold our table modification lock here */
1218 mutex_lock(&opp_table_lock);
01fb4d3c 1219
2c2709dc
VK
1220 opp_table = _add_opp_table(dev);
1221 if (!opp_table) {
01fb4d3c
VK
1222 ret = -ENOMEM;
1223 goto unlock;
1224 }
1225
2c2709dc
VK
1226 /* Make sure there are no concurrent readers while updating opp_table */
1227 WARN_ON(!list_empty(&opp_table->opp_list));
01fb4d3c 1228
2c2709dc
VK
1229 /* Do we already have a prop-name associated with opp_table? */
1230 if (opp_table->prop_name) {
01fb4d3c 1231 dev_err(dev, "%s: Already have prop-name %s\n", __func__,
2c2709dc 1232 opp_table->prop_name);
01fb4d3c
VK
1233 ret = -EBUSY;
1234 goto err;
1235 }
1236
2c2709dc
VK
1237 opp_table->prop_name = kstrdup(name, GFP_KERNEL);
1238 if (!opp_table->prop_name) {
01fb4d3c
VK
1239 ret = -ENOMEM;
1240 goto err;
1241 }
1242
2c2709dc 1243 mutex_unlock(&opp_table_lock);
01fb4d3c
VK
1244 return 0;
1245
1246err:
2c2709dc 1247 _remove_opp_table(opp_table);
01fb4d3c 1248unlock:
2c2709dc 1249 mutex_unlock(&opp_table_lock);
01fb4d3c
VK
1250
1251 return ret;
1252}
1253EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name);
1254
1255/**
1256 * dev_pm_opp_put_prop_name() - Releases resources blocked for prop-name
a5da6447 1257 * @dev: Device for which the prop-name has to be put.
01fb4d3c
VK
1258 *
1259 * This is required only for the V2 bindings, and is called for a matching
2c2709dc 1260 * dev_pm_opp_set_prop_name(). Until this is called, the opp_table structure
01fb4d3c
VK
1261 * will not be freed.
1262 *
2c2709dc 1263 * Locking: The internal opp_table and opp structures are RCU protected.
01fb4d3c
VK
1264 * Hence this function internally uses RCU updater strategy with mutex locks
1265 * to keep the integrity of the internal data structures. Callers should ensure
1266 * that this function is *NOT* called under RCU protection or in contexts where
1267 * mutex cannot be locked.
1268 */
1269void dev_pm_opp_put_prop_name(struct device *dev)
1270{
2c2709dc 1271 struct opp_table *opp_table;
01fb4d3c 1272
2c2709dc
VK
1273 /* Hold our table modification lock here */
1274 mutex_lock(&opp_table_lock);
01fb4d3c 1275
2c2709dc
VK
1276 /* Check for existing table for 'dev' first */
1277 opp_table = _find_opp_table(dev);
1278 if (IS_ERR(opp_table)) {
1279 dev_err(dev, "Failed to find opp_table: %ld\n",
1280 PTR_ERR(opp_table));
01fb4d3c
VK
1281 goto unlock;
1282 }
1283
2c2709dc
VK
1284 /* Make sure there are no concurrent readers while updating opp_table */
1285 WARN_ON(!list_empty(&opp_table->opp_list));
01fb4d3c 1286
2c2709dc 1287 if (!opp_table->prop_name) {
01fb4d3c
VK
1288 dev_err(dev, "%s: Doesn't have a prop-name\n", __func__);
1289 goto unlock;
1290 }
1291
2c2709dc
VK
1292 kfree(opp_table->prop_name);
1293 opp_table->prop_name = NULL;
01fb4d3c 1294
2c2709dc
VK
1295 /* Try freeing opp_table if this was the last blocking resource */
1296 _remove_opp_table(opp_table);
01fb4d3c
VK
1297
1298unlock:
2c2709dc 1299 mutex_unlock(&opp_table_lock);
01fb4d3c
VK
1300}
1301EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
1302
9f8ea969
VK
1303/**
1304 * dev_pm_opp_set_regulator() - Set regulator name for the device
1305 * @dev: Device for which regulator name is being set.
1306 * @name: Name of the regulator.
1307 *
 * In order to support OPP switching, the OPP layer needs to know the name of
 * the device's regulator, as the core would be required to switch voltages as
 * well.
1310 *
1311 * This must be called before any OPPs are initialized for the device.
1312 *
2c2709dc 1313 * Locking: The internal opp_table and opp structures are RCU protected.
9f8ea969
VK
1314 * Hence this function internally uses RCU updater strategy with mutex locks
1315 * to keep the integrity of the internal data structures. Callers should ensure
1316 * that this function is *NOT* called under RCU protection or in contexts where
1317 * mutex cannot be locked.
1318 */
91291d9a 1319struct opp_table *dev_pm_opp_set_regulator(struct device *dev, const char *name)
9f8ea969 1320{
2c2709dc 1321 struct opp_table *opp_table;
9f8ea969
VK
1322 struct regulator *reg;
1323 int ret;
1324
2c2709dc 1325 mutex_lock(&opp_table_lock);
9f8ea969 1326
2c2709dc
VK
1327 opp_table = _add_opp_table(dev);
1328 if (!opp_table) {
9f8ea969
VK
1329 ret = -ENOMEM;
1330 goto unlock;
1331 }
1332
1333 /* This should be called before OPPs are initialized */
2c2709dc 1334 if (WARN_ON(!list_empty(&opp_table->opp_list))) {
9f8ea969
VK
1335 ret = -EBUSY;
1336 goto err;
1337 }
1338
1339 /* Already have a regulator set */
2c2709dc 1340 if (WARN_ON(!IS_ERR(opp_table->regulator))) {
9f8ea969
VK
1341 ret = -EBUSY;
1342 goto err;
1343 }
1344 /* Allocate the regulator */
1345 reg = regulator_get_optional(dev, name);
1346 if (IS_ERR(reg)) {
1347 ret = PTR_ERR(reg);
1348 if (ret != -EPROBE_DEFER)
1349 dev_err(dev, "%s: no regulator (%s) found: %d\n",
1350 __func__, name, ret);
1351 goto err;
1352 }
1353
2c2709dc 1354 opp_table->regulator = reg;
9f8ea969 1355
2c2709dc 1356 mutex_unlock(&opp_table_lock);
91291d9a 1357 return opp_table;
9f8ea969
VK
1358
1359err:
2c2709dc 1360 _remove_opp_table(opp_table);
9f8ea969 1361unlock:
2c2709dc 1362 mutex_unlock(&opp_table_lock);
9f8ea969 1363
91291d9a 1364 return ERR_PTR(ret);
9f8ea969
VK
1365}
1366EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulator);
1367
1368/**
1369 * dev_pm_opp_put_regulator() - Releases resources blocked for regulator
91291d9a 1370 * @opp_table: OPP table returned from dev_pm_opp_set_regulator().
9f8ea969 1371 *
2c2709dc 1372 * Locking: The internal opp_table and opp structures are RCU protected.
9f8ea969
VK
1373 * Hence this function internally uses RCU updater strategy with mutex locks
1374 * to keep the integrity of the internal data structures. Callers should ensure
1375 * that this function is *NOT* called under RCU protection or in contexts where
1376 * mutex cannot be locked.
1377 */
91291d9a 1378void dev_pm_opp_put_regulator(struct opp_table *opp_table)
9f8ea969 1379{
2c2709dc 1380 mutex_lock(&opp_table_lock);
9f8ea969 1381
2c2709dc 1382 if (IS_ERR(opp_table->regulator)) {
91291d9a 1383 pr_err("%s: Doesn't have regulator set\n", __func__);
9f8ea969
VK
1384 goto unlock;
1385 }
1386
2c2709dc
VK
1387 /* Make sure there are no concurrent readers while updating opp_table */
1388 WARN_ON(!list_empty(&opp_table->opp_list));
9f8ea969 1389
2c2709dc
VK
1390 regulator_put(opp_table->regulator);
1391 opp_table->regulator = ERR_PTR(-ENXIO);
9f8ea969 1392
2c2709dc
VK
1393 /* Try freeing opp_table if this was the last blocking resource */
1394 _remove_opp_table(opp_table);
9f8ea969
VK
1395
1396unlock:
2c2709dc 1397 mutex_unlock(&opp_table_lock);
9f8ea969
VK
1398}
1399EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulator);
1400
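/*
 * Example (hypothetical, not part of this file): the set/put pair brackets
 * the lifetime of the OPP table, with the set call made before any OPPs are
 * added:
 *
 *	struct opp_table *opp_table;
 *
 *	opp_table = dev_pm_opp_set_regulator(dev, "vdd");
 *	if (IS_ERR(opp_table))
 *		return PTR_ERR(opp_table);
 *	...
 *	dev_pm_opp_put_regulator(opp_table);
 *
 * "vdd" is a placeholder supply name. Note that dev_pm_opp_put_regulator()
 * takes the opp_table returned by dev_pm_opp_set_regulator() rather than the
 * device pointer.
 */
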
38393409
VK
1401/**
 * dev_pm_opp_add() - Add an OPP table from a table definition
1403 * @dev: device for which we do this operation
1404 * @freq: Frequency in Hz for this OPP
1405 * @u_volt: Voltage in uVolts for this OPP
1406 *
2c2709dc 1407 * This function adds an opp definition to the opp table and returns status.
38393409
VK
1408 * The opp is made available by default and it can be controlled using
1409 * dev_pm_opp_enable/disable functions.
1410 *
2c2709dc 1411 * Locking: The internal opp_table and opp structures are RCU protected.
38393409
VK
1412 * Hence this function internally uses RCU updater strategy with mutex locks
1413 * to keep the integrity of the internal data structures. Callers should ensure
1414 * that this function is *NOT* called under RCU protection or in contexts where
1415 * mutex cannot be locked.
1416 *
1417 * Return:
984f16c8 1418 * 0 On success OR
38393409 1419 * Duplicate OPPs (both freq and volt are same) and opp->available
984f16c8 1420 * -EEXIST Freq are same and volt are different OR
38393409 1421 * Duplicate OPPs (both freq and volt are same) and !opp->available
984f16c8 1422 * -ENOMEM Memory allocation failure
38393409
VK
1423 */
1424int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
1425{
b64b9c3f 1426 return _opp_add_v1(dev, freq, u_volt, true);
38393409 1427}
5d4879cd 1428EXPORT_SYMBOL_GPL(dev_pm_opp_add);
e1f60b29
NM
1429
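/*
 * Example (hypothetical, not part of this file): platforms without a DT OPP
 * table can register OPPs directly from code:
 *
 *	dev_pm_opp_add(dev, 250000000, 1000000);	(250 MHz at 1.00 V)
 *	dev_pm_opp_add(dev, 500000000, 1100000);	(500 MHz at 1.10 V)
 *
 * Frequencies are in Hz and voltages in microvolts; adding a duplicate
 * frequency with a different voltage fails with -EEXIST.
 */
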
1430/**
327854c8 1431 * _opp_set_availability() - helper to set the availability of an opp
e1f60b29
NM
1432 * @dev: device for which we do this operation
1433 * @freq: OPP frequency to modify availability
1434 * @availability_req: availability status requested for this opp
1435 *
 * Set the availability of an OPP with an RCU operation; opp_{enable,disable}
 * share a common logic which is isolated here.
1438 *
984f16c8 1439 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
e1a2d49c 1440 * copy operation, returns 0 if no modification was done OR modification was
e1f60b29
NM
1441 * successful.
1442 *
2c2709dc 1443 * Locking: The internal opp_table and opp structures are RCU protected.
e1f60b29
NM
1444 * Hence this function internally uses RCU updater strategy with mutex locks to
1445 * keep the integrity of the internal data structures. Callers should ensure
1446 * that this function is *NOT* called under RCU protection or in contexts where
1447 * mutex locking or synchronize_rcu() blocking calls cannot be used.
1448 */
327854c8
NM
1449static int _opp_set_availability(struct device *dev, unsigned long freq,
1450 bool availability_req)
e1f60b29 1451{
2c2709dc 1452 struct opp_table *opp_table;
47d43ba7 1453 struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
e1f60b29
NM
1454 int r = 0;
1455
1456 /* keep the node allocated */
47d43ba7 1457 new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
59d84ca8 1458 if (!new_opp)
e1f60b29 1459 return -ENOMEM;
e1f60b29 1460
2c2709dc 1461 mutex_lock(&opp_table_lock);
e1f60b29 1462
2c2709dc
VK
1463 /* Find the opp_table */
1464 opp_table = _find_opp_table(dev);
1465 if (IS_ERR(opp_table)) {
1466 r = PTR_ERR(opp_table);
e1f60b29
NM
1467 dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
1468 goto unlock;
1469 }
1470
1471 /* Do we have the frequency? */
2c2709dc 1472 list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
e1f60b29
NM
1473 if (tmp_opp->rate == freq) {
1474 opp = tmp_opp;
1475 break;
1476 }
1477 }
1478 if (IS_ERR(opp)) {
1479 r = PTR_ERR(opp);
1480 goto unlock;
1481 }
1482
1483 /* Is update really needed? */
1484 if (opp->available == availability_req)
1485 goto unlock;
1486 /* copy the old data over */
1487 *new_opp = *opp;
1488
1489 /* plug in new node */
1490 new_opp->available = availability_req;
1491
1492 list_replace_rcu(&opp->node, &new_opp->node);
2c2709dc
VK
1493 mutex_unlock(&opp_table_lock);
1494 call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
e1f60b29 1495
03ca370f
MH
1496 /* Notify the change of the OPP availability */
1497 if (availability_req)
2c2709dc
VK
1498 srcu_notifier_call_chain(&opp_table->srcu_head,
1499 OPP_EVENT_ENABLE, new_opp);
03ca370f 1500 else
2c2709dc
VK
1501 srcu_notifier_call_chain(&opp_table->srcu_head,
1502 OPP_EVENT_DISABLE, new_opp);
03ca370f 1503
dde8437d 1504 return 0;
e1f60b29
NM
1505
1506unlock:
2c2709dc 1507 mutex_unlock(&opp_table_lock);
e1f60b29
NM
1508 kfree(new_opp);
1509 return r;
1510}
1511
1512/**
5d4879cd 1513 * dev_pm_opp_enable() - Enable a specific OPP
e1f60b29
NM
1514 * @dev: device for which we do this operation
1515 * @freq: OPP frequency to enable
1516 *
 * Enables a provided opp. If the operation is valid, this returns 0, else the
 * corresponding error value. It is meant to be used by users to make an OPP
 * available again after it was temporarily made unavailable with
 * dev_pm_opp_disable.
e1f60b29 1520 *
2c2709dc 1521 * Locking: The internal opp_table and opp structures are RCU protected.
e1f60b29
NM
1522 * Hence this function indirectly uses RCU and mutex locks to keep the
1523 * integrity of the internal data structures. Callers should ensure that
1524 * this function is *NOT* called under RCU protection or in contexts where
1525 * mutex locking or synchronize_rcu() blocking calls cannot be used.
984f16c8
NM
1526 *
1527 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
e1a2d49c 1528 * copy operation, returns 0 if no modification was done OR modification was
984f16c8 1529 * successful.
e1f60b29 1530 */
5d4879cd 1531int dev_pm_opp_enable(struct device *dev, unsigned long freq)
e1f60b29 1532{
327854c8 1533 return _opp_set_availability(dev, freq, true);
e1f60b29 1534}
5d4879cd 1535EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
e1f60b29
NM
1536
1537/**
5d4879cd 1538 * dev_pm_opp_disable() - Disable a specific OPP
e1f60b29
NM
1539 * @dev: device for which we do this operation
1540 * @freq: OPP frequency to disable
1541 *
1542 * Disables a provided opp. If the operation is valid, this returns
1543 * 0, else the corresponding error value. It is meant to be a temporary
1544 * control by users to make this OPP not available until the circumstances are
5d4879cd 1545 * right to make it available again (with a call to dev_pm_opp_enable).
e1f60b29 1546 *
2c2709dc 1547 * Locking: The internal opp_table and opp structures are RCU protected.
e1f60b29
NM
1548 * Hence this function indirectly uses RCU and mutex locks to keep the
1549 * integrity of the internal data structures. Callers should ensure that
1550 * this function is *NOT* called under RCU protection or in contexts where
1551 * mutex locking or synchronize_rcu() blocking calls cannot be used.
984f16c8
NM
1552 *
1553 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
e1a2d49c 1554 * copy operation, returns 0 if no modification was done OR modification was
984f16c8 1555 * successful.
e1f60b29 1556 */
5d4879cd 1557int dev_pm_opp_disable(struct device *dev, unsigned long freq)
e1f60b29 1558{
327854c8 1559 return _opp_set_availability(dev, freq, false);
e1f60b29 1560}
5d4879cd 1561EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
e1f60b29 1562
03ca370f 1563/**
5d4879cd 1564 * dev_pm_opp_get_notifier() - find notifier_head of the device with opp
2c2709dc 1565 * @dev: device pointer used to lookup OPP table.
984f16c8
NM
1566 *
1567 * Return: pointer to notifier head if found, otherwise -ENODEV or
 * -EINVAL based on type of error cast as a pointer. The value must be checked
 * with IS_ERR to determine whether it is a valid pointer or an error result.
1570 *
2c2709dc
VK
1571 * Locking: This function must be called under rcu_read_lock(). opp_table is a
1572 * RCU protected pointer. The reason for the same is that the opp pointer which
1573 * is returned will remain valid for use with opp_get_{voltage, freq} only while
984f16c8
NM
1574 * under the locked area. The pointer returned must be used prior to unlocking
1575 * with rcu_read_unlock() to maintain the integrity of the pointer.
03ca370f 1576 */
5d4879cd 1577struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
03ca370f 1578{
2c2709dc 1579 struct opp_table *opp_table = _find_opp_table(dev);
03ca370f 1580
2c2709dc
VK
1581 if (IS_ERR(opp_table))
1582 return ERR_CAST(opp_table); /* matching type */
03ca370f 1583
2c2709dc 1584 return &opp_table->srcu_head;
03ca370f 1585}
4679ec37 1586EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
b496dfbc 1587
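/*
 * Example (hypothetical, not part of this file): a consumer interested in OPP
 * add/remove/enable/disable events can register against the returned head:
 *
 *	struct srcu_notifier_head *nh;
 *
 *	rcu_read_lock();
 *	nh = dev_pm_opp_get_notifier(dev);
 *	rcu_read_unlock();
 *
 *	if (!IS_ERR(nh))
 *		srcu_notifier_chain_register(nh, &my_nb);
 *
 * 'my_nb' is a driver-owned struct notifier_block; its callback receives the
 * OPP_EVENT_* value and the affected struct dev_pm_opp as data.
 */
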
411466c5
SH
/*
 * Free OPPs created from static entries present in DT, and also the
 * dynamically added entries, depending on the remove_all parameter.
 */
f47b72a1 1592void _dev_pm_opp_remove_table(struct device *dev, bool remove_all)
737002b5 1593{
2c2709dc 1594 struct opp_table *opp_table;
737002b5
VK
1595 struct dev_pm_opp *opp, *tmp;
1596
2c2709dc
VK
1597 /* Hold our table modification lock here */
1598 mutex_lock(&opp_table_lock);
06441658 1599
2c2709dc
VK
1600 /* Check for existing table for 'dev' */
1601 opp_table = _find_opp_table(dev);
1602 if (IS_ERR(opp_table)) {
1603 int error = PTR_ERR(opp_table);
737002b5
VK
1604
1605 if (error != -ENODEV)
2c2709dc 1606 WARN(1, "%s: opp_table: %d\n",
737002b5
VK
1607 IS_ERR_OR_NULL(dev) ?
1608 "Invalid device" : dev_name(dev),
1609 error);
06441658 1610 goto unlock;
737002b5
VK
1611 }
1612
2c2709dc
VK
1613 /* Find if opp_table manages a single device */
1614 if (list_is_singular(&opp_table->dev_list)) {
06441658 1615 /* Free static OPPs */
2c2709dc 1616 list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
411466c5 1617 if (remove_all || !opp->dynamic)
2c2709dc 1618 _opp_remove(opp_table, opp, true);
06441658
VK
1619 }
1620 } else {
2c2709dc 1621 _remove_opp_dev(_find_opp_dev(dev, opp_table), opp_table);
737002b5
VK
1622 }
1623
06441658 1624unlock:
2c2709dc 1625 mutex_unlock(&opp_table_lock);
737002b5 1626}
129eec55
VK
1627
1628/**
411466c5 1629 * dev_pm_opp_remove_table() - Free all OPPs associated with the device
2c2709dc 1630 * @dev: device pointer used to lookup OPP table.
129eec55 1631 *
411466c5
SH
1632 * Free both OPPs created using static entries present in DT and the
1633 * dynamically added entries.
984f16c8 1634 *
2c2709dc 1635 * Locking: The internal opp_table and opp structures are RCU protected.
984f16c8
NM
1636 * Hence this function indirectly uses RCU updater strategy with mutex locks
1637 * to keep the integrity of the internal data structures. Callers should ensure
1638 * that this function is *NOT* called under RCU protection or in contexts where
1639 * mutex cannot be locked.
129eec55 1640 */
411466c5 1641void dev_pm_opp_remove_table(struct device *dev)
129eec55 1642{
411466c5 1643 _dev_pm_opp_remove_table(dev, true);
8d4d4e98 1644}
411466c5 1645EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table);