// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic OPP Interface
 *
 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
 *	Nishanth Menon
 *	Romit Dasgupta
 *	Kevin Hilman
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/pm_domain.h>
#include <linux/regulator/consumer.h>

#include "opp.h"

/*
 * The root of the list of all opp-tables. All opp_table structures branch off
 * from here, with each opp_table containing the list of opps it supports in
 * various states of availability.
 */
LIST_HEAD(opp_tables);
/* Lock to allow exclusive modification to the device and opp lists */
DEFINE_MUTEX(opp_table_lock);
/* Flag indicating that opp_tables list is being updated at the moment */
static bool opp_tables_busy;

static struct opp_device *_find_opp_dev(const struct device *dev,
					struct opp_table *opp_table)
{
	struct opp_device *opp_dev;

	list_for_each_entry(opp_dev, &opp_table->dev_list, node)
		if (opp_dev->dev == dev)
			return opp_dev;

	return NULL;
}

static struct opp_table *_find_opp_table_unlocked(struct device *dev)
{
	struct opp_table *opp_table;
	bool found;

	list_for_each_entry(opp_table, &opp_tables, node) {
		mutex_lock(&opp_table->lock);
		found = !!_find_opp_dev(dev, opp_table);
		mutex_unlock(&opp_table->lock);

		if (found) {
			_get_opp_table_kref(opp_table);

			return opp_table;
		}
	}

	return ERR_PTR(-ENODEV);
}

/**
 * _find_opp_table() - find opp_table struct using device pointer
 * @dev:	device pointer used to lookup OPP table
 *
 * Search OPP table for one containing matching device.
 *
 * Return: pointer to 'struct opp_table' if found, otherwise -ENODEV or
 * -EINVAL based on type of error.
 *
 * The callers must call dev_pm_opp_put_opp_table() after the table is used.
 */
struct opp_table *_find_opp_table(struct device *dev)
{
	struct opp_table *opp_table;

	if (IS_ERR_OR_NULL(dev)) {
		pr_err("%s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	mutex_lock(&opp_table_lock);
	opp_table = _find_opp_table_unlocked(dev);
	mutex_unlock(&opp_table_lock);

	return opp_table;
}

/**
 * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
 * @opp:	opp for which the voltage has to be returned
 *
 * Return: voltage in micro volt corresponding to the opp, else
 * return 0
 *
 * This is useful only for devices with single power supply.
 */
unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
{
	if (IS_ERR_OR_NULL(opp)) {
		pr_err("%s: Invalid parameters\n", __func__);
		return 0;
	}

	return opp->supplies[0].u_volt;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);

/**
 * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
 * @opp:	opp for which the frequency has to be returned
 *
 * Return: frequency in hertz corresponding to the opp, else
 * return 0
 */
unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
{
	if (IS_ERR_OR_NULL(opp)) {
		pr_err("%s: Invalid parameters\n", __func__);
		return 0;
	}

	return opp->rate;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
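
/*
 * Illustrative sketch (not taken from a real driver): the getters above are
 * meant to be called on an OPP the caller already holds a reference to, e.g.
 * one returned by a dev_pm_opp_find_*() helper. "dev" and "freq" below are
 * hypothetical caller-provided values:
 *
 *	struct dev_pm_opp *opp = dev_pm_opp_find_freq_ceil(dev, &freq);
 *
 *	if (!IS_ERR(opp)) {
 *		unsigned long volt = dev_pm_opp_get_voltage(opp);
 *		unsigned long rate = dev_pm_opp_get_freq(opp);
 *
 *		dev_pm_opp_put(opp);
 *	}
 */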

/**
 * dev_pm_opp_get_level() - Gets the level corresponding to an available opp
 * @opp:	opp for which the level value has to be returned
 *
 * Return: level read from device tree corresponding to the opp, else
 * return 0.
 */
unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp)
{
	if (IS_ERR_OR_NULL(opp) || !opp->available) {
		pr_err("%s: Invalid parameters\n", __func__);
		return 0;
	}

	return opp->level;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_level);

/**
 * dev_pm_opp_is_turbo() - Returns whether opp is a turbo OPP or not
 * @opp:	opp for which turbo mode is being verified
 *
 * Turbo OPPs are not for normal use, and can be enabled (under certain
 * conditions) for short durations of time to finish high-throughput work
 * quickly. Running on them for longer times may overheat the chip.
 *
 * Return: true if opp is turbo opp, else false.
 */
bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
{
	if (IS_ERR_OR_NULL(opp) || !opp->available) {
		pr_err("%s: Invalid parameters\n", __func__);
		return false;
	}

	return opp->turbo;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);

/**
 * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds
 * @dev:	device for which we do this operation
 *
 * Return: This function returns the max clock latency in nanoseconds.
 */
unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
{
	struct opp_table *opp_table;
	unsigned long clock_latency_ns;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return 0;

	clock_latency_ns = opp_table->clock_latency_ns_max;

	dev_pm_opp_put_opp_table(opp_table);

	return clock_latency_ns;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);

/**
 * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds
 * @dev: device for which we do this operation
 *
 * Return: This function returns the max voltage latency in nanoseconds.
 */
unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *opp;
	struct regulator *reg;
	unsigned long latency_ns = 0;
	int ret, i, count;
	struct {
		unsigned long min;
		unsigned long max;
	} *uV;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return 0;

	/* Regulator may not be required for the device */
	if (!opp_table->regulators)
		goto put_opp_table;

	count = opp_table->regulator_count;

	uV = kmalloc_array(count, sizeof(*uV), GFP_KERNEL);
	if (!uV)
		goto put_opp_table;

	mutex_lock(&opp_table->lock);

	for (i = 0; i < count; i++) {
		uV[i].min = ~0;
		uV[i].max = 0;

		list_for_each_entry(opp, &opp_table->opp_list, node) {
			if (!opp->available)
				continue;

			if (opp->supplies[i].u_volt_min < uV[i].min)
				uV[i].min = opp->supplies[i].u_volt_min;
			if (opp->supplies[i].u_volt_max > uV[i].max)
				uV[i].max = opp->supplies[i].u_volt_max;
		}
	}

	mutex_unlock(&opp_table->lock);

	/*
	 * The caller needs to ensure that opp_table (and hence the regulator)
	 * isn't freed, while we are executing this routine.
	 */
	for (i = 0; i < count; i++) {
		reg = opp_table->regulators[i];
		ret = regulator_set_voltage_time(reg, uV[i].min, uV[i].max);
		if (ret > 0)
			latency_ns += ret * 1000;
	}

	kfree(uV);
put_opp_table:
	dev_pm_opp_put_opp_table(opp_table);

	return latency_ns;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_volt_latency);

/**
 * dev_pm_opp_get_max_transition_latency() - Get max transition latency in
 *					     nanoseconds
 * @dev: device for which we do this operation
 *
 * Return: This function returns the max transition latency, in nanoseconds, to
 * switch from one OPP to another.
 */
unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
{
	return dev_pm_opp_get_max_volt_latency(dev) +
		dev_pm_opp_get_max_clock_latency(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency);

/**
 * dev_pm_opp_get_suspend_opp_freq() - Get frequency of suspend opp in Hz
 * @dev:	device for which we do this operation
 *
 * Return: This function returns the frequency of the OPP marked as suspend_opp
 * if one is available, else returns 0.
 */
unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev)
{
	struct opp_table *opp_table;
	unsigned long freq = 0;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return 0;

	if (opp_table->suspend_opp && opp_table->suspend_opp->available)
		freq = dev_pm_opp_get_freq(opp_table->suspend_opp);

	dev_pm_opp_put_opp_table(opp_table);

	return freq;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp_freq);

int _get_opp_count(struct opp_table *opp_table)
{
	struct dev_pm_opp *opp;
	int count = 0;

	mutex_lock(&opp_table->lock);

	list_for_each_entry(opp, &opp_table->opp_list, node) {
		if (opp->available)
			count++;
	}

	mutex_unlock(&opp_table->lock);

	return count;
}

/**
 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp table
 * @dev:	device for which we do this operation
 *
 * Return: This function returns the number of available opps if there are any,
 * else returns 0 if none or the corresponding error value.
 */
int dev_pm_opp_get_opp_count(struct device *dev)
{
	struct opp_table *opp_table;
	int count;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		count = PTR_ERR(opp_table);
		dev_dbg(dev, "%s: OPP table not found (%d)\n",
			__func__, count);
		return count;
	}

	count = _get_opp_count(opp_table);
	dev_pm_opp_put_opp_table(opp_table);

	return count;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
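
/*
 * Illustrative sketch: dev_pm_opp_get_opp_count() is typically used to size a
 * frequency table before walking the OPPs. The "table" variable in this
 * hypothetical consumer is made up, not part of this file:
 *
 *	int num = dev_pm_opp_get_opp_count(dev);
 *
 *	if (num > 0)
 *		table = kcalloc(num + 1, sizeof(*table), GFP_KERNEL);
 */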

/**
 * dev_pm_opp_find_freq_exact() - search for an exact frequency
 * @dev:		device for which we do this operation
 * @freq:		frequency to search for
 * @available:		true/false - match for available opp
 *
 * Return: Searches for exact match in the opp table and returns pointer to the
 * matching opp if found, else returns ERR_PTR in case of error and should
 * be handled using IS_ERR. Error return values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * Note: available is a modifier for the search. If available=true, then the
 * match is for an exact matching frequency which is available in the stored
 * OPP table. If false, the match is for an exact frequency which is not
 * available.
 *
 * This provides a mechanism to enable an opp which is not available currently
 * or the opposite as well.
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
					      unsigned long freq,
					      bool available)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		int r = PTR_ERR(opp_table);

		dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
		return ERR_PTR(r);
	}

	mutex_lock(&opp_table->lock);

	list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
		if (temp_opp->available == available &&
		    temp_opp->rate == freq) {
			opp = temp_opp;

			/* Increment the reference count of OPP */
			dev_pm_opp_get(opp);
			break;
		}
	}

	mutex_unlock(&opp_table->lock);
	dev_pm_opp_put_opp_table(opp_table);

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
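
/*
 * Illustrative sketch: the @available modifier lets a caller look up an OPP
 * that is currently disabled, e.g. before enabling it. "dev" and "freq" are
 * hypothetical caller values:
 *
 *	struct dev_pm_opp *opp = dev_pm_opp_find_freq_exact(dev, freq, false);
 *
 *	if (!IS_ERR(opp)) {
 *		dev_pm_opp_put(opp);
 *		dev_pm_opp_enable(dev, freq);
 *	}
 */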

/**
 * dev_pm_opp_find_level_exact() - search for an exact level
 * @dev:		device for which we do this operation
 * @level:		level to search for
 *
 * Return: Searches for exact match in the opp table and returns pointer to the
 * matching opp if found, else returns ERR_PTR in case of error and should
 * be handled using IS_ERR. Error return values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
					       unsigned int level)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		int r = PTR_ERR(opp_table);

		dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
		return ERR_PTR(r);
	}

	mutex_lock(&opp_table->lock);

	list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
		if (temp_opp->level == level) {
			opp = temp_opp;

			/* Increment the reference count of OPP */
			dev_pm_opp_get(opp);
			break;
		}
	}

	mutex_unlock(&opp_table->lock);
	dev_pm_opp_put_opp_table(opp_table);

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_exact);

static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table,
						   unsigned long *freq)
{
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	mutex_lock(&opp_table->lock);

	list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
		if (temp_opp->available && temp_opp->rate >= *freq) {
			opp = temp_opp;
			*freq = opp->rate;

			/* Increment the reference count of OPP */
			dev_pm_opp_get(opp);
			break;
		}
	}

	mutex_unlock(&opp_table->lock);

	return opp;
}

/**
 * dev_pm_opp_find_freq_ceil() - Search for a rounded ceil freq
 * @dev:	device for which we do this operation
 * @freq:	Start frequency
 *
 * Search for the matching ceil *available* OPP from a starting freq
 * for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
					     unsigned long *freq)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *opp;

	if (!dev || !freq) {
		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
		return ERR_PTR(-EINVAL);
	}

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return ERR_CAST(opp_table);

	opp = _find_freq_ceil(opp_table, freq);

	dev_pm_opp_put_opp_table(opp_table);

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
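
/*
 * Illustrative sketch: walking all available OPPs in ascending order with the
 * ceil helper, as cpufreq/devfreq style consumers commonly do. The variables
 * below are hypothetical:
 *
 *	unsigned long freq = 0;
 *	struct dev_pm_opp *opp;
 *
 *	while (!IS_ERR(opp = dev_pm_opp_find_freq_ceil(dev, &freq))) {
 *		... record freq ...
 *		dev_pm_opp_put(opp);
 *		freq++;
 *	}
 */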

/**
 * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
 * @dev:	device for which we do this operation
 * @freq:	Start frequency
 *
 * Search for the matching floor *available* OPP from a starting freq
 * for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
					      unsigned long *freq)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	if (!dev || !freq) {
		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
		return ERR_PTR(-EINVAL);
	}

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return ERR_CAST(opp_table);

	mutex_lock(&opp_table->lock);

	list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
		if (temp_opp->available) {
			/* go to the next node, before choosing prev */
			if (temp_opp->rate > *freq)
				break;
			else
				opp = temp_opp;
		}
	}

	/* Increment the reference count of OPP */
	if (!IS_ERR(opp))
		dev_pm_opp_get(opp);
	mutex_unlock(&opp_table->lock);
	dev_pm_opp_put_opp_table(opp_table);

	if (!IS_ERR(opp))
		*freq = opp->rate;

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);

/**
 * dev_pm_opp_find_freq_ceil_by_volt() - Find OPP with highest frequency for
 *					 target voltage.
 * @dev:	Device for which we do this operation.
 * @u_volt:	Target voltage.
 *
 * Search for the OPP with the highest (ceil) frequency whose voltage is <= u_volt.
 *
 * Return: matching *opp, else returns ERR_PTR in case of error which should be
 * handled using IS_ERR.
 *
 * Error return values can be:
 * EINVAL:	bad parameters
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_ceil_by_volt(struct device *dev,
						     unsigned long u_volt)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	if (!dev || !u_volt) {
		dev_err(dev, "%s: Invalid argument volt=%lu\n", __func__,
			u_volt);
		return ERR_PTR(-EINVAL);
	}

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return ERR_CAST(opp_table);

	mutex_lock(&opp_table->lock);

	list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
		if (temp_opp->available) {
			if (temp_opp->supplies[0].u_volt > u_volt)
				break;
			opp = temp_opp;
		}
	}

	/* Increment the reference count of OPP */
	if (!IS_ERR(opp))
		dev_pm_opp_get(opp);

	mutex_unlock(&opp_table->lock);
	dev_pm_opp_put_opp_table(opp_table);

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil_by_volt);

static int _set_opp_voltage(struct device *dev, struct regulator *reg,
			    struct dev_pm_opp_supply *supply)
{
	int ret;

	/* Regulator not available for device */
	if (IS_ERR(reg)) {
		dev_dbg(dev, "%s: regulator not available: %ld\n", __func__,
			PTR_ERR(reg));
		return 0;
	}

	dev_dbg(dev, "%s: voltages (mV): %lu %lu %lu\n", __func__,
		supply->u_volt_min, supply->u_volt, supply->u_volt_max);

	ret = regulator_set_voltage_triplet(reg, supply->u_volt_min,
					    supply->u_volt, supply->u_volt_max);
	if (ret)
		dev_err(dev, "%s: failed to set voltage (%lu %lu %lu mV): %d\n",
			__func__, supply->u_volt_min, supply->u_volt,
			supply->u_volt_max, ret);

	return ret;
}

static inline int _generic_set_opp_clk_only(struct device *dev, struct clk *clk,
					    unsigned long freq)
{
	int ret;

	ret = clk_set_rate(clk, freq);
	if (ret) {
		dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
			ret);
	}

	return ret;
}

static int _generic_set_opp_regulator(struct opp_table *opp_table,
				      struct device *dev,
				      unsigned long old_freq,
				      unsigned long freq,
				      struct dev_pm_opp_supply *old_supply,
				      struct dev_pm_opp_supply *new_supply)
{
	struct regulator *reg = opp_table->regulators[0];
	int ret;

	/* This function only supports single regulator per device */
	if (WARN_ON(opp_table->regulator_count > 1)) {
		dev_err(dev, "multiple regulators are not supported\n");
		return -EINVAL;
	}

	/* Scaling up? Scale voltage before frequency */
	if (freq >= old_freq) {
		ret = _set_opp_voltage(dev, reg, new_supply);
		if (ret)
			goto restore_voltage;
	}

	/* Change frequency */
	ret = _generic_set_opp_clk_only(dev, opp_table->clk, freq);
	if (ret)
		goto restore_voltage;

	/* Scaling down? Scale voltage after frequency */
	if (freq < old_freq) {
		ret = _set_opp_voltage(dev, reg, new_supply);
		if (ret)
			goto restore_freq;
	}

	/*
	 * Enable the regulator after setting its voltages, otherwise it breaks
	 * some boot-enabled regulators.
	 */
	if (unlikely(!opp_table->enabled)) {
		ret = regulator_enable(reg);
		if (ret < 0)
			dev_warn(dev, "Failed to enable regulator: %d", ret);
	}

	return 0;

restore_freq:
	if (_generic_set_opp_clk_only(dev, opp_table->clk, old_freq))
		dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
			__func__, old_freq);
restore_voltage:
	/* This shouldn't harm even if the voltages weren't updated earlier */
	if (old_supply)
		_set_opp_voltage(dev, reg, old_supply);

	return ret;
}

static int _set_opp_bw(const struct opp_table *opp_table,
		       struct dev_pm_opp *opp, struct device *dev, bool remove)
{
	u32 avg, peak;
	int i, ret;

	if (!opp_table->paths)
		return 0;

	for (i = 0; i < opp_table->path_count; i++) {
		if (remove) {
			avg = 0;
			peak = 0;
		} else {
			avg = opp->bandwidth[i].avg;
			peak = opp->bandwidth[i].peak;
		}
		ret = icc_set_bw(opp_table->paths[i], avg, peak);
		if (ret) {
			dev_err(dev, "Failed to %s bandwidth[%d]: %d\n",
				remove ? "remove" : "set", i, ret);
			return ret;
		}
	}

	return 0;
}

static int _set_opp_custom(const struct opp_table *opp_table,
			   struct device *dev, unsigned long old_freq,
			   unsigned long freq,
			   struct dev_pm_opp_supply *old_supply,
			   struct dev_pm_opp_supply *new_supply)
{
	struct dev_pm_set_opp_data *data;
	int size;

	data = opp_table->set_opp_data;
	data->regulators = opp_table->regulators;
	data->regulator_count = opp_table->regulator_count;
	data->clk = opp_table->clk;
	data->dev = dev;

	data->old_opp.rate = old_freq;
	size = sizeof(*old_supply) * opp_table->regulator_count;
	if (!old_supply)
		memset(data->old_opp.supplies, 0, size);
	else
		memcpy(data->old_opp.supplies, old_supply, size);

	data->new_opp.rate = freq;
	memcpy(data->new_opp.supplies, new_supply, size);

	return opp_table->set_opp(data);
}

static int _set_required_opp(struct device *dev, struct device *pd_dev,
			     struct dev_pm_opp *opp, int i)
{
	unsigned int pstate = likely(opp) ? opp->required_opps[i]->pstate : 0;
	int ret;

	if (!pd_dev)
		return 0;

	ret = dev_pm_genpd_set_performance_state(pd_dev, pstate);
	if (ret) {
		dev_err(dev, "Failed to set performance rate of %s: %d (%d)\n",
			dev_name(pd_dev), pstate, ret);
	}

	return ret;
}

/* This is only called for PM domain for now */
static int _set_required_opps(struct device *dev,
			      struct opp_table *opp_table,
			      struct dev_pm_opp *opp, bool up)
{
	struct opp_table **required_opp_tables = opp_table->required_opp_tables;
	struct device **genpd_virt_devs = opp_table->genpd_virt_devs;
	int i, ret = 0;

	if (!required_opp_tables)
		return 0;

	/* Single genpd case */
	if (!genpd_virt_devs)
		return _set_required_opp(dev, dev, opp, 0);

	/* Multiple genpd case */

	/*
	 * Acquire genpd_virt_dev_lock to make sure we don't use a genpd_dev
	 * after it is freed from another thread.
	 */
	mutex_lock(&opp_table->genpd_virt_dev_lock);

	/* Scaling up? Set required OPPs in normal order, else reverse */
	if (up) {
		for (i = 0; i < opp_table->required_opp_count; i++) {
			ret = _set_required_opp(dev, genpd_virt_devs[i], opp, i);
			if (ret)
				break;
		}
	} else {
		for (i = opp_table->required_opp_count - 1; i >= 0; i--) {
			ret = _set_required_opp(dev, genpd_virt_devs[i], opp, i);
			if (ret)
				break;
		}
	}

	mutex_unlock(&opp_table->genpd_virt_dev_lock);

	return ret;
}

/**
 * dev_pm_opp_set_bw() - sets bandwidth levels corresponding to an opp
 * @dev:	device for which we do this operation
 * @opp:	opp based on which the bandwidth levels are to be configured
 *
 * This configures the bandwidth to the levels specified by the OPP. However
 * if the OPP specified is NULL the bandwidth levels are cleared out.
 *
 * Return: 0 on success or a negative error value.
 */
int dev_pm_opp_set_bw(struct device *dev, struct dev_pm_opp *opp)
{
	struct opp_table *opp_table;
	int ret;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		dev_err(dev, "%s: device opp table doesn't exist\n", __func__);
		return PTR_ERR(opp_table);
	}

	if (opp)
		ret = _set_opp_bw(opp_table, opp, dev, false);
	else
		ret = _set_opp_bw(opp_table, NULL, dev, true);

	dev_pm_opp_put_opp_table(opp_table);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_bw);
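
/*
 * Illustrative sketch: passing NULL clears the bandwidth votes again, so a
 * hypothetical driver could bracket a low-power transition like this:
 *
 *	ret = dev_pm_opp_set_bw(dev, opp);	// vote for opp's bandwidth
 *	...
 *	ret = dev_pm_opp_set_bw(dev, NULL);	// drop the vote
 */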

static int _opp_set_rate_zero(struct device *dev, struct opp_table *opp_table)
{
	int ret;

	if (!opp_table->enabled)
		return 0;

	/*
	 * Some drivers need to support cases where some platforms may
	 * have OPP table for the device, while others don't and
	 * opp_set_rate() just needs to behave like clk_set_rate().
	 */
	if (!_get_opp_count(opp_table))
		return 0;

	ret = _set_opp_bw(opp_table, NULL, dev, true);
	if (ret)
		return ret;

	if (opp_table->regulators)
		regulator_disable(opp_table->regulators[0]);

	ret = _set_required_opps(dev, opp_table, NULL, false);

	opp_table->enabled = false;
	return ret;
}

/**
 * dev_pm_opp_set_rate() - Configure new OPP based on frequency
 * @dev:	 device for which we do this operation
 * @target_freq: frequency to achieve
 *
 * This configures the power-supplies to the levels specified by the OPP
 * corresponding to the target_freq, and programs the clock to a value <=
 * target_freq, as rounded by clk_round_rate(). A device wanting to run at the
 * fmax provided by the opp should have already rounded to the target OPP's
 * frequency.
 */
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
{
	struct opp_table *opp_table;
	unsigned long freq, old_freq, temp_freq;
	struct dev_pm_opp *old_opp, *opp;
	struct clk *clk;
	int ret;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		dev_err(dev, "%s: device opp doesn't exist\n", __func__);
		return PTR_ERR(opp_table);
	}

	if (unlikely(!target_freq)) {
		ret = _opp_set_rate_zero(dev, opp_table);
		goto put_opp_table;
	}

	clk = opp_table->clk;
	if (IS_ERR(clk)) {
		dev_err(dev, "%s: No clock available for the device\n",
			__func__);
		ret = PTR_ERR(clk);
		goto put_opp_table;
	}

	freq = clk_round_rate(clk, target_freq);
	if ((long)freq <= 0)
		freq = target_freq;

	old_freq = clk_get_rate(clk);

	/* Return early if nothing to do */
	if (opp_table->enabled && old_freq == freq) {
		dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
			__func__, freq);
		ret = 0;
		goto put_opp_table;
	}

	/*
	 * For IO devices which require an OPP on some platforms/SoCs
	 * while just needing to scale the clock on some others
	 * we look for empty OPP tables with just a clock handle and
	 * scale only the clk. This makes dev_pm_opp_set_rate()
	 * equivalent to a clk_set_rate()
	 */
	if (!_get_opp_count(opp_table)) {
		ret = _generic_set_opp_clk_only(dev, clk, freq);
		goto put_opp_table;
	}

	temp_freq = old_freq;
	old_opp = _find_freq_ceil(opp_table, &temp_freq);
	if (IS_ERR(old_opp)) {
		dev_err(dev, "%s: failed to find current OPP for freq %lu (%ld)\n",
			__func__, old_freq, PTR_ERR(old_opp));
	}

	temp_freq = freq;
	opp = _find_freq_ceil(opp_table, &temp_freq);
	if (IS_ERR(opp)) {
		ret = PTR_ERR(opp);
		dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
			__func__, freq, ret);
		goto put_old_opp;
	}

	dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n", __func__,
		old_freq, freq);

	/* Scaling up? Configure required OPPs before frequency */
	if (freq >= old_freq) {
		ret = _set_required_opps(dev, opp_table, opp, true);
		if (ret)
			goto put_opp;
	}

	if (opp_table->set_opp) {
		ret = _set_opp_custom(opp_table, dev, old_freq, freq,
				      IS_ERR(old_opp) ? NULL : old_opp->supplies,
				      opp->supplies);
	} else if (opp_table->regulators) {
		ret = _generic_set_opp_regulator(opp_table, dev, old_freq, freq,
						 IS_ERR(old_opp) ? NULL : old_opp->supplies,
						 opp->supplies);
	} else {
		/* Only frequency scaling */
		ret = _generic_set_opp_clk_only(dev, clk, freq);
	}

	/* Scaling down? Configure required OPPs after frequency */
	if (!ret && freq < old_freq) {
		ret = _set_required_opps(dev, opp_table, opp, false);
		if (ret)
			dev_err(dev, "Failed to set required opps: %d\n", ret);
	}

	if (!ret) {
		ret = _set_opp_bw(opp_table, opp, dev, false);
		if (!ret)
			opp_table->enabled = true;
	}

put_opp:
	dev_pm_opp_put(opp);
put_old_opp:
	if (!IS_ERR(old_opp))
		dev_pm_opp_put(old_opp);
put_opp_table:
	dev_pm_opp_put_opp_table(opp_table);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
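
/*
 * Illustrative sketch: a typical consumer only has a target frequency and
 * lets the OPP core pick the matching OPP, supplies and bandwidth. The
 * devfreq-style callback below is hypothetical, not part of this file:
 *
 *	static int foo_target(struct device *dev, unsigned long *freq, u32 flags)
 *	{
 *		return dev_pm_opp_set_rate(dev, *freq);
 *	}
 *
 * Passing a target_freq of 0 puts the device back into its "disabled" state
 * via _opp_set_rate_zero() above.
 */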

/* OPP-dev Helpers */
static void _remove_opp_dev(struct opp_device *opp_dev,
			    struct opp_table *opp_table)
{
	opp_debug_unregister(opp_dev, opp_table);
	list_del(&opp_dev->node);
	kfree(opp_dev);
}

struct opp_device *_add_opp_dev(const struct device *dev,
				struct opp_table *opp_table)
{
	struct opp_device *opp_dev;

	opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL);
	if (!opp_dev)
		return NULL;

	/* Initialize opp-dev */
	opp_dev->dev = dev;

	mutex_lock(&opp_table->lock);
	list_add(&opp_dev->node, &opp_table->dev_list);
	mutex_unlock(&opp_table->lock);

	/* Create debugfs entries for the opp_table */
	opp_debug_register(opp_dev, opp_table);

	return opp_dev;
}

static struct opp_table *_allocate_opp_table(struct device *dev, int index)
{
	struct opp_table *opp_table;
	struct opp_device *opp_dev;
	int ret;

	/*
	 * Allocate a new OPP table. In the infrequent case where a new
	 * device needs to be added, we pay this penalty.
	 */
	opp_table = kzalloc(sizeof(*opp_table), GFP_KERNEL);
	if (!opp_table)
		return ERR_PTR(-ENOMEM);

	mutex_init(&opp_table->lock);
	mutex_init(&opp_table->genpd_virt_dev_lock);
	INIT_LIST_HEAD(&opp_table->dev_list);

	/* Mark regulator count uninitialized */
	opp_table->regulator_count = -1;

	opp_dev = _add_opp_dev(dev, opp_table);
	if (!opp_dev) {
		ret = -ENOMEM;
		goto err;
	}

	_of_init_opp_table(opp_table, dev, index);

	/* Find clk for the device */
	opp_table->clk = clk_get(dev, NULL);
	if (IS_ERR(opp_table->clk)) {
		ret = PTR_ERR(opp_table->clk);
		if (ret == -EPROBE_DEFER)
			goto err;

		dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__, ret);
	}

	/* Find interconnect path(s) for the device */
	ret = dev_pm_opp_of_find_icc_paths(dev, opp_table);
	if (ret) {
		if (ret == -EPROBE_DEFER)
			goto err;

		dev_warn(dev, "%s: Error finding interconnect paths: %d\n",
			 __func__, ret);
	}

	BLOCKING_INIT_NOTIFIER_HEAD(&opp_table->head);
	INIT_LIST_HEAD(&opp_table->opp_list);
	kref_init(&opp_table->kref);

	return opp_table;

err:
	kfree(opp_table);
	return ERR_PTR(ret);
}

void _get_opp_table_kref(struct opp_table *opp_table)
{
	kref_get(&opp_table->kref);
}

/*
 * We need to make sure that the OPP table for a device doesn't get added twice,
 * if this routine gets called in parallel with the same device pointer.
 *
 * The simplest way to enforce that is to perform everything (find existing
 * table and if not found, create a new one) under the opp_table_lock, so only
 * one creator gets access to the same. But that expands the critical section
 * under the lock and may end up causing circular dependencies with frameworks
 * like debugfs, interconnect or clock framework as they may be direct or
 * indirect users of OPP core.
 *
 * And for that reason we have to go for a bit tricky implementation here, which
 * uses the opp_tables_busy flag to indicate if another creator is in the middle
 * of adding an OPP table and others should wait for it to finish.
 */
static struct opp_table *_opp_get_opp_table(struct device *dev, int index)
{
	struct opp_table *opp_table;

again:
	mutex_lock(&opp_table_lock);

	opp_table = _find_opp_table_unlocked(dev);
	if (!IS_ERR(opp_table))
		goto unlock;

	/*
	 * The opp_tables list or an OPP table's dev_list is getting updated by
	 * another user, wait for it to finish.
	 */
	if (unlikely(opp_tables_busy)) {
		mutex_unlock(&opp_table_lock);
		cpu_relax();
		goto again;
	}

	opp_tables_busy = true;
	opp_table = _managed_opp(dev, index);

	/* Drop the lock to reduce the size of critical section */
	mutex_unlock(&opp_table_lock);

	if (opp_table) {
		if (!_add_opp_dev(dev, opp_table)) {
			dev_pm_opp_put_opp_table(opp_table);
			opp_table = ERR_PTR(-ENOMEM);
		}

		mutex_lock(&opp_table_lock);
	} else {
		opp_table = _allocate_opp_table(dev, index);

		mutex_lock(&opp_table_lock);
		if (!IS_ERR(opp_table))
			list_add(&opp_table->node, &opp_tables);
	}

	opp_tables_busy = false;

unlock:
	mutex_unlock(&opp_table_lock);

	return opp_table;
}

struct opp_table *dev_pm_opp_get_opp_table(struct device *dev)
{
	return _opp_get_opp_table(dev, 0);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_table);

struct opp_table *dev_pm_opp_get_opp_table_indexed(struct device *dev,
						   int index)
{
	return _opp_get_opp_table(dev, index);
}

static void _opp_table_kref_release(struct kref *kref)
{
	struct opp_table *opp_table = container_of(kref, struct opp_table, kref);
	struct opp_device *opp_dev, *temp;
	int i;

	/* Drop the lock as soon as we can */
	list_del(&opp_table->node);
	mutex_unlock(&opp_table_lock);

	_of_clear_opp_table(opp_table);

	/* Release clk */
	if (!IS_ERR(opp_table->clk))
		clk_put(opp_table->clk);

	if (opp_table->paths) {
		for (i = 0; i < opp_table->path_count; i++)
			icc_put(opp_table->paths[i]);
		kfree(opp_table->paths);
	}

	WARN_ON(!list_empty(&opp_table->opp_list));

	list_for_each_entry_safe(opp_dev, temp, &opp_table->dev_list, node) {
		/*
		 * The OPP table is getting removed, drop the performance state
		 * constraints.
		 */
		if (opp_table->genpd_performance_state)
			dev_pm_genpd_set_performance_state((struct device *)(opp_dev->dev), 0);

		_remove_opp_dev(opp_dev, opp_table);
	}

	mutex_destroy(&opp_table->genpd_virt_dev_lock);
	mutex_destroy(&opp_table->lock);
	kfree(opp_table);
}

void dev_pm_opp_put_opp_table(struct opp_table *opp_table)
{
	kref_put_mutex(&opp_table->kref, _opp_table_kref_release,
		       &opp_table_lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put_opp_table);

void _opp_free(struct dev_pm_opp *opp)
{
	kfree(opp);
}

static void _opp_kref_release(struct dev_pm_opp *opp,
			      struct opp_table *opp_table)
{
	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_REMOVE, opp);
	_of_opp_free_required_opps(opp_table, opp);
	opp_debug_remove_one(opp);
	list_del(&opp->node);
	kfree(opp);
}

static void _opp_kref_release_unlocked(struct kref *kref)
{
	struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
	struct opp_table *opp_table = opp->opp_table;

	_opp_kref_release(opp, opp_table);
}

static void _opp_kref_release_locked(struct kref *kref)
{
	struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
	struct opp_table *opp_table = opp->opp_table;

	_opp_kref_release(opp, opp_table);
	mutex_unlock(&opp_table->lock);
}

void dev_pm_opp_get(struct dev_pm_opp *opp)
{
	kref_get(&opp->kref);
}

void dev_pm_opp_put(struct dev_pm_opp *opp)
{
	kref_put_mutex(&opp->kref, _opp_kref_release_locked,
		       &opp->opp_table->lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put);

static void dev_pm_opp_put_unlocked(struct dev_pm_opp *opp)
{
	kref_put(&opp->kref, _opp_kref_release_unlocked);
}

/**
 * dev_pm_opp_remove() - Remove an OPP from OPP table
 * @dev:	device for which we do this operation
 * @freq:	OPP to remove with matching 'freq'
 *
 * This function removes an opp from the opp table.
 */
void dev_pm_opp_remove(struct device *dev, unsigned long freq)
{
	struct dev_pm_opp *opp;
	struct opp_table *opp_table;
	bool found = false;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return;

	mutex_lock(&opp_table->lock);

	list_for_each_entry(opp, &opp_table->opp_list, node) {
		if (opp->rate == freq) {
			found = true;
			break;
		}
	}

	mutex_unlock(&opp_table->lock);

	if (found) {
		dev_pm_opp_put(opp);

		/* Drop the reference taken by dev_pm_opp_add() */
		dev_pm_opp_put_opp_table(opp_table);
	} else {
		dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
			 __func__, freq);
	}

	/* Drop the reference taken by _find_opp_table() */
	dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
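
/*
 * Illustrative sketch: dev_pm_opp_remove() is the counterpart of
 * dev_pm_opp_add() (defined elsewhere in the OPP core) for dynamically
 * registered OPPs. A hypothetical driver pairing the two, with made-up
 * frequency/voltage values:
 *
 *	dev_pm_opp_add(dev, 1000000000, 975000);
 *	...
 *	dev_pm_opp_remove(dev, 1000000000);
 */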

bool _opp_remove_all_static(struct opp_table *opp_table)
{
	struct dev_pm_opp *opp, *tmp;
	bool ret = true;

	mutex_lock(&opp_table->lock);

	if (!opp_table->parsed_static_opps) {
		ret = false;
		goto unlock;
	}

	if (--opp_table->parsed_static_opps)
		goto unlock;

	list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
		if (!opp->dynamic)
			dev_pm_opp_put_unlocked(opp);
	}

unlock:
	mutex_unlock(&opp_table->lock);

	return ret;
}

/**
 * dev_pm_opp_remove_all_dynamic() - Remove all dynamically created OPPs
 * @dev:	device for which we do this operation
 *
 * This function removes all dynamically created OPPs from the opp table.
 */
void dev_pm_opp_remove_all_dynamic(struct device *dev)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *opp, *temp;
	int count = 0;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return;

	mutex_lock(&opp_table->lock);
	list_for_each_entry_safe(opp, temp, &opp_table->opp_list, node) {
		if (opp->dynamic) {
			dev_pm_opp_put_unlocked(opp);
			count++;
		}
	}
	mutex_unlock(&opp_table->lock);

	/* Drop the references taken by dev_pm_opp_add() */
	while (count--)
		dev_pm_opp_put_opp_table(opp_table);

	/* Drop the reference taken by _find_opp_table() */
	dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove_all_dynamic);

struct dev_pm_opp *_opp_allocate(struct opp_table *table)
{
	struct dev_pm_opp *opp;
	int supply_count, supply_size, icc_size;

	/* Allocate space for at least one supply */
	supply_count = table->regulator_count > 0 ? table->regulator_count : 1;
	supply_size = sizeof(*opp->supplies) * supply_count;
	icc_size = sizeof(*opp->bandwidth) * table->path_count;

	/* allocate new OPP node and supplies structures */
	opp = kzalloc(sizeof(*opp) + supply_size + icc_size, GFP_KERNEL);

	if (!opp)
		return NULL;

	/* Put the supplies at the end of the OPP structure as an empty array */
	opp->supplies = (struct dev_pm_opp_supply *)(opp + 1);
	if (icc_size)
		opp->bandwidth = (struct dev_pm_opp_icc_bw *)(opp->supplies + supply_count);
	INIT_LIST_HEAD(&opp->node);

	return opp;
}

static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
					 struct opp_table *opp_table)
{
	struct regulator *reg;
	int i;

	if (!opp_table->regulators)
		return true;

	for (i = 0; i < opp_table->regulator_count; i++) {
		reg = opp_table->regulators[i];

		if (!regulator_is_supported_voltage(reg,
					opp->supplies[i].u_volt_min,
					opp->supplies[i].u_volt_max)) {
			pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
				__func__, opp->supplies[i].u_volt_min,
				opp->supplies[i].u_volt_max);
			return false;
		}
	}

	return true;
}

int _opp_compare_key(struct dev_pm_opp *opp1, struct dev_pm_opp *opp2)
{
	if (opp1->rate != opp2->rate)
		return opp1->rate < opp2->rate ? -1 : 1;
	if (opp1->bandwidth && opp2->bandwidth &&
	    opp1->bandwidth[0].peak != opp2->bandwidth[0].peak)
		return opp1->bandwidth[0].peak < opp2->bandwidth[0].peak ? -1 : 1;
	if (opp1->level != opp2->level)
		return opp1->level < opp2->level ? -1 : 1;
	return 0;
}

static int _opp_is_duplicate(struct device *dev, struct dev_pm_opp *new_opp,
			     struct opp_table *opp_table,
			     struct list_head **head)
{
	struct dev_pm_opp *opp;
	int opp_cmp;

	/*
	 * Insert new OPP in order of increasing frequency and discard if
	 * already present.
	 *
	 * Need to use &opp_table->opp_list in the condition part of the 'for'
	 * loop, don't replace it with head otherwise it will become an infinite
	 * loop.
	 */
	list_for_each_entry(opp, &opp_table->opp_list, node) {
		opp_cmp = _opp_compare_key(new_opp, opp);
		if (opp_cmp > 0) {
			*head = &opp->node;
			continue;
		}

		if (opp_cmp < 0)
			return 0;

		/* Duplicate OPPs */
		dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
			 __func__, opp->rate, opp->supplies[0].u_volt,
			 opp->available, new_opp->rate,
			 new_opp->supplies[0].u_volt, new_opp->available);

		/* Should we compare voltages for all regulators here ? */
		return opp->available &&
		       new_opp->supplies[0].u_volt == opp->supplies[0].u_volt ? -EBUSY : -EEXIST;
	}

	return 0;
}

/*
 * Returns:
 * 0: On success. And appropriate error message for duplicate OPPs.
 * -EBUSY: For OPP with same freq/volt and is available. The callers of
 *  _opp_add() must return 0 if they receive -EBUSY from it. This is to make
 *  sure we don't print error messages unnecessarily if different parts of
 *  kernel try to initialize the OPP table.
 * -EEXIST: For OPP with same freq but different volt or is unavailable. This
 *  should be considered an error by the callers of _opp_add().
 */
int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
	     struct opp_table *opp_table, bool rate_not_available)
{
	struct list_head *head;
	int ret;

	mutex_lock(&opp_table->lock);
	head = &opp_table->opp_list;

	if (likely(!rate_not_available)) {
		ret = _opp_is_duplicate(dev, new_opp, opp_table, &head);
		if (ret) {
			mutex_unlock(&opp_table->lock);
			return ret;
		}
	}

	list_add(&new_opp->node, head);
	mutex_unlock(&opp_table->lock);

	new_opp->opp_table = opp_table;
	kref_init(&new_opp->kref);

	opp_debug_create_one(new_opp, opp_table);

	if (!_opp_supported_by_regulators(new_opp, opp_table)) {
		new_opp->available = false;
		dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n",
			 __func__, new_opp->rate);
	}

	return 0;
}

/**
 * _opp_add_v1() - Allocate an OPP based on v1 bindings.
 * @opp_table:	OPP table
 * @dev:	device for which we do this operation
 * @freq:	Frequency in Hz for this OPP
 * @u_volt:	Voltage in uVolts for this OPP
 * @dynamic:	Dynamically added OPPs.
 *
 * This function adds an opp definition to the opp table and returns status.
 * The opp is made available by default and it can be controlled using
 * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
 *
 * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table
 * and freed by dev_pm_opp_of_remove_table.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 */
int _opp_add_v1(struct opp_table *opp_table, struct device *dev,
		unsigned long freq, long u_volt, bool dynamic)
{
	struct dev_pm_opp *new_opp;
	unsigned long tol;
	int ret;

	new_opp = _opp_allocate(opp_table);
	if (!new_opp)
		return -ENOMEM;

	/* populate the opp table */
	new_opp->rate = freq;
	tol = u_volt * opp_table->voltage_tolerance_v1 / 100;
	new_opp->supplies[0].u_volt = u_volt;
	new_opp->supplies[0].u_volt_min = u_volt - tol;
	new_opp->supplies[0].u_volt_max = u_volt + tol;
	new_opp->available = true;
	new_opp->dynamic = dynamic;

	ret = _opp_add(dev, new_opp, opp_table, false);
	if (ret) {
		/* Don't return error for duplicate OPPs */
		if (ret == -EBUSY)
			ret = 0;
		goto free_opp;
	}

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADD, new_opp);
	return 0;

free_opp:
	_opp_free(new_opp);

	return ret;
}

/**
 * dev_pm_opp_set_supported_hw() - Set supported platforms
 * @dev: Device for which supported-hw has to be set.
 * @versions: Array of hierarchy of versions to match.
 * @count: Number of elements in the array.
 *
 * This is required only for the V2 bindings, and it enables a platform to
 * specify the hierarchy of versions it supports. OPP layer will then enable
 * OPPs, which are available for those versions, based on its 'opp-supported-hw'
 * property.
 */
struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev,
					      const u32 *versions, unsigned int count)
{
	struct opp_table *opp_table;

	opp_table = dev_pm_opp_get_opp_table(dev);
	if (IS_ERR(opp_table))
		return opp_table;

	/* Make sure there are no concurrent readers while updating opp_table */
	WARN_ON(!list_empty(&opp_table->opp_list));

	/* Another CPU that shares the OPP table has set the property ? */
	if (opp_table->supported_hw)
		return opp_table;

	opp_table->supported_hw = kmemdup(versions, count * sizeof(*versions),
					  GFP_KERNEL);
	if (!opp_table->supported_hw) {
		dev_pm_opp_put_opp_table(opp_table);
		return ERR_PTR(-ENOMEM);
	}

	opp_table->supported_hw_count = count;

	return opp_table;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw);
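
/*
 * Illustrative sketch: a platform driver that knows its silicon revision can
 * restrict which OPPs get enabled from the DT. The version values below are
 * made up:
 *
 *	const u32 hw_version[] = { 0x10, 0x2 };
 *	struct opp_table *opp_table;
 *
 *	opp_table = dev_pm_opp_set_supported_hw(dev, hw_version,
 *						ARRAY_SIZE(hw_version));
 *	if (IS_ERR(opp_table))
 *		return PTR_ERR(opp_table);
 */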

/**
 * dev_pm_opp_put_supported_hw() - Releases resources blocked for supported hw
 * @opp_table: OPP table returned by dev_pm_opp_set_supported_hw().
 *
 * This is required only for the V2 bindings, and is called for a matching
 * dev_pm_opp_set_supported_hw(). Until this is called, the opp_table structure
 * will not be freed.
 */
void dev_pm_opp_put_supported_hw(struct opp_table *opp_table)
{
	/* Make sure there are no concurrent readers while updating opp_table */
	WARN_ON(!list_empty(&opp_table->opp_list));

	kfree(opp_table->supported_hw);
	opp_table->supported_hw = NULL;
	opp_table->supported_hw_count = 0;

	dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw);

/**
 * dev_pm_opp_set_prop_name() - Set prop-extn name
 * @dev: Device for which the prop-name has to be set.
 * @name: name to postfix to properties.
 *
 * This is required only for the V2 bindings, and it enables a platform to
 * specify the extn to be used for certain property names. The properties to
 * which the extension will apply are opp-microvolt and opp-microamp. OPP core
 * should postfix the property name with -<name> while looking for them.
 */
struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name)
{
	struct opp_table *opp_table;

	opp_table = dev_pm_opp_get_opp_table(dev);
	if (IS_ERR(opp_table))
		return opp_table;

	/* Make sure there are no concurrent readers while updating opp_table */
	WARN_ON(!list_empty(&opp_table->opp_list));

	/* Another CPU that shares the OPP table has set the property ? */
	if (opp_table->prop_name)
		return opp_table;

	opp_table->prop_name = kstrdup(name, GFP_KERNEL);
	if (!opp_table->prop_name) {
		dev_pm_opp_put_opp_table(opp_table);
		return ERR_PTR(-ENOMEM);
	}

	return opp_table;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name);
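
/*
 * Illustrative sketch: with a prop name of "slow", the OPP core would look for
 * properties such as "opp-microvolt-slow" in the DT nodes. A hypothetical
 * caller:
 *
 *	struct opp_table *opp_table = dev_pm_opp_set_prop_name(dev, "slow");
 *
 *	if (IS_ERR(opp_table))
 *		return PTR_ERR(opp_table);
 */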
1711
1712/**
1713 * dev_pm_opp_put_prop_name() - Releases resources blocked for prop-name
fa30184d 1714 * @opp_table: OPP table returned by dev_pm_opp_set_prop_name().
01fb4d3c
VK
1715 *
1716 * This is required only for the V2 bindings, and is called for a matching
2c2709dc 1717 * dev_pm_opp_set_prop_name(). Until this is called, the opp_table structure
01fb4d3c 1718 * will not be freed.
01fb4d3c 1719 */
fa30184d 1720void dev_pm_opp_put_prop_name(struct opp_table *opp_table)
01fb4d3c 1721{
2c2709dc
VK
1722 /* Make sure there are no concurrent readers while updating opp_table */
1723 WARN_ON(!list_empty(&opp_table->opp_list));
01fb4d3c 1724
2c2709dc
VK
1725 kfree(opp_table->prop_name);
1726 opp_table->prop_name = NULL;
01fb4d3c 1727
fa30184d 1728 dev_pm_opp_put_opp_table(opp_table);
01fb4d3c
VK
1729}
1730EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
1731
94735585
VK
1732static int _allocate_set_opp_data(struct opp_table *opp_table)
1733{
1734 struct dev_pm_set_opp_data *data;
1735 int len, count = opp_table->regulator_count;
1736
90e3577b 1737 if (WARN_ON(!opp_table->regulators))
94735585
VK
1738 return -EINVAL;
1739
1740 /* space for set_opp_data */
1741 len = sizeof(*data);
1742
1743 /* space for old_opp.supplies and new_opp.supplies */
1744 len += 2 * sizeof(struct dev_pm_opp_supply) * count;
1745
1746 data = kzalloc(len, GFP_KERNEL);
1747 if (!data)
1748 return -ENOMEM;
1749
1750 data->old_opp.supplies = (void *)(data + 1);
1751 data->new_opp.supplies = data->old_opp.supplies + count;
1752
1753 opp_table->set_opp_data = data;
1754
1755 return 0;
1756}
1757
1758static void _free_set_opp_data(struct opp_table *opp_table)
1759{
1760 kfree(opp_table->set_opp_data);
1761 opp_table->set_opp_data = NULL;
1762}
1763
9f8ea969 1764/**
dfbe4678 1765 * dev_pm_opp_set_regulators() - Set regulator names for the device
9f8ea969 1766 * @dev: Device for which regulator name is being set.
dfbe4678
VK
 1767 * @names: Array of pointers to the names of the regulators.
1768 * @count: Number of regulators.
9f8ea969
VK
1769 *
 1770 * In order to support OPP switching, the OPP layer needs to know the names of the
dfbe4678
VK
1771 * device's regulators, as the core would be required to switch voltages as
1772 * well.
9f8ea969
VK
1773 *
1774 * This must be called before any OPPs are initialized for the device.
9f8ea969 1775 */
dfbe4678
VK
1776struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
1777 const char * const names[],
1778 unsigned int count)
9f8ea969 1779{
2c2709dc 1780 struct opp_table *opp_table;
9f8ea969 1781 struct regulator *reg;
dfbe4678 1782 int ret, i;
9f8ea969 1783
fa30184d 1784 opp_table = dev_pm_opp_get_opp_table(dev);
dd461cd9
SG
1785 if (IS_ERR(opp_table))
1786 return opp_table;
9f8ea969
VK
1787
1788 /* This should be called before OPPs are initialized */
2c2709dc 1789 if (WARN_ON(!list_empty(&opp_table->opp_list))) {
9f8ea969
VK
1790 ret = -EBUSY;
1791 goto err;
1792 }
1793
779b783c
VK
 1794	/* Another CPU that shares the OPP table has set the regulators? */
1795 if (opp_table->regulators)
1796 return opp_table;
dfbe4678
VK
1797
1798 opp_table->regulators = kmalloc_array(count,
1799 sizeof(*opp_table->regulators),
1800 GFP_KERNEL);
1801 if (!opp_table->regulators) {
1802 ret = -ENOMEM;
9f8ea969
VK
1803 goto err;
1804 }
1805
dfbe4678
VK
1806 for (i = 0; i < count; i++) {
1807 reg = regulator_get_optional(dev, names[i]);
1808 if (IS_ERR(reg)) {
1809 ret = PTR_ERR(reg);
1810 if (ret != -EPROBE_DEFER)
1811 dev_err(dev, "%s: no regulator (%s) found: %d\n",
1812 __func__, names[i], ret);
1813 goto free_regulators;
1814 }
1815
1816 opp_table->regulators[i] = reg;
1817 }
1818
1819 opp_table->regulator_count = count;
9f8ea969 1820
94735585
VK
1821 /* Allocate block only once to pass to set_opp() routines */
1822 ret = _allocate_set_opp_data(opp_table);
1823 if (ret)
1824 goto free_regulators;
1825
91291d9a 1826 return opp_table;
9f8ea969 1827
dfbe4678 1828free_regulators:
24957db1
MS
1829 while (i != 0)
1830 regulator_put(opp_table->regulators[--i]);
dfbe4678
VK
1831
1832 kfree(opp_table->regulators);
1833 opp_table->regulators = NULL;
46f48aca 1834 opp_table->regulator_count = -1;
9f8ea969 1835err:
fa30184d 1836 dev_pm_opp_put_opp_table(opp_table);
9f8ea969 1837
91291d9a 1838 return ERR_PTR(ret);
9f8ea969 1839}
dfbe4678 1840EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulators);
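A hedged sketch of a consumer registering its supplies before adding OPPs; the foo_* names and the "vdd-core"/"vdd-mem" connection IDs are illustrative and must match the device's actual regulator names:

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/pm_opp.h>

static int foo_init_regulators(struct device *dev)
{
	/* Illustrative supply names; these are regulator connection IDs. */
	static const char * const reg_names[] = { "vdd-core", "vdd-mem" };
	struct opp_table *opp_table;

	/* Must be called before any OPPs are added for dev. */
	opp_table = dev_pm_opp_set_regulators(dev, reg_names,
					      ARRAY_SIZE(reg_names));
	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);

	return 0;
}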
9f8ea969
VK
1841
1842/**
dfbe4678
VK
1843 * dev_pm_opp_put_regulators() - Releases resources blocked for regulator
1844 * @opp_table: OPP table returned from dev_pm_opp_set_regulators().
9f8ea969 1845 */
dfbe4678 1846void dev_pm_opp_put_regulators(struct opp_table *opp_table)
9f8ea969 1847{
dfbe4678
VK
1848 int i;
1849
779b783c
VK
1850 if (!opp_table->regulators)
1851 goto put_opp_table;
9f8ea969 1852
2c2709dc
VK
1853 /* Make sure there are no concurrent readers while updating opp_table */
1854 WARN_ON(!list_empty(&opp_table->opp_list));
9f8ea969 1855
72f80ce4 1856 if (opp_table->enabled) {
8d45719c
KK
1857 for (i = opp_table->regulator_count - 1; i >= 0; i--)
1858 regulator_disable(opp_table->regulators[i]);
8d45719c
KK
1859 }
1860
24957db1 1861 for (i = opp_table->regulator_count - 1; i >= 0; i--)
dfbe4678
VK
1862 regulator_put(opp_table->regulators[i]);
1863
94735585
VK
1864 _free_set_opp_data(opp_table);
1865
dfbe4678
VK
1866 kfree(opp_table->regulators);
1867 opp_table->regulators = NULL;
46f48aca 1868 opp_table->regulator_count = -1;
9f8ea969 1869
779b783c 1870put_opp_table:
fa30184d 1871 dev_pm_opp_put_opp_table(opp_table);
9f8ea969 1872}
dfbe4678 1873EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulators);
9f8ea969 1874
829a4e8c
VK
1875/**
1876 * dev_pm_opp_set_clkname() - Set clk name for the device
1877 * @dev: Device for which clk name is being set.
1878 * @name: Clk name.
1879 *
 1880 * In order to support OPP switching, the OPP layer needs to get a pointer to the
1881 * clock for the device. Simple cases work fine without using this routine (i.e.
1882 * by passing connection-id as NULL), but for a device with multiple clocks
1883 * available, the OPP core needs to know the exact name of the clk to use.
1884 *
1885 * This must be called before any OPPs are initialized for the device.
1886 */
1887struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char *name)
1888{
1889 struct opp_table *opp_table;
1890 int ret;
1891
1892 opp_table = dev_pm_opp_get_opp_table(dev);
dd461cd9
SG
1893 if (IS_ERR(opp_table))
1894 return opp_table;
829a4e8c
VK
1895
1896 /* This should be called before OPPs are initialized */
1897 if (WARN_ON(!list_empty(&opp_table->opp_list))) {
1898 ret = -EBUSY;
1899 goto err;
1900 }
1901
1902 /* Already have default clk set, free it */
1903 if (!IS_ERR(opp_table->clk))
1904 clk_put(opp_table->clk);
1905
1906 /* Find clk for the device */
1907 opp_table->clk = clk_get(dev, name);
1908 if (IS_ERR(opp_table->clk)) {
1909 ret = PTR_ERR(opp_table->clk);
1910 if (ret != -EPROBE_DEFER) {
1911 dev_err(dev, "%s: Couldn't find clock: %d\n", __func__,
1912 ret);
1913 }
1914 goto err;
1915 }
1916
1917 return opp_table;
1918
1919err:
1920 dev_pm_opp_put_opp_table(opp_table);
1921
1922 return ERR_PTR(ret);
1923}
1924EXPORT_SYMBOL_GPL(dev_pm_opp_set_clkname);
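A small sketch for a device with more than one clock; the "core" connection ID is an assumption standing in for whatever clock-names entry actually drives the OPP rate:

#include <linux/err.h>
#include <linux/pm_opp.h>

static int foo_init_clk(struct device *dev)
{
	struct opp_table *opp_table;

	/* Pick the clock that the OPP frequencies refer to. */
	opp_table = dev_pm_opp_set_clkname(dev, "core");
	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);

	return 0;
}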
1925
1926/**
1927 * dev_pm_opp_put_clkname() - Releases resources blocked for clk.
1928 * @opp_table: OPP table returned from dev_pm_opp_set_clkname().
1929 */
1930void dev_pm_opp_put_clkname(struct opp_table *opp_table)
1931{
1932 /* Make sure there are no concurrent readers while updating opp_table */
1933 WARN_ON(!list_empty(&opp_table->opp_list));
1934
1935 clk_put(opp_table->clk);
1936 opp_table->clk = ERR_PTR(-EINVAL);
1937
1938 dev_pm_opp_put_opp_table(opp_table);
1939}
1940EXPORT_SYMBOL_GPL(dev_pm_opp_put_clkname);
1941
4dab160e
VK
1942/**
1943 * dev_pm_opp_register_set_opp_helper() - Register custom set OPP helper
1944 * @dev: Device for which the helper is getting registered.
1945 * @set_opp: Custom set OPP helper.
1946 *
 1947 * This is useful to support complex platforms (like platforms with multiple
 1948 * regulators per device), which need this instead of the generic OPP set rate helper.
1949 *
1950 * This must be called before any OPPs are initialized for the device.
4dab160e 1951 */
fa30184d 1952struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev,
4dab160e
VK
1953 int (*set_opp)(struct dev_pm_set_opp_data *data))
1954{
1955 struct opp_table *opp_table;
4dab160e
VK
1956
1957 if (!set_opp)
fa30184d 1958 return ERR_PTR(-EINVAL);
4dab160e 1959
fa30184d 1960 opp_table = dev_pm_opp_get_opp_table(dev);
47efcbcb 1961 if (IS_ERR(opp_table))
dd461cd9 1962 return opp_table;
4dab160e
VK
1963
1964 /* This should be called before OPPs are initialized */
1965 if (WARN_ON(!list_empty(&opp_table->opp_list))) {
5019acc6
VK
1966 dev_pm_opp_put_opp_table(opp_table);
1967 return ERR_PTR(-EBUSY);
4dab160e
VK
1968 }
1969
5019acc6
VK
 1970	/* Another CPU that shares the OPP table has set the helper? */
1971 if (!opp_table->set_opp)
1972 opp_table->set_opp = set_opp;
4dab160e 1973
fa30184d 1974 return opp_table;
4dab160e
VK
1975}
1976EXPORT_SYMBOL_GPL(dev_pm_opp_register_set_opp_helper);
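A hedged sketch of a custom set_opp callback; only the supplies pointers (whose layout is set up by _allocate_set_opp_data() above) are shown, and the regulator/clock sequencing a real platform would do is elided:

#include <linux/err.h>
#include <linux/pm_opp.h>

static int foo_set_opp(struct dev_pm_set_opp_data *data)
{
	struct dev_pm_opp_supply *new_supply = data->new_opp.supplies;

	/*
	 * A real implementation would sequence its clock and regulators
	 * here, e.g. program the first supply to new_supply[0].u_volt.
	 */
	(void)new_supply;

	return 0;
}

static int foo_register_helper(struct device *dev)
{
	struct opp_table *opp_table;

	opp_table = dev_pm_opp_register_set_opp_helper(dev, foo_set_opp);
	return PTR_ERR_OR_ZERO(opp_table);
}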
1977
1978/**
604a7aeb 1979 * dev_pm_opp_unregister_set_opp_helper() - Releases resources blocked for
4dab160e 1980 * set_opp helper
fa30184d 1981 * @opp_table: OPP table returned from dev_pm_opp_register_set_opp_helper().
4dab160e 1982 *
fa30184d 1983 * Release resources blocked for platform specific set_opp helper.
4dab160e 1984 */
604a7aeb 1985void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table)
4dab160e 1986{
4dab160e
VK
1987 /* Make sure there are no concurrent readers while updating opp_table */
1988 WARN_ON(!list_empty(&opp_table->opp_list));
1989
1990 opp_table->set_opp = NULL;
fa30184d 1991 dev_pm_opp_put_opp_table(opp_table);
4dab160e 1992}
604a7aeb 1993EXPORT_SYMBOL_GPL(dev_pm_opp_unregister_set_opp_helper);
4dab160e 1994
6319aee1
VK
1995static void _opp_detach_genpd(struct opp_table *opp_table)
1996{
1997 int index;
1998
cb60e960
VK
1999 if (!opp_table->genpd_virt_devs)
2000 return;
2001
6319aee1
VK
2002 for (index = 0; index < opp_table->required_opp_count; index++) {
2003 if (!opp_table->genpd_virt_devs[index])
2004 continue;
2005
2006 dev_pm_domain_detach(opp_table->genpd_virt_devs[index], false);
2007 opp_table->genpd_virt_devs[index] = NULL;
2008 }
c0ab9e08
VK
2009
2010 kfree(opp_table->genpd_virt_devs);
2011 opp_table->genpd_virt_devs = NULL;
6319aee1
VK
2012}
2013
4f018bc0 2014/**
6319aee1
VK
2015 * dev_pm_opp_attach_genpd - Attach genpd(s) for the device and save virtual device pointer
2016 * @dev: Consumer device for which the genpd is getting attached.
 2017 * @names: NULL-terminated array of pointers containing the names of the genpds to attach.
17a8f868 2018 * @virt_devs: Pointer to return the array of virtual devices.
4f018bc0
VK
2019 *
2020 * Multiple generic power domains for a device are supported with the help of
2021 * virtual genpd devices, which are created for each consumer device - genpd
2022 * pair. These are the device structures which are attached to the power domain
2023 * and are required by the OPP core to set the performance state of the genpd.
6319aee1
VK
 2024 * The same API also works for the case where a single genpd is available, so
 2025 * that case does not need to be supported separately.
4f018bc0
VK
2026 *
2027 * This helper will normally be called by the consumer driver of the device
6319aee1 2028 * "dev", as only that has details of the genpd names.
4f018bc0 2029 *
6319aee1
VK
 2030 * This helper needs to be called once with a list of all genpds to attach.
2031 * Otherwise the original device structure will be used instead by the OPP core.
baea35e4
VK
2032 *
2033 * The order of entries in the names array must match the order in which
2034 * "required-opps" are added in DT.
4f018bc0 2035 */
17a8f868
VK
2036struct opp_table *dev_pm_opp_attach_genpd(struct device *dev,
2037 const char **names, struct device ***virt_devs)
4f018bc0
VK
2038{
2039 struct opp_table *opp_table;
6319aee1 2040 struct device *virt_dev;
baea35e4 2041 int index = 0, ret = -EINVAL;
6319aee1 2042 const char **name = names;
4f018bc0
VK
2043
2044 opp_table = dev_pm_opp_get_opp_table(dev);
dd461cd9
SG
2045 if (IS_ERR(opp_table))
2046 return opp_table;
4f018bc0 2047
cb60e960
VK
2048 if (opp_table->genpd_virt_devs)
2049 return opp_table;
4f018bc0 2050
6319aee1
VK
2051 /*
2052 * If the genpd's OPP table isn't already initialized, parsing of the
 2053	 * required-opps fails for dev. We should retry this after the genpd's OPP
2054 * table is added.
2055 */
2056 if (!opp_table->required_opp_count) {
2057 ret = -EPROBE_DEFER;
2058 goto put_table;
2059 }
2060
4f018bc0
VK
2061 mutex_lock(&opp_table->genpd_virt_dev_lock);
2062
c0ab9e08
VK
2063 opp_table->genpd_virt_devs = kcalloc(opp_table->required_opp_count,
2064 sizeof(*opp_table->genpd_virt_devs),
2065 GFP_KERNEL);
2066 if (!opp_table->genpd_virt_devs)
2067 goto unlock;
4f018bc0 2068
6319aee1 2069 while (*name) {
6319aee1
VK
2070 if (index >= opp_table->required_opp_count) {
2071 dev_err(dev, "Index can't be greater than required-opp-count - 1, %s (%d : %d)\n",
2072 *name, opp_table->required_opp_count, index);
2073 goto err;
2074 }
4f018bc0 2075
6319aee1
VK
2076 virt_dev = dev_pm_domain_attach_by_name(dev, *name);
2077 if (IS_ERR(virt_dev)) {
2078 ret = PTR_ERR(virt_dev);
2079 dev_err(dev, "Couldn't attach to pm_domain: %d\n", ret);
2080 goto err;
2081 }
2082
2083 opp_table->genpd_virt_devs[index] = virt_dev;
baea35e4 2084 index++;
6319aee1 2085 name++;
4f018bc0
VK
2086 }
2087
17a8f868
VK
2088 if (virt_devs)
2089 *virt_devs = opp_table->genpd_virt_devs;
4f018bc0
VK
2090 mutex_unlock(&opp_table->genpd_virt_dev_lock);
2091
2092 return opp_table;
6319aee1
VK
2093
2094err:
2095 _opp_detach_genpd(opp_table);
c0ab9e08 2096unlock:
6319aee1
VK
2097 mutex_unlock(&opp_table->genpd_virt_dev_lock);
2098
2099put_table:
2100 dev_pm_opp_put_opp_table(opp_table);
2101
2102 return ERR_PTR(ret);
4f018bc0 2103}
6319aee1 2104EXPORT_SYMBOL_GPL(dev_pm_opp_attach_genpd);
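A sketch of attaching two power domains; the foo_* names and the "perf-core"/"perf-mem" genpd names are illustrative, and their order is assumed to match the device's "required-opps" entries:

#include <linux/err.h>
#include <linux/pm_opp.h>

static int foo_attach_domains(struct device *dev)
{
	/* Illustrative genpd names, NULL terminated. */
	const char *genpd_names[] = { "perf-core", "perf-mem", NULL };
	struct device **virt_devs;
	struct opp_table *opp_table;

	opp_table = dev_pm_opp_attach_genpd(dev, genpd_names, &virt_devs);
	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);

	/* virt_devs[0] / virt_devs[1] now control the two domains. */
	return 0;
}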
4f018bc0
VK
2105
2106/**
6319aee1
VK
2107 * dev_pm_opp_detach_genpd() - Detach genpd(s) from the device.
2108 * @opp_table: OPP table returned by dev_pm_opp_attach_genpd().
4f018bc0 2109 *
6319aee1
VK
2110 * This detaches the genpd(s), resets the virtual device pointers, and puts the
2111 * OPP table.
4f018bc0 2112 */
6319aee1 2113void dev_pm_opp_detach_genpd(struct opp_table *opp_table)
4f018bc0 2114{
4f018bc0
VK
2115 /*
2116 * Acquire genpd_virt_dev_lock to make sure virt_dev isn't getting
2117 * used in parallel.
2118 */
2119 mutex_lock(&opp_table->genpd_virt_dev_lock);
6319aee1 2120 _opp_detach_genpd(opp_table);
4f018bc0
VK
2121 mutex_unlock(&opp_table->genpd_virt_dev_lock);
2122
6319aee1 2123 dev_pm_opp_put_opp_table(opp_table);
4f018bc0 2124}
6319aee1 2125EXPORT_SYMBOL_GPL(dev_pm_opp_detach_genpd);
4f018bc0 2126
c8a59103
VK
2127/**
2128 * dev_pm_opp_xlate_performance_state() - Find required OPP's pstate for src_table.
2129 * @src_table: OPP table which has dst_table as one of its required OPP table.
2130 * @dst_table: Required OPP table of the src_table.
2131 * @pstate: Current performance state of the src_table.
2132 *
 2133 * This returns the pstate of the OPP (present in @dst_table) pointed to by the
2134 * "required-opps" property of the OPP (present in @src_table) which has
2135 * performance state set to @pstate.
2136 *
2137 * Return: Zero or positive performance state on success, otherwise negative
2138 * value on errors.
2139 */
2140int dev_pm_opp_xlate_performance_state(struct opp_table *src_table,
2141 struct opp_table *dst_table,
2142 unsigned int pstate)
2143{
2144 struct dev_pm_opp *opp;
2145 int dest_pstate = -EINVAL;
2146 int i;
2147
c8a59103
VK
2148 /*
 2149	 * Normally the src_table will have the "required-opps" property set to
 2150	 * point to one of the OPPs in the dst_table, but in some cases the
 2151	 * genpd and its master have a one-to-one mapping of performance states
2152 * and so none of them have the "required-opps" property set. Return the
2153 * pstate of the src_table as it is in such cases.
2154 */
2155 if (!src_table->required_opp_count)
2156 return pstate;
2157
2158 for (i = 0; i < src_table->required_opp_count; i++) {
2159 if (src_table->required_opp_tables[i]->np == dst_table->np)
2160 break;
2161 }
2162
2163 if (unlikely(i == src_table->required_opp_count)) {
2164 pr_err("%s: Couldn't find matching OPP table (%p: %p)\n",
2165 __func__, src_table, dst_table);
2166 return -EINVAL;
2167 }
2168
2169 mutex_lock(&src_table->lock);
2170
2171 list_for_each_entry(opp, &src_table->opp_list, node) {
2172 if (opp->pstate == pstate) {
2173 dest_pstate = opp->required_opps[i]->pstate;
2174 goto unlock;
2175 }
2176 }
2177
2178 pr_err("%s: Couldn't find matching OPP (%p: %p)\n", __func__, src_table,
2179 dst_table);
2180
2181unlock:
2182 mutex_unlock(&src_table->lock);
2183
2184 return dest_pstate;
2185}
2186
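A brief sketch of how a caller (typically the genpd core) might translate a performance state; both table pointers are assumed to have been obtained and referenced elsewhere:

#include <linux/pm_opp.h>

static int foo_xlate_pstate(struct opp_table *src, struct opp_table *dst,
			    unsigned int pstate)
{
	int dst_pstate;

	/* Returns a negative error if no matching required OPP is found. */
	dst_pstate = dev_pm_opp_xlate_performance_state(src, dst, pstate);

	return dst_pstate;
}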
38393409
VK
2187/**
 2188 * dev_pm_opp_add() - Add an OPP definition to the device's OPP table
2189 * @dev: device for which we do this operation
2190 * @freq: Frequency in Hz for this OPP
2191 * @u_volt: Voltage in uVolts for this OPP
2192 *
2c2709dc 2193 * This function adds an opp definition to the opp table and returns status.
38393409
VK
2194 * The opp is made available by default and it can be controlled using
2195 * dev_pm_opp_enable/disable functions.
2196 *
38393409 2197 * Return:
984f16c8 2198 * 0 On success OR
38393409 2199 * Duplicate OPPs (both freq and volt are same) and opp->available
984f16c8 2200 * -EEXIST	Freq is same but volt is different OR
38393409 2201 * Duplicate OPPs (both freq and volt are same) and !opp->available
984f16c8 2202 * -ENOMEM Memory allocation failure
38393409
VK
2203 */
2204int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
2205{
8cd2f6e8
VK
2206 struct opp_table *opp_table;
2207 int ret;
2208
b83c1899 2209 opp_table = dev_pm_opp_get_opp_table(dev);
dd461cd9
SG
2210 if (IS_ERR(opp_table))
2211 return PTR_ERR(opp_table);
8cd2f6e8 2212
46f48aca
VK
2213 /* Fix regulator count for dynamic OPPs */
2214 opp_table->regulator_count = 1;
2215
8cd2f6e8 2216 ret = _opp_add_v1(opp_table, dev, freq, u_volt, true);
0ad8c623
VK
2217 if (ret)
2218 dev_pm_opp_put_opp_table(opp_table);
8cd2f6e8 2219
8cd2f6e8 2220 return ret;
38393409 2221}
5d4879cd 2222EXPORT_SYMBOL_GPL(dev_pm_opp_add);
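A minimal sketch of building a dynamic OPP table from driver code; the frequencies and voltages are illustrative values only:

#include <linux/pm_opp.h>

static int foo_add_opps(struct device *dev)
{
	int ret;

	ret = dev_pm_opp_add(dev, 400000000, 950000);	/* 400 MHz @ 0.95 V */
	if (ret)
		return ret;

	return dev_pm_opp_add(dev, 800000000, 1100000);	/* 800 MHz @ 1.10 V */
}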
e1f60b29
NM
2223
2224/**
327854c8 2225 * _opp_set_availability() - helper to set the availability of an opp
e1f60b29
NM
2226 * @dev: device for which we do this operation
2227 * @freq: OPP frequency to modify availability
2228 * @availability_req: availability status requested for this opp
2229 *
052c6f19
VK
 2230 * Set the availability of an OPP; opp_{enable,disable} share a common logic
2231 * which is isolated here.
e1f60b29 2232 *
984f16c8 2233 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
e1a2d49c 2234 * copy operation, returns 0 if no modification was done OR modification was
e1f60b29 2235 * successful.
e1f60b29 2236 */
327854c8
NM
2237static int _opp_set_availability(struct device *dev, unsigned long freq,
2238 bool availability_req)
e1f60b29 2239{
2c2709dc 2240 struct opp_table *opp_table;
a7f3987e 2241 struct dev_pm_opp *tmp_opp, *opp = ERR_PTR(-ENODEV);
e1f60b29
NM
2242 int r = 0;
2243
2c2709dc
VK
2244 /* Find the opp_table */
2245 opp_table = _find_opp_table(dev);
2246 if (IS_ERR(opp_table)) {
2247 r = PTR_ERR(opp_table);
e1f60b29 2248 dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
a7f3987e 2249 return r;
e1f60b29
NM
2250 }
2251
37a73ec0
VK
2252 mutex_lock(&opp_table->lock);
2253
e1f60b29 2254 /* Do we have the frequency? */
2c2709dc 2255 list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
e1f60b29
NM
2256 if (tmp_opp->rate == freq) {
2257 opp = tmp_opp;
2258 break;
2259 }
2260 }
37a73ec0 2261
e1f60b29
NM
2262 if (IS_ERR(opp)) {
2263 r = PTR_ERR(opp);
2264 goto unlock;
2265 }
2266
2267 /* Is update really needed? */
2268 if (opp->available == availability_req)
2269 goto unlock;
e1f60b29 2270
a7f3987e 2271 opp->available = availability_req;
e1f60b29 2272
e4d8ae00
VK
2273 dev_pm_opp_get(opp);
2274 mutex_unlock(&opp_table->lock);
2275
03ca370f
MH
2276 /* Notify the change of the OPP availability */
2277 if (availability_req)
052c6f19 2278 blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ENABLE,
a7f3987e 2279 opp);
03ca370f 2280 else
052c6f19 2281 blocking_notifier_call_chain(&opp_table->head,
a7f3987e 2282 OPP_EVENT_DISABLE, opp);
e1f60b29 2283
e4d8ae00
VK
2284 dev_pm_opp_put(opp);
2285 goto put_table;
2286
e1f60b29 2287unlock:
5b650b38 2288 mutex_unlock(&opp_table->lock);
e4d8ae00 2289put_table:
5b650b38 2290 dev_pm_opp_put_opp_table(opp_table);
e1f60b29
NM
2291 return r;
2292}
2293
25cb20a2
SB
2294/**
2295 * dev_pm_opp_adjust_voltage() - helper to change the voltage of an OPP
2296 * @dev: device for which we do this operation
2297 * @freq: OPP frequency to adjust voltage of
2298 * @u_volt: new OPP target voltage
2299 * @u_volt_min: new OPP min voltage
2300 * @u_volt_max: new OPP max voltage
2301 *
2302 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 2303 * copy operation, returns 0 if no modification was done OR modification was
2304 * successful.
2305 */
2306int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq,
2307 unsigned long u_volt, unsigned long u_volt_min,
2308 unsigned long u_volt_max)
2309
2310{
2311 struct opp_table *opp_table;
2312 struct dev_pm_opp *tmp_opp, *opp = ERR_PTR(-ENODEV);
2313 int r = 0;
2314
2315 /* Find the opp_table */
2316 opp_table = _find_opp_table(dev);
2317 if (IS_ERR(opp_table)) {
2318 r = PTR_ERR(opp_table);
2319 dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
2320 return r;
2321 }
2322
2323 mutex_lock(&opp_table->lock);
2324
2325 /* Do we have the frequency? */
2326 list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
2327 if (tmp_opp->rate == freq) {
2328 opp = tmp_opp;
2329 break;
2330 }
2331 }
2332
2333 if (IS_ERR(opp)) {
2334 r = PTR_ERR(opp);
2335 goto adjust_unlock;
2336 }
2337
2338 /* Is update really needed? */
2339 if (opp->supplies->u_volt == u_volt)
2340 goto adjust_unlock;
2341
2342 opp->supplies->u_volt = u_volt;
2343 opp->supplies->u_volt_min = u_volt_min;
2344 opp->supplies->u_volt_max = u_volt_max;
2345
2346 dev_pm_opp_get(opp);
2347 mutex_unlock(&opp_table->lock);
2348
2349 /* Notify the voltage change of the OPP */
2350 blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADJUST_VOLTAGE,
2351 opp);
2352
2353 dev_pm_opp_put(opp);
2354 goto adjust_put_table;
2355
2356adjust_unlock:
2357 mutex_unlock(&opp_table->lock);
2358adjust_put_table:
2359 dev_pm_opp_put_opp_table(opp_table);
2360 return r;
2361}
03649154 2362EXPORT_SYMBOL_GPL(dev_pm_opp_adjust_voltage);
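A small sketch of retuning one OPP's supply after reading per-chip calibration data; the frequency and the voltage triplet are illustrative:

#include <linux/pm_opp.h>

static int foo_apply_trim(struct device *dev)
{
	/* Adjust the 800 MHz OPP: target, minimum and maximum in uV. */
	return dev_pm_opp_adjust_voltage(dev, 800000000,
					 1050000, 1000000, 1150000);
}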
25cb20a2 2363
e1f60b29 2364/**
5d4879cd 2365 * dev_pm_opp_enable() - Enable a specific OPP
e1f60b29
NM
2366 * @dev: device for which we do this operation
2367 * @freq: OPP frequency to enable
2368 *
2369 * Enables a provided opp. If the operation is valid, this returns 0, else the
 2370 * corresponding error value. It is meant to be used by users to make an OPP available
5d4879cd 2371 * after being temporarily made unavailable with dev_pm_opp_disable.
e1f60b29 2372 *
984f16c8 2373 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
e1a2d49c 2374 * copy operation, returns 0 if no modification was done OR modification was
984f16c8 2375 * successful.
e1f60b29 2376 */
5d4879cd 2377int dev_pm_opp_enable(struct device *dev, unsigned long freq)
e1f60b29 2378{
327854c8 2379 return _opp_set_availability(dev, freq, true);
e1f60b29 2380}
5d4879cd 2381EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
e1f60b29
NM
2382
2383/**
5d4879cd 2384 * dev_pm_opp_disable() - Disable a specific OPP
e1f60b29
NM
2385 * @dev: device for which we do this operation
2386 * @freq: OPP frequency to disable
2387 *
2388 * Disables a provided opp. If the operation is valid, this returns
2389 * 0, else the corresponding error value. It is meant to be a temporary
2390 * control by users to make this OPP not available until the circumstances are
5d4879cd 2391 * right to make it available again (with a call to dev_pm_opp_enable).
e1f60b29 2392 *
984f16c8 2393 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
e1a2d49c 2394 * copy operation, returns 0 if no modification was done OR modification was
984f16c8 2395 * successful.
e1f60b29 2396 */
5d4879cd 2397int dev_pm_opp_disable(struct device *dev, unsigned long freq)
e1f60b29 2398{
327854c8 2399 return _opp_set_availability(dev, freq, false);
e1f60b29 2400}
5d4879cd 2401EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
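A sketch of using the pair above as a temporary mask, e.g. from a thermal handler; the 800 MHz frequency is an illustrative OPP that is assumed to exist in the table:

#include <linux/pm_opp.h>

static int foo_thermal_limit(struct device *dev, bool too_hot)
{
	/* Temporarily hide the fastest OPP while the device is too hot. */
	if (too_hot)
		return dev_pm_opp_disable(dev, 800000000);

	return dev_pm_opp_enable(dev, 800000000);
}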
e1f60b29 2402
03ca370f 2403/**
dc2c9ad5
VK
2404 * dev_pm_opp_register_notifier() - Register OPP notifier for the device
2405 * @dev: Device for which notifier needs to be registered
2406 * @nb: Notifier block to be registered
984f16c8 2407 *
dc2c9ad5
VK
2408 * Return: 0 on success or a negative error value.
2409 */
2410int dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb)
2411{
2412 struct opp_table *opp_table;
2413 int ret;
2414
dc2c9ad5 2415 opp_table = _find_opp_table(dev);
5b650b38
VK
2416 if (IS_ERR(opp_table))
2417 return PTR_ERR(opp_table);
2418
052c6f19 2419 ret = blocking_notifier_chain_register(&opp_table->head, nb);
dc2c9ad5 2420
5b650b38 2421 dev_pm_opp_put_opp_table(opp_table);
dc2c9ad5
VK
2422
2423 return ret;
2424}
2425EXPORT_SYMBOL(dev_pm_opp_register_notifier);
2426
2427/**
2428 * dev_pm_opp_unregister_notifier() - Unregister OPP notifier for the device
2429 * @dev: Device for which notifier needs to be unregistered
2430 * @nb: Notifier block to be unregistered
984f16c8 2431 *
dc2c9ad5 2432 * Return: 0 on success or a negative error value.
03ca370f 2433 */
dc2c9ad5
VK
2434int dev_pm_opp_unregister_notifier(struct device *dev,
2435 struct notifier_block *nb)
03ca370f 2436{
dc2c9ad5
VK
2437 struct opp_table *opp_table;
2438 int ret;
03ca370f 2439
dc2c9ad5 2440 opp_table = _find_opp_table(dev);
5b650b38
VK
2441 if (IS_ERR(opp_table))
2442 return PTR_ERR(opp_table);
dc2c9ad5 2443
052c6f19 2444 ret = blocking_notifier_chain_unregister(&opp_table->head, nb);
03ca370f 2445
5b650b38 2446 dev_pm_opp_put_opp_table(opp_table);
dc2c9ad5
VK
2447
2448 return ret;
03ca370f 2449}
dc2c9ad5 2450EXPORT_SYMBOL(dev_pm_opp_unregister_notifier);
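A sketch of a consumer watching for OPP table changes; the foo_* names are illustrative, and the callback only demonstrates which events can arrive (data points to the affected struct dev_pm_opp):

#include <linux/notifier.h>
#include <linux/pm_opp.h>

static int foo_opp_notify(struct notifier_block *nb, unsigned long event,
			  void *data)
{
	switch (event) {
	case OPP_EVENT_ENABLE:
	case OPP_EVENT_DISABLE:
	case OPP_EVENT_ADJUST_VOLTAGE:
		/* React to the OPP table change here. */
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block foo_opp_nb = {
	.notifier_call = foo_opp_notify,
};

static int foo_watch_opps(struct device *dev)
{
	return dev_pm_opp_register_notifier(dev, &foo_opp_nb);
}

static void foo_unwatch_opps(struct device *dev)
{
	dev_pm_opp_unregister_notifier(dev, &foo_opp_nb);
}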
b496dfbc 2451
8aaf6264
VK
2452/**
2453 * dev_pm_opp_remove_table() - Free all OPPs associated with the device
2454 * @dev: device pointer used to lookup OPP table.
2455 *
2456 * Free both OPPs created using static entries present in DT and the
2457 * dynamically added entries.
2458 */
2459void dev_pm_opp_remove_table(struct device *dev)
9274c892
VK
2460{
2461 struct opp_table *opp_table;
2462
2c2709dc
VK
2463 /* Check for existing table for 'dev' */
2464 opp_table = _find_opp_table(dev);
2465 if (IS_ERR(opp_table)) {
2466 int error = PTR_ERR(opp_table);
737002b5
VK
2467
2468 if (error != -ENODEV)
2c2709dc 2469 WARN(1, "%s: opp_table: %d\n",
737002b5
VK
2470 IS_ERR_OR_NULL(dev) ?
2471 "Invalid device" : dev_name(dev),
2472 error);
5b650b38 2473 return;
737002b5
VK
2474 }
2475
922ff075
VK
2476 /*
2477 * Drop the extra reference only if the OPP table was successfully added
2478 * with dev_pm_opp_of_add_table() earlier.
 2479	 */
2480 if (_opp_remove_all_static(opp_table))
2481 dev_pm_opp_put_opp_table(opp_table);
cdd6ed90
VK
2482
2483 /* Drop reference taken by _find_opp_table() */
2484 dev_pm_opp_put_opp_table(opp_table);
737002b5 2485}
411466c5 2486EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table);
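A closing sketch pairing table creation and removal in a driver's probe/remove path; dev_pm_opp_of_add_table() is the usual DT-based counterpart (see the comment above), and the foo_* names are illustrative:

#include <linux/pm_opp.h>

static int foo_probe_opps(struct device *dev)
{
	/* Populate the table from the device's "operating-points-v2" node. */
	return dev_pm_opp_of_add_table(dev);
}

static void foo_remove_opps(struct device *dev)
{
	/* Free both the static (DT) and any dynamically added OPPs. */
	dev_pm_opp_remove_table(dev);
}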