]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - drivers/base/power/opp/core.c
Merge tag 'nfs-for-4.9-1' of git://git.linux-nfs.org/projects/anna/linux-nfs
[mirror_ubuntu-artful-kernel.git] / drivers / base / power / opp / core.c
1 /*
2 * Generic OPP Interface
3 *
4 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
5 * Nishanth Menon
6 * Romit Dasgupta
7 * Kevin Hilman
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
16 #include <linux/clk.h>
17 #include <linux/errno.h>
18 #include <linux/err.h>
19 #include <linux/slab.h>
20 #include <linux/device.h>
21 #include <linux/export.h>
22 #include <linux/regulator/consumer.h>
23
24 #include "opp.h"
25
/*
 * The root of the list of all opp-tables. All opp_table structures branch off
 * from here, with each opp_table containing the list of opps it supports in
 * various states of availability.
 */
LIST_HEAD(opp_tables);
/* Lock to allow exclusive modification to the device and opp lists */
DEFINE_MUTEX(opp_table_lock);

/*
 * Assert that the caller holds either the RCU read lock (readers) or
 * opp_table_lock (writers); one of the two must be held before walking the
 * RCU-protected opp/device lists above.
 */
#define opp_rcu_lockdep_assert()					\
do {									\
	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
				!lockdep_is_held(&opp_table_lock),	\
			   "Missing rcu_read_lock() or "		\
			   "opp_table_lock protection");		\
} while (0)
42
43 static struct opp_device *_find_opp_dev(const struct device *dev,
44 struct opp_table *opp_table)
45 {
46 struct opp_device *opp_dev;
47
48 list_for_each_entry(opp_dev, &opp_table->dev_list, node)
49 if (opp_dev->dev == dev)
50 return opp_dev;
51
52 return NULL;
53 }
54
/**
 * _find_opp_table() - find opp_table struct using device pointer
 * @dev: device pointer used to lookup OPP table
 *
 * Search OPP table for one containing matching device. Does a RCU reader
 * operation to grab the pointer needed.
 *
 * Return: pointer to 'struct opp_table' if found, otherwise -ENODEV or
 * -EINVAL based on type of error.
 *
 * Locking: For readers, this function must be called under rcu_read_lock().
 * opp_table is a RCU protected pointer, which means that opp_table is valid
 * as long as we are under RCU lock.
 *
 * For Writers, this function must be called with opp_table_lock held.
 */
struct opp_table *_find_opp_table(struct device *dev)
{
	struct opp_table *opp_table;

	opp_rcu_lockdep_assert();

	if (IS_ERR_OR_NULL(dev)) {
		pr_err("%s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	/* A table matches when @dev appears in its registered device list */
	list_for_each_entry_rcu(opp_table, &opp_tables, node)
		if (_find_opp_dev(dev, opp_table))
			return opp_table;

	return ERR_PTR(-ENODEV);
}
88
89 /**
90 * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
91 * @opp: opp for which voltage has to be returned for
92 *
93 * Return: voltage in micro volt corresponding to the opp, else
94 * return 0
95 *
96 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
97 * protected pointer. This means that opp which could have been fetched by
98 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
99 * under RCU lock. The pointer returned by the opp_find_freq family must be
100 * used in the same section as the usage of this function with the pointer
101 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
102 * pointer.
103 */
104 unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
105 {
106 struct dev_pm_opp *tmp_opp;
107 unsigned long v = 0;
108
109 opp_rcu_lockdep_assert();
110
111 tmp_opp = rcu_dereference(opp);
112 if (IS_ERR_OR_NULL(tmp_opp))
113 pr_err("%s: Invalid parameters\n", __func__);
114 else
115 v = tmp_opp->u_volt;
116
117 return v;
118 }
119 EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
120
121 /**
122 * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
123 * @opp: opp for which frequency has to be returned for
124 *
125 * Return: frequency in hertz corresponding to the opp, else
126 * return 0
127 *
128 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
129 * protected pointer. This means that opp which could have been fetched by
130 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
131 * under RCU lock. The pointer returned by the opp_find_freq family must be
132 * used in the same section as the usage of this function with the pointer
133 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
134 * pointer.
135 */
136 unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
137 {
138 struct dev_pm_opp *tmp_opp;
139 unsigned long f = 0;
140
141 opp_rcu_lockdep_assert();
142
143 tmp_opp = rcu_dereference(opp);
144 if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available)
145 pr_err("%s: Invalid parameters\n", __func__);
146 else
147 f = tmp_opp->rate;
148
149 return f;
150 }
151 EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
152
153 /**
154 * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
155 * @opp: opp for which turbo mode is being verified
156 *
157 * Turbo OPPs are not for normal use, and can be enabled (under certain
158 * conditions) for short duration of times to finish high throughput work
159 * quickly. Running on them for longer times may overheat the chip.
160 *
161 * Return: true if opp is turbo opp, else false.
162 *
163 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
164 * protected pointer. This means that opp which could have been fetched by
165 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
166 * under RCU lock. The pointer returned by the opp_find_freq family must be
167 * used in the same section as the usage of this function with the pointer
168 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
169 * pointer.
170 */
171 bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
172 {
173 struct dev_pm_opp *tmp_opp;
174
175 opp_rcu_lockdep_assert();
176
177 tmp_opp = rcu_dereference(opp);
178 if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) {
179 pr_err("%s: Invalid parameters\n", __func__);
180 return false;
181 }
182
183 return tmp_opp->turbo;
184 }
185 EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);
186
187 /**
188 * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds
189 * @dev: device for which we do this operation
190 *
191 * Return: This function returns the max clock latency in nanoseconds.
192 *
193 * Locking: This function takes rcu_read_lock().
194 */
195 unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
196 {
197 struct opp_table *opp_table;
198 unsigned long clock_latency_ns;
199
200 rcu_read_lock();
201
202 opp_table = _find_opp_table(dev);
203 if (IS_ERR(opp_table))
204 clock_latency_ns = 0;
205 else
206 clock_latency_ns = opp_table->clock_latency_ns_max;
207
208 rcu_read_unlock();
209 return clock_latency_ns;
210 }
211 EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
212
/**
 * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds
 * @dev: device for which we do this operation
 *
 * Return: This function returns the max voltage latency in nanoseconds,
 * or 0 when the device has no OPP table or no regulator.
 *
 * Locking: This function takes rcu_read_lock().
 */
unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *opp;
	struct regulator *reg;
	unsigned long latency_ns = 0;
	unsigned long min_uV = ~0, max_uV = 0;
	int ret;

	rcu_read_lock();

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		rcu_read_unlock();
		return 0;
	}

	reg = opp_table->regulator;
	if (IS_ERR(reg)) {
		/* Regulator may not be required for device */
		rcu_read_unlock();
		return 0;
	}

	/* Find the widest voltage span across all available OPPs */
	list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
		if (!opp->available)
			continue;

		if (opp->u_volt_min < min_uV)
			min_uV = opp->u_volt_min;
		if (opp->u_volt_max > max_uV)
			max_uV = opp->u_volt_max;
	}

	rcu_read_unlock();

	/*
	 * The caller needs to ensure that opp_table (and hence the regulator)
	 * isn't freed, while we are executing this routine.
	 */
	ret = regulator_set_voltage_time(reg, min_uV, max_uV);
	if (ret > 0)
		latency_ns = ret * 1000;

	return latency_ns;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_volt_latency);
268
/**
 * dev_pm_opp_get_max_transition_latency() - Get max transition latency in
 *					     nanoseconds
 * @dev: device for which we do this operation
 *
 * Return: This function returns the max transition latency, in nanoseconds, to
 * switch from one OPP to other.
 *
 * Locking: This function takes rcu_read_lock().
 */
unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
{
	unsigned long volt_latency = dev_pm_opp_get_max_volt_latency(dev);
	unsigned long clock_latency = dev_pm_opp_get_max_clock_latency(dev);

	return volt_latency + clock_latency;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency);
285
286 /**
287 * dev_pm_opp_get_suspend_opp() - Get suspend opp
288 * @dev: device for which we do this operation
289 *
290 * Return: This function returns pointer to the suspend opp if it is
291 * defined and available, otherwise it returns NULL.
292 *
293 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
294 * protected pointer. The reason for the same is that the opp pointer which is
295 * returned will remain valid for use with opp_get_{voltage, freq} only while
296 * under the locked area. The pointer returned must be used prior to unlocking
297 * with rcu_read_unlock() to maintain the integrity of the pointer.
298 */
299 struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
300 {
301 struct opp_table *opp_table;
302
303 opp_rcu_lockdep_assert();
304
305 opp_table = _find_opp_table(dev);
306 if (IS_ERR(opp_table) || !opp_table->suspend_opp ||
307 !opp_table->suspend_opp->available)
308 return NULL;
309
310 return opp_table->suspend_opp;
311 }
312 EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp);
313
314 /**
315 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp table
316 * @dev: device for which we do this operation
317 *
318 * Return: This function returns the number of available opps if there are any,
319 * else returns 0 if none or the corresponding error value.
320 *
321 * Locking: This function takes rcu_read_lock().
322 */
323 int dev_pm_opp_get_opp_count(struct device *dev)
324 {
325 struct opp_table *opp_table;
326 struct dev_pm_opp *temp_opp;
327 int count = 0;
328
329 rcu_read_lock();
330
331 opp_table = _find_opp_table(dev);
332 if (IS_ERR(opp_table)) {
333 count = PTR_ERR(opp_table);
334 dev_err(dev, "%s: OPP table not found (%d)\n",
335 __func__, count);
336 goto out_unlock;
337 }
338
339 list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
340 if (temp_opp->available)
341 count++;
342 }
343
344 out_unlock:
345 rcu_read_unlock();
346 return count;
347 }
348 EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
349
/**
 * dev_pm_opp_find_freq_exact() - search for an exact frequency
 * @dev: device for which we do this operation
 * @freq: frequency to search for
 * @available: true/false - match for available opp
 *
 * Return: Searches for exact match in the opp table and returns pointer to the
 * matching opp if found, else returns ERR_PTR in case of error and should
 * be handled using IS_ERR. Error return values can be:
 * EINVAL: for bad pointer
 * ERANGE: no match found for search
 * ENODEV: if device not found in list of registered devices
 *
 * Note: available is a modifier for the search. if available=true, then the
 * match is for exact matching frequency and is available in the stored OPP
 * table. if false, the match is for exact frequency which is not available.
 *
 * This provides a mechanism to enable an opp which is not available currently
 * or the opposite as well.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
					      unsigned long freq,
					      bool available)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	opp_rcu_lockdep_assert();

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		int r = PTR_ERR(opp_table);

		dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
		return ERR_PTR(r);
	}

	/* Both the rate and the availability flag must match */
	list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
		if (temp_opp->available == available &&
				temp_opp->rate == freq) {
			opp = temp_opp;
			break;
		}
	}

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
404
/*
 * Return the first available OPP whose rate is >= *freq, updating *freq to
 * the rate of the match, or ERR_PTR(-ERANGE) when no such OPP exists.
 * The opp_list is maintained in increasing frequency order (see _opp_add()),
 * so the first match is the ceil. Caller must satisfy the locking rules of
 * list_for_each_entry_rcu() on opp_table->opp_list.
 */
static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table,
						   unsigned long *freq)
{
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
		if (temp_opp->available && temp_opp->rate >= *freq) {
			opp = temp_opp;
			*freq = opp->rate;
			break;
		}
	}

	return opp;
}
420
/**
 * dev_pm_opp_find_freq_ceil() - Search for an rounded ceil freq
 * @dev: device for which we do this operation
 * @freq: Start frequency
 *
 * Search for the matching ceil *available* OPP from a starting freq
 * for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL: for bad pointer
 * ERANGE: no match found for search
 * ENODEV: if device not found in list of registered devices
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
					     unsigned long *freq)
{
	struct opp_table *opp_table;

	opp_rcu_lockdep_assert();

	if (!dev || !freq) {
		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
		return ERR_PTR(-EINVAL);
	}

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return ERR_CAST(opp_table);

	/* Actual search is shared with dev_pm_opp_set_rate() */
	return _find_freq_ceil(opp_table, freq);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
461
/**
 * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
 * @dev: device for which we do this operation
 * @freq: Start frequency
 *
 * Search for the matching floor *available* OPP from a starting freq
 * for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL: for bad pointer
 * ERANGE: no match found for search
 * ENODEV: if device not found in list of registered devices
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
					      unsigned long *freq)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	opp_rcu_lockdep_assert();

	if (!dev || !freq) {
		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
		return ERR_PTR(-EINVAL);
	}

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return ERR_CAST(opp_table);

	/*
	 * List is sorted by increasing rate: remember the last available
	 * OPP at or below *freq and stop at the first one above it.
	 */
	list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
		if (temp_opp->available) {
			/* go to the next node, before choosing prev */
			if (temp_opp->rate > *freq)
				break;
			else
				opp = temp_opp;
		}
	}
	if (!IS_ERR(opp))
		*freq = opp->rate;

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
515
/*
 * Look up the clk handle stored in @dev's OPP table.
 *
 * Return: the clk pointer, or an ERR_PTR when no OPP table exists for the
 * device. Note that opp_table->clk itself may be an ERR_PTR (see
 * _add_opp_table()); that value is returned as-is after logging.
 *
 * The caller needs to ensure that opp_table (and hence the clk) isn't freed,
 * while clk returned here is used.
 */
static struct clk *_get_opp_clk(struct device *dev)
{
	struct opp_table *opp_table;
	struct clk *clk;

	rcu_read_lock();

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		dev_err(dev, "%s: device opp doesn't exist\n", __func__);
		clk = ERR_CAST(opp_table);
		goto unlock;
	}

	clk = opp_table->clk;
	if (IS_ERR(clk))
		dev_err(dev, "%s: No clock available for the device\n",
			__func__);

unlock:
	rcu_read_unlock();
	return clk;
}
543
/*
 * Program @reg to the (min, target, max) voltage triplet of an OPP.
 *
 * Return: 0 on success or when @reg is an ERR_PTR (a regulator is optional
 * for the device, so that case is treated as a no-op), else the error
 * returned by regulator_set_voltage_triplet().
 */
static int _set_opp_voltage(struct device *dev, struct regulator *reg,
			    unsigned long u_volt, unsigned long u_volt_min,
			    unsigned long u_volt_max)
{
	int ret;

	/* Regulator not available for device */
	if (IS_ERR(reg)) {
		dev_dbg(dev, "%s: regulator not available: %ld\n", __func__,
			PTR_ERR(reg));
		return 0;
	}

	dev_dbg(dev, "%s: voltages (mV): %lu %lu %lu\n", __func__, u_volt_min,
		u_volt, u_volt_max);

	ret = regulator_set_voltage_triplet(reg, u_volt_min, u_volt,
					    u_volt_max);
	if (ret)
		dev_err(dev, "%s: failed to set voltage (%lu %lu %lu mV): %d\n",
			__func__, u_volt_min, u_volt, u_volt_max, ret);

	return ret;
}
568
/**
 * dev_pm_opp_set_rate() - Configure new OPP based on frequency
 * @dev: device for which we do this operation
 * @target_freq: frequency to achieve
 *
 * This configures the power-supplies and clock source to the levels specified
 * by the OPP corresponding to the target_freq.
 *
 * Voltage is raised before the clock when scaling up and lowered after the
 * clock when scaling down; on failure the previous voltage (and, if needed,
 * frequency) is restored.
 *
 * Return: 0 on success, negative errno otherwise.
 *
 * Locking: This function takes rcu_read_lock().
 */
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *old_opp, *opp;
	struct regulator *reg;
	struct clk *clk;
	unsigned long freq, old_freq;
	unsigned long u_volt, u_volt_min, u_volt_max;
	int ret;

	if (unlikely(!target_freq)) {
		dev_err(dev, "%s: Invalid target frequency %lu\n", __func__,
			target_freq);
		return -EINVAL;
	}

	clk = _get_opp_clk(dev);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* Fall back to the raw target if the clk framework can't round it */
	freq = clk_round_rate(clk, target_freq);
	if ((long)freq <= 0)
		freq = target_freq;

	old_freq = clk_get_rate(clk);

	/* Return early if nothing to do */
	if (old_freq == freq) {
		dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
			__func__, freq);
		return 0;
	}

	rcu_read_lock();

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		dev_err(dev, "%s: device opp doesn't exist\n", __func__);
		rcu_read_unlock();
		return PTR_ERR(opp_table);
	}

	/*
	 * Failure to find the current OPP is not fatal: it only means the
	 * rollback path below can't restore the old voltage.
	 */
	old_opp = _find_freq_ceil(opp_table, &old_freq);
	if (IS_ERR(old_opp)) {
		dev_err(dev, "%s: failed to find current OPP for freq %lu (%ld)\n",
			__func__, old_freq, PTR_ERR(old_opp));
	}

	opp = _find_freq_ceil(opp_table, &freq);
	if (IS_ERR(opp)) {
		ret = PTR_ERR(opp);
		dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
			__func__, freq, ret);
		rcu_read_unlock();
		return ret;
	}

	/* Copy what we need before dropping the RCU read lock */
	u_volt = opp->u_volt;
	u_volt_min = opp->u_volt_min;
	u_volt_max = opp->u_volt_max;

	reg = opp_table->regulator;

	rcu_read_unlock();

	/* Scaling up? Scale voltage before frequency */
	if (freq > old_freq) {
		ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min,
				       u_volt_max);
		if (ret)
			goto restore_voltage;
	}

	/* Change frequency */

	dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n",
		__func__, old_freq, freq);

	ret = clk_set_rate(clk, freq);
	if (ret) {
		dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
			ret);
		goto restore_voltage;
	}

	/* Scaling down? Scale voltage after frequency */
	if (freq < old_freq) {
		ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min,
				       u_volt_max);
		if (ret)
			goto restore_freq;
	}

	return 0;

restore_freq:
	if (clk_set_rate(clk, old_freq))
		dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
			__func__, old_freq);
restore_voltage:
	/* This shouldn't harm even if the voltages weren't updated earlier */
	if (!IS_ERR(old_opp))
		_set_opp_voltage(dev, reg, old_opp->u_volt,
				 old_opp->u_volt_min, old_opp->u_volt_max);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
687
/* OPP-dev Helpers */

/*
 * SRCU callback (queued via call_srcu() in _remove_opp_dev()) that frees an
 * opp_device.
 * NOTE(review): this hands the object to kfree_rcu() from inside the SRCU
 * callback, adding a second grace-period deferral on the same rcu_head —
 * presumably intentional, but worth confirming a plain kfree() wouldn't do.
 */
static void _kfree_opp_dev_rcu(struct rcu_head *head)
{
	struct opp_device *opp_dev;

	opp_dev = container_of(head, struct opp_device, rcu_head);
	kfree_rcu(opp_dev, rcu_head);
}
696
/*
 * Unlink @opp_dev from @opp_table's device list, drop its debugfs entries
 * and schedule its memory to be freed after an SRCU grace period.
 */
static void _remove_opp_dev(struct opp_device *opp_dev,
			    struct opp_table *opp_table)
{
	opp_debug_unregister(opp_dev, opp_table);
	list_del(&opp_dev->node);
	call_srcu(&opp_table->srcu_head.srcu, &opp_dev->rcu_head,
		  _kfree_opp_dev_rcu);
}
705
/*
 * Allocate an opp_device for @dev and add it to @opp_table's device list.
 *
 * Return: the new opp_device, or NULL on allocation failure. A debugfs
 * registration failure is only logged; it does not fail the call.
 */
struct opp_device *_add_opp_dev(const struct device *dev,
				struct opp_table *opp_table)
{
	struct opp_device *opp_dev;
	int ret;

	opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL);
	if (!opp_dev)
		return NULL;

	/* Initialize opp-dev */
	opp_dev->dev = dev;
	list_add_rcu(&opp_dev->node, &opp_table->dev_list);

	/* Create debugfs entries for the opp_table */
	ret = opp_debug_register(opp_dev, opp_table);
	if (ret)
		dev_err(dev, "%s: Failed to register opp debugfs (%d)\n",
			__func__, ret);

	return opp_dev;
}
728
/**
 * _add_opp_table() - Find OPP table or allocate a new one
 * @dev: device for which we do this operation
 *
 * It tries to find an existing table first, if it couldn't find one, it
 * allocates a new OPP table and returns that.
 *
 * Return: valid opp_table pointer if success, else NULL.
 */
static struct opp_table *_add_opp_table(struct device *dev)
{
	struct opp_table *opp_table;
	struct opp_device *opp_dev;
	int ret;

	/* Check for existing table for 'dev' first */
	opp_table = _find_opp_table(dev);
	if (!IS_ERR(opp_table))
		return opp_table;

	/*
	 * Allocate a new OPP table. In the infrequent case where a new
	 * device is needed to be added, we pay this penalty.
	 */
	opp_table = kzalloc(sizeof(*opp_table), GFP_KERNEL);
	if (!opp_table)
		return NULL;

	INIT_LIST_HEAD(&opp_table->dev_list);

	opp_dev = _add_opp_dev(dev, opp_table);
	if (!opp_dev) {
		kfree(opp_table);
		return NULL;
	}

	_of_init_opp_table(opp_table, dev);

	/* Set regulator to a non-NULL error value */
	opp_table->regulator = ERR_PTR(-ENXIO);

	/* Find clk for the device */
	opp_table->clk = clk_get(dev, NULL);
	if (IS_ERR(opp_table->clk)) {
		ret = PTR_ERR(opp_table->clk);
		/* A missing clk is tolerated; keep the ERR_PTR in ->clk */
		if (ret != -EPROBE_DEFER)
			dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__,
				ret);
	}

	srcu_init_notifier_head(&opp_table->srcu_head);
	INIT_LIST_HEAD(&opp_table->opp_list);

	/* Secure the device table modification */
	list_add_rcu(&opp_table->node, &opp_tables);
	return opp_table;
}
786
/**
 * _kfree_device_rcu() - Free opp_table RCU handler
 * @head: RCU head
 *
 * NOTE(review): like _kfree_opp_dev_rcu(), this defers again via kfree_rcu()
 * from within the SRCU callback rather than freeing directly — confirm the
 * double deferral is intended.
 */
static void _kfree_device_rcu(struct rcu_head *head)
{
	struct opp_table *opp_table = container_of(head, struct opp_table,
						   rcu_head);

	kfree_rcu(opp_table, rcu_head);
}
798
/**
 * _remove_opp_table() - Removes a OPP table
 * @opp_table: OPP table to be removed.
 *
 * Removes/frees OPP table if it doesn't contain any OPPs.
 *
 * The table is kept alive while any of the following still reference it:
 * remaining OPPs, platform supported-hw data, a prop-name, or a regulator.
 */
static void _remove_opp_table(struct opp_table *opp_table)
{
	struct opp_device *opp_dev;

	if (!list_empty(&opp_table->opp_list))
		return;

	if (opp_table->supported_hw)
		return;

	if (opp_table->prop_name)
		return;

	/* A valid (non-ERR) regulator means someone still holds the table */
	if (!IS_ERR(opp_table->regulator))
		return;

	/* Release clk */
	if (!IS_ERR(opp_table->clk))
		clk_put(opp_table->clk);

	opp_dev = list_first_entry(&opp_table->dev_list, struct opp_device,
				   node);

	_remove_opp_dev(opp_dev, opp_table);

	/* dev_list must be empty now */
	WARN_ON(!list_empty(&opp_table->dev_list));

	list_del_rcu(&opp_table->node);
	call_srcu(&opp_table->srcu_head.srcu, &opp_table->rcu_head,
		  _kfree_device_rcu);
}
837
/**
 * _kfree_opp_rcu() - Free OPP RCU handler
 * @head: RCU head
 *
 * NOTE(review): defers again via kfree_rcu() from within the SRCU callback,
 * mirroring _kfree_opp_dev_rcu(); confirm the double deferral is intended.
 */
static void _kfree_opp_rcu(struct rcu_head *head)
{
	struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head);

	kfree_rcu(opp, rcu_head);
}
848
/**
 * _opp_remove() - Remove an OPP from a table definition
 * @opp_table: points back to the opp_table struct this opp belongs to
 * @opp: pointer to the OPP to remove
 * @notify: OPP_EVENT_REMOVE notification should be sent or not
 *
 * This function removes an opp definition from the opp table. If the removed
 * OPP was the last one, the (now unused) opp_table may be freed as well via
 * _remove_opp_table().
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * It is assumed that the caller holds required mutex for an RCU updater
 * strategy.
 */
void _opp_remove(struct opp_table *opp_table, struct dev_pm_opp *opp,
		 bool notify)
{
	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	if (notify)
		srcu_notifier_call_chain(&opp_table->srcu_head,
					 OPP_EVENT_REMOVE, opp);
	opp_debug_remove_one(opp);
	list_del_rcu(&opp->node);
	call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);

	_remove_opp_table(opp_table);
}
877
/**
 * dev_pm_opp_remove() - Remove an OPP from OPP table
 * @dev: device for which we do this operation
 * @freq: OPP to remove with matching 'freq'
 *
 * This function removes an opp from the opp table. A missing table or a
 * missing matching frequency is only warned about; the function returns void.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
void dev_pm_opp_remove(struct device *dev, unsigned long freq)
{
	struct dev_pm_opp *opp;
	struct opp_table *opp_table;
	bool found = false;

	/* Hold our table modification lock here */
	mutex_lock(&opp_table_lock);

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		goto unlock;

	/* Locate the OPP with exactly matching rate */
	list_for_each_entry(opp, &opp_table->opp_list, node) {
		if (opp->rate == freq) {
			found = true;
			break;
		}
	}

	if (!found) {
		dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
			 __func__, freq);
		goto unlock;
	}

	_opp_remove(opp_table, opp, true);
unlock:
	mutex_unlock(&opp_table_lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
922
/*
 * Allocate a zeroed dev_pm_opp and find-or-create the OPP table for @dev,
 * storing the table in *@opp_table.
 *
 * Return: the new OPP on success, NULL on allocation failure (in which case
 * *@opp_table is not valid).
 */
struct dev_pm_opp *_allocate_opp(struct device *dev,
				 struct opp_table **opp_table)
{
	struct dev_pm_opp *opp;

	/* allocate new OPP node */
	opp = kzalloc(sizeof(*opp), GFP_KERNEL);
	if (!opp)
		return NULL;

	INIT_LIST_HEAD(&opp->node);

	*opp_table = _add_opp_table(dev);
	if (!*opp_table) {
		kfree(opp);
		return NULL;
	}

	return opp;
}
943
944 static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
945 struct opp_table *opp_table)
946 {
947 struct regulator *reg = opp_table->regulator;
948
949 if (!IS_ERR(reg) &&
950 !regulator_is_supported_voltage(reg, opp->u_volt_min,
951 opp->u_volt_max)) {
952 pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
953 __func__, opp->u_volt_min, opp->u_volt_max);
954 return false;
955 }
956
957 return true;
958 }
959
/*
 * Link @new_opp into @opp_table's frequency-sorted opp_list and register it
 * with debugfs.
 *
 * Return: 0 on success; 0 also for an exact duplicate (same rate and same
 * voltage) of an available OPP; -EEXIST when the rate already exists with a
 * different voltage or the duplicate is unavailable. If the table's
 * regulator can't deliver the OPP's voltage range, the OPP is added but
 * marked unavailable.
 */
int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
	     struct opp_table *opp_table)
{
	struct dev_pm_opp *opp;
	struct list_head *head = &opp_table->opp_list;
	int ret;

	/*
	 * Insert new OPP in order of increasing frequency and discard if
	 * already present.
	 *
	 * Need to use &opp_table->opp_list in the condition part of the 'for'
	 * loop, don't replace it with head otherwise it will become an infinite
	 * loop.
	 */
	list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
		if (new_opp->rate > opp->rate) {
			head = &opp->node;
			continue;
		}

		if (new_opp->rate < opp->rate)
			break;

		/* Duplicate OPPs */
		dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
			 __func__, opp->rate, opp->u_volt, opp->available,
			 new_opp->rate, new_opp->u_volt, new_opp->available);

		return opp->available && new_opp->u_volt == opp->u_volt ?
			0 : -EEXIST;
	}

	new_opp->opp_table = opp_table;
	list_add_rcu(&new_opp->node, head);

	/* Debugfs failure is logged but doesn't fail the add */
	ret = opp_debug_create_one(new_opp, opp_table);
	if (ret)
		dev_err(dev, "%s: Failed to register opp to debugfs (%d)\n",
			__func__, ret);

	if (!_opp_supported_by_regulators(new_opp, opp_table)) {
		new_opp->available = false;
		dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n",
			 __func__, new_opp->rate);
	}

	return 0;
}
1009
/**
 * _opp_add_v1() - Allocate a OPP based on v1 bindings.
 * @dev: device for which we do this operation
 * @freq: Frequency in Hz for this OPP
 * @u_volt: Voltage in uVolts for this OPP
 * @dynamic: Dynamically added OPPs.
 *
 * This function adds an opp definition to the opp table and returns status.
 * The opp is made available by default and it can be controlled using
 * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
 *
 * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table
 * and freed by dev_pm_opp_of_remove_table.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 */
int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
		bool dynamic)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *new_opp;
	unsigned long tol;
	int ret;

	/* Hold our table modification lock here */
	mutex_lock(&opp_table_lock);

	new_opp = _allocate_opp(dev, &opp_table);
	if (!new_opp) {
		ret = -ENOMEM;
		goto unlock;
	}

	/* populate the opp table */
	new_opp->rate = freq;
	/* Derive the min/max window from the v1 voltage-tolerance percentage */
	tol = u_volt * opp_table->voltage_tolerance_v1 / 100;
	new_opp->u_volt = u_volt;
	new_opp->u_volt_min = u_volt - tol;
	new_opp->u_volt_max = u_volt + tol;
	new_opp->available = true;
	new_opp->dynamic = dynamic;

	ret = _opp_add(dev, new_opp, opp_table);
	if (ret)
		goto free_opp;

	mutex_unlock(&opp_table_lock);

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp);
	return 0;

free_opp:
	_opp_remove(opp_table, new_opp, false);
unlock:
	mutex_unlock(&opp_table_lock);
	return ret;
}
1082
1083 /**
1084 * dev_pm_opp_set_supported_hw() - Set supported platforms
1085 * @dev: Device for which supported-hw has to be set.
1086 * @versions: Array of hierarchy of versions to match.
1087 * @count: Number of elements in the array.
1088 *
1089 * This is required only for the V2 bindings, and it enables a platform to
1090 * specify the hierarchy of versions it supports. OPP layer will then enable
1091 * OPPs, which are available for those versions, based on its 'opp-supported-hw'
1092 * property.
1093 *
1094 * Locking: The internal opp_table and opp structures are RCU protected.
1095 * Hence this function internally uses RCU updater strategy with mutex locks
1096 * to keep the integrity of the internal data structures. Callers should ensure
1097 * that this function is *NOT* called under RCU protection or in contexts where
1098 * mutex cannot be locked.
1099 */
1100 int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions,
1101 unsigned int count)
1102 {
1103 struct opp_table *opp_table;
1104 int ret = 0;
1105
1106 /* Hold our table modification lock here */
1107 mutex_lock(&opp_table_lock);
1108
1109 opp_table = _add_opp_table(dev);
1110 if (!opp_table) {
1111 ret = -ENOMEM;
1112 goto unlock;
1113 }
1114
1115 /* Make sure there are no concurrent readers while updating opp_table */
1116 WARN_ON(!list_empty(&opp_table->opp_list));
1117
1118 /* Do we already have a version hierarchy associated with opp_table? */
1119 if (opp_table->supported_hw) {
1120 dev_err(dev, "%s: Already have supported hardware list\n",
1121 __func__);
1122 ret = -EBUSY;
1123 goto err;
1124 }
1125
1126 opp_table->supported_hw = kmemdup(versions, count * sizeof(*versions),
1127 GFP_KERNEL);
1128 if (!opp_table->supported_hw) {
1129 ret = -ENOMEM;
1130 goto err;
1131 }
1132
1133 opp_table->supported_hw_count = count;
1134 mutex_unlock(&opp_table_lock);
1135 return 0;
1136
1137 err:
1138 _remove_opp_table(opp_table);
1139 unlock:
1140 mutex_unlock(&opp_table_lock);
1141
1142 return ret;
1143 }
1144 EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw);
1145
1146 /**
1147 * dev_pm_opp_put_supported_hw() - Releases resources blocked for supported hw
1148 * @dev: Device for which supported-hw has to be put.
1149 *
1150 * This is required only for the V2 bindings, and is called for a matching
1151 * dev_pm_opp_set_supported_hw(). Until this is called, the opp_table structure
1152 * will not be freed.
1153 *
1154 * Locking: The internal opp_table and opp structures are RCU protected.
1155 * Hence this function internally uses RCU updater strategy with mutex locks
1156 * to keep the integrity of the internal data structures. Callers should ensure
1157 * that this function is *NOT* called under RCU protection or in contexts where
1158 * mutex cannot be locked.
1159 */
1160 void dev_pm_opp_put_supported_hw(struct device *dev)
1161 {
1162 struct opp_table *opp_table;
1163
1164 /* Hold our table modification lock here */
1165 mutex_lock(&opp_table_lock);
1166
1167 /* Check for existing table for 'dev' first */
1168 opp_table = _find_opp_table(dev);
1169 if (IS_ERR(opp_table)) {
1170 dev_err(dev, "Failed to find opp_table: %ld\n",
1171 PTR_ERR(opp_table));
1172 goto unlock;
1173 }
1174
1175 /* Make sure there are no concurrent readers while updating opp_table */
1176 WARN_ON(!list_empty(&opp_table->opp_list));
1177
1178 if (!opp_table->supported_hw) {
1179 dev_err(dev, "%s: Doesn't have supported hardware list\n",
1180 __func__);
1181 goto unlock;
1182 }
1183
1184 kfree(opp_table->supported_hw);
1185 opp_table->supported_hw = NULL;
1186 opp_table->supported_hw_count = 0;
1187
1188 /* Try freeing opp_table if this was the last blocking resource */
1189 _remove_opp_table(opp_table);
1190
1191 unlock:
1192 mutex_unlock(&opp_table_lock);
1193 }
1194 EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw);
1195
1196 /**
1197 * dev_pm_opp_set_prop_name() - Set prop-extn name
1198 * @dev: Device for which the prop-name has to be set.
1199 * @name: name to postfix to properties.
1200 *
1201 * This is required only for the V2 bindings, and it enables a platform to
1202 * specify the extn to be used for certain property names. The properties to
1203 * which the extension will apply are opp-microvolt and opp-microamp. OPP core
1204 * should postfix the property name with -<name> while looking for them.
1205 *
1206 * Locking: The internal opp_table and opp structures are RCU protected.
1207 * Hence this function internally uses RCU updater strategy with mutex locks
1208 * to keep the integrity of the internal data structures. Callers should ensure
1209 * that this function is *NOT* called under RCU protection or in contexts where
1210 * mutex cannot be locked.
1211 */
1212 int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
1213 {
1214 struct opp_table *opp_table;
1215 int ret = 0;
1216
1217 /* Hold our table modification lock here */
1218 mutex_lock(&opp_table_lock);
1219
1220 opp_table = _add_opp_table(dev);
1221 if (!opp_table) {
1222 ret = -ENOMEM;
1223 goto unlock;
1224 }
1225
1226 /* Make sure there are no concurrent readers while updating opp_table */
1227 WARN_ON(!list_empty(&opp_table->opp_list));
1228
1229 /* Do we already have a prop-name associated with opp_table? */
1230 if (opp_table->prop_name) {
1231 dev_err(dev, "%s: Already have prop-name %s\n", __func__,
1232 opp_table->prop_name);
1233 ret = -EBUSY;
1234 goto err;
1235 }
1236
1237 opp_table->prop_name = kstrdup(name, GFP_KERNEL);
1238 if (!opp_table->prop_name) {
1239 ret = -ENOMEM;
1240 goto err;
1241 }
1242
1243 mutex_unlock(&opp_table_lock);
1244 return 0;
1245
1246 err:
1247 _remove_opp_table(opp_table);
1248 unlock:
1249 mutex_unlock(&opp_table_lock);
1250
1251 return ret;
1252 }
1253 EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name);
1254
1255 /**
1256 * dev_pm_opp_put_prop_name() - Releases resources blocked for prop-name
1257 * @dev: Device for which the prop-name has to be put.
1258 *
1259 * This is required only for the V2 bindings, and is called for a matching
1260 * dev_pm_opp_set_prop_name(). Until this is called, the opp_table structure
1261 * will not be freed.
1262 *
1263 * Locking: The internal opp_table and opp structures are RCU protected.
1264 * Hence this function internally uses RCU updater strategy with mutex locks
1265 * to keep the integrity of the internal data structures. Callers should ensure
1266 * that this function is *NOT* called under RCU protection or in contexts where
1267 * mutex cannot be locked.
1268 */
1269 void dev_pm_opp_put_prop_name(struct device *dev)
1270 {
1271 struct opp_table *opp_table;
1272
1273 /* Hold our table modification lock here */
1274 mutex_lock(&opp_table_lock);
1275
1276 /* Check for existing table for 'dev' first */
1277 opp_table = _find_opp_table(dev);
1278 if (IS_ERR(opp_table)) {
1279 dev_err(dev, "Failed to find opp_table: %ld\n",
1280 PTR_ERR(opp_table));
1281 goto unlock;
1282 }
1283
1284 /* Make sure there are no concurrent readers while updating opp_table */
1285 WARN_ON(!list_empty(&opp_table->opp_list));
1286
1287 if (!opp_table->prop_name) {
1288 dev_err(dev, "%s: Doesn't have a prop-name\n", __func__);
1289 goto unlock;
1290 }
1291
1292 kfree(opp_table->prop_name);
1293 opp_table->prop_name = NULL;
1294
1295 /* Try freeing opp_table if this was the last blocking resource */
1296 _remove_opp_table(opp_table);
1297
1298 unlock:
1299 mutex_unlock(&opp_table_lock);
1300 }
1301 EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
1302
1303 /**
1304 * dev_pm_opp_set_regulator() - Set regulator name for the device
1305 * @dev: Device for which regulator name is being set.
1306 * @name: Name of the regulator.
1307 *
1308 * In order to support OPP switching, OPP layer needs to know the name of the
1309 * device's regulator, as the core would be required to switch voltages as well.
1310 *
1311 * This must be called before any OPPs are initialized for the device.
1312 *
1313 * Locking: The internal opp_table and opp structures are RCU protected.
1314 * Hence this function internally uses RCU updater strategy with mutex locks
1315 * to keep the integrity of the internal data structures. Callers should ensure
1316 * that this function is *NOT* called under RCU protection or in contexts where
1317 * mutex cannot be locked.
1318 */
1319 int dev_pm_opp_set_regulator(struct device *dev, const char *name)
1320 {
1321 struct opp_table *opp_table;
1322 struct regulator *reg;
1323 int ret;
1324
1325 mutex_lock(&opp_table_lock);
1326
1327 opp_table = _add_opp_table(dev);
1328 if (!opp_table) {
1329 ret = -ENOMEM;
1330 goto unlock;
1331 }
1332
1333 /* This should be called before OPPs are initialized */
1334 if (WARN_ON(!list_empty(&opp_table->opp_list))) {
1335 ret = -EBUSY;
1336 goto err;
1337 }
1338
1339 /* Already have a regulator set */
1340 if (WARN_ON(!IS_ERR(opp_table->regulator))) {
1341 ret = -EBUSY;
1342 goto err;
1343 }
1344 /* Allocate the regulator */
1345 reg = regulator_get_optional(dev, name);
1346 if (IS_ERR(reg)) {
1347 ret = PTR_ERR(reg);
1348 if (ret != -EPROBE_DEFER)
1349 dev_err(dev, "%s: no regulator (%s) found: %d\n",
1350 __func__, name, ret);
1351 goto err;
1352 }
1353
1354 opp_table->regulator = reg;
1355
1356 mutex_unlock(&opp_table_lock);
1357 return 0;
1358
1359 err:
1360 _remove_opp_table(opp_table);
1361 unlock:
1362 mutex_unlock(&opp_table_lock);
1363
1364 return ret;
1365 }
1366 EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulator);
1367
1368 /**
1369 * dev_pm_opp_put_regulator() - Releases resources blocked for regulator
1370 * @dev: Device for which regulator was set.
1371 *
1372 * Locking: The internal opp_table and opp structures are RCU protected.
1373 * Hence this function internally uses RCU updater strategy with mutex locks
1374 * to keep the integrity of the internal data structures. Callers should ensure
1375 * that this function is *NOT* called under RCU protection or in contexts where
1376 * mutex cannot be locked.
1377 */
1378 void dev_pm_opp_put_regulator(struct device *dev)
1379 {
1380 struct opp_table *opp_table;
1381
1382 mutex_lock(&opp_table_lock);
1383
1384 /* Check for existing table for 'dev' first */
1385 opp_table = _find_opp_table(dev);
1386 if (IS_ERR(opp_table)) {
1387 dev_err(dev, "Failed to find opp_table: %ld\n",
1388 PTR_ERR(opp_table));
1389 goto unlock;
1390 }
1391
1392 if (IS_ERR(opp_table->regulator)) {
1393 dev_err(dev, "%s: Doesn't have regulator set\n", __func__);
1394 goto unlock;
1395 }
1396
1397 /* Make sure there are no concurrent readers while updating opp_table */
1398 WARN_ON(!list_empty(&opp_table->opp_list));
1399
1400 regulator_put(opp_table->regulator);
1401 opp_table->regulator = ERR_PTR(-ENXIO);
1402
1403 /* Try freeing opp_table if this was the last blocking resource */
1404 _remove_opp_table(opp_table);
1405
1406 unlock:
1407 mutex_unlock(&opp_table_lock);
1408 }
1409 EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulator);
1410
/**
 * dev_pm_opp_add() - Dynamically add a new OPP for a device
 * @dev: device for which we do this operation
 * @freq: Frequency in Hz for this OPP
 * @u_volt: Voltage in uVolts for this OPP
 *
 * This function adds an opp definition to the opp table and returns status.
 * The opp is made available by default and it can be controlled using
 * dev_pm_opp_enable/disable functions.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 */
int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
{
	/* dynamic=true: OPPs added this way may be removed by dev_pm_opp_remove */
	return _opp_add_v1(dev, freq, u_volt, true);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_add);
1439
/**
 * _opp_set_availability() - helper to set the availability of an opp
 * @dev: device for which we do this operation
 * @freq: OPP frequency to modify availability
 * @availability_req: availability status requested for this opp
 *
 * Set the availability of an OPP with an RCU operation, opp_{enable,disable}
 * share a common logic which is isolated here.
 *
 * The OPP is never modified in place: a copy is made, the copy's availability
 * flag is set as requested, the copy replaces the original in the list via
 * list_replace_rcu(), and the original node is freed only after an SRCU grace
 * period elapses.
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks to
 * keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 */
static int _opp_set_availability(struct device *dev, unsigned long freq,
				 bool availability_req)
{
	struct opp_table *opp_table;
	/* opp stays ERR_PTR(-ENODEV) unless a matching frequency is found */
	struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
	int r = 0;

	/* keep the node allocated (done before taking the mutex) */
	new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
	if (!new_opp)
		return -ENOMEM;

	mutex_lock(&opp_table_lock);

	/* Find the opp_table */
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		r = PTR_ERR(opp_table);
		dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
		goto unlock;
	}

	/* Do we have the frequency? */
	list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
		if (tmp_opp->rate == freq) {
			opp = tmp_opp;
			break;
		}
	}
	if (IS_ERR(opp)) {
		r = PTR_ERR(opp);
		goto unlock;
	}

	/* Is update really needed? (if not, return 0 without modification) */
	if (opp->available == availability_req)
		goto unlock;
	/* copy the old data over */
	*new_opp = *opp;

	/* plug in new node */
	new_opp->available = availability_req;

	/* Publish the copy; RCU readers see either the old or the new node */
	list_replace_rcu(&opp->node, &new_opp->node);
	mutex_unlock(&opp_table_lock);
	/* Free the old node only once all current SRCU readers are done */
	call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);

	/* Notify the change of the OPP availability */
	if (availability_req)
		srcu_notifier_call_chain(&opp_table->srcu_head,
					 OPP_EVENT_ENABLE, new_opp);
	else
		srcu_notifier_call_chain(&opp_table->srcu_head,
					 OPP_EVENT_DISABLE, new_opp);

	return 0;

unlock:
	mutex_unlock(&opp_table_lock);
	/* the preallocated replacement node was not used */
	kfree(new_opp);
	return r;
}
1521
/**
 * dev_pm_opp_enable() - Enable a specific OPP
 * @dev: device for which we do this operation
 * @freq: OPP frequency to enable
 *
 * Enables a provided opp. If the operation is valid, this returns 0, else the
 * corresponding error value. It is meant to be used to make an OPP available
 * again after it was temporarily made unavailable with dev_pm_opp_disable.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function indirectly uses RCU and mutex locks to keep the
 * integrity of the internal data structures. Callers should ensure that
 * this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 */
int dev_pm_opp_enable(struct device *dev, unsigned long freq)
{
	return _opp_set_availability(dev, freq, true);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
1546
/**
 * dev_pm_opp_disable() - Disable a specific OPP
 * @dev: device for which we do this operation
 * @freq: OPP frequency to disable
 *
 * Disables a provided opp. If the operation is valid, this returns
 * 0, else the corresponding error value. It is meant to be a temporary
 * control by users to make this OPP not available until the circumstances are
 * right to make it available again (with a call to dev_pm_opp_enable).
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function indirectly uses RCU and mutex locks to keep the
 * integrity of the internal data structures. Callers should ensure that
 * this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 */
int dev_pm_opp_disable(struct device *dev, unsigned long freq)
{
	return _opp_set_availability(dev, freq, false);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
1572
1573 /**
1574 * dev_pm_opp_get_notifier() - find notifier_head of the device with opp
1575 * @dev: device pointer used to lookup OPP table.
1576 *
1577 * Return: pointer to notifier head if found, otherwise -ENODEV or
1578 * -EINVAL based on type of error casted as pointer. value must be checked
1579 * with IS_ERR to determine valid pointer or error result.
1580 *
1581 * Locking: This function must be called under rcu_read_lock(). opp_table is a
1582 * RCU protected pointer. The reason for the same is that the opp pointer which
1583 * is returned will remain valid for use with opp_get_{voltage, freq} only while
1584 * under the locked area. The pointer returned must be used prior to unlocking
1585 * with rcu_read_unlock() to maintain the integrity of the pointer.
1586 */
1587 struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
1588 {
1589 struct opp_table *opp_table = _find_opp_table(dev);
1590
1591 if (IS_ERR(opp_table))
1592 return ERR_CAST(opp_table); /* matching type */
1593
1594 return &opp_table->srcu_head;
1595 }
1596 EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
1597
1598 /*
1599 * Free OPPs either created using static entries present in DT or even the
1600 * dynamically added entries based on remove_all param.
1601 */
1602 void _dev_pm_opp_remove_table(struct device *dev, bool remove_all)
1603 {
1604 struct opp_table *opp_table;
1605 struct dev_pm_opp *opp, *tmp;
1606
1607 /* Hold our table modification lock here */
1608 mutex_lock(&opp_table_lock);
1609
1610 /* Check for existing table for 'dev' */
1611 opp_table = _find_opp_table(dev);
1612 if (IS_ERR(opp_table)) {
1613 int error = PTR_ERR(opp_table);
1614
1615 if (error != -ENODEV)
1616 WARN(1, "%s: opp_table: %d\n",
1617 IS_ERR_OR_NULL(dev) ?
1618 "Invalid device" : dev_name(dev),
1619 error);
1620 goto unlock;
1621 }
1622
1623 /* Find if opp_table manages a single device */
1624 if (list_is_singular(&opp_table->dev_list)) {
1625 /* Free static OPPs */
1626 list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
1627 if (remove_all || !opp->dynamic)
1628 _opp_remove(opp_table, opp, true);
1629 }
1630 } else {
1631 _remove_opp_dev(_find_opp_dev(dev, opp_table), opp_table);
1632 }
1633
1634 unlock:
1635 mutex_unlock(&opp_table_lock);
1636 }
1637
/**
 * dev_pm_opp_remove_table() - Free all OPPs associated with the device
 * @dev: device pointer used to lookup OPP table.
 *
 * Free both OPPs created using static entries present in DT and the
 * dynamically added entries.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function indirectly uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
void dev_pm_opp_remove_table(struct device *dev)
{
	/* remove_all=true: drop dynamic OPPs as well as static ones */
	_dev_pm_opp_remove_table(dev, true);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table);