/*
 * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework
 *	    for Non-CPU Devices.
 *
 * Copyright (C) 2011 Samsung Electronics
 *	MyungJoo Ham <myungjoo.ham@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/pm_opp.h>
#include <linux/devfreq.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include <linux/list.h>
#include <linux/printk.h>
#include <linux/hrtimer.h>
#include <linux/of.h>
#include "governor.h"

static struct class *devfreq_class;

/*
 * devfreq core provides delayed work based load monitoring helper
 * functions. Governors can use these or can implement their own
 * monitoring mechanism.
 */
static struct workqueue_struct *devfreq_wq;

/* The list of all device-devfreq governors */
static LIST_HEAD(devfreq_governor_list);
/* The list of all device-devfreq */
static LIST_HEAD(devfreq_list);
static DEFINE_MUTEX(devfreq_list_lock);

/**
 * find_device_devfreq() - find devfreq struct using device pointer
 * @dev:	device pointer used to lookup device devfreq.
 *
 * Search the list of device devfreqs and return the matched device's
 * devfreq info. devfreq_list_lock should be held by the caller.
 */
static struct devfreq *find_device_devfreq(struct device *dev)
{
	struct devfreq *tmp_devfreq;

	if (IS_ERR_OR_NULL(dev)) {
		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	WARN(!mutex_is_locked(&devfreq_list_lock),
	     "devfreq_list_lock must be locked.");

	list_for_each_entry(tmp_devfreq, &devfreq_list, node) {
		if (tmp_devfreq->dev.parent == dev)
			return tmp_devfreq;
	}

	return ERR_PTR(-ENODEV);
}

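/* Return the lowest frequency currently available in the device's OPP table, or 0 if none. */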
static unsigned long find_available_min_freq(struct devfreq *devfreq)
{
	struct dev_pm_opp *opp;
	unsigned long min_freq = 0;

	opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &min_freq);
	if (IS_ERR(opp))
		min_freq = 0;
	else
		dev_pm_opp_put(opp);

	return min_freq;
}

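/* Return the highest frequency currently available in the device's OPP table, or 0 if none. */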
static unsigned long find_available_max_freq(struct devfreq *devfreq)
{
	struct dev_pm_opp *opp;
	unsigned long max_freq = ULONG_MAX;

	opp = dev_pm_opp_find_freq_floor(devfreq->dev.parent, &max_freq);
	if (IS_ERR(opp))
		max_freq = 0;
	else
		dev_pm_opp_put(opp);

	return max_freq;
}

/**
 * devfreq_get_freq_level() - Lookup freq_table for the frequency
 * @devfreq:	the devfreq instance
 * @freq:	the target frequency
 */
static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
{
	int lev;

	for (lev = 0; lev < devfreq->profile->max_state; lev++)
		if (freq == devfreq->profile->freq_table[lev])
			return lev;

	return -EINVAL;
}

/**
 * devfreq_set_freq_table() - Initialize freq_table for the frequency
 * @devfreq:	the devfreq instance
 */
static void devfreq_set_freq_table(struct devfreq *devfreq)
{
	struct devfreq_dev_profile *profile = devfreq->profile;
	struct dev_pm_opp *opp;
	unsigned long freq;
	int i, count;

	/* Initialize the freq_table from OPP table */
	count = dev_pm_opp_get_opp_count(devfreq->dev.parent);
	if (count <= 0)
		return;

	profile->max_state = count;
	profile->freq_table = devm_kcalloc(devfreq->dev.parent,
					profile->max_state,
					sizeof(*profile->freq_table),
					GFP_KERNEL);
	if (!profile->freq_table) {
		profile->max_state = 0;
		return;
	}

	for (i = 0, freq = 0; i < profile->max_state; i++, freq++) {
		opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &freq);
		if (IS_ERR(opp)) {
			devm_kfree(devfreq->dev.parent, profile->freq_table);
			profile->max_state = 0;
			return;
		}
		dev_pm_opp_put(opp);
		profile->freq_table[i] = freq;
	}
}

/**
 * devfreq_update_status() - Update statistics of devfreq behavior
 * @devfreq:	the devfreq instance
 * @freq:	the update target frequency
 */
int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
{
	int lev, prev_lev, ret = 0;
	unsigned long cur_time;

	cur_time = jiffies;

	/* Immediately exit if previous_freq is not initialized yet. */
	if (!devfreq->previous_freq)
		goto out;

	prev_lev = devfreq_get_freq_level(devfreq, devfreq->previous_freq);
	if (prev_lev < 0) {
		ret = prev_lev;
		goto out;
	}

	devfreq->time_in_state[prev_lev] +=
			 cur_time - devfreq->last_stat_updated;

	lev = devfreq_get_freq_level(devfreq, freq);
	if (lev < 0) {
		ret = lev;
		goto out;
	}

	if (lev != prev_lev) {
		devfreq->trans_table[(prev_lev *
				devfreq->profile->max_state) + lev]++;
		devfreq->total_trans++;
	}

out:
	devfreq->last_stat_updated = cur_time;
	return ret;
}
EXPORT_SYMBOL(devfreq_update_status);

/**
 * find_devfreq_governor() - find devfreq governor from name
 * @name:	name of the governor
 *
 * Search the list of devfreq governors and return the matched
 * governor's pointer. devfreq_list_lock should be held by the caller.
 */
static struct devfreq_governor *find_devfreq_governor(const char *name)
{
	struct devfreq_governor *tmp_governor;

	if (IS_ERR_OR_NULL(name)) {
		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	WARN(!mutex_is_locked(&devfreq_list_lock),
	     "devfreq_list_lock must be locked.");

	list_for_each_entry(tmp_governor, &devfreq_governor_list, node) {
		if (!strncmp(tmp_governor->name, name, DEVFREQ_NAME_LEN))
			return tmp_governor;
	}

	return ERR_PTR(-ENODEV);
}

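/* Notify listeners on the per-device transition notifier chain of a PRECHANGE or POSTCHANGE event. */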
static int devfreq_notify_transition(struct devfreq *devfreq,
		struct devfreq_freqs *freqs, unsigned int state)
{
	if (!devfreq)
		return -EINVAL;

	switch (state) {
	case DEVFREQ_PRECHANGE:
		srcu_notifier_call_chain(&devfreq->transition_notifier_list,
				DEVFREQ_PRECHANGE, freqs);
		break;

	case DEVFREQ_POSTCHANGE:
		srcu_notifier_call_chain(&devfreq->transition_notifier_list,
				DEVFREQ_POSTCHANGE, freqs);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* Load monitoring helper functions for governors use */

/**
 * update_devfreq() - Reevaluate the device and configure frequency.
 * @devfreq:	the devfreq instance.
 *
 * Note: Lock devfreq->lock before calling update_devfreq
 *	 This function is exported for governors.
 */
int update_devfreq(struct devfreq *devfreq)
{
	struct devfreq_freqs freqs;
	unsigned long freq, cur_freq;
	int err = 0;
	u32 flags = 0;

	if (!mutex_is_locked(&devfreq->lock)) {
		WARN(true, "devfreq->lock must be locked by the caller.\n");
		return -EINVAL;
	}

	if (!devfreq->governor)
		return -EINVAL;

	/* Reevaluate the proper frequency */
	err = devfreq->governor->get_target_freq(devfreq, &freq);
	if (err)
		return err;

	/*
	 * Adjust the frequency with user freq and QoS.
	 *
	 * List from the highest priority
	 * max_freq
	 * min_freq
	 */

	if (devfreq->min_freq && freq < devfreq->min_freq) {
		freq = devfreq->min_freq;
		flags &= ~DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use GLB */
	}
	if (devfreq->max_freq && freq > devfreq->max_freq) {
		freq = devfreq->max_freq;
		flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use LUB */
	}

	if (devfreq->profile->get_cur_freq)
		devfreq->profile->get_cur_freq(devfreq->dev.parent, &cur_freq);
	else
		cur_freq = devfreq->previous_freq;

	freqs.old = cur_freq;
	freqs.new = freq;
	devfreq_notify_transition(devfreq, &freqs, DEVFREQ_PRECHANGE);

	err = devfreq->profile->target(devfreq->dev.parent, &freq, flags);
	if (err) {
		freqs.new = cur_freq;
		devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
		return err;
	}

	freqs.new = freq;
	devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);

	if (devfreq->profile->freq_table)
		if (devfreq_update_status(devfreq, freq))
			dev_err(&devfreq->dev,
				"Couldn't update frequency transition information.\n");

	devfreq->previous_freq = freq;
	return err;
}
EXPORT_SYMBOL(update_devfreq);

/**
 * devfreq_monitor() - Periodically poll devfreq objects.
 * @work:	the work struct used to run devfreq_monitor periodically.
 *
 */
static void devfreq_monitor(struct work_struct *work)
{
	int err;
	struct devfreq *devfreq = container_of(work,
					struct devfreq, work.work);

	mutex_lock(&devfreq->lock);
	err = update_devfreq(devfreq);
	if (err)
		dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err);

	queue_delayed_work(devfreq_wq, &devfreq->work,
				msecs_to_jiffies(devfreq->profile->polling_ms));
	mutex_unlock(&devfreq->lock);
}

/**
 * devfreq_monitor_start() - Start load monitoring of devfreq instance
 * @devfreq:	the devfreq instance.
 *
 * Helper function for starting devfreq device load monitoring. By
 * default delayed work based monitoring is supported. Function
 * to be called from governor in response to DEVFREQ_GOV_START
 * event when device is added to devfreq framework.
 */
void devfreq_monitor_start(struct devfreq *devfreq)
{
	INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
	if (devfreq->profile->polling_ms)
		queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));
}
EXPORT_SYMBOL(devfreq_monitor_start);

/**
 * devfreq_monitor_stop() - Stop load monitoring of a devfreq instance
 * @devfreq:	the devfreq instance.
 *
 * Helper function to stop devfreq device load monitoring. Function
 * to be called from governor in response to DEVFREQ_GOV_STOP
 * event when device is removed from devfreq framework.
 */
void devfreq_monitor_stop(struct devfreq *devfreq)
{
	cancel_delayed_work_sync(&devfreq->work);
}
EXPORT_SYMBOL(devfreq_monitor_stop);

/**
 * devfreq_monitor_suspend() - Suspend load monitoring of a devfreq instance
 * @devfreq:	the devfreq instance.
 *
 * Helper function to suspend devfreq device load monitoring. Function
 * to be called from governor in response to DEVFREQ_GOV_SUSPEND
 * event or when polling interval is set to zero.
 *
 * Note: Though this function is the same as devfreq_monitor_stop(),
 * it is intentionally kept separate to provide hooks for collecting
 * transition statistics.
 */
void devfreq_monitor_suspend(struct devfreq *devfreq)
{
	mutex_lock(&devfreq->lock);
	if (devfreq->stop_polling) {
		mutex_unlock(&devfreq->lock);
		return;
	}

	devfreq_update_status(devfreq, devfreq->previous_freq);
	devfreq->stop_polling = true;
	mutex_unlock(&devfreq->lock);
	cancel_delayed_work_sync(&devfreq->work);
}
EXPORT_SYMBOL(devfreq_monitor_suspend);

/**
 * devfreq_monitor_resume() - Resume load monitoring of a devfreq instance
 * @devfreq:	the devfreq instance.
 *
 * Helper function to resume devfreq device load monitoring. Function
 * to be called from governor in response to DEVFREQ_GOV_RESUME
 * event or when polling interval is set to non-zero.
 */
void devfreq_monitor_resume(struct devfreq *devfreq)
{
	unsigned long freq;

	mutex_lock(&devfreq->lock);
	if (!devfreq->stop_polling)
		goto out;

	if (!delayed_work_pending(&devfreq->work) &&
			devfreq->profile->polling_ms)
		queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));

	devfreq->last_stat_updated = jiffies;
	devfreq->stop_polling = false;

	if (devfreq->profile->get_cur_freq &&
		!devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
		devfreq->previous_freq = freq;

out:
	mutex_unlock(&devfreq->lock);
}
EXPORT_SYMBOL(devfreq_monitor_resume);

/**
 * devfreq_interval_update() - Update device devfreq monitoring interval
 * @devfreq:	the devfreq instance.
 * @delay:	new polling interval to be set.
 *
 * Helper function to set new load monitoring polling interval. Function
 * to be called from governor in response to DEVFREQ_GOV_INTERVAL event.
 */
void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay)
{
	unsigned int cur_delay = devfreq->profile->polling_ms;
	unsigned int new_delay = *delay;

	mutex_lock(&devfreq->lock);
	devfreq->profile->polling_ms = new_delay;

	if (devfreq->stop_polling)
		goto out;

	/* if new delay is zero, stop polling */
	if (!new_delay) {
		mutex_unlock(&devfreq->lock);
		cancel_delayed_work_sync(&devfreq->work);
		return;
	}

	/* if current delay is zero, start polling with new delay */
	if (!cur_delay) {
		queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));
		goto out;
	}

	/* if current delay is greater than new delay, restart polling */
	if (cur_delay > new_delay) {
		mutex_unlock(&devfreq->lock);
		cancel_delayed_work_sync(&devfreq->work);
		mutex_lock(&devfreq->lock);
		if (!devfreq->stop_polling)
			queue_delayed_work(devfreq_wq, &devfreq->work,
				msecs_to_jiffies(devfreq->profile->polling_ms));
	}
out:
	mutex_unlock(&devfreq->lock);
}
EXPORT_SYMBOL(devfreq_interval_update);

/**
 * devfreq_notifier_call() - Notify that the device frequency requirements
 *			     have been changed out of devfreq framework.
 * @nb:		the notifier_block (supposed to be devfreq->nb)
 * @type:	not used
 * @devp:	not used
 *
 * Called by a notifier that uses devfreq->nb.
 */
static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
				 void *devp)
{
	struct devfreq *devfreq = container_of(nb, struct devfreq, nb);
	int ret;

	mutex_lock(&devfreq->lock);
	ret = update_devfreq(devfreq);
	mutex_unlock(&devfreq->lock);

	return ret;
}

/**
 * devfreq_dev_release() - Callback for struct device to release the device.
 * @dev:	the devfreq device
 *
 * Remove devfreq from the list and release its resources.
 */
static void devfreq_dev_release(struct device *dev)
{
	struct devfreq *devfreq = to_devfreq(dev);

	mutex_lock(&devfreq_list_lock);
	if (IS_ERR(find_device_devfreq(devfreq->dev.parent))) {
		mutex_unlock(&devfreq_list_lock);
		dev_warn(&devfreq->dev, "releasing devfreq which doesn't exist\n");
		return;
	}
	list_del(&devfreq->node);
	mutex_unlock(&devfreq_list_lock);

	if (devfreq->governor)
		devfreq->governor->event_handler(devfreq,
						 DEVFREQ_GOV_STOP, NULL);

	if (devfreq->profile->exit)
		devfreq->profile->exit(devfreq->dev.parent);

	mutex_destroy(&devfreq->lock);
	kfree(devfreq);
}

/**
 * devfreq_add_device() - Add devfreq feature to the device
 * @dev:	the device to add devfreq feature.
 * @profile:	device-specific profile to run devfreq.
 * @governor_name:	name of the policy to choose frequency.
 * @data:	private data for the governor. The devfreq framework does not
 *		touch this value.
 */
struct devfreq *devfreq_add_device(struct device *dev,
				   struct devfreq_dev_profile *profile,
				   const char *governor_name,
				   void *data)
{
	struct devfreq *devfreq;
	struct devfreq_governor *governor;
	static atomic_t devfreq_no = ATOMIC_INIT(-1);
	int err = 0;

	if (!dev || !profile || !governor_name) {
		dev_err(dev, "%s: Invalid parameters.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	mutex_lock(&devfreq_list_lock);
	devfreq = find_device_devfreq(dev);
	mutex_unlock(&devfreq_list_lock);
	if (!IS_ERR(devfreq)) {
		dev_err(dev, "%s: Unable to create devfreq for the device.\n",
			__func__);
		err = -EINVAL;
		goto err_out;
	}

	devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL);
	if (!devfreq) {
		err = -ENOMEM;
		goto err_out;
	}

	mutex_init(&devfreq->lock);
	mutex_lock(&devfreq->lock);
	devfreq->dev.parent = dev;
	devfreq->dev.class = devfreq_class;
	devfreq->dev.release = devfreq_dev_release;
	devfreq->profile = profile;
	strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
	devfreq->previous_freq = profile->initial_freq;
	devfreq->last_status.current_frequency = profile->initial_freq;
	devfreq->data = data;
	devfreq->nb.notifier_call = devfreq_notifier_call;

	if (!devfreq->profile->max_state && !devfreq->profile->freq_table) {
		mutex_unlock(&devfreq->lock);
		devfreq_set_freq_table(devfreq);
		mutex_lock(&devfreq->lock);
	}

	devfreq->min_freq = find_available_min_freq(devfreq);
	if (!devfreq->min_freq) {
		mutex_unlock(&devfreq->lock);
		err = -EINVAL;
		goto err_dev;
	}

	devfreq->max_freq = find_available_max_freq(devfreq);
	if (!devfreq->max_freq) {
		mutex_unlock(&devfreq->lock);
		err = -EINVAL;
		goto err_dev;
	}

	dev_set_name(&devfreq->dev, "devfreq%d",
				atomic_inc_return(&devfreq_no));
	err = device_register(&devfreq->dev);
	if (err) {
		mutex_unlock(&devfreq->lock);
		goto err_dev;
	}

	devfreq->trans_table = devm_kzalloc(&devfreq->dev,
						sizeof(unsigned int) *
						devfreq->profile->max_state *
						devfreq->profile->max_state,
						GFP_KERNEL);
	devfreq->time_in_state = devm_kzalloc(&devfreq->dev,
						sizeof(unsigned long) *
						devfreq->profile->max_state,
						GFP_KERNEL);
	devfreq->last_stat_updated = jiffies;

	srcu_init_notifier_head(&devfreq->transition_notifier_list);

	mutex_unlock(&devfreq->lock);

	mutex_lock(&devfreq_list_lock);
	list_add(&devfreq->node, &devfreq_list);

	governor = find_devfreq_governor(devfreq->governor_name);
	if (IS_ERR(governor)) {
		dev_err(dev, "%s: Unable to find governor for the device\n",
			__func__);
		err = PTR_ERR(governor);
		goto err_init;
	}

	devfreq->governor = governor;
	err = devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_START,
						NULL);
	if (err) {
		dev_err(dev, "%s: Unable to start governor for the device\n",
			__func__);
		goto err_init;
	}
	mutex_unlock(&devfreq_list_lock);

	return devfreq;

err_init:
	list_del(&devfreq->node);
	mutex_unlock(&devfreq_list_lock);

	device_unregister(&devfreq->dev);
err_dev:
	if (devfreq)
		kfree(devfreq);
err_out:
	return ERR_PTR(err);
}
EXPORT_SYMBOL(devfreq_add_device);

/**
 * devfreq_remove_device() - Remove devfreq feature from a device.
 * @devfreq:	the devfreq instance to be removed
 *
 * The opposite of devfreq_add_device().
 */
int devfreq_remove_device(struct devfreq *devfreq)
{
	if (!devfreq)
		return -EINVAL;

	device_unregister(&devfreq->dev);

	return 0;
}
EXPORT_SYMBOL(devfreq_remove_device);

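/* devres match callback: true if the managed devfreq pointer matches @data. */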
static int devm_devfreq_dev_match(struct device *dev, void *res, void *data)
{
	struct devfreq **r = res;

	if (WARN_ON(!r || !*r))
		return 0;

	return *r == data;
}

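/* devres release callback: remove the managed devfreq instance. */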
static void devm_devfreq_dev_release(struct device *dev, void *res)
{
	devfreq_remove_device(*(struct devfreq **)res);
}

/**
 * devm_devfreq_add_device() - Resource-managed devfreq_add_device()
 * @dev:	the device to add devfreq feature.
 * @profile:	device-specific profile to run devfreq.
 * @governor_name:	name of the policy to choose frequency.
 * @data:	private data for the governor. The devfreq framework does not
 *		touch this value.
 *
 * This function automatically manages the memory of the devfreq device using
 * device resource management and thus simplifies freeing it.
 */
struct devfreq *devm_devfreq_add_device(struct device *dev,
					struct devfreq_dev_profile *profile,
					const char *governor_name,
					void *data)
{
	struct devfreq **ptr, *devfreq;

	ptr = devres_alloc(devm_devfreq_dev_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	devfreq = devfreq_add_device(dev, profile, governor_name, data);
	if (IS_ERR(devfreq)) {
		devres_free(ptr);
		return ERR_PTR(-ENOMEM);
	}

	*ptr = devfreq;
	devres_add(dev, ptr);

	return devfreq;
}
EXPORT_SYMBOL(devm_devfreq_add_device);

#ifdef CONFIG_OF
/*
 * devfreq_get_devfreq_by_phandle - Get the devfreq device from devicetree
 * @dev - the device whose DT node carries the "devfreq" phandle
 * @index - index into the "devfreq" phandle list
 *
 * Return the devfreq instance referenced by the phandle.
 */
struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index)
{
	struct device_node *node;
	struct devfreq *devfreq;

	if (!dev)
		return ERR_PTR(-EINVAL);

	if (!dev->of_node)
		return ERR_PTR(-EINVAL);

	node = of_parse_phandle(dev->of_node, "devfreq", index);
	if (!node)
		return ERR_PTR(-ENODEV);

	mutex_lock(&devfreq_list_lock);
	list_for_each_entry(devfreq, &devfreq_list, node) {
		if (devfreq->dev.parent
			&& devfreq->dev.parent->of_node == node) {
			mutex_unlock(&devfreq_list_lock);
			of_node_put(node);
			return devfreq;
		}
	}
	mutex_unlock(&devfreq_list_lock);
	of_node_put(node);

	return ERR_PTR(-EPROBE_DEFER);
}
#else
struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index)
{
	return ERR_PTR(-ENODEV);
}
#endif /* CONFIG_OF */
EXPORT_SYMBOL_GPL(devfreq_get_devfreq_by_phandle);

/**
 * devm_devfreq_remove_device() - Resource-managed devfreq_remove_device()
 * @dev:	the device from which to remove the devfreq feature.
 * @devfreq:	the devfreq instance to be removed
 */
void devm_devfreq_remove_device(struct device *dev, struct devfreq *devfreq)
{
	WARN_ON(devres_release(dev, devm_devfreq_dev_release,
			       devm_devfreq_dev_match, devfreq));
}
EXPORT_SYMBOL(devm_devfreq_remove_device);

/**
 * devfreq_suspend_device() - Suspend devfreq of a device.
 * @devfreq:	the devfreq instance to be suspended
 *
 * This function is intended to be called by the pm callbacks
 * (e.g., runtime_suspend, suspend) of the device driver that
 * holds the devfreq.
 */
int devfreq_suspend_device(struct devfreq *devfreq)
{
	if (!devfreq)
		return -EINVAL;

	if (!devfreq->governor)
		return 0;

	return devfreq->governor->event_handler(devfreq,
				DEVFREQ_GOV_SUSPEND, NULL);
}
EXPORT_SYMBOL(devfreq_suspend_device);

/**
 * devfreq_resume_device() - Resume devfreq of a device.
 * @devfreq:	the devfreq instance to be resumed
 *
 * This function is intended to be called by the pm callbacks
 * (e.g., runtime_resume, resume) of the device driver that
 * holds the devfreq.
 */
int devfreq_resume_device(struct devfreq *devfreq)
{
	if (!devfreq)
		return -EINVAL;

	if (!devfreq->governor)
		return 0;

	return devfreq->governor->event_handler(devfreq,
				DEVFREQ_GOV_RESUME, NULL);
}
EXPORT_SYMBOL(devfreq_resume_device);

/**
 * devfreq_add_governor() - Add devfreq governor
 * @governor:	the devfreq governor to be added
 */
int devfreq_add_governor(struct devfreq_governor *governor)
{
	struct devfreq_governor *g;
	struct devfreq *devfreq;
	int err = 0;

	if (!governor) {
		pr_err("%s: Invalid parameters.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&devfreq_list_lock);
	g = find_devfreq_governor(governor->name);
	if (!IS_ERR(g)) {
		pr_err("%s: governor %s already registered\n", __func__,
		       g->name);
		err = -EINVAL;
		goto err_out;
	}

	list_add(&governor->node, &devfreq_governor_list);

	list_for_each_entry(devfreq, &devfreq_list, node) {
		int ret = 0;
		struct device *dev = devfreq->dev.parent;

		if (!strncmp(devfreq->governor_name, governor->name,
			     DEVFREQ_NAME_LEN)) {
			/* The following should never occur */
			if (devfreq->governor) {
				dev_warn(dev,
					 "%s: Governor %s already present\n",
					 __func__, devfreq->governor->name);
				ret = devfreq->governor->event_handler(devfreq,
							DEVFREQ_GOV_STOP, NULL);
				if (ret) {
					dev_warn(dev,
						 "%s: Governor %s stop = %d\n",
						 __func__,
						 devfreq->governor->name, ret);
				}
				/* Fall through */
			}
			devfreq->governor = governor;
			ret = devfreq->governor->event_handler(devfreq,
						DEVFREQ_GOV_START, NULL);
			if (ret) {
				dev_warn(dev, "%s: Governor %s start=%d\n",
					 __func__, devfreq->governor->name,
					 ret);
			}
		}
	}

err_out:
	mutex_unlock(&devfreq_list_lock);

	return err;
}
EXPORT_SYMBOL(devfreq_add_governor);

/**
 * devfreq_remove_governor() - Remove a devfreq governor.
 * @governor:	the devfreq governor to be removed
 */
int devfreq_remove_governor(struct devfreq_governor *governor)
{
	struct devfreq_governor *g;
	struct devfreq *devfreq;
	int err = 0;

	if (!governor) {
		pr_err("%s: Invalid parameters.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&devfreq_list_lock);
	g = find_devfreq_governor(governor->name);
	if (IS_ERR(g)) {
		pr_err("%s: governor %s not registered\n", __func__,
		       governor->name);
		err = PTR_ERR(g);
		goto err_out;
	}
	list_for_each_entry(devfreq, &devfreq_list, node) {
		int ret;
		struct device *dev = devfreq->dev.parent;

		if (!strncmp(devfreq->governor_name, governor->name,
			     DEVFREQ_NAME_LEN)) {
			/* we should have a devfreq governor! */
			if (!devfreq->governor) {
				dev_warn(dev, "%s: Governor %s NOT present\n",
					 __func__, governor->name);
				continue;
				/* Fall through */
			}
			ret = devfreq->governor->event_handler(devfreq,
						DEVFREQ_GOV_STOP, NULL);
			if (ret) {
				dev_warn(dev, "%s: Governor %s stop=%d\n",
					 __func__, devfreq->governor->name,
					 ret);
			}
			devfreq->governor = NULL;
		}
	}

	list_del(&governor->node);
err_out:
	mutex_unlock(&devfreq_list_lock);

	return err;
}
EXPORT_SYMBOL(devfreq_remove_governor);

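/* sysfs attributes exposed for each devfreq device under /sys/class/devfreq/ */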
static ssize_t governor_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	if (!to_devfreq(dev)->governor)
		return -EINVAL;

	return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name);
}

static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	int ret;
	char str_governor[DEVFREQ_NAME_LEN + 1];
	struct devfreq_governor *governor;

	ret = sscanf(buf, "%" __stringify(DEVFREQ_NAME_LEN) "s", str_governor);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&devfreq_list_lock);
	governor = find_devfreq_governor(str_governor);
	if (IS_ERR(governor)) {
		ret = PTR_ERR(governor);
		goto out;
	}
	if (df->governor == governor) {
		ret = 0;
		goto out;
	} else if (df->governor->immutable || governor->immutable) {
		ret = -EINVAL;
		goto out;
	}

	if (df->governor) {
		ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
		if (ret) {
			dev_warn(dev, "%s: Governor %s not stopped(%d)\n",
				 __func__, df->governor->name, ret);
			goto out;
		}
	}
	df->governor = governor;
	strncpy(df->governor_name, governor->name, DEVFREQ_NAME_LEN);
	ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
	if (ret)
		dev_warn(dev, "%s: Governor %s not started(%d)\n",
			 __func__, df->governor->name, ret);
out:
	mutex_unlock(&devfreq_list_lock);

	if (!ret)
		ret = count;
	return ret;
}
static DEVICE_ATTR_RW(governor);

static ssize_t available_governors_show(struct device *d,
					struct device_attribute *attr,
					char *buf)
{
	struct devfreq *df = to_devfreq(d);
	ssize_t count = 0;

	mutex_lock(&devfreq_list_lock);

	/*
	 * A devfreq device with an immutable governor (e.g., passive)
	 * shows only its own governor.
	 */
	if (df->governor->immutable) {
		count = scnprintf(&buf[count], DEVFREQ_NAME_LEN,
				  "%s ", df->governor_name);
	/*
	 * Otherwise, the devfreq device shows all registered governors
	 * except for immutable ones such as the passive governor.
	 */
	} else {
		struct devfreq_governor *governor;

		list_for_each_entry(governor, &devfreq_governor_list, node) {
			if (governor->immutable)
				continue;
			count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
					   "%s ", governor->name);
		}
	}

	mutex_unlock(&devfreq_list_lock);

	/* Truncate the trailing space */
	if (count)
		count--;

	count += sprintf(&buf[count], "\n");

	return count;
}
static DEVICE_ATTR_RO(available_governors);

static ssize_t cur_freq_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	unsigned long freq;
	struct devfreq *devfreq = to_devfreq(dev);

	if (devfreq->profile->get_cur_freq &&
		!devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
		return sprintf(buf, "%lu\n", freq);

	return sprintf(buf, "%lu\n", devfreq->previous_freq);
}
static DEVICE_ATTR_RO(cur_freq);

static ssize_t target_freq_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq);
}
static DEVICE_ATTR_RO(target_freq);

static ssize_t polling_interval_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", to_devfreq(dev)->profile->polling_ms);
}

static ssize_t polling_interval_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	unsigned int value;
	int ret;

	if (!df->governor)
		return -EINVAL;

	ret = sscanf(buf, "%u", &value);
	if (ret != 1)
		return -EINVAL;

	df->governor->event_handler(df, DEVFREQ_GOV_INTERVAL, &value);
	ret = count;

	return ret;
}
static DEVICE_ATTR_RW(polling_interval);

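/* min_freq/max_freq: user-requested frequency limits, enforced in update_devfreq(). */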
static ssize_t min_freq_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	unsigned long value;
	int ret;
	unsigned long max;

	ret = sscanf(buf, "%lu", &value);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&df->lock);
	max = df->max_freq;
	if (value && max && value > max) {
		ret = -EINVAL;
		goto unlock;
	}

	df->min_freq = value;
	update_devfreq(df);
	ret = count;
unlock:
	mutex_unlock(&df->lock);
	return ret;
}

static ssize_t min_freq_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	return sprintf(buf, "%lu\n", to_devfreq(dev)->min_freq);
}

static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	unsigned long value;
	int ret;
	unsigned long min;

	ret = sscanf(buf, "%lu", &value);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&df->lock);
	min = df->min_freq;
	if (value && min && value < min) {
		ret = -EINVAL;
		goto unlock;
	}

	df->max_freq = value;
	update_devfreq(df);
	ret = count;
unlock:
	mutex_unlock(&df->lock);
	return ret;
}
static DEVICE_ATTR_RW(min_freq);

static ssize_t max_freq_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	return sprintf(buf, "%lu\n", to_devfreq(dev)->max_freq);
}
static DEVICE_ATTR_RW(max_freq);

static ssize_t available_frequencies_show(struct device *d,
					  struct device_attribute *attr,
					  char *buf)
{
	struct devfreq *df = to_devfreq(d);
	struct device *dev = df->dev.parent;
	struct dev_pm_opp *opp;
	ssize_t count = 0;
	unsigned long freq = 0;

	do {
		opp = dev_pm_opp_find_freq_ceil(dev, &freq);
		if (IS_ERR(opp))
			break;

		dev_pm_opp_put(opp);
		count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
				   "%lu ", freq);
		freq++;
	} while (1);

	/* Truncate the trailing space */
	if (count)
		count--;

	count += sprintf(&buf[count], "\n");

	return count;
}
static DEVICE_ATTR_RO(available_frequencies);

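/* Show the transition table: counts of transitions between frequency states and time spent in each state. */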
static ssize_t trans_stat_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct devfreq *devfreq = to_devfreq(dev);
	ssize_t len;
	int i, j;
	unsigned int max_state = devfreq->profile->max_state;

	if (!devfreq->stop_polling &&
			devfreq_update_status(devfreq, devfreq->previous_freq))
		return 0;
	if (max_state == 0)
		return sprintf(buf, "Not Supported.\n");

	len = sprintf(buf, "     From  :   To\n");
	len += sprintf(buf + len, "           :");
	for (i = 0; i < max_state; i++)
		len += sprintf(buf + len, "%10lu",
				devfreq->profile->freq_table[i]);

	len += sprintf(buf + len, "   time(ms)\n");

	for (i = 0; i < max_state; i++) {
		if (devfreq->profile->freq_table[i]
					== devfreq->previous_freq) {
			len += sprintf(buf + len, "*");
		} else {
			len += sprintf(buf + len, " ");
		}
		len += sprintf(buf + len, "%10lu:",
				devfreq->profile->freq_table[i]);
		for (j = 0; j < max_state; j++)
			len += sprintf(buf + len, "%10u",
				devfreq->trans_table[(i * max_state) + j]);
		len += sprintf(buf + len, "%10u\n",
			jiffies_to_msecs(devfreq->time_in_state[i]));
	}

	len += sprintf(buf + len, "Total transition : %u\n",
			devfreq->total_trans);
	return len;
}
static DEVICE_ATTR_RO(trans_stat);

static struct attribute *devfreq_attrs[] = {
	&dev_attr_governor.attr,
	&dev_attr_available_governors.attr,
	&dev_attr_cur_freq.attr,
	&dev_attr_available_frequencies.attr,
	&dev_attr_target_freq.attr,
	&dev_attr_polling_interval.attr,
	&dev_attr_min_freq.attr,
	&dev_attr_max_freq.attr,
	&dev_attr_trans_stat.attr,
	NULL,
};
ATTRIBUTE_GROUPS(devfreq);

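/* Create the "devfreq" class and the freezable workqueue used for load monitoring. */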
static int __init devfreq_init(void)
{
	devfreq_class = class_create(THIS_MODULE, "devfreq");
	if (IS_ERR(devfreq_class)) {
		pr_err("%s: couldn't create class\n", __FILE__);
		return PTR_ERR(devfreq_class);
	}

	devfreq_wq = create_freezable_workqueue("devfreq_wq");
	if (!devfreq_wq) {
		class_destroy(devfreq_class);
		pr_err("%s: couldn't create workqueue\n", __FILE__);
		return -ENOMEM;
	}
	devfreq_class->dev_groups = devfreq_groups;

	return 0;
}
subsys_initcall(devfreq_init);

/*
 * The following are helper functions for devfreq user device drivers with
 * OPP framework.
 */

/**
 * devfreq_recommended_opp() - Helper function to get proper OPP for the
 *			       freq value given to target callback.
 * @dev:	The devfreq user device. (parent of devfreq)
 * @freq:	The frequency given to target function
 * @flags:	Flags handed from devfreq framework.
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
					   unsigned long *freq,
					   u32 flags)
{
	struct dev_pm_opp *opp;

	if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND) {
		/* The freq is an upper bound. opp should be lower */
		opp = dev_pm_opp_find_freq_floor(dev, freq);

		/* If not available, use the closest opp */
		if (opp == ERR_PTR(-ERANGE))
			opp = dev_pm_opp_find_freq_ceil(dev, freq);
	} else {
		/* The freq is a lower bound. opp should be higher */
		opp = dev_pm_opp_find_freq_ceil(dev, freq);

		/* If not available, use the closest opp */
		if (opp == ERR_PTR(-ERANGE))
			opp = dev_pm_opp_find_freq_floor(dev, freq);
	}

	return opp;
}
EXPORT_SYMBOL(devfreq_recommended_opp);

/**
 * devfreq_register_opp_notifier() - Helper function to get devfreq notified
 *				     of any changes in the OPP availability
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 */
int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
{
	return dev_pm_opp_register_notifier(dev, &devfreq->nb);
}
EXPORT_SYMBOL(devfreq_register_opp_notifier);

/**
 * devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq
 *				       notified of any changes in the OPP
 *				       availability
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 *
 * At exit() callback of devfreq_dev_profile, this must be included if
 * devfreq_recommended_opp is used.
 */
int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
{
	return dev_pm_opp_unregister_notifier(dev, &devfreq->nb);
}
EXPORT_SYMBOL(devfreq_unregister_opp_notifier);

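/* devres release callback for devm_devfreq_register_opp_notifier(). */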
static void devm_devfreq_opp_release(struct device *dev, void *res)
{
	devfreq_unregister_opp_notifier(dev, *(struct devfreq **)res);
}

/**
 * devm_devfreq_register_opp_notifier()
 *		- Resource-managed devfreq_register_opp_notifier()
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 */
int devm_devfreq_register_opp_notifier(struct device *dev,
				       struct devfreq *devfreq)
{
	struct devfreq **ptr;
	int ret;

	ptr = devres_alloc(devm_devfreq_opp_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = devfreq_register_opp_notifier(dev, devfreq);
	if (ret) {
		devres_free(ptr);
		return ret;
	}

	*ptr = devfreq;
	devres_add(dev, ptr);

	return 0;
}
EXPORT_SYMBOL(devm_devfreq_register_opp_notifier);

/**
 * devm_devfreq_unregister_opp_notifier()
 *		- Resource-managed devfreq_unregister_opp_notifier()
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 */
void devm_devfreq_unregister_opp_notifier(struct device *dev,
					  struct devfreq *devfreq)
{
	WARN_ON(devres_release(dev, devm_devfreq_opp_release,
			       devm_devfreq_dev_match, devfreq));
}
EXPORT_SYMBOL(devm_devfreq_unregister_opp_notifier);

/**
 * devfreq_register_notifier() - Register a driver with devfreq
 * @devfreq:	The devfreq object.
 * @nb:		The notifier block to register.
 * @list:	DEVFREQ_TRANSITION_NOTIFIER.
 */
int devfreq_register_notifier(struct devfreq *devfreq,
			      struct notifier_block *nb,
			      unsigned int list)
{
	int ret = 0;

	if (!devfreq)
		return -EINVAL;

	switch (list) {
	case DEVFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_register(
				&devfreq->transition_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(devfreq_register_notifier);

/*
 * devfreq_unregister_notifier() - Unregister a driver with devfreq
 * @devfreq:	The devfreq object.
 * @nb:		The notifier block to be unregistered.
 * @list:	DEVFREQ_TRANSITION_NOTIFIER.
 */
int devfreq_unregister_notifier(struct devfreq *devfreq,
				struct notifier_block *nb,
				unsigned int list)
{
	int ret = 0;

	if (!devfreq)
		return -EINVAL;

	switch (list) {
	case DEVFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_unregister(
				&devfreq->transition_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(devfreq_unregister_notifier);

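/* devres payload for devm_devfreq_{register,unregister}_notifier(). */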
struct devfreq_notifier_devres {
	struct devfreq *devfreq;
	struct notifier_block *nb;
	unsigned int list;
};

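/* devres release callback for devm_devfreq_register_notifier(). */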
static void devm_devfreq_notifier_release(struct device *dev, void *res)
{
	struct devfreq_notifier_devres *this = res;

	devfreq_unregister_notifier(this->devfreq, this->nb, this->list);
}

/**
 * devm_devfreq_register_notifier()
 *	- Resource-managed devfreq_register_notifier()
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 * @nb:		The notifier block to register.
 * @list:	DEVFREQ_TRANSITION_NOTIFIER.
 */
int devm_devfreq_register_notifier(struct device *dev,
				struct devfreq *devfreq,
				struct notifier_block *nb,
				unsigned int list)
{
	struct devfreq_notifier_devres *ptr;
	int ret;

	ptr = devres_alloc(devm_devfreq_notifier_release, sizeof(*ptr),
				GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = devfreq_register_notifier(devfreq, nb, list);
	if (ret) {
		devres_free(ptr);
		return ret;
	}

	ptr->devfreq = devfreq;
	ptr->nb = nb;
	ptr->list = list;
	devres_add(dev, ptr);

	return 0;
}
EXPORT_SYMBOL(devm_devfreq_register_notifier);

/**
 * devm_devfreq_unregister_notifier()
 *	- Resource-managed devfreq_unregister_notifier()
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 * @nb:		The notifier block to be unregistered.
 * @list:	DEVFREQ_TRANSITION_NOTIFIER.
 */
void devm_devfreq_unregister_notifier(struct device *dev,
				      struct devfreq *devfreq,
				      struct notifier_block *nb,
				      unsigned int list)
{
	WARN_ON(devres_release(dev, devm_devfreq_notifier_release,
			       devm_devfreq_dev_match, devfreq));
}
EXPORT_SYMBOL(devm_devfreq_unregister_notifier);