/*
 * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework
 *          for Non-CPU Devices.
 *
 * Copyright (C) 2011 Samsung Electronics
 *      MyungJoo Ham <myungjoo.ham@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/opp.h>
#include <linux/devfreq.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include <linux/list.h>
#include <linux/printk.h>
#include <linux/hrtimer.h>
#include "governor.h"

static struct class *devfreq_class;

/*
 * devfreq core provides delayed work based load monitoring helper
 * functions. Governors can use these or can implement their own
 * monitoring mechanism.
 */
static struct workqueue_struct *devfreq_wq;

/* The list of all device-devfreq */
static LIST_HEAD(devfreq_list);
static DEFINE_MUTEX(devfreq_list_lock);

/**
 * find_device_devfreq() - find devfreq struct using device pointer
 * @dev:        device pointer used to lookup device devfreq.
 *
 * Search the list of device devfreqs and return the matched device's
 * devfreq info. devfreq_list_lock should be held by the caller.
 */
static struct devfreq *find_device_devfreq(struct device *dev)
{
        struct devfreq *tmp_devfreq;

        if (unlikely(IS_ERR_OR_NULL(dev))) {
                pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
                return ERR_PTR(-EINVAL);
        }
        WARN(!mutex_is_locked(&devfreq_list_lock),
             "devfreq_list_lock must be locked.");

        list_for_each_entry(tmp_devfreq, &devfreq_list, node) {
                if (tmp_devfreq->dev.parent == dev)
                        return tmp_devfreq;
        }

        return ERR_PTR(-ENODEV);
}

/**
 * devfreq_get_freq_level() - Lookup freq_table for the frequency
 * @devfreq:    the devfreq instance
 * @freq:       the target frequency
 */
static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
{
        int lev;

        for (lev = 0; lev < devfreq->profile->max_state; lev++)
                if (freq == devfreq->profile->freq_table[lev])
                        return lev;

        return -EINVAL;
}

/**
 * devfreq_update_status() - Update statistics of devfreq behavior
 * @devfreq:    the devfreq instance
 * @freq:       the update target frequency
 */
static int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
{
        int lev, prev_lev;
        unsigned long cur_time;

        lev = devfreq_get_freq_level(devfreq, freq);
        if (lev < 0)
                return lev;

        cur_time = jiffies;
        devfreq->time_in_state[lev] +=
                        cur_time - devfreq->last_stat_updated;
        if (freq != devfreq->previous_freq) {
                prev_lev = devfreq_get_freq_level(devfreq,
                                                devfreq->previous_freq);
                devfreq->trans_table[(prev_lev *
                                devfreq->profile->max_state) + lev]++;
                devfreq->total_trans++;
        }
        devfreq->last_stat_updated = cur_time;

        return 0;
}
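
/*
 * Illustrative note (not part of the original file): trans_table is a
 * max_state x max_state matrix stored row-major. For example, with
 * max_state == 3, a switch from level 0 to level 2 increments
 * trans_table[0 * 3 + 2], i.e. row "from", column "to".
 */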

/* Load monitoring helper functions for governors use */

/**
 * update_devfreq() - Reevaluate the device and configure frequency.
 * @devfreq:    the devfreq instance.
 *
 * Note: Lock devfreq->lock before calling update_devfreq
 *       This function is exported for governors.
 */
int update_devfreq(struct devfreq *devfreq)
{
        unsigned long freq;
        int err = 0;
        u32 flags = 0;

        if (!mutex_is_locked(&devfreq->lock)) {
                WARN(true, "devfreq->lock must be locked by the caller.\n");
                return -EINVAL;
        }

        /* Reevaluate the proper frequency */
        err = devfreq->governor->get_target_freq(devfreq, &freq);
        if (err)
                return err;

        /*
         * Adjust the frequency with user freq and QoS.
         *
         * List from the highest priority
         * max_freq (probably called by thermal when it's too hot)
         * min_freq
         */

        if (devfreq->min_freq && freq < devfreq->min_freq) {
                freq = devfreq->min_freq;
                flags &= ~DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use GLB */
        }
        if (devfreq->max_freq && freq > devfreq->max_freq) {
                freq = devfreq->max_freq;
                flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use LUB */
        }

        err = devfreq->profile->target(devfreq->dev.parent, &freq, flags);
        if (err)
                return err;

        if (devfreq->profile->freq_table)
                if (devfreq_update_status(devfreq, freq))
                        dev_err(&devfreq->dev,
                                "Couldn't update frequency transition information.\n");

        devfreq->previous_freq = freq;
        return err;
}
EXPORT_SYMBOL(update_devfreq);
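
/*
 * Usage sketch (illustrative, not part of the original file): a governor
 * re-triggering a frequency reevaluation must take devfreq->lock around
 * update_devfreq(). The foo_ name below is hypothetical.
 *
 *      static int foo_governor_kick(struct devfreq *devfreq)
 *      {
 *              int err;
 *
 *              mutex_lock(&devfreq->lock);
 *              err = update_devfreq(devfreq);
 *              mutex_unlock(&devfreq->lock);
 *
 *              return err;
 *      }
 */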

/**
 * devfreq_monitor() - Periodically poll devfreq objects.
 * @work:       the work struct used to run devfreq_monitor periodically.
 */
static void devfreq_monitor(struct work_struct *work)
{
        int err;
        struct devfreq *devfreq = container_of(work,
                                        struct devfreq, work.work);

        mutex_lock(&devfreq->lock);
        err = update_devfreq(devfreq);
        if (err)
                dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err);

        queue_delayed_work(devfreq_wq, &devfreq->work,
                                msecs_to_jiffies(devfreq->profile->polling_ms));
        mutex_unlock(&devfreq->lock);
}

/**
 * devfreq_monitor_start() - Start load monitoring of devfreq instance
 * @devfreq:    the devfreq instance.
 *
 * Helper function for starting devfreq device load monitoring. By
 * default delayed work based monitoring is supported. Function
 * to be called from governor in response to DEVFREQ_GOV_START
 * event when device is added to devfreq framework.
 */
void devfreq_monitor_start(struct devfreq *devfreq)
{
        INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
        if (devfreq->profile->polling_ms)
                queue_delayed_work(devfreq_wq, &devfreq->work,
                        msecs_to_jiffies(devfreq->profile->polling_ms));
}

/**
 * devfreq_monitor_stop() - Stop load monitoring of a devfreq instance
 * @devfreq:    the devfreq instance.
 *
 * Helper function to stop devfreq device load monitoring. Function
 * to be called from governor in response to DEVFREQ_GOV_STOP
 * event when device is removed from devfreq framework.
 */
void devfreq_monitor_stop(struct devfreq *devfreq)
{
        cancel_delayed_work_sync(&devfreq->work);
}

/**
 * devfreq_monitor_suspend() - Suspend load monitoring of a devfreq instance
 * @devfreq:    the devfreq instance.
 *
 * Helper function to suspend devfreq device load monitoring. Function
 * to be called from governor in response to DEVFREQ_GOV_SUSPEND
 * event or when polling interval is set to zero.
 *
 * Note: Though this function does the same as devfreq_monitor_stop(),
 * it is intentionally kept separate to provide hooks for collecting
 * transition statistics.
 */
void devfreq_monitor_suspend(struct devfreq *devfreq)
{
        mutex_lock(&devfreq->lock);
        if (devfreq->stop_polling) {
                mutex_unlock(&devfreq->lock);
                return;
        }

        devfreq->stop_polling = true;
        mutex_unlock(&devfreq->lock);
        cancel_delayed_work_sync(&devfreq->work);
}

/**
 * devfreq_monitor_resume() - Resume load monitoring of a devfreq instance
 * @devfreq:    the devfreq instance.
 *
 * Helper function to resume devfreq device load monitoring. Function
 * to be called from governor in response to DEVFREQ_GOV_RESUME
 * event or when polling interval is set to non-zero.
 */
void devfreq_monitor_resume(struct devfreq *devfreq)
{
        mutex_lock(&devfreq->lock);
        if (!devfreq->stop_polling)
                goto out;

        if (!delayed_work_pending(&devfreq->work) &&
                        devfreq->profile->polling_ms)
                queue_delayed_work(devfreq_wq, &devfreq->work,
                        msecs_to_jiffies(devfreq->profile->polling_ms));
        devfreq->stop_polling = false;

out:
        mutex_unlock(&devfreq->lock);
}

/**
 * devfreq_interval_update() - Update device devfreq monitoring interval
 * @devfreq:    the devfreq instance.
 * @delay:      new polling interval to be set.
 *
 * Helper function to set new load monitoring polling interval. Function
 * to be called from governor in response to DEVFREQ_GOV_INTERVAL event.
 */
void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay)
{
        unsigned int cur_delay = devfreq->profile->polling_ms;
        unsigned int new_delay = *delay;

        mutex_lock(&devfreq->lock);
        devfreq->profile->polling_ms = new_delay;

        if (devfreq->stop_polling)
                goto out;

        /* if new delay is zero, stop polling */
        if (!new_delay) {
                mutex_unlock(&devfreq->lock);
                cancel_delayed_work_sync(&devfreq->work);
                return;
        }

        /* if current delay is zero, start polling with new delay */
        if (!cur_delay) {
                queue_delayed_work(devfreq_wq, &devfreq->work,
                        msecs_to_jiffies(devfreq->profile->polling_ms));
                goto out;
        }

        /* if current delay is greater than new delay, restart polling */
        if (cur_delay > new_delay) {
                mutex_unlock(&devfreq->lock);
                cancel_delayed_work_sync(&devfreq->work);
                mutex_lock(&devfreq->lock);
                if (!devfreq->stop_polling)
                        queue_delayed_work(devfreq_wq, &devfreq->work,
                                msecs_to_jiffies(devfreq->profile->polling_ms));
        }
out:
        mutex_unlock(&devfreq->lock);
}

/**
 * devfreq_notifier_call() - Notify that the device frequency requirements
 *                           have been changed out of devfreq framework.
 * @nb:         the notifier_block (supposed to be devfreq->nb)
 * @type:       not used
 * @devp:       not used
 *
 * Called by a notifier that uses devfreq->nb.
 */
static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
                                 void *devp)
{
        struct devfreq *devfreq = container_of(nb, struct devfreq, nb);
        int ret;

        mutex_lock(&devfreq->lock);
        ret = update_devfreq(devfreq);
        mutex_unlock(&devfreq->lock);

        return ret;
}

/**
 * _remove_devfreq() - Remove devfreq from the list and release its resources.
 * @devfreq:    the devfreq struct
 * @skip:       skip calling device_unregister().
 */
static void _remove_devfreq(struct devfreq *devfreq, bool skip)
{
        mutex_lock(&devfreq_list_lock);
        if (IS_ERR(find_device_devfreq(devfreq->dev.parent))) {
                mutex_unlock(&devfreq_list_lock);
                dev_warn(&devfreq->dev, "releasing devfreq which doesn't exist\n");
                return;
        }
        list_del(&devfreq->node);
        mutex_unlock(&devfreq_list_lock);

        devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_STOP, NULL);

        if (devfreq->profile->exit)
                devfreq->profile->exit(devfreq->dev.parent);

        if (!skip && get_device(&devfreq->dev)) {
                device_unregister(&devfreq->dev);
                put_device(&devfreq->dev);
        }

        mutex_destroy(&devfreq->lock);
        kfree(devfreq);
}

/**
 * devfreq_dev_release() - Callback for struct device to release the device.
 * @dev:        the devfreq device
 *
 * This calls _remove_devfreq() if it has not been called already.
 * Note that devfreq_dev_release() could be called by _remove_devfreq() as
 * well as by others unregistering the device.
 */
static void devfreq_dev_release(struct device *dev)
{
        struct devfreq *devfreq = to_devfreq(dev);

        _remove_devfreq(devfreq, true);
}

/**
 * devfreq_add_device() - Add devfreq feature to the device
 * @dev:        the device to add devfreq feature.
 * @profile:    device-specific profile to run devfreq.
 * @governor:   the policy to choose frequency.
 * @data:       private data for the governor. The devfreq framework does not
 *              touch this value.
 */
struct devfreq *devfreq_add_device(struct device *dev,
                                   struct devfreq_dev_profile *profile,
                                   const struct devfreq_governor *governor,
                                   void *data)
{
        struct devfreq *devfreq;
        int err = 0;

        if (!dev || !profile || !governor) {
                dev_err(dev, "%s: Invalid parameters.\n", __func__);
                return ERR_PTR(-EINVAL);
        }

        mutex_lock(&devfreq_list_lock);
        devfreq = find_device_devfreq(dev);
        mutex_unlock(&devfreq_list_lock);
        if (!IS_ERR(devfreq)) {
                dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n", __func__);
                err = -EINVAL;
                goto err_out;
        }

        devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL);
        if (!devfreq) {
                dev_err(dev, "%s: Unable to create devfreq for the device\n",
                        __func__);
                err = -ENOMEM;
                goto err_out;
        }

        mutex_init(&devfreq->lock);
        mutex_lock(&devfreq->lock);
        devfreq->dev.parent = dev;
        devfreq->dev.class = devfreq_class;
        devfreq->dev.release = devfreq_dev_release;
        devfreq->profile = profile;
        devfreq->governor = governor;
        devfreq->previous_freq = profile->initial_freq;
        devfreq->data = data;
        devfreq->nb.notifier_call = devfreq_notifier_call;

        devfreq->trans_table = devm_kzalloc(dev, sizeof(unsigned int) *
                                                devfreq->profile->max_state *
                                                devfreq->profile->max_state,
                                                GFP_KERNEL);
        devfreq->time_in_state = devm_kzalloc(dev, sizeof(unsigned int) *
                                                devfreq->profile->max_state,
                                                GFP_KERNEL);
        devfreq->last_stat_updated = jiffies;

        dev_set_name(&devfreq->dev, "%s", dev_name(dev));
        err = device_register(&devfreq->dev);
        if (err) {
                put_device(&devfreq->dev);
                mutex_unlock(&devfreq->lock);
                goto err_dev;
        }

        mutex_unlock(&devfreq->lock);

        mutex_lock(&devfreq_list_lock);
        list_add(&devfreq->node, &devfreq_list);
        mutex_unlock(&devfreq_list_lock);

        err = devfreq->governor->event_handler(devfreq,
                                DEVFREQ_GOV_START, NULL);
        if (err) {
                dev_err(dev, "%s: Unable to start governor for the device\n",
                        __func__);
                goto err_init;
        }

        return devfreq;

err_init:
        list_del(&devfreq->node);
        device_unregister(&devfreq->dev);
err_dev:
        kfree(devfreq);
err_out:
        return ERR_PTR(err);
}
EXPORT_SYMBOL(devfreq_add_device);
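
/*
 * Illustrative driver sketch (not part of the original file): registering
 * a devfreq device from a driver's probe path. The profile callbacks,
 * frequencies and foo_ names are hypothetical; a real driver would pass
 * its own target()/get_dev_status() implementations and a governor such
 * as &devfreq_simple_ondemand.
 *
 *      static struct devfreq_dev_profile foo_profile = {
 *              .initial_freq   = 200000000,
 *              .polling_ms     = 100,
 *              .target         = foo_target,
 *              .get_dev_status = foo_get_dev_status,
 *      };
 *
 *      static int foo_probe(struct platform_device *pdev)
 *      {
 *              struct devfreq *df;
 *
 *              df = devfreq_add_device(&pdev->dev, &foo_profile,
 *                                      &devfreq_simple_ondemand, NULL);
 *              if (IS_ERR(df))
 *                      return PTR_ERR(df);
 *              platform_set_drvdata(pdev, df);
 *
 *              return 0;
 *      }
 */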

/**
 * devfreq_remove_device() - Remove devfreq feature from a device.
 * @devfreq:    the devfreq instance to be removed
 */
int devfreq_remove_device(struct devfreq *devfreq)
{
        if (!devfreq)
                return -EINVAL;

        _remove_devfreq(devfreq, false);

        return 0;
}
EXPORT_SYMBOL(devfreq_remove_device);

/**
 * devfreq_suspend_device() - Suspend devfreq of a device.
 * @devfreq:    the devfreq instance to be suspended
 */
int devfreq_suspend_device(struct devfreq *devfreq)
{
        if (!devfreq)
                return -EINVAL;

        return devfreq->governor->event_handler(devfreq,
                                DEVFREQ_GOV_SUSPEND, NULL);
}
EXPORT_SYMBOL(devfreq_suspend_device);

/**
 * devfreq_resume_device() - Resume devfreq of a device.
 * @devfreq:    the devfreq instance to be resumed
 */
int devfreq_resume_device(struct devfreq *devfreq)
{
        if (!devfreq)
                return -EINVAL;

        return devfreq->governor->event_handler(devfreq,
                                DEVFREQ_GOV_RESUME, NULL);
}
EXPORT_SYMBOL(devfreq_resume_device);
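
/*
 * Illustrative sketch (not part of the original file): pairing the above
 * with a driver's system PM callbacks so that load monitoring stops while
 * the device is suspended. The foo_ names are hypothetical.
 *
 *      static int foo_suspend(struct device *dev)
 *      {
 *              struct devfreq *df = dev_get_drvdata(dev);
 *
 *              return devfreq_suspend_device(df);
 *      }
 *
 *      static int foo_resume(struct device *dev)
 *      {
 *              struct devfreq *df = dev_get_drvdata(dev);
 *
 *              return devfreq_resume_device(df);
 *      }
 *
 *      static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
 */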

static ssize_t show_governor(struct device *dev,
                             struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name);
}

static ssize_t show_freq(struct device *dev,
                         struct device_attribute *attr, char *buf)
{
        unsigned long freq;
        struct devfreq *devfreq = to_devfreq(dev);

        if (devfreq->profile->get_cur_freq &&
                !devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
                return sprintf(buf, "%lu\n", freq);

        return sprintf(buf, "%lu\n", devfreq->previous_freq);
}

static ssize_t show_target_freq(struct device *dev,
                        struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq);
}

static ssize_t show_polling_interval(struct device *dev,
                                     struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", to_devfreq(dev)->profile->polling_ms);
}

static ssize_t store_polling_interval(struct device *dev,
                                      struct device_attribute *attr,
                                      const char *buf, size_t count)
{
        struct devfreq *df = to_devfreq(dev);
        unsigned int value;
        int ret;

        ret = sscanf(buf, "%u", &value);
        if (ret != 1)
                return -EINVAL;

        df->governor->event_handler(df, DEVFREQ_GOV_INTERVAL, &value);
        ret = count;

        return ret;
}
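
/*
 * Illustrative userspace view (not part of the original file): the
 * handlers above back per-device sysfs nodes, so the polling interval can
 * be inspected and tuned at runtime (device name hypothetical):
 *
 *      # cat /sys/class/devfreq/foo-device/polling_interval
 *      100
 *      # echo 50 > /sys/class/devfreq/foo-device/polling_interval
 */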

static ssize_t store_min_freq(struct device *dev, struct device_attribute *attr,
                              const char *buf, size_t count)
{
        struct devfreq *df = to_devfreq(dev);
        unsigned long value;
        int ret;
        unsigned long max;

        ret = sscanf(buf, "%lu", &value);
        if (ret != 1)
                return -EINVAL;

        mutex_lock(&df->lock);
        max = df->max_freq;
        if (value && max && value > max) {
                ret = -EINVAL;
                goto unlock;
        }

        df->min_freq = value;
        update_devfreq(df);
        ret = count;
unlock:
        mutex_unlock(&df->lock);
        return ret;
}

static ssize_t show_min_freq(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        return sprintf(buf, "%lu\n", to_devfreq(dev)->min_freq);
}

static ssize_t store_max_freq(struct device *dev, struct device_attribute *attr,
                              const char *buf, size_t count)
{
        struct devfreq *df = to_devfreq(dev);
        unsigned long value;
        int ret;
        unsigned long min;

        ret = sscanf(buf, "%lu", &value);
        if (ret != 1)
                return -EINVAL;

        mutex_lock(&df->lock);
        min = df->min_freq;
        if (value && min && value < min) {
                ret = -EINVAL;
                goto unlock;
        }

        df->max_freq = value;
        update_devfreq(df);
        ret = count;
unlock:
        mutex_unlock(&df->lock);
        return ret;
}

static ssize_t show_max_freq(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        return sprintf(buf, "%lu\n", to_devfreq(dev)->max_freq);
}

static ssize_t show_available_freqs(struct device *d,
                                    struct device_attribute *attr,
                                    char *buf)
{
        struct devfreq *df = to_devfreq(d);
        struct device *dev = df->dev.parent;
        struct opp *opp;
        ssize_t count = 0;
        unsigned long freq = 0;

        rcu_read_lock();
        do {
                opp = opp_find_freq_ceil(dev, &freq);
                if (IS_ERR(opp))
                        break;

                count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
                                   "%lu ", freq);
                freq++;
        } while (1);
        rcu_read_unlock();

        /* Truncate the trailing space */
        if (count)
                count--;

        count += sprintf(&buf[count], "\n");

        return count;
}

static ssize_t show_trans_table(struct device *dev, struct device_attribute *attr,
                                char *buf)
{
        struct devfreq *devfreq = to_devfreq(dev);
        ssize_t len;
        int i, j, err;
        unsigned int max_state = devfreq->profile->max_state;

        err = devfreq_update_status(devfreq, devfreq->previous_freq);
        if (err)
                return 0;

        len = sprintf(buf, " From : To\n");
        len += sprintf(buf + len, " :");
        for (i = 0; i < max_state; i++)
                len += sprintf(buf + len, "%8u",
                                devfreq->profile->freq_table[i]);

        len += sprintf(buf + len, " time(ms)\n");

        for (i = 0; i < max_state; i++) {
                if (devfreq->profile->freq_table[i]
                                        == devfreq->previous_freq) {
                        len += sprintf(buf + len, "*");
                } else {
                        len += sprintf(buf + len, " ");
                }
                len += sprintf(buf + len, "%8u:",
                                devfreq->profile->freq_table[i]);
                for (j = 0; j < max_state; j++)
                        len += sprintf(buf + len, "%8u",
                                devfreq->trans_table[(i * max_state) + j]);
                len += sprintf(buf + len, "%10u\n",
                        jiffies_to_msecs(devfreq->time_in_state[i]));
        }

        len += sprintf(buf + len, "Total transition : %u\n",
                        devfreq->total_trans);
        return len;
}

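/*
 * Illustrative sample (not part of the original file): roughly the
 * trans_stat output this handler produces for a hypothetical device with
 * three OPPs, where "*" marks the current frequency:
 *
 *      # cat /sys/class/devfreq/foo-device/trans_stat
 *       From : To
 *       :  100000  200000  400000 time(ms)
 *       100000:       0       3       1      1200
 *      *200000:       2       0       2      3700
 *       400000:       1       2       0       800
 *      Total transition : 11
 */
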
static struct device_attribute devfreq_attrs[] = {
        __ATTR(governor, S_IRUGO, show_governor, NULL),
        __ATTR(cur_freq, S_IRUGO, show_freq, NULL),
        __ATTR(available_frequencies, S_IRUGO, show_available_freqs, NULL),
        __ATTR(target_freq, S_IRUGO, show_target_freq, NULL),
        __ATTR(polling_interval, S_IRUGO | S_IWUSR, show_polling_interval,
               store_polling_interval),
        __ATTR(min_freq, S_IRUGO | S_IWUSR, show_min_freq, store_min_freq),
        __ATTR(max_freq, S_IRUGO | S_IWUSR, show_max_freq, store_max_freq),
        __ATTR(trans_stat, S_IRUGO, show_trans_table, NULL),
        { },
};

static int __init devfreq_init(void)
{
        devfreq_class = class_create(THIS_MODULE, "devfreq");
        if (IS_ERR(devfreq_class)) {
                pr_err("%s: couldn't create class\n", __FILE__);
                return PTR_ERR(devfreq_class);
        }

        /* create_freezable_workqueue() returns NULL on failure */
        devfreq_wq = create_freezable_workqueue("devfreq_wq");
        if (!devfreq_wq) {
                class_destroy(devfreq_class);
                pr_err("%s: couldn't create workqueue\n", __FILE__);
                return -ENOMEM;
        }
        devfreq_class->dev_attrs = devfreq_attrs;

        return 0;
}
subsys_initcall(devfreq_init);

static void __exit devfreq_exit(void)
{
        class_destroy(devfreq_class);
        destroy_workqueue(devfreq_wq);
}
module_exit(devfreq_exit);

/*
 * The following are helper functions for devfreq user device drivers with
 * OPP framework.
 */

/**
 * devfreq_recommended_opp() - Helper function to get proper OPP for the
 *                             freq value given to target callback.
 * @dev:        The devfreq user device. (parent of devfreq)
 * @freq:       The frequency given to target function
 * @flags:      Flags handed from devfreq framework.
 */
struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq,
                                    u32 flags)
{
        struct opp *opp;

        if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND) {
                /* The freq is an upper bound. opp should be lower */
                opp = opp_find_freq_floor(dev, freq);

                /* If not available, use the closest opp */
                if (opp == ERR_PTR(-ENODEV))
                        opp = opp_find_freq_ceil(dev, freq);
        } else {
                /* The freq is a lower bound. opp should be higher */
                opp = opp_find_freq_ceil(dev, freq);

                /* If not available, use the closest opp */
                if (opp == ERR_PTR(-ENODEV))
                        opp = opp_find_freq_floor(dev, freq);
        }

        return opp;
}
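
/*
 * Illustrative sketch (not part of the original file): a typical target()
 * callback resolving the requested frequency to an OPP. The foo_ name and
 * the clock/regulator step are hypothetical; rcu_read_lock() protects the
 * OPP accessors in this era of the OPP API.
 *
 *      static int foo_target(struct device *dev, unsigned long *freq,
 *                            u32 flags)
 *      {
 *              struct opp *opp;
 *              unsigned long rate, volt;
 *
 *              rcu_read_lock();
 *              opp = devfreq_recommended_opp(dev, freq, flags);
 *              if (IS_ERR(opp)) {
 *                      rcu_read_unlock();
 *                      return PTR_ERR(opp);
 *              }
 *              rate = opp_get_freq(opp);
 *              volt = opp_get_voltage(opp);
 *              rcu_read_unlock();
 *
 *              ... program clocks and regulators to rate and volt ...
 *              return 0;
 *      }
 */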

/**
 * devfreq_register_opp_notifier() - Helper function to get devfreq notified
 *                                   for any changes in the OPP availability
 * @dev:        The devfreq user device. (parent of devfreq)
 * @devfreq:    The devfreq object.
 */
int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
{
        struct srcu_notifier_head *nh = opp_get_notifier(dev);

        if (IS_ERR(nh))
                return PTR_ERR(nh);
        return srcu_notifier_chain_register(nh, &devfreq->nb);
}
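
/*
 * Illustrative sketch (not part of the original file): a driver typically
 * registers the notifier right after devfreq_add_device() in probe, so a
 * frequency reevaluation is triggered whenever an OPP is enabled or
 * disabled (e.g. by thermal). The foo_ name is hypothetical.
 *
 *      df = devfreq_add_device(dev, &foo_profile,
 *                              &devfreq_simple_ondemand, NULL);
 *      if (!IS_ERR(df))
 *              devfreq_register_opp_notifier(dev, df);
 */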

/**
 * devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq
 *                                     notified for any changes in the OPP
 *                                     availability
 * @dev:        The devfreq user device. (parent of devfreq)
 * @devfreq:    The devfreq object.
 *
 * At exit() callback of devfreq_dev_profile, this must be included if
 * devfreq_recommended_opp is used.
 */
int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
{
        struct srcu_notifier_head *nh = opp_get_notifier(dev);

        if (IS_ERR(nh))
                return PTR_ERR(nh);
        return srcu_notifier_chain_unregister(nh, &devfreq->nb);
}

MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
MODULE_DESCRIPTION("devfreq class support");
MODULE_LICENSE("GPL");