]>
Commit | Line | Data |
---|---|---|
a3c98b8b MH |
1 | /* |
2 | * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework | |
3 | * for Non-CPU Devices. | |
4 | * | |
5 | * Copyright (C) 2011 Samsung Electronics | |
6 | * MyungJoo Ham <myungjoo.ham@samsung.com> | |
7 | * | |
8 | * This program is free software; you can redistribute it and/or modify | |
9 | * it under the terms of the GNU General Public License version 2 as | |
10 | * published by the Free Software Foundation. | |
11 | */ | |
12 | ||
13 | #include <linux/kernel.h> | |
14 | #include <linux/sched.h> | |
15 | #include <linux/errno.h> | |
16 | #include <linux/err.h> | |
17 | #include <linux/init.h> | |
952f6d13 | 18 | #include <linux/module.h> |
a3c98b8b | 19 | #include <linux/slab.h> |
952f6d13 | 20 | #include <linux/stat.h> |
a3c98b8b MH |
21 | #include <linux/opp.h> |
22 | #include <linux/devfreq.h> | |
23 | #include <linux/workqueue.h> | |
24 | #include <linux/platform_device.h> | |
25 | #include <linux/list.h> | |
26 | #include <linux/printk.h> | |
27 | #include <linux/hrtimer.h> | |
28 | #include "governor.h" | |
29 | ||
30 | struct class *devfreq_class; | |
31 | ||
32 | /* | |
7e6fdd4b RV |
33 | * devfreq core provides delayed work based load monitoring helper |
34 | * functions. Governors can use these or can implement their own | |
35 | * monitoring mechanism. | |
a3c98b8b | 36 | */ |
a3c98b8b | 37 | static struct workqueue_struct *devfreq_wq; |
a3c98b8b MH |
38 | |
39 | /* The list of all device-devfreq */ | |
40 | static LIST_HEAD(devfreq_list); | |
41 | static DEFINE_MUTEX(devfreq_list_lock); | |
42 | ||
43 | /** | |
44 | * find_device_devfreq() - find devfreq struct using device pointer | |
45 | * @dev: device pointer used to lookup device devfreq. | |
46 | * | |
47 | * Search the list of device devfreqs and return the matched device's | |
48 | * devfreq info. devfreq_list_lock should be held by the caller. | |
49 | */ | |
50 | static struct devfreq *find_device_devfreq(struct device *dev) | |
51 | { | |
52 | struct devfreq *tmp_devfreq; | |
53 | ||
54 | if (unlikely(IS_ERR_OR_NULL(dev))) { | |
55 | pr_err("DEVFREQ: %s: Invalid parameters\n", __func__); | |
56 | return ERR_PTR(-EINVAL); | |
57 | } | |
58 | WARN(!mutex_is_locked(&devfreq_list_lock), | |
59 | "devfreq_list_lock must be locked."); | |
60 | ||
61 | list_for_each_entry(tmp_devfreq, &devfreq_list, node) { | |
62 | if (tmp_devfreq->dev.parent == dev) | |
63 | return tmp_devfreq; | |
64 | } | |
65 | ||
66 | return ERR_PTR(-ENODEV); | |
67 | } | |
68 | ||
7e6fdd4b RV |
69 | /* Load monitoring helper functions for governors use */ |
70 | ||
a3c98b8b MH |
/**
 * update_devfreq() - Reevaluate the device and configure frequency.
 * @devfreq:	the devfreq instance.
 *
 * Asks the governor for a target frequency, clamps it to the user
 * min/max limits and hands the result to the profile's target callback.
 *
 * Note: Lock devfreq->lock before calling update_devfreq
 * This function is exported for governors.
 */
int update_devfreq(struct devfreq *devfreq)
{
	unsigned long freq;
	int err = 0;
	u32 flags = 0;

	if (!mutex_is_locked(&devfreq->lock)) {
		WARN(true, "devfreq->lock must be locked by the caller.\n");
		return -EINVAL;
	}

	/* Reevaluate the proper frequency */
	err = devfreq->governor->get_target_freq(devfreq, &freq);
	if (err)
		return err;

	/*
	 * Adjust the frequency with user freq and QoS.
	 *
	 * List from the highest priority
	 * max_freq (probably called by thermal when it's too hot)
	 * min_freq
	 */

	if (devfreq->min_freq && freq < devfreq->min_freq) {
		freq = devfreq->min_freq;
		flags &= ~DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use GLB */
	}
	if (devfreq->max_freq && freq > devfreq->max_freq) {
		freq = devfreq->max_freq;
		flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use LUB */
	}

	err = devfreq->profile->target(devfreq->dev.parent, &freq, flags);
	if (err)
		return err;

	/* target() may have adjusted freq to what the hardware accepted. */
	devfreq->previous_freq = freq;
	return err;
}
118 | ||
7e6fdd4b RV |
/**
 * devfreq_monitor() - Periodically poll devfreq objects.
 * @work:	the work struct used to run devfreq_monitor periodically.
 *
 * Runs on devfreq_wq; reevaluates the device frequency, then requeues
 * itself so polling repeats every profile->polling_ms milliseconds.
 */
static void devfreq_monitor(struct work_struct *work)
{
	int err;
	struct devfreq *devfreq = container_of(work,
					struct devfreq, work.work);

	mutex_lock(&devfreq->lock);
	err = update_devfreq(devfreq);
	if (err)
		dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err);

	/* Re-arm the deferrable work for the next polling period. */
	queue_delayed_work(devfreq_wq, &devfreq->work,
				msecs_to_jiffies(devfreq->profile->polling_ms));
	mutex_unlock(&devfreq->lock);
}
139 | ||
/**
 * devfreq_monitor_start() - Start load monitoring of devfreq instance
 * @devfreq:	the devfreq instance.
 *
 * Helper function for starting devfreq device load monitoring. By
 * default delayed work based monitoring is supported. Function
 * to be called from governor in response to DEVFREQ_GOV_START
 * event when device is added to devfreq framework.
 */
void devfreq_monitor_start(struct devfreq *devfreq)
{
	INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
	/* A zero polling interval means periodic monitoring is disabled. */
	if (devfreq->profile->polling_ms)
		queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));
}
156 | ||
/**
 * devfreq_monitor_stop() - Stop load monitoring of a devfreq instance
 * @devfreq:	the devfreq instance.
 *
 * Helper function to stop devfreq device load monitoring. Function
 * to be called from governor in response to DEVFREQ_GOV_STOP
 * event when device is removed from devfreq framework.
 */
void devfreq_monitor_stop(struct devfreq *devfreq)
{
	/* Waits for an in-flight devfreq_monitor() run to finish. */
	cancel_delayed_work_sync(&devfreq->work);
}
169 | ||
/**
 * devfreq_monitor_suspend() - Suspend load monitoring of a devfreq instance
 * @devfreq:	the devfreq instance.
 *
 * Helper function to suspend devfreq device load monitoring. Function
 * to be called from governor in response to DEVFREQ_GOV_SUSPEND
 * event or when polling interval is set to zero.
 *
 * Note: Though this function is same as devfreq_monitor_stop(),
 * intentionally kept separate to provide hooks for collecting
 * transition statistics.
 */
void devfreq_monitor_suspend(struct devfreq *devfreq)
{
	mutex_lock(&devfreq->lock);
	/* Already suspended: nothing to do. */
	if (devfreq->stop_polling) {
		mutex_unlock(&devfreq->lock);
		return;
	}

	devfreq->stop_polling = true;
	/*
	 * Drop the lock before cancelling: devfreq_monitor() takes
	 * devfreq->lock, so cancelling synchronously while holding it
	 * could deadlock.
	 */
	mutex_unlock(&devfreq->lock);
	cancel_delayed_work_sync(&devfreq->work);
}
194 | ||
/**
 * devfreq_monitor_resume() - Resume load monitoring of a devfreq instance
 * @devfreq:	the devfreq instance.
 *
 * Helper function to resume devfreq device load monitoring. Function
 * to be called from governor in response to DEVFREQ_GOV_RESUME
 * event or when polling interval is set to non-zero.
 */
void devfreq_monitor_resume(struct devfreq *devfreq)
{
	mutex_lock(&devfreq->lock);
	/* Only resume if monitoring was actually suspended. */
	if (!devfreq->stop_polling)
		goto out;

	/* Requeue only if not already pending and polling is enabled. */
	if (!delayed_work_pending(&devfreq->work) &&
			devfreq->profile->polling_ms)
		queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));
	devfreq->stop_polling = false;

out:
	mutex_unlock(&devfreq->lock);
}
218 | ||
/**
 * devfreq_interval_update() - Update device devfreq monitoring interval
 * @devfreq:	the devfreq instance.
 * @delay:	new polling interval to be set.
 *
 * Helper function to set new load monitoring polling interval. Function
 * to be called from governor in response to DEVFREQ_GOV_INTERVAL event.
 */
void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay)
{
	unsigned int cur_delay = devfreq->profile->polling_ms;
	unsigned int new_delay = *delay;

	mutex_lock(&devfreq->lock);
	devfreq->profile->polling_ms = new_delay;

	/* Monitoring is suspended; the new interval takes effect on resume. */
	if (devfreq->stop_polling)
		goto out;

	/* if new delay is zero, stop polling */
	if (!new_delay) {
		/* Unlock before the sync cancel to avoid deadlocking with
		 * devfreq_monitor(), which takes devfreq->lock. */
		mutex_unlock(&devfreq->lock);
		cancel_delayed_work_sync(&devfreq->work);
		return;
	}

	/* if current delay is zero, start polling with new delay */
	if (!cur_delay) {
		queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));
		goto out;
	}

	/* if current delay is greater than new delay, restart polling */
	if (cur_delay > new_delay) {
		/* Drop the lock for the sync cancel, then recheck
		 * stop_polling: it may have changed while unlocked. */
		mutex_unlock(&devfreq->lock);
		cancel_delayed_work_sync(&devfreq->work);
		mutex_lock(&devfreq->lock);
		if (!devfreq->stop_polling)
			queue_delayed_work(devfreq_wq, &devfreq->work,
			      msecs_to_jiffies(devfreq->profile->polling_ms));
	}
out:
	mutex_unlock(&devfreq->lock);
}
264 | ||
a3c98b8b MH |
265 | /** |
266 | * devfreq_notifier_call() - Notify that the device frequency requirements | |
267 | * has been changed out of devfreq framework. | |
268 | * @nb the notifier_block (supposed to be devfreq->nb) | |
269 | * @type not used | |
270 | * @devp not used | |
271 | * | |
272 | * Called by a notifier that uses devfreq->nb. | |
273 | */ | |
274 | static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type, | |
275 | void *devp) | |
276 | { | |
277 | struct devfreq *devfreq = container_of(nb, struct devfreq, nb); | |
278 | int ret; | |
279 | ||
280 | mutex_lock(&devfreq->lock); | |
281 | ret = update_devfreq(devfreq); | |
282 | mutex_unlock(&devfreq->lock); | |
283 | ||
284 | return ret; | |
285 | } | |
286 | ||
/**
 * _remove_devfreq() - Remove devfreq from the list and release its resources.
 * @devfreq:	the devfreq struct
 * @skip:	skip calling device_unregister().
 *
 * Unlinks the instance, stops the governor, runs the profile's exit
 * callback and frees the structure.  Called from devfreq_remove_device()
 * with skip == false, and from the device release callback with
 * skip == true (to avoid re-entering device_unregister()).
 */
static void _remove_devfreq(struct devfreq *devfreq, bool skip)
{
	mutex_lock(&devfreq_list_lock);
	/* Bail out if the instance was already removed from the list. */
	if (IS_ERR(find_device_devfreq(devfreq->dev.parent))) {
		mutex_unlock(&devfreq_list_lock);
		dev_warn(&devfreq->dev, "releasing devfreq which doesn't exist\n");
		return;
	}
	list_del(&devfreq->node);
	mutex_unlock(&devfreq_list_lock);

	devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_STOP, NULL);

	if (devfreq->profile->exit)
		devfreq->profile->exit(devfreq->dev.parent);

	/* get_device() guards against a device that is already being freed. */
	if (!skip && get_device(&devfreq->dev)) {
		device_unregister(&devfreq->dev);
		put_device(&devfreq->dev);
	}

	mutex_destroy(&devfreq->lock);
	kfree(devfreq);
}
316 | ||
/**
 * devfreq_dev_release() - Callback for struct device to release the device.
 * @dev:	the devfreq device
 *
 * This calls _remove_devfreq() if _remove_devfreq() is not called.
 * Note that devfreq_dev_release() could be called by _remove_devfreq() as
 * well as by others unregistering the device.
 */
static void devfreq_dev_release(struct device *dev)
{
	struct devfreq *devfreq = to_devfreq(dev);

	/* skip == true: device_unregister() is already in progress. */
	_remove_devfreq(devfreq, true);
}
331 | ||
332 | /** | |
333 | * devfreq_add_device() - Add devfreq feature to the device | |
334 | * @dev: the device to add devfreq feature. | |
335 | * @profile: device-specific profile to run devfreq. | |
336 | * @governor: the policy to choose frequency. | |
337 | * @data: private data for the governor. The devfreq framework does not | |
338 | * touch this value. | |
339 | */ | |
340 | struct devfreq *devfreq_add_device(struct device *dev, | |
341 | struct devfreq_dev_profile *profile, | |
342 | const struct devfreq_governor *governor, | |
343 | void *data) | |
344 | { | |
345 | struct devfreq *devfreq; | |
346 | int err = 0; | |
347 | ||
348 | if (!dev || !profile || !governor) { | |
349 | dev_err(dev, "%s: Invalid parameters.\n", __func__); | |
350 | return ERR_PTR(-EINVAL); | |
351 | } | |
352 | ||
7e6fdd4b RV |
353 | mutex_lock(&devfreq_list_lock); |
354 | devfreq = find_device_devfreq(dev); | |
355 | mutex_unlock(&devfreq_list_lock); | |
356 | if (!IS_ERR(devfreq)) { | |
357 | dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n", __func__); | |
358 | err = -EINVAL; | |
359 | goto err_out; | |
a3c98b8b MH |
360 | } |
361 | ||
362 | devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL); | |
363 | if (!devfreq) { | |
364 | dev_err(dev, "%s: Unable to create devfreq for the device\n", | |
365 | __func__); | |
366 | err = -ENOMEM; | |
3f19f08a | 367 | goto err_out; |
a3c98b8b MH |
368 | } |
369 | ||
370 | mutex_init(&devfreq->lock); | |
371 | mutex_lock(&devfreq->lock); | |
372 | devfreq->dev.parent = dev; | |
373 | devfreq->dev.class = devfreq_class; | |
374 | devfreq->dev.release = devfreq_dev_release; | |
375 | devfreq->profile = profile; | |
376 | devfreq->governor = governor; | |
377 | devfreq->previous_freq = profile->initial_freq; | |
378 | devfreq->data = data; | |
a3c98b8b MH |
379 | devfreq->nb.notifier_call = devfreq_notifier_call; |
380 | ||
381 | dev_set_name(&devfreq->dev, dev_name(dev)); | |
382 | err = device_register(&devfreq->dev); | |
383 | if (err) { | |
384 | put_device(&devfreq->dev); | |
7e6fdd4b | 385 | mutex_unlock(&devfreq->lock); |
a3c98b8b MH |
386 | goto err_dev; |
387 | } | |
388 | ||
a3c98b8b MH |
389 | mutex_unlock(&devfreq->lock); |
390 | ||
a3c98b8b | 391 | mutex_lock(&devfreq_list_lock); |
a3c98b8b | 392 | list_add(&devfreq->node, &devfreq_list); |
7e6fdd4b | 393 | mutex_unlock(&devfreq_list_lock); |
a3c98b8b | 394 | |
7e6fdd4b RV |
395 | err = devfreq->governor->event_handler(devfreq, |
396 | DEVFREQ_GOV_START, NULL); | |
397 | if (err) { | |
398 | dev_err(dev, "%s: Unable to start governor for the device\n", | |
399 | __func__); | |
400 | goto err_init; | |
a3c98b8b | 401 | } |
7e6fdd4b | 402 | |
3f19f08a AL |
403 | return devfreq; |
404 | ||
a3c98b8b | 405 | err_init: |
7e6fdd4b | 406 | list_del(&devfreq->node); |
a3c98b8b MH |
407 | device_unregister(&devfreq->dev); |
408 | err_dev: | |
a3c98b8b | 409 | kfree(devfreq); |
3f19f08a AL |
410 | err_out: |
411 | return ERR_PTR(err); | |
a3c98b8b | 412 | } |
7e6fdd4b | 413 | EXPORT_SYMBOL(devfreq_add_device); |
a3c98b8b MH |
414 | |
/**
 * devfreq_remove_device() - Remove devfreq feature from a device.
 * @devfreq:	the devfreq instance to be removed
 *
 * Returns 0 on success, -EINVAL if @devfreq is NULL.
 */
int devfreq_remove_device(struct devfreq *devfreq)
{
	if (!devfreq)
		return -EINVAL;

	/* skip == false: unregister the device as part of the teardown. */
	_remove_devfreq(devfreq, false);

	return 0;
}
EXPORT_SYMBOL(devfreq_remove_device);
a3c98b8b | 429 | |
206c30cf RV |
/**
 * devfreq_suspend_device() - Suspend devfreq of a device.
 * @devfreq:	the devfreq instance to be suspended
 *
 * Forwards a DEVFREQ_GOV_SUSPEND event to the governor; returns the
 * governor's result, or -EINVAL if @devfreq is NULL.
 */
int devfreq_suspend_device(struct devfreq *devfreq)
{
	if (!devfreq)
		return -EINVAL;

	return devfreq->governor->event_handler(devfreq,
				DEVFREQ_GOV_SUSPEND, NULL);
}
EXPORT_SYMBOL(devfreq_suspend_device);
443 | ||
/**
 * devfreq_resume_device() - Resume devfreq of a device.
 * @devfreq:	the devfreq instance to be resumed
 *
 * Forwards a DEVFREQ_GOV_RESUME event to the governor; returns the
 * governor's result, or -EINVAL if @devfreq is NULL.
 */
int devfreq_resume_device(struct devfreq *devfreq)
{
	if (!devfreq)
		return -EINVAL;

	return devfreq->governor->event_handler(devfreq,
				DEVFREQ_GOV_RESUME, NULL);
}
EXPORT_SYMBOL(devfreq_resume_device);
457 | ||
9005b650 MH |
458 | static ssize_t show_governor(struct device *dev, |
459 | struct device_attribute *attr, char *buf) | |
460 | { | |
461 | return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name); | |
462 | } | |
463 | ||
464 | static ssize_t show_freq(struct device *dev, | |
465 | struct device_attribute *attr, char *buf) | |
466 | { | |
467 | return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq); | |
468 | } | |
469 | ||
470 | static ssize_t show_polling_interval(struct device *dev, | |
471 | struct device_attribute *attr, char *buf) | |
472 | { | |
473 | return sprintf(buf, "%d\n", to_devfreq(dev)->profile->polling_ms); | |
474 | } | |
475 | ||
476 | static ssize_t store_polling_interval(struct device *dev, | |
477 | struct device_attribute *attr, | |
478 | const char *buf, size_t count) | |
479 | { | |
480 | struct devfreq *df = to_devfreq(dev); | |
481 | unsigned int value; | |
482 | int ret; | |
483 | ||
484 | ret = sscanf(buf, "%u", &value); | |
485 | if (ret != 1) | |
486 | goto out; | |
487 | ||
7e6fdd4b | 488 | df->governor->event_handler(df, DEVFREQ_GOV_INTERVAL, &value); |
9005b650 MH |
489 | ret = count; |
490 | ||
9005b650 MH |
491 | out: |
492 | return ret; | |
493 | } | |
494 | ||
6530b9de MH |
495 | static ssize_t store_min_freq(struct device *dev, struct device_attribute *attr, |
496 | const char *buf, size_t count) | |
497 | { | |
498 | struct devfreq *df = to_devfreq(dev); | |
499 | unsigned long value; | |
500 | int ret; | |
501 | unsigned long max; | |
502 | ||
503 | ret = sscanf(buf, "%lu", &value); | |
504 | if (ret != 1) | |
505 | goto out; | |
506 | ||
507 | mutex_lock(&df->lock); | |
508 | max = df->max_freq; | |
509 | if (value && max && value > max) { | |
510 | ret = -EINVAL; | |
511 | goto unlock; | |
512 | } | |
513 | ||
514 | df->min_freq = value; | |
515 | update_devfreq(df); | |
516 | ret = count; | |
517 | unlock: | |
518 | mutex_unlock(&df->lock); | |
519 | out: | |
520 | return ret; | |
521 | } | |
522 | ||
523 | static ssize_t show_min_freq(struct device *dev, struct device_attribute *attr, | |
524 | char *buf) | |
525 | { | |
526 | return sprintf(buf, "%lu\n", to_devfreq(dev)->min_freq); | |
527 | } | |
528 | ||
529 | static ssize_t store_max_freq(struct device *dev, struct device_attribute *attr, | |
530 | const char *buf, size_t count) | |
531 | { | |
532 | struct devfreq *df = to_devfreq(dev); | |
533 | unsigned long value; | |
534 | int ret; | |
535 | unsigned long min; | |
536 | ||
537 | ret = sscanf(buf, "%lu", &value); | |
538 | if (ret != 1) | |
539 | goto out; | |
540 | ||
541 | mutex_lock(&df->lock); | |
542 | min = df->min_freq; | |
543 | if (value && min && value < min) { | |
544 | ret = -EINVAL; | |
545 | goto unlock; | |
546 | } | |
547 | ||
548 | df->max_freq = value; | |
549 | update_devfreq(df); | |
550 | ret = count; | |
551 | unlock: | |
552 | mutex_unlock(&df->lock); | |
553 | out: | |
554 | return ret; | |
555 | } | |
556 | ||
557 | static ssize_t show_max_freq(struct device *dev, struct device_attribute *attr, | |
558 | char *buf) | |
559 | { | |
560 | return sprintf(buf, "%lu\n", to_devfreq(dev)->max_freq); | |
561 | } | |
562 | ||
9005b650 MH |
/* Default sysfs attributes created for every devfreq device. */
static struct device_attribute devfreq_attrs[] = {
	__ATTR(governor, S_IRUGO, show_governor, NULL),
	__ATTR(cur_freq, S_IRUGO, show_freq, NULL),
	__ATTR(polling_interval, S_IRUGO | S_IWUSR, show_polling_interval,
	       store_polling_interval),
	__ATTR(min_freq, S_IRUGO | S_IWUSR, show_min_freq, store_min_freq),
	__ATTR(max_freq, S_IRUGO | S_IWUSR, show_max_freq, store_max_freq),
	{ },	/* sentinel */
};
572 | ||
a3c98b8b MH |
573 | static int __init devfreq_init(void) |
574 | { | |
575 | devfreq_class = class_create(THIS_MODULE, "devfreq"); | |
576 | if (IS_ERR(devfreq_class)) { | |
577 | pr_err("%s: couldn't create class\n", __FILE__); | |
578 | return PTR_ERR(devfreq_class); | |
579 | } | |
7e6fdd4b RV |
580 | |
581 | devfreq_wq = create_freezable_workqueue("devfreq_wq"); | |
582 | if (IS_ERR(devfreq_wq)) { | |
583 | class_destroy(devfreq_class); | |
584 | pr_err("%s: couldn't create workqueue\n", __FILE__); | |
585 | return PTR_ERR(devfreq_wq); | |
586 | } | |
9005b650 | 587 | devfreq_class->dev_attrs = devfreq_attrs; |
7e6fdd4b | 588 | |
a3c98b8b MH |
589 | return 0; |
590 | } | |
591 | subsys_initcall(devfreq_init); | |
592 | ||
/* Tear down the devfreq class and workqueue on module unload. */
static void __exit devfreq_exit(void)
{
	class_destroy(devfreq_class);
	destroy_workqueue(devfreq_wq);
}
module_exit(devfreq_exit);
599 | ||
600 | /* | |
601 | * The followings are helper functions for devfreq user device drivers with | |
602 | * OPP framework. | |
603 | */ | |
604 | ||
/**
 * devfreq_recommended_opp() - Helper function to get proper OPP for the
 *			     freq value given to target callback.
 * @dev:	The devfreq user device. (parent of devfreq)
 * @freq:	The frequency given to target function
 * @flags:	Flags handed from devfreq framework.
 *
 * Picks the OPP matching @freq according to the bound direction in
 * @flags, falling back to the closest OPP in the other direction when
 * no exact-side match exists.
 */
struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq,
				    u32 flags)
{
	struct opp *opp;

	if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND) {
		/* The freq is an upper bound. opp should be lower */
		opp = opp_find_freq_floor(dev, freq);

		/* If not available, use the closest opp */
		if (opp == ERR_PTR(-ENODEV))
			opp = opp_find_freq_ceil(dev, freq);
	} else {
		/* The freq is a lower bound. opp should be higher */
		opp = opp_find_freq_ceil(dev, freq);

		/* If not available, use the closest opp */
		if (opp == ERR_PTR(-ENODEV))
			opp = opp_find_freq_floor(dev, freq);
	}

	return opp;
}
636 | ||
/**
 * devfreq_register_opp_notifier() - Helper function to get devfreq notified
 *				   for any changes in the OPP availability
 *				   changes
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 *
 * Returns 0 on success, or the error from opp_get_notifier()/registration.
 */
int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
{
	struct srcu_notifier_head *nh = opp_get_notifier(dev);

	if (IS_ERR(nh))
		return PTR_ERR(nh);
	return srcu_notifier_chain_register(nh, &devfreq->nb);
}
652 | ||
/**
 * devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq
 *				     notified for any changes in the OPP
 *				     availability changes anymore.
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 *
 * At exit() callback of devfreq_dev_profile, this must be included if
 * devfreq_recommended_opp is used.
 */
int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
{
	struct srcu_notifier_head *nh = opp_get_notifier(dev);

	if (IS_ERR(nh))
		return PTR_ERR(nh);
	return srcu_notifier_chain_unregister(nh, &devfreq->nb);
}
671 | ||
672 | MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>"); | |
673 | MODULE_DESCRIPTION("devfreq class support"); | |
674 | MODULE_LICENSE("GPL"); |