/*
 * drivers/cpufreq/cpufreq_governor.c
 *
 * CPUFREQ governors common code
 *
 * Copyright	(C) 2001 Russell King
 *		(C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *		(C) 2003 Jun Nakajima <jun.nakajima@intel.com>
 *		(C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *		(c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/slab.h>

#include "cpufreq_governor.h"
DEFINE_MUTEX(dbs_data_mutex);
EXPORT_SYMBOL_GPL(dbs_data_mutex);
/* Common sysfs tunables */
/**
 * store_sampling_rate - update sampling rate effective immediately if needed.
 *
 * If the new rate is smaller than the old, simply updating
 * dbs_data->sampling_rate might not be appropriate. For example, if the
 * original sampling_rate was 1 second and the requested new sampling rate is
 * 10 ms because the user needs an immediate reaction from the ondemand
 * governor, but is not sure whether a higher frequency will be required, then
 * the governor may change the sampling rate too late, up to 1 second later.
 * Thus, if we are reducing the sampling rate, we need to make the new value
 * effective immediately.
 *
 * On the other hand, if the new rate is larger than the old, then we may
 * evaluate the load too soon, and it may be worth updating sample_delay_ns
 * then as well.
 *
 * This must be called with dbs_data->mutex held, otherwise traversing
 * policy_dbs_list isn't safe.
 */
ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
			    size_t count)
{
	struct policy_dbs_info *policy_dbs;
	unsigned int rate;
	int ret;

	ret = sscanf(buf, "%u", &rate);
	if (ret != 1)
		return -EINVAL;
	dbs_data->sampling_rate = max(rate, dbs_data->min_sampling_rate);
	/*
	 * We are operating under dbs_data->mutex and so the list and its
	 * entries can't be freed concurrently.
	 */
	list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list) {
		mutex_lock(&policy_dbs->timer_mutex);
		/*
		 * On 32-bit architectures this may race with the
		 * sample_delay_ns read in dbs_update_util_handler(), but that
		 * really doesn't matter.  If the read returns a value that's
		 * too big, the sample will be skipped, but the next invocation
		 * of dbs_update_util_handler() (when the update has been
		 * completed) will take a sample.  If the returned value is too
		 * small, the sample will be taken immediately, but that isn't
		 * a problem, as we want the new rate to take effect
		 * immediately anyway.
		 *
		 * If this runs in parallel with dbs_work_handler(), we may end
		 * up overwriting the sample_delay_ns value that it has just
		 * written, but the difference should not be too big and it
		 * will be corrected next time a sample is taken, so it
		 * shouldn't be significant.
		 */
		gov_update_sample_delay(policy_dbs, dbs_data->sampling_rate);
		mutex_unlock(&policy_dbs->timer_mutex);
	}

	return count;
}
EXPORT_SYMBOL_GPL(store_sampling_rate);
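/*
 * Illustrative sketch (not part of this file): a governor typically exposes
 * this helper through a governor_attr whose ->store callback delegates here.
 * The name "store_sampling_rate_gov" below is hypothetical:
 *
 *	static ssize_t store_sampling_rate_gov(struct dbs_data *dbs_data,
 *					       const char *buf, size_t count)
 *	{
 *		return store_sampling_rate(dbs_data, buf, count);
 *	}
 *
 * governor_store() below takes dbs_data->mutex before invoking ->store,
 * which satisfies the locking requirement documented above.
 */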
static inline struct dbs_data *to_dbs_data(struct kobject *kobj)
{
	return container_of(kobj, struct dbs_data, kobj);
}
static inline struct governor_attr *to_gov_attr(struct attribute *attr)
{
	return container_of(attr, struct governor_attr, attr);
}
static ssize_t governor_show(struct kobject *kobj, struct attribute *attr,
			     char *buf)
{
	struct dbs_data *dbs_data = to_dbs_data(kobj);
	struct governor_attr *gattr = to_gov_attr(attr);
	int ret = -EIO;

	if (gattr->show)
		ret = gattr->show(dbs_data, buf);

	return ret;
}
static ssize_t governor_store(struct kobject *kobj, struct attribute *attr,
			      const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(kobj);
	struct governor_attr *gattr = to_gov_attr(attr);
	int ret = -EIO;

	mutex_lock(&dbs_data->mutex);

	if (gattr->store)
		ret = gattr->store(dbs_data, buf, count);

	mutex_unlock(&dbs_data->mutex);

	return ret;
}
/*
 * Sysfs Ops for accessing governor attributes.
 *
 * All show/store invocations for governor-specific sysfs attributes go
 * through the show/store callbacks below first, which then dispatch to the
 * attribute-specific callback.
 */
static const struct sysfs_ops governor_sysfs_ops = {
	.show	= governor_show,
	.store	= governor_store,
};
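/*
 * Illustrative sketch (not part of this file): with the ops above installed
 * in gov->kobj_type (see cpufreq_governor_init() below), a write to a
 * governor attribute under sysfs flows as
 *
 *	sysfs write -> governor_sysfs_ops.store -> governor_store()
 *		    -> gattr->store() under dbs_data->mutex
 *
 * where a governor-specific attribute might be declared as (hypothetical
 * callback names):
 *
 *	static struct governor_attr sampling_rate =
 *		__ATTR(sampling_rate, 0644, show_sampling_rate_gov,
 *		       store_sampling_rate_gov);
 */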
unsigned int dbs_update(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int sampling_rate = dbs_data->sampling_rate;
	unsigned int ignore_nice = dbs_data->ignore_nice_load;
	unsigned int max_load = 0;
	unsigned int j;

	if (gov->governor == GOV_ONDEMAND) {
		struct od_cpu_dbs_info_s *od_dbs_info =
				gov->get_cpu_dbs_info_s(policy->cpu);

		/*
		 * Sometimes, the ondemand governor uses an additional
		 * multiplier to give long delays. So apply this multiplier to
		 * the 'sampling_rate', so as to keep the wake-up-from-idle
		 * detection logic a bit conservative.
		 */
		sampling_rate *= od_dbs_info->rate_mult;
	}
	/* Get Absolute Load */
	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs;
		u64 cur_wall_time, cur_idle_time;
		unsigned int idle_time, wall_time;
		unsigned int load;
		int io_busy = 0;

		j_cdbs = gov->get_cpu_cdbs(j);

		/*
		 * For the purpose of ondemand, waiting for disk IO is
		 * an indication that you're performance critical, and
		 * not that the system is actually idle. So do not add
		 * the iowait time to the cpu idle time.
		 */
		if (gov->governor == GOV_ONDEMAND)
			io_busy = od_tuners->io_is_busy;
		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time, io_busy);
		wall_time = cur_wall_time - j_cdbs->prev_cpu_wall;
		j_cdbs->prev_cpu_wall = cur_wall_time;

		if (cur_idle_time <= j_cdbs->prev_cpu_idle) {
			idle_time = 0;
		} else {
			idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
			j_cdbs->prev_cpu_idle = cur_idle_time;
		}
		if (ignore_nice) {
			u64 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];

			idle_time += cputime_to_usecs(cur_nice - j_cdbs->prev_cpu_nice);
			j_cdbs->prev_cpu_nice = cur_nice;
		}
		if (unlikely(!wall_time || wall_time < idle_time))
			continue;
		/*
		 * If the CPU had gone completely idle, and a task just woke up
		 * on this CPU now, it would be unfair to calculate 'load' the
		 * usual way for this elapsed time-window, because it will show
		 * near-zero load, irrespective of how CPU intensive that task
		 * actually is. This is undesirable for latency-sensitive bursty
		 * workloads.
		 *
		 * To avoid this, we reuse the 'load' from the previous
		 * time-window and give this task a chance to start with a
		 * reasonably high CPU frequency. (However, we shouldn't over-do
		 * this copy, lest we get stuck at a high load (high frequency)
		 * for too long, even when the current system load has actually
		 * dropped down. So we perform the copy only once, upon the
		 * first wake-up from idle.)
		 *
		 * Detecting this situation is easy: the governor's utilization
		 * update handler would not have run during CPU-idle periods.
		 * Hence, an unusually large 'wall_time' (as compared to the
		 * sampling rate) indicates this scenario.
		 *
		 * prev_load can be zero in two cases and we must recalculate it
		 * for both cases:
		 * - during long idle intervals
		 * - explicitly set to zero
		 */
		if (unlikely(wall_time > (2 * sampling_rate) &&
			     j_cdbs->prev_load)) {
			load = j_cdbs->prev_load;

			/*
			 * Perform a destructive copy, to ensure that we copy
			 * the previous load only once, upon the first wake-up
			 * from idle.
			 */
			j_cdbs->prev_load = 0;
		} else {
			load = 100 * (wall_time - idle_time) / wall_time;
			j_cdbs->prev_load = load;
		}

		if (load > max_load)
			max_load = load;
	}
	return max_load;
}
EXPORT_SYMBOL_GPL(dbs_update);
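/*
 * Worked example of the load arithmetic above (illustrative numbers): if a
 * CPU saw wall_time = 10000 us and idle_time = 2500 us since the previous
 * sample, then
 *
 *	load = 100 * (10000 - 2500) / 10000 = 75
 *
 * and dbs_update() returns the maximum such load across policy->cpus.  With
 * sampling_rate = 10000 us, a wall_time above 2 * 10000 us would instead take
 * the wake-up-from-idle path, which reuses prev_load exactly once.
 */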
void gov_set_update_util(struct policy_dbs_info *policy_dbs,
			 unsigned int delay_us)
{
	struct cpufreq_policy *policy = policy_dbs->policy;
	struct dbs_governor *gov = dbs_governor_of(policy);
	int cpu;

	gov_update_sample_delay(policy_dbs, delay_us);
	policy_dbs->last_sample_time = 0;

	for_each_cpu(cpu, policy->cpus) {
		struct cpu_dbs_info *cdbs = gov->get_cpu_cdbs(cpu);

		cpufreq_set_update_util_data(cpu, &cdbs->update_util);
	}
}
EXPORT_SYMBOL_GPL(gov_set_update_util);
static inline void gov_clear_update_util(struct cpufreq_policy *policy)
{
	int i;

	for_each_cpu(i, policy->cpus)
		cpufreq_set_update_util_data(i, NULL);

	synchronize_sched();
}
static void gov_cancel_work(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;

	gov_clear_update_util(policy_dbs->policy);
	irq_work_sync(&policy_dbs->irq_work);
	cancel_work_sync(&policy_dbs->work);
	atomic_set(&policy_dbs->work_count, 0);
	policy_dbs->work_in_progress = false;
}
static void dbs_work_handler(struct work_struct *work)
{
	struct policy_dbs_info *policy_dbs;
	struct cpufreq_policy *policy;
	struct dbs_governor *gov;
	unsigned int delay;

	policy_dbs = container_of(work, struct policy_dbs_info, work);
	policy = policy_dbs->policy;
	gov = dbs_governor_of(policy);

	/*
	 * Make sure cpufreq_governor_limits() isn't evaluating load or the
	 * ondemand governor isn't updating the sampling rate in parallel.
	 */
	mutex_lock(&policy_dbs->timer_mutex);
	delay = gov->gov_dbs_timer(policy);
	policy_dbs->sample_delay_ns = jiffies_to_nsecs(delay);
	mutex_unlock(&policy_dbs->timer_mutex);

	/* Allow the utilization update handler to queue up more work. */
	atomic_set(&policy_dbs->work_count, 0);
	/*
	 * If the update below is reordered with respect to the sample delay
	 * modification, the utilization update handler may end up using a stale
	 * sample delay value.
	 */
	smp_wmb();
	policy_dbs->work_in_progress = false;
}
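/*
 * Note on the barrier above (sketch of the pairing, no extra locking
 * implied): smp_wmb() orders the sample_delay_ns store before the
 * work_in_progress store, pairing with the smp_rmb() in
 * dbs_update_util_handler() below, which orders the work_in_progress check
 * before the sample_delay_ns read:
 *
 *	dbs_work_handler()		dbs_update_util_handler()
 *	write sample_delay_ns		read work_in_progress
 *	smp_wmb()			smp_rmb()
 *	write work_in_progress		read sample_delay_ns
 *
 * A reader that sees work_in_progress == false is thus guaranteed to see the
 * freshly written sample_delay_ns.
 */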
static void dbs_irq_work(struct irq_work *irq_work)
{
	struct policy_dbs_info *policy_dbs;

	policy_dbs = container_of(irq_work, struct policy_dbs_info, irq_work);
	schedule_work(&policy_dbs->work);
}
static void dbs_update_util_handler(struct update_util_data *data, u64 time,
				    unsigned long util, unsigned long max)
{
	struct cpu_dbs_info *cdbs = container_of(data, struct cpu_dbs_info, update_util);
	struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;
	u64 delta_ns;

	/*
	 * The work may not be allowed to be queued up right now.
	 * Possible reasons:
	 * - Work has already been queued up or is in progress.
	 * - It is too early (too little time from the previous sample).
	 */
	if (policy_dbs->work_in_progress)
		return;
	/*
	 * If the reads below are reordered before the check above, the value
	 * of sample_delay_ns used in the computation may be stale.
	 */
	smp_rmb();
	delta_ns = time - policy_dbs->last_sample_time;
	if ((s64)delta_ns < policy_dbs->sample_delay_ns)
		return;
	/*
	 * If the policy is not shared, the irq_work may be queued up right away
	 * at this point.  Otherwise, we need to ensure that only one of the
	 * CPUs sharing the policy will do that.
	 */
	if (policy_dbs->is_shared &&
	    !atomic_add_unless(&policy_dbs->work_count, 1, 1))
		return;

	policy_dbs->last_sample_time = time;
	policy_dbs->work_in_progress = true;
	irq_work_queue(&policy_dbs->irq_work);
}
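/*
 * Illustrative timing (not part of this file): with sample_delay_ns = 10 ms,
 * a utilization update at time T while last_sample_time = T - 4 ms gives
 * delta_ns = 4 ms < 10 ms, so the handler returns without queuing work.  An
 * update at T' >= last_sample_time + 10 ms queues the irq_work, which in
 * turn schedules dbs_work_handler() to take the sample in process context.
 */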
static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy,
						     struct dbs_governor *gov)
{
	struct policy_dbs_info *policy_dbs;
	int j;

	/* Allocate memory for the common information for policy->cpus */
	policy_dbs = kzalloc(sizeof(*policy_dbs), GFP_KERNEL);
	if (!policy_dbs)
		return NULL;

	policy_dbs->policy = policy;
	mutex_init(&policy_dbs->timer_mutex);
	atomic_set(&policy_dbs->work_count, 0);
	init_irq_work(&policy_dbs->irq_work, dbs_irq_work);
	INIT_WORK(&policy_dbs->work, dbs_work_handler);

	/* Set policy_dbs for all CPUs, online+offline */
	for_each_cpu(j, policy->related_cpus) {
		struct cpu_dbs_info *j_cdbs = gov->get_cpu_cdbs(j);

		j_cdbs->policy_dbs = policy_dbs;
		j_cdbs->update_util.func = dbs_update_util_handler;
	}
	return policy_dbs;
}
static void free_policy_dbs_info(struct cpufreq_policy *policy,
				 struct dbs_governor *gov)
{
	struct cpu_dbs_info *cdbs = gov->get_cpu_cdbs(policy->cpu);
	struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;
	int j;

	mutex_destroy(&policy_dbs->timer_mutex);

	for_each_cpu(j, policy->related_cpus) {
		struct cpu_dbs_info *j_cdbs = gov->get_cpu_cdbs(j);

		j_cdbs->policy_dbs = NULL;
		j_cdbs->update_util.func = NULL;
	}
	kfree(policy_dbs);
}
static int cpufreq_governor_init(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct dbs_data *dbs_data = gov->gdbs_data;
	struct policy_dbs_info *policy_dbs;
	unsigned int latency;
	int ret;

	/* State should be equivalent to EXIT */
	if (policy->governor_data)
		return -EBUSY;

	policy_dbs = alloc_policy_dbs_info(policy, gov);
	if (!policy_dbs)
		return -ENOMEM;

	/* Reuse an already existing set of tunables if there is one. */
	if (dbs_data) {
		if (WARN_ON(have_governor_per_policy())) {
			ret = -EINVAL;
			goto free_policy_dbs_info;
		}
		policy_dbs->dbs_data = dbs_data;
		policy->governor_data = policy_dbs;

		mutex_lock(&dbs_data->mutex);
		dbs_data->usage_count++;
		list_add(&policy_dbs->list, &dbs_data->policy_dbs_list);
		mutex_unlock(&dbs_data->mutex);

		return 0;
	}
	dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
	if (!dbs_data) {
		ret = -ENOMEM;
		goto free_policy_dbs_info;
	}

	INIT_LIST_HEAD(&dbs_data->policy_dbs_list);
	mutex_init(&dbs_data->mutex);

	ret = gov->init(dbs_data, !policy->governor->initialized);
	if (ret)
		goto free_policy_dbs_info;
	/* policy latency is in ns. Convert it to us first */
	latency = policy->cpuinfo.transition_latency / 1000;
	if (latency == 0)
		latency = 1;

	/* Bring kernel and HW constraints together */
	dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
					  MIN_LATENCY_MULTIPLIER * latency);
	dbs_data->sampling_rate = max(dbs_data->min_sampling_rate,
				      LATENCY_MULTIPLIER * latency);
	if (!have_governor_per_policy())
		gov->gdbs_data = dbs_data;

	policy->governor_data = policy_dbs;

	policy_dbs->dbs_data = dbs_data;
	dbs_data->usage_count = 1;
	list_add(&policy_dbs->list, &dbs_data->policy_dbs_list);

	gov->kobj_type.sysfs_ops = &governor_sysfs_ops;
	ret = kobject_init_and_add(&dbs_data->kobj, &gov->kobj_type,
				   get_governor_parent_kobj(policy),
				   "%s", gov->gov.name);
	if (!ret)
		return 0;
	/* Failure, so roll back. */
	pr_err("cpufreq: Governor initialization failed (dbs_data kobject init error %d)\n", ret);

	policy->governor_data = NULL;

	if (!have_governor_per_policy())
		gov->gdbs_data = NULL;
	gov->exit(dbs_data, !policy->governor->initialized);
	kfree(dbs_data);

free_policy_dbs_info:
	free_policy_dbs_info(policy, gov);
	return ret;
}
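/*
 * Sketch of the event sequence the cpufreq core drives through
 * cpufreq_governor_dbs() at the end of this file (ordering per the cpufreq
 * governor API of this kernel version):
 *
 *	CPUFREQ_GOV_POLICY_INIT -> cpufreq_governor_init()
 *	CPUFREQ_GOV_START       -> cpufreq_governor_start()
 *	CPUFREQ_GOV_LIMITS      -> cpufreq_governor_limits()  (repeatable)
 *	CPUFREQ_GOV_STOP        -> cpufreq_governor_stop()
 *	CPUFREQ_GOV_POLICY_EXIT -> cpufreq_governor_exit()
 */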
static int cpufreq_governor_exit(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	int count;

	mutex_lock(&dbs_data->mutex);
	list_del(&policy_dbs->list);
	count = --dbs_data->usage_count;
	mutex_unlock(&dbs_data->mutex);
	if (!count) {
		kobject_put(&dbs_data->kobj);

		policy->governor_data = NULL;

		if (!have_governor_per_policy())
			gov->gdbs_data = NULL;

		gov->exit(dbs_data, policy->governor->initialized == 1);
		mutex_destroy(&dbs_data->mutex);
		kfree(dbs_data);
	} else {
		policy->governor_data = NULL;
	}

	free_policy_dbs_info(policy, gov);
	return 0;
}
static int cpufreq_governor_start(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int sampling_rate, ignore_nice, j, cpu = policy->cpu;
	int io_busy = 0;

	if (!policy->cur)
		return -EINVAL;

	policy_dbs->is_shared = policy_is_shared(policy);

	sampling_rate = dbs_data->sampling_rate;
	ignore_nice = dbs_data->ignore_nice_load;

	if (gov->governor == GOV_ONDEMAND) {
		struct od_dbs_tuners *od_tuners = dbs_data->tuners;

		io_busy = od_tuners->io_is_busy;
	}
	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs = gov->get_cpu_cdbs(j);
		unsigned int prev_load;

		j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall, io_busy);

		prev_load = j_cdbs->prev_cpu_wall - j_cdbs->prev_cpu_idle;
		j_cdbs->prev_load = 100 * prev_load / (unsigned int)j_cdbs->prev_cpu_wall;

		if (ignore_nice)
			j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
	}
	if (gov->governor == GOV_CONSERVATIVE) {
		struct cs_cpu_dbs_info_s *cs_dbs_info =
			gov->get_cpu_dbs_info_s(cpu);

		cs_dbs_info->down_skip = 0;
		cs_dbs_info->requested_freq = policy->cur;
	} else {
		struct od_ops *od_ops = gov->gov_ops;
		struct od_cpu_dbs_info_s *od_dbs_info = gov->get_cpu_dbs_info_s(cpu);

		od_dbs_info->rate_mult = 1;
		od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
		od_ops->powersave_bias_init_cpu(cpu);
	}

	gov_set_update_util(policy_dbs, sampling_rate);
	return 0;
}
static int cpufreq_governor_stop(struct cpufreq_policy *policy)
{
	gov_cancel_work(policy);
	return 0;
}
static int cpufreq_governor_limits(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;

	mutex_lock(&policy_dbs->timer_mutex);

	if (policy->max < policy->cur)
		__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
	else if (policy->min > policy->cur)
		__cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);

	gov_update_sample_delay(policy_dbs, 0);

	mutex_unlock(&policy_dbs->timer_mutex);

	return 0;
}
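/*
 * Worked example for the clamping above (illustrative numbers): if the user
 * lowers policy->max to 1.2 GHz while policy->cur is 1.5 GHz, the first
 * branch retargets to 1.2 GHz with CPUFREQ_RELATION_H (highest frequency at
 * or below the target).  Resetting the sample delay to 0 then makes the next
 * utilization update take a fresh sample right away.
 */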
int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event)
{
	int ret = -EINVAL;

	/* Lock governor to block concurrent initialization of governor */
	mutex_lock(&dbs_data_mutex);

	if (event == CPUFREQ_GOV_POLICY_INIT) {
		ret = cpufreq_governor_init(policy);
	} else if (policy->governor_data) {
		switch (event) {
		case CPUFREQ_GOV_POLICY_EXIT:
			ret = cpufreq_governor_exit(policy);
			break;
		case CPUFREQ_GOV_START:
			ret = cpufreq_governor_start(policy);
			break;
		case CPUFREQ_GOV_STOP:
			ret = cpufreq_governor_stop(policy);
			break;
		case CPUFREQ_GOV_LIMITS:
			ret = cpufreq_governor_limits(policy);
			break;
		}
	}

	mutex_unlock(&dbs_data_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_governor_dbs);
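/*
 * Illustrative sketch (hypothetical "foo" governor, not part of this file):
 * a governor built on this common code embeds a struct cpufreq_governor
 * whose ->governor callback is cpufreq_governor_dbs():
 *
 *	static struct dbs_governor foo_dbs_gov = {
 *		.gov = {
 *			.name = "foo",
 *			.governor = cpufreq_governor_dbs,
 *			.max_transition_latency = TRANSITION_LATENCY_LIMIT,
 *			.owner = THIS_MODULE,
 *		},
 *		...
 *	};
 *
 * The cpufreq core then routes INIT/START/STOP/LIMITS events for any policy
 * using "foo" into the handlers above, serialized by dbs_data_mutex.
 */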