/*
 * drivers/cpufreq/cpufreq_governor.c
 *
 * CPUFREQ governors common code
 *
 * Copyright	(C) 2001 Russell King
 *		(C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *		(C) 2003 Jun Nakajima <jun.nakajima@intel.com>
 *		(C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *		(c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/slab.h>

#include "cpufreq_governor.h"

static DEFINE_PER_CPU(struct cpu_dbs_info, cpu_dbs);
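
/*
 * gov_dbs_data_mutex serializes updates of gov->gdbs_data in the governor
 * init/exit paths; that pointer is shared by all policies when the governor
 * tunables are system-wide rather than per-policy.
 */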
static DEFINE_MUTEX(gov_dbs_data_mutex);

/* Common sysfs tunables */

/**
 * store_sampling_rate - update sampling rate effective immediately if needed.
 *
 * If new rate is smaller than the old, simply updating
 * dbs.sampling_rate might not be appropriate. For example, if the
 * original sampling_rate was 1 second and the requested new sampling rate is 10
 * ms because the user needs immediate reaction from ondemand governor, but not
 * sure if higher frequency will be required or not, then, the governor may
 * change the sampling rate too late; up to 1 second later. Thus, if we are
 * reducing the sampling rate, we need to make the new value effective
 * immediately.
 *
 * This must be called with dbs_data->mutex held, otherwise traversing
 * policy_dbs_list isn't safe.
 */
ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf,
			    size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	struct policy_dbs_info *policy_dbs;
	int ret;

	ret = sscanf(buf, "%u", &dbs_data->sampling_rate);
	if (ret != 1)
		return -EINVAL;
	/*
	 * We are operating under dbs_data->mutex and so the list and its
	 * entries can't be freed concurrently.
	 */
	list_for_each_entry(policy_dbs, &attr_set->policy_list, list) {
		mutex_lock(&policy_dbs->update_mutex);
		/*
		 * On 32-bit architectures this may race with the
		 * sample_delay_ns read in dbs_update_util_handler(), but that
		 * really doesn't matter. If the read returns a value that's
		 * too big, the sample will be skipped, but the next invocation
		 * of dbs_update_util_handler() (when the update has been
		 * completed) will take a sample.
		 *
		 * If this runs in parallel with dbs_work_handler(), we may end
		 * up overwriting the sample_delay_ns value that it has just
		 * written, but it will be corrected next time a sample is
		 * taken, so it shouldn't be significant.
		 */
		gov_update_sample_delay(policy_dbs, 0);
		mutex_unlock(&policy_dbs->update_mutex);
	}

	return count;
}
EXPORT_SYMBOL_GPL(store_sampling_rate);

/**
 * gov_update_cpu_data - Update CPU load data.
 * @dbs_data: Top-level governor data pointer.
 *
 * Update CPU load data for all CPUs in the domain governed by @dbs_data
 * (that may be a single policy or a bunch of them if governor tunables are
 * system-wide).
 *
 * Call under the @dbs_data mutex.
 */
void gov_update_cpu_data(struct dbs_data *dbs_data)
{
	struct policy_dbs_info *policy_dbs;

	list_for_each_entry(policy_dbs, &dbs_data->attr_set.policy_list, list) {
		unsigned int j;

		for_each_cpu(j, policy_dbs->policy->cpus) {
			struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

			j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_update_time,
								  dbs_data->io_is_busy);
			if (dbs_data->ignore_nice_load)
				j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
		}
	}
}
EXPORT_SYMBOL_GPL(gov_update_cpu_data);
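
/*
 * dbs_update() computes, for each CPU in the policy, the percentage of
 * non-idle time in the interval since that CPU's previous sample and returns
 * the maximum of those values.  It also stores in policy_dbs->idle_periods
 * the smallest number of whole sampling periods covered by any CPU's
 * interval, as a hint that the CPUs have been idle (UINT_MAX when no
 * interval exceeded twice the sampling rate).
 */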
unsigned int dbs_update(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int ignore_nice = dbs_data->ignore_nice_load;
	unsigned int max_load = 0, idle_periods = UINT_MAX;
	unsigned int sampling_rate, io_busy, j;

	/*
	 * Sometimes governors may use an additional multiplier to increase
	 * sample delays temporarily.  Apply that multiplier to sampling_rate
	 * so as to keep the wake-up-from-idle detection logic a bit
	 * conservative.
	 */
	sampling_rate = dbs_data->sampling_rate * policy_dbs->rate_mult;
	/*
	 * For the purpose of ondemand, waiting for disk IO is an indication
	 * that you're performance critical, and not that the system is
	 * actually idle, so do not add the iowait time to the CPU idle time
	 * then.
	 */
	io_busy = dbs_data->io_is_busy;

	/* Get Absolute Load */
	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
		u64 update_time, cur_idle_time;
		unsigned int idle_time, time_elapsed;
		unsigned int load;

		cur_idle_time = get_cpu_idle_time(j, &update_time, io_busy);

		time_elapsed = update_time - j_cdbs->prev_update_time;
		j_cdbs->prev_update_time = update_time;

		idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
		j_cdbs->prev_cpu_idle = cur_idle_time;

		if (ignore_nice) {
			u64 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];

			idle_time += div_u64(cur_nice - j_cdbs->prev_cpu_nice, NSEC_PER_USEC);
			j_cdbs->prev_cpu_nice = cur_nice;
		}

		if (unlikely(!time_elapsed)) {
			/*
			 * That can only happen when this function is called
			 * twice in a row with a very short interval between the
			 * calls, so the previous load value can be used then.
			 */
			load = j_cdbs->prev_load;
		} else if (unlikely(time_elapsed > 2 * sampling_rate &&
				    j_cdbs->prev_load)) {
			/*
			 * If the CPU had gone completely idle and a task has
			 * just woken up on this CPU now, it would be unfair to
			 * calculate 'load' the usual way for this elapsed
			 * time-window, because it would show near-zero load,
			 * irrespective of how CPU intensive that task actually
			 * was. This is undesirable for latency-sensitive bursty
			 * workloads.
			 *
			 * To avoid this, reuse the 'load' from the previous
			 * time-window and give this task a chance to start with
			 * a reasonably high CPU frequency. However, that
			 * shouldn't be over-done, lest we get stuck at a high
			 * load (high frequency) for too long, even when the
			 * current system load has actually dropped down, so
			 * clear prev_load to guarantee that the load will be
			 * computed again next time.
			 *
			 * Detecting this situation is easy: the governor's
			 * utilization update handler would not have run during
			 * CPU-idle periods. Hence, an unusually large
			 * 'time_elapsed' (as compared to the sampling rate)
			 * indicates this scenario.
			 */
			load = j_cdbs->prev_load;
			j_cdbs->prev_load = 0;
		} else {
			if (time_elapsed >= idle_time) {
				load = 100 * (time_elapsed - idle_time) / time_elapsed;
			} else {
				/*
				 * That can happen if idle_time is returned by
				 * get_cpu_idle_time_jiffy(). In that case
				 * idle_time is roughly equal to the difference
				 * between time_elapsed and "busy time" obtained
				 * from CPU statistics. Then, the "busy time"
				 * can end up being greater than time_elapsed
				 * (for example, if jiffies_64 and the CPU
				 * statistics are updated by different CPUs),
				 * so idle_time may in fact be negative. That
				 * means, though, that the CPU was busy all
				 * the time (on the rough average) during the
				 * last sampling interval and 100 can be
				 * returned as the load.
				 */
				load = (int)idle_time < 0 ? 100 : 0;
			}
			j_cdbs->prev_load = load;
		}

		if (time_elapsed > 2 * sampling_rate) {
			unsigned int periods = time_elapsed / sampling_rate;

			if (periods < idle_periods)
				idle_periods = periods;
		}

		if (load > max_load)
			max_load = load;
	}

	policy_dbs->idle_periods = idle_periods;

	return max_load;
}
EXPORT_SYMBOL_GPL(dbs_update);
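
/*
 * dbs_work_handler() runs in process context.  It asks the specific governor
 * for a new sample delay via gov->gov_dbs_update() under update_mutex and
 * then clears work_count and work_in_progress so that the utilization update
 * handler may queue further work.
 */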
static void dbs_work_handler(struct work_struct *work)
{
	struct policy_dbs_info *policy_dbs;
	struct cpufreq_policy *policy;
	struct dbs_governor *gov;

	policy_dbs = container_of(work, struct policy_dbs_info, work);
	policy = policy_dbs->policy;
	gov = dbs_governor_of(policy);

	/*
	 * Make sure cpufreq_governor_limits() isn't evaluating load or the
	 * ondemand governor isn't updating the sampling rate in parallel.
	 */
	mutex_lock(&policy_dbs->update_mutex);
	gov_update_sample_delay(policy_dbs, gov->gov_dbs_update(policy));
	mutex_unlock(&policy_dbs->update_mutex);

	/* Allow the utilization update handler to queue up more work. */
	atomic_set(&policy_dbs->work_count, 0);
	/*
	 * If the update below is reordered with respect to the sample delay
	 * modification, the utilization update handler may end up using a stale
	 * sample delay value.
	 */
	smp_wmb();
	policy_dbs->work_in_progress = false;
}
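
/*
 * The irq_work is queued from scheduler context by dbs_update_util_handler();
 * it only bounces the actual governor work to process context, where taking
 * policy_dbs->update_mutex is allowed.
 */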
static void dbs_irq_work(struct irq_work *irq_work)
{
	struct policy_dbs_info *policy_dbs;

	policy_dbs = container_of(irq_work, struct policy_dbs_info, irq_work);
	schedule_work_on(smp_processor_id(), &policy_dbs->work);
}

static void dbs_update_util_handler(struct update_util_data *data, u64 time,
				    unsigned int flags)
{
	struct cpu_dbs_info *cdbs = container_of(data, struct cpu_dbs_info, update_util);
	struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;
	u64 delta_ns, lst;

	if (!cpufreq_can_do_remote_dvfs(policy_dbs->policy))
		return;

	/*
	 * The work may not be allowed to be queued up right now.
	 * Possible reasons:
	 * - Work has already been queued up or is in progress.
	 * - It is too early (too little time from the previous sample).
	 */
	if (policy_dbs->work_in_progress)
		return;

	/*
	 * If the reads below are reordered before the check above, the value
	 * of sample_delay_ns used in the computation may be stale.
	 */
	smp_rmb();
	lst = READ_ONCE(policy_dbs->last_sample_time);
	delta_ns = time - lst;
	if ((s64)delta_ns < policy_dbs->sample_delay_ns)
		return;

	/*
	 * If the policy is not shared, the irq_work may be queued up right away
	 * at this point.  Otherwise, we need to ensure that only one of the
	 * CPUs sharing the policy will do that.
	 */
	if (policy_dbs->is_shared) {
		if (!atomic_add_unless(&policy_dbs->work_count, 1, 1))
			return;

		/*
		 * If another CPU updated last_sample_time in the meantime, we
		 * shouldn't be here, so clear the work counter and bail out.
		 */
		if (unlikely(lst != READ_ONCE(policy_dbs->last_sample_time))) {
			atomic_set(&policy_dbs->work_count, 0);
			return;
		}
	}

	policy_dbs->last_sample_time = time;
	policy_dbs->work_in_progress = true;
	irq_work_queue(&policy_dbs->irq_work);
}
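
/*
 * Install dbs_update_util_handler() as the scheduler utilization update
 * callback for every CPU in the policy and arm the initial sample delay.
 */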
static void gov_set_update_util(struct policy_dbs_info *policy_dbs,
				unsigned int delay_us)
{
	struct cpufreq_policy *policy = policy_dbs->policy;
	int cpu;

	gov_update_sample_delay(policy_dbs, delay_us);
	policy_dbs->last_sample_time = 0;

	for_each_cpu(cpu, policy->cpus) {
		struct cpu_dbs_info *cdbs = &per_cpu(cpu_dbs, cpu);

		cpufreq_add_update_util_hook(cpu, &cdbs->update_util,
					     dbs_update_util_handler);
	}
}

static inline void gov_clear_update_util(struct cpufreq_policy *policy)
{
	int i;

	for_each_cpu(i, policy->cpus)
		cpufreq_remove_update_util_hook(i);
}

static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy,
						     struct dbs_governor *gov)
{
	struct policy_dbs_info *policy_dbs;
	int j;

	/* Allocate memory for per-policy governor data. */
	policy_dbs = gov->alloc();
	if (!policy_dbs)
		return NULL;

	policy_dbs->policy = policy;
	mutex_init(&policy_dbs->update_mutex);
	atomic_set(&policy_dbs->work_count, 0);
	init_irq_work(&policy_dbs->irq_work, dbs_irq_work);
	INIT_WORK(&policy_dbs->work, dbs_work_handler);

	/* Set policy_dbs for all CPUs, online+offline */
	for_each_cpu(j, policy->related_cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

		j_cdbs->policy_dbs = policy_dbs;
	}
	return policy_dbs;
}

static void free_policy_dbs_info(struct policy_dbs_info *policy_dbs,
				 struct dbs_governor *gov)
{
	int j;

	mutex_destroy(&policy_dbs->update_mutex);

	for_each_cpu(j, policy_dbs->policy->related_cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

		j_cdbs->policy_dbs = NULL;
		j_cdbs->update_util.func = NULL;
	}
	gov->free(policy_dbs);
}
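
/*
 * cpufreq_dbs_governor_init() allocates the per-policy governor data and
 * either attaches the policy to an already existing dbs_data (system-wide
 * tunables) or creates a new dbs_data together with its sysfs kobject.
 */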
int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct dbs_data *dbs_data;
	struct policy_dbs_info *policy_dbs;
	int ret = 0;

	/* State should be equivalent to EXIT */
	if (policy->governor_data)
		return -EBUSY;

	policy_dbs = alloc_policy_dbs_info(policy, gov);
	if (!policy_dbs)
		return -ENOMEM;

	/* Protect gov->gdbs_data against concurrent updates. */
	mutex_lock(&gov_dbs_data_mutex);

	dbs_data = gov->gdbs_data;
	if (dbs_data) {
		if (WARN_ON(have_governor_per_policy())) {
			ret = -EINVAL;
			goto free_policy_dbs_info;
		}
		policy_dbs->dbs_data = dbs_data;
		policy->governor_data = policy_dbs;

		gov_attr_set_get(&dbs_data->attr_set, &policy_dbs->list);
		goto out;
	}

	dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
	if (!dbs_data) {
		ret = -ENOMEM;
		goto free_policy_dbs_info;
	}

	gov_attr_set_init(&dbs_data->attr_set, &policy_dbs->list);

	ret = gov->init(dbs_data);
	if (ret)
		goto free_policy_dbs_info;

	dbs_data->sampling_rate = cpufreq_policy_transition_delay_us(policy);

	if (!have_governor_per_policy())
		gov->gdbs_data = dbs_data;

	policy_dbs->dbs_data = dbs_data;
	policy->governor_data = policy_dbs;

	gov->kobj_type.sysfs_ops = &governor_sysfs_ops;
	ret = kobject_init_and_add(&dbs_data->attr_set.kobj, &gov->kobj_type,
				   get_governor_parent_kobj(policy),
				   "%s", gov->gov.name);
	if (!ret)
		goto out;

	/* Failure, so roll back. */
	pr_err("initialization failed (dbs_data kobject init error %d)\n", ret);

	policy->governor_data = NULL;

	if (!have_governor_per_policy())
		gov->gdbs_data = NULL;
	gov->exit(dbs_data);
	kfree(dbs_data);

free_policy_dbs_info:
	free_policy_dbs_info(policy_dbs, gov);

out:
	mutex_unlock(&gov_dbs_data_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_init);

void cpufreq_dbs_governor_exit(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int count;

	/* Protect gov->gdbs_data against concurrent updates. */
	mutex_lock(&gov_dbs_data_mutex);

	count = gov_attr_set_put(&dbs_data->attr_set, &policy_dbs->list);

	policy->governor_data = NULL;

	if (!count) {
		if (!have_governor_per_policy())
			gov->gdbs_data = NULL;

		gov->exit(dbs_data);
		kfree(dbs_data);
	}

	free_policy_dbs_info(policy_dbs, gov);

	mutex_unlock(&gov_dbs_data_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_exit);
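
/*
 * cpufreq_dbs_governor_start() seeds each CPU's idle-time bookkeeping so that
 * the first dbs_update() invocation computes the load from scratch, lets the
 * specific governor run its start callback and then registers the
 * utilization update hooks.
 */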
int cpufreq_dbs_governor_start(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int sampling_rate, ignore_nice, j;
	unsigned int io_busy;

	if (!policy->cur)
		return -EINVAL;

	policy_dbs->is_shared = policy_is_shared(policy);
	policy_dbs->rate_mult = 1;

	sampling_rate = dbs_data->sampling_rate;
	ignore_nice = dbs_data->ignore_nice_load;
	io_busy = dbs_data->io_is_busy;

	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

		j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_update_time, io_busy);
		/*
		 * Make the first invocation of dbs_update() compute the load.
		 */
		j_cdbs->prev_load = 0;

		if (ignore_nice)
			j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
	}

	gov->start(policy);

	gov_set_update_util(policy_dbs, sampling_rate);
	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_start);
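
/*
 * Stopping is the reverse of starting: remove the utilization update hooks
 * first, then flush any irq_work and work item still in flight before
 * resetting the work bookkeeping.
 */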
void cpufreq_dbs_governor_stop(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;

	gov_clear_update_util(policy_dbs->policy);
	irq_work_sync(&policy_dbs->irq_work);
	cancel_work_sync(&policy_dbs->work);
	atomic_set(&policy_dbs->work_count, 0);
	policy_dbs->work_in_progress = false;
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_stop);
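
/*
 * cpufreq_dbs_governor_limits() reacts to a change of the policy limits by
 * clamping the current frequency to them and forcing the next sample to be
 * taken as soon as possible (sample delay 0).
 */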
void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;

	mutex_lock(&policy_dbs->update_mutex);
	cpufreq_policy_apply_limits(policy);
	gov_update_sample_delay(policy_dbs, 0);

	mutex_unlock(&policy_dbs->update_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_limits);