/*
 * drivers/cpufreq/cpufreq_governor.c
 *
 * CPUFREQ governors common code
 *
 * Copyright	(C) 2001 Russell King
 *		(C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *		(C) 2003 Jun Nakajima <jun.nakajima@intel.com>
 *		(C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *		(c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/slab.h>

#include "cpufreq_governor.h"

static struct attribute_group *get_sysfs_attr(struct dbs_data *dbs_data)
{
	if (have_governor_per_policy())
		return dbs_data->cdata->attr_group_gov_pol;
	else
		return dbs_data->cdata->attr_group_gov_sys;
}

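/*
 * dbs_check_cpu() - compute the load over the last sampling window for each
 * CPU in the policy and hand the highest value to the governor's
 * ->gov_check_cpu() callback, which decides the frequency change.
 */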
void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
{
	struct cpu_dbs_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	struct cpufreq_policy *policy = cdbs->shared->policy;
	unsigned int sampling_rate;
	unsigned int max_load = 0;
	unsigned int ignore_nice;
	unsigned int j;

	if (dbs_data->cdata->governor == GOV_ONDEMAND) {
		struct od_cpu_dbs_info_s *od_dbs_info =
				dbs_data->cdata->get_cpu_dbs_info_s(cpu);

		/*
		 * Sometimes, the ondemand governor uses an additional
		 * multiplier to give long delays. So apply this multiplier to
		 * the 'sampling_rate', so as to keep the wake-up-from-idle
		 * detection logic a bit conservative.
		 */
		sampling_rate = od_tuners->sampling_rate;
		sampling_rate *= od_dbs_info->rate_mult;

		ignore_nice = od_tuners->ignore_nice_load;
	} else {
		sampling_rate = cs_tuners->sampling_rate;
		ignore_nice = cs_tuners->ignore_nice_load;
	}

	/* Get Absolute Load */
	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs;
		u64 cur_wall_time, cur_idle_time;
		unsigned int idle_time, wall_time;
		unsigned int load;
		int io_busy = 0;

		j_cdbs = dbs_data->cdata->get_cpu_cdbs(j);

		/*
		 * For the purpose of ondemand, waiting for disk IO is
		 * an indication that you're performance critical, and
		 * not that the system is actually idle. So do not add
		 * the iowait time to the cpu idle time.
		 */
		if (dbs_data->cdata->governor == GOV_ONDEMAND)
			io_busy = od_tuners->io_is_busy;
		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time, io_busy);

		wall_time = (unsigned int)
			(cur_wall_time - j_cdbs->prev_cpu_wall);
		j_cdbs->prev_cpu_wall = cur_wall_time;

		if (cur_idle_time < j_cdbs->prev_cpu_idle)
			cur_idle_time = j_cdbs->prev_cpu_idle;

		idle_time = (unsigned int)
			(cur_idle_time - j_cdbs->prev_cpu_idle);
		j_cdbs->prev_cpu_idle = cur_idle_time;

		if (ignore_nice) {
			u64 cur_nice;
			unsigned long cur_nice_jiffies;

			cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
					cdbs->prev_cpu_nice;
			/*
			 * Assumption: nice time between sampling periods will
			 * be less than 2^32 jiffies for 32 bit sys
			 */
			cur_nice_jiffies = (unsigned long)
					cputime64_to_jiffies64(cur_nice);

			cdbs->prev_cpu_nice =
				kcpustat_cpu(j).cpustat[CPUTIME_NICE];
			idle_time += jiffies_to_usecs(cur_nice_jiffies);
		}

		if (unlikely(!wall_time || wall_time < idle_time))
			continue;

		/*
		 * If the CPU had gone completely idle, and a task just woke up
		 * on this CPU now, it would be unfair to calculate 'load' the
		 * usual way for this elapsed time-window, because it will show
		 * near-zero load, irrespective of how CPU intensive that task
		 * actually is. This is undesirable for latency-sensitive bursty
		 * workloads.
		 *
		 * To avoid this, we reuse the 'load' from the previous
		 * time-window and give this task a chance to start with a
		 * reasonably high CPU frequency. (However, we shouldn't over-do
		 * this copy, lest we get stuck at a high load (high frequency)
		 * for too long, even when the current system load has actually
		 * dropped down. So we perform the copy only once, upon the
		 * first wake-up from idle.)
		 *
		 * Detecting this situation is easy: the governor's deferrable
		 * timer would not have fired during CPU-idle periods. Hence
		 * an unusually large 'wall_time' (as compared to the sampling
		 * rate) indicates this scenario.
		 *
		 * prev_load can be zero in two cases and we must recalculate it
		 * for both cases:
		 * - during long idle intervals
		 * - explicitly set to zero
		 */
		if (unlikely(wall_time > (2 * sampling_rate) &&
			     j_cdbs->prev_load)) {
			load = j_cdbs->prev_load;

			/*
			 * Perform a destructive copy, to ensure that we copy
			 * the previous load only once, upon the first wake-up
			 * from idle.
			 */
			j_cdbs->prev_load = 0;
		} else {
			load = 100 * (wall_time - idle_time) / wall_time;
			j_cdbs->prev_load = load;
		}

		if (load > max_load)
			max_load = load;
	}

	dbs_data->cdata->gov_check_cpu(cpu, max_load);
}
EXPORT_SYMBOL_GPL(dbs_check_cpu);

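/*
 * gov_add_timers() - arm the per-CPU sampling timers of all CPUs in the
 * policy, @delay jiffies from now.
 */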
void gov_add_timers(struct cpufreq_policy *policy, unsigned int delay)
{
	struct dbs_data *dbs_data = policy->governor_data;
	struct cpu_dbs_info *cdbs;
	int cpu;

	for_each_cpu(cpu, policy->cpus) {
		cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
		cdbs->timer.expires = jiffies + delay;
		add_timer_on(&cdbs->timer, cpu);
	}
}
EXPORT_SYMBOL_GPL(gov_add_timers);

static inline void gov_cancel_timers(struct cpufreq_policy *policy)
{
	struct dbs_data *dbs_data = policy->governor_data;
	struct cpu_dbs_info *cdbs;
	int i;

	for_each_cpu(i, policy->cpus) {
		cdbs = dbs_data->cdata->get_cpu_cdbs(i);
		del_timer_sync(&cdbs->timer);
	}
}

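/*
 * gov_cancel_work() - stop sampling: cancel the timers and any work item
 * they may have queued, so no new samples run until gov_add_timers() is
 * called again.
 */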
void gov_cancel_work(struct cpu_common_dbs_info *shared)
{
	/* Tell dbs_timer_handler() to skip queuing up work items. */
	atomic_inc(&shared->skip_work);
	/*
	 * If dbs_timer_handler() is already running, it may not notice the
	 * incremented skip_work, so wait for it to complete to prevent its work
	 * item from being queued up after the cancel_work_sync() below.
	 */
	gov_cancel_timers(shared->policy);
	/*
	 * In case dbs_timer_handler() managed to run and spawn a work item
	 * before the timers have been canceled, wait for that work item to
	 * complete and then cancel all of the timers set up by it. If
	 * dbs_timer_handler() runs again at that point, it will see the
	 * positive value of skip_work and won't spawn any more work items.
	 */
	cancel_work_sync(&shared->work);
	gov_cancel_timers(shared->policy);
	atomic_set(&shared->skip_work, 0);
}
EXPORT_SYMBOL_GPL(gov_cancel_work);

/* Return true if the CPU load needs to be evaluated again, false otherwise. */
static bool need_load_eval(struct cpu_common_dbs_info *shared,
			   unsigned int sampling_rate)
{
	if (policy_is_shared(shared->policy)) {
		ktime_t time_now = ktime_get();
		s64 delta_us = ktime_us_delta(time_now, shared->time_stamp);

		/* Do nothing if we have sampled recently */
		if (delta_us < (s64)(sampling_rate / 2))
			return false;
		else
			shared->time_stamp = time_now;
	}

	return true;
}

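/*
 * dbs_work_handler() - process-context part of a sample: cancel the timers,
 * let the governor evaluate the load and pick a frequency, then re-arm the
 * timers with the delay it returned.
 */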
static void dbs_work_handler(struct work_struct *work)
{
	struct cpu_common_dbs_info *shared = container_of(work, struct
					cpu_common_dbs_info, work);
	struct cpufreq_policy *policy;
	struct dbs_data *dbs_data;
	unsigned int sampling_rate, delay;
	bool eval_load;

	policy = shared->policy;
	dbs_data = policy->governor_data;

	/* Kill all timers */
	gov_cancel_timers(policy);

	if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
		struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;

		sampling_rate = cs_tuners->sampling_rate;
	} else {
		struct od_dbs_tuners *od_tuners = dbs_data->tuners;

		sampling_rate = od_tuners->sampling_rate;
	}

	eval_load = need_load_eval(shared, sampling_rate);

	/*
	 * Make sure cpufreq_governor_limits() isn't evaluating load in
	 * parallel.
	 */
	mutex_lock(&shared->timer_mutex);
	delay = dbs_data->cdata->gov_dbs_timer(policy, eval_load);
	mutex_unlock(&shared->timer_mutex);

	atomic_dec(&shared->skip_work);

	gov_add_timers(policy, delay);
}

static void dbs_timer_handler(unsigned long data)
{
	struct cpu_dbs_info *cdbs = (struct cpu_dbs_info *)data;
	struct cpu_common_dbs_info *shared = cdbs->shared;

	/*
	 * Timer handler may not be allowed to queue the work at the moment,
	 * because:
	 * - Another timer handler has done that
	 * - We are stopping the governor
	 * - Or we are updating the sampling rate of the ondemand governor
	 */
	if (atomic_inc_return(&shared->skip_work) > 1)
		atomic_dec(&shared->skip_work);
	else
		queue_work(system_wq, &shared->work);
}

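/* Store the new sampling rate in the governor-specific tuners structure. */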
static void set_sampling_rate(struct dbs_data *dbs_data,
			      unsigned int sampling_rate)
{
	if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
		struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;

		cs_tuners->sampling_rate = sampling_rate;
	} else {
		struct od_dbs_tuners *od_tuners = dbs_data->tuners;

		od_tuners->sampling_rate = sampling_rate;
	}
}

static int alloc_common_dbs_info(struct cpufreq_policy *policy,
				 struct common_dbs_data *cdata)
{
	struct cpu_common_dbs_info *shared;
	int j;

	/* Allocate memory for the common information for policy->cpus */
	shared = kzalloc(sizeof(*shared), GFP_KERNEL);
	if (!shared)
		return -ENOMEM;

	/* Set shared for all CPUs, online+offline */
	for_each_cpu(j, policy->related_cpus)
		cdata->get_cpu_cdbs(j)->shared = shared;

	mutex_init(&shared->timer_mutex);
	atomic_set(&shared->skip_work, 0);
	INIT_WORK(&shared->work, dbs_work_handler);
	return 0;
}

static void free_common_dbs_info(struct cpufreq_policy *policy,
				 struct common_dbs_data *cdata)
{
	struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(policy->cpu);
	struct cpu_common_dbs_info *shared = cdbs->shared;
	int j;

	mutex_destroy(&shared->timer_mutex);

	for_each_cpu(j, policy->cpus)
		cdata->get_cpu_cdbs(j)->shared = NULL;

	kfree(shared);
}

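/*
 * cpufreq_governor_init() - allocate (or reuse) the dbs_data for a policy,
 * derive the sampling rate from the hardware transition latency and create
 * the governor's sysfs attribute group.
 */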
static int cpufreq_governor_init(struct cpufreq_policy *policy,
				 struct dbs_data *dbs_data,
				 struct common_dbs_data *cdata)
{
	unsigned int latency;
	int ret;

	/* State should be equivalent to EXIT */
	if (policy->governor_data)
		return -EBUSY;

	if (dbs_data) {
		if (WARN_ON(have_governor_per_policy()))
			return -EINVAL;

		ret = alloc_common_dbs_info(policy, cdata);
		if (ret)
			return ret;

		dbs_data->usage_count++;
		policy->governor_data = dbs_data;
		return 0;
	}

	dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
	if (!dbs_data)
		return -ENOMEM;

	ret = alloc_common_dbs_info(policy, cdata);
	if (ret)
		goto free_dbs_data;

	dbs_data->cdata = cdata;
	dbs_data->usage_count = 1;

	ret = cdata->init(dbs_data, !policy->governor->initialized);
	if (ret)
		goto free_common_dbs_info;

	/* policy latency is in ns. Convert it to us first */
	latency = policy->cpuinfo.transition_latency / 1000;
	if (latency == 0)
		latency = 1;

	/* Bring kernel and HW constraints together */
	dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
					  MIN_LATENCY_MULTIPLIER * latency);
	set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate,
					latency * LATENCY_MULTIPLIER));

	if (!have_governor_per_policy())
		cdata->gdbs_data = dbs_data;

	ret = sysfs_create_group(get_governor_parent_kobj(policy),
				 get_sysfs_attr(dbs_data));
	if (ret)
		goto reset_gdbs_data;

	policy->governor_data = dbs_data;

	return 0;

reset_gdbs_data:
	if (!have_governor_per_policy())
		cdata->gdbs_data = NULL;
	cdata->exit(dbs_data, !policy->governor->initialized);
free_common_dbs_info:
	free_common_dbs_info(policy, cdata);
free_dbs_data:
	kfree(dbs_data);
	return ret;
}

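/*
 * cpufreq_governor_exit() - drop one reference to the dbs_data and, when the
 * last policy using it goes away, remove its sysfs group and free it.
 */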
static int cpufreq_governor_exit(struct cpufreq_policy *policy,
				 struct dbs_data *dbs_data)
{
	struct common_dbs_data *cdata = dbs_data->cdata;
	struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(policy->cpu);

	/* State should be equivalent to INIT */
	if (!cdbs->shared || cdbs->shared->policy)
		return -EBUSY;

	policy->governor_data = NULL;
	if (!--dbs_data->usage_count) {
		sysfs_remove_group(get_governor_parent_kobj(policy),
				   get_sysfs_attr(dbs_data));

		if (!have_governor_per_policy())
			cdata->gdbs_data = NULL;

		cdata->exit(dbs_data, policy->governor->initialized == 1);
		kfree(dbs_data);
	}

	free_common_dbs_info(policy, cdata);
	return 0;
}

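/*
 * cpufreq_governor_start() - take the initial idle/wall-time snapshots for
 * every CPU in the policy, set up the per-CPU timers and kick off sampling.
 */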
static int cpufreq_governor_start(struct cpufreq_policy *policy,
				  struct dbs_data *dbs_data)
{
	struct common_dbs_data *cdata = dbs_data->cdata;
	unsigned int sampling_rate, ignore_nice, j, cpu = policy->cpu;
	struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(cpu);
	struct cpu_common_dbs_info *shared = cdbs->shared;
	int io_busy = 0;

	if (!policy->cur)
		return -EINVAL;

	/* State should be equivalent to INIT */
	if (!shared || shared->policy)
		return -EBUSY;

	if (cdata->governor == GOV_CONSERVATIVE) {
		struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;

		sampling_rate = cs_tuners->sampling_rate;
		ignore_nice = cs_tuners->ignore_nice_load;
	} else {
		struct od_dbs_tuners *od_tuners = dbs_data->tuners;

		sampling_rate = od_tuners->sampling_rate;
		ignore_nice = od_tuners->ignore_nice_load;
		io_busy = od_tuners->io_is_busy;
	}

	shared->policy = policy;
	shared->time_stamp = ktime_get();

	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs = cdata->get_cpu_cdbs(j);
		unsigned int prev_load;

		j_cdbs->prev_cpu_idle =
			get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall, io_busy);

		prev_load = (unsigned int)(j_cdbs->prev_cpu_wall -
					   j_cdbs->prev_cpu_idle);
		j_cdbs->prev_load = 100 * prev_load /
				    (unsigned int)j_cdbs->prev_cpu_wall;

		if (ignore_nice)
			j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];

		__setup_timer(&j_cdbs->timer, dbs_timer_handler,
			      (unsigned long)j_cdbs,
			      TIMER_DEFERRABLE | TIMER_IRQSAFE);
	}

	if (cdata->governor == GOV_CONSERVATIVE) {
		struct cs_cpu_dbs_info_s *cs_dbs_info =
			cdata->get_cpu_dbs_info_s(cpu);

		cs_dbs_info->down_skip = 0;
		cs_dbs_info->requested_freq = policy->cur;
	} else {
		struct od_ops *od_ops = cdata->gov_ops;
		struct od_cpu_dbs_info_s *od_dbs_info = cdata->get_cpu_dbs_info_s(cpu);

		od_dbs_info->rate_mult = 1;
		od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
		od_ops->powersave_bias_init_cpu(cpu);
	}

	gov_add_timers(policy, delay_for_sampling_rate(sampling_rate));
	return 0;
}

static int cpufreq_governor_stop(struct cpufreq_policy *policy,
				 struct dbs_data *dbs_data)
{
	struct cpu_dbs_info *cdbs = dbs_data->cdata->get_cpu_cdbs(policy->cpu);
	struct cpu_common_dbs_info *shared = cdbs->shared;

	/* State should be equivalent to START */
	if (!shared || !shared->policy)
		return -EBUSY;

	gov_cancel_work(shared);
	shared->policy = NULL;

	return 0;
}

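/*
 * cpufreq_governor_limits() - clamp the current frequency to the new policy
 * limits and re-evaluate the load under the timer mutex.
 */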
static int cpufreq_governor_limits(struct cpufreq_policy *policy,
				   struct dbs_data *dbs_data)
{
	struct common_dbs_data *cdata = dbs_data->cdata;
	unsigned int cpu = policy->cpu;
	struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(cpu);

	/* State should be equivalent to START */
	if (!cdbs->shared || !cdbs->shared->policy)
		return -EBUSY;

	mutex_lock(&cdbs->shared->timer_mutex);
	if (policy->max < cdbs->shared->policy->cur)
		__cpufreq_driver_target(cdbs->shared->policy, policy->max,
					CPUFREQ_RELATION_H);
	else if (policy->min > cdbs->shared->policy->cur)
		__cpufreq_driver_target(cdbs->shared->policy, policy->min,
					CPUFREQ_RELATION_L);
	dbs_check_cpu(dbs_data, cpu);
	mutex_unlock(&cdbs->shared->timer_mutex);

	return 0;
}

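/*
 * cpufreq_governor_dbs() - common entry point for the ondemand and
 * conservative governors; dispatches cpufreq core events to the handlers
 * above under the governor mutex.
 */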
int cpufreq_governor_dbs(struct cpufreq_policy *policy,
			 struct common_dbs_data *cdata, unsigned int event)
{
	struct dbs_data *dbs_data;
	int ret;

	/* Lock governor to block concurrent initialization of governor */
	mutex_lock(&cdata->mutex);

	if (have_governor_per_policy())
		dbs_data = policy->governor_data;
	else
		dbs_data = cdata->gdbs_data;

	if (!dbs_data && (event != CPUFREQ_GOV_POLICY_INIT)) {
		ret = -EINVAL;
		goto unlock;
	}

	switch (event) {
	case CPUFREQ_GOV_POLICY_INIT:
		ret = cpufreq_governor_init(policy, dbs_data, cdata);
		break;
	case CPUFREQ_GOV_POLICY_EXIT:
		ret = cpufreq_governor_exit(policy, dbs_data);
		break;
	case CPUFREQ_GOV_START:
		ret = cpufreq_governor_start(policy, dbs_data);
		break;
	case CPUFREQ_GOV_STOP:
		ret = cpufreq_governor_stop(policy, dbs_data);
		break;
	case CPUFREQ_GOV_LIMITS:
		ret = cpufreq_governor_limits(policy, dbs_data);
		break;
	default:
		ret = -EINVAL;
	}

unlock:
	mutex_unlock(&cdata->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_governor_dbs);