/*
 * drivers/cpufreq/cpufreq_governor.c
 *
 * CPUFREQ governors common code
 *
 * Copyright	(C) 2001 Russell King
 *		(C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *		(C) 2003 Jun Nakajima <jun.nakajima@intel.com>
 *		(C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *		(c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/slab.h>

#include "cpufreq_governor.h"

DEFINE_MUTEX(dbs_data_mutex);
EXPORT_SYMBOL_GPL(dbs_data_mutex);

static struct attribute_group *get_sysfs_attr(struct dbs_governor *gov)
{
	return have_governor_per_policy() ?
	       gov->attr_group_gov_pol : gov->attr_group_gov_sys;
}
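
/*
 * dbs_check_cpu() estimates the load of each CPU in policy->cpus over the
 * last sampling window as 100 * (wall_time - idle_time) / wall_time and
 * hands the maximum across those CPUs to the governor's ->gov_check_cpu()
 * callback.  Worked example (illustrative numbers only): a window with
 * wall_time of 20000 us and idle_time of 5000 us yields a load of
 * 100 * 15000 / 20000 = 75.
 */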
void dbs_check_cpu(struct cpufreq_policy *policy)
{
	int cpu = policy->cpu;
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int sampling_rate;
	unsigned int max_load = 0;
	unsigned int ignore_nice;
	unsigned int j;

	if (gov->governor == GOV_ONDEMAND) {
		struct od_cpu_dbs_info_s *od_dbs_info =
				gov->get_cpu_dbs_info_s(cpu);

		/*
		 * Sometimes, the ondemand governor uses an additional
		 * multiplier to give long delays. So apply this multiplier to
		 * the 'sampling_rate', so as to keep the wake-up-from-idle
		 * detection logic a bit conservative.
		 */
		sampling_rate = od_tuners->sampling_rate;
		sampling_rate *= od_dbs_info->rate_mult;

		ignore_nice = od_tuners->ignore_nice_load;
	} else {
		sampling_rate = cs_tuners->sampling_rate;
		ignore_nice = cs_tuners->ignore_nice_load;
	}

	/* Get Absolute Load */
	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs;
		u64 cur_wall_time, cur_idle_time;
		unsigned int idle_time, wall_time;
		unsigned int load;
		int io_busy = 0;

		j_cdbs = gov->get_cpu_cdbs(j);

		/*
		 * For the purpose of ondemand, waiting for disk IO is
		 * an indication that you're performance critical, and
		 * not that the system is actually idle. So do not add
		 * the iowait time to the cpu idle time.
		 */
		if (gov->governor == GOV_ONDEMAND)
			io_busy = od_tuners->io_is_busy;
		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time, io_busy);

		wall_time = (unsigned int)
			(cur_wall_time - j_cdbs->prev_cpu_wall);
		j_cdbs->prev_cpu_wall = cur_wall_time;

		if (cur_idle_time < j_cdbs->prev_cpu_idle)
			cur_idle_time = j_cdbs->prev_cpu_idle;

		idle_time = (unsigned int)
			(cur_idle_time - j_cdbs->prev_cpu_idle);
		j_cdbs->prev_cpu_idle = cur_idle_time;

		if (ignore_nice) {
			struct cpu_dbs_info *cdbs = gov->get_cpu_cdbs(cpu);
			u64 cur_nice;
			unsigned long cur_nice_jiffies;

			cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
				   cdbs->prev_cpu_nice;
			/*
			 * Assumption: nice time between sampling periods will
			 * be less than 2^32 jiffies for 32 bit sys
			 */
			cur_nice_jiffies = (unsigned long)
					cputime64_to_jiffies64(cur_nice);

			cdbs->prev_cpu_nice =
				kcpustat_cpu(j).cpustat[CPUTIME_NICE];
			idle_time += jiffies_to_usecs(cur_nice_jiffies);
		}

		if (unlikely(!wall_time || wall_time < idle_time))
			continue;

		/*
		 * If the CPU had gone completely idle, and a task just woke up
		 * on this CPU now, it would be unfair to calculate 'load' the
		 * usual way for this elapsed time-window, because it will show
		 * near-zero load, irrespective of how CPU intensive that task
		 * actually is. This is undesirable for latency-sensitive bursty
		 * workloads.
		 *
		 * To avoid this, we reuse the 'load' from the previous
		 * time-window and give this task a chance to start with a
		 * reasonably high CPU frequency. (However, we shouldn't over-do
		 * this copy, lest we get stuck at a high load (high frequency)
		 * for too long, even when the current system load has actually
		 * dropped down. So we perform the copy only once, upon the
		 * first wake-up from idle.)
		 *
		 * Detecting this situation is easy: the governor's utilization
		 * update handler would not have run during CPU-idle periods.
		 * Hence, an unusually large 'wall_time' (as compared to the
		 * sampling rate) indicates this scenario.
		 *
		 * prev_load can be zero in two cases and we must recalculate it
		 * for both cases:
		 * - during long idle intervals
		 * - explicitly set to zero
		 */
		if (unlikely(wall_time > (2 * sampling_rate) &&
			     j_cdbs->prev_load)) {
			load = j_cdbs->prev_load;

			/*
			 * Perform a destructive copy, to ensure that we copy
			 * the previous load only once, upon the first wake-up
			 * from idle.
			 */
			j_cdbs->prev_load = 0;
		} else {
			load = 100 * (wall_time - idle_time) / wall_time;
			j_cdbs->prev_load = load;
		}

		if (load > max_load)
			max_load = load;
	}

	gov->gov_check_cpu(cpu, max_load);
}
EXPORT_SYMBOL_GPL(dbs_check_cpu);
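
/*
 * The helpers below wire the governor into the scheduler's utilization
 * update hooks: each CPU in the policy gets its update_util callback
 * pointed at dbs_update_util_handler(), and clearing last_sample_time
 * forces the very first utilization update after a (re)start to take a
 * sample.
 */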
void gov_set_update_util(struct policy_dbs_info *policy_dbs,
			 unsigned int delay_us)
{
	struct cpufreq_policy *policy = policy_dbs->policy;
	struct dbs_governor *gov = dbs_governor_of(policy);
	int cpu;

	gov_update_sample_delay(policy_dbs, delay_us);
	policy_dbs->last_sample_time = 0;

	for_each_cpu(cpu, policy->cpus) {
		struct cpu_dbs_info *cdbs = gov->get_cpu_cdbs(cpu);

		cpufreq_set_update_util_data(cpu, &cdbs->update_util);
	}
}
EXPORT_SYMBOL_GPL(gov_set_update_util);

static inline void gov_clear_update_util(struct cpufreq_policy *policy)
{
	int i;

	for_each_cpu(i, policy->cpus)
		cpufreq_set_update_util_data(i, NULL);

	synchronize_rcu();
}
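
/*
 * Tear-down ordering matters here: raising work_count first stops
 * dbs_update_util_handler() from queuing new irq_work, clearing the
 * update_util hooks (with a grace period) waits out any handler already
 * running, and only then is it safe to sync and cancel the pending
 * irq_work and work item.
 */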
static void gov_cancel_work(struct policy_dbs_info *policy_dbs)
{
	/* Tell dbs_update_util_handler() to skip queuing up work items. */
	atomic_inc(&policy_dbs->work_count);
	/*
	 * If dbs_update_util_handler() is already running, it may not notice
	 * the incremented work_count, so wait for it to complete to prevent its
	 * work item from being queued up after the cancel_work_sync() below.
	 */
	gov_clear_update_util(policy_dbs->policy);
	irq_work_sync(&policy_dbs->irq_work);
	cancel_work_sync(&policy_dbs->work);
	atomic_set(&policy_dbs->work_count, 0);
}

static void dbs_work_handler(struct work_struct *work)
{
	struct policy_dbs_info *policy_dbs;
	struct cpufreq_policy *policy;
	struct dbs_governor *gov;
	unsigned int delay;

	policy_dbs = container_of(work, struct policy_dbs_info, work);
	policy = policy_dbs->policy;
	gov = dbs_governor_of(policy);

	/*
	 * Make sure cpufreq_governor_limits() isn't evaluating load or the
	 * ondemand governor isn't updating the sampling rate in parallel.
	 */
	mutex_lock(&policy_dbs->timer_mutex);
	delay = gov->gov_dbs_timer(policy);
	policy_dbs->sample_delay_ns = jiffies_to_nsecs(delay);
	mutex_unlock(&policy_dbs->timer_mutex);

	/*
	 * If the atomic operation below is reordered with respect to the
	 * sample delay modification, the utilization update handler may end
	 * up using a stale sample delay value.
	 */
	smp_mb__before_atomic();
	atomic_dec(&policy_dbs->work_count);
}
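
/*
 * The utilization update handler runs from scheduler context, where
 * sleeping or scheduling work directly is not allowed, so sampling is
 * deferred in two stages: an irq_work is queued first, and its handler
 * below then schedules the actual work item in process context.
 */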
static void dbs_irq_work(struct irq_work *irq_work)
{
	struct policy_dbs_info *policy_dbs;

	policy_dbs = container_of(irq_work, struct policy_dbs_info, irq_work);
	schedule_work(&policy_dbs->work);
}

static inline void gov_queue_irq_work(struct policy_dbs_info *policy_dbs)
{
#ifdef CONFIG_SMP
	irq_work_queue_on(&policy_dbs->irq_work, smp_processor_id());
#else
	irq_work_queue(&policy_dbs->irq_work);
#endif
}
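
/*
 * work_count doubles as a "sample in flight" marker and a stop flag:
 * only the caller that raises it from 0 to 1 may queue up a sample, and
 * it backs off (decrementing again) when work is already pending, the
 * governor is stopping, or the sample delay has not yet elapsed.
 */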
static void dbs_update_util_handler(struct update_util_data *data, u64 time,
				    unsigned long util, unsigned long max)
{
	struct cpu_dbs_info *cdbs = container_of(data, struct cpu_dbs_info, update_util);
	struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;

	/*
	 * The work may not be allowed to be queued up right now.
	 * Possible reasons:
	 * - Work has already been queued up or is in progress.
	 * - The governor is being stopped.
	 * - It is too early (too little time from the previous sample).
	 */
	if (atomic_inc_return(&policy_dbs->work_count) == 1) {
		u64 delta_ns;

		delta_ns = time - policy_dbs->last_sample_time;
		if ((s64)delta_ns >= policy_dbs->sample_delay_ns) {
			policy_dbs->last_sample_time = time;
			gov_queue_irq_work(policy_dbs);
			return;
		}
	}
	atomic_dec(&policy_dbs->work_count);
}

static void set_sampling_rate(struct dbs_data *dbs_data,
			      struct dbs_governor *gov,
			      unsigned int sampling_rate)
{
	if (gov->governor == GOV_CONSERVATIVE) {
		struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;

		cs_tuners->sampling_rate = sampling_rate;
	} else {
		struct od_dbs_tuners *od_tuners = dbs_data->tuners;

		od_tuners->sampling_rate = sampling_rate;
	}
}
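
/*
 * Per-policy bookkeeping is allocated once for all of
 * policy->related_cpus (online and offline alike), so a CPU that is
 * hotplugged in later already finds its cpu_dbs_info pointing at the
 * right policy_dbs_info and update handler.
 */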
static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy,
						     struct dbs_governor *gov)
{
	struct policy_dbs_info *policy_dbs;
	int j;

	/* Allocate memory for the common information for policy->cpus */
	policy_dbs = kzalloc(sizeof(*policy_dbs), GFP_KERNEL);
	if (!policy_dbs)
		return NULL;

	mutex_init(&policy_dbs->timer_mutex);
	atomic_set(&policy_dbs->work_count, 0);
	init_irq_work(&policy_dbs->irq_work, dbs_irq_work);
	INIT_WORK(&policy_dbs->work, dbs_work_handler);

	/* Set policy_dbs for all CPUs, online+offline */
	for_each_cpu(j, policy->related_cpus) {
		struct cpu_dbs_info *j_cdbs = gov->get_cpu_cdbs(j);

		j_cdbs->policy_dbs = policy_dbs;
		j_cdbs->update_util.func = dbs_update_util_handler;
	}
	return policy_dbs;
}

static void free_policy_dbs_info(struct cpufreq_policy *policy,
				 struct dbs_governor *gov)
{
	struct cpu_dbs_info *cdbs = gov->get_cpu_cdbs(policy->cpu);
	struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;
	int j;

	mutex_destroy(&policy_dbs->timer_mutex);

	for_each_cpu(j, policy->related_cpus) {
		struct cpu_dbs_info *j_cdbs = gov->get_cpu_cdbs(j);

		j_cdbs->policy_dbs = NULL;
		j_cdbs->update_util.func = NULL;
	}
	kfree(policy_dbs);
}
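
/*
 * Tunables (dbs_data) are shared across all policies unless the driver
 * declares governor-per-policy support, in which case each policy gets
 * its own set; usage_count tracks how many policies reference a shared
 * set, so it is freed only on the last exit.
 */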
static int cpufreq_governor_init(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct dbs_data *dbs_data = gov->gdbs_data;
	struct policy_dbs_info *policy_dbs;
	unsigned int latency;
	int ret;

	/* State should be equivalent to EXIT */
	if (policy->governor_data)
		return -EBUSY;

	policy_dbs = alloc_policy_dbs_info(policy, gov);
	if (!policy_dbs)
		return -ENOMEM;

	if (dbs_data) {
		if (WARN_ON(have_governor_per_policy())) {
			ret = -EINVAL;
			goto free_policy_dbs_info;
		}
		dbs_data->usage_count++;
		policy_dbs->dbs_data = dbs_data;
		policy->governor_data = policy_dbs;
		return 0;
	}

	dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
	if (!dbs_data) {
		ret = -ENOMEM;
		goto free_policy_dbs_info;
	}

	dbs_data->usage_count = 1;

	ret = gov->init(dbs_data, !policy->governor->initialized);
	if (ret)
		goto free_policy_dbs_info;

	/* policy latency is in ns. Convert it to us first */
	latency = policy->cpuinfo.transition_latency / 1000;
	if (latency == 0)
		latency = 1;

	/* Bring kernel and HW constraints together */
	dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
					  MIN_LATENCY_MULTIPLIER * latency);
	set_sampling_rate(dbs_data, gov, max(dbs_data->min_sampling_rate,
					     latency * LATENCY_MULTIPLIER));
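
	/*
	 * Illustrative arithmetic, assuming the usual cpufreq.h values of
	 * LATENCY_MULTIPLIER == 1000 and MIN_LATENCY_MULTIPLIER == 20: a
	 * transition_latency of 10000 ns gives latency = 10 us, a floor of
	 * 20 * 10 = 200 us on min_sampling_rate, and a default sampling
	 * rate of at least 10 * 1000 = 10000 us (10 ms).
	 */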

	if (!have_governor_per_policy())
		gov->gdbs_data = dbs_data;

	policy_dbs->dbs_data = dbs_data;
	policy->governor_data = policy_dbs;

	ret = sysfs_create_group(get_governor_parent_kobj(policy),
				 get_sysfs_attr(gov));
	if (!ret)
		return 0;

	/* Failure, so roll back. */

	policy->governor_data = NULL;

	if (!have_governor_per_policy())
		gov->gdbs_data = NULL;
	gov->exit(dbs_data, !policy->governor->initialized);
	kfree(dbs_data);

free_policy_dbs_info:
	free_policy_dbs_info(policy, gov);
	return ret;
}

static int cpufreq_governor_exit(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;

	/* State should be equivalent to INIT */
	if (policy_dbs->policy)
		return -EBUSY;

	if (!--dbs_data->usage_count) {
		sysfs_remove_group(get_governor_parent_kobj(policy),
				   get_sysfs_attr(gov));

		policy->governor_data = NULL;

		if (!have_governor_per_policy())
			gov->gdbs_data = NULL;

		gov->exit(dbs_data, policy->governor->initialized == 1);
		kfree(dbs_data);
	} else {
		policy->governor_data = NULL;
	}

	free_policy_dbs_info(policy, gov);
	return 0;
}
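
/*
 * Starting the governor snapshots each CPU's wall and idle time so the
 * first sample has a baseline, seeds prev_load from that snapshot, does
 * the ondemand/conservative-specific per-CPU setup, and finally arms the
 * scheduler utilization hooks with the configured sampling rate.
 */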
static int cpufreq_governor_start(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int sampling_rate, ignore_nice, j, cpu = policy->cpu;
	int io_busy = 0;

	if (!policy->cur)
		return -EINVAL;

	/* State should be equivalent to INIT */
	if (policy_dbs->policy)
		return -EBUSY;

	if (gov->governor == GOV_CONSERVATIVE) {
		struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;

		sampling_rate = cs_tuners->sampling_rate;
		ignore_nice = cs_tuners->ignore_nice_load;
	} else {
		struct od_dbs_tuners *od_tuners = dbs_data->tuners;

		sampling_rate = od_tuners->sampling_rate;
		ignore_nice = od_tuners->ignore_nice_load;
		io_busy = od_tuners->io_is_busy;
	}

	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs = gov->get_cpu_cdbs(j);
		unsigned int prev_load;

		j_cdbs->prev_cpu_idle =
			get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall, io_busy);

		prev_load = (unsigned int)(j_cdbs->prev_cpu_wall -
					   j_cdbs->prev_cpu_idle);
		j_cdbs->prev_load = 100 * prev_load /
				    (unsigned int)j_cdbs->prev_cpu_wall;

		if (ignore_nice)
			j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
	}

	policy_dbs->policy = policy;

	if (gov->governor == GOV_CONSERVATIVE) {
		struct cs_cpu_dbs_info_s *cs_dbs_info =
			gov->get_cpu_dbs_info_s(cpu);

		cs_dbs_info->down_skip = 0;
		cs_dbs_info->requested_freq = policy->cur;
	} else {
		struct od_ops *od_ops = gov->gov_ops;
		struct od_cpu_dbs_info_s *od_dbs_info = gov->get_cpu_dbs_info_s(cpu);

		od_dbs_info->rate_mult = 1;
		od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
		od_ops->powersave_bias_init_cpu(cpu);
	}

	gov_set_update_util(policy_dbs, sampling_rate);
	return 0;
}

static int cpufreq_governor_stop(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;

	/* State should be equivalent to START */
	if (!policy_dbs->policy)
		return -EBUSY;

	gov_cancel_work(policy_dbs);
	policy_dbs->policy = NULL;

	return 0;
}

static int cpufreq_governor_limits(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;

	/* State should be equivalent to START */
	if (!policy_dbs->policy)
		return -EBUSY;

	mutex_lock(&policy_dbs->timer_mutex);
	if (policy->max < policy->cur)
		__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
	else if (policy->min > policy->cur)
		__cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
	dbs_check_cpu(policy);
	mutex_unlock(&policy_dbs->timer_mutex);

	return 0;
}
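
/*
 * Single entry point for the cpufreq core: every governor callback is
 * funnelled through here and serialized by dbs_data_mutex, so
 * INIT/EXIT/START/STOP/LIMITS transitions for dbs-based governors cannot
 * race with one another.
 */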
int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event)
{
	int ret = -EINVAL;

	/* Lock governor to block concurrent initialization of governor */
	mutex_lock(&dbs_data_mutex);

	if (event == CPUFREQ_GOV_POLICY_INIT) {
		ret = cpufreq_governor_init(policy);
	} else if (policy->governor_data) {
		switch (event) {
		case CPUFREQ_GOV_POLICY_EXIT:
			ret = cpufreq_governor_exit(policy);
			break;
		case CPUFREQ_GOV_START:
			ret = cpufreq_governor_start(policy);
			break;
		case CPUFREQ_GOV_STOP:
			ret = cpufreq_governor_stop(policy);
			break;
		case CPUFREQ_GOV_LIMITS:
			ret = cpufreq_governor_limits(policy);
			break;
		}
	}

	mutex_unlock(&dbs_data_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_governor_dbs);