/*
 * drivers/cpufreq/cpufreq_ondemand.c
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                    Jun Nakajima <jun.nakajima@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu-defs.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/tick.h>
#include <linux/types.h>
#include <linux/cpu.h>

#include "cpufreq_governor.h"
/* On-demand governor macros */
#define DEF_FREQUENCY_DOWN_DIFFERENTIAL		(10)
#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define DEF_SAMPLING_DOWN_FACTOR		(1)
#define MAX_SAMPLING_DOWN_FACTOR		(100000)
#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL	(3)
#define MICRO_FREQUENCY_UP_THRESHOLD		(95)
#define MICRO_FREQUENCY_MIN_SAMPLE_RATE		(10000)
#define MIN_FREQUENCY_UP_THRESHOLD		(11)
#define MAX_FREQUENCY_UP_THRESHOLD		(100)
static DEFINE_PER_CPU(struct od_cpu_dbs_info_s, od_cpu_dbs_info);

static struct od_ops od_ops;

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
static struct cpufreq_governor cpufreq_gov_ondemand;
#endif
static void ondemand_powersave_bias_init_cpu(int cpu)
{
	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);

	dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
	dbs_info->freq_lo = 0;
}
/*
 * Not all CPUs want IO time to be accounted as busy; this depends on how
 * efficient idling at a higher frequency/voltage is.
 * Pavel Machek says this is not so for various generations of AMD and old
 * Intel systems.
 * Mike Chan (android.com) claims this is also not true for ARM.
 * Because of this, whitelist specific known series of CPUs by default, and
 * leave all others up to the user.
 */
static int should_io_be_busy(void)
{
#if defined(CONFIG_X86)
	/*
	 * For Intel, Core 2 (model 15) and later have an efficient idle.
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
			boot_cpu_data.x86 == 6 &&
			boot_cpu_data.x86_model >= 15)
		return 1;
#endif
	return 0;
}
/*
 * Find right freq to be set now with powersave_bias on.
 * Returns the freq_hi to be used right now and will set freq_hi_jiffies,
 * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs.
 */
static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
		unsigned int freq_next, unsigned int relation)
{
	unsigned int freq_req, freq_reduc, freq_avg;
	unsigned int freq_hi, freq_lo;
	unsigned int index = 0;
	unsigned int jiffies_total, jiffies_hi, jiffies_lo;
	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
						   policy->cpu);
	struct dbs_data *dbs_data = policy->governor_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;

	if (!dbs_info->freq_table) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_next;
	}
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
			relation, &index);
	freq_req = dbs_info->freq_table[index].frequency;
	freq_reduc = freq_req * od_tuners->powersave_bias / 1000;
	freq_avg = freq_req - freq_reduc;

	/* Find freq bounds for freq_avg in freq_table */
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_H, &index);
	freq_lo = dbs_info->freq_table[index].frequency;
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_L, &index);
	freq_hi = dbs_info->freq_table[index].frequency;

	/* Find out how long we have to be in hi and lo freqs */
	if (freq_hi == freq_lo) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_lo;
	}
	jiffies_total = usecs_to_jiffies(od_tuners->sampling_rate);
	jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
	jiffies_hi += ((freq_hi - freq_lo) / 2);
	jiffies_hi /= (freq_hi - freq_lo);
	jiffies_lo = jiffies_total - jiffies_hi;
	dbs_info->freq_lo = freq_lo;
	dbs_info->freq_lo_jiffies = jiffies_lo;
	dbs_info->freq_hi_jiffies = jiffies_hi;
	return freq_hi;
}
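/*
 * Worked example (illustrative numbers, not from the original source): with
 * powersave_bias = 200 (i.e. 20%) and freq_req = 2000000 kHz, freq_reduc is
 * 2000000 * 200 / 1000 = 400000, so freq_avg = 1600000. If the frequency
 * table brackets that with freq_lo = 1400000 and freq_hi = 1800000 and
 * jiffies_total = 10, then jiffies_hi = ((1600000 - 1400000) * 10 + 200000)
 * / 400000 = 5 and jiffies_lo = 5: five jiffies at 1.8 GHz plus five at
 * 1.4 GHz average out to the requested 1.6 GHz.
 */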
static void ondemand_powersave_bias_init(void)
{
	int i;
	for_each_online_cpu(i) {
		ondemand_powersave_bias_init_cpu(i);
	}
}
static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
{
	struct dbs_data *dbs_data = p->governor_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;

	if (od_tuners->powersave_bias)
		freq = od_ops.powersave_bias_target(p, freq,
				CPUFREQ_RELATION_H);
	else if (p->cur == p->max)
		return;

	__cpufreq_driver_target(p, freq, od_tuners->powersave_bias ?
			CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
}
/*
 * Every sampling_rate, we check if the current idle time is less than 20%
 * (default); if it is, we try to increase the frequency. Every sampling_rate,
 * we also look for the lowest frequency which can sustain the load while
 * keeping idle time over 30%. If such a frequency exists, we try to decrease
 * to this frequency.
 *
 * Any frequency increase takes it to the maximum frequency. Frequency
 * reduction happens at minimum steps of 5% (default) of current frequency.
 */
static void od_check_cpu(int cpu, unsigned int load_freq)
{
	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
	struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
	struct dbs_data *dbs_data = policy->governor_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;

	dbs_info->freq_lo = 0;

	/* Check for frequency increase */
	if (load_freq > od_tuners->up_threshold * policy->cur) {
		/* If switching to max speed, apply sampling_down_factor */
		if (policy->cur < policy->max)
			dbs_info->rate_mult =
				od_tuners->sampling_down_factor;
		dbs_freq_increase(policy, policy->max);
		return;
	}

	/* Check for frequency decrease */
	/* if we cannot reduce the frequency anymore, break out early */
	if (policy->cur == policy->min)
		return;

	/*
	 * The optimal frequency is the frequency that is the lowest that can
	 * support the current CPU usage without triggering the up policy. To
	 * be safe, we focus 10 points under the threshold.
	 */
	if (load_freq < od_tuners->adj_up_threshold
			* policy->cur) {
		unsigned int freq_next;
		freq_next = load_freq / od_tuners->adj_up_threshold;

		/* No longer fully busy, reset rate_mult */
		dbs_info->rate_mult = 1;

		if (freq_next < policy->min)
			freq_next = policy->min;

		if (!od_tuners->powersave_bias) {
			__cpufreq_driver_target(policy, freq_next,
					CPUFREQ_RELATION_L);
			return;
		}

		freq_next = od_ops.powersave_bias_target(policy, freq_next,
					CPUFREQ_RELATION_L);
		__cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L);
	}
}
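/*
 * Illustrative example (values assumed, not from the original source): with
 * the micro-accounting defaults up_threshold = 95 and adj_up_threshold = 92,
 * a CPU at 2000000 kHz ramps to policy->max once load_freq exceeds
 * 95 * 2000000. load_freq is roughly load-in-percent times the current
 * frequency, as computed by dbs_check_cpu() in cpufreq_governor.c; at 50%
 * load it is about 50 * 2000000 = 100000000, which is below 92 * 2000000,
 * so freq_next = 100000000 / 92 ~= 1086956 kHz, which CPUFREQ_RELATION_L
 * rounds up to the nearest supported table frequency.
 */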
static void od_dbs_timer(struct work_struct *work)
{
	struct od_cpu_dbs_info_s *dbs_info =
		container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work);
	unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
	struct od_cpu_dbs_info_s *core_dbs_info = &per_cpu(od_cpu_dbs_info,
			cpu);
	struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	int delay = 0, sample_type = core_dbs_info->sample_type;
	bool modify_all = true;

	mutex_lock(&core_dbs_info->cdbs.timer_mutex);
	if (!need_load_eval(&core_dbs_info->cdbs, od_tuners->sampling_rate)) {
		modify_all = false;
		goto max_delay;
	}

	/* Common NORMAL_SAMPLE setup */
	core_dbs_info->sample_type = OD_NORMAL_SAMPLE;
	if (sample_type == OD_SUB_SAMPLE) {
		delay = core_dbs_info->freq_lo_jiffies;
		__cpufreq_driver_target(core_dbs_info->cdbs.cur_policy,
				core_dbs_info->freq_lo, CPUFREQ_RELATION_H);
	} else {
		dbs_check_cpu(dbs_data, cpu);
		if (core_dbs_info->freq_lo) {
			/* Setup timer for SUB_SAMPLE */
			core_dbs_info->sample_type = OD_SUB_SAMPLE;
			delay = core_dbs_info->freq_hi_jiffies;
		}
	}

max_delay:
	if (!delay)
		delay = delay_for_sampling_rate(od_tuners->sampling_rate
				* core_dbs_info->rate_mult);

	gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, delay, modify_all);
	mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
}
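/*
 * Note on the two-phase sampling above: when a powersave_bias decision left
 * freq_lo non-zero, a NORMAL sample arms an OD_SUB_SAMPLE pass, so the CPU
 * runs freq_hi for freq_hi_jiffies and is then dropped to freq_lo for
 * freq_lo_jiffies before the next full load evaluation (in the worked
 * example after generic_powersave_bias_target: 5 jiffies at 1.8 GHz, then
 * 5 jiffies at 1.4 GHz).
 */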
/************************** sysfs interface ************************/
static struct common_dbs_data od_dbs_cdata;
/**
 * update_sampling_rate - update sampling rate effective immediately if needed.
 * @new_rate: new sampling rate
 *
 * If the new rate is smaller than the old, simply updating
 * dbs_tuners_int.sampling_rate might not be appropriate. For example, if the
 * original sampling_rate was 1 second and the requested new sampling rate is
 * 10 ms because the user needs an immediate reaction from the ondemand
 * governor, the governor may otherwise apply the change too late: up to
 * 1 second later. Thus, if we are reducing the sampling rate, we need to make
 * the new value effective immediately.
 */
static void update_sampling_rate(struct dbs_data *dbs_data,
		unsigned int new_rate)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	int cpu;

	od_tuners->sampling_rate = new_rate = max(new_rate,
			dbs_data->min_sampling_rate);

	for_each_online_cpu(cpu) {
		struct cpufreq_policy *policy;
		struct od_cpu_dbs_info_s *dbs_info;
		unsigned long next_sampling, appointed_at;

		policy = cpufreq_cpu_get(cpu);
		if (!policy)
			continue;
		if (policy->governor != &cpufreq_gov_ondemand) {
			cpufreq_cpu_put(policy);
			continue;
		}
		dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
		cpufreq_cpu_put(policy);

		mutex_lock(&dbs_info->cdbs.timer_mutex);

		if (!delayed_work_pending(&dbs_info->cdbs.work)) {
			mutex_unlock(&dbs_info->cdbs.timer_mutex);
			continue;
		}

		next_sampling = jiffies + usecs_to_jiffies(new_rate);
		appointed_at = dbs_info->cdbs.work.timer.expires;

		if (time_before(next_sampling, appointed_at)) {
			mutex_unlock(&dbs_info->cdbs.timer_mutex);
			cancel_delayed_work_sync(&dbs_info->cdbs.work);
			mutex_lock(&dbs_info->cdbs.timer_mutex);

			gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy,
					usecs_to_jiffies(new_rate), true);
		}
		mutex_unlock(&dbs_info->cdbs.timer_mutex);
	}
}
static ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	update_sampling_rate(dbs_data, input);
	return count;
}
static ssize_t store_io_is_busy(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;
	unsigned int j;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	od_tuners->io_is_busy = !!input;

	/* we need to re-evaluate prev_cpu_idle */
	for_each_online_cpu(j) {
		struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
									j);
		dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
			&dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy);
	}
	return count;
}
static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
			input < MIN_FREQUENCY_UP_THRESHOLD) {
		return -EINVAL;
	}
	/* Calculate the new adj_up_threshold */
	od_tuners->adj_up_threshold += input;
	od_tuners->adj_up_threshold -= od_tuners->up_threshold;

	od_tuners->up_threshold = input;
	return count;
}
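/*
 * The two adj_up_threshold updates above amount to
 * adj_up_threshold += (input - up_threshold), which keeps the down
 * differential constant across writes. Example (assumed values): with
 * up_threshold = 95 and adj_up_threshold = 92, writing 80 yields
 * 92 + 80 - 95 = 77, still 3 points under the new threshold.
 */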
static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
		const char *buf, size_t count)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int input, j;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
		return -EINVAL;
	od_tuners->sampling_down_factor = input;

	/* Reset down sampling multiplier in case it was active */
	for_each_online_cpu(j) {
		struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
				j);
		dbs_info->rate_mult = 1;
	}
	return count;
}
static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;

	unsigned int j;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	if (input == od_tuners->ignore_nice) { /* nothing to do */
		return count;
	}
	od_tuners->ignore_nice = input;

	/* we need to re-evaluate prev_cpu_idle */
	for_each_online_cpu(j) {
		struct od_cpu_dbs_info_s *dbs_info;
		dbs_info = &per_cpu(od_cpu_dbs_info, j);
		dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
			&dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy);
		if (od_tuners->ignore_nice)
			dbs_info->cdbs.prev_cpu_nice =
				kcpustat_cpu(j).cpustat[CPUTIME_NICE];
	}
	return count;
}
static ssize_t store_powersave_bias(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1000)
		input = 1000;

	od_tuners->powersave_bias = input;
	ondemand_powersave_bias_init();
	return count;
}
show_store_one(od, sampling_rate);
show_store_one(od, io_is_busy);
show_store_one(od, up_threshold);
show_store_one(od, sampling_down_factor);
show_store_one(od, ignore_nice);
show_store_one(od, powersave_bias);
declare_show_sampling_rate_min(od);

gov_sys_pol_attr_rw(sampling_rate);
gov_sys_pol_attr_rw(io_is_busy);
gov_sys_pol_attr_rw(up_threshold);
gov_sys_pol_attr_rw(sampling_down_factor);
gov_sys_pol_attr_rw(ignore_nice);
gov_sys_pol_attr_rw(powersave_bias);
gov_sys_pol_attr_ro(sampling_rate_min);
static struct attribute *dbs_attributes_gov_sys[] = {
	&sampling_rate_min_gov_sys.attr,
	&sampling_rate_gov_sys.attr,
	&up_threshold_gov_sys.attr,
	&sampling_down_factor_gov_sys.attr,
	&ignore_nice_gov_sys.attr,
	&powersave_bias_gov_sys.attr,
	&io_is_busy_gov_sys.attr,
	NULL
};

static struct attribute_group od_attr_group_gov_sys = {
	.attrs = dbs_attributes_gov_sys,
	.name = "ondemand",
};

static struct attribute *dbs_attributes_gov_pol[] = {
	&sampling_rate_min_gov_pol.attr,
	&sampling_rate_gov_pol.attr,
	&up_threshold_gov_pol.attr,
	&sampling_down_factor_gov_pol.attr,
	&ignore_nice_gov_pol.attr,
	&powersave_bias_gov_pol.attr,
	&io_is_busy_gov_pol.attr,
	NULL
};

static struct attribute_group od_attr_group_gov_pol = {
	.attrs = dbs_attributes_gov_pol,
	.name = "ondemand",
};
/************************** sysfs end ************************/
static int od_init(struct dbs_data *dbs_data)
{
	struct od_dbs_tuners *tuners;
	u64 idle_time;
	int cpu;

	tuners = kzalloc(sizeof(struct od_dbs_tuners), GFP_KERNEL);
	if (!tuners) {
		pr_err("%s: kzalloc failed\n", __func__);
		return -ENOMEM;
	}

	cpu = get_cpu();
	idle_time = get_cpu_idle_time_us(cpu, NULL);
	put_cpu();
	if (idle_time != -1ULL) {
		/* Idle micro accounting is supported. Use finer thresholds */
		tuners->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
		tuners->adj_up_threshold = MICRO_FREQUENCY_UP_THRESHOLD -
			MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
		/*
		 * In nohz/micro accounting case we set the minimum frequency
		 * not depending on HZ, but fixed (very low). The deferred
		 * timer might skip some samples if idle/sleeping as needed.
		 */
		dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
	} else {
		tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
		tuners->adj_up_threshold = DEF_FREQUENCY_UP_THRESHOLD -
			DEF_FREQUENCY_DOWN_DIFFERENTIAL;

		/* For correct statistics, we need 10 ticks for each measure */
		dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
			jiffies_to_usecs(10);
	}

	tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
	tuners->ignore_nice = 0;
	tuners->powersave_bias = 0;
	tuners->io_is_busy = should_io_be_busy();

	dbs_data->tuners = tuners;
	mutex_init(&dbs_data->mutex);
	return 0;
}
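/*
 * Sizing note (assuming MIN_SAMPLING_RATE_RATIO is 2, per
 * cpufreq_governor.h): without micro accounting the floor works out to
 * 2 * jiffies_to_usecs(10), e.g. 20 ms at HZ=1000 or 80 ms at HZ=250,
 * whereas the micro-accounting floor is a fixed 10 ms
 * (MICRO_FREQUENCY_MIN_SAMPLE_RATE).
 */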
static void od_exit(struct dbs_data *dbs_data)
{
	kfree(dbs_data->tuners);
}
define_get_cpu_dbs_routines(od_cpu_dbs_info);
static struct od_ops od_ops = {
	.powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
	.powersave_bias_target = generic_powersave_bias_target,
	.freq_increase = dbs_freq_increase,
};

static struct common_dbs_data od_dbs_cdata = {
	.governor = GOV_ONDEMAND,
	.attr_group_gov_sys = &od_attr_group_gov_sys,
	.attr_group_gov_pol = &od_attr_group_gov_pol,
	.get_cpu_cdbs = get_cpu_cdbs,
	.get_cpu_dbs_info_s = get_cpu_dbs_info_s,
	.gov_dbs_timer = od_dbs_timer,
	.gov_check_cpu = od_check_cpu,
	.gov_ops = &od_ops,
	.init = od_init,
	.exit = od_exit,
};
static void od_set_powersave_bias(unsigned int powersave_bias)
{
	struct cpufreq_policy *policy;
	struct dbs_data *dbs_data;
	struct od_dbs_tuners *od_tuners;
	unsigned int cpu;
	cpumask_t done;

	cpumask_clear(&done);

	get_online_cpus();
	for_each_online_cpu(cpu) {
		if (cpumask_test_cpu(cpu, &done))
			continue;

		policy = per_cpu(od_cpu_dbs_info, cpu).cdbs.cur_policy;
		dbs_data = policy->governor_data;
		od_tuners = dbs_data->tuners;
		od_tuners->powersave_bias = powersave_bias;

		cpumask_or(&done, &done, policy->cpus);
	}
	put_online_cpus();
}
void od_register_powersave_bias_handler(unsigned int (*f)
		(struct cpufreq_policy *, unsigned int, unsigned int),
		unsigned int powersave_bias)
{
	od_ops.powersave_bias_target = f;
	od_set_powersave_bias(powersave_bias);
}
EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);
void od_unregister_powersave_bias_handler(void)
{
	od_ops.powersave_bias_target = generic_powersave_bias_target;
	od_set_powersave_bias(0);
}
EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
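/*
 * Usage sketch (hypothetical caller, not part of this file): a cpufreq
 * driver that implements powersave bias in hardware could swap in its own
 * target callback, e.g.:
 *
 *	static unsigned int my_bias_target(struct cpufreq_policy *policy,
 *			unsigned int freq_next, unsigned int relation)
 *	{
 *		return freq_next;  (leaving dbs_info->freq_lo at 0 skips
 *				    the OD_SUB_SAMPLE phase entirely)
 *	}
 *
 *	od_register_powersave_bias_handler(my_bias_target, 150);
 *
 * od_unregister_powersave_bias_handler() restores the generic behaviour.
 */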
static int od_cpufreq_governor_dbs(struct cpufreq_policy *policy,
		unsigned int event)
{
	return cpufreq_governor_dbs(policy, &od_dbs_cdata, event);
}
#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
static
#endif
struct cpufreq_governor cpufreq_gov_ondemand = {
	.name			= "ondemand",
	.governor		= od_cpufreq_governor_dbs,
	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
	.owner			= THIS_MODULE,
};
static int __init cpufreq_gov_dbs_init(void)
{
	return cpufreq_register_governor(&cpufreq_gov_ondemand);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_ondemand);
}
MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
	"Low Latency Frequency Transition capable processors");
MODULE_LICENSE("GPL");
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);