/*
 * CPUFreq governor based on scheduler-provided CPU utilization data.
 *
 * Copyright (C) 2016, Intel Corporation
 * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpufreq.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <linux/slab.h>
#include <trace/events/power.h>

#include "sched.h"

#define SUGOV_KTHREAD_PRIORITY	50
struct sugov_tunables {
	struct gov_attr_set attr_set;
	unsigned int rate_limit_us;
};
struct sugov_policy {
	struct cpufreq_policy *policy;

	struct sugov_tunables *tunables;
	struct list_head tunables_hook;

	raw_spinlock_t update_lock;  /* For shared policies */
	u64 last_freq_update_time;
	s64 freq_update_delay_ns;
	unsigned int next_freq;
	unsigned int cached_raw_freq;

	/* The next fields are only needed if fast switch cannot be used. */
	struct irq_work irq_work;
	struct kthread_work work;
	struct mutex work_lock;
	struct kthread_worker worker;
	struct task_struct *thread;
	bool work_in_progress;

	bool need_freq_update;
};
struct sugov_cpu {
	struct update_util_data update_util;
	struct sugov_policy *sg_policy;

	unsigned long iowait_boost;
	unsigned long iowait_boost_max;
	u64 last_update;

	/* The fields below are only needed when sharing a policy. */
	unsigned long util;
	unsigned long max;
	unsigned int flags;

	/* The field below is for single-CPU policies only. */
#ifdef CONFIG_NO_HZ_COMMON
	unsigned long saved_idle_calls;
#endif
};

static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);
/************************ Governor internals ***********************/
static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
{
	s64 delta_ns;

	if (sg_policy->work_in_progress)
		return false;

	if (unlikely(sg_policy->need_freq_update)) {
		sg_policy->need_freq_update = false;
		/*
		 * This happens when limits change, so forget the previous
		 * next_freq value and force an update.
		 */
		sg_policy->next_freq = UINT_MAX;
		return true;
	}

	delta_ns = time - sg_policy->last_freq_update_time;
	return delta_ns >= sg_policy->freq_update_delay_ns;
}
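
/*
 * Example (illustrative numbers, not from the original source): with
 * rate_limit_us = 10000, freq_update_delay_ns is 10,000,000 ns, so at most
 * one frequency update is committed per 10 ms on this policy; utilization
 * callbacks arriving sooner than that return false here and are ignored.
 */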
static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
				unsigned int next_freq)
{
	struct cpufreq_policy *policy = sg_policy->policy;

	if (sg_policy->next_freq == next_freq)
		return;

	sg_policy->next_freq = next_freq;
	sg_policy->last_freq_update_time = time;

	if (policy->fast_switch_enabled) {
		next_freq = cpufreq_driver_fast_switch(policy, next_freq);
		if (next_freq == CPUFREQ_ENTRY_INVALID)
			return;

		policy->cur = next_freq;
		trace_cpu_frequency(next_freq, smp_processor_id());
	} else {
		sg_policy->work_in_progress = true;
		irq_work_queue(&sg_policy->irq_work);
	}
}
/**
 * get_next_freq - Compute a new frequency for a given cpufreq policy.
 * @sg_policy: schedutil policy object to compute the new frequency for.
 * @util: Current CPU utilization.
 * @max: CPU capacity.
 *
 * If the utilization is frequency-invariant, choose the new frequency to be
 * proportional to it, that is
 *
 * next_freq = C * max_freq * util / max
 *
 * Otherwise, approximate the would-be frequency-invariant utilization by
 * util_raw * (curr_freq / max_freq) which leads to
 *
 * next_freq = C * curr_freq * util_raw / max
 *
 * Take C = 1.25 for the frequency tipping point at (util / max) = 0.8.
 *
 * The lowest driver-supported frequency which is equal or greater than the raw
 * next_freq (as calculated above) is returned, subject to policy min/max and
 * cpufreq driver limitations.
 */
static unsigned int get_next_freq(struct sugov_policy *sg_policy,
				  unsigned long util, unsigned long max)
{
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned int freq = arch_scale_freq_invariant() ?
				policy->cpuinfo.max_freq : policy->cur;

	freq = (freq + (freq >> 2)) * util / max;

	if (freq == sg_policy->cached_raw_freq && sg_policy->next_freq != UINT_MAX)
		return sg_policy->next_freq;
	sg_policy->cached_raw_freq = freq;
	return cpufreq_driver_resolve_freq(policy, freq);
}
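
/*
 * Worked example (illustrative numbers): with frequency-invariant
 * utilization, max_freq = 2000000 kHz, util = 512 and max = 1024, the raw
 * frequency above is (2000000 + 500000) * 512 / 1024 = 1250000 kHz, i.e.
 * 1.25 * max_freq * util / max, which cpufreq_driver_resolve_freq() then
 * maps to the closest supported frequency.
 */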
static void sugov_get_util(unsigned long *util, unsigned long *max)
{
	struct rq *rq = this_rq();
	unsigned long cfs_max;

	cfs_max = arch_scale_cpu_capacity(NULL, smp_processor_id());

	*util = min(rq->cfs.avg.util_avg, cfs_max);
	*max = cfs_max;
}
static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
				   unsigned int flags)
{
	if (flags & SCHED_CPUFREQ_IOWAIT) {
		sg_cpu->iowait_boost = sg_cpu->iowait_boost_max;
	} else if (sg_cpu->iowait_boost) {
		s64 delta_ns = time - sg_cpu->last_update;

		/* Clear iowait_boost if the CPU appears to have been idle. */
		if (delta_ns > TICK_NSEC)
			sg_cpu->iowait_boost = 0;
	}
}
static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, unsigned long *util,
			       unsigned long *max)
{
	unsigned long boost_util = sg_cpu->iowait_boost;
	unsigned long boost_max = sg_cpu->iowait_boost_max;

	if (!boost_util)
		return;

	if (*util * boost_max < *max * boost_util) {
		*util = boost_util;
		*max = boost_max;
	}
	sg_cpu->iowait_boost >>= 1;
}
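
/*
 * Decay sketch (illustrative): a SCHED_CPUFREQ_IOWAIT update arms
 * iowait_boost at iowait_boost_max, and each later update that consumes it
 * halves it (max, max/2, max/4, ...), so the boost dies out within a few
 * updates unless fresh I/O-wait wakeups keep re-arming it.
 */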
#ifdef CONFIG_NO_HZ_COMMON
static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
{
	unsigned long idle_calls = tick_nohz_get_idle_calls();
	bool ret = idle_calls == sg_cpu->saved_idle_calls;

	sg_cpu->saved_idle_calls = idle_calls;
	return ret;
}
#else
static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
#endif /* CONFIG_NO_HZ_COMMON */
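
/*
 * Note: sugov_cpu_is_busy() reports "busy" exactly when the nohz idle-call
 * counter has not advanced since the previous sample, i.e. the CPU has not
 * entered the idle loop between two consecutive frequency updates.
 */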
static void sugov_update_single(struct update_util_data *hook, u64 time,
				unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned long util, max;
	unsigned int next_f;
	bool busy;

	sugov_set_iowait_boost(sg_cpu, time, flags);
	sg_cpu->last_update = time;

	if (!sugov_should_update_freq(sg_policy, time))
		return;

	busy = sugov_cpu_is_busy(sg_cpu);

	if (flags & SCHED_CPUFREQ_RT_DL) {
		next_f = policy->cpuinfo.max_freq;
	} else {
		sugov_get_util(&util, &max);
		sugov_iowait_boost(sg_cpu, &util, &max);
		next_f = get_next_freq(sg_policy, util, max);
		/*
		 * Do not reduce the frequency if the CPU has not been idle
		 * recently, as the reduction is likely to be premature then.
		 */
		if (busy && next_f < sg_policy->next_freq)
			next_f = sg_policy->next_freq;
	}
	sugov_update_commit(sg_policy, time, next_f);
}
static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
{
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned long util = 0, max = 1;
	unsigned int j;

	for_each_cpu(j, policy->cpus) {
		struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
		unsigned long j_util, j_max;
		s64 delta_ns;

		/*
		 * If the CPU utilization was last updated before the previous
		 * frequency update and the time elapsed between the last update
		 * of the CPU utilization and the last frequency update is long
		 * enough, don't take the CPU into account as it probably is
		 * idle now (and clear iowait_boost for it).
		 */
		delta_ns = time - j_sg_cpu->last_update;
		if (delta_ns > TICK_NSEC) {
			j_sg_cpu->iowait_boost = 0;
			continue;
		}
		if (j_sg_cpu->flags & SCHED_CPUFREQ_RT_DL)
			return policy->cpuinfo.max_freq;

		j_util = j_sg_cpu->util;
		j_max = j_sg_cpu->max;
		if (j_util * max > j_max * util) {
			util = j_util;
			max = j_max;
		}

		sugov_iowait_boost(j_sg_cpu, &util, &max);
	}

	return get_next_freq(sg_policy, util, max);
}
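
/*
 * The cross-multiplication j_util * max > j_max * util above compares the
 * ratios j_util/j_max and util/max without integer division. Illustrative
 * numbers: util/max = 300/1024 loses to j_util/j_max = 200/512 because
 * 200 * 1024 = 204800 > 512 * 300 = 153600.
 */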
static void sugov_update_shared(struct update_util_data *hook, u64 time,
				unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	unsigned long util, max;
	unsigned int next_f;

	sugov_get_util(&util, &max);

	raw_spin_lock(&sg_policy->update_lock);

	sg_cpu->util = util;
	sg_cpu->max = max;
	sg_cpu->flags = flags;

	sugov_set_iowait_boost(sg_cpu, time, flags);
	sg_cpu->last_update = time;

	if (sugov_should_update_freq(sg_policy, time)) {
		if (flags & SCHED_CPUFREQ_RT_DL)
			next_f = sg_policy->policy->cpuinfo.max_freq;
		else
			next_f = sugov_next_freq_shared(sg_cpu, time);

		sugov_update_commit(sg_policy, time, next_f);
	}

	raw_spin_unlock(&sg_policy->update_lock);
}
static void sugov_work(struct kthread_work *work)
{
	struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);

	mutex_lock(&sg_policy->work_lock);
	__cpufreq_driver_target(sg_policy->policy, sg_policy->next_freq,
				CPUFREQ_RELATION_L);
	mutex_unlock(&sg_policy->work_lock);

	sg_policy->work_in_progress = false;
}
static void sugov_irq_work(struct irq_work *irq_work)
{
	struct sugov_policy *sg_policy;

	sg_policy = container_of(irq_work, struct sugov_policy, irq_work);

	/*
	 * For RT and deadline tasks, the schedutil governor shoots the
	 * frequency to maximum. Special care must be taken to ensure that this
	 * kthread doesn't result in the same behavior.
	 *
	 * This is (mostly) guaranteed by the work_in_progress flag. The flag is
	 * updated only at the end of the sugov_work() function and before that
	 * the schedutil governor rejects all other frequency scaling requests.
	 *
	 * There is a very rare case though, where the RT thread yields right
	 * after the work_in_progress flag is cleared. The effects of that are
	 * neglected for now.
	 */
	kthread_queue_work(&sg_policy->worker, &sg_policy->work);
}
/************************** sysfs interface ************************/
static struct sugov_tunables *global_tunables;
static DEFINE_MUTEX(global_tunables_lock);

static inline struct sugov_tunables *to_sugov_tunables(struct gov_attr_set *attr_set)
{
	return container_of(attr_set, struct sugov_tunables, attr_set);
}
static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

	return sprintf(buf, "%u\n", tunables->rate_limit_us);
}
static ssize_t rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf,
				   size_t count)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
	struct sugov_policy *sg_policy;
	unsigned int rate_limit_us;

	if (kstrtouint(buf, 10, &rate_limit_us))
		return -EINVAL;

	tunables->rate_limit_us = rate_limit_us;

	list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook)
		sg_policy->freq_update_delay_ns = rate_limit_us * NSEC_PER_USEC;

	return count;
}
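
/*
 * Usage sketch (the path below assumes per-policy tunables; with global
 * tunables the attribute sits under cpufreq/schedutil instead):
 *
 *   echo 2000 > /sys/devices/system/cpu/cpufreq/policy0/schedutil/rate_limit_us
 *
 * which caps frequency re-evaluation at one commit per 2 ms for that policy.
 */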
static struct governor_attr rate_limit_us = __ATTR_RW(rate_limit_us);

static struct attribute *sugov_attributes[] = {
	&rate_limit_us.attr,
	NULL
};

static struct kobj_type sugov_tunables_ktype = {
	.default_attrs = sugov_attributes,
	.sysfs_ops = &governor_sysfs_ops,
};
/********************** cpufreq governor interface *********************/
static struct cpufreq_governor schedutil_gov;

static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy;

	sg_policy = kzalloc(sizeof(*sg_policy), GFP_KERNEL);
	if (!sg_policy)
		return NULL;

	sg_policy->policy = policy;
	raw_spin_lock_init(&sg_policy->update_lock);
	return sg_policy;
}
static void sugov_policy_free(struct sugov_policy *sg_policy)
{
	kfree(sg_policy);
}
static int sugov_kthread_create(struct sugov_policy *sg_policy)
{
	struct task_struct *thread;
	struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO / 2 };
	struct cpufreq_policy *policy = sg_policy->policy;
	int ret;

	/* kthread only required for slow path */
	if (policy->fast_switch_enabled)
		return 0;

	kthread_init_work(&sg_policy->work, sugov_work);
	kthread_init_worker(&sg_policy->worker);
	thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
				"sugov:%d",
				cpumask_first(policy->related_cpus));
	if (IS_ERR(thread)) {
		pr_err("failed to create sugov thread: %ld\n", PTR_ERR(thread));
		return PTR_ERR(thread);
	}

	ret = sched_setscheduler_nocheck(thread, SCHED_FIFO, &param);
	if (ret) {
		kthread_stop(thread);
		pr_warn("%s: failed to set SCHED_FIFO\n", __func__);
		return ret;
	}

	sg_policy->thread = thread;
	kthread_bind_mask(thread, policy->related_cpus);
	init_irq_work(&sg_policy->irq_work, sugov_irq_work);
	mutex_init(&sg_policy->work_lock);

	wake_up_process(thread);

	return 0;
}
static void sugov_kthread_stop(struct sugov_policy *sg_policy)
{
	/* kthread only required for slow path */
	if (sg_policy->policy->fast_switch_enabled)
		return;

	kthread_flush_worker(&sg_policy->worker);
	kthread_stop(sg_policy->thread);
	mutex_destroy(&sg_policy->work_lock);
}
static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
{
	struct sugov_tunables *tunables;

	tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
	if (tunables) {
		gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook);
		if (!have_governor_per_policy())
			global_tunables = tunables;
	}
	return tunables;
}
static void sugov_tunables_free(struct sugov_tunables *tunables)
{
	if (!have_governor_per_policy())
		global_tunables = NULL;

	kfree(tunables);
}
static int sugov_init(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy;
	struct sugov_tunables *tunables;
	int ret = 0;

	/* State should be equivalent to EXIT */
	if (policy->governor_data)
		return -EBUSY;

	cpufreq_enable_fast_switch(policy);

	sg_policy = sugov_policy_alloc(policy);
	if (!sg_policy) {
		ret = -ENOMEM;
		goto disable_fast_switch;
	}

	ret = sugov_kthread_create(sg_policy);
	if (ret)
		goto free_sg_policy;

	mutex_lock(&global_tunables_lock);

	if (global_tunables) {
		if (WARN_ON(have_governor_per_policy())) {
			ret = -EINVAL;
			goto stop_kthread;
		}
		policy->governor_data = sg_policy;
		sg_policy->tunables = global_tunables;

		gov_attr_set_get(&global_tunables->attr_set, &sg_policy->tunables_hook);
		goto out;
	}

	tunables = sugov_tunables_alloc(sg_policy);
	if (!tunables) {
		ret = -ENOMEM;
		goto stop_kthread;
	}

	if (policy->transition_delay_us) {
		tunables->rate_limit_us = policy->transition_delay_us;
	} else {
		unsigned int lat;

		tunables->rate_limit_us = LATENCY_MULTIPLIER;
		lat = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
		if (lat)
			tunables->rate_limit_us *= lat;
	}
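
	/*
	 * Illustrative default: with transition_latency = 20000 ns, lat is
	 * 20 us and rate_limit_us becomes LATENCY_MULTIPLIER (1000) * 20 =
	 * 20000 us, i.e. at most one frequency change per 20 ms.
	 */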
	policy->governor_data = sg_policy;
	sg_policy->tunables = tunables;

	ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype,
				   get_governor_parent_kobj(policy), "%s",
				   schedutil_gov.name);
	if (ret)
		goto fail;

out:
	mutex_unlock(&global_tunables_lock);
	return 0;

fail:
	policy->governor_data = NULL;
	sugov_tunables_free(tunables);

stop_kthread:
	sugov_kthread_stop(sg_policy);
	mutex_unlock(&global_tunables_lock);

free_sg_policy:
	sugov_policy_free(sg_policy);

disable_fast_switch:
	cpufreq_disable_fast_switch(policy);

	pr_err("initialization failed (error %d)\n", ret);
	return ret;
}
static void sugov_exit(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	struct sugov_tunables *tunables = sg_policy->tunables;
	unsigned int count;

	mutex_lock(&global_tunables_lock);

	count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
	policy->governor_data = NULL;
	if (!count)
		sugov_tunables_free(tunables);

	mutex_unlock(&global_tunables_lock);

	sugov_kthread_stop(sg_policy);
	sugov_policy_free(sg_policy);
	cpufreq_disable_fast_switch(policy);
}
static int sugov_start(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	unsigned int cpu;

	sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
	sg_policy->last_freq_update_time = 0;
	sg_policy->next_freq = UINT_MAX;
	sg_policy->work_in_progress = false;
	sg_policy->need_freq_update = false;
	sg_policy->cached_raw_freq = 0;

	for_each_cpu(cpu, policy->cpus) {
		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

		memset(sg_cpu, 0, sizeof(*sg_cpu));
		sg_cpu->sg_policy = sg_policy;
		sg_cpu->flags = SCHED_CPUFREQ_RT;
		sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
		cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
					     policy_is_shared(policy) ?
							sugov_update_shared :
							sugov_update_single);
	}
	return 0;
}
static void sugov_stop(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	unsigned int cpu;

	for_each_cpu(cpu, policy->cpus)
		cpufreq_remove_update_util_hook(cpu);

	synchronize_sched();

	if (!policy->fast_switch_enabled) {
		irq_work_sync(&sg_policy->irq_work);
		kthread_cancel_work_sync(&sg_policy->work);
	}
}
static void sugov_limits(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;

	if (!policy->fast_switch_enabled) {
		mutex_lock(&sg_policy->work_lock);
		cpufreq_policy_apply_limits(policy);
		mutex_unlock(&sg_policy->work_lock);
	}

	sg_policy->need_freq_update = true;
}
static struct cpufreq_governor schedutil_gov = {
	.name = "schedutil",
	.owner = THIS_MODULE,
	.init = sugov_init,
	.exit = sugov_exit,
	.start = sugov_start,
	.stop = sugov_stop,
	.limits = sugov_limits,
};
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
struct cpufreq_governor *cpufreq_default_governor(void)
{
	return &schedutil_gov;
}
#endif
static int __init sugov_register(void)
{
	return cpufreq_register_governor(&schedutil_gov);
}
fs_initcall(sugov_register);