/*
 * linux/drivers/cpufreq/cpufreq.c
 *
 * Copyright (C) 2001 Russell King
 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *	Added handling for CPU hotplug
 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *	Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
#include <trace/events/power.h>
/*
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its locking. The rwlock
 * below also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
static DEFINE_RWLOCK(cpufreq_driver_lock);
DEFINE_MUTEX(cpufreq_governor_lock);
static LIST_HEAD(cpufreq_policy_list);

/* This one keeps track of the previously set governor of a removed CPU */
static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
static inline bool has_target(void)
{
	return cpufreq_driver->target_index || cpufreq_driver->target;
}

/*
 * rwsem to guarantee that cpufreq driver module doesn't unload during critical
 * sections.
 */
static DECLARE_RWSEM(cpufreq_rwsem);

/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
		unsigned int event);
static unsigned int __cpufreq_get(unsigned int cpu);
static void handle_update(struct work_struct *work);
/*
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	init_cpufreq_transition_notifier_list_called = true;
	return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);
static int off __read_mostly;
static int cpufreq_disabled(void)
{
	return off;
}
void disable_cpufreq(void)
{
	off = 1;
}
static LIST_HEAD(cpufreq_governor_list);
static DEFINE_MUTEX(cpufreq_governor_mutex);

bool have_governor_per_policy(void)
{
	return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);

struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
	if (have_governor_per_policy())
		return &policy->kobj;
	else
		return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	u64 idle_time;
	u64 cur_wall_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = cputime_to_usecs(cur_wall_time);

	return cputime_to_usecs(idle_time);
}

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

	if (idle_time == -1ULL)
		return get_cpu_idle_time_jiffy(cpu, wall);
	else if (!io_busy)
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
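/*
 * Illustrative use (a sketch of how governors consume this, not code from
 * this file): sample twice and derive a load percentage from the deltas.
 *
 *	u64 wall_prev, wall_now, idle_prev, idle_now;
 *
 *	idle_prev = get_cpu_idle_time(cpu, &wall_prev, 0);
 *	...				// let some time pass
 *	idle_now = get_cpu_idle_time(cpu, &wall_now, 0);
 *	load = 100 * ((wall_now - wall_prev) - (idle_now - idle_prev)) /
 *			(wall_now - wall_prev);
 */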
/*
 * This is a generic cpufreq init() routine which can be used by cpufreq
 * drivers of SMP systems. It will do the following:
 * - validate & show the frequency table passed in
 * - set the policy's transition latency
 * - fill policy->cpus with all possible CPUs
 */
int cpufreq_generic_init(struct cpufreq_policy *policy,
		struct cpufreq_frequency_table *table,
		unsigned int transition_latency)
{
	int ret;

	ret = cpufreq_table_validate_and_show(policy, table);
	if (ret) {
		pr_err("%s: invalid frequency table: %d\n", __func__, ret);
		return ret;
	}

	policy->cpuinfo.transition_latency = transition_latency;

	/*
	 * The driver only supports the SMP configuration where all processors
	 * share the clock and voltage.
	 */
	cpumask_setall(policy->cpus);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);
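/*
 * Illustrative driver-side use (the foo_* names are assumptions, not part of
 * this file): a platform driver's ->init() callback typically just forwards
 * its frequency table and transition latency:
 *
 *	static int foo_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		policy->clk = foo_clk;
 *		return cpufreq_generic_init(policy, foo_freq_table, 100000);
 *	}
 */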
unsigned int cpufreq_generic_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	if (!policy || IS_ERR(policy->clk)) {
		pr_err("%s: No %s associated to cpu: %d\n", __func__,
				policy ? "clk" : "policy", cpu);
		return 0;
	}

	return clk_get_rate(policy->clk) / 1000;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_get);

struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = NULL;
	unsigned long flags;

	if (cpufreq_disabled() || (cpu >= nr_cpu_ids))
		return NULL;

	if (!down_read_trylock(&cpufreq_rwsem))
		return NULL;

	/* get the cpufreq driver */
	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver) {
		/* get the CPU */
		policy = per_cpu(cpufreq_cpu_data, cpu);
		if (policy)
			kobject_get(&policy->kobj);
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!policy)
		up_read(&cpufreq_rwsem);

	return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
	if (cpufreq_disabled())
		return;

	kobject_put(&policy->kobj);
	up_read(&cpufreq_rwsem);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
#ifndef CONFIG_SMP
static unsigned long l_p_j_ref;
static unsigned int l_p_j_ref_freq;

static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
			 l_p_j_ref, l_p_j_ref_freq);
	}
	if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
			 loops_per_jiffy, ci->new);
	}
}
#else
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	return;
}
#endif
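/*
 * Worked example with assumed numbers: if l_p_j_ref = 4997120 was saved at
 * l_p_j_ref_freq = 1000000 kHz, a POSTCHANGE to 800000 kHz rescales
 * loops_per_jiffy to roughly 4997120 * 800000 / 1000000 = 3997696.
 */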
static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		 state, freqs->new);

	switch (state) {

	case CPUFREQ_PRECHANGE:
		/*
		 * Detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
					 freqs->old, policy->cur);
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
			 (unsigned long)freqs->cpu);
		trace_cpu_frequency(freqs->new, freqs->cpu);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}
/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
void cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	for_each_cpu(freqs->cpu, policy->cpus)
		__cpufreq_notify_transition(policy, freqs, state);
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);

/* Do post notifications when there are chances that transition has failed */
void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
	if (!transition_failed)
		return;

	swap(freqs->old, freqs->new);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
}
EXPORT_SYMBOL_GPL(cpufreq_notify_post_transition);
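/*
 * Illustrative driver-side sequence (a sketch, not code from this file): a
 * driver without CPUFREQ_ASYNC_NOTIFICATION brackets its hardware write with
 * the two notifications and reports failure via the post-transition helper:
 *
 *	freqs.old = policy->cur;
 *	freqs.new = target_freq;
 *	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 *	ret = foo_write_frequency(target_freq);	// assumed hardware helper
 *	cpufreq_notify_post_transition(policy, &freqs, ret);
 */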
/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/
static ssize_t show_boost(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	int ret, enable;

	ret = sscanf(buf, "%d", &enable);
	if (ret != 1 || enable < 0 || enable > 1)
		return -EINVAL;

	if (cpufreq_boost_trigger_state(enable)) {
		pr_err("%s: Cannot %s BOOST!\n", __func__,
		       enable ? "enable" : "disable");
		return -EINVAL;
	}

	pr_debug("%s: cpufreq BOOST %s\n", __func__,
		 enable ? "enabled" : "disabled");

	return count;
}
define_one_global_rw(boost);
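/*
 * Note: when the driver reports boost support, this attribute is registered
 * with cpufreq_sysfs_create_file(&boost.attr) under the global cpufreq
 * kobject, i.e. it appears as /sys/devices/system/cpu/cpufreq/boost; writing
 * 0 or 1 there lands in store_boost() and cpufreq_boost_trigger_state().
 */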
static struct cpufreq_governor *__find_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	list_for_each_entry(t, &cpufreq_governor_list, governor_list)
		if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
			return t;

	return NULL;
}

/**
 * cpufreq_parse_governor - parse a governor string
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
				struct cpufreq_governor **governor)
{
	int err = -EINVAL;

	if (!cpufreq_driver)
		goto out;

	if (cpufreq_driver->setpolicy) {
		if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_PERFORMANCE;
			err = 0;
		} else if (!strnicmp(str_governor, "powersave",
						CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_POWERSAVE;
			err = 0;
		}
	} else if (has_target()) {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);

		t = __find_governor(str_governor);

		if (t == NULL) {
			int ret;

			mutex_unlock(&cpufreq_governor_mutex);
			ret = request_module("cpufreq_%s", str_governor);
			mutex_lock(&cpufreq_governor_mutex);

			if (ret == 0)
				t = __find_governor(str_governor);
		}

		if (t != NULL) {
			*governor = t;
			err = 0;
		}

		mutex_unlock(&cpufreq_governor_mutex);
	}
out:
	return err;
}
/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);
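/*
 * For illustration, show_one(scaling_min_freq, min) above expands to:
 *
 *	static ssize_t show_scaling_min_freq
 *	(struct cpufreq_policy *policy, char *buf)
 *	{
 *		return sprintf(buf, "%u\n", policy->min);
 *	}
 */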
static int cpufreq_set_policy(struct cpufreq_policy *policy,
				struct cpufreq_policy *new_policy);

/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)			\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	int ret;							\
	struct cpufreq_policy new_policy;				\
									\
	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
	if (ret)							\
		return -EINVAL;						\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	ret = cpufreq_set_policy(policy, &new_policy);			\
	policy->user_policy.object = policy->object;			\
									\
	return ret ? ret : count;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);
/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
					char *buf)
{
	unsigned int cur_freq = __cpufreq_get(policy->cpu);
	if (!cur_freq)
		return sprintf(buf, "<unknown>");
	return sprintf(buf, "%u\n", cur_freq);
}

/**
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sprintf(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sprintf(buf, "performance\n");
	else if (policy->governor)
		return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
				policy->governor->name);
	return -EINVAL;
}

/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	int ret;
	char	str_governor[16];
	struct cpufreq_policy new_policy;

	ret = cpufreq_get_policy(&new_policy, policy->cpu);
	if (ret)
		return ret;

	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
						&new_policy.governor))
		return -EINVAL;

	ret = cpufreq_set_policy(policy, &new_policy);

	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret)
		return ret;
	else
		return count;
}
/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}

/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!has_target()) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}

ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (i)
			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
		if (i >= (PAGE_SIZE - 5))
			break;
	}
	i += sprintf(&buf[i], "\n");
	return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->related_cpus, buf);
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->cpus, buf);
}

static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int freq = 0;
	unsigned int ret;

	if (!policy->governor || !policy->governor->store_setspeed)
		return -EINVAL;

	ret = sscanf(buf, "%u", &freq);
	if (ret != 1)
		return -EINVAL;

	policy->governor->store_setspeed(policy, freq);

	return count;
}

static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sprintf(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}

/**
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
	unsigned int limit;
	int ret;
	if (cpufreq_driver->bios_limit) {
		ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
		if (!ret)
			return sprintf(buf, "%u\n", limit);
	}
	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}
cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);

static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};

#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret;

	if (!down_read_trylock(&cpufreq_rwsem))
		return -EINVAL;

	down_read(&policy->rwsem);

	if (fattr->show)
		ret = fattr->show(policy, buf);
	else
		ret = -EIO;

	up_read(&policy->rwsem);
	up_read(&cpufreq_rwsem);

	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;

	get_online_cpus();

	if (!cpu_online(policy->cpu))
		goto unlock;

	if (!down_read_trylock(&cpufreq_rwsem))
		goto unlock;

	down_write(&policy->rwsem);

	if (fattr->store)
		ret = fattr->store(policy, buf, count);
	else
		ret = -EIO;

	up_write(&policy->rwsem);

	up_read(&cpufreq_rwsem);
unlock:
	put_online_cpus();

	return ret;
}
static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

static struct kobj_type ktype_cpufreq = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= cpufreq_sysfs_release,
};

struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

static int cpufreq_global_kobject_usage;

int cpufreq_get_global_kobject(void)
{
	if (!cpufreq_global_kobject_usage++)
		return kobject_add(cpufreq_global_kobject,
				&cpu_subsys.dev_root->kobj, "%s", "cpufreq");

	return 0;
}
EXPORT_SYMBOL(cpufreq_get_global_kobject);

void cpufreq_put_global_kobject(void)
{
	if (!--cpufreq_global_kobject_usage)
		kobject_del(cpufreq_global_kobject);
}
EXPORT_SYMBOL(cpufreq_put_global_kobject);

int cpufreq_sysfs_create_file(const struct attribute *attr)
{
	int ret = cpufreq_get_global_kobject();

	if (!ret) {
		ret = sysfs_create_file(cpufreq_global_kobject, attr);
		if (ret)
			cpufreq_put_global_kobject();
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_sysfs_create_file);

void cpufreq_sysfs_remove_file(const struct attribute *attr)
{
	sysfs_remove_file(cpufreq_global_kobject, attr);
	cpufreq_put_global_kobject();
}
EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
/* symlink affected CPUs */
static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
{
	unsigned int j;
	int ret = 0;

	for_each_cpu(j, policy->cpus) {
		struct device *cpu_dev;

		if (j == policy->cpu)
			continue;

		pr_debug("Adding link for CPU: %u\n", j);
		cpu_dev = get_cpu_device(j);
		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
					"cpufreq");
		if (ret)
			break;
	}
	return ret;
}

static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
				     struct device *dev)
{
	struct freq_attr **drv_attr;
	int ret = 0;

	/* prepare interface data */
	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
				   &dev->kobj, "cpufreq");
	if (ret)
		return ret;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while ((drv_attr) && (*drv_attr)) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			goto err_out_kobj_put;
		drv_attr++;
	}
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}

	ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
	if (ret)
		goto err_out_kobj_put;

	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			goto err_out_kobj_put;
	}

	ret = cpufreq_add_dev_symlink(policy);
	if (ret)
		goto err_out_kobj_put;

	return ret;

err_out_kobj_put:
	kobject_put(&policy->kobj);
	wait_for_completion(&policy->kobj_unregister);
	return ret;
}
static void cpufreq_init_policy(struct cpufreq_policy *policy)
{
	struct cpufreq_governor *gov = NULL;
	struct cpufreq_policy new_policy;
	int ret = 0;

	memcpy(&new_policy, policy, sizeof(*policy));

	/* Update governor of new_policy to the governor used before hotplug */
	gov = __find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu));
	if (gov)
		pr_debug("Restoring governor %s for cpu %d\n",
				policy->governor->name, policy->cpu);
	else
		gov = CPUFREQ_DEFAULT_GOVERNOR;

	new_policy.governor = gov;

	/* Use the default policy if it's valid. */
	if (cpufreq_driver->setpolicy)
		cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);

	/* set default policy */
	ret = cpufreq_set_policy(policy, &new_policy);
	if (ret) {
		pr_debug("setting policy failed\n");
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);
	}
}

#ifdef CONFIG_HOTPLUG_CPU
static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
				  unsigned int cpu, struct device *dev)
{
	int ret = 0;
	unsigned long flags;

	if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (ret) {
			pr_err("%s: Failed to stop governor\n", __func__);
			return ret;
		}
	}

	down_write(&policy->rwsem);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpumask_set_cpu(cpu, policy->cpus);
	per_cpu(cpufreq_cpu_data, cpu) = policy;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	up_write(&policy->rwsem);

	if (has_target()) {
		if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
			(ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
			pr_err("%s: Failed to start governor\n", __func__);
			return ret;
		}
	}

	return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
}
#endif
static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	unsigned long flags;

	read_lock_irqsave(&cpufreq_driver_lock, flags);

	policy = per_cpu(cpufreq_cpu_data_fallback, cpu);

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (policy)
		policy->governor = NULL;

	return policy;
}

static struct cpufreq_policy *cpufreq_policy_alloc(void)
{
	struct cpufreq_policy *policy;

	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
	if (!policy)
		return NULL;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	INIT_LIST_HEAD(&policy->policy_list);
	init_rwsem(&policy->rwsem);

	return policy;

err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);

	return NULL;
}

static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
{
	struct kobject *kobj;
	struct completion *cmp;

	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_REMOVE_POLICY, policy);

	down_read(&policy->rwsem);
	kobj = &policy->kobj;
	cmp = &policy->kobj_unregister;
	up_read(&policy->rwsem);
	kobject_put(kobj);

	/*
	 * We need to make sure that the underlying kobj is
	 * actually not referenced anymore by anybody before we
	 * proceed with unloading.
	 */
	pr_debug("waiting for dropping of refcount\n");
	wait_for_completion(cmp);
	pr_debug("wait complete\n");
}

static void cpufreq_policy_free(struct cpufreq_policy *policy)
{
	free_cpumask_var(policy->related_cpus);
	free_cpumask_var(policy->cpus);
	kfree(policy);
}

static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
	if (WARN_ON(cpu == policy->cpu))
		return;

	down_write(&policy->rwsem);

	policy->last_cpu = policy->cpu;
	policy->cpu = cpu;

	up_write(&policy->rwsem);

	cpufreq_frequency_table_update_policy_cpu(policy);
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_UPDATE_POLICY_CPU, policy);
}
static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
			     bool frozen)
{
	unsigned int j, cpu = dev->id;
	int ret = -ENOMEM;
	struct cpufreq_policy *policy;
	unsigned long flags;
#ifdef CONFIG_HOTPLUG_CPU
	struct cpufreq_policy *tpolicy;
#endif

	if (cpu_is_offline(cpu))
		return 0;

	pr_debug("adding CPU %u\n", cpu);

#ifdef CONFIG_SMP
	/*
	 * Check whether a different CPU already registered this
	 * CPU because it is in the same boat.
	 */
	policy = cpufreq_cpu_get(cpu);
	if (unlikely(policy)) {
		cpufreq_cpu_put(policy);
		return 0;
	}
#endif

	if (!down_read_trylock(&cpufreq_rwsem))
		return 0;

#ifdef CONFIG_HOTPLUG_CPU
	/* Check if this cpu was hot-unplugged earlier and has siblings */
	read_lock_irqsave(&cpufreq_driver_lock, flags);
	list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) {
		if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) {
			read_unlock_irqrestore(&cpufreq_driver_lock, flags);
			ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev);
			up_read(&cpufreq_rwsem);
			return ret;
		}
	}
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
#endif

	/*
	 * Restore the saved policy when doing light-weight init and fall back
	 * to the full init if that fails.
	 */
	policy = frozen ? cpufreq_policy_restore(cpu) : NULL;
	if (!policy) {
		policy = cpufreq_policy_alloc();
		if (!policy)
			goto nomem_out;
	}

	/*
	 * In the resume path, since we restore a saved policy, the assignment
	 * to policy->cpu is like an update of the existing policy, rather than
	 * the creation of a brand new one. So we need to perform this update
	 * by invoking update_policy_cpu().
	 */
	if (frozen && cpu != policy->cpu)
		update_policy_cpu(policy, cpu);
	else
		policy->cpu = cpu;

	cpumask_copy(policy->cpus, cpumask_of(cpu));

	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	/*
	 * Call driver. From then on the cpufreq driver must be able
	 * to accept all calls to ->verify and ->setpolicy for this CPU.
	 */
	ret = cpufreq_driver->init(policy);
	if (ret) {
		pr_debug("initialization failed\n");
		goto err_set_policy_cpu;
	}

	/* related_cpus should at least include policy->cpus */
	cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);

	/*
	 * affected cpus must always be the ones that are online. We aren't
	 * managing offline cpus here.
	 */
	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

	if (!frozen) {
		policy->user_policy.min = policy->min;
		policy->user_policy.max = policy->max;
	}

	down_write(&policy->rwsem);
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_cpu_data, j) = policy;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (cpufreq_driver->get) {
		policy->cur = cpufreq_driver->get(policy->cpu);
		if (!policy->cur) {
			pr_err("%s: ->get() failed\n", __func__);
			goto err_get_freq;
		}
	}

	/*
	 * Sometimes boot loaders set CPU frequency to a value outside of the
	 * frequency table present with the cpufreq core. In such cases the CPU
	 * might be unstable if it has to run at that frequency for a long
	 * duration, so it's better to set it to a frequency which is specified
	 * in the freq-table. This also makes cpufreq stats inconsistent, as
	 * cpufreq-stats would fail to register because the current frequency
	 * of the CPU isn't found in the freq-table.
	 *
	 * Because we don't want this change to affect the boot process badly,
	 * we go for the next freq which is >= policy->cur ('cur' must be set
	 * by now, otherwise we will end up setting freq to the lowest of the
	 * table as 'cur' is initialized to zero).
	 *
	 * We are passing target-freq as "policy->cur - 1" otherwise
	 * __cpufreq_driver_target() would simply fail, as policy->cur will be
	 * equal to target-freq.
	 */
	if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
	    && has_target()) {
		/* Are we running at unknown frequency ? */
		ret = cpufreq_frequency_table_get_index(policy, policy->cur);
		if (ret == -EINVAL) {
			/* Warn user and fix it */
			pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
				__func__, policy->cpu, policy->cur);
			ret = __cpufreq_driver_target(policy, policy->cur - 1,
				CPUFREQ_RELATION_L);

			/*
			 * Reaching here after boot in a few seconds may not
			 * mean that system will remain stable at "unknown"
			 * frequency for longer duration. Hence, a BUG_ON().
			 */
			BUG_ON(ret);
			pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
				__func__, policy->cpu, policy->cur);
		}
	}

	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_START, policy);

	if (!frozen) {
		ret = cpufreq_add_dev_interface(policy, dev);
		if (ret)
			goto err_out_unregister;
		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				CPUFREQ_CREATE_POLICY, policy);
	}

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	list_add(&policy->policy_list, &cpufreq_policy_list);
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	cpufreq_init_policy(policy);

	if (!frozen) {
		policy->user_policy.policy = policy->policy;
		policy->user_policy.governor = policy->governor;
	}
	up_write(&policy->rwsem);

	kobject_uevent(&policy->kobj, KOBJ_ADD);
	up_read(&cpufreq_rwsem);

	pr_debug("initialization complete\n");

	return 0;

err_out_unregister:
err_get_freq:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_cpu_data, j) = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (cpufreq_driver->exit)
		cpufreq_driver->exit(policy);
err_set_policy_cpu:
	if (frozen) {
		/* Do not leave stale fallback data behind. */
		per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
		cpufreq_policy_put_kobj(policy);
	}
	cpufreq_policy_free(policy);

nomem_out:
	up_read(&cpufreq_rwsem);

	return ret;
}
/**
 * cpufreq_add_dev - add a CPU device
 *
 * Adds the cpufreq interface for a CPU device.
 *
 * The Oracle says: try running cpufreq registration/unregistration concurrently
 * with cpu hotplugging and all hell will break loose. Tried to clean this
 * mess up, but more thorough testing is needed. - Mathieu
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	return __cpufreq_add_dev(dev, sif, false);
}

static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
					   unsigned int old_cpu)
{
	struct device *cpu_dev;
	int ret;

	/* first sibling now owns the new sysfs dir */
	cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu));

	sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
	ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
	if (ret) {
		pr_err("%s: Failed to move kobj: %d", __func__, ret);

		down_write(&policy->rwsem);
		cpumask_set_cpu(old_cpu, policy->cpus);
		up_write(&policy->rwsem);

		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
					"cpufreq");

		return -EINVAL;
	}

	return cpu_dev->id;
}
static int __cpufreq_remove_dev_prepare(struct device *dev,
					struct subsys_interface *sif,
					bool frozen)
{
	unsigned int cpu = dev->id, cpus;
	int new_cpu, ret;
	unsigned long flags;
	struct cpufreq_policy *policy;

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	policy = per_cpu(cpufreq_cpu_data, cpu);

	/* Save the policy somewhere when doing a light-weight tear-down */
	if (frozen)
		per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return -EINVAL;
	}

	if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (ret) {
			pr_err("%s: Failed to stop governor\n", __func__);
			return ret;
		}
	}

#ifdef CONFIG_HOTPLUG_CPU
	if (!cpufreq_driver->setpolicy)
		strncpy(per_cpu(cpufreq_cpu_governor, cpu),
			policy->governor->name, CPUFREQ_NAME_LEN);
#endif

	down_read(&policy->rwsem);
	cpus = cpumask_weight(policy->cpus);
	up_read(&policy->rwsem);

	if (cpu != policy->cpu) {
		sysfs_remove_link(&dev->kobj, "cpufreq");
	} else if (cpus > 1) {
		new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu);
		if (new_cpu >= 0) {
			update_policy_cpu(policy, new_cpu);

			if (!frozen) {
				pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
					 __func__, new_cpu, cpu);
			}
		}
	}

	return 0;
}

static int __cpufreq_remove_dev_finish(struct device *dev,
				       struct subsys_interface *sif,
				       bool frozen)
{
	unsigned int cpu = dev->id, cpus;
	int ret;
	unsigned long flags;
	struct cpufreq_policy *policy;

	read_lock_irqsave(&cpufreq_driver_lock, flags);
	policy = per_cpu(cpufreq_cpu_data, cpu);
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return -EINVAL;
	}

	down_write(&policy->rwsem);
	cpus = cpumask_weight(policy->cpus);

	if (cpus > 1)
		cpumask_clear_cpu(cpu, policy->cpus);
	up_write(&policy->rwsem);

	/* If cpu is last user of policy, free policy */
	if (cpus == 1) {
		if (has_target()) {
			ret = __cpufreq_governor(policy,
					CPUFREQ_GOV_POLICY_EXIT);
			if (ret) {
				pr_err("%s: Failed to exit governor\n",
				       __func__);
				return ret;
			}
		}

		if (!frozen)
			cpufreq_policy_put_kobj(policy);

		/*
		 * Perform the ->exit() even during light-weight tear-down,
		 * since this is a core component, and is essential for the
		 * subsequent light-weight ->init() to succeed.
		 */
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);

		/* Remove policy from list of active policies */
		write_lock_irqsave(&cpufreq_driver_lock, flags);
		list_del(&policy->policy_list);
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);

		if (!frozen)
			cpufreq_policy_free(policy);
	} else {
		if (has_target()) {
			if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
				(ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
				pr_err("%s: Failed to start governor\n",
				       __func__);
				return ret;
			}
		}
	}

	per_cpu(cpufreq_cpu_data, cpu) = NULL;
	return 0;
}
/**
 * cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 */
static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	int ret;

	if (cpu_is_offline(cpu))
		return 0;

	ret = __cpufreq_remove_dev_prepare(dev, sif, false);

	if (!ret)
		ret = __cpufreq_remove_dev_finish(dev, sif, false);

	return ret;
}

static void handle_update(struct work_struct *work)
{
	struct cpufreq_policy *policy =
		container_of(work, struct cpufreq_policy, update);
	unsigned int cpu = policy->cpu;
	pr_debug("handle_update for cpu %u called\n", cpu);
	cpufreq_update_policy(cpu);
}
/**
 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
 * in deep trouble.
 * @cpu: cpu number
 * @old_freq: CPU frequency the kernel thinks the CPU runs at
 * @new_freq: CPU frequency the CPU actually runs at
 *
 * We adjust to current frequency first, and need to clean up later.
 * So either call to cpufreq_update_policy() or schedule handle_update()).
 */
static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
				unsigned int new_freq)
{
	struct cpufreq_policy *policy;
	struct cpufreq_freqs freqs;
	unsigned long flags;

	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
		 old_freq, new_freq);

	freqs.old = old_freq;
	freqs.new = new_freq;

	read_lock_irqsave(&cpufreq_driver_lock, flags);
	policy = per_cpu(cpufreq_cpu_data, cpu);
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
}

/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
 */
unsigned int cpufreq_quick_get(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	unsigned int ret_freq = 0;

	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
		return cpufreq_driver->get(cpu);

	policy = cpufreq_cpu_get(cpu);
	if (policy) {
		ret_freq = policy->cur;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);

/**
 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
 * @cpu: CPU number
 *
 * Just return the max possible frequency for a given CPU.
 */
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		ret_freq = policy->max;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get_max);
static unsigned int __cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
	unsigned int ret_freq = 0;

	if (!cpufreq_driver->get)
		return ret_freq;

	ret_freq = cpufreq_driver->get(cpu);

	if (ret_freq && policy->cur &&
		!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
		/* verify no discrepancy between actual and
					saved value exists */
		if (unlikely(ret_freq != policy->cur)) {
			cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
			schedule_work(&policy->update);
		}
	}

	return ret_freq;
}

/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the CPU current (static) CPU frequency
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		down_read(&policy->rwsem);
		ret_freq = __cpufreq_get(cpu);
		up_read(&policy->rwsem);

		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);

static struct subsys_interface cpufreq_interface = {
	.name		= "cpufreq",
	.subsys		= &cpu_subsys,
	.add_dev	= cpufreq_add_dev,
	.remove_dev	= cpufreq_remove_dev,
};
/**
 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
 *
 * This function is only executed for the boot processor.  The other CPUs
 * have been put offline by means of CPU hotplug.
 */
static int cpufreq_bp_suspend(void)
{
	int ret = 0;

	int cpu = smp_processor_id();
	struct cpufreq_policy *policy;

	pr_debug("suspending cpu %u\n", cpu);

	/* If there's no policy for the boot CPU, we have nothing to do. */
	policy = cpufreq_cpu_get(cpu);
	if (!policy)
		return 0;

	if (cpufreq_driver->suspend) {
		ret = cpufreq_driver->suspend(policy);
		if (ret)
			printk(KERN_ERR "cpufreq: suspend failed in ->suspend step on CPU %u\n",
			       policy->cpu);
	}

	cpufreq_cpu_put(policy);
	return ret;
}

/**
 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
 *
 *	1.) resume CPUfreq hardware support (cpufreq_driver->resume())
 *	2.) schedule call cpufreq_update_policy() ASAP as interrupts are
 *	    restored. It will verify that the current freq is in sync with
 *	    what we believe it to be. This is a bit later than when it
 *	    should be, but nonetheless it's better than calling
 *	    cpufreq_driver->get() here which might re-enable interrupts...
 *
 * This function is only executed for the boot CPU.  The other CPUs have not
 * been turned on yet.
 */
static void cpufreq_bp_resume(void)
{
	int ret = 0;

	int cpu = smp_processor_id();
	struct cpufreq_policy *policy;

	pr_debug("resuming cpu %u\n", cpu);

	/* If there's no policy for the boot CPU, we have nothing to do. */
	policy = cpufreq_cpu_get(cpu);
	if (!policy)
		return;

	if (cpufreq_driver->resume) {
		ret = cpufreq_driver->resume(policy);
		if (ret) {
			printk(KERN_ERR "cpufreq: resume failed in ->resume step on CPU %u\n",
			       policy->cpu);
			goto fail;
		}
	}

	schedule_work(&policy->update);

fail:
	cpufreq_cpu_put(policy);
}

static struct syscore_ops cpufreq_syscore_ops = {
	.suspend	= cpufreq_bp_suspend,
	.resume		= cpufreq_bp_resume,
};

/**
 * cpufreq_get_current_driver - return current driver's name
 *
 * Return the name string of the currently loaded cpufreq driver
 * or NULL, if none.
 */
const char *cpufreq_get_current_driver(void)
{
	if (cpufreq_driver)
		return cpufreq_driver->name;

	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
/*********************************************************************
 *                     NOTIFIER LISTS INTERFACE                      *
 *********************************************************************/

/**
 * cpufreq_register_notifier - register a driver with cpufreq
 * @nb: notifier function to register
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Add a driver to one of two lists: either a list of drivers that
 * are notified about clock rate changes (once before and once after
 * the transition), or a list of drivers that are notified about
 * changes in cpufreq policy.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_register.
 */
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	WARN_ON(!init_cpufreq_transition_notifier_list_called);

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_register(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_register(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);
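/*
 * Illustrative client (a sketch, not code from this file): a transition
 * notifier receives one PRECHANGE and one POSTCHANGE call per frequency
 * change, with a struct cpufreq_freqs describing the transition.
 *
 *	static int my_transition_cb(struct notifier_block *nb,
 *				    unsigned long event, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (event == CPUFREQ_POSTCHANGE)
 *			pr_info("cpu%u: %u -> %u kHz\n", freqs->cpu,
 *				freqs->old, freqs->new);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_transition_cb,
 *	};
 *
 *	cpufreq_register_notifier(&my_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */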
/**
 * cpufreq_unregister_notifier - unregister a driver with cpufreq
 * @nb: notifier block to be unregistered
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Remove a driver from the CPU frequency notifier list.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_unregister.
 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_unregister(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_unregister(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);
/*********************************************************************
 *                              GOVERNORS                            *
 *********************************************************************/

int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	int retval = -EINVAL;
	unsigned int old_target_freq = target_freq;

	if (cpufreq_disabled())
		return -ENODEV;

	/* Make sure that target_freq is within supported range */
	if (target_freq > policy->max)
		target_freq = policy->max;
	if (target_freq < policy->min)
		target_freq = policy->min;

	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
		 policy->cpu, target_freq, relation, old_target_freq);

	/*
	 * This might look like a redundant call as we are checking it again
	 * after finding index. But it is left intentionally for cases where
	 * exactly same freq is called again and so we can save on few function
	 * calls.
	 */
	if (target_freq == policy->cur)
		return 0;

	if (cpufreq_driver->target)
		retval = cpufreq_driver->target(policy, target_freq, relation);
	else if (cpufreq_driver->target_index) {
		struct cpufreq_frequency_table *freq_table;
		struct cpufreq_freqs freqs;
		bool notify;
		int index;

		freq_table = cpufreq_frequency_get_table(policy->cpu);
		if (unlikely(!freq_table)) {
			pr_err("%s: Unable to find freq_table\n", __func__);
			goto out;
		}

		retval = cpufreq_frequency_table_target(policy, freq_table,
				target_freq, relation, &index);
		if (unlikely(retval)) {
			pr_err("%s: Unable to find matching freq\n", __func__);
			goto out;
		}

		if (freq_table[index].frequency == policy->cur) {
			retval = 0;
			goto out;
		}

		notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);

		if (notify) {
			freqs.old = policy->cur;
			freqs.new = freq_table[index].frequency;
			freqs.flags = 0;

			pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
				 __func__, policy->cpu, freqs.old, freqs.new);

			cpufreq_notify_transition(policy, &freqs,
					CPUFREQ_PRECHANGE);
		}

		retval = cpufreq_driver->target_index(policy, index);
		if (retval)
			pr_err("%s: Failed to change cpu frequency: %d\n",
			       __func__, retval);

		if (notify)
			cpufreq_notify_post_transition(policy, &freqs, retval);
	}

out:
	return retval;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	int ret = -EINVAL;

	down_write(&policy->rwsem);

	ret = __cpufreq_driver_target(policy, target_freq, relation);

	up_write(&policy->rwsem);

	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);

/*
 * when "event" is CPUFREQ_GOV_LIMITS
 */
static int __cpufreq_governor(struct cpufreq_policy *policy,
					unsigned int event)
{
	int ret;

	/*
	 * Only needs to be defined when the default governor is known to have
	 * latency restrictions, like e.g. conservative or ondemand.
	 * That this is the case is already ensured in Kconfig.
	 */
#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
	struct cpufreq_governor *gov = &cpufreq_gov_performance;
#else
	struct cpufreq_governor *gov = NULL;
#endif
	if (policy->governor->max_transition_latency &&
	    policy->cpuinfo.transition_latency >
	    policy->governor->max_transition_latency) {
		if (!gov)
			return -EINVAL;
		else {
			printk(KERN_WARNING "%s governor failed, too long transition latency of HW, fallback to %s governor\n",
			       policy->governor->name, gov->name);
			policy->governor = gov;
		}
	}

	if (event == CPUFREQ_GOV_POLICY_INIT)
		if (!try_module_get(policy->governor->owner))
			return -EINVAL;

	pr_debug("__cpufreq_governor for CPU %u, event %u\n",
		 policy->cpu, event);

	mutex_lock(&cpufreq_governor_lock);
	if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
	    || (!policy->governor_enabled
	    && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
		mutex_unlock(&cpufreq_governor_lock);
		return -EBUSY;
	}

	if (event == CPUFREQ_GOV_STOP)
		policy->governor_enabled = false;
	else if (event == CPUFREQ_GOV_START)
		policy->governor_enabled = true;

	mutex_unlock(&cpufreq_governor_lock);

	ret = policy->governor->governor(policy, event);

	if (!ret) {
		if (event == CPUFREQ_GOV_POLICY_INIT)
			policy->governor->initialized++;
		else if (event == CPUFREQ_GOV_POLICY_EXIT)
			policy->governor->initialized--;
	} else {
		/* Restore original values */
		mutex_lock(&cpufreq_governor_lock);
		if (event == CPUFREQ_GOV_STOP)
			policy->governor_enabled = true;
		else if (event == CPUFREQ_GOV_START)
			policy->governor_enabled = false;
		mutex_unlock(&cpufreq_governor_lock);
	}

	if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
			((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
		module_put(policy->governor->owner);

	return ret;
}
int cpufreq_register_governor(struct cpufreq_governor *governor)
{
	int err;

	if (!governor)
		return -EINVAL;

	if (cpufreq_disabled())
		return -ENODEV;

	mutex_lock(&cpufreq_governor_mutex);

	governor->initialized = 0;
	err = -EBUSY;
	if (__find_governor(governor->name) == NULL) {
		err = 0;
		list_add(&governor->governor_list, &cpufreq_governor_list);
	}

	mutex_unlock(&cpufreq_governor_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(cpufreq_register_governor);

void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
#ifdef CONFIG_HOTPLUG_CPU
	int cpu;
#endif

	if (!governor)
		return;

	if (cpufreq_disabled())
		return;

#ifdef CONFIG_HOTPLUG_CPU
	for_each_present_cpu(cpu) {
		if (cpu_online(cpu))
			continue;
		if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
			strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
	}
#endif

	mutex_lock(&cpufreq_governor_mutex);
	list_del(&governor->governor_list);
	mutex_unlock(&cpufreq_governor_mutex);
	return;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
/*********************************************************************
 *                          POLICY INTERFACE                         *
 *********************************************************************/

/**
 * cpufreq_get_policy - get the current cpufreq_policy
 * @policy: struct cpufreq_policy into which the current cpufreq_policy
 *	is written
 * @cpu: CPU to find the policy for
 *
 * Reads the current cpufreq policy.
 */
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
{
	struct cpufreq_policy *cpu_policy;
	if (!policy)
		return -EINVAL;

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	memcpy(policy, cpu_policy, sizeof(*policy));

	cpufreq_cpu_put(cpu_policy);
	return 0;
}
EXPORT_SYMBOL(cpufreq_get_policy);
/*
 * policy : current policy.
 * new_policy: policy to be set.
 */
static int cpufreq_set_policy(struct cpufreq_policy *policy,
				struct cpufreq_policy *new_policy)
{
	struct cpufreq_governor *old_gov;
	int ret;

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n", new_policy->cpu,
		 new_policy->min, new_policy->max);

	memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));

	if (new_policy->min > policy->max || new_policy->max < policy->min)
		return -EINVAL;

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		return ret;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, new_policy);

	/* adjust if necessary - hardware incompatibility */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_INCOMPATIBLE, new_policy);

	/*
	 * verify the cpu speed can be set within this limit, which might be
	 * different to the first one
	 */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		return ret;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, new_policy);

	policy->min = new_policy->min;
	policy->max = new_policy->max;

	pr_debug("new min and max freqs are %u - %u kHz\n",
		 policy->min, policy->max);

	if (cpufreq_driver->setpolicy) {
		policy->policy = new_policy->policy;
		pr_debug("setting range\n");
		return cpufreq_driver->setpolicy(new_policy);
	}

	if (new_policy->governor == policy->governor)
		goto out;

	pr_debug("governor switch\n");

	/* save old, working values */
	old_gov = policy->governor;
	/* end old governor */
	if (old_gov) {
		__cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		up_write(&policy->rwsem);
		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
		down_write(&policy->rwsem);
	}

	/* start new governor */
	policy->governor = new_policy->governor;
	if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
		if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
			goto out;

		up_write(&policy->rwsem);
		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
		down_write(&policy->rwsem);
	}

	/* new governor failed, so re-start old one */
	pr_debug("starting governor %s failed\n", policy->governor->name);
	if (old_gov) {
		policy->governor = old_gov;
		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
		__cpufreq_governor(policy, CPUFREQ_GOV_START);
	}

	return -EINVAL;

 out:
	pr_debug("governor: change or update limits\n");
	return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
}
/**
 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
 * @cpu: CPU which shall be re-evaluated
 *
 * Useful for policy notifiers which have different necessities
 * at different times.
 */
int cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct cpufreq_policy new_policy;
	int ret;

	if (!policy) {
		ret = -ENODEV;
		goto no_policy;
	}

	down_write(&policy->rwsem);

	pr_debug("updating policy for CPU %u\n", cpu);
	memcpy(&new_policy, policy, sizeof(*policy));
	new_policy.min = policy->user_policy.min;
	new_policy.max = policy->user_policy.max;
	new_policy.policy = policy->user_policy.policy;
	new_policy.governor = policy->user_policy.governor;

	/*
	 * BIOS might change freq behind our back
	 * -> ask driver for current freq and notify governors about a change
	 */
	if (cpufreq_driver->get) {
		new_policy.cur = cpufreq_driver->get(cpu);
		if (WARN_ON(!new_policy.cur)) {
			ret = -EIO;
			goto no_policy;
		}

		if (!policy->cur) {
			pr_debug("Driver did not initialize current freq");
			policy->cur = new_policy.cur;
		} else {
			if (policy->cur != new_policy.cur && has_target())
				cpufreq_out_of_sync(cpu, policy->cur,
								new_policy.cur);
		}
	}

	ret = cpufreq_set_policy(policy, &new_policy);

	up_write(&policy->rwsem);

	cpufreq_cpu_put(policy);
no_policy:
	return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);

static int cpufreq_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct device *dev;
	bool frozen = false;

	dev = get_cpu_device(cpu);
	if (dev) {

		if (action & CPU_TASKS_FROZEN)
			frozen = true;

		switch (action & ~CPU_TASKS_FROZEN) {
		case CPU_ONLINE:
			__cpufreq_add_dev(dev, NULL, frozen);
			break;

		case CPU_DOWN_PREPARE:
			__cpufreq_remove_dev_prepare(dev, NULL, frozen);
			break;

		case CPU_POST_DEAD:
			__cpufreq_remove_dev_finish(dev, NULL, frozen);
			break;

		case CPU_DOWN_FAILED:
			__cpufreq_add_dev(dev, NULL, frozen);
			break;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block __refdata cpufreq_cpu_notifier = {
	.notifier_call = cpufreq_cpu_callback,
};
/*********************************************************************
 *                              BOOST                                *
 *********************************************************************/
static int cpufreq_boost_set_sw(int state)
{
	struct cpufreq_frequency_table *freq_table;
	struct cpufreq_policy *policy;
	int ret = -EINVAL;

	list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
		freq_table = cpufreq_frequency_get_table(policy->cpu);
		if (freq_table) {
			ret = cpufreq_frequency_table_cpuinfo(policy,
							freq_table);
			if (ret) {
				pr_err("%s: Policy frequency update failed\n",
				       __func__);
				break;
			}
			policy->user_policy.max = policy->max;
			__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
		}
	}

	return ret;
}

int cpufreq_boost_trigger_state(int state)
{
	unsigned long flags;
	int ret = 0;

	if (cpufreq_driver->boost_enabled == state)
		return 0;

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver->boost_enabled = state;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = cpufreq_driver->set_boost(state);
	if (ret) {
		write_lock_irqsave(&cpufreq_driver_lock, flags);
		cpufreq_driver->boost_enabled = !state;
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);

		pr_err("%s: Cannot %s BOOST\n", __func__,
		       state ? "enable" : "disable");
	}

	return ret;
}

int cpufreq_boost_supported(void)
{
	if (likely(cpufreq_driver))
		return cpufreq_driver->boost_supported;

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_supported);

int cpufreq_boost_enabled(void)
{
	return cpufreq_driver->boost_enabled;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
/*********************************************************************
 *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
 *********************************************************************/

/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EBUSY when another driver got here first
 * (and isn't unregistered in the meantime).
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    !(driver_data->setpolicy || driver_data->target_index ||
		    driver_data->target))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return -EEXIST;
	}
	cpufreq_driver = driver_data;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (cpufreq_boost_supported()) {
		/*
		 * Check if driver provides function to enable boost -
		 * if not, use cpufreq_boost_set_sw as default
		 */
		if (!cpufreq_driver->set_boost)
			cpufreq_driver->set_boost = cpufreq_boost_set_sw;

		ret = cpufreq_sysfs_create_file(&boost.attr);
		if (ret) {
			pr_err("%s: cannot register global BOOST sysfs file\n",
			       __func__);
			goto err_null_driver;
		}
	}

	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_boost_unreg;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
		int i;
		ret = -ENODEV;

		/* check for at least one working CPU */
		for (i = 0; i < nr_cpu_ids; i++)
			if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
				ret = 0;
				break;
			}

		/* if all ->init() calls failed, unregister */
		if (ret) {
			pr_debug("no CPU initialized for driver %s\n",
				 driver_data->name);
			goto err_if_unreg;
		}
	}

	register_hotcpu_notifier(&cpufreq_cpu_notifier);
	pr_debug("driver %s up and running\n", driver_data->name);

	return 0;
err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_boost_unreg:
	if (cpufreq_boost_supported())
		cpufreq_sysfs_remove_file(&boost.attr);
err_null_driver:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
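/*
 * Illustrative driver skeleton (the foo_* names are assumptions, not part of
 * this core): a minimal ->target_index driver fills in verify/init/target_index
 * before registering.
 *
 *	static struct cpufreq_driver foo_cpufreq_driver = {
 *		.name		= "foo-cpufreq",
 *		.verify		= cpufreq_generic_frequency_table_verify,
 *		.target_index	= foo_target_index,
 *		.get		= cpufreq_generic_get,
 *		.init		= foo_cpufreq_init,
 *		.attr		= cpufreq_generic_attr,
 *	};
 *
 *	ret = cpufreq_register_driver(&foo_cpufreq_driver);
 */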
/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	if (!cpufreq_driver || (driver != cpufreq_driver))
		return -EINVAL;

	pr_debug("unregistering driver %s\n", driver->name);

	subsys_interface_unregister(&cpufreq_interface);
	if (cpufreq_boost_supported())
		cpufreq_sysfs_remove_file(&boost.attr);

	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

	down_write(&cpufreq_rwsem);
	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpufreq_driver = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	up_write(&cpufreq_rwsem);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);

static int __init cpufreq_core_init(void)
{
	if (cpufreq_disabled())
		return -ENODEV;

	cpufreq_global_kobject = kobject_create();
	BUG_ON(!cpufreq_global_kobject);
	register_syscore_ops(&cpufreq_syscore_ops);

	return 0;
}
core_initcall(cpufreq_core_init);