/*
 *  linux/drivers/cpufreq/cpufreq.c
 *
 *  Copyright (C) 2001 Russell King
 *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *            (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
 *
 *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *	Added handling for CPU hotplug
 *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *	Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
#include <trace/events/power.h>
/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
static DEFINE_RWLOCK(cpufreq_driver_lock);
static DEFINE_MUTEX(cpufreq_governor_lock);
static LIST_HEAD(cpufreq_policy_list);

#ifdef CONFIG_HOTPLUG_CPU
/* This one keeps track of the previously set governor of a removed CPU */
static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
#endif
/*
 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
 * all cpufreq/hotplug/workqueue/etc related lock issues.
 *
 * The rules for this semaphore:
 * - Any routine that wants to read from the policy structure will
 *   do a down_read on this semaphore.
 * - Any routine that will write to the policy structure and/or may take away
 *   the policy altogether (eg. CPU hotplug), will hold this lock in write
 *   mode before doing so.
 *
 * - Governor routines that can be called in cpufreq hotplug path should not
 *   take this sem as top level hotplug notifier handler takes this.
 * - Lock should not be held across
 *     __cpufreq_governor(data, CPUFREQ_GOV_STOP);
 */
static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
#define lock_policy_rwsem(mode, cpu)					\
static int lock_policy_rwsem_##mode(int cpu)				\
{									\
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);\
	BUG_ON(!policy);						\
	down_##mode(&per_cpu(cpu_policy_rwsem, policy->cpu));		\
									\
	return 0;							\
}

lock_policy_rwsem(read, cpu);
lock_policy_rwsem(write, cpu);

#define unlock_policy_rwsem(mode, cpu)					\
static void unlock_policy_rwsem_##mode(int cpu)				\
{									\
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);\
	BUG_ON(!policy);						\
	up_##mode(&per_cpu(cpu_policy_rwsem, policy->cpu));		\
}

unlock_policy_rwsem(read, cpu);
unlock_policy_rwsem(write, cpu);
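
/*
 * Illustrative sketch (not part of the original file): the macros above
 * expand to per-mode helpers such as lock_policy_rwsem_read(cpu) and
 * lock_policy_rwsem_write(cpu), which look up the policy for @cpu and take
 * the per-CPU rwsem of policy->cpu. A hypothetical read-side caller would
 * look like:
 *
 *	if (lock_policy_rwsem_read(cpu) < 0)
 *		return -EINVAL;
 *	... read fields of per_cpu(cpufreq_cpu_data, cpu) ...
 *	unlock_policy_rwsem_read(cpu);
 */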
/*
 * rwsem to guarantee that cpufreq driver module doesn't unload during
 * critical sections.
 */
static DECLARE_RWSEM(cpufreq_rwsem);

/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
		unsigned int event);
static unsigned int __cpufreq_get(unsigned int cpu);
static void handle_update(struct work_struct *work);

/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	init_cpufreq_transition_notifier_list_called = true;
	return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);
static int off __read_mostly;
static int cpufreq_disabled(void)
{
	return off;
}
void disable_cpufreq(void)
{
	off = 1;
}
static LIST_HEAD(cpufreq_governor_list);
static DEFINE_MUTEX(cpufreq_governor_mutex);

bool have_governor_per_policy(void)
{
	return cpufreq_driver->have_governor_per_policy;
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);

struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
	if (have_governor_per_policy())
		return &policy->kobj;
	else
		return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	u64 idle_time;
	u64 cur_wall_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = cputime_to_usecs(cur_wall_time);

	return cputime_to_usecs(idle_time);
}

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

	if (idle_time == -1ULL)
		return get_cpu_idle_time_jiffy(cpu, wall);
	else if (!io_busy)
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
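
/*
 * Illustrative sketch (not part of the original file): governors typically
 * sample idle time twice and work with the deltas. The prev_idle/prev_wall
 * variables and the load formula are hypothetical; only get_cpu_idle_time()
 * is taken from this file:
 *
 *	u64 wall, idle, delta_idle, delta_wall, load;
 *
 *	idle = get_cpu_idle_time(cpu, &wall, 0);
 *	delta_idle = idle - prev_idle;
 *	delta_wall = wall - prev_wall;
 *	load = 100 * (delta_wall - delta_idle) / delta_wall;
 */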
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = NULL;
	unsigned long flags;

	if (cpufreq_disabled() || (cpu >= nr_cpu_ids))
		return NULL;

	if (!down_read_trylock(&cpufreq_rwsem))
		return NULL;

	/* get the cpufreq driver */
	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver) {
		/* get the CPU */
		policy = per_cpu(cpufreq_cpu_data, cpu);
		if (policy)
			kobject_get(&policy->kobj);
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!policy)
		up_read(&cpufreq_rwsem);

	return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
	if (cpufreq_disabled())
		return;

	kobject_put(&policy->kobj);
	up_read(&cpufreq_rwsem);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
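
/*
 * Illustrative sketch (not part of the original file): cpufreq_cpu_get()
 * takes a reference on the policy kobject and keeps cpufreq_rwsem held for
 * reading, so every successful get must be paired with cpufreq_cpu_put().
 * A hypothetical caller printing the current frequency would do:
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *
 *	if (policy) {
 *		pr_info("cpu%u runs at %u kHz\n", cpu, policy->cur);
 *		cpufreq_cpu_put(policy);
 *	}
 */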
/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
#ifndef CONFIG_SMP
static unsigned long l_p_j_ref;
static unsigned int l_p_j_ref_freq;

static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; "
			"freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
	}
	if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		pr_debug("scaling loops_per_jiffy to %lu "
			"for frequency %u kHz\n", loops_per_jiffy, ci->new);
	}
}
#else
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	return;
}
#endif
static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		state, freqs->new);

	switch (state) {

	case CPUFREQ_PRECHANGE:
		if (WARN(policy->transition_ongoing ==
					cpumask_weight(policy->cpus),
				"In middle of another frequency transition\n"))
			return;

		policy->transition_ongoing++;

		/* detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				pr_debug("Warning: CPU frequency is"
					" %u, cpufreq assumed %u kHz.\n",
					freqs->old, policy->cur);
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		if (WARN(!policy->transition_ongoing,
				"No frequency transition in progress\n"))
			return;

		policy->transition_ongoing--;

		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
			(unsigned long)freqs->cpu);
		trace_cpu_frequency(freqs->new, freqs->cpu);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}

/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
void cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	for_each_cpu(freqs->cpu, policy->cpus)
		__cpufreq_notify_transition(policy, freqs, state);
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
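
/*
 * Illustrative sketch (not part of the original file): a scaling driver
 * brackets the actual hardware change with the two notifications so that
 * notifiers and adjust_jiffies() see both edges. write_hw_freq() is a
 * hypothetical driver helper:
 *
 *	struct cpufreq_freqs freqs;
 *
 *	freqs.old = policy->cur;
 *	freqs.new = target_khz;
 *
 *	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 *	write_hw_freq(target_khz);
 *	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 */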
/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/

static struct cpufreq_governor *__find_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	list_for_each_entry(t, &cpufreq_governor_list, governor_list)
		if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
			return t;

	return NULL;
}

/**
 * cpufreq_parse_governor - parse a governor string
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
				struct cpufreq_governor **governor)
{
	int err = -EINVAL;

	if (!cpufreq_driver)
		goto out;

	if (cpufreq_driver->setpolicy) {
		if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_PERFORMANCE;
			err = 0;
		} else if (!strnicmp(str_governor, "powersave",
						CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_POWERSAVE;
			err = 0;
		}
	} else if (cpufreq_driver->target) {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);

		t = __find_governor(str_governor);

		if (t == NULL) {
			int ret;

			mutex_unlock(&cpufreq_governor_mutex);
			ret = request_module("cpufreq_%s", str_governor);
			mutex_lock(&cpufreq_governor_mutex);

			if (ret == 0)
				t = __find_governor(str_governor);
		}

		if (t != NULL) {
			*governor = t;
			err = 0;
		}

		mutex_unlock(&cpufreq_governor_mutex);
	}
out:
	return err;
}
/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);

static int __cpufreq_set_policy(struct cpufreq_policy *policy,
				struct cpufreq_policy *new_policy);

/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)			\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	int ret;							\
	struct cpufreq_policy new_policy;				\
									\
	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
	if (ret)							\
		return -EINVAL;						\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	ret = __cpufreq_set_policy(policy, &new_policy);		\
	policy->user_policy.object = policy->object;			\
									\
	return ret ? ret : count;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);
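
/*
 * Illustrative sketch (not part of the original file): show_one() and
 * store_one() are token-pasting templates. For example,
 * show_one(scaling_min_freq, min) expands to roughly:
 *
 *	static ssize_t show_scaling_min_freq(struct cpufreq_policy *policy,
 *					     char *buf)
 *	{
 *		return sprintf(buf, "%u\n", policy->min);
 *	}
 *
 * which is later wired up to the sysfs file "scaling_min_freq" through the
 * cpufreq_freq_attr_rw() definitions below.
 */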
/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
					char *buf)
{
	unsigned int cur_freq = __cpufreq_get(policy->cpu);
	if (!cur_freq)
		return sprintf(buf, "<unknown>");
	return sprintf(buf, "%u\n", cur_freq);
}

/**
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sprintf(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sprintf(buf, "performance\n");
	else if (policy->governor)
		return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
				policy->governor->name);
	return -EINVAL;
}

/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	int ret;
	char	str_governor[16];
	struct cpufreq_policy new_policy;

	ret = cpufreq_get_policy(&new_policy, policy->cpu);
	if (ret)
		return ret;

	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
						&new_policy.governor))
		return -EINVAL;

	/*
	 * Do not use cpufreq_set_policy here or the user_policy.max
	 * will be wrongly overridden
	 */
	ret = __cpufreq_set_policy(policy, &new_policy);

	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret)
		return ret;
	else
		return count;
}
/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}

/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!cpufreq_driver->target) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}

ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (i)
			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
		if (i >= (PAGE_SIZE - 5))
			break;
	}
	i += sprintf(&buf[i], "\n");
	return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->related_cpus, buf);
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->cpus, buf);
}

static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int freq = 0;
	unsigned int ret;

	if (!policy->governor || !policy->governor->store_setspeed)
		return -EINVAL;

	ret = sscanf(buf, "%u", &freq);
	if (ret != 1)
		return -EINVAL;

	policy->governor->store_setspeed(policy, freq);

	return count;
}

static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sprintf(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}

/**
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
	unsigned int limit;
	int ret;
	if (cpufreq_driver->bios_limit) {
		ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
		if (!ret)
			return sprintf(buf, "%u\n", limit);
	}
	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}
cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);

static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};

#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;

	if (!down_read_trylock(&cpufreq_rwsem))
		goto exit;

	if (lock_policy_rwsem_read(policy->cpu) < 0)
		goto up_read;

	if (fattr->show)
		ret = fattr->show(policy, buf);
	else
		ret = -EIO;

	unlock_policy_rwsem_read(policy->cpu);

up_read:
	up_read(&cpufreq_rwsem);
exit:
	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;

	get_online_cpus();

	if (!cpu_online(policy->cpu))
		goto unlock;

	if (!down_read_trylock(&cpufreq_rwsem))
		goto unlock;

	if (lock_policy_rwsem_write(policy->cpu) < 0)
		goto up_read;

	if (fattr->store)
		ret = fattr->store(policy, buf, count);
	else
		ret = -EIO;

	unlock_policy_rwsem_write(policy->cpu);

up_read:
	up_read(&cpufreq_rwsem);
unlock:
	put_online_cpus();

	return ret;
}

static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

static struct kobj_type ktype_cpufreq = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= cpufreq_sysfs_release,
};
struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

static int cpufreq_global_kobject_usage;

int cpufreq_get_global_kobject(void)
{
	if (!cpufreq_global_kobject_usage++)
		return kobject_add(cpufreq_global_kobject,
				&cpu_subsys.dev_root->kobj, "%s", "cpufreq");

	return 0;
}
EXPORT_SYMBOL(cpufreq_get_global_kobject);

void cpufreq_put_global_kobject(void)
{
	if (!--cpufreq_global_kobject_usage)
		kobject_del(cpufreq_global_kobject);
}
EXPORT_SYMBOL(cpufreq_put_global_kobject);

int cpufreq_sysfs_create_file(const struct attribute *attr)
{
	int ret = cpufreq_get_global_kobject();

	if (!ret) {
		ret = sysfs_create_file(cpufreq_global_kobject, attr);
		if (ret)
			cpufreq_put_global_kobject();
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_sysfs_create_file);

void cpufreq_sysfs_remove_file(const struct attribute *attr)
{
	sysfs_remove_file(cpufreq_global_kobject, attr);
	cpufreq_put_global_kobject();
}
EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
/* symlink affected CPUs */
static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
{
	unsigned int j;
	int ret = 0;

	for_each_cpu(j, policy->cpus) {
		struct device *cpu_dev;

		if (j == policy->cpu)
			continue;

		pr_debug("Adding link for CPU: %u\n", j);
		cpu_dev = get_cpu_device(j);
		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
					"cpufreq");
		if (ret)
			break;
	}
	return ret;
}

static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
				     struct device *dev)
{
	struct freq_attr **drv_attr;
	int ret = 0;

	/* prepare interface data */
	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
				   &dev->kobj, "cpufreq");
	if (ret)
		return ret;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while ((drv_attr) && (*drv_attr)) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			goto err_out_kobj_put;
		drv_attr++;
	}
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (cpufreq_driver->target) {
		ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			goto err_out_kobj_put;
	}

	ret = cpufreq_add_dev_symlink(policy);
	if (ret)
		goto err_out_kobj_put;

	return ret;

err_out_kobj_put:
	kobject_put(&policy->kobj);
	wait_for_completion(&policy->kobj_unregister);
	return ret;
}
static void cpufreq_init_policy(struct cpufreq_policy *policy)
{
	struct cpufreq_policy new_policy;
	int ret = 0;

	memcpy(&new_policy, policy, sizeof(*policy));
	/* assure that the starting sequence is run in __cpufreq_set_policy */
	policy->governor = NULL;

	/* set default policy */
	ret = __cpufreq_set_policy(policy, &new_policy);
	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret) {
		pr_debug("setting policy failed\n");
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);
	}
}

#ifdef CONFIG_HOTPLUG_CPU
static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
				  unsigned int cpu, struct device *dev,
				  bool frozen)
{
	int ret = 0, has_target = !!cpufreq_driver->target;
	unsigned long flags;

	if (has_target) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (ret) {
			pr_err("%s: Failed to stop governor\n", __func__);
			return ret;
		}
	}

	lock_policy_rwsem_write(policy->cpu);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpumask_set_cpu(cpu, policy->cpus);
	per_cpu(cpufreq_cpu_data, cpu) = policy;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	unlock_policy_rwsem_write(policy->cpu);

	if (has_target) {
		if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
			(ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
			pr_err("%s: Failed to start governor\n", __func__);
			return ret;
		}
	}

	/* Don't touch sysfs links during light-weight init */
	if (!frozen)
		ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");

	return ret;
}
#endif
static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	unsigned long flags;

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	policy = per_cpu(cpufreq_cpu_data_fallback, cpu);

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	return policy;
}

static struct cpufreq_policy *cpufreq_policy_alloc(void)
{
	struct cpufreq_policy *policy;

	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
	if (!policy)
		return NULL;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	INIT_LIST_HEAD(&policy->policy_list);
	return policy;

err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);

	return NULL;
}

static void cpufreq_policy_free(struct cpufreq_policy *policy)
{
	free_cpumask_var(policy->related_cpus);
	free_cpumask_var(policy->cpus);
	kfree(policy);
}
static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
			     bool frozen)
{
	unsigned int j, cpu = dev->id;
	int ret = -ENOMEM;
	struct cpufreq_policy *policy;
	unsigned long flags;
#ifdef CONFIG_HOTPLUG_CPU
	struct cpufreq_policy *tpolicy;
	struct cpufreq_governor *gov;
#endif

	if (cpu_is_offline(cpu))
		return 0;

	pr_debug("adding CPU %u\n", cpu);

#ifdef CONFIG_SMP
	/* check whether a different CPU already registered this
	 * CPU because it is in the same boat. */
	policy = cpufreq_cpu_get(cpu);
	if (unlikely(policy)) {
		cpufreq_cpu_put(policy);
		return 0;
	}
#endif

	if (!down_read_trylock(&cpufreq_rwsem))
		return 0;

#ifdef CONFIG_HOTPLUG_CPU
	/* Check if this cpu was hot-unplugged earlier and has siblings */
	read_lock_irqsave(&cpufreq_driver_lock, flags);
	list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) {
		if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) {
			read_unlock_irqrestore(&cpufreq_driver_lock, flags);
			ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev, frozen);
			up_read(&cpufreq_rwsem);
			return ret;
		}
	}
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
#endif

	if (frozen)
		/* Restore the saved policy when doing light-weight init */
		policy = cpufreq_policy_restore(cpu);
	else
		policy = cpufreq_policy_alloc();

	if (!policy)
		goto nomem_out;

	policy->cpu = cpu;
	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
	cpumask_copy(policy->cpus, cpumask_of(cpu));

	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	/* call driver. From then on the cpufreq must be able
	 * to accept all calls to ->verify and ->setpolicy for this CPU
	 */
	ret = cpufreq_driver->init(policy);
	if (ret) {
		pr_debug("initialization failed\n");
		goto err_set_policy_cpu;
	}

	/* related cpus should atleast have policy->cpus */
	cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);

	/*
	 * affected cpus must always be the one, which are online. We aren't
	 * managing offline cpus here.
	 */
	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

	policy->user_policy.min = policy->min;
	policy->user_policy.max = policy->max;

	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_START, policy);

#ifdef CONFIG_HOTPLUG_CPU
	gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
	if (gov) {
		policy->governor = gov;
		pr_debug("Restoring governor %s for cpu %d\n",
		       policy->governor->name, cpu);
	}
#endif

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_cpu_data, j) = policy;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!frozen) {
		ret = cpufreq_add_dev_interface(policy, dev);
		if (ret)
			goto err_out_unregister;
	}

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	list_add(&policy->policy_list, &cpufreq_policy_list);
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	cpufreq_init_policy(policy);

	kobject_uevent(&policy->kobj, KOBJ_ADD);
	up_read(&cpufreq_rwsem);

	pr_debug("initialization complete\n");

	return 0;

err_out_unregister:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_cpu_data, j) = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

err_set_policy_cpu:
	cpufreq_policy_free(policy);
nomem_out:
	up_read(&cpufreq_rwsem);

	return ret;
}
/**
 * cpufreq_add_dev - add a CPU device
 *
 * Adds the cpufreq interface for a CPU device.
 *
 * The Oracle says: try running cpufreq registration/unregistration
 * concurrently with cpu hotplugging and all hell will break loose. Tried
 * to clean this mess up, but more thorough testing is needed. - Mathieu
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	return __cpufreq_add_dev(dev, sif, false);
}
static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
	policy->last_cpu = policy->cpu;
	policy->cpu = cpu;

#ifdef CONFIG_CPU_FREQ_TABLE
	cpufreq_frequency_table_update_policy_cpu(policy);
#endif
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_UPDATE_POLICY_CPU, policy);
}

static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
					   unsigned int old_cpu, bool frozen)
{
	struct device *cpu_dev;
	int ret;

	/* first sibling now owns the new sysfs dir */
	cpu_dev = get_cpu_device(cpumask_first(policy->cpus));

	/* Don't touch sysfs files during light-weight tear-down */
	if (frozen)
		return cpu_dev->id;

	sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
	ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
	if (ret) {
		pr_err("%s: Failed to move kobj: %d", __func__, ret);

		WARN_ON(lock_policy_rwsem_write(old_cpu));
		cpumask_set_cpu(old_cpu, policy->cpus);
		unlock_policy_rwsem_write(old_cpu);

		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
					"cpufreq");

		return -EINVAL;
	}

	return cpu_dev->id;
}
static int __cpufreq_remove_dev_prepare(struct device *dev,
					struct subsys_interface *sif,
					bool frozen)
{
	unsigned int cpu = dev->id, cpus;
	int new_cpu, ret;
	unsigned long flags;
	struct cpufreq_policy *policy;

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	policy = per_cpu(cpufreq_cpu_data, cpu);

	/* Save the policy somewhere when doing a light-weight tear-down */
	if (frozen)
		per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return -EINVAL;
	}

	if (cpufreq_driver->target) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (ret) {
			pr_err("%s: Failed to stop governor\n", __func__);
			return ret;
		}
	}

#ifdef CONFIG_HOTPLUG_CPU
	if (!cpufreq_driver->setpolicy)
		strncpy(per_cpu(cpufreq_cpu_governor, cpu),
			policy->governor->name, CPUFREQ_NAME_LEN);
#endif

	WARN_ON(lock_policy_rwsem_write(cpu));
	cpus = cpumask_weight(policy->cpus);

	if (cpus > 1)
		cpumask_clear_cpu(cpu, policy->cpus);
	unlock_policy_rwsem_write(cpu);

	if (cpu != policy->cpu && !frozen) {
		sysfs_remove_link(&dev->kobj, "cpufreq");
	} else if (cpus > 1) {

		new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen);
		if (new_cpu >= 0) {
			WARN_ON(lock_policy_rwsem_write(cpu));
			update_policy_cpu(policy, new_cpu);
			unlock_policy_rwsem_write(cpu);

			if (!frozen) {
				pr_debug("%s: policy Kobject moved to cpu: %d "
					 "from: %d\n", __func__, new_cpu, cpu);
			}
		}
	}

	return 0;
}
static int __cpufreq_remove_dev_finish(struct device *dev,
				       struct subsys_interface *sif,
				       bool frozen)
{
	unsigned int cpu = dev->id, cpus;
	int ret;
	unsigned long flags;
	struct cpufreq_policy *policy;
	struct kobject *kobj;
	struct completion *cmp;

	read_lock_irqsave(&cpufreq_driver_lock, flags);
	policy = per_cpu(cpufreq_cpu_data, cpu);
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return -EINVAL;
	}

	lock_policy_rwsem_read(cpu);
	cpus = cpumask_weight(policy->cpus);
	unlock_policy_rwsem_read(cpu);

	/* If cpu is last user of policy, free policy */
	if (cpus == 1) {
		if (cpufreq_driver->target) {
			ret = __cpufreq_governor(policy,
					CPUFREQ_GOV_POLICY_EXIT);
			if (ret) {
				pr_err("%s: Failed to exit governor\n",
				       __func__);
				return ret;
			}
		}

		if (!frozen) {
			lock_policy_rwsem_read(cpu);
			kobj = &policy->kobj;
			cmp = &policy->kobj_unregister;
			unlock_policy_rwsem_read(cpu);
			kobject_put(kobj);

			/*
			 * We need to make sure that the underlying kobj is
			 * actually not referenced anymore by anybody before we
			 * proceed with unloading.
			 */
			pr_debug("waiting for dropping of refcount\n");
			wait_for_completion(cmp);
			pr_debug("wait complete\n");
		}

		/*
		 * Perform the ->exit() even during light-weight tear-down,
		 * since this is a core component, and is essential for the
		 * subsequent light-weight ->init() to succeed.
		 */
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);

		/* Remove policy from list of active policies */
		write_lock_irqsave(&cpufreq_driver_lock, flags);
		list_del(&policy->policy_list);
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);

		if (!frozen)
			cpufreq_policy_free(policy);
	} else {
		if (cpufreq_driver->target) {
			if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
					(ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
				pr_err("%s: Failed to start governor\n",
				       __func__);
				return ret;
			}
		}
	}

	per_cpu(cpufreq_cpu_data, cpu) = NULL;
	return 0;
}
/**
 * __cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 * Caller should already have policy_rwsem in write mode for this CPU.
 * This routine frees the rwsem before returning.
 */
static inline int __cpufreq_remove_dev(struct device *dev,
				       struct subsys_interface *sif,
				       bool frozen)
{
	int ret;

	ret = __cpufreq_remove_dev_prepare(dev, sif, frozen);

	if (!ret)
		ret = __cpufreq_remove_dev_finish(dev, sif, frozen);

	return ret;
}

static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	int retval;

	if (cpu_is_offline(cpu))
		return 0;

	retval = __cpufreq_remove_dev(dev, sif, false);
	return retval;
}

static void handle_update(struct work_struct *work)
{
	struct cpufreq_policy *policy =
		container_of(work, struct cpufreq_policy, update);
	unsigned int cpu = policy->cpu;
	pr_debug("handle_update for cpu %u called\n", cpu);
	cpufreq_update_policy(cpu);
}
/**
 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
 *	in deep trouble.
 * @cpu: cpu number
 * @old_freq: CPU frequency the kernel thinks the CPU runs at
 * @new_freq: CPU frequency the CPU actually runs at
 *
 * We adjust to current frequency first, and need to clean up later.
 * So either call to cpufreq_update_policy() or schedule handle_update()).
 */
static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
				unsigned int new_freq)
{
	struct cpufreq_policy *policy;
	struct cpufreq_freqs freqs;
	unsigned long flags;

	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
	       "core thinks of %u, is %u kHz.\n", old_freq, new_freq);

	freqs.old = old_freq;
	freqs.new = new_freq;

	read_lock_irqsave(&cpufreq_driver_lock, flags);
	policy = per_cpu(cpufreq_cpu_data, cpu);
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
}

/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
 */
unsigned int cpufreq_quick_get(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	unsigned int ret_freq = 0;

	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
		return cpufreq_driver->get(cpu);

	policy = cpufreq_cpu_get(cpu);
	if (policy) {
		ret_freq = policy->cur;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);
/**
 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
 * @cpu: CPU number
 *
 * Just return the max possible frequency for a given CPU.
 */
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		ret_freq = policy->max;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get_max);

static unsigned int __cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
	unsigned int ret_freq = 0;

	if (!cpufreq_driver->get)
		return ret_freq;

	ret_freq = cpufreq_driver->get(cpu);

	if (ret_freq && policy->cur &&
		!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
		/* verify no discrepancy between actual and
					saved value exists */
		if (unlikely(ret_freq != policy->cur)) {
			cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
			schedule_work(&policy->update);
		}
	}

	return ret_freq;
}

/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the CPU current (static) CPU frequency
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	unsigned int ret_freq = 0;

	if (!down_read_trylock(&cpufreq_rwsem))
		return 0;

	if (unlikely(lock_policy_rwsem_read(cpu)))
		goto out_policy;

	ret_freq = __cpufreq_get(cpu);

	unlock_policy_rwsem_read(cpu);

out_policy:
	up_read(&cpufreq_rwsem);

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);
static struct subsys_interface cpufreq_interface = {
	.name		= "cpufreq",
	.subsys		= &cpu_subsys,
	.add_dev	= cpufreq_add_dev,
	.remove_dev	= cpufreq_remove_dev,
};
/**
 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
 *
 * This function is only executed for the boot processor.  The other CPUs
 * have been put offline by means of CPU hotplug.
 */
static int cpufreq_bp_suspend(void)
{
	int ret = 0;

	int cpu = smp_processor_id();
	struct cpufreq_policy *policy;

	pr_debug("suspending cpu %u\n", cpu);

	/* If there's no policy for the boot CPU, we have nothing to do. */
	policy = cpufreq_cpu_get(cpu);
	if (!policy)
		return 0;

	if (cpufreq_driver->suspend) {
		ret = cpufreq_driver->suspend(policy);
		if (ret)
			printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
					"step on CPU %u\n", policy->cpu);
	}

	cpufreq_cpu_put(policy);
	return ret;
}

/**
 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
 *
 *	1.) resume CPUfreq hardware support (cpufreq_driver->resume())
 *	2.) schedule call cpufreq_update_policy() ASAP as interrupts are
 *	    restored. It will verify that the current freq is in sync with
 *	    what we believe it to be. This is a bit later than when it
 *	    should be, but nonetheless it's better than calling
 *	    cpufreq_driver->get() here which might re-enable interrupts...
 *
 * This function is only executed for the boot CPU.  The other CPUs have not
 * been turned on yet.
 */
static void cpufreq_bp_resume(void)
{
	int ret = 0;

	int cpu = smp_processor_id();
	struct cpufreq_policy *policy;

	pr_debug("resuming cpu %u\n", cpu);

	/* If there's no policy for the boot CPU, we have nothing to do. */
	policy = cpufreq_cpu_get(cpu);
	if (!policy)
		return;

	if (cpufreq_driver->resume) {
		ret = cpufreq_driver->resume(policy);
		if (ret) {
			printk(KERN_ERR "cpufreq: resume failed in ->resume "
					"step on CPU %u\n", policy->cpu);
			goto fail;
		}
	}

	schedule_work(&policy->update);

fail:
	cpufreq_cpu_put(policy);
}

static struct syscore_ops cpufreq_syscore_ops = {
	.suspend	= cpufreq_bp_suspend,
	.resume		= cpufreq_bp_resume,
};
/**
 * cpufreq_get_current_driver - return current driver's name
 *
 * Return the name string of the currently loaded cpufreq driver
 * or NULL, if none.
 */
const char *cpufreq_get_current_driver(void)
{
	if (cpufreq_driver)
		return cpufreq_driver->name;

	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);

/*********************************************************************
 *                     NOTIFIER LISTS INTERFACE                      *
 *********************************************************************/
/**
 * cpufreq_register_notifier - register a driver with cpufreq
 * @nb: notifier function to register
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Add a driver to one of two lists: either a list of drivers that
 * are notified about clock rate changes (once before and once after
 * the transition), or a list of drivers that are notified about
 * changes in cpufreq policy.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_register.
 */
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	WARN_ON(!init_cpufreq_transition_notifier_list_called);

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_register(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_register(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);
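
/*
 * Illustrative sketch (not part of the original file): a client reacting to
 * frequency changes registers a transition notifier. The callback and
 * notifier_block names are hypothetical; struct cpufreq_freqs and the
 * notifier API are real:
 *
 *	static int my_transition_cb(struct notifier_block *nb,
 *				    unsigned long val, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (val == CPUFREQ_POSTCHANGE)
 *			pr_info("cpu%u: %u -> %u kHz\n", freqs->cpu,
 *				freqs->old, freqs->new);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_transition_cb,
 *	};
 *
 *	cpufreq_register_notifier(&my_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */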
/**
 * cpufreq_unregister_notifier - unregister a driver with cpufreq
 * @nb: notifier block to be unregistered
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Remove a driver from the CPU frequency notifier list.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_unregister.
 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_unregister(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_unregister(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);

/*********************************************************************
 *                              GOVERNORS                            *
 *********************************************************************/
int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	int retval = -EINVAL;
	unsigned int old_target_freq = target_freq;

	if (cpufreq_disabled())
		return -ENODEV;
	if (policy->transition_ongoing)
		return -EBUSY;

	/* Make sure that target_freq is within supported range */
	if (target_freq > policy->max)
		target_freq = policy->max;
	if (target_freq < policy->min)
		target_freq = policy->min;

	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
			policy->cpu, target_freq, relation, old_target_freq);

	if (target_freq == policy->cur)
		return 0;

	if (cpufreq_driver->target)
		retval = cpufreq_driver->target(policy, target_freq, relation);

	return retval;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);

int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	int ret = -EINVAL;

	if (unlikely(lock_policy_rwsem_write(policy->cpu)))
		goto fail;

	ret = __cpufreq_driver_target(policy, target_freq, relation);

	unlock_policy_rwsem_write(policy->cpu);

fail:
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
/*
 * when "event" is CPUFREQ_GOV_LIMITS
 */

static int __cpufreq_governor(struct cpufreq_policy *policy,
					unsigned int event)
{
	int ret;

	/* Only must be defined when default governor is known to have latency
	   restrictions, like e.g. conservative or ondemand.
	   That this is the case is already ensured in Kconfig
	*/
#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
	struct cpufreq_governor *gov = &cpufreq_gov_performance;
#else
	struct cpufreq_governor *gov = NULL;
#endif

	if (policy->governor->max_transition_latency &&
	    policy->cpuinfo.transition_latency >
	    policy->governor->max_transition_latency) {
		if (!gov)
			return -EINVAL;
		else {
			printk(KERN_WARNING "%s governor failed, too long"
			       " transition latency of HW, fallback"
			       " to %s governor\n",
			       policy->governor->name,
			       gov->name);
			policy->governor = gov;
		}
	}

	if (event == CPUFREQ_GOV_POLICY_INIT)
		if (!try_module_get(policy->governor->owner))
			return -EINVAL;

	pr_debug("__cpufreq_governor for CPU %u, event %u\n",
						policy->cpu, event);

	mutex_lock(&cpufreq_governor_lock);
	if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
	    || (!policy->governor_enabled
	    && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
		mutex_unlock(&cpufreq_governor_lock);
		return -EBUSY;
	}

	if (event == CPUFREQ_GOV_STOP)
		policy->governor_enabled = false;
	else if (event == CPUFREQ_GOV_START)
		policy->governor_enabled = true;

	mutex_unlock(&cpufreq_governor_lock);

	ret = policy->governor->governor(policy, event);

	if (!ret) {
		if (event == CPUFREQ_GOV_POLICY_INIT)
			policy->governor->initialized++;
		else if (event == CPUFREQ_GOV_POLICY_EXIT)
			policy->governor->initialized--;
	} else {
		/* Restore original values */
		mutex_lock(&cpufreq_governor_lock);
		if (event == CPUFREQ_GOV_STOP)
			policy->governor_enabled = true;
		else if (event == CPUFREQ_GOV_START)
			policy->governor_enabled = false;
		mutex_unlock(&cpufreq_governor_lock);
	}

	if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
			((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
		module_put(policy->governor->owner);

	return ret;
}
int cpufreq_register_governor(struct cpufreq_governor *governor)
{
	int err;

	if (!governor)
		return -EINVAL;

	if (cpufreq_disabled())
		return -ENODEV;

	mutex_lock(&cpufreq_governor_mutex);

	governor->initialized = 0;
	err = -EBUSY;
	if (__find_governor(governor->name) == NULL) {
		err = 0;
		list_add(&governor->governor_list, &cpufreq_governor_list);
	}

	mutex_unlock(&cpufreq_governor_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(cpufreq_register_governor);
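
/*
 * Illustrative sketch (not part of the original file): a minimal governor
 * registers a struct cpufreq_governor whose ->governor() callback handles
 * the CPUFREQ_GOV_* events dispatched by __cpufreq_governor(). All names
 * other than the core types and events are hypothetical:
 *
 *	static int my_gov_cb(struct cpufreq_policy *policy, unsigned int event)
 *	{
 *		if (event == CPUFREQ_GOV_LIMITS)
 *			__cpufreq_driver_target(policy, policy->max,
 *						CPUFREQ_RELATION_H);
 *		return 0;
 *	}
 *
 *	static struct cpufreq_governor my_governor = {
 *		.name		= "mygov",
 *		.governor	= my_gov_cb,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	cpufreq_register_governor(&my_governor);
 */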
void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
#ifdef CONFIG_HOTPLUG_CPU
	int cpu;
#endif

	if (!governor)
		return;

	if (cpufreq_disabled())
		return;

#ifdef CONFIG_HOTPLUG_CPU
	for_each_present_cpu(cpu) {
		if (cpu_online(cpu))
			continue;
		if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
			strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
	}
#endif

	mutex_lock(&cpufreq_governor_mutex);
	list_del(&governor->governor_list);
	mutex_unlock(&cpufreq_governor_mutex);
	return;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);


/*********************************************************************
 *                          POLICY INTERFACE                         *
 *********************************************************************/
/**
 * cpufreq_get_policy - get the current cpufreq_policy
 * @policy: struct cpufreq_policy into which the current cpufreq_policy
 *	is written
 * @cpu: CPU to find the policy for
 *
 * Reads the current cpufreq policy.
 */
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
{
	struct cpufreq_policy *cpu_policy;
	if (!policy)
		return -EINVAL;

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	memcpy(policy, cpu_policy, sizeof(*policy));

	cpufreq_cpu_put(cpu_policy);
	return 0;
}
EXPORT_SYMBOL(cpufreq_get_policy);
/*
 * data   : current policy.
 * policy : policy to be set.
 */
static int __cpufreq_set_policy(struct cpufreq_policy *policy,
				struct cpufreq_policy *new_policy)
{
	int ret = 0, failed = 1;

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n", new_policy->cpu,
		new_policy->min, new_policy->max);

	memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));

	if (new_policy->min > policy->max || new_policy->max < policy->min) {
		ret = -EINVAL;
		goto error_out;
	}

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		goto error_out;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, new_policy);

	/* adjust if necessary - hardware incompatibility*/
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_INCOMPATIBLE, new_policy);

	/*
	 * verify the cpu speed can be set within this limit, which might be
	 * different to the first one
	 */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		goto error_out;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, new_policy);

	policy->min = new_policy->min;
	policy->max = new_policy->max;

	pr_debug("new min and max freqs are %u - %u kHz\n",
					policy->min, policy->max);

	if (cpufreq_driver->setpolicy) {
		policy->policy = new_policy->policy;
		pr_debug("setting range\n");
		ret = cpufreq_driver->setpolicy(new_policy);
	} else {
		if (new_policy->governor != policy->governor) {
			/* save old, working values */
			struct cpufreq_governor *old_gov = policy->governor;

			pr_debug("governor switch\n");

			/* end old governor */
			if (policy->governor) {
				__cpufreq_governor(policy, CPUFREQ_GOV_STOP);
				unlock_policy_rwsem_write(new_policy->cpu);
				__cpufreq_governor(policy,
						CPUFREQ_GOV_POLICY_EXIT);
				lock_policy_rwsem_write(new_policy->cpu);
			}

			/* start new governor */
			policy->governor = new_policy->governor;
			if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
				if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) {
					failed = 0;
				} else {
					unlock_policy_rwsem_write(new_policy->cpu);
					__cpufreq_governor(policy,
							CPUFREQ_GOV_POLICY_EXIT);
					lock_policy_rwsem_write(new_policy->cpu);
				}
			}

			if (failed) {
				/* new governor failed, so re-start old one */
				pr_debug("starting governor %s failed\n",
							policy->governor->name);
				if (old_gov) {
					policy->governor = old_gov;
					__cpufreq_governor(policy,
							CPUFREQ_GOV_POLICY_INIT);
					__cpufreq_governor(policy,
							CPUFREQ_GOV_START);
				}
				ret = -EINVAL;
				goto error_out;
			}
			/* might be a policy change, too, so fall through */
		}
		pr_debug("governor: change or update limits\n");
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
	}

error_out:
	return ret;
}
/**
 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
 * @cpu: CPU which shall be re-evaluated
 *
 * Useful for policy notifiers which have different necessities
 * at different times.
 */
int cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct cpufreq_policy new_policy;
	int ret;

	if (!policy) {
		ret = -ENODEV;
		goto no_policy;
	}

	if (unlikely(lock_policy_rwsem_write(cpu))) {
		ret = -EINVAL;
		goto fail;
	}

	pr_debug("updating policy for CPU %u\n", cpu);
	memcpy(&new_policy, policy, sizeof(*policy));
	new_policy.min = policy->user_policy.min;
	new_policy.max = policy->user_policy.max;
	new_policy.policy = policy->user_policy.policy;
	new_policy.governor = policy->user_policy.governor;

	/*
	 * BIOS might change freq behind our back
	 * -> ask driver for current freq and notify governors about a change
	 */
	if (cpufreq_driver->get) {
		new_policy.cur = cpufreq_driver->get(cpu);
		if (!policy->cur) {
			pr_debug("Driver did not initialize current freq");
			policy->cur = new_policy.cur;
		} else {
			if (policy->cur != new_policy.cur && cpufreq_driver->target)
				cpufreq_out_of_sync(cpu, policy->cur,
								new_policy.cur);
		}
	}

	ret = __cpufreq_set_policy(policy, &new_policy);

	unlock_policy_rwsem_write(cpu);

fail:
	cpufreq_cpu_put(policy);
no_policy:
	return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);
static int cpufreq_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct device *dev;
	bool frozen = false;

	dev = get_cpu_device(cpu);
	if (dev) {

		if (action & CPU_TASKS_FROZEN)
			frozen = true;

		switch (action & ~CPU_TASKS_FROZEN) {
		case CPU_ONLINE:
			__cpufreq_add_dev(dev, NULL, frozen);
			cpufreq_update_policy(cpu);
			break;

		case CPU_DOWN_PREPARE:
			__cpufreq_remove_dev_prepare(dev, NULL, frozen);
			break;

		case CPU_POST_DEAD:
			__cpufreq_remove_dev_finish(dev, NULL, frozen);
			break;

		case CPU_DOWN_FAILED:
			__cpufreq_add_dev(dev, NULL, frozen);
			break;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block __refdata cpufreq_cpu_notifier = {
	.notifier_call = cpufreq_cpu_callback,
};

/*********************************************************************
 *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
 *********************************************************************/
/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EBUSY when another driver got here first
 * (and isn't unregistered in the meantime).
 *
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    ((!driver_data->setpolicy) && (!driver_data->target)))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return -EBUSY;
	}
	cpufreq_driver = driver_data;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_null_driver;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
		int i;
		ret = -ENODEV;

		/* check for at least one working CPU */
		for (i = 0; i < nr_cpu_ids; i++)
			if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
				ret = 0;
				break;
			}

		/* if all ->init() calls failed, unregister */
		if (ret) {
			pr_debug("no CPU initialized for driver %s\n",
							driver_data->name);
			goto err_if_unreg;
		}
	}

	register_hotcpu_notifier(&cpufreq_cpu_notifier);
	pr_debug("driver %s up and running\n", driver_data->name);

	return 0;
err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_null_driver:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
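
/*
 * Illustrative sketch (not part of the original file): a scaling driver
 * fills in struct cpufreq_driver and registers it, typically from its
 * module init. The my_* callbacks are hypothetical; ->init and ->verify
 * are mandatory, and either ->setpolicy or ->target must be provided or
 * cpufreq_register_driver() returns -EINVAL:
 *
 *	static struct cpufreq_driver my_cpufreq_driver = {
 *		.name	= "mydrv",
 *		.init	= my_cpu_init,
 *		.verify	= my_verify_policy,
 *		.target	= my_target,
 *		.get	= my_get_cur_freq,
 *	};
 *
 *	ret = cpufreq_register_driver(&my_cpufreq_driver);
 */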
/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	if (!cpufreq_driver || (driver != cpufreq_driver))
		return -EINVAL;

	pr_debug("unregistering driver %s\n", driver->name);

	subsys_interface_unregister(&cpufreq_interface);
	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

	down_write(&cpufreq_rwsem);
	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpufreq_driver = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	up_write(&cpufreq_rwsem);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);

static int __init cpufreq_core_init(void)
{
	int cpu;

	if (cpufreq_disabled())
		return -ENODEV;

	for_each_possible_cpu(cpu)
		init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));

	cpufreq_global_kobject = kobject_create();
	BUG_ON(!cpufreq_global_kobject);
	register_syscore_ops(&cpufreq_syscore_ops);

	return 0;
}
core_initcall(cpufreq_core_init);