/*
 *  linux/drivers/cpufreq/cpufreq.c
 *
 *  Copyright (C) 2001 Russell King
 *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *            (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
 *
 *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *	Added handling for CPU hotplug
 *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *	Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/cpu.h>
21 #include <linux/cpufreq.h>
22 #include <linux/delay.h>
23 #include <linux/device.h>
24 #include <linux/init.h>
25 #include <linux/kernel_stat.h>
26 #include <linux/module.h>
27 #include <linux/mutex.h>
28 #include <linux/slab.h>
29 #include <linux/syscore_ops.h>
30 #include <linux/tick.h>
31 #include <trace/events/power.h>
34 * The "cpufreq driver" - the arch- or hardware-dependent low
35 * level driver of CPUFreq support, and its spinlock. This lock
36 * also protects the cpufreq_cpu_data array.
38 static struct cpufreq_driver
*cpufreq_driver
;
39 static DEFINE_PER_CPU(struct cpufreq_policy
*, cpufreq_cpu_data
);
40 static DEFINE_PER_CPU(struct cpufreq_policy
*, cpufreq_cpu_data_fallback
);
41 static DEFINE_RWLOCK(cpufreq_driver_lock
);
42 static DEFINE_MUTEX(cpufreq_governor_lock
);
43 static LIST_HEAD(cpufreq_policy_list
);
45 #ifdef CONFIG_HOTPLUG_CPU
46 /* This one keeps track of the previously set governor of a removed CPU */
47 static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN
], cpufreq_cpu_governor
);
51 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
52 * all cpufreq/hotplug/workqueue/etc related lock issues.
54 * The rules for this semaphore:
55 * - Any routine that wants to read from the policy structure will
56 * do a down_read on this semaphore.
57 * - Any routine that will write to the policy structure and/or may take away
58 * the policy altogether (eg. CPU hotplug), will hold this lock in write
59 * mode before doing so.
62 * - Governor routines that can be called in cpufreq hotplug path should not
63 * take this sem as top level hotplug notifier handler takes this.
64 * - Lock should not be held across
65 * __cpufreq_governor(data, CPUFREQ_GOV_STOP);
67 static DEFINE_PER_CPU(int, cpufreq_policy_cpu
);
68 static DEFINE_PER_CPU(struct rw_semaphore
, cpu_policy_rwsem
);
70 #define lock_policy_rwsem(mode, cpu) \
71 static int lock_policy_rwsem_##mode(int cpu) \
73 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
74 BUG_ON(policy_cpu == -1); \
75 down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
80 lock_policy_rwsem(read
, cpu
);
81 lock_policy_rwsem(write
, cpu
);
83 #define unlock_policy_rwsem(mode, cpu) \
84 static void unlock_policy_rwsem_##mode(int cpu) \
86 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
87 BUG_ON(policy_cpu == -1); \
88 up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
91 unlock_policy_rwsem(read
, cpu
);
92 unlock_policy_rwsem(write
, cpu
);
/*
 * rwsem to guarantee that cpufreq driver module doesn't unload during critical
 * sections.
 */
static DECLARE_RWSEM(cpufreq_rwsem);

/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
		unsigned int event);
static unsigned int __cpufreq_get(unsigned int cpu);
static void handle_update(struct work_struct *work);
107 * Two notifier lists: the "policy" list is involved in the
108 * validation process for a new CPU frequency policy; the
109 * "transition" list for kernel code that needs to handle
110 * changes to devices when the CPU clock speed changes.
111 * The mutex locks both lists.
113 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list
);
114 static struct srcu_notifier_head cpufreq_transition_notifier_list
;
116 static bool init_cpufreq_transition_notifier_list_called
;
117 static int __init
init_cpufreq_transition_notifier_list(void)
119 srcu_init_notifier_head(&cpufreq_transition_notifier_list
);
120 init_cpufreq_transition_notifier_list_called
= true;
123 pure_initcall(init_cpufreq_transition_notifier_list
);
125 static int off __read_mostly
;
126 static int cpufreq_disabled(void)
130 void disable_cpufreq(void)
134 static LIST_HEAD(cpufreq_governor_list
);
135 static DEFINE_MUTEX(cpufreq_governor_mutex
);
137 bool have_governor_per_policy(void)
139 return cpufreq_driver
->have_governor_per_policy
;
141 EXPORT_SYMBOL_GPL(have_governor_per_policy
);
143 struct kobject
*get_governor_parent_kobj(struct cpufreq_policy
*policy
)
145 if (have_governor_per_policy())
146 return &policy
->kobj
;
148 return cpufreq_global_kobject
;
150 EXPORT_SYMBOL_GPL(get_governor_parent_kobj
);
152 static inline u64
get_cpu_idle_time_jiffy(unsigned int cpu
, u64
*wall
)
158 cur_wall_time
= jiffies64_to_cputime64(get_jiffies_64());
160 busy_time
= kcpustat_cpu(cpu
).cpustat
[CPUTIME_USER
];
161 busy_time
+= kcpustat_cpu(cpu
).cpustat
[CPUTIME_SYSTEM
];
162 busy_time
+= kcpustat_cpu(cpu
).cpustat
[CPUTIME_IRQ
];
163 busy_time
+= kcpustat_cpu(cpu
).cpustat
[CPUTIME_SOFTIRQ
];
164 busy_time
+= kcpustat_cpu(cpu
).cpustat
[CPUTIME_STEAL
];
165 busy_time
+= kcpustat_cpu(cpu
).cpustat
[CPUTIME_NICE
];
167 idle_time
= cur_wall_time
- busy_time
;
169 *wall
= cputime_to_usecs(cur_wall_time
);
171 return cputime_to_usecs(idle_time
);
174 u64
get_cpu_idle_time(unsigned int cpu
, u64
*wall
, int io_busy
)
176 u64 idle_time
= get_cpu_idle_time_us(cpu
, io_busy
? wall
: NULL
);
178 if (idle_time
== -1ULL)
179 return get_cpu_idle_time_jiffy(cpu
, wall
);
181 idle_time
+= get_cpu_iowait_time_us(cpu
, wall
);
185 EXPORT_SYMBOL_GPL(get_cpu_idle_time
);
187 struct cpufreq_policy
*cpufreq_cpu_get(unsigned int cpu
)
189 struct cpufreq_policy
*policy
= NULL
;
192 if (cpufreq_disabled() || (cpu
>= nr_cpu_ids
))
195 if (!down_read_trylock(&cpufreq_rwsem
))
198 /* get the cpufreq driver */
199 read_lock_irqsave(&cpufreq_driver_lock
, flags
);
201 if (cpufreq_driver
) {
203 policy
= per_cpu(cpufreq_cpu_data
, cpu
);
205 kobject_get(&policy
->kobj
);
208 read_unlock_irqrestore(&cpufreq_driver_lock
, flags
);
211 up_read(&cpufreq_rwsem
);
215 EXPORT_SYMBOL_GPL(cpufreq_cpu_get
);
217 void cpufreq_cpu_put(struct cpufreq_policy
*policy
)
219 if (cpufreq_disabled())
222 kobject_put(&policy
->kobj
);
223 up_read(&cpufreq_rwsem
);
225 EXPORT_SYMBOL_GPL(cpufreq_cpu_put
);
227 /*********************************************************************
228 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
229 *********************************************************************/
232 * adjust_jiffies - adjust the system "loops_per_jiffy"
234 * This function alters the system "loops_per_jiffy" for the clock
235 * speed change. Note that loops_per_jiffy cannot be updated on SMP
236 * systems as each CPU might be scaled differently. So, use the arch
237 * per-CPU loops_per_jiffy value wherever possible.
240 static unsigned long l_p_j_ref
;
241 static unsigned int l_p_j_ref_freq
;
243 static void adjust_jiffies(unsigned long val
, struct cpufreq_freqs
*ci
)
245 if (ci
->flags
& CPUFREQ_CONST_LOOPS
)
248 if (!l_p_j_ref_freq
) {
249 l_p_j_ref
= loops_per_jiffy
;
250 l_p_j_ref_freq
= ci
->old
;
251 pr_debug("saving %lu as reference value for loops_per_jiffy; "
252 "freq is %u kHz\n", l_p_j_ref
, l_p_j_ref_freq
);
254 if ((val
== CPUFREQ_POSTCHANGE
&& ci
->old
!= ci
->new) ||
255 (val
== CPUFREQ_RESUMECHANGE
|| val
== CPUFREQ_SUSPENDCHANGE
)) {
256 loops_per_jiffy
= cpufreq_scale(l_p_j_ref
, l_p_j_ref_freq
,
258 pr_debug("scaling loops_per_jiffy to %lu "
259 "for frequency %u kHz\n", loops_per_jiffy
, ci
->new);
263 static inline void adjust_jiffies(unsigned long val
, struct cpufreq_freqs
*ci
)
269 static void __cpufreq_notify_transition(struct cpufreq_policy
*policy
,
270 struct cpufreq_freqs
*freqs
, unsigned int state
)
272 BUG_ON(irqs_disabled());
274 if (cpufreq_disabled())
277 freqs
->flags
= cpufreq_driver
->flags
;
278 pr_debug("notification %u of frequency transition to %u kHz\n",
283 case CPUFREQ_PRECHANGE
:
284 if (WARN(policy
->transition_ongoing
==
285 cpumask_weight(policy
->cpus
),
286 "In middle of another frequency transition\n"))
289 policy
->transition_ongoing
++;
291 /* detect if the driver reported a value as "old frequency"
292 * which is not equal to what the cpufreq core thinks is
295 if (!(cpufreq_driver
->flags
& CPUFREQ_CONST_LOOPS
)) {
296 if ((policy
) && (policy
->cpu
== freqs
->cpu
) &&
297 (policy
->cur
) && (policy
->cur
!= freqs
->old
)) {
298 pr_debug("Warning: CPU frequency is"
299 " %u, cpufreq assumed %u kHz.\n",
300 freqs
->old
, policy
->cur
);
301 freqs
->old
= policy
->cur
;
304 srcu_notifier_call_chain(&cpufreq_transition_notifier_list
,
305 CPUFREQ_PRECHANGE
, freqs
);
306 adjust_jiffies(CPUFREQ_PRECHANGE
, freqs
);
309 case CPUFREQ_POSTCHANGE
:
310 if (WARN(!policy
->transition_ongoing
,
311 "No frequency transition in progress\n"))
314 policy
->transition_ongoing
--;
316 adjust_jiffies(CPUFREQ_POSTCHANGE
, freqs
);
317 pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs
->new,
318 (unsigned long)freqs
->cpu
);
319 trace_cpu_frequency(freqs
->new, freqs
->cpu
);
320 srcu_notifier_call_chain(&cpufreq_transition_notifier_list
,
321 CPUFREQ_POSTCHANGE
, freqs
);
322 if (likely(policy
) && likely(policy
->cpu
== freqs
->cpu
))
323 policy
->cur
= freqs
->new;
329 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
330 * on frequency transition.
332 * This function calls the transition notifiers and the "adjust_jiffies"
333 * function. It is called twice on all CPU frequency changes that have
336 void cpufreq_notify_transition(struct cpufreq_policy
*policy
,
337 struct cpufreq_freqs
*freqs
, unsigned int state
)
339 for_each_cpu(freqs
->cpu
, policy
->cpus
)
340 __cpufreq_notify_transition(policy
, freqs
, state
);
342 EXPORT_SYMBOL_GPL(cpufreq_notify_transition
);
345 /*********************************************************************
347 *********************************************************************/
349 static struct cpufreq_governor
*__find_governor(const char *str_governor
)
351 struct cpufreq_governor
*t
;
353 list_for_each_entry(t
, &cpufreq_governor_list
, governor_list
)
354 if (!strnicmp(str_governor
, t
->name
, CPUFREQ_NAME_LEN
))
361 * cpufreq_parse_governor - parse a governor string
363 static int cpufreq_parse_governor(char *str_governor
, unsigned int *policy
,
364 struct cpufreq_governor
**governor
)
371 if (cpufreq_driver
->setpolicy
) {
372 if (!strnicmp(str_governor
, "performance", CPUFREQ_NAME_LEN
)) {
373 *policy
= CPUFREQ_POLICY_PERFORMANCE
;
375 } else if (!strnicmp(str_governor
, "powersave",
377 *policy
= CPUFREQ_POLICY_POWERSAVE
;
380 } else if (cpufreq_driver
->target
) {
381 struct cpufreq_governor
*t
;
383 mutex_lock(&cpufreq_governor_mutex
);
385 t
= __find_governor(str_governor
);
390 mutex_unlock(&cpufreq_governor_mutex
);
391 ret
= request_module("cpufreq_%s", str_governor
);
392 mutex_lock(&cpufreq_governor_mutex
);
395 t
= __find_governor(str_governor
);
403 mutex_unlock(&cpufreq_governor_mutex
);
410 * cpufreq_per_cpu_attr_read() / show_##file_name() -
411 * print out cpufreq information
413 * Write out information from cpufreq_driver->policy[cpu]; object must be
417 #define show_one(file_name, object) \
418 static ssize_t show_##file_name \
419 (struct cpufreq_policy *policy, char *buf) \
421 return sprintf(buf, "%u\n", policy->object); \
424 show_one(cpuinfo_min_freq
, cpuinfo
.min_freq
);
425 show_one(cpuinfo_max_freq
, cpuinfo
.max_freq
);
426 show_one(cpuinfo_transition_latency
, cpuinfo
.transition_latency
);
427 show_one(scaling_min_freq
, min
);
428 show_one(scaling_max_freq
, max
);
429 show_one(scaling_cur_freq
, cur
);
431 static int __cpufreq_set_policy(struct cpufreq_policy
*policy
,
432 struct cpufreq_policy
*new_policy
);
435 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
437 #define store_one(file_name, object) \
438 static ssize_t store_##file_name \
439 (struct cpufreq_policy *policy, const char *buf, size_t count) \
442 struct cpufreq_policy new_policy; \
444 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
448 ret = sscanf(buf, "%u", &new_policy.object); \
452 ret = __cpufreq_set_policy(policy, &new_policy); \
453 policy->user_policy.object = policy->object; \
455 return ret ? ret : count; \
458 store_one(scaling_min_freq
, min
);
459 store_one(scaling_max_freq
, max
);
462 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
464 static ssize_t
show_cpuinfo_cur_freq(struct cpufreq_policy
*policy
,
467 unsigned int cur_freq
= __cpufreq_get(policy
->cpu
);
469 return sprintf(buf
, "<unknown>");
470 return sprintf(buf
, "%u\n", cur_freq
);
474 * show_scaling_governor - show the current policy for the specified CPU
476 static ssize_t
show_scaling_governor(struct cpufreq_policy
*policy
, char *buf
)
478 if (policy
->policy
== CPUFREQ_POLICY_POWERSAVE
)
479 return sprintf(buf
, "powersave\n");
480 else if (policy
->policy
== CPUFREQ_POLICY_PERFORMANCE
)
481 return sprintf(buf
, "performance\n");
482 else if (policy
->governor
)
483 return scnprintf(buf
, CPUFREQ_NAME_PLEN
, "%s\n",
484 policy
->governor
->name
);
489 * store_scaling_governor - store policy for the specified CPU
491 static ssize_t
store_scaling_governor(struct cpufreq_policy
*policy
,
492 const char *buf
, size_t count
)
495 char str_governor
[16];
496 struct cpufreq_policy new_policy
;
498 ret
= cpufreq_get_policy(&new_policy
, policy
->cpu
);
502 ret
= sscanf(buf
, "%15s", str_governor
);
506 if (cpufreq_parse_governor(str_governor
, &new_policy
.policy
,
507 &new_policy
.governor
))
511 * Do not use cpufreq_set_policy here or the user_policy.max
512 * will be wrongly overridden
514 ret
= __cpufreq_set_policy(policy
, &new_policy
);
516 policy
->user_policy
.policy
= policy
->policy
;
517 policy
->user_policy
.governor
= policy
->governor
;
526 * show_scaling_driver - show the cpufreq driver currently loaded
528 static ssize_t
show_scaling_driver(struct cpufreq_policy
*policy
, char *buf
)
530 return scnprintf(buf
, CPUFREQ_NAME_PLEN
, "%s\n", cpufreq_driver
->name
);
534 * show_scaling_available_governors - show the available CPUfreq governors
536 static ssize_t
show_scaling_available_governors(struct cpufreq_policy
*policy
,
540 struct cpufreq_governor
*t
;
542 if (!cpufreq_driver
->target
) {
543 i
+= sprintf(buf
, "performance powersave");
547 list_for_each_entry(t
, &cpufreq_governor_list
, governor_list
) {
548 if (i
>= (ssize_t
) ((PAGE_SIZE
/ sizeof(char))
549 - (CPUFREQ_NAME_LEN
+ 2)))
551 i
+= scnprintf(&buf
[i
], CPUFREQ_NAME_PLEN
, "%s ", t
->name
);
554 i
+= sprintf(&buf
[i
], "\n");
558 ssize_t
cpufreq_show_cpus(const struct cpumask
*mask
, char *buf
)
563 for_each_cpu(cpu
, mask
) {
565 i
+= scnprintf(&buf
[i
], (PAGE_SIZE
- i
- 2), " ");
566 i
+= scnprintf(&buf
[i
], (PAGE_SIZE
- i
- 2), "%u", cpu
);
567 if (i
>= (PAGE_SIZE
- 5))
570 i
+= sprintf(&buf
[i
], "\n");
573 EXPORT_SYMBOL_GPL(cpufreq_show_cpus
);
576 * show_related_cpus - show the CPUs affected by each transition even if
577 * hw coordination is in use
579 static ssize_t
show_related_cpus(struct cpufreq_policy
*policy
, char *buf
)
581 return cpufreq_show_cpus(policy
->related_cpus
, buf
);
585 * show_affected_cpus - show the CPUs affected by each transition
587 static ssize_t
show_affected_cpus(struct cpufreq_policy
*policy
, char *buf
)
589 return cpufreq_show_cpus(policy
->cpus
, buf
);
592 static ssize_t
store_scaling_setspeed(struct cpufreq_policy
*policy
,
593 const char *buf
, size_t count
)
595 unsigned int freq
= 0;
598 if (!policy
->governor
|| !policy
->governor
->store_setspeed
)
601 ret
= sscanf(buf
, "%u", &freq
);
605 policy
->governor
->store_setspeed(policy
, freq
);
610 static ssize_t
show_scaling_setspeed(struct cpufreq_policy
*policy
, char *buf
)
612 if (!policy
->governor
|| !policy
->governor
->show_setspeed
)
613 return sprintf(buf
, "<unsupported>\n");
615 return policy
->governor
->show_setspeed(policy
, buf
);
619 * show_bios_limit - show the current cpufreq HW/BIOS limitation
621 static ssize_t
show_bios_limit(struct cpufreq_policy
*policy
, char *buf
)
625 if (cpufreq_driver
->bios_limit
) {
626 ret
= cpufreq_driver
->bios_limit(policy
->cpu
, &limit
);
628 return sprintf(buf
, "%u\n", limit
);
630 return sprintf(buf
, "%u\n", policy
->cpuinfo
.max_freq
);
633 cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq
, 0400);
634 cpufreq_freq_attr_ro(cpuinfo_min_freq
);
635 cpufreq_freq_attr_ro(cpuinfo_max_freq
);
636 cpufreq_freq_attr_ro(cpuinfo_transition_latency
);
637 cpufreq_freq_attr_ro(scaling_available_governors
);
638 cpufreq_freq_attr_ro(scaling_driver
);
639 cpufreq_freq_attr_ro(scaling_cur_freq
);
640 cpufreq_freq_attr_ro(bios_limit
);
641 cpufreq_freq_attr_ro(related_cpus
);
642 cpufreq_freq_attr_ro(affected_cpus
);
643 cpufreq_freq_attr_rw(scaling_min_freq
);
644 cpufreq_freq_attr_rw(scaling_max_freq
);
645 cpufreq_freq_attr_rw(scaling_governor
);
646 cpufreq_freq_attr_rw(scaling_setspeed
);
648 static struct attribute
*default_attrs
[] = {
649 &cpuinfo_min_freq
.attr
,
650 &cpuinfo_max_freq
.attr
,
651 &cpuinfo_transition_latency
.attr
,
652 &scaling_min_freq
.attr
,
653 &scaling_max_freq
.attr
,
656 &scaling_governor
.attr
,
657 &scaling_driver
.attr
,
658 &scaling_available_governors
.attr
,
659 &scaling_setspeed
.attr
,
663 #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
664 #define to_attr(a) container_of(a, struct freq_attr, attr)
666 static ssize_t
show(struct kobject
*kobj
, struct attribute
*attr
, char *buf
)
668 struct cpufreq_policy
*policy
= to_policy(kobj
);
669 struct freq_attr
*fattr
= to_attr(attr
);
670 ssize_t ret
= -EINVAL
;
672 if (!down_read_trylock(&cpufreq_rwsem
))
675 if (lock_policy_rwsem_read(policy
->cpu
) < 0)
679 ret
= fattr
->show(policy
, buf
);
683 unlock_policy_rwsem_read(policy
->cpu
);
686 up_read(&cpufreq_rwsem
);
691 static ssize_t
store(struct kobject
*kobj
, struct attribute
*attr
,
692 const char *buf
, size_t count
)
694 struct cpufreq_policy
*policy
= to_policy(kobj
);
695 struct freq_attr
*fattr
= to_attr(attr
);
696 ssize_t ret
= -EINVAL
;
698 if (!down_read_trylock(&cpufreq_rwsem
))
701 if (lock_policy_rwsem_write(policy
->cpu
) < 0)
705 ret
= fattr
->store(policy
, buf
, count
);
709 unlock_policy_rwsem_write(policy
->cpu
);
712 up_read(&cpufreq_rwsem
);
717 static void cpufreq_sysfs_release(struct kobject
*kobj
)
719 struct cpufreq_policy
*policy
= to_policy(kobj
);
720 pr_debug("last reference is dropped\n");
721 complete(&policy
->kobj_unregister
);
724 static const struct sysfs_ops sysfs_ops
= {
729 static struct kobj_type ktype_cpufreq
= {
730 .sysfs_ops
= &sysfs_ops
,
731 .default_attrs
= default_attrs
,
732 .release
= cpufreq_sysfs_release
,
735 struct kobject
*cpufreq_global_kobject
;
736 EXPORT_SYMBOL(cpufreq_global_kobject
);
738 static int cpufreq_global_kobject_usage
;
740 int cpufreq_get_global_kobject(void)
742 if (!cpufreq_global_kobject_usage
++)
743 return kobject_add(cpufreq_global_kobject
,
744 &cpu_subsys
.dev_root
->kobj
, "%s", "cpufreq");
748 EXPORT_SYMBOL(cpufreq_get_global_kobject
);
750 void cpufreq_put_global_kobject(void)
752 if (!--cpufreq_global_kobject_usage
)
753 kobject_del(cpufreq_global_kobject
);
755 EXPORT_SYMBOL(cpufreq_put_global_kobject
);
757 int cpufreq_sysfs_create_file(const struct attribute
*attr
)
759 int ret
= cpufreq_get_global_kobject();
762 ret
= sysfs_create_file(cpufreq_global_kobject
, attr
);
764 cpufreq_put_global_kobject();
769 EXPORT_SYMBOL(cpufreq_sysfs_create_file
);
771 void cpufreq_sysfs_remove_file(const struct attribute
*attr
)
773 sysfs_remove_file(cpufreq_global_kobject
, attr
);
774 cpufreq_put_global_kobject();
776 EXPORT_SYMBOL(cpufreq_sysfs_remove_file
);
778 /* symlink affected CPUs */
779 static int cpufreq_add_dev_symlink(struct cpufreq_policy
*policy
)
784 for_each_cpu(j
, policy
->cpus
) {
785 struct device
*cpu_dev
;
787 if (j
== policy
->cpu
)
790 pr_debug("Adding link for CPU: %u\n", j
);
791 cpu_dev
= get_cpu_device(j
);
792 ret
= sysfs_create_link(&cpu_dev
->kobj
, &policy
->kobj
,
800 static int cpufreq_add_dev_interface(struct cpufreq_policy
*policy
,
803 struct freq_attr
**drv_attr
;
806 /* prepare interface data */
807 ret
= kobject_init_and_add(&policy
->kobj
, &ktype_cpufreq
,
808 &dev
->kobj
, "cpufreq");
812 /* set up files for this cpu device */
813 drv_attr
= cpufreq_driver
->attr
;
814 while ((drv_attr
) && (*drv_attr
)) {
815 ret
= sysfs_create_file(&policy
->kobj
, &((*drv_attr
)->attr
));
817 goto err_out_kobj_put
;
820 if (cpufreq_driver
->get
) {
821 ret
= sysfs_create_file(&policy
->kobj
, &cpuinfo_cur_freq
.attr
);
823 goto err_out_kobj_put
;
825 if (cpufreq_driver
->target
) {
826 ret
= sysfs_create_file(&policy
->kobj
, &scaling_cur_freq
.attr
);
828 goto err_out_kobj_put
;
830 if (cpufreq_driver
->bios_limit
) {
831 ret
= sysfs_create_file(&policy
->kobj
, &bios_limit
.attr
);
833 goto err_out_kobj_put
;
836 ret
= cpufreq_add_dev_symlink(policy
);
838 goto err_out_kobj_put
;
843 kobject_put(&policy
->kobj
);
844 wait_for_completion(&policy
->kobj_unregister
);
848 static void cpufreq_init_policy(struct cpufreq_policy
*policy
)
850 struct cpufreq_policy new_policy
;
853 memcpy(&new_policy
, policy
, sizeof(*policy
));
854 /* assure that the starting sequence is run in __cpufreq_set_policy */
855 policy
->governor
= NULL
;
857 /* set default policy */
858 ret
= __cpufreq_set_policy(policy
, &new_policy
);
859 policy
->user_policy
.policy
= policy
->policy
;
860 policy
->user_policy
.governor
= policy
->governor
;
863 pr_debug("setting policy failed\n");
864 if (cpufreq_driver
->exit
)
865 cpufreq_driver
->exit(policy
);
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Attach @cpu to an existing @policy: stop the governor, publish the cpu
 * in the per-cpu tables under the driver lock, then restart the governor.
 * Sysfs links are skipped during light-weight (frozen) init.
 * NOTE(review): reconstructed from fragmented text — verify the has_target
 * guards around governor start/stop against the original tree.
 */
static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
				  unsigned int cpu, struct device *dev,
				  bool frozen)
{
	int ret = 0, has_target = !!cpufreq_driver->target;
	unsigned long flags;

	if (has_target) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (ret) {
			pr_err("%s: Failed to stop governor\n", __func__);
			return ret;
		}
	}

	lock_policy_rwsem_write(policy->cpu);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpumask_set_cpu(cpu, policy->cpus);
	per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
	per_cpu(cpufreq_cpu_data, cpu) = policy;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	unlock_policy_rwsem_write(policy->cpu);

	if (has_target) {
		if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
			(ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
			pr_err("%s: Failed to start governor\n", __func__);
			return ret;
		}
	}

	/* Don't touch sysfs links during light-weight init */
	if (!frozen)
		ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");

	return ret;
}
#endif
912 static struct cpufreq_policy
*cpufreq_policy_restore(unsigned int cpu
)
914 struct cpufreq_policy
*policy
;
917 write_lock_irqsave(&cpufreq_driver_lock
, flags
);
919 policy
= per_cpu(cpufreq_cpu_data_fallback
, cpu
);
921 write_unlock_irqrestore(&cpufreq_driver_lock
, flags
);
926 static struct cpufreq_policy
*cpufreq_policy_alloc(void)
928 struct cpufreq_policy
*policy
;
930 policy
= kzalloc(sizeof(*policy
), GFP_KERNEL
);
934 if (!alloc_cpumask_var(&policy
->cpus
, GFP_KERNEL
))
935 goto err_free_policy
;
937 if (!zalloc_cpumask_var(&policy
->related_cpus
, GFP_KERNEL
))
938 goto err_free_cpumask
;
940 INIT_LIST_HEAD(&policy
->policy_list
);
944 free_cpumask_var(policy
->cpus
);
951 static void cpufreq_policy_free(struct cpufreq_policy
*policy
)
955 write_lock_irqsave(&cpufreq_driver_lock
, flags
);
956 list_del(&policy
->policy_list
);
957 write_unlock_irqrestore(&cpufreq_driver_lock
, flags
);
959 free_cpumask_var(policy
->related_cpus
);
960 free_cpumask_var(policy
->cpus
);
964 static int __cpufreq_add_dev(struct device
*dev
, struct subsys_interface
*sif
,
967 unsigned int j
, cpu
= dev
->id
;
969 struct cpufreq_policy
*policy
;
971 #ifdef CONFIG_HOTPLUG_CPU
972 struct cpufreq_governor
*gov
;
976 if (cpu_is_offline(cpu
))
979 pr_debug("adding CPU %u\n", cpu
);
982 /* check whether a different CPU already registered this
983 * CPU because it is in the same boat. */
984 policy
= cpufreq_cpu_get(cpu
);
985 if (unlikely(policy
)) {
986 cpufreq_cpu_put(policy
);
990 if (!down_read_trylock(&cpufreq_rwsem
))
993 #ifdef CONFIG_HOTPLUG_CPU
994 /* Check if this cpu was hot-unplugged earlier and has siblings */
995 read_lock_irqsave(&cpufreq_driver_lock
, flags
);
996 for_each_online_cpu(sibling
) {
997 struct cpufreq_policy
*cp
= per_cpu(cpufreq_cpu_data
, sibling
);
998 if (cp
&& cpumask_test_cpu(cpu
, cp
->related_cpus
)) {
999 read_unlock_irqrestore(&cpufreq_driver_lock
, flags
);
1000 ret
= cpufreq_add_policy_cpu(cp
, cpu
, dev
, frozen
);
1001 up_read(&cpufreq_rwsem
);
1005 read_unlock_irqrestore(&cpufreq_driver_lock
, flags
);
1010 /* Restore the saved policy when doing light-weight init */
1011 policy
= cpufreq_policy_restore(cpu
);
1013 policy
= cpufreq_policy_alloc();
1019 policy
->governor
= CPUFREQ_DEFAULT_GOVERNOR
;
1020 cpumask_copy(policy
->cpus
, cpumask_of(cpu
));
1022 /* Initially set CPU itself as the policy_cpu */
1023 per_cpu(cpufreq_policy_cpu
, cpu
) = cpu
;
1025 init_completion(&policy
->kobj_unregister
);
1026 INIT_WORK(&policy
->update
, handle_update
);
1028 /* call driver. From then on the cpufreq must be able
1029 * to accept all calls to ->verify and ->setpolicy for this CPU
1031 ret
= cpufreq_driver
->init(policy
);
1033 pr_debug("initialization failed\n");
1034 goto err_set_policy_cpu
;
1037 /* related cpus should atleast have policy->cpus */
1038 cpumask_or(policy
->related_cpus
, policy
->related_cpus
, policy
->cpus
);
1041 * affected cpus must always be the one, which are online. We aren't
1042 * managing offline cpus here.
1044 cpumask_and(policy
->cpus
, policy
->cpus
, cpu_online_mask
);
1046 policy
->user_policy
.min
= policy
->min
;
1047 policy
->user_policy
.max
= policy
->max
;
1049 blocking_notifier_call_chain(&cpufreq_policy_notifier_list
,
1050 CPUFREQ_START
, policy
);
1052 #ifdef CONFIG_HOTPLUG_CPU
1053 gov
= __find_governor(per_cpu(cpufreq_cpu_governor
, cpu
));
1055 policy
->governor
= gov
;
1056 pr_debug("Restoring governor %s for cpu %d\n",
1057 policy
->governor
->name
, cpu
);
1061 write_lock_irqsave(&cpufreq_driver_lock
, flags
);
1062 for_each_cpu(j
, policy
->cpus
) {
1063 per_cpu(cpufreq_cpu_data
, j
) = policy
;
1064 per_cpu(cpufreq_policy_cpu
, j
) = policy
->cpu
;
1066 write_unlock_irqrestore(&cpufreq_driver_lock
, flags
);
1069 ret
= cpufreq_add_dev_interface(policy
, dev
);
1071 goto err_out_unregister
;
1073 write_lock_irqsave(&cpufreq_driver_lock
, flags
);
1074 list_add(&policy
->policy_list
, &cpufreq_policy_list
);
1075 write_unlock_irqrestore(&cpufreq_driver_lock
, flags
);
1078 cpufreq_init_policy(policy
);
1080 kobject_uevent(&policy
->kobj
, KOBJ_ADD
);
1081 up_read(&cpufreq_rwsem
);
1083 pr_debug("initialization complete\n");
1088 write_lock_irqsave(&cpufreq_driver_lock
, flags
);
1089 for_each_cpu(j
, policy
->cpus
) {
1090 per_cpu(cpufreq_cpu_data
, j
) = NULL
;
1092 per_cpu(cpufreq_policy_cpu
, j
) = -1;
1094 write_unlock_irqrestore(&cpufreq_driver_lock
, flags
);
1097 per_cpu(cpufreq_policy_cpu
, cpu
) = -1;
1098 cpufreq_policy_free(policy
);
1100 up_read(&cpufreq_rwsem
);
1106 * cpufreq_add_dev - add a CPU device
1108 * Adds the cpufreq interface for a CPU device.
1110 * The Oracle says: try running cpufreq registration/unregistration concurrently
1111 * with with cpu hotplugging and all hell will break loose. Tried to clean this
1112 * mess up, but more thorough testing is needed. - Mathieu
1114 static int cpufreq_add_dev(struct device
*dev
, struct subsys_interface
*sif
)
1116 return __cpufreq_add_dev(dev
, sif
, false);
1119 static void update_policy_cpu(struct cpufreq_policy
*policy
, unsigned int cpu
)
1123 policy
->last_cpu
= policy
->cpu
;
1126 for_each_cpu(j
, policy
->cpus
)
1127 per_cpu(cpufreq_policy_cpu
, j
) = cpu
;
1129 #ifdef CONFIG_CPU_FREQ_TABLE
1130 cpufreq_frequency_table_update_policy_cpu(policy
);
1132 blocking_notifier_call_chain(&cpufreq_policy_notifier_list
,
1133 CPUFREQ_UPDATE_POLICY_CPU
, policy
);
1136 static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy
*policy
,
1137 unsigned int old_cpu
, bool frozen
)
1139 struct device
*cpu_dev
;
1140 unsigned long flags
;
1143 /* first sibling now owns the new sysfs dir */
1144 cpu_dev
= get_cpu_device(cpumask_first(policy
->cpus
));
1146 /* Don't touch sysfs files during light-weight tear-down */
1150 sysfs_remove_link(&cpu_dev
->kobj
, "cpufreq");
1151 ret
= kobject_move(&policy
->kobj
, &cpu_dev
->kobj
);
1153 pr_err("%s: Failed to move kobj: %d", __func__
, ret
);
1155 WARN_ON(lock_policy_rwsem_write(old_cpu
));
1156 cpumask_set_cpu(old_cpu
, policy
->cpus
);
1158 write_lock_irqsave(&cpufreq_driver_lock
, flags
);
1159 per_cpu(cpufreq_cpu_data
, old_cpu
) = policy
;
1160 write_unlock_irqrestore(&cpufreq_driver_lock
, flags
);
1162 unlock_policy_rwsem_write(old_cpu
);
1164 ret
= sysfs_create_link(&cpu_dev
->kobj
, &policy
->kobj
,
1174 * __cpufreq_remove_dev - remove a CPU device
1176 * Removes the cpufreq interface for a CPU device.
1177 * Caller should already have policy_rwsem in write mode for this CPU.
1178 * This routine frees the rwsem before returning.
1180 static int __cpufreq_remove_dev(struct device
*dev
,
1181 struct subsys_interface
*sif
, bool frozen
)
1183 unsigned int cpu
= dev
->id
, cpus
;
1185 unsigned long flags
;
1186 struct cpufreq_policy
*policy
;
1187 struct kobject
*kobj
;
1188 struct completion
*cmp
;
1190 pr_debug("%s: unregistering CPU %u\n", __func__
, cpu
);
1192 write_lock_irqsave(&cpufreq_driver_lock
, flags
);
1194 policy
= per_cpu(cpufreq_cpu_data
, cpu
);
1195 per_cpu(cpufreq_cpu_data
, cpu
) = NULL
;
1197 /* Save the policy somewhere when doing a light-weight tear-down */
1199 per_cpu(cpufreq_cpu_data_fallback
, cpu
) = policy
;
1201 write_unlock_irqrestore(&cpufreq_driver_lock
, flags
);
1204 pr_debug("%s: No cpu_data found\n", __func__
);
1208 if (cpufreq_driver
->target
) {
1209 ret
= __cpufreq_governor(policy
, CPUFREQ_GOV_STOP
);
1211 pr_err("%s: Failed to stop governor\n", __func__
);
1216 #ifdef CONFIG_HOTPLUG_CPU
1217 if (!cpufreq_driver
->setpolicy
)
1218 strncpy(per_cpu(cpufreq_cpu_governor
, cpu
),
1219 policy
->governor
->name
, CPUFREQ_NAME_LEN
);
1222 WARN_ON(lock_policy_rwsem_write(cpu
));
1223 cpus
= cpumask_weight(policy
->cpus
);
1226 cpumask_clear_cpu(cpu
, policy
->cpus
);
1227 unlock_policy_rwsem_write(cpu
);
1229 if (cpu
!= policy
->cpu
&& !frozen
) {
1230 sysfs_remove_link(&dev
->kobj
, "cpufreq");
1231 } else if (cpus
> 1) {
1233 new_cpu
= cpufreq_nominate_new_policy_cpu(policy
, cpu
, frozen
);
1235 WARN_ON(lock_policy_rwsem_write(cpu
));
1236 update_policy_cpu(policy
, new_cpu
);
1237 unlock_policy_rwsem_write(cpu
);
1240 pr_debug("%s: policy Kobject moved to cpu: %d "
1241 "from: %d\n",__func__
, new_cpu
, cpu
);
1246 /* If cpu is last user of policy, free policy */
1248 if (cpufreq_driver
->target
) {
1249 ret
= __cpufreq_governor(policy
,
1250 CPUFREQ_GOV_POLICY_EXIT
);
1252 pr_err("%s: Failed to exit governor\n",
1259 lock_policy_rwsem_read(cpu
);
1260 kobj
= &policy
->kobj
;
1261 cmp
= &policy
->kobj_unregister
;
1262 unlock_policy_rwsem_read(cpu
);
1266 * We need to make sure that the underlying kobj is
1267 * actually not referenced anymore by anybody before we
1268 * proceed with unloading.
1270 pr_debug("waiting for dropping of refcount\n");
1271 wait_for_completion(cmp
);
1272 pr_debug("wait complete\n");
1276 * Perform the ->exit() even during light-weight tear-down,
1277 * since this is a core component, and is essential for the
1278 * subsequent light-weight ->init() to succeed.
1280 if (cpufreq_driver
->exit
)
1281 cpufreq_driver
->exit(policy
);
1284 cpufreq_policy_free(policy
);
1286 if (cpufreq_driver
->target
) {
1287 if ((ret
= __cpufreq_governor(policy
, CPUFREQ_GOV_START
)) ||
1288 (ret
= __cpufreq_governor(policy
, CPUFREQ_GOV_LIMITS
))) {
1289 pr_err("%s: Failed to start governor\n",
1296 per_cpu(cpufreq_policy_cpu
, cpu
) = -1;
1300 static int cpufreq_remove_dev(struct device
*dev
, struct subsys_interface
*sif
)
1302 unsigned int cpu
= dev
->id
;
1305 if (cpu_is_offline(cpu
))
1308 retval
= __cpufreq_remove_dev(dev
, sif
, false);
1312 static void handle_update(struct work_struct
*work
)
1314 struct cpufreq_policy
*policy
=
1315 container_of(work
, struct cpufreq_policy
, update
);
1316 unsigned int cpu
= policy
->cpu
;
1317 pr_debug("handle_update for cpu %u called\n", cpu
);
1318 cpufreq_update_policy(cpu
);
1322 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
1325 * @old_freq: CPU frequency the kernel thinks the CPU runs at
1326 * @new_freq: CPU frequency the CPU actually runs at
1328 * We adjust to current frequency first, and need to clean up later.
1329 * So either call to cpufreq_update_policy() or schedule handle_update()).
1331 static void cpufreq_out_of_sync(unsigned int cpu
, unsigned int old_freq
,
1332 unsigned int new_freq
)
1334 struct cpufreq_policy
*policy
;
1335 struct cpufreq_freqs freqs
;
1336 unsigned long flags
;
1338 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
1339 "core thinks of %u, is %u kHz.\n", old_freq
, new_freq
);
1341 freqs
.old
= old_freq
;
1342 freqs
.new = new_freq
;
1344 read_lock_irqsave(&cpufreq_driver_lock
, flags
);
1345 policy
= per_cpu(cpufreq_cpu_data
, cpu
);
1346 read_unlock_irqrestore(&cpufreq_driver_lock
, flags
);
1348 cpufreq_notify_transition(policy
, &freqs
, CPUFREQ_PRECHANGE
);
1349 cpufreq_notify_transition(policy
, &freqs
, CPUFREQ_POSTCHANGE
);
1353 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1356 * This is the last known freq, without actually getting it from the driver.
1357 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1359 unsigned int cpufreq_quick_get(unsigned int cpu
)
1361 struct cpufreq_policy
*policy
;
1362 unsigned int ret_freq
= 0;
1364 if (cpufreq_driver
&& cpufreq_driver
->setpolicy
&& cpufreq_driver
->get
)
1365 return cpufreq_driver
->get(cpu
);
1367 policy
= cpufreq_cpu_get(cpu
);
1369 ret_freq
= policy
->cur
;
1370 cpufreq_cpu_put(policy
);
1375 EXPORT_SYMBOL(cpufreq_quick_get
);
1378 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1381 * Just return the max possible frequency for a given CPU.
1383 unsigned int cpufreq_quick_get_max(unsigned int cpu
)
1385 struct cpufreq_policy
*policy
= cpufreq_cpu_get(cpu
);
1386 unsigned int ret_freq
= 0;
1389 ret_freq
= policy
->max
;
1390 cpufreq_cpu_put(policy
);
1395 EXPORT_SYMBOL(cpufreq_quick_get_max
);
1397 static unsigned int __cpufreq_get(unsigned int cpu
)
1399 struct cpufreq_policy
*policy
= per_cpu(cpufreq_cpu_data
, cpu
);
1400 unsigned int ret_freq
= 0;
1402 if (!cpufreq_driver
->get
)
1405 ret_freq
= cpufreq_driver
->get(cpu
);
1407 if (ret_freq
&& policy
->cur
&&
1408 !(cpufreq_driver
->flags
& CPUFREQ_CONST_LOOPS
)) {
1409 /* verify no discrepancy between actual and
1410 saved value exists */
1411 if (unlikely(ret_freq
!= policy
->cur
)) {
1412 cpufreq_out_of_sync(cpu
, policy
->cur
, ret_freq
);
1413 schedule_work(&policy
->update
);
1421 * cpufreq_get - get the current CPU frequency (in kHz)
1424 * Get the CPU current (static) CPU frequency
1426 unsigned int cpufreq_get(unsigned int cpu
)
1428 unsigned int ret_freq
= 0;
1430 if (!down_read_trylock(&cpufreq_rwsem
))
1433 if (unlikely(lock_policy_rwsem_read(cpu
)))
1436 ret_freq
= __cpufreq_get(cpu
);
1438 unlock_policy_rwsem_read(cpu
);
1441 up_read(&cpufreq_rwsem
);
1445 EXPORT_SYMBOL(cpufreq_get
);
1447 static struct subsys_interface cpufreq_interface
= {
1449 .subsys
= &cpu_subsys
,
1450 .add_dev
= cpufreq_add_dev
,
1451 .remove_dev
= cpufreq_remove_dev
,
1455 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
1457 * This function is only executed for the boot processor. The other CPUs
1458 * have been put offline by means of CPU hotplug.
1460 static int cpufreq_bp_suspend(void)
1464 int cpu
= smp_processor_id();
1465 struct cpufreq_policy
*policy
;
1467 pr_debug("suspending cpu %u\n", cpu
);
1469 /* If there's no policy for the boot CPU, we have nothing to do. */
1470 policy
= cpufreq_cpu_get(cpu
);
1474 if (cpufreq_driver
->suspend
) {
1475 ret
= cpufreq_driver
->suspend(policy
);
1477 printk(KERN_ERR
"cpufreq: suspend failed in ->suspend "
1478 "step on CPU %u\n", policy
->cpu
);
1481 cpufreq_cpu_put(policy
);
1486 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
1488 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
1489 * 2.) schedule call cpufreq_update_policy() ASAP as interrupts are
1490 * restored. It will verify that the current freq is in sync with
1491 * what we believe it to be. This is a bit later than when it
1492 * should be, but nonethteless it's better than calling
1493 * cpufreq_driver->get() here which might re-enable interrupts...
1495 * This function is only executed for the boot CPU. The other CPUs have not
1496 * been turned on yet.
1498 static void cpufreq_bp_resume(void)
1502 int cpu
= smp_processor_id();
1503 struct cpufreq_policy
*policy
;
1505 pr_debug("resuming cpu %u\n", cpu
);
1507 /* If there's no policy for the boot CPU, we have nothing to do. */
1508 policy
= cpufreq_cpu_get(cpu
);
1512 if (cpufreq_driver
->resume
) {
1513 ret
= cpufreq_driver
->resume(policy
);
1515 printk(KERN_ERR
"cpufreq: resume failed in ->resume "
1516 "step on CPU %u\n", policy
->cpu
);
1521 schedule_work(&policy
->update
);
1524 cpufreq_cpu_put(policy
);
1527 static struct syscore_ops cpufreq_syscore_ops
= {
1528 .suspend
= cpufreq_bp_suspend
,
1529 .resume
= cpufreq_bp_resume
,
1533 * cpufreq_get_current_driver - return current driver's name
1535 * Return the name string of the currently loaded cpufreq driver
1538 const char *cpufreq_get_current_driver(void)
1541 return cpufreq_driver
->name
;
1545 EXPORT_SYMBOL_GPL(cpufreq_get_current_driver
);
1547 /*********************************************************************
1548 * NOTIFIER LISTS INTERFACE *
1549 *********************************************************************/
1552 * cpufreq_register_notifier - register a driver with cpufreq
1553 * @nb: notifier function to register
1554 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1556 * Add a driver to one of two lists: either a list of drivers that
1557 * are notified about clock rate changes (once before and once after
1558 * the transition), or a list of drivers that are notified about
1559 * changes in cpufreq policy.
1561 * This function may sleep, and has the same return conditions as
1562 * blocking_notifier_chain_register.
1564 int cpufreq_register_notifier(struct notifier_block
*nb
, unsigned int list
)
1568 if (cpufreq_disabled())
1571 WARN_ON(!init_cpufreq_transition_notifier_list_called
);
1574 case CPUFREQ_TRANSITION_NOTIFIER
:
1575 ret
= srcu_notifier_chain_register(
1576 &cpufreq_transition_notifier_list
, nb
);
1578 case CPUFREQ_POLICY_NOTIFIER
:
1579 ret
= blocking_notifier_chain_register(
1580 &cpufreq_policy_notifier_list
, nb
);
1588 EXPORT_SYMBOL(cpufreq_register_notifier
);
1591 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1592 * @nb: notifier block to be unregistered
1593 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1595 * Remove a driver from the CPU frequency notifier list.
1597 * This function may sleep, and has the same return conditions as
1598 * blocking_notifier_chain_unregister.
1600 int cpufreq_unregister_notifier(struct notifier_block
*nb
, unsigned int list
)
1604 if (cpufreq_disabled())
1608 case CPUFREQ_TRANSITION_NOTIFIER
:
1609 ret
= srcu_notifier_chain_unregister(
1610 &cpufreq_transition_notifier_list
, nb
);
1612 case CPUFREQ_POLICY_NOTIFIER
:
1613 ret
= blocking_notifier_chain_unregister(
1614 &cpufreq_policy_notifier_list
, nb
);
1622 EXPORT_SYMBOL(cpufreq_unregister_notifier
);
1625 /*********************************************************************
1627 *********************************************************************/
1629 int __cpufreq_driver_target(struct cpufreq_policy
*policy
,
1630 unsigned int target_freq
,
1631 unsigned int relation
)
1633 int retval
= -EINVAL
;
1634 unsigned int old_target_freq
= target_freq
;
1636 if (cpufreq_disabled())
1638 if (policy
->transition_ongoing
)
1641 /* Make sure that target_freq is within supported range */
1642 if (target_freq
> policy
->max
)
1643 target_freq
= policy
->max
;
1644 if (target_freq
< policy
->min
)
1645 target_freq
= policy
->min
;
1647 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1648 policy
->cpu
, target_freq
, relation
, old_target_freq
);
1650 if (target_freq
== policy
->cur
)
1653 if (cpufreq_driver
->target
)
1654 retval
= cpufreq_driver
->target(policy
, target_freq
, relation
);
1658 EXPORT_SYMBOL_GPL(__cpufreq_driver_target
);
1660 int cpufreq_driver_target(struct cpufreq_policy
*policy
,
1661 unsigned int target_freq
,
1662 unsigned int relation
)
1666 if (unlikely(lock_policy_rwsem_write(policy
->cpu
)))
1669 ret
= __cpufreq_driver_target(policy
, target_freq
, relation
);
1671 unlock_policy_rwsem_write(policy
->cpu
);
1676 EXPORT_SYMBOL_GPL(cpufreq_driver_target
);
1679 * when "event" is CPUFREQ_GOV_LIMITS
1682 static int __cpufreq_governor(struct cpufreq_policy
*policy
,
1687 /* Only must be defined when default governor is known to have latency
1688 restrictions, like e.g. conservative or ondemand.
1689 That this is the case is already ensured in Kconfig
1691 #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1692 struct cpufreq_governor
*gov
= &cpufreq_gov_performance
;
1694 struct cpufreq_governor
*gov
= NULL
;
1697 if (policy
->governor
->max_transition_latency
&&
1698 policy
->cpuinfo
.transition_latency
>
1699 policy
->governor
->max_transition_latency
) {
1703 printk(KERN_WARNING
"%s governor failed, too long"
1704 " transition latency of HW, fallback"
1705 " to %s governor\n",
1706 policy
->governor
->name
,
1708 policy
->governor
= gov
;
1712 if (event
== CPUFREQ_GOV_POLICY_INIT
)
1713 if (!try_module_get(policy
->governor
->owner
))
1716 pr_debug("__cpufreq_governor for CPU %u, event %u\n",
1717 policy
->cpu
, event
);
1719 mutex_lock(&cpufreq_governor_lock
);
1720 if ((!policy
->governor_enabled
&& (event
== CPUFREQ_GOV_STOP
)) ||
1721 (policy
->governor_enabled
&& (event
== CPUFREQ_GOV_START
))) {
1722 mutex_unlock(&cpufreq_governor_lock
);
1723 if (event
== CPUFREQ_GOV_POLICY_INIT
)
1724 module_put(policy
->governor
->owner
);
1728 if (event
== CPUFREQ_GOV_STOP
)
1729 policy
->governor_enabled
= false;
1730 else if (event
== CPUFREQ_GOV_START
)
1731 policy
->governor_enabled
= true;
1733 mutex_unlock(&cpufreq_governor_lock
);
1735 ret
= policy
->governor
->governor(policy
, event
);
1738 if (event
== CPUFREQ_GOV_POLICY_INIT
)
1739 policy
->governor
->initialized
++;
1740 else if (event
== CPUFREQ_GOV_POLICY_EXIT
)
1741 policy
->governor
->initialized
--;
1743 /* Restore original values */
1744 mutex_lock(&cpufreq_governor_lock
);
1745 if (event
== CPUFREQ_GOV_STOP
)
1746 policy
->governor_enabled
= true;
1747 else if (event
== CPUFREQ_GOV_START
)
1748 policy
->governor_enabled
= false;
1749 mutex_unlock(&cpufreq_governor_lock
);
1752 if (((event
== CPUFREQ_GOV_POLICY_INIT
) && ret
) ||
1753 ((event
== CPUFREQ_GOV_POLICY_EXIT
) && !ret
))
1754 module_put(policy
->governor
->owner
);
1759 int cpufreq_register_governor(struct cpufreq_governor
*governor
)
1766 if (cpufreq_disabled())
1769 mutex_lock(&cpufreq_governor_mutex
);
1771 governor
->initialized
= 0;
1773 if (__find_governor(governor
->name
) == NULL
) {
1775 list_add(&governor
->governor_list
, &cpufreq_governor_list
);
1778 mutex_unlock(&cpufreq_governor_mutex
);
1781 EXPORT_SYMBOL_GPL(cpufreq_register_governor
);
1783 void cpufreq_unregister_governor(struct cpufreq_governor
*governor
)
1785 #ifdef CONFIG_HOTPLUG_CPU
1792 if (cpufreq_disabled())
1795 #ifdef CONFIG_HOTPLUG_CPU
1796 for_each_present_cpu(cpu
) {
1797 if (cpu_online(cpu
))
1799 if (!strcmp(per_cpu(cpufreq_cpu_governor
, cpu
), governor
->name
))
1800 strcpy(per_cpu(cpufreq_cpu_governor
, cpu
), "\0");
1804 mutex_lock(&cpufreq_governor_mutex
);
1805 list_del(&governor
->governor_list
);
1806 mutex_unlock(&cpufreq_governor_mutex
);
1809 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor
);
1812 /*********************************************************************
1813 * POLICY INTERFACE *
1814 *********************************************************************/
1817 * cpufreq_get_policy - get the current cpufreq_policy
1818 * @policy: struct cpufreq_policy into which the current cpufreq_policy
1821 * Reads the current cpufreq policy.
1823 int cpufreq_get_policy(struct cpufreq_policy
*policy
, unsigned int cpu
)
1825 struct cpufreq_policy
*cpu_policy
;
1829 cpu_policy
= cpufreq_cpu_get(cpu
);
1833 memcpy(policy
, cpu_policy
, sizeof(*policy
));
1835 cpufreq_cpu_put(cpu_policy
);
1838 EXPORT_SYMBOL(cpufreq_get_policy
);
1841 * data : current policy.
1842 * policy : policy to be set.
1844 static int __cpufreq_set_policy(struct cpufreq_policy
*policy
,
1845 struct cpufreq_policy
*new_policy
)
1847 int ret
= 0, failed
= 1;
1849 pr_debug("setting new policy for CPU %u: %u - %u kHz\n", new_policy
->cpu
,
1850 new_policy
->min
, new_policy
->max
);
1852 memcpy(&new_policy
->cpuinfo
, &policy
->cpuinfo
, sizeof(policy
->cpuinfo
));
1854 if (new_policy
->min
> policy
->max
|| new_policy
->max
< policy
->min
) {
1859 /* verify the cpu speed can be set within this limit */
1860 ret
= cpufreq_driver
->verify(new_policy
);
1864 /* adjust if necessary - all reasons */
1865 blocking_notifier_call_chain(&cpufreq_policy_notifier_list
,
1866 CPUFREQ_ADJUST
, new_policy
);
1868 /* adjust if necessary - hardware incompatibility*/
1869 blocking_notifier_call_chain(&cpufreq_policy_notifier_list
,
1870 CPUFREQ_INCOMPATIBLE
, new_policy
);
1873 * verify the cpu speed can be set within this limit, which might be
1874 * different to the first one
1876 ret
= cpufreq_driver
->verify(new_policy
);
1880 /* notification of the new policy */
1881 blocking_notifier_call_chain(&cpufreq_policy_notifier_list
,
1882 CPUFREQ_NOTIFY
, new_policy
);
1884 policy
->min
= new_policy
->min
;
1885 policy
->max
= new_policy
->max
;
1887 pr_debug("new min and max freqs are %u - %u kHz\n",
1888 policy
->min
, policy
->max
);
1890 if (cpufreq_driver
->setpolicy
) {
1891 policy
->policy
= new_policy
->policy
;
1892 pr_debug("setting range\n");
1893 ret
= cpufreq_driver
->setpolicy(new_policy
);
1895 if (new_policy
->governor
!= policy
->governor
) {
1896 /* save old, working values */
1897 struct cpufreq_governor
*old_gov
= policy
->governor
;
1899 pr_debug("governor switch\n");
1901 /* end old governor */
1902 if (policy
->governor
) {
1903 __cpufreq_governor(policy
, CPUFREQ_GOV_STOP
);
1904 unlock_policy_rwsem_write(new_policy
->cpu
);
1905 __cpufreq_governor(policy
,
1906 CPUFREQ_GOV_POLICY_EXIT
);
1907 lock_policy_rwsem_write(new_policy
->cpu
);
1910 /* start new governor */
1911 policy
->governor
= new_policy
->governor
;
1912 if (!__cpufreq_governor(policy
, CPUFREQ_GOV_POLICY_INIT
)) {
1913 if (!__cpufreq_governor(policy
, CPUFREQ_GOV_START
)) {
1916 unlock_policy_rwsem_write(new_policy
->cpu
);
1917 __cpufreq_governor(policy
,
1918 CPUFREQ_GOV_POLICY_EXIT
);
1919 lock_policy_rwsem_write(new_policy
->cpu
);
1924 /* new governor failed, so re-start old one */
1925 pr_debug("starting governor %s failed\n",
1926 policy
->governor
->name
);
1928 policy
->governor
= old_gov
;
1929 __cpufreq_governor(policy
,
1930 CPUFREQ_GOV_POLICY_INIT
);
1931 __cpufreq_governor(policy
,
1937 /* might be a policy change, too, so fall through */
1939 pr_debug("governor: change or update limits\n");
1940 ret
= __cpufreq_governor(policy
, CPUFREQ_GOV_LIMITS
);
1948 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
1949 * @cpu: CPU which shall be re-evaluated
1951 * Useful for policy notifiers which have different necessities
1952 * at different times.
1954 int cpufreq_update_policy(unsigned int cpu
)
1956 struct cpufreq_policy
*policy
= cpufreq_cpu_get(cpu
);
1957 struct cpufreq_policy new_policy
;
1965 if (unlikely(lock_policy_rwsem_write(cpu
))) {
1970 pr_debug("updating policy for CPU %u\n", cpu
);
1971 memcpy(&new_policy
, policy
, sizeof(*policy
));
1972 new_policy
.min
= policy
->user_policy
.min
;
1973 new_policy
.max
= policy
->user_policy
.max
;
1974 new_policy
.policy
= policy
->user_policy
.policy
;
1975 new_policy
.governor
= policy
->user_policy
.governor
;
1978 * BIOS might change freq behind our back
1979 * -> ask driver for current freq and notify governors about a change
1981 if (cpufreq_driver
->get
) {
1982 new_policy
.cur
= cpufreq_driver
->get(cpu
);
1984 pr_debug("Driver did not initialize current freq");
1985 policy
->cur
= new_policy
.cur
;
1987 if (policy
->cur
!= new_policy
.cur
&& cpufreq_driver
->target
)
1988 cpufreq_out_of_sync(cpu
, policy
->cur
,
1993 ret
= __cpufreq_set_policy(policy
, &new_policy
);
1995 unlock_policy_rwsem_write(cpu
);
1998 cpufreq_cpu_put(policy
);
2002 EXPORT_SYMBOL(cpufreq_update_policy
);
2004 static int cpufreq_cpu_callback(struct notifier_block
*nfb
,
2005 unsigned long action
, void *hcpu
)
2007 unsigned int cpu
= (unsigned long)hcpu
;
2009 bool frozen
= false;
2011 dev
= get_cpu_device(cpu
);
2014 if (action
& CPU_TASKS_FROZEN
)
2017 switch (action
& ~CPU_TASKS_FROZEN
) {
2019 __cpufreq_add_dev(dev
, NULL
, frozen
);
2020 cpufreq_update_policy(cpu
);
2023 case CPU_DOWN_PREPARE
:
2024 __cpufreq_remove_dev(dev
, NULL
, frozen
);
2027 case CPU_DOWN_FAILED
:
2028 __cpufreq_add_dev(dev
, NULL
, frozen
);
2035 static struct notifier_block __refdata cpufreq_cpu_notifier
= {
2036 .notifier_call
= cpufreq_cpu_callback
,
2039 /*********************************************************************
2040 * REGISTER / UNREGISTER CPUFREQ DRIVER *
2041 *********************************************************************/
2044 * cpufreq_register_driver - register a CPU Frequency driver
2045 * @driver_data: A struct cpufreq_driver containing the values#
2046 * submitted by the CPU Frequency driver.
2048 * Registers a CPU Frequency driver to this core code. This code
2049 * returns zero on success, -EBUSY when another driver got here first
2050 * (and isn't unregistered in the meantime).
2053 int cpufreq_register_driver(struct cpufreq_driver
*driver_data
)
2055 unsigned long flags
;
2058 if (cpufreq_disabled())
2061 if (!driver_data
|| !driver_data
->verify
|| !driver_data
->init
||
2062 ((!driver_data
->setpolicy
) && (!driver_data
->target
)))
2065 pr_debug("trying to register driver %s\n", driver_data
->name
);
2067 if (driver_data
->setpolicy
)
2068 driver_data
->flags
|= CPUFREQ_CONST_LOOPS
;
2070 write_lock_irqsave(&cpufreq_driver_lock
, flags
);
2071 if (cpufreq_driver
) {
2072 write_unlock_irqrestore(&cpufreq_driver_lock
, flags
);
2075 cpufreq_driver
= driver_data
;
2076 write_unlock_irqrestore(&cpufreq_driver_lock
, flags
);
2078 ret
= subsys_interface_register(&cpufreq_interface
);
2080 goto err_null_driver
;
2082 if (!(cpufreq_driver
->flags
& CPUFREQ_STICKY
)) {
2086 /* check for at least one working CPU */
2087 for (i
= 0; i
< nr_cpu_ids
; i
++)
2088 if (cpu_possible(i
) && per_cpu(cpufreq_cpu_data
, i
)) {
2093 /* if all ->init() calls failed, unregister */
2095 pr_debug("no CPU initialized for driver %s\n",
2101 register_hotcpu_notifier(&cpufreq_cpu_notifier
);
2102 pr_debug("driver %s up and running\n", driver_data
->name
);
2106 subsys_interface_unregister(&cpufreq_interface
);
2108 write_lock_irqsave(&cpufreq_driver_lock
, flags
);
2109 cpufreq_driver
= NULL
;
2110 write_unlock_irqrestore(&cpufreq_driver_lock
, flags
);
2113 EXPORT_SYMBOL_GPL(cpufreq_register_driver
);
2116 * cpufreq_unregister_driver - unregister the current CPUFreq driver
2118 * Unregister the current CPUFreq driver. Only call this if you have
2119 * the right to do so, i.e. if you have succeeded in initialising before!
2120 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2121 * currently not initialised.
2123 int cpufreq_unregister_driver(struct cpufreq_driver
*driver
)
2125 unsigned long flags
;
2127 if (!cpufreq_driver
|| (driver
!= cpufreq_driver
))
2130 pr_debug("unregistering driver %s\n", driver
->name
);
2132 subsys_interface_unregister(&cpufreq_interface
);
2133 unregister_hotcpu_notifier(&cpufreq_cpu_notifier
);
2135 down_write(&cpufreq_rwsem
);
2136 write_lock_irqsave(&cpufreq_driver_lock
, flags
);
2138 cpufreq_driver
= NULL
;
2140 write_unlock_irqrestore(&cpufreq_driver_lock
, flags
);
2141 up_write(&cpufreq_rwsem
);
2145 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver
);
2147 static int __init
cpufreq_core_init(void)
2151 if (cpufreq_disabled())
2154 for_each_possible_cpu(cpu
) {
2155 per_cpu(cpufreq_policy_cpu
, cpu
) = -1;
2156 init_rwsem(&per_cpu(cpu_policy_rwsem
, cpu
));
2159 cpufreq_global_kobject
= kobject_create();
2160 BUG_ON(!cpufreq_global_kobject
);
2161 register_syscore_ops(&cpufreq_syscore_ops
);
2165 core_initcall(cpufreq_core_init
);