/*
 * linux/drivers/cpufreq/cpufreq.c
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *
 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *            Added handling for CPU hotplug
 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *            Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/completion.h>
#include <linux/mutex.h>
#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, \
						"cpufreq-core", msg)

/*
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
#ifdef CONFIG_HOTPLUG_CPU
/* This one keeps track of the previously set governor of a removed CPU */
static DEFINE_PER_CPU(struct cpufreq_governor *, cpufreq_cpu_governor);
#endif
static DEFINE_SPINLOCK(cpufreq_driver_lock);
/*
 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
 * all cpufreq/hotplug/workqueue/etc related lock issues.
 *
 * The rules for this semaphore:
 * - Any routine that wants to read from the policy structure will
 *   do a down_read on this semaphore.
 * - Any routine that will write to the policy structure and/or may take away
 *   the policy altogether (eg. CPU hotplug), will hold this lock in write
 *   mode before doing so.
 *
 * - All holders of the lock should check to make sure that the CPU they
 *   are concerned with is online after they get the lock.
 * - Governor routines that can be called in the cpufreq hotplug path should
 *   not take this sem, as the top level hotplug notifier handler takes it.
 */
static DEFINE_PER_CPU(int, policy_cpu);
static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
#define lock_policy_rwsem(mode, cpu)					\
int lock_policy_rwsem_##mode(int cpu)					\
{									\
	int policy_cpu = per_cpu(policy_cpu, cpu);			\
	BUG_ON(policy_cpu == -1);					\
	down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));		\
	if (unlikely(!cpu_online(cpu))) {				\
		up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));	\
		return -1;						\
	}								\
									\
	return 0;							\
}

lock_policy_rwsem(read, cpu);
EXPORT_SYMBOL_GPL(lock_policy_rwsem_read);

lock_policy_rwsem(write, cpu);
EXPORT_SYMBOL_GPL(lock_policy_rwsem_write);
void unlock_policy_rwsem_read(int cpu)
{
	int policy_cpu = per_cpu(policy_cpu, cpu);
	BUG_ON(policy_cpu == -1);
	up_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
}
EXPORT_SYMBOL_GPL(unlock_policy_rwsem_read);

void unlock_policy_rwsem_write(int cpu)
{
	int policy_cpu = per_cpu(policy_cpu, cpu);
	BUG_ON(policy_cpu == -1);
	up_write(&per_cpu(cpu_policy_rwsem, policy_cpu));
}
EXPORT_SYMBOL_GPL(unlock_policy_rwsem_write);
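/*
 * Illustrative sketch (editor's addition, not part of the original driver):
 * the locking rules documented above translate into a read-side pattern
 * like the hypothetical helper below. Compiled out via #if 0 because it is
 * an example only.
 */
#if 0
static unsigned int example_read_scaling_min(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	unsigned int min = 0;

	if (lock_policy_rwsem_read(cpu) < 0)
		return 0;			/* CPU went offline */

	policy = per_cpu(cpufreq_cpu_data, cpu);
	if (policy)
		min = policy->min;

	unlock_policy_rwsem_read(cpu);
	return min;
}
#endif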
/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
		unsigned int event);
static unsigned int __cpufreq_get(unsigned int cpu);
static void handle_update(struct work_struct *work);

/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	init_cpufreq_transition_notifier_list_called = true;
	return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);

static LIST_HEAD(cpufreq_governor_list);
static DEFINE_MUTEX(cpufreq_governor_mutex);
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *data;
	unsigned long flags;

	if (cpu >= nr_cpu_ids)
		goto err_out;

	/* get the cpufreq driver */
	spin_lock_irqsave(&cpufreq_driver_lock, flags);

	if (!cpufreq_driver)
		goto err_out_unlock;

	if (!try_module_get(cpufreq_driver->owner))
		goto err_out_unlock;

	data = per_cpu(cpufreq_cpu_data, cpu);

	if (!data)
		goto err_out_put_module;

	if (!kobject_get(&data->kobj))
		goto err_out_put_module;

	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return data;

err_out_put_module:
	module_put(cpufreq_driver->owner);
err_out_unlock:
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
err_out:
	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);


void cpufreq_cpu_put(struct cpufreq_policy *data)
{
	kobject_put(&data->kobj);
	module_put(cpufreq_driver->owner);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
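/*
 * Illustrative sketch (editor's addition): cpufreq_cpu_get() returns a
 * reference-counted policy (kobject plus driver module reference) that must
 * be balanced with cpufreq_cpu_put(). A hypothetical caller would look like
 * the compiled-out example below.
 */
#if 0
static unsigned int example_report_cur_freq(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int cur = 0;

	if (!policy)
		return 0;		/* no driver, or CPU not managed */

	cur = policy->cur;
	cpufreq_cpu_put(policy);	/* drop kobject + module references */
	return cur;
}
#endif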
/*********************************************************************
 *                     UNIFIED DEBUG HELPERS                         *
 *********************************************************************/
#ifdef CONFIG_CPU_FREQ_DEBUG

/* what part(s) of the CPUfreq subsystem are debugged? */
static unsigned int debug;

/* is the debug output ratelimit'ed using printk_ratelimit? User can
 * set or modify this value.
 */
static unsigned int debug_ratelimit = 1;

/* is the printk_ratelimit'ing enabled? It's enabled after a successful
 * loading of a cpufreq driver, temporarily disabled when a new policy
 * is set, and disabled upon cpufreq driver removal
 */
static unsigned int disable_ratelimit = 1;
static DEFINE_SPINLOCK(disable_ratelimit_lock);

static void cpufreq_debug_enable_ratelimit(void)
{
	unsigned long flags;

	spin_lock_irqsave(&disable_ratelimit_lock, flags);
	if (disable_ratelimit)
		disable_ratelimit--;
	spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
}

static void cpufreq_debug_disable_ratelimit(void)
{
	unsigned long flags;

	spin_lock_irqsave(&disable_ratelimit_lock, flags);
	disable_ratelimit++;
	spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
}

void cpufreq_debug_printk(unsigned int type, const char *prefix,
			const char *fmt, ...)
{
	char s[256];
	va_list args;
	unsigned int len;
	unsigned long flags;

	if (type & debug) {
		spin_lock_irqsave(&disable_ratelimit_lock, flags);
		if (!disable_ratelimit && debug_ratelimit
					&& !printk_ratelimit()) {
			spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
			return;
		}
		spin_unlock_irqrestore(&disable_ratelimit_lock, flags);

		len = snprintf(s, 256, KERN_DEBUG "%s: ", prefix);

		va_start(args, fmt);
		len += vsnprintf(&s[len], (256 - len), fmt, args);
		va_end(args);

		printk(s);
	}
}
EXPORT_SYMBOL(cpufreq_debug_printk);


module_param(debug, uint, 0644);
MODULE_PARM_DESC(debug, "CPUfreq debugging: add 1 to debug core,"
			" 2 to debug drivers, and 4 to debug governors.");

module_param(debug_ratelimit, uint, 0644);
MODULE_PARM_DESC(debug_ratelimit, "CPUfreq debugging:"
					" set to 0 to disable ratelimiting.");

#else /* !CONFIG_CPU_FREQ_DEBUG */

static inline void cpufreq_debug_enable_ratelimit(void) { return; }
static inline void cpufreq_debug_disable_ratelimit(void) { return; }

#endif /* CONFIG_CPU_FREQ_DEBUG */
/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
#ifndef CONFIG_SMP
static unsigned long l_p_j_ref;
static unsigned int l_p_j_ref_freq;

static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		dprintk("saving %lu as reference value for loops_per_jiffy; "
			"freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
	}
	if ((val == CPUFREQ_PRECHANGE  && ci->old < ci->new) ||
	    (val == CPUFREQ_POSTCHANGE && ci->old > ci->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		dprintk("scaling loops_per_jiffy to %lu "
			"for frequency %u kHz\n", loops_per_jiffy, ci->new);
	}
}
#else
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	return;
}
#endif
/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
{
	struct cpufreq_policy *policy;

	BUG_ON(irqs_disabled());

	freqs->flags = cpufreq_driver->flags;
	dprintk("notification %u of frequency transition to %u kHz\n",
		state, freqs->new);

	policy = per_cpu(cpufreq_cpu_data, freqs->cpu);
	switch (state) {

	case CPUFREQ_PRECHANGE:
		/* detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				dprintk("Warning: CPU frequency is"
					" %u, cpufreq assumed %u kHz.\n",
					freqs->old, policy->cur);
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
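/*
 * Illustrative sketch (editor's addition): a scaling driver's ->target()
 * implementation is expected to bracket the actual hardware change with the
 * two notifications handled above. The hardware write below is a
 * placeholder, not a real API.
 */
#if 0
static int example_driver_target(struct cpufreq_policy *policy,
				 unsigned int target_freq,
				 unsigned int relation)
{
	struct cpufreq_freqs freqs;

	freqs.cpu = policy->cpu;
	freqs.old = policy->cur;
	freqs.new = target_freq;

	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
	/* ... program the hardware to run at freqs.new here ... */
	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);

	return 0;
}
#endif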
/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/

static struct cpufreq_governor *__find_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	list_for_each_entry(t, &cpufreq_governor_list, governor_list)
		if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
			return t;

	return NULL;
}

/**
 * cpufreq_parse_governor - parse a governor string
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
				struct cpufreq_governor **governor)
{
	int err = -EINVAL;

	if (cpufreq_driver->setpolicy) {
		if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_PERFORMANCE;
			err = 0;
		} else if (!strnicmp(str_governor, "powersave",
						CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_POWERSAVE;
			err = 0;
		}
	} else if (cpufreq_driver->target) {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);

		t = __find_governor(str_governor);
		if (t == NULL) {
			char *name = kasprintf(GFP_KERNEL, "cpufreq_%s",
							       str_governor);
			if (name) {
				int ret;

				mutex_unlock(&cpufreq_governor_mutex);
				ret = request_module("%s", name);
				mutex_lock(&cpufreq_governor_mutex);

				if (ret == 0)
					t = __find_governor(str_governor);
			}
			kfree(name);
		}

		if (t != NULL) {
			*governor = t;
			err = 0;
		}

		mutex_unlock(&cpufreq_governor_mutex);
	}

	return err;
}
/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);

static int __cpufreq_set_policy(struct cpufreq_policy *data,
				struct cpufreq_policy *policy);

/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)					\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	unsigned int ret = -EINVAL;					\
	struct cpufreq_policy new_policy;				\
									\
	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
	if (ret)							\
		return -EINVAL;						\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	ret = __cpufreq_set_policy(policy, &new_policy);		\
	policy->user_policy.object = policy->object;			\
									\
	return ret ? ret : count;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);
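/*
 * Editor's note (sketch, not in the original file): show_one()/store_one()
 * simply stamp out sysfs accessors. For example, show_one(scaling_min_freq,
 * min) above expands to roughly the following.
 */
#if 0
static ssize_t show_scaling_min_freq(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", policy->min);
}
#endif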
/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
					char *buf)
{
	unsigned int cur_freq = __cpufreq_get(policy->cpu);
	if (!cur_freq)
		return sprintf(buf, "<unknown>");
	return sprintf(buf, "%u\n", cur_freq);
}

/**
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sprintf(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sprintf(buf, "performance\n");
	else if (policy->governor)
		return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n",
				 policy->governor->name);
	return -EINVAL;
}
/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int ret = -EINVAL;
	char str_governor[16];
	struct cpufreq_policy new_policy;

	ret = cpufreq_get_policy(&new_policy, policy->cpu);
	if (ret)
		return ret;

	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
						&new_policy.governor))
		return -EINVAL;

	/* Do not use cpufreq_set_policy here or the user_policy.max
	   will be wrongly overridden */
	ret = __cpufreq_set_policy(policy, &new_policy);

	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret)
		return ret;
	else
		return count;
}
/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", cpufreq_driver->name);
}

/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!cpufreq_driver->target) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
				    - (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_LEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}
static ssize_t show_cpus(const struct cpumask *mask, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (i)
			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
		if (i >= (PAGE_SIZE - 5))
			break;
	}
	i += sprintf(&buf[i], "\n");
	return i;
}

/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	if (cpumask_empty(policy->related_cpus))
		return show_cpus(policy->cpus, buf);
	return show_cpus(policy->related_cpus, buf);
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return show_cpus(policy->cpus, buf);
}
static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int freq = 0;
	unsigned int ret;

	if (!policy->governor || !policy->governor->store_setspeed)
		return -EINVAL;

	ret = sscanf(buf, "%u", &freq);
	if (ret != 1)
		return -EINVAL;

	policy->governor->store_setspeed(policy, freq);

	return count;
}

static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sprintf(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}
#define define_one_ro(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0444, show_##_name, NULL)

#define define_one_ro0400(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0400, show_##_name, NULL)

#define define_one_rw(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)

define_one_ro0400(cpuinfo_cur_freq);
define_one_ro(cpuinfo_min_freq);
define_one_ro(cpuinfo_max_freq);
define_one_ro(cpuinfo_transition_latency);
define_one_ro(scaling_available_governors);
define_one_ro(scaling_driver);
define_one_ro(scaling_cur_freq);
define_one_ro(related_cpus);
define_one_ro(affected_cpus);
define_one_rw(scaling_min_freq);
define_one_rw(scaling_max_freq);
define_one_rw(scaling_governor);
define_one_rw(scaling_setspeed);

static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};
#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;
	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		goto no_policy;

	if (lock_policy_rwsem_read(policy->cpu) < 0)
		goto fail;

	if (fattr->show)
		ret = fattr->show(policy, buf);
	else
		ret = -EIO;

	unlock_policy_rwsem_read(policy->cpu);
fail:
	cpufreq_cpu_put(policy);
no_policy:
	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;
	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		goto no_policy;

	if (lock_policy_rwsem_write(policy->cpu) < 0)
		goto fail;

	if (fattr->store)
		ret = fattr->store(policy, buf, count);
	else
		ret = -EIO;

	unlock_policy_rwsem_write(policy->cpu);
fail:
	cpufreq_cpu_put(policy);
no_policy:
	return ret;
}

static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	dprintk("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}

static struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

static struct kobj_type ktype_cpufreq = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= cpufreq_sysfs_release,
};
int cpufreq_add_dev_policy(unsigned int cpu, struct cpufreq_policy *policy,
			   struct sys_device *sys_dev)
{
	int ret = 0;
	unsigned long flags;
	unsigned int j;

#ifdef CONFIG_HOTPLUG_CPU
	if (per_cpu(cpufreq_cpu_governor, cpu)) {
		policy->governor = per_cpu(cpufreq_cpu_governor, cpu);
		dprintk("Restoring governor %s for cpu %d\n",
		       policy->governor->name, cpu);
	}
#endif

	for_each_cpu(j, policy->cpus) {
		struct cpufreq_policy *managed_policy;

		if (cpu == j)
			continue;

		/* Check for existing affected CPUs.
		 * They may not be aware of it due to CPU Hotplug.
		 * cpufreq_cpu_put is called when the device is removed
		 * in __cpufreq_remove_dev()
		 */
		managed_policy = cpufreq_cpu_get(j);
		if (unlikely(managed_policy)) {

			/* Set proper policy_cpu */
			unlock_policy_rwsem_write(cpu);
			per_cpu(policy_cpu, cpu) = managed_policy->cpu;

			if (lock_policy_rwsem_write(cpu) < 0) {
				/* Should not go through policy unlock path */
				if (cpufreq_driver->exit)
					cpufreq_driver->exit(policy);
				cpufreq_cpu_put(managed_policy);
				return -EBUSY;
			}

			spin_lock_irqsave(&cpufreq_driver_lock, flags);
			cpumask_copy(managed_policy->cpus, policy->cpus);
			per_cpu(cpufreq_cpu_data, cpu) = managed_policy;
			spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

			dprintk("CPU already managed, adding link\n");
			ret = sysfs_create_link(&sys_dev->kobj,
						&managed_policy->kobj,
						"cpufreq");
			if (ret)
				cpufreq_cpu_put(managed_policy);
			/*
			 * Success. We only needed to be added to the mask.
			 * Call driver->exit() because only the cpu parent of
			 * the kobj needed to call init().
			 */
			if (cpufreq_driver->exit)
				cpufreq_driver->exit(policy);

			if (!ret)
				return 1;
			return ret;
		}
	}
	return ret;
}
/* symlink affected CPUs */
int cpufreq_add_dev_symlink(unsigned int cpu, struct cpufreq_policy *policy)
{
	unsigned int j;
	int ret = 0;

	for_each_cpu(j, policy->cpus) {
		struct cpufreq_policy *managed_policy;
		struct sys_device *cpu_sys_dev;

		if (j == cpu)
			continue;
		if (!cpu_online(j))
			continue;

		dprintk("CPU %u already managed, adding link\n", j);
		managed_policy = cpufreq_cpu_get(cpu);
		cpu_sys_dev = get_cpu_sysdev(j);
		ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
					"cpufreq");
		if (ret) {
			cpufreq_cpu_put(managed_policy);
			return ret;
		}
	}
	return ret;
}
int cpufreq_add_dev_interface(unsigned int cpu, struct cpufreq_policy *policy,
			      struct sys_device *sys_dev)
{
	struct cpufreq_policy new_policy;
	struct freq_attr **drv_attr;
	unsigned long flags;
	int ret = 0;
	unsigned int j;

	/* prepare interface data */
	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
				   &sys_dev->kobj, "cpufreq");
	if (ret)
		return ret;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while ((drv_attr) && (*drv_attr)) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			goto err_out_kobj_put;
		drv_attr++;
	}
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (cpufreq_driver->target) {
		ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}

	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus) {
		if (!cpu_online(j))
			continue;
		per_cpu(cpufreq_cpu_data, j) = policy;
		per_cpu(policy_cpu, j) = policy->cpu;
	}
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = cpufreq_add_dev_symlink(cpu, policy);
	if (ret)
		goto err_out_kobj_put;

	memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
	/* assure that the starting sequence is run in __cpufreq_set_policy */
	policy->governor = NULL;

	/* set default policy */
	ret = __cpufreq_set_policy(policy, &new_policy);
	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret) {
		dprintk("setting policy failed\n");
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);
	}
	return ret;

err_out_kobj_put:
	kobject_put(&policy->kobj);
	wait_for_completion(&policy->kobj_unregister);
	return ret;
}
/**
 * cpufreq_add_dev - add a CPU device
 *
 * Adds the cpufreq interface for a CPU device.
 *
 * The Oracle says: try running cpufreq registration/unregistration concurrently
 * with cpu hotplugging and all hell will break loose. Tried to clean this
 * mess up, but more thorough testing is needed. - Mathieu
 */
static int cpufreq_add_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	int ret = 0;
	struct cpufreq_policy *policy;
	unsigned long flags;
	unsigned int j;

	if (cpu_is_offline(cpu))
		return 0;

	cpufreq_debug_disable_ratelimit();
	dprintk("adding CPU %u\n", cpu);

	/* check whether a different CPU already registered this
	 * CPU because it is in the same boat. */
	policy = cpufreq_cpu_get(cpu);
	if (unlikely(policy)) {
		cpufreq_cpu_put(policy);
		cpufreq_debug_enable_ratelimit();
		return 0;
	}

	if (!try_module_get(cpufreq_driver->owner)) {
		ret = -EINVAL;
		goto module_out;
	}

	policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
	if (!policy)
		goto nomem_out;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	policy->cpu = cpu;
	cpumask_copy(policy->cpus, cpumask_of(cpu));

	/* Initially set CPU itself as the policy_cpu */
	per_cpu(policy_cpu, cpu) = cpu;
	ret = (lock_policy_rwsem_write(cpu) < 0);
	WARN_ON(ret);

	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	/* Set governor before ->init, so that driver could check it */
	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
	/* call driver. From then on the cpufreq must be able
	 * to accept all calls to ->verify and ->setpolicy for this CPU
	 */
	ret = cpufreq_driver->init(policy);
	if (ret) {
		dprintk("initialization failed\n");
		goto err_unlock_policy;
	}
	policy->user_policy.min = policy->min;
	policy->user_policy.max = policy->max;

	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_START, policy);

	ret = cpufreq_add_dev_policy(cpu, policy, sys_dev);
	if (ret) {
		if (ret > 0)
			/* This is a managed cpu, symlink created,
			   exit with 0 */
			ret = 0;
		goto err_unlock_policy;
	}

	ret = cpufreq_add_dev_interface(cpu, policy, sys_dev);
	if (ret)
		goto err_out_unregister;

	unlock_policy_rwsem_write(cpu);

	kobject_uevent(&policy->kobj, KOBJ_ADD);
	module_put(cpufreq_driver->owner);
	dprintk("initialization complete\n");
	cpufreq_debug_enable_ratelimit();

	return 0;

err_out_unregister:
	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_cpu_data, j) = NULL;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	kobject_put(&policy->kobj);
	wait_for_completion(&policy->kobj_unregister);

err_unlock_policy:
	unlock_policy_rwsem_write(cpu);
	free_cpumask_var(policy->related_cpus);
err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);
nomem_out:
	module_put(cpufreq_driver->owner);
module_out:
	cpufreq_debug_enable_ratelimit();
	return ret;
}
/**
 * __cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 * Caller should already have policy_rwsem in write mode for this CPU.
 * This routine frees the rwsem before returning.
 */
static int __cpufreq_remove_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long flags;
	struct cpufreq_policy *data;
#ifdef CONFIG_SMP
	unsigned int j;
	struct sys_device *cpu_sys_dev;
#endif

	cpufreq_debug_disable_ratelimit();
	dprintk("unregistering CPU %u\n", cpu);

	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	data = per_cpu(cpufreq_cpu_data, cpu);

	if (!data) {
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		cpufreq_debug_enable_ratelimit();
		unlock_policy_rwsem_write(cpu);
		return -EINVAL;
	}
	per_cpu(cpufreq_cpu_data, cpu) = NULL;

#ifdef CONFIG_SMP
	/* if this isn't the CPU which is the parent of the kobj, we
	 * only need to unlink, put and exit
	 */
	if (unlikely(cpu != data->cpu)) {
		dprintk("removing link\n");
		cpumask_clear_cpu(cpu, data->cpus);
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		sysfs_remove_link(&sys_dev->kobj, "cpufreq");
		cpufreq_cpu_put(data);
		cpufreq_debug_enable_ratelimit();
		unlock_policy_rwsem_write(cpu);
		return 0;
	}

#ifdef CONFIG_HOTPLUG_CPU
	per_cpu(cpufreq_cpu_governor, cpu) = data->governor;
#endif

	/* if we have other CPUs still registered, we need to unlink them,
	 * or else wait_for_completion below will lock up. Clean the
	 * per_cpu(cpufreq_cpu_data) while holding the lock, and remove
	 * the sysfs links afterwards.
	 */
	if (unlikely(cpumask_weight(data->cpus) > 1)) {
		for_each_cpu(j, data->cpus) {
			if (j == cpu)
				continue;
			per_cpu(cpufreq_cpu_data, j) = NULL;
		}
	}

	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (unlikely(cpumask_weight(data->cpus) > 1)) {
		for_each_cpu(j, data->cpus) {
			if (j == cpu)
				continue;
			dprintk("removing link for cpu %u\n", j);
#ifdef CONFIG_HOTPLUG_CPU
			per_cpu(cpufreq_cpu_governor, j) = data->governor;
#endif
			cpu_sys_dev = get_cpu_sysdev(j);
			sysfs_remove_link(&cpu_sys_dev->kobj, "cpufreq");
			cpufreq_cpu_put(data);
		}
	}
#else
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
#endif

	if (cpufreq_driver->target)
		__cpufreq_governor(data, CPUFREQ_GOV_STOP);

	kobject_put(&data->kobj);

	/* we need to make sure that the underlying kobj is actually
	 * not referenced anymore by anybody before we proceed with
	 * unloading.
	 */
	dprintk("waiting for dropping of refcount\n");
	wait_for_completion(&data->kobj_unregister);
	dprintk("wait complete\n");

	if (cpufreq_driver->exit)
		cpufreq_driver->exit(data);

	unlock_policy_rwsem_write(cpu);

	free_cpumask_var(data->related_cpus);
	free_cpumask_var(data->cpus);
	kfree(data);
	per_cpu(cpufreq_cpu_data, cpu) = NULL;

	cpufreq_debug_enable_ratelimit();
	return 0;
}
static int cpufreq_remove_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	int retval;

	if (cpu_is_offline(cpu))
		return 0;

	if (unlikely(lock_policy_rwsem_write(cpu)))
		BUG();

	retval = __cpufreq_remove_dev(sys_dev);
	return retval;
}

static void handle_update(struct work_struct *work)
{
	struct cpufreq_policy *policy =
		container_of(work, struct cpufreq_policy, update);
	unsigned int cpu = policy->cpu;
	dprintk("handle_update for cpu %u called\n", cpu);
	cpufreq_update_policy(cpu);
}
/**
 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're in deep trouble.
 * @cpu: cpu number
 * @old_freq: CPU frequency the kernel thinks the CPU runs at
 * @new_freq: CPU frequency the CPU actually runs at
 *
 * We adjust to the current frequency first, and need to clean up later.
 * So either call cpufreq_update_policy() or schedule handle_update().
 */
static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
				unsigned int new_freq)
{
	struct cpufreq_freqs freqs;

	dprintk("Warning: CPU frequency out of sync: cpufreq and timing "
		"core thinks of %u, is %u kHz.\n", old_freq, new_freq);

	freqs.cpu = cpu;
	freqs.old = old_freq;
	freqs.new = new_freq;
	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
}
/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
 */
unsigned int cpufreq_quick_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		ret_freq = policy->cur;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);
static unsigned int __cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
	unsigned int ret_freq = 0;

	if (!cpufreq_driver->get)
		return ret_freq;

	ret_freq = cpufreq_driver->get(cpu);

	if (ret_freq && policy->cur &&
		!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
		/* verify no discrepancy between actual and
					saved value exists */
		if (unlikely(ret_freq != policy->cur)) {
			cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
			schedule_work(&policy->update);
		}
	}

	return ret_freq;
}
/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the CPU current (static) CPU frequency
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	unsigned int ret_freq = 0;
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy)
		goto out;

	if (unlikely(lock_policy_rwsem_read(cpu)))
		goto out_policy;

	ret_freq = __cpufreq_get(cpu);

	unlock_policy_rwsem_read(cpu);

out_policy:
	cpufreq_cpu_put(policy);
out:
	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);
/**
 * cpufreq_suspend - let the low level driver prepare for suspend
 */
static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
{
	int ret = 0;
	int cpu = sysdev->id;
	struct cpufreq_policy *cpu_policy;

	dprintk("suspending cpu %u\n", cpu);

	if (!cpu_online(cpu))
		return 0;

	/* we may be lax here as interrupts are off. Nonetheless
	 * we need to grab the correct cpu policy, as to check
	 * whether we really run on this CPU.
	 */
	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	/* only handle each CPU group once */
	if (unlikely(cpu_policy->cpu != cpu))
		goto out;

	if (cpufreq_driver->suspend) {
		ret = cpufreq_driver->suspend(cpu_policy, pmsg);
		if (ret)
			printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
				"step on CPU %u\n", cpu_policy->cpu);
	}

out:
	cpufreq_cpu_put(cpu_policy);
	return ret;
}
/**
 * cpufreq_resume - restore proper CPU frequency handling after resume
 *
 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
 * 2.) schedule call cpufreq_update_policy() ASAP as interrupts are
 *     restored. It will verify that the current freq is in sync with
 *     what we believe it to be. This is a bit later than when it
 *     should be, but nonetheless it's better than calling
 *     cpufreq_driver->get() here which might re-enable interrupts...
 */
static int cpufreq_resume(struct sys_device *sysdev)
{
	int ret = 0;
	int cpu = sysdev->id;
	struct cpufreq_policy *cpu_policy;

	dprintk("resuming cpu %u\n", cpu);

	if (!cpu_online(cpu))
		return 0;

	/* we may be lax here as interrupts are off. Nonetheless
	 * we need to grab the correct cpu policy, as to check
	 * whether we really run on this CPU.
	 */
	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	/* only handle each CPU group once */
	if (unlikely(cpu_policy->cpu != cpu))
		goto fail;

	if (cpufreq_driver->resume) {
		ret = cpufreq_driver->resume(cpu_policy);
		if (ret) {
			printk(KERN_ERR "cpufreq: resume failed in ->resume "
				"step on CPU %u\n", cpu_policy->cpu);
			goto fail;
		}
	}

	schedule_work(&cpu_policy->update);

fail:
	cpufreq_cpu_put(cpu_policy);
	return ret;
}

static struct sysdev_driver cpufreq_sysdev_driver = {
	.add		= cpufreq_add_dev,
	.remove		= cpufreq_remove_dev,
	.suspend	= cpufreq_suspend,
	.resume		= cpufreq_resume,
};
/*********************************************************************
 *                     NOTIFIER LISTS INTERFACE                      *
 *********************************************************************/

/**
 * cpufreq_register_notifier - register a driver with cpufreq
 * @nb: notifier function to register
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Add a driver to one of two lists: either a list of drivers that
 * are notified about clock rate changes (once before and once after
 * the transition), or a list of drivers that are notified about
 * changes in cpufreq policy.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_register.
 */
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	WARN_ON(!init_cpufreq_transition_notifier_list_called);

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_register(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_register(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);
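/*
 * Illustrative sketch (editor's addition): a hypothetical module that wants
 * to observe every frequency transition would register on the transition
 * list roughly as follows.
 */
#if 0
static int example_transition_cb(struct notifier_block *nb,
				 unsigned long state, void *data)
{
	struct cpufreq_freqs *freqs = data;

	if (state == CPUFREQ_POSTCHANGE)
		printk(KERN_DEBUG "cpu %u now at %u kHz\n",
		       freqs->cpu, freqs->new);
	return NOTIFY_OK;
}

static struct notifier_block example_transition_nb = {
	.notifier_call = example_transition_cb,
};

/* registered, e.g. from module_init(), via:
 * cpufreq_register_notifier(&example_transition_nb,
 *			     CPUFREQ_TRANSITION_NOTIFIER);
 */
#endif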
/**
 * cpufreq_unregister_notifier - unregister a driver with cpufreq
 * @nb: notifier block to be unregistered
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Remove a driver from the CPU frequency notifier list.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_unregister.
 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_unregister(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_unregister(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);
/*********************************************************************
 *                              GOVERNORS                            *
 *********************************************************************/

int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	int retval = -EINVAL;

	dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu,
		target_freq, relation);
	if (cpu_online(policy->cpu) && cpufreq_driver->target)
		retval = cpufreq_driver->target(policy, target_freq, relation);

	return retval;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);

int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	int ret = -EINVAL;

	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		goto no_policy;

	if (unlikely(lock_policy_rwsem_write(policy->cpu)))
		goto fail;

	ret = __cpufreq_driver_target(policy, target_freq, relation);

	unlock_policy_rwsem_write(policy->cpu);

fail:
	cpufreq_cpu_put(policy);
no_policy:
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);

int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
{
	int ret = 0;

	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		return -EINVAL;

	if (cpu_online(cpu) && cpufreq_driver->getavg)
		ret = cpufreq_driver->getavg(policy, cpu);

	cpufreq_cpu_put(policy);
	return ret;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
/*
 * when "event" is CPUFREQ_GOV_LIMITS
 */
static int __cpufreq_governor(struct cpufreq_policy *policy,
					unsigned int event)
{
	int ret;

	/* Only must be defined when default governor is known to have latency
	   restrictions, like e.g. conservative or ondemand.
	   That this is the case is already ensured in Kconfig
	 */
#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
	struct cpufreq_governor *gov = &cpufreq_gov_performance;
#else
	struct cpufreq_governor *gov = NULL;
#endif

	if (policy->governor->max_transition_latency &&
	    policy->cpuinfo.transition_latency >
	    policy->governor->max_transition_latency) {
		if (!gov)
			return -EINVAL;
		else {
			printk(KERN_WARNING "%s governor failed, too long"
			       " transition latency of HW, fallback"
			       " to %s governor\n",
			       policy->governor->name,
			       gov->name);
			policy->governor = gov;
		}
	}

	if (!try_module_get(policy->governor->owner))
		return -EINVAL;

	dprintk("__cpufreq_governor for CPU %u, event %u\n",
						policy->cpu, event);
	ret = policy->governor->governor(policy, event);

	/* we keep one module reference alive for
			each CPU governed by this CPU */
	if ((event != CPUFREQ_GOV_START) || ret)
		module_put(policy->governor->owner);
	if ((event == CPUFREQ_GOV_STOP) && !ret)
		module_put(policy->governor->owner);

	return ret;
}
int cpufreq_register_governor(struct cpufreq_governor *governor)
{
	int err;

	if (!governor)
		return -EINVAL;

	mutex_lock(&cpufreq_governor_mutex);

	err = -EBUSY;
	if (__find_governor(governor->name) == NULL) {
		err = 0;
		list_add(&governor->governor_list, &cpufreq_governor_list);
	}

	mutex_unlock(&cpufreq_governor_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(cpufreq_register_governor);

void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
	if (!governor)
		return;

	mutex_lock(&cpufreq_governor_mutex);
	list_del(&governor->governor_list);
	mutex_unlock(&cpufreq_governor_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
/*********************************************************************
 *                          POLICY INTERFACE                         *
 *********************************************************************/

/**
 * cpufreq_get_policy - get the current cpufreq_policy
 * @policy: struct cpufreq_policy into which the current cpufreq_policy
 *	is written
 *
 * Reads the current cpufreq policy.
 */
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
{
	struct cpufreq_policy *cpu_policy;
	if (!policy)
		return -EINVAL;

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));

	cpufreq_cpu_put(cpu_policy);
	return 0;
}
EXPORT_SYMBOL(cpufreq_get_policy);
/*
 * data   : current policy.
 * policy : policy to be set.
 */
static int __cpufreq_set_policy(struct cpufreq_policy *data,
				struct cpufreq_policy *policy)
{
	int ret = 0;

	cpufreq_debug_disable_ratelimit();
	dprintk("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
		policy->min, policy->max);

	memcpy(&policy->cpuinfo, &data->cpuinfo,
				sizeof(struct cpufreq_cpuinfo));

	if (policy->min > data->max || policy->max < data->min) {
		ret = -EINVAL;
		goto error_out;
	}

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, policy);

	/* adjust if necessary - hardware incompatibility*/
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_INCOMPATIBLE, policy);

	/* verify the cpu speed can be set within this limit,
	   which might be different to the first one */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, policy);

	data->min = policy->min;
	data->max = policy->max;

	dprintk("new min and max freqs are %u - %u kHz\n",
					data->min, data->max);

	if (cpufreq_driver->setpolicy) {
		data->policy = policy->policy;
		dprintk("setting range\n");
		ret = cpufreq_driver->setpolicy(policy);
	} else {
		if (policy->governor != data->governor) {
			/* save old, working values */
			struct cpufreq_governor *old_gov = data->governor;

			dprintk("governor switch\n");

			/* end old governor */
			if (data->governor)
				__cpufreq_governor(data, CPUFREQ_GOV_STOP);

			/* start new governor */
			data->governor = policy->governor;
			if (__cpufreq_governor(data, CPUFREQ_GOV_START)) {
				/* new governor failed, so re-start old one */
				dprintk("starting governor %s failed\n",
							data->governor->name);
				if (old_gov) {
					data->governor = old_gov;
					__cpufreq_governor(data,
							   CPUFREQ_GOV_START);
				}
				ret = -EINVAL;
				goto error_out;
			}
			/* might be a policy change, too, so fall through */
		}
		dprintk("governor: change or update limits\n");
		__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
	}

error_out:
	cpufreq_debug_enable_ratelimit();
	return ret;
}
/**
 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
 * @cpu: CPU which shall be re-evaluated
 *
 * Useful for policy notifiers which have different necessities
 * at different times.
 */
int cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
	struct cpufreq_policy policy;
	int ret;

	if (!data) {
		ret = -ENODEV;
		goto no_policy;
	}

	if (unlikely(lock_policy_rwsem_write(cpu))) {
		ret = -EINVAL;
		goto fail;
	}

	dprintk("updating policy for CPU %u\n", cpu);
	memcpy(&policy, data, sizeof(struct cpufreq_policy));
	policy.min = data->user_policy.min;
	policy.max = data->user_policy.max;
	policy.policy = data->user_policy.policy;
	policy.governor = data->user_policy.governor;

	/* BIOS might change freq behind our back
	   -> ask driver for current freq and notify governors about a change */
	if (cpufreq_driver->get) {
		policy.cur = cpufreq_driver->get(cpu);
		if (!data->cur) {
			dprintk("Driver did not initialize current freq");
			data->cur = policy.cur;
		} else {
			if (data->cur != policy.cur)
				cpufreq_out_of_sync(cpu, data->cur,
								policy.cur);
		}
	}

	ret = __cpufreq_set_policy(data, &policy);

	unlock_policy_rwsem_write(cpu);

fail:
	cpufreq_cpu_put(data);
no_policy:
	return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);
static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
					  unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct sys_device *sys_dev;

	sys_dev = get_cpu_sysdev(cpu);
	if (sys_dev) {
		switch (action) {
		case CPU_ONLINE:
		case CPU_ONLINE_FROZEN:
			cpufreq_add_dev(sys_dev);
			break;
		case CPU_DOWN_PREPARE:
		case CPU_DOWN_PREPARE_FROZEN:
			if (unlikely(lock_policy_rwsem_write(cpu)))
				BUG();

			__cpufreq_remove_dev(sys_dev);
			break;
		case CPU_DOWN_FAILED:
		case CPU_DOWN_FAILED_FROZEN:
			cpufreq_add_dev(sys_dev);
			break;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block __refdata cpufreq_cpu_notifier =
{
	.notifier_call = cpufreq_cpu_callback,
};
/*********************************************************************
 *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
 *********************************************************************/

/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EBUSY when another driver got here first
 * (and isn't unregistered in the meantime).
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    ((!driver_data->setpolicy) && (!driver_data->target)))
		return -EINVAL;

	dprintk("trying to register driver %s\n", driver_data->name);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return -EBUSY;
	}
	cpufreq_driver = driver_data;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = sysdev_driver_register(&cpu_sysdev_class,
				     &cpufreq_sysdev_driver);

	if ((!ret) && !(cpufreq_driver->flags & CPUFREQ_STICKY)) {
		int i;
		ret = -ENODEV;

		/* check for at least one working CPU */
		for (i = 0; i < nr_cpu_ids; i++)
			if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
				ret = 0;
				break;
			}

		/* if all ->init() calls failed, unregister */
		if (ret) {
			dprintk("no CPU initialized for driver %s\n",
							driver_data->name);
			sysdev_driver_unregister(&cpu_sysdev_class,
						 &cpufreq_sysdev_driver);

			spin_lock_irqsave(&cpufreq_driver_lock, flags);
			cpufreq_driver = NULL;
			spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		}
	}

	if (!ret) {
		register_hotcpu_notifier(&cpufreq_cpu_notifier);
		dprintk("driver %s up and running\n", driver_data->name);
		cpufreq_debug_enable_ratelimit();
	}

	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
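/*
 * Illustrative sketch (editor's addition): a minimal target-style scaling
 * driver registers itself with the core like this. The names and callback
 * implementations (example_*) are hypothetical.
 */
#if 0
static struct cpufreq_driver example_cpufreq_driver = {
	.name	= "example",
	.init	= example_cpu_init,	/* fills policy->min/max/cur etc. */
	.verify	= example_verify,	/* clamps a policy to hardware limits */
	.target	= example_target,	/* programs the requested frequency */
	.get	= example_get,		/* reads the current frequency */
	.owner	= THIS_MODULE,
};

static int __init example_cpufreq_init(void)
{
	return cpufreq_register_driver(&example_cpufreq_driver);
}
#endif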
/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	cpufreq_debug_disable_ratelimit();

	if (!cpufreq_driver || (driver != cpufreq_driver)) {
		cpufreq_debug_enable_ratelimit();
		return -EINVAL;
	}

	dprintk("unregistering driver %s\n", driver->name);

	sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver);
	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
static int __init cpufreq_core_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(policy_cpu, cpu) = -1;
		init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
	}

	return 0;
}
core_initcall(cpufreq_core_init);