/*
 *  linux/drivers/cpufreq/cpufreq.c
 *
 *  Copyright (C) 2001 Russell King
 *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *            (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
 *
 *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *             Added handling for CPU hotplug
 *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *             Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
#include <trace/events/power.h>
static LIST_HEAD(cpufreq_policy_list);

static inline bool policy_is_inactive(struct cpufreq_policy *policy)
{
        return cpumask_empty(policy->cpus);
}

static bool suitable_policy(struct cpufreq_policy *policy, bool active)
{
        return active == !policy_is_inactive(policy);
}

/* Finds Next Active/Inactive policy */
static struct cpufreq_policy *next_policy(struct cpufreq_policy *policy,
                                          bool active)
{
        do {
                policy = list_next_entry(policy, policy_list);

                /* No more policies in the list */
                if (&policy->policy_list == &cpufreq_policy_list)
                        return NULL;
        } while (!suitable_policy(policy, active));

        return policy;
}
static struct cpufreq_policy *first_policy(bool active)
{
        struct cpufreq_policy *policy;

        /* No policies in the list */
        if (list_empty(&cpufreq_policy_list))
                return NULL;

        policy = list_first_entry(&cpufreq_policy_list, typeof(*policy),
                                  policy_list);

        if (!suitable_policy(policy, active))
                policy = next_policy(policy, active);

        return policy;
}

/* Macros to iterate over CPU policies */
#define for_each_suitable_policy(__policy, __active)    \
        for (__policy = first_policy(__active);         \
             __policy;                                  \
             __policy = next_policy(__policy, __active))

#define for_each_active_policy(__policy)                \
        for_each_suitable_policy(__policy, true)
#define for_each_inactive_policy(__policy)              \
        for_each_suitable_policy(__policy, false)

#define for_each_policy(__policy)                       \
        list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)

/* Iterate over governors */
static LIST_HEAD(cpufreq_governor_list);
#define for_each_governor(__governor)                           \
        list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
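/*
 * Usage sketch (illustrative, not part of the upstream file): iterating the
 * active policies with the helpers above, e.g. from a suspend-style path
 * that already holds the needed locks:
 *
 *      struct cpufreq_policy *policy;
 *
 *      for_each_active_policy(policy)
 *              pr_debug("policy of CPU%u: %u - %u kHz\n",
 *                       policy->cpu, policy->min, policy->max);
 */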
/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_RWLOCK(cpufreq_driver_lock);
DEFINE_MUTEX(cpufreq_governor_lock);

/* Flag to suspend/resume CPUFreq governors */
static bool cpufreq_suspended;

static inline bool has_target(void)
{
        return cpufreq_driver->target_index || cpufreq_driver->target;
}

/*
 * rwsem to guarantee that cpufreq driver module doesn't unload during critical
 * sections
 */
static DECLARE_RWSEM(cpufreq_rwsem);

/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
                unsigned int event);
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
static void handle_update(struct work_struct *work);

/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
        srcu_init_notifier_head(&cpufreq_transition_notifier_list);
        init_cpufreq_transition_notifier_list_called = true;
        return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);
static int off __read_mostly;
static int cpufreq_disabled(void)
{
        return off;
}
void disable_cpufreq(void)
{
        off = 1;
}
static DEFINE_MUTEX(cpufreq_governor_mutex);

bool have_governor_per_policy(void)
{
        return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);

struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
        if (have_governor_per_policy())
                return &policy->kobj;
        else
                return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
        u64 idle_time;
        u64 cur_wall_time;
        u64 busy_time;

        cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

        busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

        idle_time = cur_wall_time - busy_time;
        if (wall)
                *wall = cputime_to_usecs(cur_wall_time);

        return cputime_to_usecs(idle_time);
}

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
        u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

        if (idle_time == -1ULL)
                return get_cpu_idle_time_jiffy(cpu, wall);
        else if (!io_busy)
                idle_time += get_cpu_iowait_time_us(cpu, wall);

        return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
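/*
 * Usage sketch (illustrative, not upstream code): governors typically sample
 * this helper twice and derive the busy fraction from the deltas; the
 * variable names below are hypothetical:
 *
 *      u64 wall1, wall2, idle1, idle2;
 *
 *      idle1 = get_cpu_idle_time(cpu, &wall1, 0);
 *      ... one sampling period elapses ...
 *      idle2 = get_cpu_idle_time(cpu, &wall2, 0);
 *      load = div64_u64(100 * (wall2 - wall1 - (idle2 - idle1)),
 *                       wall2 - wall1);
 */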
/*
 * This is a generic cpufreq init() routine which can be used by cpufreq
 * drivers of SMP systems. It will do the following:
 * - validate & show the freq table passed
 * - set the policy's transition latency
 * - fill policy->cpus with all possible CPUs
 */
int cpufreq_generic_init(struct cpufreq_policy *policy,
                struct cpufreq_frequency_table *table,
                unsigned int transition_latency)
{
        int ret;

        ret = cpufreq_table_validate_and_show(policy, table);
        if (ret) {
                pr_err("%s: invalid frequency table: %d\n", __func__, ret);
                return ret;
        }

        policy->cpuinfo.transition_latency = transition_latency;

        /*
         * The driver only supports the SMP configuration where all processors
         * share the clock and voltage.
         */
        cpumask_setall(policy->cpus);

        return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);
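/*
 * Driver-side usage sketch (hypothetical; my_freq_table and MY_LATENCY_NS
 * are example names, not upstream symbols): a driver's ->init() callback
 * can simply forward its frequency table to the helper above:
 *
 *      static int my_cpufreq_init(struct cpufreq_policy *policy)
 *      {
 *              return cpufreq_generic_init(policy, my_freq_table,
 *                                          MY_LATENCY_NS);
 *      }
 */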
/* Only for cpufreq core internal use */
struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
        struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

        return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
}

unsigned int cpufreq_generic_get(unsigned int cpu)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);

        if (!policy || IS_ERR(policy->clk)) {
                pr_err("%s: No %s associated to cpu: %d\n",
                       __func__, policy ? "clk" : "policy", cpu);
                return 0;
        }

        return clk_get_rate(policy->clk) / 1000;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_get);
/**
 * cpufreq_cpu_get: returns policy for a cpu and marks it busy.
 *
 * @cpu: cpu to find policy for.
 *
 * This returns the policy for 'cpu', or NULL if it doesn't exist.
 * It also increments the kobject reference count to mark it busy and so would
 * require a corresponding call to cpufreq_cpu_put() to decrement it back.
 * If the corresponding cpufreq_cpu_put() call isn't made, the policy won't be
 * freed, as that depends on the kobj count.
 *
 * It also takes a read-lock of 'cpufreq_rwsem' and doesn't put it back if a
 * valid policy is found. This is done to make sure the driver doesn't get
 * unregistered while the policy is being used.
 *
 * Return: A valid policy on success, otherwise NULL on failure.
 */
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
        struct cpufreq_policy *policy = NULL;
        unsigned long flags;

        if (WARN_ON(cpu >= nr_cpu_ids))
                return NULL;

        if (!down_read_trylock(&cpufreq_rwsem))
                return NULL;

        /* get the cpufreq driver */
        read_lock_irqsave(&cpufreq_driver_lock, flags);

        if (cpufreq_driver) {
                /* get the CPU */
                policy = cpufreq_cpu_get_raw(cpu);
                if (policy)
                        kobject_get(&policy->kobj);
        }

        read_unlock_irqrestore(&cpufreq_driver_lock, flags);

        if (!policy)
                up_read(&cpufreq_rwsem);

        return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

/**
 * cpufreq_cpu_put: Decrements the usage count of a policy
 *
 * @policy: policy earlier returned by cpufreq_cpu_get().
 *
 * This decrements the kobject reference count incremented earlier by calling
 * cpufreq_cpu_get().
 *
 * It also drops the read-lock of 'cpufreq_rwsem' taken at cpufreq_cpu_get().
 */
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
        kobject_put(&policy->kobj);
        up_read(&cpufreq_rwsem);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
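/*
 * Usage sketch (illustrative, not part of the upstream file) of the get/put
 * pairing documented above:
 *
 *      struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *
 *      if (policy) {
 *              pr_info("CPU%u currently at %u kHz\n", cpu, policy->cur);
 *              cpufreq_cpu_put(policy);
 *      }
 */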
/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
#ifndef CONFIG_SMP
        static unsigned long l_p_j_ref;
        static unsigned int l_p_j_ref_freq;

        if (ci->flags & CPUFREQ_CONST_LOOPS)
                return;

        if (!l_p_j_ref_freq) {
                l_p_j_ref = loops_per_jiffy;
                l_p_j_ref_freq = ci->old;
                pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
                         l_p_j_ref, l_p_j_ref_freq);
        }
        if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
                loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
                                                                ci->new);
                pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
                         loops_per_jiffy, ci->new);
        }
#endif
}

static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, unsigned int state)
{
        BUG_ON(irqs_disabled());

        if (cpufreq_disabled())
                return;

        freqs->flags = cpufreq_driver->flags;
        pr_debug("notification %u of frequency transition to %u kHz\n",
                 state, freqs->new);

        switch (state) {
        case CPUFREQ_PRECHANGE:
                /* detect if the driver reported a value as "old frequency"
                 * which is not equal to what the cpufreq core thinks is
                 * "old frequency".
                 */
                if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
                        if ((policy) && (policy->cpu == freqs->cpu) &&
                            (policy->cur) && (policy->cur != freqs->old)) {
                                pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
                                         freqs->old, policy->cur);
                                freqs->old = policy->cur;
                        }
                }
                srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
                                CPUFREQ_PRECHANGE, freqs);
                adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
                break;

        case CPUFREQ_POSTCHANGE:
                adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
                pr_debug("FREQ: %lu - CPU: %lu\n",
                         (unsigned long)freqs->new, (unsigned long)freqs->cpu);
                trace_cpu_frequency(freqs->new, freqs->cpu);
                srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
                                CPUFREQ_POSTCHANGE, freqs);
                if (likely(policy) && likely(policy->cpu == freqs->cpu))
                        policy->cur = freqs->new;
                break;
        }
}
/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
static void cpufreq_notify_transition(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, unsigned int state)
{
        for_each_cpu(freqs->cpu, policy->cpus)
                __cpufreq_notify_transition(policy, freqs, state);
}

/* Do post notifications when there are chances that transition has failed */
static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, int transition_failed)
{
        cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
        if (!transition_failed)
                return;

        swap(freqs->old, freqs->new);
        cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
        cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
}

void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs)
{
        /*
         * Catch double invocations of _begin() which lead to self-deadlock.
         * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
         * doesn't invoke _begin() on their behalf, and hence the chances of
         * double invocations are very low. Moreover, there are scenarios
         * where these checks can emit false-positive warnings in these
         * drivers; so we avoid that by skipping them altogether.
         */
        WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
                                && current == policy->transition_task);

wait:
        wait_event(policy->transition_wait, !policy->transition_ongoing);

        spin_lock(&policy->transition_lock);

        if (unlikely(policy->transition_ongoing)) {
                spin_unlock(&policy->transition_lock);
                goto wait;
        }

        policy->transition_ongoing = true;
        policy->transition_task = current;

        spin_unlock(&policy->transition_lock);

        cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);

void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, int transition_failed)
{
        if (unlikely(WARN_ON(!policy->transition_ongoing)))
                return;

        cpufreq_notify_post_transition(policy, freqs, transition_failed);

        policy->transition_ongoing = false;
        policy->transition_task = NULL;

        wake_up(&policy->transition_wait);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
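/*
 * Usage sketch (hypothetical; my_set_rate() is an example name): a driver
 * that sends its own notifications (CPUFREQ_ASYNC_NOTIFICATION) brackets
 * the actual frequency switch with the begin/end pair above:
 *
 *      struct cpufreq_freqs freqs = { .old = policy->cur, .new = target };
 *      int ret;
 *
 *      cpufreq_freq_transition_begin(policy, &freqs);
 *      ret = my_set_rate(policy, target);
 *      cpufreq_freq_transition_end(policy, &freqs, ret);
 */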
/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/
static ssize_t show_boost(struct kobject *kobj,
                                 struct attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
                                  const char *buf, size_t count)
{
        int ret, enable;

        ret = sscanf(buf, "%d", &enable);
        if (ret != 1 || enable < 0 || enable > 1)
                return -EINVAL;

        if (cpufreq_boost_trigger_state(enable)) {
                pr_err("%s: Cannot %s BOOST!\n",
                       __func__, enable ? "enable" : "disable");
                return -EINVAL;
        }

        pr_debug("%s: cpufreq BOOST %s\n",
                 __func__, enable ? "enabled" : "disabled");

        return count;
}
define_one_global_rw(boost);
static struct cpufreq_governor *find_governor(const char *str_governor)
{
        struct cpufreq_governor *t;

        for_each_governor(t)
                if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
                        return t;

        return NULL;
}

/**
 * cpufreq_parse_governor - parse a governor string
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
                                struct cpufreq_governor **governor)
{
        int err = -EINVAL;

        if (cpufreq_driver->setpolicy) {
                if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
                        *policy = CPUFREQ_POLICY_PERFORMANCE;
                        err = 0;
                } else if (!strncasecmp(str_governor, "powersave",
                                                CPUFREQ_NAME_LEN)) {
                        *policy = CPUFREQ_POLICY_POWERSAVE;
                        err = 0;
                }
        } else {
                struct cpufreq_governor *t;

                mutex_lock(&cpufreq_governor_mutex);

                t = find_governor(str_governor);

                if (t == NULL) {
                        int ret;

                        mutex_unlock(&cpufreq_governor_mutex);
                        ret = request_module("cpufreq_%s", str_governor);
                        mutex_lock(&cpufreq_governor_mutex);

                        if (ret == 0)
                                t = find_governor(str_governor);
                }

                if (t != NULL) {
                        *governor = t;
                        err = 0;
                }

                mutex_unlock(&cpufreq_governor_mutex);
        }
        return err;
}
/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)                     \
static ssize_t show_##file_name                         \
(struct cpufreq_policy *policy, char *buf)              \
{                                                       \
        return sprintf(buf, "%u\n", policy->object);    \
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
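/*
 * Expansion sketch (for reference only): show_one(scaling_min_freq, min)
 * above expands to
 *
 *      static ssize_t show_scaling_min_freq(struct cpufreq_policy *policy,
 *                                           char *buf)
 *      {
 *              return sprintf(buf, "%u\n", policy->min);
 *      }
 *
 * which backs the per-policy scaling_min_freq sysfs attribute.
 */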
static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
{
        ssize_t ret;

        if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
                ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
        else
                ret = sprintf(buf, "%u\n", policy->cur);
        return ret;
}

static int cpufreq_set_policy(struct cpufreq_policy *policy,
                                struct cpufreq_policy *new_policy);

/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)                                    \
static ssize_t store_##file_name                                        \
(struct cpufreq_policy *policy, const char *buf, size_t count)          \
{                                                                       \
        int ret, temp;                                                  \
        struct cpufreq_policy new_policy;                               \
                                                                        \
        ret = cpufreq_get_policy(&new_policy, policy->cpu);             \
        if (ret)                                                        \
                return -EINVAL;                                         \
                                                                        \
        ret = sscanf(buf, "%u", &new_policy.object);                    \
        if (ret != 1)                                                   \
                return -EINVAL;                                         \
                                                                        \
        temp = new_policy.object;                                       \
        ret = cpufreq_set_policy(policy, &new_policy);                  \
        if (!ret)                                                       \
                policy->user_policy.object = temp;                      \
                                                                        \
        return ret ? ret : count;                                       \
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);
/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
                                        char *buf)
{
        unsigned int cur_freq = __cpufreq_get(policy);
        if (!cur_freq)
                return sprintf(buf, "<unknown>");
        return sprintf(buf, "%u\n", cur_freq);
}

/**
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
        if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
                return sprintf(buf, "powersave\n");
        else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
                return sprintf(buf, "performance\n");
        else if (policy->governor)
                return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
                                policy->governor->name);
        return -EINVAL;
}

/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
                                        const char *buf, size_t count)
{
        int ret;
        char str_governor[16];
        struct cpufreq_policy new_policy;

        ret = cpufreq_get_policy(&new_policy, policy->cpu);
        if (ret)
                return ret;

        ret = sscanf(buf, "%15s", str_governor);
        if (ret != 1)
                return -EINVAL;

        if (cpufreq_parse_governor(str_governor, &new_policy.policy,
                                                &new_policy.governor))
                return -EINVAL;

        ret = cpufreq_set_policy(policy, &new_policy);

        policy->user_policy.policy = policy->policy;
        policy->user_policy.governor = policy->governor;

        if (ret)
                return ret;
        else
                return count;
}
/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
        return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}

/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
                                                char *buf)
{
        ssize_t i = 0;
        struct cpufreq_governor *t;

        if (!has_target()) {
                i += sprintf(buf, "performance powersave");
                goto out;
        }

        for_each_governor(t) {
                if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
                    - (CPUFREQ_NAME_LEN + 2)))
                        goto out;
                i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
        }
out:
        i += sprintf(&buf[i], "\n");
        return i;
}

ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
{
        ssize_t i = 0;
        unsigned int cpu;

        for_each_cpu(cpu, mask) {
                if (i)
                        i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
                i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
                if (i >= (PAGE_SIZE - 5))
                        break;
        }
        i += sprintf(&buf[i], "\n");
        return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
        return cpufreq_show_cpus(policy->related_cpus, buf);
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
        return cpufreq_show_cpus(policy->cpus, buf);
}

static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
                                        const char *buf, size_t count)
{
        unsigned int freq = 0;
        unsigned int ret;

        if (!policy->governor || !policy->governor->store_setspeed)
                return -EINVAL;

        ret = sscanf(buf, "%u", &freq);
        if (ret != 1)
                return -EINVAL;

        policy->governor->store_setspeed(policy, freq);

        return count;
}

static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
        if (!policy->governor || !policy->governor->show_setspeed)
                return sprintf(buf, "<unsupported>\n");

        return policy->governor->show_setspeed(policy, buf);
}
/**
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
        unsigned int limit;
        int ret;
        if (cpufreq_driver->bios_limit) {
                ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
                if (!ret)
                        return sprintf(buf, "%u\n", limit);
        }
        return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}

cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);

static struct attribute *default_attrs[] = {
        &cpuinfo_min_freq.attr,
        &cpuinfo_max_freq.attr,
        &cpuinfo_transition_latency.attr,
        &scaling_min_freq.attr,
        &scaling_max_freq.attr,
        &affected_cpus.attr,
        &related_cpus.attr,
        &scaling_governor.attr,
        &scaling_driver.attr,
        &scaling_available_governors.attr,
        &scaling_setspeed.attr,
        NULL
};
#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
        struct cpufreq_policy *policy = to_policy(kobj);
        struct freq_attr *fattr = to_attr(attr);
        ssize_t ret;

        if (!down_read_trylock(&cpufreq_rwsem))
                return -EINVAL;

        down_read(&policy->rwsem);

        if (fattr->show)
                ret = fattr->show(policy, buf);
        else
                ret = -EIO;

        up_read(&policy->rwsem);
        up_read(&cpufreq_rwsem);

        return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
                     const char *buf, size_t count)
{
        struct cpufreq_policy *policy = to_policy(kobj);
        struct freq_attr *fattr = to_attr(attr);
        ssize_t ret = -EINVAL;

        get_online_cpus();

        if (!cpu_online(policy->cpu))
                goto unlock;

        if (!down_read_trylock(&cpufreq_rwsem))
                goto unlock;

        down_write(&policy->rwsem);

        /* Updating inactive policies is invalid, so avoid doing that. */
        if (unlikely(policy_is_inactive(policy))) {
                ret = -EBUSY;
                goto unlock_policy_rwsem;
        }

        if (fattr->store)
                ret = fattr->store(policy, buf, count);
        else
                ret = -EIO;

unlock_policy_rwsem:
        up_write(&policy->rwsem);

        up_read(&cpufreq_rwsem);
unlock:
        put_online_cpus();

        return ret;
}
static void cpufreq_sysfs_release(struct kobject *kobj)
{
        struct cpufreq_policy *policy = to_policy(kobj);
        pr_debug("last reference is dropped\n");
        complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
        .show   = show,
        .store  = store,
};

static struct kobj_type ktype_cpufreq = {
        .sysfs_ops      = &sysfs_ops,
        .default_attrs  = default_attrs,
        .release        = cpufreq_sysfs_release,
};

struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

static int cpufreq_global_kobject_usage;

int cpufreq_get_global_kobject(void)
{
        if (!cpufreq_global_kobject_usage++)
                return kobject_add(cpufreq_global_kobject,
                                &cpu_subsys.dev_root->kobj, "%s", "cpufreq");

        return 0;
}
EXPORT_SYMBOL(cpufreq_get_global_kobject);

void cpufreq_put_global_kobject(void)
{
        if (!--cpufreq_global_kobject_usage)
                kobject_del(cpufreq_global_kobject);
}
EXPORT_SYMBOL(cpufreq_put_global_kobject);

int cpufreq_sysfs_create_file(const struct attribute *attr)
{
        int ret = cpufreq_get_global_kobject();

        if (!ret) {
                ret = sysfs_create_file(cpufreq_global_kobject, attr);
                if (ret)
                        cpufreq_put_global_kobject();
        }

        return ret;
}
EXPORT_SYMBOL(cpufreq_sysfs_create_file);

void cpufreq_sysfs_remove_file(const struct attribute *attr)
{
        sysfs_remove_file(cpufreq_global_kobject, attr);
        cpufreq_put_global_kobject();
}
EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
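/*
 * Usage sketch (hypothetical names; not upstream code): a file placed under
 * the global cpufreq kobject with the two helpers above, assuming a
 * kobj_attribute with a my_value_show() handler is defined elsewhere:
 *
 *      static struct kobj_attribute my_attr = __ATTR_RO(my_value);
 *
 *      ret = cpufreq_sysfs_create_file(&my_attr.attr);
 *      ...
 *      cpufreq_sysfs_remove_file(&my_attr.attr);
 */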
static int add_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
{
        struct device *cpu_dev;

        pr_debug("%s: Adding symlink for CPU: %u\n", __func__, cpu);

        if (!policy)
                return 0;

        cpu_dev = get_cpu_device(cpu);
        if (WARN_ON(!cpu_dev))
                return 0;

        return sysfs_create_link(&cpu_dev->kobj, &policy->kobj, "cpufreq");
}

static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
{
        struct device *cpu_dev;

        pr_debug("%s: Removing symlink for CPU: %u\n", __func__, cpu);

        cpu_dev = get_cpu_device(cpu);
        if (WARN_ON(!cpu_dev))
                return;

        sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
}

/* Add/remove symlinks for all related CPUs */
static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
{
        unsigned int j;
        int ret = 0;

        /* Some related CPUs might not be present (physically hotplugged) */
        for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) {
                if (j == policy->kobj_cpu)
                        continue;

                ret = add_cpu_dev_symlink(policy, j);
                if (ret)
                        break;
        }

        return ret;
}

static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy)
{
        unsigned int j;

        /* Some related CPUs might not be present (physically hotplugged) */
        for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) {
                if (j == policy->kobj_cpu)
                        continue;

                remove_cpu_dev_symlink(policy, j);
        }
}
static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
                                     struct device *dev)
{
        struct freq_attr **drv_attr;
        int ret = 0;

        /* set up files for this cpu device */
        drv_attr = cpufreq_driver->attr;
        while (drv_attr && *drv_attr) {
                ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
                if (ret)
                        return ret;
                drv_attr++;
        }
        if (cpufreq_driver->get) {
                ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
                if (ret)
                        return ret;
        }

        ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
        if (ret)
                return ret;

        if (cpufreq_driver->bios_limit) {
                ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
                if (ret)
                        return ret;
        }

        return cpufreq_add_dev_symlink(policy);
}
static void cpufreq_init_policy(struct cpufreq_policy *policy)
{
        struct cpufreq_governor *gov = NULL;
        struct cpufreq_policy new_policy;
        int ret = 0;

        memcpy(&new_policy, policy, sizeof(*policy));

        /* Update governor of new_policy to the governor used before hotplug */
        gov = find_governor(policy->last_governor);
        if (gov)
                pr_debug("Restoring governor %s for cpu %d\n",
                                policy->governor->name, policy->cpu);
        else
                gov = CPUFREQ_DEFAULT_GOVERNOR;

        new_policy.governor = gov;

        /* Use the default policy if it's valid. */
        if (cpufreq_driver->setpolicy)
                cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);

        /* set default policy */
        ret = cpufreq_set_policy(policy, &new_policy);
        if (ret) {
                pr_debug("setting policy failed\n");
                if (cpufreq_driver->exit)
                        cpufreq_driver->exit(policy);
        }
}
static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
                                  unsigned int cpu, struct device *dev)
{
        int ret = 0;

        /* Has this CPU been taken care of already? */
        if (cpumask_test_cpu(cpu, policy->cpus))
                return 0;

        if (has_target()) {
                ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
                if (ret) {
                        pr_err("%s: Failed to stop governor\n", __func__);
                        return ret;
                }
        }

        down_write(&policy->rwsem);
        cpumask_set_cpu(cpu, policy->cpus);
        up_write(&policy->rwsem);

        if (has_target()) {
                ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
                if (!ret)
                        ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);

                if (ret) {
                        pr_err("%s: Failed to start governor\n", __func__);
                        return ret;
                }
        }

        return 0;
}

static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
{
        struct cpufreq_policy *policy;
        unsigned long flags;

        read_lock_irqsave(&cpufreq_driver_lock, flags);
        policy = per_cpu(cpufreq_cpu_data, cpu);
        read_unlock_irqrestore(&cpufreq_driver_lock, flags);

        if (likely(policy)) {
                /* Policy should be inactive here */
                WARN_ON(!policy_is_inactive(policy));

                down_write(&policy->rwsem);
                policy->cpu = cpu;
                up_write(&policy->rwsem);
        }

        return policy;
}
static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev)
{
        struct cpufreq_policy *policy;
        int ret;

        policy = kzalloc(sizeof(*policy), GFP_KERNEL);
        if (!policy)
                return NULL;

        if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
                goto err_free_policy;

        if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
                goto err_free_cpumask;

        ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &dev->kobj,
                                   "cpufreq");
        if (ret) {
                pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
                goto err_free_rcpumask;
        }

        INIT_LIST_HEAD(&policy->policy_list);
        init_rwsem(&policy->rwsem);
        spin_lock_init(&policy->transition_lock);
        init_waitqueue_head(&policy->transition_wait);
        init_completion(&policy->kobj_unregister);
        INIT_WORK(&policy->update, handle_update);

        policy->cpu = dev->id;

        /* Set this once on allocation */
        policy->kobj_cpu = dev->id;

        return policy;

err_free_rcpumask:
        free_cpumask_var(policy->related_cpus);
err_free_cpumask:
        free_cpumask_var(policy->cpus);
err_free_policy:
        kfree(policy);

        return NULL;
}
static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy, bool notify)
{
        struct kobject *kobj;
        struct completion *cmp;

        if (notify)
                blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                                             CPUFREQ_REMOVE_POLICY, policy);

        down_write(&policy->rwsem);
        cpufreq_remove_dev_symlink(policy);
        kobj = &policy->kobj;
        cmp = &policy->kobj_unregister;
        up_write(&policy->rwsem);
        kobject_put(kobj);

        /*
         * We need to make sure that the underlying kobj is
         * actually not referenced anymore by anybody before we
         * proceed with unloading.
         */
        pr_debug("waiting for dropping of refcount\n");
        wait_for_completion(cmp);
        pr_debug("wait complete\n");
}

static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify)
{
        unsigned long flags;
        int cpu;

        /* Remove policy from list */
        write_lock_irqsave(&cpufreq_driver_lock, flags);
        list_del(&policy->policy_list);

        for_each_cpu(cpu, policy->related_cpus)
                per_cpu(cpufreq_cpu_data, cpu) = NULL;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

        cpufreq_policy_put_kobj(policy, notify);
        free_cpumask_var(policy->related_cpus);
        free_cpumask_var(policy->cpus);
        kfree(policy);
}
/**
 * cpufreq_add_dev - add a CPU device
 *
 * Adds the cpufreq interface for a CPU device.
 *
 * The Oracle says: try running cpufreq registration/unregistration concurrently
 * with cpu hotplugging and all hell will break loose. Tried to clean this
 * mess up, but more thorough testing is needed. - Mathieu
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
        unsigned int j, cpu = dev->id;
        int ret = -ENOMEM;
        struct cpufreq_policy *policy;
        unsigned long flags;
        bool recover_policy = !sif;

        pr_debug("adding CPU %u\n", cpu);

        /*
         * Only possible if 'cpu' wasn't physically present earlier and we are
         * here from subsys_interface add callback. A hotplug notifier will
         * follow and we will handle it like logical CPU hotplug then. For now,
         * just create the sysfs link.
         */
        if (cpu_is_offline(cpu))
                return add_cpu_dev_symlink(per_cpu(cpufreq_cpu_data, cpu), cpu);

        if (!down_read_trylock(&cpufreq_rwsem))
                return 0;

        /* Check if this CPU already has a policy to manage it */
        policy = per_cpu(cpufreq_cpu_data, cpu);
        if (policy && !policy_is_inactive(policy)) {
                WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
                ret = cpufreq_add_policy_cpu(policy, cpu, dev);
                up_read(&cpufreq_rwsem);
                return ret;
        }

        /*
         * Restore the saved policy when doing light-weight init and fall back
         * to the full init if that fails.
         */
        policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
        if (!policy) {
                recover_policy = false;
                policy = cpufreq_policy_alloc(dev);
                if (!policy)
                        goto nomem_out;
        }

        cpumask_copy(policy->cpus, cpumask_of(cpu));

        /* call driver. From then on the cpufreq must be able
         * to accept all calls to ->verify and ->setpolicy for this CPU
         */
        ret = cpufreq_driver->init(policy);
        if (ret) {
                pr_debug("initialization failed\n");
                goto err_set_policy_cpu;
        }

        down_write(&policy->rwsem);

        /* related cpus should at least have policy->cpus */
        cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);

        /*
         * affected cpus must always be the one, which are online. We aren't
         * managing offline cpus here.
         */
        cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

        if (!recover_policy) {
                policy->user_policy.min = policy->min;
                policy->user_policy.max = policy->max;

                write_lock_irqsave(&cpufreq_driver_lock, flags);
                for_each_cpu(j, policy->related_cpus)
                        per_cpu(cpufreq_cpu_data, j) = policy;
                write_unlock_irqrestore(&cpufreq_driver_lock, flags);
        }

        if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
                policy->cur = cpufreq_driver->get(policy->cpu);
                if (!policy->cur) {
                        pr_err("%s: ->get() failed\n", __func__);
                        goto err_get_freq;
                }
        }

        /*
         * Sometimes boot loaders set CPU frequency to a value outside of
         * frequency table present with cpufreq core. In such cases CPU might be
         * unstable if it has to run on that frequency for long duration of time
         * and so it's better to set it to a frequency which is specified in
         * freq-table. This also makes cpufreq stats inconsistent as
         * cpufreq-stats would fail to register because current frequency of CPU
         * isn't found in freq-table.
         *
         * Because we don't want this change to affect boot process badly, we go
         * for the next freq which is >= policy->cur ('cur' must be set by now,
         * otherwise we will end up setting freq to lowest of the table as 'cur'
         * is initialized to zero).
         *
         * We are passing target-freq as "policy->cur - 1" otherwise
         * __cpufreq_driver_target() would simply fail, as policy->cur will be
         * equal to target-freq.
         */
        if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
            && has_target()) {
                /* Are we running at unknown frequency ? */
                ret = cpufreq_frequency_table_get_index(policy, policy->cur);
                if (ret == -EINVAL) {
                        /* Warn user and fix it */
                        pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
                                __func__, policy->cpu, policy->cur);
                        ret = __cpufreq_driver_target(policy, policy->cur - 1,
                                CPUFREQ_RELATION_L);

                        /*
                         * Reaching here after boot in a few seconds may not
                         * mean that system will remain stable at "unknown"
                         * frequency for longer duration. Hence, a BUG_ON().
                         */
                        BUG_ON(ret);
                        pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
                                __func__, policy->cpu, policy->cur);
                }
        }

        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                                     CPUFREQ_START, policy);

        if (!recover_policy) {
                ret = cpufreq_add_dev_interface(policy, dev);
                if (ret)
                        goto err_out_unregister;
                blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                                CPUFREQ_CREATE_POLICY, policy);

                write_lock_irqsave(&cpufreq_driver_lock, flags);
                list_add(&policy->policy_list, &cpufreq_policy_list);
                write_unlock_irqrestore(&cpufreq_driver_lock, flags);
        }

        cpufreq_init_policy(policy);

        if (!recover_policy) {
                policy->user_policy.policy = policy->policy;
                policy->user_policy.governor = policy->governor;
        }
        up_write(&policy->rwsem);

        kobject_uevent(&policy->kobj, KOBJ_ADD);

        up_read(&cpufreq_rwsem);

        /* Callback for handling stuff after policy is ready */
        if (cpufreq_driver->ready)
                cpufreq_driver->ready(policy);

        pr_debug("initialization complete\n");

        return 0;

err_out_unregister:
err_get_freq:
        up_write(&policy->rwsem);

        if (cpufreq_driver->exit)
                cpufreq_driver->exit(policy);
err_set_policy_cpu:
        cpufreq_policy_free(policy, recover_policy);
nomem_out:
        up_read(&cpufreq_rwsem);

        return ret;
}
static int __cpufreq_remove_dev_prepare(struct device *dev,
                                        struct subsys_interface *sif)
{
        unsigned int cpu = dev->id;
        int ret = 0;
        struct cpufreq_policy *policy;

        pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

        policy = cpufreq_cpu_get_raw(cpu);
        if (!policy) {
                pr_debug("%s: No cpu_data found\n", __func__);
                return 0;
        }

        if (has_target()) {
                ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
                if (ret) {
                        pr_err("%s: Failed to stop governor\n", __func__);
                        return ret;
                }
        }

        down_write(&policy->rwsem);
        cpumask_clear_cpu(cpu, policy->cpus);

        if (policy_is_inactive(policy)) {
                if (has_target())
                        strncpy(policy->last_governor, policy->governor->name,
                                CPUFREQ_NAME_LEN);
        } else if (cpu == policy->cpu) {
                /* Nominate new CPU */
                policy->cpu = cpumask_any(policy->cpus);
        }
        up_write(&policy->rwsem);

        /* Start governor again for active policy */
        if (!policy_is_inactive(policy)) {
                if (has_target()) {
                        ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
                        if (!ret)
                                ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);

                        if (ret)
                                pr_err("%s: Failed to start governor\n", __func__);
                }
        } else if (cpufreq_driver->stop_cpu) {
                cpufreq_driver->stop_cpu(policy);
        }

        return ret;
}

static int __cpufreq_remove_dev_finish(struct device *dev,
                                       struct subsys_interface *sif)
{
        unsigned int cpu = dev->id;
        int ret;
        struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

        if (!policy) {
                pr_debug("%s: No cpu_data found\n", __func__);
                return 0;
        }

        /* Only proceed for inactive policies */
        if (!policy_is_inactive(policy))
                return 0;

        /* If cpu is last user of policy, free policy */
        if (has_target()) {
                ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
                if (ret) {
                        pr_err("%s: Failed to exit governor\n", __func__);
                        return ret;
                }
        }

        /*
         * Perform the ->exit() even during light-weight tear-down,
         * since this is a core component, and is essential for the
         * subsequent light-weight ->init() to succeed.
         */
        if (cpufreq_driver->exit)
                cpufreq_driver->exit(policy);

        /* Free the policy only if the driver is getting removed. */
        if (sif)
                cpufreq_policy_free(policy, true);

        return 0;
}
/**
 * cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 */
static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
        unsigned int cpu = dev->id;
        int ret;

        /*
         * Only possible if 'cpu' is getting physically removed now. A hotplug
         * notifier should have already been called and we just need to remove
         * link or free policy here.
         */
        if (cpu_is_offline(cpu)) {
                struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
                struct cpumask mask;

                if (!policy)
                        return 0;

                cpumask_copy(&mask, policy->related_cpus);
                cpumask_clear_cpu(cpu, &mask);

                /*
                 * Free policy only if all policy->related_cpus are removed
                 * physically.
                 */
                if (cpumask_intersects(&mask, cpu_present_mask)) {
                        remove_cpu_dev_symlink(policy, cpu);
                        return 0;
                }

                cpufreq_policy_free(policy, true);
                return 0;
        }

        ret = __cpufreq_remove_dev_prepare(dev, sif);

        if (!ret)
                ret = __cpufreq_remove_dev_finish(dev, sif);

        return ret;
}
static void handle_update(struct work_struct *work)
{
        struct cpufreq_policy *policy =
                container_of(work, struct cpufreq_policy, update);
        unsigned int cpu = policy->cpu;
        pr_debug("handle_update for cpu %u called\n", cpu);
        cpufreq_update_policy(cpu);
}

/**
 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
 * in deep trouble.
 * @policy: policy managing CPUs
 * @new_freq: CPU frequency the CPU actually runs at
 *
 * We adjust to current frequency first, and need to clean up later.
 * So either call cpufreq_update_policy() or schedule handle_update().
 */
static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
                                unsigned int new_freq)
{
        struct cpufreq_freqs freqs;

        pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
                 policy->cur, new_freq);

        freqs.old = policy->cur;
        freqs.new = new_freq;

        cpufreq_freq_transition_begin(policy, &freqs);
        cpufreq_freq_transition_end(policy, &freqs, 0);
}
/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
 */
unsigned int cpufreq_quick_get(unsigned int cpu)
{
        struct cpufreq_policy *policy;
        unsigned int ret_freq = 0;

        if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
                return cpufreq_driver->get(cpu);

        policy = cpufreq_cpu_get(cpu);
        if (policy) {
                ret_freq = policy->cur;
                cpufreq_cpu_put(policy);
        }

        return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);

/**
 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
 * @cpu: CPU number
 *
 * Just return the max possible frequency for a given CPU.
 */
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
        unsigned int ret_freq = 0;

        if (policy) {
                ret_freq = policy->max;
                cpufreq_cpu_put(policy);
        }

        return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get_max);

static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
{
        unsigned int ret_freq = 0;

        if (!cpufreq_driver->get)
                return ret_freq;

        ret_freq = cpufreq_driver->get(policy->cpu);

        /* Updating inactive policies is invalid, so avoid doing that. */
        if (unlikely(policy_is_inactive(policy)))
                return ret_freq;

        if (ret_freq && policy->cur &&
                !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
                /* verify no discrepancy between actual and
                   saved value exists */
                if (unlikely(ret_freq != policy->cur)) {
                        cpufreq_out_of_sync(policy, ret_freq);
                        schedule_work(&policy->update);
                }
        }

        return ret_freq;
}
/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the CPU current (static) CPU frequency
 */
unsigned int cpufreq_get(unsigned int cpu)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
        unsigned int ret_freq = 0;

        if (policy) {
                down_read(&policy->rwsem);
                ret_freq = __cpufreq_get(policy);
                up_read(&policy->rwsem);

                cpufreq_cpu_put(policy);
        }

        return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);

static struct subsys_interface cpufreq_interface = {
        .name           = "cpufreq",
        .subsys         = &cpu_subsys,
        .add_dev        = cpufreq_add_dev,
        .remove_dev     = cpufreq_remove_dev,
};
/*
 * In case platform wants some specific frequency to be configured
 * during suspend..
 */
int cpufreq_generic_suspend(struct cpufreq_policy *policy)
{
        int ret;

        if (!policy->suspend_freq) {
                pr_err("%s: suspend_freq can't be zero\n", __func__);
                return -EINVAL;
        }

        pr_debug("%s: Setting suspend-freq: %u\n", __func__,
                        policy->suspend_freq);

        ret = __cpufreq_driver_target(policy, policy->suspend_freq,
                        CPUFREQ_RELATION_H);
        if (ret)
                pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
                                __func__, policy->suspend_freq, ret);

        return ret;
}
EXPORT_SYMBOL(cpufreq_generic_suspend);
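/*
 * Driver wiring sketch (hypothetical; my_driver is an example name): the
 * generic suspend helper above only works if the driver sets suspend_freq,
 * typically from its ->init() callback:
 *
 *      static struct cpufreq_driver my_driver = {
 *              ...
 *              .suspend = cpufreq_generic_suspend,
 *      };
 *
 * with policy->suspend_freq filled in before system suspend.
 */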
/**
 * cpufreq_suspend() - Suspend CPUFreq governors
 *
 * Called during system wide Suspend/Hibernate cycles for suspending governors,
 * as some platforms can't change frequency after this point in the suspend
 * cycle: some of the devices (e.g. i2c, regulators) they use for changing
 * frequency are suspended soon after this point.
 */
void cpufreq_suspend(void)
{
        struct cpufreq_policy *policy;

        if (!cpufreq_driver)
                return;

        if (!has_target())
                goto suspend;

        pr_debug("%s: Suspending Governors\n", __func__);

        for_each_active_policy(policy) {
                if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
                        pr_err("%s: Failed to stop governor for policy: %p\n",
                                __func__, policy);
                else if (cpufreq_driver->suspend
                    && cpufreq_driver->suspend(policy))
                        pr_err("%s: Failed to suspend driver: %p\n", __func__,
                                policy);
        }

suspend:
        cpufreq_suspended = true;
}
/**
 * cpufreq_resume() - Resume CPUFreq governors
 *
 * Called during system wide Suspend/Hibernate cycle for resuming governors
 * that were suspended with cpufreq_suspend().
 */
void cpufreq_resume(void)
{
        struct cpufreq_policy *policy;

        if (!cpufreq_driver)
                return;

        cpufreq_suspended = false;

        if (!has_target())
                return;

        pr_debug("%s: Resuming Governors\n", __func__);

        for_each_active_policy(policy) {
                if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
                        pr_err("%s: Failed to resume driver: %p\n", __func__,
                                policy);
                else if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
                    || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
                        pr_err("%s: Failed to start governor for policy: %p\n",
                                __func__, policy);
        }

        /*
         * Schedule a call to cpufreq_update_policy() for the first-online
         * CPU, as that one wouldn't be hotplugged-out on suspend. It will
         * verify that the current freq is in sync with what we believe it
         * to be.
         */
        policy = cpufreq_cpu_get_raw(cpumask_first(cpu_online_mask));
        if (WARN_ON(!policy))
                return;

        schedule_work(&policy->update);
}
/**
 * cpufreq_get_current_driver - return current driver's name
 *
 * Return the name string of the currently loaded cpufreq driver
 * or NULL, if none.
 */
const char *cpufreq_get_current_driver(void)
{
        if (cpufreq_driver)
                return cpufreq_driver->name;

        return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);

/**
 * cpufreq_get_driver_data - return current driver data
 *
 * Return the private data of the currently loaded cpufreq
 * driver, or NULL if no cpufreq driver is loaded.
 */
void *cpufreq_get_driver_data(void)
{
        if (cpufreq_driver)
                return cpufreq_driver->driver_data;

        return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
/*********************************************************************
 *                     NOTIFIER LISTS INTERFACE                      *
 *********************************************************************/

/**
 * cpufreq_register_notifier - register a driver with cpufreq
 * @nb: notifier function to register
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Add a driver to one of two lists: either a list of drivers that
 * are notified about clock rate changes (once before and once after
 * the transition), or a list of drivers that are notified about
 * changes in cpufreq policy.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_register.
 */
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
        int ret;

        if (cpufreq_disabled())
                return -EINVAL;

        WARN_ON(!init_cpufreq_transition_notifier_list_called);

        switch (list) {
        case CPUFREQ_TRANSITION_NOTIFIER:
                ret = srcu_notifier_chain_register(
                                &cpufreq_transition_notifier_list, nb);
                break;
        case CPUFREQ_POLICY_NOTIFIER:
                ret = blocking_notifier_chain_register(
                                &cpufreq_policy_notifier_list, nb);
                break;
        default:
                ret = -EINVAL;
        }

        return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);

/**
 * cpufreq_unregister_notifier - unregister a driver with cpufreq
 * @nb: notifier block to be unregistered
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Remove a driver from the CPU frequency notifier list.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_unregister.
 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
        int ret;

        if (cpufreq_disabled())
                return -EINVAL;

        switch (list) {
        case CPUFREQ_TRANSITION_NOTIFIER:
                ret = srcu_notifier_chain_unregister(
                                &cpufreq_transition_notifier_list, nb);
                break;
        case CPUFREQ_POLICY_NOTIFIER:
                ret = blocking_notifier_chain_unregister(
                                &cpufreq_policy_notifier_list, nb);
                break;
        default:
                ret = -EINVAL;
        }

        return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);
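/*
 * Usage sketch (hypothetical; my_transition_call is an example name):
 * registering for transition notifications with the API above:
 *
 *      static struct notifier_block my_nb = {
 *              .notifier_call = my_transition_call,
 *      };
 *
 *      cpufreq_register_notifier(&my_nb, CPUFREQ_TRANSITION_NOTIFIER);
 *      ...
 *      cpufreq_unregister_notifier(&my_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */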
/*********************************************************************
 *                              GOVERNORS                            *
 *********************************************************************/

/* Must set freqs->new to intermediate frequency */
static int __target_intermediate(struct cpufreq_policy *policy,
                                 struct cpufreq_freqs *freqs, int index)
{
        int ret;

        freqs->new = cpufreq_driver->get_intermediate(policy, index);

        /* We don't need to switch to intermediate freq */
        if (!freqs->new)
                return 0;

        pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
                 __func__, policy->cpu, freqs->old, freqs->new);

        cpufreq_freq_transition_begin(policy, freqs);
        ret = cpufreq_driver->target_intermediate(policy, index);
        cpufreq_freq_transition_end(policy, freqs, ret);

        if (ret)
                pr_err("%s: Failed to change to intermediate frequency: %d\n",
                       __func__, ret);

        return ret;
}
static int __target_index(struct cpufreq_policy *policy,
                          struct cpufreq_frequency_table *freq_table, int index)
{
        struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
        unsigned int intermediate_freq = 0;
        int retval = -EINVAL;
        bool notify;

        notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
        if (notify) {
                /* Handle switching to intermediate frequency */
                if (cpufreq_driver->get_intermediate) {
                        retval = __target_intermediate(policy, &freqs, index);
                        if (retval)
                                return retval;

                        intermediate_freq = freqs.new;
                        /* Set old freq to intermediate */
                        if (intermediate_freq)
                                freqs.old = freqs.new;
                }

                freqs.new = freq_table[index].frequency;
                pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
                         __func__, policy->cpu, freqs.old, freqs.new);

                cpufreq_freq_transition_begin(policy, &freqs);
        }

        retval = cpufreq_driver->target_index(policy, index);
        if (retval)
                pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
                       retval);

        if (notify) {
                cpufreq_freq_transition_end(policy, &freqs, retval);

                /*
                 * Failed after setting to intermediate freq? Driver should have
                 * reverted back to initial frequency and so should we. Check
                 * here for intermediate_freq instead of get_intermediate, in
                 * case we haven't switched to intermediate freq at all.
                 */
                if (unlikely(retval && intermediate_freq)) {
                        freqs.old = intermediate_freq;
                        freqs.new = policy->restore_freq;
                        cpufreq_freq_transition_begin(policy, &freqs);
                        cpufreq_freq_transition_end(policy, &freqs, 0);
                }
        }

        return retval;
}
int __cpufreq_driver_target(struct cpufreq_policy *policy,
                            unsigned int target_freq,
                            unsigned int relation)
{
        unsigned int old_target_freq = target_freq;
        int retval = -EINVAL;

        if (cpufreq_disabled())
                return -ENODEV;

        /* Make sure that target_freq is within supported range */
        if (target_freq > policy->max)
                target_freq = policy->max;
        if (target_freq < policy->min)
                target_freq = policy->min;

        pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
                 policy->cpu, target_freq, relation, old_target_freq);

        /*
         * This might look like a redundant call as we are checking it again
         * after finding index. But it is left intentionally for cases where
         * exactly same freq is called again and so we can save on few function
         * calls.
         */
        if (target_freq == policy->cur)
                return 0;

        /* Save last value to restore later on errors */
        policy->restore_freq = policy->cur;

        if (cpufreq_driver->target)
                retval = cpufreq_driver->target(policy, target_freq, relation);
        else if (cpufreq_driver->target_index) {
                struct cpufreq_frequency_table *freq_table;
                int index;

                freq_table = cpufreq_frequency_get_table(policy->cpu);
                if (unlikely(!freq_table)) {
                        pr_err("%s: Unable to find freq_table\n", __func__);
                        goto out;
                }

                retval = cpufreq_frequency_table_target(policy, freq_table,
                                target_freq, relation, &index);
                if (unlikely(retval)) {
                        pr_err("%s: Unable to find matching freq\n", __func__);
                        goto out;
                }

                if (freq_table[index].frequency == policy->cur) {
                        retval = 0;
                        goto out;
                }

                retval = __target_index(policy, freq_table, index);
        }

out:
        return retval;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);

int cpufreq_driver_target(struct cpufreq_policy *policy,
                          unsigned int target_freq,
                          unsigned int relation)
{
        int ret = -EINVAL;

        down_write(&policy->rwsem);

        ret = __cpufreq_driver_target(policy, target_freq, relation);

        up_write(&policy->rwsem);

        return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
static int __cpufreq_governor(struct cpufreq_policy *policy,
                                        unsigned int event)
{
        int ret;

        /* Must only be defined when the default governor is known to have
           latency restrictions, like e.g. conservative or ondemand.
           That this is the case is already ensured in Kconfig
        */
#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
        struct cpufreq_governor *gov = &cpufreq_gov_performance;
#else
        struct cpufreq_governor *gov = NULL;
#endif

        /* Don't start any governor operations if we are entering suspend */
        if (cpufreq_suspended)
                return 0;
        /*
         * Governor might not be initiated here if ACPI _PPC changed
         * notification happened, so check it.
         */
        if (!policy->governor)
                return -EINVAL;

        if (policy->governor->max_transition_latency &&
            policy->cpuinfo.transition_latency >
            policy->governor->max_transition_latency) {
                if (!gov)
                        return -EINVAL;
                else {
                        pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
                                policy->governor->name, gov->name);
                        policy->governor = gov;
                }
        }

        if (event == CPUFREQ_GOV_POLICY_INIT)
                if (!try_module_get(policy->governor->owner))
                        return -EINVAL;

        pr_debug("__cpufreq_governor for CPU %u, event %u\n",
                 policy->cpu, event);

        mutex_lock(&cpufreq_governor_lock);
        if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
            || (!policy->governor_enabled
            && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
                mutex_unlock(&cpufreq_governor_lock);
                return -EBUSY;
        }

        if (event == CPUFREQ_GOV_STOP)
                policy->governor_enabled = false;
        else if (event == CPUFREQ_GOV_START)
                policy->governor_enabled = true;

        mutex_unlock(&cpufreq_governor_lock);

        ret = policy->governor->governor(policy, event);

        if (!ret) {
                if (event == CPUFREQ_GOV_POLICY_INIT)
                        policy->governor->initialized++;
                else if (event == CPUFREQ_GOV_POLICY_EXIT)
                        policy->governor->initialized--;
        } else {
                /* Restore original values */
                mutex_lock(&cpufreq_governor_lock);
                if (event == CPUFREQ_GOV_STOP)
                        policy->governor_enabled = true;
                else if (event == CPUFREQ_GOV_START)
                        policy->governor_enabled = false;
                mutex_unlock(&cpufreq_governor_lock);
        }

        if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
                        ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
                module_put(policy->governor->owner);

        return ret;
}
int cpufreq_register_governor(struct cpufreq_governor *governor)
{
        int err;

        if (!governor)
                return -EINVAL;

        if (cpufreq_disabled())
                return -ENODEV;

        mutex_lock(&cpufreq_governor_mutex);

        governor->initialized = 0;
        err = -EBUSY;
        if (!find_governor(governor->name)) {
                err = 0;
                list_add(&governor->governor_list, &cpufreq_governor_list);
        }

        mutex_unlock(&cpufreq_governor_mutex);
        return err;
}
EXPORT_SYMBOL_GPL(cpufreq_register_governor);

void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
        struct cpufreq_policy *policy;
        unsigned long flags;

        if (!governor)
                return;

        if (cpufreq_disabled())
                return;

        /* clear last_governor for all inactive policies */
        read_lock_irqsave(&cpufreq_driver_lock, flags);
        for_each_inactive_policy(policy) {
                if (!strcmp(policy->last_governor, governor->name)) {
                        policy->governor = NULL;
                        strcpy(policy->last_governor, "\0");
                }
        }
        read_unlock_irqrestore(&cpufreq_driver_lock, flags);

        mutex_lock(&cpufreq_governor_mutex);
        list_del(&governor->governor_list);
        mutex_unlock(&cpufreq_governor_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
/*********************************************************************
 *                          POLICY INTERFACE                         *
 *********************************************************************/

/**
 * cpufreq_get_policy - get the current cpufreq_policy
 * @policy: struct cpufreq_policy into which the current cpufreq_policy
 *	is written
 *
 * Reads the current cpufreq policy.
 */
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
{
        struct cpufreq_policy *cpu_policy;
        if (!policy)
                return -EINVAL;

        cpu_policy = cpufreq_cpu_get(cpu);
        if (!cpu_policy)
                return -EINVAL;

        memcpy(policy, cpu_policy, sizeof(*policy));

        cpufreq_cpu_put(cpu_policy);

        return 0;
}
EXPORT_SYMBOL(cpufreq_get_policy);
/*
 * policy : current policy.
 * new_policy: policy to be set.
 */
static int cpufreq_set_policy(struct cpufreq_policy *policy,
                                struct cpufreq_policy *new_policy)
{
        struct cpufreq_governor *old_gov;
        int ret;

        pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
                 new_policy->cpu, new_policy->min, new_policy->max);

        memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));

        if (new_policy->min > policy->max || new_policy->max < policy->min)
                return -EINVAL;

        /* verify the cpu speed can be set within this limit */
        ret = cpufreq_driver->verify(new_policy);
        if (ret)
                return ret;

        /* adjust if necessary - all reasons */
        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                        CPUFREQ_ADJUST, new_policy);

        /* adjust if necessary - hardware incompatibility */
        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                        CPUFREQ_INCOMPATIBLE, new_policy);

        /*
         * verify the cpu speed can be set within this limit, which might be
         * different to the first one
         */
        ret = cpufreq_driver->verify(new_policy);
        if (ret)
                return ret;

        /* notification of the new policy */
        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                        CPUFREQ_NOTIFY, new_policy);

        policy->min = new_policy->min;
        policy->max = new_policy->max;

        pr_debug("new min and max freqs are %u - %u kHz\n",
                 policy->min, policy->max);

        if (cpufreq_driver->setpolicy) {
                policy->policy = new_policy->policy;
                pr_debug("setting range\n");
                return cpufreq_driver->setpolicy(new_policy);
        }

        if (new_policy->governor == policy->governor)
                goto out;

        pr_debug("governor switch\n");

        /* save old, working values */
        old_gov = policy->governor;
        /* end old governor */
        if (old_gov) {
                __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
                up_write(&policy->rwsem);
                __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
                down_write(&policy->rwsem);
        }

        /* start new governor */
        policy->governor = new_policy->governor;
        if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
                if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
                        goto out;

                up_write(&policy->rwsem);
                __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
                down_write(&policy->rwsem);
        }

        /* new governor failed, so re-start old one */
        pr_debug("starting governor %s failed\n", policy->governor->name);
        if (old_gov) {
                policy->governor = old_gov;
                __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
                __cpufreq_governor(policy, CPUFREQ_GOV_START);
        }

        return -EINVAL;

 out:
        pr_debug("governor: change or update limits\n");
        return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
}

/**
 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
 * @cpu: CPU which shall be re-evaluated
 *
 * Useful for policy notifiers which have different necessities
 * at different times.
 */
int cpufreq_update_policy(unsigned int cpu)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
        struct cpufreq_policy new_policy;
        int ret;

        if (!policy)
                return -ENODEV;

        down_write(&policy->rwsem);

        pr_debug("updating policy for CPU %u\n", cpu);
        memcpy(&new_policy, policy, sizeof(*policy));
        new_policy.min = policy->user_policy.min;
        new_policy.max = policy->user_policy.max;
        new_policy.policy = policy->user_policy.policy;
        new_policy.governor = policy->user_policy.governor;

        /*
         * BIOS might change freq behind our back
         * -> ask driver for current freq and notify governors about a change
         */
        if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
                new_policy.cur = cpufreq_driver->get(cpu);
                if (WARN_ON(!new_policy.cur)) {
                        ret = -EIO;
                        goto unlock;
                }

                if (!policy->cur) {
                        pr_debug("Driver did not initialize current freq\n");
                        policy->cur = new_policy.cur;
                } else {
                        if (policy->cur != new_policy.cur && has_target())
                                cpufreq_out_of_sync(policy, new_policy.cur);
                }
        }

        ret = cpufreq_set_policy(policy, &new_policy);

unlock:
        up_write(&policy->rwsem);

        cpufreq_cpu_put(policy);
        return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);
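
/*
 * Example: platform code nudging cpufreq after firmware changes the
 * available frequency range behind the kernel's back (illustrative
 * sketch; the handler name is hypothetical). cpufreq_update_policy()
 * re-reads the hardware frequency and re-applies the user policy.
 */
#if 0
static void example_thermal_event(unsigned int cpu)
{
        cpufreq_update_policy(cpu);
}
#endif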

static int cpufreq_cpu_callback(struct notifier_block *nfb,
                                unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;
        struct device *dev;

        dev = get_cpu_device(cpu);
        if (dev) {
                switch (action & ~CPU_TASKS_FROZEN) {
                case CPU_ONLINE:
                        cpufreq_add_dev(dev, NULL);
                        break;

                case CPU_DOWN_PREPARE:
                        __cpufreq_remove_dev_prepare(dev, NULL);
                        break;

                case CPU_POST_DEAD:
                        __cpufreq_remove_dev_finish(dev, NULL);
                        break;

                case CPU_DOWN_FAILED:
                        cpufreq_add_dev(dev, NULL);
                        break;
                }
        }
        return NOTIFY_OK;
}

static struct notifier_block __refdata cpufreq_cpu_notifier = {
        .notifier_call = cpufreq_cpu_callback,
};

/*********************************************************************
 *                              BOOST                                *
 *********************************************************************/
static int cpufreq_boost_set_sw(int state)
{
        struct cpufreq_frequency_table *freq_table;
        struct cpufreq_policy *policy;
        int ret = -EINVAL;

        for_each_active_policy(policy) {
                freq_table = cpufreq_frequency_get_table(policy->cpu);
                if (freq_table) {
                        ret = cpufreq_frequency_table_cpuinfo(policy,
                                                              freq_table);
                        if (ret) {
                                pr_err("%s: Policy frequency update failed\n",
                                       __func__);
                                break;
                        }
                        policy->user_policy.max = policy->max;
                        __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
                }
        }

        return ret;
}

int cpufreq_boost_trigger_state(int state)
{
        unsigned long flags;
        int ret = 0;

        if (cpufreq_driver->boost_enabled == state)
                return 0;

        write_lock_irqsave(&cpufreq_driver_lock, flags);
        cpufreq_driver->boost_enabled = state;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

        ret = cpufreq_driver->set_boost(state);
        if (ret) {
                /* The driver rejected the new state; restore the old one */
                write_lock_irqsave(&cpufreq_driver_lock, flags);
                cpufreq_driver->boost_enabled = !state;
                write_unlock_irqrestore(&cpufreq_driver_lock, flags);

                pr_err("%s: Cannot %s BOOST\n",
                       __func__, state ? "enable" : "disable");
        }

        return ret;
}

int cpufreq_boost_supported(void)
{
        if (likely(cpufreq_driver))
                return cpufreq_driver->boost_supported;

        return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_supported);

int cpufreq_boost_enabled(void)
{
        return cpufreq_driver->boost_enabled;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
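
/*
 * Illustrative sketch (hypothetical caller): the global sysfs "boost"
 * attribute referenced below as &boost.attr funnels into
 * cpufreq_boost_trigger_state(); core or platform code could do the
 * same, e.g. dropping out of boost mode under thermal pressure.
 */
#if 0
        if (cpufreq_boost_supported() && cpufreq_boost_enabled())
                cpufreq_boost_trigger_state(0);     /* leave boost mode */
#endif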

/*********************************************************************
 *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
 *********************************************************************/

/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EBUSY when another driver got here first
 * (and isn't unregistered in the meantime).
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
        unsigned long flags;
        int ret;

        if (cpufreq_disabled())
                return -ENODEV;

        /*
         * A driver must provide verify() and init(), exactly one of
         * setpolicy() or target()/target_index(), and get_intermediate()
         * and target_intermediate() only as a pair.
         */
        if (!driver_data || !driver_data->verify || !driver_data->init ||
            !(driver_data->setpolicy || driver_data->target_index ||
                    driver_data->target) ||
             (driver_data->setpolicy && (driver_data->target_index ||
                    driver_data->target)) ||
             (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
                return -EINVAL;

        pr_debug("trying to register driver %s\n", driver_data->name);

        write_lock_irqsave(&cpufreq_driver_lock, flags);
        if (cpufreq_driver) {
                write_unlock_irqrestore(&cpufreq_driver_lock, flags);
                return -EEXIST;
        }
        cpufreq_driver = driver_data;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

        if (driver_data->setpolicy)
                driver_data->flags |= CPUFREQ_CONST_LOOPS;

        if (cpufreq_boost_supported()) {
                /*
                 * Check if driver provides function to enable boost -
                 * if not, use cpufreq_boost_set_sw as default
                 */
                if (!cpufreq_driver->set_boost)
                        cpufreq_driver->set_boost = cpufreq_boost_set_sw;

                ret = cpufreq_sysfs_create_file(&boost.attr);
                if (ret) {
                        pr_err("%s: cannot register global BOOST sysfs file\n",
                               __func__);
                        goto err_null_driver;
                }
        }

        ret = subsys_interface_register(&cpufreq_interface);
        if (ret)
                goto err_boost_unreg;

        if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
            list_empty(&cpufreq_policy_list)) {
                /* if all ->init() calls failed, unregister */
                ret = -ENODEV;
                pr_debug("%s: No CPU initialized for driver %s\n", __func__,
                         driver_data->name);
                goto err_if_unreg;
        }

        register_hotcpu_notifier(&cpufreq_cpu_notifier);
        pr_debug("driver %s up and running\n", driver_data->name);

        return 0;
err_if_unreg:
        subsys_interface_unregister(&cpufreq_interface);
err_boost_unreg:
        if (cpufreq_boost_supported())
                cpufreq_sysfs_remove_file(&boost.attr);
err_null_driver:
        write_lock_irqsave(&cpufreq_driver_lock, flags);
        cpufreq_driver = NULL;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
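
/*
 * Example: skeleton of a table-based driver (illustrative sketch only;
 * the "example" names and frequencies are hypothetical). A driver must
 * supply init() and verify(), plus either target_index() (as here) or
 * setpolicy(), to satisfy the validation in cpufreq_register_driver().
 */
#if 0
static struct cpufreq_frequency_table example_freq_table[] = {
        { .frequency = 500000 },                /* kHz */
        { .frequency = 1000000 },
        { .frequency = CPUFREQ_TABLE_END },
};

static int example_init(struct cpufreq_policy *policy)
{
        return cpufreq_table_validate_and_show(policy, example_freq_table);
}

static int example_target_index(struct cpufreq_policy *policy,
                                unsigned int index)
{
        /* program clocks/regulators for example_freq_table[index].frequency */
        return 0;
}

static struct cpufreq_driver example_cpufreq_driver = {
        .name           = "example",
        .init           = example_init,
        .verify         = cpufreq_generic_frequency_table_verify,
        .target_index   = example_target_index,
        .attr           = cpufreq_generic_attr,
};

static int __init example_cpufreq_init(void)
{
        return cpufreq_register_driver(&example_cpufreq_driver);
}

static void __exit example_cpufreq_exit(void)
{
        cpufreq_unregister_driver(&example_cpufreq_driver);
}

module_init(example_cpufreq_init);
module_exit(example_cpufreq_exit);
MODULE_LICENSE("GPL");
#endif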

/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 * @driver: the driver being unregistered
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
        unsigned long flags;

        if (!cpufreq_driver || (driver != cpufreq_driver))
                return -EINVAL;

        pr_debug("unregistering driver %s\n", driver->name);

        subsys_interface_unregister(&cpufreq_interface);
        if (cpufreq_boost_supported())
                cpufreq_sysfs_remove_file(&boost.attr);

        unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

        down_write(&cpufreq_rwsem);
        write_lock_irqsave(&cpufreq_driver_lock, flags);

        cpufreq_driver = NULL;

        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
        up_write(&cpufreq_rwsem);

        return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);

/*
 * Stop cpufreq at shutdown to make sure it isn't holding any locks
 * or mutexes when secondary CPUs are halted.
 */
static struct syscore_ops cpufreq_syscore_ops = {
        .shutdown = cpufreq_suspend,
};

static int __init cpufreq_core_init(void)
{
        if (cpufreq_disabled())
                return -ENODEV;

        cpufreq_global_kobject = kobject_create();
        BUG_ON(!cpufreq_global_kobject);

        register_syscore_ops(&cpufreq_syscore_ops);

        return 0;
}
core_initcall(cpufreq_core_init);