git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/cpufreq/cpufreq.c
cpufreq: Don't clear cpufreq_cpu_data and policy list for inactive policies
1da177e4
LT
1/*
2 * linux/drivers/cpufreq/cpufreq.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
bb176f7d 6 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
1da177e4 7 *
c32b6b8e 8 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
32ee8c3e 9 * Added handling for CPU hotplug
8ff69732
DJ
10 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
11 * Fix handling for CPU hotplug -- affected CPUs
c32b6b8e 12 *
1da177e4
LT
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
1da177e4
LT
16 */
17
db701151
VK
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
5ff0a268 20#include <linux/cpu.h>
1da177e4
LT
21#include <linux/cpufreq.h>
22#include <linux/delay.h>
1da177e4 23#include <linux/device.h>
5ff0a268
VK
24#include <linux/init.h>
25#include <linux/kernel_stat.h>
26#include <linux/module.h>
3fc54d37 27#include <linux/mutex.h>
5ff0a268 28#include <linux/slab.h>
2f0aea93 29#include <linux/suspend.h>
90de2a4a 30#include <linux/syscore_ops.h>
5ff0a268 31#include <linux/tick.h>
6f4f2723
TR
32#include <trace/events/power.h>
33
b4f0676f 34static LIST_HEAD(cpufreq_policy_list);
f963735a
VK
35
36static inline bool policy_is_inactive(struct cpufreq_policy *policy)
37{
38 return cpumask_empty(policy->cpus);
39}
40
41static bool suitable_policy(struct cpufreq_policy *policy, bool active)
42{
43 return active == !policy_is_inactive(policy);
44}
45
46/* Finds next active/inactive policy */
47static struct cpufreq_policy *next_policy(struct cpufreq_policy *policy,
48 bool active)
49{
50 do {
51 policy = list_next_entry(policy, policy_list);
52
53 /* No more policies in the list */
54 if (&policy->policy_list == &cpufreq_policy_list)
55 return NULL;
56 } while (!suitable_policy(policy, active));
57
58 return policy;
59}
60
61static struct cpufreq_policy *first_policy(bool active)
62{
63 struct cpufreq_policy *policy;
64
65 /* No policies in the list */
66 if (list_empty(&cpufreq_policy_list))
67 return NULL;
68
69 policy = list_first_entry(&cpufreq_policy_list, typeof(*policy),
70 policy_list);
71
72 if (!suitable_policy(policy, active))
73 policy = next_policy(policy, active);
74
75 return policy;
76}
77
78/* Macros to iterate over CPU policies */
79#define for_each_suitable_policy(__policy, __active) \
80 for (__policy = first_policy(__active); \
81 __policy; \
82 __policy = next_policy(__policy, __active))
83
84#define for_each_active_policy(__policy) \
85 for_each_suitable_policy(__policy, true)
86#define for_each_inactive_policy(__policy) \
87 for_each_suitable_policy(__policy, false)
88
89#define for_each_policy(__policy) \
b4f0676f
VK
90 list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)
91
f7b27061
VK
92/* Iterate over governors */
93static LIST_HEAD(cpufreq_governor_list);
94#define for_each_governor(__governor) \
95 list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
96
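/*
 * Usage sketch for the iterators above (illustrative only, not part of the
 * original file): count the active policies and walk the registered
 * governors. Assumes the caller holds cpufreq_driver_lock /
 * cpufreq_governor_mutex as appropriate, since both lists are protected.
 */
static inline unsigned int cpufreq_count_active_policies_example(void)
{
	struct cpufreq_policy *policy;
	struct cpufreq_governor *gov;
	unsigned int count = 0;

	for_each_active_policy(policy)	/* skips policies with an empty ->cpus */
		count++;

	for_each_governor(gov)		/* walks cpufreq_governor_list */
		pr_debug("registered governor: %s\n", gov->name);

	return count;
}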
1da177e4 97/**
cd878479 98 * The "cpufreq driver" - the arch- or hardware-dependent low
1da177e4
LT
99 * level driver of CPUFreq support, and its spinlock. This lock
100 * also protects the cpufreq_cpu_data array.
101 */
1c3d85dd 102static struct cpufreq_driver *cpufreq_driver;
7a6aedfa 103static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
8414809c 104static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
bb176f7d 105static DEFINE_RWLOCK(cpufreq_driver_lock);
6f1e4efd 106DEFINE_MUTEX(cpufreq_governor_lock);
bb176f7d 107
084f3493 108/* This one keeps track of the previously set governor of a removed CPU */
e77b89f1 109static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
1da177e4 110
2f0aea93
VK
111/* Flag to suspend/resume CPUFreq governors */
112static bool cpufreq_suspended;
1da177e4 113
9c0ebcf7
VK
114static inline bool has_target(void)
115{
116 return cpufreq_driver->target_index || cpufreq_driver->target;
117}
118
6eed9404
VK
119/*
120 * rwsem to guarantee that cpufreq driver module doesn't unload during critical
121 * sections
122 */
123static DECLARE_RWSEM(cpufreq_rwsem);
124
1da177e4 125/* internal prototypes */
29464f28
DJ
126static int __cpufreq_governor(struct cpufreq_policy *policy,
127 unsigned int event);
d92d50a4 128static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
65f27f38 129static void handle_update(struct work_struct *work);
1da177e4
LT
130
131/**
32ee8c3e
DJ
132 * Two notifier lists: the "policy" list is involved in the
133 * validation process for a new CPU frequency policy; the
1da177e4
LT
134 * "transition" list for kernel code that needs to handle
135 * changes to devices when the CPU clock speed changes.
136 * The mutex locks both lists.
137 */
e041c683 138static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
b4dfdbb3 139static struct srcu_notifier_head cpufreq_transition_notifier_list;
1da177e4 140
74212ca4 141static bool init_cpufreq_transition_notifier_list_called;
b4dfdbb3
AS
142static int __init init_cpufreq_transition_notifier_list(void)
143{
144 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
74212ca4 145 init_cpufreq_transition_notifier_list_called = true;
b4dfdbb3
AS
146 return 0;
147}
b3438f82 148pure_initcall(init_cpufreq_transition_notifier_list);
1da177e4 149
a7b422cd 150static int off __read_mostly;
da584455 151static int cpufreq_disabled(void)
a7b422cd
KRW
152{
153 return off;
154}
155void disable_cpufreq(void)
156{
157 off = 1;
158}
29464f28 159static DEFINE_MUTEX(cpufreq_governor_mutex);
1da177e4 160
4d5dcc42
VK
161bool have_governor_per_policy(void)
162{
0b981e70 163 return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
4d5dcc42 164}
3f869d6d 165EXPORT_SYMBOL_GPL(have_governor_per_policy);
4d5dcc42 166
944e9a03
VK
167struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
168{
169 if (have_governor_per_policy())
170 return &policy->kobj;
171 else
172 return cpufreq_global_kobject;
173}
174EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
175
72a4ce34
VK
176static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
177{
178 u64 idle_time;
179 u64 cur_wall_time;
180 u64 busy_time;
181
182 cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
183
184 busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
185 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
186 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
187 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
188 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
189 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
190
191 idle_time = cur_wall_time - busy_time;
192 if (wall)
193 *wall = cputime_to_usecs(cur_wall_time);
194
195 return cputime_to_usecs(idle_time);
196}
197
198u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
199{
200 u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
201
202 if (idle_time == -1ULL)
203 return get_cpu_idle_time_jiffy(cpu, wall);
204 else if (!io_busy)
205 idle_time += get_cpu_iowait_time_us(cpu, wall);
206
207 return idle_time;
208}
209EXPORT_SYMBOL_GPL(get_cpu_idle_time);
210
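/*
 * Consumption sketch (not part of the original file): sampling governors
 * typically call get_cpu_idle_time() twice and derive a load figure from
 * the deltas. The helper name and the io_busy=0 choice are assumptions;
 * div64_u64() comes from <linux/math64.h>.
 */
static inline unsigned int cpu_load_percent_example(unsigned int cpu,
						    u64 *prev_idle, u64 *prev_wall)
{
	u64 cur_wall, cur_idle, idle_delta, wall_delta;

	cur_idle = get_cpu_idle_time(cpu, &cur_wall, 0);
	idle_delta = cur_idle - *prev_idle;
	wall_delta = cur_wall - *prev_wall;
	*prev_idle = cur_idle;
	*prev_wall = cur_wall;

	if (!wall_delta || wall_delta < idle_delta)
		return 0;

	/* busy% = 100 * (wall - idle) / wall over the sampling window */
	return (unsigned int)div64_u64(100 * (wall_delta - idle_delta),
				       wall_delta);
}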
70e9e778
VK
211/*
212 * This is a generic cpufreq init() routine which can be used by cpufreq
213 * drivers of SMP systems. It will do the following:
214 * - validate & show the frequency table passed
215 * - set the policy's transition latency
216 * - fill policy->cpus with all possible CPUs
217 */
218int cpufreq_generic_init(struct cpufreq_policy *policy,
219 struct cpufreq_frequency_table *table,
220 unsigned int transition_latency)
221{
222 int ret;
223
224 ret = cpufreq_table_validate_and_show(policy, table);
225 if (ret) {
226 pr_err("%s: invalid frequency table: %d\n", __func__, ret);
227 return ret;
228 }
229
230 policy->cpuinfo.transition_latency = transition_latency;
231
232 /*
233 * The driver only supports the SMP configuration where all processors
234 * share the clock and voltage.
235 */
236 cpumask_setall(policy->cpus);
237
238 return 0;
239}
240EXPORT_SYMBOL_GPL(cpufreq_generic_init);
241
988bed09
VK
242/* Only for cpufreq core internal use */
243struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
652ed95d
VK
244{
245 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
246
988bed09
VK
247 return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
248}
249
250unsigned int cpufreq_generic_get(unsigned int cpu)
251{
252 struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
253
652ed95d 254 if (!policy || IS_ERR(policy->clk)) {
e837f9b5
JP
255 pr_err("%s: No %s associated to cpu: %d\n",
256 __func__, policy ? "clk" : "policy", cpu);
652ed95d
VK
257 return 0;
258 }
259
260 return clk_get_rate(policy->clk) / 1000;
261}
262EXPORT_SYMBOL_GPL(cpufreq_generic_get);
263
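/*
 * Driver-side wiring sketch (hypothetical "foo" driver, illustration only,
 * hence the #if 0): cpufreq_generic_init() fills in the table, latency and
 * policy->cpus, and cpufreq_generic_get() then reports the rate of
 * policy->clk, which the driver must assign in its ->init().
 */
#if 0
static int foo_cpufreq_init(struct cpufreq_policy *policy)
{
	policy->clk = foo_clk;	/* one clock shared by all CPUs, set up elsewhere */
	return cpufreq_generic_init(policy, foo_freq_table, 100000 /* ns */);
}

static struct cpufreq_driver foo_cpufreq_driver = {
	.name		= "foo-cpufreq",
	.init		= foo_cpufreq_init,
	.get		= cpufreq_generic_get,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= foo_target_index,
	.attr		= cpufreq_generic_attr,
};
#endif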
50e9c852
VK
264/**
265 * cpufreq_cpu_get: returns policy for a cpu and marks it busy.
266 *
267 * @cpu: cpu to find policy for.
268 *
269 * This returns the policy for 'cpu', or NULL if it doesn't exist.
270 * It also increments the kobject reference count to mark the policy busy, so
271 * a corresponding call to cpufreq_cpu_put() is required to decrement it back.
272 * If that cpufreq_cpu_put() call isn't made, the policy will never be
273 * freed, as freeing depends on the kobj count.
274 *
275 * It also takes a read-lock of 'cpufreq_rwsem' and doesn't put it back if a
276 * valid policy is found. This is done to make sure the driver doesn't get
277 * unregistered while the policy is being used.
278 *
279 * Return: A valid policy on success, otherwise NULL on failure.
280 */
6eed9404 281struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
1da177e4 282{
6eed9404 283 struct cpufreq_policy *policy = NULL;
1da177e4
LT
284 unsigned long flags;
285
1b947c90 286 if (WARN_ON(cpu >= nr_cpu_ids))
6eed9404
VK
287 return NULL;
288
289 if (!down_read_trylock(&cpufreq_rwsem))
290 return NULL;
1da177e4
LT
291
292 /* get the cpufreq driver */
1c3d85dd 293 read_lock_irqsave(&cpufreq_driver_lock, flags);
1da177e4 294
6eed9404
VK
295 if (cpufreq_driver) {
296 /* get the CPU */
988bed09 297 policy = cpufreq_cpu_get_raw(cpu);
6eed9404
VK
298 if (policy)
299 kobject_get(&policy->kobj);
300 }
1da177e4 301
6eed9404 302 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4 303
3a3e9e06 304 if (!policy)
6eed9404 305 up_read(&cpufreq_rwsem);
1da177e4 306
3a3e9e06 307 return policy;
a9144436 308}
1da177e4
LT
309EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
310
50e9c852
VK
311/**
312 * cpufreq_cpu_put: Decrements the usage count of a policy
313 *
314 * @policy: policy earlier returned by cpufreq_cpu_get().
315 *
316 * This decrements the kobject reference count incremented earlier by calling
317 * cpufreq_cpu_get().
318 *
319 * It also drops the read-lock of 'cpufreq_rwsem' taken at cpufreq_cpu_get().
320 */
3a3e9e06 321void cpufreq_cpu_put(struct cpufreq_policy *policy)
1da177e4 322{
6eed9404
VK
323 kobject_put(&policy->kobj);
324 up_read(&cpufreq_rwsem);
1da177e4
LT
325}
326EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
327
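/*
 * Reference-counting pattern for the pair above (sketch; the use of
 * policy->max is arbitrary):
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *
 *	if (policy) {
 *		pr_info("cpu%u max: %u kHz\n", cpu, policy->max);
 *		cpufreq_cpu_put(policy);
 *	}
 *
 * cpufreq_cpu_put() drops both the kobject reference and cpufreq_rwsem.
 */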
1da177e4
LT
328/*********************************************************************
329 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
330 *********************************************************************/
331
332/**
333 * adjust_jiffies - adjust the system "loops_per_jiffy"
334 *
335 * This function alters the system "loops_per_jiffy" for the clock
336 * speed change. Note that loops_per_jiffy cannot be updated on SMP
32ee8c3e 337 * systems as each CPU might be scaled differently. So, use the arch
1da177e4
LT
338 * per-CPU loops_per_jiffy value wherever possible.
339 */
858119e1 340static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
1da177e4 341{
39c132ee
VK
342#ifndef CONFIG_SMP
343 static unsigned long l_p_j_ref;
344 static unsigned int l_p_j_ref_freq;
345
1da177e4
LT
346 if (ci->flags & CPUFREQ_CONST_LOOPS)
347 return;
348
349 if (!l_p_j_ref_freq) {
350 l_p_j_ref = loops_per_jiffy;
351 l_p_j_ref_freq = ci->old;
e837f9b5
JP
352 pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
353 l_p_j_ref, l_p_j_ref_freq);
1da177e4 354 }
0b443ead 355 if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
e08f5f5b
GS
356 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
357 ci->new);
e837f9b5
JP
358 pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
359 loops_per_jiffy, ci->new);
1da177e4 360 }
1da177e4 361#endif
39c132ee 362}
1da177e4 363
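/*
 * Worked example of the scaling above (illustrative numbers): with
 * l_p_j_ref = 4997120 loops saved at l_p_j_ref_freq = 2000000 kHz, a
 * POSTCHANGE to 1000000 kHz gives
 * cpufreq_scale(4997120, 2000000, 1000000) = 4997120 * 1000000 / 2000000
 * = 2498560, which keeps udelay() roughly calibrated on UP systems.
 */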
0956df9c 364static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
b43a7ffb 365 struct cpufreq_freqs *freqs, unsigned int state)
1da177e4
LT
366{
367 BUG_ON(irqs_disabled());
368
d5aaffa9
DB
369 if (cpufreq_disabled())
370 return;
371
1c3d85dd 372 freqs->flags = cpufreq_driver->flags;
2d06d8c4 373 pr_debug("notification %u of frequency transition to %u kHz\n",
e837f9b5 374 state, freqs->new);
1da177e4 375
1da177e4 376 switch (state) {
e4472cb3 377
1da177e4 378 case CPUFREQ_PRECHANGE:
32ee8c3e 379 /* detect if the driver reported a value as "old frequency"
e4472cb3
DJ
380 * which is not equal to what the cpufreq core thinks is
381 * "old frequency".
1da177e4 382 */
1c3d85dd 383 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
e4472cb3
DJ
384 if ((policy) && (policy->cpu == freqs->cpu) &&
385 (policy->cur) && (policy->cur != freqs->old)) {
e837f9b5
JP
386 pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
387 freqs->old, policy->cur);
e4472cb3 388 freqs->old = policy->cur;
1da177e4
LT
389 }
390 }
b4dfdbb3 391 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
e041c683 392 CPUFREQ_PRECHANGE, freqs);
1da177e4
LT
393 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
394 break;
e4472cb3 395
1da177e4
LT
396 case CPUFREQ_POSTCHANGE:
397 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
e837f9b5
JP
398 pr_debug("FREQ: %lu - CPU: %lu\n",
399 (unsigned long)freqs->new, (unsigned long)freqs->cpu);
25e41933 400 trace_cpu_frequency(freqs->new, freqs->cpu);
b4dfdbb3 401 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
e041c683 402 CPUFREQ_POSTCHANGE, freqs);
e4472cb3
DJ
403 if (likely(policy) && likely(policy->cpu == freqs->cpu))
404 policy->cur = freqs->new;
1da177e4
LT
405 break;
406 }
1da177e4 407}
bb176f7d 408
b43a7ffb
VK
409/**
410 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
411 * on frequency transition.
412 *
413 * This function calls the transition notifiers and the "adjust_jiffies"
414 * function. It is called twice on all CPU frequency changes that have
415 * external effects.
416 */
236a9800 417static void cpufreq_notify_transition(struct cpufreq_policy *policy,
b43a7ffb
VK
418 struct cpufreq_freqs *freqs, unsigned int state)
419{
420 for_each_cpu(freqs->cpu, policy->cpus)
421 __cpufreq_notify_transition(policy, freqs, state);
422}
1da177e4 423
f7ba3b41 424/* Do post notifications when there is a chance that the transition has failed */
236a9800 425static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
f7ba3b41
VK
426 struct cpufreq_freqs *freqs, int transition_failed)
427{
428 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
429 if (!transition_failed)
430 return;
431
432 swap(freqs->old, freqs->new);
433 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
434 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
435}
f7ba3b41 436
12478cf0
SB
437void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
438 struct cpufreq_freqs *freqs)
439{
ca654dc3
SB
440
441 /*
442 * Catch double invocations of _begin() which lead to self-deadlock.
443 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
444 * doesn't invoke _begin() on their behalf, and hence the chances of
445 * double invocations are very low. Moreover, there are scenarios
446 * where these checks can emit false-positive warnings in these
447 * drivers; so we avoid that by skipping them altogether.
448 */
449 WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
450 && current == policy->transition_task);
451
12478cf0
SB
452wait:
453 wait_event(policy->transition_wait, !policy->transition_ongoing);
454
455 spin_lock(&policy->transition_lock);
456
457 if (unlikely(policy->transition_ongoing)) {
458 spin_unlock(&policy->transition_lock);
459 goto wait;
460 }
461
462 policy->transition_ongoing = true;
ca654dc3 463 policy->transition_task = current;
12478cf0
SB
464
465 spin_unlock(&policy->transition_lock);
466
467 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
468}
469EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
470
471void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
472 struct cpufreq_freqs *freqs, int transition_failed)
473{
474 if (unlikely(WARN_ON(!policy->transition_ongoing)))
475 return;
476
477 cpufreq_notify_post_transition(policy, freqs, transition_failed);
478
479 policy->transition_ongoing = false;
ca654dc3 480 policy->transition_task = NULL;
12478cf0
SB
481
482 wake_up(&policy->transition_wait);
483}
484EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
485
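/*
 * Protocol sketch for the pair above as seen from a driver's
 * ->target_index() callback (hypothetical "foo" names, hence the #if 0):
 * _begin() serializes transitions and sends PRECHANGE, _end() sends
 * POSTCHANGE and, on failure, the reversing notifications.
 */
#if 0
static int foo_target_index(struct cpufreq_policy *policy, unsigned int index)
{
	struct cpufreq_freqs freqs;
	int ret;

	freqs.old = policy->cur;
	freqs.new = foo_freq_table[index].frequency;

	cpufreq_freq_transition_begin(policy, &freqs);
	ret = foo_write_pll(freqs.new);		/* program the hardware */
	cpufreq_freq_transition_end(policy, &freqs, ret);

	return ret;
}
#endif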
1da177e4 486
1da177e4
LT
487/*********************************************************************
488 * SYSFS INTERFACE *
489 *********************************************************************/
8a5c74a1 490static ssize_t show_boost(struct kobject *kobj,
6f19efc0
LM
491 struct attribute *attr, char *buf)
492{
493 return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
494}
495
496static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
497 const char *buf, size_t count)
498{
499 int ret, enable;
500
501 ret = sscanf(buf, "%d", &enable);
502 if (ret != 1 || enable < 0 || enable > 1)
503 return -EINVAL;
504
505 if (cpufreq_boost_trigger_state(enable)) {
e837f9b5
JP
506 pr_err("%s: Cannot %s BOOST!\n",
507 __func__, enable ? "enable" : "disable");
6f19efc0
LM
508 return -EINVAL;
509 }
510
e837f9b5
JP
511 pr_debug("%s: cpufreq BOOST %s\n",
512 __func__, enable ? "enabled" : "disabled");
6f19efc0
LM
513
514 return count;
515}
516define_one_global_rw(boost);
1da177e4 517
42f91fa1 518static struct cpufreq_governor *find_governor(const char *str_governor)
3bcb09a3
JF
519{
520 struct cpufreq_governor *t;
521
f7b27061 522 for_each_governor(t)
7c4f4539 523 if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
3bcb09a3
JF
524 return t;
525
526 return NULL;
527}
528
1da177e4
LT
529/**
530 * cpufreq_parse_governor - parse a governor string
531 */
905d77cd 532static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
1da177e4
LT
533 struct cpufreq_governor **governor)
534{
3bcb09a3 535 int err = -EINVAL;
1c3d85dd
RW
536
537 if (!cpufreq_driver)
3bcb09a3
JF
538 goto out;
539
1c3d85dd 540 if (cpufreq_driver->setpolicy) {
7c4f4539 541 if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
1da177e4 542 *policy = CPUFREQ_POLICY_PERFORMANCE;
3bcb09a3 543 err = 0;
7c4f4539 544 } else if (!strncasecmp(str_governor, "powersave",
e08f5f5b 545 CPUFREQ_NAME_LEN)) {
1da177e4 546 *policy = CPUFREQ_POLICY_POWERSAVE;
3bcb09a3 547 err = 0;
1da177e4 548 }
2e1cc3a5 549 } else {
1da177e4 550 struct cpufreq_governor *t;
3bcb09a3 551
3fc54d37 552 mutex_lock(&cpufreq_governor_mutex);
3bcb09a3 553
42f91fa1 554 t = find_governor(str_governor);
3bcb09a3 555
ea714970 556 if (t == NULL) {
1a8e1463 557 int ret;
ea714970 558
1a8e1463
KC
559 mutex_unlock(&cpufreq_governor_mutex);
560 ret = request_module("cpufreq_%s", str_governor);
561 mutex_lock(&cpufreq_governor_mutex);
ea714970 562
1a8e1463 563 if (ret == 0)
42f91fa1 564 t = find_governor(str_governor);
ea714970
JF
565 }
566
3bcb09a3
JF
567 if (t != NULL) {
568 *governor = t;
569 err = 0;
1da177e4 570 }
3bcb09a3 571
3fc54d37 572 mutex_unlock(&cpufreq_governor_mutex);
1da177e4 573 }
29464f28 574out:
3bcb09a3 575 return err;
1da177e4 576}
1da177e4 577
1da177e4 578/**
e08f5f5b
GS
579 * cpufreq_per_cpu_attr_read() / show_##file_name() -
580 * print out cpufreq information
1da177e4
LT
581 *
582 * Write out information from cpufreq_driver->policy[cpu]; object must be
583 * "unsigned int".
584 */
585
32ee8c3e
DJ
586#define show_one(file_name, object) \
587static ssize_t show_##file_name \
905d77cd 588(struct cpufreq_policy *policy, char *buf) \
32ee8c3e 589{ \
29464f28 590 return sprintf(buf, "%u\n", policy->object); \
1da177e4
LT
591}
592
593show_one(cpuinfo_min_freq, cpuinfo.min_freq);
594show_one(cpuinfo_max_freq, cpuinfo.max_freq);
ed129784 595show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
1da177e4
LT
596show_one(scaling_min_freq, min);
597show_one(scaling_max_freq, max);
c034b02e 598
09347b29 599static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
c034b02e
DB
600{
601 ssize_t ret;
602
603 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
604 ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
605 else
606 ret = sprintf(buf, "%u\n", policy->cur);
607 return ret;
608}
1da177e4 609
037ce839 610static int cpufreq_set_policy(struct cpufreq_policy *policy,
3a3e9e06 611 struct cpufreq_policy *new_policy);
7970e08b 612
1da177e4
LT
613/**
614 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
615 */
616#define store_one(file_name, object) \
617static ssize_t store_##file_name \
905d77cd 618(struct cpufreq_policy *policy, const char *buf, size_t count) \
1da177e4 619{ \
619c144c 620 int ret, temp; \
1da177e4
LT
621 struct cpufreq_policy new_policy; \
622 \
623 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
624 if (ret) \
625 return -EINVAL; \
626 \
29464f28 627 ret = sscanf(buf, "%u", &new_policy.object); \
1da177e4
LT
628 if (ret != 1) \
629 return -EINVAL; \
630 \
619c144c 631 temp = new_policy.object; \
037ce839 632 ret = cpufreq_set_policy(policy, &new_policy); \
619c144c
VH
633 if (!ret) \
634 policy->user_policy.object = temp; \
1da177e4
LT
635 \
636 return ret ? ret : count; \
637}
638
29464f28
DJ
639store_one(scaling_min_freq, min);
640store_one(scaling_max_freq, max);
1da177e4
LT
641
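/*
 * For reference, store_one(scaling_max_freq, max) above expands to roughly
 * this (simplified sketch, error handling elided):
 *
 *	static ssize_t store_scaling_max_freq(struct cpufreq_policy *policy,
 *					      const char *buf, size_t count)
 *	{
 *		struct cpufreq_policy new_policy;
 *
 *		cpufreq_get_policy(&new_policy, policy->cpu);
 *		sscanf(buf, "%u", &new_policy.max);
 *		return cpufreq_set_policy(policy, &new_policy) ?: count;
 *	}
 */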
642/**
643 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
644 */
905d77cd
DJ
645static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
646 char *buf)
1da177e4 647{
d92d50a4 648 unsigned int cur_freq = __cpufreq_get(policy);
1da177e4
LT
649 if (!cur_freq)
650 return sprintf(buf, "<unknown>");
651 return sprintf(buf, "%u\n", cur_freq);
652}
653
1da177e4
LT
654/**
655 * show_scaling_governor - show the current policy for the specified CPU
656 */
905d77cd 657static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
1da177e4 658{
29464f28 659 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
1da177e4
LT
660 return sprintf(buf, "powersave\n");
661 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
662 return sprintf(buf, "performance\n");
663 else if (policy->governor)
4b972f0b 664 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
29464f28 665 policy->governor->name);
1da177e4
LT
666 return -EINVAL;
667}
668
1da177e4
LT
669/**
670 * store_scaling_governor - store policy for the specified CPU
671 */
905d77cd
DJ
672static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
673 const char *buf, size_t count)
1da177e4 674{
5136fa56 675 int ret;
1da177e4
LT
676 char str_governor[16];
677 struct cpufreq_policy new_policy;
678
679 ret = cpufreq_get_policy(&new_policy, policy->cpu);
680 if (ret)
681 return ret;
682
29464f28 683 ret = sscanf(buf, "%15s", str_governor);
1da177e4
LT
684 if (ret != 1)
685 return -EINVAL;
686
e08f5f5b
GS
687 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
688 &new_policy.governor))
1da177e4
LT
689 return -EINVAL;
690
037ce839 691 ret = cpufreq_set_policy(policy, &new_policy);
7970e08b
TR
692
693 policy->user_policy.policy = policy->policy;
694 policy->user_policy.governor = policy->governor;
7970e08b 695
e08f5f5b
GS
696 if (ret)
697 return ret;
698 else
699 return count;
1da177e4
LT
700}
701
702/**
703 * show_scaling_driver - show the cpufreq driver currently loaded
704 */
905d77cd 705static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
1da177e4 706{
1c3d85dd 707 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
1da177e4
LT
708}
709
710/**
711 * show_scaling_available_governors - show the available CPUfreq governors
712 */
905d77cd
DJ
713static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
714 char *buf)
1da177e4
LT
715{
716 ssize_t i = 0;
717 struct cpufreq_governor *t;
718
9c0ebcf7 719 if (!has_target()) {
1da177e4
LT
720 i += sprintf(buf, "performance powersave");
721 goto out;
722 }
723
f7b27061 724 for_each_governor(t) {
29464f28
DJ
725 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
726 - (CPUFREQ_NAME_LEN + 2)))
1da177e4 727 goto out;
4b972f0b 728 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
1da177e4 729 }
7d5e350f 730out:
1da177e4
LT
731 i += sprintf(&buf[i], "\n");
732 return i;
733}
e8628dd0 734
f4fd3797 735ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
1da177e4
LT
736{
737 ssize_t i = 0;
738 unsigned int cpu;
739
835481d9 740 for_each_cpu(cpu, mask) {
1da177e4
LT
741 if (i)
742 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
743 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
744 if (i >= (PAGE_SIZE - 5))
29464f28 745 break;
1da177e4
LT
746 }
747 i += sprintf(&buf[i], "\n");
748 return i;
749}
f4fd3797 750EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
1da177e4 751
e8628dd0
DW
752/**
753 * show_related_cpus - show the CPUs affected by each transition even if
754 * hw coordination is in use
755 */
756static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
757{
f4fd3797 758 return cpufreq_show_cpus(policy->related_cpus, buf);
e8628dd0
DW
759}
760
761/**
762 * show_affected_cpus - show the CPUs affected by each transition
763 */
764static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
765{
f4fd3797 766 return cpufreq_show_cpus(policy->cpus, buf);
e8628dd0
DW
767}
768
9e76988e 769static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
905d77cd 770 const char *buf, size_t count)
9e76988e
VP
771{
772 unsigned int freq = 0;
773 unsigned int ret;
774
879000f9 775 if (!policy->governor || !policy->governor->store_setspeed)
9e76988e
VP
776 return -EINVAL;
777
778 ret = sscanf(buf, "%u", &freq);
779 if (ret != 1)
780 return -EINVAL;
781
782 policy->governor->store_setspeed(policy, freq);
783
784 return count;
785}
786
787static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
788{
879000f9 789 if (!policy->governor || !policy->governor->show_setspeed)
9e76988e
VP
790 return sprintf(buf, "<unsupported>\n");
791
792 return policy->governor->show_setspeed(policy, buf);
793}
1da177e4 794
e2f74f35 795/**
8bf1ac72 796 * show_bios_limit - show the current cpufreq HW/BIOS limitation
e2f74f35
TR
797 */
798static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
799{
800 unsigned int limit;
801 int ret;
1c3d85dd
RW
802 if (cpufreq_driver->bios_limit) {
803 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
e2f74f35
TR
804 if (!ret)
805 return sprintf(buf, "%u\n", limit);
806 }
807 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
808}
809
6dad2a29
BP
810cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
811cpufreq_freq_attr_ro(cpuinfo_min_freq);
812cpufreq_freq_attr_ro(cpuinfo_max_freq);
813cpufreq_freq_attr_ro(cpuinfo_transition_latency);
814cpufreq_freq_attr_ro(scaling_available_governors);
815cpufreq_freq_attr_ro(scaling_driver);
816cpufreq_freq_attr_ro(scaling_cur_freq);
817cpufreq_freq_attr_ro(bios_limit);
818cpufreq_freq_attr_ro(related_cpus);
819cpufreq_freq_attr_ro(affected_cpus);
820cpufreq_freq_attr_rw(scaling_min_freq);
821cpufreq_freq_attr_rw(scaling_max_freq);
822cpufreq_freq_attr_rw(scaling_governor);
823cpufreq_freq_attr_rw(scaling_setspeed);
1da177e4 824
905d77cd 825static struct attribute *default_attrs[] = {
1da177e4
LT
826 &cpuinfo_min_freq.attr,
827 &cpuinfo_max_freq.attr,
ed129784 828 &cpuinfo_transition_latency.attr,
1da177e4
LT
829 &scaling_min_freq.attr,
830 &scaling_max_freq.attr,
831 &affected_cpus.attr,
e8628dd0 832 &related_cpus.attr,
1da177e4
LT
833 &scaling_governor.attr,
834 &scaling_driver.attr,
835 &scaling_available_governors.attr,
9e76988e 836 &scaling_setspeed.attr,
1da177e4
LT
837 NULL
838};
839
29464f28
DJ
840#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
841#define to_attr(a) container_of(a, struct freq_attr, attr)
1da177e4 842
29464f28 843static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
1da177e4 844{
905d77cd
DJ
845 struct cpufreq_policy *policy = to_policy(kobj);
846 struct freq_attr *fattr = to_attr(attr);
1b750e3b 847 ssize_t ret;
6eed9404
VK
848
849 if (!down_read_trylock(&cpufreq_rwsem))
1b750e3b 850 return -EINVAL;
5a01f2e8 851
ad7722da 852 down_read(&policy->rwsem);
5a01f2e8 853
e08f5f5b
GS
854 if (fattr->show)
855 ret = fattr->show(policy, buf);
856 else
857 ret = -EIO;
858
ad7722da 859 up_read(&policy->rwsem);
6eed9404 860 up_read(&cpufreq_rwsem);
1b750e3b 861
1da177e4
LT
862 return ret;
863}
864
905d77cd
DJ
865static ssize_t store(struct kobject *kobj, struct attribute *attr,
866 const char *buf, size_t count)
1da177e4 867{
905d77cd
DJ
868 struct cpufreq_policy *policy = to_policy(kobj);
869 struct freq_attr *fattr = to_attr(attr);
a07530b4 870 ssize_t ret = -EINVAL;
6eed9404 871
4f750c93
SB
872 get_online_cpus();
873
874 if (!cpu_online(policy->cpu))
875 goto unlock;
876
6eed9404 877 if (!down_read_trylock(&cpufreq_rwsem))
4f750c93 878 goto unlock;
5a01f2e8 879
ad7722da 880 down_write(&policy->rwsem);
5a01f2e8 881
e08f5f5b
GS
882 if (fattr->store)
883 ret = fattr->store(policy, buf, count);
884 else
885 ret = -EIO;
886
ad7722da 887 up_write(&policy->rwsem);
6eed9404 888
6eed9404 889 up_read(&cpufreq_rwsem);
4f750c93
SB
890unlock:
891 put_online_cpus();
892
1da177e4
LT
893 return ret;
894}
895
905d77cd 896static void cpufreq_sysfs_release(struct kobject *kobj)
1da177e4 897{
905d77cd 898 struct cpufreq_policy *policy = to_policy(kobj);
2d06d8c4 899 pr_debug("last reference is dropped\n");
1da177e4
LT
900 complete(&policy->kobj_unregister);
901}
902
52cf25d0 903static const struct sysfs_ops sysfs_ops = {
1da177e4
LT
904 .show = show,
905 .store = store,
906};
907
908static struct kobj_type ktype_cpufreq = {
909 .sysfs_ops = &sysfs_ops,
910 .default_attrs = default_attrs,
911 .release = cpufreq_sysfs_release,
912};
913
2361be23
VK
914struct kobject *cpufreq_global_kobject;
915EXPORT_SYMBOL(cpufreq_global_kobject);
916
917static int cpufreq_global_kobject_usage;
918
919int cpufreq_get_global_kobject(void)
920{
921 if (!cpufreq_global_kobject_usage++)
922 return kobject_add(cpufreq_global_kobject,
923 &cpu_subsys.dev_root->kobj, "%s", "cpufreq");
924
925 return 0;
926}
927EXPORT_SYMBOL(cpufreq_get_global_kobject);
928
929void cpufreq_put_global_kobject(void)
930{
931 if (!--cpufreq_global_kobject_usage)
932 kobject_del(cpufreq_global_kobject);
933}
934EXPORT_SYMBOL(cpufreq_put_global_kobject);
935
936int cpufreq_sysfs_create_file(const struct attribute *attr)
937{
938 int ret = cpufreq_get_global_kobject();
939
940 if (!ret) {
941 ret = sysfs_create_file(cpufreq_global_kobject, attr);
942 if (ret)
943 cpufreq_put_global_kobject();
944 }
945
946 return ret;
947}
948EXPORT_SYMBOL(cpufreq_sysfs_create_file);
949
950void cpufreq_sysfs_remove_file(const struct attribute *attr)
951{
952 sysfs_remove_file(cpufreq_global_kobject, attr);
953 cpufreq_put_global_kobject();
954}
955EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
956
19d6f7ec 957/* symlink affected CPUs */
308b60e7 958static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
19d6f7ec
DJ
959{
960 unsigned int j;
961 int ret = 0;
962
963 for_each_cpu(j, policy->cpus) {
8a25a2fd 964 struct device *cpu_dev;
19d6f7ec 965
308b60e7 966 if (j == policy->cpu)
19d6f7ec 967 continue;
19d6f7ec 968
e8fdde10 969 pr_debug("Adding link for CPU: %u\n", j);
8a25a2fd
KS
970 cpu_dev = get_cpu_device(j);
971 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
19d6f7ec 972 "cpufreq");
71c3461e
RW
973 if (ret)
974 break;
19d6f7ec
DJ
975 }
976 return ret;
977}
978
308b60e7 979static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
8a25a2fd 980 struct device *dev)
909a694e
DJ
981{
982 struct freq_attr **drv_attr;
909a694e 983 int ret = 0;
909a694e 984
909a694e 985 /* set up files for this cpu device */
1c3d85dd 986 drv_attr = cpufreq_driver->attr;
f13f1184 987 while (drv_attr && *drv_attr) {
909a694e
DJ
988 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
989 if (ret)
6d4e81ed 990 return ret;
909a694e
DJ
991 drv_attr++;
992 }
1c3d85dd 993 if (cpufreq_driver->get) {
909a694e
DJ
994 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
995 if (ret)
6d4e81ed 996 return ret;
909a694e 997 }
c034b02e
DB
998
999 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
1000 if (ret)
6d4e81ed 1001 return ret;
c034b02e 1002
1c3d85dd 1003 if (cpufreq_driver->bios_limit) {
e2f74f35
TR
1004 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
1005 if (ret)
6d4e81ed 1006 return ret;
e2f74f35 1007 }
909a694e 1008
6d4e81ed 1009 return cpufreq_add_dev_symlink(policy);
e18f1682
SB
1010}
1011
1012static void cpufreq_init_policy(struct cpufreq_policy *policy)
1013{
6e2c89d1 1014 struct cpufreq_governor *gov = NULL;
e18f1682
SB
1015 struct cpufreq_policy new_policy;
1016 int ret = 0;
1017
d5b73cd8 1018 memcpy(&new_policy, policy, sizeof(*policy));
a27a9ab7 1019
6e2c89d1 1020 /* Update governor of new_policy to the governor used before hotplug */
42f91fa1 1021 gov = find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu));
6e2c89d1 1022 if (gov)
1023 pr_debug("Restoring governor %s for cpu %d\n",
1024 gov->name, policy->cpu);
1025 else
1026 gov = CPUFREQ_DEFAULT_GOVERNOR;
1027
1028 new_policy.governor = gov;
1029
a27a9ab7
JB
1030 /* Use the default policy if it's valid. */
1031 if (cpufreq_driver->setpolicy)
6e2c89d1 1032 cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);
ecf7e461
DJ
1033
1034 /* set default policy */
037ce839 1035 ret = cpufreq_set_policy(policy, &new_policy);
ecf7e461 1036 if (ret) {
2d06d8c4 1037 pr_debug("setting policy failed\n");
1c3d85dd
RW
1038 if (cpufreq_driver->exit)
1039 cpufreq_driver->exit(policy);
ecf7e461 1040 }
909a694e
DJ
1041}
1042
d8d3b471 1043static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
42f921a6 1044 unsigned int cpu, struct device *dev)
fcf80582 1045{
9c0ebcf7 1046 int ret = 0;
fcf80582 1047
bb29ae15
VK
1048 /* Has this CPU been taken care of already? */
1049 if (cpumask_test_cpu(cpu, policy->cpus))
1050 return 0;
1051
9c0ebcf7 1052 if (has_target()) {
3de9bdeb
VK
1053 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1054 if (ret) {
1055 pr_err("%s: Failed to stop governor\n", __func__);
1056 return ret;
1057 }
1058 }
fcf80582 1059
ad7722da 1060 down_write(&policy->rwsem);
fcf80582 1061 cpumask_set_cpu(cpu, policy->cpus);
ad7722da 1062 up_write(&policy->rwsem);
2eaa3e2d 1063
9c0ebcf7 1064 if (has_target()) {
e5c87b76
SK
1065 ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
1066 if (!ret)
1067 ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
1068
1069 if (ret) {
3de9bdeb
VK
1070 pr_err("%s: Failed to start governor\n", __func__);
1071 return ret;
1072 }
820c6ca2 1073 }
fcf80582 1074
42f921a6 1075 return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
fcf80582 1076}
1da177e4 1077
8414809c
SB
1078static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
1079{
1080 struct cpufreq_policy *policy;
1081 unsigned long flags;
1082
44871c9c 1083 read_lock_irqsave(&cpufreq_driver_lock, flags);
8414809c
SB
1084
1085 policy = per_cpu(cpufreq_cpu_data_fallback, cpu);
1086
44871c9c 1087 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
8414809c 1088
09712f55
GU
1089 if (policy)
1090 policy->governor = NULL;
6e2c89d1 1091
8414809c
SB
1092 return policy;
1093}
1094
e9698cc5
SB
1095static struct cpufreq_policy *cpufreq_policy_alloc(void)
1096{
1097 struct cpufreq_policy *policy;
1098
1099 policy = kzalloc(sizeof(*policy), GFP_KERNEL);
1100 if (!policy)
1101 return NULL;
1102
1103 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
1104 goto err_free_policy;
1105
1106 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
1107 goto err_free_cpumask;
1108
c88a1f8b 1109 INIT_LIST_HEAD(&policy->policy_list);
ad7722da 1110 init_rwsem(&policy->rwsem);
12478cf0
SB
1111 spin_lock_init(&policy->transition_lock);
1112 init_waitqueue_head(&policy->transition_wait);
818c5712
VK
1113 init_completion(&policy->kobj_unregister);
1114 INIT_WORK(&policy->update, handle_update);
ad7722da 1115
e9698cc5
SB
1116 return policy;
1117
1118err_free_cpumask:
1119 free_cpumask_var(policy->cpus);
1120err_free_policy:
1121 kfree(policy);
1122
1123 return NULL;
1124}
1125
42f921a6
VK
1126static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
1127{
1128 struct kobject *kobj;
1129 struct completion *cmp;
1130
fcd7af91
VK
1131 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1132 CPUFREQ_REMOVE_POLICY, policy);
1133
42f921a6
VK
1134 down_read(&policy->rwsem);
1135 kobj = &policy->kobj;
1136 cmp = &policy->kobj_unregister;
1137 up_read(&policy->rwsem);
1138 kobject_put(kobj);
1139
1140 /*
1141 * We need to make sure that the underlying kobj is
1142 * actually not referenced anymore by anybody before we
1143 * proceed with unloading.
1144 */
1145 pr_debug("waiting for dropping of refcount\n");
1146 wait_for_completion(cmp);
1147 pr_debug("wait complete\n");
1148}
1149
e9698cc5
SB
1150static void cpufreq_policy_free(struct cpufreq_policy *policy)
1151{
988bed09
VK
1152 unsigned long flags;
1153 int cpu;
1154
1155 /* Remove policy from list */
1156 write_lock_irqsave(&cpufreq_driver_lock, flags);
1157 list_del(&policy->policy_list);
1158
1159 for_each_cpu(cpu, policy->related_cpus)
1160 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1161 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1162
e9698cc5
SB
1163 free_cpumask_var(policy->related_cpus);
1164 free_cpumask_var(policy->cpus);
1165 kfree(policy);
1166}
1167
1bfb425b
VK
1168static int update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu,
1169 struct device *cpu_dev)
0d66b91e 1170{
1bfb425b
VK
1171 int ret;
1172
99ec899e 1173 if (WARN_ON(cpu == policy->cpu))
1bfb425b
VK
1174 return 0;
1175
1176 /* Move kobject to the new policy->cpu */
1177 ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
1178 if (ret) {
1179 pr_err("%s: Failed to move kobj: %d\n", __func__, ret);
1180 return ret;
1181 }
cb38ed5c 1182
ad7722da 1183 down_write(&policy->rwsem);
0d66b91e 1184 policy->cpu = cpu;
ad7722da 1185 up_write(&policy->rwsem);
8efd5765 1186
1bfb425b 1187 return 0;
0d66b91e
SB
1188}
1189
23faf0b7
VK
1190/**
1191 * cpufreq_add_dev - add a CPU device
1192 *
1193 * Adds the cpufreq interface for a CPU device.
1194 *
1195 * The Oracle says: try running cpufreq registration/unregistration concurrently
1196 * with cpu hotplugging and all hell will break loose. Tried to clean this
1197 * mess up, but more thorough testing is needed. - Mathieu
1198 */
1199static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1da177e4 1200{
fcf80582 1201 unsigned int j, cpu = dev->id;
65922465 1202 int ret = -ENOMEM;
7f0c020a 1203 struct cpufreq_policy *policy;
1da177e4 1204 unsigned long flags;
96bbbe4a 1205 bool recover_policy = cpufreq_suspended;
1da177e4 1206
c32b6b8e
AR
1207 if (cpu_is_offline(cpu))
1208 return 0;
1209
2d06d8c4 1210 pr_debug("adding CPU %u\n", cpu);
1da177e4 1211
6eed9404
VK
1212 if (!down_read_trylock(&cpufreq_rwsem))
1213 return 0;
1214
bb29ae15 1215 /* Check if this CPU already has a policy to manage it */
0d1857a1 1216 read_lock_irqsave(&cpufreq_driver_lock, flags);
f963735a 1217 for_each_active_policy(policy) {
7f0c020a 1218 if (cpumask_test_cpu(cpu, policy->related_cpus)) {
0d1857a1 1219 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
7f0c020a 1220 ret = cpufreq_add_policy_cpu(policy, cpu, dev);
6eed9404
VK
1221 up_read(&cpufreq_rwsem);
1222 return ret;
2eaa3e2d 1223 }
fcf80582 1224 }
0d1857a1 1225 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4 1226
72368d12
RW
1227 /*
1228 * Restore the saved policy when doing light-weight init and fall back
1229 * to the full init if that fails.
1230 */
96bbbe4a 1231 policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
72368d12 1232 if (!policy) {
96bbbe4a 1233 recover_policy = false;
8414809c 1234 policy = cpufreq_policy_alloc();
72368d12
RW
1235 if (!policy)
1236 goto nomem_out;
1237 }
0d66b91e
SB
1238
1239 /*
1240 * In the resume path, since we restore a saved policy, the assignment
1241 * to policy->cpu is like an update of the existing policy, rather than
1242 * the creation of a brand new one. So we need to perform this update
1243 * by invoking update_policy_cpu().
1244 */
1bfb425b
VK
1245 if (recover_policy && cpu != policy->cpu)
1246 WARN_ON(update_policy_cpu(policy, cpu, dev));
1247 else
0d66b91e
SB
1248 policy->cpu = cpu;
1249
835481d9 1250 cpumask_copy(policy->cpus, cpumask_of(cpu));
1da177e4 1251
1da177e4
LT
1252 /* call driver. From then on the cpufreq must be able
1253 * to accept all calls to ->verify and ->setpolicy for this CPU
1254 */
1c3d85dd 1255 ret = cpufreq_driver->init(policy);
1da177e4 1256 if (ret) {
2d06d8c4 1257 pr_debug("initialization failed\n");
2eaa3e2d 1258 goto err_set_policy_cpu;
1da177e4 1259 }
643ae6e8 1260
6d4e81ed
TV
1261 down_write(&policy->rwsem);
1262
5a7e56a5
VK
1263 /* related cpus should at least contain policy->cpus */
1264 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
1265
1266 /*
1267 * affected cpus must always be the ones that are online. We aren't
1268 * managing offline cpus here.
1269 */
1270 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1271
96bbbe4a 1272 if (!recover_policy) {
5a7e56a5
VK
1273 policy->user_policy.min = policy->min;
1274 policy->user_policy.max = policy->max;
6d4e81ed
TV
1275
1276 /* prepare interface data */
1277 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
1278 &dev->kobj, "cpufreq");
1279 if (ret) {
1280 pr_err("%s: failed to init policy->kobj: %d\n",
1281 __func__, ret);
1282 goto err_init_policy_kobj;
1283 }
5a7e56a5 1284
988bed09
VK
1285 write_lock_irqsave(&cpufreq_driver_lock, flags);
1286 for_each_cpu(j, policy->related_cpus)
1287 per_cpu(cpufreq_cpu_data, j) = policy;
1288 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1289 }
652ed95d 1290
2ed99e39 1291 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
da60ce9f
VK
1292 policy->cur = cpufreq_driver->get(policy->cpu);
1293 if (!policy->cur) {
1294 pr_err("%s: ->get() failed\n", __func__);
1295 goto err_get_freq;
1296 }
1297 }
1298
d3916691
VK
1299 /*
1300 * Sometimes boot loaders set the CPU frequency to a value outside of the
1301 * frequency table present with the cpufreq core. In such cases the CPU
1302 * might be unstable if it has to run at that frequency for a long time,
1303 * so it's better to set it to a frequency which is specified in the
1304 * freq-table. This also makes cpufreq stats inconsistent, as
1305 * cpufreq-stats would fail to register because the current frequency of
1306 * the CPU isn't found in the freq-table.
1307 *
1308 * Because we don't want this change to affect the boot process badly, we go
1309 * for the next freq which is >= policy->cur ('cur' must be set by now,
1310 * otherwise we will end up setting freq to lowest of the table as 'cur'
1311 * is initialized to zero).
1312 *
1313 * We are passing target-freq as "policy->cur - 1" otherwise
1314 * __cpufreq_driver_target() would simply fail, as policy->cur will be
1315 * equal to target-freq.
1316 */
1317 if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
1318 && has_target()) {
1319 /* Are we running at an unknown frequency? */
1320 ret = cpufreq_frequency_table_get_index(policy, policy->cur);
1321 if (ret == -EINVAL) {
1322 /* Warn user and fix it */
1323 pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
1324 __func__, policy->cpu, policy->cur);
1325 ret = __cpufreq_driver_target(policy, policy->cur - 1,
1326 CPUFREQ_RELATION_L);
1327
1328 /*
1329 * Reaching here a few seconds after boot may not
1330 * mean that the system will remain stable at the
1331 * "unknown" frequency for a longer duration. Hence, a BUG_ON().
1332 */
1333 BUG_ON(ret);
1334 pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
1335 __func__, policy->cpu, policy->cur);
1336 }
1337 }
1338
a1531acd
TR
1339 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1340 CPUFREQ_START, policy);
1341
96bbbe4a 1342 if (!recover_policy) {
308b60e7 1343 ret = cpufreq_add_dev_interface(policy, dev);
a82fab29
SB
1344 if (ret)
1345 goto err_out_unregister;
fcd7af91
VK
1346 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1347 CPUFREQ_CREATE_POLICY, policy);
8ff69732 1348
988bed09
VK
1349 write_lock_irqsave(&cpufreq_driver_lock, flags);
1350 list_add(&policy->policy_list, &cpufreq_policy_list);
1351 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1352 }
9515f4d6 1353
e18f1682
SB
1354 cpufreq_init_policy(policy);
1355
96bbbe4a 1356 if (!recover_policy) {
08fd8c1c
VK
1357 policy->user_policy.policy = policy->policy;
1358 policy->user_policy.governor = policy->governor;
1359 }
4e97b631 1360 up_write(&policy->rwsem);
08fd8c1c 1361
038c5b3e 1362 kobject_uevent(&policy->kobj, KOBJ_ADD);
7c45cf31 1363
6eed9404
VK
1364 up_read(&cpufreq_rwsem);
1365
7c45cf31
VK
1366 /* Callback for handling stuff after policy is ready */
1367 if (cpufreq_driver->ready)
1368 cpufreq_driver->ready(policy);
1369
2d06d8c4 1370 pr_debug("initialization complete\n");
87c32271 1371
1da177e4
LT
1372 return 0;
1373
1da177e4 1374err_out_unregister:
652ed95d 1375err_get_freq:
6d4e81ed
TV
1376 if (!recover_policy) {
1377 kobject_put(&policy->kobj);
1378 wait_for_completion(&policy->kobj_unregister);
1379 }
1380err_init_policy_kobj:
7106e02b
PB
1381 up_write(&policy->rwsem);
1382
da60ce9f
VK
1383 if (cpufreq_driver->exit)
1384 cpufreq_driver->exit(policy);
2eaa3e2d 1385err_set_policy_cpu:
96bbbe4a 1386 if (recover_policy) {
72368d12
RW
1387 /* Do not leave stale fallback data behind. */
1388 per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
42f921a6 1389 cpufreq_policy_put_kobj(policy);
72368d12 1390 }
e9698cc5 1391 cpufreq_policy_free(policy);
42f921a6 1392
1da177e4 1393nomem_out:
6eed9404
VK
1394 up_read(&cpufreq_rwsem);
1395
1da177e4
LT
1396 return ret;
1397}
1398
cedb70af 1399static int __cpufreq_remove_dev_prepare(struct device *dev,
96bbbe4a 1400 struct subsys_interface *sif)
1da177e4 1401{
f9ba680d 1402 unsigned int cpu = dev->id, cpus;
1bfb425b 1403 int ret;
1da177e4 1404 unsigned long flags;
3a3e9e06 1405 struct cpufreq_policy *policy;
1da177e4 1406
b8eed8af 1407 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1da177e4 1408
0d1857a1 1409 write_lock_irqsave(&cpufreq_driver_lock, flags);
2eaa3e2d 1410
988bed09 1411 policy = cpufreq_cpu_get_raw(cpu);
2eaa3e2d 1412
8414809c 1413 /* Save the policy somewhere when doing a light-weight tear-down */
96bbbe4a 1414 if (cpufreq_suspended)
3a3e9e06 1415 per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;
8414809c 1416
0d1857a1 1417 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4 1418
3a3e9e06 1419 if (!policy) {
b8eed8af 1420 pr_debug("%s: No cpu_data found\n", __func__);
1da177e4
LT
1421 return -EINVAL;
1422 }
1da177e4 1423
9c0ebcf7 1424 if (has_target()) {
3de9bdeb
VK
1425 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1426 if (ret) {
1427 pr_err("%s: Failed to stop governor\n", __func__);
1428 return ret;
1429 }
1da177e4 1430
fa69e33f 1431 strncpy(per_cpu(cpufreq_cpu_governor, cpu),
3a3e9e06 1432 policy->governor->name, CPUFREQ_NAME_LEN);
db5f2995 1433 }
1da177e4 1434
ad7722da 1435 down_read(&policy->rwsem);
3a3e9e06 1436 cpus = cpumask_weight(policy->cpus);
ad7722da 1437 up_read(&policy->rwsem);
084f3493 1438
61173f25 1439 if (cpu != policy->cpu) {
6964d91d 1440 sysfs_remove_link(&dev->kobj, "cpufreq");
73bf0fc2 1441 } else if (cpus > 1) {
1bfb425b
VK
1442 /* Nominate new CPU */
1443 int new_cpu = cpumask_any_but(policy->cpus, cpu);
1444 struct device *cpu_dev = get_cpu_device(new_cpu);
a82fab29 1445
1bfb425b
VK
1446 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
1447 ret = update_policy_cpu(policy, new_cpu, cpu_dev);
1448 if (ret) {
1449 if (sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
1450 "cpufreq"))
1451 pr_err("%s: Failed to restore kobj link to cpu:%d\n",
1452 __func__, cpu_dev->id);
1453 return ret;
1da177e4 1454 }
1bfb425b
VK
1455
1456 if (!cpufreq_suspended)
1457 pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
1458 __func__, new_cpu, cpu);
789ca243 1459 } else if (cpufreq_driver->stop_cpu) {
367dc4aa 1460 cpufreq_driver->stop_cpu(policy);
1da177e4 1461 }
1da177e4 1462
cedb70af
SB
1463 return 0;
1464}
1465
1466static int __cpufreq_remove_dev_finish(struct device *dev,
96bbbe4a 1467 struct subsys_interface *sif)
cedb70af 1468{
988bed09 1469 unsigned int cpu = dev->id;
cedb70af 1470 int ret;
988bed09 1471 struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
cedb70af
SB
1472
1473 if (!policy) {
1474 pr_debug("%s: No cpu_data found\n", __func__);
1475 return -EINVAL;
1476 }
1477
ad7722da 1478 down_write(&policy->rwsem);
303ae723 1479 cpumask_clear_cpu(cpu, policy->cpus);
ad7722da 1480 up_write(&policy->rwsem);
cedb70af 1481
b8eed8af 1482 /* If cpu is last user of policy, free policy */
988bed09 1483 if (policy_is_inactive(policy)) {
9c0ebcf7 1484 if (has_target()) {
3de9bdeb
VK
1485 ret = __cpufreq_governor(policy,
1486 CPUFREQ_GOV_POLICY_EXIT);
1487 if (ret) {
1488 pr_err("%s: Failed to exit governor\n",
e837f9b5 1489 __func__);
3de9bdeb
VK
1490 return ret;
1491 }
edab2fbc 1492 }
2a998599 1493
96bbbe4a 1494 if (!cpufreq_suspended)
42f921a6 1495 cpufreq_policy_put_kobj(policy);
7d26e2d5 1496
8414809c
SB
1497 /*
1498 * Perform the ->exit() even during light-weight tear-down,
1499 * since this is a core component, and is essential for the
1500 * subsequent light-weight ->init() to succeed.
b8eed8af 1501 */
1c3d85dd 1502 if (cpufreq_driver->exit)
3a3e9e06 1503 cpufreq_driver->exit(policy);
27ecddc2 1504
96bbbe4a 1505 if (!cpufreq_suspended)
3a3e9e06 1506 cpufreq_policy_free(policy);
e5c87b76
SK
1507 } else if (has_target()) {
1508 ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
1509 if (!ret)
1510 ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
1511
1512 if (ret) {
1513 pr_err("%s: Failed to start governor\n", __func__);
1514 return ret;
2a998599 1515 }
27ecddc2 1516 }
1da177e4 1517
1da177e4
LT
1518 return 0;
1519}
1520
cedb70af 1521/**
27a862e9 1522 * cpufreq_remove_dev - remove a CPU device
cedb70af
SB
1523 *
1524 * Removes the cpufreq interface for a CPU device.
cedb70af 1525 */
8a25a2fd 1526static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
5a01f2e8 1527{
8a25a2fd 1528 unsigned int cpu = dev->id;
27a862e9 1529 int ret;
ec28297a
VP
1530
1531 if (cpu_is_offline(cpu))
1532 return 0;
1533
96bbbe4a 1534 ret = __cpufreq_remove_dev_prepare(dev, sif);
27a862e9
VK
1535
1536 if (!ret)
96bbbe4a 1537 ret = __cpufreq_remove_dev_finish(dev, sif);
27a862e9
VK
1538
1539 return ret;
5a01f2e8
VP
1540}
1541
65f27f38 1542static void handle_update(struct work_struct *work)
1da177e4 1543{
65f27f38
DH
1544 struct cpufreq_policy *policy =
1545 container_of(work, struct cpufreq_policy, update);
1546 unsigned int cpu = policy->cpu;
2d06d8c4 1547 pr_debug("handle_update for cpu %u called\n", cpu);
1da177e4
LT
1548 cpufreq_update_policy(cpu);
1549}
1550
1551/**
bb176f7d
VK
1552 * cpufreq_out_of_sync - If actual and saved CPU frequency differ, we're
1553 * in deep trouble.
a1e1dc41 1554 * @policy: policy managing CPUs
1da177e4
LT
1555 * @new_freq: CPU frequency the CPU actually runs at
1556 *
29464f28
DJ
1557 * We adjust to the current frequency first, and clean up later by either
1558 * calling cpufreq_update_policy() or scheduling handle_update().
1da177e4 1559 */
a1e1dc41 1560static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
e08f5f5b 1561 unsigned int new_freq)
1da177e4
LT
1562{
1563 struct cpufreq_freqs freqs;
b43a7ffb 1564
e837f9b5 1565 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
a1e1dc41 1566 policy->cur, new_freq);
1da177e4 1567
a1e1dc41 1568 freqs.old = policy->cur;
1da177e4 1569 freqs.new = new_freq;
b43a7ffb 1570
8fec051e
VK
1571 cpufreq_freq_transition_begin(policy, &freqs);
1572 cpufreq_freq_transition_end(policy, &freqs, 0);
1da177e4
LT
1573}
1574
32ee8c3e 1575/**
4ab70df4 1576 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
95235ca2
VP
1577 * @cpu: CPU number
1578 *
1579 * This is the last known freq, without actually getting it from the driver.
1580 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1581 */
1582unsigned int cpufreq_quick_get(unsigned int cpu)
1583{
9e21ba8b 1584 struct cpufreq_policy *policy;
e08f5f5b 1585 unsigned int ret_freq = 0;
95235ca2 1586
1c3d85dd
RW
1587 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1588 return cpufreq_driver->get(cpu);
9e21ba8b
DB
1589
1590 policy = cpufreq_cpu_get(cpu);
95235ca2 1591 if (policy) {
e08f5f5b 1592 ret_freq = policy->cur;
95235ca2
VP
1593 cpufreq_cpu_put(policy);
1594 }
1595
4d34a67d 1596 return ret_freq;
95235ca2
VP
1597}
1598EXPORT_SYMBOL(cpufreq_quick_get);
1599
3d737108
JB
1600/**
1601 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1602 * @cpu: CPU number
1603 *
1604 * Just return the max possible frequency for a given CPU.
1605 */
1606unsigned int cpufreq_quick_get_max(unsigned int cpu)
1607{
1608 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1609 unsigned int ret_freq = 0;
1610
1611 if (policy) {
1612 ret_freq = policy->max;
1613 cpufreq_cpu_put(policy);
1614 }
1615
1616 return ret_freq;
1617}
1618EXPORT_SYMBOL(cpufreq_quick_get_max);
1619
d92d50a4 1620static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
1da177e4 1621{
e08f5f5b 1622 unsigned int ret_freq = 0;
5800043b 1623
1c3d85dd 1624 if (!cpufreq_driver->get)
4d34a67d 1625 return ret_freq;
1da177e4 1626
d92d50a4 1627 ret_freq = cpufreq_driver->get(policy->cpu);
1da177e4 1628
e08f5f5b 1629 if (ret_freq && policy->cur &&
1c3d85dd 1630 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
e08f5f5b
GS
1631 /* verify no discrepancy between actual and
1632 saved value exists */
1633 if (unlikely(ret_freq != policy->cur)) {
a1e1dc41 1634 cpufreq_out_of_sync(policy, ret_freq);
1da177e4
LT
1635 schedule_work(&policy->update);
1636 }
1637 }
1638
4d34a67d 1639 return ret_freq;
5a01f2e8 1640}
1da177e4 1641
5a01f2e8
VP
1642/**
1643 * cpufreq_get - get the current CPU frequency (in kHz)
1644 * @cpu: CPU number
1645 *
1646 * Get the CPU current (static) CPU frequency
1647 */
1648unsigned int cpufreq_get(unsigned int cpu)
1649{
999976e0 1650 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
5a01f2e8 1651 unsigned int ret_freq = 0;
5a01f2e8 1652
999976e0
AP
1653 if (policy) {
1654 down_read(&policy->rwsem);
d92d50a4 1655 ret_freq = __cpufreq_get(policy);
999976e0 1656 up_read(&policy->rwsem);
5a01f2e8 1657
999976e0
AP
1658 cpufreq_cpu_put(policy);
1659 }
6eed9404 1660
4d34a67d 1661 return ret_freq;
1da177e4
LT
1662}
1663EXPORT_SYMBOL(cpufreq_get);
1664
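/*
 * Caller-side sketch: cpufreq_get() handles the policy reference counting
 * internally, so a plain call suffices (0 means the frequency is unknown):
 *
 *	unsigned int khz = cpufreq_get(cpu);
 *
 *	if (khz)
 *		pr_info("cpu%u runs at %u kHz\n", cpu, khz);
 */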
8a25a2fd
KS
1665static struct subsys_interface cpufreq_interface = {
1666 .name = "cpufreq",
1667 .subsys = &cpu_subsys,
1668 .add_dev = cpufreq_add_dev,
1669 .remove_dev = cpufreq_remove_dev,
e00e56df
RW
1670};
1671
e28867ea
VK
1672/*
1673 * In case the platform wants some specific frequency to be configured
1674 * during suspend.
1675 */
1676int cpufreq_generic_suspend(struct cpufreq_policy *policy)
1677{
1678 int ret;
1679
1680 if (!policy->suspend_freq) {
1681 pr_err("%s: suspend_freq can't be zero\n", __func__);
1682 return -EINVAL;
1683 }
1684
1685 pr_debug("%s: Setting suspend-freq: %u\n", __func__,
1686 policy->suspend_freq);
1687
1688 ret = __cpufreq_driver_target(policy, policy->suspend_freq,
1689 CPUFREQ_RELATION_H);
1690 if (ret)
1691 pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
1692 __func__, policy->suspend_freq, ret);
1693
1694 return ret;
1695}
1696EXPORT_SYMBOL(cpufreq_generic_suspend);
1697
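/*
 * Wiring sketch for cpufreq_generic_suspend() (hypothetical "foo" driver):
 * the driver's ->init() picks a safe frequency, e.g.
 *
 *	policy->suspend_freq = foo_freq_table[0].frequency;
 *
 * and its cpufreq_driver sets
 *
 *	.suspend = cpufreq_generic_suspend,
 *
 * so cpufreq_suspend() below pins every policy to suspend_freq before the
 * platform stops accepting frequency changes.
 */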
42d4dc3f 1698/**
2f0aea93 1699 * cpufreq_suspend() - Suspend CPUFreq governors
e00e56df 1700 *
2f0aea93
VK
1701 * Called during system wide Suspend/Hibernate cycles for suspending governors
1702 * as some platforms can't change frequency after this point in suspend cycle.
1703 * Because some of the devices (like i2c, regulators, etc.) used for
1704 * changing frequency are suspended quickly after this point.
42d4dc3f 1705 */
2f0aea93 1706void cpufreq_suspend(void)
42d4dc3f 1707{
3a3e9e06 1708 struct cpufreq_policy *policy;
42d4dc3f 1709
2f0aea93
VK
1710 if (!cpufreq_driver)
1711 return;
42d4dc3f 1712
2f0aea93 1713 if (!has_target())
b1b12bab 1714 goto suspend;
42d4dc3f 1715
2f0aea93
VK
1716 pr_debug("%s: Suspending Governors\n", __func__);
1717
f963735a 1718 for_each_active_policy(policy) {
2f0aea93
VK
1719 if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
1720 pr_err("%s: Failed to stop governor for policy: %p\n",
1721 __func__, policy);
1722 else if (cpufreq_driver->suspend
1723 && cpufreq_driver->suspend(policy))
1724 pr_err("%s: Failed to suspend driver: %p\n", __func__,
1725 policy);
42d4dc3f 1726 }
b1b12bab
VK
1727
1728suspend:
1729 cpufreq_suspended = true;
42d4dc3f
BH
1730}

/**
 * cpufreq_resume() - Resume CPUFreq governors
 *
 * Called during system-wide suspend/hibernate cycles to resume governors
 * that were suspended with cpufreq_suspend().
 */
void cpufreq_resume(void)
{
	struct cpufreq_policy *policy;

	if (!cpufreq_driver)
		return;

	cpufreq_suspended = false;

	if (!has_target())
		return;

	pr_debug("%s: Resuming Governors\n", __func__);

	for_each_active_policy(policy) {
		if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
			pr_err("%s: Failed to resume driver: %p\n", __func__,
				policy);
		else if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
		    || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
			pr_err("%s: Failed to start governor for policy: %p\n",
				__func__, policy);
	}

	/*
	 * Schedule a call to cpufreq_update_policy() for the first online
	 * CPU, as that one won't be hotplugged out on suspend. It will
	 * verify that the current frequency is in sync with what we believe
	 * it to be.
	 */
	policy = cpufreq_cpu_get_raw(cpumask_first(cpu_online_mask));
	if (WARN_ON(!policy))
		return;

	schedule_work(&policy->update);
}

/**
 * cpufreq_get_current_driver - return current driver's name
 *
 * Return the name string of the currently loaded cpufreq driver
 * or NULL, if none.
 */
const char *cpufreq_get_current_driver(void)
{
	if (cpufreq_driver)
		return cpufreq_driver->name;

	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
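
/*
 * Illustrative sketch, not part of the original file: defensive use of
 * cpufreq_get_current_driver(), which returns NULL with no driver loaded.
 */
static void __maybe_unused example_report_driver(void)
{
	const char *name = cpufreq_get_current_driver();

	pr_info("active cpufreq driver: %s\n", name ? name : "<none>");
}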

/**
 * cpufreq_get_driver_data - return current driver data
 *
 * Return the private data of the currently loaded cpufreq
 * driver, or NULL if no cpufreq driver is loaded.
 */
void *cpufreq_get_driver_data(void)
{
	if (cpufreq_driver)
		return cpufreq_driver->driver_data;

	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);

/*********************************************************************
 *                     NOTIFIER LISTS INTERFACE                      *
 *********************************************************************/

/**
 * cpufreq_register_notifier - register a driver with cpufreq
 * @nb: notifier function to register
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Add a driver to one of two lists: either a list of drivers that
 * are notified about clock rate changes (once before and once after
 * the transition), or a list of drivers that are notified about
 * changes in cpufreq policy.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_register.
 */
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	WARN_ON(!init_cpufreq_transition_notifier_list_called);

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_register(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_register(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);
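
/*
 * Illustrative sketch, not part of the original file: a minimal
 * transition notifier. The callback and block names are hypothetical;
 * for CPUFREQ_TRANSITION_NOTIFIER, @val is CPUFREQ_PRECHANGE or
 * CPUFREQ_POSTCHANGE and @data points to a struct cpufreq_freqs.
 */
static int example_transition_cb(struct notifier_block *nb,
				 unsigned long val, void *data)
{
	struct cpufreq_freqs *freqs = data;

	if (val == CPUFREQ_POSTCHANGE)
		pr_debug("cpu%u: %u -> %u kHz\n", freqs->cpu,
			 freqs->old, freqs->new);

	return NOTIFY_OK;
}

static struct notifier_block example_transition_nb __maybe_unused = {
	.notifier_call = example_transition_cb,
};

/*
 * Typically registered from the client's init path:
 *	cpufreq_register_notifier(&example_transition_nb,
 *				  CPUFREQ_TRANSITION_NOTIFIER);
 */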

/**
 * cpufreq_unregister_notifier - unregister a driver with cpufreq
 * @nb: notifier block to be unregistered
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Remove a driver from the CPU frequency notifier list.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_unregister.
 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_unregister(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_unregister(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);


/*********************************************************************
 *                              GOVERNORS                            *
 *********************************************************************/

/* Must set freqs->new to intermediate frequency */
static int __target_intermediate(struct cpufreq_policy *policy,
				 struct cpufreq_freqs *freqs, int index)
{
	int ret;

	freqs->new = cpufreq_driver->get_intermediate(policy, index);

	/* We don't need to switch to an intermediate freq */
	if (!freqs->new)
		return 0;

	pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
		 __func__, policy->cpu, freqs->old, freqs->new);

	cpufreq_freq_transition_begin(policy, freqs);
	ret = cpufreq_driver->target_intermediate(policy, index);
	cpufreq_freq_transition_end(policy, freqs, ret);

	if (ret)
		pr_err("%s: Failed to change to intermediate frequency: %d\n",
		       __func__, ret);

	return ret;
}

static int __target_index(struct cpufreq_policy *policy,
			  struct cpufreq_frequency_table *freq_table, int index)
{
	struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
	unsigned int intermediate_freq = 0;
	int retval = -EINVAL;
	bool notify;

	notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
	if (notify) {
		/* Handle switching to intermediate frequency */
		if (cpufreq_driver->get_intermediate) {
			retval = __target_intermediate(policy, &freqs, index);
			if (retval)
				return retval;

			intermediate_freq = freqs.new;
			/* Set old freq to intermediate */
			if (intermediate_freq)
				freqs.old = freqs.new;
		}

		freqs.new = freq_table[index].frequency;
		pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
			 __func__, policy->cpu, freqs.old, freqs.new);

		cpufreq_freq_transition_begin(policy, &freqs);
	}

	retval = cpufreq_driver->target_index(policy, index);
	if (retval)
		pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
		       retval);

	if (notify) {
		cpufreq_freq_transition_end(policy, &freqs, retval);

		/*
		 * Failed after setting to intermediate freq? The driver
		 * should have reverted back to the initial frequency and
		 * so should we. Check here for intermediate_freq instead
		 * of get_intermediate, in case we haven't switched to the
		 * intermediate freq at all.
		 */
		if (unlikely(retval && intermediate_freq)) {
			freqs.old = intermediate_freq;
			freqs.new = policy->restore_freq;
			cpufreq_freq_transition_begin(policy, &freqs);
			cpufreq_freq_transition_end(policy, &freqs, 0);
		}
	}

	return retval;
}
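
/*
 * Illustrative sketch, not part of the original file: the driver side of
 * the intermediate-frequency protocol driven by __target_index() above.
 * The bypass frequency and both callbacks are hypothetical.
 */
static unsigned int __maybe_unused
example_get_intermediate(struct cpufreq_policy *policy, unsigned int index)
{
	/* Returning 0 tells the core no intermediate switch is needed */
	return 24000;	/* e.g. a fixed bypass clock, in kHz */
}

static int __maybe_unused
example_target_intermediate(struct cpufreq_policy *policy, unsigned int index)
{
	/* Re-parent the CPU clock to the bypass source while the PLL locks */
	return 0;
}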

int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	unsigned int old_target_freq = target_freq;
	int retval = -EINVAL;

	if (cpufreq_disabled())
		return -ENODEV;

	/* Make sure that target_freq is within supported range */
	if (target_freq > policy->max)
		target_freq = policy->max;
	if (target_freq < policy->min)
		target_freq = policy->min;

	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
		 policy->cpu, target_freq, relation, old_target_freq);

	/*
	 * This might look like a redundant call, as we check it again after
	 * finding the index. It is left in intentionally for cases where
	 * exactly the same frequency is requested again, so that we can
	 * save a few function calls.
	 */
	if (target_freq == policy->cur)
		return 0;

	/* Save the last value to restore later on errors */
	policy->restore_freq = policy->cur;

	if (cpufreq_driver->target)
		retval = cpufreq_driver->target(policy, target_freq, relation);
	else if (cpufreq_driver->target_index) {
		struct cpufreq_frequency_table *freq_table;
		int index;

		freq_table = cpufreq_frequency_get_table(policy->cpu);
		if (unlikely(!freq_table)) {
			pr_err("%s: Unable to find freq_table\n", __func__);
			goto out;
		}

		retval = cpufreq_frequency_table_target(policy, freq_table,
				target_freq, relation, &index);
		if (unlikely(retval)) {
			pr_err("%s: Unable to find matching freq\n", __func__);
			goto out;
		}

		if (freq_table[index].frequency == policy->cur) {
			retval = 0;
			goto out;
		}

		retval = __target_index(policy, freq_table, index);
	}

out:
	return retval;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);

int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	int ret = -EINVAL;

	down_write(&policy->rwsem);

	ret = __cpufreq_driver_target(policy, target_freq, relation);

	up_write(&policy->rwsem);

	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
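
/*
 * Illustrative sketch, not part of the original file: requesting a
 * frequency from outside the governor path. The 1.2 GHz value is
 * arbitrary; CPUFREQ_RELATION_H picks the highest table frequency at
 * or below the target.
 */
static int __maybe_unused example_request_freq(struct cpufreq_policy *policy)
{
	/* Takes policy->rwsem; use __cpufreq_driver_target() if held */
	return cpufreq_driver_target(policy, 1200000, CPUFREQ_RELATION_H);
}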

static int __cpufreq_governor(struct cpufreq_policy *policy,
			      unsigned int event)
{
	int ret;

	/*
	 * Must only be defined when the default governor is known to have
	 * latency restrictions, like e.g. conservative or ondemand. That
	 * this is the case is already ensured in Kconfig.
	 */
#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
	struct cpufreq_governor *gov = &cpufreq_gov_performance;
#else
	struct cpufreq_governor *gov = NULL;
#endif

	/* Don't start any governor operations if we are entering suspend */
	if (cpufreq_suspended)
		return 0;
	/*
	 * The governor might not be initialized here if an ACPI _PPC change
	 * notification happened, so check it.
	 */
	if (!policy->governor)
		return -EINVAL;

	if (policy->governor->max_transition_latency &&
	    policy->cpuinfo.transition_latency >
	    policy->governor->max_transition_latency) {
		if (!gov)
			return -EINVAL;
		else {
			pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
				policy->governor->name, gov->name);
			policy->governor = gov;
		}
	}

	if (event == CPUFREQ_GOV_POLICY_INIT)
		if (!try_module_get(policy->governor->owner))
			return -EINVAL;

	pr_debug("__cpufreq_governor for CPU %u, event %u\n",
		 policy->cpu, event);

	mutex_lock(&cpufreq_governor_lock);
	if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
	    || (!policy->governor_enabled
	    && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
		mutex_unlock(&cpufreq_governor_lock);
		return -EBUSY;
	}

	if (event == CPUFREQ_GOV_STOP)
		policy->governor_enabled = false;
	else if (event == CPUFREQ_GOV_START)
		policy->governor_enabled = true;

	mutex_unlock(&cpufreq_governor_lock);

	ret = policy->governor->governor(policy, event);

	if (!ret) {
		if (event == CPUFREQ_GOV_POLICY_INIT)
			policy->governor->initialized++;
		else if (event == CPUFREQ_GOV_POLICY_EXIT)
			policy->governor->initialized--;
	} else {
		/* Restore original values */
		mutex_lock(&cpufreq_governor_lock);
		if (event == CPUFREQ_GOV_STOP)
			policy->governor_enabled = true;
		else if (event == CPUFREQ_GOV_START)
			policy->governor_enabled = false;
		mutex_unlock(&cpufreq_governor_lock);
	}

	if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
	    ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
		module_put(policy->governor->owner);

	return ret;
}

int cpufreq_register_governor(struct cpufreq_governor *governor)
{
	int err;

	if (!governor)
		return -EINVAL;

	if (cpufreq_disabled())
		return -ENODEV;

	mutex_lock(&cpufreq_governor_mutex);

	governor->initialized = 0;
	err = -EBUSY;
	if (!find_governor(governor->name)) {
		err = 0;
		list_add(&governor->governor_list, &cpufreq_governor_list);
	}

	mutex_unlock(&cpufreq_governor_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(cpufreq_register_governor);
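
/*
 * Illustrative sketch, not part of the original file: the registration
 * boilerplate for a governor. The callback is a hypothetical stub; a
 * real one acts on CPUFREQ_GOV_POLICY_INIT/EXIT, START, STOP and LIMITS.
 */
static int example_governor_fn(struct cpufreq_policy *policy,
			       unsigned int event)
{
	return 0;
}

static struct cpufreq_governor example_gov __maybe_unused = {
	.name		= "example",
	.governor	= example_governor_fn,
	.owner		= THIS_MODULE,
};

/*
 * In module init/exit:
 *	cpufreq_register_governor(&example_gov);
 *	cpufreq_unregister_governor(&example_gov);
 */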

void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
	int cpu;

	if (!governor)
		return;

	if (cpufreq_disabled())
		return;

	for_each_present_cpu(cpu) {
		if (cpu_online(cpu))
			continue;
		if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
			strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
	}

	mutex_lock(&cpufreq_governor_mutex);
	list_del(&governor->governor_list);
	mutex_unlock(&cpufreq_governor_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);


/*********************************************************************
 *                          POLICY INTERFACE                         *
 *********************************************************************/

/**
 * cpufreq_get_policy - get the current cpufreq_policy
 * @policy: struct cpufreq_policy into which the current cpufreq_policy
 *	is written
 * @cpu: CPU whose policy is queried
 *
 * Reads the current cpufreq policy.
 */
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
{
	struct cpufreq_policy *cpu_policy;

	if (!policy)
		return -EINVAL;

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	memcpy(policy, cpu_policy, sizeof(*policy));

	cpufreq_cpu_put(cpu_policy);
	return 0;
}
EXPORT_SYMBOL(cpufreq_get_policy);
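
/*
 * Illustrative sketch, not part of the original file: snapshotting a
 * CPU's policy. The copy lives on the caller's stack, so no reference
 * needs to be dropped afterwards.
 */
static void __maybe_unused example_snapshot_policy(unsigned int cpu)
{
	struct cpufreq_policy pol;

	if (!cpufreq_get_policy(&pol, cpu))
		pr_info("cpu%u limits: %u - %u kHz\n", cpu, pol.min, pol.max);
}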

/*
 * policy : current policy.
 * new_policy: policy to be set.
 */
static int cpufreq_set_policy(struct cpufreq_policy *policy,
			      struct cpufreq_policy *new_policy)
{
	struct cpufreq_governor *old_gov;
	int ret;

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
		 new_policy->cpu, new_policy->min, new_policy->max);

	memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));

	if (new_policy->min > policy->max || new_policy->max < policy->min)
		return -EINVAL;

	/* verify that the cpu speed can be set within these limits */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		return ret;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, new_policy);

	/* adjust if necessary - hardware incompatibility */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_INCOMPATIBLE, new_policy);

	/*
	 * verify that the cpu speed can be set within these limits, which
	 * might be different from the first ones
	 */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		return ret;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, new_policy);

	policy->min = new_policy->min;
	policy->max = new_policy->max;

	pr_debug("new min and max freqs are %u - %u kHz\n",
		 policy->min, policy->max);

	if (cpufreq_driver->setpolicy) {
		policy->policy = new_policy->policy;
		pr_debug("setting range\n");
		return cpufreq_driver->setpolicy(new_policy);
	}

	if (new_policy->governor == policy->governor)
		goto out;

	pr_debug("governor switch\n");

	/* save old, working values */
	old_gov = policy->governor;
	/* end old governor */
	if (old_gov) {
		__cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		up_write(&policy->rwsem);
		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
		down_write(&policy->rwsem);
	}

	/* start new governor */
	policy->governor = new_policy->governor;
	if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
		if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
			goto out;

		up_write(&policy->rwsem);
		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
		down_write(&policy->rwsem);
	}

	/* new governor failed, so re-start old one */
	pr_debug("starting governor %s failed\n", policy->governor->name);
	if (old_gov) {
		policy->governor = old_gov;
		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
		__cpufreq_governor(policy, CPUFREQ_GOV_START);
	}

	return -EINVAL;

 out:
	pr_debug("governor: change or update limits\n");
	return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
}

/**
 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
 * @cpu: CPU which shall be re-evaluated
 *
 * Useful for policy notifiers which have different requirements
 * at different times.
 */
int cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct cpufreq_policy new_policy;
	int ret;

	if (!policy)
		return -ENODEV;

	down_write(&policy->rwsem);

	pr_debug("updating policy for CPU %u\n", cpu);
	memcpy(&new_policy, policy, sizeof(*policy));
	new_policy.min = policy->user_policy.min;
	new_policy.max = policy->user_policy.max;
	new_policy.policy = policy->user_policy.policy;
	new_policy.governor = policy->user_policy.governor;

	/*
	 * The BIOS might change the frequency behind our back, so ask the
	 * driver for the current frequency and notify the governors about
	 * the change.
	 */
	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
		new_policy.cur = cpufreq_driver->get(cpu);
		if (WARN_ON(!new_policy.cur)) {
			ret = -EIO;
			goto unlock;
		}

		if (!policy->cur) {
			pr_debug("Driver did not initialize current freq\n");
			policy->cur = new_policy.cur;
		} else {
			if (policy->cur != new_policy.cur && has_target())
				cpufreq_out_of_sync(policy, new_policy.cur);
		}
	}

	ret = cpufreq_set_policy(policy, &new_policy);

unlock:
	up_write(&policy->rwsem);

	cpufreq_cpu_put(policy);
	return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);

static int cpufreq_cpu_callback(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct device *dev;

	dev = get_cpu_device(cpu);
	if (dev) {
		switch (action & ~CPU_TASKS_FROZEN) {
		case CPU_ONLINE:
			cpufreq_add_dev(dev, NULL);
			break;

		case CPU_DOWN_PREPARE:
			__cpufreq_remove_dev_prepare(dev, NULL);
			break;

		case CPU_POST_DEAD:
			__cpufreq_remove_dev_finish(dev, NULL);
			break;

		case CPU_DOWN_FAILED:
			cpufreq_add_dev(dev, NULL);
			break;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block __refdata cpufreq_cpu_notifier = {
	.notifier_call = cpufreq_cpu_callback,
};

/*********************************************************************
 *                               BOOST                               *
 *********************************************************************/
static int cpufreq_boost_set_sw(int state)
{
	struct cpufreq_frequency_table *freq_table;
	struct cpufreq_policy *policy;
	int ret = -EINVAL;

	for_each_active_policy(policy) {
		freq_table = cpufreq_frequency_get_table(policy->cpu);
		if (freq_table) {
			ret = cpufreq_frequency_table_cpuinfo(policy,
							freq_table);
			if (ret) {
				pr_err("%s: Policy frequency update failed\n",
				       __func__);
				break;
			}
			policy->user_policy.max = policy->max;
			__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
		}
	}

	return ret;
}

int cpufreq_boost_trigger_state(int state)
{
	unsigned long flags;
	int ret = 0;

	if (cpufreq_driver->boost_enabled == state)
		return 0;

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver->boost_enabled = state;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = cpufreq_driver->set_boost(state);
	if (ret) {
		write_lock_irqsave(&cpufreq_driver_lock, flags);
		cpufreq_driver->boost_enabled = !state;
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);

		pr_err("%s: Cannot %s BOOST\n",
		       __func__, state ? "enable" : "disable");
	}

	return ret;
}

int cpufreq_boost_supported(void)
{
	if (likely(cpufreq_driver))
		return cpufreq_driver->boost_supported;

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_supported);

int cpufreq_boost_enabled(void)
{
	return cpufreq_driver->boost_enabled;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
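
/*
 * Illustrative sketch, not part of the original file: a toggle built on
 * the boost helpers above, mirroring what a sysfs store operation does.
 */
static int __maybe_unused example_set_boost(bool enable)
{
	if (!cpufreq_boost_supported())
		return -EINVAL;

	if (cpufreq_boost_enabled() == enable)
		return 0;

	return cpufreq_boost_trigger_state(enable);
}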

/*********************************************************************
 *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
 *********************************************************************/

/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 *	submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EBUSY when another driver got here first
 * (and isn't unregistered in the meantime).
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    !(driver_data->setpolicy || driver_data->target_index ||
		    driver_data->target) ||
	     (driver_data->setpolicy && (driver_data->target_index ||
		    driver_data->target)) ||
	     (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return -EEXIST;
	}
	cpufreq_driver = driver_data;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	if (cpufreq_boost_supported()) {
		/*
		 * Check if the driver provides a function to enable boost -
		 * if not, use cpufreq_boost_set_sw as default
		 */
		if (!cpufreq_driver->set_boost)
			cpufreq_driver->set_boost = cpufreq_boost_set_sw;

		ret = cpufreq_sysfs_create_file(&boost.attr);
		if (ret) {
			pr_err("%s: cannot register global BOOST sysfs file\n",
			       __func__);
			goto err_null_driver;
		}
	}

	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_boost_unreg;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
	    list_empty(&cpufreq_policy_list)) {
		/* if all ->init() calls failed, unregister */
		pr_debug("%s: No CPU initialized for driver %s\n", __func__,
			 driver_data->name);
		ret = -ENODEV;
		goto err_if_unreg;
	}

	register_hotcpu_notifier(&cpufreq_cpu_notifier);
	pr_debug("driver %s up and running\n", driver_data->name);

	return 0;
err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_boost_unreg:
	if (cpufreq_boost_supported())
		cpufreq_sysfs_remove_file(&boost.attr);
err_null_driver:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
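
/*
 * Illustrative sketch, not part of the original file: the minimum a
 * ->target_index() driver must provide to pass the validation at the
 * top of cpufreq_register_driver(). All callbacks are hypothetical
 * stubs; a real ->verify() would clamp the policy to the freq table.
 */
static int example_drv_verify(struct cpufreq_policy *policy)
{
	return 0;
}

static int example_drv_init(struct cpufreq_policy *policy)
{
	return 0;
}

static int example_drv_target_index(struct cpufreq_policy *policy,
				    unsigned int index)
{
	return 0;
}

static struct cpufreq_driver example_freq_driver __maybe_unused = {
	.name		= "example-freq",
	.verify		= example_drv_verify,
	.init		= example_drv_init,
	.target_index	= example_drv_target_index,
};

/* Registered once from platform code:
 *	cpufreq_register_driver(&example_freq_driver);
 */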

/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	if (!cpufreq_driver || (driver != cpufreq_driver))
		return -EINVAL;

	pr_debug("unregistering driver %s\n", driver->name);

	subsys_interface_unregister(&cpufreq_interface);
	if (cpufreq_boost_supported())
		cpufreq_sysfs_remove_file(&boost.attr);

	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

	down_write(&cpufreq_rwsem);
	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpufreq_driver = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	up_write(&cpufreq_rwsem);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);

/*
 * Stop cpufreq at shutdown to make sure it isn't holding any locks
 * or mutexes when secondary CPUs are halted.
 */
static struct syscore_ops cpufreq_syscore_ops = {
	.shutdown = cpufreq_suspend,
};

static int __init cpufreq_core_init(void)
{
	if (cpufreq_disabled())
		return -ENODEV;

	cpufreq_global_kobject = kobject_create();
	BUG_ON(!cpufreq_global_kobject);

	register_syscore_ops(&cpufreq_syscore_ops);

	return 0;
}
core_initcall(cpufreq_core_init);