/*
 *  linux/drivers/cpufreq/cpufreq.c
 *
 *  Copyright (C) 2001 Russell King
 *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *            (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
 *
 *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *	Added handling for CPU hotplug
 *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *	Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
#include <trace/events/power.h>

/* Macros to iterate over lists */
/* Iterate over online CPUs policies */
static LIST_HEAD(cpufreq_policy_list);
#define for_each_policy(__policy)				\
	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)

/* Iterate over governors */
static LIST_HEAD(cpufreq_governor_list);
#define for_each_governor(__governor)				\
	list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)

/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
static DEFINE_RWLOCK(cpufreq_driver_lock);
DEFINE_MUTEX(cpufreq_governor_lock);

/* This one keeps track of the previously set governor of a removed CPU */
static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);

/* Flag to suspend/resume CPUFreq governors */
static bool cpufreq_suspended;

static inline bool has_target(void)
{
	return cpufreq_driver->target_index || cpufreq_driver->target;
}

/*
 * rwsem to guarantee that cpufreq driver module doesn't unload during critical
 * sections
 */
static DECLARE_RWSEM(cpufreq_rwsem);

/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
		unsigned int event);
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
static void handle_update(struct work_struct *work);

/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	init_cpufreq_transition_notifier_list_called = true;
	return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);

static int off __read_mostly;
static int cpufreq_disabled(void)
{
	return off;
}
void disable_cpufreq(void)
{
	off = 1;
}
static DEFINE_MUTEX(cpufreq_governor_mutex);

bool have_governor_per_policy(void)
{
	return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);

struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
	if (have_governor_per_policy())
		return &policy->kobj;
	else
		return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);

static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	u64 idle_time;
	u64 cur_wall_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = cputime_to_usecs(cur_wall_time);

	return cputime_to_usecs(idle_time);
}

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

	if (idle_time == -1ULL)
		return get_cpu_idle_time_jiffy(cpu, wall);
	else if (!io_busy)
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
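
/*
 * Example (editor's illustrative sketch, not kernel documentation): a
 * governor-style load estimate samples get_cpu_idle_time() twice and works
 * on the deltas; all variable names below are hypothetical:
 *
 *	u64 idle0, wall0, idle1, wall1, load;
 *
 *	idle0 = get_cpu_idle_time(cpu, &wall0, 0);
 *	(one sampling period later)
 *	idle1 = get_cpu_idle_time(cpu, &wall1, 0);
 *	load = (wall1 - wall0) ?
 *		100 * ((wall1 - wall0) - (idle1 - idle0)) / (wall1 - wall0) : 0;
 */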

/*
 * This is a generic cpufreq init() routine which can be used by cpufreq
 * drivers of SMP systems. It will do the following:
 * - validate & show the freq table passed
 * - set the policy's transition latency
 * - fill policy->cpus with all possible CPUs
 */
int cpufreq_generic_init(struct cpufreq_policy *policy,
		struct cpufreq_frequency_table *table,
		unsigned int transition_latency)
{
	int ret;

	ret = cpufreq_table_validate_and_show(policy, table);
	if (ret) {
		pr_err("%s: invalid frequency table: %d\n", __func__, ret);
		return ret;
	}

	policy->cpuinfo.transition_latency = transition_latency;

	/*
	 * The driver only supports the SMP configuration where all processors
	 * share the clock and voltage.
	 */
	cpumask_setall(policy->cpus);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);
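
/*
 * Example (editor's sketch; the "foo" names are hypothetical, not a real
 * driver): a simple SMP driver's ->init() callback can reduce to one call,
 * where 100000 is the transition latency in nanoseconds:
 *
 *	static int foo_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		return cpufreq_generic_init(policy, foo_freq_table, 100000);
 *	}
 */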

unsigned int cpufreq_generic_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	if (!policy || IS_ERR(policy->clk)) {
		pr_err("%s: No %s associated to cpu: %d\n",
		       __func__, policy ? "clk" : "policy", cpu);
		return 0;
	}

	return clk_get_rate(policy->clk) / 1000;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_get);
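
/*
 * Example (editor's sketch, hypothetical driver): drivers that set up
 * policy->clk in their ->init() can wire this helper straight into their
 * driver structure:
 *
 *	static struct cpufreq_driver foo_cpufreq_driver = {
 *		.name	= "foo-cpufreq",
 *		.init	= foo_cpufreq_init,
 *		.get	= cpufreq_generic_get,
 *	};
 */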

/* Only for cpufreq core internal use */
struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
	return per_cpu(cpufreq_cpu_data, cpu);
}

/**
 * cpufreq_cpu_get: returns policy for a cpu and marks it busy.
 *
 * @cpu: cpu to find policy for.
 *
 * This returns the policy for 'cpu', or NULL if it doesn't exist.
 * It also increments the kobject reference count to mark the policy busy,
 * so a corresponding call to cpufreq_cpu_put() is required to decrement it
 * back. If that cpufreq_cpu_put() call isn't made, the policy won't be
 * freed, as freeing depends on the kobj count.
 *
 * It also takes a read-lock of 'cpufreq_rwsem' and doesn't put it back if a
 * valid policy is found. This is done to make sure the driver doesn't get
 * unregistered while the policy is being used.
 *
 * Return: A valid policy on success, otherwise NULL on failure.
 */
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = NULL;
	unsigned long flags;

	if (WARN_ON(cpu >= nr_cpu_ids))
		return NULL;

	if (!down_read_trylock(&cpufreq_rwsem))
		return NULL;

	/* get the cpufreq driver */
	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver) {
		/* get the CPU */
		policy = per_cpu(cpufreq_cpu_data, cpu);
		if (policy)
			kobject_get(&policy->kobj);
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!policy)
		up_read(&cpufreq_rwsem);

	return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

/**
 * cpufreq_cpu_put: Decrements the usage count of a policy
 *
 * @policy: policy earlier returned by cpufreq_cpu_get().
 *
 * This decrements the kobject reference count incremented earlier by calling
 * cpufreq_cpu_get().
 *
 * It also drops the read-lock of 'cpufreq_rwsem' taken at cpufreq_cpu_get().
 */
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
	kobject_put(&policy->kobj);
	up_read(&cpufreq_rwsem);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
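
/*
 * Example (editor's illustrative sketch): every successful cpufreq_cpu_get()
 * must be paired with cpufreq_cpu_put(), otherwise the policy kobject and
 * the driver module stay pinned:
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *
 *	if (policy) {
 *		pr_info("cpu%u: cur %u kHz\n", cpu, policy->cur);
 *		cpufreq_cpu_put(policy);
 *	}
 */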

/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
#ifndef CONFIG_SMP
	static unsigned long l_p_j_ref;
	static unsigned int l_p_j_ref_freq;

	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
			 l_p_j_ref, l_p_j_ref_freq);
	}
	if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
			 loops_per_jiffy, ci->new);
	}
#endif
}

static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		 state, freqs->new);

	switch (state) {

	case CPUFREQ_PRECHANGE:
		/* detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
					 freqs->old, policy->cur);
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %lu - CPU: %lu\n",
			 (unsigned long)freqs->new, (unsigned long)freqs->cpu);
		trace_cpu_frequency(freqs->new, freqs->cpu);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}

/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
static void cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	for_each_cpu(freqs->cpu, policy->cpus)
		__cpufreq_notify_transition(policy, freqs, state);
}

/* Do post notifications when there are chances that transition has failed */
static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
	if (!transition_failed)
		return;

	swap(freqs->old, freqs->new);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
}

void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs)
{

	/*
	 * Catch double invocations of _begin() which lead to self-deadlock.
	 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
	 * doesn't invoke _begin() on their behalf, and hence the chances of
	 * double invocations are very low. Moreover, there are scenarios
	 * where these checks can emit false-positive warnings in these
	 * drivers; so we avoid that by skipping them altogether.
	 */
	WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
				&& current == policy->transition_task);

wait:
	wait_event(policy->transition_wait, !policy->transition_ongoing);

	spin_lock(&policy->transition_lock);

	if (unlikely(policy->transition_ongoing)) {
		spin_unlock(&policy->transition_lock);
		goto wait;
	}

	policy->transition_ongoing = true;
	policy->transition_task = current;

	spin_unlock(&policy->transition_lock);

	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);

void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	if (unlikely(WARN_ON(!policy->transition_ongoing)))
		return;

	cpufreq_notify_post_transition(policy, freqs, transition_failed);

	policy->transition_ongoing = false;
	policy->transition_task = NULL;

	wake_up(&policy->transition_wait);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
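
/*
 * Example (editor's sketch, hypothetical driver): a synchronous ->target()
 * style implementation brackets the actual hardware programming with the
 * transition_begin/end pair so notifiers run exactly once on each side:
 *
 *	static int foo_target_index(struct cpufreq_policy *policy,
 *				    unsigned int index)
 *	{
 *		struct cpufreq_freqs freqs;
 *		int ret;
 *
 *		freqs.old = policy->cur;
 *		freqs.new = foo_freq_table[index].frequency;
 *
 *		cpufreq_freq_transition_begin(policy, &freqs);
 *		ret = foo_program_pll(freqs.new);	(hypothetical)
 *		cpufreq_freq_transition_end(policy, &freqs, ret);
 *
 *		return ret;
 *	}
 */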


/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/
static ssize_t show_boost(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	int ret, enable;

	ret = sscanf(buf, "%d", &enable);
	if (ret != 1 || enable < 0 || enable > 1)
		return -EINVAL;

	if (cpufreq_boost_trigger_state(enable)) {
		pr_err("%s: Cannot %s BOOST!\n",
		       __func__, enable ? "enable" : "disable");
		return -EINVAL;
	}

	pr_debug("%s: cpufreq BOOST %s\n",
		 __func__, enable ? "enabled" : "disabled");

	return count;
}
define_one_global_rw(boost);
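
/*
 * Editor's note: the boost attribute defined above is registered on
 * cpufreq_global_kobject by the driver registration path when the driver
 * supports boost, so it typically appears as
 * /sys/devices/system/cpu/cpufreq/boost and accepts "0" or "1".
 */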

static struct cpufreq_governor *find_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	for_each_governor(t)
		if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
			return t;

	return NULL;
}

/**
 * cpufreq_parse_governor - parse a governor string
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
				struct cpufreq_governor **governor)
{
	int err = -EINVAL;

	if (!cpufreq_driver)
		goto out;

	if (cpufreq_driver->setpolicy) {
		if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_PERFORMANCE;
			err = 0;
		} else if (!strncasecmp(str_governor, "powersave",
						CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_POWERSAVE;
			err = 0;
		}
	} else {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);

		t = find_governor(str_governor);

		if (t == NULL) {
			int ret;

			mutex_unlock(&cpufreq_governor_mutex);
			ret = request_module("cpufreq_%s", str_governor);
			mutex_lock(&cpufreq_governor_mutex);

			if (ret == 0)
				t = find_governor(str_governor);
		}

		if (t != NULL) {
			*governor = t;
			err = 0;
		}

		mutex_unlock(&cpufreq_governor_mutex);
	}
out:
	return err;
}

/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);

static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
{
	ssize_t ret;

	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
		ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
	else
		ret = sprintf(buf, "%u\n", policy->cur);
	return ret;
}

static int cpufreq_set_policy(struct cpufreq_policy *policy,
				struct cpufreq_policy *new_policy);

/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)					\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	int ret, temp;							\
	struct cpufreq_policy new_policy;				\
									\
	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
	if (ret)							\
		return -EINVAL;						\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	temp = new_policy.object;					\
	ret = cpufreq_set_policy(policy, &new_policy);			\
	if (!ret)							\
		policy->user_policy.object = temp;			\
									\
	return ret ? ret : count;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);
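
/*
 * For reference, show_one(scaling_max_freq, max) above expands to
 * (whitespace aside):
 *
 *	static ssize_t show_scaling_max_freq
 *	(struct cpufreq_policy *policy, char *buf)
 *	{
 *		return sprintf(buf, "%u\n", policy->max);
 *	}
 *
 * and store_one() generates the matching sysfs write handler, which funnels
 * the parsed value through cpufreq_set_policy().
 */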

/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
					char *buf)
{
	unsigned int cur_freq = __cpufreq_get(policy);
	if (!cur_freq)
		return sprintf(buf, "<unknown>");
	return sprintf(buf, "%u\n", cur_freq);
}

/**
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sprintf(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sprintf(buf, "performance\n");
	else if (policy->governor)
		return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
				policy->governor->name);
	return -EINVAL;
}

/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	int ret;
	char str_governor[16];
	struct cpufreq_policy new_policy;

	ret = cpufreq_get_policy(&new_policy, policy->cpu);
	if (ret)
		return ret;

	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
						&new_policy.governor))
		return -EINVAL;

	ret = cpufreq_set_policy(policy, &new_policy);

	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret)
		return ret;
	else
		return count;
}

/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}

/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!has_target()) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	for_each_governor(t) {
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}

ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (i)
			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
		if (i >= (PAGE_SIZE - 5))
			break;
	}
	i += sprintf(&buf[i], "\n");
	return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);

/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->related_cpus, buf);
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->cpus, buf);
}

static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int freq = 0;
	unsigned int ret;

	if (!policy->governor || !policy->governor->store_setspeed)
		return -EINVAL;

	ret = sscanf(buf, "%u", &freq);
	if (ret != 1)
		return -EINVAL;

	policy->governor->store_setspeed(policy, freq);

	return count;
}

static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sprintf(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}

/**
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
	unsigned int limit;
	int ret;
	if (cpufreq_driver->bios_limit) {
		ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
		if (!ret)
			return sprintf(buf, "%u\n", limit);
	}
	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}

cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);

static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};

#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret;

	if (!down_read_trylock(&cpufreq_rwsem))
		return -EINVAL;

	down_read(&policy->rwsem);

	if (fattr->show)
		ret = fattr->show(policy, buf);
	else
		ret = -EIO;

	up_read(&policy->rwsem);
	up_read(&cpufreq_rwsem);

	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;

	get_online_cpus();

	if (!cpu_online(policy->cpu))
		goto unlock;

	if (!down_read_trylock(&cpufreq_rwsem))
		goto unlock;

	down_write(&policy->rwsem);

	if (fattr->store)
		ret = fattr->store(policy, buf, count);
	else
		ret = -EIO;

	up_write(&policy->rwsem);

	up_read(&cpufreq_rwsem);
unlock:
	put_online_cpus();

	return ret;
}

static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

static struct kobj_type ktype_cpufreq = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= cpufreq_sysfs_release,
};

struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

static int cpufreq_global_kobject_usage;

int cpufreq_get_global_kobject(void)
{
	if (!cpufreq_global_kobject_usage++)
		return kobject_add(cpufreq_global_kobject,
				&cpu_subsys.dev_root->kobj, "%s", "cpufreq");

	return 0;
}
EXPORT_SYMBOL(cpufreq_get_global_kobject);

void cpufreq_put_global_kobject(void)
{
	if (!--cpufreq_global_kobject_usage)
		kobject_del(cpufreq_global_kobject);
}
EXPORT_SYMBOL(cpufreq_put_global_kobject);

int cpufreq_sysfs_create_file(const struct attribute *attr)
{
	int ret = cpufreq_get_global_kobject();

	if (!ret) {
		ret = sysfs_create_file(cpufreq_global_kobject, attr);
		if (ret)
			cpufreq_put_global_kobject();
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_sysfs_create_file);

void cpufreq_sysfs_remove_file(const struct attribute *attr)
{
	sysfs_remove_file(cpufreq_global_kobject, attr);
	cpufreq_put_global_kobject();
}
EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
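
/*
 * Example (editor's illustrative sketch, hypothetical attribute): out-of-core
 * users pair these two helpers to publish a file under
 * /sys/devices/system/cpu/cpufreq/:
 *
 *	static struct global_attr foo_attr = __ATTR(foo, 0644,
 *						    show_foo, store_foo);
 *
 *	ret = cpufreq_sysfs_create_file(&foo_attr.attr);
 *	(and later)
 *	cpufreq_sysfs_remove_file(&foo_attr.attr);
 */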

/* symlink affected CPUs */
static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
{
	unsigned int j;
	int ret = 0;

	for_each_cpu(j, policy->cpus) {
		struct device *cpu_dev;

		if (j == policy->cpu)
			continue;

		pr_debug("Adding link for CPU: %u\n", j);
		cpu_dev = get_cpu_device(j);
		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
					"cpufreq");
		if (ret)
			break;
	}
	return ret;
}

static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
				     struct device *dev)
{
	struct freq_attr **drv_attr;
	int ret = 0;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while (drv_attr && *drv_attr) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			return ret;
		drv_attr++;
	}
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			return ret;
	}

	ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
	if (ret)
		return ret;

	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			return ret;
	}

	return cpufreq_add_dev_symlink(policy);
}

static void cpufreq_init_policy(struct cpufreq_policy *policy)
{
	struct cpufreq_governor *gov = NULL;
	struct cpufreq_policy new_policy;
	int ret = 0;

	memcpy(&new_policy, policy, sizeof(*policy));

	/* Update governor of new_policy to the governor used before hotplug */
	gov = find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu));
	if (gov)
		pr_debug("Restoring governor %s for cpu %d\n",
				policy->governor->name, policy->cpu);
	else
		gov = CPUFREQ_DEFAULT_GOVERNOR;

	new_policy.governor = gov;

	/* Use the default policy if it's valid. */
	if (cpufreq_driver->setpolicy)
		cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);

	/* set default policy */
	ret = cpufreq_set_policy(policy, &new_policy);
	if (ret) {
		pr_debug("setting policy failed\n");
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);
	}
}

static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
				  unsigned int cpu, struct device *dev)
{
	int ret = 0;
	unsigned long flags;

	/* Has this CPU been taken care of already? */
	if (cpumask_test_cpu(cpu, policy->cpus))
		return 0;

	if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (ret) {
			pr_err("%s: Failed to stop governor\n", __func__);
			return ret;
		}
	}

	down_write(&policy->rwsem);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpumask_set_cpu(cpu, policy->cpus);
	per_cpu(cpufreq_cpu_data, cpu) = policy;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	up_write(&policy->rwsem);

	if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
		if (!ret)
			ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);

		if (ret) {
			pr_err("%s: Failed to start governor\n", __func__);
			return ret;
		}
	}

	return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
}

static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	unsigned long flags;

	read_lock_irqsave(&cpufreq_driver_lock, flags);

	policy = per_cpu(cpufreq_cpu_data_fallback, cpu);

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (policy)
		policy->governor = NULL;

	return policy;
}

static struct cpufreq_policy *cpufreq_policy_alloc(void)
{
	struct cpufreq_policy *policy;

	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
	if (!policy)
		return NULL;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	INIT_LIST_HEAD(&policy->policy_list);
	init_rwsem(&policy->rwsem);
	spin_lock_init(&policy->transition_lock);
	init_waitqueue_head(&policy->transition_wait);
	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	return policy;

err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);

	return NULL;
}

static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
{
	struct kobject *kobj;
	struct completion *cmp;

	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_REMOVE_POLICY, policy);

	down_read(&policy->rwsem);
	kobj = &policy->kobj;
	cmp = &policy->kobj_unregister;
	up_read(&policy->rwsem);
	kobject_put(kobj);

	/*
	 * We need to make sure that the underlying kobj is
	 * actually not referenced anymore by anybody before we
	 * proceed with unloading.
	 */
	pr_debug("waiting for dropping of refcount\n");
	wait_for_completion(cmp);
	pr_debug("wait complete\n");
}

static void cpufreq_policy_free(struct cpufreq_policy *policy)
{
	free_cpumask_var(policy->related_cpus);
	free_cpumask_var(policy->cpus);
	kfree(policy);
}

static int update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu,
			     struct device *cpu_dev)
{
	int ret;

	if (WARN_ON(cpu == policy->cpu))
		return 0;

	/* Move kobject to the new policy->cpu */
	ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
	if (ret) {
		pr_err("%s: Failed to move kobj: %d\n", __func__, ret);
		return ret;
	}

	down_write(&policy->rwsem);
	policy->cpu = cpu;
	up_write(&policy->rwsem);

	return 0;
}

/**
 * cpufreq_add_dev - add a CPU device
 *
 * Adds the cpufreq interface for a CPU device.
 *
 * The Oracle says: try running cpufreq registration/unregistration
 * concurrently with CPU hotplugging and all hell will break loose. Tried to
 * clean this mess up, but more thorough testing is needed. - Mathieu
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int j, cpu = dev->id;
	int ret = -ENOMEM;
	struct cpufreq_policy *policy;
	unsigned long flags;
	bool recover_policy = cpufreq_suspended;

	if (cpu_is_offline(cpu))
		return 0;

	pr_debug("adding CPU %u\n", cpu);

	if (!down_read_trylock(&cpufreq_rwsem))
		return 0;

	/* Check if this CPU already has a policy to manage it */
	read_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_policy(policy) {
		if (cpumask_test_cpu(cpu, policy->related_cpus)) {
			read_unlock_irqrestore(&cpufreq_driver_lock, flags);
			ret = cpufreq_add_policy_cpu(policy, cpu, dev);
			up_read(&cpufreq_rwsem);
			return ret;
		}
	}
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/*
	 * Restore the saved policy when doing light-weight init and fall back
	 * to the full init if that fails.
	 */
	policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
	if (!policy) {
		recover_policy = false;
		policy = cpufreq_policy_alloc();
		if (!policy)
			goto nomem_out;
	}

	/*
	 * In the resume path, since we restore a saved policy, the assignment
	 * to policy->cpu is like an update of the existing policy, rather than
	 * the creation of a brand new one. So we need to perform this update
	 * by invoking update_policy_cpu().
	 */
	if (recover_policy && cpu != policy->cpu)
		WARN_ON(update_policy_cpu(policy, cpu, dev));
	else
		policy->cpu = cpu;

	cpumask_copy(policy->cpus, cpumask_of(cpu));

	/* call driver. From then on the cpufreq must be able
	 * to accept all calls to ->verify and ->setpolicy for this CPU
	 */
	ret = cpufreq_driver->init(policy);
	if (ret) {
		pr_debug("initialization failed\n");
		goto err_set_policy_cpu;
	}

	down_write(&policy->rwsem);

	/* related_cpus should at least contain policy->cpus */
	cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);

	/*
	 * affected cpus must always be the ones that are online. We aren't
	 * managing offline cpus here.
	 */
	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

	if (!recover_policy) {
		policy->user_policy.min = policy->min;
		policy->user_policy.max = policy->max;

		/* prepare interface data */
		ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
					   &dev->kobj, "cpufreq");
		if (ret) {
			pr_err("%s: failed to init policy->kobj: %d\n",
			       __func__, ret);
			goto err_init_policy_kobj;
		}
	}

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_cpu_data, j) = policy;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
		policy->cur = cpufreq_driver->get(policy->cpu);
		if (!policy->cur) {
			pr_err("%s: ->get() failed\n", __func__);
			goto err_get_freq;
		}
	}

	/*
	 * Sometimes boot loaders set the CPU frequency to a value outside of
	 * the frequency table known to the cpufreq core. In such cases the
	 * CPU might be unstable if it has to run at that frequency for a long
	 * duration, so it's better to set it to a frequency which is specified
	 * in the freq-table. This also makes cpufreq stats inconsistent, as
	 * cpufreq-stats would fail to register because the current frequency
	 * of the CPU isn't found in the freq-table.
	 *
	 * Because we don't want this change to affect the boot process badly,
	 * we go for the next freq which is >= policy->cur ('cur' must be set
	 * by now, otherwise we would end up setting freq to the lowest entry
	 * of the table, as 'cur' is initialized to zero).
	 *
	 * We are passing target-freq as "policy->cur - 1" otherwise
	 * __cpufreq_driver_target() would simply fail, as policy->cur will be
	 * equal to target-freq.
	 */
	if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
	    && has_target()) {
		/* Are we running at unknown frequency ? */
		ret = cpufreq_frequency_table_get_index(policy, policy->cur);
		if (ret == -EINVAL) {
			/* Warn user and fix it */
			pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
				__func__, policy->cpu, policy->cur);
			ret = __cpufreq_driver_target(policy, policy->cur - 1,
				CPUFREQ_RELATION_L);

			/*
			 * Reaching here after boot in a few seconds may not
			 * mean that system will remain stable at "unknown"
			 * frequency for longer duration. Hence, a BUG_ON().
			 */
			BUG_ON(ret);
			pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
				__func__, policy->cpu, policy->cur);
		}
	}

	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_START, policy);

	if (!recover_policy) {
		ret = cpufreq_add_dev_interface(policy, dev);
		if (ret)
			goto err_out_unregister;
		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				CPUFREQ_CREATE_POLICY, policy);
	}

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	list_add(&policy->policy_list, &cpufreq_policy_list);
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	cpufreq_init_policy(policy);

	if (!recover_policy) {
		policy->user_policy.policy = policy->policy;
		policy->user_policy.governor = policy->governor;
	}
	up_write(&policy->rwsem);

	kobject_uevent(&policy->kobj, KOBJ_ADD);

	up_read(&cpufreq_rwsem);

	/* Callback for handling stuff after policy is ready */
	if (cpufreq_driver->ready)
		cpufreq_driver->ready(policy);

	pr_debug("initialization complete\n");

	return 0;

err_out_unregister:
err_get_freq:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_cpu_data, j) = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!recover_policy) {
		kobject_put(&policy->kobj);
		wait_for_completion(&policy->kobj_unregister);
	}
err_init_policy_kobj:
	up_write(&policy->rwsem);

	if (cpufreq_driver->exit)
		cpufreq_driver->exit(policy);
err_set_policy_cpu:
	if (recover_policy) {
		/* Do not leave stale fallback data behind. */
		per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
		cpufreq_policy_put_kobj(policy);
	}
	cpufreq_policy_free(policy);

nomem_out:
	up_read(&cpufreq_rwsem);

	return ret;
}

static int __cpufreq_remove_dev_prepare(struct device *dev,
					struct subsys_interface *sif)
{
	unsigned int cpu = dev->id, cpus;
	int ret;
	unsigned long flags;
	struct cpufreq_policy *policy;

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	policy = per_cpu(cpufreq_cpu_data, cpu);

	/* Save the policy somewhere when doing a light-weight tear-down */
	if (cpufreq_suspended)
		per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return -EINVAL;
	}

	if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (ret) {
			pr_err("%s: Failed to stop governor\n", __func__);
			return ret;
		}

		strncpy(per_cpu(cpufreq_cpu_governor, cpu),
			policy->governor->name, CPUFREQ_NAME_LEN);
	}

	down_read(&policy->rwsem);
	cpus = cpumask_weight(policy->cpus);
	up_read(&policy->rwsem);

	if (cpu != policy->cpu) {
		sysfs_remove_link(&dev->kobj, "cpufreq");
	} else if (cpus > 1) {
		/* Nominate new CPU */
		int new_cpu = cpumask_any_but(policy->cpus, cpu);
		struct device *cpu_dev = get_cpu_device(new_cpu);

		sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
		ret = update_policy_cpu(policy, new_cpu, cpu_dev);
		if (ret) {
			if (sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
					      "cpufreq"))
				pr_err("%s: Failed to restore kobj link to cpu:%d\n",
				       __func__, cpu_dev->id);
			return ret;
		}

		if (!cpufreq_suspended)
			pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
				 __func__, new_cpu, cpu);
	} else if (cpufreq_driver->stop_cpu) {
		cpufreq_driver->stop_cpu(policy);
	}

	return 0;
}

static int __cpufreq_remove_dev_finish(struct device *dev,
				       struct subsys_interface *sif)
{
	unsigned int cpu = dev->id, cpus;
	int ret;
	unsigned long flags;
	struct cpufreq_policy *policy;

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	policy = per_cpu(cpufreq_cpu_data, cpu);
	per_cpu(cpufreq_cpu_data, cpu) = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return -EINVAL;
	}

	down_write(&policy->rwsem);
	cpus = cpumask_weight(policy->cpus);
	cpumask_clear_cpu(cpu, policy->cpus);
	up_write(&policy->rwsem);

	/* If cpu is last user of policy, free policy */
	if (cpus == 1) {
		if (has_target()) {
			ret = __cpufreq_governor(policy,
					CPUFREQ_GOV_POLICY_EXIT);
			if (ret) {
				pr_err("%s: Failed to exit governor\n",
				       __func__);
				return ret;
			}
		}

		if (!cpufreq_suspended)
			cpufreq_policy_put_kobj(policy);

		/*
		 * Perform the ->exit() even during light-weight tear-down,
		 * since this is a core component, and is essential for the
		 * subsequent light-weight ->init() to succeed.
		 */
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);

		/* Remove policy from list of active policies */
		write_lock_irqsave(&cpufreq_driver_lock, flags);
		list_del(&policy->policy_list);
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);

		if (!cpufreq_suspended)
			cpufreq_policy_free(policy);
	} else if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
		if (!ret)
			ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);

		if (ret) {
			pr_err("%s: Failed to start governor\n", __func__);
			return ret;
		}
	}

	return 0;
}

/**
 * cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 */
static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	int ret;

	if (cpu_is_offline(cpu))
		return 0;

	ret = __cpufreq_remove_dev_prepare(dev, sif);

	if (!ret)
		ret = __cpufreq_remove_dev_finish(dev, sif);

	return ret;
}

static void handle_update(struct work_struct *work)
{
	struct cpufreq_policy *policy =
		container_of(work, struct cpufreq_policy, update);
	unsigned int cpu = policy->cpu;
	pr_debug("handle_update for cpu %u called\n", cpu);
	cpufreq_update_policy(cpu);
}

/**
 * cpufreq_out_of_sync - If actual and saved CPU frequency differ, we're
 * in deep trouble.
 * @policy: policy managing CPUs
 * @new_freq: CPU frequency the CPU actually runs at
 *
 * We adjust to the current frequency first, and need to clean up later.
 * So either call cpufreq_update_policy() or schedule handle_update().
 */
static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
				unsigned int new_freq)
{
	struct cpufreq_freqs freqs;

	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
		 policy->cur, new_freq);

	freqs.old = policy->cur;
	freqs.new = new_freq;

	cpufreq_freq_transition_begin(policy, &freqs);
	cpufreq_freq_transition_end(policy, &freqs, 0);
}

/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
 */
unsigned int cpufreq_quick_get(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	unsigned int ret_freq = 0;

	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
		return cpufreq_driver->get(cpu);

	policy = cpufreq_cpu_get(cpu);
	if (policy) {
		ret_freq = policy->cur;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);

/**
 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
 * @cpu: CPU number
 *
 * Just return the max possible frequency for a given CPU.
 */
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		ret_freq = policy->max;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get_max);
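
/*
 * Example (editor's illustrative sketch): the "quick" helpers above report
 * cached policy values (except for setpolicy drivers, where
 * cpufreq_quick_get() asks the driver directly). A consumer such as a thermal
 * driver can, for instance, express the current frequency as a fraction of
 * the maximum:
 *
 *	unsigned int cur = cpufreq_quick_get(cpu);
 *	unsigned int max = cpufreq_quick_get_max(cpu);
 *	unsigned int pct = max ? 100 * cur / max : 0;
 */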

static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
{
	unsigned int ret_freq = 0;

	if (!cpufreq_driver->get)
		return ret_freq;

	ret_freq = cpufreq_driver->get(policy->cpu);

	if (ret_freq && policy->cur &&
	    !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
		/* verify that no discrepancy between the actual and
		   saved value exists */
		if (unlikely(ret_freq != policy->cur)) {
			cpufreq_out_of_sync(policy, ret_freq);
			schedule_work(&policy->update);
		}
	}

	return ret_freq;
}

/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the current (static) CPU frequency.
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		down_read(&policy->rwsem);
		ret_freq = __cpufreq_get(policy);
		up_read(&policy->rwsem);

		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);

static struct subsys_interface cpufreq_interface = {
	.name		= "cpufreq",
	.subsys		= &cpu_subsys,
	.add_dev	= cpufreq_add_dev,
	.remove_dev	= cpufreq_remove_dev,
};

/*
 * In case the platform wants some specific frequency to be configured
 * during suspend.
 */
int cpufreq_generic_suspend(struct cpufreq_policy *policy)
{
	int ret;

	if (!policy->suspend_freq) {
		pr_err("%s: suspend_freq can't be zero\n", __func__);
		return -EINVAL;
	}

	pr_debug("%s: Setting suspend-freq: %u\n", __func__,
			policy->suspend_freq);

	ret = __cpufreq_driver_target(policy, policy->suspend_freq,
			CPUFREQ_RELATION_H);
	if (ret)
		pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
		       __func__, policy->suspend_freq, ret);

	return ret;
}
EXPORT_SYMBOL(cpufreq_generic_suspend);
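
/*
 * Example (editor's sketch, hypothetical driver): a platform that must park
 * its CPUs at a safe frequency across suspend sets suspend_freq in ->init()
 * and reuses the generic helper:
 *
 *	policy->suspend_freq = FOO_SAFE_FREQ_KHZ;	(in foo ->init())
 *
 *	static struct cpufreq_driver foo_cpufreq_driver = {
 *		.suspend = cpufreq_generic_suspend,
 *	};
 */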
1656
42d4dc3f 1657/**
2f0aea93 1658 * cpufreq_suspend() - Suspend CPUFreq governors
e00e56df 1659 *
2f0aea93
VK
1660 * Called during system wide Suspend/Hibernate cycles for suspending governors
1661 * as some platforms can't change frequency after this point in suspend cycle.
1662 * Because some of the devices (like: i2c, regulators, etc) they use for
1663 * changing frequency are suspended quickly after this point.
42d4dc3f 1664 */
2f0aea93 1665void cpufreq_suspend(void)
42d4dc3f 1666{
3a3e9e06 1667 struct cpufreq_policy *policy;
42d4dc3f 1668
2f0aea93
VK
1669 if (!cpufreq_driver)
1670 return;
42d4dc3f 1671
2f0aea93 1672 if (!has_target())
b1b12bab 1673 goto suspend;
42d4dc3f 1674
2f0aea93
VK
1675 pr_debug("%s: Suspending Governors\n", __func__);
1676
b4f0676f 1677 for_each_policy(policy) {
2f0aea93
VK
1678 if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
1679 pr_err("%s: Failed to stop governor for policy: %p\n",
1680 __func__, policy);
1681 else if (cpufreq_driver->suspend
1682 && cpufreq_driver->suspend(policy))
1683 pr_err("%s: Failed to suspend driver: %p\n", __func__,
1684 policy);
42d4dc3f 1685 }
b1b12bab
VK
1686
1687suspend:
1688 cpufreq_suspended = true;
42d4dc3f
BH
1689}
1690
1da177e4 1691/**
2f0aea93 1692 * cpufreq_resume() - Resume CPUFreq governors
1da177e4 1693 *
2f0aea93
VK
 1694 * Called during system-wide suspend/hibernate cycles to resume governors that
 1695 * were suspended with cpufreq_suspend().
1da177e4 1696 */
2f0aea93 1697void cpufreq_resume(void)
1da177e4 1698{
3a3e9e06 1699 struct cpufreq_policy *policy;
1da177e4 1700
2f0aea93
VK
1701 if (!cpufreq_driver)
1702 return;
1da177e4 1703
8e30444e
LT
1704 cpufreq_suspended = false;
1705
2f0aea93 1706 if (!has_target())
e00e56df 1707 return;
1da177e4 1708
2f0aea93 1709 pr_debug("%s: Resuming Governors\n", __func__);
1da177e4 1710
b4f0676f 1711 for_each_policy(policy) {
0c5aa405
VK
1712 if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
1713 pr_err("%s: Failed to resume driver: %p\n", __func__,
1714 policy);
1715 else if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
2f0aea93
VK
1716 || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
1717 pr_err("%s: Failed to start governor for policy: %p\n",
1718 __func__, policy);
2f0aea93 1719 }
c75de0ac
VK
1720
1721 /*
 1722 * Schedule a call to cpufreq_update_policy() for the first online CPU,
 1723 * as that one won't have been hotplugged out on suspend. It verifies
 1724 * that the current freq is in sync with what we believe it to be.
1725 */
1726 policy = cpufreq_cpu_get_raw(cpumask_first(cpu_online_mask));
1727 if (WARN_ON(!policy))
1728 return;
1729
1730 schedule_work(&policy->update);
2f0aea93 1731}
1da177e4 1732
9d95046e
BP
1733/**
1734 * cpufreq_get_current_driver - return current driver's name
1735 *
1736 * Return the name string of the currently loaded cpufreq driver
1737 * or NULL, if none.
1738 */
1739const char *cpufreq_get_current_driver(void)
1740{
1c3d85dd
RW
1741 if (cpufreq_driver)
1742 return cpufreq_driver->name;
1743
1744 return NULL;
9d95046e
BP
1745}
1746EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1da177e4 1747
51315cdf
TP
1748/**
1749 * cpufreq_get_driver_data - return current driver data
1750 *
1751 * Return the private data of the currently loaded cpufreq
1752 * driver, or NULL if no cpufreq driver is loaded.
1753 */
1754void *cpufreq_get_driver_data(void)
1755{
1756 if (cpufreq_driver)
1757 return cpufreq_driver->driver_data;
1758
1759 return NULL;
1760}
1761EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
1762
1da177e4
LT
1763/*********************************************************************
1764 * NOTIFIER LISTS INTERFACE *
1765 *********************************************************************/
1766
1767/**
1768 * cpufreq_register_notifier - register a driver with cpufreq
1769 * @nb: notifier function to register
1770 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1771 *
32ee8c3e 1772 * Add a driver to one of two lists: either a list of drivers that
1da177e4
LT
1773 * are notified about clock rate changes (once before and once after
1774 * the transition), or a list of drivers that are notified about
1775 * changes in cpufreq policy.
1776 *
1777 * This function may sleep, and has the same return conditions as
e041c683 1778 * blocking_notifier_chain_register.
1da177e4
LT
1779 */
1780int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1781{
1782 int ret;
1783
d5aaffa9
DB
1784 if (cpufreq_disabled())
1785 return -EINVAL;
1786
74212ca4
CEB
1787 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1788
1da177e4
LT
1789 switch (list) {
1790 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1791 ret = srcu_notifier_chain_register(
e041c683 1792 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1793 break;
1794 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1795 ret = blocking_notifier_chain_register(
1796 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1797 break;
1798 default:
1799 ret = -EINVAL;
1800 }
1da177e4
LT
1801
1802 return ret;
1803}
1804EXPORT_SYMBOL(cpufreq_register_notifier);
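
/*
 * Editorial example: registering a transition notifier. The callback is
 * invoked with CPUFREQ_PRECHANGE before and CPUFREQ_POSTCHANGE after each
 * frequency switch, and receives a struct cpufreq_freqs * as data. All
 * example_ names are hypothetical.
 */
#if 0	/* illustrative only */
static int example_transition(struct notifier_block *nb, unsigned long val,
			      void *data)
{
	struct cpufreq_freqs *freqs = data;

	if (val == CPUFREQ_POSTCHANGE)
		pr_info("cpu%u: %u -> %u kHz\n",
			freqs->cpu, freqs->old, freqs->new);

	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_transition,
};

static int __init example_notifier_init(void)
{
	return cpufreq_register_notifier(&example_nb,
					 CPUFREQ_TRANSITION_NOTIFIER);
}
#endif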
1805
1da177e4
LT
1806/**
1807 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1808 * @nb: notifier block to be unregistered
bb176f7d 1809 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1da177e4
LT
1810 *
1811 * Remove a driver from the CPU frequency notifier list.
1812 *
1813 * This function may sleep, and has the same return conditions as
e041c683 1814 * blocking_notifier_chain_unregister.
1da177e4
LT
1815 */
1816int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1817{
1818 int ret;
1819
d5aaffa9
DB
1820 if (cpufreq_disabled())
1821 return -EINVAL;
1822
1da177e4
LT
1823 switch (list) {
1824 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1825 ret = srcu_notifier_chain_unregister(
e041c683 1826 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1827 break;
1828 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1829 ret = blocking_notifier_chain_unregister(
1830 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1831 break;
1832 default:
1833 ret = -EINVAL;
1834 }
1da177e4
LT
1835
1836 return ret;
1837}
1838EXPORT_SYMBOL(cpufreq_unregister_notifier);
1839
1840
1841/*********************************************************************
1842 * GOVERNORS *
1843 *********************************************************************/
1844
1c03a2d0
VK
1845/* Must set freqs->new to intermediate frequency */
1846static int __target_intermediate(struct cpufreq_policy *policy,
1847 struct cpufreq_freqs *freqs, int index)
1848{
1849 int ret;
1850
1851 freqs->new = cpufreq_driver->get_intermediate(policy, index);
1852
1853 /* We don't need to switch to intermediate freq */
1854 if (!freqs->new)
1855 return 0;
1856
1857 pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
1858 __func__, policy->cpu, freqs->old, freqs->new);
1859
1860 cpufreq_freq_transition_begin(policy, freqs);
1861 ret = cpufreq_driver->target_intermediate(policy, index);
1862 cpufreq_freq_transition_end(policy, freqs, ret);
1863
1864 if (ret)
1865 pr_err("%s: Failed to change to intermediate frequency: %d\n",
1866 __func__, ret);
1867
1868 return ret;
1869}
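
/*
 * Editorial example (hypothetical hardware): a driver whose clock must be
 * parked on a stable intermediate rate before the PLL is reprogrammed
 * supplies ->get_intermediate() and ->target_intermediate(); the core then
 * calls them from __target_index() around the actual ->target_index()
 * switch. Returning 0 from get_intermediate skips the extra hop.
 */
#if 0	/* illustrative only */
static unsigned int example_get_intermediate(struct cpufreq_policy *policy,
					     unsigned int index)
{
	return 500000;	/* kHz; assumed safe parent rate */
}

static int example_target_intermediate(struct cpufreq_policy *policy,
				       unsigned int index)
{
	/* re-parent the CPU clock to the intermediate rate here */
	return 0;
}
#endif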
1870
8d65775d
VK
1871static int __target_index(struct cpufreq_policy *policy,
1872 struct cpufreq_frequency_table *freq_table, int index)
1873{
1c03a2d0
VK
1874 struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
1875 unsigned int intermediate_freq = 0;
8d65775d
VK
1876 int retval = -EINVAL;
1877 bool notify;
1878
1879 notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
8d65775d 1880 if (notify) {
1c03a2d0
VK
1881 /* Handle switching to intermediate frequency */
1882 if (cpufreq_driver->get_intermediate) {
1883 retval = __target_intermediate(policy, &freqs, index);
1884 if (retval)
1885 return retval;
1886
1887 intermediate_freq = freqs.new;
1888 /* Set old freq to intermediate */
1889 if (intermediate_freq)
1890 freqs.old = freqs.new;
1891 }
8d65775d 1892
1c03a2d0 1893 freqs.new = freq_table[index].frequency;
8d65775d
VK
1894 pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
1895 __func__, policy->cpu, freqs.old, freqs.new);
1896
1897 cpufreq_freq_transition_begin(policy, &freqs);
1898 }
1899
1900 retval = cpufreq_driver->target_index(policy, index);
1901 if (retval)
1902 pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
1903 retval);
1904
1c03a2d0 1905 if (notify) {
8d65775d
VK
1906 cpufreq_freq_transition_end(policy, &freqs, retval);
1907
1c03a2d0
VK
1908 /*
 1909 * Failed after setting to intermediate freq? The driver should have
 1910 * reverted back to the initial frequency, and so should we. Check
 1911 * here for intermediate_freq instead of get_intermediate, in
 1912 * case we haven't switched to the intermediate freq at all.
1913 */
1914 if (unlikely(retval && intermediate_freq)) {
1915 freqs.old = intermediate_freq;
1916 freqs.new = policy->restore_freq;
1917 cpufreq_freq_transition_begin(policy, &freqs);
1918 cpufreq_freq_transition_end(policy, &freqs, 0);
1919 }
1920 }
1921
8d65775d
VK
1922 return retval;
1923}
1924
1da177e4
LT
1925int __cpufreq_driver_target(struct cpufreq_policy *policy,
1926 unsigned int target_freq,
1927 unsigned int relation)
1928{
7249924e 1929 unsigned int old_target_freq = target_freq;
8d65775d 1930 int retval = -EINVAL;
c32b6b8e 1931
a7b422cd
KRW
1932 if (cpufreq_disabled())
1933 return -ENODEV;
1934
7249924e
VK
1935 /* Make sure that target_freq is within supported range */
1936 if (target_freq > policy->max)
1937 target_freq = policy->max;
1938 if (target_freq < policy->min)
1939 target_freq = policy->min;
1940
1941 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
e837f9b5 1942 policy->cpu, target_freq, relation, old_target_freq);
5a1c0228 1943
9c0ebcf7
VK
1944 /*
 1945 * This might look like a redundant call, as we check it again
 1946 * after finding the index. But it is left intentionally: when exactly
 1947 * the same frequency is requested again, it saves a few function
 1948 * calls.
1949 */
5a1c0228
VK
1950 if (target_freq == policy->cur)
1951 return 0;
1952
1c03a2d0
VK
1953 /* Save last value to restore later on errors */
1954 policy->restore_freq = policy->cur;
1955
1c3d85dd
RW
1956 if (cpufreq_driver->target)
1957 retval = cpufreq_driver->target(policy, target_freq, relation);
9c0ebcf7
VK
1958 else if (cpufreq_driver->target_index) {
1959 struct cpufreq_frequency_table *freq_table;
1960 int index;
90d45d17 1961
9c0ebcf7
VK
1962 freq_table = cpufreq_frequency_get_table(policy->cpu);
1963 if (unlikely(!freq_table)) {
1964 pr_err("%s: Unable to find freq_table\n", __func__);
1965 goto out;
1966 }
1967
1968 retval = cpufreq_frequency_table_target(policy, freq_table,
1969 target_freq, relation, &index);
1970 if (unlikely(retval)) {
1971 pr_err("%s: Unable to find matching freq\n", __func__);
1972 goto out;
1973 }
1974
d4019f0a 1975 if (freq_table[index].frequency == policy->cur) {
9c0ebcf7 1976 retval = 0;
d4019f0a
VK
1977 goto out;
1978 }
1979
8d65775d 1980 retval = __target_index(policy, freq_table, index);
9c0ebcf7
VK
1981 }
1982
1983out:
1da177e4
LT
1984 return retval;
1985}
1986EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1987
1da177e4
LT
1988int cpufreq_driver_target(struct cpufreq_policy *policy,
1989 unsigned int target_freq,
1990 unsigned int relation)
1991{
f1829e4a 1992 int ret = -EINVAL;
1da177e4 1993
ad7722da 1994 down_write(&policy->rwsem);
1da177e4
LT
1995
1996 ret = __cpufreq_driver_target(policy, target_freq, relation);
1997
ad7722da 1998 up_write(&policy->rwsem);
1da177e4 1999
1da177e4
LT
2000 return ret;
2001}
2002EXPORT_SYMBOL_GPL(cpufreq_driver_target);
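
/*
 * Editorial example: the relation argument chooses the rounding when the
 * exact target is not available - CPUFREQ_RELATION_L picks the lowest
 * frequency at or above the target, CPUFREQ_RELATION_H the highest at or
 * below it. A hypothetical thermal throttle might do:
 */
#if 0	/* illustrative only */
static int example_throttle(struct cpufreq_policy *policy)
{
	/* back off to the highest table entry not above 1 GHz */
	return cpufreq_driver_target(policy, 1000000, CPUFREQ_RELATION_H);
}
#endif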
2003
e08f5f5b
GS
2004static int __cpufreq_governor(struct cpufreq_policy *policy,
2005 unsigned int event)
1da177e4 2006{
cc993cab 2007 int ret;
6afde10c
TR
2008
 2009 /* gov must only be defined when the default governor is known to have
 2010 latency restrictions, e.g. conservative or ondemand.
 2011 That this is the case is already ensured in Kconfig.
2012 */
2013#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
2014 struct cpufreq_governor *gov = &cpufreq_gov_performance;
2015#else
2016 struct cpufreq_governor *gov = NULL;
2017#endif
1c256245 2018
2f0aea93
VK
2019 /* Don't start any governor operations if we are entering suspend */
2020 if (cpufreq_suspended)
2021 return 0;
cb57720b
EZ
2022 /*
 2023 * The governor might not be initialized here if an ACPI _PPC change
 2024 * notification happened, so check it.
2025 */
2026 if (!policy->governor)
2027 return -EINVAL;
2f0aea93 2028
1c256245
TR
2029 if (policy->governor->max_transition_latency &&
2030 policy->cpuinfo.transition_latency >
2031 policy->governor->max_transition_latency) {
6afde10c
TR
2032 if (!gov)
2033 return -EINVAL;
2034 else {
e837f9b5
JP
2035 pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
2036 policy->governor->name, gov->name);
6afde10c
TR
2037 policy->governor = gov;
2038 }
1c256245 2039 }
1da177e4 2040
fe492f3f
VK
2041 if (event == CPUFREQ_GOV_POLICY_INIT)
2042 if (!try_module_get(policy->governor->owner))
2043 return -EINVAL;
1da177e4 2044
2d06d8c4 2045 pr_debug("__cpufreq_governor for CPU %u, event %u\n",
e837f9b5 2046 policy->cpu, event);
95731ebb
XC
2047
2048 mutex_lock(&cpufreq_governor_lock);
56d07db2 2049 if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
f73d3933
VK
2050 || (!policy->governor_enabled
2051 && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
95731ebb
XC
2052 mutex_unlock(&cpufreq_governor_lock);
2053 return -EBUSY;
2054 }
2055
2056 if (event == CPUFREQ_GOV_STOP)
2057 policy->governor_enabled = false;
2058 else if (event == CPUFREQ_GOV_START)
2059 policy->governor_enabled = true;
2060
2061 mutex_unlock(&cpufreq_governor_lock);
2062
1da177e4
LT
2063 ret = policy->governor->governor(policy, event);
2064
4d5dcc42
VK
2065 if (!ret) {
2066 if (event == CPUFREQ_GOV_POLICY_INIT)
2067 policy->governor->initialized++;
2068 else if (event == CPUFREQ_GOV_POLICY_EXIT)
2069 policy->governor->initialized--;
95731ebb
XC
2070 } else {
2071 /* Restore original values */
2072 mutex_lock(&cpufreq_governor_lock);
2073 if (event == CPUFREQ_GOV_STOP)
2074 policy->governor_enabled = true;
2075 else if (event == CPUFREQ_GOV_START)
2076 policy->governor_enabled = false;
2077 mutex_unlock(&cpufreq_governor_lock);
4d5dcc42 2078 }
b394058f 2079
fe492f3f
VK
2080 if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
2081 ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
1da177e4
LT
2082 module_put(policy->governor->owner);
2083
2084 return ret;
2085}
2086
1da177e4
LT
2087int cpufreq_register_governor(struct cpufreq_governor *governor)
2088{
3bcb09a3 2089 int err;
1da177e4
LT
2090
2091 if (!governor)
2092 return -EINVAL;
2093
a7b422cd
KRW
2094 if (cpufreq_disabled())
2095 return -ENODEV;
2096
3fc54d37 2097 mutex_lock(&cpufreq_governor_mutex);
32ee8c3e 2098
b394058f 2099 governor->initialized = 0;
3bcb09a3 2100 err = -EBUSY;
42f91fa1 2101 if (!find_governor(governor->name)) {
3bcb09a3
JF
2102 err = 0;
2103 list_add(&governor->governor_list, &cpufreq_governor_list);
1da177e4 2104 }
1da177e4 2105
32ee8c3e 2106 mutex_unlock(&cpufreq_governor_mutex);
3bcb09a3 2107 return err;
1da177e4
LT
2108}
2109EXPORT_SYMBOL_GPL(cpufreq_register_governor);
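
/*
 * Editorial example: the minimal shape of a governor in this scheme - a
 * single ->governor() callback multiplexed over the CPUFREQ_GOV_* events.
 * This sketch pins the policy to its maximum, performance-style; it is
 * illustrative, not a real in-tree governor.
 */
#if 0	/* illustrative only */
static int example_gov_cb(struct cpufreq_policy *policy, unsigned int event)
{
	switch (event) {
	case CPUFREQ_GOV_START:
	case CPUFREQ_GOV_LIMITS:
		return __cpufreq_driver_target(policy, policy->max,
					       CPUFREQ_RELATION_H);
	default:
		return 0;
	}
}

static struct cpufreq_governor example_gov = {
	.name		= "example",
	.governor	= example_gov_cb,
	.owner		= THIS_MODULE,
};

/* module init would then call cpufreq_register_governor(&example_gov); */
#endif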
2110
1da177e4
LT
2111void cpufreq_unregister_governor(struct cpufreq_governor *governor)
2112{
90e41bac 2113 int cpu;
90e41bac 2114
1da177e4
LT
2115 if (!governor)
2116 return;
2117
a7b422cd
KRW
2118 if (cpufreq_disabled())
2119 return;
2120
90e41bac
PB
2121 for_each_present_cpu(cpu) {
2122 if (cpu_online(cpu))
2123 continue;
2124 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
2125 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
2126 }
90e41bac 2127
3fc54d37 2128 mutex_lock(&cpufreq_governor_mutex);
1da177e4 2129 list_del(&governor->governor_list);
3fc54d37 2130 mutex_unlock(&cpufreq_governor_mutex);
1da177e4
LT
2131 return;
2132}
2133EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
2134
2135
1da177e4
LT
2136/*********************************************************************
2137 * POLICY INTERFACE *
2138 *********************************************************************/
2139
2140/**
2141 * cpufreq_get_policy - get the current cpufreq_policy
29464f28
DJ
2142 * @policy: struct cpufreq_policy into which the current cpufreq_policy
2143 * is written
1da177e4
LT
2144 *
2145 * Reads the current cpufreq policy.
2146 */
2147int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
2148{
2149 struct cpufreq_policy *cpu_policy;
2150 if (!policy)
2151 return -EINVAL;
2152
2153 cpu_policy = cpufreq_cpu_get(cpu);
2154 if (!cpu_policy)
2155 return -EINVAL;
2156
d5b73cd8 2157 memcpy(policy, cpu_policy, sizeof(*policy));
1da177e4
LT
2158
2159 cpufreq_cpu_put(cpu_policy);
1da177e4
LT
2160 return 0;
2161}
2162EXPORT_SYMBOL(cpufreq_get_policy);
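
/*
 * Editorial example: cpufreq_get_policy() copies a snapshot into the
 * caller's buffer, so no locking is needed once it returns.
 */
#if 0	/* illustrative only */
static void example_show_limits(unsigned int cpu)
{
	struct cpufreq_policy snap;

	if (!cpufreq_get_policy(&snap, cpu))
		pr_info("cpu%u: %u..%u kHz\n", cpu, snap.min, snap.max);
}
#endif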
2163
153d7f3f 2164/*
037ce839
VK
2165 * policy : current policy.
2166 * new_policy: policy to be set.
153d7f3f 2167 */
037ce839 2168static int cpufreq_set_policy(struct cpufreq_policy *policy,
3a3e9e06 2169 struct cpufreq_policy *new_policy)
1da177e4 2170{
d9a789c7
RW
2171 struct cpufreq_governor *old_gov;
2172 int ret;
1da177e4 2173
e837f9b5
JP
2174 pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
2175 new_policy->cpu, new_policy->min, new_policy->max);
1da177e4 2176
d5b73cd8 2177 memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
1da177e4 2178
d9a789c7
RW
2179 if (new_policy->min > policy->max || new_policy->max < policy->min)
2180 return -EINVAL;
9c9a43ed 2181
1da177e4 2182 /* verify the cpu speed can be set within this limit */
3a3e9e06 2183 ret = cpufreq_driver->verify(new_policy);
1da177e4 2184 if (ret)
d9a789c7 2185 return ret;
1da177e4 2186
1da177e4 2187 /* adjust if necessary - all reasons */
e041c683 2188 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
3a3e9e06 2189 CPUFREQ_ADJUST, new_policy);
1da177e4
LT
2190
2191 /* adjust if necessary - hardware incompatibility*/
e041c683 2192 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
3a3e9e06 2193 CPUFREQ_INCOMPATIBLE, new_policy);
1da177e4 2194
bb176f7d
VK
2195 /*
2196 * verify the cpu speed can be set within this limit, which might be
 2197 * different from the first one
2198 */
3a3e9e06 2199 ret = cpufreq_driver->verify(new_policy);
e041c683 2200 if (ret)
d9a789c7 2201 return ret;
1da177e4
LT
2202
2203 /* notification of the new policy */
e041c683 2204 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
3a3e9e06 2205 CPUFREQ_NOTIFY, new_policy);
1da177e4 2206
3a3e9e06
VK
2207 policy->min = new_policy->min;
2208 policy->max = new_policy->max;
1da177e4 2209
2d06d8c4 2210 pr_debug("new min and max freqs are %u - %u kHz\n",
e837f9b5 2211 policy->min, policy->max);
1da177e4 2212
1c3d85dd 2213 if (cpufreq_driver->setpolicy) {
3a3e9e06 2214 policy->policy = new_policy->policy;
2d06d8c4 2215 pr_debug("setting range\n");
d9a789c7
RW
2216 return cpufreq_driver->setpolicy(new_policy);
2217 }
1da177e4 2218
d9a789c7
RW
2219 if (new_policy->governor == policy->governor)
2220 goto out;
7bd353a9 2221
d9a789c7
RW
2222 pr_debug("governor switch\n");
2223
2224 /* save old, working values */
2225 old_gov = policy->governor;
2226 /* end old governor */
2227 if (old_gov) {
2228 __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
2229 up_write(&policy->rwsem);
e5c87b76 2230 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
d9a789c7 2231 down_write(&policy->rwsem);
1da177e4
LT
2232 }
2233
d9a789c7
RW
2234 /* start new governor */
2235 policy->governor = new_policy->governor;
2236 if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
2237 if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
2238 goto out;
2239
2240 up_write(&policy->rwsem);
2241 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2242 down_write(&policy->rwsem);
2243 }
2244
2245 /* new governor failed, so re-start old one */
2246 pr_debug("starting governor %s failed\n", policy->governor->name);
2247 if (old_gov) {
2248 policy->governor = old_gov;
2249 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
2250 __cpufreq_governor(policy, CPUFREQ_GOV_START);
2251 }
2252
2253 return -EINVAL;
2254
2255 out:
2256 pr_debug("governor: change or update limits\n");
2257 return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
1da177e4
LT
2258}
2259
1da177e4
LT
2260/**
2261 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
2262 * @cpu: CPU which shall be re-evaluated
2263 *
25985edc 2264 * Useful for policy notifiers which have different needs
1da177e4
LT
2265 * at different times.
2266 */
2267int cpufreq_update_policy(unsigned int cpu)
2268{
3a3e9e06
VK
2269 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
2270 struct cpufreq_policy new_policy;
f1829e4a 2271 int ret;
1da177e4 2272
fefa8ff8
AP
2273 if (!policy)
2274 return -ENODEV;
1da177e4 2275
ad7722da 2276 down_write(&policy->rwsem);
1da177e4 2277
2d06d8c4 2278 pr_debug("updating policy for CPU %u\n", cpu);
d5b73cd8 2279 memcpy(&new_policy, policy, sizeof(*policy));
3a3e9e06
VK
2280 new_policy.min = policy->user_policy.min;
2281 new_policy.max = policy->user_policy.max;
2282 new_policy.policy = policy->user_policy.policy;
2283 new_policy.governor = policy->user_policy.governor;
1da177e4 2284
bb176f7d
VK
2285 /*
2286 * BIOS might change freq behind our back
2287 * -> ask driver for current freq and notify governors about a change
2288 */
2ed99e39 2289 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
3a3e9e06 2290 new_policy.cur = cpufreq_driver->get(cpu);
bd0fa9bb
VK
2291 if (WARN_ON(!new_policy.cur)) {
2292 ret = -EIO;
fefa8ff8 2293 goto unlock;
bd0fa9bb
VK
2294 }
2295
3a3e9e06 2296 if (!policy->cur) {
e837f9b5 2297 pr_debug("Driver did not initialize current freq\n");
3a3e9e06 2298 policy->cur = new_policy.cur;
a85f7bd3 2299 } else {
9c0ebcf7 2300 if (policy->cur != new_policy.cur && has_target())
a1e1dc41 2301 cpufreq_out_of_sync(policy, new_policy.cur);
a85f7bd3 2302 }
0961dd0d
TR
2303 }
2304
037ce839 2305 ret = cpufreq_set_policy(policy, &new_policy);
1da177e4 2306
fefa8ff8 2307unlock:
ad7722da 2308 up_write(&policy->rwsem);
5a01f2e8 2309
3a3e9e06 2310 cpufreq_cpu_put(policy);
1da177e4
LT
2311 return ret;
2312}
2313EXPORT_SYMBOL(cpufreq_update_policy);
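
/*
 * Editorial example: platform code that learns of an external constraint
 * change, e.g. via an ACPI _PPC notification, simply asks the core to
 * re-evaluate the affected CPU's policy.
 */
#if 0	/* illustrative only */
static void example_ppc_changed(unsigned int cpu)
{
	if (cpufreq_update_policy(cpu))
		pr_warn("policy re-evaluation failed for cpu%u\n", cpu);
}
#endif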
2314
2760984f 2315static int cpufreq_cpu_callback(struct notifier_block *nfb,
c32b6b8e
AR
2316 unsigned long action, void *hcpu)
2317{
2318 unsigned int cpu = (unsigned long)hcpu;
8a25a2fd 2319 struct device *dev;
c32b6b8e 2320
8a25a2fd
KS
2321 dev = get_cpu_device(cpu);
2322 if (dev) {
5302c3fb 2323 switch (action & ~CPU_TASKS_FROZEN) {
c32b6b8e 2324 case CPU_ONLINE:
23faf0b7 2325 cpufreq_add_dev(dev, NULL);
c32b6b8e 2326 break;
5302c3fb 2327
c32b6b8e 2328 case CPU_DOWN_PREPARE:
96bbbe4a 2329 __cpufreq_remove_dev_prepare(dev, NULL);
1aee40ac
SB
2330 break;
2331
2332 case CPU_POST_DEAD:
96bbbe4a 2333 __cpufreq_remove_dev_finish(dev, NULL);
c32b6b8e 2334 break;
5302c3fb 2335
5a01f2e8 2336 case CPU_DOWN_FAILED:
23faf0b7 2337 cpufreq_add_dev(dev, NULL);
c32b6b8e
AR
2338 break;
2339 }
2340 }
2341 return NOTIFY_OK;
2342}
2343
9c36f746 2344static struct notifier_block __refdata cpufreq_cpu_notifier = {
bb176f7d 2345 .notifier_call = cpufreq_cpu_callback,
c32b6b8e 2346};
1da177e4 2347
6f19efc0
LM
2348/*********************************************************************
2349 * BOOST *
2350 *********************************************************************/
2351static int cpufreq_boost_set_sw(int state)
2352{
2353 struct cpufreq_frequency_table *freq_table;
2354 struct cpufreq_policy *policy;
2355 int ret = -EINVAL;
2356
b4f0676f 2357 for_each_policy(policy) {
6f19efc0
LM
2358 freq_table = cpufreq_frequency_get_table(policy->cpu);
2359 if (freq_table) {
2360 ret = cpufreq_frequency_table_cpuinfo(policy,
2361 freq_table);
2362 if (ret) {
2363 pr_err("%s: Policy frequency update failed\n",
2364 __func__);
2365 break;
2366 }
2367 policy->user_policy.max = policy->max;
2368 __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2369 }
2370 }
2371
2372 return ret;
2373}
2374
2375int cpufreq_boost_trigger_state(int state)
2376{
2377 unsigned long flags;
2378 int ret = 0;
2379
2380 if (cpufreq_driver->boost_enabled == state)
2381 return 0;
2382
2383 write_lock_irqsave(&cpufreq_driver_lock, flags);
2384 cpufreq_driver->boost_enabled = state;
2385 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2386
2387 ret = cpufreq_driver->set_boost(state);
2388 if (ret) {
2389 write_lock_irqsave(&cpufreq_driver_lock, flags);
2390 cpufreq_driver->boost_enabled = !state;
2391 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2392
e837f9b5
JP
2393 pr_err("%s: Cannot %s BOOST\n",
2394 __func__, state ? "enable" : "disable");
6f19efc0
LM
2395 }
2396
2397 return ret;
2398}
2399
2400int cpufreq_boost_supported(void)
2401{
2402 if (likely(cpufreq_driver))
2403 return cpufreq_driver->boost_supported;
2404
2405 return 0;
2406}
2407EXPORT_SYMBOL_GPL(cpufreq_boost_supported);
2408
2409int cpufreq_boost_enabled(void)
2410{
2411 return cpufreq_driver->boost_enabled;
2412}
2413EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
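
/*
 * Editorial example (hypothetical driver): a driver advertises boost
 * support by setting boost_supported; if it leaves ->set_boost NULL,
 * cpufreq_register_driver() below wires in the software fallback
 * cpufreq_boost_set_sw() defined above.
 */
#if 0	/* illustrative only */
static struct cpufreq_driver example_boost_driver = {
	.name		= "example-boost",
	.boost_supported = true,
	/* .set_boost left NULL -> core uses cpufreq_boost_set_sw() */
};
#endif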
2414
1da177e4
LT
2415/*********************************************************************
2416 * REGISTER / UNREGISTER CPUFREQ DRIVER *
2417 *********************************************************************/
2418
2419/**
2420 * cpufreq_register_driver - register a CPU Frequency driver
 2421 * @driver_data: A struct cpufreq_driver containing the values
2422 * submitted by the CPU Frequency driver.
2423 *
bb176f7d 2424 * Registers a CPU Frequency driver to this core code. This code
1da177e4 2425 * returns zero on success, -EEXIST when another driver got here first
32ee8c3e 2426 * (and isn't unregistered in the meantime).
1da177e4
LT
2427 *
2428 */
221dee28 2429int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1da177e4
LT
2430{
2431 unsigned long flags;
2432 int ret;
2433
a7b422cd
KRW
2434 if (cpufreq_disabled())
2435 return -ENODEV;
2436
1da177e4 2437 if (!driver_data || !driver_data->verify || !driver_data->init ||
9c0ebcf7 2438 !(driver_data->setpolicy || driver_data->target_index ||
9832235f
RW
2439 driver_data->target) ||
2440 (driver_data->setpolicy && (driver_data->target_index ||
1c03a2d0
VK
2441 driver_data->target)) ||
2442 (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
1da177e4
LT
2443 return -EINVAL;
2444
2d06d8c4 2445 pr_debug("trying to register driver %s\n", driver_data->name);
1da177e4 2446
0d1857a1 2447 write_lock_irqsave(&cpufreq_driver_lock, flags);
1c3d85dd 2448 if (cpufreq_driver) {
0d1857a1 2449 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
4dea5806 2450 return -EEXIST;
1da177e4 2451 }
1c3d85dd 2452 cpufreq_driver = driver_data;
0d1857a1 2453 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4 2454
bc68b7df
VK
2455 if (driver_data->setpolicy)
2456 driver_data->flags |= CPUFREQ_CONST_LOOPS;
2457
6f19efc0
LM
2458 if (cpufreq_boost_supported()) {
2459 /*
2460 * Check if driver provides function to enable boost -
2461 * if not, use cpufreq_boost_set_sw as default
2462 */
2463 if (!cpufreq_driver->set_boost)
2464 cpufreq_driver->set_boost = cpufreq_boost_set_sw;
2465
2466 ret = cpufreq_sysfs_create_file(&boost.attr);
2467 if (ret) {
2468 pr_err("%s: cannot register global BOOST sysfs file\n",
e837f9b5 2469 __func__);
6f19efc0
LM
2470 goto err_null_driver;
2471 }
2472 }
2473
8a25a2fd 2474 ret = subsys_interface_register(&cpufreq_interface);
8f5bc2ab 2475 if (ret)
6f19efc0 2476 goto err_boost_unreg;
1da177e4 2477
ce1bcfe9
VK
2478 if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
2479 list_empty(&cpufreq_policy_list)) {
1da177e4 2480 /* if all ->init() calls failed, unregister */
ce1bcfe9
VK
2481 pr_debug("%s: No CPU initialized for driver %s\n", __func__,
2482 driver_data->name);
2483 goto err_if_unreg;
1da177e4
LT
2484 }
2485
8f5bc2ab 2486 register_hotcpu_notifier(&cpufreq_cpu_notifier);
2d06d8c4 2487 pr_debug("driver %s up and running\n", driver_data->name);
1da177e4 2488
8f5bc2ab 2489 return 0;
8a25a2fd
KS
2490err_if_unreg:
2491 subsys_interface_unregister(&cpufreq_interface);
6f19efc0
LM
2492err_boost_unreg:
2493 if (cpufreq_boost_supported())
2494 cpufreq_sysfs_remove_file(&boost.attr);
8f5bc2ab 2495err_null_driver:
0d1857a1 2496 write_lock_irqsave(&cpufreq_driver_lock, flags);
1c3d85dd 2497 cpufreq_driver = NULL;
0d1857a1 2498 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
4d34a67d 2499 return ret;
1da177e4
LT
2500}
2501EXPORT_SYMBOL_GPL(cpufreq_register_driver);
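
/*
 * Editorial example (hypothetical frequencies): the minimal table-based
 * driver shape that the sanity checks above accept - ->init and ->verify
 * plus exactly one of ->setpolicy / ->target / ->target_index.
 */
#if 0	/* illustrative only */
static struct cpufreq_frequency_table example_table[] = {
	{ .frequency = 500000 },
	{ .frequency = 1000000 },
	{ .frequency = CPUFREQ_TABLE_END },
};

static int example_init(struct cpufreq_policy *policy)
{
	return cpufreq_table_validate_and_show(policy, example_table);
}

static int example_target_index(struct cpufreq_policy *policy,
				unsigned int index)
{
	/* program the hardware to example_table[index].frequency here */
	return 0;
}

static struct cpufreq_driver example_min_driver = {
	.name		= "example-min",
	.init		= example_init,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= example_target_index,
};

/* module init: return cpufreq_register_driver(&example_min_driver); */
#endif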
2502
1da177e4
LT
2503/**
2504 * cpufreq_unregister_driver - unregister the current CPUFreq driver
2505 *
bb176f7d 2506 * Unregister the current CPUFreq driver. Only call this if you have
1da177e4
LT
 2507 * the right to do so, i.e. if you have succeeded in initialising it before!
2508 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2509 * currently not initialised.
2510 */
221dee28 2511int cpufreq_unregister_driver(struct cpufreq_driver *driver)
1da177e4
LT
2512{
2513 unsigned long flags;
2514
1c3d85dd 2515 if (!cpufreq_driver || (driver != cpufreq_driver))
1da177e4 2516 return -EINVAL;
1da177e4 2517
2d06d8c4 2518 pr_debug("unregistering driver %s\n", driver->name);
1da177e4 2519
8a25a2fd 2520 subsys_interface_unregister(&cpufreq_interface);
6f19efc0
LM
2521 if (cpufreq_boost_supported())
2522 cpufreq_sysfs_remove_file(&boost.attr);
2523
65edc68c 2524 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
1da177e4 2525
6eed9404 2526 down_write(&cpufreq_rwsem);
0d1857a1 2527 write_lock_irqsave(&cpufreq_driver_lock, flags);
6eed9404 2528
1c3d85dd 2529 cpufreq_driver = NULL;
6eed9404 2530
0d1857a1 2531 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
6eed9404 2532 up_write(&cpufreq_rwsem);
1da177e4
LT
2533
2534 return 0;
2535}
2536EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
5a01f2e8 2537
90de2a4a
DA
2538/*
2539 * Stop cpufreq at shutdown to make sure it isn't holding any locks
2540 * or mutexes when secondary CPUs are halted.
2541 */
2542static struct syscore_ops cpufreq_syscore_ops = {
2543 .shutdown = cpufreq_suspend,
2544};
2545
5a01f2e8
VP
2546static int __init cpufreq_core_init(void)
2547{
a7b422cd
KRW
2548 if (cpufreq_disabled())
2549 return -ENODEV;
2550
2361be23 2551 cpufreq_global_kobject = kobject_create();
8aa84ad8
TR
2552 BUG_ON(!cpufreq_global_kobject);
2553
90de2a4a
DA
2554 register_syscore_ops(&cpufreq_syscore_ops);
2555
5a01f2e8
VP
2556 return 0;
2557}
5a01f2e8 2558core_initcall(cpufreq_core_init);