]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/cpufreq/cpufreq.c
cpufreq: Manage governor usage history with 'policy->last_governor'
[mirror_ubuntu-bionic-kernel.git] / drivers / cpufreq / cpufreq.c
CommitLineData
1da177e4
LT
1/*
2 * linux/drivers/cpufreq/cpufreq.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
bb176f7d 6 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
1da177e4 7 *
c32b6b8e 8 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
32ee8c3e 9 * Added handling for CPU hotplug
8ff69732
DJ
10 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
11 * Fix handling for CPU hotplug -- affected CPUs
c32b6b8e 12 *
1da177e4
LT
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
1da177e4
LT
16 */
17
db701151
VK
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
5ff0a268 20#include <linux/cpu.h>
1da177e4
LT
21#include <linux/cpufreq.h>
22#include <linux/delay.h>
1da177e4 23#include <linux/device.h>
5ff0a268
VK
24#include <linux/init.h>
25#include <linux/kernel_stat.h>
26#include <linux/module.h>
3fc54d37 27#include <linux/mutex.h>
5ff0a268 28#include <linux/slab.h>
2f0aea93 29#include <linux/suspend.h>
90de2a4a 30#include <linux/syscore_ops.h>
5ff0a268 31#include <linux/tick.h>
6f4f2723
TR
32#include <trace/events/power.h>
33
b4f0676f 34static LIST_HEAD(cpufreq_policy_list);
f963735a
VK
35
36static inline bool policy_is_inactive(struct cpufreq_policy *policy)
37{
38 return cpumask_empty(policy->cpus);
39}
40
41static bool suitable_policy(struct cpufreq_policy *policy, bool active)
42{
43 return active == !policy_is_inactive(policy);
44}
45
46/* Finds Next Acive/Inactive policy */
47static struct cpufreq_policy *next_policy(struct cpufreq_policy *policy,
48 bool active)
49{
50 do {
51 policy = list_next_entry(policy, policy_list);
52
53 /* No more policies in the list */
54 if (&policy->policy_list == &cpufreq_policy_list)
55 return NULL;
56 } while (!suitable_policy(policy, active));
57
58 return policy;
59}
60
61static struct cpufreq_policy *first_policy(bool active)
62{
63 struct cpufreq_policy *policy;
64
65 /* No policies in the list */
66 if (list_empty(&cpufreq_policy_list))
67 return NULL;
68
69 policy = list_first_entry(&cpufreq_policy_list, typeof(*policy),
70 policy_list);
71
72 if (!suitable_policy(policy, active))
73 policy = next_policy(policy, active);
74
75 return policy;
76}
77
78/* Macros to iterate over CPU policies */
79#define for_each_suitable_policy(__policy, __active) \
80 for (__policy = first_policy(__active); \
81 __policy; \
82 __policy = next_policy(__policy, __active))
83
84#define for_each_active_policy(__policy) \
85 for_each_suitable_policy(__policy, true)
86#define for_each_inactive_policy(__policy) \
87 for_each_suitable_policy(__policy, false)
88
89#define for_each_policy(__policy) \
b4f0676f
VK
90 list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)
91
f7b27061
VK
92/* Iterate over governors */
93static LIST_HEAD(cpufreq_governor_list);
94#define for_each_governor(__governor) \
95 list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
96
1da177e4 97/**
cd878479 98 * The "cpufreq driver" - the arch- or hardware-dependent low
1da177e4
LT
99 * level driver of CPUFreq support, and its spinlock. This lock
100 * also protects the cpufreq_cpu_data array.
101 */
1c3d85dd 102static struct cpufreq_driver *cpufreq_driver;
7a6aedfa 103static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
bb176f7d 104static DEFINE_RWLOCK(cpufreq_driver_lock);
6f1e4efd 105DEFINE_MUTEX(cpufreq_governor_lock);
bb176f7d 106
2f0aea93
VK
107/* Flag to suspend/resume CPUFreq governors */
108static bool cpufreq_suspended;
1da177e4 109
9c0ebcf7
VK
110static inline bool has_target(void)
111{
112 return cpufreq_driver->target_index || cpufreq_driver->target;
113}
114
6eed9404
VK
115/*
116 * rwsem to guarantee that cpufreq driver module doesn't unload during critical
117 * sections
118 */
119static DECLARE_RWSEM(cpufreq_rwsem);
120
1da177e4 121/* internal prototypes */
29464f28
DJ
122static int __cpufreq_governor(struct cpufreq_policy *policy,
123 unsigned int event);
d92d50a4 124static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
65f27f38 125static void handle_update(struct work_struct *work);
1da177e4
LT
126
127/**
32ee8c3e
DJ
128 * Two notifier lists: the "policy" list is involved in the
129 * validation process for a new CPU frequency policy; the
1da177e4
LT
130 * "transition" list for kernel code that needs to handle
131 * changes to devices when the CPU clock speed changes.
132 * The mutex locks both lists.
133 */
e041c683 134static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
b4dfdbb3 135static struct srcu_notifier_head cpufreq_transition_notifier_list;
1da177e4 136
74212ca4 137static bool init_cpufreq_transition_notifier_list_called;
b4dfdbb3
AS
138static int __init init_cpufreq_transition_notifier_list(void)
139{
140 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
74212ca4 141 init_cpufreq_transition_notifier_list_called = true;
b4dfdbb3
AS
142 return 0;
143}
b3438f82 144pure_initcall(init_cpufreq_transition_notifier_list);
1da177e4 145
a7b422cd 146static int off __read_mostly;
da584455 147static int cpufreq_disabled(void)
a7b422cd
KRW
148{
149 return off;
150}
151void disable_cpufreq(void)
152{
153 off = 1;
154}
29464f28 155static DEFINE_MUTEX(cpufreq_governor_mutex);
1da177e4 156
4d5dcc42
VK
157bool have_governor_per_policy(void)
158{
0b981e70 159 return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
4d5dcc42 160}
3f869d6d 161EXPORT_SYMBOL_GPL(have_governor_per_policy);
4d5dcc42 162
944e9a03
VK
163struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
164{
165 if (have_governor_per_policy())
166 return &policy->kobj;
167 else
168 return cpufreq_global_kobject;
169}
170EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
171
72a4ce34
VK
172static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
173{
174 u64 idle_time;
175 u64 cur_wall_time;
176 u64 busy_time;
177
178 cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
179
180 busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
181 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
182 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
183 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
184 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
185 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
186
187 idle_time = cur_wall_time - busy_time;
188 if (wall)
189 *wall = cputime_to_usecs(cur_wall_time);
190
191 return cputime_to_usecs(idle_time);
192}
193
194u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
195{
196 u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
197
198 if (idle_time == -1ULL)
199 return get_cpu_idle_time_jiffy(cpu, wall);
200 else if (!io_busy)
201 idle_time += get_cpu_iowait_time_us(cpu, wall);
202
203 return idle_time;
204}
205EXPORT_SYMBOL_GPL(get_cpu_idle_time);
206
70e9e778
VK
207/*
208 * This is a generic cpufreq init() routine which can be used by cpufreq
209 * drivers of SMP systems. It will do following:
210 * - validate & show freq table passed
211 * - set policies transition latency
212 * - policy->cpus with all possible CPUs
213 */
214int cpufreq_generic_init(struct cpufreq_policy *policy,
215 struct cpufreq_frequency_table *table,
216 unsigned int transition_latency)
217{
218 int ret;
219
220 ret = cpufreq_table_validate_and_show(policy, table);
221 if (ret) {
222 pr_err("%s: invalid frequency table: %d\n", __func__, ret);
223 return ret;
224 }
225
226 policy->cpuinfo.transition_latency = transition_latency;
227
228 /*
229 * The driver only supports the SMP configuartion where all processors
230 * share the clock and voltage and clock.
231 */
232 cpumask_setall(policy->cpus);
233
234 return 0;
235}
236EXPORT_SYMBOL_GPL(cpufreq_generic_init);
237
988bed09
VK
238/* Only for cpufreq core internal use */
239struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
652ed95d
VK
240{
241 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
242
988bed09
VK
243 return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
244}
245
246unsigned int cpufreq_generic_get(unsigned int cpu)
247{
248 struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
249
652ed95d 250 if (!policy || IS_ERR(policy->clk)) {
e837f9b5
JP
251 pr_err("%s: No %s associated to cpu: %d\n",
252 __func__, policy ? "clk" : "policy", cpu);
652ed95d
VK
253 return 0;
254 }
255
256 return clk_get_rate(policy->clk) / 1000;
257}
258EXPORT_SYMBOL_GPL(cpufreq_generic_get);
259
50e9c852
VK
260/**
261 * cpufreq_cpu_get: returns policy for a cpu and marks it busy.
262 *
263 * @cpu: cpu to find policy for.
264 *
265 * This returns policy for 'cpu', returns NULL if it doesn't exist.
266 * It also increments the kobject reference count to mark it busy and so would
267 * require a corresponding call to cpufreq_cpu_put() to decrement it back.
268 * If corresponding call cpufreq_cpu_put() isn't made, the policy wouldn't be
269 * freed as that depends on the kobj count.
270 *
271 * It also takes a read-lock of 'cpufreq_rwsem' and doesn't put it back if a
272 * valid policy is found. This is done to make sure the driver doesn't get
273 * unregistered while the policy is being used.
274 *
275 * Return: A valid policy on success, otherwise NULL on failure.
276 */
6eed9404 277struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
1da177e4 278{
6eed9404 279 struct cpufreq_policy *policy = NULL;
1da177e4
LT
280 unsigned long flags;
281
1b947c90 282 if (WARN_ON(cpu >= nr_cpu_ids))
6eed9404
VK
283 return NULL;
284
285 if (!down_read_trylock(&cpufreq_rwsem))
286 return NULL;
1da177e4
LT
287
288 /* get the cpufreq driver */
1c3d85dd 289 read_lock_irqsave(&cpufreq_driver_lock, flags);
1da177e4 290
6eed9404
VK
291 if (cpufreq_driver) {
292 /* get the CPU */
988bed09 293 policy = cpufreq_cpu_get_raw(cpu);
6eed9404
VK
294 if (policy)
295 kobject_get(&policy->kobj);
296 }
1da177e4 297
6eed9404 298 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4 299
3a3e9e06 300 if (!policy)
6eed9404 301 up_read(&cpufreq_rwsem);
1da177e4 302
3a3e9e06 303 return policy;
a9144436 304}
1da177e4
LT
305EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
306
50e9c852
VK
307/**
308 * cpufreq_cpu_put: Decrements the usage count of a policy
309 *
310 * @policy: policy earlier returned by cpufreq_cpu_get().
311 *
312 * This decrements the kobject reference count incremented earlier by calling
313 * cpufreq_cpu_get().
314 *
315 * It also drops the read-lock of 'cpufreq_rwsem' taken at cpufreq_cpu_get().
316 */
3a3e9e06 317void cpufreq_cpu_put(struct cpufreq_policy *policy)
1da177e4 318{
6eed9404
VK
319 kobject_put(&policy->kobj);
320 up_read(&cpufreq_rwsem);
1da177e4
LT
321}
322EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
323
1da177e4
LT
324/*********************************************************************
325 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
326 *********************************************************************/
327
328/**
329 * adjust_jiffies - adjust the system "loops_per_jiffy"
330 *
331 * This function alters the system "loops_per_jiffy" for the clock
332 * speed change. Note that loops_per_jiffy cannot be updated on SMP
32ee8c3e 333 * systems as each CPU might be scaled differently. So, use the arch
1da177e4
LT
334 * per-CPU loops_per_jiffy value wherever possible.
335 */
858119e1 336static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
1da177e4 337{
39c132ee
VK
338#ifndef CONFIG_SMP
339 static unsigned long l_p_j_ref;
340 static unsigned int l_p_j_ref_freq;
341
1da177e4
LT
342 if (ci->flags & CPUFREQ_CONST_LOOPS)
343 return;
344
345 if (!l_p_j_ref_freq) {
346 l_p_j_ref = loops_per_jiffy;
347 l_p_j_ref_freq = ci->old;
e837f9b5
JP
348 pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
349 l_p_j_ref, l_p_j_ref_freq);
1da177e4 350 }
0b443ead 351 if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
e08f5f5b
GS
352 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
353 ci->new);
e837f9b5
JP
354 pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
355 loops_per_jiffy, ci->new);
1da177e4 356 }
1da177e4 357#endif
39c132ee 358}
1da177e4 359
0956df9c 360static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
b43a7ffb 361 struct cpufreq_freqs *freqs, unsigned int state)
1da177e4
LT
362{
363 BUG_ON(irqs_disabled());
364
d5aaffa9
DB
365 if (cpufreq_disabled())
366 return;
367
1c3d85dd 368 freqs->flags = cpufreq_driver->flags;
2d06d8c4 369 pr_debug("notification %u of frequency transition to %u kHz\n",
e837f9b5 370 state, freqs->new);
1da177e4 371
1da177e4 372 switch (state) {
e4472cb3 373
1da177e4 374 case CPUFREQ_PRECHANGE:
32ee8c3e 375 /* detect if the driver reported a value as "old frequency"
e4472cb3
DJ
376 * which is not equal to what the cpufreq core thinks is
377 * "old frequency".
1da177e4 378 */
1c3d85dd 379 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
e4472cb3
DJ
380 if ((policy) && (policy->cpu == freqs->cpu) &&
381 (policy->cur) && (policy->cur != freqs->old)) {
e837f9b5
JP
382 pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
383 freqs->old, policy->cur);
e4472cb3 384 freqs->old = policy->cur;
1da177e4
LT
385 }
386 }
b4dfdbb3 387 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
e041c683 388 CPUFREQ_PRECHANGE, freqs);
1da177e4
LT
389 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
390 break;
e4472cb3 391
1da177e4
LT
392 case CPUFREQ_POSTCHANGE:
393 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
e837f9b5
JP
394 pr_debug("FREQ: %lu - CPU: %lu\n",
395 (unsigned long)freqs->new, (unsigned long)freqs->cpu);
25e41933 396 trace_cpu_frequency(freqs->new, freqs->cpu);
b4dfdbb3 397 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
e041c683 398 CPUFREQ_POSTCHANGE, freqs);
e4472cb3
DJ
399 if (likely(policy) && likely(policy->cpu == freqs->cpu))
400 policy->cur = freqs->new;
1da177e4
LT
401 break;
402 }
1da177e4 403}
bb176f7d 404
b43a7ffb
VK
405/**
406 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
407 * on frequency transition.
408 *
409 * This function calls the transition notifiers and the "adjust_jiffies"
410 * function. It is called twice on all CPU frequency changes that have
411 * external effects.
412 */
236a9800 413static void cpufreq_notify_transition(struct cpufreq_policy *policy,
b43a7ffb
VK
414 struct cpufreq_freqs *freqs, unsigned int state)
415{
416 for_each_cpu(freqs->cpu, policy->cpus)
417 __cpufreq_notify_transition(policy, freqs, state);
418}
1da177e4 419
f7ba3b41 420/* Do post notifications when there are chances that transition has failed */
236a9800 421static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
f7ba3b41
VK
422 struct cpufreq_freqs *freqs, int transition_failed)
423{
424 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
425 if (!transition_failed)
426 return;
427
428 swap(freqs->old, freqs->new);
429 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
430 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
431}
f7ba3b41 432
12478cf0
SB
433void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
434 struct cpufreq_freqs *freqs)
435{
ca654dc3
SB
436
437 /*
438 * Catch double invocations of _begin() which lead to self-deadlock.
439 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
440 * doesn't invoke _begin() on their behalf, and hence the chances of
441 * double invocations are very low. Moreover, there are scenarios
442 * where these checks can emit false-positive warnings in these
443 * drivers; so we avoid that by skipping them altogether.
444 */
445 WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
446 && current == policy->transition_task);
447
12478cf0
SB
448wait:
449 wait_event(policy->transition_wait, !policy->transition_ongoing);
450
451 spin_lock(&policy->transition_lock);
452
453 if (unlikely(policy->transition_ongoing)) {
454 spin_unlock(&policy->transition_lock);
455 goto wait;
456 }
457
458 policy->transition_ongoing = true;
ca654dc3 459 policy->transition_task = current;
12478cf0
SB
460
461 spin_unlock(&policy->transition_lock);
462
463 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
464}
465EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
466
467void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
468 struct cpufreq_freqs *freqs, int transition_failed)
469{
470 if (unlikely(WARN_ON(!policy->transition_ongoing)))
471 return;
472
473 cpufreq_notify_post_transition(policy, freqs, transition_failed);
474
475 policy->transition_ongoing = false;
ca654dc3 476 policy->transition_task = NULL;
12478cf0
SB
477
478 wake_up(&policy->transition_wait);
479}
480EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
481
1da177e4 482
1da177e4
LT
483/*********************************************************************
484 * SYSFS INTERFACE *
485 *********************************************************************/
8a5c74a1 486static ssize_t show_boost(struct kobject *kobj,
6f19efc0
LM
487 struct attribute *attr, char *buf)
488{
489 return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
490}
491
492static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
493 const char *buf, size_t count)
494{
495 int ret, enable;
496
497 ret = sscanf(buf, "%d", &enable);
498 if (ret != 1 || enable < 0 || enable > 1)
499 return -EINVAL;
500
501 if (cpufreq_boost_trigger_state(enable)) {
e837f9b5
JP
502 pr_err("%s: Cannot %s BOOST!\n",
503 __func__, enable ? "enable" : "disable");
6f19efc0
LM
504 return -EINVAL;
505 }
506
e837f9b5
JP
507 pr_debug("%s: cpufreq BOOST %s\n",
508 __func__, enable ? "enabled" : "disabled");
6f19efc0
LM
509
510 return count;
511}
512define_one_global_rw(boost);
1da177e4 513
42f91fa1 514static struct cpufreq_governor *find_governor(const char *str_governor)
3bcb09a3
JF
515{
516 struct cpufreq_governor *t;
517
f7b27061 518 for_each_governor(t)
7c4f4539 519 if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
3bcb09a3
JF
520 return t;
521
522 return NULL;
523}
524
1da177e4
LT
525/**
526 * cpufreq_parse_governor - parse a governor string
527 */
905d77cd 528static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
1da177e4
LT
529 struct cpufreq_governor **governor)
530{
3bcb09a3 531 int err = -EINVAL;
1c3d85dd
RW
532
533 if (!cpufreq_driver)
3bcb09a3
JF
534 goto out;
535
1c3d85dd 536 if (cpufreq_driver->setpolicy) {
7c4f4539 537 if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
1da177e4 538 *policy = CPUFREQ_POLICY_PERFORMANCE;
3bcb09a3 539 err = 0;
7c4f4539 540 } else if (!strncasecmp(str_governor, "powersave",
e08f5f5b 541 CPUFREQ_NAME_LEN)) {
1da177e4 542 *policy = CPUFREQ_POLICY_POWERSAVE;
3bcb09a3 543 err = 0;
1da177e4 544 }
2e1cc3a5 545 } else {
1da177e4 546 struct cpufreq_governor *t;
3bcb09a3 547
3fc54d37 548 mutex_lock(&cpufreq_governor_mutex);
3bcb09a3 549
42f91fa1 550 t = find_governor(str_governor);
3bcb09a3 551
ea714970 552 if (t == NULL) {
1a8e1463 553 int ret;
ea714970 554
1a8e1463
KC
555 mutex_unlock(&cpufreq_governor_mutex);
556 ret = request_module("cpufreq_%s", str_governor);
557 mutex_lock(&cpufreq_governor_mutex);
ea714970 558
1a8e1463 559 if (ret == 0)
42f91fa1 560 t = find_governor(str_governor);
ea714970
JF
561 }
562
3bcb09a3
JF
563 if (t != NULL) {
564 *governor = t;
565 err = 0;
1da177e4 566 }
3bcb09a3 567
3fc54d37 568 mutex_unlock(&cpufreq_governor_mutex);
1da177e4 569 }
29464f28 570out:
3bcb09a3 571 return err;
1da177e4 572}
1da177e4 573
1da177e4 574/**
e08f5f5b
GS
575 * cpufreq_per_cpu_attr_read() / show_##file_name() -
576 * print out cpufreq information
1da177e4
LT
577 *
578 * Write out information from cpufreq_driver->policy[cpu]; object must be
579 * "unsigned int".
580 */
581
32ee8c3e
DJ
582#define show_one(file_name, object) \
583static ssize_t show_##file_name \
905d77cd 584(struct cpufreq_policy *policy, char *buf) \
32ee8c3e 585{ \
29464f28 586 return sprintf(buf, "%u\n", policy->object); \
1da177e4
LT
587}
588
589show_one(cpuinfo_min_freq, cpuinfo.min_freq);
590show_one(cpuinfo_max_freq, cpuinfo.max_freq);
ed129784 591show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
1da177e4
LT
592show_one(scaling_min_freq, min);
593show_one(scaling_max_freq, max);
c034b02e 594
09347b29 595static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
c034b02e
DB
596{
597 ssize_t ret;
598
599 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
600 ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
601 else
602 ret = sprintf(buf, "%u\n", policy->cur);
603 return ret;
604}
1da177e4 605
037ce839 606static int cpufreq_set_policy(struct cpufreq_policy *policy,
3a3e9e06 607 struct cpufreq_policy *new_policy);
7970e08b 608
1da177e4
LT
609/**
610 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
611 */
612#define store_one(file_name, object) \
613static ssize_t store_##file_name \
905d77cd 614(struct cpufreq_policy *policy, const char *buf, size_t count) \
1da177e4 615{ \
619c144c 616 int ret, temp; \
1da177e4
LT
617 struct cpufreq_policy new_policy; \
618 \
619 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
620 if (ret) \
621 return -EINVAL; \
622 \
29464f28 623 ret = sscanf(buf, "%u", &new_policy.object); \
1da177e4
LT
624 if (ret != 1) \
625 return -EINVAL; \
626 \
619c144c 627 temp = new_policy.object; \
037ce839 628 ret = cpufreq_set_policy(policy, &new_policy); \
619c144c
VH
629 if (!ret) \
630 policy->user_policy.object = temp; \
1da177e4
LT
631 \
632 return ret ? ret : count; \
633}
634
29464f28
DJ
635store_one(scaling_min_freq, min);
636store_one(scaling_max_freq, max);
1da177e4
LT
637
638/**
639 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
640 */
905d77cd
DJ
641static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
642 char *buf)
1da177e4 643{
d92d50a4 644 unsigned int cur_freq = __cpufreq_get(policy);
1da177e4
LT
645 if (!cur_freq)
646 return sprintf(buf, "<unknown>");
647 return sprintf(buf, "%u\n", cur_freq);
648}
649
1da177e4
LT
650/**
651 * show_scaling_governor - show the current policy for the specified CPU
652 */
905d77cd 653static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
1da177e4 654{
29464f28 655 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
1da177e4
LT
656 return sprintf(buf, "powersave\n");
657 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
658 return sprintf(buf, "performance\n");
659 else if (policy->governor)
4b972f0b 660 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
29464f28 661 policy->governor->name);
1da177e4
LT
662 return -EINVAL;
663}
664
1da177e4
LT
665/**
666 * store_scaling_governor - store policy for the specified CPU
667 */
905d77cd
DJ
668static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
669 const char *buf, size_t count)
1da177e4 670{
5136fa56 671 int ret;
1da177e4
LT
672 char str_governor[16];
673 struct cpufreq_policy new_policy;
674
675 ret = cpufreq_get_policy(&new_policy, policy->cpu);
676 if (ret)
677 return ret;
678
29464f28 679 ret = sscanf(buf, "%15s", str_governor);
1da177e4
LT
680 if (ret != 1)
681 return -EINVAL;
682
e08f5f5b
GS
683 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
684 &new_policy.governor))
1da177e4
LT
685 return -EINVAL;
686
037ce839 687 ret = cpufreq_set_policy(policy, &new_policy);
7970e08b
TR
688
689 policy->user_policy.policy = policy->policy;
690 policy->user_policy.governor = policy->governor;
7970e08b 691
e08f5f5b
GS
692 if (ret)
693 return ret;
694 else
695 return count;
1da177e4
LT
696}
697
698/**
699 * show_scaling_driver - show the cpufreq driver currently loaded
700 */
905d77cd 701static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
1da177e4 702{
1c3d85dd 703 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
1da177e4
LT
704}
705
706/**
707 * show_scaling_available_governors - show the available CPUfreq governors
708 */
905d77cd
DJ
709static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
710 char *buf)
1da177e4
LT
711{
712 ssize_t i = 0;
713 struct cpufreq_governor *t;
714
9c0ebcf7 715 if (!has_target()) {
1da177e4
LT
716 i += sprintf(buf, "performance powersave");
717 goto out;
718 }
719
f7b27061 720 for_each_governor(t) {
29464f28
DJ
721 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
722 - (CPUFREQ_NAME_LEN + 2)))
1da177e4 723 goto out;
4b972f0b 724 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
1da177e4 725 }
7d5e350f 726out:
1da177e4
LT
727 i += sprintf(&buf[i], "\n");
728 return i;
729}
e8628dd0 730
f4fd3797 731ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
1da177e4
LT
732{
733 ssize_t i = 0;
734 unsigned int cpu;
735
835481d9 736 for_each_cpu(cpu, mask) {
1da177e4
LT
737 if (i)
738 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
739 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
740 if (i >= (PAGE_SIZE - 5))
29464f28 741 break;
1da177e4
LT
742 }
743 i += sprintf(&buf[i], "\n");
744 return i;
745}
f4fd3797 746EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
1da177e4 747
e8628dd0
DW
748/**
749 * show_related_cpus - show the CPUs affected by each transition even if
750 * hw coordination is in use
751 */
752static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
753{
f4fd3797 754 return cpufreq_show_cpus(policy->related_cpus, buf);
e8628dd0
DW
755}
756
757/**
758 * show_affected_cpus - show the CPUs affected by each transition
759 */
760static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
761{
f4fd3797 762 return cpufreq_show_cpus(policy->cpus, buf);
e8628dd0
DW
763}
764
9e76988e 765static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
905d77cd 766 const char *buf, size_t count)
9e76988e
VP
767{
768 unsigned int freq = 0;
769 unsigned int ret;
770
879000f9 771 if (!policy->governor || !policy->governor->store_setspeed)
9e76988e
VP
772 return -EINVAL;
773
774 ret = sscanf(buf, "%u", &freq);
775 if (ret != 1)
776 return -EINVAL;
777
778 policy->governor->store_setspeed(policy, freq);
779
780 return count;
781}
782
783static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
784{
879000f9 785 if (!policy->governor || !policy->governor->show_setspeed)
9e76988e
VP
786 return sprintf(buf, "<unsupported>\n");
787
788 return policy->governor->show_setspeed(policy, buf);
789}
1da177e4 790
e2f74f35 791/**
8bf1ac72 792 * show_bios_limit - show the current cpufreq HW/BIOS limitation
e2f74f35
TR
793 */
794static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
795{
796 unsigned int limit;
797 int ret;
1c3d85dd
RW
798 if (cpufreq_driver->bios_limit) {
799 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
e2f74f35
TR
800 if (!ret)
801 return sprintf(buf, "%u\n", limit);
802 }
803 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
804}
805
6dad2a29
BP
806cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
807cpufreq_freq_attr_ro(cpuinfo_min_freq);
808cpufreq_freq_attr_ro(cpuinfo_max_freq);
809cpufreq_freq_attr_ro(cpuinfo_transition_latency);
810cpufreq_freq_attr_ro(scaling_available_governors);
811cpufreq_freq_attr_ro(scaling_driver);
812cpufreq_freq_attr_ro(scaling_cur_freq);
813cpufreq_freq_attr_ro(bios_limit);
814cpufreq_freq_attr_ro(related_cpus);
815cpufreq_freq_attr_ro(affected_cpus);
816cpufreq_freq_attr_rw(scaling_min_freq);
817cpufreq_freq_attr_rw(scaling_max_freq);
818cpufreq_freq_attr_rw(scaling_governor);
819cpufreq_freq_attr_rw(scaling_setspeed);
1da177e4 820
905d77cd 821static struct attribute *default_attrs[] = {
1da177e4
LT
822 &cpuinfo_min_freq.attr,
823 &cpuinfo_max_freq.attr,
ed129784 824 &cpuinfo_transition_latency.attr,
1da177e4
LT
825 &scaling_min_freq.attr,
826 &scaling_max_freq.attr,
827 &affected_cpus.attr,
e8628dd0 828 &related_cpus.attr,
1da177e4
LT
829 &scaling_governor.attr,
830 &scaling_driver.attr,
831 &scaling_available_governors.attr,
9e76988e 832 &scaling_setspeed.attr,
1da177e4
LT
833 NULL
834};
835
29464f28
DJ
836#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
837#define to_attr(a) container_of(a, struct freq_attr, attr)
1da177e4 838
29464f28 839static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
1da177e4 840{
905d77cd
DJ
841 struct cpufreq_policy *policy = to_policy(kobj);
842 struct freq_attr *fattr = to_attr(attr);
1b750e3b 843 ssize_t ret;
6eed9404
VK
844
845 if (!down_read_trylock(&cpufreq_rwsem))
1b750e3b 846 return -EINVAL;
5a01f2e8 847
ad7722da 848 down_read(&policy->rwsem);
5a01f2e8 849
e08f5f5b
GS
850 if (fattr->show)
851 ret = fattr->show(policy, buf);
852 else
853 ret = -EIO;
854
ad7722da 855 up_read(&policy->rwsem);
6eed9404 856 up_read(&cpufreq_rwsem);
1b750e3b 857
1da177e4
LT
858 return ret;
859}
860
905d77cd
DJ
861static ssize_t store(struct kobject *kobj, struct attribute *attr,
862 const char *buf, size_t count)
1da177e4 863{
905d77cd
DJ
864 struct cpufreq_policy *policy = to_policy(kobj);
865 struct freq_attr *fattr = to_attr(attr);
a07530b4 866 ssize_t ret = -EINVAL;
6eed9404 867
4f750c93
SB
868 get_online_cpus();
869
870 if (!cpu_online(policy->cpu))
871 goto unlock;
872
6eed9404 873 if (!down_read_trylock(&cpufreq_rwsem))
4f750c93 874 goto unlock;
5a01f2e8 875
ad7722da 876 down_write(&policy->rwsem);
5a01f2e8 877
e08f5f5b
GS
878 if (fattr->store)
879 ret = fattr->store(policy, buf, count);
880 else
881 ret = -EIO;
882
ad7722da 883 up_write(&policy->rwsem);
6eed9404 884
6eed9404 885 up_read(&cpufreq_rwsem);
4f750c93
SB
886unlock:
887 put_online_cpus();
888
1da177e4
LT
889 return ret;
890}
891
905d77cd 892static void cpufreq_sysfs_release(struct kobject *kobj)
1da177e4 893{
905d77cd 894 struct cpufreq_policy *policy = to_policy(kobj);
2d06d8c4 895 pr_debug("last reference is dropped\n");
1da177e4
LT
896 complete(&policy->kobj_unregister);
897}
898
52cf25d0 899static const struct sysfs_ops sysfs_ops = {
1da177e4
LT
900 .show = show,
901 .store = store,
902};
903
904static struct kobj_type ktype_cpufreq = {
905 .sysfs_ops = &sysfs_ops,
906 .default_attrs = default_attrs,
907 .release = cpufreq_sysfs_release,
908};
909
2361be23
VK
910struct kobject *cpufreq_global_kobject;
911EXPORT_SYMBOL(cpufreq_global_kobject);
912
913static int cpufreq_global_kobject_usage;
914
915int cpufreq_get_global_kobject(void)
916{
917 if (!cpufreq_global_kobject_usage++)
918 return kobject_add(cpufreq_global_kobject,
919 &cpu_subsys.dev_root->kobj, "%s", "cpufreq");
920
921 return 0;
922}
923EXPORT_SYMBOL(cpufreq_get_global_kobject);
924
925void cpufreq_put_global_kobject(void)
926{
927 if (!--cpufreq_global_kobject_usage)
928 kobject_del(cpufreq_global_kobject);
929}
930EXPORT_SYMBOL(cpufreq_put_global_kobject);
931
932int cpufreq_sysfs_create_file(const struct attribute *attr)
933{
934 int ret = cpufreq_get_global_kobject();
935
936 if (!ret) {
937 ret = sysfs_create_file(cpufreq_global_kobject, attr);
938 if (ret)
939 cpufreq_put_global_kobject();
940 }
941
942 return ret;
943}
944EXPORT_SYMBOL(cpufreq_sysfs_create_file);
945
946void cpufreq_sysfs_remove_file(const struct attribute *attr)
947{
948 sysfs_remove_file(cpufreq_global_kobject, attr);
949 cpufreq_put_global_kobject();
950}
951EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
952
19d6f7ec 953/* symlink affected CPUs */
308b60e7 954static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
19d6f7ec
DJ
955{
956 unsigned int j;
957 int ret = 0;
958
959 for_each_cpu(j, policy->cpus) {
8a25a2fd 960 struct device *cpu_dev;
19d6f7ec 961
308b60e7 962 if (j == policy->cpu)
19d6f7ec 963 continue;
19d6f7ec 964
e8fdde10 965 pr_debug("Adding link for CPU: %u\n", j);
8a25a2fd
KS
966 cpu_dev = get_cpu_device(j);
967 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
19d6f7ec 968 "cpufreq");
71c3461e
RW
969 if (ret)
970 break;
19d6f7ec
DJ
971 }
972 return ret;
973}
974
308b60e7 975static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
8a25a2fd 976 struct device *dev)
909a694e
DJ
977{
978 struct freq_attr **drv_attr;
909a694e 979 int ret = 0;
909a694e 980
909a694e 981 /* set up files for this cpu device */
1c3d85dd 982 drv_attr = cpufreq_driver->attr;
f13f1184 983 while (drv_attr && *drv_attr) {
909a694e
DJ
984 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
985 if (ret)
6d4e81ed 986 return ret;
909a694e
DJ
987 drv_attr++;
988 }
1c3d85dd 989 if (cpufreq_driver->get) {
909a694e
DJ
990 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
991 if (ret)
6d4e81ed 992 return ret;
909a694e 993 }
c034b02e
DB
994
995 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
996 if (ret)
6d4e81ed 997 return ret;
c034b02e 998
1c3d85dd 999 if (cpufreq_driver->bios_limit) {
e2f74f35
TR
1000 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
1001 if (ret)
6d4e81ed 1002 return ret;
e2f74f35 1003 }
909a694e 1004
6d4e81ed 1005 return cpufreq_add_dev_symlink(policy);
e18f1682
SB
1006}
1007
1008static void cpufreq_init_policy(struct cpufreq_policy *policy)
1009{
6e2c89d1 1010 struct cpufreq_governor *gov = NULL;
e18f1682
SB
1011 struct cpufreq_policy new_policy;
1012 int ret = 0;
1013
d5b73cd8 1014 memcpy(&new_policy, policy, sizeof(*policy));
a27a9ab7 1015
6e2c89d1 1016 /* Update governor of new_policy to the governor used before hotplug */
4573237b 1017 gov = find_governor(policy->last_governor);
6e2c89d1 1018 if (gov)
1019 pr_debug("Restoring governor %s for cpu %d\n",
1020 policy->governor->name, policy->cpu);
1021 else
1022 gov = CPUFREQ_DEFAULT_GOVERNOR;
1023
1024 new_policy.governor = gov;
1025
a27a9ab7
JB
1026 /* Use the default policy if its valid. */
1027 if (cpufreq_driver->setpolicy)
6e2c89d1 1028 cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);
ecf7e461
DJ
1029
1030 /* set default policy */
037ce839 1031 ret = cpufreq_set_policy(policy, &new_policy);
ecf7e461 1032 if (ret) {
2d06d8c4 1033 pr_debug("setting policy failed\n");
1c3d85dd
RW
1034 if (cpufreq_driver->exit)
1035 cpufreq_driver->exit(policy);
ecf7e461 1036 }
909a694e
DJ
1037}
1038
d8d3b471 1039static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
42f921a6 1040 unsigned int cpu, struct device *dev)
fcf80582 1041{
9c0ebcf7 1042 int ret = 0;
fcf80582 1043
bb29ae15
VK
1044 /* Has this CPU been taken care of already? */
1045 if (cpumask_test_cpu(cpu, policy->cpus))
1046 return 0;
1047
9c0ebcf7 1048 if (has_target()) {
3de9bdeb
VK
1049 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1050 if (ret) {
1051 pr_err("%s: Failed to stop governor\n", __func__);
1052 return ret;
1053 }
1054 }
fcf80582 1055
ad7722da 1056 down_write(&policy->rwsem);
fcf80582 1057 cpumask_set_cpu(cpu, policy->cpus);
ad7722da 1058 up_write(&policy->rwsem);
2eaa3e2d 1059
9c0ebcf7 1060 if (has_target()) {
e5c87b76
SK
1061 ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
1062 if (!ret)
1063 ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
1064
1065 if (ret) {
3de9bdeb
VK
1066 pr_err("%s: Failed to start governor\n", __func__);
1067 return ret;
1068 }
820c6ca2 1069 }
fcf80582 1070
42f921a6 1071 return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
fcf80582 1072}
1da177e4 1073
8414809c
SB
1074static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
1075{
1076 struct cpufreq_policy *policy;
1077 unsigned long flags;
1078
44871c9c 1079 read_lock_irqsave(&cpufreq_driver_lock, flags);
3914d379 1080 policy = per_cpu(cpufreq_cpu_data, cpu);
44871c9c 1081 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
8414809c 1082
3914d379
VK
1083 if (likely(policy)) {
1084 /* Policy should be inactive here */
1085 WARN_ON(!policy_is_inactive(policy));
09712f55 1086 policy->governor = NULL;
3914d379 1087 }
6e2c89d1 1088
8414809c
SB
1089 return policy;
1090}
1091
e9698cc5
SB
1092static struct cpufreq_policy *cpufreq_policy_alloc(void)
1093{
1094 struct cpufreq_policy *policy;
1095
1096 policy = kzalloc(sizeof(*policy), GFP_KERNEL);
1097 if (!policy)
1098 return NULL;
1099
1100 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
1101 goto err_free_policy;
1102
1103 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
1104 goto err_free_cpumask;
1105
c88a1f8b 1106 INIT_LIST_HEAD(&policy->policy_list);
ad7722da 1107 init_rwsem(&policy->rwsem);
12478cf0
SB
1108 spin_lock_init(&policy->transition_lock);
1109 init_waitqueue_head(&policy->transition_wait);
818c5712
VK
1110 init_completion(&policy->kobj_unregister);
1111 INIT_WORK(&policy->update, handle_update);
ad7722da 1112
e9698cc5
SB
1113 return policy;
1114
1115err_free_cpumask:
1116 free_cpumask_var(policy->cpus);
1117err_free_policy:
1118 kfree(policy);
1119
1120 return NULL;
1121}
1122
42f921a6
VK
1123static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
1124{
1125 struct kobject *kobj;
1126 struct completion *cmp;
1127
fcd7af91
VK
1128 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1129 CPUFREQ_REMOVE_POLICY, policy);
1130
42f921a6
VK
1131 down_read(&policy->rwsem);
1132 kobj = &policy->kobj;
1133 cmp = &policy->kobj_unregister;
1134 up_read(&policy->rwsem);
1135 kobject_put(kobj);
1136
1137 /*
1138 * We need to make sure that the underlying kobj is
1139 * actually not referenced anymore by anybody before we
1140 * proceed with unloading.
1141 */
1142 pr_debug("waiting for dropping of refcount\n");
1143 wait_for_completion(cmp);
1144 pr_debug("wait complete\n");
1145}
1146
e9698cc5
SB
1147static void cpufreq_policy_free(struct cpufreq_policy *policy)
1148{
988bed09
VK
1149 unsigned long flags;
1150 int cpu;
1151
1152 /* Remove policy from list */
1153 write_lock_irqsave(&cpufreq_driver_lock, flags);
1154 list_del(&policy->policy_list);
1155
1156 for_each_cpu(cpu, policy->related_cpus)
1157 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1158 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1159
e9698cc5
SB
1160 free_cpumask_var(policy->related_cpus);
1161 free_cpumask_var(policy->cpus);
1162 kfree(policy);
1163}
1164
1bfb425b
VK
1165static int update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu,
1166 struct device *cpu_dev)
0d66b91e 1167{
1bfb425b
VK
1168 int ret;
1169
99ec899e 1170 if (WARN_ON(cpu == policy->cpu))
1bfb425b
VK
1171 return 0;
1172
1173 /* Move kobject to the new policy->cpu */
1174 ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
1175 if (ret) {
1176 pr_err("%s: Failed to move kobj: %d\n", __func__, ret);
1177 return ret;
1178 }
cb38ed5c 1179
ad7722da 1180 down_write(&policy->rwsem);
0d66b91e 1181 policy->cpu = cpu;
ad7722da 1182 up_write(&policy->rwsem);
8efd5765 1183
1bfb425b 1184 return 0;
0d66b91e
SB
1185}
1186
23faf0b7
VK
1187/**
1188 * cpufreq_add_dev - add a CPU device
1189 *
1190 * Adds the cpufreq interface for a CPU device.
1191 *
1192 * The Oracle says: try running cpufreq registration/unregistration concurrently
1193 * with with cpu hotplugging and all hell will break loose. Tried to clean this
1194 * mess up, but more thorough testing is needed. - Mathieu
1195 */
1196static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1da177e4 1197{
fcf80582 1198 unsigned int j, cpu = dev->id;
65922465 1199 int ret = -ENOMEM;
7f0c020a 1200 struct cpufreq_policy *policy;
1da177e4 1201 unsigned long flags;
96bbbe4a 1202 bool recover_policy = cpufreq_suspended;
1da177e4 1203
c32b6b8e
AR
1204 if (cpu_is_offline(cpu))
1205 return 0;
1206
2d06d8c4 1207 pr_debug("adding CPU %u\n", cpu);
1da177e4 1208
6eed9404
VK
1209 if (!down_read_trylock(&cpufreq_rwsem))
1210 return 0;
1211
bb29ae15 1212 /* Check if this CPU already has a policy to manage it */
9104bb26
VK
1213 policy = per_cpu(cpufreq_cpu_data, cpu);
1214 if (policy && !policy_is_inactive(policy)) {
1215 WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
1216 ret = cpufreq_add_policy_cpu(policy, cpu, dev);
1217 up_read(&cpufreq_rwsem);
1218 return ret;
fcf80582 1219 }
1da177e4 1220
72368d12
RW
1221 /*
1222 * Restore the saved policy when doing light-weight init and fall back
1223 * to the full init if that fails.
1224 */
96bbbe4a 1225 policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
72368d12 1226 if (!policy) {
96bbbe4a 1227 recover_policy = false;
8414809c 1228 policy = cpufreq_policy_alloc();
72368d12
RW
1229 if (!policy)
1230 goto nomem_out;
1231 }
0d66b91e
SB
1232
1233 /*
1234 * In the resume path, since we restore a saved policy, the assignment
1235 * to policy->cpu is like an update of the existing policy, rather than
1236 * the creation of a brand new one. So we need to perform this update
1237 * by invoking update_policy_cpu().
1238 */
1bfb425b
VK
1239 if (recover_policy && cpu != policy->cpu)
1240 WARN_ON(update_policy_cpu(policy, cpu, dev));
1241 else
0d66b91e
SB
1242 policy->cpu = cpu;
1243
835481d9 1244 cpumask_copy(policy->cpus, cpumask_of(cpu));
1da177e4 1245
1da177e4
LT
1246 /* call driver. From then on the cpufreq must be able
1247 * to accept all calls to ->verify and ->setpolicy for this CPU
1248 */
1c3d85dd 1249 ret = cpufreq_driver->init(policy);
1da177e4 1250 if (ret) {
2d06d8c4 1251 pr_debug("initialization failed\n");
2eaa3e2d 1252 goto err_set_policy_cpu;
1da177e4 1253 }
643ae6e8 1254
6d4e81ed
TV
1255 down_write(&policy->rwsem);
1256
5a7e56a5
VK
1257 /* related cpus should atleast have policy->cpus */
1258 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
1259
1260 /*
1261 * affected cpus must always be the one, which are online. We aren't
1262 * managing offline cpus here.
1263 */
1264 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1265
96bbbe4a 1266 if (!recover_policy) {
5a7e56a5
VK
1267 policy->user_policy.min = policy->min;
1268 policy->user_policy.max = policy->max;
6d4e81ed
TV
1269
1270 /* prepare interface data */
1271 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
1272 &dev->kobj, "cpufreq");
1273 if (ret) {
1274 pr_err("%s: failed to init policy->kobj: %d\n",
1275 __func__, ret);
1276 goto err_init_policy_kobj;
1277 }
5a7e56a5 1278
988bed09
VK
1279 write_lock_irqsave(&cpufreq_driver_lock, flags);
1280 for_each_cpu(j, policy->related_cpus)
1281 per_cpu(cpufreq_cpu_data, j) = policy;
1282 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1283 }
652ed95d 1284
2ed99e39 1285 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
da60ce9f
VK
1286 policy->cur = cpufreq_driver->get(policy->cpu);
1287 if (!policy->cur) {
1288 pr_err("%s: ->get() failed\n", __func__);
1289 goto err_get_freq;
1290 }
1291 }
1292
d3916691
VK
1293 /*
1294 * Sometimes boot loaders set CPU frequency to a value outside of
1295 * frequency table present with cpufreq core. In such cases CPU might be
1296 * unstable if it has to run on that frequency for long duration of time
1297 * and so its better to set it to a frequency which is specified in
1298 * freq-table. This also makes cpufreq stats inconsistent as
1299 * cpufreq-stats would fail to register because current frequency of CPU
1300 * isn't found in freq-table.
1301 *
1302 * Because we don't want this change to effect boot process badly, we go
1303 * for the next freq which is >= policy->cur ('cur' must be set by now,
1304 * otherwise we will end up setting freq to lowest of the table as 'cur'
1305 * is initialized to zero).
1306 *
1307 * We are passing target-freq as "policy->cur - 1" otherwise
1308 * __cpufreq_driver_target() would simply fail, as policy->cur will be
1309 * equal to target-freq.
1310 */
1311 if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
1312 && has_target()) {
1313 /* Are we running at unknown frequency ? */
1314 ret = cpufreq_frequency_table_get_index(policy, policy->cur);
1315 if (ret == -EINVAL) {
1316 /* Warn user and fix it */
1317 pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
1318 __func__, policy->cpu, policy->cur);
1319 ret = __cpufreq_driver_target(policy, policy->cur - 1,
1320 CPUFREQ_RELATION_L);
1321
1322 /*
1323 * Reaching here after boot in a few seconds may not
1324 * mean that system will remain stable at "unknown"
1325 * frequency for longer duration. Hence, a BUG_ON().
1326 */
1327 BUG_ON(ret);
1328 pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
1329 __func__, policy->cpu, policy->cur);
1330 }
1331 }
1332
a1531acd
TR
1333 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1334 CPUFREQ_START, policy);
1335
96bbbe4a 1336 if (!recover_policy) {
308b60e7 1337 ret = cpufreq_add_dev_interface(policy, dev);
a82fab29
SB
1338 if (ret)
1339 goto err_out_unregister;
fcd7af91
VK
1340 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1341 CPUFREQ_CREATE_POLICY, policy);
8ff69732 1342
988bed09
VK
1343 write_lock_irqsave(&cpufreq_driver_lock, flags);
1344 list_add(&policy->policy_list, &cpufreq_policy_list);
1345 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1346 }
9515f4d6 1347
e18f1682
SB
1348 cpufreq_init_policy(policy);
1349
96bbbe4a 1350 if (!recover_policy) {
08fd8c1c
VK
1351 policy->user_policy.policy = policy->policy;
1352 policy->user_policy.governor = policy->governor;
1353 }
4e97b631 1354 up_write(&policy->rwsem);
08fd8c1c 1355
038c5b3e 1356 kobject_uevent(&policy->kobj, KOBJ_ADD);
7c45cf31 1357
6eed9404
VK
1358 up_read(&cpufreq_rwsem);
1359
7c45cf31
VK
1360 /* Callback for handling stuff after policy is ready */
1361 if (cpufreq_driver->ready)
1362 cpufreq_driver->ready(policy);
1363
2d06d8c4 1364 pr_debug("initialization complete\n");
87c32271 1365
1da177e4
LT
1366 return 0;
1367
1da177e4 1368err_out_unregister:
652ed95d 1369err_get_freq:
6d4e81ed
TV
1370 if (!recover_policy) {
1371 kobject_put(&policy->kobj);
1372 wait_for_completion(&policy->kobj_unregister);
1373 }
1374err_init_policy_kobj:
7106e02b
PB
1375 up_write(&policy->rwsem);
1376
da60ce9f
VK
1377 if (cpufreq_driver->exit)
1378 cpufreq_driver->exit(policy);
2eaa3e2d 1379err_set_policy_cpu:
3914d379 1380 if (recover_policy)
42f921a6 1381 cpufreq_policy_put_kobj(policy);
e9698cc5 1382 cpufreq_policy_free(policy);
42f921a6 1383
1da177e4 1384nomem_out:
6eed9404
VK
1385 up_read(&cpufreq_rwsem);
1386
1da177e4
LT
1387 return ret;
1388}
1389
cedb70af 1390static int __cpufreq_remove_dev_prepare(struct device *dev,
96bbbe4a 1391 struct subsys_interface *sif)
1da177e4 1392{
f9ba680d 1393 unsigned int cpu = dev->id, cpus;
1bfb425b 1394 int ret;
3a3e9e06 1395 struct cpufreq_policy *policy;
1da177e4 1396
b8eed8af 1397 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1da177e4 1398
988bed09 1399 policy = cpufreq_cpu_get_raw(cpu);
3a3e9e06 1400 if (!policy) {
b8eed8af 1401 pr_debug("%s: No cpu_data found\n", __func__);
1da177e4
LT
1402 return -EINVAL;
1403 }
1da177e4 1404
9c0ebcf7 1405 if (has_target()) {
3de9bdeb
VK
1406 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1407 if (ret) {
1408 pr_err("%s: Failed to stop governor\n", __func__);
1409 return ret;
1410 }
db5f2995 1411 }
1da177e4 1412
4573237b 1413 down_write(&policy->rwsem);
3a3e9e06 1414 cpus = cpumask_weight(policy->cpus);
4573237b
VK
1415
1416 if (has_target() && cpus == 1)
1417 strncpy(policy->last_governor, policy->governor->name,
1418 CPUFREQ_NAME_LEN);
1419 up_write(&policy->rwsem);
084f3493 1420
61173f25 1421 if (cpu != policy->cpu) {
6964d91d 1422 sysfs_remove_link(&dev->kobj, "cpufreq");
73bf0fc2 1423 } else if (cpus > 1) {
1bfb425b
VK
1424 /* Nominate new CPU */
1425 int new_cpu = cpumask_any_but(policy->cpus, cpu);
1426 struct device *cpu_dev = get_cpu_device(new_cpu);
a82fab29 1427
1bfb425b
VK
1428 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
1429 ret = update_policy_cpu(policy, new_cpu, cpu_dev);
1430 if (ret) {
1431 if (sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
1432 "cpufreq"))
1433 pr_err("%s: Failed to restore kobj link to cpu:%d\n",
1434 __func__, cpu_dev->id);
1435 return ret;
1da177e4 1436 }
1bfb425b
VK
1437
1438 if (!cpufreq_suspended)
1439 pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
1440 __func__, new_cpu, cpu);
789ca243 1441 } else if (cpufreq_driver->stop_cpu) {
367dc4aa 1442 cpufreq_driver->stop_cpu(policy);
1da177e4 1443 }
1da177e4 1444
cedb70af
SB
1445 return 0;
1446}
1447
1448static int __cpufreq_remove_dev_finish(struct device *dev,
96bbbe4a 1449 struct subsys_interface *sif)
cedb70af 1450{
988bed09 1451 unsigned int cpu = dev->id;
cedb70af 1452 int ret;
988bed09 1453 struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
cedb70af
SB
1454
1455 if (!policy) {
1456 pr_debug("%s: No cpu_data found\n", __func__);
1457 return -EINVAL;
1458 }
1459
ad7722da 1460 down_write(&policy->rwsem);
303ae723 1461 cpumask_clear_cpu(cpu, policy->cpus);
ad7722da 1462 up_write(&policy->rwsem);
cedb70af 1463
b8eed8af 1464 /* If cpu is last user of policy, free policy */
988bed09 1465 if (policy_is_inactive(policy)) {
9c0ebcf7 1466 if (has_target()) {
3de9bdeb
VK
1467 ret = __cpufreq_governor(policy,
1468 CPUFREQ_GOV_POLICY_EXIT);
1469 if (ret) {
1470 pr_err("%s: Failed to exit governor\n",
e837f9b5 1471 __func__);
3de9bdeb
VK
1472 return ret;
1473 }
edab2fbc 1474 }
2a998599 1475
96bbbe4a 1476 if (!cpufreq_suspended)
42f921a6 1477 cpufreq_policy_put_kobj(policy);
7d26e2d5 1478
8414809c
SB
1479 /*
1480 * Perform the ->exit() even during light-weight tear-down,
1481 * since this is a core component, and is essential for the
1482 * subsequent light-weight ->init() to succeed.
b8eed8af 1483 */
1c3d85dd 1484 if (cpufreq_driver->exit)
3a3e9e06 1485 cpufreq_driver->exit(policy);
27ecddc2 1486
96bbbe4a 1487 if (!cpufreq_suspended)
3a3e9e06 1488 cpufreq_policy_free(policy);
e5c87b76
SK
1489 } else if (has_target()) {
1490 ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
1491 if (!ret)
1492 ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
1493
1494 if (ret) {
1495 pr_err("%s: Failed to start governor\n", __func__);
1496 return ret;
2a998599 1497 }
27ecddc2 1498 }
1da177e4 1499
1da177e4
LT
1500 return 0;
1501}
1502
cedb70af 1503/**
27a862e9 1504 * cpufreq_remove_dev - remove a CPU device
cedb70af
SB
1505 *
1506 * Removes the cpufreq interface for a CPU device.
cedb70af 1507 */
8a25a2fd 1508static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
5a01f2e8 1509{
8a25a2fd 1510 unsigned int cpu = dev->id;
27a862e9 1511 int ret;
ec28297a
VP
1512
1513 if (cpu_is_offline(cpu))
1514 return 0;
1515
96bbbe4a 1516 ret = __cpufreq_remove_dev_prepare(dev, sif);
27a862e9
VK
1517
1518 if (!ret)
96bbbe4a 1519 ret = __cpufreq_remove_dev_finish(dev, sif);
27a862e9
VK
1520
1521 return ret;
5a01f2e8
VP
1522}
1523
65f27f38 1524static void handle_update(struct work_struct *work)
1da177e4 1525{
65f27f38
DH
1526 struct cpufreq_policy *policy =
1527 container_of(work, struct cpufreq_policy, update);
1528 unsigned int cpu = policy->cpu;
2d06d8c4 1529 pr_debug("handle_update for cpu %u called\n", cpu);
1da177e4
LT
1530 cpufreq_update_policy(cpu);
1531}
1532
1533/**
bb176f7d
VK
1534 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
1535 * in deep trouble.
a1e1dc41 1536 * @policy: policy managing CPUs
1da177e4
LT
1537 * @new_freq: CPU frequency the CPU actually runs at
1538 *
29464f28
DJ
1539 * We adjust to current frequency first, and need to clean up later.
1540 * So either call to cpufreq_update_policy() or schedule handle_update()).
1da177e4 1541 */
a1e1dc41 1542static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
e08f5f5b 1543 unsigned int new_freq)
1da177e4
LT
1544{
1545 struct cpufreq_freqs freqs;
b43a7ffb 1546
e837f9b5 1547 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
a1e1dc41 1548 policy->cur, new_freq);
1da177e4 1549
a1e1dc41 1550 freqs.old = policy->cur;
1da177e4 1551 freqs.new = new_freq;
b43a7ffb 1552
8fec051e
VK
1553 cpufreq_freq_transition_begin(policy, &freqs);
1554 cpufreq_freq_transition_end(policy, &freqs, 0);
1da177e4
LT
1555}
1556
32ee8c3e 1557/**
4ab70df4 1558 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
95235ca2
VP
1559 * @cpu: CPU number
1560 *
1561 * This is the last known freq, without actually getting it from the driver.
1562 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1563 */
1564unsigned int cpufreq_quick_get(unsigned int cpu)
1565{
9e21ba8b 1566 struct cpufreq_policy *policy;
e08f5f5b 1567 unsigned int ret_freq = 0;
95235ca2 1568
1c3d85dd
RW
1569 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1570 return cpufreq_driver->get(cpu);
9e21ba8b
DB
1571
1572 policy = cpufreq_cpu_get(cpu);
95235ca2 1573 if (policy) {
e08f5f5b 1574 ret_freq = policy->cur;
95235ca2
VP
1575 cpufreq_cpu_put(policy);
1576 }
1577
4d34a67d 1578 return ret_freq;
95235ca2
VP
1579}
1580EXPORT_SYMBOL(cpufreq_quick_get);
1581
3d737108
JB
1582/**
1583 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1584 * @cpu: CPU number
1585 *
1586 * Just return the max possible frequency for a given CPU.
1587 */
1588unsigned int cpufreq_quick_get_max(unsigned int cpu)
1589{
1590 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1591 unsigned int ret_freq = 0;
1592
1593 if (policy) {
1594 ret_freq = policy->max;
1595 cpufreq_cpu_put(policy);
1596 }
1597
1598 return ret_freq;
1599}
1600EXPORT_SYMBOL(cpufreq_quick_get_max);
1601
d92d50a4 1602static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
1da177e4 1603{
e08f5f5b 1604 unsigned int ret_freq = 0;
5800043b 1605
1c3d85dd 1606 if (!cpufreq_driver->get)
4d34a67d 1607 return ret_freq;
1da177e4 1608
d92d50a4 1609 ret_freq = cpufreq_driver->get(policy->cpu);
1da177e4 1610
e08f5f5b 1611 if (ret_freq && policy->cur &&
1c3d85dd 1612 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
e08f5f5b
GS
1613 /* verify no discrepancy between actual and
1614 saved value exists */
1615 if (unlikely(ret_freq != policy->cur)) {
a1e1dc41 1616 cpufreq_out_of_sync(policy, ret_freq);
1da177e4
LT
1617 schedule_work(&policy->update);
1618 }
1619 }
1620
4d34a67d 1621 return ret_freq;
5a01f2e8 1622}
1da177e4 1623
5a01f2e8
VP
1624/**
1625 * cpufreq_get - get the current CPU frequency (in kHz)
1626 * @cpu: CPU number
1627 *
1628 * Get the CPU current (static) CPU frequency
1629 */
1630unsigned int cpufreq_get(unsigned int cpu)
1631{
999976e0 1632 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
5a01f2e8 1633 unsigned int ret_freq = 0;
5a01f2e8 1634
999976e0
AP
1635 if (policy) {
1636 down_read(&policy->rwsem);
d92d50a4 1637 ret_freq = __cpufreq_get(policy);
999976e0 1638 up_read(&policy->rwsem);
5a01f2e8 1639
999976e0
AP
1640 cpufreq_cpu_put(policy);
1641 }
6eed9404 1642
4d34a67d 1643 return ret_freq;
1da177e4
LT
1644}
1645EXPORT_SYMBOL(cpufreq_get);
1646
8a25a2fd
KS
1647static struct subsys_interface cpufreq_interface = {
1648 .name = "cpufreq",
1649 .subsys = &cpu_subsys,
1650 .add_dev = cpufreq_add_dev,
1651 .remove_dev = cpufreq_remove_dev,
e00e56df
RW
1652};
1653
e28867ea
VK
1654/*
1655 * In case platform wants some specific frequency to be configured
1656 * during suspend..
1657 */
1658int cpufreq_generic_suspend(struct cpufreq_policy *policy)
1659{
1660 int ret;
1661
1662 if (!policy->suspend_freq) {
1663 pr_err("%s: suspend_freq can't be zero\n", __func__);
1664 return -EINVAL;
1665 }
1666
1667 pr_debug("%s: Setting suspend-freq: %u\n", __func__,
1668 policy->suspend_freq);
1669
1670 ret = __cpufreq_driver_target(policy, policy->suspend_freq,
1671 CPUFREQ_RELATION_H);
1672 if (ret)
1673 pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
1674 __func__, policy->suspend_freq, ret);
1675
1676 return ret;
1677}
1678EXPORT_SYMBOL(cpufreq_generic_suspend);
1679
42d4dc3f 1680/**
2f0aea93 1681 * cpufreq_suspend() - Suspend CPUFreq governors
e00e56df 1682 *
2f0aea93
VK
1683 * Called during system wide Suspend/Hibernate cycles for suspending governors
1684 * as some platforms can't change frequency after this point in suspend cycle.
1685 * Because some of the devices (like: i2c, regulators, etc) they use for
1686 * changing frequency are suspended quickly after this point.
42d4dc3f 1687 */
2f0aea93 1688void cpufreq_suspend(void)
42d4dc3f 1689{
3a3e9e06 1690 struct cpufreq_policy *policy;
42d4dc3f 1691
2f0aea93
VK
1692 if (!cpufreq_driver)
1693 return;
42d4dc3f 1694
2f0aea93 1695 if (!has_target())
b1b12bab 1696 goto suspend;
42d4dc3f 1697
2f0aea93
VK
1698 pr_debug("%s: Suspending Governors\n", __func__);
1699
f963735a 1700 for_each_active_policy(policy) {
2f0aea93
VK
1701 if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
1702 pr_err("%s: Failed to stop governor for policy: %p\n",
1703 __func__, policy);
1704 else if (cpufreq_driver->suspend
1705 && cpufreq_driver->suspend(policy))
1706 pr_err("%s: Failed to suspend driver: %p\n", __func__,
1707 policy);
42d4dc3f 1708 }
b1b12bab
VK
1709
1710suspend:
1711 cpufreq_suspended = true;
42d4dc3f
BH
1712}
1713
1da177e4 1714/**
2f0aea93 1715 * cpufreq_resume() - Resume CPUFreq governors
1da177e4 1716 *
2f0aea93
VK
 1717 * Called during system-wide suspend/hibernate cycles to resume governors that
 1718 * were suspended with cpufreq_suspend().
1da177e4 1719 */
2f0aea93 1720void cpufreq_resume(void)
1da177e4 1721{
3a3e9e06 1722 struct cpufreq_policy *policy;
1da177e4 1723
2f0aea93
VK
1724 if (!cpufreq_driver)
1725 return;
1da177e4 1726
8e30444e
LT
1727 cpufreq_suspended = false;
1728
2f0aea93 1729 if (!has_target())
e00e56df 1730 return;
1da177e4 1731
2f0aea93 1732 pr_debug("%s: Resuming Governors\n", __func__);
1da177e4 1733
f963735a 1734 for_each_active_policy(policy) {
0c5aa405
VK
1735 if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
1736 pr_err("%s: Failed to resume driver: %p\n", __func__,
1737 policy);
1738 else if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
2f0aea93
VK
1739 || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
1740 pr_err("%s: Failed to start governor for policy: %p\n",
1741 __func__, policy);
2f0aea93 1742 }
c75de0ac
VK
1743
1744 /*
 1745 * Schedule a call to cpufreq_update_policy() for the first online CPU,
 1746 * as that one won't be hotplugged out on suspend. It will verify that
 1747 * the current freq is in sync with what we believe it to be.
1748 */
1749 policy = cpufreq_cpu_get_raw(cpumask_first(cpu_online_mask));
1750 if (WARN_ON(!policy))
1751 return;
1752
1753 schedule_work(&policy->update);
2f0aea93 1754}
1da177e4 1755
9d95046e
BP
1756/**
1757 * cpufreq_get_current_driver - return current driver's name
1758 *
1759 * Return the name string of the currently loaded cpufreq driver
1760 * or NULL, if none.
1761 */
1762const char *cpufreq_get_current_driver(void)
1763{
1c3d85dd
RW
1764 if (cpufreq_driver)
1765 return cpufreq_driver->name;
1766
1767 return NULL;
9d95046e
BP
1768}
1769EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1da177e4 1770
51315cdf
TP
1771/**
1772 * cpufreq_get_driver_data - return current driver data
1773 *
1774 * Return the private data of the currently loaded cpufreq
1775 * driver, or NULL if no cpufreq driver is loaded.
1776 */
1777void *cpufreq_get_driver_data(void)
1778{
1779 if (cpufreq_driver)
1780 return cpufreq_driver->driver_data;
1781
1782 return NULL;
1783}
1784EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
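/*
 * Illustrative check from platform code, assuming only the two accessors
 * above: confirm which driver is loaded before using its private data.
 * The "acpi-cpufreq" name is just an example.
 */
static void *foo_get_acpi_driver_data(void)
{
	const char *name = cpufreq_get_current_driver();

	if (name && !strcmp(name, "acpi-cpufreq"))
		return cpufreq_get_driver_data();

	return NULL;	/* no driver, or a different one */
}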
1785
1da177e4
LT
1786/*********************************************************************
1787 * NOTIFIER LISTS INTERFACE *
1788 *********************************************************************/
1789
1790/**
1791 * cpufreq_register_notifier - register a driver with cpufreq
1792 * @nb: notifier function to register
1793 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1794 *
32ee8c3e 1795 * Add a driver to one of two lists: either a list of drivers that
1da177e4
LT
1796 * are notified about clock rate changes (once before and once after
1797 * the transition), or a list of drivers that are notified about
1798 * changes in cpufreq policy.
1799 *
1800 * This function may sleep, and has the same return conditions as
e041c683 1801 * blocking_notifier_chain_register.
1da177e4
LT
1802 */
1803int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1804{
1805 int ret;
1806
d5aaffa9
DB
1807 if (cpufreq_disabled())
1808 return -EINVAL;
1809
74212ca4
CEB
1810 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1811
1da177e4
LT
1812 switch (list) {
1813 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1814 ret = srcu_notifier_chain_register(
e041c683 1815 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1816 break;
1817 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1818 ret = blocking_notifier_chain_register(
1819 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1820 break;
1821 default:
1822 ret = -EINVAL;
1823 }
1da177e4
LT
1824
1825 return ret;
1826}
1827EXPORT_SYMBOL(cpufreq_register_notifier);
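/*
 * Minimal transition-notifier sketch built on the documented contract:
 * the callback runs once before (CPUFREQ_PRECHANGE) and once after
 * (CPUFREQ_POSTCHANGE) each transition, with a struct cpufreq_freqs as
 * data. All "foo" names are hypothetical.
 */
static int foo_transition_cb(struct notifier_block *nb,
			     unsigned long val, void *data)
{
	struct cpufreq_freqs *freqs = data;

	if (val == CPUFREQ_POSTCHANGE)
		pr_info("transition done: %u -> %u kHz\n",
			freqs->old, freqs->new);

	return NOTIFY_OK;
}

static struct notifier_block foo_transition_nb = {
	.notifier_call = foo_transition_cb,
};

static int __init foo_notifier_init(void)
{
	return cpufreq_register_notifier(&foo_transition_nb,
					 CPUFREQ_TRANSITION_NOTIFIER);
}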
1828
1da177e4
LT
1829/**
1830 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1831 * @nb: notifier block to be unregistered
bb176f7d 1832 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1da177e4
LT
1833 *
1834 * Remove a driver from the CPU frequency notifier list.
1835 *
1836 * This function may sleep, and has the same return conditions as
e041c683 1837 * blocking_notifier_chain_unregister.
1da177e4
LT
1838 */
1839int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1840{
1841 int ret;
1842
d5aaffa9
DB
1843 if (cpufreq_disabled())
1844 return -EINVAL;
1845
1da177e4
LT
1846 switch (list) {
1847 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1848 ret = srcu_notifier_chain_unregister(
e041c683 1849 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1850 break;
1851 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1852 ret = blocking_notifier_chain_unregister(
1853 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1854 break;
1855 default:
1856 ret = -EINVAL;
1857 }
1da177e4
LT
1858
1859 return ret;
1860}
1861EXPORT_SYMBOL(cpufreq_unregister_notifier);
1862
1863
1864/*********************************************************************
1865 * GOVERNORS *
1866 *********************************************************************/
1867
1c03a2d0
VK
1868/* Must set freqs->new to intermediate frequency */
1869static int __target_intermediate(struct cpufreq_policy *policy,
1870 struct cpufreq_freqs *freqs, int index)
1871{
1872 int ret;
1873
1874 freqs->new = cpufreq_driver->get_intermediate(policy, index);
1875
1876 /* We don't need to switch to intermediate freq */
1877 if (!freqs->new)
1878 return 0;
1879
1880 pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
1881 __func__, policy->cpu, freqs->old, freqs->new);
1882
1883 cpufreq_freq_transition_begin(policy, freqs);
1884 ret = cpufreq_driver->target_intermediate(policy, index);
1885 cpufreq_freq_transition_end(policy, freqs, ret);
1886
1887 if (ret)
1888 pr_err("%s: Failed to change to intermediate frequency: %d\n",
1889 __func__, ret);
1890
1891 return ret;
1892}
1893
8d65775d
VK
1894static int __target_index(struct cpufreq_policy *policy,
1895 struct cpufreq_frequency_table *freq_table, int index)
1896{
1c03a2d0
VK
1897 struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
1898 unsigned int intermediate_freq = 0;
8d65775d
VK
1899 int retval = -EINVAL;
1900 bool notify;
1901
1902 notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
8d65775d 1903 if (notify) {
1c03a2d0
VK
1904 /* Handle switching to intermediate frequency */
1905 if (cpufreq_driver->get_intermediate) {
1906 retval = __target_intermediate(policy, &freqs, index);
1907 if (retval)
1908 return retval;
1909
1910 intermediate_freq = freqs.new;
1911 /* Set old freq to intermediate */
1912 if (intermediate_freq)
1913 freqs.old = freqs.new;
1914 }
8d65775d 1915
1c03a2d0 1916 freqs.new = freq_table[index].frequency;
8d65775d
VK
1917 pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
1918 __func__, policy->cpu, freqs.old, freqs.new);
1919
1920 cpufreq_freq_transition_begin(policy, &freqs);
1921 }
1922
1923 retval = cpufreq_driver->target_index(policy, index);
1924 if (retval)
1925 pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
1926 retval);
1927
1c03a2d0 1928 if (notify) {
8d65775d
VK
1929 cpufreq_freq_transition_end(policy, &freqs, retval);
1930
1c03a2d0
VK
1931 /*
1932 * Failed after setting to intermediate freq? Driver should have
1933 * reverted back to initial frequency and so should we. Check
1934 * here for intermediate_freq instead of get_intermediate, in
 1935 * case we haven't switched to the intermediate freq at all.
1936 */
1937 if (unlikely(retval && intermediate_freq)) {
1938 freqs.old = intermediate_freq;
1939 freqs.new = policy->restore_freq;
1940 cpufreq_freq_transition_begin(policy, &freqs);
1941 cpufreq_freq_transition_end(policy, &freqs, 0);
1942 }
1943 }
1944
8d65775d
VK
1945 return retval;
1946}
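/*
 * Driver-side sketch of the intermediate-frequency protocol used by
 * __target_index() above: ->get_intermediate() returns the stable
 * frequency to switch to first (or 0 to skip the extra step), and
 * ->target_intermediate() performs that switch. The "foo" names and the
 * 500 MHz value are invented.
 */
static unsigned int foo_get_intermediate(struct cpufreq_policy *policy,
					 unsigned int index)
{
	/* e.g. a fixed PLL-bypass rate that is safe while reprogramming */
	return 500000;
}

static int foo_target_intermediate(struct cpufreq_policy *policy,
				   unsigned int index)
{
	/* reparent the CPU clock to the intermediate source here */
	return 0;
}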
1947
1da177e4
LT
1948int __cpufreq_driver_target(struct cpufreq_policy *policy,
1949 unsigned int target_freq,
1950 unsigned int relation)
1951{
7249924e 1952 unsigned int old_target_freq = target_freq;
8d65775d 1953 int retval = -EINVAL;
c32b6b8e 1954
a7b422cd
KRW
1955 if (cpufreq_disabled())
1956 return -ENODEV;
1957
7249924e
VK
1958 /* Make sure that target_freq is within supported range */
1959 if (target_freq > policy->max)
1960 target_freq = policy->max;
1961 if (target_freq < policy->min)
1962 target_freq = policy->min;
1963
1964 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
e837f9b5 1965 policy->cpu, target_freq, relation, old_target_freq);
5a1c0228 1966
9c0ebcf7
VK
1967 /*
 1968 * This might look like a redundant call, as we check it again
 1969 * after finding the index. But it is left in intentionally: when
 1970 * exactly the same freq is requested again, we save a few function
 1971 * calls.
1972 */
5a1c0228
VK
1973 if (target_freq == policy->cur)
1974 return 0;
1975
1c03a2d0
VK
1976 /* Save last value to restore later on errors */
1977 policy->restore_freq = policy->cur;
1978
1c3d85dd
RW
1979 if (cpufreq_driver->target)
1980 retval = cpufreq_driver->target(policy, target_freq, relation);
9c0ebcf7
VK
1981 else if (cpufreq_driver->target_index) {
1982 struct cpufreq_frequency_table *freq_table;
1983 int index;
90d45d17 1984
9c0ebcf7
VK
1985 freq_table = cpufreq_frequency_get_table(policy->cpu);
1986 if (unlikely(!freq_table)) {
1987 pr_err("%s: Unable to find freq_table\n", __func__);
1988 goto out;
1989 }
1990
1991 retval = cpufreq_frequency_table_target(policy, freq_table,
1992 target_freq, relation, &index);
1993 if (unlikely(retval)) {
1994 pr_err("%s: Unable to find matching freq\n", __func__);
1995 goto out;
1996 }
1997
d4019f0a 1998 if (freq_table[index].frequency == policy->cur) {
9c0ebcf7 1999 retval = 0;
d4019f0a
VK
2000 goto out;
2001 }
2002
8d65775d 2003 retval = __target_index(policy, freq_table, index);
9c0ebcf7
VK
2004 }
2005
2006out:
1da177e4
LT
2007 return retval;
2008}
2009EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
2010
1da177e4
LT
2011int cpufreq_driver_target(struct cpufreq_policy *policy,
2012 unsigned int target_freq,
2013 unsigned int relation)
2014{
f1829e4a 2015 int ret = -EINVAL;
1da177e4 2016
ad7722da 2017 down_write(&policy->rwsem);
1da177e4
LT
2018
2019 ret = __cpufreq_driver_target(policy, target_freq, relation);
2020
ad7722da 2021 up_write(&policy->rwsem);
1da177e4 2022
1da177e4
LT
2023 return ret;
2024}
2025EXPORT_SYMBOL_GPL(cpufreq_driver_target);
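/*
 * Illustrative external call, assuming nothing beyond the API above:
 * request the highest supported frequency at or below 1 GHz. The core
 * clamps to policy->min/max and takes policy->rwsem itself.
 */
static int foo_cap_to_1ghz(struct cpufreq_policy *policy)
{
	return cpufreq_driver_target(policy, 1000000, CPUFREQ_RELATION_H);
}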
2026
e08f5f5b
GS
2027static int __cpufreq_governor(struct cpufreq_policy *policy,
2028 unsigned int event)
1da177e4 2029{
cc993cab 2030 int ret;
6afde10c
TR
2031
 2032 /* 'gov' must only be set when the default governor is known to
 2033 have latency restrictions, e.g. conservative or ondemand;
 2034 Kconfig already ensures that this is the case.
 2035 */
2036#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
2037 struct cpufreq_governor *gov = &cpufreq_gov_performance;
2038#else
2039 struct cpufreq_governor *gov = NULL;
2040#endif
1c256245 2041
2f0aea93
VK
2042 /* Don't start any governor operations if we are entering suspend */
2043 if (cpufreq_suspended)
2044 return 0;
cb57720b
EZ
2045 /*
 2046 * The governor might not be initialized here if an ACPI _PPC change
 2047 * notification happened, so check for it.
2048 */
2049 if (!policy->governor)
2050 return -EINVAL;
2f0aea93 2051
1c256245
TR
2052 if (policy->governor->max_transition_latency &&
2053 policy->cpuinfo.transition_latency >
2054 policy->governor->max_transition_latency) {
6afde10c
TR
2055 if (!gov)
2056 return -EINVAL;
2057 else {
e837f9b5
JP
 2058 pr_warn("%s governor failed: HW transition latency too long, falling back to %s governor\n",
2059 policy->governor->name, gov->name);
6afde10c
TR
2060 policy->governor = gov;
2061 }
1c256245 2062 }
1da177e4 2063
fe492f3f
VK
2064 if (event == CPUFREQ_GOV_POLICY_INIT)
2065 if (!try_module_get(policy->governor->owner))
2066 return -EINVAL;
1da177e4 2067
2d06d8c4 2068 pr_debug("__cpufreq_governor for CPU %u, event %u\n",
e837f9b5 2069 policy->cpu, event);
95731ebb
XC
2070
2071 mutex_lock(&cpufreq_governor_lock);
56d07db2 2072 if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
f73d3933
VK
2073 || (!policy->governor_enabled
2074 && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
95731ebb
XC
2075 mutex_unlock(&cpufreq_governor_lock);
2076 return -EBUSY;
2077 }
2078
2079 if (event == CPUFREQ_GOV_STOP)
2080 policy->governor_enabled = false;
2081 else if (event == CPUFREQ_GOV_START)
2082 policy->governor_enabled = true;
2083
2084 mutex_unlock(&cpufreq_governor_lock);
2085
1da177e4
LT
2086 ret = policy->governor->governor(policy, event);
2087
4d5dcc42
VK
2088 if (!ret) {
2089 if (event == CPUFREQ_GOV_POLICY_INIT)
2090 policy->governor->initialized++;
2091 else if (event == CPUFREQ_GOV_POLICY_EXIT)
2092 policy->governor->initialized--;
95731ebb
XC
2093 } else {
2094 /* Restore original values */
2095 mutex_lock(&cpufreq_governor_lock);
2096 if (event == CPUFREQ_GOV_STOP)
2097 policy->governor_enabled = true;
2098 else if (event == CPUFREQ_GOV_START)
2099 policy->governor_enabled = false;
2100 mutex_unlock(&cpufreq_governor_lock);
4d5dcc42 2101 }
b394058f 2102
fe492f3f
VK
2103 if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
2104 ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
1da177e4
LT
2105 module_put(policy->governor->owner);
2106
2107 return ret;
2108}
2109
1da177e4
LT
2110int cpufreq_register_governor(struct cpufreq_governor *governor)
2111{
3bcb09a3 2112 int err;
1da177e4
LT
2113
2114 if (!governor)
2115 return -EINVAL;
2116
a7b422cd
KRW
2117 if (cpufreq_disabled())
2118 return -ENODEV;
2119
3fc54d37 2120 mutex_lock(&cpufreq_governor_mutex);
32ee8c3e 2121
b394058f 2122 governor->initialized = 0;
3bcb09a3 2123 err = -EBUSY;
42f91fa1 2124 if (!find_governor(governor->name)) {
3bcb09a3
JF
2125 err = 0;
2126 list_add(&governor->governor_list, &cpufreq_governor_list);
1da177e4 2127 }
1da177e4 2128
32ee8c3e 2129 mutex_unlock(&cpufreq_governor_mutex);
3bcb09a3 2130 return err;
1da177e4
LT
2131}
2132EXPORT_SYMBOL_GPL(cpufreq_register_governor);
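/*
 * Skeleton governor in the shape this file expects: a single ->governor()
 * callback multiplexed over the CPUFREQ_GOV_* events. "foo" is a made-up
 * governor; real work would happen on GOV_START/GOV_LIMITS, typically via
 * __cpufreq_driver_target().
 */
static int foo_governor_cb(struct cpufreq_policy *policy,
			   unsigned int event)
{
	switch (event) {
	case CPUFREQ_GOV_POLICY_INIT:
	case CPUFREQ_GOV_START:
	case CPUFREQ_GOV_LIMITS:
	case CPUFREQ_GOV_STOP:
	case CPUFREQ_GOV_POLICY_EXIT:
		/* allocate/start/retune/stop/free as appropriate */
		break;
	}
	return 0;
}

static struct cpufreq_governor cpufreq_gov_foo = {
	.name		= "foo",
	.governor	= foo_governor_cb,
	.owner		= THIS_MODULE,
};

/* a module init would then call cpufreq_register_governor(&cpufreq_gov_foo) */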
2133
1da177e4
LT
2134void cpufreq_unregister_governor(struct cpufreq_governor *governor)
2135{
4573237b
VK
2136 struct cpufreq_policy *policy;
2137 unsigned long flags;
90e41bac 2138
1da177e4
LT
2139 if (!governor)
2140 return;
2141
a7b422cd
KRW
2142 if (cpufreq_disabled())
2143 return;
2144
4573237b
VK
2145 /* clear last_governor for all inactive policies */
2146 read_lock_irqsave(&cpufreq_driver_lock, flags);
2147 for_each_inactive_policy(policy) {
2148 if (!strcmp(policy->last_governor, governor->name))
2149 strcpy(policy->last_governor, "\0");
90e41bac 2150 }
4573237b 2151 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
90e41bac 2152
3fc54d37 2153 mutex_lock(&cpufreq_governor_mutex);
1da177e4 2154 list_del(&governor->governor_list);
3fc54d37 2155 mutex_unlock(&cpufreq_governor_mutex);
1da177e4
LT
2156 return;
2157}
2158EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
2159
2160
1da177e4
LT
2161/*********************************************************************
2162 * POLICY INTERFACE *
2163 *********************************************************************/
2164
2165/**
2166 * cpufreq_get_policy - get the current cpufreq_policy
29464f28
DJ
 2167 * @policy: struct cpufreq_policy into which the current policy is written
 2168 * @cpu: CPU whose policy is requested
1da177e4
LT
2169 *
2170 * Reads the current cpufreq policy.
2171 */
2172int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
2173{
2174 struct cpufreq_policy *cpu_policy;
2175 if (!policy)
2176 return -EINVAL;
2177
2178 cpu_policy = cpufreq_cpu_get(cpu);
2179 if (!cpu_policy)
2180 return -EINVAL;
2181
d5b73cd8 2182 memcpy(policy, cpu_policy, sizeof(*policy));
1da177e4
LT
2183
2184 cpufreq_cpu_put(cpu_policy);
1da177e4
LT
2185 return 0;
2186}
2187EXPORT_SYMBOL(cpufreq_get_policy);
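/*
 * Illustrative snapshot of a CPU's limits via the copy-out API above;
 * "foo_show_limits" is hypothetical. cpufreq_get_policy() returns 0 on
 * success, with the copy taken while a reference on the policy is held.
 */
static void foo_show_limits(unsigned int cpu)
{
	struct cpufreq_policy pol;

	if (!cpufreq_get_policy(&pol, cpu))
		pr_info("cpu%u: %u - %u kHz\n", cpu, pol.min, pol.max);
}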
2188
153d7f3f 2189/*
037ce839
VK
2190 * policy : current policy.
2191 * new_policy: policy to be set.
153d7f3f 2192 */
037ce839 2193static int cpufreq_set_policy(struct cpufreq_policy *policy,
3a3e9e06 2194 struct cpufreq_policy *new_policy)
1da177e4 2195{
d9a789c7
RW
2196 struct cpufreq_governor *old_gov;
2197 int ret;
1da177e4 2198
e837f9b5
JP
2199 pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
2200 new_policy->cpu, new_policy->min, new_policy->max);
1da177e4 2201
d5b73cd8 2202 memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
1da177e4 2203
d9a789c7
RW
2204 if (new_policy->min > policy->max || new_policy->max < policy->min)
2205 return -EINVAL;
9c9a43ed 2206
1da177e4 2207 /* verify the cpu speed can be set within this limit */
3a3e9e06 2208 ret = cpufreq_driver->verify(new_policy);
1da177e4 2209 if (ret)
d9a789c7 2210 return ret;
1da177e4 2211
1da177e4 2212 /* adjust if necessary - all reasons */
e041c683 2213 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
3a3e9e06 2214 CPUFREQ_ADJUST, new_policy);
1da177e4
LT
2215
 2216 /* adjust if necessary - hardware incompatibility */
e041c683 2217 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
3a3e9e06 2218 CPUFREQ_INCOMPATIBLE, new_policy);
1da177e4 2219
bb176f7d
VK
2220 /*
2221 * verify the cpu speed can be set within this limit, which might be
2222 * different to the first one
2223 */
3a3e9e06 2224 ret = cpufreq_driver->verify(new_policy);
e041c683 2225 if (ret)
d9a789c7 2226 return ret;
1da177e4
LT
2227
2228 /* notification of the new policy */
e041c683 2229 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
3a3e9e06 2230 CPUFREQ_NOTIFY, new_policy);
1da177e4 2231
3a3e9e06
VK
2232 policy->min = new_policy->min;
2233 policy->max = new_policy->max;
1da177e4 2234
2d06d8c4 2235 pr_debug("new min and max freqs are %u - %u kHz\n",
e837f9b5 2236 policy->min, policy->max);
1da177e4 2237
1c3d85dd 2238 if (cpufreq_driver->setpolicy) {
3a3e9e06 2239 policy->policy = new_policy->policy;
2d06d8c4 2240 pr_debug("setting range\n");
d9a789c7
RW
2241 return cpufreq_driver->setpolicy(new_policy);
2242 }
1da177e4 2243
d9a789c7
RW
2244 if (new_policy->governor == policy->governor)
2245 goto out;
7bd353a9 2246
d9a789c7
RW
2247 pr_debug("governor switch\n");
2248
2249 /* save old, working values */
2250 old_gov = policy->governor;
2251 /* end old governor */
2252 if (old_gov) {
2253 __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
2254 up_write(&policy->rwsem);
e5c87b76 2255 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
d9a789c7 2256 down_write(&policy->rwsem);
1da177e4
LT
2257 }
2258
d9a789c7
RW
2259 /* start new governor */
2260 policy->governor = new_policy->governor;
2261 if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
2262 if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
2263 goto out;
2264
2265 up_write(&policy->rwsem);
2266 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2267 down_write(&policy->rwsem);
2268 }
2269
2270 /* new governor failed, so re-start old one */
2271 pr_debug("starting governor %s failed\n", policy->governor->name);
2272 if (old_gov) {
2273 policy->governor = old_gov;
2274 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
2275 __cpufreq_governor(policy, CPUFREQ_GOV_START);
2276 }
2277
2278 return -EINVAL;
2279
2280 out:
2281 pr_debug("governor: change or update limits\n");
2282 return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
1da177e4
LT
2283}
2284
1da177e4
LT
2285/**
2286 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
2287 * @cpu: CPU which shall be re-evaluated
2288 *
25985edc 2289 * Useful for policy notifiers which have different requirements
1da177e4
LT
2290 * at different times.
2291 */
2292int cpufreq_update_policy(unsigned int cpu)
2293{
3a3e9e06
VK
2294 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
2295 struct cpufreq_policy new_policy;
f1829e4a 2296 int ret;
1da177e4 2297
fefa8ff8
AP
2298 if (!policy)
2299 return -ENODEV;
1da177e4 2300
ad7722da 2301 down_write(&policy->rwsem);
1da177e4 2302
2d06d8c4 2303 pr_debug("updating policy for CPU %u\n", cpu);
d5b73cd8 2304 memcpy(&new_policy, policy, sizeof(*policy));
3a3e9e06
VK
2305 new_policy.min = policy->user_policy.min;
2306 new_policy.max = policy->user_policy.max;
2307 new_policy.policy = policy->user_policy.policy;
2308 new_policy.governor = policy->user_policy.governor;
1da177e4 2309
bb176f7d
VK
2310 /*
 2311 * The BIOS might have changed the freq behind our back, so ask the
 2312 * driver for the current freq and notify governors about the change
2313 */
2ed99e39 2314 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
3a3e9e06 2315 new_policy.cur = cpufreq_driver->get(cpu);
bd0fa9bb
VK
2316 if (WARN_ON(!new_policy.cur)) {
2317 ret = -EIO;
fefa8ff8 2318 goto unlock;
bd0fa9bb
VK
2319 }
2320
3a3e9e06 2321 if (!policy->cur) {
e837f9b5 2322 pr_debug("Driver did not initialize current freq\n");
3a3e9e06 2323 policy->cur = new_policy.cur;
a85f7bd3 2324 } else {
9c0ebcf7 2325 if (policy->cur != new_policy.cur && has_target())
a1e1dc41 2326 cpufreq_out_of_sync(policy, new_policy.cur);
a85f7bd3 2327 }
0961dd0d
TR
2328 }
2329
037ce839 2330 ret = cpufreq_set_policy(policy, &new_policy);
1da177e4 2331
fefa8ff8 2332unlock:
ad7722da 2333 up_write(&policy->rwsem);
5a01f2e8 2334
3a3e9e06 2335 cpufreq_cpu_put(policy);
1da177e4
LT
2336 return ret;
2337}
2338EXPORT_SYMBOL(cpufreq_update_policy);
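/*
 * Typical caller sketch: platform code that learns of an externally
 * imposed limit change (e.g. an ACPI _PPC notification) asks cpufreq to
 * re-evaluate the affected CPU's policy. "foo" naming is invented.
 */
static void foo_handle_limit_change(unsigned int cpu)
{
	cpufreq_update_policy(cpu);
}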
2339
2760984f 2340static int cpufreq_cpu_callback(struct notifier_block *nfb,
c32b6b8e
AR
2341 unsigned long action, void *hcpu)
2342{
2343 unsigned int cpu = (unsigned long)hcpu;
8a25a2fd 2344 struct device *dev;
c32b6b8e 2345
8a25a2fd
KS
2346 dev = get_cpu_device(cpu);
2347 if (dev) {
5302c3fb 2348 switch (action & ~CPU_TASKS_FROZEN) {
c32b6b8e 2349 case CPU_ONLINE:
23faf0b7 2350 cpufreq_add_dev(dev, NULL);
c32b6b8e 2351 break;
5302c3fb 2352
c32b6b8e 2353 case CPU_DOWN_PREPARE:
96bbbe4a 2354 __cpufreq_remove_dev_prepare(dev, NULL);
1aee40ac
SB
2355 break;
2356
2357 case CPU_POST_DEAD:
96bbbe4a 2358 __cpufreq_remove_dev_finish(dev, NULL);
c32b6b8e 2359 break;
5302c3fb 2360
5a01f2e8 2361 case CPU_DOWN_FAILED:
23faf0b7 2362 cpufreq_add_dev(dev, NULL);
c32b6b8e
AR
2363 break;
2364 }
2365 }
2366 return NOTIFY_OK;
2367}
2368
9c36f746 2369static struct notifier_block __refdata cpufreq_cpu_notifier = {
bb176f7d 2370 .notifier_call = cpufreq_cpu_callback,
c32b6b8e 2371};
1da177e4 2372
6f19efc0
LM
2373/*********************************************************************
2374 * BOOST *
2375 *********************************************************************/
2376static int cpufreq_boost_set_sw(int state)
2377{
2378 struct cpufreq_frequency_table *freq_table;
2379 struct cpufreq_policy *policy;
2380 int ret = -EINVAL;
2381
f963735a 2382 for_each_active_policy(policy) {
6f19efc0
LM
2383 freq_table = cpufreq_frequency_get_table(policy->cpu);
2384 if (freq_table) {
2385 ret = cpufreq_frequency_table_cpuinfo(policy,
2386 freq_table);
2387 if (ret) {
2388 pr_err("%s: Policy frequency update failed\n",
2389 __func__);
2390 break;
2391 }
2392 policy->user_policy.max = policy->max;
2393 __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2394 }
2395 }
2396
2397 return ret;
2398}
2399
2400int cpufreq_boost_trigger_state(int state)
2401{
2402 unsigned long flags;
2403 int ret = 0;
2404
2405 if (cpufreq_driver->boost_enabled == state)
2406 return 0;
2407
2408 write_lock_irqsave(&cpufreq_driver_lock, flags);
2409 cpufreq_driver->boost_enabled = state;
2410 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2411
2412 ret = cpufreq_driver->set_boost(state);
2413 if (ret) {
2414 write_lock_irqsave(&cpufreq_driver_lock, flags);
2415 cpufreq_driver->boost_enabled = !state;
2416 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2417
e837f9b5
JP
2418 pr_err("%s: Cannot %s BOOST\n",
2419 __func__, state ? "enable" : "disable");
6f19efc0
LM
2420 }
2421
2422 return ret;
2423}
2424
2425int cpufreq_boost_supported(void)
2426{
2427 if (likely(cpufreq_driver))
2428 return cpufreq_driver->boost_supported;
2429
2430 return 0;
2431}
2432EXPORT_SYMBOL_GPL(cpufreq_boost_supported);
2433
2434int cpufreq_boost_enabled(void)
2435{
2436 return cpufreq_driver->boost_enabled;
2437}
2438EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
2439
1da177e4
LT
2440/*********************************************************************
2441 * REGISTER / UNREGISTER CPUFREQ DRIVER *
2442 *********************************************************************/
2443
2444/**
2445 * cpufreq_register_driver - register a CPU Frequency driver
 2446 * @driver_data: A struct cpufreq_driver containing the values
2447 * submitted by the CPU Frequency driver.
2448 *
bb176f7d 2449 * Registers a CPU Frequency driver to this core code. This code
1da177e4 2450 * returns zero on success, -EEXIST when another driver got here first
32ee8c3e 2451 * (and isn't unregistered in the meantime).
1da177e4
LT
2452 *
2453 */
221dee28 2454int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1da177e4
LT
2455{
2456 unsigned long flags;
2457 int ret;
2458
a7b422cd
KRW
2459 if (cpufreq_disabled())
2460 return -ENODEV;
2461
1da177e4 2462 if (!driver_data || !driver_data->verify || !driver_data->init ||
9c0ebcf7 2463 !(driver_data->setpolicy || driver_data->target_index ||
9832235f
RW
2464 driver_data->target) ||
2465 (driver_data->setpolicy && (driver_data->target_index ||
1c03a2d0
VK
2466 driver_data->target)) ||
2467 (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
1da177e4
LT
2468 return -EINVAL;
2469
2d06d8c4 2470 pr_debug("trying to register driver %s\n", driver_data->name);
1da177e4 2471
0d1857a1 2472 write_lock_irqsave(&cpufreq_driver_lock, flags);
1c3d85dd 2473 if (cpufreq_driver) {
0d1857a1 2474 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
4dea5806 2475 return -EEXIST;
1da177e4 2476 }
1c3d85dd 2477 cpufreq_driver = driver_data;
0d1857a1 2478 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4 2479
bc68b7df
VK
2480 if (driver_data->setpolicy)
2481 driver_data->flags |= CPUFREQ_CONST_LOOPS;
2482
6f19efc0
LM
2483 if (cpufreq_boost_supported()) {
2484 /*
2485 * Check if driver provides function to enable boost -
2486 * if not, use cpufreq_boost_set_sw as default
2487 */
2488 if (!cpufreq_driver->set_boost)
2489 cpufreq_driver->set_boost = cpufreq_boost_set_sw;
2490
2491 ret = cpufreq_sysfs_create_file(&boost.attr);
2492 if (ret) {
2493 pr_err("%s: cannot register global BOOST sysfs file\n",
e837f9b5 2494 __func__);
6f19efc0
LM
2495 goto err_null_driver;
2496 }
2497 }
2498
8a25a2fd 2499 ret = subsys_interface_register(&cpufreq_interface);
8f5bc2ab 2500 if (ret)
6f19efc0 2501 goto err_boost_unreg;
1da177e4 2502
ce1bcfe9
VK
2503 if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
2504 list_empty(&cpufreq_policy_list)) {
1da177e4 2505 /* if all ->init() calls failed, unregister */
ce1bcfe9
VK
2506 pr_debug("%s: No CPU initialized for driver %s\n", __func__,
2507 driver_data->name);
2508 goto err_if_unreg;
1da177e4
LT
2509 }
2510
8f5bc2ab 2511 register_hotcpu_notifier(&cpufreq_cpu_notifier);
2d06d8c4 2512 pr_debug("driver %s up and running\n", driver_data->name);
1da177e4 2513
8f5bc2ab 2514 return 0;
8a25a2fd
KS
2515err_if_unreg:
2516 subsys_interface_unregister(&cpufreq_interface);
6f19efc0
LM
2517err_boost_unreg:
2518 if (cpufreq_boost_supported())
2519 cpufreq_sysfs_remove_file(&boost.attr);
8f5bc2ab 2520err_null_driver:
0d1857a1 2521 write_lock_irqsave(&cpufreq_driver_lock, flags);
1c3d85dd 2522 cpufreq_driver = NULL;
0d1857a1 2523 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
4d34a67d 2524 return ret;
1da177e4
LT
2525}
2526EXPORT_SYMBOL_GPL(cpufreq_register_driver);
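/*
 * Minimal table-based driver sketch satisfying the checks enforced above
 * (exactly one of ->setpolicy/->target/->target_index, plus ->init and
 * ->verify). Table contents and all "foo" names are invented;
 * cpufreq_generic_init() and the generic verify helper are the stock
 * library routines.
 */
static struct cpufreq_frequency_table foo_freq_table[] = {
	{ .frequency = 500000 },
	{ .frequency = 1000000 },
	{ .frequency = CPUFREQ_TABLE_END },
};

static int foo_cpufreq_target_index(struct cpufreq_policy *policy,
				    unsigned int index)
{
	/* program the clock to foo_freq_table[index].frequency here */
	return 0;
}

static int foo_cpufreq_driver_init(struct cpufreq_policy *policy)
{
	/* 100 us transition latency, as an arbitrary example */
	return cpufreq_generic_init(policy, foo_freq_table, 100000);
}

static struct cpufreq_driver foo_cpufreq_drv = {
	.name		= "foo-cpufreq",
	.init		= foo_cpufreq_driver_init,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= foo_cpufreq_target_index,
	.attr		= cpufreq_generic_attr,
};

/* module init would call cpufreq_register_driver(&foo_cpufreq_drv) */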
2527
1da177e4
LT
2528/**
2529 * cpufreq_unregister_driver - unregister the current CPUFreq driver
2530 *
bb176f7d 2531 * Unregister the current CPUFreq driver. Only call this if you have
1da177e4
LT
2532 * the right to do so, i.e. if you have succeeded in initialising before!
2533 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2534 * currently not initialised.
2535 */
221dee28 2536int cpufreq_unregister_driver(struct cpufreq_driver *driver)
1da177e4
LT
2537{
2538 unsigned long flags;
2539
1c3d85dd 2540 if (!cpufreq_driver || (driver != cpufreq_driver))
1da177e4 2541 return -EINVAL;
1da177e4 2542
2d06d8c4 2543 pr_debug("unregistering driver %s\n", driver->name);
1da177e4 2544
8a25a2fd 2545 subsys_interface_unregister(&cpufreq_interface);
6f19efc0
LM
2546 if (cpufreq_boost_supported())
2547 cpufreq_sysfs_remove_file(&boost.attr);
2548
65edc68c 2549 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
1da177e4 2550
6eed9404 2551 down_write(&cpufreq_rwsem);
0d1857a1 2552 write_lock_irqsave(&cpufreq_driver_lock, flags);
6eed9404 2553
1c3d85dd 2554 cpufreq_driver = NULL;
6eed9404 2555
0d1857a1 2556 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
6eed9404 2557 up_write(&cpufreq_rwsem);
1da177e4
LT
2558
2559 return 0;
2560}
2561EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
5a01f2e8 2562
90de2a4a
DA
2563/*
2564 * Stop cpufreq at shutdown to make sure it isn't holding any locks
2565 * or mutexes when secondary CPUs are halted.
2566 */
2567static struct syscore_ops cpufreq_syscore_ops = {
2568 .shutdown = cpufreq_suspend,
2569};
2570
5a01f2e8
VP
2571static int __init cpufreq_core_init(void)
2572{
a7b422cd
KRW
2573 if (cpufreq_disabled())
2574 return -ENODEV;
2575
2361be23 2576 cpufreq_global_kobject = kobject_create();
8aa84ad8
TR
2577 BUG_ON(!cpufreq_global_kobject);
2578
90de2a4a
DA
2579 register_syscore_ops(&cpufreq_syscore_ops);
2580
5a01f2e8
VP
2581 return 0;
2582}
5a01f2e8 2583core_initcall(cpufreq_core_init);