[mirror_ubuntu-artful-kernel.git] drivers/cpufreq/cpufreq.c
cpufreq: suspend governors on system suspend/hibernate
1 /*
2 * linux/drivers/cpufreq/cpufreq.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
7 *
8 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
9 * Added handling for CPU hotplug
10 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
11 * Fix handling for CPU hotplug -- affected CPUs
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
16 */
17
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
20 #include <linux/cpu.h>
21 #include <linux/cpufreq.h>
22 #include <linux/delay.h>
23 #include <linux/device.h>
24 #include <linux/init.h>
25 #include <linux/kernel_stat.h>
26 #include <linux/module.h>
27 #include <linux/mutex.h>
28 #include <linux/slab.h>
29 #include <linux/suspend.h>
30 #include <linux/tick.h>
31 #include <trace/events/power.h>
32
33 /**
34 * The "cpufreq driver" - the arch- or hardware-dependent low
35 * level driver of CPUFreq support, and its spinlock. This lock
36 * also protects the cpufreq_cpu_data array.
37 */
38 static struct cpufreq_driver *cpufreq_driver;
39 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
40 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
41 static DEFINE_RWLOCK(cpufreq_driver_lock);
42 DEFINE_MUTEX(cpufreq_governor_lock);
43 static LIST_HEAD(cpufreq_policy_list);
44
45 /* This one keeps track of the previously set governor of a removed CPU */
46 static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
47
48 /* Flag to suspend/resume CPUFreq governors */
49 static bool cpufreq_suspended;
50
51 static inline bool has_target(void)
52 {
53 return cpufreq_driver->target_index || cpufreq_driver->target;
54 }
55
56 /*
57 * rwsem to guarantee that cpufreq driver module doesn't unload during critical
58 * sections
59 */
60 static DECLARE_RWSEM(cpufreq_rwsem);
61
62 /* internal prototypes */
63 static int __cpufreq_governor(struct cpufreq_policy *policy,
64 unsigned int event);
65 static unsigned int __cpufreq_get(unsigned int cpu);
66 static void handle_update(struct work_struct *work);
67
68 /**
69 * Two notifier lists: the "policy" list is involved in the
70 * validation process for a new CPU frequency policy; the
71 * "transition" list for kernel code that needs to handle
72 * changes to devices when the CPU clock speed changes.
73 * The mutex locks both lists.
74 */
75 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
76 static struct srcu_notifier_head cpufreq_transition_notifier_list;
77
78 static bool init_cpufreq_transition_notifier_list_called;
79 static int __init init_cpufreq_transition_notifier_list(void)
80 {
81 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
82 init_cpufreq_transition_notifier_list_called = true;
83 return 0;
84 }
85 pure_initcall(init_cpufreq_transition_notifier_list);
86
87 static int off __read_mostly;
88 static int cpufreq_disabled(void)
89 {
90 return off;
91 }
92 void disable_cpufreq(void)
93 {
94 off = 1;
95 }
96 static LIST_HEAD(cpufreq_governor_list);
97 static DEFINE_MUTEX(cpufreq_governor_mutex);
98
99 bool have_governor_per_policy(void)
100 {
101 return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
102 }
103 EXPORT_SYMBOL_GPL(have_governor_per_policy);
104
105 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
106 {
107 if (have_governor_per_policy())
108 return &policy->kobj;
109 else
110 return cpufreq_global_kobject;
111 }
112 EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
113
114 static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
115 {
116 u64 idle_time;
117 u64 cur_wall_time;
118 u64 busy_time;
119
120 cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
121
122 busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
123 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
124 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
125 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
126 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
127 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
128
129 idle_time = cur_wall_time - busy_time;
130 if (wall)
131 *wall = cputime_to_usecs(cur_wall_time);
132
133 return cputime_to_usecs(idle_time);
134 }
135
136 u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
137 {
138 u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
139
140 if (idle_time == -1ULL)
141 return get_cpu_idle_time_jiffy(cpu, wall);
142 else if (!io_busy)
143 idle_time += get_cpu_iowait_time_us(cpu, wall);
144
145 return idle_time;
146 }
147 EXPORT_SYMBOL_GPL(get_cpu_idle_time);
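/*
 * Editor's note (illustrative sketch, not part of the original file):
 * roughly how a sampling governor could turn two successive
 * get_cpu_idle_time() readings into a load percentage. The helper name
 * and the caller-held prev_* state are assumptions for the example.
 */
#if 0	/* example only */
static unsigned int example_estimate_load(unsigned int cpu,
					   u64 *prev_idle, u64 *prev_wall)
{
	u64 idle, wall;
	unsigned int idle_delta, wall_delta;

	/* io_busy == 0: io-wait time is counted as idle time */
	idle = get_cpu_idle_time(cpu, &wall, 0);

	idle_delta = (unsigned int)(idle - *prev_idle);
	wall_delta = (unsigned int)(wall - *prev_wall);
	*prev_idle = idle;
	*prev_wall = wall;

	if (!wall_delta || wall_delta < idle_delta)
		return 0;

	return 100 * (wall_delta - idle_delta) / wall_delta;
}
#endif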
148
149 /*
150 * This is a generic cpufreq init() routine which can be used by cpufreq
151  * drivers of SMP systems. It will do the following:
152  * - validate & show the freq table passed
153  * - set the policy's transition latency
154  * - fill policy->cpus with all possible CPUs
155 */
156 int cpufreq_generic_init(struct cpufreq_policy *policy,
157 struct cpufreq_frequency_table *table,
158 unsigned int transition_latency)
159 {
160 int ret;
161
162 ret = cpufreq_table_validate_and_show(policy, table);
163 if (ret) {
164 pr_err("%s: invalid frequency table: %d\n", __func__, ret);
165 return ret;
166 }
167
168 policy->cpuinfo.transition_latency = transition_latency;
169
170 /*
171  * The driver only supports the SMP configuration where all processors
172  * share the same clock and voltage.
173 */
174 cpumask_setall(policy->cpus);
175
176 return 0;
177 }
178 EXPORT_SYMBOL_GPL(cpufreq_generic_init);
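/*
 * Editor's note (illustrative sketch, not part of the original file):
 * a hypothetical driver ->init() callback built on cpufreq_generic_init().
 * example_freq_table and the chosen transition latency are assumptions.
 */
#if 0	/* example only */
static int example_cpufreq_init(struct cpufreq_policy *policy)
{
	/* transition latency is given in nanoseconds */
	return cpufreq_generic_init(policy, example_freq_table, 300 * 1000);
}
#endif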
179
180 unsigned int cpufreq_generic_get(unsigned int cpu)
181 {
182 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
183
184 if (!policy || IS_ERR(policy->clk)) {
185 pr_err("%s: No %s associated to cpu: %d\n", __func__,
186 policy ? "clk" : "policy", cpu);
187 return 0;
188 }
189
190 return clk_get_rate(policy->clk) / 1000;
191 }
192 EXPORT_SYMBOL_GPL(cpufreq_generic_get);
193
194 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
195 {
196 struct cpufreq_policy *policy = NULL;
197 unsigned long flags;
198
199 if (cpufreq_disabled() || (cpu >= nr_cpu_ids))
200 return NULL;
201
202 if (!down_read_trylock(&cpufreq_rwsem))
203 return NULL;
204
205 /* get the cpufreq driver */
206 read_lock_irqsave(&cpufreq_driver_lock, flags);
207
208 if (cpufreq_driver) {
209 /* get the CPU */
210 policy = per_cpu(cpufreq_cpu_data, cpu);
211 if (policy)
212 kobject_get(&policy->kobj);
213 }
214
215 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
216
217 if (!policy)
218 up_read(&cpufreq_rwsem);
219
220 return policy;
221 }
222 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
223
224 void cpufreq_cpu_put(struct cpufreq_policy *policy)
225 {
226 if (cpufreq_disabled())
227 return;
228
229 kobject_put(&policy->kobj);
230 up_read(&cpufreq_rwsem);
231 }
232 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
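/*
 * Editor's note (illustrative sketch, not part of the original file):
 * cpufreq_cpu_get() and cpufreq_cpu_put() must be paired, as in
 * cpufreq_quick_get_max() further down in this file.
 */
#if 0	/* example only */
static unsigned int example_read_policy_max(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int max = 0;

	if (policy) {
		max = policy->max;
		/* drops the kobject reference and cpufreq_rwsem */
		cpufreq_cpu_put(policy);
	}
	return max;
}
#endif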
233
234 /*********************************************************************
235 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
236 *********************************************************************/
237
238 /**
239 * adjust_jiffies - adjust the system "loops_per_jiffy"
240 *
241 * This function alters the system "loops_per_jiffy" for the clock
242 * speed change. Note that loops_per_jiffy cannot be updated on SMP
243 * systems as each CPU might be scaled differently. So, use the arch
244 * per-CPU loops_per_jiffy value wherever possible.
245 */
246 #ifndef CONFIG_SMP
247 static unsigned long l_p_j_ref;
248 static unsigned int l_p_j_ref_freq;
249
250 static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
251 {
252 if (ci->flags & CPUFREQ_CONST_LOOPS)
253 return;
254
255 if (!l_p_j_ref_freq) {
256 l_p_j_ref = loops_per_jiffy;
257 l_p_j_ref_freq = ci->old;
258 pr_debug("saving %lu as reference value for loops_per_jiffy; "
259 "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
260 }
261 if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
262 (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
263 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
264 ci->new);
265 pr_debug("scaling loops_per_jiffy to %lu "
266 "for frequency %u kHz\n", loops_per_jiffy, ci->new);
267 }
268 }
269 #else
270 static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
271 {
272 return;
273 }
274 #endif
275
276 static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
277 struct cpufreq_freqs *freqs, unsigned int state)
278 {
279 BUG_ON(irqs_disabled());
280
281 if (cpufreq_disabled())
282 return;
283
284 freqs->flags = cpufreq_driver->flags;
285 pr_debug("notification %u of frequency transition to %u kHz\n",
286 state, freqs->new);
287
288 switch (state) {
289
290 case CPUFREQ_PRECHANGE:
291 /* detect if the driver reported a value as "old frequency"
292 * which is not equal to what the cpufreq core thinks is
293 * "old frequency".
294 */
295 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
296 if ((policy) && (policy->cpu == freqs->cpu) &&
297 (policy->cur) && (policy->cur != freqs->old)) {
298 pr_debug("Warning: CPU frequency is"
299 " %u, cpufreq assumed %u kHz.\n",
300 freqs->old, policy->cur);
301 freqs->old = policy->cur;
302 }
303 }
304 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
305 CPUFREQ_PRECHANGE, freqs);
306 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
307 break;
308
309 case CPUFREQ_POSTCHANGE:
310 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
311 pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
312 (unsigned long)freqs->cpu);
313 trace_cpu_frequency(freqs->new, freqs->cpu);
314 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
315 CPUFREQ_POSTCHANGE, freqs);
316 if (likely(policy) && likely(policy->cpu == freqs->cpu))
317 policy->cur = freqs->new;
318 break;
319 }
320 }
321
322 /**
323 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
324 * on frequency transition.
325 *
326 * This function calls the transition notifiers and the "adjust_jiffies"
327 * function. It is called twice on all CPU frequency changes that have
328 * external effects.
329 */
330 void cpufreq_notify_transition(struct cpufreq_policy *policy,
331 struct cpufreq_freqs *freqs, unsigned int state)
332 {
333 for_each_cpu(freqs->cpu, policy->cpus)
334 __cpufreq_notify_transition(policy, freqs, state);
335 }
336 EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
337
338  /* Do post notifications when there is a chance that the transition has failed */
339 void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
340 struct cpufreq_freqs *freqs, int transition_failed)
341 {
342 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
343 if (!transition_failed)
344 return;
345
346 swap(freqs->old, freqs->new);
347 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
348 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
349 }
350 EXPORT_SYMBOL_GPL(cpufreq_notify_post_transition);
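/*
 * Editor's note (illustrative sketch, not part of the original file):
 * how a ->target()-style driver might bracket a hardware frequency switch
 * with the transition notifiers, mirroring the core's own usage in
 * __cpufreq_driver_target() below. example_freq_table and
 * example_write_frequency() are hypothetical.
 */
#if 0	/* example only */
static int example_set_target(struct cpufreq_policy *policy, unsigned int index)
{
	struct cpufreq_freqs freqs;
	int ret;

	freqs.old = policy->cur;
	freqs.new = example_freq_table[index].frequency;
	freqs.flags = 0;

	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
	ret = example_write_frequency(freqs.new);
	/* reverts the notification if the hardware write failed */
	cpufreq_notify_post_transition(policy, &freqs, ret);

	return ret;
}
#endif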
351
352
353 /*********************************************************************
354 * SYSFS INTERFACE *
355 *********************************************************************/
356 static ssize_t show_boost(struct kobject *kobj,
357 struct attribute *attr, char *buf)
358 {
359 return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
360 }
361
362 static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
363 const char *buf, size_t count)
364 {
365 int ret, enable;
366
367 ret = sscanf(buf, "%d", &enable);
368 if (ret != 1 || enable < 0 || enable > 1)
369 return -EINVAL;
370
371 if (cpufreq_boost_trigger_state(enable)) {
372 pr_err("%s: Cannot %s BOOST!\n", __func__,
373 enable ? "enable" : "disable");
374 return -EINVAL;
375 }
376
377 pr_debug("%s: cpufreq BOOST %s\n", __func__,
378 enable ? "enabled" : "disabled");
379
380 return count;
381 }
382 define_one_global_rw(boost);
383
384 static struct cpufreq_governor *__find_governor(const char *str_governor)
385 {
386 struct cpufreq_governor *t;
387
388 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
389 if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
390 return t;
391
392 return NULL;
393 }
394
395 /**
396 * cpufreq_parse_governor - parse a governor string
397 */
398 static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
399 struct cpufreq_governor **governor)
400 {
401 int err = -EINVAL;
402
403 if (!cpufreq_driver)
404 goto out;
405
406 if (cpufreq_driver->setpolicy) {
407 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
408 *policy = CPUFREQ_POLICY_PERFORMANCE;
409 err = 0;
410 } else if (!strnicmp(str_governor, "powersave",
411 CPUFREQ_NAME_LEN)) {
412 *policy = CPUFREQ_POLICY_POWERSAVE;
413 err = 0;
414 }
415 } else if (has_target()) {
416 struct cpufreq_governor *t;
417
418 mutex_lock(&cpufreq_governor_mutex);
419
420 t = __find_governor(str_governor);
421
422 if (t == NULL) {
423 int ret;
424
425 mutex_unlock(&cpufreq_governor_mutex);
426 ret = request_module("cpufreq_%s", str_governor);
427 mutex_lock(&cpufreq_governor_mutex);
428
429 if (ret == 0)
430 t = __find_governor(str_governor);
431 }
432
433 if (t != NULL) {
434 *governor = t;
435 err = 0;
436 }
437
438 mutex_unlock(&cpufreq_governor_mutex);
439 }
440 out:
441 return err;
442 }
443
444 /**
445 * cpufreq_per_cpu_attr_read() / show_##file_name() -
446 * print out cpufreq information
447 *
448 * Write out information from cpufreq_driver->policy[cpu]; object must be
449 * "unsigned int".
450 */
451
452 #define show_one(file_name, object) \
453 static ssize_t show_##file_name \
454 (struct cpufreq_policy *policy, char *buf) \
455 { \
456 return sprintf(buf, "%u\n", policy->object); \
457 }
458
459 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
460 show_one(cpuinfo_max_freq, cpuinfo.max_freq);
461 show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
462 show_one(scaling_min_freq, min);
463 show_one(scaling_max_freq, max);
464 show_one(scaling_cur_freq, cur);
465
466 static int cpufreq_set_policy(struct cpufreq_policy *policy,
467 struct cpufreq_policy *new_policy);
468
469 /**
470 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
471 */
472 #define store_one(file_name, object) \
473 static ssize_t store_##file_name \
474 (struct cpufreq_policy *policy, const char *buf, size_t count) \
475 { \
476 int ret; \
477 struct cpufreq_policy new_policy; \
478 \
479 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
480 if (ret) \
481 return -EINVAL; \
482 \
483 ret = sscanf(buf, "%u", &new_policy.object); \
484 if (ret != 1) \
485 return -EINVAL; \
486 \
487 ret = cpufreq_set_policy(policy, &new_policy); \
488 policy->user_policy.object = policy->object; \
489 \
490 return ret ? ret : count; \
491 }
492
493 store_one(scaling_min_freq, min);
494 store_one(scaling_max_freq, max);
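/*
 * Editor's note: for reference, store_one(scaling_max_freq, max) above
 * expands to roughly the following (shown here only as an illustration).
 */
#if 0	/* expansion shown for illustration only */
static ssize_t store_scaling_max_freq
(struct cpufreq_policy *policy, const char *buf, size_t count)
{
	int ret;
	struct cpufreq_policy new_policy;

	ret = cpufreq_get_policy(&new_policy, policy->cpu);
	if (ret)
		return -EINVAL;

	ret = sscanf(buf, "%u", &new_policy.max);
	if (ret != 1)
		return -EINVAL;

	ret = cpufreq_set_policy(policy, &new_policy);
	policy->user_policy.max = policy->max;

	return ret ? ret : count;
}
#endif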
495
496 /**
497 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
498 */
499 static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
500 char *buf)
501 {
502 unsigned int cur_freq = __cpufreq_get(policy->cpu);
503 if (!cur_freq)
504 return sprintf(buf, "<unknown>");
505 return sprintf(buf, "%u\n", cur_freq);
506 }
507
508 /**
509 * show_scaling_governor - show the current policy for the specified CPU
510 */
511 static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
512 {
513 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
514 return sprintf(buf, "powersave\n");
515 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
516 return sprintf(buf, "performance\n");
517 else if (policy->governor)
518 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
519 policy->governor->name);
520 return -EINVAL;
521 }
522
523 /**
524 * store_scaling_governor - store policy for the specified CPU
525 */
526 static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
527 const char *buf, size_t count)
528 {
529 int ret;
530 char str_governor[16];
531 struct cpufreq_policy new_policy;
532
533 ret = cpufreq_get_policy(&new_policy, policy->cpu);
534 if (ret)
535 return ret;
536
537 ret = sscanf(buf, "%15s", str_governor);
538 if (ret != 1)
539 return -EINVAL;
540
541 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
542 &new_policy.governor))
543 return -EINVAL;
544
545 ret = cpufreq_set_policy(policy, &new_policy);
546
547 policy->user_policy.policy = policy->policy;
548 policy->user_policy.governor = policy->governor;
549
550 if (ret)
551 return ret;
552 else
553 return count;
554 }
555
556 /**
557 * show_scaling_driver - show the cpufreq driver currently loaded
558 */
559 static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
560 {
561 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
562 }
563
564 /**
565 * show_scaling_available_governors - show the available CPUfreq governors
566 */
567 static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
568 char *buf)
569 {
570 ssize_t i = 0;
571 struct cpufreq_governor *t;
572
573 if (!has_target()) {
574 i += sprintf(buf, "performance powersave");
575 goto out;
576 }
577
578 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
579 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
580 - (CPUFREQ_NAME_LEN + 2)))
581 goto out;
582 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
583 }
584 out:
585 i += sprintf(&buf[i], "\n");
586 return i;
587 }
588
589 ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
590 {
591 ssize_t i = 0;
592 unsigned int cpu;
593
594 for_each_cpu(cpu, mask) {
595 if (i)
596 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
597 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
598 if (i >= (PAGE_SIZE - 5))
599 break;
600 }
601 i += sprintf(&buf[i], "\n");
602 return i;
603 }
604 EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
605
606 /**
607 * show_related_cpus - show the CPUs affected by each transition even if
608 * hw coordination is in use
609 */
610 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
611 {
612 return cpufreq_show_cpus(policy->related_cpus, buf);
613 }
614
615 /**
616 * show_affected_cpus - show the CPUs affected by each transition
617 */
618 static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
619 {
620 return cpufreq_show_cpus(policy->cpus, buf);
621 }
622
623 static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
624 const char *buf, size_t count)
625 {
626 unsigned int freq = 0;
627 unsigned int ret;
628
629 if (!policy->governor || !policy->governor->store_setspeed)
630 return -EINVAL;
631
632 ret = sscanf(buf, "%u", &freq);
633 if (ret != 1)
634 return -EINVAL;
635
636 policy->governor->store_setspeed(policy, freq);
637
638 return count;
639 }
640
641 static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
642 {
643 if (!policy->governor || !policy->governor->show_setspeed)
644 return sprintf(buf, "<unsupported>\n");
645
646 return policy->governor->show_setspeed(policy, buf);
647 }
648
649 /**
650 * show_bios_limit - show the current cpufreq HW/BIOS limitation
651 */
652 static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
653 {
654 unsigned int limit;
655 int ret;
656 if (cpufreq_driver->bios_limit) {
657 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
658 if (!ret)
659 return sprintf(buf, "%u\n", limit);
660 }
661 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
662 }
663
664 cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
665 cpufreq_freq_attr_ro(cpuinfo_min_freq);
666 cpufreq_freq_attr_ro(cpuinfo_max_freq);
667 cpufreq_freq_attr_ro(cpuinfo_transition_latency);
668 cpufreq_freq_attr_ro(scaling_available_governors);
669 cpufreq_freq_attr_ro(scaling_driver);
670 cpufreq_freq_attr_ro(scaling_cur_freq);
671 cpufreq_freq_attr_ro(bios_limit);
672 cpufreq_freq_attr_ro(related_cpus);
673 cpufreq_freq_attr_ro(affected_cpus);
674 cpufreq_freq_attr_rw(scaling_min_freq);
675 cpufreq_freq_attr_rw(scaling_max_freq);
676 cpufreq_freq_attr_rw(scaling_governor);
677 cpufreq_freq_attr_rw(scaling_setspeed);
678
679 static struct attribute *default_attrs[] = {
680 &cpuinfo_min_freq.attr,
681 &cpuinfo_max_freq.attr,
682 &cpuinfo_transition_latency.attr,
683 &scaling_min_freq.attr,
684 &scaling_max_freq.attr,
685 &affected_cpus.attr,
686 &related_cpus.attr,
687 &scaling_governor.attr,
688 &scaling_driver.attr,
689 &scaling_available_governors.attr,
690 &scaling_setspeed.attr,
691 NULL
692 };
693
694 #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
695 #define to_attr(a) container_of(a, struct freq_attr, attr)
696
697 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
698 {
699 struct cpufreq_policy *policy = to_policy(kobj);
700 struct freq_attr *fattr = to_attr(attr);
701 ssize_t ret;
702
703 if (!down_read_trylock(&cpufreq_rwsem))
704 return -EINVAL;
705
706 down_read(&policy->rwsem);
707
708 if (fattr->show)
709 ret = fattr->show(policy, buf);
710 else
711 ret = -EIO;
712
713 up_read(&policy->rwsem);
714 up_read(&cpufreq_rwsem);
715
716 return ret;
717 }
718
719 static ssize_t store(struct kobject *kobj, struct attribute *attr,
720 const char *buf, size_t count)
721 {
722 struct cpufreq_policy *policy = to_policy(kobj);
723 struct freq_attr *fattr = to_attr(attr);
724 ssize_t ret = -EINVAL;
725
726 get_online_cpus();
727
728 if (!cpu_online(policy->cpu))
729 goto unlock;
730
731 if (!down_read_trylock(&cpufreq_rwsem))
732 goto unlock;
733
734 down_write(&policy->rwsem);
735
736 if (fattr->store)
737 ret = fattr->store(policy, buf, count);
738 else
739 ret = -EIO;
740
741 up_write(&policy->rwsem);
742
743 up_read(&cpufreq_rwsem);
744 unlock:
745 put_online_cpus();
746
747 return ret;
748 }
749
750 static void cpufreq_sysfs_release(struct kobject *kobj)
751 {
752 struct cpufreq_policy *policy = to_policy(kobj);
753 pr_debug("last reference is dropped\n");
754 complete(&policy->kobj_unregister);
755 }
756
757 static const struct sysfs_ops sysfs_ops = {
758 .show = show,
759 .store = store,
760 };
761
762 static struct kobj_type ktype_cpufreq = {
763 .sysfs_ops = &sysfs_ops,
764 .default_attrs = default_attrs,
765 .release = cpufreq_sysfs_release,
766 };
767
768 struct kobject *cpufreq_global_kobject;
769 EXPORT_SYMBOL(cpufreq_global_kobject);
770
771 static int cpufreq_global_kobject_usage;
772
773 int cpufreq_get_global_kobject(void)
774 {
775 if (!cpufreq_global_kobject_usage++)
776 return kobject_add(cpufreq_global_kobject,
777 &cpu_subsys.dev_root->kobj, "%s", "cpufreq");
778
779 return 0;
780 }
781 EXPORT_SYMBOL(cpufreq_get_global_kobject);
782
783 void cpufreq_put_global_kobject(void)
784 {
785 if (!--cpufreq_global_kobject_usage)
786 kobject_del(cpufreq_global_kobject);
787 }
788 EXPORT_SYMBOL(cpufreq_put_global_kobject);
789
790 int cpufreq_sysfs_create_file(const struct attribute *attr)
791 {
792 int ret = cpufreq_get_global_kobject();
793
794 if (!ret) {
795 ret = sysfs_create_file(cpufreq_global_kobject, attr);
796 if (ret)
797 cpufreq_put_global_kobject();
798 }
799
800 return ret;
801 }
802 EXPORT_SYMBOL(cpufreq_sysfs_create_file);
803
804 void cpufreq_sysfs_remove_file(const struct attribute *attr)
805 {
806 sysfs_remove_file(cpufreq_global_kobject, attr);
807 cpufreq_put_global_kobject();
808 }
809 EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
810
811 /* symlink affected CPUs */
812 static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
813 {
814 unsigned int j;
815 int ret = 0;
816
817 for_each_cpu(j, policy->cpus) {
818 struct device *cpu_dev;
819
820 if (j == policy->cpu)
821 continue;
822
823 pr_debug("Adding link for CPU: %u\n", j);
824 cpu_dev = get_cpu_device(j);
825 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
826 "cpufreq");
827 if (ret)
828 break;
829 }
830 return ret;
831 }
832
833 static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
834 struct device *dev)
835 {
836 struct freq_attr **drv_attr;
837 int ret = 0;
838
839 /* prepare interface data */
840 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
841 &dev->kobj, "cpufreq");
842 if (ret)
843 return ret;
844
845 /* set up files for this cpu device */
846 drv_attr = cpufreq_driver->attr;
847 while ((drv_attr) && (*drv_attr)) {
848 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
849 if (ret)
850 goto err_out_kobj_put;
851 drv_attr++;
852 }
853 if (cpufreq_driver->get) {
854 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
855 if (ret)
856 goto err_out_kobj_put;
857 }
858 if (has_target()) {
859 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
860 if (ret)
861 goto err_out_kobj_put;
862 }
863 if (cpufreq_driver->bios_limit) {
864 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
865 if (ret)
866 goto err_out_kobj_put;
867 }
868
869 ret = cpufreq_add_dev_symlink(policy);
870 if (ret)
871 goto err_out_kobj_put;
872
873 return ret;
874
875 err_out_kobj_put:
876 kobject_put(&policy->kobj);
877 wait_for_completion(&policy->kobj_unregister);
878 return ret;
879 }
880
881 static void cpufreq_init_policy(struct cpufreq_policy *policy)
882 {
883 struct cpufreq_governor *gov = NULL;
884 struct cpufreq_policy new_policy;
885 int ret = 0;
886
887 memcpy(&new_policy, policy, sizeof(*policy));
888
889 /* Update governor of new_policy to the governor used before hotplug */
890 gov = __find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu));
891 if (gov)
892 pr_debug("Restoring governor %s for cpu %d\n",
893 policy->governor->name, policy->cpu);
894 else
895 gov = CPUFREQ_DEFAULT_GOVERNOR;
896
897 new_policy.governor = gov;
898
899  /* Use the default policy if it is valid. */
900 if (cpufreq_driver->setpolicy)
901 cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);
902
903 /* set default policy */
904 ret = cpufreq_set_policy(policy, &new_policy);
905 if (ret) {
906 pr_debug("setting policy failed\n");
907 if (cpufreq_driver->exit)
908 cpufreq_driver->exit(policy);
909 }
910 }
911
912 #ifdef CONFIG_HOTPLUG_CPU
913 static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
914 unsigned int cpu, struct device *dev)
915 {
916 int ret = 0;
917 unsigned long flags;
918
919 if (has_target()) {
920 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
921 if (ret) {
922 pr_err("%s: Failed to stop governor\n", __func__);
923 return ret;
924 }
925 }
926
927 down_write(&policy->rwsem);
928
929 write_lock_irqsave(&cpufreq_driver_lock, flags);
930
931 cpumask_set_cpu(cpu, policy->cpus);
932 per_cpu(cpufreq_cpu_data, cpu) = policy;
933 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
934
935 up_write(&policy->rwsem);
936
937 if (has_target()) {
938 if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
939 (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
940 pr_err("%s: Failed to start governor\n", __func__);
941 return ret;
942 }
943 }
944
945 return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
946 }
947 #endif
948
949 static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
950 {
951 struct cpufreq_policy *policy;
952 unsigned long flags;
953
954 read_lock_irqsave(&cpufreq_driver_lock, flags);
955
956 policy = per_cpu(cpufreq_cpu_data_fallback, cpu);
957
958 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
959
960 policy->governor = NULL;
961
962 return policy;
963 }
964
965 static struct cpufreq_policy *cpufreq_policy_alloc(void)
966 {
967 struct cpufreq_policy *policy;
968
969 policy = kzalloc(sizeof(*policy), GFP_KERNEL);
970 if (!policy)
971 return NULL;
972
973 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
974 goto err_free_policy;
975
976 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
977 goto err_free_cpumask;
978
979 INIT_LIST_HEAD(&policy->policy_list);
980 init_rwsem(&policy->rwsem);
981
982 return policy;
983
984 err_free_cpumask:
985 free_cpumask_var(policy->cpus);
986 err_free_policy:
987 kfree(policy);
988
989 return NULL;
990 }
991
992 static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
993 {
994 struct kobject *kobj;
995 struct completion *cmp;
996
997 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
998 CPUFREQ_REMOVE_POLICY, policy);
999
1000 down_read(&policy->rwsem);
1001 kobj = &policy->kobj;
1002 cmp = &policy->kobj_unregister;
1003 up_read(&policy->rwsem);
1004 kobject_put(kobj);
1005
1006 /*
1007 * We need to make sure that the underlying kobj is
1008 * actually not referenced anymore by anybody before we
1009 * proceed with unloading.
1010 */
1011 pr_debug("waiting for dropping of refcount\n");
1012 wait_for_completion(cmp);
1013 pr_debug("wait complete\n");
1014 }
1015
1016 static void cpufreq_policy_free(struct cpufreq_policy *policy)
1017 {
1018 free_cpumask_var(policy->related_cpus);
1019 free_cpumask_var(policy->cpus);
1020 kfree(policy);
1021 }
1022
1023 static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
1024 {
1025 if (WARN_ON(cpu == policy->cpu))
1026 return;
1027
1028 down_write(&policy->rwsem);
1029
1030 policy->last_cpu = policy->cpu;
1031 policy->cpu = cpu;
1032
1033 up_write(&policy->rwsem);
1034
1035 cpufreq_frequency_table_update_policy_cpu(policy);
1036 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1037 CPUFREQ_UPDATE_POLICY_CPU, policy);
1038 }
1039
1040 static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
1041 bool frozen)
1042 {
1043 unsigned int j, cpu = dev->id;
1044 int ret = -ENOMEM;
1045 struct cpufreq_policy *policy;
1046 unsigned long flags;
1047 #ifdef CONFIG_HOTPLUG_CPU
1048 struct cpufreq_policy *tpolicy;
1049 #endif
1050
1051 if (cpu_is_offline(cpu))
1052 return 0;
1053
1054 pr_debug("adding CPU %u\n", cpu);
1055
1056 #ifdef CONFIG_SMP
1057 /* check whether a different CPU already registered this
1058 * CPU because it is in the same boat. */
1059 policy = cpufreq_cpu_get(cpu);
1060 if (unlikely(policy)) {
1061 cpufreq_cpu_put(policy);
1062 return 0;
1063 }
1064 #endif
1065
1066 if (!down_read_trylock(&cpufreq_rwsem))
1067 return 0;
1068
1069 #ifdef CONFIG_HOTPLUG_CPU
1070 /* Check if this cpu was hot-unplugged earlier and has siblings */
1071 read_lock_irqsave(&cpufreq_driver_lock, flags);
1072 list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) {
1073 if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) {
1074 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1075 ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev);
1076 up_read(&cpufreq_rwsem);
1077 return ret;
1078 }
1079 }
1080 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1081 #endif
1082
1083 /*
1084 * Restore the saved policy when doing light-weight init and fall back
1085 * to the full init if that fails.
1086 */
1087 policy = frozen ? cpufreq_policy_restore(cpu) : NULL;
1088 if (!policy) {
1089 frozen = false;
1090 policy = cpufreq_policy_alloc();
1091 if (!policy)
1092 goto nomem_out;
1093 }
1094
1095 /*
1096 * In the resume path, since we restore a saved policy, the assignment
1097 * to policy->cpu is like an update of the existing policy, rather than
1098 * the creation of a brand new one. So we need to perform this update
1099 * by invoking update_policy_cpu().
1100 */
1101 if (frozen && cpu != policy->cpu)
1102 update_policy_cpu(policy, cpu);
1103 else
1104 policy->cpu = cpu;
1105
1106 cpumask_copy(policy->cpus, cpumask_of(cpu));
1107
1108 init_completion(&policy->kobj_unregister);
1109 INIT_WORK(&policy->update, handle_update);
1110
1111  /* call driver. From then on the cpufreq driver must be able
1112 * to accept all calls to ->verify and ->setpolicy for this CPU
1113 */
1114 ret = cpufreq_driver->init(policy);
1115 if (ret) {
1116 pr_debug("initialization failed\n");
1117 goto err_set_policy_cpu;
1118 }
1119
1120  /* related_cpus should at least contain policy->cpus */
1121 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
1122
1123 /*
1124  * affected cpus must always be the ones that are online. We aren't
1125  * managing offline cpus here.
1126 */
1127 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1128
1129 if (!frozen) {
1130 policy->user_policy.min = policy->min;
1131 policy->user_policy.max = policy->max;
1132 }
1133
1134 down_write(&policy->rwsem);
1135 write_lock_irqsave(&cpufreq_driver_lock, flags);
1136 for_each_cpu(j, policy->cpus)
1137 per_cpu(cpufreq_cpu_data, j) = policy;
1138 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1139
1140 if (cpufreq_driver->get) {
1141 policy->cur = cpufreq_driver->get(policy->cpu);
1142 if (!policy->cur) {
1143 pr_err("%s: ->get() failed\n", __func__);
1144 goto err_get_freq;
1145 }
1146 }
1147
1148 /*
1149  * Sometimes boot loaders set the CPU frequency to a value outside of the
1150  * frequency table present with the cpufreq core. In such cases the CPU
1151  * might be unstable if it has to run at that frequency for a long
1152  * duration, so it is better to set it to a frequency which is specified
1153  * in the freq-table. This also makes cpufreq stats inconsistent, as
1154  * cpufreq-stats would fail to register because the current frequency of
1155  * the CPU isn't found in the freq-table.
1156  *
1157  * Because we don't want this change to affect the boot process badly, we
1158  * go for the next freq which is >= policy->cur ('cur' must be set by now,
1159  * otherwise we will end up setting freq to the lowest entry in the table,
1160  * as 'cur' is initialized to zero).
1161 *
1162 * We are passing target-freq as "policy->cur - 1" otherwise
1163 * __cpufreq_driver_target() would simply fail, as policy->cur will be
1164 * equal to target-freq.
1165 */
1166 if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
1167 && has_target()) {
1168 /* Are we running at unknown frequency ? */
1169 ret = cpufreq_frequency_table_get_index(policy, policy->cur);
1170 if (ret == -EINVAL) {
1171 /* Warn user and fix it */
1172 pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
1173 __func__, policy->cpu, policy->cur);
1174 ret = __cpufreq_driver_target(policy, policy->cur - 1,
1175 CPUFREQ_RELATION_L);
1176
1177 /*
1178  * Reaching here a few seconds after boot may not mean
1179  * that the system will remain stable at the "unknown"
1180  * frequency for a longer duration. Hence, a BUG_ON().
1181 */
1182 BUG_ON(ret);
1183 pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
1184 __func__, policy->cpu, policy->cur);
1185 }
1186 }
1187
1188 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1189 CPUFREQ_START, policy);
1190
1191 if (!frozen) {
1192 ret = cpufreq_add_dev_interface(policy, dev);
1193 if (ret)
1194 goto err_out_unregister;
1195 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1196 CPUFREQ_CREATE_POLICY, policy);
1197 }
1198
1199 write_lock_irqsave(&cpufreq_driver_lock, flags);
1200 list_add(&policy->policy_list, &cpufreq_policy_list);
1201 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1202
1203 cpufreq_init_policy(policy);
1204
1205 if (!frozen) {
1206 policy->user_policy.policy = policy->policy;
1207 policy->user_policy.governor = policy->governor;
1208 }
1209 up_write(&policy->rwsem);
1210
1211 kobject_uevent(&policy->kobj, KOBJ_ADD);
1212 up_read(&cpufreq_rwsem);
1213
1214 pr_debug("initialization complete\n");
1215
1216 return 0;
1217
1218 err_out_unregister:
1219 err_get_freq:
1220 write_lock_irqsave(&cpufreq_driver_lock, flags);
1221 for_each_cpu(j, policy->cpus)
1222 per_cpu(cpufreq_cpu_data, j) = NULL;
1223 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1224
1225 if (cpufreq_driver->exit)
1226 cpufreq_driver->exit(policy);
1227 err_set_policy_cpu:
1228 if (frozen) {
1229 /* Do not leave stale fallback data behind. */
1230 per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
1231 cpufreq_policy_put_kobj(policy);
1232 }
1233 cpufreq_policy_free(policy);
1234
1235 nomem_out:
1236 up_read(&cpufreq_rwsem);
1237
1238 return ret;
1239 }
1240
1241 /**
1242 * cpufreq_add_dev - add a CPU device
1243 *
1244 * Adds the cpufreq interface for a CPU device.
1245 *
1246 * The Oracle says: try running cpufreq registration/unregistration concurrently
1247  * with cpu hotplugging and all hell will break loose. Tried to clean this
1248 * mess up, but more thorough testing is needed. - Mathieu
1249 */
1250 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1251 {
1252 return __cpufreq_add_dev(dev, sif, false);
1253 }
1254
1255 static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
1256 unsigned int old_cpu)
1257 {
1258 struct device *cpu_dev;
1259 int ret;
1260
1261 /* first sibling now owns the new sysfs dir */
1262 cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu));
1263
1264 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
1265 ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
1266 if (ret) {
1267 pr_err("%s: Failed to move kobj: %d", __func__, ret);
1268
1269 down_write(&policy->rwsem);
1270 cpumask_set_cpu(old_cpu, policy->cpus);
1271 up_write(&policy->rwsem);
1272
1273 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
1274 "cpufreq");
1275
1276 return -EINVAL;
1277 }
1278
1279 return cpu_dev->id;
1280 }
1281
1282 static int __cpufreq_remove_dev_prepare(struct device *dev,
1283 struct subsys_interface *sif,
1284 bool frozen)
1285 {
1286 unsigned int cpu = dev->id, cpus;
1287 int new_cpu, ret;
1288 unsigned long flags;
1289 struct cpufreq_policy *policy;
1290
1291 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1292
1293 write_lock_irqsave(&cpufreq_driver_lock, flags);
1294
1295 policy = per_cpu(cpufreq_cpu_data, cpu);
1296
1297 /* Save the policy somewhere when doing a light-weight tear-down */
1298 if (frozen)
1299 per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;
1300
1301 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1302
1303 if (!policy) {
1304 pr_debug("%s: No cpu_data found\n", __func__);
1305 return -EINVAL;
1306 }
1307
1308 if (has_target()) {
1309 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1310 if (ret) {
1311 pr_err("%s: Failed to stop governor\n", __func__);
1312 return ret;
1313 }
1314 }
1315
1316 if (!cpufreq_driver->setpolicy)
1317 strncpy(per_cpu(cpufreq_cpu_governor, cpu),
1318 policy->governor->name, CPUFREQ_NAME_LEN);
1319
1320 down_read(&policy->rwsem);
1321 cpus = cpumask_weight(policy->cpus);
1322 up_read(&policy->rwsem);
1323
1324 if (cpu != policy->cpu) {
1325 sysfs_remove_link(&dev->kobj, "cpufreq");
1326 } else if (cpus > 1) {
1327 new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu);
1328 if (new_cpu >= 0) {
1329 update_policy_cpu(policy, new_cpu);
1330
1331 if (!frozen) {
1332 pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
1333 __func__, new_cpu, cpu);
1334 }
1335 }
1336 }
1337
1338 return 0;
1339 }
1340
1341 static int __cpufreq_remove_dev_finish(struct device *dev,
1342 struct subsys_interface *sif,
1343 bool frozen)
1344 {
1345 unsigned int cpu = dev->id, cpus;
1346 int ret;
1347 unsigned long flags;
1348 struct cpufreq_policy *policy;
1349
1350 read_lock_irqsave(&cpufreq_driver_lock, flags);
1351 policy = per_cpu(cpufreq_cpu_data, cpu);
1352 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1353
1354 if (!policy) {
1355 pr_debug("%s: No cpu_data found\n", __func__);
1356 return -EINVAL;
1357 }
1358
1359 down_write(&policy->rwsem);
1360 cpus = cpumask_weight(policy->cpus);
1361
1362 if (cpus > 1)
1363 cpumask_clear_cpu(cpu, policy->cpus);
1364 up_write(&policy->rwsem);
1365
1366 /* If cpu is last user of policy, free policy */
1367 if (cpus == 1) {
1368 if (has_target()) {
1369 ret = __cpufreq_governor(policy,
1370 CPUFREQ_GOV_POLICY_EXIT);
1371 if (ret) {
1372 pr_err("%s: Failed to exit governor\n",
1373 __func__);
1374 return ret;
1375 }
1376 }
1377
1378 if (!frozen)
1379 cpufreq_policy_put_kobj(policy);
1380
1381 /*
1382 * Perform the ->exit() even during light-weight tear-down,
1383 * since this is a core component, and is essential for the
1384 * subsequent light-weight ->init() to succeed.
1385 */
1386 if (cpufreq_driver->exit)
1387 cpufreq_driver->exit(policy);
1388
1389 /* Remove policy from list of active policies */
1390 write_lock_irqsave(&cpufreq_driver_lock, flags);
1391 list_del(&policy->policy_list);
1392 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1393
1394 if (!frozen)
1395 cpufreq_policy_free(policy);
1396 } else {
1397 if (has_target()) {
1398 if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
1399 (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
1400 pr_err("%s: Failed to start governor\n",
1401 __func__);
1402 return ret;
1403 }
1404 }
1405 }
1406
1407 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1408 return 0;
1409 }
1410
1411 /**
1412 * cpufreq_remove_dev - remove a CPU device
1413 *
1414 * Removes the cpufreq interface for a CPU device.
1415 */
1416 static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1417 {
1418 unsigned int cpu = dev->id;
1419 int ret;
1420
1421 if (cpu_is_offline(cpu))
1422 return 0;
1423
1424 ret = __cpufreq_remove_dev_prepare(dev, sif, false);
1425
1426 if (!ret)
1427 ret = __cpufreq_remove_dev_finish(dev, sif, false);
1428
1429 return ret;
1430 }
1431
1432 static void handle_update(struct work_struct *work)
1433 {
1434 struct cpufreq_policy *policy =
1435 container_of(work, struct cpufreq_policy, update);
1436 unsigned int cpu = policy->cpu;
1437 pr_debug("handle_update for cpu %u called\n", cpu);
1438 cpufreq_update_policy(cpu);
1439 }
1440
1441 /**
1442  * cpufreq_out_of_sync - If actual and saved CPU frequency differ, we're
1443 * in deep trouble.
1444 * @cpu: cpu number
1445 * @old_freq: CPU frequency the kernel thinks the CPU runs at
1446 * @new_freq: CPU frequency the CPU actually runs at
1447 *
1448 * We adjust to current frequency first, and need to clean up later.
1449  * So either call cpufreq_update_policy() or schedule handle_update().
1450 */
1451 static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1452 unsigned int new_freq)
1453 {
1454 struct cpufreq_policy *policy;
1455 struct cpufreq_freqs freqs;
1456 unsigned long flags;
1457
1458 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
1459 "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
1460
1461 freqs.old = old_freq;
1462 freqs.new = new_freq;
1463
1464 read_lock_irqsave(&cpufreq_driver_lock, flags);
1465 policy = per_cpu(cpufreq_cpu_data, cpu);
1466 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1467
1468 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
1469 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
1470 }
1471
1472 /**
1473 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1474 * @cpu: CPU number
1475 *
1476 * This is the last known freq, without actually getting it from the driver.
1477 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1478 */
1479 unsigned int cpufreq_quick_get(unsigned int cpu)
1480 {
1481 struct cpufreq_policy *policy;
1482 unsigned int ret_freq = 0;
1483
1484 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1485 return cpufreq_driver->get(cpu);
1486
1487 policy = cpufreq_cpu_get(cpu);
1488 if (policy) {
1489 ret_freq = policy->cur;
1490 cpufreq_cpu_put(policy);
1491 }
1492
1493 return ret_freq;
1494 }
1495 EXPORT_SYMBOL(cpufreq_quick_get);
1496
1497 /**
1498 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1499 * @cpu: CPU number
1500 *
1501 * Just return the max possible frequency for a given CPU.
1502 */
1503 unsigned int cpufreq_quick_get_max(unsigned int cpu)
1504 {
1505 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1506 unsigned int ret_freq = 0;
1507
1508 if (policy) {
1509 ret_freq = policy->max;
1510 cpufreq_cpu_put(policy);
1511 }
1512
1513 return ret_freq;
1514 }
1515 EXPORT_SYMBOL(cpufreq_quick_get_max);
1516
1517 static unsigned int __cpufreq_get(unsigned int cpu)
1518 {
1519 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1520 unsigned int ret_freq = 0;
1521
1522 if (!cpufreq_driver->get)
1523 return ret_freq;
1524
1525 ret_freq = cpufreq_driver->get(cpu);
1526
1527 if (ret_freq && policy->cur &&
1528 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1529 /* verify no discrepancy between actual and
1530 saved value exists */
1531 if (unlikely(ret_freq != policy->cur)) {
1532 cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
1533 schedule_work(&policy->update);
1534 }
1535 }
1536
1537 return ret_freq;
1538 }
1539
1540 /**
1541 * cpufreq_get - get the current CPU frequency (in kHz)
1542 * @cpu: CPU number
1543 *
1544 * Get the CPU current (static) CPU frequency
1545 */
1546 unsigned int cpufreq_get(unsigned int cpu)
1547 {
1548 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1549 unsigned int ret_freq = 0;
1550
1551 if (policy) {
1552 down_read(&policy->rwsem);
1553 ret_freq = __cpufreq_get(cpu);
1554 up_read(&policy->rwsem);
1555
1556 cpufreq_cpu_put(policy);
1557 }
1558
1559 return ret_freq;
1560 }
1561 EXPORT_SYMBOL(cpufreq_get);
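/*
 * Editor's note (illustrative sketch, not part of the original file):
 * querying the current frequency from another subsystem. cpufreq_get()
 * takes policy->rwsem, so it should be called from sleepable context;
 * cpufreq_quick_get() above returns the cached value without asking the
 * driver.
 */
#if 0	/* example only */
static void example_log_cpu0_freq(void)
{
	unsigned int khz = cpufreq_get(0);	/* 0 if no policy/driver */

	if (khz)
		pr_info("CPU0 is running at %u kHz\n", khz);
}
#endif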
1562
1563 static struct subsys_interface cpufreq_interface = {
1564 .name = "cpufreq",
1565 .subsys = &cpu_subsys,
1566 .add_dev = cpufreq_add_dev,
1567 .remove_dev = cpufreq_remove_dev,
1568 };
1569
1570 /**
1571 * cpufreq_suspend() - Suspend CPUFreq governors
1572 *
1573  * Called during system-wide suspend/hibernate cycles to suspend governors,
1574  * as some platforms can't change the CPU frequency after this point in the
1575  * suspend cycle, because some of the devices (e.g. i2c, regulators) they use
1576  * for changing the frequency are suspended quickly after this point.
1577 */
1578 void cpufreq_suspend(void)
1579 {
1580 struct cpufreq_policy *policy;
1581
1582 if (!cpufreq_driver)
1583 return;
1584
1585 if (!has_target())
1586 return;
1587
1588 pr_debug("%s: Suspending Governors\n", __func__);
1589
1590 list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
1591 if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
1592 pr_err("%s: Failed to stop governor for policy: %p\n",
1593 __func__, policy);
1594 else if (cpufreq_driver->suspend
1595 && cpufreq_driver->suspend(policy))
1596 pr_err("%s: Failed to suspend driver: %p\n", __func__,
1597 policy);
1598 }
1599
1600 cpufreq_suspended = true;
1601 }
1602
1603 /**
1604 * cpufreq_resume() - Resume CPUFreq governors
1605 *
1606  * Called during system-wide suspend/hibernate cycles to resume governors that
1607  * were suspended with cpufreq_suspend().
1608 */
1609 void cpufreq_resume(void)
1610 {
1611 struct cpufreq_policy *policy;
1612
1613 if (!cpufreq_driver)
1614 return;
1615
1616 if (!has_target())
1617 return;
1618
1619 pr_debug("%s: Resuming Governors\n", __func__);
1620
1621 cpufreq_suspended = false;
1622
1623 list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
1624 if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
1625 || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
1626 pr_err("%s: Failed to start governor for policy: %p\n",
1627 __func__, policy);
1628 else if (cpufreq_driver->resume
1629 && cpufreq_driver->resume(policy))
1630 pr_err("%s: Failed to resume driver: %p\n", __func__,
1631 policy);
1632
1633 /*
1634  * Schedule a call to cpufreq_update_policy() for the boot CPU, i.e. the
1635  * last policy in the list. It will verify that the current freq is in
1636  * sync with what we believe it to be.
1637 */
1638 if (list_is_last(&policy->policy_list, &cpufreq_policy_list))
1639 schedule_work(&policy->update);
1640 }
1641 }
1642
1643 /**
1644 * cpufreq_get_current_driver - return current driver's name
1645 *
1646 * Return the name string of the currently loaded cpufreq driver
1647 * or NULL, if none.
1648 */
1649 const char *cpufreq_get_current_driver(void)
1650 {
1651 if (cpufreq_driver)
1652 return cpufreq_driver->name;
1653
1654 return NULL;
1655 }
1656 EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1657
1658 /*********************************************************************
1659 * NOTIFIER LISTS INTERFACE *
1660 *********************************************************************/
1661
1662 /**
1663 * cpufreq_register_notifier - register a driver with cpufreq
1664 * @nb: notifier function to register
1665 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1666 *
1667 * Add a driver to one of two lists: either a list of drivers that
1668 * are notified about clock rate changes (once before and once after
1669 * the transition), or a list of drivers that are notified about
1670 * changes in cpufreq policy.
1671 *
1672 * This function may sleep, and has the same return conditions as
1673 * blocking_notifier_chain_register.
1674 */
1675 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1676 {
1677 int ret;
1678
1679 if (cpufreq_disabled())
1680 return -EINVAL;
1681
1682 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1683
1684 switch (list) {
1685 case CPUFREQ_TRANSITION_NOTIFIER:
1686 ret = srcu_notifier_chain_register(
1687 &cpufreq_transition_notifier_list, nb);
1688 break;
1689 case CPUFREQ_POLICY_NOTIFIER:
1690 ret = blocking_notifier_chain_register(
1691 &cpufreq_policy_notifier_list, nb);
1692 break;
1693 default:
1694 ret = -EINVAL;
1695 }
1696
1697 return ret;
1698 }
1699 EXPORT_SYMBOL(cpufreq_register_notifier);
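/*
 * Editor's note (illustrative sketch, not part of the original file):
 * a minimal transition notifier. For CPUFREQ_TRANSITION_NOTIFIER the
 * callback's data argument is a struct cpufreq_freqs *, as passed by
 * __cpufreq_notify_transition() above. Names are hypothetical.
 */
#if 0	/* example only */
static int example_freq_notify(struct notifier_block *nb,
			       unsigned long val, void *data)
{
	struct cpufreq_freqs *freqs = data;

	if (val == CPUFREQ_POSTCHANGE)
		pr_debug("cpu%u: %u -> %u kHz\n",
			 freqs->cpu, freqs->old, freqs->new);

	return NOTIFY_OK;
}

static struct notifier_block example_freq_nb = {
	.notifier_call = example_freq_notify,
};

/* from some module init path: */
/* cpufreq_register_notifier(&example_freq_nb, CPUFREQ_TRANSITION_NOTIFIER); */
#endif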
1700
1701 /**
1702 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1703 * @nb: notifier block to be unregistered
1704 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1705 *
1706 * Remove a driver from the CPU frequency notifier list.
1707 *
1708 * This function may sleep, and has the same return conditions as
1709 * blocking_notifier_chain_unregister.
1710 */
1711 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1712 {
1713 int ret;
1714
1715 if (cpufreq_disabled())
1716 return -EINVAL;
1717
1718 switch (list) {
1719 case CPUFREQ_TRANSITION_NOTIFIER:
1720 ret = srcu_notifier_chain_unregister(
1721 &cpufreq_transition_notifier_list, nb);
1722 break;
1723 case CPUFREQ_POLICY_NOTIFIER:
1724 ret = blocking_notifier_chain_unregister(
1725 &cpufreq_policy_notifier_list, nb);
1726 break;
1727 default:
1728 ret = -EINVAL;
1729 }
1730
1731 return ret;
1732 }
1733 EXPORT_SYMBOL(cpufreq_unregister_notifier);
1734
1735
1736 /*********************************************************************
1737 * GOVERNORS *
1738 *********************************************************************/
1739
1740 int __cpufreq_driver_target(struct cpufreq_policy *policy,
1741 unsigned int target_freq,
1742 unsigned int relation)
1743 {
1744 int retval = -EINVAL;
1745 unsigned int old_target_freq = target_freq;
1746
1747 if (cpufreq_disabled())
1748 return -ENODEV;
1749
1750 /* Make sure that target_freq is within supported range */
1751 if (target_freq > policy->max)
1752 target_freq = policy->max;
1753 if (target_freq < policy->min)
1754 target_freq = policy->min;
1755
1756 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1757 policy->cpu, target_freq, relation, old_target_freq);
1758
1759 /*
1760 * This might look like a redundant call as we are checking it again
1761 * after finding index. But it is left intentionally for cases where
1762 * exactly same freq is called again and so we can save on few function
1763 * calls.
1764 */
1765 if (target_freq == policy->cur)
1766 return 0;
1767
1768 if (cpufreq_driver->target)
1769 retval = cpufreq_driver->target(policy, target_freq, relation);
1770 else if (cpufreq_driver->target_index) {
1771 struct cpufreq_frequency_table *freq_table;
1772 struct cpufreq_freqs freqs;
1773 bool notify;
1774 int index;
1775
1776 freq_table = cpufreq_frequency_get_table(policy->cpu);
1777 if (unlikely(!freq_table)) {
1778 pr_err("%s: Unable to find freq_table\n", __func__);
1779 goto out;
1780 }
1781
1782 retval = cpufreq_frequency_table_target(policy, freq_table,
1783 target_freq, relation, &index);
1784 if (unlikely(retval)) {
1785 pr_err("%s: Unable to find matching freq\n", __func__);
1786 goto out;
1787 }
1788
1789 if (freq_table[index].frequency == policy->cur) {
1790 retval = 0;
1791 goto out;
1792 }
1793
1794 notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
1795
1796 if (notify) {
1797 freqs.old = policy->cur;
1798 freqs.new = freq_table[index].frequency;
1799 freqs.flags = 0;
1800
1801 pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
1802 __func__, policy->cpu, freqs.old,
1803 freqs.new);
1804
1805 cpufreq_notify_transition(policy, &freqs,
1806 CPUFREQ_PRECHANGE);
1807 }
1808
1809 retval = cpufreq_driver->target_index(policy, index);
1810 if (retval)
1811 pr_err("%s: Failed to change cpu frequency: %d\n",
1812 __func__, retval);
1813
1814 if (notify)
1815 cpufreq_notify_post_transition(policy, &freqs, retval);
1816 }
1817
1818 out:
1819 return retval;
1820 }
1821 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1822
1823 int cpufreq_driver_target(struct cpufreq_policy *policy,
1824 unsigned int target_freq,
1825 unsigned int relation)
1826 {
1827 int ret = -EINVAL;
1828
1829 down_write(&policy->rwsem);
1830
1831 ret = __cpufreq_driver_target(policy, target_freq, relation);
1832
1833 up_write(&policy->rwsem);
1834
1835 return ret;
1836 }
1837 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
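/*
 * Editor's note (illustrative sketch, not part of the original file):
 * how a caller could request the policy's maximum frequency.
 * cpufreq_driver_target() takes policy->rwsem itself; callers already
 * holding it use __cpufreq_driver_target() instead.
 */
#if 0	/* example only */
static int example_go_max(struct cpufreq_policy *policy)
{
	/* CPUFREQ_RELATION_H: highest frequency at or below the target */
	return cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
}
#endif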
1838
1839 /*
1840 * when "event" is CPUFREQ_GOV_LIMITS
1841 */
1842
1843 static int __cpufreq_governor(struct cpufreq_policy *policy,
1844 unsigned int event)
1845 {
1846 int ret;
1847
1848  /* A fallback governor only needs to be defined when the default governor
1849  is known to have latency restrictions, e.g. conservative or ondemand.
1850  That this is the case is already ensured in Kconfig.
1851 */
1852 #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1853 struct cpufreq_governor *gov = &cpufreq_gov_performance;
1854 #else
1855 struct cpufreq_governor *gov = NULL;
1856 #endif
1857
1858 /* Don't start any governor operations if we are entering suspend */
1859 if (cpufreq_suspended)
1860 return 0;
1861
1862 if (policy->governor->max_transition_latency &&
1863 policy->cpuinfo.transition_latency >
1864 policy->governor->max_transition_latency) {
1865 if (!gov)
1866 return -EINVAL;
1867 else {
1868 printk(KERN_WARNING "%s governor failed, too long"
1869 " transition latency of HW, fallback"
1870 " to %s governor\n",
1871 policy->governor->name,
1872 gov->name);
1873 policy->governor = gov;
1874 }
1875 }
1876
1877 if (event == CPUFREQ_GOV_POLICY_INIT)
1878 if (!try_module_get(policy->governor->owner))
1879 return -EINVAL;
1880
1881 pr_debug("__cpufreq_governor for CPU %u, event %u\n",
1882 policy->cpu, event);
1883
1884 mutex_lock(&cpufreq_governor_lock);
1885 if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
1886 || (!policy->governor_enabled
1887 && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
1888 mutex_unlock(&cpufreq_governor_lock);
1889 return -EBUSY;
1890 }
1891
1892 if (event == CPUFREQ_GOV_STOP)
1893 policy->governor_enabled = false;
1894 else if (event == CPUFREQ_GOV_START)
1895 policy->governor_enabled = true;
1896
1897 mutex_unlock(&cpufreq_governor_lock);
1898
1899 ret = policy->governor->governor(policy, event);
1900
1901 if (!ret) {
1902 if (event == CPUFREQ_GOV_POLICY_INIT)
1903 policy->governor->initialized++;
1904 else if (event == CPUFREQ_GOV_POLICY_EXIT)
1905 policy->governor->initialized--;
1906 } else {
1907 /* Restore original values */
1908 mutex_lock(&cpufreq_governor_lock);
1909 if (event == CPUFREQ_GOV_STOP)
1910 policy->governor_enabled = true;
1911 else if (event == CPUFREQ_GOV_START)
1912 policy->governor_enabled = false;
1913 mutex_unlock(&cpufreq_governor_lock);
1914 }
1915
1916 if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
1917 ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
1918 module_put(policy->governor->owner);
1919
1920 return ret;
1921 }
1922
1923 int cpufreq_register_governor(struct cpufreq_governor *governor)
1924 {
1925 int err;
1926
1927 if (!governor)
1928 return -EINVAL;
1929
1930 if (cpufreq_disabled())
1931 return -ENODEV;
1932
1933 mutex_lock(&cpufreq_governor_mutex);
1934
1935 governor->initialized = 0;
1936 err = -EBUSY;
1937 if (__find_governor(governor->name) == NULL) {
1938 err = 0;
1939 list_add(&governor->governor_list, &cpufreq_governor_list);
1940 }
1941
1942 mutex_unlock(&cpufreq_governor_mutex);
1943 return err;
1944 }
1945 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
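/*
 * Editor's note (illustrative sketch, not part of the original file):
 * the skeleton of a powersave-like governor module. Only the struct fields
 * and events actually used in this file are shown; the names are
 * hypothetical.
 */
#if 0	/* example only */
static int example_governor(struct cpufreq_policy *policy, unsigned int event)
{
	switch (event) {
	case CPUFREQ_GOV_START:
	case CPUFREQ_GOV_LIMITS:
		/* always run at the lowest frequency within the limits */
		return __cpufreq_driver_target(policy, policy->min,
					       CPUFREQ_RELATION_L);
	default:
		return 0;
	}
}

static struct cpufreq_governor example_gov = {
	.name		= "example",
	.governor	= example_governor,
	.owner		= THIS_MODULE,
};

static int __init example_gov_init(void)
{
	return cpufreq_register_governor(&example_gov);
}
module_init(example_gov_init);
#endif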
1946
1947 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1948 {
1949 int cpu;
1950
1951 if (!governor)
1952 return;
1953
1954 if (cpufreq_disabled())
1955 return;
1956
1957 for_each_present_cpu(cpu) {
1958 if (cpu_online(cpu))
1959 continue;
1960 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1961 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1962 }
1963
1964 mutex_lock(&cpufreq_governor_mutex);
1965 list_del(&governor->governor_list);
1966 mutex_unlock(&cpufreq_governor_mutex);
1967 return;
1968 }
1969 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1970
1971
1972 /*********************************************************************
1973 * POLICY INTERFACE *
1974 *********************************************************************/
1975
1976 /**
1977 * cpufreq_get_policy - get the current cpufreq_policy
1978  * @policy: struct cpufreq_policy into which the current policy is written
1979  * @cpu: CPU whose current policy is requested
1980 *
1981 * Reads the current cpufreq policy.
1982 */
1983 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1984 {
1985 struct cpufreq_policy *cpu_policy;
1986 if (!policy)
1987 return -EINVAL;
1988
1989 cpu_policy = cpufreq_cpu_get(cpu);
1990 if (!cpu_policy)
1991 return -EINVAL;
1992
1993 memcpy(policy, cpu_policy, sizeof(*policy));
1994
1995 cpufreq_cpu_put(cpu_policy);
1996 return 0;
1997 }
1998 EXPORT_SYMBOL(cpufreq_get_policy);
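/*
 * Illustrative only: a caller interested in the current limits of CPU 0
 * could take a snapshot like this (the variable name is hypothetical):
 *
 *	struct cpufreq_policy pol;
 *
 *	if (!cpufreq_get_policy(&pol, 0))
 *		pr_info("cpu0 limits: %u - %u kHz\n", pol.min, pol.max);
 */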
1999
2000 /*
2001 * policy : current policy.
2002 * new_policy: policy to be set.
2003 */
2004 static int cpufreq_set_policy(struct cpufreq_policy *policy,
2005 struct cpufreq_policy *new_policy)
2006 {
2007 struct cpufreq_governor *old_gov;
2008 int ret;
2009
2010 pr_debug("setting new policy for CPU %u: %u - %u kHz\n", new_policy->cpu,
2011 new_policy->min, new_policy->max);
2012
2013 memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
2014
2015 if (new_policy->min > policy->max || new_policy->max < policy->min)
2016 return -EINVAL;
2017
2018 /* verify the cpu speed can be set within this limit */
2019 ret = cpufreq_driver->verify(new_policy);
2020 if (ret)
2021 return ret;
2022
2023 /* adjust if necessary - all reasons */
2024 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2025 CPUFREQ_ADJUST, new_policy);
2026
2027 	/* adjust if necessary - hardware incompatibility */
2028 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2029 CPUFREQ_INCOMPATIBLE, new_policy);
2030
2031 /*
2032 * verify the cpu speed can be set within this limit, which might be
2033 * different to the first one
2034 */
2035 ret = cpufreq_driver->verify(new_policy);
2036 if (ret)
2037 return ret;
2038
2039 /* notification of the new policy */
2040 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2041 CPUFREQ_NOTIFY, new_policy);
2042
2043 policy->min = new_policy->min;
2044 policy->max = new_policy->max;
2045
2046 pr_debug("new min and max freqs are %u - %u kHz\n",
2047 policy->min, policy->max);
2048
2049 if (cpufreq_driver->setpolicy) {
2050 policy->policy = new_policy->policy;
2051 pr_debug("setting range\n");
2052 return cpufreq_driver->setpolicy(new_policy);
2053 }
2054
2055 if (new_policy->governor == policy->governor)
2056 goto out;
2057
2058 pr_debug("governor switch\n");
2059
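	/*
	 * Switching governors: stop and exit the old governor, then init and
	 * start the new one.  policy->rwsem is released around
	 * CPUFREQ_GOV_POLICY_EXIT so the outgoing governor can remove its
	 * sysfs attributes without blocking on readers of the rwsem; if the
	 * new governor fails, the old one is restarted below.
	 */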
2060 /* save old, working values */
2061 old_gov = policy->governor;
2062 /* end old governor */
2063 if (old_gov) {
2064 __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
2065 up_write(&policy->rwsem);
2066 		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2067 down_write(&policy->rwsem);
2068 }
2069
2070 /* start new governor */
2071 policy->governor = new_policy->governor;
2072 if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
2073 if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
2074 goto out;
2075
2076 up_write(&policy->rwsem);
2077 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2078 down_write(&policy->rwsem);
2079 }
2080
2081 /* new governor failed, so re-start old one */
2082 pr_debug("starting governor %s failed\n", policy->governor->name);
2083 if (old_gov) {
2084 policy->governor = old_gov;
2085 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
2086 __cpufreq_governor(policy, CPUFREQ_GOV_START);
2087 }
2088
2089 return -EINVAL;
2090
2091 out:
2092 pr_debug("governor: change or update limits\n");
2093 return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2094 }
2095
2096 /**
2097 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
2098 * @cpu: CPU which shall be re-evaluated
2099 *
2100  * Useful for policy notifiers which have different requirements at
2101  * different times.
2102 */
2103 int cpufreq_update_policy(unsigned int cpu)
2104 {
2105 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
2106 struct cpufreq_policy new_policy;
2107 int ret;
2108
2109 if (!policy) {
2110 ret = -ENODEV;
2111 goto no_policy;
2112 }
2113
2114 down_write(&policy->rwsem);
2115
2116 pr_debug("updating policy for CPU %u\n", cpu);
2117 memcpy(&new_policy, policy, sizeof(*policy));
2118 new_policy.min = policy->user_policy.min;
2119 new_policy.max = policy->user_policy.max;
2120 new_policy.policy = policy->user_policy.policy;
2121 new_policy.governor = policy->user_policy.governor;
2122
2123 /*
2124 * BIOS might change freq behind our back
2125 * -> ask driver for current freq and notify governors about a change
2126 */
2127 if (cpufreq_driver->get) {
2128 new_policy.cur = cpufreq_driver->get(cpu);
2129 if (WARN_ON(!new_policy.cur)) {
2130 ret = -EIO;
2131 			goto unlock;
2132 }
2133
2134 if (!policy->cur) {
2135 			pr_debug("Driver did not initialize current freq\n");
2136 policy->cur = new_policy.cur;
2137 } else {
2138 if (policy->cur != new_policy.cur && has_target())
2139 cpufreq_out_of_sync(cpu, policy->cur,
2140 new_policy.cur);
2141 }
2142 }
2143
2144 ret = cpufreq_set_policy(policy, &new_policy);
2145 unlock:
2146 	up_write(&policy->rwsem);
2147
2148 cpufreq_cpu_put(policy);
2149 no_policy:
2150 return ret;
2151 }
2152 EXPORT_SYMBOL(cpufreq_update_policy);
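/*
 * Illustrative only: platform code that learns about an externally changed
 * limit (for example an ACPI _PPC notification) can simply call
 *
 *	cpufreq_update_policy(cpu);
 *
 * to have the user policy re-applied and the governor notified.
 */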
2153
2154 static int cpufreq_cpu_callback(struct notifier_block *nfb,
2155 unsigned long action, void *hcpu)
2156 {
2157 unsigned int cpu = (unsigned long)hcpu;
2158 struct device *dev;
2159 bool frozen = false;
2160
2161 dev = get_cpu_device(cpu);
2162 if (dev) {
2163
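		/*
		 * CPU_TASKS_FROZEN marks hotplug operations performed as part
		 * of suspend/resume; the flag is forwarded so the add/remove
		 * paths can keep and later restore the CPU's policy instead
		 * of rebuilding it from scratch.
		 */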
2164 if (action & CPU_TASKS_FROZEN)
2165 frozen = true;
2166
2167 switch (action & ~CPU_TASKS_FROZEN) {
2168 case CPU_ONLINE:
2169 __cpufreq_add_dev(dev, NULL, frozen);
2170 break;
2171
2172 case CPU_DOWN_PREPARE:
2173 __cpufreq_remove_dev_prepare(dev, NULL, frozen);
2174 break;
2175
2176 case CPU_POST_DEAD:
2177 __cpufreq_remove_dev_finish(dev, NULL, frozen);
2178 break;
2179
2180 case CPU_DOWN_FAILED:
2181 __cpufreq_add_dev(dev, NULL, frozen);
2182 break;
2183 }
2184 }
2185 return NOTIFY_OK;
2186 }
2187
2188 static struct notifier_block __refdata cpufreq_cpu_notifier = {
2189 .notifier_call = cpufreq_cpu_callback,
2190 };
2191
2192 /*********************************************************************
2193 * BOOST *
2194 *********************************************************************/
2195 static int cpufreq_boost_set_sw(int state)
2196 {
2197 struct cpufreq_frequency_table *freq_table;
2198 struct cpufreq_policy *policy;
2199 int ret = -EINVAL;
2200
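	/*
	 * Recompute each policy's cpuinfo limits from its frequency table,
	 * which now includes or excludes the boost frequencies, and have the
	 * governor re-evaluate its limits.
	 */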
2201 list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
2202 freq_table = cpufreq_frequency_get_table(policy->cpu);
2203 if (freq_table) {
2204 ret = cpufreq_frequency_table_cpuinfo(policy,
2205 freq_table);
2206 if (ret) {
2207 pr_err("%s: Policy frequency update failed\n",
2208 __func__);
2209 break;
2210 }
2211 policy->user_policy.max = policy->max;
2212 __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2213 }
2214 }
2215
2216 return ret;
2217 }
2218
2219 int cpufreq_boost_trigger_state(int state)
2220 {
2221 unsigned long flags;
2222 int ret = 0;
2223
2224 if (cpufreq_driver->boost_enabled == state)
2225 return 0;
2226
2227 write_lock_irqsave(&cpufreq_driver_lock, flags);
2228 cpufreq_driver->boost_enabled = state;
2229 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2230
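	/*
	 * Ask the driver to flip boost; if that fails, roll the cached
	 * boost_enabled flag back so core state matches the hardware.
	 */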
2231 ret = cpufreq_driver->set_boost(state);
2232 if (ret) {
2233 write_lock_irqsave(&cpufreq_driver_lock, flags);
2234 cpufreq_driver->boost_enabled = !state;
2235 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2236
2237 pr_err("%s: Cannot %s BOOST\n", __func__,
2238 state ? "enable" : "disable");
2239 }
2240
2241 return ret;
2242 }
2243
2244 int cpufreq_boost_supported(void)
2245 {
2246 if (likely(cpufreq_driver))
2247 return cpufreq_driver->boost_supported;
2248
2249 return 0;
2250 }
2251 EXPORT_SYMBOL_GPL(cpufreq_boost_supported);
2252
2253 int cpufreq_boost_enabled(void)
2254 {
2255 return cpufreq_driver->boost_enabled;
2256 }
2257 EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
2258
2259 /*********************************************************************
2260 * REGISTER / UNREGISTER CPUFREQ DRIVER *
2261 *********************************************************************/
2262
2263 /**
2264 * cpufreq_register_driver - register a CPU Frequency driver
2265  * @driver_data: A struct cpufreq_driver containing the values
2266 * submitted by the CPU Frequency driver.
2267 *
2268 * Registers a CPU Frequency driver to this core code. This code
2269  * returns zero on success, -EEXIST when another driver got here first
2270 * (and isn't unregistered in the meantime).
2271 *
2272 */
2273 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2274 {
2275 unsigned long flags;
2276 int ret;
2277
2278 if (cpufreq_disabled())
2279 return -ENODEV;
2280
2281 if (!driver_data || !driver_data->verify || !driver_data->init ||
2282 !(driver_data->setpolicy || driver_data->target_index ||
2283 driver_data->target))
2284 return -EINVAL;
2285
2286 pr_debug("trying to register driver %s\n", driver_data->name);
2287
2288 if (driver_data->setpolicy)
2289 driver_data->flags |= CPUFREQ_CONST_LOOPS;
2290
2291 write_lock_irqsave(&cpufreq_driver_lock, flags);
2292 if (cpufreq_driver) {
2293 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2294 return -EEXIST;
2295 }
2296 cpufreq_driver = driver_data;
2297 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2298
2299 if (cpufreq_boost_supported()) {
2300 /*
2301 * Check if driver provides function to enable boost -
2302 * if not, use cpufreq_boost_set_sw as default
2303 */
2304 if (!cpufreq_driver->set_boost)
2305 cpufreq_driver->set_boost = cpufreq_boost_set_sw;
2306
2307 ret = cpufreq_sysfs_create_file(&boost.attr);
2308 if (ret) {
2309 pr_err("%s: cannot register global BOOST sysfs file\n",
2310 __func__);
2311 goto err_null_driver;
2312 }
2313 }
2314
2315 ret = subsys_interface_register(&cpufreq_interface);
2316 if (ret)
2317 goto err_boost_unreg;
2318
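	/*
	 * A driver without CPUFREQ_STICKY is only kept registered if at least
	 * one CPU was actually initialized; otherwise registration is backed
	 * out below.
	 */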
2319 if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
2320 int i;
2321 ret = -ENODEV;
2322
2323 /* check for at least one working CPU */
2324 for (i = 0; i < nr_cpu_ids; i++)
2325 if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
2326 ret = 0;
2327 break;
2328 }
2329
2330 /* if all ->init() calls failed, unregister */
2331 if (ret) {
2332 pr_debug("no CPU initialized for driver %s\n",
2333 driver_data->name);
2334 goto err_if_unreg;
2335 }
2336 }
2337
2338 register_hotcpu_notifier(&cpufreq_cpu_notifier);
2339 pr_debug("driver %s up and running\n", driver_data->name);
2340
2341 return 0;
2342 err_if_unreg:
2343 subsys_interface_unregister(&cpufreq_interface);
2344 err_boost_unreg:
2345 if (cpufreq_boost_supported())
2346 cpufreq_sysfs_remove_file(&boost.attr);
2347 err_null_driver:
2348 write_lock_irqsave(&cpufreq_driver_lock, flags);
2349 cpufreq_driver = NULL;
2350 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2351 return ret;
2352 }
2353 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
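/*
 * Illustrative only: a platform driver would typically register a
 * struct cpufreq_driver such as the hypothetical one below ("foo") from its
 * probe/init path, using generic helpers where they fit:
 *
 *	static struct cpufreq_driver foo_cpufreq_driver = {
 *		.name		= "foo-cpufreq",
 *		.verify		= cpufreq_generic_frequency_table_verify,
 *		.target_index	= foo_cpufreq_target_index,
 *		.init		= foo_cpufreq_init,
 *		.get		= cpufreq_generic_get,
 *	};
 *
 *	ret = cpufreq_register_driver(&foo_cpufreq_driver);
 */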
2354
2355 /**
2356 * cpufreq_unregister_driver - unregister the current CPUFreq driver
2357 *
2358 * Unregister the current CPUFreq driver. Only call this if you have
2359  * the right to do so, i.e. only if you successfully registered this driver
2360  * before. Returns zero on success and -EINVAL if the given driver is not
2361  * the one currently registered.
2362 */
2363 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
2364 {
2365 unsigned long flags;
2366
2367 if (!cpufreq_driver || (driver != cpufreq_driver))
2368 return -EINVAL;
2369
2370 pr_debug("unregistering driver %s\n", driver->name);
2371
2372 subsys_interface_unregister(&cpufreq_interface);
2373 if (cpufreq_boost_supported())
2374 cpufreq_sysfs_remove_file(&boost.attr);
2375
2376 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
2377
2378 down_write(&cpufreq_rwsem);
2379 write_lock_irqsave(&cpufreq_driver_lock, flags);
2380
2381 cpufreq_driver = NULL;
2382
2383 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2384 up_write(&cpufreq_rwsem);
2385
2386 return 0;
2387 }
2388 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
2389
2390 static int __init cpufreq_core_init(void)
2391 {
2392 if (cpufreq_disabled())
2393 return -ENODEV;
2394
2395 cpufreq_global_kobject = kobject_create();
2396 BUG_ON(!cpufreq_global_kobject);
2397
2398 return 0;
2399 }
2400 core_initcall(cpufreq_core_init);