/*
 * linux/drivers/cpufreq/cpufreq.c
 *
 * Copyright (C) 2001 Russell King
 *	(C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *	(C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *	Added handling for CPU hotplug
 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *	Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
#include <trace/events/power.h>

/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
static DEFINE_RWLOCK(cpufreq_driver_lock);
DEFINE_MUTEX(cpufreq_governor_lock);
static LIST_HEAD(cpufreq_policy_list);

/* This one keeps track of the previously set governor of a removed CPU */
static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);

static inline bool has_target(void)
{
	return cpufreq_driver->target_index || cpufreq_driver->target;
}

/*
 * rwsem to guarantee that cpufreq driver module doesn't unload during critical
 * sections
 */
static DECLARE_RWSEM(cpufreq_rwsem);

/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
		unsigned int event);
static unsigned int __cpufreq_get(unsigned int cpu);
static void handle_update(struct work_struct *work);

/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	init_cpufreq_transition_notifier_list_called = true;
	return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);

static int off __read_mostly;
static int cpufreq_disabled(void)
{
	return off;
}
void disable_cpufreq(void)
{
	off = 1;
}
static LIST_HEAD(cpufreq_governor_list);
static DEFINE_MUTEX(cpufreq_governor_mutex);

bool have_governor_per_policy(void)
{
	return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);

struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
	if (have_governor_per_policy())
		return &policy->kobj;
	else
		return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);

static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	u64 idle_time;
	u64 cur_wall_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = cputime_to_usecs(cur_wall_time);

	return cputime_to_usecs(idle_time);
}

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

	if (idle_time == -1ULL)
		return get_cpu_idle_time_jiffy(cpu, wall);
	else if (!io_busy)
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
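
/*
 * Illustrative sketch (not part of this file's API): sampling governors
 * typically derive CPU load from two successive readings of the values
 * returned above. The prev_wall/prev_idle names below are hypothetical:
 *
 *	u64 wall, idle, load;
 *
 *	idle = get_cpu_idle_time(cpu, &wall, 0);
 *	// ... one sampling period later, with prev_wall/prev_idle saved ...
 *	load = 100 * ((wall - prev_wall) - (idle - prev_idle))
 *			/ (wall - prev_wall);
 *
 * i.e. load is the percentage of wall time the CPU spent busy during the
 * sampling period.
 */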

/*
 * This is a generic cpufreq init() routine which can be used by cpufreq
 * drivers of SMP systems. It will do the following:
 * - validate & show the freq table passed
 * - set the policy's transition latency
 * - fill policy->cpus with all possible CPUs
 */
int cpufreq_generic_init(struct cpufreq_policy *policy,
		struct cpufreq_frequency_table *table,
		unsigned int transition_latency)
{
	int ret;

	ret = cpufreq_table_validate_and_show(policy, table);
	if (ret) {
		pr_err("%s: invalid frequency table: %d\n", __func__, ret);
		return ret;
	}

	policy->cpuinfo.transition_latency = transition_latency;

	/*
	 * The driver only supports the SMP configuration where all processors
	 * share the clock and voltage.
	 */
	cpumask_setall(policy->cpus);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);
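
/*
 * Example (hedged sketch of a hypothetical driver, not taken from this
 * tree): on a clock-shared SMP platform, the driver's ->init() callback
 * can be as small as the following; foo_clk and foo_freq_table are
 * assumed driver-private data:
 *
 *	static int foo_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		policy->clk = foo_clk;	// needed by cpufreq_generic_get()
 *		return cpufreq_generic_init(policy, foo_freq_table,
 *				300 * 1000);	// transition latency in ns
 *	}
 *
 * cpufreq_generic_get() below pairs with this as the ->get() callback.
 */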

unsigned int cpufreq_generic_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	if (!policy || IS_ERR(policy->clk)) {
		pr_err("%s: No %s associated to cpu: %d\n", __func__,
		       policy ? "clk" : "policy", cpu);
		return 0;
	}

	return clk_get_rate(policy->clk) / 1000;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_get);

struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = NULL;
	unsigned long flags;

	if (cpufreq_disabled() || (cpu >= nr_cpu_ids))
		return NULL;

	if (!down_read_trylock(&cpufreq_rwsem))
		return NULL;

	/* get the cpufreq driver */
	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver) {
		/* get the CPU */
		policy = per_cpu(cpufreq_cpu_data, cpu);
		if (policy)
			kobject_get(&policy->kobj);
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!policy)
		up_read(&cpufreq_rwsem);

	return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
	if (cpufreq_disabled())
		return;

	kobject_put(&policy->kobj);
	up_read(&cpufreq_rwsem);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);

/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
#ifndef CONFIG_SMP
static unsigned long l_p_j_ref;
static unsigned int l_p_j_ref_freq;

static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
			 l_p_j_ref, l_p_j_ref_freq);
	}
	if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
			 loops_per_jiffy, ci->new);
	}
}
#else
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	return;
}
#endif

static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		 state, freqs->new);

	switch (state) {

	case CPUFREQ_PRECHANGE:
		/* detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz.\n",
					 freqs->old, policy->cur);
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
			 (unsigned long)freqs->cpu);
		trace_cpu_frequency(freqs->new, freqs->cpu);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}

/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
void cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	for_each_cpu(freqs->cpu, policy->cpus)
		__cpufreq_notify_transition(policy, freqs, state);
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);

/* Do post notifications when there are chances that transition has failed */
void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
	if (!transition_failed)
		return;

	swap(freqs->old, freqs->new);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
}
EXPORT_SYMBOL_GPL(cpufreq_notify_post_transition);
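
/*
 * Sketch of the expected calling pattern (assumed from the helpers above)
 * for a driver frequency-change path that does its own notification:
 *
 *	freqs.old = policy->cur;
 *	freqs.new = target;			// target is hypothetical
 *	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 *	ret = program_the_hardware();		// hypothetical driver step
 *	cpufreq_notify_post_transition(policy, &freqs, ret);
 *
 * On failure, cpufreq_notify_post_transition() swaps old/new and emits a
 * second PRECHANGE/POSTCHANGE pair, so listeners see a transition back to
 * the original frequency.
 */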

/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/
static ssize_t show_boost(struct kobject *kobj,
			  struct attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
			   const char *buf, size_t count)
{
	int ret, enable;

	ret = sscanf(buf, "%d", &enable);
	if (ret != 1 || enable < 0 || enable > 1)
		return -EINVAL;

	if (cpufreq_boost_trigger_state(enable)) {
		pr_err("%s: Cannot %s BOOST!\n", __func__,
		       enable ? "enable" : "disable");
		return -EINVAL;
	}

	pr_debug("%s: cpufreq BOOST %s\n", __func__,
		 enable ? "enabled" : "disabled");

	return count;
}
define_one_global_rw(boost);

static struct cpufreq_governor *__find_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	list_for_each_entry(t, &cpufreq_governor_list, governor_list)
		if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
			return t;

	return NULL;
}

/**
 * cpufreq_parse_governor - parse a governor string
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
				  struct cpufreq_governor **governor)
{
	int err = -EINVAL;

	if (!cpufreq_driver)
		goto out;

	if (cpufreq_driver->setpolicy) {
		if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_PERFORMANCE;
			err = 0;
		} else if (!strnicmp(str_governor, "powersave",
						CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_POWERSAVE;
			err = 0;
		}
	} else if (has_target()) {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);

		t = __find_governor(str_governor);

		if (t == NULL) {
			int ret;

			mutex_unlock(&cpufreq_governor_mutex);
			ret = request_module("cpufreq_%s", str_governor);
			mutex_lock(&cpufreq_governor_mutex);

			if (ret == 0)
				t = __find_governor(str_governor);
		}

		if (t != NULL) {
			*governor = t;
			err = 0;
		}

		mutex_unlock(&cpufreq_governor_mutex);
	}
out:
	return err;
}

/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);
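
/*
 * For reference, show_one(scaling_cur_freq, cur) above expands to:
 *
 *	static ssize_t show_scaling_cur_freq
 *	(struct cpufreq_policy *policy, char *buf)
 *	{
 *		return sprintf(buf, "%u\n", policy->cur);
 *	}
 */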

static int cpufreq_set_policy(struct cpufreq_policy *policy,
			      struct cpufreq_policy *new_policy);

/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)					\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	int ret;							\
	struct cpufreq_policy new_policy;				\
									\
	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
	if (ret)							\
		return -EINVAL;						\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	ret = cpufreq_set_policy(policy, &new_policy);			\
	policy->user_policy.object = policy->object;			\
									\
	return ret ? ret : count;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);

/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
				     char *buf)
{
	unsigned int cur_freq = __cpufreq_get(policy->cpu);
	if (!cur_freq)
		return sprintf(buf, "<unknown>");
	return sprintf(buf, "%u\n", cur_freq);
}

/**
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sprintf(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sprintf(buf, "performance\n");
	else if (policy->governor)
		return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
				 policy->governor->name);
	return -EINVAL;
}

/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
				      const char *buf, size_t count)
{
	int ret;
	char str_governor[16];
	struct cpufreq_policy new_policy;

	ret = cpufreq_get_policy(&new_policy, policy->cpu);
	if (ret)
		return ret;

	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
				   &new_policy.governor))
		return -EINVAL;

	ret = cpufreq_set_policy(policy, &new_policy);

	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret)
		return ret;
	else
		return count;
}

/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}

/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!has_target()) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}

ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (i)
			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
		if (i >= (PAGE_SIZE - 5))
			break;
	}
	i += sprintf(&buf[i], "\n");
	return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);

/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->related_cpus, buf);
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->cpus, buf);
}

static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
				      const char *buf, size_t count)
{
	unsigned int freq = 0;
	unsigned int ret;

	if (!policy->governor || !policy->governor->store_setspeed)
		return -EINVAL;

	ret = sscanf(buf, "%u", &freq);
	if (ret != 1)
		return -EINVAL;

	policy->governor->store_setspeed(policy, freq);

	return count;
}

static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sprintf(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}

/**
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
	unsigned int limit;
	int ret;
	if (cpufreq_driver->bios_limit) {
		ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
		if (!ret)
			return sprintf(buf, "%u\n", limit);
	}
	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}

cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);

static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};

#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret;

	if (!down_read_trylock(&cpufreq_rwsem))
		return -EINVAL;

	down_read(&policy->rwsem);

	if (fattr->show)
		ret = fattr->show(policy, buf);
	else
		ret = -EIO;

	up_read(&policy->rwsem);
	up_read(&cpufreq_rwsem);

	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;

	get_online_cpus();

	if (!cpu_online(policy->cpu))
		goto unlock;

	if (!down_read_trylock(&cpufreq_rwsem))
		goto unlock;

	down_write(&policy->rwsem);

	if (fattr->store)
		ret = fattr->store(policy, buf, count);
	else
		ret = -EIO;

	up_write(&policy->rwsem);

	up_read(&cpufreq_rwsem);
unlock:
	put_online_cpus();

	return ret;
}

static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

static struct kobj_type ktype_cpufreq = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= cpufreq_sysfs_release,
};

struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

static int cpufreq_global_kobject_usage;

int cpufreq_get_global_kobject(void)
{
	if (!cpufreq_global_kobject_usage++)
		return kobject_add(cpufreq_global_kobject,
				&cpu_subsys.dev_root->kobj, "%s", "cpufreq");

	return 0;
}
EXPORT_SYMBOL(cpufreq_get_global_kobject);

void cpufreq_put_global_kobject(void)
{
	if (!--cpufreq_global_kobject_usage)
		kobject_del(cpufreq_global_kobject);
}
EXPORT_SYMBOL(cpufreq_put_global_kobject);

int cpufreq_sysfs_create_file(const struct attribute *attr)
{
	int ret = cpufreq_get_global_kobject();

	if (!ret) {
		ret = sysfs_create_file(cpufreq_global_kobject, attr);
		if (ret)
			cpufreq_put_global_kobject();
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_sysfs_create_file);

void cpufreq_sysfs_remove_file(const struct attribute *attr)
{
	sysfs_remove_file(cpufreq_global_kobject, attr);
	cpufreq_put_global_kobject();
}
EXPORT_SYMBOL(cpufreq_sysfs_remove_file);

/* symlink affected CPUs */
static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
{
	unsigned int j;
	int ret = 0;

	for_each_cpu(j, policy->cpus) {
		struct device *cpu_dev;

		if (j == policy->cpu)
			continue;

		pr_debug("Adding link for CPU: %u\n", j);
		cpu_dev = get_cpu_device(j);
		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
					"cpufreq");
		if (ret)
			break;
	}
	return ret;
}

static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
				     struct device *dev)
{
	struct freq_attr **drv_attr;
	int ret = 0;

	/* prepare interface data */
	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
				   &dev->kobj, "cpufreq");
	if (ret)
		return ret;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while ((drv_attr) && (*drv_attr)) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			goto err_out_kobj_put;
		drv_attr++;
	}
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (has_target()) {
		ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			goto err_out_kobj_put;
	}

	ret = cpufreq_add_dev_symlink(policy);
	if (ret)
		goto err_out_kobj_put;

	return ret;

err_out_kobj_put:
	kobject_put(&policy->kobj);
	wait_for_completion(&policy->kobj_unregister);
	return ret;
}

static void cpufreq_init_policy(struct cpufreq_policy *policy)
{
	struct cpufreq_governor *gov = NULL;
	struct cpufreq_policy new_policy;
	int ret = 0;

	memcpy(&new_policy, policy, sizeof(*policy));

	/* Update governor of new_policy to the governor used before hotplug */
	gov = __find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu));
	if (gov)
		pr_debug("Restoring governor %s for cpu %d\n",
			 gov->name, policy->cpu);
	else
		gov = CPUFREQ_DEFAULT_GOVERNOR;

	new_policy.governor = gov;

	/* Use the default policy if it's valid. */
	if (cpufreq_driver->setpolicy)
		cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);

	/* set default policy */
	ret = cpufreq_set_policy(policy, &new_policy);
	if (ret) {
		pr_debug("setting policy failed\n");
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);
	}
}

#ifdef CONFIG_HOTPLUG_CPU
static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
				  unsigned int cpu, struct device *dev)
{
	int ret = 0;
	unsigned long flags;

	if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (ret) {
			pr_err("%s: Failed to stop governor\n", __func__);
			return ret;
		}
	}

	down_write(&policy->rwsem);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpumask_set_cpu(cpu, policy->cpus);
	per_cpu(cpufreq_cpu_data, cpu) = policy;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	up_write(&policy->rwsem);

	if (has_target()) {
		if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
		    (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
			pr_err("%s: Failed to start governor\n", __func__);
			return ret;
		}
	}

	return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
}
#endif

static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	unsigned long flags;

	read_lock_irqsave(&cpufreq_driver_lock, flags);

	policy = per_cpu(cpufreq_cpu_data_fallback, cpu);

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	policy->governor = NULL;

	return policy;
}

static struct cpufreq_policy *cpufreq_policy_alloc(void)
{
	struct cpufreq_policy *policy;

	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
	if (!policy)
		return NULL;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	INIT_LIST_HEAD(&policy->policy_list);
	init_rwsem(&policy->rwsem);

	return policy;

err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);

	return NULL;
}

static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
{
	struct kobject *kobj;
	struct completion *cmp;

	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_REMOVE_POLICY, policy);

	down_read(&policy->rwsem);
	kobj = &policy->kobj;
	cmp = &policy->kobj_unregister;
	up_read(&policy->rwsem);
	kobject_put(kobj);

	/*
	 * We need to make sure that the underlying kobj is
	 * actually not referenced anymore by anybody before we
	 * proceed with unloading.
	 */
	pr_debug("waiting for dropping of refcount\n");
	wait_for_completion(cmp);
	pr_debug("wait complete\n");
}

static void cpufreq_policy_free(struct cpufreq_policy *policy)
{
	free_cpumask_var(policy->related_cpus);
	free_cpumask_var(policy->cpus);
	kfree(policy);
}

static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
	if (WARN_ON(cpu == policy->cpu))
		return;

	down_write(&policy->rwsem);

	policy->last_cpu = policy->cpu;
	policy->cpu = cpu;

	up_write(&policy->rwsem);

	cpufreq_frequency_table_update_policy_cpu(policy);
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_UPDATE_POLICY_CPU, policy);
}

static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
			     bool frozen)
{
	unsigned int j, cpu = dev->id;
	int ret = -ENOMEM;
	struct cpufreq_policy *policy;
	unsigned long flags;
#ifdef CONFIG_HOTPLUG_CPU
	struct cpufreq_policy *tpolicy;
#endif

	if (cpu_is_offline(cpu))
		return 0;

	pr_debug("adding CPU %u\n", cpu);

#ifdef CONFIG_SMP
	/* check whether a different CPU already registered this
	 * CPU because it is in the same boat. */
	policy = cpufreq_cpu_get(cpu);
	if (unlikely(policy)) {
		cpufreq_cpu_put(policy);
		return 0;
	}
#endif

	if (!down_read_trylock(&cpufreq_rwsem))
		return 0;

#ifdef CONFIG_HOTPLUG_CPU
	/* Check if this cpu was hot-unplugged earlier and has siblings */
	read_lock_irqsave(&cpufreq_driver_lock, flags);
	list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) {
		if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) {
			read_unlock_irqrestore(&cpufreq_driver_lock, flags);
			ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev);
			up_read(&cpufreq_rwsem);
			return ret;
		}
	}
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
#endif

	/*
	 * Restore the saved policy when doing light-weight init and fall back
	 * to the full init if that fails.
	 */
	policy = frozen ? cpufreq_policy_restore(cpu) : NULL;
	if (!policy) {
		frozen = false;
		policy = cpufreq_policy_alloc();
		if (!policy)
			goto nomem_out;
	}

	/*
	 * In the resume path, since we restore a saved policy, the assignment
	 * to policy->cpu is like an update of the existing policy, rather than
	 * the creation of a brand new one. So we need to perform this update
	 * by invoking update_policy_cpu().
	 */
	if (frozen && cpu != policy->cpu)
		update_policy_cpu(policy, cpu);
	else
		policy->cpu = cpu;

	cpumask_copy(policy->cpus, cpumask_of(cpu));

	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	/* call driver. From then on the cpufreq must be able
	 * to accept all calls to ->verify and ->setpolicy for this CPU
	 */
	ret = cpufreq_driver->init(policy);
	if (ret) {
		pr_debug("initialization failed\n");
		goto err_set_policy_cpu;
	}

	/* related_cpus should at least contain policy->cpus */
	cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);

	/*
	 * The affected cpus must always be the ones that are online. We aren't
	 * managing offline cpus here.
	 */
	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

	if (!frozen) {
		policy->user_policy.min = policy->min;
		policy->user_policy.max = policy->max;
	}

	down_write(&policy->rwsem);
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_cpu_data, j) = policy;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (cpufreq_driver->get) {
		policy->cur = cpufreq_driver->get(policy->cpu);
		if (!policy->cur) {
			pr_err("%s: ->get() failed\n", __func__);
			goto err_get_freq;
		}
	}

	/*
	 * Sometimes boot loaders set the CPU frequency to a value outside of
	 * the frequency table present with the cpufreq core. In such cases
	 * the CPU might be unstable if it has to run at that frequency for a
	 * long duration, so it is better to set it to a frequency which is
	 * specified in the freq-table. This also makes cpufreq stats
	 * inconsistent, as cpufreq-stats would fail to register because the
	 * current frequency of the CPU isn't found in the freq-table.
	 *
	 * Because we don't want this change to affect the boot process badly,
	 * we go for the next freq which is >= policy->cur ('cur' must be set
	 * by now, otherwise we will end up setting freq to the lowest of the
	 * table as 'cur' is initialized to zero).
	 *
	 * We are passing target-freq as "policy->cur - 1" otherwise
	 * __cpufreq_driver_target() would simply fail, as policy->cur will be
	 * equal to target-freq.
	 */
	if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
	    && has_target()) {
		/* Are we running at unknown frequency ? */
		ret = cpufreq_frequency_table_get_index(policy, policy->cur);
		if (ret == -EINVAL) {
			/* Warn user and fix it */
			pr_warn("%s: CPU%d: Running at unlisted freq: %u kHz\n",
				__func__, policy->cpu, policy->cur);
			ret = __cpufreq_driver_target(policy, policy->cur - 1,
				CPUFREQ_RELATION_L);

			/*
			 * Reaching here after boot in a few seconds may not
			 * mean that system will remain stable at "unknown"
			 * frequency for longer duration. Hence, a BUG_ON().
			 */
			BUG_ON(ret);
			pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u kHz\n",
				__func__, policy->cpu, policy->cur);
		}
	}

	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_START, policy);

	if (!frozen) {
		ret = cpufreq_add_dev_interface(policy, dev);
		if (ret)
			goto err_out_unregister;
		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				CPUFREQ_CREATE_POLICY, policy);
	}

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	list_add(&policy->policy_list, &cpufreq_policy_list);
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	cpufreq_init_policy(policy);

	if (!frozen) {
		policy->user_policy.policy = policy->policy;
		policy->user_policy.governor = policy->governor;
	}
	up_write(&policy->rwsem);

	kobject_uevent(&policy->kobj, KOBJ_ADD);
	up_read(&cpufreq_rwsem);

	pr_debug("initialization complete\n");

	return 0;

err_out_unregister:
err_get_freq:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_cpu_data, j) = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (cpufreq_driver->exit)
		cpufreq_driver->exit(policy);
err_set_policy_cpu:
	if (frozen) {
		/* Do not leave stale fallback data behind. */
		per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
		cpufreq_policy_put_kobj(policy);
	}
	cpufreq_policy_free(policy);

nomem_out:
	up_read(&cpufreq_rwsem);

	return ret;
}

/**
 * cpufreq_add_dev - add a CPU device
 *
 * Adds the cpufreq interface for a CPU device.
 *
 * The Oracle says: try running cpufreq registration/unregistration
 * concurrently with CPU hotplugging and all hell will break loose. Tried to
 * clean this mess up, but more thorough testing is needed. - Mathieu
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	return __cpufreq_add_dev(dev, sif, false);
}

static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
					   unsigned int old_cpu)
{
	struct device *cpu_dev;
	int ret;

	/* first sibling now owns the new sysfs dir */
	cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu));

	sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
	ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
	if (ret) {
		pr_err("%s: Failed to move kobj: %d", __func__, ret);

		down_write(&policy->rwsem);
		cpumask_set_cpu(old_cpu, policy->cpus);
		up_write(&policy->rwsem);

		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
					"cpufreq");

		return -EINVAL;
	}

	return cpu_dev->id;
}

static int __cpufreq_remove_dev_prepare(struct device *dev,
					struct subsys_interface *sif,
					bool frozen)
{
	unsigned int cpu = dev->id, cpus;
	int new_cpu, ret;
	unsigned long flags;
	struct cpufreq_policy *policy;

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	policy = per_cpu(cpufreq_cpu_data, cpu);

	/* Save the policy somewhere when doing a light-weight tear-down */
	if (frozen)
		per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return -EINVAL;
	}

	if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (ret) {
			pr_err("%s: Failed to stop governor\n", __func__);
			return ret;
		}
	}

	if (!cpufreq_driver->setpolicy)
		strncpy(per_cpu(cpufreq_cpu_governor, cpu),
			policy->governor->name, CPUFREQ_NAME_LEN);

	down_read(&policy->rwsem);
	cpus = cpumask_weight(policy->cpus);
	up_read(&policy->rwsem);

	if (cpu != policy->cpu) {
		sysfs_remove_link(&dev->kobj, "cpufreq");
	} else if (cpus > 1) {
		new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu);
		if (new_cpu >= 0) {
			update_policy_cpu(policy, new_cpu);

			if (!frozen) {
				pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
					 __func__, new_cpu, cpu);
			}
		}
	}

	return 0;
}

static int __cpufreq_remove_dev_finish(struct device *dev,
				       struct subsys_interface *sif,
				       bool frozen)
{
	unsigned int cpu = dev->id, cpus;
	int ret;
	unsigned long flags;
	struct cpufreq_policy *policy;

	read_lock_irqsave(&cpufreq_driver_lock, flags);
	policy = per_cpu(cpufreq_cpu_data, cpu);
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return -EINVAL;
	}

	down_write(&policy->rwsem);
	cpus = cpumask_weight(policy->cpus);

	if (cpus > 1)
		cpumask_clear_cpu(cpu, policy->cpus);
	up_write(&policy->rwsem);

	/* If cpu is last user of policy, free policy */
	if (cpus == 1) {
		if (has_target()) {
			ret = __cpufreq_governor(policy,
					CPUFREQ_GOV_POLICY_EXIT);
			if (ret) {
				pr_err("%s: Failed to exit governor\n",
				       __func__);
				return ret;
			}
		}

		if (!frozen)
			cpufreq_policy_put_kobj(policy);

		/*
		 * Perform the ->exit() even during light-weight tear-down,
		 * since this is a core component, and is essential for the
		 * subsequent light-weight ->init() to succeed.
		 */
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);

		/* Remove policy from list of active policies */
		write_lock_irqsave(&cpufreq_driver_lock, flags);
		list_del(&policy->policy_list);
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);

		if (!frozen)
			cpufreq_policy_free(policy);
	} else {
		if (has_target()) {
			if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
			    (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
				pr_err("%s: Failed to start governor\n",
				       __func__);
				return ret;
			}
		}
	}

	per_cpu(cpufreq_cpu_data, cpu) = NULL;
	return 0;
}

/**
 * cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 */
static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	int ret;

	if (cpu_is_offline(cpu))
		return 0;

	ret = __cpufreq_remove_dev_prepare(dev, sif, false);

	if (!ret)
		ret = __cpufreq_remove_dev_finish(dev, sif, false);

	return ret;
}

static void handle_update(struct work_struct *work)
{
	struct cpufreq_policy *policy =
		container_of(work, struct cpufreq_policy, update);
	unsigned int cpu = policy->cpu;
	pr_debug("handle_update for cpu %u called\n", cpu);
	cpufreq_update_policy(cpu);
}

/**
 * cpufreq_out_of_sync - If actual and saved CPU frequency differ, we're
 * in deep trouble.
 * @cpu: cpu number
 * @old_freq: CPU frequency the kernel thinks the CPU runs at
 * @new_freq: CPU frequency the CPU actually runs at
 *
 * We adjust to the current frequency first, and need to clean up later.
 * So either call cpufreq_update_policy() or schedule handle_update().
 */
static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
				unsigned int new_freq)
{
	struct cpufreq_policy *policy;
	struct cpufreq_freqs freqs;
	unsigned long flags;

	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz.\n",
		 old_freq, new_freq);

	freqs.old = old_freq;
	freqs.new = new_freq;

	read_lock_irqsave(&cpufreq_driver_lock, flags);
	policy = per_cpu(cpufreq_cpu_data, cpu);
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
}

/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
 */
unsigned int cpufreq_quick_get(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	unsigned int ret_freq = 0;

	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
		return cpufreq_driver->get(cpu);

	policy = cpufreq_cpu_get(cpu);
	if (policy) {
		ret_freq = policy->cur;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);

/**
 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
 * @cpu: CPU number
 *
 * Just return the max possible frequency for a given CPU.
 */
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		ret_freq = policy->max;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get_max);

static unsigned int __cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
	unsigned int ret_freq = 0;

	if (!cpufreq_driver->get)
		return ret_freq;

	ret_freq = cpufreq_driver->get(cpu);

	if (ret_freq && policy->cur &&
	    !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
		/* verify no discrepancy between actual and
		   saved value exists */
		if (unlikely(ret_freq != policy->cur)) {
			cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
			schedule_work(&policy->update);
		}
	}

	return ret_freq;
}

/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the current CPU frequency.
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		down_read(&policy->rwsem);
		ret_freq = __cpufreq_get(cpu);
		up_read(&policy->rwsem);

		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);

static struct subsys_interface cpufreq_interface = {
	.name		= "cpufreq",
	.subsys		= &cpu_subsys,
	.add_dev	= cpufreq_add_dev,
	.remove_dev	= cpufreq_remove_dev,
};

/**
 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
 *
 * This function is only executed for the boot processor. The other CPUs
 * have been put offline by means of CPU hotplug.
 */
static int cpufreq_bp_suspend(void)
{
	int ret = 0;

	int cpu = smp_processor_id();
	struct cpufreq_policy *policy;

	pr_debug("suspending cpu %u\n", cpu);

	/* If there's no policy for the boot CPU, we have nothing to do. */
	policy = cpufreq_cpu_get(cpu);
	if (!policy)
		return 0;

	if (cpufreq_driver->suspend) {
		ret = cpufreq_driver->suspend(policy);
		if (ret)
			printk(KERN_ERR "cpufreq: suspend failed in ->suspend step on CPU %u\n",
			       policy->cpu);
	}

	cpufreq_cpu_put(policy);
	return ret;
}

/**
 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
 *
 *	1.) resume CPUfreq hardware support (cpufreq_driver->resume())
 *	2.) schedule a call to cpufreq_update_policy() ASAP as interrupts are
 *	    restored. It will verify that the current freq is in sync with
 *	    what we believe it to be. This is a bit later than when it
 *	    should be, but nonetheless it's better than calling
 *	    cpufreq_driver->get() here, which might re-enable interrupts...
 *
 * This function is only executed for the boot CPU. The other CPUs have not
 * been turned on yet.
 */
static void cpufreq_bp_resume(void)
{
	int ret = 0;

	int cpu = smp_processor_id();
	struct cpufreq_policy *policy;

	pr_debug("resuming cpu %u\n", cpu);

	/* If there's no policy for the boot CPU, we have nothing to do. */
	policy = cpufreq_cpu_get(cpu);
	if (!policy)
		return;

	if (cpufreq_driver->resume) {
		ret = cpufreq_driver->resume(policy);
		if (ret) {
			printk(KERN_ERR "cpufreq: resume failed in ->resume step on CPU %u\n",
			       policy->cpu);
			goto fail;
		}
	}

	schedule_work(&policy->update);

fail:
	cpufreq_cpu_put(policy);
}

static struct syscore_ops cpufreq_syscore_ops = {
	.suspend	= cpufreq_bp_suspend,
	.resume		= cpufreq_bp_resume,
};

/**
 * cpufreq_get_current_driver - return current driver's name
 *
 * Return the name string of the currently loaded cpufreq driver
 * or NULL, if none.
 */
const char *cpufreq_get_current_driver(void)
{
	if (cpufreq_driver)
		return cpufreq_driver->name;

	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);

/*********************************************************************
 *                     NOTIFIER LISTS INTERFACE                      *
 *********************************************************************/

/**
 * cpufreq_register_notifier - register a driver with cpufreq
 * @nb: notifier function to register
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Add a driver to one of two lists: either a list of drivers that
 * are notified about clock rate changes (once before and once after
 * the transition), or a list of drivers that are notified about
 * changes in cpufreq policy.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_register.
 */
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	WARN_ON(!init_cpufreq_transition_notifier_list_called);

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_register(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_register(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);
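
/*
 * Example (illustrative only; the foo_* names are hypothetical): a module
 * that wants to log completed frequency changes would register a
 * transition notifier like this:
 *
 *	static int foo_cpufreq_callback(struct notifier_block *nb,
 *					unsigned long val, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (val == CPUFREQ_POSTCHANGE)
 *			pr_info("cpu%u: %u -> %u kHz\n", freqs->cpu,
 *				freqs->old, freqs->new);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_cpufreq_callback,
 *	};
 *
 *	cpufreq_register_notifier(&foo_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */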

/**
 * cpufreq_unregister_notifier - unregister a driver with cpufreq
 * @nb: notifier block to be unregistered
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Remove a driver from the CPU frequency notifier list.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_unregister.
 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_unregister(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_unregister(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);


/*********************************************************************
 *                              GOVERNORS                            *
 *********************************************************************/

int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	int retval = -EINVAL;
	unsigned int old_target_freq = target_freq;

	if (cpufreq_disabled())
		return -ENODEV;

	/* Make sure that target_freq is within supported range */
	if (target_freq > policy->max)
		target_freq = policy->max;
	if (target_freq < policy->min)
		target_freq = policy->min;

	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
		 policy->cpu, target_freq, relation, old_target_freq);

	/*
	 * This might look like a redundant call, as we are checking it again
	 * after finding the index. But it is left intentionally for cases
	 * where exactly the same frequency is requested again, so that we can
	 * save a few function calls.
	 */
	if (target_freq == policy->cur)
		return 0;

	if (cpufreq_driver->target)
		retval = cpufreq_driver->target(policy, target_freq, relation);
	else if (cpufreq_driver->target_index) {
		struct cpufreq_frequency_table *freq_table;
		struct cpufreq_freqs freqs;
		bool notify;
		int index;

		freq_table = cpufreq_frequency_get_table(policy->cpu);
		if (unlikely(!freq_table)) {
			pr_err("%s: Unable to find freq_table\n", __func__);
			goto out;
		}

		retval = cpufreq_frequency_table_target(policy, freq_table,
				target_freq, relation, &index);
		if (unlikely(retval)) {
			pr_err("%s: Unable to find matching freq\n", __func__);
			goto out;
		}

		if (freq_table[index].frequency == policy->cur) {
			retval = 0;
			goto out;
		}

		notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);

		if (notify) {
			freqs.old = policy->cur;
			freqs.new = freq_table[index].frequency;
			freqs.flags = 0;

			pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
				 __func__, policy->cpu, freqs.old, freqs.new);

			cpufreq_notify_transition(policy, &freqs,
					CPUFREQ_PRECHANGE);
		}

		retval = cpufreq_driver->target_index(policy, index);
		if (retval)
			pr_err("%s: Failed to change cpu frequency: %d\n",
			       __func__, retval);

		if (notify)
			cpufreq_notify_post_transition(policy, &freqs, retval);
	}

out:
	return retval;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
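
/*
 * Relation semantics, with a small worked example (the table values below
 * are assumptions for illustration): given a frequency table of
 * {500000, 1000000, 1500000} kHz and target_freq = 800000,
 * CPUFREQ_RELATION_L selects the lowest frequency at or above the target
 * (1000000 kHz), while CPUFREQ_RELATION_H selects the highest frequency
 * at or below it (500000 kHz).
 */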

int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	int ret = -EINVAL;

	down_write(&policy->rwsem);

	ret = __cpufreq_driver_target(policy, target_freq, relation);

	up_write(&policy->rwsem);

	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);

/*
 * when "event" is CPUFREQ_GOV_LIMITS
 */

static int __cpufreq_governor(struct cpufreq_policy *policy,
			      unsigned int event)
{
	int ret;

	/*
	 * Must only be defined when the default governor is known to have
	 * latency restrictions, like e.g. conservative or ondemand.
	 * That this is the case is already ensured in Kconfig.
	 */
#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
	struct cpufreq_governor *gov = &cpufreq_gov_performance;
#else
	struct cpufreq_governor *gov = NULL;
#endif

	if (policy->governor->max_transition_latency &&
	    policy->cpuinfo.transition_latency >
	    policy->governor->max_transition_latency) {
		if (!gov)
			return -EINVAL;
		else {
			printk(KERN_WARNING "%s governor failed, too long transition latency of HW, fallback to %s governor\n",
			       policy->governor->name, gov->name);
			policy->governor = gov;
		}
	}

	if (event == CPUFREQ_GOV_POLICY_INIT)
		if (!try_module_get(policy->governor->owner))
			return -EINVAL;

	pr_debug("__cpufreq_governor for CPU %u, event %u\n",
		 policy->cpu, event);

	mutex_lock(&cpufreq_governor_lock);
	if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
	    || (!policy->governor_enabled
	    && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
		mutex_unlock(&cpufreq_governor_lock);
		return -EBUSY;
	}

	if (event == CPUFREQ_GOV_STOP)
		policy->governor_enabled = false;
	else if (event == CPUFREQ_GOV_START)
		policy->governor_enabled = true;

	mutex_unlock(&cpufreq_governor_lock);

	ret = policy->governor->governor(policy, event);

	if (!ret) {
		if (event == CPUFREQ_GOV_POLICY_INIT)
			policy->governor->initialized++;
		else if (event == CPUFREQ_GOV_POLICY_EXIT)
			policy->governor->initialized--;
	} else {
		/* Restore original values */
		mutex_lock(&cpufreq_governor_lock);
		if (event == CPUFREQ_GOV_STOP)
			policy->governor_enabled = true;
		else if (event == CPUFREQ_GOV_START)
			policy->governor_enabled = false;
		mutex_unlock(&cpufreq_governor_lock);
	}

	if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
	    ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
		module_put(policy->governor->owner);

	return ret;
}

int cpufreq_register_governor(struct cpufreq_governor *governor)
{
	int err;

	if (!governor)
		return -EINVAL;

	if (cpufreq_disabled())
		return -ENODEV;

	mutex_lock(&cpufreq_governor_mutex);

	governor->initialized = 0;
	err = -EBUSY;
	if (__find_governor(governor->name) == NULL) {
		err = 0;
		list_add(&governor->governor_list, &cpufreq_governor_list);
	}

	mutex_unlock(&cpufreq_governor_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(cpufreq_register_governor);
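
/*
 * Skeleton of a governor registration (hedged sketch; the foo_* names are
 * hypothetical, not from this tree). The ->governor() callback receives the
 * CPUFREQ_GOV_* events dispatched by __cpufreq_governor() above:
 *
 *	static int foo_governor_fn(struct cpufreq_policy *policy,
 *				   unsigned int event)
 *	{
 *		switch (event) {
 *		case CPUFREQ_GOV_POLICY_INIT:	// allocate per-policy state
 *		case CPUFREQ_GOV_START:		// begin sampling/adjusting
 *		case CPUFREQ_GOV_STOP:		// stop sampling
 *		case CPUFREQ_GOV_POLICY_EXIT:	// free per-policy state
 *		case CPUFREQ_GOV_LIMITS:	// policy->min/max changed
 *		default:
 *			break;
 *		}
 *		return 0;
 *	}
 *
 *	static struct cpufreq_governor foo_governor = {
 *		.name		= "foo",
 *		.governor	= foo_governor_fn,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	cpufreq_register_governor(&foo_governor);
 */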
1944
1945 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1946 {
1947 int cpu;
1948
1949 if (!governor)
1950 return;
1951
1952 if (cpufreq_disabled())
1953 return;
1954
1955 for_each_present_cpu(cpu) {
1956 if (cpu_online(cpu))
1957 continue;
1958 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1959 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1960 }
1961
1962 mutex_lock(&cpufreq_governor_mutex);
1963 list_del(&governor->governor_list);
1964 mutex_unlock(&cpufreq_governor_mutex);
1965 return;
1966 }
1967 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1968
1969
1970 /*********************************************************************
1971 * POLICY INTERFACE *
1972 *********************************************************************/
1973
1974 /**
1975 * cpufreq_get_policy - get the current cpufreq_policy
1976 * @policy: struct cpufreq_policy into which the current cpufreq_policy
1977 * is written
1978 * @cpu: CPU to find the policy for
1979 *
1980 * Reads the current cpufreq policy.
1981 */
1981 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1982 {
1983 struct cpufreq_policy *cpu_policy;
1984
1985 if (!policy)
1985 return -EINVAL;
1986
1987 cpu_policy = cpufreq_cpu_get(cpu);
1988 if (!cpu_policy)
1989 return -EINVAL;
1990
1991 memcpy(policy, cpu_policy, sizeof(*policy));
1992
1993 cpufreq_cpu_put(cpu_policy);
1994 return 0;
1995 }
1996 EXPORT_SYMBOL(cpufreq_get_policy);
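/*
 * Example (editorial sketch, not part of this file): reading a snapshot of
 * CPU0's policy with cpufreq_get_policy(). Because the data is memcpy()d
 * into a caller-owned struct, no locking or refcounting is needed once the
 * call returns; print_cpu0_limits() is a hypothetical helper.
 */
static void print_cpu0_limits(void)
{
	struct cpufreq_policy pol;

	if (cpufreq_get_policy(&pol, 0))
		return;

	pr_info("cpu0: %u - %u kHz, governor %s\n", pol.min, pol.max,
		pol.governor ? pol.governor->name : "none");
}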
1997
1998 /*
1999 * policy: current policy.
2000 * new_policy: policy to be set.
2001 */
2002 static int cpufreq_set_policy(struct cpufreq_policy *policy,
2003 struct cpufreq_policy *new_policy)
2004 {
2005 struct cpufreq_governor *old_gov;
2006 int ret;
2007
2008 pr_debug("setting new policy for CPU %u: %u - %u kHz\n", new_policy->cpu,
2009 new_policy->min, new_policy->max);
2010
2011 memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
2012
2013 if (new_policy->min > policy->max || new_policy->max < policy->min)
2014 return -EINVAL;
2015
2016 /* verify the cpu speed can be set within this limit */
2017 ret = cpufreq_driver->verify(new_policy);
2018 if (ret)
2019 return ret;
2020
2021 /* adjust if necessary - all reasons */
2022 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2023 CPUFREQ_ADJUST, new_policy);
2024
2025 /* adjust if necessary - hardware incompatibility */
2026 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2027 CPUFREQ_INCOMPATIBLE, new_policy);
2028
2029 /*
2030 * verify the cpu speed can be set within this limit, which might be
2031 * different from the first one
2032 */
2033 ret = cpufreq_driver->verify(new_policy);
2034 if (ret)
2035 return ret;
2036
2037 /* notification of the new policy */
2038 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2039 CPUFREQ_NOTIFY, new_policy);
2040
2041 policy->min = new_policy->min;
2042 policy->max = new_policy->max;
2043
2044 pr_debug("new min and max freqs are %u - %u kHz\n",
2045 policy->min, policy->max);
2046
2047 if (cpufreq_driver->setpolicy) {
2048 policy->policy = new_policy->policy;
2049 pr_debug("setting range\n");
2050 return cpufreq_driver->setpolicy(new_policy);
2051 }
2052
2053 if (new_policy->governor == policy->governor)
2054 goto out;
2055
2056 pr_debug("governor switch\n");
2057
2058 /* save old, working values */
2059 old_gov = policy->governor;
2060 /* end old governor */
2061 if (old_gov) {
2062 __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
2063 up_write(&policy->rwsem);
2064 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2065 down_write(&policy->rwsem);
2066 }
2067
2068 /* start new governor */
2069 policy->governor = new_policy->governor;
2070 if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
2071 if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
2072 goto out;
2073
2074 up_write(&policy->rwsem);
2075 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2076 down_write(&policy->rwsem);
2077 }
2078
2079 /* new governor failed, so re-start old one */
2080 pr_debug("starting governor %s failed\n", policy->governor->name);
2081 if (old_gov) {
2082 policy->governor = old_gov;
2083 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
2084 __cpufreq_governor(policy, CPUFREQ_GOV_START);
2085 }
2086
2087 return -EINVAL;
2088
2089 out:
2090 pr_debug("governor: change or update limits\n");
2091 return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2092 }
2093
2094 /**
2095 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
2096 * @cpu: CPU which shall be re-evaluated
2097 *
2098 * Useful for policy notifiers whose requirements change
2099 * over time.
2100 */
2101 int cpufreq_update_policy(unsigned int cpu)
2102 {
2103 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
2104 struct cpufreq_policy new_policy;
2105 int ret;
2106
2107 if (!policy) {
2108 ret = -ENODEV;
2109 goto no_policy;
2110 }
2111
2112 down_write(&policy->rwsem);
2113
2114 pr_debug("updating policy for CPU %u\n", cpu);
2115 memcpy(&new_policy, policy, sizeof(*policy));
2116 new_policy.min = policy->user_policy.min;
2117 new_policy.max = policy->user_policy.max;
2118 new_policy.policy = policy->user_policy.policy;
2119 new_policy.governor = policy->user_policy.governor;
2120
2121 /*
2122 * BIOS might change freq behind our back
2123 * -> ask driver for current freq and notify governors about a change
2124 */
2125 if (cpufreq_driver->get) {
2126 new_policy.cur = cpufreq_driver->get(cpu);
2127 if (WARN_ON(!new_policy.cur)) {
2128 ret = -EIO;
2129 goto unlock;
2130 }
2131
2132 if (!policy->cur) {
2133 pr_debug("Driver did not initialize current freq\n");
2134 policy->cur = new_policy.cur;
2135 } else {
2136 if (policy->cur != new_policy.cur && has_target())
2137 cpufreq_out_of_sync(cpu, policy->cur,
2138 new_policy.cur);
2139 }
2140 }
2141
2142 ret = cpufreq_set_policy(policy, &new_policy);
2143
2144 unlock:
2145 up_write(&policy->rwsem);
2145
2146 cpufreq_cpu_put(policy);
2147 no_policy:
2148 return ret;
2149 }
2150 EXPORT_SYMBOL(cpufreq_update_policy);
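/*
 * Example (editorial sketch, not part of this file): a thermal-style user of
 * cpufreq_update_policy(). A CPUFREQ_ADJUST policy notifier clamps the max
 * frequency, and cpufreq_update_policy() re-runs cpufreq_set_policy() (and
 * with it the notifier chain) whenever the cap changes. "thermal_cap_khz"
 * and the helpers are hypothetical.
 */
static unsigned int thermal_cap_khz = UINT_MAX;

static int thermal_cap_notifier(struct notifier_block *nb,
				unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;

	if (event == CPUFREQ_ADJUST)
		cpufreq_verify_within_limits(policy, 0, thermal_cap_khz);

	return NOTIFY_OK;
}

static struct notifier_block thermal_cap_nb = {
	.notifier_call = thermal_cap_notifier,
};

static int __init thermal_cap_init(void)
{
	return cpufreq_register_notifier(&thermal_cap_nb,
					 CPUFREQ_POLICY_NOTIFIER);
}

static void thermal_cap_set(unsigned int new_cap_khz)
{
	thermal_cap_khz = new_cap_khz;
	cpufreq_update_policy(0);	/* re-evaluate CPU0's policy now */
}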
2151
2152 static int cpufreq_cpu_callback(struct notifier_block *nfb,
2153 unsigned long action, void *hcpu)
2154 {
2155 unsigned int cpu = (unsigned long)hcpu;
2156 struct device *dev;
2157 bool frozen = false;
2158
2159 dev = get_cpu_device(cpu);
2160 if (!dev)
2161 return NOTIFY_OK;
2162
2163 if (action & CPU_TASKS_FROZEN)
2164 frozen = true;
2165
2166 switch (action & ~CPU_TASKS_FROZEN) {
2167 case CPU_ONLINE:
2168 case CPU_DOWN_FAILED:
2169 __cpufreq_add_dev(dev, NULL, frozen);
2170 break;
2171
2172 case CPU_DOWN_PREPARE:
2173 __cpufreq_remove_dev_prepare(dev, NULL, frozen);
2174 break;
2175
2176 case CPU_POST_DEAD:
2177 __cpufreq_remove_dev_finish(dev, NULL, frozen);
2178 break;
2179 }
2180
2181 return NOTIFY_OK;
2184 }
2185
2186 static struct notifier_block __refdata cpufreq_cpu_notifier = {
2187 .notifier_call = cpufreq_cpu_callback,
2188 };
2189
2190 /*********************************************************************
2191 * BOOST *
2192 *********************************************************************/
2193 static int cpufreq_boost_set_sw(int state)
2194 {
2195 struct cpufreq_frequency_table *freq_table;
2196 struct cpufreq_policy *policy;
2197 int ret = -EINVAL;
2198
2199 list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
2200 freq_table = cpufreq_frequency_get_table(policy->cpu);
2201 if (freq_table) {
2202 ret = cpufreq_frequency_table_cpuinfo(policy,
2203 freq_table);
2204 if (ret) {
2205 pr_err("%s: Policy frequency update failed\n",
2206 __func__);
2207 break;
2208 }
2209 policy->user_policy.max = policy->max;
2210 __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2211 }
2212 }
2213
2214 return ret;
2215 }
2216
2217 int cpufreq_boost_trigger_state(int state)
2218 {
2219 unsigned long flags;
2220 int ret = 0;
2221
2222 if (cpufreq_driver->boost_enabled == state)
2223 return 0;
2224
2225 write_lock_irqsave(&cpufreq_driver_lock, flags);
2226 cpufreq_driver->boost_enabled = state;
2227 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2228
2229 ret = cpufreq_driver->set_boost(state);
2230 if (ret) {
2231 write_lock_irqsave(&cpufreq_driver_lock, flags);
2232 cpufreq_driver->boost_enabled = !state;
2233 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2234
2235 pr_err("%s: Cannot %s BOOST\n", __func__,
2236 state ? "enable" : "disable");
2237 }
2238
2239 return ret;
2240 }
2241
2242 int cpufreq_boost_supported(void)
2243 {
2244 if (likely(cpufreq_driver))
2245 return cpufreq_driver->boost_supported;
2246
2247 return 0;
2248 }
2249 EXPORT_SYMBOL_GPL(cpufreq_boost_supported);
2250
2251 int cpufreq_boost_enabled(void)
2252 {
2253 return cpufreq_driver->boost_enabled;
2254 }
2255 EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
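/*
 * Example (editorial sketch, not part of this file): how a driver opts into
 * boost. Setting boost_supported makes cpufreq_register_driver() expose the
 * global "boost" sysfs file; with set_boost left NULL the core falls back to
 * cpufreq_boost_set_sw() above, which rebuilds each policy's limits from the
 * frequency table so that entries marked as boost frequencies become
 * reachable. All "example_*" names are hypothetical.
 */
static struct cpufreq_driver example_boost_driver = {
	.name			= "example-boost",
	/* .init, .verify and .target_index filled in as usual ... */
	.boost_supported	= true,
	/* .set_boost deliberately NULL: software boost via the fallback */
};

/* Kernel code can also flip the state directly: */
/*	cpufreq_boost_trigger_state(1);	*/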
2256
2257 /*********************************************************************
2258 * REGISTER / UNREGISTER CPUFREQ DRIVER *
2259 *********************************************************************/
2260
2261 /**
2262 * cpufreq_register_driver - register a CPU Frequency driver
2263 * @driver_data: A struct cpufreq_driver containing the values
2264 * submitted by the CPU Frequency driver.
2265 *
2266 * Registers a CPU Frequency driver to this core code. This code
2267 * returns zero on success, -EEXIST when another driver is already
2268 * registered (and hasn't been unregistered in the meantime).
2269 *
2270 */
2271 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2272 {
2273 unsigned long flags;
2274 int ret;
2275
2276 if (cpufreq_disabled())
2277 return -ENODEV;
2278
2279 if (!driver_data || !driver_data->verify || !driver_data->init ||
2280 !(driver_data->setpolicy || driver_data->target_index ||
2281 driver_data->target))
2282 return -EINVAL;
2283
2284 pr_debug("trying to register driver %s\n", driver_data->name);
2285
2286 if (driver_data->setpolicy)
2287 driver_data->flags |= CPUFREQ_CONST_LOOPS;
2288
2289 write_lock_irqsave(&cpufreq_driver_lock, flags);
2290 if (cpufreq_driver) {
2291 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2292 return -EEXIST;
2293 }
2294 cpufreq_driver = driver_data;
2295 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2296
2297 if (cpufreq_boost_supported()) {
2298 /*
2299 * Check if driver provides function to enable boost -
2300 * if not, use cpufreq_boost_set_sw as default
2301 */
2302 if (!cpufreq_driver->set_boost)
2303 cpufreq_driver->set_boost = cpufreq_boost_set_sw;
2304
2305 ret = cpufreq_sysfs_create_file(&boost.attr);
2306 if (ret) {
2307 pr_err("%s: cannot register global BOOST sysfs file\n",
2308 __func__);
2309 goto err_null_driver;
2310 }
2311 }
2312
2313 ret = subsys_interface_register(&cpufreq_interface);
2314 if (ret)
2315 goto err_boost_unreg;
2316
2317 if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
2318 int i;
2319 ret = -ENODEV;
2320
2321 /* check for at least one working CPU */
2322 for (i = 0; i < nr_cpu_ids; i++) {
2323 if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
2324 ret = 0;
2325 break;
2326 }
2327 }
2327
2328 /* if all ->init() calls failed, unregister */
2329 if (ret) {
2330 pr_debug("no CPU initialized for driver %s\n",
2331 driver_data->name);
2332 goto err_if_unreg;
2333 }
2334 }
2335
2336 register_hotcpu_notifier(&cpufreq_cpu_notifier);
2337 pr_debug("driver %s up and running\n", driver_data->name);
2338
2339 return 0;
2340 err_if_unreg:
2341 subsys_interface_unregister(&cpufreq_interface);
2342 err_boost_unreg:
2343 if (cpufreq_boost_supported())
2344 cpufreq_sysfs_remove_file(&boost.attr);
2345 err_null_driver:
2346 write_lock_irqsave(&cpufreq_driver_lock, flags);
2347 cpufreq_driver = NULL;
2348 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2349 return ret;
2350 }
2351 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
2352
2353 /**
2354 * cpufreq_unregister_driver - unregister the current CPUFreq driver
2355 * @driver: the driver previously registered with cpufreq_register_driver()
2356 *
2357 * Unregister the current CPUFreq driver. Only call this if the driver
2358 * was registered successfully beforehand.
2359 * Returns zero if successful, and -EINVAL if @driver does not match the
2360 * currently registered driver.
2361 */
2361 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
2362 {
2363 unsigned long flags;
2364
2365 if (!cpufreq_driver || (driver != cpufreq_driver))
2366 return -EINVAL;
2367
2368 pr_debug("unregistering driver %s\n", driver->name);
2369
2370 subsys_interface_unregister(&cpufreq_interface);
2371 if (cpufreq_boost_supported())
2372 cpufreq_sysfs_remove_file(&boost.attr);
2373
2374 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
2375
2376 down_write(&cpufreq_rwsem);
2377 write_lock_irqsave(&cpufreq_driver_lock, flags);
2378
2379 cpufreq_driver = NULL;
2380
2381 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2382 up_write(&cpufreq_rwsem);
2383
2384 return 0;
2385 }
2386 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
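/*
 * Example (editorial sketch, not part of this file): the minimal shape of a
 * driver module built on cpufreq_register_driver()/cpufreq_unregister_driver().
 * ->init and ->verify plus one of ->setpolicy/->target/->target_index are the
 * mandatory callbacks checked at registration above. The two frequencies and
 * all "example_*" names are hypothetical; a real driver would also set
 * policy->cpuinfo.transition_latency and program real hardware.
 */
static struct cpufreq_frequency_table example_freqs[] = {
	{ .frequency = 500000 },		/* kHz */
	{ .frequency = 1000000 },
	{ .frequency = CPUFREQ_TABLE_END },
};

static int example_cpu_init(struct cpufreq_policy *policy)
{
	/* Validate the table and publish it (min/max, sysfs attributes) */
	return cpufreq_table_validate_and_show(policy, example_freqs);
}

static int example_verify(struct cpufreq_policy *policy)
{
	return cpufreq_frequency_table_verify(policy, example_freqs);
}

static int example_target_index(struct cpufreq_policy *policy,
				unsigned int index)
{
	/* Program the hardware to example_freqs[index].frequency here */
	return 0;
}

static struct cpufreq_driver example_driver = {
	.name		= "example",
	.init		= example_cpu_init,
	.verify		= example_verify,
	.target_index	= example_target_index,
};

static int __init example_cpufreq_init(void)
{
	return cpufreq_register_driver(&example_driver);
}

static void __exit example_cpufreq_exit(void)
{
	cpufreq_unregister_driver(&example_driver);
}

module_init(example_cpufreq_init);
module_exit(example_cpufreq_exit);
MODULE_LICENSE("GPL");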
2387
2388 static int __init cpufreq_core_init(void)
2389 {
2390 if (cpufreq_disabled())
2391 return -ENODEV;
2392
2393 cpufreq_global_kobject = kobject_create();
2394 BUG_ON(!cpufreq_global_kobject);
2395 register_syscore_ops(&cpufreq_syscore_ops);
2396
2397 return 0;
2398 }
2399 core_initcall(cpufreq_core_init);