1 /*
2 * linux/drivers/cpufreq/cpufreq.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
7 *
8 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
9 * Added handling for CPU hotplug
10 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
11 * Fix handling for CPU hotplug -- affected CPUs
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
16 */
17
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
20 #include <linux/cpu.h>
21 #include <linux/cpufreq.h>
22 #include <linux/delay.h>
23 #include <linux/device.h>
24 #include <linux/init.h>
25 #include <linux/kernel_stat.h>
26 #include <linux/module.h>
27 #include <linux/mutex.h>
28 #include <linux/slab.h>
29 #include <linux/syscore_ops.h>
30 #include <linux/tick.h>
31 #include <trace/events/power.h>
32
33 /**
34 * The "cpufreq driver" - the arch- or hardware-dependent low
35 * level driver of CPUFreq support, and its spinlock. This lock
36 * also protects the cpufreq_cpu_data array.
37 */
38 static struct cpufreq_driver *cpufreq_driver;
39 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
40 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
41 static DEFINE_RWLOCK(cpufreq_driver_lock);
42 static DEFINE_MUTEX(cpufreq_governor_lock);
43 static LIST_HEAD(cpufreq_policy_list);
44
45 #ifdef CONFIG_HOTPLUG_CPU
46 /* This one keeps track of the previously set governor of a removed CPU */
47 static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
48 #endif
49
50 /*
51 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
52 * all cpufreq/hotplug/workqueue/etc related lock issues.
53 *
54 * The rules for this semaphore:
55 * - Any routine that wants to read from the policy structure will
56 * do a down_read on this semaphore.
57 * - Any routine that will write to the policy structure and/or may take away
58 * the policy altogether (eg. CPU hotplug), will hold this lock in write
59 * mode before doing so.
60 *
61 * Additional rules:
62 * - Governor routines that can be called in cpufreq hotplug path should not
63 * take this sem as top level hotplug notifier handler takes this.
64 * - Lock should not be held across
65 * __cpufreq_governor(data, CPUFREQ_GOV_STOP);
66 */
67 static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
68 static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
69
70 #define lock_policy_rwsem(mode, cpu) \
71 static int lock_policy_rwsem_##mode(int cpu) \
72 { \
73 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
74 BUG_ON(policy_cpu == -1); \
75 down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
76 \
77 return 0; \
78 }
79
80 lock_policy_rwsem(read, cpu);
81 lock_policy_rwsem(write, cpu);
82
83 #define unlock_policy_rwsem(mode, cpu) \
84 static void unlock_policy_rwsem_##mode(int cpu) \
85 { \
86 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
87 BUG_ON(policy_cpu == -1); \
88 up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
89 }
90
91 unlock_policy_rwsem(read, cpu);
92 unlock_policy_rwsem(write, cpu);
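/*
 * Illustrative note (not part of the original source): each invocation of the
 * macros above token-pastes the mode into a helper. For example,
 * lock_policy_rwsem(read, cpu) expands to roughly the following, using the
 * per-CPU variables declared above:
 *
 *	static int lock_policy_rwsem_read(int cpu)
 *	{
 *		int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
 *		BUG_ON(policy_cpu == -1);
 *		down_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
 *
 *		return 0;
 *	}
 */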
93
94 /*
95 * rwsem to guarantee that cpufreq driver module doesn't unload during critical
96 * sections
97 */
98 static DECLARE_RWSEM(cpufreq_rwsem);
99
100 /* internal prototypes */
101 static int __cpufreq_governor(struct cpufreq_policy *policy,
102 unsigned int event);
103 static unsigned int __cpufreq_get(unsigned int cpu);
104 static void handle_update(struct work_struct *work);
105
106 /**
107 * Two notifier lists: the "policy" list is involved in the
108 * validation process for a new CPU frequency policy; the
109 * "transition" list for kernel code that needs to handle
110 * changes to devices when the CPU clock speed changes.
111 * The mutex locks both lists.
112 */
113 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
114 static struct srcu_notifier_head cpufreq_transition_notifier_list;
115
116 static bool init_cpufreq_transition_notifier_list_called;
117 static int __init init_cpufreq_transition_notifier_list(void)
118 {
119 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
120 init_cpufreq_transition_notifier_list_called = true;
121 return 0;
122 }
123 pure_initcall(init_cpufreq_transition_notifier_list);
124
125 static int off __read_mostly;
126 static int cpufreq_disabled(void)
127 {
128 return off;
129 }
130 void disable_cpufreq(void)
131 {
132 off = 1;
133 }
134 static LIST_HEAD(cpufreq_governor_list);
135 static DEFINE_MUTEX(cpufreq_governor_mutex);
136
137 bool have_governor_per_policy(void)
138 {
139 return cpufreq_driver->have_governor_per_policy;
140 }
141 EXPORT_SYMBOL_GPL(have_governor_per_policy);
142
143 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
144 {
145 if (have_governor_per_policy())
146 return &policy->kobj;
147 else
148 return cpufreq_global_kobject;
149 }
150 EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
151
152 static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
153 {
154 u64 idle_time;
155 u64 cur_wall_time;
156 u64 busy_time;
157
158 cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
159
160 busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
161 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
162 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
163 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
164 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
165 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
166
167 idle_time = cur_wall_time - busy_time;
168 if (wall)
169 *wall = cputime_to_usecs(cur_wall_time);
170
171 return cputime_to_usecs(idle_time);
172 }
173
174 u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
175 {
176 u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
177
178 if (idle_time == -1ULL)
179 return get_cpu_idle_time_jiffy(cpu, wall);
180 else if (!io_busy)
181 idle_time += get_cpu_iowait_time_us(cpu, wall);
182
183 return idle_time;
184 }
185 EXPORT_SYMBOL_GPL(get_cpu_idle_time);
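/*
 * Illustrative sketch (not in the original file): governors such as ondemand
 * typically sample get_cpu_idle_time() twice and derive a load estimate from
 * the deltas. A minimal version of that calculation, assuming hypothetical
 * prev_idle/prev_wall bookkeeping kept by the caller, could look like this:
 *
 *	u64 idle, wall;
 *	unsigned int idle_delta, wall_delta, load;
 *
 *	idle = get_cpu_idle_time(cpu, &wall, 0);
 *	idle_delta = (unsigned int)(idle - prev_idle);
 *	wall_delta = (unsigned int)(wall - prev_wall);
 *	prev_idle = idle;
 *	prev_wall = wall;
 *
 *	load = wall_delta ? 100 * (wall_delta - idle_delta) / wall_delta : 0;
 */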
186
187 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
188 {
189 struct cpufreq_policy *policy = NULL;
190 unsigned long flags;
191
192 if (cpufreq_disabled() || (cpu >= nr_cpu_ids))
193 return NULL;
194
195 if (!down_read_trylock(&cpufreq_rwsem))
196 return NULL;
197
198 /* get the cpufreq driver */
199 read_lock_irqsave(&cpufreq_driver_lock, flags);
200
201 if (cpufreq_driver) {
202 /* get the CPU */
203 policy = per_cpu(cpufreq_cpu_data, cpu);
204 if (policy)
205 kobject_get(&policy->kobj);
206 }
207
208 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
209
210 if (!policy)
211 up_read(&cpufreq_rwsem);
212
213 return policy;
214 }
215 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
216
217 void cpufreq_cpu_put(struct cpufreq_policy *policy)
218 {
219 if (cpufreq_disabled())
220 return;
221
222 kobject_put(&policy->kobj);
223 up_read(&cpufreq_rwsem);
224 }
225 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
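/*
 * Illustrative sketch (not in the original file): cpufreq_cpu_get() returns a
 * policy with its kobject reference (and cpufreq_rwsem) held, so every
 * successful get must be paired with a cpufreq_cpu_put(). A typical reader,
 * with cur_khz being a hypothetical local variable, would do:
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *	unsigned int cur_khz = 0;
 *
 *	if (policy) {
 *		cur_khz = policy->cur;
 *		cpufreq_cpu_put(policy);
 *	}
 */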
226
227 /*********************************************************************
228 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
229 *********************************************************************/
230
231 /**
232 * adjust_jiffies - adjust the system "loops_per_jiffy"
233 *
234 * This function alters the system "loops_per_jiffy" for the clock
235 * speed change. Note that loops_per_jiffy cannot be updated on SMP
236 * systems as each CPU might be scaled differently. So, use the arch
237 * per-CPU loops_per_jiffy value wherever possible.
238 */
239 #ifndef CONFIG_SMP
240 static unsigned long l_p_j_ref;
241 static unsigned int l_p_j_ref_freq;
242
243 static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
244 {
245 if (ci->flags & CPUFREQ_CONST_LOOPS)
246 return;
247
248 if (!l_p_j_ref_freq) {
249 l_p_j_ref = loops_per_jiffy;
250 l_p_j_ref_freq = ci->old;
251 pr_debug("saving %lu as reference value for loops_per_jiffy; "
252 "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
253 }
254 if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
255 (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
256 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
257 ci->new);
258 pr_debug("scaling loops_per_jiffy to %lu "
259 "for frequency %u kHz\n", loops_per_jiffy, ci->new);
260 }
261 }
262 #else
263 static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
264 {
265 return;
266 }
267 #endif
268
269 static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
270 struct cpufreq_freqs *freqs, unsigned int state)
271 {
272 BUG_ON(irqs_disabled());
273
274 if (cpufreq_disabled())
275 return;
276
277 freqs->flags = cpufreq_driver->flags;
278 pr_debug("notification %u of frequency transition to %u kHz\n",
279 state, freqs->new);
280
281 switch (state) {
282
283 case CPUFREQ_PRECHANGE:
284 if (WARN(policy->transition_ongoing ==
285 cpumask_weight(policy->cpus),
286 "In middle of another frequency transition\n"))
287 return;
288
289 policy->transition_ongoing++;
290
291 /* detect if the driver reported a value as "old frequency"
292 * which is not equal to what the cpufreq core thinks is
293 * "old frequency".
294 */
295 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
296 if ((policy) && (policy->cpu == freqs->cpu) &&
297 (policy->cur) && (policy->cur != freqs->old)) {
298 pr_debug("Warning: CPU frequency is"
299 " %u, cpufreq assumed %u kHz.\n",
300 freqs->old, policy->cur);
301 freqs->old = policy->cur;
302 }
303 }
304 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
305 CPUFREQ_PRECHANGE, freqs);
306 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
307 break;
308
309 case CPUFREQ_POSTCHANGE:
310 if (WARN(!policy->transition_ongoing,
311 "No frequency transition in progress\n"))
312 return;
313
314 policy->transition_ongoing--;
315
316 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
317 pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
318 (unsigned long)freqs->cpu);
319 trace_cpu_frequency(freqs->new, freqs->cpu);
320 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
321 CPUFREQ_POSTCHANGE, freqs);
322 if (likely(policy) && likely(policy->cpu == freqs->cpu))
323 policy->cur = freqs->new;
324 break;
325 }
326 }
327
328 /**
329 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
330 * on frequency transition.
331 *
332 * This function calls the transition notifiers and the "adjust_jiffies"
333 * function. It is called twice on all CPU frequency changes that have
334 * external effects.
335 */
336 void cpufreq_notify_transition(struct cpufreq_policy *policy,
337 struct cpufreq_freqs *freqs, unsigned int state)
338 {
339 for_each_cpu(freqs->cpu, policy->cpus)
340 __cpufreq_notify_transition(policy, freqs, state);
341 }
342 EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
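/*
 * Illustrative sketch (not in the original file): a scaling driver brackets
 * the actual hardware reprogramming with the two notifications, so that
 * notifiers and adjust_jiffies() see a consistent old/new pair. Assuming a
 * hypothetical write_hw_freq() helper:
 *
 *	struct cpufreq_freqs freqs;
 *
 *	freqs.old = policy->cur;
 *	freqs.new = target_khz;
 *
 *	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 *	write_hw_freq(target_khz);
 *	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 *
 * freqs.cpu does not need to be set by the caller; cpufreq_notify_transition()
 * iterates it over policy->cpus.
 */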
343
344
345 /*********************************************************************
346 * SYSFS INTERFACE *
347 *********************************************************************/
348
349 static struct cpufreq_governor *__find_governor(const char *str_governor)
350 {
351 struct cpufreq_governor *t;
352
353 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
354 if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
355 return t;
356
357 return NULL;
358 }
359
360 /**
361 * cpufreq_parse_governor - parse a governor string
362 */
363 static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
364 struct cpufreq_governor **governor)
365 {
366 int err = -EINVAL;
367
368 if (!cpufreq_driver)
369 goto out;
370
371 if (cpufreq_driver->setpolicy) {
372 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
373 *policy = CPUFREQ_POLICY_PERFORMANCE;
374 err = 0;
375 } else if (!strnicmp(str_governor, "powersave",
376 CPUFREQ_NAME_LEN)) {
377 *policy = CPUFREQ_POLICY_POWERSAVE;
378 err = 0;
379 }
380 } else if (cpufreq_driver->target) {
381 struct cpufreq_governor *t;
382
383 mutex_lock(&cpufreq_governor_mutex);
384
385 t = __find_governor(str_governor);
386
387 if (t == NULL) {
388 int ret;
389
390 mutex_unlock(&cpufreq_governor_mutex);
391 ret = request_module("cpufreq_%s", str_governor);
392 mutex_lock(&cpufreq_governor_mutex);
393
394 if (ret == 0)
395 t = __find_governor(str_governor);
396 }
397
398 if (t != NULL) {
399 *governor = t;
400 err = 0;
401 }
402
403 mutex_unlock(&cpufreq_governor_mutex);
404 }
405 out:
406 return err;
407 }
408
409 /**
410 * cpufreq_per_cpu_attr_read() / show_##file_name() -
411 * print out cpufreq information
412 *
413 * Write out information from cpufreq_driver->policy[cpu]; object must be
414 * "unsigned int".
415 */
416
417 #define show_one(file_name, object) \
418 static ssize_t show_##file_name \
419 (struct cpufreq_policy *policy, char *buf) \
420 { \
421 return sprintf(buf, "%u\n", policy->object); \
422 }
423
424 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
425 show_one(cpuinfo_max_freq, cpuinfo.max_freq);
426 show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
427 show_one(scaling_min_freq, min);
428 show_one(scaling_max_freq, max);
429 show_one(scaling_cur_freq, cur);
430
431 static int __cpufreq_set_policy(struct cpufreq_policy *policy,
432 struct cpufreq_policy *new_policy);
433
434 /**
435 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
436 */
437 #define store_one(file_name, object) \
438 static ssize_t store_##file_name \
439 (struct cpufreq_policy *policy, const char *buf, size_t count) \
440 { \
441 unsigned int ret; \
442 struct cpufreq_policy new_policy; \
443 \
444 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
445 if (ret) \
446 return -EINVAL; \
447 \
448 ret = sscanf(buf, "%u", &new_policy.object); \
449 if (ret != 1) \
450 return -EINVAL; \
451 \
452 ret = __cpufreq_set_policy(policy, &new_policy); \
453 policy->user_policy.object = policy->object; \
454 \
455 return ret ? ret : count; \
456 }
457
458 store_one(scaling_min_freq, min);
459 store_one(scaling_max_freq, max);
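/*
 * Illustrative note (not part of the original source): each show_one()/
 * store_one() invocation above generates the sysfs handler for one policy
 * field. For instance, show_one(scaling_max_freq, max) expands to roughly:
 *
 *	static ssize_t show_scaling_max_freq
 *	(struct cpufreq_policy *policy, char *buf)
 *	{
 *		return sprintf(buf, "%u\n", policy->max);
 *	}
 */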
460
461 /**
462 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
463 */
464 static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
465 char *buf)
466 {
467 unsigned int cur_freq = __cpufreq_get(policy->cpu);
468 if (!cur_freq)
469 return sprintf(buf, "<unknown>");
470 return sprintf(buf, "%u\n", cur_freq);
471 }
472
473 /**
474 * show_scaling_governor - show the current policy for the specified CPU
475 */
476 static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
477 {
478 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
479 return sprintf(buf, "powersave\n");
480 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
481 return sprintf(buf, "performance\n");
482 else if (policy->governor)
483 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
484 policy->governor->name);
485 return -EINVAL;
486 }
487
488 /**
489 * store_scaling_governor - store policy for the specified CPU
490 */
491 static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
492 const char *buf, size_t count)
493 {
494 unsigned int ret;
495 char str_governor[16];
496 struct cpufreq_policy new_policy;
497
498 ret = cpufreq_get_policy(&new_policy, policy->cpu);
499 if (ret)
500 return ret;
501
502 ret = sscanf(buf, "%15s", str_governor);
503 if (ret != 1)
504 return -EINVAL;
505
506 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
507 &new_policy.governor))
508 return -EINVAL;
509
510 /*
511 * Do not use cpufreq_set_policy here or the user_policy.max
512 * will be wrongly overridden
513 */
514 ret = __cpufreq_set_policy(policy, &new_policy);
515
516 policy->user_policy.policy = policy->policy;
517 policy->user_policy.governor = policy->governor;
518
519 if (ret)
520 return ret;
521 else
522 return count;
523 }
524
525 /**
526 * show_scaling_driver - show the cpufreq driver currently loaded
527 */
528 static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
529 {
530 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
531 }
532
533 /**
534 * show_scaling_available_governors - show the available CPUfreq governors
535 */
536 static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
537 char *buf)
538 {
539 ssize_t i = 0;
540 struct cpufreq_governor *t;
541
542 if (!cpufreq_driver->target) {
543 i += sprintf(buf, "performance powersave");
544 goto out;
545 }
546
547 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
548 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
549 - (CPUFREQ_NAME_LEN + 2)))
550 goto out;
551 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
552 }
553 out:
554 i += sprintf(&buf[i], "\n");
555 return i;
556 }
557
558 ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
559 {
560 ssize_t i = 0;
561 unsigned int cpu;
562
563 for_each_cpu(cpu, mask) {
564 if (i)
565 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
566 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
567 if (i >= (PAGE_SIZE - 5))
568 break;
569 }
570 i += sprintf(&buf[i], "\n");
571 return i;
572 }
573 EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
574
575 /**
576 * show_related_cpus - show the CPUs affected by each transition even if
577 * hw coordination is in use
578 */
579 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
580 {
581 return cpufreq_show_cpus(policy->related_cpus, buf);
582 }
583
584 /**
585 * show_affected_cpus - show the CPUs affected by each transition
586 */
587 static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
588 {
589 return cpufreq_show_cpus(policy->cpus, buf);
590 }
591
592 static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
593 const char *buf, size_t count)
594 {
595 unsigned int freq = 0;
596 unsigned int ret;
597
598 if (!policy->governor || !policy->governor->store_setspeed)
599 return -EINVAL;
600
601 ret = sscanf(buf, "%u", &freq);
602 if (ret != 1)
603 return -EINVAL;
604
605 policy->governor->store_setspeed(policy, freq);
606
607 return count;
608 }
609
610 static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
611 {
612 if (!policy->governor || !policy->governor->show_setspeed)
613 return sprintf(buf, "<unsupported>\n");
614
615 return policy->governor->show_setspeed(policy, buf);
616 }
617
618 /**
619 * show_bios_limit - show the current cpufreq HW/BIOS limitation
620 */
621 static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
622 {
623 unsigned int limit;
624 int ret;
625 if (cpufreq_driver->bios_limit) {
626 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
627 if (!ret)
628 return sprintf(buf, "%u\n", limit);
629 }
630 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
631 }
632
633 cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
634 cpufreq_freq_attr_ro(cpuinfo_min_freq);
635 cpufreq_freq_attr_ro(cpuinfo_max_freq);
636 cpufreq_freq_attr_ro(cpuinfo_transition_latency);
637 cpufreq_freq_attr_ro(scaling_available_governors);
638 cpufreq_freq_attr_ro(scaling_driver);
639 cpufreq_freq_attr_ro(scaling_cur_freq);
640 cpufreq_freq_attr_ro(bios_limit);
641 cpufreq_freq_attr_ro(related_cpus);
642 cpufreq_freq_attr_ro(affected_cpus);
643 cpufreq_freq_attr_rw(scaling_min_freq);
644 cpufreq_freq_attr_rw(scaling_max_freq);
645 cpufreq_freq_attr_rw(scaling_governor);
646 cpufreq_freq_attr_rw(scaling_setspeed);
647
648 static struct attribute *default_attrs[] = {
649 &cpuinfo_min_freq.attr,
650 &cpuinfo_max_freq.attr,
651 &cpuinfo_transition_latency.attr,
652 &scaling_min_freq.attr,
653 &scaling_max_freq.attr,
654 &affected_cpus.attr,
655 &related_cpus.attr,
656 &scaling_governor.attr,
657 &scaling_driver.attr,
658 &scaling_available_governors.attr,
659 &scaling_setspeed.attr,
660 NULL
661 };
662
663 #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
664 #define to_attr(a) container_of(a, struct freq_attr, attr)
665
666 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
667 {
668 struct cpufreq_policy *policy = to_policy(kobj);
669 struct freq_attr *fattr = to_attr(attr);
670 ssize_t ret = -EINVAL;
671
672 if (!down_read_trylock(&cpufreq_rwsem))
673 goto exit;
674
675 if (lock_policy_rwsem_read(policy->cpu) < 0)
676 goto up_read;
677
678 if (fattr->show)
679 ret = fattr->show(policy, buf);
680 else
681 ret = -EIO;
682
683 unlock_policy_rwsem_read(policy->cpu);
684
685 up_read:
686 up_read(&cpufreq_rwsem);
687 exit:
688 return ret;
689 }
690
691 static ssize_t store(struct kobject *kobj, struct attribute *attr,
692 const char *buf, size_t count)
693 {
694 struct cpufreq_policy *policy = to_policy(kobj);
695 struct freq_attr *fattr = to_attr(attr);
696 ssize_t ret = -EINVAL;
697
698 if (!down_read_trylock(&cpufreq_rwsem))
699 goto exit;
700
701 if (lock_policy_rwsem_write(policy->cpu) < 0)
702 goto up_read;
703
704 if (fattr->store)
705 ret = fattr->store(policy, buf, count);
706 else
707 ret = -EIO;
708
709 unlock_policy_rwsem_write(policy->cpu);
710
711 up_read:
712 up_read(&cpufreq_rwsem);
713 exit:
714 return ret;
715 }
716
717 static void cpufreq_sysfs_release(struct kobject *kobj)
718 {
719 struct cpufreq_policy *policy = to_policy(kobj);
720 pr_debug("last reference is dropped\n");
721 complete(&policy->kobj_unregister);
722 }
723
724 static const struct sysfs_ops sysfs_ops = {
725 .show = show,
726 .store = store,
727 };
728
729 static struct kobj_type ktype_cpufreq = {
730 .sysfs_ops = &sysfs_ops,
731 .default_attrs = default_attrs,
732 .release = cpufreq_sysfs_release,
733 };
734
735 struct kobject *cpufreq_global_kobject;
736 EXPORT_SYMBOL(cpufreq_global_kobject);
737
738 static int cpufreq_global_kobject_usage;
739
740 int cpufreq_get_global_kobject(void)
741 {
742 if (!cpufreq_global_kobject_usage++)
743 return kobject_add(cpufreq_global_kobject,
744 &cpu_subsys.dev_root->kobj, "%s", "cpufreq");
745
746 return 0;
747 }
748 EXPORT_SYMBOL(cpufreq_get_global_kobject);
749
750 void cpufreq_put_global_kobject(void)
751 {
752 if (!--cpufreq_global_kobject_usage)
753 kobject_del(cpufreq_global_kobject);
754 }
755 EXPORT_SYMBOL(cpufreq_put_global_kobject);
756
757 int cpufreq_sysfs_create_file(const struct attribute *attr)
758 {
759 int ret = cpufreq_get_global_kobject();
760
761 if (!ret) {
762 ret = sysfs_create_file(cpufreq_global_kobject, attr);
763 if (ret)
764 cpufreq_put_global_kobject();
765 }
766
767 return ret;
768 }
769 EXPORT_SYMBOL(cpufreq_sysfs_create_file);
770
771 void cpufreq_sysfs_remove_file(const struct attribute *attr)
772 {
773 sysfs_remove_file(cpufreq_global_kobject, attr);
774 cpufreq_put_global_kobject();
775 }
776 EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
777
778 /* symlink affected CPUs */
779 static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
780 {
781 unsigned int j;
782 int ret = 0;
783
784 for_each_cpu(j, policy->cpus) {
785 struct device *cpu_dev;
786
787 if (j == policy->cpu)
788 continue;
789
790 pr_debug("Adding link for CPU: %u\n", j);
791 cpu_dev = get_cpu_device(j);
792 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
793 "cpufreq");
794 if (ret)
795 break;
796 }
797 return ret;
798 }
799
800 static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
801 struct device *dev)
802 {
803 struct freq_attr **drv_attr;
804 int ret = 0;
805
806 /* prepare interface data */
807 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
808 &dev->kobj, "cpufreq");
809 if (ret)
810 return ret;
811
812 /* set up files for this cpu device */
813 drv_attr = cpufreq_driver->attr;
814 while ((drv_attr) && (*drv_attr)) {
815 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
816 if (ret)
817 goto err_out_kobj_put;
818 drv_attr++;
819 }
820 if (cpufreq_driver->get) {
821 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
822 if (ret)
823 goto err_out_kobj_put;
824 }
825 if (cpufreq_driver->target) {
826 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
827 if (ret)
828 goto err_out_kobj_put;
829 }
830 if (cpufreq_driver->bios_limit) {
831 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
832 if (ret)
833 goto err_out_kobj_put;
834 }
835
836 ret = cpufreq_add_dev_symlink(policy);
837 if (ret)
838 goto err_out_kobj_put;
839
840 return ret;
841
842 err_out_kobj_put:
843 kobject_put(&policy->kobj);
844 wait_for_completion(&policy->kobj_unregister);
845 return ret;
846 }
847
848 static void cpufreq_init_policy(struct cpufreq_policy *policy)
849 {
850 struct cpufreq_policy new_policy;
851 int ret = 0;
852
853 memcpy(&new_policy, policy, sizeof(*policy));
854 /* assure that the starting sequence is run in __cpufreq_set_policy */
855 policy->governor = NULL;
856
857 /* set default policy */
858 ret = __cpufreq_set_policy(policy, &new_policy);
859 policy->user_policy.policy = policy->policy;
860 policy->user_policy.governor = policy->governor;
861
862 if (ret) {
863 pr_debug("setting policy failed\n");
864 if (cpufreq_driver->exit)
865 cpufreq_driver->exit(policy);
866 }
867 }
868
869 #ifdef CONFIG_HOTPLUG_CPU
870 static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
871 unsigned int cpu, struct device *dev,
872 bool frozen)
873 {
874 int ret = 0, has_target = !!cpufreq_driver->target;
875 unsigned long flags;
876
877 if (has_target) {
878 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
879 if (ret) {
880 pr_err("%s: Failed to stop governor\n", __func__);
881 return ret;
882 }
883 }
884
885 lock_policy_rwsem_write(policy->cpu);
886
887 write_lock_irqsave(&cpufreq_driver_lock, flags);
888
889 cpumask_set_cpu(cpu, policy->cpus);
890 per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
891 per_cpu(cpufreq_cpu_data, cpu) = policy;
892 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
893
894 unlock_policy_rwsem_write(policy->cpu);
895
896 if (has_target) {
897 if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
898 (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
899 pr_err("%s: Failed to start governor\n", __func__);
900 return ret;
901 }
902 }
903
904 /* Don't touch sysfs links during light-weight init */
905 if (!frozen)
906 ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
907
908 return ret;
909 }
910 #endif
911
912 static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
913 {
914 struct cpufreq_policy *policy;
915 unsigned long flags;
916
917 write_lock_irqsave(&cpufreq_driver_lock, flags);
918
919 policy = per_cpu(cpufreq_cpu_data_fallback, cpu);
920
921 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
922
923 return policy;
924 }
925
926 static struct cpufreq_policy *cpufreq_policy_alloc(void)
927 {
928 struct cpufreq_policy *policy;
929
930 policy = kzalloc(sizeof(*policy), GFP_KERNEL);
931 if (!policy)
932 return NULL;
933
934 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
935 goto err_free_policy;
936
937 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
938 goto err_free_cpumask;
939
940 INIT_LIST_HEAD(&policy->policy_list);
941 return policy;
942
943 err_free_cpumask:
944 free_cpumask_var(policy->cpus);
945 err_free_policy:
946 kfree(policy);
947
948 return NULL;
949 }
950
951 static void cpufreq_policy_free(struct cpufreq_policy *policy)
952 {
953 unsigned long flags;
954
955 write_lock_irqsave(&cpufreq_driver_lock, flags);
956 list_del(&policy->policy_list);
957 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
958
959 free_cpumask_var(policy->related_cpus);
960 free_cpumask_var(policy->cpus);
961 kfree(policy);
962 }
963
964 static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
965 bool frozen)
966 {
967 unsigned int j, cpu = dev->id;
968 int ret = -ENOMEM;
969 struct cpufreq_policy *policy;
970 unsigned long flags;
971 #ifdef CONFIG_HOTPLUG_CPU
972 struct cpufreq_governor *gov;
973 int sibling;
974 #endif
975
976 if (cpu_is_offline(cpu))
977 return 0;
978
979 pr_debug("adding CPU %u\n", cpu);
980
981 #ifdef CONFIG_SMP
982 /* check whether a different CPU already registered this
983 * CPU because it is in the same boat. */
984 policy = cpufreq_cpu_get(cpu);
985 if (unlikely(policy)) {
986 cpufreq_cpu_put(policy);
987 return 0;
988 }
989
990 if (!down_read_trylock(&cpufreq_rwsem))
991 return 0;
992
993 #ifdef CONFIG_HOTPLUG_CPU
994 /* Check if this cpu was hot-unplugged earlier and has siblings */
995 read_lock_irqsave(&cpufreq_driver_lock, flags);
996 for_each_online_cpu(sibling) {
997 struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
998 if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
999 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1000 ret = cpufreq_add_policy_cpu(cp, cpu, dev, frozen);
1001 up_read(&cpufreq_rwsem);
1002 return ret;
1003 }
1004 }
1005 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1006 #endif
1007 #endif
1008
1009 if (frozen)
1010 /* Restore the saved policy when doing light-weight init */
1011 policy = cpufreq_policy_restore(cpu);
1012 else
1013 policy = cpufreq_policy_alloc();
1014
1015 if (!policy)
1016 goto nomem_out;
1017
1018 policy->cpu = cpu;
1019 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
1020 cpumask_copy(policy->cpus, cpumask_of(cpu));
1021
1022 /* Initially set CPU itself as the policy_cpu */
1023 per_cpu(cpufreq_policy_cpu, cpu) = cpu;
1024
1025 init_completion(&policy->kobj_unregister);
1026 INIT_WORK(&policy->update, handle_update);
1027
1028 /* call driver. From then on the cpufreq must be able
1029 * to accept all calls to ->verify and ->setpolicy for this CPU
1030 */
1031 ret = cpufreq_driver->init(policy);
1032 if (ret) {
1033 pr_debug("initialization failed\n");
1034 goto err_set_policy_cpu;
1035 }
1036
1037         /* related cpus should at least have policy->cpus */
1038 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
1039
1040 /*
1041          * affected cpus must always be the ones that are online. We aren't
1042 * managing offline cpus here.
1043 */
1044 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1045
1046 policy->user_policy.min = policy->min;
1047 policy->user_policy.max = policy->max;
1048
1049 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1050 CPUFREQ_START, policy);
1051
1052 #ifdef CONFIG_HOTPLUG_CPU
1053 gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
1054 if (gov) {
1055 policy->governor = gov;
1056 pr_debug("Restoring governor %s for cpu %d\n",
1057 policy->governor->name, cpu);
1058 }
1059 #endif
1060
1061 write_lock_irqsave(&cpufreq_driver_lock, flags);
1062 for_each_cpu(j, policy->cpus) {
1063 per_cpu(cpufreq_cpu_data, j) = policy;
1064 per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
1065 }
1066 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1067
1068 if (!frozen) {
1069 ret = cpufreq_add_dev_interface(policy, dev);
1070 if (ret)
1071 goto err_out_unregister;
1072
1073 write_lock_irqsave(&cpufreq_driver_lock, flags);
1074 list_add(&policy->policy_list, &cpufreq_policy_list);
1075 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1076 }
1077
1078 cpufreq_init_policy(policy);
1079
1080 kobject_uevent(&policy->kobj, KOBJ_ADD);
1081 up_read(&cpufreq_rwsem);
1082
1083 pr_debug("initialization complete\n");
1084
1085 return 0;
1086
1087 err_out_unregister:
1088 write_lock_irqsave(&cpufreq_driver_lock, flags);
1089 for_each_cpu(j, policy->cpus) {
1090 per_cpu(cpufreq_cpu_data, j) = NULL;
1091 if (j != cpu)
1092 per_cpu(cpufreq_policy_cpu, j) = -1;
1093 }
1094 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1095
1096 err_set_policy_cpu:
1097 per_cpu(cpufreq_policy_cpu, cpu) = -1;
1098 cpufreq_policy_free(policy);
1099 nomem_out:
1100 up_read(&cpufreq_rwsem);
1101
1102 return ret;
1103 }
1104
1105 /**
1106 * cpufreq_add_dev - add a CPU device
1107 *
1108 * Adds the cpufreq interface for a CPU device.
1109 *
1110 * The Oracle says: try running cpufreq registration/unregistration concurrently
1111  * with cpu hotplugging and all hell will break loose. Tried to clean this
1112 * mess up, but more thorough testing is needed. - Mathieu
1113 */
1114 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1115 {
1116 return __cpufreq_add_dev(dev, sif, false);
1117 }
1118
1119 static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
1120 {
1121 int j;
1122
1123 policy->last_cpu = policy->cpu;
1124 policy->cpu = cpu;
1125
1126 for_each_cpu(j, policy->cpus)
1127 per_cpu(cpufreq_policy_cpu, j) = cpu;
1128
1129 #ifdef CONFIG_CPU_FREQ_TABLE
1130 cpufreq_frequency_table_update_policy_cpu(policy);
1131 #endif
1132 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1133 CPUFREQ_UPDATE_POLICY_CPU, policy);
1134 }
1135
1136 static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
1137 unsigned int old_cpu, bool frozen)
1138 {
1139 struct device *cpu_dev;
1140 unsigned long flags;
1141 int ret;
1142
1143 /* first sibling now owns the new sysfs dir */
1144 cpu_dev = get_cpu_device(cpumask_first(policy->cpus));
1145
1146 /* Don't touch sysfs files during light-weight tear-down */
1147 if (frozen)
1148 return cpu_dev->id;
1149
1150 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
1151 ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
1152 if (ret) {
1153 pr_err("%s: Failed to move kobj: %d", __func__, ret);
1154
1155 WARN_ON(lock_policy_rwsem_write(old_cpu));
1156 cpumask_set_cpu(old_cpu, policy->cpus);
1157
1158 write_lock_irqsave(&cpufreq_driver_lock, flags);
1159 per_cpu(cpufreq_cpu_data, old_cpu) = policy;
1160 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1161
1162 unlock_policy_rwsem_write(old_cpu);
1163
1164 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
1165 "cpufreq");
1166
1167 return -EINVAL;
1168 }
1169
1170 return cpu_dev->id;
1171 }
1172
1173 /**
1174 * __cpufreq_remove_dev - remove a CPU device
1175 *
1176 * Removes the cpufreq interface for a CPU device.
1177 * Caller should already have policy_rwsem in write mode for this CPU.
1178 * This routine frees the rwsem before returning.
1179 */
1180 static int __cpufreq_remove_dev(struct device *dev,
1181 struct subsys_interface *sif, bool frozen)
1182 {
1183 unsigned int cpu = dev->id, cpus;
1184 int new_cpu, ret;
1185 unsigned long flags;
1186 struct cpufreq_policy *policy;
1187 struct kobject *kobj;
1188 struct completion *cmp;
1189
1190 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1191
1192 write_lock_irqsave(&cpufreq_driver_lock, flags);
1193
1194 policy = per_cpu(cpufreq_cpu_data, cpu);
1195 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1196
1197 /* Save the policy somewhere when doing a light-weight tear-down */
1198 if (frozen)
1199 per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;
1200
1201 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1202
1203 if (!policy) {
1204 pr_debug("%s: No cpu_data found\n", __func__);
1205 return -EINVAL;
1206 }
1207
1208 if (cpufreq_driver->target) {
1209 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1210 if (ret) {
1211 pr_err("%s: Failed to stop governor\n", __func__);
1212 return ret;
1213 }
1214 }
1215
1216 #ifdef CONFIG_HOTPLUG_CPU
1217 if (!cpufreq_driver->setpolicy)
1218 strncpy(per_cpu(cpufreq_cpu_governor, cpu),
1219 policy->governor->name, CPUFREQ_NAME_LEN);
1220 #endif
1221
1222 WARN_ON(lock_policy_rwsem_write(cpu));
1223 cpus = cpumask_weight(policy->cpus);
1224
1225 if (cpus > 1)
1226 cpumask_clear_cpu(cpu, policy->cpus);
1227 unlock_policy_rwsem_write(cpu);
1228
1229 if (cpu != policy->cpu && !frozen) {
1230 sysfs_remove_link(&dev->kobj, "cpufreq");
1231 } else if (cpus > 1) {
1232
1233 new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen);
1234 if (new_cpu >= 0) {
1235 WARN_ON(lock_policy_rwsem_write(cpu));
1236 update_policy_cpu(policy, new_cpu);
1237 unlock_policy_rwsem_write(cpu);
1238
1239 if (!frozen) {
1240 pr_debug("%s: policy Kobject moved to cpu: %d "
1241 "from: %d\n",__func__, new_cpu, cpu);
1242 }
1243 }
1244 }
1245
1246 /* If cpu is last user of policy, free policy */
1247 if (cpus == 1) {
1248 if (cpufreq_driver->target) {
1249 ret = __cpufreq_governor(policy,
1250 CPUFREQ_GOV_POLICY_EXIT);
1251 if (ret) {
1252 pr_err("%s: Failed to exit governor\n",
1253 __func__);
1254 return ret;
1255 }
1256 }
1257
1258 if (!frozen) {
1259 lock_policy_rwsem_read(cpu);
1260 kobj = &policy->kobj;
1261 cmp = &policy->kobj_unregister;
1262 unlock_policy_rwsem_read(cpu);
1263 kobject_put(kobj);
1264
1265 /*
1266 * We need to make sure that the underlying kobj is
1267 * actually not referenced anymore by anybody before we
1268 * proceed with unloading.
1269 */
1270 pr_debug("waiting for dropping of refcount\n");
1271 wait_for_completion(cmp);
1272 pr_debug("wait complete\n");
1273 }
1274
1275 /*
1276 * Perform the ->exit() even during light-weight tear-down,
1277 * since this is a core component, and is essential for the
1278 * subsequent light-weight ->init() to succeed.
1279 */
1280 if (cpufreq_driver->exit)
1281 cpufreq_driver->exit(policy);
1282
1283 if (!frozen)
1284 cpufreq_policy_free(policy);
1285 } else {
1286 if (cpufreq_driver->target) {
1287 if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
1288 (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
1289 pr_err("%s: Failed to start governor\n",
1290 __func__);
1291 return ret;
1292 }
1293 }
1294 }
1295
1296 per_cpu(cpufreq_policy_cpu, cpu) = -1;
1297 return 0;
1298 }
1299
1300 static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1301 {
1302 unsigned int cpu = dev->id;
1303 int retval;
1304
1305 if (cpu_is_offline(cpu))
1306 return 0;
1307
1308 retval = __cpufreq_remove_dev(dev, sif, false);
1309 return retval;
1310 }
1311
1312 static void handle_update(struct work_struct *work)
1313 {
1314 struct cpufreq_policy *policy =
1315 container_of(work, struct cpufreq_policy, update);
1316 unsigned int cpu = policy->cpu;
1317 pr_debug("handle_update for cpu %u called\n", cpu);
1318 cpufreq_update_policy(cpu);
1319 }
1320
1321 /**
1322  * cpufreq_out_of_sync - If the actual and saved CPU frequencies differ, we're
1323 * in deep trouble.
1324 * @cpu: cpu number
1325 * @old_freq: CPU frequency the kernel thinks the CPU runs at
1326 * @new_freq: CPU frequency the CPU actually runs at
1327 *
1328 * We adjust to current frequency first, and need to clean up later.
1329  * So either call cpufreq_update_policy() or schedule handle_update().
1330 */
1331 static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1332 unsigned int new_freq)
1333 {
1334 struct cpufreq_policy *policy;
1335 struct cpufreq_freqs freqs;
1336 unsigned long flags;
1337
1338 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
1339 "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
1340
1341 freqs.old = old_freq;
1342 freqs.new = new_freq;
1343
1344 read_lock_irqsave(&cpufreq_driver_lock, flags);
1345 policy = per_cpu(cpufreq_cpu_data, cpu);
1346 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1347
1348 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
1349 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
1350 }
1351
1352 /**
1353 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1354 * @cpu: CPU number
1355 *
1356 * This is the last known freq, without actually getting it from the driver.
1357 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1358 */
1359 unsigned int cpufreq_quick_get(unsigned int cpu)
1360 {
1361 struct cpufreq_policy *policy;
1362 unsigned int ret_freq = 0;
1363
1364 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1365 return cpufreq_driver->get(cpu);
1366
1367 policy = cpufreq_cpu_get(cpu);
1368 if (policy) {
1369 ret_freq = policy->cur;
1370 cpufreq_cpu_put(policy);
1371 }
1372
1373 return ret_freq;
1374 }
1375 EXPORT_SYMBOL(cpufreq_quick_get);
1376
1377 /**
1378 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1379 * @cpu: CPU number
1380 *
1381 * Just return the max possible frequency for a given CPU.
1382 */
1383 unsigned int cpufreq_quick_get_max(unsigned int cpu)
1384 {
1385 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1386 unsigned int ret_freq = 0;
1387
1388 if (policy) {
1389 ret_freq = policy->max;
1390 cpufreq_cpu_put(policy);
1391 }
1392
1393 return ret_freq;
1394 }
1395 EXPORT_SYMBOL(cpufreq_quick_get_max);
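/*
 * Illustrative sketch (not in the original file): other subsystems (thermal
 * drivers, /proc/cpuinfo code, etc.) use these helpers to read the last known
 * and the maximum permitted frequency without calling into the driver:
 *
 *	unsigned int cur_khz = cpufreq_quick_get(cpu);
 *	unsigned int max_khz = cpufreq_quick_get_max(cpu);
 *
 *	if (cur_khz)
 *		pr_info("cpu%u: %u of %u kHz\n", cpu, cur_khz, max_khz);
 */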
1396
1397 static unsigned int __cpufreq_get(unsigned int cpu)
1398 {
1399 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1400 unsigned int ret_freq = 0;
1401
1402 if (!cpufreq_driver->get)
1403 return ret_freq;
1404
1405 ret_freq = cpufreq_driver->get(cpu);
1406
1407 if (ret_freq && policy->cur &&
1408 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1409 /* verify no discrepancy between actual and
1410 saved value exists */
1411 if (unlikely(ret_freq != policy->cur)) {
1412 cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
1413 schedule_work(&policy->update);
1414 }
1415 }
1416
1417 return ret_freq;
1418 }
1419
1420 /**
1421 * cpufreq_get - get the current CPU frequency (in kHz)
1422 * @cpu: CPU number
1423 *
1424  * Get the current (static) frequency of the CPU
1425 */
1426 unsigned int cpufreq_get(unsigned int cpu)
1427 {
1428 unsigned int ret_freq = 0;
1429
1430 if (!down_read_trylock(&cpufreq_rwsem))
1431 return 0;
1432
1433 if (unlikely(lock_policy_rwsem_read(cpu)))
1434 goto out_policy;
1435
1436 ret_freq = __cpufreq_get(cpu);
1437
1438 unlock_policy_rwsem_read(cpu);
1439
1440 out_policy:
1441 up_read(&cpufreq_rwsem);
1442
1443 return ret_freq;
1444 }
1445 EXPORT_SYMBOL(cpufreq_get);
1446
1447 static struct subsys_interface cpufreq_interface = {
1448 .name = "cpufreq",
1449 .subsys = &cpu_subsys,
1450 .add_dev = cpufreq_add_dev,
1451 .remove_dev = cpufreq_remove_dev,
1452 };
1453
1454 /**
1455 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
1456 *
1457 * This function is only executed for the boot processor. The other CPUs
1458 * have been put offline by means of CPU hotplug.
1459 */
1460 static int cpufreq_bp_suspend(void)
1461 {
1462 int ret = 0;
1463
1464 int cpu = smp_processor_id();
1465 struct cpufreq_policy *policy;
1466
1467 pr_debug("suspending cpu %u\n", cpu);
1468
1469 /* If there's no policy for the boot CPU, we have nothing to do. */
1470 policy = cpufreq_cpu_get(cpu);
1471 if (!policy)
1472 return 0;
1473
1474 if (cpufreq_driver->suspend) {
1475 ret = cpufreq_driver->suspend(policy);
1476 if (ret)
1477 printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
1478 "step on CPU %u\n", policy->cpu);
1479 }
1480
1481 cpufreq_cpu_put(policy);
1482 return ret;
1483 }
1484
1485 /**
1486 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
1487 *
1488 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
1489 * 2.) schedule call cpufreq_update_policy() ASAP as interrupts are
1490 * restored. It will verify that the current freq is in sync with
1491 * what we believe it to be. This is a bit later than when it
1492  *      should be, but nonetheless it's better than calling
1493 * cpufreq_driver->get() here which might re-enable interrupts...
1494 *
1495 * This function is only executed for the boot CPU. The other CPUs have not
1496 * been turned on yet.
1497 */
1498 static void cpufreq_bp_resume(void)
1499 {
1500 int ret = 0;
1501
1502 int cpu = smp_processor_id();
1503 struct cpufreq_policy *policy;
1504
1505 pr_debug("resuming cpu %u\n", cpu);
1506
1507 /* If there's no policy for the boot CPU, we have nothing to do. */
1508 policy = cpufreq_cpu_get(cpu);
1509 if (!policy)
1510 return;
1511
1512 if (cpufreq_driver->resume) {
1513 ret = cpufreq_driver->resume(policy);
1514 if (ret) {
1515 printk(KERN_ERR "cpufreq: resume failed in ->resume "
1516 "step on CPU %u\n", policy->cpu);
1517 goto fail;
1518 }
1519 }
1520
1521 schedule_work(&policy->update);
1522
1523 fail:
1524 cpufreq_cpu_put(policy);
1525 }
1526
1527 static struct syscore_ops cpufreq_syscore_ops = {
1528 .suspend = cpufreq_bp_suspend,
1529 .resume = cpufreq_bp_resume,
1530 };
1531
1532 /**
1533 * cpufreq_get_current_driver - return current driver's name
1534 *
1535 * Return the name string of the currently loaded cpufreq driver
1536 * or NULL, if none.
1537 */
1538 const char *cpufreq_get_current_driver(void)
1539 {
1540 if (cpufreq_driver)
1541 return cpufreq_driver->name;
1542
1543 return NULL;
1544 }
1545 EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1546
1547 /*********************************************************************
1548 * NOTIFIER LISTS INTERFACE *
1549 *********************************************************************/
1550
1551 /**
1552 * cpufreq_register_notifier - register a driver with cpufreq
1553 * @nb: notifier function to register
1554 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1555 *
1556 * Add a driver to one of two lists: either a list of drivers that
1557 * are notified about clock rate changes (once before and once after
1558 * the transition), or a list of drivers that are notified about
1559 * changes in cpufreq policy.
1560 *
1561 * This function may sleep, and has the same return conditions as
1562 * blocking_notifier_chain_register.
1563 */
1564 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1565 {
1566 int ret;
1567
1568 if (cpufreq_disabled())
1569 return -EINVAL;
1570
1571 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1572
1573 switch (list) {
1574 case CPUFREQ_TRANSITION_NOTIFIER:
1575 ret = srcu_notifier_chain_register(
1576 &cpufreq_transition_notifier_list, nb);
1577 break;
1578 case CPUFREQ_POLICY_NOTIFIER:
1579 ret = blocking_notifier_chain_register(
1580 &cpufreq_policy_notifier_list, nb);
1581 break;
1582 default:
1583 ret = -EINVAL;
1584 }
1585
1586 return ret;
1587 }
1588 EXPORT_SYMBOL(cpufreq_register_notifier);
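/*
 * Illustrative sketch (not in the original file): a minimal transition
 * notifier. For CPUFREQ_TRANSITION_NOTIFIER registrations the void *data
 * argument of the callback is a struct cpufreq_freqs pointer:
 *
 *	static int sample_transition(struct notifier_block *nb,
 *				     unsigned long event, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (event == CPUFREQ_POSTCHANGE)
 *			pr_debug("cpu%u now at %u kHz\n",
 *				 freqs->cpu, freqs->new);
 *
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block sample_nb = {
 *		.notifier_call = sample_transition,
 *	};
 *
 *	... cpufreq_register_notifier(&sample_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */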
1589
1590 /**
1591 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1592 * @nb: notifier block to be unregistered
1593 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1594 *
1595 * Remove a driver from the CPU frequency notifier list.
1596 *
1597 * This function may sleep, and has the same return conditions as
1598 * blocking_notifier_chain_unregister.
1599 */
1600 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1601 {
1602 int ret;
1603
1604 if (cpufreq_disabled())
1605 return -EINVAL;
1606
1607 switch (list) {
1608 case CPUFREQ_TRANSITION_NOTIFIER:
1609 ret = srcu_notifier_chain_unregister(
1610 &cpufreq_transition_notifier_list, nb);
1611 break;
1612 case CPUFREQ_POLICY_NOTIFIER:
1613 ret = blocking_notifier_chain_unregister(
1614 &cpufreq_policy_notifier_list, nb);
1615 break;
1616 default:
1617 ret = -EINVAL;
1618 }
1619
1620 return ret;
1621 }
1622 EXPORT_SYMBOL(cpufreq_unregister_notifier);
1623
1624
1625 /*********************************************************************
1626 * GOVERNORS *
1627 *********************************************************************/
1628
1629 int __cpufreq_driver_target(struct cpufreq_policy *policy,
1630 unsigned int target_freq,
1631 unsigned int relation)
1632 {
1633 int retval = -EINVAL;
1634 unsigned int old_target_freq = target_freq;
1635
1636 if (cpufreq_disabled())
1637 return -ENODEV;
1638 if (policy->transition_ongoing)
1639 return -EBUSY;
1640
1641 /* Make sure that target_freq is within supported range */
1642 if (target_freq > policy->max)
1643 target_freq = policy->max;
1644 if (target_freq < policy->min)
1645 target_freq = policy->min;
1646
1647 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1648 policy->cpu, target_freq, relation, old_target_freq);
1649
1650 if (target_freq == policy->cur)
1651 return 0;
1652
1653 if (cpufreq_driver->target)
1654 retval = cpufreq_driver->target(policy, target_freq, relation);
1655
1656 return retval;
1657 }
1658 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1659
1660 int cpufreq_driver_target(struct cpufreq_policy *policy,
1661 unsigned int target_freq,
1662 unsigned int relation)
1663 {
1664 int ret = -EINVAL;
1665
1666 if (unlikely(lock_policy_rwsem_write(policy->cpu)))
1667 goto fail;
1668
1669 ret = __cpufreq_driver_target(policy, target_freq, relation);
1670
1671 unlock_policy_rwsem_write(policy->cpu);
1672
1673 fail:
1674 return ret;
1675 }
1676 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
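/*
 * Illustrative sketch (not in the original file): governors request a new
 * frequency through cpufreq_driver_target(), which takes the policy rwsem and
 * then clamps the request to policy->min/max before calling the driver:
 *
 *	ret = cpufreq_driver_target(policy, desired_khz, CPUFREQ_RELATION_L);
 *	if (ret)
 *		pr_debug("frequency change to %u kHz failed: %d\n",
 *			 desired_khz, ret);
 *
 * CPUFREQ_RELATION_L selects the lowest frequency at or above the target,
 * CPUFREQ_RELATION_H the highest frequency at or below it.
 */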
1677
1678 /*
1679 * when "event" is CPUFREQ_GOV_LIMITS
1680 */
1681
1682 static int __cpufreq_governor(struct cpufreq_policy *policy,
1683 unsigned int event)
1684 {
1685 int ret;
1686
1687         /* Must only be defined when the default governor is known to have latency
1688            restrictions, e.g. conservative or ondemand.
1689 That this is the case is already ensured in Kconfig
1690 */
1691 #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1692 struct cpufreq_governor *gov = &cpufreq_gov_performance;
1693 #else
1694 struct cpufreq_governor *gov = NULL;
1695 #endif
1696
1697 if (policy->governor->max_transition_latency &&
1698 policy->cpuinfo.transition_latency >
1699 policy->governor->max_transition_latency) {
1700 if (!gov)
1701 return -EINVAL;
1702 else {
1703 printk(KERN_WARNING "%s governor failed, too long"
1704 " transition latency of HW, fallback"
1705 " to %s governor\n",
1706 policy->governor->name,
1707 gov->name);
1708 policy->governor = gov;
1709 }
1710 }
1711
1712 if (event == CPUFREQ_GOV_POLICY_INIT)
1713 if (!try_module_get(policy->governor->owner))
1714 return -EINVAL;
1715
1716 pr_debug("__cpufreq_governor for CPU %u, event %u\n",
1717 policy->cpu, event);
1718
1719 mutex_lock(&cpufreq_governor_lock);
1720 if ((!policy->governor_enabled && (event == CPUFREQ_GOV_STOP)) ||
1721 (policy->governor_enabled && (event == CPUFREQ_GOV_START))) {
1722 mutex_unlock(&cpufreq_governor_lock);
1723 if (event == CPUFREQ_GOV_POLICY_INIT)
1724 module_put(policy->governor->owner);
1725 return -EBUSY;
1726 }
1727
1728 if (event == CPUFREQ_GOV_STOP)
1729 policy->governor_enabled = false;
1730 else if (event == CPUFREQ_GOV_START)
1731 policy->governor_enabled = true;
1732
1733 mutex_unlock(&cpufreq_governor_lock);
1734
1735 ret = policy->governor->governor(policy, event);
1736
1737 if (!ret) {
1738 if (event == CPUFREQ_GOV_POLICY_INIT)
1739 policy->governor->initialized++;
1740 else if (event == CPUFREQ_GOV_POLICY_EXIT)
1741 policy->governor->initialized--;
1742 } else {
1743 /* Restore original values */
1744 mutex_lock(&cpufreq_governor_lock);
1745 if (event == CPUFREQ_GOV_STOP)
1746 policy->governor_enabled = true;
1747 else if (event == CPUFREQ_GOV_START)
1748 policy->governor_enabled = false;
1749 mutex_unlock(&cpufreq_governor_lock);
1750 }
1751
1752 if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
1753 ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
1754 module_put(policy->governor->owner);
1755
1756 return ret;
1757 }
1758
1759 int cpufreq_register_governor(struct cpufreq_governor *governor)
1760 {
1761 int err;
1762
1763 if (!governor)
1764 return -EINVAL;
1765
1766 if (cpufreq_disabled())
1767 return -ENODEV;
1768
1769 mutex_lock(&cpufreq_governor_mutex);
1770
1771 governor->initialized = 0;
1772 err = -EBUSY;
1773 if (__find_governor(governor->name) == NULL) {
1774 err = 0;
1775 list_add(&governor->governor_list, &cpufreq_governor_list);
1776 }
1777
1778 mutex_unlock(&cpufreq_governor_mutex);
1779 return err;
1780 }
1781 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
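/*
 * Illustrative sketch (not in the original file): the skeleton of a governor
 * as registered here. The ->governor() callback is invoked by
 * __cpufreq_governor() with the CPUFREQ_GOV_* events used throughout this
 * file; sample_governor_fn is a hypothetical handler that simply pins the
 * policy to its maximum, similar to the performance governor:
 *
 *	static int sample_governor_fn(struct cpufreq_policy *policy,
 *				      unsigned int event)
 *	{
 *		switch (event) {
 *		case CPUFREQ_GOV_START:
 *		case CPUFREQ_GOV_LIMITS:
 *			return __cpufreq_driver_target(policy, policy->max,
 *						       CPUFREQ_RELATION_H);
 *		default:
 *			return 0;
 *		}
 *	}
 *
 *	static struct cpufreq_governor sample_gov = {
 *		.name		= "sample",
 *		.governor	= sample_governor_fn,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	... cpufreq_register_governor(&sample_gov);
 */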
1782
1783 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1784 {
1785 #ifdef CONFIG_HOTPLUG_CPU
1786 int cpu;
1787 #endif
1788
1789 if (!governor)
1790 return;
1791
1792 if (cpufreq_disabled())
1793 return;
1794
1795 #ifdef CONFIG_HOTPLUG_CPU
1796 for_each_present_cpu(cpu) {
1797 if (cpu_online(cpu))
1798 continue;
1799 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1800 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1801 }
1802 #endif
1803
1804 mutex_lock(&cpufreq_governor_mutex);
1805 list_del(&governor->governor_list);
1806 mutex_unlock(&cpufreq_governor_mutex);
1807 return;
1808 }
1809 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1810
1811
1812 /*********************************************************************
1813 * POLICY INTERFACE *
1814 *********************************************************************/
1815
1816 /**
1817 * cpufreq_get_policy - get the current cpufreq_policy
1818 * @policy: struct cpufreq_policy into which the current cpufreq_policy
1819 * is written
1820 *
1821 * Reads the current cpufreq policy.
1822 */
1823 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1824 {
1825 struct cpufreq_policy *cpu_policy;
1826 if (!policy)
1827 return -EINVAL;
1828
1829 cpu_policy = cpufreq_cpu_get(cpu);
1830 if (!cpu_policy)
1831 return -EINVAL;
1832
1833 memcpy(policy, cpu_policy, sizeof(*policy));
1834
1835 cpufreq_cpu_put(cpu_policy);
1836 return 0;
1837 }
1838 EXPORT_SYMBOL(cpufreq_get_policy);
1839
1840 /*
1841  * policy : current policy.
1842  * new_policy : policy to be set.
1843 */
1844 static int __cpufreq_set_policy(struct cpufreq_policy *policy,
1845 struct cpufreq_policy *new_policy)
1846 {
1847 int ret = 0, failed = 1;
1848
1849 pr_debug("setting new policy for CPU %u: %u - %u kHz\n", new_policy->cpu,
1850 new_policy->min, new_policy->max);
1851
1852 memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
1853
1854 if (new_policy->min > policy->max || new_policy->max < policy->min) {
1855 ret = -EINVAL;
1856 goto error_out;
1857 }
1858
1859 /* verify the cpu speed can be set within this limit */
1860 ret = cpufreq_driver->verify(new_policy);
1861 if (ret)
1862 goto error_out;
1863
1864 /* adjust if necessary - all reasons */
1865 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1866 CPUFREQ_ADJUST, new_policy);
1867
1868 /* adjust if necessary - hardware incompatibility*/
1869 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1870 CPUFREQ_INCOMPATIBLE, new_policy);
1871
1872 /*
1873 * verify the cpu speed can be set within this limit, which might be
1874 * different to the first one
1875 */
1876 ret = cpufreq_driver->verify(new_policy);
1877 if (ret)
1878 goto error_out;
1879
1880 /* notification of the new policy */
1881 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1882 CPUFREQ_NOTIFY, new_policy);
1883
1884 policy->min = new_policy->min;
1885 policy->max = new_policy->max;
1886
1887 pr_debug("new min and max freqs are %u - %u kHz\n",
1888 policy->min, policy->max);
1889
1890 if (cpufreq_driver->setpolicy) {
1891 policy->policy = new_policy->policy;
1892 pr_debug("setting range\n");
1893 ret = cpufreq_driver->setpolicy(new_policy);
1894 } else {
1895 if (new_policy->governor != policy->governor) {
1896 /* save old, working values */
1897 struct cpufreq_governor *old_gov = policy->governor;
1898
1899 pr_debug("governor switch\n");
1900
1901 /* end old governor */
1902 if (policy->governor) {
1903 __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1904 unlock_policy_rwsem_write(new_policy->cpu);
1905 __cpufreq_governor(policy,
1906 CPUFREQ_GOV_POLICY_EXIT);
1907 lock_policy_rwsem_write(new_policy->cpu);
1908 }
1909
1910 /* start new governor */
1911 policy->governor = new_policy->governor;
1912 if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
1913 if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) {
1914 failed = 0;
1915 } else {
1916 unlock_policy_rwsem_write(new_policy->cpu);
1917 __cpufreq_governor(policy,
1918 CPUFREQ_GOV_POLICY_EXIT);
1919 lock_policy_rwsem_write(new_policy->cpu);
1920 }
1921 }
1922
1923 if (failed) {
1924 /* new governor failed, so re-start old one */
1925 pr_debug("starting governor %s failed\n",
1926 policy->governor->name);
1927 if (old_gov) {
1928 policy->governor = old_gov;
1929 __cpufreq_governor(policy,
1930 CPUFREQ_GOV_POLICY_INIT);
1931 __cpufreq_governor(policy,
1932 CPUFREQ_GOV_START);
1933 }
1934 ret = -EINVAL;
1935 goto error_out;
1936 }
1937 /* might be a policy change, too, so fall through */
1938 }
1939 pr_debug("governor: change or update limits\n");
1940 ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
1941 }
1942
1943 error_out:
1944 return ret;
1945 }
1946
1947 /**
1948 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
1949 * @cpu: CPU which shall be re-evaluated
1950 *
1951 * Useful for policy notifiers which have different necessities
1952 * at different times.
1953 */
1954 int cpufreq_update_policy(unsigned int cpu)
1955 {
1956 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1957 struct cpufreq_policy new_policy;
1958 int ret;
1959
1960 if (!policy) {
1961 ret = -ENODEV;
1962 goto no_policy;
1963 }
1964
1965 if (unlikely(lock_policy_rwsem_write(cpu))) {
1966 ret = -EINVAL;
1967 goto fail;
1968 }
1969
1970 pr_debug("updating policy for CPU %u\n", cpu);
1971 memcpy(&new_policy, policy, sizeof(*policy));
1972 new_policy.min = policy->user_policy.min;
1973 new_policy.max = policy->user_policy.max;
1974 new_policy.policy = policy->user_policy.policy;
1975 new_policy.governor = policy->user_policy.governor;
1976
1977 /*
1978 * BIOS might change freq behind our back
1979 * -> ask driver for current freq and notify governors about a change
1980 */
1981 if (cpufreq_driver->get) {
1982 new_policy.cur = cpufreq_driver->get(cpu);
1983 if (!policy->cur) {
1984 pr_debug("Driver did not initialize current freq");
1985 policy->cur = new_policy.cur;
1986 } else {
1987 if (policy->cur != new_policy.cur && cpufreq_driver->target)
1988 cpufreq_out_of_sync(cpu, policy->cur,
1989 new_policy.cur);
1990 }
1991 }
1992
1993 ret = __cpufreq_set_policy(policy, &new_policy);
1994
1995 unlock_policy_rwsem_write(cpu);
1996
1997 fail:
1998 cpufreq_cpu_put(policy);
1999 no_policy:
2000 return ret;
2001 }
2002 EXPORT_SYMBOL(cpufreq_update_policy);
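/*
 * Illustrative sketch (not in the original file): platform code that learns
 * about an external limit change (e.g. a BIOS/ACPI notification) can simply
 * re-evaluate the policy of each affected CPU:
 *
 *	for_each_online_cpu(cpu)
 *		cpufreq_update_policy(cpu);
 */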
2003
2004 static int cpufreq_cpu_callback(struct notifier_block *nfb,
2005 unsigned long action, void *hcpu)
2006 {
2007 unsigned int cpu = (unsigned long)hcpu;
2008 struct device *dev;
2009 bool frozen = false;
2010
2011 dev = get_cpu_device(cpu);
2012 if (dev) {
2013
2014 if (action & CPU_TASKS_FROZEN)
2015 frozen = true;
2016
2017 switch (action & ~CPU_TASKS_FROZEN) {
2018 case CPU_ONLINE:
2019 __cpufreq_add_dev(dev, NULL, frozen);
2020 cpufreq_update_policy(cpu);
2021 break;
2022
2023 case CPU_DOWN_PREPARE:
2024 __cpufreq_remove_dev(dev, NULL, frozen);
2025 break;
2026
2027 case CPU_DOWN_FAILED:
2028 __cpufreq_add_dev(dev, NULL, frozen);
2029 break;
2030 }
2031 }
2032 return NOTIFY_OK;
2033 }
2034
2035 static struct notifier_block __refdata cpufreq_cpu_notifier = {
2036 .notifier_call = cpufreq_cpu_callback,
2037 };
2038
2039 /*********************************************************************
2040 * REGISTER / UNREGISTER CPUFREQ DRIVER *
2041 *********************************************************************/
2042
2043 /**
2044 * cpufreq_register_driver - register a CPU Frequency driver
2045  * @driver_data: A struct cpufreq_driver containing the values
2046 * submitted by the CPU Frequency driver.
2047 *
2048 * Registers a CPU Frequency driver to this core code. This code
2049 * returns zero on success, -EBUSY when another driver got here first
2050 * (and isn't unregistered in the meantime).
2051 *
2052 */
2053 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2054 {
2055 unsigned long flags;
2056 int ret;
2057
2058 if (cpufreq_disabled())
2059 return -ENODEV;
2060
2061 if (!driver_data || !driver_data->verify || !driver_data->init ||
2062 ((!driver_data->setpolicy) && (!driver_data->target)))
2063 return -EINVAL;
2064
2065 pr_debug("trying to register driver %s\n", driver_data->name);
2066
2067 if (driver_data->setpolicy)
2068 driver_data->flags |= CPUFREQ_CONST_LOOPS;
2069
2070 write_lock_irqsave(&cpufreq_driver_lock, flags);
2071 if (cpufreq_driver) {
2072 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2073 return -EBUSY;
2074 }
2075 cpufreq_driver = driver_data;
2076 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2077
2078 ret = subsys_interface_register(&cpufreq_interface);
2079 if (ret)
2080 goto err_null_driver;
2081
2082 if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
2083 int i;
2084 ret = -ENODEV;
2085
2086 /* check for at least one working CPU */
2087 for (i = 0; i < nr_cpu_ids; i++)
2088 if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
2089 ret = 0;
2090 break;
2091 }
2092
2093 /* if all ->init() calls failed, unregister */
2094 if (ret) {
2095 pr_debug("no CPU initialized for driver %s\n",
2096 driver_data->name);
2097 goto err_if_unreg;
2098 }
2099 }
2100
2101 register_hotcpu_notifier(&cpufreq_cpu_notifier);
2102 pr_debug("driver %s up and running\n", driver_data->name);
2103
2104 return 0;
2105 err_if_unreg:
2106 subsys_interface_unregister(&cpufreq_interface);
2107 err_null_driver:
2108 write_lock_irqsave(&cpufreq_driver_lock, flags);
2109 cpufreq_driver = NULL;
2110 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2111 return ret;
2112 }
2113 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
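/*
 * Illustrative sketch (not in the original file): the minimum a ->target()
 * style driver registers, typically from its module init. All callbacks shown
 * (sample_init, sample_verify, sample_target) are hypothetical:
 *
 *	static struct cpufreq_driver sample_cpufreq_driver = {
 *		.name	= "sample",
 *		.init	= sample_init,
 *		.verify	= sample_verify,
 *		.target	= sample_target,
 *	};
 *
 *	static int __init sample_cpufreq_register(void)
 *	{
 *		return cpufreq_register_driver(&sample_cpufreq_driver);
 *	}
 */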
2114
2115 /**
2116 * cpufreq_unregister_driver - unregister the current CPUFreq driver
2117 *
2118 * Unregister the current CPUFreq driver. Only call this if you have
2119 * the right to do so, i.e. if you have succeeded in initialising before!
2120 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2121 * currently not initialised.
2122 */
2123 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
2124 {
2125 unsigned long flags;
2126
2127 if (!cpufreq_driver || (driver != cpufreq_driver))
2128 return -EINVAL;
2129
2130 pr_debug("unregistering driver %s\n", driver->name);
2131
2132 subsys_interface_unregister(&cpufreq_interface);
2133 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
2134
2135 down_write(&cpufreq_rwsem);
2136 write_lock_irqsave(&cpufreq_driver_lock, flags);
2137
2138 cpufreq_driver = NULL;
2139
2140 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2141 up_write(&cpufreq_rwsem);
2142
2143 return 0;
2144 }
2145 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
2146
2147 static int __init cpufreq_core_init(void)
2148 {
2149 int cpu;
2150
2151 if (cpufreq_disabled())
2152 return -ENODEV;
2153
2154 for_each_possible_cpu(cpu) {
2155 per_cpu(cpufreq_policy_cpu, cpu) = -1;
2156 init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
2157 }
2158
2159 cpufreq_global_kobject = kobject_create();
2160 BUG_ON(!cpufreq_global_kobject);
2161 register_syscore_ops(&cpufreq_syscore_ops);
2162
2163 return 0;
2164 }
2165 core_initcall(cpufreq_core_init);