cpufreq: Use signed type for 'ret' variable, to store negative error values
1 /*
2 * linux/drivers/cpufreq/cpufreq.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
7 *
8 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
9 * Added handling for CPU hotplug
10 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
11 * Fix handling for CPU hotplug -- affected CPUs
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
16 */
17
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
20 #include <linux/cpu.h>
21 #include <linux/cpufreq.h>
22 #include <linux/delay.h>
23 #include <linux/device.h>
24 #include <linux/init.h>
25 #include <linux/kernel_stat.h>
26 #include <linux/module.h>
27 #include <linux/mutex.h>
28 #include <linux/slab.h>
29 #include <linux/syscore_ops.h>
30 #include <linux/tick.h>
31 #include <trace/events/power.h>
32
33 /**
34 * The "cpufreq driver" - the arch- or hardware-dependent low
35 * level driver of CPUFreq support, and its spinlock. This lock
36 * also protects the cpufreq_cpu_data array.
37 */
38 static struct cpufreq_driver *cpufreq_driver;
39 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
40 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
41 static DEFINE_RWLOCK(cpufreq_driver_lock);
42 static DEFINE_MUTEX(cpufreq_governor_lock);
43 static LIST_HEAD(cpufreq_policy_list);
44
45 #ifdef CONFIG_HOTPLUG_CPU
46 /* This one keeps track of the previously set governor of a removed CPU */
47 static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
48 #endif
49
50 /*
51 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
52 * all cpufreq/hotplug/workqueue/etc related lock issues.
53 *
54 * The rules for this semaphore:
55 * - Any routine that wants to read from the policy structure will
56 * do a down_read on this semaphore.
57 * - Any routine that will write to the policy structure and/or may take away
58 * the policy altogether (eg. CPU hotplug), will hold this lock in write
59 * mode before doing so.
60 *
61 * Additional rules:
62 * - Governor routines that can be called in cpufreq hotplug path should not
63 * take this sem as top level hotplug notifier handler takes this.
64 * - Lock should not be held across
65 * __cpufreq_governor(data, CPUFREQ_GOV_STOP);
66 */
67 static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
68
69 #define lock_policy_rwsem(mode, cpu) \
70 static int lock_policy_rwsem_##mode(int cpu) \
71 { \
72 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); \
73 BUG_ON(!policy); \
74 down_##mode(&per_cpu(cpu_policy_rwsem, policy->cpu)); \
75 \
76 return 0; \
77 }
78
79 lock_policy_rwsem(read, cpu);
80 lock_policy_rwsem(write, cpu);
81
82 #define unlock_policy_rwsem(mode, cpu) \
83 static void unlock_policy_rwsem_##mode(int cpu) \
84 { \
85 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); \
86 BUG_ON(!policy); \
87 up_##mode(&per_cpu(cpu_policy_rwsem, policy->cpu)); \
88 }
89
90 unlock_policy_rwsem(read, cpu);
91 unlock_policy_rwsem(write, cpu);
92
93 /*
94 * rwsem to guarantee that cpufreq driver module doesn't unload during critical
95 * sections
96 */
97 static DECLARE_RWSEM(cpufreq_rwsem);
98
99 /* internal prototypes */
100 static int __cpufreq_governor(struct cpufreq_policy *policy,
101 unsigned int event);
102 static unsigned int __cpufreq_get(unsigned int cpu);
103 static void handle_update(struct work_struct *work);
104
105 /**
106 * Two notifier lists: the "policy" list is involved in the
107 * validation process for a new CPU frequency policy; the
108 * "transition" list for kernel code that needs to handle
109 * changes to devices when the CPU clock speed changes.
110 * The mutex locks both lists.
111 */
112 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
113 static struct srcu_notifier_head cpufreq_transition_notifier_list;
114
115 static bool init_cpufreq_transition_notifier_list_called;
116 static int __init init_cpufreq_transition_notifier_list(void)
117 {
118 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
119 init_cpufreq_transition_notifier_list_called = true;
120 return 0;
121 }
122 pure_initcall(init_cpufreq_transition_notifier_list);
123
124 static int off __read_mostly;
125 static int cpufreq_disabled(void)
126 {
127 return off;
128 }
129 void disable_cpufreq(void)
130 {
131 off = 1;
132 }
133 static LIST_HEAD(cpufreq_governor_list);
134 static DEFINE_MUTEX(cpufreq_governor_mutex);
135
136 bool have_governor_per_policy(void)
137 {
138 return cpufreq_driver->have_governor_per_policy;
139 }
140 EXPORT_SYMBOL_GPL(have_governor_per_policy);
141
142 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
143 {
144 if (have_governor_per_policy())
145 return &policy->kobj;
146 else
147 return cpufreq_global_kobject;
148 }
149 EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
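/*
 * Illustrative sketch (not part of this file): a governor that exposes
 * per-policy tunables would typically anchor its sysfs group on the
 * kobject returned above, e.g.
 *
 *	ret = sysfs_create_group(get_governor_parent_kobj(policy),
 *				 &my_gov_attr_group);
 *
 * With have_governor_per_policy() the group lands under the policy
 * directory, otherwise under the global cpufreq kobject. The attribute
 * group name above is hypothetical.
 */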
150
151 static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
152 {
153 u64 idle_time;
154 u64 cur_wall_time;
155 u64 busy_time;
156
157 cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
158
159 busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
160 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
161 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
162 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
163 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
164 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
165
166 idle_time = cur_wall_time - busy_time;
167 if (wall)
168 *wall = cputime_to_usecs(cur_wall_time);
169
170 return cputime_to_usecs(idle_time);
171 }
172
173 u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
174 {
175 u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
176
177 if (idle_time == -1ULL)
178 return get_cpu_idle_time_jiffy(cpu, wall);
179 else if (!io_busy)
180 idle_time += get_cpu_iowait_time_us(cpu, wall);
181
182 return idle_time;
183 }
184 EXPORT_SYMBOL_GPL(get_cpu_idle_time);
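/*
 * Illustrative sketch (not part of this file): sampling governors derive
 * CPU load from the delta of the values returned here, roughly:
 *
 *	cur_idle  = get_cpu_idle_time(cpu, &cur_wall, io_busy);
 *	idle_time = cur_idle - prev_idle;
 *	wall_time = cur_wall - prev_wall;
 *	load = 100 * (wall_time - idle_time) / wall_time;
 *
 * prev_idle/prev_wall stand for per-CPU state kept by the governor; the
 * names are illustrative only.
 */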
185
186 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
187 {
188 struct cpufreq_policy *policy = NULL;
189 unsigned long flags;
190
191 if (cpufreq_disabled() || (cpu >= nr_cpu_ids))
192 return NULL;
193
194 if (!down_read_trylock(&cpufreq_rwsem))
195 return NULL;
196
197 /* get the cpufreq driver */
198 read_lock_irqsave(&cpufreq_driver_lock, flags);
199
200 if (cpufreq_driver) {
201 /* get the CPU */
202 policy = per_cpu(cpufreq_cpu_data, cpu);
203 if (policy)
204 kobject_get(&policy->kobj);
205 }
206
207 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
208
209 if (!policy)
210 up_read(&cpufreq_rwsem);
211
212 return policy;
213 }
214 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
215
216 void cpufreq_cpu_put(struct cpufreq_policy *policy)
217 {
218 if (cpufreq_disabled())
219 return;
220
221 kobject_put(&policy->kobj);
222 up_read(&cpufreq_rwsem);
223 }
224 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
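/*
 * Illustrative sketch (not part of this file): callers must pair get/put
 * so that both the policy kobject reference and the cpufreq_rwsem read
 * lock taken in cpufreq_cpu_get() are released:
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *	if (policy) {
 *		pr_info("cpu%u runs at %u kHz\n", cpu, policy->cur);
 *		cpufreq_cpu_put(policy);
 *	}
 */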
225
226 /*********************************************************************
227 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
228 *********************************************************************/
229
230 /**
231 * adjust_jiffies - adjust the system "loops_per_jiffy"
232 *
233 * This function alters the system "loops_per_jiffy" for the clock
234 * speed change. Note that loops_per_jiffy cannot be updated on SMP
235 * systems as each CPU might be scaled differently. So, use the arch
236 * per-CPU loops_per_jiffy value wherever possible.
237 */
238 #ifndef CONFIG_SMP
239 static unsigned long l_p_j_ref;
240 static unsigned int l_p_j_ref_freq;
241
242 static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
243 {
244 if (ci->flags & CPUFREQ_CONST_LOOPS)
245 return;
246
247 if (!l_p_j_ref_freq) {
248 l_p_j_ref = loops_per_jiffy;
249 l_p_j_ref_freq = ci->old;
250 pr_debug("saving %lu as reference value for loops_per_jiffy; "
251 "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
252 }
253 if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
254 (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
255 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
256 ci->new);
257 pr_debug("scaling loops_per_jiffy to %lu "
258 "for frequency %u kHz\n", loops_per_jiffy, ci->new);
259 }
260 }
261 #else
262 static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
263 {
264 return;
265 }
266 #endif
267
268 static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
269 struct cpufreq_freqs *freqs, unsigned int state)
270 {
271 BUG_ON(irqs_disabled());
272
273 if (cpufreq_disabled())
274 return;
275
276 freqs->flags = cpufreq_driver->flags;
277 pr_debug("notification %u of frequency transition to %u kHz\n",
278 state, freqs->new);
279
280 switch (state) {
281
282 case CPUFREQ_PRECHANGE:
283 if (WARN(policy->transition_ongoing ==
284 cpumask_weight(policy->cpus),
285 "In middle of another frequency transition\n"))
286 return;
287
288 policy->transition_ongoing++;
289
290 /* detect if the driver reported a value as "old frequency"
291 * which is not equal to what the cpufreq core thinks is
292 * "old frequency".
293 */
294 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
295 if ((policy) && (policy->cpu == freqs->cpu) &&
296 (policy->cur) && (policy->cur != freqs->old)) {
297 pr_debug("Warning: CPU frequency is"
298 " %u, cpufreq assumed %u kHz.\n",
299 freqs->old, policy->cur);
300 freqs->old = policy->cur;
301 }
302 }
303 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
304 CPUFREQ_PRECHANGE, freqs);
305 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
306 break;
307
308 case CPUFREQ_POSTCHANGE:
309 if (WARN(!policy->transition_ongoing,
310 "No frequency transition in progress\n"))
311 return;
312
313 policy->transition_ongoing--;
314
315 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
316 pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
317 (unsigned long)freqs->cpu);
318 trace_cpu_frequency(freqs->new, freqs->cpu);
319 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
320 CPUFREQ_POSTCHANGE, freqs);
321 if (likely(policy) && likely(policy->cpu == freqs->cpu))
322 policy->cur = freqs->new;
323 break;
324 }
325 }
326
327 /**
328 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
329 * on frequency transition.
330 *
331 * This function calls the transition notifiers and the "adjust_jiffies"
332 * function. It is called twice on all CPU frequency changes that have
333 * external effects.
334 */
335 void cpufreq_notify_transition(struct cpufreq_policy *policy,
336 struct cpufreq_freqs *freqs, unsigned int state)
337 {
338 for_each_cpu(freqs->cpu, policy->cpus)
339 __cpufreq_notify_transition(policy, freqs, state);
340 }
341 EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
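/*
 * Illustrative sketch (not part of this file): a typical ->target()
 * implementation brackets the hardware frequency write with the two
 * notifications handled above:
 *
 *	struct cpufreq_freqs freqs;
 *
 *	freqs.old = policy->cur;
 *	freqs.new = new_freq;
 *	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 *	(program the new frequency in hardware)
 *	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 *
 * new_freq and the hardware write are placeholders for driver specifics.
 */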
342
343
344 /*********************************************************************
345 * SYSFS INTERFACE *
346 *********************************************************************/
347
348 static struct cpufreq_governor *__find_governor(const char *str_governor)
349 {
350 struct cpufreq_governor *t;
351
352 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
353 if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
354 return t;
355
356 return NULL;
357 }
358
359 /**
360 * cpufreq_parse_governor - parse a governor string
361 */
362 static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
363 struct cpufreq_governor **governor)
364 {
365 int err = -EINVAL;
366
367 if (!cpufreq_driver)
368 goto out;
369
370 if (cpufreq_driver->setpolicy) {
371 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
372 *policy = CPUFREQ_POLICY_PERFORMANCE;
373 err = 0;
374 } else if (!strnicmp(str_governor, "powersave",
375 CPUFREQ_NAME_LEN)) {
376 *policy = CPUFREQ_POLICY_POWERSAVE;
377 err = 0;
378 }
379 } else if (cpufreq_driver->target) {
380 struct cpufreq_governor *t;
381
382 mutex_lock(&cpufreq_governor_mutex);
383
384 t = __find_governor(str_governor);
385
386 if (t == NULL) {
387 int ret;
388
389 mutex_unlock(&cpufreq_governor_mutex);
390 ret = request_module("cpufreq_%s", str_governor);
391 mutex_lock(&cpufreq_governor_mutex);
392
393 if (ret == 0)
394 t = __find_governor(str_governor);
395 }
396
397 if (t != NULL) {
398 *governor = t;
399 err = 0;
400 }
401
402 mutex_unlock(&cpufreq_governor_mutex);
403 }
404 out:
405 return err;
406 }
407
408 /**
409 * cpufreq_per_cpu_attr_read() / show_##file_name() -
410 * print out cpufreq information
411 *
412 * Write out information from cpufreq_driver->policy[cpu]; object must be
413 * "unsigned int".
414 */
415
416 #define show_one(file_name, object) \
417 static ssize_t show_##file_name \
418 (struct cpufreq_policy *policy, char *buf) \
419 { \
420 return sprintf(buf, "%u\n", policy->object); \
421 }
422
423 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
424 show_one(cpuinfo_max_freq, cpuinfo.max_freq);
425 show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
426 show_one(scaling_min_freq, min);
427 show_one(scaling_max_freq, max);
428 show_one(scaling_cur_freq, cur);
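/*
 * For reference, show_one(scaling_min_freq, min) above expands to roughly
 * the following (sketch, whitespace adjusted):
 *
 *	static ssize_t show_scaling_min_freq(struct cpufreq_policy *policy,
 *					     char *buf)
 *	{
 *		return sprintf(buf, "%u\n", policy->min);
 *	}
 */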
429
430 static int __cpufreq_set_policy(struct cpufreq_policy *policy,
431 struct cpufreq_policy *new_policy);
432
433 /**
434 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
435 */
436 #define store_one(file_name, object) \
437 static ssize_t store_##file_name \
438 (struct cpufreq_policy *policy, const char *buf, size_t count) \
439 { \
440 int ret; \
441 struct cpufreq_policy new_policy; \
442 \
443 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
444 if (ret) \
445 return -EINVAL; \
446 \
447 ret = sscanf(buf, "%u", &new_policy.object); \
448 if (ret != 1) \
449 return -EINVAL; \
450 \
451 ret = __cpufreq_set_policy(policy, &new_policy); \
452 policy->user_policy.object = policy->object; \
453 \
454 return ret ? ret : count; \
455 }
456
457 store_one(scaling_min_freq, min);
458 store_one(scaling_max_freq, max);
459
460 /**
461 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
462 */
463 static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
464 char *buf)
465 {
466 unsigned int cur_freq = __cpufreq_get(policy->cpu);
467 if (!cur_freq)
468 return sprintf(buf, "<unknown>");
469 return sprintf(buf, "%u\n", cur_freq);
470 }
471
472 /**
473 * show_scaling_governor - show the current policy for the specified CPU
474 */
475 static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
476 {
477 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
478 return sprintf(buf, "powersave\n");
479 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
480 return sprintf(buf, "performance\n");
481 else if (policy->governor)
482 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
483 policy->governor->name);
484 return -EINVAL;
485 }
486
487 /**
488 * store_scaling_governor - store policy for the specified CPU
489 */
490 static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
491 const char *buf, size_t count)
492 {
493 int ret;
494 char str_governor[16];
495 struct cpufreq_policy new_policy;
496
497 ret = cpufreq_get_policy(&new_policy, policy->cpu);
498 if (ret)
499 return ret;
500
501 ret = sscanf(buf, "%15s", str_governor);
502 if (ret != 1)
503 return -EINVAL;
504
505 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
506 &new_policy.governor))
507 return -EINVAL;
508
509 /*
510 * Do not use cpufreq_set_policy here or the user_policy.max
511 * will be wrongly overridden
512 */
513 ret = __cpufreq_set_policy(policy, &new_policy);
514
515 policy->user_policy.policy = policy->policy;
516 policy->user_policy.governor = policy->governor;
517
518 if (ret)
519 return ret;
520 else
521 return count;
522 }
523
524 /**
525 * show_scaling_driver - show the cpufreq driver currently loaded
526 */
527 static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
528 {
529 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
530 }
531
532 /**
533 * show_scaling_available_governors - show the available CPUfreq governors
534 */
535 static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
536 char *buf)
537 {
538 ssize_t i = 0;
539 struct cpufreq_governor *t;
540
541 if (!cpufreq_driver->target) {
542 i += sprintf(buf, "performance powersave");
543 goto out;
544 }
545
546 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
547 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
548 - (CPUFREQ_NAME_LEN + 2)))
549 goto out;
550 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
551 }
552 out:
553 i += sprintf(&buf[i], "\n");
554 return i;
555 }
556
557 ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
558 {
559 ssize_t i = 0;
560 unsigned int cpu;
561
562 for_each_cpu(cpu, mask) {
563 if (i)
564 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
565 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
566 if (i >= (PAGE_SIZE - 5))
567 break;
568 }
569 i += sprintf(&buf[i], "\n");
570 return i;
571 }
572 EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
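/*
 * Example output (sketch): for a mask covering CPUs 0-3 the buffer ends up
 * as "0 1 2 3\n", which is what affected_cpus/related_cpus show in sysfs.
 */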
573
574 /**
575 * show_related_cpus - show the CPUs affected by each transition even if
576 * hw coordination is in use
577 */
578 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
579 {
580 return cpufreq_show_cpus(policy->related_cpus, buf);
581 }
582
583 /**
584 * show_affected_cpus - show the CPUs affected by each transition
585 */
586 static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
587 {
588 return cpufreq_show_cpus(policy->cpus, buf);
589 }
590
591 static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
592 const char *buf, size_t count)
593 {
594 unsigned int freq = 0;
595 unsigned int ret;
596
597 if (!policy->governor || !policy->governor->store_setspeed)
598 return -EINVAL;
599
600 ret = sscanf(buf, "%u", &freq);
601 if (ret != 1)
602 return -EINVAL;
603
604 policy->governor->store_setspeed(policy, freq);
605
606 return count;
607 }
608
609 static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
610 {
611 if (!policy->governor || !policy->governor->show_setspeed)
612 return sprintf(buf, "<unsupported>\n");
613
614 return policy->governor->show_setspeed(policy, buf);
615 }
616
617 /**
618 * show_bios_limit - show the current cpufreq HW/BIOS limitation
619 */
620 static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
621 {
622 unsigned int limit;
623 int ret;
624 if (cpufreq_driver->bios_limit) {
625 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
626 if (!ret)
627 return sprintf(buf, "%u\n", limit);
628 }
629 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
630 }
631
632 cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
633 cpufreq_freq_attr_ro(cpuinfo_min_freq);
634 cpufreq_freq_attr_ro(cpuinfo_max_freq);
635 cpufreq_freq_attr_ro(cpuinfo_transition_latency);
636 cpufreq_freq_attr_ro(scaling_available_governors);
637 cpufreq_freq_attr_ro(scaling_driver);
638 cpufreq_freq_attr_ro(scaling_cur_freq);
639 cpufreq_freq_attr_ro(bios_limit);
640 cpufreq_freq_attr_ro(related_cpus);
641 cpufreq_freq_attr_ro(affected_cpus);
642 cpufreq_freq_attr_rw(scaling_min_freq);
643 cpufreq_freq_attr_rw(scaling_max_freq);
644 cpufreq_freq_attr_rw(scaling_governor);
645 cpufreq_freq_attr_rw(scaling_setspeed);
646
647 static struct attribute *default_attrs[] = {
648 &cpuinfo_min_freq.attr,
649 &cpuinfo_max_freq.attr,
650 &cpuinfo_transition_latency.attr,
651 &scaling_min_freq.attr,
652 &scaling_max_freq.attr,
653 &affected_cpus.attr,
654 &related_cpus.attr,
655 &scaling_governor.attr,
656 &scaling_driver.attr,
657 &scaling_available_governors.attr,
658 &scaling_setspeed.attr,
659 NULL
660 };
661
662 #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
663 #define to_attr(a) container_of(a, struct freq_attr, attr)
664
665 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
666 {
667 struct cpufreq_policy *policy = to_policy(kobj);
668 struct freq_attr *fattr = to_attr(attr);
669 ssize_t ret = -EINVAL;
670
671 if (!down_read_trylock(&cpufreq_rwsem))
672 goto exit;
673
674 if (lock_policy_rwsem_read(policy->cpu) < 0)
675 goto up_read;
676
677 if (fattr->show)
678 ret = fattr->show(policy, buf);
679 else
680 ret = -EIO;
681
682 unlock_policy_rwsem_read(policy->cpu);
683
684 up_read:
685 up_read(&cpufreq_rwsem);
686 exit:
687 return ret;
688 }
689
690 static ssize_t store(struct kobject *kobj, struct attribute *attr,
691 const char *buf, size_t count)
692 {
693 struct cpufreq_policy *policy = to_policy(kobj);
694 struct freq_attr *fattr = to_attr(attr);
695 ssize_t ret = -EINVAL;
696
697 get_online_cpus();
698
699 if (!cpu_online(policy->cpu))
700 goto unlock;
701
702 if (!down_read_trylock(&cpufreq_rwsem))
703 goto unlock;
704
705 if (lock_policy_rwsem_write(policy->cpu) < 0)
706 goto up_read;
707
708 if (fattr->store)
709 ret = fattr->store(policy, buf, count);
710 else
711 ret = -EIO;
712
713 unlock_policy_rwsem_write(policy->cpu);
714
715 up_read:
716 up_read(&cpufreq_rwsem);
717 unlock:
718 put_online_cpus();
719
720 return ret;
721 }
722
723 static void cpufreq_sysfs_release(struct kobject *kobj)
724 {
725 struct cpufreq_policy *policy = to_policy(kobj);
726 pr_debug("last reference is dropped\n");
727 complete(&policy->kobj_unregister);
728 }
729
730 static const struct sysfs_ops sysfs_ops = {
731 .show = show,
732 .store = store,
733 };
734
735 static struct kobj_type ktype_cpufreq = {
736 .sysfs_ops = &sysfs_ops,
737 .default_attrs = default_attrs,
738 .release = cpufreq_sysfs_release,
739 };
740
741 struct kobject *cpufreq_global_kobject;
742 EXPORT_SYMBOL(cpufreq_global_kobject);
743
744 static int cpufreq_global_kobject_usage;
745
746 int cpufreq_get_global_kobject(void)
747 {
748 if (!cpufreq_global_kobject_usage++)
749 return kobject_add(cpufreq_global_kobject,
750 &cpu_subsys.dev_root->kobj, "%s", "cpufreq");
751
752 return 0;
753 }
754 EXPORT_SYMBOL(cpufreq_get_global_kobject);
755
756 void cpufreq_put_global_kobject(void)
757 {
758 if (!--cpufreq_global_kobject_usage)
759 kobject_del(cpufreq_global_kobject);
760 }
761 EXPORT_SYMBOL(cpufreq_put_global_kobject);
762
763 int cpufreq_sysfs_create_file(const struct attribute *attr)
764 {
765 int ret = cpufreq_get_global_kobject();
766
767 if (!ret) {
768 ret = sysfs_create_file(cpufreq_global_kobject, attr);
769 if (ret)
770 cpufreq_put_global_kobject();
771 }
772
773 return ret;
774 }
775 EXPORT_SYMBOL(cpufreq_sysfs_create_file);
776
777 void cpufreq_sysfs_remove_file(const struct attribute *attr)
778 {
779 sysfs_remove_file(cpufreq_global_kobject, attr);
780 cpufreq_put_global_kobject();
781 }
782 EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
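/*
 * Illustrative sketch (not part of this file): code that wants a file
 * directly under /sys/devices/system/cpu/cpufreq/ pairs these helpers,
 * e.g.
 *
 *	ret = cpufreq_sysfs_create_file(&my_attr.attr);	(on init)
 *	cpufreq_sysfs_remove_file(&my_attr.attr);	(on exit)
 *
 * where my_attr is a hypothetical attribute whose show/store callbacks
 * match the sysfs_ops of the global kobject.
 */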
783
784 /* symlink affected CPUs */
785 static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
786 {
787 unsigned int j;
788 int ret = 0;
789
790 for_each_cpu(j, policy->cpus) {
791 struct device *cpu_dev;
792
793 if (j == policy->cpu)
794 continue;
795
796 pr_debug("Adding link for CPU: %u\n", j);
797 cpu_dev = get_cpu_device(j);
798 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
799 "cpufreq");
800 if (ret)
801 break;
802 }
803 return ret;
804 }
805
806 static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
807 struct device *dev)
808 {
809 struct freq_attr **drv_attr;
810 int ret = 0;
811
812 /* prepare interface data */
813 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
814 &dev->kobj, "cpufreq");
815 if (ret)
816 return ret;
817
818 /* set up files for this cpu device */
819 drv_attr = cpufreq_driver->attr;
820 while ((drv_attr) && (*drv_attr)) {
821 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
822 if (ret)
823 goto err_out_kobj_put;
824 drv_attr++;
825 }
826 if (cpufreq_driver->get) {
827 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
828 if (ret)
829 goto err_out_kobj_put;
830 }
831 if (cpufreq_driver->target) {
832 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
833 if (ret)
834 goto err_out_kobj_put;
835 }
836 if (cpufreq_driver->bios_limit) {
837 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
838 if (ret)
839 goto err_out_kobj_put;
840 }
841
842 ret = cpufreq_add_dev_symlink(policy);
843 if (ret)
844 goto err_out_kobj_put;
845
846 return ret;
847
848 err_out_kobj_put:
849 kobject_put(&policy->kobj);
850 wait_for_completion(&policy->kobj_unregister);
851 return ret;
852 }
853
854 static void cpufreq_init_policy(struct cpufreq_policy *policy)
855 {
856 struct cpufreq_policy new_policy;
857 int ret = 0;
858
859 memcpy(&new_policy, policy, sizeof(*policy));
860 /* assure that the starting sequence is run in __cpufreq_set_policy */
861 policy->governor = NULL;
862
863 /* set default policy */
864 ret = __cpufreq_set_policy(policy, &new_policy);
865 policy->user_policy.policy = policy->policy;
866 policy->user_policy.governor = policy->governor;
867
868 if (ret) {
869 pr_debug("setting policy failed\n");
870 if (cpufreq_driver->exit)
871 cpufreq_driver->exit(policy);
872 }
873 }
874
875 #ifdef CONFIG_HOTPLUG_CPU
876 static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
877 unsigned int cpu, struct device *dev,
878 bool frozen)
879 {
880 int ret = 0, has_target = !!cpufreq_driver->target;
881 unsigned long flags;
882
883 if (has_target) {
884 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
885 if (ret) {
886 pr_err("%s: Failed to stop governor\n", __func__);
887 return ret;
888 }
889 }
890
891 lock_policy_rwsem_write(policy->cpu);
892
893 write_lock_irqsave(&cpufreq_driver_lock, flags);
894
895 cpumask_set_cpu(cpu, policy->cpus);
896 per_cpu(cpufreq_cpu_data, cpu) = policy;
897 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
898
899 unlock_policy_rwsem_write(policy->cpu);
900
901 if (has_target) {
902 if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
903 (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
904 pr_err("%s: Failed to start governor\n", __func__);
905 return ret;
906 }
907 }
908
909 /* Don't touch sysfs links during light-weight init */
910 if (!frozen)
911 ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
912
913 return ret;
914 }
915 #endif
916
917 static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
918 {
919 struct cpufreq_policy *policy;
920 unsigned long flags;
921
922 write_lock_irqsave(&cpufreq_driver_lock, flags);
923
924 policy = per_cpu(cpufreq_cpu_data_fallback, cpu);
925
926 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
927
928 return policy;
929 }
930
931 static struct cpufreq_policy *cpufreq_policy_alloc(void)
932 {
933 struct cpufreq_policy *policy;
934
935 policy = kzalloc(sizeof(*policy), GFP_KERNEL);
936 if (!policy)
937 return NULL;
938
939 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
940 goto err_free_policy;
941
942 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
943 goto err_free_cpumask;
944
945 INIT_LIST_HEAD(&policy->policy_list);
946 return policy;
947
948 err_free_cpumask:
949 free_cpumask_var(policy->cpus);
950 err_free_policy:
951 kfree(policy);
952
953 return NULL;
954 }
955
956 static void cpufreq_policy_free(struct cpufreq_policy *policy)
957 {
958 free_cpumask_var(policy->related_cpus);
959 free_cpumask_var(policy->cpus);
960 kfree(policy);
961 }
962
963 static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
964 bool frozen)
965 {
966 unsigned int j, cpu = dev->id;
967 int ret = -ENOMEM;
968 struct cpufreq_policy *policy;
969 unsigned long flags;
970 #ifdef CONFIG_HOTPLUG_CPU
971 struct cpufreq_policy *tpolicy;
972 struct cpufreq_governor *gov;
973 #endif
974
975 if (cpu_is_offline(cpu))
976 return 0;
977
978 pr_debug("adding CPU %u\n", cpu);
979
980 #ifdef CONFIG_SMP
981 /* check whether a different CPU already registered this
982 * CPU because it is in the same boat. */
983 policy = cpufreq_cpu_get(cpu);
984 if (unlikely(policy)) {
985 cpufreq_cpu_put(policy);
986 return 0;
987 }
988 #endif
989
990 if (!down_read_trylock(&cpufreq_rwsem))
991 return 0;
992
993 #ifdef CONFIG_HOTPLUG_CPU
994 /* Check if this cpu was hot-unplugged earlier and has siblings */
995 read_lock_irqsave(&cpufreq_driver_lock, flags);
996 list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) {
997 if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) {
998 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
999 ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev, frozen);
1000 up_read(&cpufreq_rwsem);
1001 return ret;
1002 }
1003 }
1004 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1005 #endif
1006
1007 if (frozen)
1008 /* Restore the saved policy when doing light-weight init */
1009 policy = cpufreq_policy_restore(cpu);
1010 else
1011 policy = cpufreq_policy_alloc();
1012
1013 if (!policy)
1014 goto nomem_out;
1015
1016 policy->cpu = cpu;
1017 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
1018 cpumask_copy(policy->cpus, cpumask_of(cpu));
1019
1020 init_completion(&policy->kobj_unregister);
1021 INIT_WORK(&policy->update, handle_update);
1022
1023 /* call driver. From then on the cpufreq must be able
1024 * to accept all calls to ->verify and ->setpolicy for this CPU
1025 */
1026 ret = cpufreq_driver->init(policy);
1027 if (ret) {
1028 pr_debug("initialization failed\n");
1029 goto err_set_policy_cpu;
1030 }
1031
1032 /* related cpus should at least have policy->cpus */
1033 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
1034
1035 /*
1036 * affected cpus must always be the ones that are online. We aren't
1037 * managing offline cpus here.
1038 */
1039 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1040
1041 policy->user_policy.min = policy->min;
1042 policy->user_policy.max = policy->max;
1043
1044 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1045 CPUFREQ_START, policy);
1046
1047 #ifdef CONFIG_HOTPLUG_CPU
1048 gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
1049 if (gov) {
1050 policy->governor = gov;
1051 pr_debug("Restoring governor %s for cpu %d\n",
1052 policy->governor->name, cpu);
1053 }
1054 #endif
1055
1056 write_lock_irqsave(&cpufreq_driver_lock, flags);
1057 for_each_cpu(j, policy->cpus)
1058 per_cpu(cpufreq_cpu_data, j) = policy;
1059 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1060
1061 if (!frozen) {
1062 ret = cpufreq_add_dev_interface(policy, dev);
1063 if (ret)
1064 goto err_out_unregister;
1065 }
1066
1067 write_lock_irqsave(&cpufreq_driver_lock, flags);
1068 list_add(&policy->policy_list, &cpufreq_policy_list);
1069 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1070
1071 cpufreq_init_policy(policy);
1072
1073 kobject_uevent(&policy->kobj, KOBJ_ADD);
1074 up_read(&cpufreq_rwsem);
1075
1076 pr_debug("initialization complete\n");
1077
1078 return 0;
1079
1080 err_out_unregister:
1081 write_lock_irqsave(&cpufreq_driver_lock, flags);
1082 for_each_cpu(j, policy->cpus)
1083 per_cpu(cpufreq_cpu_data, j) = NULL;
1084 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1085
1086 err_set_policy_cpu:
1087 cpufreq_policy_free(policy);
1088 nomem_out:
1089 up_read(&cpufreq_rwsem);
1090
1091 return ret;
1092 }
1093
1094 /**
1095 * cpufreq_add_dev - add a CPU device
1096 *
1097 * Adds the cpufreq interface for a CPU device.
1098 *
1099 * The Oracle says: try running cpufreq registration/unregistration concurrently
1100 * with cpu hotplugging and all hell will break loose. Tried to clean this
1101 * mess up, but more thorough testing is needed. - Mathieu
1102 */
1103 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1104 {
1105 return __cpufreq_add_dev(dev, sif, false);
1106 }
1107
1108 static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
1109 {
1110 policy->last_cpu = policy->cpu;
1111 policy->cpu = cpu;
1112
1113 #ifdef CONFIG_CPU_FREQ_TABLE
1114 cpufreq_frequency_table_update_policy_cpu(policy);
1115 #endif
1116 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1117 CPUFREQ_UPDATE_POLICY_CPU, policy);
1118 }
1119
1120 static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
1121 unsigned int old_cpu, bool frozen)
1122 {
1123 struct device *cpu_dev;
1124 int ret;
1125
1126 /* first sibling now owns the new sysfs dir */
1127 cpu_dev = get_cpu_device(cpumask_first(policy->cpus));
1128
1129 /* Don't touch sysfs files during light-weight tear-down */
1130 if (frozen)
1131 return cpu_dev->id;
1132
1133 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
1134 ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
1135 if (ret) {
1136 pr_err("%s: Failed to move kobj: %d", __func__, ret);
1137
1138 WARN_ON(lock_policy_rwsem_write(old_cpu));
1139 cpumask_set_cpu(old_cpu, policy->cpus);
1140 unlock_policy_rwsem_write(old_cpu);
1141
1142 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
1143 "cpufreq");
1144
1145 return -EINVAL;
1146 }
1147
1148 return cpu_dev->id;
1149 }
1150
1151 static int __cpufreq_remove_dev_prepare(struct device *dev,
1152 struct subsys_interface *sif,
1153 bool frozen)
1154 {
1155 unsigned int cpu = dev->id, cpus;
1156 int new_cpu, ret;
1157 unsigned long flags;
1158 struct cpufreq_policy *policy;
1159
1160 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1161
1162 write_lock_irqsave(&cpufreq_driver_lock, flags);
1163
1164 policy = per_cpu(cpufreq_cpu_data, cpu);
1165
1166 /* Save the policy somewhere when doing a light-weight tear-down */
1167 if (frozen)
1168 per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;
1169
1170 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1171
1172 if (!policy) {
1173 pr_debug("%s: No cpu_data found\n", __func__);
1174 return -EINVAL;
1175 }
1176
1177 if (cpufreq_driver->target) {
1178 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1179 if (ret) {
1180 pr_err("%s: Failed to stop governor\n", __func__);
1181 return ret;
1182 }
1183 }
1184
1185 #ifdef CONFIG_HOTPLUG_CPU
1186 if (!cpufreq_driver->setpolicy)
1187 strncpy(per_cpu(cpufreq_cpu_governor, cpu),
1188 policy->governor->name, CPUFREQ_NAME_LEN);
1189 #endif
1190
1191 WARN_ON(lock_policy_rwsem_write(cpu));
1192 cpus = cpumask_weight(policy->cpus);
1193
1194 if (cpus > 1)
1195 cpumask_clear_cpu(cpu, policy->cpus);
1196 unlock_policy_rwsem_write(cpu);
1197
1198 if (cpu != policy->cpu && !frozen) {
1199 sysfs_remove_link(&dev->kobj, "cpufreq");
1200 } else if (cpus > 1) {
1201
1202 new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen);
1203 if (new_cpu >= 0) {
1204 WARN_ON(lock_policy_rwsem_write(cpu));
1205 update_policy_cpu(policy, new_cpu);
1206 unlock_policy_rwsem_write(cpu);
1207
1208 if (!frozen) {
1209 pr_debug("%s: policy Kobject moved to cpu: %d "
1210 "from: %d\n",__func__, new_cpu, cpu);
1211 }
1212 }
1213 }
1214
1215 return 0;
1216 }
1217
1218 static int __cpufreq_remove_dev_finish(struct device *dev,
1219 struct subsys_interface *sif,
1220 bool frozen)
1221 {
1222 unsigned int cpu = dev->id, cpus;
1223 int ret;
1224 unsigned long flags;
1225 struct cpufreq_policy *policy;
1226 struct kobject *kobj;
1227 struct completion *cmp;
1228
1229 read_lock_irqsave(&cpufreq_driver_lock, flags);
1230 policy = per_cpu(cpufreq_cpu_data, cpu);
1231 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1232
1233 if (!policy) {
1234 pr_debug("%s: No cpu_data found\n", __func__);
1235 return -EINVAL;
1236 }
1237
1238 lock_policy_rwsem_read(cpu);
1239 cpus = cpumask_weight(policy->cpus);
1240 unlock_policy_rwsem_read(cpu);
1241
1242 /* If cpu is last user of policy, free policy */
1243 if (cpus == 1) {
1244 if (cpufreq_driver->target) {
1245 ret = __cpufreq_governor(policy,
1246 CPUFREQ_GOV_POLICY_EXIT);
1247 if (ret) {
1248 pr_err("%s: Failed to exit governor\n",
1249 __func__);
1250 return ret;
1251 }
1252 }
1253
1254 if (!frozen) {
1255 lock_policy_rwsem_read(cpu);
1256 kobj = &policy->kobj;
1257 cmp = &policy->kobj_unregister;
1258 unlock_policy_rwsem_read(cpu);
1259 kobject_put(kobj);
1260
1261 /*
1262 * We need to make sure that the underlying kobj is
1263 * actually not referenced anymore by anybody before we
1264 * proceed with unloading.
1265 */
1266 pr_debug("waiting for dropping of refcount\n");
1267 wait_for_completion(cmp);
1268 pr_debug("wait complete\n");
1269 }
1270
1271 /*
1272 * Perform the ->exit() even during light-weight tear-down,
1273 * since this is a core component, and is essential for the
1274 * subsequent light-weight ->init() to succeed.
1275 */
1276 if (cpufreq_driver->exit)
1277 cpufreq_driver->exit(policy);
1278
1279 /* Remove policy from list of active policies */
1280 write_lock_irqsave(&cpufreq_driver_lock, flags);
1281 list_del(&policy->policy_list);
1282 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1283
1284 if (!frozen)
1285 cpufreq_policy_free(policy);
1286 } else {
1287 if (cpufreq_driver->target) {
1288 if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
1289 (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
1290 pr_err("%s: Failed to start governor\n",
1291 __func__);
1292 return ret;
1293 }
1294 }
1295 }
1296
1297 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1298 return 0;
1299 }
1300
1301 /**
1302 * __cpufreq_remove_dev - remove a CPU device
1303 *
1304 * Removes the cpufreq interface for a CPU device.
1305 * Caller should already have policy_rwsem in write mode for this CPU.
1306 * This routine frees the rwsem before returning.
1307 */
1308 static inline int __cpufreq_remove_dev(struct device *dev,
1309 struct subsys_interface *sif,
1310 bool frozen)
1311 {
1312 int ret;
1313
1314 ret = __cpufreq_remove_dev_prepare(dev, sif, frozen);
1315
1316 if (!ret)
1317 ret = __cpufreq_remove_dev_finish(dev, sif, frozen);
1318
1319 return ret;
1320 }
1321
1322 static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1323 {
1324 unsigned int cpu = dev->id;
1325 int retval;
1326
1327 if (cpu_is_offline(cpu))
1328 return 0;
1329
1330 retval = __cpufreq_remove_dev(dev, sif, false);
1331 return retval;
1332 }
1333
1334 static void handle_update(struct work_struct *work)
1335 {
1336 struct cpufreq_policy *policy =
1337 container_of(work, struct cpufreq_policy, update);
1338 unsigned int cpu = policy->cpu;
1339 pr_debug("handle_update for cpu %u called\n", cpu);
1340 cpufreq_update_policy(cpu);
1341 }
1342
1343 /**
1344 * cpufreq_out_of_sync - If the actual and saved CPU frequencies differ, we're
1345 * in deep trouble.
1346 * @cpu: cpu number
1347 * @old_freq: CPU frequency the kernel thinks the CPU runs at
1348 * @new_freq: CPU frequency the CPU actually runs at
1349 *
1350 * We adjust to current frequency first, and need to clean up later.
1351 * So either call cpufreq_update_policy() or schedule handle_update().
1352 */
1353 static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1354 unsigned int new_freq)
1355 {
1356 struct cpufreq_policy *policy;
1357 struct cpufreq_freqs freqs;
1358 unsigned long flags;
1359
1360 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
1361 "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
1362
1363 freqs.old = old_freq;
1364 freqs.new = new_freq;
1365
1366 read_lock_irqsave(&cpufreq_driver_lock, flags);
1367 policy = per_cpu(cpufreq_cpu_data, cpu);
1368 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1369
1370 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
1371 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
1372 }
1373
1374 /**
1375 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1376 * @cpu: CPU number
1377 *
1378 * This is the last known freq, without actually getting it from the driver.
1379 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1380 */
1381 unsigned int cpufreq_quick_get(unsigned int cpu)
1382 {
1383 struct cpufreq_policy *policy;
1384 unsigned int ret_freq = 0;
1385
1386 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1387 return cpufreq_driver->get(cpu);
1388
1389 policy = cpufreq_cpu_get(cpu);
1390 if (policy) {
1391 ret_freq = policy->cur;
1392 cpufreq_cpu_put(policy);
1393 }
1394
1395 return ret_freq;
1396 }
1397 EXPORT_SYMBOL(cpufreq_quick_get);
1398
1399 /**
1400 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1401 * @cpu: CPU number
1402 *
1403 * Just return the max possible frequency for a given CPU.
1404 */
1405 unsigned int cpufreq_quick_get_max(unsigned int cpu)
1406 {
1407 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1408 unsigned int ret_freq = 0;
1409
1410 if (policy) {
1411 ret_freq = policy->max;
1412 cpufreq_cpu_put(policy);
1413 }
1414
1415 return ret_freq;
1416 }
1417 EXPORT_SYMBOL(cpufreq_quick_get_max);
1418
1419 static unsigned int __cpufreq_get(unsigned int cpu)
1420 {
1421 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1422 unsigned int ret_freq = 0;
1423
1424 if (!cpufreq_driver->get)
1425 return ret_freq;
1426
1427 ret_freq = cpufreq_driver->get(cpu);
1428
1429 if (ret_freq && policy->cur &&
1430 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1431 /* verify no discrepancy between actual and
1432 saved value exists */
1433 if (unlikely(ret_freq != policy->cur)) {
1434 cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
1435 schedule_work(&policy->update);
1436 }
1437 }
1438
1439 return ret_freq;
1440 }
1441
1442 /**
1443 * cpufreq_get - get the current CPU frequency (in kHz)
1444 * @cpu: CPU number
1445 *
1446 * Get the CPU current (static) CPU frequency
1447 */
1448 unsigned int cpufreq_get(unsigned int cpu)
1449 {
1450 unsigned int ret_freq = 0;
1451
1452 if (!down_read_trylock(&cpufreq_rwsem))
1453 return 0;
1454
1455 if (unlikely(lock_policy_rwsem_read(cpu)))
1456 goto out_policy;
1457
1458 ret_freq = __cpufreq_get(cpu);
1459
1460 unlock_policy_rwsem_read(cpu);
1461
1462 out_policy:
1463 up_read(&cpufreq_rwsem);
1464
1465 return ret_freq;
1466 }
1467 EXPORT_SYMBOL(cpufreq_get);
1468
1469 static struct subsys_interface cpufreq_interface = {
1470 .name = "cpufreq",
1471 .subsys = &cpu_subsys,
1472 .add_dev = cpufreq_add_dev,
1473 .remove_dev = cpufreq_remove_dev,
1474 };
1475
1476 /**
1477 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
1478 *
1479 * This function is only executed for the boot processor. The other CPUs
1480 * have been put offline by means of CPU hotplug.
1481 */
1482 static int cpufreq_bp_suspend(void)
1483 {
1484 int ret = 0;
1485
1486 int cpu = smp_processor_id();
1487 struct cpufreq_policy *policy;
1488
1489 pr_debug("suspending cpu %u\n", cpu);
1490
1491 /* If there's no policy for the boot CPU, we have nothing to do. */
1492 policy = cpufreq_cpu_get(cpu);
1493 if (!policy)
1494 return 0;
1495
1496 if (cpufreq_driver->suspend) {
1497 ret = cpufreq_driver->suspend(policy);
1498 if (ret)
1499 printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
1500 "step on CPU %u\n", policy->cpu);
1501 }
1502
1503 cpufreq_cpu_put(policy);
1504 return ret;
1505 }
1506
1507 /**
1508 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
1509 *
1510 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
1511 * 2.) schedule call cpufreq_update_policy() ASAP as interrupts are
1512 * restored. It will verify that the current freq is in sync with
1513 * what we believe it to be. This is a bit later than when it
1514 * should be, but nonetheless it's better than calling
1515 * cpufreq_driver->get() here which might re-enable interrupts...
1516 *
1517 * This function is only executed for the boot CPU. The other CPUs have not
1518 * been turned on yet.
1519 */
1520 static void cpufreq_bp_resume(void)
1521 {
1522 int ret = 0;
1523
1524 int cpu = smp_processor_id();
1525 struct cpufreq_policy *policy;
1526
1527 pr_debug("resuming cpu %u\n", cpu);
1528
1529 /* If there's no policy for the boot CPU, we have nothing to do. */
1530 policy = cpufreq_cpu_get(cpu);
1531 if (!policy)
1532 return;
1533
1534 if (cpufreq_driver->resume) {
1535 ret = cpufreq_driver->resume(policy);
1536 if (ret) {
1537 printk(KERN_ERR "cpufreq: resume failed in ->resume "
1538 "step on CPU %u\n", policy->cpu);
1539 goto fail;
1540 }
1541 }
1542
1543 schedule_work(&policy->update);
1544
1545 fail:
1546 cpufreq_cpu_put(policy);
1547 }
1548
1549 static struct syscore_ops cpufreq_syscore_ops = {
1550 .suspend = cpufreq_bp_suspend,
1551 .resume = cpufreq_bp_resume,
1552 };
1553
1554 /**
1555 * cpufreq_get_current_driver - return current driver's name
1556 *
1557 * Return the name string of the currently loaded cpufreq driver
1558 * or NULL, if none.
1559 */
1560 const char *cpufreq_get_current_driver(void)
1561 {
1562 if (cpufreq_driver)
1563 return cpufreq_driver->name;
1564
1565 return NULL;
1566 }
1567 EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1568
1569 /*********************************************************************
1570 * NOTIFIER LISTS INTERFACE *
1571 *********************************************************************/
1572
1573 /**
1574 * cpufreq_register_notifier - register a driver with cpufreq
1575 * @nb: notifier function to register
1576 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1577 *
1578 * Add a driver to one of two lists: either a list of drivers that
1579 * are notified about clock rate changes (once before and once after
1580 * the transition), or a list of drivers that are notified about
1581 * changes in cpufreq policy.
1582 *
1583 * This function may sleep, and has the same return conditions as
1584 * blocking_notifier_chain_register.
1585 */
1586 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1587 {
1588 int ret;
1589
1590 if (cpufreq_disabled())
1591 return -EINVAL;
1592
1593 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1594
1595 switch (list) {
1596 case CPUFREQ_TRANSITION_NOTIFIER:
1597 ret = srcu_notifier_chain_register(
1598 &cpufreq_transition_notifier_list, nb);
1599 break;
1600 case CPUFREQ_POLICY_NOTIFIER:
1601 ret = blocking_notifier_chain_register(
1602 &cpufreq_policy_notifier_list, nb);
1603 break;
1604 default:
1605 ret = -EINVAL;
1606 }
1607
1608 return ret;
1609 }
1610 EXPORT_SYMBOL(cpufreq_register_notifier);
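/*
 * Illustrative sketch (not part of this file): a client that needs to
 * react to frequency changes registers a transition notifier, e.g.
 *
 *	static int my_transition(struct notifier_block *nb,
 *				 unsigned long val, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (val == CPUFREQ_POSTCHANGE)
 *			pr_debug("cpu%u now at %u kHz\n",
 *				 freqs->cpu, freqs->new);
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block my_nb = { .notifier_call = my_transition };
 *	...
 *	cpufreq_register_notifier(&my_nb, CPUFREQ_TRANSITION_NOTIFIER);
 *
 * Function and variable names here are hypothetical.
 */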
1611
1612 /**
1613 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1614 * @nb: notifier block to be unregistered
1615 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1616 *
1617 * Remove a driver from the CPU frequency notifier list.
1618 *
1619 * This function may sleep, and has the same return conditions as
1620 * blocking_notifier_chain_unregister.
1621 */
1622 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1623 {
1624 int ret;
1625
1626 if (cpufreq_disabled())
1627 return -EINVAL;
1628
1629 switch (list) {
1630 case CPUFREQ_TRANSITION_NOTIFIER:
1631 ret = srcu_notifier_chain_unregister(
1632 &cpufreq_transition_notifier_list, nb);
1633 break;
1634 case CPUFREQ_POLICY_NOTIFIER:
1635 ret = blocking_notifier_chain_unregister(
1636 &cpufreq_policy_notifier_list, nb);
1637 break;
1638 default:
1639 ret = -EINVAL;
1640 }
1641
1642 return ret;
1643 }
1644 EXPORT_SYMBOL(cpufreq_unregister_notifier);
1645
1646
1647 /*********************************************************************
1648 * GOVERNORS *
1649 *********************************************************************/
1650
1651 int __cpufreq_driver_target(struct cpufreq_policy *policy,
1652 unsigned int target_freq,
1653 unsigned int relation)
1654 {
1655 int retval = -EINVAL;
1656 unsigned int old_target_freq = target_freq;
1657
1658 if (cpufreq_disabled())
1659 return -ENODEV;
1660 if (policy->transition_ongoing)
1661 return -EBUSY;
1662
1663 /* Make sure that target_freq is within supported range */
1664 if (target_freq > policy->max)
1665 target_freq = policy->max;
1666 if (target_freq < policy->min)
1667 target_freq = policy->min;
1668
1669 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1670 policy->cpu, target_freq, relation, old_target_freq);
1671
1672 if (target_freq == policy->cur)
1673 return 0;
1674
1675 if (cpufreq_driver->target)
1676 retval = cpufreq_driver->target(policy, target_freq, relation);
1677
1678 return retval;
1679 }
1680 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1681
1682 int cpufreq_driver_target(struct cpufreq_policy *policy,
1683 unsigned int target_freq,
1684 unsigned int relation)
1685 {
1686 int ret = -EINVAL;
1687
1688 if (unlikely(lock_policy_rwsem_write(policy->cpu)))
1689 goto fail;
1690
1691 ret = __cpufreq_driver_target(policy, target_freq, relation);
1692
1693 unlock_policy_rwsem_write(policy->cpu);
1694
1695 fail:
1696 return ret;
1697 }
1698 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
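/*
 * Illustrative sketch (not part of this file): code outside the policy
 * lock requests frequency changes through the locked wrapper, e.g.
 *
 *	cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
 *
 * while governor code already running under the policy rwsem calls
 * __cpufreq_driver_target() directly, picking CPUFREQ_RELATION_L or
 * CPUFREQ_RELATION_H as appropriate.
 */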
1699
1700 /*
1701 * when "event" is CPUFREQ_GOV_LIMITS
1702 */
1703
1704 static int __cpufreq_governor(struct cpufreq_policy *policy,
1705 unsigned int event)
1706 {
1707 int ret;
1708
1709 /* A fallback governor need only be defined when the default governor is
1710 known to have latency restrictions, e.g. conservative or ondemand.
1711 That this is the case is already ensured in Kconfig.
1712 */
1713 #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1714 struct cpufreq_governor *gov = &cpufreq_gov_performance;
1715 #else
1716 struct cpufreq_governor *gov = NULL;
1717 #endif
1718
1719 if (policy->governor->max_transition_latency &&
1720 policy->cpuinfo.transition_latency >
1721 policy->governor->max_transition_latency) {
1722 if (!gov)
1723 return -EINVAL;
1724 else {
1725 printk(KERN_WARNING "%s governor failed, too long"
1726 " transition latency of HW, fallback"
1727 " to %s governor\n",
1728 policy->governor->name,
1729 gov->name);
1730 policy->governor = gov;
1731 }
1732 }
1733
1734 if (event == CPUFREQ_GOV_POLICY_INIT)
1735 if (!try_module_get(policy->governor->owner))
1736 return -EINVAL;
1737
1738 pr_debug("__cpufreq_governor for CPU %u, event %u\n",
1739 policy->cpu, event);
1740
1741 mutex_lock(&cpufreq_governor_lock);
1742 if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
1743 || (!policy->governor_enabled
1744 && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
1745 mutex_unlock(&cpufreq_governor_lock);
1746 return -EBUSY;
1747 }
1748
1749 if (event == CPUFREQ_GOV_STOP)
1750 policy->governor_enabled = false;
1751 else if (event == CPUFREQ_GOV_START)
1752 policy->governor_enabled = true;
1753
1754 mutex_unlock(&cpufreq_governor_lock);
1755
1756 ret = policy->governor->governor(policy, event);
1757
1758 if (!ret) {
1759 if (event == CPUFREQ_GOV_POLICY_INIT)
1760 policy->governor->initialized++;
1761 else if (event == CPUFREQ_GOV_POLICY_EXIT)
1762 policy->governor->initialized--;
1763 } else {
1764 /* Restore original values */
1765 mutex_lock(&cpufreq_governor_lock);
1766 if (event == CPUFREQ_GOV_STOP)
1767 policy->governor_enabled = true;
1768 else if (event == CPUFREQ_GOV_START)
1769 policy->governor_enabled = false;
1770 mutex_unlock(&cpufreq_governor_lock);
1771 }
1772
1773 if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
1774 ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
1775 module_put(policy->governor->owner);
1776
1777 return ret;
1778 }
1779
1780 int cpufreq_register_governor(struct cpufreq_governor *governor)
1781 {
1782 int err;
1783
1784 if (!governor)
1785 return -EINVAL;
1786
1787 if (cpufreq_disabled())
1788 return -ENODEV;
1789
1790 mutex_lock(&cpufreq_governor_mutex);
1791
1792 governor->initialized = 0;
1793 err = -EBUSY;
1794 if (__find_governor(governor->name) == NULL) {
1795 err = 0;
1796 list_add(&governor->governor_list, &cpufreq_governor_list);
1797 }
1798
1799 mutex_unlock(&cpufreq_governor_mutex);
1800 return err;
1801 }
1802 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
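/*
 * Illustrative sketch (not part of this file): a governor module registers
 * itself from its init path and unregisters on exit:
 *
 *	static struct cpufreq_governor cpufreq_gov_mygov = {
 *		.name		= "mygov",
 *		.governor	= mygov_governor,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	ret = cpufreq_register_governor(&cpufreq_gov_mygov);
 *	...
 *	cpufreq_unregister_governor(&cpufreq_gov_mygov);
 *
 * "mygov" and its event callback are hypothetical.
 */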
1803
1804 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1805 {
1806 #ifdef CONFIG_HOTPLUG_CPU
1807 int cpu;
1808 #endif
1809
1810 if (!governor)
1811 return;
1812
1813 if (cpufreq_disabled())
1814 return;
1815
1816 #ifdef CONFIG_HOTPLUG_CPU
1817 for_each_present_cpu(cpu) {
1818 if (cpu_online(cpu))
1819 continue;
1820 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1821 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1822 }
1823 #endif
1824
1825 mutex_lock(&cpufreq_governor_mutex);
1826 list_del(&governor->governor_list);
1827 mutex_unlock(&cpufreq_governor_mutex);
1828 return;
1829 }
1830 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1831
1832
1833 /*********************************************************************
1834 * POLICY INTERFACE *
1835 *********************************************************************/
1836
1837 /**
1838 * cpufreq_get_policy - get the current cpufreq_policy
1839 * @policy: struct cpufreq_policy into which the current cpufreq_policy
1840 * is written
1841 *
1842 * Reads the current cpufreq policy.
1843 */
1844 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1845 {
1846 struct cpufreq_policy *cpu_policy;
1847 if (!policy)
1848 return -EINVAL;
1849
1850 cpu_policy = cpufreq_cpu_get(cpu);
1851 if (!cpu_policy)
1852 return -EINVAL;
1853
1854 memcpy(policy, cpu_policy, sizeof(*policy));
1855
1856 cpufreq_cpu_put(cpu_policy);
1857 return 0;
1858 }
1859 EXPORT_SYMBOL(cpufreq_get_policy);
1860
1861 /*
1862 * data : current policy.
1863 * policy : policy to be set.
1864 */
1865 static int __cpufreq_set_policy(struct cpufreq_policy *policy,
1866 struct cpufreq_policy *new_policy)
1867 {
1868 int ret = 0, failed = 1;
1869
1870 pr_debug("setting new policy for CPU %u: %u - %u kHz\n", new_policy->cpu,
1871 new_policy->min, new_policy->max);
1872
1873 memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
1874
1875 if (new_policy->min > policy->max || new_policy->max < policy->min) {
1876 ret = -EINVAL;
1877 goto error_out;
1878 }
1879
1880 /* verify the cpu speed can be set within this limit */
1881 ret = cpufreq_driver->verify(new_policy);
1882 if (ret)
1883 goto error_out;
1884
1885 /* adjust if necessary - all reasons */
1886 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1887 CPUFREQ_ADJUST, new_policy);
1888
1889 /* adjust if necessary - hardware incompatibility*/
1890 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1891 CPUFREQ_INCOMPATIBLE, new_policy);
1892
1893 /*
1894 * verify the cpu speed can be set within this limit, which might be
1895 * different to the first one
1896 */
1897 ret = cpufreq_driver->verify(new_policy);
1898 if (ret)
1899 goto error_out;
1900
1901 /* notification of the new policy */
1902 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1903 CPUFREQ_NOTIFY, new_policy);
1904
1905 policy->min = new_policy->min;
1906 policy->max = new_policy->max;
1907
1908 pr_debug("new min and max freqs are %u - %u kHz\n",
1909 policy->min, policy->max);
1910
1911 if (cpufreq_driver->setpolicy) {
1912 policy->policy = new_policy->policy;
1913 pr_debug("setting range\n");
1914 ret = cpufreq_driver->setpolicy(new_policy);
1915 } else {
1916 if (new_policy->governor != policy->governor) {
1917 /* save old, working values */
1918 struct cpufreq_governor *old_gov = policy->governor;
1919
1920 pr_debug("governor switch\n");
1921
1922 /* end old governor */
1923 if (policy->governor) {
1924 __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1925 unlock_policy_rwsem_write(new_policy->cpu);
1926 __cpufreq_governor(policy,
1927 CPUFREQ_GOV_POLICY_EXIT);
1928 lock_policy_rwsem_write(new_policy->cpu);
1929 }
1930
1931 /* start new governor */
1932 policy->governor = new_policy->governor;
1933 if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
1934 if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) {
1935 failed = 0;
1936 } else {
1937 unlock_policy_rwsem_write(new_policy->cpu);
1938 __cpufreq_governor(policy,
1939 CPUFREQ_GOV_POLICY_EXIT);
1940 lock_policy_rwsem_write(new_policy->cpu);
1941 }
1942 }
1943
1944 if (failed) {
1945 /* new governor failed, so re-start old one */
1946 pr_debug("starting governor %s failed\n",
1947 policy->governor->name);
1948 if (old_gov) {
1949 policy->governor = old_gov;
1950 __cpufreq_governor(policy,
1951 CPUFREQ_GOV_POLICY_INIT);
1952 __cpufreq_governor(policy,
1953 CPUFREQ_GOV_START);
1954 }
1955 ret = -EINVAL;
1956 goto error_out;
1957 }
1958 /* might be a policy change, too, so fall through */
1959 }
1960 pr_debug("governor: change or update limits\n");
1961 ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
1962 }
1963
1964 error_out:
1965 return ret;
1966 }
1967
1968 /**
1969 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
1970 * @cpu: CPU which shall be re-evaluated
1971 *
1972 * Useful for policy notifiers which have different necessities
1973 * at different times.
1974 */
1975 int cpufreq_update_policy(unsigned int cpu)
1976 {
1977 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1978 struct cpufreq_policy new_policy;
1979 int ret;
1980
1981 if (!policy) {
1982 ret = -ENODEV;
1983 goto no_policy;
1984 }
1985
1986 if (unlikely(lock_policy_rwsem_write(cpu))) {
1987 ret = -EINVAL;
1988 goto fail;
1989 }
1990
1991 pr_debug("updating policy for CPU %u\n", cpu);
1992 memcpy(&new_policy, policy, sizeof(*policy));
1993 new_policy.min = policy->user_policy.min;
1994 new_policy.max = policy->user_policy.max;
1995 new_policy.policy = policy->user_policy.policy;
1996 new_policy.governor = policy->user_policy.governor;
1997
1998 /*
1999 * BIOS might change freq behind our back
2000 * -> ask driver for current freq and notify governors about a change
2001 */
2002 if (cpufreq_driver->get) {
2003 new_policy.cur = cpufreq_driver->get(cpu);
2004 if (!policy->cur) {
2005 pr_debug("Driver did not initialize current freq");
2006 policy->cur = new_policy.cur;
2007 } else {
2008 if (policy->cur != new_policy.cur && cpufreq_driver->target)
2009 cpufreq_out_of_sync(cpu, policy->cur,
2010 new_policy.cur);
2011 }
2012 }
2013
2014 ret = __cpufreq_set_policy(policy, &new_policy);
2015
2016 unlock_policy_rwsem_write(cpu);
2017
2018 fail:
2019 cpufreq_cpu_put(policy);
2020 no_policy:
2021 return ret;
2022 }
2023 EXPORT_SYMBOL(cpufreq_update_policy);
2024
2025 static int cpufreq_cpu_callback(struct notifier_block *nfb,
2026 unsigned long action, void *hcpu)
2027 {
2028 unsigned int cpu = (unsigned long)hcpu;
2029 struct device *dev;
2030 bool frozen = false;
2031
2032 dev = get_cpu_device(cpu);
2033 if (dev) {
2034
2035 if (action & CPU_TASKS_FROZEN)
2036 frozen = true;
2037
2038 switch (action & ~CPU_TASKS_FROZEN) {
2039 case CPU_ONLINE:
2040 __cpufreq_add_dev(dev, NULL, frozen);
2041 cpufreq_update_policy(cpu);
2042 break;
2043
2044 case CPU_DOWN_PREPARE:
2045 __cpufreq_remove_dev_prepare(dev, NULL, frozen);
2046 break;
2047
2048 case CPU_POST_DEAD:
2049 __cpufreq_remove_dev_finish(dev, NULL, frozen);
2050 break;
2051
2052 case CPU_DOWN_FAILED:
2053 __cpufreq_add_dev(dev, NULL, frozen);
2054 break;
2055 }
2056 }
2057 return NOTIFY_OK;
2058 }
2059
2060 static struct notifier_block __refdata cpufreq_cpu_notifier = {
2061 .notifier_call = cpufreq_cpu_callback,
2062 };
2063
2064 /*********************************************************************
2065 * REGISTER / UNREGISTER CPUFREQ DRIVER *
2066 *********************************************************************/
2067
2068 /**
2069 * cpufreq_register_driver - register a CPU Frequency driver
2070 * @driver_data: A struct cpufreq_driver containing the values
2071 * submitted by the CPU Frequency driver.
2072 *
2073 * Registers a CPU Frequency driver to this core code. This code
2074 * returns zero on success, -EBUSY when another driver got here first
2075 * (and isn't unregistered in the meantime).
2076 *
2077 */
2078 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2079 {
2080 unsigned long flags;
2081 int ret;
2082
2083 if (cpufreq_disabled())
2084 return -ENODEV;
2085
2086 if (!driver_data || !driver_data->verify || !driver_data->init ||
2087 ((!driver_data->setpolicy) && (!driver_data->target)))
2088 return -EINVAL;
2089
2090 pr_debug("trying to register driver %s\n", driver_data->name);
2091
2092 if (driver_data->setpolicy)
2093 driver_data->flags |= CPUFREQ_CONST_LOOPS;
2094
2095 write_lock_irqsave(&cpufreq_driver_lock, flags);
2096 if (cpufreq_driver) {
2097 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2098 return -EBUSY;
2099 }
2100 cpufreq_driver = driver_data;
2101 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2102
2103 ret = subsys_interface_register(&cpufreq_interface);
2104 if (ret)
2105 goto err_null_driver;
2106
2107 if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
2108 int i;
2109 ret = -ENODEV;
2110
2111 /* check for at least one working CPU */
2112 for (i = 0; i < nr_cpu_ids; i++)
2113 if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
2114 ret = 0;
2115 break;
2116 }
2117
2118 /* if all ->init() calls failed, unregister */
2119 if (ret) {
2120 pr_debug("no CPU initialized for driver %s\n",
2121 driver_data->name);
2122 goto err_if_unreg;
2123 }
2124 }
2125
2126 register_hotcpu_notifier(&cpufreq_cpu_notifier);
2127 pr_debug("driver %s up and running\n", driver_data->name);
2128
2129 return 0;
2130 err_if_unreg:
2131 subsys_interface_unregister(&cpufreq_interface);
2132 err_null_driver:
2133 write_lock_irqsave(&cpufreq_driver_lock, flags);
2134 cpufreq_driver = NULL;
2135 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2136 return ret;
2137 }
2138 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
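/*
 * Illustrative sketch (not part of this file): a minimal ->target() style
 * driver fills in the mandatory callbacks checked above and registers:
 *
 *	static struct cpufreq_driver my_driver = {
 *		.name	= "mydrv",
 *		.init	= my_init,
 *		.verify	= my_verify,
 *		.target	= my_target,
 *		.get	= my_get,
 *	};
 *
 *	ret = cpufreq_register_driver(&my_driver);
 *
 * All names above are hypothetical placeholders; ->verify() is commonly
 * built on cpufreq_frequency_table_verify() when a frequency table exists.
 */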
2139
2140 /**
2141 * cpufreq_unregister_driver - unregister the current CPUFreq driver
2142 *
2143 * Unregister the current CPUFreq driver. Only call this if you have
2144 * the right to do so, i.e. if you have succeeded in initialising before!
2145 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2146 * currently not initialised.
2147 */
2148 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
2149 {
2150 unsigned long flags;
2151
2152 if (!cpufreq_driver || (driver != cpufreq_driver))
2153 return -EINVAL;
2154
2155 pr_debug("unregistering driver %s\n", driver->name);
2156
2157 subsys_interface_unregister(&cpufreq_interface);
2158 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
2159
2160 down_write(&cpufreq_rwsem);
2161 write_lock_irqsave(&cpufreq_driver_lock, flags);
2162
2163 cpufreq_driver = NULL;
2164
2165 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2166 up_write(&cpufreq_rwsem);
2167
2168 return 0;
2169 }
2170 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
2171
2172 static int __init cpufreq_core_init(void)
2173 {
2174 int cpu;
2175
2176 if (cpufreq_disabled())
2177 return -ENODEV;
2178
2179 for_each_possible_cpu(cpu)
2180 init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
2181
2182 cpufreq_global_kobject = kobject_create();
2183 BUG_ON(!cpufreq_global_kobject);
2184 register_syscore_ops(&cpufreq_syscore_ops);
2185
2186 return 0;
2187 }
2188 core_initcall(cpufreq_core_init);