]> git.proxmox.com Git - mirror_ubuntu-kernels.git/blame - drivers/cpufreq/cpufreq.c
[PATCH] Remove cpu_sys_devices in cpufreq subsystem.
[mirror_ubuntu-kernels.git] / drivers / cpufreq / cpufreq.c
CommitLineData
1da177e4
LT
1/*
2 * linux/drivers/cpufreq/cpufreq.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 */
12
13#include <linux/config.h>
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/init.h>
17#include <linux/notifier.h>
18#include <linux/cpufreq.h>
19#include <linux/delay.h>
20#include <linux/interrupt.h>
21#include <linux/spinlock.h>
22#include <linux/device.h>
23#include <linux/slab.h>
24#include <linux/cpu.h>
25#include <linux/completion.h>
26
/* core debug output, routed through the unified debug helper below */
#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, "cpufreq-core", msg)

/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
/* per-CPU policy pointers, indexed by CPU number; NULL while unmanaged */
static struct cpufreq_policy *cpufreq_cpu_data[NR_CPUS];
static DEFINE_SPINLOCK(cpufreq_driver_lock);
37
38
1da177e4
LT
/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
static void handle_update(void *data);
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci);

/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static struct notifier_block *cpufreq_policy_notifier_list;
static struct notifier_block *cpufreq_transition_notifier_list;
static DECLARE_RWSEM (cpufreq_notifier_rwsem);

/* registered governors; list protected by cpufreq_governor_sem */
static LIST_HEAD(cpufreq_governor_list);
static DECLARE_MUTEX (cpufreq_governor_sem);
58
/**
 * cpufreq_cpu_get - acquire a reference to a CPU's policy
 * @cpu: CPU number
 *
 * Returns the cpufreq_policy for @cpu with its kobject refcount raised
 * and a reference held on the cpufreq driver's module, or NULL if no
 * driver is registered or the CPU is not managed. The caller must drop
 * both references with cpufreq_cpu_put().
 */
struct cpufreq_policy * cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *data;
	unsigned long flags;

	if (cpu >= NR_CPUS)
		goto err_out;

	/* get the cpufreq driver */
	spin_lock_irqsave(&cpufreq_driver_lock, flags);

	if (!cpufreq_driver)
		goto err_out_unlock;

	/* pin the driver module so it cannot be unloaded underneath us */
	if (!try_module_get(cpufreq_driver->owner))
		goto err_out_unlock;


	/* get the CPU */
	data = cpufreq_cpu_data[cpu];

	if (!data)
		goto err_out_put_module;

	/* kobject_get() fails only if the kobject is already being torn down */
	if (!kobject_get(&data->kobj))
		goto err_out_put_module;


	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	return data;

 err_out_put_module:
	module_put(cpufreq_driver->owner);
 err_out_unlock:
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 err_out:
	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
99
/**
 * cpufreq_cpu_put - release the references taken by cpufreq_cpu_get()
 * @data: policy previously returned by cpufreq_cpu_get()
 */
void cpufreq_cpu_put(struct cpufreq_policy *data)
{
	kobject_put(&data->kobj);
	module_put(cpufreq_driver->owner);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
106
107
/*********************************************************************
 *                      UNIFIED DEBUG HELPERS                        *
 *********************************************************************/
#ifdef CONFIG_CPU_FREQ_DEBUG

/* what part(s) of the CPUfreq subsystem are debugged? */
static unsigned int debug;

/* is the debug output ratelimit'ed using printk_ratelimit? User can
 * set or modify this value.
 */
static unsigned int debug_ratelimit = 1;

/* is the printk_ratelimit'ing enabled? It's enabled after a successful
 * loading of a cpufreq driver, temporarily disabled when a new policy
 * is set, and disabled upon cpufreq driver removal
 */
static unsigned int disable_ratelimit = 1;
static DEFINE_SPINLOCK(disable_ratelimit_lock);
127
128static inline void cpufreq_debug_enable_ratelimit(void)
129{
130 unsigned long flags;
131
132 spin_lock_irqsave(&disable_ratelimit_lock, flags);
133 if (disable_ratelimit)
134 disable_ratelimit--;
135 spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
136}
137
138static inline void cpufreq_debug_disable_ratelimit(void)
139{
140 unsigned long flags;
141
142 spin_lock_irqsave(&disable_ratelimit_lock, flags);
143 disable_ratelimit++;
144 spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
145}
146
147void cpufreq_debug_printk(unsigned int type, const char *prefix, const char *fmt, ...)
148{
149 char s[256];
150 va_list args;
151 unsigned int len;
152 unsigned long flags;
153
154 WARN_ON(!prefix);
155 if (type & debug) {
156 spin_lock_irqsave(&disable_ratelimit_lock, flags);
157 if (!disable_ratelimit && debug_ratelimit && !printk_ratelimit()) {
158 spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
159 return;
160 }
161 spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
162
163 len = snprintf(s, 256, KERN_DEBUG "%s: ", prefix);
164
165 va_start(args, fmt);
166 len += vsnprintf(&s[len], (256 - len), fmt, args);
167 va_end(args);
168
169 printk(s);
170
171 WARN_ON(len < 5);
172 }
173}
174EXPORT_SYMBOL(cpufreq_debug_printk);
175
176
/* runtime-tunable debug controls, visible under /sys/module/cpufreq/ */
module_param(debug, uint, 0644);
MODULE_PARM_DESC(debug, "CPUfreq debugging: add 1 to debug core, 2 to debug drivers, and 4 to debug governors.");

module_param(debug_ratelimit, uint, 0644);
MODULE_PARM_DESC(debug_ratelimit, "CPUfreq debugging: set to 0 to disable ratelimiting.");

#else /* !CONFIG_CPU_FREQ_DEBUG */

/* no-op stubs when debugging support is compiled out */
static inline void cpufreq_debug_enable_ratelimit(void) { return; }
static inline void cpufreq_debug_disable_ratelimit(void) { return; }

#endif /* CONFIG_CPU_FREQ_DEBUG */
189
190
/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
#ifndef CONFIG_SMP
static unsigned long l_p_j_ref;		/* loops_per_jiffy at the reference freq */
static unsigned int  l_p_j_ref_freq;	/* reference frequency in kHz (0 = unset) */

static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	/* drivers whose delay loops are frequency-invariant need nothing */
	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	/* latch the first frequency we see as the scaling reference */
	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		dprintk("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
	}
	/* scale up *before* a speed increase, down *after* a decrease, and
	 * unconditionally on suspend/resume transitions */
	if ((val == CPUFREQ_PRECHANGE && ci->old < ci->new) ||
	    (val == CPUFREQ_POSTCHANGE && ci->old > ci->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq, ci->new);
		dprintk("scaling loops_per_jiffy to %lu for frequency %u kHz\n", loops_per_jiffy, ci->new);
	}
}
#else
/* on SMP, loops_per_jiffy is per-CPU and maintained by the architecture */
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) { return; }
#endif
227
228
/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies on frequency transition
 *
 * This function calls the transition notifiers and the "adjust_jiffies" function. It is called
 * twice on all CPU frequency changes that have external effects: once with
 * CPUFREQ_PRECHANGE before the change, once with CPUFREQ_POSTCHANGE after.
 */
void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
{
	/* notifiers may sleep; this must never run with interrupts off */
	BUG_ON(irqs_disabled());

	freqs->flags = cpufreq_driver->flags;
	dprintk("notification %u of frequency transition to %u kHz\n", state, freqs->new);

	down_read(&cpufreq_notifier_rwsem);
	/* NOTE(review): cpufreq_cpu_data is read below without taking
	 * cpufreq_driver_lock — presumably safe because transitions are
	 * serialized against add/remove for the CPU; confirm. */
	switch (state) {
	case CPUFREQ_PRECHANGE:
		/* detect if the driver reported a value as "old frequency" which
		 * is not equal to what the cpufreq core thinks is "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((likely(cpufreq_cpu_data[freqs->cpu])) &&
			    (likely(cpufreq_cpu_data[freqs->cpu]->cpu == freqs->cpu)) &&
			    (likely(cpufreq_cpu_data[freqs->cpu]->cur)) &&
			    (unlikely(freqs->old != cpufreq_cpu_data[freqs->cpu]->cur)))
			{
				dprintk(KERN_WARNING "Warning: CPU frequency is %u, "
					"cpufreq assumed %u kHz.\n", freqs->old, cpufreq_cpu_data[freqs->cpu]->cur);
				/* trust the core's bookkeeping over the driver */
				freqs->old = cpufreq_cpu_data[freqs->cpu]->cur;
			}
		}
		notifier_call_chain(&cpufreq_transition_notifier_list, CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;
	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		notifier_call_chain(&cpufreq_transition_notifier_list, CPUFREQ_POSTCHANGE, freqs);
		/* record the new frequency as the current one */
		if ((likely(cpufreq_cpu_data[freqs->cpu])) &&
		    (likely(cpufreq_cpu_data[freqs->cpu]->cpu == freqs->cpu)))
			cpufreq_cpu_data[freqs->cpu]->cur = freqs->new;
		break;
	}
	up_read(&cpufreq_notifier_rwsem);
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
273
274
275
/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/

/**
 * cpufreq_parse_governor - parse a governor string
 * @str_governor: governor name (typically from userspace via sysfs)
 * @policy: out - CPUFREQ_POLICY_* constant, for ->setpolicy drivers
 * @governor: out - matching registered governor, for ->target drivers
 *
 * Returns 0 on a match, -EINVAL otherwise. Only one of @policy/@governor
 * is written, depending on the driver type.
 */
static int cpufreq_parse_governor (char *str_governor, unsigned int *policy,
				struct cpufreq_governor **governor)
{
	if (!cpufreq_driver)
		return -EINVAL;
	if (cpufreq_driver->setpolicy) {
		/* setpolicy drivers know only the two built-in policies */
		if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_PERFORMANCE;
			return 0;
		} else if (!strnicmp(str_governor, "powersave", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_POWERSAVE;
			return 0;
		}
		return -EINVAL;
	} else {
		struct cpufreq_governor *t;
		down(&cpufreq_governor_sem);
		if (!cpufreq_driver || !cpufreq_driver->target)
			goto out;
		/* case-insensitive lookup among registered governors */
		list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
			if (!strnicmp(str_governor,t->name,CPUFREQ_NAME_LEN)) {
				*governor = t;
				up(&cpufreq_governor_sem);
				return 0;
			}
		}
	out:
		up(&cpufreq_governor_sem);
	}
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(cpufreq_parse_governor);
315
316
/* defined in drivers/base/cpu.c */
extern struct sysdev_class cpu_sysdev_class;
319
320
/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() - print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object) \
static ssize_t show_##file_name \
(struct cpufreq_policy * policy, char *buf) \
{ \
	return sprintf (buf, "%u\n", policy->object); \
}

/* one read-accessor per exported unsigned-int policy field */
show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);
340
/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 *
 * Reads an unsigned int from @buf into a copy of the current policy and
 * submits it via cpufreq_set_policy(); returns @count on success.
 */
#define store_one(file_name, object) \
static ssize_t store_##file_name \
(struct cpufreq_policy * policy, const char *buf, size_t count) \
{ \
	unsigned int ret = -EINVAL; \
	struct cpufreq_policy new_policy; \
	\
	ret = cpufreq_get_policy(&new_policy, policy->cpu); \
	if (ret) \
		return -EINVAL; \
	\
	ret = sscanf (buf, "%u", &new_policy.object); \
	if (ret != 1) \
		return -EINVAL; \
	\
	ret = cpufreq_set_policy(&new_policy); \
	\
	return ret ? ret : count; \
}

store_one(scaling_min_freq,min);
store_one(scaling_max_freq,max);
366
367/**
368 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
369 */
370static ssize_t show_cpuinfo_cur_freq (struct cpufreq_policy * policy, char *buf)
371{
372 unsigned int cur_freq = cpufreq_get(policy->cpu);
373 if (!cur_freq)
374 return sprintf(buf, "<unknown>");
375 return sprintf(buf, "%u\n", cur_freq);
376}
377
378
379/**
380 * show_scaling_governor - show the current policy for the specified CPU
381 */
382static ssize_t show_scaling_governor (struct cpufreq_policy * policy, char *buf)
383{
384 if(policy->policy == CPUFREQ_POLICY_POWERSAVE)
385 return sprintf(buf, "powersave\n");
386 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
387 return sprintf(buf, "performance\n");
388 else if (policy->governor)
389 return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", policy->governor->name);
390 return -EINVAL;
391}
392
393
394/**
395 * store_scaling_governor - store policy for the specified CPU
396 */
397static ssize_t store_scaling_governor (struct cpufreq_policy * policy,
398 const char *buf, size_t count)
399{
400 unsigned int ret = -EINVAL;
401 char str_governor[16];
402 struct cpufreq_policy new_policy;
403
404 ret = cpufreq_get_policy(&new_policy, policy->cpu);
405 if (ret)
406 return ret;
407
408 ret = sscanf (buf, "%15s", str_governor);
409 if (ret != 1)
410 return -EINVAL;
411
412 if (cpufreq_parse_governor(str_governor, &new_policy.policy, &new_policy.governor))
413 return -EINVAL;
414
415 ret = cpufreq_set_policy(&new_policy);
416
417 return ret ? ret : count;
418}
419
/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver (struct cpufreq_policy * policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", cpufreq_driver->name);
}
427
/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors (struct cpufreq_policy * policy,
				char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	/* setpolicy drivers support exactly the two built-in policies */
	if (!cpufreq_driver->target) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	/* NOTE(review): cpufreq_governor_list is walked here without holding
	 * cpufreq_governor_sem — confirm this is safe against concurrent
	 * governor (un)registration. */
	list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
		/* stop before the next name could overflow the sysfs page */
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char)) - (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_LEN, "%s ", t->name);
	}
 out:
	i += sprintf(&buf[i], "\n");
	return i;
}
/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus (struct cpufreq_policy * policy, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu_mask(cpu, policy->cpus) {
		if (i)	/* space-separate all entries after the first */
			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
		/* leave room for the trailing newline and NUL */
		if (i >= (PAGE_SIZE - 5))
			break;
	}
	i += sprintf(&buf[i], "\n");
	return i;
}
469
470
/* helpers declaring struct freq_attr instances with conventional
 * sysfs permissions */
#define define_one_ro(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0444, show_##_name, NULL)

#define define_one_ro0400(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0400, show_##_name, NULL)

#define define_one_rw(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)

define_one_ro0400(cpuinfo_cur_freq);	/* root-only: may poke the hardware */
define_one_ro(cpuinfo_min_freq);
define_one_ro(cpuinfo_max_freq);
define_one_ro(scaling_available_governors);
define_one_ro(scaling_driver);
define_one_ro(scaling_cur_freq);
define_one_ro(affected_cpus);
define_one_rw(scaling_min_freq);
define_one_rw(scaling_max_freq);
define_one_rw(scaling_governor);

/* attributes created for every policy kobject; cpuinfo_cur_freq and
 * scaling_cur_freq are added conditionally in cpufreq_add_dev() */
static struct attribute * default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	NULL
};

/* recover the containing objects from kobject / attribute pointers */
#define to_policy(k) container_of(k,struct cpufreq_policy,kobj)
#define to_attr(a) container_of(a,struct freq_attr,attr)
508
/* sysfs read dispatch: pin the policy, invoke the attribute's ->show,
 * then drop the reference again */
static ssize_t show(struct kobject * kobj, struct attribute * attr ,char * buf)
{
	struct cpufreq_policy * policy = to_policy(kobj);
	struct freq_attr * fattr = to_attr(attr);
	ssize_t ret;
	/* re-acquire through cpufreq_cpu_get() so the policy cannot vanish */
	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		return -EINVAL;
	ret = fattr->show ? fattr->show(policy,buf) : -EIO;
	cpufreq_cpu_put(policy);
	return ret;
}
521
/* sysfs write dispatch: pin the policy, invoke the attribute's ->store,
 * then drop the reference again */
static ssize_t store(struct kobject * kobj, struct attribute * attr,
		     const char * buf, size_t count)
{
	struct cpufreq_policy * policy = to_policy(kobj);
	struct freq_attr * fattr = to_attr(attr);
	ssize_t ret;
	/* re-acquire through cpufreq_cpu_get() so the policy cannot vanish */
	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		return -EINVAL;
	ret = fattr->store ? fattr->store(policy,buf,count) : -EIO;
	cpufreq_cpu_put(policy);
	return ret;
}
535
/* kobject release callback: the last reference is gone — wake whoever
 * is blocked in wait_for_completion() during CPU removal */
static void cpufreq_sysfs_release(struct kobject * kobj)
{
	struct cpufreq_policy * policy = to_policy(kobj);
	dprintk("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}
542
/* glue between the generic sysfs file ops and the freq_attr accessors */
static struct sysfs_ops sysfs_ops = {
	.show = show,
	.store = store,
};

static struct kobj_type ktype_cpufreq = {
	.sysfs_ops = &sysfs_ops,
	.default_attrs = default_attrs,
	.release = cpufreq_sysfs_release,
};
553
554
/**
 * cpufreq_add_dev - add a CPU device
 *
 * Adds the cpufreq interface for a CPU device. Allocates and initializes
 * the policy, calls the driver's ->init(), registers the sysfs kobject
 * and applies the default policy. On SMP, CPUs already managed by a
 * sibling's policy only get a sysfs symlink.
 */
static int cpufreq_add_dev (struct sys_device * sys_dev)
{
	unsigned int cpu = sys_dev->id;
	int ret = 0;
	struct cpufreq_policy new_policy;
	struct cpufreq_policy *policy;
	struct freq_attr **drv_attr;
	unsigned long flags;
	unsigned int j;

	cpufreq_debug_disable_ratelimit();
	dprintk("adding CPU %u\n", cpu);

#ifdef CONFIG_SMP
	/* check whether a different CPU already registered this
	 * CPU because it is in the same boat. */
	policy = cpufreq_cpu_get(cpu);
	if (unlikely(policy)) {
		dprintk("CPU already managed, adding link\n");
		sysfs_create_link(&sys_dev->kobj, &policy->kobj, "cpufreq");
		cpufreq_debug_enable_ratelimit();
		return 0;
	}
#endif

	if (!try_module_get(cpufreq_driver->owner)) {
		ret = -EINVAL;
		goto module_out;
	}

	policy = kmalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
	if (!policy) {
		ret = -ENOMEM;
		goto nomem_out;
	}
	memset(policy, 0, sizeof(struct cpufreq_policy));

	policy->cpu = cpu;
	policy->cpus = cpumask_of_cpu(cpu);

	/* the lock is created *held*; released via up() once setup below is
	 * far enough along for cpufreq_set_policy() to run */
	init_MUTEX_LOCKED(&policy->lock);
	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update, (void *)(long)cpu);

	/* call driver. From then on the cpufreq must be able
	 * to accept all calls to ->verify and ->setpolicy for this CPU
	 */
	ret = cpufreq_driver->init(policy);
	if (ret) {
		dprintk("initialization failed\n");
		goto err_out;
	}

	/* snapshot the driver-initialized limits as the default policy */
	memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));

	/* prepare interface data */
	policy->kobj.parent = &sys_dev->kobj;
	policy->kobj.ktype = &ktype_cpufreq;
	strlcpy(policy->kobj.name, "cpufreq", KOBJ_NAME_LEN);

	ret = kobject_register(&policy->kobj);
	if (ret)
		goto err_out_driver_exit;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while ((drv_attr) && (*drv_attr)) {
		sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		drv_attr++;
	}
	if (cpufreq_driver->get)
		sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
	if (cpufreq_driver->target)
		sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);

	/* publish the policy for every CPU it manages */
	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu_mask(j, policy->cpus)
		cpufreq_cpu_data[j] = policy;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
	policy->governor = NULL; /* to assure that the starting sequence is
				  * run in cpufreq_set_policy */
	up(&policy->lock);

	/* set default policy */

	ret = cpufreq_set_policy(&new_policy);
	if (ret) {
		dprintk("setting policy failed\n");
		goto err_out_unregister;
	}

	module_put(cpufreq_driver->owner);
	dprintk("initialization complete\n");
	cpufreq_debug_enable_ratelimit();

	return 0;


err_out_unregister:
	/* unpublish before tearing the kobject down */
	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu_mask(j, policy->cpus)
		cpufreq_cpu_data[j] = NULL;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	kobject_unregister(&policy->kobj);
	/* kfree() below is only safe once the release callback has run */
	wait_for_completion(&policy->kobj_unregister);

err_out_driver_exit:
	if (cpufreq_driver->exit)
		cpufreq_driver->exit(policy);

err_out:
	kfree(policy);

nomem_out:
	module_put(cpufreq_driver->owner);
module_out:
	cpufreq_debug_enable_ratelimit();
	return ret;
}
680
681
/**
 * cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device. For a CPU that merely
 * links to a sibling's policy only the symlink is removed; for the policy
 * owner the governor is stopped, the kobject unregistered, and the policy
 * freed once the last reference is dropped.
 */
static int cpufreq_remove_dev (struct sys_device * sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long flags;
	struct cpufreq_policy *data;
	struct sys_device *cpu_sys_dev;
#ifdef CONFIG_SMP
	unsigned int j;
#endif

	cpufreq_debug_disable_ratelimit();
	dprintk("unregistering CPU %u\n", cpu);

	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	data = cpufreq_cpu_data[cpu];

	if (!data) {
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		cpufreq_debug_enable_ratelimit();
		return -EINVAL;
	}
	cpufreq_cpu_data[cpu] = NULL;


#ifdef CONFIG_SMP
	/* if this isn't the CPU which is the parent of the kobj, we
	 * only need to unlink, put and exit
	 */
	if (unlikely(cpu != data->cpu)) {
		dprintk("removing link\n");
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		sysfs_remove_link(&sys_dev->kobj, "cpufreq");
		cpufreq_cpu_put(data);
		cpufreq_debug_enable_ratelimit();
		return 0;
	}
#endif


	/* hold an extra ref so the kobject survives until we are done */
	if (!kobject_get(&data->kobj)) {
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		cpufreq_debug_enable_ratelimit();
		return -EFAULT;
	}

#ifdef CONFIG_SMP
	/* if we have other CPUs still registered, we need to unlink them,
	 * or else wait_for_completion below will lock up. Clean the
	 * cpufreq_cpu_data[] while holding the lock, and remove the sysfs
	 * links afterwards.
	 */
	if (unlikely(cpus_weight(data->cpus) > 1)) {
		for_each_cpu_mask(j, data->cpus) {
			if (j == cpu)
				continue;
			cpufreq_cpu_data[j] = NULL;
		}
	}

	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/* second pass, outside the spinlock: sysfs calls may sleep */
	if (unlikely(cpus_weight(data->cpus) > 1)) {
		for_each_cpu_mask(j, data->cpus) {
			if (j == cpu)
				continue;
			dprintk("removing link for cpu %u\n", j);
			cpu_sys_dev = get_cpu_sysdev(j);
			sysfs_remove_link(&cpu_sys_dev->kobj, "cpufreq");
			cpufreq_cpu_put(data);
		}
	}
#else
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
#endif

	down(&data->lock);
	if (cpufreq_driver->target)
		__cpufreq_governor(data, CPUFREQ_GOV_STOP);
	/* NOTE(review): this clears the *global* driver's ->target callback,
	 * not a per-policy field — on systems with multiple policies this
	 * looks like it would disable targeting for all CPUs; confirm intent. */
	cpufreq_driver->target = NULL;
	up(&data->lock);

	kobject_unregister(&data->kobj);

	/* drop the extra reference taken above */
	kobject_put(&data->kobj);

	/* we need to make sure that the underlying kobj is actually
	 * not referenced anymore by anybody before we proceed with
	 * unloading.
	 */
	dprintk("waiting for dropping of refcount\n");
	wait_for_completion(&data->kobj_unregister);
	dprintk("wait complete\n");

	if (cpufreq_driver->exit)
		cpufreq_driver->exit(data);

	kfree(data);

	cpufreq_debug_enable_ratelimit();

	return 0;
}
789
790
/* Deferred-work handler: re-evaluates the policy for the CPU whose
 * number was packed into the work item's data pointer. */
static void handle_update(void *data)
{
	unsigned int target_cpu = (unsigned int)(long)data;

	dprintk("handle_update for cpu %u called\n", target_cpu);
	cpufreq_update_policy(target_cpu);
}
797
/**
 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're in deep trouble.
 * @cpu: cpu number
 * @old_freq: CPU frequency the kernel thinks the CPU runs at
 * @new_freq: CPU frequency the CPU actually runs at
 *
 * We adjust to current frequency first, and need to clean up later. So either call
 * to cpufreq_update_policy() or schedule handle_update()).
 */
static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq, unsigned int new_freq)
{
	struct cpufreq_freqs freqs;

	dprintk(KERN_WARNING "Warning: CPU frequency out of sync: cpufreq and timing "
	       "core thinks of %u, is %u kHz.\n", old_freq, new_freq);

	/* fabricate a transition so notifiees and loops_per_jiffy catch up */
	freqs.cpu = cpu;
	freqs.old = old_freq;
	freqs.new = new_freq;
	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
}
820
821
/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the CPU current (static) CPU frequency. Returns 0 if the CPU is
 * not managed or the driver cannot report a frequency.
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret = 0;

	if (!policy)
		return 0;

	if (!cpufreq_driver->get)
		goto out;

	down(&policy->lock);

	ret = cpufreq_driver->get(cpu);

	if (ret && policy->cur && !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS))
	{
		/* verify no discrepancy between actual and saved value exists */
		if (unlikely(ret != policy->cur)) {
			cpufreq_out_of_sync(cpu, policy->cur, ret);
			/* defer the full policy re-evaluation to process context */
			schedule_work(&policy->update);
		}
	}

	up(&policy->lock);

 out:
	cpufreq_cpu_put(policy);

	return (ret);
}
EXPORT_SYMBOL(cpufreq_get);
860
861
42d4dc3f
BH
862/**
863 * cpufreq_suspend - let the low level driver prepare for suspend
864 */
865
e00d9967 866static int cpufreq_suspend(struct sys_device * sysdev, pm_message_t pmsg)
42d4dc3f
BH
867{
868 int cpu = sysdev->id;
869 unsigned int ret = 0;
870 unsigned int cur_freq = 0;
871 struct cpufreq_policy *cpu_policy;
872
873 dprintk("resuming cpu %u\n", cpu);
874
875 if (!cpu_online(cpu))
876 return 0;
877
878 /* we may be lax here as interrupts are off. Nonetheless
879 * we need to grab the correct cpu policy, as to check
880 * whether we really run on this CPU.
881 */
882
883 cpu_policy = cpufreq_cpu_get(cpu);
884 if (!cpu_policy)
885 return -EINVAL;
886
887 /* only handle each CPU group once */
888 if (unlikely(cpu_policy->cpu != cpu)) {
889 cpufreq_cpu_put(cpu_policy);
890 return 0;
891 }
892
893 if (cpufreq_driver->suspend) {
e00d9967 894 ret = cpufreq_driver->suspend(cpu_policy, pmsg);
42d4dc3f
BH
895 if (ret) {
896 printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
897 "step on CPU %u\n", cpu_policy->cpu);
898 cpufreq_cpu_put(cpu_policy);
899 return ret;
900 }
901 }
902
903
904 if (cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)
905 goto out;
906
907 if (cpufreq_driver->get)
908 cur_freq = cpufreq_driver->get(cpu_policy->cpu);
909
910 if (!cur_freq || !cpu_policy->cur) {
911 printk(KERN_ERR "cpufreq: suspend failed to assert current "
912 "frequency is what timing core thinks it is.\n");
913 goto out;
914 }
915
916 if (unlikely(cur_freq != cpu_policy->cur)) {
917 struct cpufreq_freqs freqs;
918
919 if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN))
78ee998f 920 dprintk(KERN_DEBUG "Warning: CPU frequency is %u, "
42d4dc3f
BH
921 "cpufreq assumed %u kHz.\n",
922 cur_freq, cpu_policy->cur);
923
924 freqs.cpu = cpu;
925 freqs.old = cpu_policy->cur;
926 freqs.new = cur_freq;
927
928 notifier_call_chain(&cpufreq_transition_notifier_list,
929 CPUFREQ_SUSPENDCHANGE, &freqs);
930 adjust_jiffies(CPUFREQ_SUSPENDCHANGE, &freqs);
931
932 cpu_policy->cur = cur_freq;
933 }
934
935 out:
936 cpufreq_cpu_put(cpu_policy);
937 return 0;
938}
939
1da177e4
LT
940/**
941 * cpufreq_resume - restore proper CPU frequency handling after resume
942 *
943 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
944 * 2.) if ->target and !CPUFREQ_CONST_LOOPS: verify we're in sync
42d4dc3f
BH
945 * 3.) schedule call cpufreq_update_policy() ASAP as interrupts are
946 * restored.
1da177e4
LT
947 */
948static int cpufreq_resume(struct sys_device * sysdev)
949{
950 int cpu = sysdev->id;
951 unsigned int ret = 0;
952 struct cpufreq_policy *cpu_policy;
953
954 dprintk("resuming cpu %u\n", cpu);
955
956 if (!cpu_online(cpu))
957 return 0;
958
959 /* we may be lax here as interrupts are off. Nonetheless
960 * we need to grab the correct cpu policy, as to check
961 * whether we really run on this CPU.
962 */
963
964 cpu_policy = cpufreq_cpu_get(cpu);
965 if (!cpu_policy)
966 return -EINVAL;
967
968 /* only handle each CPU group once */
969 if (unlikely(cpu_policy->cpu != cpu)) {
970 cpufreq_cpu_put(cpu_policy);
971 return 0;
972 }
973
974 if (cpufreq_driver->resume) {
975 ret = cpufreq_driver->resume(cpu_policy);
976 if (ret) {
977 printk(KERN_ERR "cpufreq: resume failed in ->resume "
978 "step on CPU %u\n", cpu_policy->cpu);
979 cpufreq_cpu_put(cpu_policy);
980 return ret;
981 }
982 }
983
984 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
985 unsigned int cur_freq = 0;
986
987 if (cpufreq_driver->get)
988 cur_freq = cpufreq_driver->get(cpu_policy->cpu);
989
990 if (!cur_freq || !cpu_policy->cur) {
42d4dc3f
BH
991 printk(KERN_ERR "cpufreq: resume failed to assert "
992 "current frequency is what timing core "
993 "thinks it is.\n");
1da177e4
LT
994 goto out;
995 }
996
997 if (unlikely(cur_freq != cpu_policy->cur)) {
998 struct cpufreq_freqs freqs;
999
ac09f698 1000 if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN))
78ee998f 1001 dprintk(KERN_WARNING "Warning: CPU frequency"
ac09f698
BH
1002 "is %u, cpufreq assumed %u kHz.\n",
1003 cur_freq, cpu_policy->cur);
1da177e4
LT
1004
1005 freqs.cpu = cpu;
1006 freqs.old = cpu_policy->cur;
1007 freqs.new = cur_freq;
1008
42d4dc3f
BH
1009 notifier_call_chain(&cpufreq_transition_notifier_list,
1010 CPUFREQ_RESUMECHANGE, &freqs);
1da177e4
LT
1011 adjust_jiffies(CPUFREQ_RESUMECHANGE, &freqs);
1012
1013 cpu_policy->cur = cur_freq;
1014 }
1015 }
1016
1017out:
1018 schedule_work(&cpu_policy->update);
1019 cpufreq_cpu_put(cpu_policy);
1020 return ret;
1021}
1022
/* glue to the sysdev driver core: invoked for every CPU device on
 * hotplug add/remove and on system suspend/resume */
static struct sysdev_driver cpufreq_sysdev_driver = {
	.add = cpufreq_add_dev,
	.remove = cpufreq_remove_dev,
	.suspend = cpufreq_suspend,
	.resume = cpufreq_resume,
};
1029
1030
1031/*********************************************************************
1032 * NOTIFIER LISTS INTERFACE *
1033 *********************************************************************/
1034
1035/**
1036 * cpufreq_register_notifier - register a driver with cpufreq
1037 * @nb: notifier function to register
1038 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1039 *
1040 * Add a driver to one of two lists: either a list of drivers that
1041 * are notified about clock rate changes (once before and once after
1042 * the transition), or a list of drivers that are notified about
1043 * changes in cpufreq policy.
1044 *
1045 * This function may sleep, and has the same return conditions as
1046 * notifier_chain_register.
1047 */
1048int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1049{
1050 int ret;
1051
1052 down_write(&cpufreq_notifier_rwsem);
1053 switch (list) {
1054 case CPUFREQ_TRANSITION_NOTIFIER:
1055 ret = notifier_chain_register(&cpufreq_transition_notifier_list, nb);
1056 break;
1057 case CPUFREQ_POLICY_NOTIFIER:
1058 ret = notifier_chain_register(&cpufreq_policy_notifier_list, nb);
1059 break;
1060 default:
1061 ret = -EINVAL;
1062 }
1063 up_write(&cpufreq_notifier_rwsem);
1064
1065 return ret;
1066}
1067EXPORT_SYMBOL(cpufreq_register_notifier);
1068
1069
1070/**
1071 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1072 * @nb: notifier block to be unregistered
1073 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1074 *
1075 * Remove a driver from the CPU frequency notifier list.
1076 *
1077 * This function may sleep, and has the same return conditions as
1078 * notifier_chain_unregister.
1079 */
1080int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1081{
1082 int ret;
1083
1084 down_write(&cpufreq_notifier_rwsem);
1085 switch (list) {
1086 case CPUFREQ_TRANSITION_NOTIFIER:
1087 ret = notifier_chain_unregister(&cpufreq_transition_notifier_list, nb);
1088 break;
1089 case CPUFREQ_POLICY_NOTIFIER:
1090 ret = notifier_chain_unregister(&cpufreq_policy_notifier_list, nb);
1091 break;
1092 default:
1093 ret = -EINVAL;
1094 }
1095 up_write(&cpufreq_notifier_rwsem);
1096
1097 return ret;
1098}
1099EXPORT_SYMBOL(cpufreq_unregister_notifier);
1100
1101
1102/*********************************************************************
1103 * GOVERNORS *
1104 *********************************************************************/
1105
1106
1107int __cpufreq_driver_target(struct cpufreq_policy *policy,
1108 unsigned int target_freq,
1109 unsigned int relation)
1110{
1111 int retval = -EINVAL;
1112 lock_cpu_hotplug();
1113 dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu,
1114 target_freq, relation);
1115 if (cpu_online(policy->cpu) && cpufreq_driver->target)
1116 retval = cpufreq_driver->target(policy, target_freq, relation);
1117 unlock_cpu_hotplug();
1118 return retval;
1119}
1120EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1121
1122
1123int cpufreq_driver_target(struct cpufreq_policy *policy,
1124 unsigned int target_freq,
1125 unsigned int relation)
1126{
cc993cab 1127 int ret;
1da177e4
LT
1128
1129 policy = cpufreq_cpu_get(policy->cpu);
1130 if (!policy)
1131 return -EINVAL;
1132
1133 down(&policy->lock);
1134
1135 ret = __cpufreq_driver_target(policy, target_freq, relation);
1136
1137 up(&policy->lock);
1138
1139 cpufreq_cpu_put(policy);
1140
1141 return ret;
1142}
1143EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1144
1145
1146static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
1147{
cc993cab 1148 int ret;
1da177e4
LT
1149
1150 if (!try_module_get(policy->governor->owner))
1151 return -EINVAL;
1152
1153 dprintk("__cpufreq_governor for CPU %u, event %u\n", policy->cpu, event);
1154 ret = policy->governor->governor(policy, event);
1155
1156 /* we keep one module reference alive for each CPU governed by this CPU */
1157 if ((event != CPUFREQ_GOV_START) || ret)
1158 module_put(policy->governor->owner);
1159 if ((event == CPUFREQ_GOV_STOP) && !ret)
1160 module_put(policy->governor->owner);
1161
1162 return ret;
1163}
1164
1165
1166int cpufreq_governor(unsigned int cpu, unsigned int event)
1167{
1168 int ret = 0;
1169 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1170
1171 if (!policy)
1172 return -EINVAL;
1173
1174 down(&policy->lock);
1175 ret = __cpufreq_governor(policy, event);
1176 up(&policy->lock);
1177
1178 cpufreq_cpu_put(policy);
1179
1180 return ret;
1181}
1182EXPORT_SYMBOL_GPL(cpufreq_governor);
1183
1184
1185int cpufreq_register_governor(struct cpufreq_governor *governor)
1186{
1187 struct cpufreq_governor *t;
1188
1189 if (!governor)
1190 return -EINVAL;
1191
1192 down(&cpufreq_governor_sem);
1193
1194 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
1195 if (!strnicmp(governor->name,t->name,CPUFREQ_NAME_LEN)) {
1196 up(&cpufreq_governor_sem);
1197 return -EBUSY;
1198 }
1199 }
1200 list_add(&governor->governor_list, &cpufreq_governor_list);
1201
1202 up(&cpufreq_governor_sem);
1203
1204 return 0;
1205}
1206EXPORT_SYMBOL_GPL(cpufreq_register_governor);
1207
1208
1209void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1210{
1211 if (!governor)
1212 return;
1213
1214 down(&cpufreq_governor_sem);
1215 list_del(&governor->governor_list);
1216 up(&cpufreq_governor_sem);
1217 return;
1218}
1219EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1220
1221
1222
1223/*********************************************************************
1224 * POLICY INTERFACE *
1225 *********************************************************************/
1226
1227/**
1228 * cpufreq_get_policy - get the current cpufreq_policy
1229 * @policy: struct cpufreq_policy into which the current cpufreq_policy is written
1230 *
1231 * Reads the current cpufreq policy.
1232 */
1233int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1234{
1235 struct cpufreq_policy *cpu_policy;
1236 if (!policy)
1237 return -EINVAL;
1238
1239 cpu_policy = cpufreq_cpu_get(cpu);
1240 if (!cpu_policy)
1241 return -EINVAL;
1242
1243 down(&cpu_policy->lock);
1244 memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
1245 up(&cpu_policy->lock);
1246
1247 cpufreq_cpu_put(cpu_policy);
1248
1249 return 0;
1250}
1251EXPORT_SYMBOL(cpufreq_get_policy);
1252
1253
/*
 * __cpufreq_set_policy - apply a new policy to a managed CPU
 * @data:   the policy currently in force for the CPU (caller holds data->lock)
 * @policy: the requested new limits / policy / governor
 *
 * Verifies the request against driver limits, lets policy notifiers
 * adjust it, then either hands it to the driver (->setpolicy) or
 * (re)starts the governor.  Returns 0 on success, negative errno on
 * failure; on a failed governor switch the old governor is restarted.
 */
static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_policy *policy)
{
	int ret = 0;

	cpufreq_debug_disable_ratelimit();
	dprintk("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
		policy->min, policy->max);

	/* the hardware limits (cpuinfo) always come from the policy in force */
	memcpy(&policy->cpuinfo,
	       &data->cpuinfo,
	       sizeof(struct cpufreq_cpuinfo));

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	down_read(&cpufreq_notifier_rwsem);

	/* adjust if necessary - all reasons */
	notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_ADJUST,
			    policy);

	/* adjust if necessary - hardware incompatibility */
	notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_INCOMPATIBLE,
			    policy);

	/* verify the cpu speed can be set within this limit,
	   which might be different to the first one now that the
	   notifiers had their chance to adjust it */
	ret = cpufreq_driver->verify(policy);
	if (ret) {
		up_read(&cpufreq_notifier_rwsem);
		goto error_out;
	}

	/* notification of the new policy */
	notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_NOTIFY,
			    policy);

	up_read(&cpufreq_notifier_rwsem);

	data->min = policy->min;
	data->max = policy->max;

	dprintk("new min and max freqs are %u - %u kHz\n", data->min, data->max);

	if (cpufreq_driver->setpolicy) {
		/* driver implements the policy itself - no governor involved */
		data->policy = policy->policy;
		dprintk("setting range\n");
		ret = cpufreq_driver->setpolicy(policy);
	} else {
		if (policy->governor != data->governor) {
			/* save old, working values */
			struct cpufreq_governor *old_gov = data->governor;

			dprintk("governor switch\n");

			/* end old governor */
			if (data->governor)
				__cpufreq_governor(data, CPUFREQ_GOV_STOP);

			/* start new governor */
			data->governor = policy->governor;
			if (__cpufreq_governor(data, CPUFREQ_GOV_START)) {
				/* new governor failed, so re-start old one */
				dprintk("starting governor %s failed\n", data->governor->name);
				if (old_gov) {
					data->governor = old_gov;
					__cpufreq_governor(data, CPUFREQ_GOV_START);
				}
				ret = -EINVAL;
				goto error_out;
			}
			/* might be a policy change, too, so fall through */
		}
		dprintk("governor: change or update limits\n");
		__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
	}

error_out:
	cpufreq_debug_enable_ratelimit();
	return ret;
}
1337
1338/**
1339 * cpufreq_set_policy - set a new CPUFreq policy
1340 * @policy: policy to be set.
1341 *
1342 * Sets a new CPU frequency and voltage scaling policy.
1343 */
1344int cpufreq_set_policy(struct cpufreq_policy *policy)
1345{
1346 int ret = 0;
1347 struct cpufreq_policy *data;
1348
1349 if (!policy)
1350 return -EINVAL;
1351
1352 data = cpufreq_cpu_get(policy->cpu);
1353 if (!data)
1354 return -EINVAL;
1355
1356 /* lock this CPU */
1357 down(&data->lock);
1358
1359 ret = __cpufreq_set_policy(data, policy);
1360 data->user_policy.min = data->min;
1361 data->user_policy.max = data->max;
1362 data->user_policy.policy = data->policy;
1363 data->user_policy.governor = data->governor;
1364
1365 up(&data->lock);
1366 cpufreq_cpu_put(data);
1367
1368 return ret;
1369}
1370EXPORT_SYMBOL(cpufreq_set_policy);
1371
1372
1373/**
1374 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
1375 * @cpu: CPU which shall be re-evaluated
1376 *
1377 * Usefull for policy notifiers which have different necessities
1378 * at different times.
1379 */
1380int cpufreq_update_policy(unsigned int cpu)
1381{
1382 struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
1383 struct cpufreq_policy policy;
1384 int ret = 0;
1385
1386 if (!data)
1387 return -ENODEV;
1388
1389 down(&data->lock);
1390
1391 dprintk("updating policy for CPU %u\n", cpu);
1392 memcpy(&policy,
1393 data,
1394 sizeof(struct cpufreq_policy));
1395 policy.min = data->user_policy.min;
1396 policy.max = data->user_policy.max;
1397 policy.policy = data->user_policy.policy;
1398 policy.governor = data->user_policy.governor;
1399
1400 ret = __cpufreq_set_policy(data, &policy);
1401
1402 up(&data->lock);
1403
1404 cpufreq_cpu_put(data);
1405 return ret;
1406}
1407EXPORT_SYMBOL(cpufreq_update_policy);
1408
1409
1410/*********************************************************************
1411 * REGISTER / UNREGISTER CPUFREQ DRIVER *
1412 *********************************************************************/
1413
1414/**
1415 * cpufreq_register_driver - register a CPU Frequency driver
1416 * @driver_data: A struct cpufreq_driver containing the values#
1417 * submitted by the CPU Frequency driver.
1418 *
1419 * Registers a CPU Frequency driver to this core code. This code
1420 * returns zero on success, -EBUSY when another driver got here first
1421 * (and isn't unregistered in the meantime).
1422 *
1423 */
1424int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1425{
1426 unsigned long flags;
1427 int ret;
1428
1429 if (!driver_data || !driver_data->verify || !driver_data->init ||
1430 ((!driver_data->setpolicy) && (!driver_data->target)))
1431 return -EINVAL;
1432
1433 dprintk("trying to register driver %s\n", driver_data->name);
1434
1435 if (driver_data->setpolicy)
1436 driver_data->flags |= CPUFREQ_CONST_LOOPS;
1437
1438 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1439 if (cpufreq_driver) {
1440 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1441 return -EBUSY;
1442 }
1443 cpufreq_driver = driver_data;
1444 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1445
1446 ret = sysdev_driver_register(&cpu_sysdev_class,&cpufreq_sysdev_driver);
1447
1448 if ((!ret) && !(cpufreq_driver->flags & CPUFREQ_STICKY)) {
1449 int i;
1450 ret = -ENODEV;
1451
1452 /* check for at least one working CPU */
1453 for (i=0; i<NR_CPUS; i++)
1454 if (cpufreq_cpu_data[i])
1455 ret = 0;
1456
1457 /* if all ->init() calls failed, unregister */
1458 if (ret) {
1459 dprintk("no CPU initialized for driver %s\n", driver_data->name);
1460 sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver);
1461
1462 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1463 cpufreq_driver = NULL;
1464 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1465 }
1466 }
1467
1468 if (!ret) {
1469 dprintk("driver %s up and running\n", driver_data->name);
1470 cpufreq_debug_enable_ratelimit();
1471 }
1472
1473 return (ret);
1474}
1475EXPORT_SYMBOL_GPL(cpufreq_register_driver);
1476
1477
1478/**
1479 * cpufreq_unregister_driver - unregister the current CPUFreq driver
1480 *
1481 * Unregister the current CPUFreq driver. Only call this if you have
1482 * the right to do so, i.e. if you have succeeded in initialising before!
1483 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
1484 * currently not initialised.
1485 */
1486int cpufreq_unregister_driver(struct cpufreq_driver *driver)
1487{
1488 unsigned long flags;
1489
1490 cpufreq_debug_disable_ratelimit();
1491
1492 if (!cpufreq_driver || (driver != cpufreq_driver)) {
1493 cpufreq_debug_enable_ratelimit();
1494 return -EINVAL;
1495 }
1496
1497 dprintk("unregistering driver %s\n", driver->name);
1498
1499 sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver);
1500
1501 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1502 cpufreq_driver = NULL;
1503 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1504
1505 return 0;
1506}
1507EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);