/*
 * cpuidle.c - core cpuidle infrastructure
 *
 * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *               Shaohua Li <shaohua.li@intel.com>
 *               Adam Belay <abelay@novell.com>
 *
 * This code is licenced under the GPL.
 */

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <trace/events/power.h>

#include "cpuidle.h"

DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);

DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);

static int enabled_devices;
static int off __read_mostly;
static int initialized __read_mostly;

int cpuidle_disabled(void)
{
	return off;
}

void disable_cpuidle(void)
{
	off = 1;
}

#if defined(CONFIG_ARCH_HAS_CPU_IDLE_WAIT)
static void cpuidle_kick_cpus(void)
{
	cpu_idle_wait();
}
#elif defined(CONFIG_SMP)
# error "Arch needs cpu_idle_wait() equivalent here"
#else /* !CONFIG_ARCH_HAS_CPU_IDLE_WAIT && !CONFIG_SMP */
static void cpuidle_kick_cpus(void) {}
#endif

static int __cpuidle_register_device(struct cpuidle_device *dev);

static inline int cpuidle_enter(struct cpuidle_device *dev,
				struct cpuidle_driver *drv, int index)
{
	struct cpuidle_state *target_state = &drv->states[index];
	return target_state->enter(dev, drv, index);
}

static inline int cpuidle_enter_tk(struct cpuidle_device *dev,
				struct cpuidle_driver *drv, int index)
{
	return cpuidle_wrap_enter(dev, drv, index, cpuidle_enter);
}

typedef int (*cpuidle_enter_t)(struct cpuidle_device *dev,
			       struct cpuidle_driver *drv, int index);

static cpuidle_enter_t cpuidle_enter_ops;

/**
 * cpuidle_idle_call - the main idle loop
 *
 * NOTE: no locks or semaphores should be used here
 * return non-zero on failure
 */
int cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_driver();
	int next_state, entered_state;

	if (off)
		return -ENODEV;

	if (!initialized)
		return -ENODEV;

	/* check if the device is ready */
	if (!dev || !dev->enabled)
		return -EBUSY;

#if 0
	/* shows regressions, re-enable for 2.6.29 */
	/*
	 * run any timers that can be run now, at this point
	 * before calculating the idle duration etc.
	 */
	hrtimer_peek_ahead_timers();
#endif

	/* ask the governor for the next state */
	next_state = cpuidle_curr_governor->select(drv, dev);
	if (need_resched()) {
		local_irq_enable();
		return 0;
	}

	trace_power_start(POWER_CSTATE, next_state, dev->cpu);
	trace_cpu_idle(next_state, dev->cpu);

	entered_state = cpuidle_enter_ops(dev, drv, next_state);

	trace_power_end(dev->cpu);
	trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu);

	if (entered_state >= 0) {
		/* Update cpuidle counters */
		/* This can be moved to within driver enter routine
		 * but that results in multiple copies of same code.
		 */
		dev->states_usage[entered_state].time +=
			(unsigned long long)dev->last_residency;
		dev->states_usage[entered_state].usage++;
	} else {
		dev->last_residency = 0;
	}

	/* give the governor an opportunity to reflect on the outcome */
	if (cpuidle_curr_governor->reflect)
		cpuidle_curr_governor->reflect(dev, entered_state);

	return 0;
}

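/*
 * Illustrative sketch (not part of this file): in this kernel era an
 * architecture's idle loop typically tries cpuidle first and falls
 * back to its own default idle routine on failure, e.g.:
 *
 *	if (cpuidle_idle_call())
 *		arch_default_idle();
 *
 * where arch_default_idle() is a hypothetical stand-in for the
 * architecture's native low-power wait.
 */
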
/**
 * cpuidle_install_idle_handler - installs the cpuidle idle loop handler
 */
void cpuidle_install_idle_handler(void)
{
	if (enabled_devices) {
		/* Make sure all changes finished before we switch to new idle */
		smp_wmb();
		initialized = 1;
	}
}

/**
 * cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler
 */
void cpuidle_uninstall_idle_handler(void)
{
	if (enabled_devices) {
		initialized = 0;
		cpuidle_kick_cpus();
	}
}

/**
 * cpuidle_pause_and_lock - temporarily disables CPUIDLE
 */
void cpuidle_pause_and_lock(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_uninstall_idle_handler();
}

EXPORT_SYMBOL_GPL(cpuidle_pause_and_lock);

/**
 * cpuidle_resume_and_unlock - resumes CPUIDLE operation
 */
void cpuidle_resume_and_unlock(void)
{
	cpuidle_install_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);
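
/*
 * Usage sketch (illustrative): external callers that enable or disable
 * a device are expected to bracket the operation with this pair, e.g.:
 *
 *	cpuidle_pause_and_lock();
 *	cpuidle_disable_device(dev);
 *	... reconfigure the device or driver state ...
 *	cpuidle_enable_device(dev);
 *	cpuidle_resume_and_unlock();
 *
 * This matches the locking contract documented on
 * cpuidle_enable_device() and cpuidle_disable_device() below.
 */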

/**
 * cpuidle_wrap_enter - performs timekeeping and irqen around enter function
 * @dev: pointer to a valid cpuidle_device object
 * @drv: pointer to a valid cpuidle_driver object
 * @index: index of the target cpuidle state.
 */
int cpuidle_wrap_enter(struct cpuidle_device *dev,
				struct cpuidle_driver *drv, int index,
				int (*enter)(struct cpuidle_device *dev,
					struct cpuidle_driver *drv, int index))
{
	ktime_t time_start, time_end;
	s64 diff;

	time_start = ktime_get();

	index = enter(dev, drv, index);

	time_end = ktime_get();

	local_irq_enable();

	diff = ktime_to_us(ktime_sub(time_end, time_start));
	if (diff > INT_MAX)
		diff = INT_MAX;

	dev->last_residency = (int) diff;

	return index;
}

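/*
 * Illustrative note: a driver that sets drv->en_core_tk_irqen delegates
 * both residency timekeeping and the final local_irq_enable() to this
 * wrapper, so its per-state ->enter callbacks can stay minimal, roughly:
 *
 *	static int my_enter(struct cpuidle_device *dev,
 *			    struct cpuidle_driver *drv, int index)
 *	{
 *		my_hw_idle(index);
 *		return index;
 *	}
 *
 * my_enter() and my_hw_idle() are hypothetical names, shown only to
 * sketch the division of labour.
 */
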
#ifdef CONFIG_ARCH_HAS_CPU_RELAX
static int poll_idle(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	ktime_t t1, t2;
	s64 diff;

	t1 = ktime_get();
	local_irq_enable();
	while (!need_resched())
		cpu_relax();

	t2 = ktime_get();
	diff = ktime_to_us(ktime_sub(t2, t1));
	if (diff > INT_MAX)
		diff = INT_MAX;

	dev->last_residency = (int) diff;

	return index;
}

static void poll_idle_init(struct cpuidle_driver *drv)
{
	struct cpuidle_state *state = &drv->states[0];

	snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
	snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
	state->exit_latency = 0;
	state->target_residency = 0;
	state->power_usage = -1;
	state->flags = 0;
	state->enter = poll_idle;
	state->disable = 0;
}
#else
static void poll_idle_init(struct cpuidle_driver *drv) {}
#endif /* CONFIG_ARCH_HAS_CPU_RELAX */

/**
 * cpuidle_enable_device - enables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
int cpuidle_enable_device(struct cpuidle_device *dev)
{
	int ret, i;
	struct cpuidle_driver *drv = cpuidle_get_driver();

	if (dev->enabled)
		return 0;
	if (!drv || !cpuidle_curr_governor)
		return -EIO;
	if (!dev->state_count)
		dev->state_count = drv->state_count;

	if (dev->registered == 0) {
		ret = __cpuidle_register_device(dev);
		if (ret)
			return ret;
	}

	cpuidle_enter_ops = drv->en_core_tk_irqen ?
		cpuidle_enter_tk : cpuidle_enter;

	poll_idle_init(drv);

	if ((ret = cpuidle_add_state_sysfs(dev)))
		return ret;

	if (cpuidle_curr_governor->enable &&
	    (ret = cpuidle_curr_governor->enable(drv, dev)))
		goto fail_sysfs;

	for (i = 0; i < dev->state_count; i++) {
		dev->states_usage[i].usage = 0;
		dev->states_usage[i].time = 0;
	}
	dev->last_residency = 0;

	smp_wmb();

	dev->enabled = 1;

	enabled_devices++;
	return 0;

fail_sysfs:
	cpuidle_remove_state_sysfs(dev);

	return ret;
}

EXPORT_SYMBOL_GPL(cpuidle_enable_device);

/**
 * cpuidle_disable_device - disables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
void cpuidle_disable_device(struct cpuidle_device *dev)
{
	if (!dev->enabled)
		return;
	if (!cpuidle_get_driver() || !cpuidle_curr_governor)
		return;

	dev->enabled = 0;

	if (cpuidle_curr_governor->disable)
		cpuidle_curr_governor->disable(cpuidle_get_driver(), dev);

	cpuidle_remove_state_sysfs(dev);
	enabled_devices--;
}

EXPORT_SYMBOL_GPL(cpuidle_disable_device);

/**
 * __cpuidle_register_device - internal register function called before register
 * and enable routines
 * @dev: the cpu
 *
 * cpuidle_lock mutex must be held before this is called
 */
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret;
	struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu);
	struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();

	if (!dev)
		return -EINVAL;
	if (!try_module_get(cpuidle_driver->owner))
		return -EINVAL;

	init_completion(&dev->kobj_unregister);

	per_cpu(cpuidle_devices, dev->cpu) = dev;
	list_add(&dev->device_list, &cpuidle_detected_devices);
	if ((ret = cpuidle_add_sysfs(cpu_dev))) {
		module_put(cpuidle_driver->owner);
		return ret;
	}

	dev->registered = 1;
	return 0;
}

/**
 * cpuidle_register_device - registers a CPU's idle PM feature
 * @dev: the cpu
 */
int cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret;

	mutex_lock(&cpuidle_lock);

	if ((ret = __cpuidle_register_device(dev))) {
		mutex_unlock(&cpuidle_lock);
		return ret;
	}

	cpuidle_enable_device(dev);
	cpuidle_install_idle_handler();

	mutex_unlock(&cpuidle_lock);

	return 0;

}

EXPORT_SYMBOL_GPL(cpuidle_register_device);
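
/*
 * Registration sketch (illustrative): a platform idle driver typically
 * registers its cpuidle_driver once, then one cpuidle_device per CPU:
 *
 *	cpuidle_register_driver(&my_idle_driver);
 *	for_each_possible_cpu(cpu) {
 *		struct cpuidle_device *dev = &per_cpu(my_idle_dev, cpu);
 *
 *		dev->cpu = cpu;
 *		cpuidle_register_device(dev);
 *	}
 *
 * my_idle_driver and my_idle_dev are hypothetical names; error handling
 * is omitted for brevity.
 */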

/**
 * cpuidle_unregister_device - unregisters a CPU's idle PM feature
 * @dev: the cpu
 */
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
	struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu);
	struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();

	if (dev->registered == 0)
		return;

	cpuidle_pause_and_lock();

	cpuidle_disable_device(dev);

	cpuidle_remove_sysfs(cpu_dev);
	list_del(&dev->device_list);
	wait_for_completion(&dev->kobj_unregister);
	per_cpu(cpuidle_devices, dev->cpu) = NULL;

	cpuidle_resume_and_unlock();

	module_put(cpuidle_driver->owner);
}

EXPORT_SYMBOL_GPL(cpuidle_unregister_device);

#ifdef CONFIG_SMP

static void smp_callback(void *v)
{
	/* we already woke the CPU up, nothing more to do */
}

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement. This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int cpuidle_latency_notify(struct notifier_block *b,
		unsigned long l, void *v)
{
	smp_call_function(smp_callback, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block cpuidle_latency_notifier = {
	.notifier_call = cpuidle_latency_notify,
};

static inline void latency_notifier_init(struct notifier_block *n)
{
	pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, n);
}
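
/*
 * Example (illustrative): the notifier above fires whenever a kernel
 * subsystem changes its CPU latency bound through PM QoS, e.g.:
 *
 *	struct pm_qos_request my_qos;
 *
 *	pm_qos_add_request(&my_qos, PM_QOS_CPU_DMA_LATENCY, 50);
 *	...
 *	pm_qos_remove_request(&my_qos);
 *
 * my_qos is a hypothetical request; adding or updating it kicks every
 * CPU out of its current C-state so a state honouring the new bound
 * can be selected on the next idle entry.
 */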

#else /* CONFIG_SMP */

#define latency_notifier_init(x) do { } while (0)

#endif /* CONFIG_SMP */

/**
 * cpuidle_init - core initializer
 */
static int __init cpuidle_init(void)
{
	int ret;

	if (cpuidle_disabled())
		return -ENODEV;

	ret = cpuidle_add_interface(cpu_subsys.dev_root);
	if (ret)
		return ret;

	latency_notifier_init(&cpuidle_latency_notifier);

	return 0;
}

module_param(off, int, 0444);
core_initcall(cpuidle_init);