/*
 * cpuidle.c - core cpuidle infrastructure
 *
 * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *               Shaohua Li <shaohua.li@intel.com>
 *               Adam Belay <abelay@novell.com>
 *
 * This code is licenced under the GPL.
 */
#include <linux/clockchips.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/tick.h>
#include <trace/events/power.h>

#include "cpuidle.h"

DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
DEFINE_PER_CPU(struct cpuidle_device, cpuidle_dev);

DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);

static int enabled_devices;
static int off __read_mostly;
static int initialized __read_mostly;

int cpuidle_disabled(void)
{
	return off;
}
void disable_cpuidle(void)
{
	off = 1;
}

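/*
 * cpuidle_not_available - check whether cpuidle can be used right now
 *
 * True if cpuidle is globally disabled, not initialized yet, or if there
 * is no driver or no enabled cpuidle device for this CPU.
 */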
static bool cpuidle_not_available(struct cpuidle_driver *drv,
				  struct cpuidle_device *dev)
{
	return off || !initialized || !drv || !dev || !dev->enabled;
}

/**
 * cpuidle_play_dead - cpu off-lining
 *
 * Returns only in case of an error or if there is no driver.
 */
int cpuidle_play_dead(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	int i;

	if (!drv)
		return -ENODEV;

	/* Find lowest-power state that supports long-term idle */
	for (i = drv->state_count - 1; i >= CPUIDLE_DRIVER_STATE_START; i--)
		if (drv->states[i].enter_dead)
			return drv->states[i].enter_dead(dev, i);

	return -ENODEV;
}

/**
 * cpuidle_find_deepest_state - Find deepest state meeting specific conditions.
 * @drv: cpuidle driver for the given CPU.
 * @dev: cpuidle device for the given CPU.
 * @freeze: Whether or not the state should be suitable for suspend-to-idle.
 */
static int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
				      struct cpuidle_device *dev, bool freeze)
{
	unsigned int latency_req = 0;
	int i, ret = freeze ? -1 : CPUIDLE_DRIVER_STATE_START - 1;

	for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];
		struct cpuidle_state_usage *su = &dev->states_usage[i];

		if (s->disabled || su->disable || s->exit_latency <= latency_req
		    || (freeze && !s->enter_freeze))
			continue;

		latency_req = s->exit_latency;
		ret = i;
	}
	return ret;
}

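/*
 * enter_freeze_proper - enter the state at @index with the tick frozen
 *
 * The caller must have verified that drv->states[index].enter_freeze is
 * set; it is invoked between tick_freeze() and tick_unfreeze() so that
 * timekeeping stays suspended for the whole stay in the idle state.
 */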
static void enter_freeze_proper(struct cpuidle_driver *drv,
				struct cpuidle_device *dev, int index)
{
	tick_freeze();
	/*
	 * The state used here cannot be a "coupled" one, because the "coupled"
	 * cpuidle mechanism enables interrupts and doing that with timekeeping
	 * suspended is generally unsafe.
	 */
	drv->states[index].enter_freeze(dev, drv, index);
	WARN_ON(!irqs_disabled());
	/*
	 * timekeeping_resume() will be called by tick_unfreeze() on the last
	 * CPU executing it, and it calls functions containing RCU read-side
	 * critical sections, so tell RCU about that.
	 */
	RCU_NONIDLE(tick_unfreeze());
}

/**
 * cpuidle_enter_freeze - Enter an idle state suitable for suspend-to-idle.
 *
 * If there are states with the ->enter_freeze callback, find the deepest of
 * them and enter it with frozen tick. Otherwise, find the deepest state
 * available and enter it normally.
 *
 * Returns with enabled interrupts.
 */
void cpuidle_enter_freeze(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	int index;

	if (cpuidle_not_available(drv, dev))
		goto fallback;

	/*
	 * Find the deepest state with ->enter_freeze present, which guarantees
	 * that interrupts won't be enabled when it exits and allows the tick
	 * to be frozen safely.
	 */
	index = cpuidle_find_deepest_state(drv, dev, true);
	if (index >= 0) {
		enter_freeze_proper(drv, dev, index);
		local_irq_enable();
		return;
	}

	/*
	 * It is not safe to freeze the tick, so find the deepest state
	 * available at all and try to enter it normally.
	 */
	index = cpuidle_find_deepest_state(drv, dev, false);
	if (index >= 0) {
		cpuidle_enter(drv, dev, index);
		return;
	}

fallback:
	arch_cpu_idle();
}

/**
 * cpuidle_enter_state - enter the state and update stats
 * @dev: cpuidle device for this cpu
 * @drv: cpuidle driver for this cpu
 * @index: index into drv->states of the state to enter
 */
int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
			int index)
{
	int entered_state;

	struct cpuidle_state *target_state = &drv->states[index];
	ktime_t time_start, time_end;
	s64 diff;

	trace_cpu_idle_rcuidle(index, dev->cpu);
	time_start = ktime_get();

	entered_state = target_state->enter(dev, drv, index);

	time_end = ktime_get();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);

	if (!cpuidle_state_is_coupled(dev, drv, entered_state))
		local_irq_enable();

	diff = ktime_to_us(ktime_sub(time_end, time_start));
	if (diff > INT_MAX)
		diff = INT_MAX;

	dev->last_residency = (int) diff;

	if (entered_state >= 0) {
		/*
		 * Update cpuidle counters.  This could be moved into the
		 * driver enter routine, but that would result in multiple
		 * copies of the same code.
		 */
		dev->states_usage[entered_state].time += dev->last_residency;
		dev->states_usage[entered_state].usage++;
	} else {
		dev->last_residency = 0;
	}

	return entered_state;
}

/**
 * cpuidle_select - ask the cpuidle framework to choose an idle state
 *
 * @drv: the cpuidle driver
 * @dev: the cpuidle device
 *
 * Returns the index of the chosen idle state, or -ENODEV if cpuidle is
 * not available.
 */
int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	if (cpuidle_not_available(drv, dev))
		return -ENODEV;

	return cpuidle_curr_governor->select(drv, dev);
}

/**
 * cpuidle_enter - enter into the specified idle state
 *
 * @drv:   the cpuidle driver tied with the cpu
 * @dev:   the cpuidle device
 * @index: the index in the idle state table
 *
 * Returns the index in the idle state table, < 0 in case of error.
 * The error code depends on the backend driver.
 */
int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev,
		  int index)
{
	if (cpuidle_state_is_coupled(dev, drv, index))
		return cpuidle_enter_state_coupled(dev, drv, index);
	return cpuidle_enter_state(dev, drv, index);
}

/**
 * cpuidle_reflect - tell the underlying governor what idle state was
 * actually entered
 *
 * @dev:   the cpuidle device
 * @index: the index in the idle state table
 */
void cpuidle_reflect(struct cpuidle_device *dev, int index)
{
	if (cpuidle_curr_governor->reflect)
		cpuidle_curr_governor->reflect(dev, index);
}

/**
 * cpuidle_install_idle_handler - installs the cpuidle idle loop handler
 */
void cpuidle_install_idle_handler(void)
{
	if (enabled_devices) {
		/* Make sure all changes finished before we switch to new idle */
		smp_wmb();
		initialized = 1;
	}
}

/**
 * cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler
 */
void cpuidle_uninstall_idle_handler(void)
{
	if (enabled_devices) {
		initialized = 0;
		wake_up_all_idle_cpus();
	}

	/*
	 * Make sure external observers (such as the scheduler)
	 * are done looking at pointed idle states.
	 */
	synchronize_rcu();
}

/**
 * cpuidle_pause_and_lock - temporarily disables CPUIDLE
 */
void cpuidle_pause_and_lock(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_uninstall_idle_handler();
}

EXPORT_SYMBOL_GPL(cpuidle_pause_and_lock);

/**
 * cpuidle_resume_and_unlock - resumes CPUIDLE operation
 */
void cpuidle_resume_and_unlock(void)
{
	cpuidle_install_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);
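
/*
 * Typical external usage, following the locking rules documented at
 * cpuidle_enable_device()/cpuidle_disable_device() (a sketch, not taken
 * from an in-tree caller):
 *
 *	cpuidle_pause_and_lock();
 *	cpuidle_disable_device(dev);
 *	...change the device or its idle states...
 *	ret = cpuidle_enable_device(dev);
 *	cpuidle_resume_and_unlock();
 */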

/* Currently used in suspend/resume path to suspend cpuidle */
void cpuidle_pause(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_uninstall_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

/* Currently used in suspend/resume path to resume cpuidle */
void cpuidle_resume(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_install_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

/**
 * cpuidle_enable_device - enables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
int cpuidle_enable_device(struct cpuidle_device *dev)
{
	int ret;
	struct cpuidle_driver *drv;

	if (!dev)
		return -EINVAL;

	if (dev->enabled)
		return 0;

	drv = cpuidle_get_cpu_driver(dev);

	if (!drv || !cpuidle_curr_governor)
		return -EIO;

	if (!dev->registered)
		return -EINVAL;

	if (!dev->state_count)
		dev->state_count = drv->state_count;

	ret = cpuidle_add_device_sysfs(dev);
	if (ret)
		return ret;

	if (cpuidle_curr_governor->enable &&
	    (ret = cpuidle_curr_governor->enable(drv, dev)))
		goto fail_sysfs;

	smp_wmb();

	dev->enabled = 1;

	enabled_devices++;
	return 0;

fail_sysfs:
	cpuidle_remove_device_sysfs(dev);

	return ret;
}

EXPORT_SYMBOL_GPL(cpuidle_enable_device);

/**
 * cpuidle_disable_device - disables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
void cpuidle_disable_device(struct cpuidle_device *dev)
{
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

	if (!dev || !dev->enabled)
		return;

	if (!drv || !cpuidle_curr_governor)
		return;

	dev->enabled = 0;

	if (cpuidle_curr_governor->disable)
		cpuidle_curr_governor->disable(drv, dev);

	cpuidle_remove_device_sysfs(dev);
	enabled_devices--;
}

EXPORT_SYMBOL_GPL(cpuidle_disable_device);

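/*
 * __cpuidle_unregister_device - undo __cpuidle_register_device(): unlink
 * the device, clear the per-CPU pointer and drop the reference on the
 * driver module.
 */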
static void __cpuidle_unregister_device(struct cpuidle_device *dev)
{
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

	list_del(&dev->device_list);
	per_cpu(cpuidle_devices, dev->cpu) = NULL;
	module_put(drv->owner);
}

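/*
 * __cpuidle_device_init - reset the device's usage statistics before it
 * is (re)registered.
 */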
static void __cpuidle_device_init(struct cpuidle_device *dev)
{
	memset(dev->states_usage, 0, sizeof(dev->states_usage));
	dev->last_residency = 0;
}

/**
 * __cpuidle_register_device - internal register function called before
 * the register and enable routines
 * @dev: the cpu
 *
 * cpuidle_lock mutex must be held before this is called
 */
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret;
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

	if (!try_module_get(drv->owner))
		return -EINVAL;

	per_cpu(cpuidle_devices, dev->cpu) = dev;
	list_add(&dev->device_list, &cpuidle_detected_devices);

	ret = cpuidle_coupled_register_device(dev);
	if (ret)
		__cpuidle_unregister_device(dev);
	else
		dev->registered = 1;

	return ret;
}

/**
 * cpuidle_register_device - registers a CPU's idle PM feature
 * @dev: the cpu
 */
int cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret = -EBUSY;

	if (!dev)
		return -EINVAL;

	mutex_lock(&cpuidle_lock);

	if (dev->registered)
		goto out_unlock;

	__cpuidle_device_init(dev);

	ret = __cpuidle_register_device(dev);
	if (ret)
		goto out_unlock;

	ret = cpuidle_add_sysfs(dev);
	if (ret)
		goto out_unregister;

	ret = cpuidle_enable_device(dev);
	if (ret)
		goto out_sysfs;

	cpuidle_install_idle_handler();

out_unlock:
	mutex_unlock(&cpuidle_lock);

	return ret;

out_sysfs:
	cpuidle_remove_sysfs(dev);
out_unregister:
	__cpuidle_unregister_device(dev);
	goto out_unlock;
}

EXPORT_SYMBOL_GPL(cpuidle_register_device);

/**
 * cpuidle_unregister_device - unregisters a CPU's idle PM feature
 * @dev: the cpu
 */
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
	if (!dev || dev->registered == 0)
		return;

	cpuidle_pause_and_lock();

	cpuidle_disable_device(dev);

	cpuidle_remove_sysfs(dev);

	__cpuidle_unregister_device(dev);

	cpuidle_coupled_unregister_device(dev);

	cpuidle_resume_and_unlock();
}

EXPORT_SYMBOL_GPL(cpuidle_unregister_device);

/**
 * cpuidle_unregister - unregister a driver and the devices. This function
 * can be used only if the driver has been previously registered through
 * the cpuidle_register function.
 *
 * @drv: a valid pointer to a struct cpuidle_driver
 */
void cpuidle_unregister(struct cpuidle_driver *drv)
{
	int cpu;
	struct cpuidle_device *device;

	for_each_cpu(cpu, drv->cpumask) {
		device = &per_cpu(cpuidle_dev, cpu);
		cpuidle_unregister_device(device);
	}

	cpuidle_unregister_driver(drv);
}
EXPORT_SYMBOL_GPL(cpuidle_unregister);

/**
 * cpuidle_register - register the driver and the cpu devices with the
 * coupled_cpus passed as parameter. This function handles the common
 * initialization pattern found in the arch-specific drivers. The
 * devices are globally defined in this file.
 *
 * @drv         : a valid pointer to a struct cpuidle_driver
 * @coupled_cpus: a cpumask for the coupled states
 *
 * Returns 0 on success, < 0 otherwise
 */
int cpuidle_register(struct cpuidle_driver *drv,
		     const struct cpumask *const coupled_cpus)
{
	int ret, cpu;
	struct cpuidle_device *device;

	ret = cpuidle_register_driver(drv);
	if (ret) {
		pr_err("failed to register cpuidle driver\n");
		return ret;
	}

	for_each_cpu(cpu, drv->cpumask) {
		device = &per_cpu(cpuidle_dev, cpu);
		device->cpu = cpu;

#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
		/*
		 * On multiplatform for ARM, the coupled idle states could be
		 * enabled in the kernel even if the cpuidle driver does not
		 * use it. Note, coupled_cpus is a struct copy.
		 */
		if (coupled_cpus)
			device->coupled_cpus = *coupled_cpus;
#endif
		ret = cpuidle_register_device(device);
		if (!ret)
			continue;

		pr_err("Failed to register cpuidle device for cpu%d\n", cpu);

		cpuidle_unregister(drv);
		break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(cpuidle_register);
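
/*
 * For a driver without coupled states, registration typically boils down
 * to (hypothetical my_idle_driver shown for illustration only):
 *
 *	static struct cpuidle_driver my_idle_driver = { ... };
 *
 *	ret = cpuidle_register(&my_idle_driver, NULL);
 */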

#ifdef CONFIG_SMP

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement. This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int cpuidle_latency_notify(struct notifier_block *b,
				  unsigned long l, void *v)
{
	wake_up_all_idle_cpus();
	return NOTIFY_OK;
}

static struct notifier_block cpuidle_latency_notifier = {
	.notifier_call = cpuidle_latency_notify,
};

static inline void latency_notifier_init(struct notifier_block *n)
{
	pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, n);
}

#else /* CONFIG_SMP */

#define latency_notifier_init(x) do { } while (0)

#endif /* CONFIG_SMP */

/**
 * cpuidle_init - core initializer
 */
static int __init cpuidle_init(void)
{
	int ret;

	if (cpuidle_disabled())
		return -ENODEV;

	ret = cpuidle_add_interface(cpu_subsys.dev_root);
	if (ret)
		return ret;

	latency_notifier_init(&cpuidle_latency_notifier);

	return 0;
}

module_param(off, int, 0444);
core_initcall(cpuidle_init);