/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm-trace.h>
#include <linux/pm_wakeirq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/timer.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in dpm_list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

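/* Map a PM_EVENT_* code to a human-readable verb for log messages. */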
static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	dev->power.is_noirq_suspended = false;
	dev->power.is_late_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	device_pm_check_callbacks(dev);
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
	device_pm_check_callbacks(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

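/*
 * Timing instrumentation for the PM callbacks below: when
 * pm_print_times_enabled is set (toggled via /sys/power/pm_print_times),
 * the start of each device callback and its duration are logged.
 */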
static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (pm_print_times_enabled) {
		pr_info("calling %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error, pm_message_t state, char *info)
{
	ktime_t rettime;
	s64 nsecs;

	rettime = ktime_get();
	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));

	if (pm_print_times_enabled) {
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)nsecs >> 10);
	}
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

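/*
 * Adapt dpm_wait() to the device_for_each_child() callback signature, so
 * dpm_wait_for_children() can wait for every child of a device.
 */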
static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

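/*
 * Illustrative mapping for pm_op() below: state == PMSG_SUSPEND yields
 * ops->suspend, state == PMSG_THAW yields ops->thaw, and events compiled
 * out by CONFIG_SUSPEND / CONFIG_HIBERNATE_CALLBACKS fall through to NULL.
 */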
/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

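/*
 * Run one PM callback for @dev with debug tracing and optional timing
 * around it; a NULL callback is treated as success.
 */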
static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev);

	pm_dev_dbg(dev, state, info);
	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}

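/*
 * With CONFIG_DPM_WATCHDOG enabled, a per-device timer panics the system
 * if a suspend or resume callback runs longer than
 * CONFIG_DPM_WATCHDOG_TIMEOUT seconds, so the hang is captured (e.g. in
 * pstore) instead of wedging silently; see dpm_watchdog_handler() below.
 */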
#ifdef CONFIG_DPM_WATCHDOG
struct dpm_watchdog {
	struct device *dev;
	struct task_struct *tsk;
	struct timer_list timer;
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
	struct dpm_watchdog wd

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @data: Watchdog object address.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(unsigned long data)
{
	struct dpm_watchdog *wd = (void *)data;

	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
	show_stack(wd->tsk, NULL);
	panic("%s %s: unrecoverable failure\n",
		dev_driver_string(wd->dev), dev_name(wd->dev));
}

/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
	struct timer_list *timer = &wd->timer;

	wd->dev = dev;
	wd->tsk = current;

	init_timer_on_stack(timer);
	/* use same timeout value for both suspend and resume */
	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
	timer->function = dpm_watchdog_handler;
	timer->data = (unsigned long)wd;
	add_timer(timer);
}

/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
	struct timer_list *timer = &wd->timer;

	del_timer_sync(timer);
	destroy_timer_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_noirq_suspended)
		goto Out;

	dpm_wait(dev->parent, async);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_noirq_suspended = false;

 Out:
	complete_all(&dev->power.completion);
	TRACE_RESUME(error);
	return error;
}

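/*
 * A device may be suspended/resumed on an async thread only if it opted
 * in (power.async_suspend), the global pm_async knob allows it, and PM
 * tracing is off, since pm_trace depends on a strictly ordered sequence
 * of callbacks.
 */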
static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}

static void async_resume_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_noirq(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront, in case the starting of async
	 * threads is delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_noirq, dev);
		}
	}

	while (!list_empty(&dpm_noirq_list)) {
		dev = to_device(dpm_noirq_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_noirq(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_noirq++;
				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " noirq", error);
			}
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, "noirq");
	resume_device_irqs();
	device_wakeup_disarm_wake_irqs();
	cpuidle_resume();
	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_late_suspended)
		goto Out;

	dpm_wait(dev->parent, async);

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_late_suspended = false;

 Out:
	TRACE_RESUME(error);

	pm_runtime_enable(dev);
	complete_all(&dev->power.completion);
	return error;
}

static void async_resume_early(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_early(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_early(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront, in case the starting of async
	 * threads is delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_early, dev);
		}
	}

	while (!list_empty(&dpm_late_early_list)) {
		dev = to_device(dpm_late_early_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_early(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_early++;
				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " early", error);
			}
		}
		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, "early");
	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		/* Match the pm_runtime_disable() in __device_suspend(). */
		pm_runtime_enable(dev);
		goto Complete;
	}

	dpm_wait(dev->parent, async);
	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	/*
	 * This is a fib. But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Driver;
		} else if (dev->class->resume) {
			info = "legacy class ";
			callback = dev->class->resume;
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

 Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

 End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);

	cpufreq_resume();
	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	char *info = NULL;

	if (dev->power.syscore)
		return;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

	pm_runtime_put(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		device_complete(dev, state);
		trace_device_pm_callback_end(dev, 0);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);

	/* Allow device probing and trigger re-probing of deferred devices */
	device_unblock_probing();
	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

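/*
 * For example, resume_event(PMSG_SUSPEND) is PMSG_RESUME and
 * resume_event(PMSG_FREEZE) is PMSG_RECOVER; the dpm_suspend_* paths use
 * this to run the matching resume pass when a suspend step fails.
 */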
/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

/**
 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	dpm_wait_for_children(dev, async);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	if (!error)
		dev->power.is_noirq_suspended = true;
	else
		async_error = error;

Complete:
	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}

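/*
 * Suspend one device's "noirq" phase: device_suspend_noirq() queues the
 * work on an async thread when is_async(dev) allows it and runs it
 * synchronously otherwise; async_suspend_noirq() is the async worker.
 */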
static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_noirq(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend_noirq(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend_noirq, dev);
		return 0;
	}
	return __device_suspend_noirq(dev, pm_transition, false);
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
	cpuidle_pause();
	device_wakeup_arm_wake_irqs();
	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;

	if (error) {
		suspend_stats.failed_suspend_noirq++;
		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
		dpm_resume_noirq(resume_event(state));
	} else {
		dpm_show_time(starttime, state, "noirq");
	}
	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
	return error;
}

/**
 * __device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	__pm_runtime_disable(dev, false);

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	dpm_wait_for_children(dev, async);

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	if (!error)
		dev->power.is_late_suspended = true;
	else
		async_error = error;

Complete:
	TRACE_SUSPEND(error);
	complete_all(&dev->power.completion);
	return error;
}

static void async_suspend_late(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_late(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}
	put_device(dev);
}

static int device_suspend_late(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend_late, dev);
		return 0;
	}

	return __device_suspend_late(dev, pm_transition, false);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev);

		mutex_lock(&dpm_list_mtx);
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);

		if (error) {
			pm_dev_err(dev, state, " late", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend_late++;
		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
		dpm_resume_early(resume_event(state));
	} else {
		dpm_show_time(starttime, state, "late");
	}
	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
	return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
	int error = dpm_suspend_late(state);
	if (error)
		return error;

	error = dpm_suspend_noirq(state);
	if (error) {
		dpm_resume_early(resume_event(state));
		return error;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: string description of caller.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state),
			  char *info)
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev, state);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}

775b64d2 1345/**
20d652d7
RW
1346 * device_suspend - Execute "suspend" callbacks for given device.
1347 * @dev: Device to handle.
1348 * @state: PM transition of the system being carried out.
5af84b82 1349 * @async: If true, the device is being suspended asynchronously.
775b64d2 1350 */
5af84b82 1351static int __device_suspend(struct device *dev, pm_message_t state, bool async)
cd59abfc 1352{
9cf519d1
RW
1353 pm_callback_t callback = NULL;
1354 char *info = NULL;
cd59abfc 1355 int error = 0;
70fea60d 1356 DECLARE_DPM_WATCHDOG_ON_STACK(wd);
cd59abfc 1357
431d452a
ZF
1358 TRACE_DEVICE(dev);
1359 TRACE_SUSPEND(0);
1360
5af84b82 1361 dpm_wait_for_children(dev, async);
7a8d37a3 1362
5af84b82 1363 if (async_error)
1f758b23 1364 goto Complete;
1e2ef05b 1365
88d26136
AS
1366 /*
1367 * If a device configured to wake up the system from sleep states
1368 * has been suspended at run time and there's a resume request pending
1369 * for it, this is equivalent to the device signaling wakeup, so the
1370 * system suspend operation should be aborted.
1371 */
1e2ef05b
RW
1372 if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
1373 pm_wakeup_event(dev, 0);
5af84b82 1374
d83f905e
RW
1375 if (pm_wakeup_pending()) {
1376 async_error = -EBUSY;
1f758b23 1377 goto Complete;
d83f905e
RW
1378 }
1379
dbf37414
RW
1380 if (dev->power.syscore)
1381 goto Complete;
1382
aae4518b
RW
1383 if (dev->power.direct_complete) {
1384 if (pm_runtime_status_suspended(dev)) {
1385 pm_runtime_disable(dev);
019d8817 1386 if (pm_runtime_status_suspended(dev))
aae4518b
RW
1387 goto Complete;
1388
1389 pm_runtime_enable(dev);
1390 }
1391 dev->power.direct_complete = false;
1392 }
1393
70fea60d 1394 dpm_watchdog_set(&wd, dev);
1e2ef05b
RW
1395 device_lock(dev);
1396
564b905a 1397 if (dev->pm_domain) {
9cf519d1
RW
1398 info = "power domain ";
1399 callback = pm_op(&dev->pm_domain->ops, state);
1400 goto Run;
4d27e9dc
RW
1401 }
1402
9659cc06 1403 if (dev->type && dev->type->pm) {
9cf519d1
RW
1404 info = "type ";
1405 callback = pm_op(dev->type->pm, state);
1406 goto Run;
9659cc06
RW
1407 }
1408
1eede070
RW
1409 if (dev->class) {
1410 if (dev->class->pm) {
9cf519d1
RW
1411 info = "class ";
1412 callback = pm_op(dev->class->pm, state);
1413 goto Run;
1eede070
RW
1414 } else if (dev->class->suspend) {
1415 pm_dev_dbg(dev, state, "legacy class ");
53644677
SK
1416 error = legacy_suspend(dev, state, dev->class->suspend,
1417 "legacy class ");
4d27e9dc 1418 goto End;
1eede070 1419 }
cd59abfc
AS
1420 }
1421
1eede070
RW
1422 if (dev->bus) {
1423 if (dev->bus->pm) {
35cd133c 1424 info = "bus ";
9cf519d1 1425 callback = pm_op(dev->bus->pm, state);
1eede070 1426 } else if (dev->bus->suspend) {
35cd133c 1427 pm_dev_dbg(dev, state, "legacy bus ");
53644677
SK
1428 error = legacy_suspend(dev, state, dev->bus->suspend,
1429 "legacy bus ");
9cf519d1 1430 goto End;
1eede070 1431 }
7538e3db
RW
1432 }
1433
9cf519d1 1434 Run:
35cd133c
RW
1435 if (!callback && dev->driver && dev->driver->pm) {
1436 info = "driver ";
1437 callback = pm_op(dev->driver->pm, state);
1438 }
1439
9cf519d1
RW
1440 error = dpm_run_callback(callback, dev, state, info);
1441
1eede070 1442 End:
4ca46ff3 1443 if (!error) {
aae4518b
RW
1444 struct device *parent = dev->parent;
1445
4ca46ff3 1446 dev->power.is_suspended = true;
aae4518b
RW
1447 if (parent) {
1448 spin_lock_irq(&parent->power.lock);
1449
1450 dev->parent->power.direct_complete = false;
1451 if (dev->power.wakeup_path
1452 && !dev->parent->power.ignore_children)
1453 dev->parent->power.wakeup_path = true;
1454
1455 spin_unlock_irq(&parent->power.lock);
1456 }
4ca46ff3 1457 }
6d0e0e84 1458
8e9394ce 1459 device_unlock(dev);
70fea60d 1460 dpm_watchdog_clear(&wd);
1f758b23
MSB
1461
1462 Complete:
5af84b82 1463 complete_all(&dev->power.completion);
88d26136 1464 if (error)
098dff73
RW
1465 async_error = error;
1466
431d452a 1467 TRACE_SUSPEND(error);
cd59abfc
AS
1468 return error;
1469}
1470
static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
	might_sleep();

	cpufreq_suspend();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	} else
		dpm_show_time(starttime, state, NULL);
	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device. No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	int ret = 0;

	if (dev->power.syscore)
		return 0;

	/*
	 * If a device's parent goes into runtime suspend at the wrong time,
	 * it won't be possible to resume the device. To prevent this we
	 * block runtime suspend here, during the prepare phase, and allow
	 * it again during the complete phase.
	 */
	pm_runtime_get_noresume(dev);

	device_lock(dev);

	dev->power.wakeup_path = device_may_wakeup(dev);

	if (dev->power.no_pm_callbacks) {
		ret = 1;	/* Let device go direct_complete */
		goto unlock;
	}

	if (dev->pm_domain)
		callback = dev->pm_domain->ops.prepare;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->prepare;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->prepare;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->prepare;

	if (!callback && dev->driver && dev->driver->pm)
		callback = dev->driver->pm->prepare;

	if (callback)
		ret = callback(dev);

unlock:
	device_unlock(dev);

	if (ret < 0) {
		suspend_report_result(callback, ret);
		pm_runtime_put(dev);
		return ret;
	}
	/*
	 * A positive return value from ->prepare() means "this device appears
	 * to be runtime-suspended and its state is fine, so if it really is
	 * runtime-suspended, you can leave it in that state provided that you
	 * will do the same thing with all of its descendants". This only
	 * applies to suspend transitions, however.
	 */
	spin_lock_irq(&dev->power.lock);
	dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND;
	spin_unlock_irq(&dev->power.lock);
	return 0;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
	might_sleep();

	/*
	 * Give a chance for the known devices to complete their probes, before
	 * disabling probing of devices. This sync point is important at least
	 * at boot time + hibernation restore.
	 */
	wait_for_device_probe();
	/*
	 * It is unsafe to let device probing happen during suspend or
	 * hibernation, since system behavior would be unpredictable in that
	 * case. So, prohibit device probing here and defer the probes
	 * instead. The normal behavior will be restored in dpm_complete().
	 */
	device_block_probing();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		error = device_prepare(dev, state);
		trace_device_pm_callback_end(dev, error);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

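/*
 * Illustrative use: a driver resuming a device that must not touch the
 * hardware before some other device has finished resuming can call
 * device_pm_wait_for_dev(my_dev, dev_i_depend_on) from its callback and
 * check the returned async_error; the names here are only placeholders.
 */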
/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);

/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
	struct device *dev;

	if (!fn)
		return;

	device_pm_lock();
	list_for_each_entry(dev, &dpm_list, power.entry)
		fn(dev, data);
	device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);

static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
{
	if (!ops)
		return true;

	return !ops->prepare &&
	       !ops->suspend &&
	       !ops->suspend_late &&
	       !ops->suspend_noirq &&
	       !ops->resume_noirq &&
	       !ops->resume_early &&
	       !ops->resume &&
	       !ops->complete;
}

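/*
 * Cache, under dev->power.lock, whether the device has any PM callbacks at
 * all; device_prepare() uses the result to let callback-less devices take
 * the direct_complete fast path.
 */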
void device_pm_check_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_pm_callbacks =
		(!dev->bus || pm_ops_is_empty(dev->bus->pm)) &&
		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
		(!dev->driver || pm_ops_is_empty(dev->driver->pm));
	spin_unlock_irq(&dev->power.lock);
}