]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/base/power/main.c
ACPI: intel_idle : break dependency between modules
[mirror_ubuntu-bionic-kernel.git] / drivers / base / power / main.c
CommitLineData
1da177e4
LT
1/*
2 * drivers/base/power/main.c - Where the driver meets power management.
3 *
4 * Copyright (c) 2003 Patrick Mochel
5 * Copyright (c) 2003 Open Source Development Lab
6 *
7 * This file is released under the GPLv2
8 *
9 *
10 * The driver model core calls device_pm_add() when a device is registered.
b595076a 11 * This will initialize the embedded device_pm_info object in the device
1da177e4
LT
12 * and add it to the list of power-controlled devices. sysfs entries for
13 * controlling device power management will also be added.
14 *
1eede070
RW
15 * A separate list is used for keeping track of power info, because the power
16 * domain dependencies may differ from the ancestral dependencies that the
17 * subsystem list maintains.
1da177e4
LT
18 */
19
1da177e4 20#include <linux/device.h>
cd59abfc 21#include <linux/kallsyms.h>
1b6bc32f 22#include <linux/export.h>
11048dcf 23#include <linux/mutex.h>
cd59abfc 24#include <linux/pm.h>
5e928f77 25#include <linux/pm_runtime.h>
cd59abfc 26#include <linux/resume-trace.h>
2ed8d2b3 27#include <linux/interrupt.h>
f2511774 28#include <linux/sched.h>
5af84b82 29#include <linux/async.h>
1e75227e 30#include <linux/suspend.h>
11048dcf 31
cd59abfc 32#include "../base.h"
1da177e4
LT
33#include "power.h"
34
9cf519d1
RW
35typedef int (*pm_callback_t)(struct device *);
36
775b64d2 37/*
1eede070 38 * The entries in the dpm_list list are in a depth first order, simply
775b64d2
RW
39 * because children are guaranteed to be discovered after parents, and
40 * are inserted at the back of the list on discovery.
41 *
8e9394ce
GKH
42 * Since device_pm_add() may be called with a device lock held,
43 * we must never try to acquire a device lock while holding
775b64d2
RW
44 * dpm_list_mutex.
45 */
46
1eede070 47LIST_HEAD(dpm_list);
8a43a9ab
RW
48LIST_HEAD(dpm_prepared_list);
49LIST_HEAD(dpm_suspended_list);
cf579dfb 50LIST_HEAD(dpm_late_early_list);
8a43a9ab 51LIST_HEAD(dpm_noirq_list);
1da177e4 52
2a77c46d 53struct suspend_stats suspend_stats;
cd59abfc 54static DEFINE_MUTEX(dpm_list_mtx);
5af84b82 55static pm_message_t pm_transition;
1da177e4 56
098dff73
RW
57static int async_error;
58
/**
 * device_pm_init - Initialize the PM-related part of a device object.
 * @dev: Device object being initialized.
 *
 * Called from device registration; sets up the embedded dev_pm_info.
 * The completion is completed immediately so that waiters (dpm_wait())
 * never block on a device that has not started a transition yet.
 */
void device_pm_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);	/* start in the "done" state */
	dev->power.wakeup = NULL;
	spin_lock_init(&dev->power.lock);
	pm_runtime_init(dev);
	INIT_LIST_HEAD(&dev->power.entry);
	dev->power.power_state = PMSG_INVALID;
}
75
/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}
83
/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}
075c1771 91
/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 *
 * May be called with a device lock held (see the comment above dpm_list),
 * so it must never try to acquire another device lock itself.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	mutex_lock(&dpm_list_mtx);
	/* A prepared (suspending) parent should not gain new children. */
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	/* Tail insertion keeps dpm_list in depth-first (parents first) order. */
	list_add_tail(&dev->power.entry, &dpm_list);
	dev_pm_qos_constraints_init(dev);
	mutex_unlock(&dpm_list_mtx);
}
108
/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	/* Release anybody waiting in dpm_wait() before taking the device out. */
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	dev_pm_qos_constraints_destroy(dev);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}
125
/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 *
 * NOTE(review): callers are expected to hold dpm_list_mtx — confirm at
 * call sites; no locking is done here.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}
139
/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}
153
/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}
164
875ab0b7
RW
165static ktime_t initcall_debug_start(struct device *dev)
166{
167 ktime_t calltime = ktime_set(0, 0);
168
169 if (initcall_debug) {
0c6aebe3
RW
170 pr_info("calling %s+ @ %i, parent: %s\n",
171 dev_name(dev), task_pid_nr(current),
172 dev->parent ? dev_name(dev->parent) : "none");
875ab0b7
RW
173 calltime = ktime_get();
174 }
175
176 return calltime;
177}
178
179static void initcall_debug_report(struct device *dev, ktime_t calltime,
180 int error)
181{
182 ktime_t delta, rettime;
183
184 if (initcall_debug) {
185 rettime = ktime_get();
186 delta = ktime_sub(rettime, calltime);
187 pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
188 error, (unsigned long long)ktime_to_ns(delta) >> 10);
189 }
190}
191
5af84b82
RW
192/**
193 * dpm_wait - Wait for a PM operation to complete.
194 * @dev: Device to wait for.
195 * @async: If unset, wait only if the device's power.async_suspend flag is set.
196 */
197static void dpm_wait(struct device *dev, bool async)
198{
199 if (!dev)
200 return;
201
0e06b4a8 202 if (async || (pm_async_enabled && dev->power.async_suspend))
5af84b82
RW
203 wait_for_completion(&dev->power.completion);
204}
205
206static int dpm_wait_fn(struct device *dev, void *async_ptr)
207{
208 dpm_wait(dev, *((bool *)async_ptr));
209 return 0;
210}
211
/* Wait for every child of @dev to finish its in-flight PM transition. */
static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}
216
1eede070 217/**
9cf519d1 218 * pm_op - Return the PM operation appropriate for given PM event.
20d652d7
RW
219 * @ops: PM operations to choose from.
220 * @state: PM transition of the system being carried out.
1eede070 221 */
9cf519d1 222static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
1eede070 223{
1eede070
RW
224 switch (state.event) {
225#ifdef CONFIG_SUSPEND
226 case PM_EVENT_SUSPEND:
9cf519d1 227 return ops->suspend;
1eede070 228 case PM_EVENT_RESUME:
9cf519d1 229 return ops->resume;
1eede070 230#endif /* CONFIG_SUSPEND */
1f112cee 231#ifdef CONFIG_HIBERNATE_CALLBACKS
1eede070
RW
232 case PM_EVENT_FREEZE:
233 case PM_EVENT_QUIESCE:
9cf519d1 234 return ops->freeze;
1eede070 235 case PM_EVENT_HIBERNATE:
9cf519d1 236 return ops->poweroff;
1eede070
RW
237 case PM_EVENT_THAW:
238 case PM_EVENT_RECOVER:
9cf519d1 239 return ops->thaw;
1eede070
RW
240 break;
241 case PM_EVENT_RESTORE:
9cf519d1 242 return ops->restore;
1f112cee 243#endif /* CONFIG_HIBERNATE_CALLBACKS */
1eede070 244 }
f2511774 245
9cf519d1 246 return NULL;
1eede070
RW
247}
248
/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Selects the "late suspend" / "early resume" variants of the callbacks.
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
282
/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Selects the "noirq" variants of the callbacks.  The driver of @dev will
 * not receive interrupts while the returned callback is being executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
316
/* Map a PM_EVENT_* code to a human-readable verb for log messages. */
static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}
340
/* Debug-log a PM transition for @dev, noting wakeup capability for sleeps. */
static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}
347
/* Report a failed PM callback for @dev at KERN_ERR level. */
static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}
354
/*
 * dpm_show_time - Log how long a whole suspend/resume phase took.
 * @starttime: Timestamp taken at the start of the phase.
 * @state: PM transition of the system being carried out.
 * @info: Optional phase label ("noirq", "early", "late") or NULL.
 */
static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);	/* do_div: 64-bit dividend in place */
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;	/* never print "0.000 msecs" */
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}
371
/*
 * dpm_run_callback - Invoke one PM callback with tracing/debug bookkeeping.
 * @cb: Callback to run (may be NULL, in which case 0 is returned).
 * @dev: Device the callback operates on.
 * @state: PM transition of the system being carried out.
 * @info: Label describing where @cb came from, for logging.
 */
static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev);

	pm_dev_dbg(dev, state, info);
	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}
391
cd59abfc
AS
392/*------------------------- Resume routines -------------------------*/
393
/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.  Callback sources are consulted in priority order: PM domain,
 * type, class, bus; the driver's own ops are a fallback only.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

	TRACE_RESUME(error);
	return error;
}
435
/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.  dpm_list_mtx is dropped
 * around each callback; a reference on the device keeps it alive meanwhile.
 */
static void dpm_resume_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_noirq_list)) {
		struct device *dev = to_device(dpm_noirq_list.next);
		int error;

		get_device(dev);
		/* Hand the device on to the next ("early") resume phase. */
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_noirq(dev, state);
		if (error) {
			suspend_stats.failed_resume_noirq++;
			dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
			dpm_save_failed_dev(dev_name(dev));
			pm_dev_err(dev, state, " noirq", error);
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "noirq");
	resume_device_irqs();
}
471
/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 * Callback priority: PM domain, type, class, bus, then driver fallback.
 */
static int device_resume_early(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

	TRACE_RESUME(error);
	return error;
}
512
/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Drains dpm_late_early_list, moving each device on to dpm_suspended_list
 * for the full resume phase.  dpm_list_mtx is dropped around each callback.
 */
static void dpm_resume_early(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.next);
		int error;

		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_early(dev, state);
		if (error) {
			suspend_stats.failed_resume_early++;
			dpm_save_failed_step(SUSPEND_RESUME_EARLY);
			dpm_save_failed_dev(dev_name(dev));
			pm_dev_err(dev, state, " early", error);
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "early");
}
cf579dfb
RW
544
/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);
775b64d2
RW
555
/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	bool put = false;	/* did we pm_runtime_get via pm_runtime_enable path? */

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	/* The parent must finish resuming before we touch this device. */
	dpm_wait(dev->parent, async);
	device_lock(dev);

	/*
	 * This is a fib. But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	/* Nothing to undo if the suspend phase never ran for this device. */
	if (!dev->power.is_suspended)
		goto Unlock;

	pm_runtime_enable(dev);
	put = true;

	/* Callback priority: PM domain, type, class, bus, driver fallback. */
	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Driver;
		} else if (dev->class->resume) {
			/* Legacy callbacks skip the driver fallback. */
			info = "legacy class ";
			callback = dev->class->resume;
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

 Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

 End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	/* Wake up anything waiting (children, async resumers) on this device. */
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	if (put)
		pm_runtime_put_sync(dev);

	return error;
}
643
5af84b82
RW
644static void async_resume(void *data, async_cookie_t cookie)
645{
646 struct device *dev = (struct device *)data;
647 int error;
648
97df8c12 649 error = device_resume(dev, pm_transition, true);
5af84b82
RW
650 if (error)
651 pm_dev_err(dev, pm_transition, " async", error);
652 put_device(dev);
653}
654
/*
 * True when @dev should be handled on an async thread: the device opted in,
 * async PM is globally enabled, and PM tracing (which needs strict ordering)
 * is off.
 */
static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}
660
/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.  Async-capable devices are scheduled
 * first so they can resume in parallel with the synchronous list walk.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	/* First pass: kick off all async resumes. */
	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		INIT_COMPLETION(dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);	/* dropped in async_resume() */
			async_schedule(async_resume, dev);
		}
	}

	/* Second pass: resume the remaining devices synchronously, in order. */
	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		/* The device may have been removed while the lock was dropped. */
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();	/* wait for the async resumes too */
	dpm_show_time(starttime, state, NULL);
}
713
/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runs the ->complete() callback from the highest-priority source
 * (PM domain, type, class, bus), falling back to the driver's own ops.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	char *info = NULL;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);
}
752
/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).  Devices are collected
 * onto a private list and spliced back onto dpm_list at the end so that
 * concurrent additions are not lost.
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	/* Walk from the tail: children are completed before their parents. */
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		device_complete(dev, state);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}
784
/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);
cd59abfc
AS
798
799
800/*------------------------- Suspend routines -------------------------*/
801
/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state; PMSG_ON for any event with no sleep counterpart.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}
822
/**
 * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.  Callback priority: PM domain, type, class, bus, driver fallback.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	return dpm_run_callback(callback, dev, state, info);
}
857
/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq"
 * suspend handlers for all non-sysdev devices.  On any failure (or pending
 * wakeup event) the already-suspended devices are resumed again.
 */
static int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	/* Walk from the tail so children are suspended before parents. */
	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			suspend_stats.failed_suspend_noirq++;
			dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		/* The device may have been removed while the lock was dropped. */
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);

		/* Abort if a wakeup event arrived during the suspend. */
		if (pm_wakeup_pending()) {
			error = -EBUSY;
			break;
		}
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_noirq(resume_event(state));
	else
		dpm_show_time(starttime, state, "noirq");
	return error;
}
905
/**
 * device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 * Callback priority: PM domain, type, class, bus, driver fallback.
 */
static int device_suspend_late(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	return dpm_run_callback(callback, dev, state, info);
}
939
/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * On any failure (or pending wakeup event) the devices already moved to
 * the "late/early" list are resumed again via dpm_resume_early().
 */
static int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	mutex_lock(&dpm_list_mtx);
	/* Walk from the tail so children are suspended before parents. */
	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			suspend_stats.failed_suspend_late++;
			dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		/* The device may have been removed while the lock was dropped. */
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);
		put_device(dev);

		/* Abort if a wakeup event arrived during the suspend. */
		if (pm_wakeup_pending()) {
			error = -EBUSY;
			break;
		}
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_early(resume_event(state));
	else
		dpm_show_time(starttime, state, "late");

	return error;
}
cf579dfb
RW
984
985/**
986 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
987 * @state: PM transition of the system being carried out.
988 */
989int dpm_suspend_end(pm_message_t state)
990{
991 int error = dpm_suspend_late(state);
992
993 return error ? : dpm_suspend_noirq(state);
994}
995EXPORT_SYMBOL_GPL(dpm_suspend_end);
775b64d2 996
875ab0b7
RW
997/**
998 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
0a884223
RD
999 * @dev: Device to suspend.
1000 * @state: PM transition of the system being carried out.
1001 * @cb: Suspend callback to execute.
875ab0b7
RW
1002 */
1003static int legacy_suspend(struct device *dev, pm_message_t state,
1004 int (*cb)(struct device *dev, pm_message_t state))
1005{
1006 int error;
1007 ktime_t calltime;
1008
1009 calltime = initcall_debug_start(dev);
1010
1011 error = cb(dev, state);
1012 suspend_report_result(cb, error);
1013
1014 initcall_debug_report(dev, calltime, error);
1015
1016 return error;
1017}
1018
/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * Looks the suspend callback up in priority order (power domain, type,
 * class, bus, then driver) and runs it.  On success the device's runtime PM
 * is disabled and its wakeup-path flag is propagated to the parent; on
 * failure the error is stored in async_error so concurrent async suspends
 * can bail out.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	/* Children suspend first; wait until all of them have finished. */
	dpm_wait_for_children(dev, async);

	/* Another (asynchronous) suspend already failed - don't start. */
	if (async_error)
		goto Complete;

	/*
	 * Hold a runtime PM reference and flush pending runtime PM
	 * requests; a flushed request on a wakeup-capable device is
	 * reported as a wakeup event.
	 */
	pm_runtime_get_noresume(dev);
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	/* Abort the whole transition if a wakeup event is pending. */
	if (pm_wakeup_pending()) {
		pm_runtime_put_sync(dev);
		async_error = -EBUSY;
		goto Complete;
	}

	device_lock(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	/* Class and bus may provide either PM ops or a legacy ->suspend. */
	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Run;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend);
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend);
			goto End;
		}
	}

 Run:
	/* Fall back to the driver's PM ops if no layer above matched. */
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 End:
	if (!error) {
		dev->power.is_suspended = true;
		/*
		 * Propagate the wakeup path up the hierarchy unless the
		 * parent ignores its children for PM purposes.
		 */
		if (dev->power.wakeup_path
		    && dev->parent && !dev->parent->power.ignore_children)
			dev->parent->power.wakeup_path = true;
	}

	device_unlock(dev);

 Complete:
	/* Signal everything waiting on this device's PM completion. */
	complete_all(&dev->power.completion);

	if (error) {
		/* Drop the runtime PM reference taken above on failure. */
		pm_runtime_put_sync(dev);
		async_error = error;
	} else if (dev->power.is_suspended) {
		__pm_runtime_disable(dev, false);
	}

	return error;
}
1113
5af84b82
RW
1114static void async_suspend(void *data, async_cookie_t cookie)
1115{
1116 struct device *dev = (struct device *)data;
1117 int error;
1118
1119 error = __device_suspend(dev, pm_transition, true);
2a77c46d
SL
1120 if (error) {
1121 dpm_save_failed_dev(dev_name(dev));
5af84b82 1122 pm_dev_err(dev, pm_transition, " async", error);
2a77c46d 1123 }
5af84b82
RW
1124
1125 put_device(dev);
1126}
1127
1128static int device_suspend(struct device *dev)
1129{
1130 INIT_COMPLETION(dev->power.completion);
1131
0e06b4a8 1132 if (pm_async_enabled && dev->power.async_suspend) {
5af84b82
RW
1133 get_device(dev);
1134 async_schedule(async_suspend, dev);
1135 return 0;
1136 }
1137
1138 return __device_suspend(dev, pm_transition, false);
1139}
1140
/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Walks dpm_prepared_list from the tail (children before parents, per the
 * dpm_list ordering) and runs device_suspend() for each entry, moving
 * successfully suspended devices onto dpm_suspended_list.  Returns 0 on
 * success or the first error encountered, including errors reported
 * asynchronously through async_error.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	/* pm_transition is picked up by the async_suspend() workers. */
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		/*
		 * Pin the device and drop the list lock around the suspend
		 * callback, which may sleep and during which devices may be
		 * unregistered.
		 */
		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		/* Only requeue if the device wasn't removed meanwhile. */
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	/* Wait for all async_suspend() work scheduled above to finish. */
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	} else
		dpm_show_time(starttime, state, NULL);
	return error;
}
1187
1188/**
20d652d7
RW
1189 * device_prepare - Prepare a device for system power transition.
1190 * @dev: Device to handle.
1191 * @state: PM transition of the system being carried out.
1192 *
1193 * Execute the ->prepare() callback(s) for given device. No new children of the
1194 * device may be registered after this function has returned.
1eede070 1195 */
d1616302 1196static int device_prepare(struct device *dev, pm_message_t state)
1eede070 1197{
35cd133c
RW
1198 int (*callback)(struct device *) = NULL;
1199 char *info = NULL;
1eede070
RW
1200 int error = 0;
1201
8e9394ce 1202 device_lock(dev);
1eede070 1203
4ca46ff3
RW
1204 dev->power.wakeup_path = device_may_wakeup(dev);
1205
564b905a 1206 if (dev->pm_domain) {
35cd133c
RW
1207 info = "preparing power domain ";
1208 callback = dev->pm_domain->ops.prepare;
4d27e9dc 1209 } else if (dev->type && dev->type->pm) {
35cd133c
RW
1210 info = "preparing type ";
1211 callback = dev->type->pm->prepare;
9659cc06 1212 } else if (dev->class && dev->class->pm) {
35cd133c
RW
1213 info = "preparing class ";
1214 callback = dev->class->pm->prepare;
9659cc06 1215 } else if (dev->bus && dev->bus->pm) {
35cd133c
RW
1216 info = "preparing bus ";
1217 callback = dev->bus->pm->prepare;
1218 }
1219
1220 if (!callback && dev->driver && dev->driver->pm) {
1221 info = "preparing driver ";
1222 callback = dev->driver->pm->prepare;
1223 }
1224
1225 if (callback) {
1226 error = callback(dev);
1227 suspend_report_result(callback, error);
1eede070 1228 }
7538e3db 1229
8e9394ce 1230 device_unlock(dev);
1eede070
RW
1231
1232 return error;
1233}
cd59abfc 1234
1eede070 1235/**
20d652d7
RW
1236 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
1237 * @state: PM transition of the system being carried out.
1eede070 1238 *
20d652d7 1239 * Execute the ->prepare() callback(s) for all devices.
1eede070 1240 */
91e7c75b 1241int dpm_prepare(pm_message_t state)
1eede070 1242{
1eede070
RW
1243 int error = 0;
1244
91e7c75b
RW
1245 might_sleep();
1246
1eede070 1247 mutex_lock(&dpm_list_mtx);
1eede070
RW
1248 while (!list_empty(&dpm_list)) {
1249 struct device *dev = to_device(dpm_list.next);
1250
1251 get_device(dev);
1eede070
RW
1252 mutex_unlock(&dpm_list_mtx);
1253
1e2ef05b 1254 error = device_prepare(dev, state);
1eede070
RW
1255
1256 mutex_lock(&dpm_list_mtx);
1257 if (error) {
1eede070
RW
1258 if (error == -EAGAIN) {
1259 put_device(dev);
886a7a33 1260 error = 0;
1eede070
RW
1261 continue;
1262 }
1e75227e
RW
1263 printk(KERN_INFO "PM: Device %s not prepared "
1264 "for power transition: code %d\n",
5c1a07ab 1265 dev_name(dev), error);
1eede070
RW
1266 put_device(dev);
1267 break;
1268 }
f76b168b 1269 dev->power.is_prepared = true;
1eede070 1270 if (!list_empty(&dev->power.entry))
8a43a9ab 1271 list_move_tail(&dev->power.entry, &dpm_prepared_list);
1eede070
RW
1272 put_device(dev);
1273 }
1eede070 1274 mutex_unlock(&dpm_list_mtx);
cd59abfc
AS
1275 return error;
1276}
1277
775b64d2 1278/**
20d652d7
RW
1279 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
1280 * @state: PM transition of the system being carried out.
775b64d2 1281 *
20d652d7
RW
1282 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
1283 * callbacks for them.
775b64d2 1284 */
d1616302 1285int dpm_suspend_start(pm_message_t state)
775b64d2
RW
1286{
1287 int error;
cd59abfc 1288
1eede070 1289 error = dpm_prepare(state);
2a77c46d
SL
1290 if (error) {
1291 suspend_stats.failed_prepare++;
1292 dpm_save_failed_step(SUSPEND_PREPARE);
1293 } else
1eede070 1294 error = dpm_suspend(state);
cd59abfc 1295 return error;
cd59abfc 1296}
d1616302 1297EXPORT_SYMBOL_GPL(dpm_suspend_start);
cd59abfc
AS
1298
1299void __suspend_report_result(const char *function, void *fn, int ret)
1300{
c80cfb04
BH
1301 if (ret)
1302 printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
cd59abfc
AS
1303}
1304EXPORT_SYMBOL_GPL(__suspend_report_result);
f8824cee
RW
1305
1306/**
1307 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
1308 * @dev: Device to wait for.
1309 * @subordinate: Device that needs to wait for @dev.
1310 */
098dff73 1311int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
f8824cee
RW
1312{
1313 dpm_wait(dev, subordinate->power.async_suspend);
098dff73 1314 return async_error;
f8824cee
RW
1315}
1316EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);