]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/base/power/main.c
PM / Sleep: use resume event when call dpm_resume_early
[mirror_ubuntu-bionic-kernel.git] / drivers / base / power / main.c
CommitLineData
1da177e4
LT
1/*
2 * drivers/base/power/main.c - Where the driver meets power management.
3 *
4 * Copyright (c) 2003 Patrick Mochel
5 * Copyright (c) 2003 Open Source Development Lab
6 *
7 * This file is released under the GPLv2
8 *
9 *
10 * The driver model core calls device_pm_add() when a device is registered.
b595076a 11 * This will initialize the embedded device_pm_info object in the device
1da177e4
LT
12 * and add it to the list of power-controlled devices. sysfs entries for
13 * controlling device power management will also be added.
14 *
1eede070
RW
15 * A separate list is used for keeping track of power info, because the power
16 * domain dependencies may differ from the ancestral dependencies that the
17 * subsystem list maintains.
1da177e4
LT
18 */
19
1da177e4 20#include <linux/device.h>
cd59abfc 21#include <linux/kallsyms.h>
1b6bc32f 22#include <linux/export.h>
11048dcf 23#include <linux/mutex.h>
cd59abfc 24#include <linux/pm.h>
5e928f77 25#include <linux/pm_runtime.h>
cd59abfc 26#include <linux/resume-trace.h>
2ed8d2b3 27#include <linux/interrupt.h>
f2511774 28#include <linux/sched.h>
5af84b82 29#include <linux/async.h>
1e75227e 30#include <linux/suspend.h>
8651f97b 31#include <linux/cpuidle.h>
cd59abfc 32#include "../base.h"
1da177e4
LT
33#include "power.h"
34
9cf519d1
RW
35typedef int (*pm_callback_t)(struct device *);
36
775b64d2 37/*
1eede070 38 * The entries in the dpm_list list are in a depth first order, simply
775b64d2
RW
39 * because children are guaranteed to be discovered after parents, and
40 * are inserted at the back of the list on discovery.
41 *
8e9394ce
GKH
42 * Since device_pm_add() may be called with a device lock held,
43 * we must never try to acquire a device lock while holding
775b64d2
RW
44 * dpm_list_mutex.
45 */
46
/*
 * Devices migrate between these lists as they advance through the
 * suspend/resume phases: dpm_list -> dpm_prepared_list ->
 * dpm_suspended_list -> dpm_late_early_list -> dpm_noirq_list and back.
 * All of the lists are protected by dpm_list_mtx.
 */
LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
/* Transition currently in progress; consulted by the async callbacks. */
static pm_message_t pm_transition;

/* First error reported by an async suspend thread, if any. */
static int async_error;
5e928f77 59/**
20d652d7 60 * device_pm_init - Initialize the PM-related part of a device object.
5e928f77
RW
61 * @dev: Device object being initialized.
62 */
void device_pm_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	init_completion(&dev->power.completion);
	/*
	 * Mark the completion as done so that dpm_wait() on a device that
	 * has never gone through a PM transition does not block.
	 */
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	spin_lock_init(&dev->power.lock);
	pm_runtime_init(dev);
	INIT_LIST_HEAD(&dev->power.entry);
	dev->power.power_state = PMSG_INVALID;
}
75
1eede070 76/**
20d652d7 77 * device_pm_lock - Lock the list of active devices used by the PM core.
1eede070
RW
78 */
void device_pm_lock(void)
{
	/* Serializes all accesses to dpm_list and the per-phase lists. */
	mutex_lock(&dpm_list_mtx);
}
83
84/**
20d652d7 85 * device_pm_unlock - Unlock the list of active devices used by the PM core.
1eede070
RW
86 */
void device_pm_unlock(void)
{
	/* Counterpart of device_pm_lock(). */
	mutex_unlock(&dpm_list_mtx);
}
075c1771 91
775b64d2 92/**
20d652d7
RW
93 * device_pm_add - Add a device to the PM core's list of active devices.
94 * @dev: Device to add to the list.
775b64d2 95 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	mutex_lock(&dpm_list_mtx);
	/*
	 * Registering a child below a parent that has already been prepared
	 * for a system transition means the child will be missed by the
	 * transition in progress, so warn about it.
	 */
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			 dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	dev_pm_qos_constraints_init(dev);
	mutex_unlock(&dpm_list_mtx);
}
108
775b64d2 109/**
20d652d7
RW
110 * device_pm_remove - Remove a device from the PM core's list of active devices.
111 * @dev: Device to be removed from the list.
775b64d2 112 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	/* Release anyone waiting in dpm_wait() for this device. */
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	dev_pm_qos_constraints_destroy(dev);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}
125
ffa6a705 126/**
20d652d7
RW
127 * device_pm_move_before - Move device in the PM core's list of active devices.
128 * @deva: Device to move in dpm_list.
129 * @devb: Device @deva should come before.
ffa6a705
CH
130 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}
139
140/**
20d652d7
RW
141 * device_pm_move_after - Move device in the PM core's list of active devices.
142 * @deva: Device to move in dpm_list.
143 * @devb: Device @deva should come after.
ffa6a705
CH
144 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}
153
154/**
20d652d7
RW
155 * device_pm_move_last - Move device to end of the PM core's list of devices.
156 * @dev: Device to move in dpm_list.
ffa6a705
CH
157 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	/* Tail of dpm_list is suspended first / resumed last. */
	list_move_tail(&dev->power.entry, &dpm_list);
}
164
875ab0b7
RW
/*
 * initcall_debug_start - Announce a device callback and record its start time.
 * @dev: Device whose callback is about to run.
 *
 * Returns the start timestamp, or a zero ktime when PM timing debug output
 * is disabled (the value is then ignored by initcall_debug_report()).
 */
static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (pm_print_times_enabled) {
		pr_info("calling %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	return calltime;
}
178
/*
 * initcall_debug_report - Print how long a device callback took.
 * @dev: Device the callback ran for.
 * @calltime: Timestamp returned by initcall_debug_start().
 * @error: Return value of the callback.
 */
static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error)
{
	ktime_t delta, rettime;

	if (pm_print_times_enabled) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		/* ">> 10" approximates ns -> usecs; good enough for debug. */
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)ktime_to_ns(delta) >> 10);
	}
}
191
5af84b82
RW
192/**
193 * dpm_wait - Wait for a PM operation to complete.
194 * @dev: Device to wait for.
195 * @async: If unset, wait only if the device's power.async_suspend flag is set.
196 */
197static void dpm_wait(struct device *dev, bool async)
198{
199 if (!dev)
200 return;
201
0e06b4a8 202 if (async || (pm_async_enabled && dev->power.async_suspend))
5af84b82
RW
203 wait_for_completion(&dev->power.completion);
204}
205
/* device_for_each_child() adapter around dpm_wait(); always continues. */
static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}
211
/* Wait for the PM operations of all children of @dev to complete. */
static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}
216
1eede070 217/**
9cf519d1 218 * pm_op - Return the PM operation appropriate for given PM event.
20d652d7
RW
219 * @ops: PM operations to choose from.
220 * @state: PM transition of the system being carried out.
1eede070 221 */
9cf519d1 222static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
1eede070 223{
1eede070
RW
224 switch (state.event) {
225#ifdef CONFIG_SUSPEND
226 case PM_EVENT_SUSPEND:
9cf519d1 227 return ops->suspend;
1eede070 228 case PM_EVENT_RESUME:
9cf519d1 229 return ops->resume;
1eede070 230#endif /* CONFIG_SUSPEND */
1f112cee 231#ifdef CONFIG_HIBERNATE_CALLBACKS
1eede070
RW
232 case PM_EVENT_FREEZE:
233 case PM_EVENT_QUIESCE:
9cf519d1 234 return ops->freeze;
1eede070 235 case PM_EVENT_HIBERNATE:
9cf519d1 236 return ops->poweroff;
1eede070
RW
237 case PM_EVENT_THAW:
238 case PM_EVENT_RECOVER:
9cf519d1 239 return ops->thaw;
1eede070
RW
240 break;
241 case PM_EVENT_RESTORE:
9cf519d1 242 return ops->restore;
1f112cee 243#endif /* CONFIG_HIBERNATE_CALLBACKS */
1eede070 244 }
f2511774 245
9cf519d1 246 return NULL;
1eede070
RW
247}
248
cf579dfb
RW
249/**
250 * pm_late_early_op - Return the PM operation appropriate for given PM event.
251 * @ops: PM operations to choose from.
252 * @state: PM transition of the system being carried out.
253 *
254 * Runtime PM is disabled for @dev while this function is being executed.
255 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	/* Same event mapping as pm_op(), but for the *_late/*_early phase. */
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
282
1eede070 283/**
9cf519d1 284 * pm_noirq_op - Return the PM operation appropriate for given PM event.
20d652d7
RW
285 * @ops: PM operations to choose from.
286 * @state: PM transition of the system being carried out.
1eede070 287 *
20d652d7
RW
288 * The driver of @dev will not receive interrupts while this function is being
289 * executed.
1eede070 290 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	/* Same event mapping as pm_op(), but for the *_noirq phase. */
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
316
/* Return a human-readable name for the given PM_EVENT_* code. */
static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}
340
/* Debug-print which PM callback is about to run for @dev. */
static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}
347
/* Report a failed PM callback for @dev with the phase suffix in @info. */
static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
		       int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}
354
ecf762b2
RW
/* Print how long the whole phase starting at @starttime took. */
static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	/* Report at least 1 us so the output never shows 0.000 msecs. */
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}
371
9cf519d1
RW
/*
 * dpm_run_callback - Invoke a PM callback with debug instrumentation.
 * @cb: Callback to run (may be NULL, in which case 0 is returned).
 * @dev: Device the callback is run for.
 * @state: PM transition of the system being carried out.
 * @info: Phase description used in debug and error messages.
 *
 * Returns the callback's return value.
 */
static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev);

	pm_dev_dbg(dev, state, info);
	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}
391
cd59abfc
AS
392/*------------------------- Resume routines -------------------------*/
393
394/**
20d652d7
RW
395 * device_resume_noirq - Execute an "early resume" callback for given device.
396 * @dev: Device to handle.
397 * @state: PM transition of the system being carried out.
cd59abfc 398 *
20d652d7
RW
399 * The driver of @dev will not receive interrupts while this function is being
400 * executed.
cd59abfc 401 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	/*
	 * Callback lookup precedence: PM domain, then device type, then
	 * class, then bus; the driver's own callback is used only when
	 * none of those provided one.
	 */
	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

	TRACE_RESUME(error);
	return error;
}
435
436/**
cf579dfb 437 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
20d652d7 438 * @state: PM transition of the system being carried out.
775b64d2 439 *
cf579dfb 440 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
20d652d7 441 * enable device drivers to receive interrupts.
775b64d2 442 */
static void dpm_resume_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_noirq_list)) {
		struct device *dev = to_device(dpm_noirq_list.next);
		int error;

		get_device(dev);
		/* Advance the device to the next phase's list. */
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		/* Drop the list lock while the callback runs; it may sleep. */
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_noirq(dev, state);
		if (error) {
			suspend_stats.failed_resume_noirq++;
			dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
			dpm_save_failed_dev(dev_name(dev));
			pm_dev_err(dev, state, " noirq", error);
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "noirq");
	/* Device interrupts were disabled by dpm_suspend_noirq(). */
	resume_device_irqs();
	cpuidle_resume();
}
472
473/**
474 * device_resume_early - Execute an "early resume" callback for given device.
475 * @dev: Device to handle.
476 * @state: PM transition of the system being carried out.
477 *
478 * Runtime PM is disabled for @dev while this function is being executed.
479 */
static int device_resume_early(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	/* Same lookup precedence as device_resume_noirq(). */
	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

	TRACE_RESUME(error);
	return error;
}
513
514/**
515 * dpm_resume_early - Execute "early resume" callbacks for all devices.
516 * @state: PM transition of the system being carried out.
517 */
static void dpm_resume_early(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.next);
		int error;

		get_device(dev);
		/* Advance the device to the next phase's list. */
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		/* Drop the list lock while the callback runs; it may sleep. */
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_early(dev, state);
		if (error) {
			suspend_stats.failed_resume_early++;
			dpm_save_failed_step(SUSPEND_RESUME_EARLY);
			dpm_save_failed_dev(dev_name(dev));
			pm_dev_err(dev, state, " early", error);
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "early");
}
cf579dfb
RW
545
546/**
547 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
548 * @state: PM transition of the system being carried out.
549 */
void dpm_resume_start(pm_message_t state)
{
	/* Reverse order of dpm_suspend_end(): noirq phase first. */
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);
775b64d2
RW
556
557/**
97df8c12 558 * device_resume - Execute "resume" callbacks for given device.
20d652d7
RW
559 * @dev: Device to handle.
560 * @state: PM transition of the system being carried out.
5af84b82 561 * @async: If true, the device is being resumed asynchronously.
775b64d2 562 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	/* Whether pm_runtime_enable() was called and must be balanced. */
	bool put = false;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	/* The parent must finish resuming before its children may start. */
	dpm_wait(dev->parent, async);
	device_lock(dev);

	/*
	 * This is a fib. But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	pm_runtime_enable(dev);
	put = true;

	/*
	 * Callback lookup precedence: PM domain, type, class, bus, then the
	 * driver itself.  Legacy (non-dev_pm_ops) class/bus resume routines
	 * are final and skip the driver fallback via "goto End".
	 */
	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Driver;
		} else if (dev->class->resume) {
			info = "legacy class ";
			callback = dev->class->resume;
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

 Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

 End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	/* Let waiting children (and async waiters) proceed. */
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	if (put)
		pm_runtime_put_sync(dev);

	return error;
}
644
5af84b82
RW
/* async_schedule() worker: resume one device and drop its reference. */
static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	/* Balances the get_device() done when the work was scheduled. */
	put_device(dev);
}
655
97df8c12 656static bool is_async(struct device *dev)
5af84b82 657{
97df8c12
RW
658 return dev->power.async_suspend && pm_async_enabled
659 && !pm_trace_is_enabled();
5af84b82
RW
660}
661
775b64d2 662/**
20d652d7
RW
663 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
664 * @state: PM transition of the system being carried out.
775b64d2 665 *
20d652d7
RW
666 * Execute the appropriate "resume" callback for all devices whose status
667 * indicates that they are suspended.
1eede070 668 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	/*
	 * First pass: kick off asynchronous resumes so they can run in
	 * parallel with the synchronous ones handled below.
	 */
	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		INIT_COMPLETION(dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	/* Second pass: resume the remaining devices synchronously, in order. */
	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		/* Device may have been removed while the lock was dropped. */
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	/* Wait for all async_resume() work scheduled above to finish. */
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);
}
714
715/**
20d652d7
RW
716 * device_complete - Complete a PM transition for given device.
717 * @dev: Device to handle.
718 * @state: PM transition of the system being carried out.
1eede070 719 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	char *info = NULL;

	device_lock(dev);

	/* Same lookup precedence as the other per-device phase handlers. */
	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);
}
753
754/**
20d652d7
RW
755 * dpm_complete - Complete a PM transition for all non-sysdev devices.
756 * @state: PM transition of the system being carried out.
775b64d2 757 *
20d652d7
RW
758 * Execute the ->complete() callbacks for all devices whose PM status is not
759 * DPM_ON (this allows new devices to be registered).
cd59abfc 760 */
void dpm_complete(pm_message_t state)
{
	/* Local staging list; spliced back onto dpm_list when done. */
	struct list_head list;

	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	/* Walk dpm_prepared_list from the tail: reverse of suspend order. */
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		device_complete(dev, state);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}
785
cd59abfc 786/**
20d652d7
RW
787 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
788 * @state: PM transition of the system being carried out.
cd59abfc 789 *
20d652d7
RW
790 * Execute "resume" callbacks for all devices and complete the PM transition of
791 * the system.
cd59abfc 792 */
void dpm_resume_end(pm_message_t state)
{
	/* Resume all devices, then run their ->complete() callbacks. */
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);
cd59abfc
AS
799
800
801/*------------------------- Suspend routines -------------------------*/
802
1eede070 803/**
20d652d7
RW
804 * resume_event - Return a "resume" message for given "suspend" sleep state.
805 * @sleep_state: PM message representing a sleep state.
806 *
807 * Return a PM message representing the resume event corresponding to given
808 * sleep state.
1eede070
RW
809 */
810static pm_message_t resume_event(pm_message_t sleep_state)
cd59abfc 811{
1eede070
RW
812 switch (sleep_state.event) {
813 case PM_EVENT_SUSPEND:
814 return PMSG_RESUME;
815 case PM_EVENT_FREEZE:
816 case PM_EVENT_QUIESCE:
817 return PMSG_RECOVER;
818 case PM_EVENT_HIBERNATE:
819 return PMSG_RESTORE;
cd59abfc 820 }
1eede070 821 return PMSG_ON;
cd59abfc
AS
822}
823
824/**
20d652d7
RW
825 * device_suspend_noirq - Execute a "late suspend" callback for given device.
826 * @dev: Device to handle.
827 * @state: PM transition of the system being carried out.
775b64d2 828 *
20d652d7
RW
829 * The driver of @dev will not receive interrupts while this function is being
830 * executed.
cd59abfc 831 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;

	/* Same lookup precedence as device_resume_noirq(). */
	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	return dpm_run_callback(callback, dev, state, info);
}
858
859/**
cf579dfb 860 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
20d652d7 861 * @state: PM transition of the system being carried out.
775b64d2 862 *
20d652d7
RW
863 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
864 * handlers for all non-sysdev devices.
775b64d2 865 */
static int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	cpuidle_pause();
	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	/* Walk from the tail: children are suspended before their parents. */
	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		/* Drop the list lock while the callback runs; it may sleep. */
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			suspend_stats.failed_suspend_noirq++;
			dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		/* Device may have been removed while the lock was dropped. */
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);

		/* Abort the transition if a wakeup event arrived meanwhile. */
		if (pm_wakeup_pending()) {
			error = -EBUSY;
			break;
		}
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		/* Roll back the devices already suspended in this phase. */
		dpm_resume_noirq(resume_event(state));
	else
		dpm_show_time(starttime, state, "noirq");
	return error;
}
907
908/**
909 * device_suspend_late - Execute a "late suspend" callback for given device.
910 * @dev: Device to handle.
911 * @state: PM transition of the system being carried out.
912 *
913 * Runtime PM is disabled for @dev while this function is being executed.
914 */
static int device_suspend_late(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;

	/* Same lookup precedence as device_resume_early(). */
	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	return dpm_run_callback(callback, dev, state, info);
}
941
942/**
943 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
944 * @state: PM transition of the system being carried out.
945 */
static int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	mutex_lock(&dpm_list_mtx);
	/* Walk from the tail: children are suspended before their parents. */
	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		/* Drop the list lock while the callback runs; it may sleep. */
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			suspend_stats.failed_suspend_late++;
			dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		/* Device may have been removed while the lock was dropped. */
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);
		put_device(dev);

		/* Abort the transition if a wakeup event arrived meanwhile. */
		if (pm_wakeup_pending()) {
			error = -EBUSY;
			break;
		}
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		/* Roll back the devices already suspended in this phase. */
		dpm_resume_early(resume_event(state));
	else
		dpm_show_time(starttime, state, "late");

	return error;
}
cf579dfb
RW
986
987/**
988 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
989 * @state: PM transition of the system being carried out.
990 */
991int dpm_suspend_end(pm_message_t state)
992{
993 int error = dpm_suspend_late(state);
064b021f
CC
994 if (error)
995 return error;
996
997 error = dpm_suspend_noirq(state);
998 if (error) {
997a0311 999 dpm_resume_early(resume_event(state));
064b021f
CC
1000 return error;
1001 }
cf579dfb 1002
064b021f 1003 return 0;
cf579dfb
RW
1004}
1005EXPORT_SYMBOL_GPL(dpm_suspend_end);
775b64d2 1006
875ab0b7
RW
1007/**
1008 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
0a884223
RD
1009 * @dev: Device to suspend.
1010 * @state: PM transition of the system being carried out.
1011 * @cb: Suspend callback to execute.
875ab0b7
RW
1012 */
1013static int legacy_suspend(struct device *dev, pm_message_t state,
1014 int (*cb)(struct device *dev, pm_message_t state))
1015{
1016 int error;
1017 ktime_t calltime;
1018
1019 calltime = initcall_debug_start(dev);
1020
1021 error = cb(dev, state);
1022 suspend_report_result(cb, error);
1023
1024 initcall_debug_report(dev, calltime, error);
1025
1026 return error;
1027}
1028
/**
 * device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	/* A device must not suspend before all of its children have. */
	dpm_wait_for_children(dev, async);

	/* Another async suspend already failed - don't bother running ours. */
	if (async_error)
		goto Complete;

	/*
	 * Block further runtime suspends and wait for a pending runtime PM
	 * transition to finish; if one was in flight and the device may wake
	 * up the system, report a wakeup event so the transition is aborted
	 * instead of racing with the runtime resume.
	 */
	pm_runtime_get_noresume(dev);
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		/* Drop the reference taken above before bailing out. */
		pm_runtime_put_sync(dev);
		async_error = -EBUSY;
		goto Complete;
	}

	device_lock(dev);

	/*
	 * Callback selection, highest priority first: power domain, device
	 * type, class, bus.  The legacy class/bus ->suspend entries bypass
	 * the common dpm_run_callback() path via the End label.
	 */
	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Run;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend);
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend);
			goto End;
		}
	}

 Run:
	/* Fall back to the driver's callback if no layer supplied one. */
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 End:
	if (!error) {
		dev->power.is_suspended = true;
		/*
		 * Propagate the wakeup-path flag to the parent unless the
		 * parent explicitly ignores its children.
		 */
		if (dev->power.wakeup_path
		    && dev->parent && !dev->parent->power.ignore_children)
			dev->parent->power.wakeup_path = true;
	}

	device_unlock(dev);

 Complete:
	/* Wake up anyone waiting on this device (parents, dpm_wait()). */
	complete_all(&dev->power.completion);

	if (error) {
		/* Failure: release the runtime PM reference taken above. */
		pm_runtime_put_sync(dev);
		async_error = error;
	} else if (dev->power.is_suspended) {
		/* Success: keep the reference and disable runtime PM. */
		__pm_runtime_disable(dev, false);
	}

	return error;
}
1123
5af84b82
RW
1124static void async_suspend(void *data, async_cookie_t cookie)
1125{
1126 struct device *dev = (struct device *)data;
1127 int error;
1128
1129 error = __device_suspend(dev, pm_transition, true);
2a77c46d
SL
1130 if (error) {
1131 dpm_save_failed_dev(dev_name(dev));
5af84b82 1132 pm_dev_err(dev, pm_transition, " async", error);
2a77c46d 1133 }
5af84b82
RW
1134
1135 put_device(dev);
1136}
1137
1138static int device_suspend(struct device *dev)
1139{
1140 INIT_COMPLETION(dev->power.completion);
1141
0e06b4a8 1142 if (pm_async_enabled && dev->power.async_suspend) {
5af84b82
RW
1143 get_device(dev);
1144 async_schedule(async_suspend, dev);
1145 return 0;
1146 }
1147
1148 return __device_suspend(dev, pm_transition, false);
1149}
1150
/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		/* Walk the list from the tail: children before parents. */
		struct device *dev = to_device(dpm_prepared_list.prev);

		/* Pin the device and drop the lock around the callback. */
		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		/* Stop early if an async suspend has already failed. */
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	/* Wait for the async suspends queued by device_suspend(). */
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	} else
		dpm_show_time(starttime, state, NULL);
	return error;
}
1197
1198/**
20d652d7
RW
1199 * device_prepare - Prepare a device for system power transition.
1200 * @dev: Device to handle.
1201 * @state: PM transition of the system being carried out.
1202 *
1203 * Execute the ->prepare() callback(s) for given device. No new children of the
1204 * device may be registered after this function has returned.
1eede070 1205 */
d1616302 1206static int device_prepare(struct device *dev, pm_message_t state)
1eede070 1207{
35cd133c
RW
1208 int (*callback)(struct device *) = NULL;
1209 char *info = NULL;
1eede070
RW
1210 int error = 0;
1211
8e9394ce 1212 device_lock(dev);
1eede070 1213
4ca46ff3
RW
1214 dev->power.wakeup_path = device_may_wakeup(dev);
1215
564b905a 1216 if (dev->pm_domain) {
35cd133c
RW
1217 info = "preparing power domain ";
1218 callback = dev->pm_domain->ops.prepare;
4d27e9dc 1219 } else if (dev->type && dev->type->pm) {
35cd133c
RW
1220 info = "preparing type ";
1221 callback = dev->type->pm->prepare;
9659cc06 1222 } else if (dev->class && dev->class->pm) {
35cd133c
RW
1223 info = "preparing class ";
1224 callback = dev->class->pm->prepare;
9659cc06 1225 } else if (dev->bus && dev->bus->pm) {
35cd133c
RW
1226 info = "preparing bus ";
1227 callback = dev->bus->pm->prepare;
1228 }
1229
1230 if (!callback && dev->driver && dev->driver->pm) {
1231 info = "preparing driver ";
1232 callback = dev->driver->pm->prepare;
1233 }
1234
1235 if (callback) {
1236 error = callback(dev);
1237 suspend_report_result(callback, error);
1eede070 1238 }
7538e3db 1239
8e9394ce 1240 device_unlock(dev);
1eede070
RW
1241
1242 return error;
1243}
cd59abfc 1244
1eede070 1245/**
20d652d7
RW
1246 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
1247 * @state: PM transition of the system being carried out.
1eede070 1248 *
20d652d7 1249 * Execute the ->prepare() callback(s) for all devices.
1eede070 1250 */
91e7c75b 1251int dpm_prepare(pm_message_t state)
1eede070 1252{
1eede070
RW
1253 int error = 0;
1254
91e7c75b
RW
1255 might_sleep();
1256
1eede070 1257 mutex_lock(&dpm_list_mtx);
1eede070
RW
1258 while (!list_empty(&dpm_list)) {
1259 struct device *dev = to_device(dpm_list.next);
1260
1261 get_device(dev);
1eede070
RW
1262 mutex_unlock(&dpm_list_mtx);
1263
1e2ef05b 1264 error = device_prepare(dev, state);
1eede070
RW
1265
1266 mutex_lock(&dpm_list_mtx);
1267 if (error) {
1eede070
RW
1268 if (error == -EAGAIN) {
1269 put_device(dev);
886a7a33 1270 error = 0;
1eede070
RW
1271 continue;
1272 }
1e75227e
RW
1273 printk(KERN_INFO "PM: Device %s not prepared "
1274 "for power transition: code %d\n",
5c1a07ab 1275 dev_name(dev), error);
1eede070
RW
1276 put_device(dev);
1277 break;
1278 }
f76b168b 1279 dev->power.is_prepared = true;
1eede070 1280 if (!list_empty(&dev->power.entry))
8a43a9ab 1281 list_move_tail(&dev->power.entry, &dpm_prepared_list);
1eede070
RW
1282 put_device(dev);
1283 }
1eede070 1284 mutex_unlock(&dpm_list_mtx);
cd59abfc
AS
1285 return error;
1286}
1287
775b64d2 1288/**
20d652d7
RW
1289 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
1290 * @state: PM transition of the system being carried out.
775b64d2 1291 *
20d652d7
RW
1292 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
1293 * callbacks for them.
775b64d2 1294 */
d1616302 1295int dpm_suspend_start(pm_message_t state)
775b64d2
RW
1296{
1297 int error;
cd59abfc 1298
1eede070 1299 error = dpm_prepare(state);
2a77c46d
SL
1300 if (error) {
1301 suspend_stats.failed_prepare++;
1302 dpm_save_failed_step(SUSPEND_PREPARE);
1303 } else
1eede070 1304 error = dpm_suspend(state);
cd59abfc 1305 return error;
cd59abfc 1306}
d1616302 1307EXPORT_SYMBOL_GPL(dpm_suspend_start);
cd59abfc
AS
1308
1309void __suspend_report_result(const char *function, void *fn, int ret)
1310{
c80cfb04
BH
1311 if (ret)
1312 printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
cd59abfc
AS
1313}
1314EXPORT_SYMBOL_GPL(__suspend_report_result);
f8824cee
RW
1315
1316/**
1317 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
1318 * @dev: Device to wait for.
1319 * @subordinate: Device that needs to wait for @dev.
1320 */
098dff73 1321int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
f8824cee
RW
1322{
1323 dpm_wait(dev, subordinate->power.async_suspend);
098dff73 1324 return async_error;
f8824cee
RW
1325}
1326EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);