/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <linux/cpuidle.h>
#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);
LIST_HEAD(dpm_prepared_list);
LIST_HEAD(dpm_suspended_list);
LIST_HEAD(dpm_late_early_list);
LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

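/*
 * A device's power.entry migrates between the lists above as a transition
 * progresses: dpm_prepare() moves it from dpm_list to dpm_prepared_list,
 * dpm_suspend() to dpm_suspended_list, dpm_suspend_late() to
 * dpm_late_early_list and dpm_suspend_noirq() to dpm_noirq_list.  The
 * resume phases walk the lists back in the opposite direction, and
 * dpm_complete() finally splices the devices back into dpm_list.
 */
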
/**
 * device_pm_init - Initialize the PM-related part of a device object.
 * @dev: Device object being initialized.
 */
void device_pm_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	spin_lock_init(&dev->power.lock);
	pm_runtime_init(dev);
	INIT_LIST_HEAD(&dev->power.entry);
	dev->power.power_state = PMSG_INVALID;
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	dev_pm_qos_constraints_init(dev);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	dev_pm_qos_constraints_destroy(dev);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (initcall_debug) {
		pr_info("calling %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error)
{
	ktime_t delta, rettime;

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)ktime_to_ns(delta) >> 10);
	}
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

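/*
 * dpm_wait() and dpm_wait_for_children() provide the ordering for
 * asynchronous transitions: every device's power.completion is
 * reinitialized before a phase starts and completed once the device has
 * been handled.  Resume waits for the parent to finish first, while
 * suspend waits for all children, so the parent/child ordering holds
 * even when callbacks run in async threads.
 */
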
/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

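/*
 * For example, a driver (here with hypothetical foo_* callbacks) could
 * fill in its struct dev_pm_ops as:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		.suspend = foo_suspend,
 *		.resume = foo_resume,
 *		.freeze = foo_freeze,
 *		.thaw = foo_thaw,
 *		.poweroff = foo_poweroff,
 *		.restore = foo_restore,
 *	};
 *
 * pm_op(&foo_pm_ops, PMSG_SUSPEND) then returns foo_suspend, while
 * pm_op(&foo_pm_ops, PMSG_RESTORE) returns foo_restore.  A NULL result
 * simply means there is no callback for the transition, which
 * dpm_run_callback() treats as success.
 */
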
/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for the device while the returned callback is being
 * executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of the device will not receive interrupts while the returned
 * callback is being executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev);

	pm_dev_dbg(dev, state, info);
	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

	TRACE_RESUME(error);
	return error;
}

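/*
 * Note the lookup order used by device_resume_noirq() and its siblings:
 * a power domain attached to the device takes precedence over the device
 * type, which takes precedence over the class and then the bus.  The
 * driver's own dev_pm_ops is consulted only as a fallback when no
 * subsystem-level callback was found.
 */
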
/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
static void dpm_resume_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_noirq_list)) {
		struct device *dev = to_device(dpm_noirq_list.next);
		int error;

		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_noirq(dev, state);
		if (error) {
			suspend_stats.failed_resume_noirq++;
			dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
			dpm_save_failed_dev(dev_name(dev));
			pm_dev_err(dev, state, " noirq", error);
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "noirq");
	resume_device_irqs();
	cpuidle_resume();
}

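/*
 * The list walk above is the pattern all of the dpm_* loops in this file
 * follow: take a reference with get_device() and drop dpm_list_mtx while
 * the callback runs (callbacks may sleep and may re-enter the driver
 * core), then retake the mutex, update the list and drop the reference.
 */
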
/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

	TRACE_RESUME(error);
	return error;
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static void dpm_resume_early(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.next);
		int error;

		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_early(dev, state);
		if (error) {
			suspend_stats.failed_resume_early++;
			dpm_save_failed_step(SUSPEND_RESUME_EARLY);
			dpm_save_failed_dev(dev_name(dev));
			pm_dev_err(dev, state, " early", error);
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "early");
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	bool put = false;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	dpm_wait(dev->parent, async);
	device_lock(dev);

	/*
	 * This is a fib. But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	pm_runtime_enable(dev);
	put = true;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Driver;
		} else if (dev->class->resume) {
			info = "legacy class ";
			callback = dev->class->resume;
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

 Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

 End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	if (put)
		pm_runtime_put_sync(dev);

	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}

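/*
 * Asynchronous handling is opt-in on both ends: pm_async_enabled reflects
 * the global /sys/power/pm_async switch, and power.async_suspend is set
 * per device via device_enable_async_suspend().  When PM tracing
 * (pm_trace) is active, everything falls back to the synchronous path.
 */
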
/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		INIT_COMPLETION(dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);
}

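/*
 * dpm_resume() above makes two passes over dpm_suspended_list: the first
 * schedules async_resume() for every async-capable device, the second
 * handles the remaining devices synchronously in list order.  When the
 * second pass reaches a device that was scheduled asynchronously, it only
 * updates the list entry; async_synchronize_full() then waits for any
 * async callbacks still in flight.
 */
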
/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	char *info = NULL;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		device_complete(dev, state);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq"
 * suspend handlers for all non-sysdev devices.
 */
static int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	cpuidle_pause();
	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			suspend_stats.failed_suspend_noirq++;
			dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);

		if (pm_wakeup_pending()) {
			error = -EBUSY;
			break;
		}
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_noirq(resume_event(state));
	else
		dpm_show_time(starttime, state, "noirq");
	return error;
}

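/*
 * Unlike the resume loops, which take devices from the head of their
 * lists, the suspend loops start at the tail (note the .prev above), so
 * children are suspended before their parents.  Each iteration also polls
 * pm_wakeup_pending() and aborts the whole transition with -EBUSY if a
 * wakeup event has been reported in the meantime.
 */
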
/**
 * device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_suspend_late(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			suspend_stats.failed_suspend_late++;
			dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);
		put_device(dev);

		if (pm_wakeup_pending()) {
			error = -EBUSY;
			break;
		}
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_early(resume_event(state));
	else
		dpm_show_time(starttime, state, "late");

	return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
	int error = dpm_suspend_late(state);

	return error ? : dpm_suspend_noirq(state);
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);

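/*
 * Phase ordering, for reference: a suspend runs
 *
 *	dpm_prepare() -> dpm_suspend() -> dpm_suspend_late()
 *		      -> dpm_suspend_noirq()
 *
 * and the resume side mirrors it with
 *
 *	dpm_resume_noirq() -> dpm_resume_early() -> dpm_resume()
 *			   -> dpm_complete()
 *
 * dpm_suspend_end() and dpm_resume_start() bundle the "late"/"noirq"
 * halves for the suspend and hibernation cores.
 */
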
/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev, state);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	dpm_wait_for_children(dev, async);

	if (async_error)
		goto Complete;

	pm_runtime_get_noresume(dev);
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		pm_runtime_put_sync(dev);
		async_error = -EBUSY;
		goto Complete;
	}

	device_lock(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Run;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend);
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend);
			goto End;
		}
	}

 Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 End:
	if (!error) {
		dev->power.is_suspended = true;
		if (dev->power.wakeup_path
		    && dev->parent && !dev->parent->power.ignore_children)
			dev->parent->power.wakeup_path = true;
	}

	device_unlock(dev);

 Complete:
	complete_all(&dev->power.completion);

	if (error) {
		pm_runtime_put_sync(dev);
		async_error = error;
	} else if (dev->power.is_suspended) {
		__pm_runtime_disable(dev, false);
	}

	return error;
}

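/*
 * The runtime PM calls at the top of __device_suspend() are worth
 * spelling out: pm_runtime_get_noresume() keeps the device from being
 * runtime-suspended during the transition, and pm_runtime_barrier()
 * flushes runtime PM requests already in flight.  If the barrier found a
 * pending resume request and the device is allowed to wake up the
 * system, that request is reported as a wakeup event, which
 * pm_wakeup_pending() then turns into an aborted suspend.
 */
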
static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	INIT_COMPLETION(dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	} else
		dpm_show_time(starttime, state, NULL);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device. No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	char *info = NULL;
	int error = 0;

	device_lock(dev);

	dev->power.wakeup_path = device_may_wakeup(dev);

	if (dev->pm_domain) {
		info = "preparing power domain ";
		callback = dev->pm_domain->ops.prepare;
	} else if (dev->type && dev->type->pm) {
		info = "preparing type ";
		callback = dev->type->pm->prepare;
	} else if (dev->class && dev->class->pm) {
		info = "preparing class ";
		callback = dev->class->pm->prepare;
	} else if (dev->bus && dev->bus->pm) {
		info = "preparing bus ";
		callback = dev->bus->pm->prepare;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "preparing driver ";
		callback = dev->driver->pm->prepare;
	}

	if (callback) {
		error = callback(dev);
		suspend_report_result(callback, error);
	}

	device_unlock(dev);

	return error;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_prepare(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @subordinate: Device that needs to wait for @dev.
 * @dev: Device to wait for.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
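
/*
 * A sketch of how a driver might use device_pm_wait_for_dev() (the foo_*
 * names and the partner_dev field are hypothetical):
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *		int error;
 *
 *		error = device_pm_wait_for_dev(dev, foo->partner_dev);
 *		if (error)
 *			return error;
 *		...
 *	}
 *
 * The wait only blocks when asynchronous suspend/resume is in use, and
 * the async_error return value makes a failure recorded by any other
 * device abort this one as well.
 */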