/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/timer.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in dpm_list are in depth-first order, simply because
 * children are guaranteed to be discovered after parents, and are
 * inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	dev->power.is_noirq_suspended = false;
	dev->power.is_late_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	/* pm_print_times_enabled is toggled via /sys/power/pm_print_times. */
	if (pm_print_times_enabled) {
		pr_info("calling %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error, pm_message_t state, char *info)
{
	ktime_t rettime;
	s64 nsecs;

	rettime = ktime_get();
	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));

	if (pm_print_times_enabled) {
		/* The shift approximates a division by NSEC_PER_USEC. */
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)nsecs >> 10);
	}

	trace_device_pm_report_time(dev, info, nsecs, pm_verb(state.event),
				    error);
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

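/*
 * The two helpers above implement the dependency protocol for asynchronous
 * suspend/resume: a device waits for its parent to finish resuming before
 * it resumes (dpm_wait() on dev->parent in the resume paths), and waits for
 * all of its children to finish suspending before it suspends
 * (dpm_wait_for_children() in the suspend paths).  This preserves the
 * parent/child ordering of dpm_list even when callbacks run in async threads.
 */
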
/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

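/*
 * Driver-side sketch (illustrative only, not part of this file): the
 * callbacks returned by pm_op() are normally supplied by a driver through
 * a struct dev_pm_ops, e.g.:
 *
 *	static int foo_suspend(struct device *dev) { ... }
 *	static int foo_resume(struct device *dev) { ... }
 *	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
 *
 *	static struct platform_driver foo_driver = {
 *		.driver = { .name = "foo", .pm = &foo_pm_ops },
 *	};
 *
 * With that in place, pm_op() resolves PM_EVENT_SUSPEND/PM_EVENT_RESUME to
 * foo_suspend()/foo_resume(); SIMPLE_DEV_PM_OPS() also reuses the same pair
 * for the hibernation events handled above.
 */
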
/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev);

	pm_dev_dbg(dev, state, info);
	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}

#ifdef CONFIG_DPM_WATCHDOG
struct dpm_watchdog {
	struct device		*dev;
	struct task_struct	*tsk;
	struct timer_list	timer;
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
	struct dpm_watchdog wd

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @data: Watchdog object address.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(unsigned long data)
{
	struct dpm_watchdog *wd = (void *)data;

	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
	show_stack(wd->tsk, NULL);
	panic("%s %s: unrecoverable failure\n",
		dev_driver_string(wd->dev), dev_name(wd->dev));
}

/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
	struct timer_list *timer = &wd->timer;

	wd->dev = dev;
	wd->tsk = current;

	init_timer_on_stack(timer);
	/* use same timeout value for both suspend and resume */
	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
	timer->function = dpm_watchdog_handler;
	timer->data = (unsigned long)wd;
	add_timer(timer);
}

/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
	struct timer_list *timer = &wd->timer;

	del_timer_sync(timer);
	destroy_timer_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif

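/*
 * The watchdog is armed around individual device callbacks via
 * DECLARE_DPM_WATCHDOG_ON_STACK()/dpm_watchdog_set() in device_resume()
 * and __device_suspend() below; CONFIG_DPM_WATCHDOG_TIMEOUT is the timeout
 * in seconds, used for both the suspend and resume directions.
 */
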
/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_noirq_suspended)
		goto Out;

	dpm_wait(dev->parent, async);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_noirq_suspended = false;

 Out:
	complete_all(&dev->power.completion);
	TRACE_RESUME(error);
	return error;
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}

static void async_resume_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_noirq(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
static void dpm_resume_noirq(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront, in case the starting of async
	 * threads is delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_noirq, dev);
		}
	}

	while (!list_empty(&dpm_noirq_list)) {
		dev = to_device(dpm_noirq_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_noirq(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_noirq++;
				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " noirq", error);
			}
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, "noirq");
	resume_device_irqs();
	cpuidle_resume();
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_late_suspended)
		goto Out;

	dpm_wait(dev->parent, async);

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_late_suspended = false;

 Out:
	TRACE_RESUME(error);

	pm_runtime_enable(dev);
	complete_all(&dev->power.completion);
	return error;
}

static void async_resume_early(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_early(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static void dpm_resume_early(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront, in case the starting of async
	 * threads is delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_early, dev);
		}
	}

	while (!list_empty(&dpm_late_early_list)) {
		dev = to_device(dpm_late_early_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_early(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_early++;
				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " early", error);
			}
		}
		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, "early");
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

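/*
 * Resume-side ordering, for reference: the platform suspend/hibernation core
 * is expected to call dpm_resume_start() (noirq plus early callbacks, with
 * device interrupts re-enabled between the two stages) and later
 * dpm_resume_end() (resume plus complete callbacks), mirroring
 * dpm_suspend_start()/dpm_suspend_end() on the way down.
 */
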
/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		/* Match the pm_runtime_disable() in __device_suspend(). */
		pm_runtime_enable(dev);
		goto Complete;
	}

	dpm_wait(dev->parent, async);
	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Driver;
		} else if (dev->class->resume) {
			info = "legacy class ";
			callback = dev->class->resume;
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

 Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

 End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

775b64d2 825/**
20d652d7
RW
826 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
827 * @state: PM transition of the system being carried out.
775b64d2 828 *
20d652d7
RW
829 * Execute the appropriate "resume" callback for all devices whose status
830 * indicates that they are suspended.
1eede070 831 */
91e7c75b 832void dpm_resume(pm_message_t state)
1eede070 833{
97df8c12 834 struct device *dev;
ecf762b2 835 ktime_t starttime = ktime_get();
1eede070 836
91e7c75b
RW
837 might_sleep();
838
1eede070 839 mutex_lock(&dpm_list_mtx);
5af84b82 840 pm_transition = state;
098dff73 841 async_error = 0;
1eede070 842
8a43a9ab 843 list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
16735d02 844 reinit_completion(&dev->power.completion);
97df8c12
RW
845 if (is_async(dev)) {
846 get_device(dev);
847 async_schedule(async_resume, dev);
848 }
849 }
850
8a43a9ab
RW
851 while (!list_empty(&dpm_suspended_list)) {
852 dev = to_device(dpm_suspended_list.next);
1eede070 853 get_device(dev);
5b219a51 854 if (!is_async(dev)) {
1eede070
RW
855 int error;
856
1eede070
RW
857 mutex_unlock(&dpm_list_mtx);
858
97df8c12 859 error = device_resume(dev, state, false);
2a77c46d
SL
860 if (error) {
861 suspend_stats.failed_resume++;
862 dpm_save_failed_step(SUSPEND_RESUME);
863 dpm_save_failed_dev(dev_name(dev));
1eede070 864 pm_dev_err(dev, state, "", error);
2a77c46d 865 }
5b219a51
RW
866
867 mutex_lock(&dpm_list_mtx);
1eede070
RW
868 }
869 if (!list_empty(&dev->power.entry))
8a43a9ab 870 list_move_tail(&dev->power.entry, &dpm_prepared_list);
1eede070
RW
871 put_device(dev);
872 }
1eede070 873 mutex_unlock(&dpm_list_mtx);
5af84b82 874 async_synchronize_full();
ecf762b2 875 dpm_show_time(starttime, state, NULL);
2f0aea93
VK
876
877 cpufreq_resume();
1eede070
RW
878}
879
/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	char *info = NULL;

	if (dev->power.syscore)
		return;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

	/* Drop the runtime PM reference taken in device_prepare(). */
	pm_runtime_put(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		device_complete(dev, state);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

/**
 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	dpm_wait_for_children(dev, async);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	if (!error)
		dev->power.is_noirq_suspended = true;
	else
		async_error = error;

Complete:
	complete_all(&dev->power.completion);
	return error;
}

static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_noirq(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend_noirq(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend_noirq, dev);
		return 0;
	}
	return __device_suspend_noirq(dev, pm_transition, false);
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq"
 * suspend handlers for all non-sysdev devices.
 */
static int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	cpuidle_pause();
	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;

	if (error) {
		suspend_stats.failed_suspend_noirq++;
		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
		dpm_resume_noirq(resume_event(state));
	} else {
		dpm_show_time(starttime, state, "noirq");
	}
	return error;
}

/**
 * __device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	__pm_runtime_disable(dev, false);

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	dpm_wait_for_children(dev, async);

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	if (!error)
		dev->power.is_late_suspended = true;
	else
		async_error = error;

Complete:
	complete_all(&dev->power.completion);
	return error;
}

static void async_suspend_late(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_late(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}
	put_device(dev);
}

static int device_suspend_late(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend_late, dev);
		return 0;
	}

	return __device_suspend_late(dev, pm_transition, false);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (error) {
		suspend_stats.failed_suspend_late++;
		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
		dpm_resume_early(resume_event(state));
	} else {
		dpm_show_time(starttime, state, "late");
	}
	return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
	int error = dpm_suspend_late(state);
	if (error)
		return error;

	error = dpm_suspend_noirq(state);
	if (error) {
		dpm_resume_early(resume_event(state));
		return error;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);

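/*
 * Suspend-side ordering, for reference: dpm_prepare() and dpm_suspend()
 * (reached via dpm_suspend_start()) are followed by dpm_suspend_late() and
 * dpm_suspend_noirq() (reached via dpm_suspend_end() above); a failure at
 * any stage unwinds through the matching resume stage, as seen above.
 */
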
/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: Description string used for debug/trace output.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state),
			  char *info)
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev, state);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	dpm_wait_for_children(dev, async);

	if (async_error)
		goto Complete;

	/*
	 * If a device configured to wake up the system from sleep states
	 * has been suspended at run time and there's a resume request pending
	 * for it, this is equivalent to the device signaling wakeup, so the
	 * system suspend operation should be aborted.
	 */
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		if (pm_runtime_status_suspended(dev)) {
			pm_runtime_disable(dev);
			if (pm_runtime_suspended_if_enabled(dev))
				goto Complete;

			pm_runtime_enable(dev);
		}
		dev->power.direct_complete = false;
	}

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Run;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend,
						"legacy class ");
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend,
						"legacy bus ");
			goto End;
		}
	}

 Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 End:
	if (!error) {
		struct device *parent = dev->parent;

		dev->power.is_suspended = true;
		if (parent) {
			spin_lock_irq(&parent->power.lock);

			dev->parent->power.direct_complete = false;
			if (dev->power.wakeup_path
			    && !dev->parent->power.ignore_children)
				dev->parent->power.wakeup_path = true;

			spin_unlock_irq(&parent->power.lock);
		}
	}

	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);
	if (error)
		async_error = error;

	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

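/*
 * Whether a device takes the async path above depends on two opt-ins:
 * the global pm_async_enabled switch (exposed as /sys/power/pm_async) and
 * the per-device power.async_suspend flag, normally set with
 * device_enable_async_suspend().
 */
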
/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	might_sleep();

	cpufreq_suspend();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	} else
		dpm_show_time(starttime, state, NULL);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	char *info = NULL;
	int ret = 0;

	if (dev->power.syscore)
		return 0;

	/*
	 * If a device's parent goes into runtime suspend at the wrong time,
	 * it won't be possible to resume the device.  To prevent this we
	 * block runtime suspend here, during the prepare phase, and allow
	 * it again during the complete phase.
	 */
	pm_runtime_get_noresume(dev);

	device_lock(dev);

	dev->power.wakeup_path = device_may_wakeup(dev);

	if (dev->pm_domain) {
		info = "preparing power domain ";
		callback = dev->pm_domain->ops.prepare;
	} else if (dev->type && dev->type->pm) {
		info = "preparing type ";
		callback = dev->type->pm->prepare;
	} else if (dev->class && dev->class->pm) {
		info = "preparing class ";
		callback = dev->class->pm->prepare;
	} else if (dev->bus && dev->bus->pm) {
		info = "preparing bus ";
		callback = dev->bus->pm->prepare;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "preparing driver ";
		callback = dev->driver->pm->prepare;
	}

	if (callback)
		ret = callback(dev);

	device_unlock(dev);

	if (ret < 0) {
		suspend_report_result(callback, ret);
		pm_runtime_put(dev);
		return ret;
	}
	/*
	 * A positive return value from ->prepare() means "this device appears
	 * to be runtime-suspended and its state is fine, so if it really is
	 * runtime-suspended, you can leave it in that state provided that you
	 * will do the same thing with all of its descendants".  This only
	 * applies to suspend transitions, however.
	 */
	spin_lock_irq(&dev->power.lock);
	dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND;
	spin_unlock_irq(&dev->power.lock);
	return 0;
}

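/*
 * Illustrative (hypothetical) driver callback taking advantage of the
 * direct_complete optimization described above: report "leave me
 * runtime-suspended" whenever the runtime PM status already says suspended.
 *
 *	static int foo_prepare(struct device *dev)
 *	{
 *		return pm_runtime_suspended(dev) ? 1 : 0;
 *	}
 */
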
/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_prepare(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared for power transition: code %d\n",
			       dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @subordinate: Device that needs to wait for @dev.
 * @dev: Device to wait for.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);

/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
	struct device *dev;

	if (!fn)
		return;

	device_pm_lock();
	list_for_each_entry(dev, &dpm_list, power.entry)
		fn(dev, data);
	device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);
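
/*
 * Example (hypothetical) dpm_for_each_dev() usage, e.g. for debugging:
 *
 *	static void pm_list_one(struct device *dev, void *data)
 *	{
 *		pr_info("PM: %s\n", dev_name(dev));
 *	}
 *
 *	dpm_for_each_dev(NULL, pm_list_one);
 *
 * The callback runs with dpm_list_mtx held, so it must not register or
 * unregister devices, and must not call device_pm_lock() itself.
 */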