/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm-trace.h>
#include <linux/pm_wakeirq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/timer.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	dev->power.is_noirq_suspended = false;
	dev->power.is_late_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			 dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (pm_print_times_enabled) {
		pr_info("calling  %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error, pm_message_t state, char *info)
{
	ktime_t rettime;
	s64 nsecs;

	rettime = ktime_get();
	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));

	if (pm_print_times_enabled) {
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)nsecs >> 10);
	}
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}
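
/*
 * Illustrative sketch (hypothetical driver, not part of this file): a driver
 * opts a device into the asynchronous suspend/resume paths that dpm_wait()
 * and dpm_wait_for_children() synchronize by setting power.async_suspend,
 * typically via device_enable_async_suspend() at probe time:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		device_enable_async_suspend(&pdev->dev);
 *		return 0;
 *	}
 *
 * The PM core then only waits for such a device (or its children) at the
 * ordering points above, instead of processing it strictly in dpm_list order.
 */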

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
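
/*
 * Illustrative sketch (hypothetical driver, not part of this file): given the
 * dev_pm_ops below, pm_op() returns foo_suspend for PM_EVENT_SUSPEND and
 * foo_resume for PM_EVENT_RESUME; any other event yields NULL, which
 * dpm_run_callback() then treats as a no-op for the device.
 *
 *	static int foo_suspend(struct device *dev) { return 0; }
 *	static int foo_resume(struct device *dev) { return 0; }
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		.suspend = foo_suspend,
 *		.resume = foo_resume,
 *	};
 */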

/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for the device while the returned callback runs.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of the device will not receive interrupts while the returned
 * callback runs.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev);

	pm_dev_dbg(dev, state, info);
	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}

#ifdef CONFIG_DPM_WATCHDOG
struct dpm_watchdog {
	struct device		*dev;
	struct task_struct	*tsk;
	struct timer_list	timer;
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
	struct dpm_watchdog wd

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @data: Watchdog object address.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover, so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(unsigned long data)
{
	struct dpm_watchdog *wd = (void *)data;

	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
	show_stack(wd->tsk, NULL);
	panic("%s %s: unrecoverable failure\n",
		dev_driver_string(wd->dev), dev_name(wd->dev));
}

/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
	struct timer_list *timer = &wd->timer;

	wd->dev = dev;
	wd->tsk = current;

	init_timer_on_stack(timer);
	/* use same timeout value for both suspend and resume */
	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
	timer->function = dpm_watchdog_handler;
	timer->data = (unsigned long)wd;
	add_timer(timer);
}

/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
	struct timer_list *timer = &wd->timer;

	del_timer_sync(timer);
	destroy_timer_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif
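
/*
 * Configuration note (sketch): the watchdog above is only compiled in when
 * the kernel is built with something like
 *
 *	CONFIG_DPM_WATCHDOG=y
 *	CONFIG_DPM_WATCHDOG_TIMEOUT=60
 *
 * (the timeout is in seconds; see the DPM_WATCHDOG entries under
 * kernel/power/Kconfig for the exact dependencies and range).  With the
 * option disabled, the three helpers above compile away to empty macros.
 */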

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_noirq_suspended)
		goto Out;

	dpm_wait(dev->parent, async);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_noirq_suspended = false;

 Out:
	complete_all(&dev->power.completion);
	TRACE_RESUME(error);
	return error;
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}
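
/*
 * Usage note (sketch): both conditions checked by is_async() are runtime
 * tunables.  pm_async_enabled is toggled globally via /sys/power/pm_async,
 * and the per-device flag sits in the device's power group, e.g.:
 *
 *	echo 1 > /sys/power/pm_async
 *	echo enabled > /sys/devices/.../power/async
 *
 * Async processing is skipped entirely while PM trace (/sys/power/pm_trace)
 * is enabled, since that debug facility depends on a reproducible ordering
 * to identify the device that hung.
 */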

static void async_resume_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_noirq(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront, in case the starting of async
	 * threads is delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_noirq, dev);
		}
	}

	while (!list_empty(&dpm_noirq_list)) {
		dev = to_device(dpm_noirq_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_noirq(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_noirq++;
				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " noirq", error);
			}
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, "noirq");
	resume_device_irqs();
	device_wakeup_disarm_wake_irqs();
	cpuidle_resume();
	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_late_suspended)
		goto Out;

	dpm_wait(dev->parent, async);

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_late_suspended = false;

 Out:
	TRACE_RESUME(error);

	pm_runtime_enable(dev);
	complete_all(&dev->power.completion);
	return error;
}

static void async_resume_early(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_early(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_early(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront, in case the starting of async
	 * threads is delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_early, dev);
		}
	}

	while (!list_empty(&dpm_late_early_list)) {
		dev = to_device(dpm_late_early_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_early(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_early++;
				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " early", error);
			}
		}
		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, "early");
	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		/* Match the pm_runtime_disable() in __device_suspend(). */
		pm_runtime_enable(dev);
		goto Complete;
	}

	dpm_wait(dev->parent, async);
	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Driver;
		} else if (dev->class->resume) {
			info = "legacy class ";
			callback = dev->class->resume;
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

 Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

 End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);

	cpufreq_resume();
	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	char *info = NULL;

	if (dev->power.syscore)
		return;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

	pm_runtime_put(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		device_complete(dev, state);
		trace_device_pm_callback_end(dev, 0);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}
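
/*
 * For example, a failing suspend-to-RAM transition unwinds with
 * resume_event(PMSG_SUSPEND) == PMSG_RESUME, while a failure while freezing
 * for hibernation unwinds with resume_event(PMSG_FREEZE) == PMSG_RECOVER,
 * so drivers can tell error-recovery thaws apart from ordinary resumes.
 */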

/**
 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	dpm_wait_for_children(dev, async);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	if (!error)
		dev->power.is_noirq_suspended = true;
	else
		async_error = error;

Complete:
	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}

static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_noirq(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend_noirq(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend_noirq, dev);
		return 0;
	}
	return __device_suspend_noirq(dev, pm_transition, false);
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq"
 * suspend handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
	cpuidle_pause();
	device_wakeup_arm_wake_irqs();
	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;

	if (error) {
		suspend_stats.failed_suspend_noirq++;
		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
		dpm_resume_noirq(resume_event(state));
	} else {
		dpm_show_time(starttime, state, "noirq");
	}
	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
	return error;
}

/**
 * __device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	__pm_runtime_disable(dev, false);

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	dpm_wait_for_children(dev, async);

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	if (!error)
		dev->power.is_late_suspended = true;
	else
		async_error = error;

Complete:
	TRACE_SUSPEND(error);
	complete_all(&dev->power.completion);
	return error;
}

static void async_suspend_late(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_late(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}
	put_device(dev);
}

static int device_suspend_late(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend_late, dev);
		return 0;
	}

	return __device_suspend_late(dev, pm_transition, false);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend_late++;
		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
		dpm_resume_early(resume_event(state));
	} else {
		dpm_show_time(starttime, state, "late");
	}
	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
	return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
	int error = dpm_suspend_late(state);
	if (error)
		return error;

	error = dpm_suspend_noirq(state);
	if (error) {
		dpm_resume_early(resume_event(state));
		return error;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: string description of caller.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state),
			  char *info)
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev, state);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_children(dev, async);

	if (async_error)
		goto Complete;

	/*
	 * If a device configured to wake up the system from sleep states
	 * has been suspended at run time and there's a resume request pending
	 * for it, this is equivalent to the device signaling wakeup, so the
	 * system suspend operation should be aborted.
	 */
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		if (pm_runtime_status_suspended(dev)) {
			pm_runtime_disable(dev);
			if (pm_runtime_status_suspended(dev))
				goto Complete;

			pm_runtime_enable(dev);
		}
		dev->power.direct_complete = false;
	}

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Run;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend,
						"legacy class ");
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend,
						"legacy bus ");
			goto End;
		}
	}

 Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 End:
	if (!error) {
		struct device *parent = dev->parent;

		dev->power.is_suspended = true;
		if (parent) {
			spin_lock_irq(&parent->power.lock);

			dev->parent->power.direct_complete = false;
			if (dev->power.wakeup_path
			    && !dev->parent->power.ignore_children)
				dev->parent->power.wakeup_path = true;

			spin_unlock_irq(&parent->power.lock);
		}
	}

	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);
	if (error)
		async_error = error;

	TRACE_SUSPEND(error);
	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
	might_sleep();

	cpufreq_suspend();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	} else
		dpm_show_time(starttime, state, NULL);
	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of
 * the device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	char *info = NULL;
	int ret = 0;

	if (dev->power.syscore)
		return 0;

	/*
	 * If a device's parent goes into runtime suspend at the wrong time,
	 * it won't be possible to resume the device.  To prevent this we
	 * block runtime suspend here, during the prepare phase, and allow
	 * it again during the complete phase.
	 */
	pm_runtime_get_noresume(dev);

	device_lock(dev);

	dev->power.wakeup_path = device_may_wakeup(dev);

	if (dev->pm_domain) {
		info = "preparing power domain ";
		callback = dev->pm_domain->ops.prepare;
	} else if (dev->type && dev->type->pm) {
		info = "preparing type ";
		callback = dev->type->pm->prepare;
	} else if (dev->class && dev->class->pm) {
		info = "preparing class ";
		callback = dev->class->pm->prepare;
	} else if (dev->bus && dev->bus->pm) {
		info = "preparing bus ";
		callback = dev->bus->pm->prepare;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "preparing driver ";
		callback = dev->driver->pm->prepare;
	}

	if (callback)
		ret = callback(dev);

	device_unlock(dev);

	if (ret < 0) {
		suspend_report_result(callback, ret);
		pm_runtime_put(dev);
		return ret;
	}
	/*
	 * A positive return value from ->prepare() means "this device appears
	 * to be runtime-suspended and its state is fine, so if it really is
	 * runtime-suspended, you can leave it in that state provided that you
	 * will do the same thing with all of its descendants".  This only
	 * applies to suspend transitions, however.
	 */
	spin_lock_irq(&dev->power.lock);
	dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND;
	spin_unlock_irq(&dev->power.lock);
	return 0;
}
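
/*
 * Illustrative sketch (hypothetical driver, not part of this file): a driver
 * that is happy to stay runtime-suspended across a system suspend can trigger
 * the direct_complete path by returning 1 from its ->prepare() callback:
 *
 *	static int foo_prepare(struct device *dev)
 *	{
 *		return pm_runtime_status_suspended(dev) ? 1 : 0;
 *	}
 *
 * If the device really is runtime-suspended by the time __device_suspend()
 * runs, all of its suspend/resume callbacks for that transition are skipped.
 */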

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
	might_sleep();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		error = device_prepare(dev, state);
		trace_device_pm_callback_end(dev, error);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute
 * "suspend" callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @subordinate: Device that needs to wait for @dev.
 * @dev: Device to wait for.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);

/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
	struct device *dev;

	if (!fn)
		return;

	device_pm_lock();
	list_for_each_entry(dev, &dpm_list, power.entry)
		fn(dev, data);
	device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);
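
/*
 * Usage sketch (hypothetical caller, not part of this file): counting the
 * devices currently on dpm_list with the iterator above.
 *
 *	static void count_one(struct device *dev, void *data)
 *	{
 *		(*(int *)data)++;
 *	}
 *
 *	static int dpm_count_devices(void)
 *	{
 *		int count = 0;
 *
 *		dpm_for_each_dev(&count, count_one);
 *		return count;
 *	}
 *
 * Note that dpm_for_each_dev() holds dpm_list_mtx across the walk, so @fn
 * must not call back into device_pm_lock() or register/unregister devices,
 * or it will deadlock.
 */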