/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */
#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm-trace.h>
#include <linux/pm_wakeirq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/timer.h>

#include "../base.h"
#include "power.h"
typedef int (*pm_callback_t)(struct device *);
/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;
static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}
/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	dev->power.is_noirq_suspended = false;
	dev->power.is_late_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	INIT_LIST_HEAD(&dev->power.entry);
}
/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}
/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}
/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	device_pm_check_callbacks(dev);
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	dev->power.in_dpm_list = true;
	mutex_unlock(&dpm_list_mtx);
}
/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	dev->power.in_dpm_list = false;
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
	device_pm_check_callbacks(dev);
}
/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}
/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}
/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}
static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = 0;

	if (pm_print_times_enabled) {
		pr_info("calling  %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	return calltime;
}
static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error, pm_message_t state, char *info)
{
	ktime_t rettime;
	s64 nsecs;

	rettime = ktime_get();
	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));

	if (pm_print_times_enabled) {
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)nsecs >> 10);
	}
}
/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}
static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}
static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}
static void dpm_wait_for_suppliers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * If the supplier goes away right after we've checked the link to it,
	 * we'll wait for its completion to change the state, but that's fine,
	 * because the only things that will block as a result are the SRCU
	 * callbacks freeing the link objects for the links in the list we're
	 * walking.
	 */
	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->supplier, async);

	device_links_read_unlock(idx);
}
static void dpm_wait_for_superior(struct device *dev, bool async)
{
	dpm_wait(dev->parent, async);
	dpm_wait_for_suppliers(dev, async);
}
static void dpm_wait_for_consumers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * The status of a device link can only be changed from "dormant" by a
	 * probe, but that cannot happen during system suspend/resume.  In
	 * theory it can change to "dormant" at that time, but then it is
	 * reasonable to wait for the target device anyway (eg. if it goes
	 * away, it's better to wait for it to go away completely and then
	 * continue instead of trying to continue in parallel with its
	 * unregistration).
	 */
	list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->consumer, async);

	device_links_read_unlock(idx);
}
static void dpm_wait_for_subordinate(struct device *dev, bool async)
{
	dpm_wait_for_children(dev, async);
	dpm_wait_for_consumers(dev, async);
}
/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
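
/*
 * Illustrative sketch (not part of this file): pm_op() maps a PM event to
 * one slot of a driver's struct dev_pm_ops.  For ops declared as below,
 * pm_op(&my_pm_ops, PMSG_SUSPEND) returns my_suspend and
 * pm_op(&my_pm_ops, PMSG_RESUME) returns my_resume.  The "my_*" names are
 * hypothetical.
 */
#if 0
static int my_suspend(struct device *dev) { return 0; }
static int my_resume(struct device *dev) { return 0; }

static const struct dev_pm_ops my_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(my_suspend, my_resume)
};
#endif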
/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}
static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}
#ifdef CONFIG_PM_DEBUG
static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}
#else
static inline void dpm_show_time(ktime_t starttime, pm_message_t state,
				 char *info) {}
#endif /* CONFIG_PM_DEBUG */
static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev);

	pm_dev_dbg(dev, state, info);
	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}
#ifdef CONFIG_DPM_WATCHDOG
struct dpm_watchdog {
	struct device		*dev;
	struct task_struct	*tsk;
	struct timer_list	timer;
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
	struct dpm_watchdog wd

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @data: Watchdog object address.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(unsigned long data)
{
	struct dpm_watchdog *wd = (void *)data;

	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
	show_stack(wd->tsk, NULL);
	panic("%s %s: unrecoverable failure\n",
		dev_driver_string(wd->dev), dev_name(wd->dev));
}

/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
	struct timer_list *timer = &wd->timer;

	wd->dev = dev;
	wd->tsk = current;

	init_timer_on_stack(timer);
	/* use same timeout value for both suspend and resume */
	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
	timer->function = dpm_watchdog_handler;
	timer->data = (unsigned long)wd;
	add_timer(timer);
}

/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
	struct timer_list *timer = &wd->timer;

	del_timer_sync(timer);
	destroy_timer_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif
/*------------------------- Resume routines -------------------------*/
/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_noirq_suspended)
		goto Out;

	dpm_wait_for_superior(dev, async);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_noirq_suspended = false;

 Out:
	complete_all(&dev->power.completion);
	TRACE_RESUME(error);
	return error;
}
static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}
static void async_resume_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_noirq(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}
/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront,
	 * in case the starting of async threads is
	 * delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_noirq, dev);
		}
	}

	while (!list_empty(&dpm_noirq_list)) {
		dev = to_device(dpm_noirq_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_noirq(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_noirq++;
				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " noirq", error);
			}
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, "noirq");
	resume_device_irqs();
	device_wakeup_disarm_wake_irqs();
	cpuidle_resume();
	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}
/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_late_suspended)
		goto Out;

	dpm_wait_for_superior(dev, async);

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_late_suspended = false;

 Out:
	TRACE_RESUME(error);

	pm_runtime_enable(dev);
	complete_all(&dev->power.completion);
	return error;
}
static void async_resume_early(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_early(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}
/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_early(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront,
	 * in case the starting of async threads is
	 * delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_early, dev);
		}
	}

	while (!list_empty(&dpm_late_early_list)) {
		dev = to_device(dpm_late_early_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_early(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_early++;
				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " early", error);
			}
		}
		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, "early");
	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}
/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);
/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		/* Match the pm_runtime_disable() in __device_suspend(). */
		pm_runtime_enable(dev);
		goto Complete;
	}

	dpm_wait_for_superior(dev, async);
	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Driver;
		} else if (dev->class->resume) {
			info = "legacy class ";
			callback = dev->class->resume;
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

 Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

 End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	return error;
}
static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}
/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);

	cpufreq_resume();
	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
}
/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	char *info = NULL;

	if (dev->power.syscore)
		return;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

	pm_runtime_put(dev);
}
/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		device_complete(dev, state);
		trace_device_pm_callback_end(dev, 0);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);

	/* Allow device probing and trigger re-probing of deferred devices */
	device_unblock_probing();
	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
}
/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);
/*------------------------- Suspend routines -------------------------*/
/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}
/**
 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_subordinate(dev, async);

	if (async_error)
		goto Complete;

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	if (!error)
		dev->power.is_noirq_suspended = true;
	else
		async_error = error;

Complete:
	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}
static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_noirq(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}
static int device_suspend_noirq(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend_noirq, dev);
		return 0;
	}
	return __device_suspend_noirq(dev, pm_transition, false);
}
/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
	cpuidle_pause();
	device_wakeup_arm_wake_irqs();
	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;

	if (error) {
		suspend_stats.failed_suspend_noirq++;
		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
		dpm_resume_noirq(resume_event(state));
	} else {
		dpm_show_time(starttime, state, "noirq");
	}
	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
	return error;
}
/**
 * __device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	__pm_runtime_disable(dev, false);

	dpm_wait_for_subordinate(dev, async);

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	if (!error)
		dev->power.is_late_suspended = true;
	else
		async_error = error;

Complete:
	TRACE_SUSPEND(error);
	complete_all(&dev->power.completion);
	return error;
}
static void async_suspend_late(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_late(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}
	put_device(dev);
}
static int device_suspend_late(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend_late, dev);
		return 0;
	}

	return __device_suspend_late(dev, pm_transition, false);
}
/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev);

		mutex_lock(&dpm_list_mtx);
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);

		if (error) {
			pm_dev_err(dev, state, " late", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend_late++;
		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
		dpm_resume_early(resume_event(state));
	} else {
		dpm_show_time(starttime, state, "late");
	}
	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
	return error;
}
/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
	int error = dpm_suspend_late(state);
	if (error)
		return error;

	error = dpm_suspend_noirq(state);
	if (error) {
		dpm_resume_early(resume_event(state));
		return error;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);
/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: string description of caller.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state),
			  char *info)
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev, state);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}
static void dpm_clear_suppliers_direct_complete(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
		spin_lock_irq(&link->supplier->power.lock);
		link->supplier->power.direct_complete = false;
		spin_unlock_irq(&link->supplier->power.lock);
	}

	device_links_read_unlock(idx);
}
/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_subordinate(dev, async);

	if (async_error)
		goto Complete;

	/*
	 * If a device configured to wake up the system from sleep states
	 * has been suspended at run time and there's a resume request pending
	 * for it, this is equivalent to the device signaling wakeup, so the
	 * system suspend operation should be aborted.
	 */
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		if (pm_runtime_status_suspended(dev)) {
			pm_runtime_disable(dev);
			if (pm_runtime_status_suspended(dev))
				goto Complete;

			pm_runtime_enable(dev);
		}
		dev->power.direct_complete = false;
	}

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Run;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend,
						"legacy class ");
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend,
						"legacy bus ");
			goto End;
		}
	}

 Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 End:
	if (!error) {
		struct device *parent = dev->parent;

		dev->power.is_suspended = true;
		if (parent) {
			spin_lock_irq(&parent->power.lock);

			dev->parent->power.direct_complete = false;
			if (dev->power.wakeup_path
			    && !dev->parent->power.ignore_children)
				dev->parent->power.wakeup_path = true;

			spin_unlock_irq(&parent->power.lock);
		}
		dpm_clear_suppliers_direct_complete(dev);
	}

	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	if (error)
		async_error = error;

	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}
static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}
static int device_suspend(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}
/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
	might_sleep();

	cpufreq_suspend();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	}
	dpm_show_time(starttime, state, NULL);
	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
	return error;
}
/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	int ret = 0;

	if (dev->power.syscore)
		return 0;

	/*
	 * If a device's parent goes into runtime suspend at the wrong time,
	 * it won't be possible to resume the device.  To prevent this we
	 * block runtime suspend here, during the prepare phase, and allow
	 * it again during the complete phase.
	 */
	pm_runtime_get_noresume(dev);

	device_lock(dev);

	dev->power.wakeup_path = device_may_wakeup(dev);

	if (dev->power.no_pm_callbacks) {
		ret = 1;	/* Let device go direct_complete */
		goto unlock;
	}

	if (dev->pm_domain)
		callback = dev->pm_domain->ops.prepare;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->prepare;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->prepare;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->prepare;

	if (!callback && dev->driver && dev->driver->pm)
		callback = dev->driver->pm->prepare;

	if (callback)
		ret = callback(dev);

unlock:
	device_unlock(dev);

	if (ret < 0) {
		suspend_report_result(callback, ret);
		pm_runtime_put(dev);
		return ret;
	}
	/*
	 * A positive return value from ->prepare() means "this device appears
	 * to be runtime-suspended and its state is fine, so if it really is
	 * runtime-suspended, you can leave it in that state provided that you
	 * will do the same thing with all of its descendants".  This only
	 * applies to suspend transitions, however.
	 */
	spin_lock_irq(&dev->power.lock);
	dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND;
	spin_unlock_irq(&dev->power.lock);
	return 0;
}
/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
	might_sleep();

	/*
	 * Give a chance for the known devices to complete their probes,
	 * before disabling probing of devices.  This sync point is important
	 * at least at boot time + hibernation restore.
	 */
	wait_for_device_probe();
	/*
	 * It is unsafe if probing of devices will happen during suspend or
	 * hibernation and system behavior will be unpredictable in this case.
	 * So, let's prohibit device's probing here and defer their probes
	 * instead. The normal behavior will be restored in dpm_complete().
	 */
	device_block_probing();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		error = device_prepare(dev, state);
		trace_device_pm_callback_end(dev, error);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
	return error;
}
/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);
void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);
/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
	struct device *dev;

	if (!fn)
		return;

	device_pm_lock();
	list_for_each_entry(dev, &dpm_list, power.entry)
		fn(dev, data);
	device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);
static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
{
	if (!ops)
		return true;

	return !ops->prepare &&
	       !ops->suspend &&
	       !ops->suspend_late &&
	       !ops->suspend_noirq &&
	       !ops->resume_noirq &&
	       !ops->resume_early &&
	       !ops->resume &&
	       !ops->complete;
}
void device_pm_check_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_pm_callbacks =
		(!dev->bus || pm_ops_is_empty(dev->bus->pm)) &&
		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
		(!dev->driver || pm_ops_is_empty(dev->driver->pm));
	spin_unlock_irq(&dev->power.lock);
}