1 /*
2 * drivers/base/power/runtime.c - Helper functions for device runtime PM
3 *
4 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
5 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
6 *
7 * This file is released under the GPLv2.
8 */
9
10 #include <linux/sched/mm.h>
11 #include <linux/export.h>
12 #include <linux/pm_runtime.h>
13 #include <linux/pm_wakeirq.h>
14 #include <trace/events/rpm.h>
15
16 #include "../base.h"
17 #include "power.h"
18
19 typedef int (*pm_callback_t)(struct device *);
20
21 static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
22 {
23 pm_callback_t cb;
24 const struct dev_pm_ops *ops;
25
26 if (dev->pm_domain)
27 ops = &dev->pm_domain->ops;
28 else if (dev->type && dev->type->pm)
29 ops = dev->type->pm;
30 else if (dev->class && dev->class->pm)
31 ops = dev->class->pm;
32 else if (dev->bus && dev->bus->pm)
33 ops = dev->bus->pm;
34 else
35 ops = NULL;
36
37 if (ops)
38 cb = *(pm_callback_t *)((void *)ops + cb_offset);
39 else
40 cb = NULL;
41
42 if (!cb && dev->driver && dev->driver->pm)
43 cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);
44
45 return cb;
46 }
47
48 #define RPM_GET_CALLBACK(dev, callback) \
49 __rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))
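
/*
 * Editorial note (not part of the original file): for example,
 * RPM_GET_CALLBACK(dev, runtime_suspend) expands to
 * __rpm_get_callback(dev, offsetof(struct dev_pm_ops, runtime_suspend)),
 * i.e. the callback is looked up in the PM domain, type, class and bus
 * dev_pm_ops first, with the driver's dev_pm_ops as the fallback.
 */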
50
51 static int rpm_resume(struct device *dev, int rpmflags);
52 static int rpm_suspend(struct device *dev, int rpmflags);
53
54 /**
55 * update_pm_runtime_accounting - Update the time accounting of power states
56 * @dev: Device to update the accounting for
57 *
58 * In order to be able to have time accounting of the various power states
59 * (as used by programs such as PowerTOP to show the effectiveness of runtime
60 * PM), we need to track the time spent in each state.
61 * update_pm_runtime_accounting must be called each time before the
62 * runtime_status field is updated, to account the time in the old state
63 * correctly.
64 */
65 void update_pm_runtime_accounting(struct device *dev)
66 {
67 unsigned long now = jiffies;
68 unsigned long delta;
69
70 delta = now - dev->power.accounting_timestamp;
71
72 dev->power.accounting_timestamp = now;
73
74 if (dev->power.disable_depth > 0)
75 return;
76
77 if (dev->power.runtime_status == RPM_SUSPENDED)
78 dev->power.suspended_jiffies += delta;
79 else
80 dev->power.active_jiffies += delta;
81 }
82
83 static void __update_runtime_status(struct device *dev, enum rpm_status status)
84 {
85 update_pm_runtime_accounting(dev);
86 dev->power.runtime_status = status;
87 }
88
89 /**
90 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
91 * @dev: Device to handle.
92 */
93 static void pm_runtime_deactivate_timer(struct device *dev)
94 {
95 if (dev->power.timer_expires > 0) {
96 del_timer(&dev->power.suspend_timer);
97 dev->power.timer_expires = 0;
98 }
99 }
100
101 /**
102 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
103 * @dev: Device to handle.
104 */
105 static void pm_runtime_cancel_pending(struct device *dev)
106 {
107 pm_runtime_deactivate_timer(dev);
108 /*
109 * In case there's a request pending, make sure its work function will
110 * return without doing anything.
111 */
112 dev->power.request = RPM_REQ_NONE;
113 }
114
115 /**
116 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
117 * @dev: Device to handle.
118 *
119 * Compute the autosuspend-delay expiration time based on the device's
120 * power.last_busy time. If the delay has already expired or is disabled
121 * (negative) or the power.use_autosuspend flag isn't set, return 0.
122 * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
123 *
124 * This function may be called either with or without dev->power.lock held.
125 * Either way it can be racy, since power.last_busy may be updated at any time.
126 */
127 unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
128 {
129 int autosuspend_delay;
130 long elapsed;
131 unsigned long last_busy;
132 unsigned long expires = 0;
133
134 if (!dev->power.use_autosuspend)
135 goto out;
136
137 autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
138 if (autosuspend_delay < 0)
139 goto out;
140
141 last_busy = ACCESS_ONCE(dev->power.last_busy);
142 elapsed = jiffies - last_busy;
143 if (elapsed < 0)
144 goto out; /* jiffies has wrapped around. */
145
146 /*
147 * If the autosuspend_delay is >= 1 second, align the timer by rounding
148 * up to the nearest second.
149 */
150 expires = last_busy + msecs_to_jiffies(autosuspend_delay);
151 if (autosuspend_delay >= 1000)
152 expires = round_jiffies(expires);
153 expires += !expires;
154 if (elapsed >= expires - last_busy)
155 expires = 0; /* Already expired. */
156
157 out:
158 return expires;
159 }
160 EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
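
/*
 * Illustrative sketch (editorial addition, not part of this file): a
 * driver using autosuspend typically refreshes power.last_busy and then
 * drops its usage count with the _autosuspend variant, so the suspend is
 * scheduled relative to the last-busy time computed above. The foo_*
 * name is hypothetical.
 */
static void foo_io_complete(struct device *dev)
{
	pm_runtime_mark_last_busy(dev);		/* update power.last_busy */
	pm_runtime_put_autosuspend(dev);	/* may schedule an autosuspend */
}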
161
162 static int dev_memalloc_noio(struct device *dev, void *data)
163 {
164 return dev->power.memalloc_noio;
165 }
166
167 /**
168 * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
169 * @dev: Device to handle.
170 * @enable: True for setting the flag and False for clearing the flag.
171 *
172 * Set the flag for all devices in the path from the device to the
173 * root device in the device tree if @enable is true, otherwise clear
174 * the flag for devices in the path whose siblings don't set the flag.
175 *
176 * This function should only be called by block device or network
177 * device drivers, to solve deadlock problems during runtime
178 * resume/suspend:
179 *
180 * If a memory allocation with GFP_KERNEL is made inside the runtime
181 * resume/suspend callback of any one of the device's ancestors (or of
182 * the block device itself), a deadlock may be triggered inside the
183 * memory allocation, since it might not complete until the block
184 * device becomes active and the involved page I/O finishes. This
185 * situation was first pointed out by Alan Stern. Network devices
186 * are involved in iSCSI-like situations.
187 *
188 * dev_hotplug_mutex is held in this function to handle hotplug races,
189 * because pm_runtime_set_memalloc_noio() may be called
190 * in an async probe().
191 *
192 * This function should be called between device_add() and device_del()
193 * on the affected (block or network) device.
194 */
195 void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
196 {
197 static DEFINE_MUTEX(dev_hotplug_mutex);
198
199 mutex_lock(&dev_hotplug_mutex);
200 for (;;) {
201 bool enabled;
202
203 /* Hold the power lock, since the bitfield is not SMP-safe. */
204 spin_lock_irq(&dev->power.lock);
205 enabled = dev->power.memalloc_noio;
206 dev->power.memalloc_noio = enable;
207 spin_unlock_irq(&dev->power.lock);
208
209 /*
210 * No need to set the flag on the ancestors if the device
211 * already had it set.
212 */
213 if (enabled && enable)
214 break;
215
216 dev = dev->parent;
217
218 /*
219 * Clear the parent device's flag only if none of its
220 * children have the flag set, because an ancestor's
221 * flag may have been set by any one of its descendants.
222 */
223 if (!dev || (!enable &&
224 device_for_each_child(dev, NULL,
225 dev_memalloc_noio)))
226 break;
227 }
228 mutex_unlock(&dev_hotplug_mutex);
229 }
230 EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
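
/*
 * Illustrative sketch (editorial addition): per the comment above, a
 * block or network device driver would set the flag after device_add()
 * and clear it before device_del(). The foo_* names are hypothetical.
 */
static int foo_blk_register(struct device *dev)
{
	int ret = device_add(dev);

	if (!ret)
		pm_runtime_set_memalloc_noio(dev, true);
	return ret;
}

static void foo_blk_unregister(struct device *dev)
{
	pm_runtime_set_memalloc_noio(dev, false);
	device_del(dev);
}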
231
232 /**
233 * rpm_check_suspend_allowed - Test whether a device may be suspended.
234 * @dev: Device to test.
235 */
236 static int rpm_check_suspend_allowed(struct device *dev)
237 {
238 int retval = 0;
239
240 if (dev->power.runtime_error)
241 retval = -EINVAL;
242 else if (dev->power.disable_depth > 0)
243 retval = -EACCES;
244 else if (atomic_read(&dev->power.usage_count) > 0)
245 retval = -EAGAIN;
246 else if (!dev->power.ignore_children &&
247 atomic_read(&dev->power.child_count))
248 retval = -EBUSY;
249
250 /* Pending resume requests take precedence over suspends. */
251 else if ((dev->power.deferred_resume
252 && dev->power.runtime_status == RPM_SUSPENDING)
253 || (dev->power.request_pending
254 && dev->power.request == RPM_REQ_RESUME))
255 retval = -EAGAIN;
256 else if (__dev_pm_qos_read_value(dev) < 0)
257 retval = -EPERM;
258 else if (dev->power.runtime_status == RPM_SUSPENDED)
259 retval = 1;
260
261 return retval;
262 }
263
264 static int rpm_get_suppliers(struct device *dev)
265 {
266 struct device_link *link;
267
268 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
269 int retval;
270
271 if (!(link->flags & DL_FLAG_PM_RUNTIME))
272 continue;
273
274 if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND ||
275 link->rpm_active)
276 continue;
277
278 retval = pm_runtime_get_sync(link->supplier);
279 if (retval < 0) {
280 pm_runtime_put_noidle(link->supplier);
281 return retval;
282 }
283 link->rpm_active = true;
284 }
285 return 0;
286 }
287
288 static void rpm_put_suppliers(struct device *dev)
289 {
290 struct device_link *link;
291
292 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
293 if (link->rpm_active &&
294 READ_ONCE(link->status) != DL_STATE_SUPPLIER_UNBIND) {
295 pm_runtime_put(link->supplier);
296 link->rpm_active = false;
297 }
298 }
299
300 /**
301 * __rpm_callback - Run a given runtime PM callback for a given device.
302 * @cb: Runtime PM callback to run.
303 * @dev: Device to run the callback for.
304 */
305 static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
306 __releases(&dev->power.lock) __acquires(&dev->power.lock)
307 {
308 int retval, idx;
309 bool use_links = dev->power.links_count > 0;
310
311 if (dev->power.irq_safe) {
312 spin_unlock(&dev->power.lock);
313 } else {
314 spin_unlock_irq(&dev->power.lock);
315
316 /*
317 * Resume suppliers if necessary.
318 *
319 * The device's runtime PM status cannot change until this
320 * routine returns, so it is safe to read the status outside of
321 * the lock.
322 */
323 if (use_links && dev->power.runtime_status == RPM_RESUMING) {
324 idx = device_links_read_lock();
325
326 retval = rpm_get_suppliers(dev);
327 if (retval)
328 goto fail;
329
330 device_links_read_unlock(idx);
331 }
332 }
333
334 retval = cb(dev);
335
336 if (dev->power.irq_safe) {
337 spin_lock(&dev->power.lock);
338 } else {
339 /*
340 * If the device is suspending and the callback has returned
341 * success, drop the usage counters of the suppliers that have
342 * been reference counted on its resume.
343 *
344 * Do that if resume fails too.
345 */
346 if (use_links
347 && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
348 || (dev->power.runtime_status == RPM_RESUMING && retval))) {
349 idx = device_links_read_lock();
350
351 fail:
352 rpm_put_suppliers(dev);
353
354 device_links_read_unlock(idx);
355 }
356
357 spin_lock_irq(&dev->power.lock);
358 }
359
360 return retval;
361 }
362
363 /**
364 * rpm_idle - Notify device bus type if the device can be suspended.
365 * @dev: Device to notify the bus type about.
366 * @rpmflags: Flag bits.
367 *
368 * Check if the device's runtime PM status allows it to be suspended. If
369 * another idle notification has been started earlier, return immediately. If
370 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
371 * run the ->runtime_idle() callback directly. If the ->runtime_idle callback
372 * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
373 *
374 * This function must be called under dev->power.lock with interrupts disabled.
375 */
376 static int rpm_idle(struct device *dev, int rpmflags)
377 {
378 int (*callback)(struct device *);
379 int retval;
380
381 trace_rpm_idle_rcuidle(dev, rpmflags);
382 retval = rpm_check_suspend_allowed(dev);
383 if (retval < 0)
384 ; /* Conditions are wrong. */
385
386 /* Idle notifications are allowed only in the RPM_ACTIVE state. */
387 else if (dev->power.runtime_status != RPM_ACTIVE)
388 retval = -EAGAIN;
389
390 /*
391 * Any pending request other than an idle notification takes
392 * precedence over us, except that the timer may be running.
393 */
394 else if (dev->power.request_pending &&
395 dev->power.request > RPM_REQ_IDLE)
396 retval = -EAGAIN;
397
398 /* Act as though RPM_NOWAIT is always set. */
399 else if (dev->power.idle_notification)
400 retval = -EINPROGRESS;
401 if (retval)
402 goto out;
403
404 /* Pending requests need to be canceled. */
405 dev->power.request = RPM_REQ_NONE;
406
407 if (dev->power.no_callbacks)
408 goto out;
409
410 /* Carry out an asynchronous or a synchronous idle notification. */
411 if (rpmflags & RPM_ASYNC) {
412 dev->power.request = RPM_REQ_IDLE;
413 if (!dev->power.request_pending) {
414 dev->power.request_pending = true;
415 queue_work(pm_wq, &dev->power.work);
416 }
417 trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0);
418 return 0;
419 }
420
421 dev->power.idle_notification = true;
422
423 callback = RPM_GET_CALLBACK(dev, runtime_idle);
424
425 if (callback)
426 retval = __rpm_callback(callback, dev);
427
428 dev->power.idle_notification = false;
429 wake_up_all(&dev->power.wait_queue);
430
431 out:
432 trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
433 return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
434 }
435
436 /**
437 * rpm_callback - Run a given runtime PM callback for a given device.
438 * @cb: Runtime PM callback to run.
439 * @dev: Device to run the callback for.
440 */
441 static int rpm_callback(int (*cb)(struct device *), struct device *dev)
442 {
443 int retval;
444
445 if (!cb)
446 return -ENOSYS;
447
448 if (dev->power.memalloc_noio) {
449 unsigned int noio_flag;
450
451 /*
452 * A deadlock might occur if a memory allocation with
453 * GFP_KERNEL happens inside the runtime_suspend or
454 * runtime_resume callback of a block device's
455 * ancestor or of the block device itself. A network
456 * device might be regarded as part of an iSCSI block
457 * device, so network devices and their ancestors should
458 * be marked as memalloc_noio too.
459 */
460 noio_flag = memalloc_noio_save();
461 retval = __rpm_callback(cb, dev);
462 memalloc_noio_restore(noio_flag);
463 } else {
464 retval = __rpm_callback(cb, dev);
465 }
466
467 dev->power.runtime_error = retval;
468 return retval != -EACCES ? retval : -EIO;
469 }
470
471 /**
472 * rpm_suspend - Carry out runtime suspend of given device.
473 * @dev: Device to suspend.
474 * @rpmflags: Flag bits.
475 *
476 * Check if the device's runtime PM status allows it to be suspended.
477 * Cancel a pending idle notification, autosuspend or suspend. If
478 * another suspend has been started earlier, either return immediately
479 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
480 * flags. If the RPM_ASYNC flag is set then queue a suspend request;
481 * otherwise run the ->runtime_suspend() callback directly. When
482 * ->runtime_suspend succeeded, if a deferred resume was requested while
483 * the callback was running then carry it out, otherwise send an idle
484 * notification for its parent (if the suspend succeeded and both
485 * ignore_children of parent->power and irq_safe of dev->power are not set).
486 * If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
487 * flag is set and the next autosuspend-delay expiration time is in the
488 * future, schedule another autosuspend attempt.
489 *
490 * This function must be called under dev->power.lock with interrupts disabled.
491 */
492 static int rpm_suspend(struct device *dev, int rpmflags)
493 __releases(&dev->power.lock) __acquires(&dev->power.lock)
494 {
495 int (*callback)(struct device *);
496 struct device *parent = NULL;
497 int retval;
498
499 trace_rpm_suspend_rcuidle(dev, rpmflags);
500
501 repeat:
502 retval = rpm_check_suspend_allowed(dev);
503
504 if (retval < 0)
505 ; /* Conditions are wrong. */
506
507 /* Synchronous suspends are not allowed in the RPM_RESUMING state. */
508 else if (dev->power.runtime_status == RPM_RESUMING &&
509 !(rpmflags & RPM_ASYNC))
510 retval = -EAGAIN;
511 if (retval)
512 goto out;
513
514 /* If the autosuspend_delay time hasn't expired yet, reschedule. */
515 if ((rpmflags & RPM_AUTO)
516 && dev->power.runtime_status != RPM_SUSPENDING) {
517 unsigned long expires = pm_runtime_autosuspend_expiration(dev);
518
519 if (expires != 0) {
520 /* Pending requests need to be canceled. */
521 dev->power.request = RPM_REQ_NONE;
522
523 /*
524 * Optimization: If the timer is already running and is
525 * set to expire at or before the autosuspend delay,
526 * avoid the overhead of resetting it. Just let it
527 * expire; pm_suspend_timer_fn() will take care of the
528 * rest.
529 */
530 if (!(dev->power.timer_expires && time_before_eq(
531 dev->power.timer_expires, expires))) {
532 dev->power.timer_expires = expires;
533 mod_timer(&dev->power.suspend_timer, expires);
534 }
535 dev->power.timer_autosuspends = 1;
536 goto out;
537 }
538 }
539
540 /* Other scheduled or pending requests need to be canceled. */
541 pm_runtime_cancel_pending(dev);
542
543 if (dev->power.runtime_status == RPM_SUSPENDING) {
544 DEFINE_WAIT(wait);
545
546 if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
547 retval = -EINPROGRESS;
548 goto out;
549 }
550
551 if (dev->power.irq_safe) {
552 spin_unlock(&dev->power.lock);
553
554 cpu_relax();
555
556 spin_lock(&dev->power.lock);
557 goto repeat;
558 }
559
560 /* Wait for the other suspend running in parallel with us. */
561 for (;;) {
562 prepare_to_wait(&dev->power.wait_queue, &wait,
563 TASK_UNINTERRUPTIBLE);
564 if (dev->power.runtime_status != RPM_SUSPENDING)
565 break;
566
567 spin_unlock_irq(&dev->power.lock);
568
569 schedule();
570
571 spin_lock_irq(&dev->power.lock);
572 }
573 finish_wait(&dev->power.wait_queue, &wait);
574 goto repeat;
575 }
576
577 if (dev->power.no_callbacks)
578 goto no_callback; /* Assume success. */
579
580 /* Carry out an asynchronous or a synchronous suspend. */
581 if (rpmflags & RPM_ASYNC) {
582 dev->power.request = (rpmflags & RPM_AUTO) ?
583 RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
584 if (!dev->power.request_pending) {
585 dev->power.request_pending = true;
586 queue_work(pm_wq, &dev->power.work);
587 }
588 goto out;
589 }
590
591 __update_runtime_status(dev, RPM_SUSPENDING);
592
593 callback = RPM_GET_CALLBACK(dev, runtime_suspend);
594
595 dev_pm_enable_wake_irq_check(dev, true);
596 retval = rpm_callback(callback, dev);
597 if (retval)
598 goto fail;
599
600 no_callback:
601 __update_runtime_status(dev, RPM_SUSPENDED);
602 pm_runtime_deactivate_timer(dev);
603
604 if (dev->parent) {
605 parent = dev->parent;
606 atomic_add_unless(&parent->power.child_count, -1, 0);
607 }
608 wake_up_all(&dev->power.wait_queue);
609
610 if (dev->power.deferred_resume) {
611 dev->power.deferred_resume = false;
612 rpm_resume(dev, 0);
613 retval = -EAGAIN;
614 goto out;
615 }
616
617 /* Maybe the parent is now able to suspend. */
618 if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
619 spin_unlock(&dev->power.lock);
620
621 spin_lock(&parent->power.lock);
622 rpm_idle(parent, RPM_ASYNC);
623 spin_unlock(&parent->power.lock);
624
625 spin_lock(&dev->power.lock);
626 }
627
628 out:
629 trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
630
631 return retval;
632
633 fail:
634 dev_pm_disable_wake_irq_check(dev);
635 __update_runtime_status(dev, RPM_ACTIVE);
636 dev->power.deferred_resume = false;
637 wake_up_all(&dev->power.wait_queue);
638
639 if (retval == -EAGAIN || retval == -EBUSY) {
640 dev->power.runtime_error = 0;
641
642 /*
643 * If the callback routine failed an autosuspend, and
644 * if the last_busy time has been updated so that there
645 * is a new autosuspend expiration time, automatically
646 * reschedule another autosuspend.
647 */
648 if ((rpmflags & RPM_AUTO) &&
649 pm_runtime_autosuspend_expiration(dev) != 0)
650 goto repeat;
651 } else {
652 pm_runtime_cancel_pending(dev);
653 }
654 goto out;
655 }
656
657 /**
658 * rpm_resume - Carry out runtime resume of given device.
659 * @dev: Device to resume.
660 * @rpmflags: Flag bits.
661 *
662 * Check if the device's runtime PM status allows it to be resumed. Cancel
663 * any scheduled or pending requests. If another resume has been started
664 * earlier, either return immediately or wait for it to finish, depending on the
665 * RPM_NOWAIT and RPM_ASYNC flags. Similarly, if there's a suspend running in
666 * parallel with this function, either tell the other process to resume after
667 * suspending (deferred_resume) or wait for it to finish. If the RPM_ASYNC
668 * flag is set then queue a resume request; otherwise run the
669 * ->runtime_resume() callback directly. Queue an idle notification for the
670 * device if the resume succeeded.
671 *
672 * This function must be called under dev->power.lock with interrupts disabled.
673 */
674 static int rpm_resume(struct device *dev, int rpmflags)
675 __releases(&dev->power.lock) __acquires(&dev->power.lock)
676 {
677 int (*callback)(struct device *);
678 struct device *parent = NULL;
679 int retval = 0;
680
681 trace_rpm_resume_rcuidle(dev, rpmflags);
682
683 repeat:
684 if (dev->power.runtime_error)
685 retval = -EINVAL;
686 else if (dev->power.disable_depth == 1 && dev->power.is_suspended
687 && dev->power.runtime_status == RPM_ACTIVE)
688 retval = 1;
689 else if (dev->power.disable_depth > 0)
690 retval = -EACCES;
691 if (retval)
692 goto out;
693
694 /*
695 * Other scheduled or pending requests need to be canceled. Small
696 * optimization: If an autosuspend timer is running, leave it running
697 * rather than cancelling it now only to restart it again in the near
698 * future.
699 */
700 dev->power.request = RPM_REQ_NONE;
701 if (!dev->power.timer_autosuspends)
702 pm_runtime_deactivate_timer(dev);
703
704 if (dev->power.runtime_status == RPM_ACTIVE) {
705 retval = 1;
706 goto out;
707 }
708
709 if (dev->power.runtime_status == RPM_RESUMING
710 || dev->power.runtime_status == RPM_SUSPENDING) {
711 DEFINE_WAIT(wait);
712
713 if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
714 if (dev->power.runtime_status == RPM_SUSPENDING)
715 dev->power.deferred_resume = true;
716 else
717 retval = -EINPROGRESS;
718 goto out;
719 }
720
721 if (dev->power.irq_safe) {
722 spin_unlock(&dev->power.lock);
723
724 cpu_relax();
725
726 spin_lock(&dev->power.lock);
727 goto repeat;
728 }
729
730 /* Wait for the operation carried out in parallel with us. */
731 for (;;) {
732 prepare_to_wait(&dev->power.wait_queue, &wait,
733 TASK_UNINTERRUPTIBLE);
734 if (dev->power.runtime_status != RPM_RESUMING
735 && dev->power.runtime_status != RPM_SUSPENDING)
736 break;
737
738 spin_unlock_irq(&dev->power.lock);
739
740 schedule();
741
742 spin_lock_irq(&dev->power.lock);
743 }
744 finish_wait(&dev->power.wait_queue, &wait);
745 goto repeat;
746 }
747
748 /*
749 * See if we can skip waking up the parent. This is safe only if
750 * power.no_callbacks is set, because otherwise we don't know whether
751 * the resume will actually succeed.
752 */
753 if (dev->power.no_callbacks && !parent && dev->parent) {
754 spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
755 if (dev->parent->power.disable_depth > 0
756 || dev->parent->power.ignore_children
757 || dev->parent->power.runtime_status == RPM_ACTIVE) {
758 atomic_inc(&dev->parent->power.child_count);
759 spin_unlock(&dev->parent->power.lock);
760 retval = 1;
761 goto no_callback; /* Assume success. */
762 }
763 spin_unlock(&dev->parent->power.lock);
764 }
765
766 /* Carry out an asynchronous or a synchronous resume. */
767 if (rpmflags & RPM_ASYNC) {
768 dev->power.request = RPM_REQ_RESUME;
769 if (!dev->power.request_pending) {
770 dev->power.request_pending = true;
771 queue_work(pm_wq, &dev->power.work);
772 }
773 retval = 0;
774 goto out;
775 }
776
777 if (!parent && dev->parent) {
778 /*
779 * Increment the parent's usage counter and resume it if
780 * necessary. Not needed if dev is irq-safe; then the
781 * parent is permanently resumed.
782 */
783 parent = dev->parent;
784 if (dev->power.irq_safe)
785 goto skip_parent;
786 spin_unlock(&dev->power.lock);
787
788 pm_runtime_get_noresume(parent);
789
790 spin_lock(&parent->power.lock);
791 /*
792 * Resume the parent if it has runtime PM enabled and has not
793 * been set to ignore its children.
794 */
795 if (!parent->power.disable_depth
796 && !parent->power.ignore_children) {
797 rpm_resume(parent, 0);
798 if (parent->power.runtime_status != RPM_ACTIVE)
799 retval = -EBUSY;
800 }
801 spin_unlock(&parent->power.lock);
802
803 spin_lock(&dev->power.lock);
804 if (retval)
805 goto out;
806 goto repeat;
807 }
808 skip_parent:
809
810 if (dev->power.no_callbacks)
811 goto no_callback; /* Assume success. */
812
813 __update_runtime_status(dev, RPM_RESUMING);
814
815 callback = RPM_GET_CALLBACK(dev, runtime_resume);
816
817 dev_pm_disable_wake_irq_check(dev);
818 retval = rpm_callback(callback, dev);
819 if (retval) {
820 __update_runtime_status(dev, RPM_SUSPENDED);
821 pm_runtime_cancel_pending(dev);
822 dev_pm_enable_wake_irq_check(dev, false);
823 } else {
824 no_callback:
825 __update_runtime_status(dev, RPM_ACTIVE);
826 pm_runtime_mark_last_busy(dev);
827 if (parent)
828 atomic_inc(&parent->power.child_count);
829 }
830 wake_up_all(&dev->power.wait_queue);
831
832 if (retval >= 0)
833 rpm_idle(dev, RPM_ASYNC);
834
835 out:
836 if (parent && !dev->power.irq_safe) {
837 spin_unlock_irq(&dev->power.lock);
838
839 pm_runtime_put(parent);
840
841 spin_lock_irq(&dev->power.lock);
842 }
843
844 trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
845
846 return retval;
847 }
848
849 /**
850 * pm_runtime_work - Universal runtime PM work function.
851 * @work: Work structure used for scheduling the execution of this function.
852 *
853 * Use @work to get the device object the work is to be done for, determine what
854 * is to be done and execute the appropriate runtime PM function.
855 */
856 static void pm_runtime_work(struct work_struct *work)
857 {
858 struct device *dev = container_of(work, struct device, power.work);
859 enum rpm_request req;
860
861 spin_lock_irq(&dev->power.lock);
862
863 if (!dev->power.request_pending)
864 goto out;
865
866 req = dev->power.request;
867 dev->power.request = RPM_REQ_NONE;
868 dev->power.request_pending = false;
869
870 switch (req) {
871 case RPM_REQ_NONE:
872 break;
873 case RPM_REQ_IDLE:
874 rpm_idle(dev, RPM_NOWAIT);
875 break;
876 case RPM_REQ_SUSPEND:
877 rpm_suspend(dev, RPM_NOWAIT);
878 break;
879 case RPM_REQ_AUTOSUSPEND:
880 rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
881 break;
882 case RPM_REQ_RESUME:
883 rpm_resume(dev, RPM_NOWAIT);
884 break;
885 }
886
887 out:
888 spin_unlock_irq(&dev->power.lock);
889 }
890
891 /**
892 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
893 * @data: Device pointer passed by pm_schedule_suspend().
894 *
895 * Check if the time is right and queue a suspend request.
896 */
897 static void pm_suspend_timer_fn(unsigned long data)
898 {
899 struct device *dev = (struct device *)data;
900 unsigned long flags;
901 unsigned long expires;
902
903 spin_lock_irqsave(&dev->power.lock, flags);
904
905 expires = dev->power.timer_expires;
906 /* If 'expires' is after 'jiffies' we've been called too early. */
907 if (expires > 0 && !time_after(expires, jiffies)) {
908 dev->power.timer_expires = 0;
909 rpm_suspend(dev, dev->power.timer_autosuspends ?
910 (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
911 }
912
913 spin_unlock_irqrestore(&dev->power.lock, flags);
914 }
915
916 /**
917 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
918 * @dev: Device to suspend.
919 * @delay: Time to wait before submitting a suspend request, in milliseconds.
920 */
921 int pm_schedule_suspend(struct device *dev, unsigned int delay)
922 {
923 unsigned long flags;
924 int retval;
925
926 spin_lock_irqsave(&dev->power.lock, flags);
927
928 if (!delay) {
929 retval = rpm_suspend(dev, RPM_ASYNC);
930 goto out;
931 }
932
933 retval = rpm_check_suspend_allowed(dev);
934 if (retval)
935 goto out;
936
937 /* Other scheduled or pending requests need to be canceled. */
938 pm_runtime_cancel_pending(dev);
939
940 dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
941 dev->power.timer_expires += !dev->power.timer_expires;
942 dev->power.timer_autosuspends = 0;
943 mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);
944
945 out:
946 spin_unlock_irqrestore(&dev->power.lock, flags);
947
948 return retval;
949 }
950 EXPORT_SYMBOL_GPL(pm_schedule_suspend);
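
/*
 * Illustrative sketch (editorial addition): request a suspend roughly
 * 100 ms from now. A negative return means the suspend is not currently
 * allowed (see rpm_check_suspend_allowed() above); 1 means the device is
 * already suspended. The foo_* name is hypothetical.
 */
static void foo_request_delayed_suspend(struct device *dev)
{
	int err = pm_schedule_suspend(dev, 100);	/* delay in ms */

	if (err < 0)
		dev_dbg(dev, "suspend not scheduled: %d\n", err);
}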
951
952 /**
953 * __pm_runtime_idle - Entry point for runtime idle operations.
954 * @dev: Device to send idle notification for.
955 * @rpmflags: Flag bits.
956 *
957 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
958 * return immediately if it is larger than zero. Then carry out an idle
959 * notification, either synchronous or asynchronous.
960 *
961 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
962 * or if pm_runtime_irq_safe() has been called.
963 */
964 int __pm_runtime_idle(struct device *dev, int rpmflags)
965 {
966 unsigned long flags;
967 int retval;
968
969 if (rpmflags & RPM_GET_PUT) {
970 if (!atomic_dec_and_test(&dev->power.usage_count))
971 return 0;
972 }
973
974 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
975
976 spin_lock_irqsave(&dev->power.lock, flags);
977 retval = rpm_idle(dev, rpmflags);
978 spin_unlock_irqrestore(&dev->power.lock, flags);
979
980 return retval;
981 }
982 EXPORT_SYMBOL_GPL(__pm_runtime_idle);
983
984 /**
985 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
986 * @dev: Device to suspend.
987 * @rpmflags: Flag bits.
988 *
989 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
990 * return immediately if it is larger than zero. Then carry out a suspend,
991 * either synchronous or asynchronous.
992 *
993 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
994 * or if pm_runtime_irq_safe() has been called.
995 */
996 int __pm_runtime_suspend(struct device *dev, int rpmflags)
997 {
998 unsigned long flags;
999 int retval;
1000
1001 if (rpmflags & RPM_GET_PUT) {
1002 if (!atomic_dec_and_test(&dev->power.usage_count))
1003 return 0;
1004 }
1005
1006 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
1007
1008 spin_lock_irqsave(&dev->power.lock, flags);
1009 retval = rpm_suspend(dev, rpmflags);
1010 spin_unlock_irqrestore(&dev->power.lock, flags);
1011
1012 return retval;
1013 }
1014 EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
1015
1016 /**
1017 * __pm_runtime_resume - Entry point for runtime resume operations.
1018 * @dev: Device to resume.
1019 * @rpmflags: Flag bits.
1020 *
1021 * If the RPM_GET_PUT flag is set, increment the device's usage count. Then
1022 * carry out a resume, either synchronous or asynchronous.
1023 *
1024 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
1025 * or if pm_runtime_irq_safe() has been called.
1026 */
1027 int __pm_runtime_resume(struct device *dev, int rpmflags)
1028 {
1029 unsigned long flags;
1030 int retval;
1031
1032 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
1033 dev->power.runtime_status != RPM_ACTIVE);
1034
1035 if (rpmflags & RPM_GET_PUT)
1036 atomic_inc(&dev->power.usage_count);
1037
1038 spin_lock_irqsave(&dev->power.lock, flags);
1039 retval = rpm_resume(dev, rpmflags);
1040 spin_unlock_irqrestore(&dev->power.lock, flags);
1041
1042 return retval;
1043 }
1044 EXPORT_SYMBOL_GPL(__pm_runtime_resume);
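
/*
 * Editorial note (not part of the original file): the helpers commonly
 * used by drivers are thin wrappers (in include/linux/pm_runtime.h)
 * around the three entry points above, e.g. as of this kernel version:
 *
 *	pm_runtime_put(dev)         -> __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC)
 *	pm_runtime_put_sync(dev)    -> __pm_runtime_idle(dev, RPM_GET_PUT)
 *	pm_runtime_suspend(dev)     -> __pm_runtime_suspend(dev, 0)
 *	pm_runtime_autosuspend(dev) -> __pm_runtime_suspend(dev, RPM_AUTO)
 *	pm_runtime_get(dev)         -> __pm_runtime_resume(dev, RPM_GET_PUT | RPM_ASYNC)
 *	pm_runtime_get_sync(dev)    -> __pm_runtime_resume(dev, RPM_GET_PUT)
 */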
1045
1046 /**
1047 * pm_runtime_get_if_in_use - Conditionally bump up the device's usage counter.
1048 * @dev: Device to handle.
1049 *
1050 * Return -EINVAL if runtime PM is disabled for the device.
1051 *
1052 * If that's not the case and if the device's runtime PM status is RPM_ACTIVE
1053 * and the runtime PM usage counter is nonzero, increment the counter and
1054 * return 1. Otherwise return 0 without changing the counter.
1055 */
1056 int pm_runtime_get_if_in_use(struct device *dev)
1057 {
1058 unsigned long flags;
1059 int retval;
1060
1061 spin_lock_irqsave(&dev->power.lock, flags);
1062 retval = dev->power.disable_depth > 0 ? -EINVAL :
1063 dev->power.runtime_status == RPM_ACTIVE
1064 && atomic_inc_not_zero(&dev->power.usage_count);
1065 spin_unlock_irqrestore(&dev->power.lock, flags);
1066 return retval;
1067 }
1068 EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);
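
/*
 * Illustrative sketch (editorial addition): access the hardware only if
 * it is already active and in use, without resuming it. The foo_* name
 * is hypothetical.
 */
static void foo_read_stats(struct device *dev)
{
	if (pm_runtime_get_if_in_use(dev) <= 0)
		return;		/* disabled, suspended or not in use */

	/* ... read registers while the usage count is held ... */

	pm_runtime_put(dev);	/* drop the reference taken above */
}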
1069
1070 /**
1071 * __pm_runtime_set_status - Set runtime PM status of a device.
1072 * @dev: Device to handle.
1073 * @status: New runtime PM status of the device.
1074 *
1075 * If runtime PM of the device is disabled or its power.runtime_error field is
1076 * different from zero, the status may be changed either to RPM_ACTIVE, or to
1077 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
1078 * However, if the device has a parent and the parent is not active, and the
1079 * parent's power.ignore_children flag is unset, the device's status cannot be
1080 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
1081 *
1082 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
1083 * and the device parent's counter of unsuspended children is modified to
1084 * reflect the new status. If the new status is RPM_SUSPENDED, an idle
1085 * notification request for the parent is submitted.
1086 */
1087 int __pm_runtime_set_status(struct device *dev, unsigned int status)
1088 {
1089 struct device *parent = dev->parent;
1090 unsigned long flags;
1091 bool notify_parent = false;
1092 int error = 0;
1093
1094 if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
1095 return -EINVAL;
1096
1097 spin_lock_irqsave(&dev->power.lock, flags);
1098
1099 if (!dev->power.runtime_error && !dev->power.disable_depth) {
1100 error = -EAGAIN;
1101 goto out;
1102 }
1103
1104 if (dev->power.runtime_status == status)
1105 goto out_set;
1106
1107 if (status == RPM_SUSPENDED) {
1108 /*
1109 * It is invalid to suspend a device with an active child,
1110 * unless it has been set to ignore its children.
1111 */
1112 if (!dev->power.ignore_children &&
1113 atomic_read(&dev->power.child_count)) {
1114 dev_err(dev, "runtime PM trying to suspend device but active child\n");
1115 error = -EBUSY;
1116 goto out;
1117 }
1118
1119 if (parent) {
1120 atomic_add_unless(&parent->power.child_count, -1, 0);
1121 notify_parent = !parent->power.ignore_children;
1122 }
1123 goto out_set;
1124 }
1125
1126 if (parent) {
1127 spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);
1128
1129 /*
1130 * It is invalid to put an active child under a parent that is
1131 * not active, has runtime PM enabled, and has the
1132 * 'power.ignore_children' flag unset.
1133 */
1134 if (!parent->power.disable_depth
1135 && !parent->power.ignore_children
1136 && parent->power.runtime_status != RPM_ACTIVE) {
1137 dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
1138 dev_name(dev),
1139 dev_name(parent));
1140 error = -EBUSY;
1141 } else if (dev->power.runtime_status == RPM_SUSPENDED) {
1142 atomic_inc(&parent->power.child_count);
1143 }
1144
1145 spin_unlock(&parent->power.lock);
1146
1147 if (error)
1148 goto out;
1149 }
1150
1151 out_set:
1152 __update_runtime_status(dev, status);
1153 dev->power.runtime_error = 0;
1154 out:
1155 spin_unlock_irqrestore(&dev->power.lock, flags);
1156
1157 if (notify_parent)
1158 pm_request_idle(parent);
1159
1160 return error;
1161 }
1162 EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
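
/*
 * Illustrative sketch (editorial addition): a probe() path that leaves
 * the hardware powered on usually declares that via the
 * pm_runtime_set_active() wrapper around __pm_runtime_set_status()
 * before enabling runtime PM. The foo_* name is hypothetical.
 */
static int foo_probe_enable_rpm(struct device *dev)
{
	int ret = pm_runtime_set_active(dev);	/* status mirrors the hardware */

	if (ret)
		return ret;	/* e.g. -EBUSY: parent not active */

	pm_runtime_enable(dev);
	return 0;
}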
1163
1164 /**
1165 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
1166 * @dev: Device to handle.
1167 *
1168 * Flush all pending requests for the device from pm_wq and wait for all
1169 * runtime PM operations involving the device in progress to complete.
1170 *
1171 * Should be called under dev->power.lock with interrupts disabled.
1172 */
1173 static void __pm_runtime_barrier(struct device *dev)
1174 {
1175 pm_runtime_deactivate_timer(dev);
1176
1177 if (dev->power.request_pending) {
1178 dev->power.request = RPM_REQ_NONE;
1179 spin_unlock_irq(&dev->power.lock);
1180
1181 cancel_work_sync(&dev->power.work);
1182
1183 spin_lock_irq(&dev->power.lock);
1184 dev->power.request_pending = false;
1185 }
1186
1187 if (dev->power.runtime_status == RPM_SUSPENDING
1188 || dev->power.runtime_status == RPM_RESUMING
1189 || dev->power.idle_notification) {
1190 DEFINE_WAIT(wait);
1191
1192 /* Suspend, wake-up or idle notification in progress. */
1193 for (;;) {
1194 prepare_to_wait(&dev->power.wait_queue, &wait,
1195 TASK_UNINTERRUPTIBLE);
1196 if (dev->power.runtime_status != RPM_SUSPENDING
1197 && dev->power.runtime_status != RPM_RESUMING
1198 && !dev->power.idle_notification)
1199 break;
1200 spin_unlock_irq(&dev->power.lock);
1201
1202 schedule();
1203
1204 spin_lock_irq(&dev->power.lock);
1205 }
1206 finish_wait(&dev->power.wait_queue, &wait);
1207 }
1208 }
1209
1210 /**
1211 * pm_runtime_barrier - Flush pending requests and wait for completions.
1212 * @dev: Device to handle.
1213 *
1214 * Prevent the device from being suspended by incrementing its usage
1215 * counter and, if there's a pending resume request, wake the device up.
1216 * Next, make sure that all pending requests for the device have been flushed
1217 * from pm_wq and wait for all runtime PM operations involving the device in
1218 * progress to complete.
1219 *
1220 * Return value:
1221 * 1, if there was a resume request pending and the device had to be woken up,
1222 * 0, otherwise
1223 */
1224 int pm_runtime_barrier(struct device *dev)
1225 {
1226 int retval = 0;
1227
1228 pm_runtime_get_noresume(dev);
1229 spin_lock_irq(&dev->power.lock);
1230
1231 if (dev->power.request_pending
1232 && dev->power.request == RPM_REQ_RESUME) {
1233 rpm_resume(dev, 0);
1234 retval = 1;
1235 }
1236
1237 __pm_runtime_barrier(dev);
1238
1239 spin_unlock_irq(&dev->power.lock);
1240 pm_runtime_put_noidle(dev);
1241
1242 return retval;
1243 }
1244 EXPORT_SYMBOL_GPL(pm_runtime_barrier);
1245
1246 /**
1247 * __pm_runtime_disable - Disable runtime PM of a device.
1248 * @dev: Device to handle.
1249 * @check_resume: If set, check if there's a resume request for the device.
1250 *
1251 * Increment power.disable_depth for the device and if it was zero previously,
1252 * cancel all pending runtime PM requests for the device and wait for all
1253 * operations in progress to complete. The device can be either active or
1254 * suspended after its runtime PM has been disabled.
1255 *
1256 * If @check_resume is set and there's a resume request pending when
1257 * __pm_runtime_disable() is called and power.disable_depth is zero, the
1258 * function will wake up the device before disabling its runtime PM.
1259 */
1260 void __pm_runtime_disable(struct device *dev, bool check_resume)
1261 {
1262 spin_lock_irq(&dev->power.lock);
1263
1264 if (dev->power.disable_depth > 0) {
1265 dev->power.disable_depth++;
1266 goto out;
1267 }
1268
1269 /*
1270 * Wake up the device if there's a resume request pending, because that
1271 * means there probably is some I/O to process and disabling runtime PM
1272 * shouldn't prevent the device from processing the I/O.
1273 */
1274 if (check_resume && dev->power.request_pending
1275 && dev->power.request == RPM_REQ_RESUME) {
1276 /*
1277 * Prevent suspends and idle notifications from being carried
1278 * out after we have woken up the device.
1279 */
1280 pm_runtime_get_noresume(dev);
1281
1282 rpm_resume(dev, 0);
1283
1284 pm_runtime_put_noidle(dev);
1285 }
1286
1287 if (!dev->power.disable_depth++)
1288 __pm_runtime_barrier(dev);
1289
1290 out:
1291 spin_unlock_irq(&dev->power.lock);
1292 }
1293 EXPORT_SYMBOL_GPL(__pm_runtime_disable);
1294
1295 /**
1296 * pm_runtime_enable - Enable runtime PM of a device.
1297 * @dev: Device to handle.
1298 */
1299 void pm_runtime_enable(struct device *dev)
1300 {
1301 unsigned long flags;
1302
1303 spin_lock_irqsave(&dev->power.lock, flags);
1304
1305 if (dev->power.disable_depth > 0)
1306 dev->power.disable_depth--;
1307 else
1308 dev_warn(dev, "Unbalanced %s!\n", __func__);
1309
1310 spin_unlock_irqrestore(&dev->power.lock, flags);
1311 }
1312 EXPORT_SYMBOL_GPL(pm_runtime_enable);
1313
1314 /**
1315 * pm_runtime_forbid - Block runtime PM of a device.
1316 * @dev: Device to handle.
1317 *
1318 * Increase the device's usage count and clear its power.runtime_auto flag,
1319 * so that it cannot be suspended at run time until pm_runtime_allow() is called
1320 * for it.
1321 */
1322 void pm_runtime_forbid(struct device *dev)
1323 {
1324 spin_lock_irq(&dev->power.lock);
1325 if (!dev->power.runtime_auto)
1326 goto out;
1327
1328 dev->power.runtime_auto = false;
1329 atomic_inc(&dev->power.usage_count);
1330 rpm_resume(dev, 0);
1331
1332 out:
1333 spin_unlock_irq(&dev->power.lock);
1334 }
1335 EXPORT_SYMBOL_GPL(pm_runtime_forbid);
1336
1337 /**
1338 * pm_runtime_allow - Unblock runtime PM of a device.
1339 * @dev: Device to handle.
1340 *
1341 * Decrease the device's usage count and set its power.runtime_auto flag.
1342 */
1343 void pm_runtime_allow(struct device *dev)
1344 {
1345 spin_lock_irq(&dev->power.lock);
1346 if (dev->power.runtime_auto)
1347 goto out;
1348
1349 dev->power.runtime_auto = true;
1350 if (atomic_dec_and_test(&dev->power.usage_count))
1351 rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
1352
1353 out:
1354 spin_unlock_irq(&dev->power.lock);
1355 }
1356 EXPORT_SYMBOL_GPL(pm_runtime_allow);
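
/*
 * Editorial note (not part of the original file): pm_runtime_forbid()
 * and pm_runtime_allow() back the device's power/control sysfs
 * attribute; writing "on" invokes the former and "auto" the latter.
 */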
1357
1358 /**
1359 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
1360 * @dev: Device to handle.
1361 *
1362 * Set the power.no_callbacks flag, which tells the PM core that this
1363 * device is power-managed through its parent and has no runtime PM
1364 * callbacks of its own. The runtime sysfs attributes will be removed.
1365 */
1366 void pm_runtime_no_callbacks(struct device *dev)
1367 {
1368 spin_lock_irq(&dev->power.lock);
1369 dev->power.no_callbacks = 1;
1370 spin_unlock_irq(&dev->power.lock);
1371 if (device_is_registered(dev))
1372 rpm_sysfs_remove(dev);
1373 }
1374 EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
1375
1376 /**
1377 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
1378 * @dev: Device to handle
1379 *
1380 * Set the power.irq_safe flag, which tells the PM core that the
1381 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
1382 * always be invoked with the spinlock held and interrupts disabled. It also
1383 * causes the parent's usage counter to be permanently incremented, preventing
1384 * the parent from runtime suspending -- otherwise an irq-safe child might have
1385 * to wait for a non-irq-safe parent.
1386 */
1387 void pm_runtime_irq_safe(struct device *dev)
1388 {
1389 if (dev->parent)
1390 pm_runtime_get_sync(dev->parent);
1391 spin_lock_irq(&dev->power.lock);
1392 dev->power.irq_safe = 1;
1393 spin_unlock_irq(&dev->power.lock);
1394 }
1395 EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
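
/*
 * Illustrative sketch (editorial addition): once pm_runtime_irq_safe()
 * has been called, the synchronous helpers may be used in atomic
 * context, e.g. from a hardirq handler (assumes <linux/interrupt.h>).
 * The foo_* name is hypothetical.
 */
static irqreturn_t foo_irq(int irq, void *data)
{
	struct device *dev = data;

	pm_runtime_get_sync(dev);	/* no sleeping: callbacks are irq-safe */
	/* ... service the interrupt ... */
	pm_runtime_put_sync(dev);
	return IRQ_HANDLED;
}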
1396
1397 /**
1398 * update_autosuspend - Handle a change to a device's autosuspend settings.
1399 * @dev: Device to handle.
1400 * @old_delay: The former autosuspend_delay value.
1401 * @old_use: The former use_autosuspend value.
1402 *
1403 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
1404 * set; otherwise allow it. Send an idle notification if suspends are allowed.
1405 *
1406 * This function must be called under dev->power.lock with interrupts disabled.
1407 */
1408 static void update_autosuspend(struct device *dev, int old_delay, int old_use)
1409 {
1410 int delay = dev->power.autosuspend_delay;
1411
1412 /* Should runtime suspend be prevented now? */
1413 if (dev->power.use_autosuspend && delay < 0) {
1414
1415 /* If it used to be allowed then prevent it. */
1416 if (!old_use || old_delay >= 0) {
1417 atomic_inc(&dev->power.usage_count);
1418 rpm_resume(dev, 0);
1419 }
1420 }
1421
1422 /* Runtime suspend should be allowed now. */
1423 else {
1424
1425 /* If it used to be prevented then allow it. */
1426 if (old_use && old_delay < 0)
1427 atomic_dec(&dev->power.usage_count);
1428
1429 /* Maybe we can autosuspend now. */
1430 rpm_idle(dev, RPM_AUTO);
1431 }
1432 }
1433
1434 /**
1435 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
1436 * @dev: Device to handle.
1437 * @delay: Value of the new delay in milliseconds.
1438 *
1439 * Set the device's power.autosuspend_delay value. If it changes to negative
1440 * and the power.use_autosuspend flag is set, prevent runtime suspends. If it
1441 * changes the other way, allow runtime suspends.
1442 */
1443 void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
1444 {
1445 int old_delay, old_use;
1446
1447 spin_lock_irq(&dev->power.lock);
1448 old_delay = dev->power.autosuspend_delay;
1449 old_use = dev->power.use_autosuspend;
1450 dev->power.autosuspend_delay = delay;
1451 update_autosuspend(dev, old_delay, old_use);
1452 spin_unlock_irq(&dev->power.lock);
1453 }
1454 EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
1455
1456 /**
1457 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
1458 * @dev: Device to handle.
1459 * @use: New value for use_autosuspend.
1460 *
1461 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
1462 * suspends as needed.
1463 */
1464 void __pm_runtime_use_autosuspend(struct device *dev, bool use)
1465 {
1466 int old_delay, old_use;
1467
1468 spin_lock_irq(&dev->power.lock);
1469 old_delay = dev->power.autosuspend_delay;
1470 old_use = dev->power.use_autosuspend;
1471 dev->power.use_autosuspend = use;
1472 update_autosuspend(dev, old_delay, old_use);
1473 spin_unlock_irq(&dev->power.lock);
1474 }
1475 EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
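
/*
 * Illustrative sketch (editorial addition): the canonical autosuspend
 * setup in a driver's probe path, via the pm_runtime_use_autosuspend()
 * wrapper around __pm_runtime_use_autosuspend(). The 2000 ms delay is
 * an arbitrary example value; foo_* is hypothetical.
 */
static void foo_init_autosuspend(struct device *dev)
{
	pm_runtime_set_autosuspend_delay(dev, 2000);	/* idle 2 s, then suspend */
	pm_runtime_use_autosuspend(dev);
	pm_runtime_enable(dev);
}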
1476
1477 /**
1478 * pm_runtime_init - Initialize runtime PM fields in given device object.
1479 * @dev: Device object to initialize.
1480 */
1481 void pm_runtime_init(struct device *dev)
1482 {
1483 dev->power.runtime_status = RPM_SUSPENDED;
1484 dev->power.idle_notification = false;
1485
1486 dev->power.disable_depth = 1;
1487 atomic_set(&dev->power.usage_count, 0);
1488
1489 dev->power.runtime_error = 0;
1490
1491 atomic_set(&dev->power.child_count, 0);
1492 pm_suspend_ignore_children(dev, false);
1493 dev->power.runtime_auto = true;
1494
1495 dev->power.request_pending = false;
1496 dev->power.request = RPM_REQ_NONE;
1497 dev->power.deferred_resume = false;
1498 dev->power.accounting_timestamp = jiffies;
1499 INIT_WORK(&dev->power.work, pm_runtime_work);
1500
1501 dev->power.timer_expires = 0;
1502 setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
1503 (unsigned long)dev);
1504
1505 init_waitqueue_head(&dev->power.wait_queue);
1506 }
1507
1508 /**
1509 * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
1510 * @dev: Device object to re-initialize.
1511 */
1512 void pm_runtime_reinit(struct device *dev)
1513 {
1514 if (!pm_runtime_enabled(dev)) {
1515 if (dev->power.runtime_status == RPM_ACTIVE)
1516 pm_runtime_set_suspended(dev);
1517 if (dev->power.irq_safe) {
1518 spin_lock_irq(&dev->power.lock);
1519 dev->power.irq_safe = 0;
1520 spin_unlock_irq(&dev->power.lock);
1521 if (dev->parent)
1522 pm_runtime_put(dev->parent);
1523 }
1524 }
1525 }
1526
1527 /**
1528 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
1529 * @dev: Device object being removed from device hierarchy.
1530 */
1531 void pm_runtime_remove(struct device *dev)
1532 {
1533 __pm_runtime_disable(dev, false);
1534 pm_runtime_reinit(dev);
1535 }
1536
1537 /**
1538 * pm_runtime_clean_up_links - Prepare links to consumers for driver removal.
1539 * @dev: Device whose driver is going to be removed.
1540 *
1541 * Check links from this device to any consumers and if any of them have active
1542 * runtime PM references to the device, drop the usage counter of the device
1543 * (once per link).
1544 *
1545 * Links with the DL_FLAG_STATELESS flag set are ignored.
1546 *
1547 * Since the device is guaranteed to be runtime-active at the point this is
1548 * called, nothing else needs to be done here.
1549 *
1550 * Moreover, this is called after device_links_busy() has returned 'false', so
1551 * the status of each link is guaranteed to be DL_STATE_SUPPLIER_UNBIND and
1552 * therefore rpm_active can't be manipulated concurrently.
1553 */
1554 void pm_runtime_clean_up_links(struct device *dev)
1555 {
1556 struct device_link *link;
1557 int idx;
1558
1559 idx = device_links_read_lock();
1560
1561 list_for_each_entry_rcu(link, &dev->links.consumers, s_node) {
1562 if (link->flags & DL_FLAG_STATELESS)
1563 continue;
1564
1565 if (link->rpm_active) {
1566 pm_runtime_put_noidle(dev);
1567 link->rpm_active = false;
1568 }
1569 }
1570
1571 device_links_read_unlock(idx);
1572 }
1573
1574 /**
1575 * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
1576 * @dev: Consumer device.
1577 */
1578 void pm_runtime_get_suppliers(struct device *dev)
1579 {
1580 struct device_link *link;
1581 int idx;
1582
1583 idx = device_links_read_lock();
1584
1585 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
1586 if (link->flags & DL_FLAG_PM_RUNTIME)
1587 pm_runtime_get_sync(link->supplier);
1588
1589 device_links_read_unlock(idx);
1590 }
1591
1592 /**
1593 * pm_runtime_put_suppliers - Drop references to supplier devices.
1594 * @dev: Consumer device.
1595 */
1596 void pm_runtime_put_suppliers(struct device *dev)
1597 {
1598 struct device_link *link;
1599 int idx;
1600
1601 idx = device_links_read_lock();
1602
1603 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
1604 if (link->flags & DL_FLAG_PM_RUNTIME)
1605 pm_runtime_put(link->supplier);
1606
1607 device_links_read_unlock(idx);
1608 }
1609
1610 void pm_runtime_new_link(struct device *dev)
1611 {
1612 spin_lock_irq(&dev->power.lock);
1613 dev->power.links_count++;
1614 spin_unlock_irq(&dev->power.lock);
1615 }
1616
1617 void pm_runtime_drop_link(struct device *dev)
1618 {
1619 spin_lock_irq(&dev->power.lock);
1620 WARN_ON(dev->power.links_count == 0);
1621 dev->power.links_count--;
1622 spin_unlock_irq(&dev->power.lock);
1623 }
1624
1625 /**
1626 * pm_runtime_force_suspend - Force a device into suspend state if needed.
1627 * @dev: Device to suspend.
1628 *
1629 * Disable runtime PM so we can safely check the device's runtime PM
1630 * status and, if it is active, invoke its .runtime_suspend callback to
1631 * bring it into the suspended state. Keep runtime PM disabled to
1632 * preserve the state unless we encounter errors.
1633 *
1634 * Typically this function may be invoked from a system suspend callback
1635 * to make sure the device is put into a low power state.
1636 */
1637 int pm_runtime_force_suspend(struct device *dev)
1638 {
1639 int (*callback)(struct device *);
1640 int ret = 0;
1641
1642 pm_runtime_disable(dev);
1643 if (pm_runtime_status_suspended(dev))
1644 return 0;
1645
1646 callback = RPM_GET_CALLBACK(dev, runtime_suspend);
1647
1648 if (!callback) {
1649 ret = -ENOSYS;
1650 goto err;
1651 }
1652
1653 ret = callback(dev);
1654 if (ret)
1655 goto err;
1656
1657 /*
1658 * Increase the runtime PM usage count for the device's parent, in case
1659 * we find the device in use when system suspend was invoked.
1660 * This tells pm_runtime_force_resume() to resume the parent
1661 * immediately, which is needed to be able to resume its children
1662 * when the resume is not being deferred to runtime PM.
1663 */
1664 if (dev->parent && atomic_read(&dev->power.usage_count) > 1)
1665 pm_runtime_get_noresume(dev->parent);
1666
1667 pm_runtime_set_suspended(dev);
1668 return 0;
1669 err:
1670 pm_runtime_enable(dev);
1671 return ret;
1672 }
1673 EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
1674
1675 /**
1676 * pm_runtime_force_resume - Force a device into resume state if needed.
1677 * @dev: Device to resume.
1678 *
1679 * Prior to invoking this function we expect the user to have brought
1680 * the device into a low power state by a call to
1681 * pm_runtime_force_suspend(). Here we reverse those actions and bring
1682 * the device back to full power, if it is expected to be used on system
1683 * resume. To distinguish that, we check whether the runtime PM usage
1684 * count is greater than 1 (the PM core increments the usage count in
1685 * the system PM prepare phase), as that indicates a real user (such as
1686 * a subsystem, driver, or userspace) is using it. If so, we resume the
1687 * device here; otherwise, the resume is deferred to runtime PM.
1688 *
1689 * Typically this function may be invoked from a system resume callback.
1690 */
1691 int pm_runtime_force_resume(struct device *dev)
1692 {
1693 int (*callback)(struct device *);
1694 int ret = 0;
1695
1696 callback = RPM_GET_CALLBACK(dev, runtime_resume);
1697
1698 if (!callback) {
1699 ret = -ENOSYS;
1700 goto out;
1701 }
1702
1703 if (!pm_runtime_status_suspended(dev))
1704 goto out;
1705
1706 /*
1707 * Decrease the parent's runtime PM usage count, if we increased it
1708 * during system suspend in pm_runtime_force_suspend().
1709 */
1710 if (atomic_read(&dev->power.usage_count) > 1) {
1711 if (dev->parent)
1712 pm_runtime_put_noidle(dev->parent);
1713 } else {
1714 goto out;
1715 }
1716
1717 ret = pm_runtime_set_active(dev);
1718 if (ret)
1719 goto out;
1720
1721 ret = callback(dev);
1722 if (ret) {
1723 pm_runtime_set_suspended(dev);
1724 goto out;
1725 }
1726
1727 pm_runtime_mark_last_busy(dev);
1728 out:
1729 pm_runtime_enable(dev);
1730 return ret;
1731 }
1732 EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
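
/*
 * Illustrative sketch (editorial addition): drivers with no special
 * system sleep requirements often reuse the two helpers above as their
 * system sleep callbacks. The foo_pm_ops name is hypothetical.
 */
static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	/* SET_RUNTIME_PM_OPS(...) would supply the runtime callbacks. */
};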