/*
 * drivers/base/power/runtime.c - Helper functions for device run-time PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 *
 * This file is released under the GPLv2.
 */

#include <linux/sched.h>
#include <linux/pm_runtime.h>
#include <linux/jiffies.h>

static int __pm_runtime_resume(struct device *dev, bool from_wq);
static int __pm_request_idle(struct device *dev);
static int __pm_request_resume(struct device *dev);

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
	if (dev->power.timer_expires > 0) {
		del_timer(&dev->power.suspend_timer);
		dev->power.timer_expires = 0;
	}
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);
	/*
	 * In case there's a request pending, make sure its work function will
	 * return without doing anything.
	 */
	dev->power.request = RPM_REQ_NONE;
}

/**
 * __pm_runtime_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_runtime_idle(struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0
	    || dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval)
		goto out;

	if (dev->power.request_pending) {
		/*
		 * If an idle notification request is pending, cancel it. Any
		 * other pending request takes precedence over us.
		 */
		if (dev->power.request == RPM_REQ_IDLE) {
			dev->power.request = RPM_REQ_NONE;
		} else if (dev->power.request != RPM_REQ_NONE) {
			retval = -EAGAIN;
			goto out;
		}
	}

	dev->power.idle_notification = true;

	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_idle) {
		spin_unlock_irq(&dev->power.lock);

		dev->bus->pm->runtime_idle(dev);

		spin_lock_irq(&dev->power.lock);
	}

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

 out:
	return retval;
}

/**
 * pm_runtime_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 */
int pm_runtime_idle(struct device *dev)
{
	int retval;

	spin_lock_irq(&dev->power.lock);
	retval = __pm_runtime_idle(dev);
	spin_unlock_irq(&dev->power.lock);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_idle);
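
/*
 * Illustrative sketch (hypothetical, not part of this file): the
 * ->runtime_idle() callback invoked above comes from the bus type's
 * dev_pm_ops.  A bus that wants idle devices suspended after a short
 * grace period might implement it along these lines (the foo_bus name
 * and the 100 ms delay are made up):
 *
 *	static int foo_bus_runtime_idle(struct device *dev)
 *	{
 *		// The device looks idle; ask for a suspend in 100 ms.
 *		return pm_schedule_suspend(dev, 100);
 *	}
 */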

/**
 * __pm_runtime_suspend - Carry out run-time suspend of given device.
 * @dev: Device to suspend.
 * @from_wq: If set, the function has been called via pm_wq.
 *
 * Check if the device can be suspended and run the ->runtime_suspend() callback
 * provided by its bus type. If another suspend has been started earlier, wait
 * for it to finish. If an idle notification or suspend request is pending or
 * scheduled, cancel it.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
int __pm_runtime_suspend(struct device *dev, bool from_wq)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	struct device *parent = NULL;
	bool notify = false;
	int retval = 0;

	dev_dbg(dev, "__pm_runtime_suspend()%s!\n",
		from_wq ? " from workqueue" : "");

 repeat:
	if (dev->power.runtime_error) {
		retval = -EINVAL;
		goto out;
	}

	/* Pending resume requests take precedence over us. */
	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		retval = -EAGAIN;
		goto out;
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;
	else if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.disable_depth > 0
	    || atomic_read(&dev->power.usage_count) > 0)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval)
		goto out;

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (from_wq) {
			retval = -EINPROGRESS;
			goto out;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	dev->power.runtime_status = RPM_SUSPENDING;

	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->bus->pm->runtime_suspend(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else {
		retval = -ENOSYS;
	}

	if (retval) {
		dev->power.runtime_status = RPM_ACTIVE;
		pm_runtime_cancel_pending(dev);
		dev->power.deferred_resume = false;

		if (retval == -EAGAIN || retval == -EBUSY) {
			notify = true;
			dev->power.runtime_error = 0;
		}
	} else {
		dev->power.runtime_status = RPM_SUSPENDED;

		if (dev->parent) {
			parent = dev->parent;
			atomic_add_unless(&parent->power.child_count, -1, 0);
		}
	}
	wake_up_all(&dev->power.wait_queue);

	if (dev->power.deferred_resume) {
		dev->power.deferred_resume = false;
		__pm_runtime_resume(dev, false);
		retval = -EAGAIN;
		goto out;
	}

	if (notify)
		__pm_runtime_idle(dev);

	if (parent && !parent->power.ignore_children) {
		spin_unlock_irq(&dev->power.lock);

		pm_request_idle(parent);

		spin_lock_irq(&dev->power.lock);
	}

 out:
	dev_dbg(dev, "__pm_runtime_suspend() returns %d!\n", retval);

	return retval;
}

/**
 * pm_runtime_suspend - Carry out run-time suspend of given device.
 * @dev: Device to suspend.
 */
int pm_runtime_suspend(struct device *dev)
{
	int retval;

	spin_lock_irq(&dev->power.lock);
	retval = __pm_runtime_suspend(dev, false);
	spin_unlock_irq(&dev->power.lock);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_suspend);
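
/*
 * Illustrative sketch (hypothetical, not part of this file): a bus type's
 * ->runtime_suspend() callback, as invoked by __pm_runtime_suspend() above.
 * Returning -EBUSY or -EAGAIN puts the status back to RPM_ACTIVE and
 * triggers an idle notification; returning 0 makes it RPM_SUSPENDED.
 * The foo_* helpers are made-up placeholders:
 *
 *	static int foo_bus_runtime_suspend(struct device *dev)
 *	{
 *		if (foo_device_has_pending_io(dev))
 *			return -EBUSY;	// stay active, retry later
 *
 *		foo_save_state(dev);
 *		foo_power_off(dev);
 *		return 0;		// device is now suspended
 *	}
 */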

/**
 * __pm_runtime_resume - Carry out run-time resume of given device.
 * @dev: Device to resume.
 * @from_wq: If set, the function has been called via pm_wq.
 *
 * Check if the device can be woken up and run the ->runtime_resume() callback
 * provided by its bus type. If another resume has been started earlier, wait
 * for it to finish. If there's a suspend running in parallel with this
 * function, wait for it to finish and resume the device. Cancel any scheduled
 * or pending requests.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
int __pm_runtime_resume(struct device *dev, bool from_wq)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	struct device *parent = NULL;
	int retval = 0;

	dev_dbg(dev, "__pm_runtime_resume()%s!\n",
		from_wq ? " from workqueue" : "");

 repeat:
	if (dev->power.runtime_error) {
		retval = -EINVAL;
		goto out;
	}

	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_ACTIVE)
		retval = 1;
	else if (dev->power.disable_depth > 0)
		retval = -EAGAIN;
	if (retval)
		goto out;

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (from_wq) {
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			retval = -EINPROGRESS;
			goto out;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's resume counter and resume it if
		 * necessary.
		 */
		parent = dev->parent;
		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * We can resume if the parent's run-time PM is disabled or it
		 * is set to ignore children.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
			__pm_runtime_resume(parent, false);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;
		goto repeat;
	}

	dev->power.runtime_status = RPM_RESUMING;

	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->bus->pm->runtime_resume(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else {
		retval = -ENOSYS;
	}

	if (retval) {
		dev->power.runtime_status = RPM_SUSPENDED;
		pm_runtime_cancel_pending(dev);
	} else {
		dev->power.runtime_status = RPM_ACTIVE;
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (!retval)
		__pm_request_idle(dev);

 out:
	if (parent) {
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	dev_dbg(dev, "__pm_runtime_resume() returns %d!\n", retval);

	return retval;
}

/**
 * pm_runtime_resume - Carry out run-time resume of given device.
 * @dev: Device to resume.
 */
int pm_runtime_resume(struct device *dev)
{
	int retval;

	spin_lock_irq(&dev->power.lock);
	retval = __pm_runtime_resume(dev, false);
	spin_unlock_irq(&dev->power.lock);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_resume);
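
/*
 * Illustrative sketch (hypothetical, not part of this file): the
 * ->runtime_resume() counterpart to the suspend callback sketched above.
 * A nonzero return value is latched in power.runtime_error and the status
 * falls back to RPM_SUSPENDED; returning 0 makes the device RPM_ACTIVE
 * again.  The foo_* helpers are made-up placeholders:
 *
 *	static int foo_bus_runtime_resume(struct device *dev)
 *	{
 *		int error = foo_power_on(dev);
 *
 *		if (error)
 *			return error;	// stays suspended, error recorded
 *
 *		foo_restore_state(dev);
 *		return 0;		// device is RPM_ACTIVE again
 *	}
 */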

/**
 * pm_runtime_work - Universal run-time PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine what
 * is to be done and execute the appropriate run-time PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);

	if (!dev->power.request_pending)
		goto out;

	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_NONE:
		break;
	case RPM_REQ_IDLE:
		__pm_runtime_idle(dev);
		break;
	case RPM_REQ_SUSPEND:
		__pm_runtime_suspend(dev, true);
		break;
	case RPM_REQ_RESUME:
		__pm_runtime_resume(dev, true);
		break;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}

/**
 * __pm_request_idle - Submit an idle notification request for given device.
 * @dev: Device to handle.
 *
 * Check if the device's run-time PM status is correct for suspending the device
 * and queue up a request to run __pm_runtime_idle() for it.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_request_idle(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0
	    || dev->power.runtime_status == RPM_SUSPENDED
	    || dev->power.runtime_status == RPM_SUSPENDING)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval)
		return retval;

	if (dev->power.request_pending) {
		/* Any requests other than RPM_REQ_IDLE take precedence. */
		if (dev->power.request == RPM_REQ_NONE)
			dev->power.request = RPM_REQ_IDLE;
		else if (dev->power.request != RPM_REQ_IDLE)
			retval = -EAGAIN;
		return retval;
	}

	dev->power.request = RPM_REQ_IDLE;
	dev->power.request_pending = true;
	queue_work(pm_wq, &dev->power.work);

	return retval;
}

/**
 * pm_request_idle - Submit an idle notification request for given device.
 * @dev: Device to handle.
 */
int pm_request_idle(struct device *dev)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = __pm_request_idle(dev);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_request_idle);
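
/*
 * Illustrative sketch (hypothetical, not part of this file): unlike
 * pm_runtime_idle(), pm_request_idle() only queues work on pm_wq and takes
 * the lock with spin_lock_irqsave(), so it may be called from atomic
 * context, e.g. from a (made-up) interrupt handler:
 *
 *	static irqreturn_t foo_irq_handler(int irq, void *data)
 *	{
 *		struct device *dev = data;
 *
 *		foo_ack_irq(dev);	// hypothetical helper
 *		pm_request_idle(dev);	// async idle notification
 *		return IRQ_HANDLED;
 *	}
 */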

/**
 * __pm_request_suspend - Submit a suspend request for given device.
 * @dev: Device to suspend.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_request_suspend(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		return -EINVAL;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0)
		retval = -EAGAIN;
	else if (dev->power.runtime_status == RPM_SUSPENDING)
		retval = -EINPROGRESS;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval < 0)
		return retval;

	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		/*
		 * Pending resume requests take precedence over us, but we can
		 * overtake any other pending request.
		 */
		if (dev->power.request == RPM_REQ_RESUME)
			retval = -EAGAIN;
		else if (dev->power.request != RPM_REQ_SUSPEND)
			dev->power.request = retval ?
						RPM_REQ_NONE : RPM_REQ_SUSPEND;
		return retval;
	} else if (retval) {
		return retval;
	}

	dev->power.request = RPM_REQ_SUSPEND;
	dev->power.request_pending = true;
	queue_work(pm_wq, &dev->power.work);

	return 0;
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @data: Device pointer passed by pm_schedule_suspend().
 *
 * Check if the time is right and execute __pm_request_suspend() in that case.
 */
static void pm_suspend_timer_fn(unsigned long data)
{
	struct device *dev = (struct device *)data;
	unsigned long flags;
	unsigned long expires;

	spin_lock_irqsave(&dev->power.lock, flags);

	expires = dev->power.timer_expires;
	/* If 'expires' is after 'jiffies' we've been called too early. */
	if (expires > 0 && !time_after(expires, jiffies)) {
		dev->power.timer_expires = 0;
		__pm_request_suspend(dev);
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	int retval = 0;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.runtime_error) {
		retval = -EINVAL;
		goto out;
	}

	if (!delay) {
		retval = __pm_request_suspend(dev);
		goto out;
	}

	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		/*
		 * Pending resume requests take precedence over us, but any
		 * other pending requests have to be canceled.
		 */
		if (dev->power.request == RPM_REQ_RESUME) {
			retval = -EAGAIN;
			goto out;
		}
		dev->power.request = RPM_REQ_NONE;
	}

	if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;
	else if (dev->power.runtime_status == RPM_SUSPENDING)
		retval = -EINPROGRESS;
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval)
		goto out;

	dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
	mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
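
/*
 * Illustrative sketch (hypothetical, not part of this file): a driver that
 * wants a device powered down only after it has been quiet for a while
 * might rearm a delayed suspend every time a request completes (the foo_*
 * name and the 500 ms delay are made up):
 *
 *	static void foo_request_done(struct device *dev)
 *	{
 *		// Returns 1 if already suspended, 0 if the timer was armed,
 *		// or a negative error code (e.g. -EAGAIN, -EBUSY).
 *		int retval = pm_schedule_suspend(dev, 500);
 *
 *		if (retval < 0 && retval != -EAGAIN)
 *			dev_dbg(dev, "suspend not scheduled: %d\n", retval);
 *	}
 */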

/**
 * __pm_request_resume - Submit a resume request for given device.
 * @dev: Device to resume.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_request_resume(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		return -EINVAL;

	if (dev->power.runtime_status == RPM_ACTIVE)
		retval = 1;
	else if (dev->power.runtime_status == RPM_RESUMING)
		retval = -EINPROGRESS;
	else if (dev->power.disable_depth > 0)
		retval = -EAGAIN;
	if (retval < 0)
		return retval;

	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		/* If a non-resume request is pending, we can overtake it. */
		dev->power.request = retval ? RPM_REQ_NONE : RPM_REQ_RESUME;
		return retval;
	} else if (retval) {
		return retval;
	}

	dev->power.request = RPM_REQ_RESUME;
	dev->power.request_pending = true;
	queue_work(pm_wq, &dev->power.work);

	return retval;
}

/**
 * pm_request_resume - Submit a resume request for given device.
 * @dev: Device to resume.
 */
int pm_request_resume(struct device *dev)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = __pm_request_resume(dev);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_request_resume);
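
/*
 * Illustrative sketch (hypothetical, not part of this file): because
 * pm_request_resume() uses spin_lock_irqsave() and merely queues work, a
 * (made-up) driver can kick off a resume from its wake-up interrupt and
 * let pm_runtime_work() perform the actual resume in process context:
 *
 *	static irqreturn_t foo_wakeup_irq(int irq, void *data)
 *	{
 *		struct device *dev = data;
 *
 *		pm_request_resume(dev);	// resume runs later on pm_wq
 *		return IRQ_HANDLED;
 *	}
 */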

/**
 * __pm_runtime_get - Reference count a device and wake it up, if necessary.
 * @dev: Device to handle.
 * @sync: If set and the device is suspended, resume it synchronously.
 *
 * Increment the usage count of the device and if it was zero previously,
 * resume it or submit a resume request for it, depending on the value of @sync.
 */
int __pm_runtime_get(struct device *dev, bool sync)
{
	int retval = 1;

	if (atomic_add_return(1, &dev->power.usage_count) == 1)
		retval = sync ? pm_runtime_resume(dev) : pm_request_resume(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_get);

/**
 * __pm_runtime_put - Decrement the device's usage counter and notify its bus.
 * @dev: Device to handle.
 * @sync: If the device's bus type is to be notified, do that synchronously.
 *
 * Decrement the usage count of the device and if it reaches zero, carry out a
 * synchronous idle notification or submit an idle notification request for it,
 * depending on the value of @sync.
 */
int __pm_runtime_put(struct device *dev, bool sync)
{
	int retval = 0;

	if (atomic_dec_and_test(&dev->power.usage_count))
		retval = sync ? pm_runtime_idle(dev) : pm_request_idle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_put);
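
/*
 * Illustrative sketch (hypothetical, not part of this file): drivers are
 * normally expected to reach these two helpers through the pm_runtime_get
 * and pm_runtime_put family of wrappers from include/linux/pm_runtime.h.
 * A (made-up) I/O path pairing them could look like this:
 *
 *	static int foo_do_transfer(struct device *dev)
 *	{
 *		int error;
 *
 *		pm_runtime_get_sync(dev);	// resume now if suspended
 *		error = foo_start_transfer(dev);	// hypothetical helper
 *		pm_runtime_put(dev);	// drop count, async idle check
 *
 *		return error;
 *	}
 */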

/**
 * __pm_runtime_set_status - Set run-time PM status of a device.
 * @dev: Device to handle.
 * @status: New run-time PM status of the device.
 *
 * If run-time PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status. If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	unsigned long flags;
	bool notify_parent = false;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!dev->power.runtime_error && !dev->power.disable_depth) {
		error = -EAGAIN;
		goto out;
	}

	if (dev->power.runtime_status == status)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		/* It is always possible to set the status to 'suspended'. */
		if (parent) {
			atomic_add_unless(&parent->power.child_count, -1, 0);
			notify_parent = !parent->power.ignore_children;
		}
		goto out_set;
	}

	if (parent) {
		spin_lock(&parent->power.lock);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has run-time PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children
		    && parent->power.runtime_status != RPM_ACTIVE) {
			error = -EBUSY;
		} else {
			if (dev->power.runtime_status == RPM_SUSPENDED)
				atomic_inc(&parent->power.child_count);
		}

		spin_unlock(&parent->power.lock);

		if (error)
			goto out;
	}

 out_set:
	dev->power.runtime_status = status;
	dev->power.runtime_error = 0;
 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (notify_parent)
		pm_request_idle(parent);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
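
/*
 * Illustrative sketch (hypothetical, not part of this file): a (made-up)
 * probe routine for hardware that powers up in the active state would use
 * the pm_runtime_set_active() wrapper around this helper before enabling
 * run-time PM, so that the core's view matches the hardware:
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		foo_hw_init(dev);	// hypothetical: device is powered now
 *
 *		pm_runtime_set_active(dev);	// status = RPM_ACTIVE
 *		pm_runtime_enable(dev);
 *		return 0;
 *	}
 */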

/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * run-time PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter and
 * if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all run-time PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		__pm_runtime_resume(dev, false);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);
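
/*
 * Illustrative sketch (hypothetical, not part of this file): code that is
 * about to tear a device down might use the barrier to make sure no
 * run-time PM activity is still in flight (the foo_* names are made up):
 *
 *	static void foo_shutdown(struct device *dev)
 *	{
 *		// Wake the device if a resume was pending, then wait until
 *		// all queued requests and in-progress transitions are done.
 *		pm_runtime_barrier(dev);
 *
 *		foo_quiesce_hw(dev);	// hypothetical helper
 *	}
 */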

/**
 * __pm_runtime_disable - Disable run-time PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending run-time PM requests for the device and wait for all
 * operations in progress to complete. The device can be either active or
 * suspended after its run-time PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its run-time PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling run-time PM
	 * shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		__pm_runtime_resume(dev, false);

		pm_runtime_put_noidle(dev);
	}

	if (!dev->power.disable_depth++)
		__pm_runtime_barrier(dev);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);

/**
 * pm_runtime_enable - Enable run-time PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.disable_depth > 0)
		dev->power.disable_depth--;
	else
		dev_warn(dev, "Unbalanced %s!\n", __func__);

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);
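
/*
 * Illustrative sketch (hypothetical, not part of this file): disable and
 * enable calls nest via power.disable_depth, so every disable must be
 * balanced by exactly one enable.  A (made-up) remove path, paired with
 * the probe sketch above, could be:
 *
 *	static int foo_remove(struct device *dev)
 *	{
 *		pm_runtime_disable(dev);	// wraps __pm_runtime_disable(dev, true)
 *		foo_hw_teardown(dev);	// hypothetical helper
 *		return 0;
 *	}
 */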

/**
 * pm_runtime_init - Initialize run-time PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
	spin_lock_init(&dev->power.lock);

	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.idle_notification = false;

	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	dev->power.timer_expires = 0;
	setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
			(unsigned long)dev);

	init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);

	/* Change the status back to 'suspended' to match the initial status. */
	if (dev->power.runtime_status == RPM_ACTIVE)
		pm_runtime_set_suspended(dev);
}