PM / Domains: Use power.subsys_data to reduce overhead
1 /*
2 * drivers/base/power/domain.c - Common code related to device power domains.
3 *
4 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
5 *
6 * This file is released under the GPLv2.
7 */
8
9 #include <linux/init.h>
10 #include <linux/kernel.h>
11 #include <linux/io.h>
12 #include <linux/pm_runtime.h>
13 #include <linux/pm_domain.h>
14 #include <linux/slab.h>
15 #include <linux/err.h>
16 #include <linux/sched.h>
17 #include <linux/suspend.h>
18
19 static LIST_HEAD(gpd_list);
20 static DEFINE_MUTEX(gpd_list_lock);
21
22 #ifdef CONFIG_PM
23
24 static struct generic_pm_domain *dev_to_genpd(struct device *dev)
25 {
26 if (IS_ERR_OR_NULL(dev->pm_domain))
27 return ERR_PTR(-EINVAL);
28
29 return pd_to_genpd(dev->pm_domain);
30 }
31
32 static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
33 {
34 bool ret = false;
35
36 if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
37 ret = !!atomic_dec_and_test(&genpd->sd_count);
38
39 return ret;
40 }
41
42 static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
43 {
44 atomic_inc(&genpd->sd_count);
45 smp_mb__after_atomic_inc();
46 }
47
48 static void genpd_acquire_lock(struct generic_pm_domain *genpd)
49 {
50 DEFINE_WAIT(wait);
51
52 mutex_lock(&genpd->lock);
53 /*
54 * Wait for the domain to transition into either the active,
55 * or the power off state.
56 */
57 for (;;) {
58 prepare_to_wait(&genpd->status_wait_queue, &wait,
59 TASK_UNINTERRUPTIBLE);
60 if (genpd->status == GPD_STATE_ACTIVE
61 || genpd->status == GPD_STATE_POWER_OFF)
62 break;
63 mutex_unlock(&genpd->lock);
64
65 schedule();
66
67 mutex_lock(&genpd->lock);
68 }
69 finish_wait(&genpd->status_wait_queue, &wait);
70 }
71
72 static void genpd_release_lock(struct generic_pm_domain *genpd)
73 {
74 mutex_unlock(&genpd->lock);
75 }
76
77 static void genpd_set_active(struct generic_pm_domain *genpd)
78 {
79 if (genpd->resume_count == 0)
80 genpd->status = GPD_STATE_ACTIVE;
81 }
82
83 /**
84 * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
85 * @genpd: PM domain to power up.
86 *
87 * Restore power to @genpd and all of its masters so that it is possible to
88 * resume a device belonging to it.
89 */
90 int __pm_genpd_poweron(struct generic_pm_domain *genpd)
91 __releases(&genpd->lock) __acquires(&genpd->lock)
92 {
93 struct gpd_link *link;
94 DEFINE_WAIT(wait);
95 int ret = 0;
96
97 /* If the domain's master is being waited for, we have to wait too. */
98 for (;;) {
99 prepare_to_wait(&genpd->status_wait_queue, &wait,
100 TASK_UNINTERRUPTIBLE);
101 if (genpd->status != GPD_STATE_WAIT_MASTER)
102 break;
103 mutex_unlock(&genpd->lock);
104
105 schedule();
106
107 mutex_lock(&genpd->lock);
108 }
109 finish_wait(&genpd->status_wait_queue, &wait);
110
111 if (genpd->status == GPD_STATE_ACTIVE
112 || (genpd->prepared_count > 0 && genpd->suspend_power_off))
113 return 0;
114
115 if (genpd->status != GPD_STATE_POWER_OFF) {
116 genpd_set_active(genpd);
117 return 0;
118 }
119
120 /*
121 * The list is guaranteed not to change while the loop below is being
122 * executed, unless one of the masters' .power_on() callbacks fiddles
123 * with it.
124 */
125 list_for_each_entry(link, &genpd->slave_links, slave_node) {
126 genpd_sd_counter_inc(link->master);
127 genpd->status = GPD_STATE_WAIT_MASTER;
128
129 mutex_unlock(&genpd->lock);
130
131 ret = pm_genpd_poweron(link->master);
132
133 mutex_lock(&genpd->lock);
134
135 /*
136 * The "wait for parent" status is guaranteed not to change
137 * while the master is powering on.
138 */
139 genpd->status = GPD_STATE_POWER_OFF;
140 wake_up_all(&genpd->status_wait_queue);
141 if (ret) {
142 genpd_sd_counter_dec(link->master);
143 goto err;
144 }
145 }
146
147 if (genpd->power_on) {
148 ret = genpd->power_on(genpd);
149 if (ret)
150 goto err;
151 }
152
153 genpd_set_active(genpd);
154
155 return 0;
156
157 err:
158 list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
159 genpd_sd_counter_dec(link->master);
160
161 return ret;
162 }
163
164 /**
165 * pm_genpd_poweron - Restore power to a given PM domain and its masters.
166 * @genpd: PM domain to power up.
167 */
168 int pm_genpd_poweron(struct generic_pm_domain *genpd)
169 {
170 int ret;
171
172 mutex_lock(&genpd->lock);
173 ret = __pm_genpd_poweron(genpd);
174 mutex_unlock(&genpd->lock);
175 return ret;
176 }
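/*
 * Illustrative sketch (hypothetical platform code, not part of this file):
 * pm_genpd_poweron() is the hook platform code can use to force a whole
 * domain, and through the slave_links walk above all of its masters, back
 * on outside of the runtime PM path.  Device drivers normally never call it
 * directly; runtime PM reaches __pm_genpd_poweron() via
 * pm_genpd_runtime_resume() below.
 *
 *	extern struct generic_pm_domain my_domain;	// hypothetical
 *
 *	static int my_platform_power_up(void)
 *	{
 *		int ret = pm_genpd_poweron(&my_domain);
 *
 *		if (ret)
 *			pr_err("failed to power on domain: %d\n", ret);
 *		return ret;
 *	}
 */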
177
178 #endif /* CONFIG_PM */
179
180 #ifdef CONFIG_PM_RUNTIME
181
182 /**
183 * __pm_genpd_save_device - Save the pre-suspend state of a device.
184 * @pdd: Domain data of the device to save the state of.
185 * @genpd: PM domain the device belongs to.
186 */
187 static int __pm_genpd_save_device(struct pm_domain_data *pdd,
188 struct generic_pm_domain *genpd)
189 __releases(&genpd->lock) __acquires(&genpd->lock)
190 {
191 struct device *dev = pdd->dev;
192 struct device_driver *drv = dev->driver;
193 int ret = 0;
194
195 if (pdd->need_restore)
196 return 0;
197
198 mutex_unlock(&genpd->lock);
199
200 if (drv && drv->pm && drv->pm->runtime_suspend) {
201 if (genpd->start_device)
202 genpd->start_device(dev);
203
204 ret = drv->pm->runtime_suspend(dev);
205
206 if (genpd->stop_device)
207 genpd->stop_device(dev);
208 }
209
210 mutex_lock(&genpd->lock);
211
212 if (!ret)
213 pdd->need_restore = true;
214
215 return ret;
216 }
217
218 /**
219 * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
220 * @pdd: Domain data of the device to restore the state of.
221 * @genpd: PM domain the device belongs to.
222 */
223 static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
224 struct generic_pm_domain *genpd)
225 __releases(&genpd->lock) __acquires(&genpd->lock)
226 {
227 struct device *dev = pdd->dev;
228 struct device_driver *drv = dev->driver;
229
230 if (!pdd->need_restore)
231 return;
232
233 mutex_unlock(&genpd->lock);
234
235 if (drv && drv->pm && drv->pm->runtime_resume) {
236 if (genpd->start_device)
237 genpd->start_device(dev);
238
239 drv->pm->runtime_resume(dev);
240
241 if (genpd->stop_device)
242 genpd->stop_device(dev);
243 }
244
245 mutex_lock(&genpd->lock);
246
247 pdd->need_restore = false;
248 }
249
250 /**
251 * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
252 * @genpd: PM domain to check.
253 *
254 * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
255 * a "power off" operation, which means that a "power on" has occured in the
256 * meantime, or if its resume_count field is different from zero, which means
257 * that one of its devices has been resumed in the meantime.
258 */
259 static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
260 {
261 return genpd->status == GPD_STATE_WAIT_MASTER
262 || genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
263 }
264
265 /**
266 * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
267 * @genpd: PM domain to power off.
268 *
269 * Queue up the execution of pm_genpd_poweroff() unless it's already been done
270 * before.
271 */
272 void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
273 {
274 if (!work_pending(&genpd->power_off_work))
275 queue_work(pm_wq, &genpd->power_off_work);
276 }
277
278 /**
279 * pm_genpd_poweroff - Remove power from a given PM domain.
280 * @genpd: PM domain to power down.
281 *
282 * If all of the @genpd's devices have been suspended and all of its subdomains
283 * have been powered down, run the runtime suspend callbacks provided by all of
284 * the @genpd's devices' drivers and remove power from @genpd.
285 */
286 static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
287 __releases(&genpd->lock) __acquires(&genpd->lock)
288 {
289 struct pm_domain_data *pdd;
290 struct gpd_link *link;
291 unsigned int not_suspended;
292 int ret = 0;
293
294 start:
295 /*
296 * Do not try to power off the domain in the following situations:
297 * (1) The domain is already in the "power off" state.
298 * (2) The domain is waiting for its master to power up.
299 * (3) One of the domain's devices is being resumed right now.
300 * (4) System suspend is in progress.
301 */
302 if (genpd->status == GPD_STATE_POWER_OFF
303 || genpd->status == GPD_STATE_WAIT_MASTER
304 || genpd->resume_count > 0 || genpd->prepared_count > 0)
305 return 0;
306
307 if (atomic_read(&genpd->sd_count) > 0)
308 return -EBUSY;
309
310 not_suspended = 0;
311 list_for_each_entry(pdd, &genpd->dev_list, list_node)
312 if (pdd->dev->driver && !pm_runtime_suspended(pdd->dev))
313 not_suspended++;
314
315 if (not_suspended > genpd->in_progress)
316 return -EBUSY;
317
318 if (genpd->poweroff_task) {
319 /*
320 * Another instance of pm_genpd_poweroff() is executing
321 * callbacks, so tell it to start over and return.
322 */
323 genpd->status = GPD_STATE_REPEAT;
324 return 0;
325 }
326
327 if (genpd->gov && genpd->gov->power_down_ok) {
328 if (!genpd->gov->power_down_ok(&genpd->domain))
329 return -EAGAIN;
330 }
331
332 genpd->status = GPD_STATE_BUSY;
333 genpd->poweroff_task = current;
334
335 list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
336 ret = atomic_read(&genpd->sd_count) == 0 ?
337 __pm_genpd_save_device(pdd, genpd) : -EBUSY;
338
339 if (genpd_abort_poweroff(genpd))
340 goto out;
341
342 if (ret) {
343 genpd_set_active(genpd);
344 goto out;
345 }
346
347 if (genpd->status == GPD_STATE_REPEAT) {
348 genpd->poweroff_task = NULL;
349 goto start;
350 }
351 }
352
353 if (genpd->power_off) {
354 if (atomic_read(&genpd->sd_count) > 0) {
355 ret = -EBUSY;
356 goto out;
357 }
358
359 /*
360 * If sd_count becomes greater than 0 at this point (i.e. one of the
361 * subdomains has incremented it but hasn't called pm_genpd_poweron()
362 * for the master yet), pm_genpd_poweron() will wait for us to drop
363 * the lock, so we can call .power_off() and let pm_genpd_poweron()
364 * restore power for us afterwards (this shouldn't happen very
365 * often).
366 */
367 ret = genpd->power_off(genpd);
368 if (ret == -EBUSY) {
369 genpd_set_active(genpd);
370 goto out;
371 }
372 }
373
374 genpd->status = GPD_STATE_POWER_OFF;
375
376 list_for_each_entry(link, &genpd->slave_links, slave_node) {
377 genpd_sd_counter_dec(link->master);
378 genpd_queue_power_off_work(link->master);
379 }
380
381 out:
382 genpd->poweroff_task = NULL;
383 wake_up_all(&genpd->status_wait_queue);
384 return ret;
385 }
386
387 /**
388 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
389 * @work: Work structure used for scheduling the execution of this function.
390 */
391 static void genpd_power_off_work_fn(struct work_struct *work)
392 {
393 struct generic_pm_domain *genpd;
394
395 genpd = container_of(work, struct generic_pm_domain, power_off_work);
396
397 genpd_acquire_lock(genpd);
398 pm_genpd_poweroff(genpd);
399 genpd_release_lock(genpd);
400 }
401
402 /**
403 * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
404 * @dev: Device to suspend.
405 *
406 * Carry out a runtime suspend of a device under the assumption that its
407 * pm_domain field points to the domain member of an object of type
408 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
409 */
410 static int pm_genpd_runtime_suspend(struct device *dev)
411 {
412 struct generic_pm_domain *genpd;
413
414 dev_dbg(dev, "%s()\n", __func__);
415
416 genpd = dev_to_genpd(dev);
417 if (IS_ERR(genpd))
418 return -EINVAL;
419
420 if (genpd->stop_device) {
421 int ret = genpd->stop_device(dev);
422 if (ret)
423 return ret;
424 }
425
426 mutex_lock(&genpd->lock);
427 genpd->in_progress++;
428 pm_genpd_poweroff(genpd);
429 genpd->in_progress--;
430 mutex_unlock(&genpd->lock);
431
432 return 0;
433 }
434
435 /**
436 * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
437 * @dev: Device to resume.
438 *
439 * Carry out a runtime resume of a device under the assumption that its
440 * pm_domain field points to the domain member of an object of type
441 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
442 */
443 static int pm_genpd_runtime_resume(struct device *dev)
444 {
445 struct generic_pm_domain *genpd;
446 DEFINE_WAIT(wait);
447 int ret;
448
449 dev_dbg(dev, "%s()\n", __func__);
450
451 genpd = dev_to_genpd(dev);
452 if (IS_ERR(genpd))
453 return -EINVAL;
454
455 mutex_lock(&genpd->lock);
456 ret = __pm_genpd_poweron(genpd);
457 if (ret) {
458 mutex_unlock(&genpd->lock);
459 return ret;
460 }
461 genpd->status = GPD_STATE_BUSY;
462 genpd->resume_count++;
463 for (;;) {
464 prepare_to_wait(&genpd->status_wait_queue, &wait,
465 TASK_UNINTERRUPTIBLE);
466 /*
467 * If current is the powering off task, we have been called
468 * reentrantly from one of the device callbacks, so we should
469 * not wait.
470 */
471 if (!genpd->poweroff_task || genpd->poweroff_task == current)
472 break;
473 mutex_unlock(&genpd->lock);
474
475 schedule();
476
477 mutex_lock(&genpd->lock);
478 }
479 finish_wait(&genpd->status_wait_queue, &wait);
480 __pm_genpd_restore_device(&dev->power.subsys_data->domain_data, genpd);
481 genpd->resume_count--;
482 genpd_set_active(genpd);
483 wake_up_all(&genpd->status_wait_queue);
484 mutex_unlock(&genpd->lock);
485
486 if (genpd->start_device)
487 genpd->start_device(dev);
488
489 return 0;
490 }
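/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * drivers do not call the two callbacks above directly.  pm_genpd_init()
 * installs them in genpd->domain.ops, so the runtime PM core dispatches to
 * them through dev->pm_domain when a driver uses the usual get/put helpers:
 *
 *	static int my_driver_do_io(struct device *dev)
 *	{
 *		int ret = pm_runtime_get_sync(dev);  // ends in pm_genpd_runtime_resume()
 *
 *		if (ret < 0) {
 *			pm_runtime_put_noidle(dev);
 *			return ret;
 *		}
 *		// ... access the hardware ...
 *		pm_runtime_put_sync(dev);  // may end in pm_genpd_runtime_suspend()
 *		return 0;
 *	}
 */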
491
492 /**
493 * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
494 */
495 void pm_genpd_poweroff_unused(void)
496 {
497 struct generic_pm_domain *genpd;
498
499 mutex_lock(&gpd_list_lock);
500
501 list_for_each_entry(genpd, &gpd_list, gpd_list_node)
502 genpd_queue_power_off_work(genpd);
503
504 mutex_unlock(&gpd_list_lock);
505 }
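/*
 * Illustrative sketch (hypothetical SoC code, not part of this file):
 * platform code typically calls pm_genpd_poweroff_unused() once, late in
 * boot, after all domains and devices have been registered, so that domains
 * whose devices never became active are not left powered on:
 *
 *	static int __init my_soc_pm_late_init(void)	// hypothetical
 *	{
 *		pm_genpd_poweroff_unused();
 *		return 0;
 *	}
 *	late_initcall(my_soc_pm_late_init);
 */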
506
507 #else
508
509 static inline void genpd_power_off_work_fn(struct work_struct *work) {}
510
511 #define pm_genpd_runtime_suspend NULL
512 #define pm_genpd_runtime_resume NULL
513
514 #endif /* CONFIG_PM_RUNTIME */
515
516 #ifdef CONFIG_PM_SLEEP
517
518 /**
519 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
520 * @genpd: PM domain to power off, if possible.
521 *
522 * Check if the given PM domain can be powered off (during system suspend or
523 * hibernation) and do that if so. Also, in that case propagate to its masters.
524 *
525 * This function is only called in "noirq" stages of system power transitions,
526 * so it need not acquire locks (all of the "noirq" callbacks are executed
527 * sequentially, so it is guaranteed that it will never run twice in parallel).
528 */
529 static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
530 {
531 struct gpd_link *link;
532
533 if (genpd->status == GPD_STATE_POWER_OFF)
534 return;
535
536 if (genpd->suspended_count != genpd->device_count
537 || atomic_read(&genpd->sd_count) > 0)
538 return;
539
540 if (genpd->power_off)
541 genpd->power_off(genpd);
542
543 genpd->status = GPD_STATE_POWER_OFF;
544
545 list_for_each_entry(link, &genpd->slave_links, slave_node) {
546 genpd_sd_counter_dec(link->master);
547 pm_genpd_sync_poweroff(link->master);
548 }
549 }
550
551 /**
552 * resume_needed - Check whether to resume a device before system suspend.
553 * @dev: Device to check.
554 * @genpd: PM domain the device belongs to.
555 *
556 * There are two cases in which a device that can wake up the system from sleep
557 * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
558 * to wake up the system and it has to remain active for this purpose while the
559 * system is in the sleep state and (2) if the device is not enabled to wake up
560 * the system from sleep states and it generally doesn't generate wakeup signals
561 * by itself (those signals are generated on its behalf by other parts of the
562 * system). In the latter case it may be necessary to reconfigure the device's
563 * wakeup settings during system suspend, because it may have been set up to
564 * signal remote wakeup from the system's working state as needed by runtime PM.
565 * Return 'true' in either of the above cases.
566 */
567 static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
568 {
569 bool active_wakeup;
570
571 if (!device_can_wakeup(dev))
572 return false;
573
574 active_wakeup = genpd->active_wakeup && genpd->active_wakeup(dev);
575 return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
576 }
577
578 /**
579 * pm_genpd_prepare - Start power transition of a device in a PM domain.
580 * @dev: Device to start the transition of.
581 *
582 * Start a power transition of a device (during a system-wide power transition)
583 * under the assumption that its pm_domain field points to the domain member of
584 * an object of type struct generic_pm_domain representing a PM domain
585 * consisting of I/O devices.
586 */
587 static int pm_genpd_prepare(struct device *dev)
588 {
589 struct generic_pm_domain *genpd;
590 int ret;
591
592 dev_dbg(dev, "%s()\n", __func__);
593
594 genpd = dev_to_genpd(dev);
595 if (IS_ERR(genpd))
596 return -EINVAL;
597
598 /*
599 * If a wakeup request is pending for the device, it should be woken up
600 * at this point and a system wakeup event should be reported if it's
601 * set up to wake up the system from sleep states.
602 */
603 pm_runtime_get_noresume(dev);
604 if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
605 pm_wakeup_event(dev, 0);
606
607 if (pm_wakeup_pending()) {
608 pm_runtime_put_sync(dev);
609 return -EBUSY;
610 }
611
612 if (resume_needed(dev, genpd))
613 pm_runtime_resume(dev);
614
615 genpd_acquire_lock(genpd);
616
617 if (genpd->prepared_count++ == 0)
618 genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
619
620 genpd_release_lock(genpd);
621
622 if (genpd->suspend_power_off) {
623 pm_runtime_put_noidle(dev);
624 return 0;
625 }
626
627 /*
628 * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
629 * so pm_genpd_poweron() will return immediately, but if the device
630 * is suspended (e.g. it's been stopped by .stop_device()), we need
631 * to make it operational.
632 */
633 pm_runtime_resume(dev);
634 __pm_runtime_disable(dev, false);
635
636 ret = pm_generic_prepare(dev);
637 if (ret) {
638 mutex_lock(&genpd->lock);
639
640 if (--genpd->prepared_count == 0)
641 genpd->suspend_power_off = false;
642
643 mutex_unlock(&genpd->lock);
644 pm_runtime_enable(dev);
645 }
646
647 pm_runtime_put_sync(dev);
648 return ret;
649 }
650
651 /**
652 * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
653 * @dev: Device to suspend.
654 *
655 * Suspend a device under the assumption that its pm_domain field points to the
656 * domain member of an object of type struct generic_pm_domain representing
657 * a PM domain consisting of I/O devices.
658 */
659 static int pm_genpd_suspend(struct device *dev)
660 {
661 struct generic_pm_domain *genpd;
662
663 dev_dbg(dev, "%s()\n", __func__);
664
665 genpd = dev_to_genpd(dev);
666 if (IS_ERR(genpd))
667 return -EINVAL;
668
669 return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
670 }
671
672 /**
673 * pm_genpd_suspend_noirq - Late suspend of a device from an I/O PM domain.
674 * @dev: Device to suspend.
675 *
676 * Carry out a late suspend of a device under the assumption that its
677 * pm_domain field points to the domain member of an object of type
678 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
679 */
680 static int pm_genpd_suspend_noirq(struct device *dev)
681 {
682 struct generic_pm_domain *genpd;
683 int ret;
684
685 dev_dbg(dev, "%s()\n", __func__);
686
687 genpd = dev_to_genpd(dev);
688 if (IS_ERR(genpd))
689 return -EINVAL;
690
691 if (genpd->suspend_power_off)
692 return 0;
693
694 ret = pm_generic_suspend_noirq(dev);
695 if (ret)
696 return ret;
697
698 if (device_may_wakeup(dev)
699 && genpd->active_wakeup && genpd->active_wakeup(dev))
700 return 0;
701
702 if (genpd->stop_device)
703 genpd->stop_device(dev);
704
705 /*
706 * Since all of the "noirq" callbacks are executed sequentially, it is
707 * guaranteed that this function will never run twice in parallel for
708 * the same PM domain, so it is not necessary to use locking here.
709 */
710 genpd->suspended_count++;
711 pm_genpd_sync_poweroff(genpd);
712
713 return 0;
714 }
715
716 /**
717 * pm_genpd_resume_noirq - Early resume of a device from an I/O power domain.
718 * @dev: Device to resume.
719 *
720 * Carry out an early resume of a device under the assumption that its
721 * pm_domain field points to the domain member of an object of type
722 * struct generic_pm_domain representing a power domain consisting of I/O
723 * devices.
724 */
725 static int pm_genpd_resume_noirq(struct device *dev)
726 {
727 struct generic_pm_domain *genpd;
728
729 dev_dbg(dev, "%s()\n", __func__);
730
731 genpd = dev_to_genpd(dev);
732 if (IS_ERR(genpd))
733 return -EINVAL;
734
735 if (genpd->suspend_power_off)
736 return 0;
737
738 /*
739 * Since all of the "noirq" callbacks are executed sequentially, it is
740 * guaranteed that this function will never run twice in parallel for
741 * the same PM domain, so it is not necessary to use locking here.
742 */
743 pm_genpd_poweron(genpd);
744 genpd->suspended_count--;
745 if (genpd->start_device)
746 genpd->start_device(dev);
747
748 return pm_generic_resume_noirq(dev);
749 }
750
751 /**
752 * pm_genpd_resume - Resume a device belonging to an I/O power domain.
753 * @dev: Device to resume.
754 *
755 * Resume a device under the assumption that its pm_domain field points to the
756 * domain member of an object of type struct generic_pm_domain representing
757 * a power domain consisting of I/O devices.
758 */
759 static int pm_genpd_resume(struct device *dev)
760 {
761 struct generic_pm_domain *genpd;
762
763 dev_dbg(dev, "%s()\n", __func__);
764
765 genpd = dev_to_genpd(dev);
766 if (IS_ERR(genpd))
767 return -EINVAL;
768
769 return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
770 }
771
772 /**
773 * pm_genpd_freeze - Freeze a device belonging to an I/O power domain.
774 * @dev: Device to freeze.
775 *
776 * Freeze a device under the assumption that its pm_domain field points to the
777 * domain member of an object of type struct generic_pm_domain representing
778 * a power domain consisting of I/O devices.
779 */
780 static int pm_genpd_freeze(struct device *dev)
781 {
782 struct generic_pm_domain *genpd;
783
784 dev_dbg(dev, "%s()\n", __func__);
785
786 genpd = dev_to_genpd(dev);
787 if (IS_ERR(genpd))
788 return -EINVAL;
789
790 return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
791 }
792
793 /**
794 * pm_genpd_freeze_noirq - Late freeze of a device from an I/O power domain.
795 * @dev: Device to freeze.
796 *
797 * Carry out a late freeze of a device under the assumption that its
798 * pm_domain field points to the domain member of an object of type
799 * struct generic_pm_domain representing a power domain consisting of I/O
800 * devices.
801 */
802 static int pm_genpd_freeze_noirq(struct device *dev)
803 {
804 struct generic_pm_domain *genpd;
805 int ret;
806
807 dev_dbg(dev, "%s()\n", __func__);
808
809 genpd = dev_to_genpd(dev);
810 if (IS_ERR(genpd))
811 return -EINVAL;
812
813 if (genpd->suspend_power_off)
814 return 0;
815
816 ret = pm_generic_freeze_noirq(dev);
817 if (ret)
818 return ret;
819
820 if (genpd->stop_device)
821 genpd->stop_device(dev);
822
823 return 0;
824 }
825
826 /**
827 * pm_genpd_thaw_noirq - Early thaw of a device from an I/O power domain.
828 * @dev: Device to thaw.
829 *
830 * Carry out an early thaw of a device under the assumption that its
831 * pm_domain field points to the domain member of an object of type
832 * struct generic_pm_domain representing a power domain consisting of I/O
833 * devices.
834 */
835 static int pm_genpd_thaw_noirq(struct device *dev)
836 {
837 struct generic_pm_domain *genpd;
838
839 dev_dbg(dev, "%s()\n", __func__);
840
841 genpd = dev_to_genpd(dev);
842 if (IS_ERR(genpd))
843 return -EINVAL;
844
845 if (genpd->suspend_power_off)
846 return 0;
847
848 if (genpd->start_device)
849 genpd->start_device(dev);
850
851 return pm_generic_thaw_noirq(dev);
852 }
853
854 /**
855 * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
856 * @dev: Device to thaw.
857 *
858 * Thaw a device under the assumption that its pm_domain field points to the
859 * domain member of an object of type struct generic_pm_domain representing
860 * a power domain consisting of I/O devices.
861 */
862 static int pm_genpd_thaw(struct device *dev)
863 {
864 struct generic_pm_domain *genpd;
865
866 dev_dbg(dev, "%s()\n", __func__);
867
868 genpd = dev_to_genpd(dev);
869 if (IS_ERR(genpd))
870 return -EINVAL;
871
872 return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
873 }
874
875 /**
876 * pm_genpd_dev_poweroff - Power off a device belonging to an I/O PM domain.
877 * @dev: Device to suspend.
878 *
879 * Power off a device under the assumption that its pm_domain field points to
880 * the domain member of an object of type struct generic_pm_domain representing
881 * a PM domain consisting of I/O devices.
882 */
883 static int pm_genpd_dev_poweroff(struct device *dev)
884 {
885 struct generic_pm_domain *genpd;
886
887 dev_dbg(dev, "%s()\n", __func__);
888
889 genpd = dev_to_genpd(dev);
890 if (IS_ERR(genpd))
891 return -EINVAL;
892
893 return genpd->suspend_power_off ? 0 : pm_generic_poweroff(dev);
894 }
895
896 /**
897 * pm_genpd_dev_poweroff_noirq - Late power off of a device from a PM domain.
898 * @dev: Device to suspend.
899 *
900 * Carry out a late powering off of a device under the assumption that its
901 * pm_domain field points to the domain member of an object of type
902 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
903 */
904 static int pm_genpd_dev_poweroff_noirq(struct device *dev)
905 {
906 struct generic_pm_domain *genpd;
907 int ret;
908
909 dev_dbg(dev, "%s()\n", __func__);
910
911 genpd = dev_to_genpd(dev);
912 if (IS_ERR(genpd))
913 return -EINVAL;
914
915 if (genpd->suspend_power_off)
916 return 0;
917
918 ret = pm_generic_poweroff_noirq(dev);
919 if (ret)
920 return ret;
921
922 if (device_may_wakeup(dev)
923 && genpd->active_wakeup && genpd->active_wakeup(dev))
924 return 0;
925
926 if (genpd->stop_device)
927 genpd->stop_device(dev);
928
929 /*
930 * Since all of the "noirq" callbacks are executed sequentially, it is
931 * guaranteed that this function will never run twice in parallel for
932 * the same PM domain, so it is not necessary to use locking here.
933 */
934 genpd->suspended_count++;
935 pm_genpd_sync_poweroff(genpd);
936
937 return 0;
938 }
939
940 /**
941 * pm_genpd_restore_noirq - Early restore of a device from an I/O power domain.
942 * @dev: Device to resume.
943 *
944 * Carry out an early restore of a device under the assumption that its
945 * pm_domain field points to the domain member of an object of type
946 * struct generic_pm_domain representing a power domain consisting of I/O
947 * devices.
948 */
949 static int pm_genpd_restore_noirq(struct device *dev)
950 {
951 struct generic_pm_domain *genpd;
952
953 dev_dbg(dev, "%s()\n", __func__);
954
955 genpd = dev_to_genpd(dev);
956 if (IS_ERR(genpd))
957 return -EINVAL;
958
959 /*
960 * Since all of the "noirq" callbacks are executed sequentially, it is
961 * guaranteed that this function will never run twice in parallel for
962 * the same PM domain, so it is not necessary to use locking here.
963 */
964 genpd->status = GPD_STATE_POWER_OFF;
965 if (genpd->suspend_power_off) {
966 /*
967 * The boot kernel might put the domain into the power on state,
968 * so make sure it really is powered off.
969 */
970 if (genpd->power_off)
971 genpd->power_off(genpd);
972 return 0;
973 }
974
975 pm_genpd_poweron(genpd);
976 genpd->suspended_count--;
977 if (genpd->start_device)
978 genpd->start_device(dev);
979
980 return pm_generic_restore_noirq(dev);
981 }
982
983 /**
984 * pm_genpd_restore - Restore a device belonging to an I/O power domain.
985 * @dev: Device to resume.
986 *
987 * Restore a device under the assumption that its pm_domain field points to the
988 * domain member of an object of type struct generic_pm_domain representing
989 * a power domain consisting of I/O devices.
990 */
991 static int pm_genpd_restore(struct device *dev)
992 {
993 struct generic_pm_domain *genpd;
994
995 dev_dbg(dev, "%s()\n", __func__);
996
997 genpd = dev_to_genpd(dev);
998 if (IS_ERR(genpd))
999 return -EINVAL;
1000
1001 return genpd->suspend_power_off ? 0 : pm_generic_restore(dev);
1002 }
1003
1004 /**
1005 * pm_genpd_complete - Complete power transition of a device in a power domain.
1006 * @dev: Device to complete the transition of.
1007 *
1008 * Complete a power transition of a device (during a system-wide power
1009 * transition) under the assumption that its pm_domain field points to the
1010 * domain member of an object of type struct generic_pm_domain representing
1011 * a power domain consisting of I/O devices.
1012 */
1013 static void pm_genpd_complete(struct device *dev)
1014 {
1015 struct generic_pm_domain *genpd;
1016 bool run_complete;
1017
1018 dev_dbg(dev, "%s()\n", __func__);
1019
1020 genpd = dev_to_genpd(dev);
1021 if (IS_ERR(genpd))
1022 return;
1023
1024 mutex_lock(&genpd->lock);
1025
1026 run_complete = !genpd->suspend_power_off;
1027 if (--genpd->prepared_count == 0)
1028 genpd->suspend_power_off = false;
1029
1030 mutex_unlock(&genpd->lock);
1031
1032 if (run_complete) {
1033 pm_generic_complete(dev);
1034 pm_runtime_set_active(dev);
1035 pm_runtime_enable(dev);
1036 pm_runtime_idle(dev);
1037 }
1038 }
1039
1040 #else
1041
1042 #define pm_genpd_prepare NULL
1043 #define pm_genpd_suspend NULL
1044 #define pm_genpd_suspend_noirq NULL
1045 #define pm_genpd_resume_noirq NULL
1046 #define pm_genpd_resume NULL
1047 #define pm_genpd_freeze NULL
1048 #define pm_genpd_freeze_noirq NULL
1049 #define pm_genpd_thaw_noirq NULL
1050 #define pm_genpd_thaw NULL
1051 #define pm_genpd_dev_poweroff_noirq NULL
1052 #define pm_genpd_dev_poweroff NULL
1053 #define pm_genpd_restore_noirq NULL
1054 #define pm_genpd_restore NULL
1055 #define pm_genpd_complete NULL
1056
1057 #endif /* CONFIG_PM_SLEEP */
1058
1059 /**
1060 * pm_genpd_add_device - Add a device to an I/O PM domain.
1061 * @genpd: PM domain to add the device to.
1062 * @dev: Device to be added.
1063 */
1064 int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
1065 {
1066 struct pm_domain_data *pdd;
1067 int ret = 0;
1068
1069 dev_dbg(dev, "%s()\n", __func__);
1070
1071 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
1072 return -EINVAL;
1073
1074 genpd_acquire_lock(genpd);
1075
1076 if (genpd->status == GPD_STATE_POWER_OFF) {
1077 ret = -EINVAL;
1078 goto out;
1079 }
1080
1081 if (genpd->prepared_count > 0) {
1082 ret = -EAGAIN;
1083 goto out;
1084 }
1085
1086 list_for_each_entry(pdd, &genpd->dev_list, list_node)
1087 if (pdd->dev == dev) {
1088 ret = -EINVAL;
1089 goto out;
1090 }
1091
1092 genpd->device_count++;
1093
1094 dev->pm_domain = &genpd->domain;
1095 dev_pm_get_subsys_data(dev);
1096 pdd = &dev->power.subsys_data->domain_data;
1097 pdd->dev = dev;
1098 pdd->need_restore = false;
1099 list_add_tail(&pdd->list_node, &genpd->dev_list);
1100
1101 out:
1102 genpd_release_lock(genpd);
1103
1104 return ret;
1105 }
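/*
 * Illustrative sketch (hypothetical board code, not part of this file):
 * platform or board code binds a device to its domain, typically while
 * registering the device, subject to the checks above (the domain must be
 * powered on and not in the middle of a system suspend):
 *
 *	extern struct generic_pm_domain my_domain;	// hypothetical
 *
 *	static int my_board_add_uart(struct platform_device *pdev)
 *	{
 *		return pm_genpd_add_device(&my_domain, &pdev->dev);
 *	}
 */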
1106
1107 /**
1108 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
1109 * @genpd: PM domain to remove the device from.
1110 * @dev: Device to be removed.
1111 */
1112 int pm_genpd_remove_device(struct generic_pm_domain *genpd,
1113 struct device *dev)
1114 {
1115 struct pm_domain_data *pdd;
1116 int ret = -EINVAL;
1117
1118 dev_dbg(dev, "%s()\n", __func__);
1119
1120 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
1121 return -EINVAL;
1122
1123 genpd_acquire_lock(genpd);
1124
1125 if (genpd->prepared_count > 0) {
1126 ret = -EAGAIN;
1127 goto out;
1128 }
1129
1130 list_for_each_entry(pdd, &genpd->dev_list, list_node) {
1131 if (pdd->dev != dev)
1132 continue;
1133
1134 list_del_init(&pdd->list_node);
1135 pdd->dev = NULL;
1136 dev_pm_put_subsys_data(dev);
1137 dev->pm_domain = NULL;
1138
1139 genpd->device_count--;
1140
1141 ret = 0;
1142 break;
1143 }
1144
1145 out:
1146 genpd_release_lock(genpd);
1147
1148 return ret;
1149 }
1150
1151 /**
1152 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1153 * @genpd: Master PM domain to add the subdomain to.
1154 * @subdomain: Subdomain to be added.
1155 */
1156 int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
1157 struct generic_pm_domain *subdomain)
1158 {
1159 struct gpd_link *link;
1160 int ret = 0;
1161
1162 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
1163 return -EINVAL;
1164
1165 start:
1166 genpd_acquire_lock(genpd);
1167 mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
1168
1169 if (subdomain->status != GPD_STATE_POWER_OFF
1170 && subdomain->status != GPD_STATE_ACTIVE) {
1171 mutex_unlock(&subdomain->lock);
1172 genpd_release_lock(genpd);
1173 goto start;
1174 }
1175
1176 if (genpd->status == GPD_STATE_POWER_OFF
1177 && subdomain->status != GPD_STATE_POWER_OFF) {
1178 ret = -EINVAL;
1179 goto out;
1180 }
1181
1182 list_for_each_entry(link, &genpd->slave_links, slave_node) {
1183 if (link->slave == subdomain && link->master == genpd) {
1184 ret = -EINVAL;
1185 goto out;
1186 }
1187 }
1188
1189 link = kzalloc(sizeof(*link), GFP_KERNEL);
1190 if (!link) {
1191 ret = -ENOMEM;
1192 goto out;
1193 }
1194 link->master = genpd;
1195 list_add_tail(&link->master_node, &genpd->master_links);
1196 link->slave = subdomain;
1197 list_add_tail(&link->slave_node, &subdomain->slave_links);
1198 if (subdomain->status != GPD_STATE_POWER_OFF)
1199 genpd_sd_counter_inc(genpd);
1200
1201 out:
1202 mutex_unlock(&subdomain->lock);
1203 genpd_release_lock(genpd);
1204
1205 return ret;
1206 }
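/*
 * Illustrative sketch (hypothetical SoC code, not part of this file):
 * subdomains model nested power areas; sd_count keeps the master from being
 * powered off while any of its subdomains is still on:
 *
 *	extern struct generic_pm_domain soc_core_domain;  // hypothetical master
 *	extern struct generic_pm_domain gfx_domain;       // hypothetical subdomain
 *
 *	static void my_soc_link_domains(void)
 *	{
 *		if (pm_genpd_add_subdomain(&soc_core_domain, &gfx_domain))
 *			pr_warn("failed to link gfx domain\n");
 *	}
 */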
1207
1208 /**
1209 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
1210 * @genpd: Master PM domain to remove the subdomain from.
1211 * @subdomain: Subdomain to be removed.
1212 */
1213 int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
1214 struct generic_pm_domain *subdomain)
1215 {
1216 struct gpd_link *link;
1217 int ret = -EINVAL;
1218
1219 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
1220 return -EINVAL;
1221
1222 start:
1223 genpd_acquire_lock(genpd);
1224
1225 list_for_each_entry(link, &genpd->master_links, master_node) {
1226 if (link->slave != subdomain)
1227 continue;
1228
1229 mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
1230
1231 if (subdomain->status != GPD_STATE_POWER_OFF
1232 && subdomain->status != GPD_STATE_ACTIVE) {
1233 mutex_unlock(&subdomain->lock);
1234 genpd_release_lock(genpd);
1235 goto start;
1236 }
1237
1238 list_del(&link->master_node);
1239 list_del(&link->slave_node);
1240 kfree(link);
1241 if (subdomain->status != GPD_STATE_POWER_OFF)
1242 genpd_sd_counter_dec(genpd);
1243
1244 mutex_unlock(&subdomain->lock);
1245
1246 ret = 0;
1247 break;
1248 }
1249
1250 genpd_release_lock(genpd);
1251
1252 return ret;
1253 }
1254
1255 /**
1256 * pm_genpd_init - Initialize a generic I/O PM domain object.
1257 * @genpd: PM domain object to initialize.
1258 * @gov: PM domain governor to associate with the domain (may be NULL).
1259 * @is_off: Whether the domain is initially powered off (sets the initial status).
1260 */
1261 void pm_genpd_init(struct generic_pm_domain *genpd,
1262 struct dev_power_governor *gov, bool is_off)
1263 {
1264 if (IS_ERR_OR_NULL(genpd))
1265 return;
1266
1267 INIT_LIST_HEAD(&genpd->master_links);
1268 INIT_LIST_HEAD(&genpd->slave_links);
1269 INIT_LIST_HEAD(&genpd->dev_list);
1270 mutex_init(&genpd->lock);
1271 genpd->gov = gov;
1272 INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
1273 genpd->in_progress = 0;
1274 atomic_set(&genpd->sd_count, 0);
1275 genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
1276 init_waitqueue_head(&genpd->status_wait_queue);
1277 genpd->poweroff_task = NULL;
1278 genpd->resume_count = 0;
1279 genpd->device_count = 0;
1280 genpd->suspended_count = 0;
1281 genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
1282 genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
1283 genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
1284 genpd->domain.ops.prepare = pm_genpd_prepare;
1285 genpd->domain.ops.suspend = pm_genpd_suspend;
1286 genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
1287 genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
1288 genpd->domain.ops.resume = pm_genpd_resume;
1289 genpd->domain.ops.freeze = pm_genpd_freeze;
1290 genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
1291 genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
1292 genpd->domain.ops.thaw = pm_genpd_thaw;
1293 genpd->domain.ops.poweroff = pm_genpd_dev_poweroff;
1294 genpd->domain.ops.poweroff_noirq = pm_genpd_dev_poweroff_noirq;
1295 genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
1296 genpd->domain.ops.restore = pm_genpd_restore;
1297 genpd->domain.ops.complete = pm_genpd_complete;
1298 mutex_lock(&gpd_list_lock);
1299 list_add(&genpd->gpd_list_node, &gpd_list);
1300 mutex_unlock(&gpd_list_lock);
1301 }
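/*
 * Illustrative sketch (hypothetical platform code, not part of this file):
 * putting it together, a platform supplies power_on/power_off (and optionally
 * start_device/stop_device) callbacks and registers the domain with
 * pm_genpd_init(); the ops table filled in above then routes runtime PM and
 * system sleep transitions of every device in the domain through the
 * pm_genpd_* callbacks defined in this file.
 *
 *	static int my_domain_power_on(struct generic_pm_domain *genpd)
 *	{
 *		// hypothetical: ungate power in the SoC power controller
 *		return 0;
 *	}
 *
 *	static int my_domain_power_off(struct generic_pm_domain *genpd)
 *	{
 *		// hypothetical: gate power to the domain
 *		return 0;
 *	}
 *
 *	static struct generic_pm_domain my_domain = {
 *		.power_on = my_domain_power_on,
 *		.power_off = my_domain_power_off,
 *	};
 *
 *	static void __init my_soc_init_domains(void)
 *	{
 *		pm_genpd_init(&my_domain, NULL, true);	// starts powered off
 *	}
 */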