/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 *
 * This file is released under the GPLv2.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>

static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

#ifdef CONFIG_PM

static struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev->pm_domain))
		return ERR_PTR(-EINVAL);

	return pd_to_genpd(dev->pm_domain);
}

static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
	bool ret = false;

	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
		ret = !!atomic_dec_and_test(&genpd->sd_count);

	return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic_inc();
}

static void genpd_acquire_lock(struct generic_pm_domain *genpd)
{
	DEFINE_WAIT(wait);

	mutex_lock(&genpd->lock);
	/*
	 * Wait for the domain to transition into either the active,
	 * or the power off state.
	 */
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		if (genpd->status == GPD_STATE_ACTIVE
		    || genpd->status == GPD_STATE_POWER_OFF)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);
}

static void genpd_release_lock(struct generic_pm_domain *genpd)
{
	mutex_unlock(&genpd->lock);
}

static void genpd_set_active(struct generic_pm_domain *genpd)
{
	if (genpd->resume_count == 0)
		genpd->status = GPD_STATE_ACTIVE;
}

/**
 * pm_genpd_poweron - Restore power to a given PM domain and its parents.
 * @genpd: PM domain to power up.
 *
 * Restore power to @genpd and all of its parents so that it is possible to
 * resume a device belonging to it.
 */
int pm_genpd_poweron(struct generic_pm_domain *genpd)
{
	struct generic_pm_domain *parent;
	int ret = 0;

	mutex_lock(&genpd->lock);

	parent = genpd->parent;

 start:
	if (genpd->status == GPD_STATE_ACTIVE
	    || (genpd->prepared_count > 0 && genpd->suspend_power_off))
		goto out;

	if (genpd->status != GPD_STATE_POWER_OFF) {
		genpd_set_active(genpd);
		goto out;
	}

	if (parent) {
		genpd_sd_counter_inc(parent);

		mutex_unlock(&genpd->lock);

		ret = pm_genpd_poweron(parent);

		mutex_lock(&genpd->lock);

		if (ret)
			goto err;

		parent = NULL;
		goto start;
	}

	if (genpd->power_on) {
		ret = genpd->power_on(genpd);
		if (ret)
			goto err;
	}

	genpd_set_active(genpd);

 out:
	mutex_unlock(&genpd->lock);

	return ret;

 err:
	if (genpd->parent)
		genpd_sd_counter_dec(genpd->parent);

	goto out;
}

#endif /* CONFIG_PM */

#ifdef CONFIG_PM_RUNTIME

/**
 * __pm_genpd_save_device - Save the pre-suspend state of a device.
 * @dle: Device list entry of the device to save the state of.
 * @genpd: PM domain the device belongs to.
 */
static int __pm_genpd_save_device(struct dev_list_entry *dle,
				  struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct device *dev = dle->dev;
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (dle->need_restore)
		return 0;

	mutex_unlock(&genpd->lock);

	if (drv && drv->pm && drv->pm->runtime_suspend) {
		if (genpd->start_device)
			genpd->start_device(dev);

		ret = drv->pm->runtime_suspend(dev);

		if (genpd->stop_device)
			genpd->stop_device(dev);
	}

	mutex_lock(&genpd->lock);

	if (!ret)
		dle->need_restore = true;

	return ret;
}

/**
 * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
 * @dle: Device list entry of the device to restore the state of.
 * @genpd: PM domain the device belongs to.
 */
static void __pm_genpd_restore_device(struct dev_list_entry *dle,
				      struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct device *dev = dle->dev;
	struct device_driver *drv = dev->driver;

	if (!dle->need_restore)
		return;

	mutex_unlock(&genpd->lock);

	if (drv && drv->pm && drv->pm->runtime_resume) {
		if (genpd->start_device)
			genpd->start_device(dev);

		drv->pm->runtime_resume(dev);

		if (genpd->stop_device)
			genpd->stop_device(dev);
	}

	mutex_lock(&genpd->lock);

	dle->need_restore = false;
}

/**
 * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
 * @genpd: PM domain to check.
 *
 * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
 * a "power off" operation, which means that a "power on" has occurred in the
 * meantime, or if its resume_count field is different from zero, which means
 * that one of its devices has been resumed in the meantime.
 */
static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
{
	return genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
}

/**
 * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of pm_genpd_poweroff() unless it's already been done
 * before.
 */
void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	if (!work_pending(&genpd->power_off_work))
		queue_work(pm_wq, &genpd->power_off_work);
}

/**
 * pm_genpd_poweroff - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, run the runtime suspend callbacks provided by all of
 * the @genpd's devices' drivers and remove power from @genpd.
 */
static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct generic_pm_domain *parent;
	struct dev_list_entry *dle;
	unsigned int not_suspended;
	int ret = 0;

 start:
	/*
	 * Do not try to power off the domain in the following situations:
	 * (1) The domain is already in the "power off" state.
	 * (2) System suspend is in progress.
	 * (3) One of the domain's devices is being resumed right now.
	 */
	if (genpd->status == GPD_STATE_POWER_OFF || genpd->prepared_count > 0
	    || genpd->resume_count > 0)
		return 0;

	if (atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	not_suspended = 0;
	list_for_each_entry(dle, &genpd->dev_list, node)
		if (dle->dev->driver && !pm_runtime_suspended(dle->dev))
			not_suspended++;

	if (not_suspended > genpd->in_progress)
		return -EBUSY;

	if (genpd->poweroff_task) {
		/*
		 * Another instance of pm_genpd_poweroff() is executing
		 * callbacks, so tell it to start over and return.
		 */
		genpd->status = GPD_STATE_REPEAT;
		return 0;
	}

	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return -EAGAIN;
	}

	genpd->status = GPD_STATE_BUSY;
	genpd->poweroff_task = current;

	list_for_each_entry_reverse(dle, &genpd->dev_list, node) {
		ret = atomic_read(&genpd->sd_count) == 0 ?
			__pm_genpd_save_device(dle, genpd) : -EBUSY;
		if (ret) {
			genpd_set_active(genpd);
			goto out;
		}

		if (genpd_abort_poweroff(genpd))
			goto out;

		if (genpd->status == GPD_STATE_REPEAT) {
			genpd->poweroff_task = NULL;
			goto start;
		}
	}

	if (genpd->power_off) {
		if (atomic_read(&genpd->sd_count) > 0) {
			ret = -EBUSY;
			goto out;
		}

		/*
		 * If sd_count > 0 at this point, one of the children hasn't
		 * managed to call pm_genpd_poweron() for the parent yet after
		 * incrementing it. In that case pm_genpd_poweron() will wait
		 * for us to drop the lock, so we can call .power_off() and let
		 * the pm_genpd_poweron() restore power for us (this shouldn't
		 * happen very often).
		 */
		ret = genpd->power_off(genpd);
		if (ret == -EBUSY) {
			genpd_set_active(genpd);
			goto out;
		}
	}

	genpd->status = GPD_STATE_POWER_OFF;

	parent = genpd->parent;
	if (parent && genpd_sd_counter_dec(parent))
		genpd_queue_power_off_work(parent);

 out:
	genpd->poweroff_task = NULL;
	wake_up_all(&genpd->status_wait_queue);
	return ret;
}

/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
	struct generic_pm_domain *genpd;

	genpd = container_of(work, struct generic_pm_domain, power_off_work);

	genpd_acquire_lock(genpd);
	pm_genpd_poweroff(genpd);
	genpd_release_lock(genpd);
}

/**
 * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->stop_device) {
		int ret = genpd->stop_device(dev);
		if (ret)
			return ret;
	}

	mutex_lock(&genpd->lock);
	genpd->in_progress++;
	pm_genpd_poweroff(genpd);
	genpd->in_progress--;
	mutex_unlock(&genpd->lock);

	return 0;
}

/**
 * __pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 * @genpd: PM domain the device belongs to.
 */
static void __pm_genpd_runtime_resume(struct device *dev,
				      struct generic_pm_domain *genpd)
{
	struct dev_list_entry *dle;

	list_for_each_entry(dle, &genpd->dev_list, node) {
		if (dle->dev == dev) {
			__pm_genpd_restore_device(dle, genpd);
			break;
		}
	}
}

/**
 * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	DEFINE_WAIT(wait);
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	ret = pm_genpd_poweron(genpd);
	if (ret)
		return ret;

	mutex_lock(&genpd->lock);
	genpd->status = GPD_STATE_BUSY;
	genpd->resume_count++;
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		/*
		 * If current is the powering off task, we have been called
		 * reentrantly from one of the device callbacks, so we should
		 * not wait.
		 */
		if (!genpd->poweroff_task || genpd->poweroff_task == current)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);
	__pm_genpd_runtime_resume(dev, genpd);
	genpd->resume_count--;
	genpd_set_active(genpd);
	wake_up_all(&genpd->status_wait_queue);
	mutex_unlock(&genpd->lock);

	if (genpd->start_device)
		genpd->start_device(dev);

	return 0;
}

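/*
 * Illustrative sketch, not part of the original file: once a device has been
 * added to a domain with pm_genpd_add_device(), the ordinary runtime PM
 * helpers reach pm_genpd_runtime_suspend()/pm_genpd_runtime_resume() above
 * through dev->pm_domain->ops.  The driver function below is hypothetical.
 */
#if 0
static int foo_do_transfer(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);	/* ends up in pm_genpd_runtime_resume() */
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}

	/* ... access the hardware while the domain is powered on ... */

	pm_runtime_put(dev);	/* may eventually lead to pm_genpd_runtime_suspend() */
	return 0;
}
#endif
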
/**
 * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
 */
void pm_genpd_poweroff_unused(void)
{
	struct generic_pm_domain *genpd;

	mutex_lock(&gpd_list_lock);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_queue_power_off_work(genpd);

	mutex_unlock(&gpd_list_lock);
}

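/*
 * Illustrative sketch, not part of the original file: a platform would
 * normally call pm_genpd_poweroff_unused() once, late in boot, so that
 * domains left with no devices in use get queued for power off.  The
 * initcall below is hypothetical.
 */
#if 0
static int __init foo_pd_poweroff_unused(void)
{
	pm_genpd_poweroff_unused();
	return 0;
}
late_initcall(foo_pd_poweroff_unused);
#endif
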
#else

static inline void genpd_power_off_work_fn(struct work_struct *work) {}
static inline void __pm_genpd_runtime_resume(struct device *dev,
					     struct generic_pm_domain *genpd) {}

#define pm_genpd_runtime_suspend NULL
#define pm_genpd_runtime_resume NULL

#endif /* CONFIG_PM_RUNTIME */

#ifdef CONFIG_PM_SLEEP

/**
 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its parents.
 * @genpd: PM domain to power off, if possible.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so. Also, in that case propagate to its parent.
 *
 * This function is only called in "noirq" stages of system power transitions,
 * so it need not acquire locks (all of the "noirq" callbacks are executed
 * sequentially, so it is guaranteed that it will never run twice in parallel).
 */
static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
{
	struct generic_pm_domain *parent = genpd->parent;

	if (genpd->status == GPD_STATE_POWER_OFF)
		return;

	if (genpd->suspended_count != genpd->device_count
	    || atomic_read(&genpd->sd_count) > 0)
		return;

	if (genpd->power_off)
		genpd->power_off(genpd);

	genpd->status = GPD_STATE_POWER_OFF;
	if (parent) {
		genpd_sd_counter_dec(parent);
		pm_genpd_sync_poweroff(parent);
	}
}

/**
 * resume_needed - Check whether to resume a device before system suspend.
 * @dev: Device to check.
 * @genpd: PM domain the device belongs to.
 *
 * There are two cases in which a device that can wake up the system from sleep
 * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
 * to wake up the system and it has to remain active for this purpose while the
 * system is in the sleep state and (2) if the device is not enabled to wake up
 * the system from sleep states and it generally doesn't generate wakeup signals
 * by itself (those signals are generated on its behalf by other parts of the
 * system). In the latter case it may be necessary to reconfigure the device's
 * wakeup settings during system suspend, because it may have been set up to
 * signal remote wakeup from the system's working state as needed by runtime PM.
 * Return 'true' in either of the above cases.
 */
static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
{
	bool active_wakeup;

	if (!device_can_wakeup(dev))
		return false;

	active_wakeup = genpd->active_wakeup && genpd->active_wakeup(dev);
	return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
}

/**
 * pm_genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int pm_genpd_prepare(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * If a wakeup request is pending for the device, it should be woken up
	 * at this point and a system wakeup event should be reported if it's
	 * set up to wake up the system from sleep states.
	 */
	pm_runtime_get_noresume(dev);
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		pm_runtime_put_sync(dev);
		return -EBUSY;
	}

	if (resume_needed(dev, genpd))
		pm_runtime_resume(dev);

	genpd_acquire_lock(genpd);

	if (genpd->prepared_count++ == 0)
		genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;

	genpd_release_lock(genpd);

	if (genpd->suspend_power_off) {
		pm_runtime_put_noidle(dev);
		return 0;
	}

	/*
	 * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
	 * so pm_genpd_poweron() will return immediately, but if the device
	 * is suspended (e.g. it's been stopped by .stop_device()), we need
	 * to make it operational.
	 */
	pm_runtime_resume(dev);
	__pm_runtime_disable(dev, false);

	ret = pm_generic_prepare(dev);
	if (ret) {
		mutex_lock(&genpd->lock);

		if (--genpd->prepared_count == 0)
			genpd->suspend_power_off = false;

		mutex_unlock(&genpd->lock);
		pm_runtime_enable(dev);
	}

	pm_runtime_put_sync(dev);
	return ret;
}

/**
 * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Suspend a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
}

/**
 * pm_genpd_suspend_noirq - Late suspend of a device from an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a late suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->suspend_power_off)
		return 0;

	ret = pm_generic_suspend_noirq(dev);
	if (ret)
		return ret;

	if (device_may_wakeup(dev)
	    && genpd->active_wakeup && genpd->active_wakeup(dev))
		return 0;

	if (genpd->stop_device)
		genpd->stop_device(dev);

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	genpd->suspended_count++;
	pm_genpd_sync_poweroff(genpd);

	return 0;
}

/**
 * pm_genpd_resume_noirq - Early resume of a device from an I/O power domain.
 * @dev: Device to resume.
 *
 * Carry out an early resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_resume_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->suspend_power_off)
		return 0;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	pm_genpd_poweron(genpd);
	genpd->suspended_count--;
	if (genpd->start_device)
		genpd->start_device(dev);

	return pm_generic_resume_noirq(dev);
}

/**
 * pm_genpd_resume - Resume a device belonging to an I/O power domain.
 * @dev: Device to resume.
 *
 * Resume a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
}

/**
 * pm_genpd_freeze - Freeze a device belonging to an I/O power domain.
 * @dev: Device to freeze.
 *
 * Freeze a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_freeze(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
}

/**
 * pm_genpd_freeze_noirq - Late freeze of a device from an I/O power domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->suspend_power_off)
		return 0;

	ret = pm_generic_freeze_noirq(dev);
	if (ret)
		return ret;

	if (genpd->stop_device)
		genpd->stop_device(dev);

	return 0;
}

/**
 * pm_genpd_thaw_noirq - Early thaw of a device from an I/O power domain.
 * @dev: Device to thaw.
 *
 * Carry out an early thaw of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_thaw_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->suspend_power_off)
		return 0;

	if (genpd->start_device)
		genpd->start_device(dev);

	return pm_generic_thaw_noirq(dev);
}

/**
 * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
 * @dev: Device to thaw.
 *
 * Thaw a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_thaw(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
}

/**
 * pm_genpd_dev_poweroff - Power off a device belonging to an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Power off a device under the assumption that its pm_domain field points to
 * the domain member of an object of type struct generic_pm_domain representing
 * a PM domain consisting of I/O devices.
 */
static int pm_genpd_dev_poweroff(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_poweroff(dev);
}

/**
 * pm_genpd_dev_poweroff_noirq - Late power off of a device from a PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a late powering off of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_dev_poweroff_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->suspend_power_off)
		return 0;

	ret = pm_generic_poweroff_noirq(dev);
	if (ret)
		return ret;

	if (device_may_wakeup(dev)
	    && genpd->active_wakeup && genpd->active_wakeup(dev))
		return 0;

	if (genpd->stop_device)
		genpd->stop_device(dev);

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	genpd->suspended_count++;
	pm_genpd_sync_poweroff(genpd);

	return 0;
}

/**
 * pm_genpd_restore_noirq - Early restore of a device from an I/O power domain.
 * @dev: Device to resume.
 *
 * Carry out an early restore of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_restore_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	genpd->status = GPD_STATE_POWER_OFF;
	if (genpd->suspend_power_off) {
		/*
		 * The boot kernel might put the domain into the power on state,
		 * so make sure it really is powered off.
		 */
		if (genpd->power_off)
			genpd->power_off(genpd);
		return 0;
	}

	pm_genpd_poweron(genpd);
	genpd->suspended_count--;
	if (genpd->start_device)
		genpd->start_device(dev);

	return pm_generic_restore_noirq(dev);
}

/**
 * pm_genpd_restore - Restore a device belonging to an I/O power domain.
 * @dev: Device to resume.
 *
 * Restore a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_restore(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_restore(dev);
}

/**
 * pm_genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void pm_genpd_complete(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool run_complete;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return;

	mutex_lock(&genpd->lock);

	run_complete = !genpd->suspend_power_off;
	if (--genpd->prepared_count == 0)
		genpd->suspend_power_off = false;

	mutex_unlock(&genpd->lock);

	if (run_complete) {
		pm_generic_complete(dev);
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
		pm_runtime_idle(dev);
	}
}

#else

#define pm_genpd_prepare NULL
#define pm_genpd_suspend NULL
#define pm_genpd_suspend_noirq NULL
#define pm_genpd_resume_noirq NULL
#define pm_genpd_resume NULL
#define pm_genpd_freeze NULL
#define pm_genpd_freeze_noirq NULL
#define pm_genpd_thaw_noirq NULL
#define pm_genpd_thaw NULL
#define pm_genpd_dev_poweroff_noirq NULL
#define pm_genpd_dev_poweroff NULL
#define pm_genpd_restore_noirq NULL
#define pm_genpd_restore NULL
#define pm_genpd_complete NULL

#endif /* CONFIG_PM_SLEEP */

/**
 * pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 */
int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
{
	struct dev_list_entry *dle;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	genpd_acquire_lock(genpd);

	if (genpd->status == GPD_STATE_POWER_OFF) {
		ret = -EINVAL;
		goto out;
	}

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	list_for_each_entry(dle, &genpd->dev_list, node)
		if (dle->dev == dev) {
			ret = -EINVAL;
			goto out;
		}

	dle = kzalloc(sizeof(*dle), GFP_KERNEL);
	if (!dle) {
		ret = -ENOMEM;
		goto out;
	}

	dle->dev = dev;
	dle->need_restore = false;
	list_add_tail(&dle->node, &genpd->dev_list);
	genpd->device_count++;

	spin_lock_irq(&dev->power.lock);
	dev->pm_domain = &genpd->domain;
	spin_unlock_irq(&dev->power.lock);

 out:
	genpd_release_lock(genpd);

	return ret;
}

/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @genpd: PM domain to remove the device from.
 * @dev: Device to be removed.
 */
int pm_genpd_remove_device(struct generic_pm_domain *genpd,
			   struct device *dev)
{
	struct dev_list_entry *dle;
	int ret = -EINVAL;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	genpd_acquire_lock(genpd);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	list_for_each_entry(dle, &genpd->dev_list, node) {
		if (dle->dev != dev)
			continue;

		spin_lock_irq(&dev->power.lock);
		dev->pm_domain = NULL;
		spin_unlock_irq(&dev->power.lock);

		genpd->device_count--;
		list_del(&dle->node);
		kfree(dle);

		ret = 0;
		break;
	}

 out:
	genpd_release_lock(genpd);

	return ret;
}

/**
 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @genpd: Master PM domain to add the subdomain to.
 * @new_subdomain: Subdomain to be added.
 */
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
			   struct generic_pm_domain *new_subdomain)
{
	struct generic_pm_domain *subdomain;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(new_subdomain))
		return -EINVAL;

 start:
	genpd_acquire_lock(genpd);
	mutex_lock_nested(&new_subdomain->lock, SINGLE_DEPTH_NESTING);

	if (new_subdomain->status != GPD_STATE_POWER_OFF
	    && new_subdomain->status != GPD_STATE_ACTIVE) {
		mutex_unlock(&new_subdomain->lock);
		genpd_release_lock(genpd);
		goto start;
	}

	if (genpd->status == GPD_STATE_POWER_OFF
	    && new_subdomain->status != GPD_STATE_POWER_OFF) {
		ret = -EINVAL;
		goto out;
	}

	list_for_each_entry(subdomain, &genpd->sd_list, sd_node) {
		if (subdomain == new_subdomain) {
			ret = -EINVAL;
			goto out;
		}
	}

	list_add_tail(&new_subdomain->sd_node, &genpd->sd_list);
	new_subdomain->parent = genpd;
	if (new_subdomain->status != GPD_STATE_POWER_OFF)
		genpd_sd_counter_inc(genpd);

 out:
	mutex_unlock(&new_subdomain->lock);
	genpd_release_lock(genpd);

	return ret;
}

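/*
 * Illustrative sketch, not part of the original file: linking two domains so
 * that powering on the subdomain keeps its parent powered via the parent's
 * sd_count.  foo_parent_pd and foo_child_pd are hypothetical domains that
 * have already been registered with pm_genpd_init().
 */
#if 0
static int __init foo_pd_link(void)
{
	return pm_genpd_add_subdomain(&foo_parent_pd, &foo_child_pd);
}
#endif
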
/**
 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @genpd: Master PM domain to remove the subdomain from.
 * @target: Subdomain to be removed.
 */
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
			      struct generic_pm_domain *target)
{
	struct generic_pm_domain *subdomain;
	int ret = -EINVAL;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(target))
		return -EINVAL;

 start:
	genpd_acquire_lock(genpd);

	list_for_each_entry(subdomain, &genpd->sd_list, sd_node) {
		if (subdomain != target)
			continue;

		mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);

		if (subdomain->status != GPD_STATE_POWER_OFF
		    && subdomain->status != GPD_STATE_ACTIVE) {
			mutex_unlock(&subdomain->lock);
			genpd_release_lock(genpd);
			goto start;
		}

		list_del(&subdomain->sd_node);
		subdomain->parent = NULL;
		if (subdomain->status != GPD_STATE_POWER_OFF)
			genpd_sd_counter_dec(genpd);

		mutex_unlock(&subdomain->lock);

		ret = 0;
		break;
	}

	genpd_release_lock(genpd);

	return ret;
}

/**
 * pm_genpd_init - Initialize a generic I/O PM domain object.
 * @genpd: PM domain object to initialize.
 * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: Initial value of the domain's status field (true means powered off).
 */
void pm_genpd_init(struct generic_pm_domain *genpd,
		   struct dev_power_governor *gov, bool is_off)
{
	if (IS_ERR_OR_NULL(genpd))
		return;

	INIT_LIST_HEAD(&genpd->sd_node);
	genpd->parent = NULL;
	INIT_LIST_HEAD(&genpd->dev_list);
	INIT_LIST_HEAD(&genpd->sd_list);
	mutex_init(&genpd->lock);
	genpd->gov = gov;
	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
	genpd->in_progress = 0;
	atomic_set(&genpd->sd_count, 0);
	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
	init_waitqueue_head(&genpd->status_wait_queue);
	genpd->poweroff_task = NULL;
	genpd->resume_count = 0;
	genpd->device_count = 0;
	genpd->suspended_count = 0;
	genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
	genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
	genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
	genpd->domain.ops.prepare = pm_genpd_prepare;
	genpd->domain.ops.suspend = pm_genpd_suspend;
	genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
	genpd->domain.ops.resume = pm_genpd_resume;
	genpd->domain.ops.freeze = pm_genpd_freeze;
	genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
	genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
	genpd->domain.ops.thaw = pm_genpd_thaw;
	genpd->domain.ops.poweroff = pm_genpd_dev_poweroff;
	genpd->domain.ops.poweroff_noirq = pm_genpd_dev_poweroff_noirq;
	genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
	genpd->domain.ops.restore = pm_genpd_restore;
	genpd->domain.ops.complete = pm_genpd_complete;
	mutex_lock(&gpd_list_lock);
	list_add(&genpd->gpd_list_node, &gpd_list);
	mutex_unlock(&gpd_list_lock);
}
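
/*
 * Illustrative sketch, not part of the original file: minimal platform-side
 * setup of a generic PM domain.  foo_power_up(), foo_power_down(), foo_pd and
 * the device passed to foo_pd_setup() are all hypothetical.
 */
#if 0
static int foo_pd_power_on(struct generic_pm_domain *domain)
{
	return foo_power_up();		/* enable the power area (platform specific) */
}

static int foo_pd_power_off(struct generic_pm_domain *domain)
{
	return foo_power_down();	/* disable the power area (platform specific) */
}

static struct generic_pm_domain foo_pd = {
	.power_on = foo_pd_power_on,
	.power_off = foo_pd_power_off,
};

static void __init foo_pd_setup(struct device *dev)
{
	/* Register the domain as initially powered on. */
	pm_genpd_init(&foo_pd, NULL, false);

	/* Runtime PM of @dev is now routed through foo_pd's callbacks. */
	pm_genpd_add_device(&foo_pd, dev);
}
#endif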