/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 *
 * This file is released under the GPLv2.
 */

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/pm_clock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>

#define GENPD_RETRY_MAX_MS	250		/* Approximate */

#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d);			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback;			\
	if (__routine) {					\
		__ret = __routine(dev);				\
	}							\
	__ret;							\
})
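/*
 * For illustration only (not part of the original file): with the macro
 * above, a wrapper such as genpd_stop_dev() below behaves like this
 * hand-expanded version, dispatching to the optional per-domain device
 * callback and returning 0 when none is set:
 *
 *	static int genpd_stop_dev(struct generic_pm_domain *genpd,
 *				  struct device *dev)
 *	{
 *		int (*cb)(struct device *) = genpd->dev_ops.stop;
 *
 *		return cb ? cb(dev) : 0;
 *	}
 */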

static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

/*
 * Get the generic PM domain for a particular struct device.
 * This validates the struct device pointer, the PM domain pointer,
 * and checks that the PM domain pointer is a real generic PM domain.
 * Any failure results in NULL being returned.
 */
struct generic_pm_domain *pm_genpd_lookup_dev(struct device *dev)
{
	struct generic_pm_domain *genpd = NULL, *gpd;

	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
		return NULL;

	mutex_lock(&gpd_list_lock);
	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
		if (&gpd->domain == dev->pm_domain) {
			genpd = gpd;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);

	return genpd;
}

/*
 * This should only be used where we are certain that the pm_domain
 * attached to the device is a genpd domain.
 */
static struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev->pm_domain))
		return ERR_PTR(-EINVAL);

	return pd_to_genpd(dev->pm_domain);
}

static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
}

static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}

static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
	bool ret = false;

	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
		ret = !!atomic_dec_and_test(&genpd->sd_count);

	return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic();
}

static int genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	if (!genpd->power_on)
		return 0;

	if (!timed)
		return genpd->power_on(genpd);

	time_start = ktime_get();
	ret = genpd->power_on(genpd);
	if (ret)
		return ret;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->power_on_latency_ns)
		return ret;

	genpd->power_on_latency_ns = elapsed_ns;
	genpd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "on", elapsed_ns);

	return ret;
}

static int genpd_power_off(struct generic_pm_domain *genpd, bool timed)
{
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	if (!genpd->power_off)
		return 0;

	if (!timed)
		return genpd->power_off(genpd);

	time_start = ktime_get();
	ret = genpd->power_off(genpd);
	if (ret == -EBUSY)
		return ret;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->power_off_latency_ns)
		return ret;

	genpd->power_off_latency_ns = elapsed_ns;
	genpd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "off", elapsed_ns);

	return ret;
}

/**
 * genpd_queue_power_off_work - Queue up the execution of genpd_poweroff().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of genpd_poweroff() unless it's already been done
 * before.
 */
static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	queue_work(pm_wq, &genpd->power_off_work);
}

static int genpd_poweron(struct generic_pm_domain *genpd);

/**
 * __genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 *
 * Restore power to @genpd and all of its masters so that it is possible to
 * resume a device belonging to it.
 */
static int __genpd_poweron(struct generic_pm_domain *genpd)
{
	struct gpd_link *link;
	int ret = 0;

	if (genpd->status == GPD_STATE_ACTIVE
	    || (genpd->prepared_count > 0 && genpd->suspend_power_off))
		return 0;

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the masters' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_inc(link->master);

		ret = genpd_poweron(link->master);
		if (ret) {
			genpd_sd_counter_dec(link->master);
			goto err;
		}
	}

	ret = genpd_power_on(genpd, true);
	if (ret)
		goto err;

	genpd->status = GPD_STATE_ACTIVE;
	return 0;

err:
	list_for_each_entry_continue_reverse(link,
					&genpd->slave_links,
					slave_node) {
		genpd_sd_counter_dec(link->master);
		genpd_queue_power_off_work(link->master);
	}

	return ret;
}

/**
 * genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 */
static int genpd_poweron(struct generic_pm_domain *genpd)
{
	int ret;

	mutex_lock(&genpd->lock);
	ret = __genpd_poweron(genpd);
	mutex_unlock(&genpd->lock);
	return ret;
}

static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, save_state, dev);
}

static int genpd_restore_dev(struct generic_pm_domain *genpd,
			struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, restore_state, dev);
}

static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
				     unsigned long val, void *ptr)
{
	struct generic_pm_domain_data *gpd_data;
	struct device *dev;

	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
	dev = gpd_data->base.dev;

	for (;;) {
		struct generic_pm_domain *genpd;
		struct pm_domain_data *pdd;

		spin_lock_irq(&dev->power.lock);

		pdd = dev->power.subsys_data ?
				dev->power.subsys_data->domain_data : NULL;
		if (pdd && pdd->dev) {
			to_gpd_data(pdd)->td.constraint_changed = true;
			genpd = dev_to_genpd(dev);
		} else {
			genpd = ERR_PTR(-ENODATA);
		}

		spin_unlock_irq(&dev->power.lock);

		if (!IS_ERR(genpd)) {
			mutex_lock(&genpd->lock);
			genpd->max_off_time_changed = true;
			mutex_unlock(&genpd->lock);
		}

		dev = dev->parent;
		if (!dev || dev->power.ignore_children)
			break;
	}

	return NOTIFY_DONE;
}

/**
 * genpd_poweroff - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 * @is_async: PM domain is powered down from a scheduled work
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, remove power from @genpd.
 */
static int genpd_poweroff(struct generic_pm_domain *genpd, bool is_async)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended = 0;

	/*
	 * Do not try to power off the domain in the following situations:
	 * (1) The domain is already in the "power off" state.
	 * (2) System suspend is in progress.
	 */
	if (genpd->status == GPD_STATE_POWER_OFF
	    || genpd->prepared_count > 0)
		return 0;

	if (atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		enum pm_qos_flags_status stat;

		stat = dev_pm_qos_flags(pdd->dev,
					PM_QOS_FLAG_NO_POWER_OFF
						| PM_QOS_FLAG_REMOTE_WAKEUP);
		if (stat > PM_QOS_FLAGS_NONE)
			return -EBUSY;

		if (!pm_runtime_suspended(pdd->dev) || pdd->dev->power.irq_safe)
			not_suspended++;
	}

	if (not_suspended > 1 || (not_suspended == 1 && is_async))
		return -EBUSY;

	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return -EAGAIN;
	}

	if (genpd->power_off) {
		int ret;

		if (atomic_read(&genpd->sd_count) > 0)
			return -EBUSY;

		/*
		 * If sd_count > 0 at this point, one of the subdomains hasn't
		 * managed to call genpd_poweron() for the master yet after
		 * incrementing it.  In that case genpd_poweron() will wait
		 * for us to drop the lock, so we can call .power_off() and let
		 * the genpd_poweron() restore power for us (this shouldn't
		 * happen very often).
		 */
		ret = genpd_power_off(genpd, true);
		if (ret)
			return ret;
	}

	genpd->status = GPD_STATE_POWER_OFF;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		genpd_queue_power_off_work(link->master);
	}

	return 0;
}

/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
	struct generic_pm_domain *genpd;

	genpd = container_of(work, struct generic_pm_domain, power_off_work);

	mutex_lock(&genpd->lock);
	genpd_poweroff(genpd, true);
	mutex_unlock(&genpd->lock);
}

/**
 * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool (*stop_ok)(struct device *__dev);
	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * A runtime PM centric subsystem/driver may re-use the runtime PM
	 * callbacks for purposes other than runtime PM. In those scenarios
	 * runtime PM is disabled. Under these circumstances, we shall skip
	 * validating/measuring the PM QoS latency.
	 */
	stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
	if (runtime_pm && stop_ok && !stop_ok(dev))
		return -EBUSY;

	/* Measure suspend latency. */
	if (runtime_pm)
		time_start = ktime_get();

	ret = genpd_save_dev(genpd, dev);
	if (ret)
		return ret;

	ret = genpd_stop_dev(genpd, dev);
	if (ret) {
		genpd_restore_dev(genpd, dev);
		return ret;
	}

	/* Update suspend latency value if the measured time exceeds it. */
	if (runtime_pm) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->suspend_latency_ns) {
			td->suspend_latency_ns = elapsed_ns;
			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	/*
	 * If power.irq_safe is set, this routine will be run with interrupts
	 * off, so it can't use mutexes.
	 */
	if (dev->power.irq_safe)
		return 0;

	mutex_lock(&genpd->lock);
	genpd_poweroff(genpd, false);
	mutex_unlock(&genpd->lock);

	return 0;
}
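/*
 * For illustration only: the "re-use" scenario the comment in
 * pm_genpd_runtime_suspend() refers to. A runtime PM centric driver may
 * point its system sleep callbacks at pm_runtime_force_suspend() and
 * pm_runtime_force_resume(), which invoke the runtime PM callbacks while
 * runtime PM itself is disabled; that is why the latency accounting above
 * is gated on pm_runtime_enabled(). The foo_* names are hypothetical:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
 *					pm_runtime_force_resume)
 *		SET_RUNTIME_PM_OPS(foo_runtime_suspend,
 *				   foo_runtime_resume, NULL)
 *	};
 */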

/**
 * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;
	bool timed = true;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/* If power.irq_safe, the PM domain is never powered off. */
	if (dev->power.irq_safe) {
		timed = false;
		goto out;
	}

	mutex_lock(&genpd->lock);
	ret = __genpd_poweron(genpd);
	mutex_unlock(&genpd->lock);

	if (ret)
		return ret;

out:
	/* Measure resume latency. */
	if (timed && runtime_pm)
		time_start = ktime_get();

	genpd_start_dev(genpd, dev);
	genpd_restore_dev(genpd, dev);

	/* Update resume latency value if the measured time exceeds it. */
	if (timed && runtime_pm) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->resume_latency_ns) {
			td->resume_latency_ns = elapsed_ns;
			dev_dbg(dev, "resume latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	return 0;
}

static bool pd_ignore_unused;
static int __init pd_ignore_unused_setup(char *__unused)
{
	pd_ignore_unused = true;
	return 1;
}
__setup("pd_ignore_unused", pd_ignore_unused_setup);
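/*
 * For illustration only: booting with "pd_ignore_unused" on the kernel
 * command line sets the flag above, so genpd_poweroff_unused() below leaves
 * otherwise-unused domains powered on (useful when debugging power
 * sequencing problems).
 */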

/**
 * genpd_poweroff_unused - Power off all PM domains with no devices in use.
 */
static int __init genpd_poweroff_unused(void)
{
	struct generic_pm_domain *genpd;

	if (pd_ignore_unused) {
		pr_warn("genpd: Not disabling unused power domains\n");
		return 0;
	}

	mutex_lock(&gpd_list_lock);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_queue_power_off_work(genpd);

	mutex_unlock(&gpd_list_lock);

	return 0;
}
late_initcall(genpd_poweroff_unused);

#ifdef CONFIG_PM_SLEEP

/**
 * pm_genpd_present - Check if the given PM domain has been initialized.
 * @genpd: PM domain to check.
 */
static bool pm_genpd_present(const struct generic_pm_domain *genpd)
{
	const struct generic_pm_domain *gpd;

	if (IS_ERR_OR_NULL(genpd))
		return false;

	list_for_each_entry(gpd, &gpd_list, gpd_list_node)
		if (gpd == genpd)
			return true;

	return false;
}

static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
				    struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
}

/**
 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
 * @genpd: PM domain to power off, if possible.
 * @timed: True if latency measurements are allowed.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so.  Also, in that case propagate to its masters.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 * executed sequentially, so it is guaranteed that it will never run twice in
 * parallel).
 */
static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd,
				   bool timed)
{
	struct gpd_link *link;

	if (genpd->status == GPD_STATE_POWER_OFF)
		return;

	if (genpd->suspended_count != genpd->device_count
	    || atomic_read(&genpd->sd_count) > 0)
		return;

	genpd_power_off(genpd, timed);

	genpd->status = GPD_STATE_POWER_OFF;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		pm_genpd_sync_poweroff(link->master, timed);
	}
}

/**
 * pm_genpd_sync_poweron - Synchronously power on a PM domain and its masters.
 * @genpd: PM domain to power on.
 * @timed: True if latency measurements are allowed.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 * executed sequentially, so it is guaranteed that it will never run twice in
 * parallel).
 */
static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd,
				  bool timed)
{
	struct gpd_link *link;

	if (genpd->status == GPD_STATE_ACTIVE)
		return;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		pm_genpd_sync_poweron(link->master, timed);
		genpd_sd_counter_inc(link->master);
	}

	genpd_power_on(genpd, timed);

	genpd->status = GPD_STATE_ACTIVE;
}

/**
 * resume_needed - Check whether to resume a device before system suspend.
 * @dev: Device to check.
 * @genpd: PM domain the device belongs to.
 *
 * There are two cases in which a device that can wake up the system from sleep
 * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
 * to wake up the system and it has to remain active for this purpose while the
 * system is in the sleep state and (2) if the device is not enabled to wake up
 * the system from sleep states and it generally doesn't generate wakeup signals
 * by itself (those signals are generated on its behalf by other parts of the
 * system).  In the latter case it may be necessary to reconfigure the device's
 * wakeup settings during system suspend, because it may have been set up to
 * signal remote wakeup from the system's working state as needed by runtime PM.
 * Return 'true' in either of the above cases.
 */
static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
{
	bool active_wakeup;

	if (!device_can_wakeup(dev))
		return false;

	active_wakeup = genpd_dev_active_wakeup(genpd, dev);
	return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
}

/**
 * pm_genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int pm_genpd_prepare(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * If a wakeup request is pending for the device, it should be woken up
	 * at this point and a system wakeup event should be reported if it's
	 * set up to wake up the system from sleep states.
	 */
	pm_runtime_get_noresume(dev);
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		pm_runtime_put(dev);
		return -EBUSY;
	}

	if (resume_needed(dev, genpd))
		pm_runtime_resume(dev);

	mutex_lock(&genpd->lock);

	if (genpd->prepared_count++ == 0) {
		genpd->suspended_count = 0;
		genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
	}

	mutex_unlock(&genpd->lock);

	if (genpd->suspend_power_off) {
		pm_runtime_put_noidle(dev);
		return 0;
	}

	/*
	 * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
	 * so genpd_poweron() will return immediately, but if the device
	 * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
	 * to make it operational.
	 */
	pm_runtime_resume(dev);
	__pm_runtime_disable(dev, false);

	ret = pm_generic_prepare(dev);
	if (ret) {
		mutex_lock(&genpd->lock);

		if (--genpd->prepared_count == 0)
			genpd->suspend_power_off = false;

		mutex_unlock(&genpd->lock);
		pm_runtime_enable(dev);
	}

	pm_runtime_put(dev);
	return ret;
}

/**
 * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Suspend a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
}

/**
 * pm_genpd_suspend_late - Late suspend of a device from an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a late suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend_late(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_suspend_late(dev);
}

/**
 * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int pm_genpd_suspend_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->suspend_power_off
	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
		return 0;

	genpd_stop_dev(genpd, dev);

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	genpd->suspended_count++;
	pm_genpd_sync_poweroff(genpd, true);

	return 0;
}

/**
 * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int pm_genpd_resume_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->suspend_power_off
	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
		return 0;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	pm_genpd_sync_poweron(genpd, true);
	genpd->suspended_count--;

	return genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_resume_early - Early resume of a device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out an early resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_resume_early(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_resume_early(dev);
}

/**
 * pm_genpd_resume - Resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Resume a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
}

/**
 * pm_genpd_freeze - Freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Freeze a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_freeze(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
}

/**
 * pm_genpd_freeze_late - Late freeze of a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_late(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_freeze_late(dev);
}

/**
 * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_stop_dev(genpd, dev);
}

/**
 * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Start the device, unless power has been removed from the domain already
 * before the system transition.
 */
static int pm_genpd_thaw_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_thaw_early - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Carry out an early thaw of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_thaw_early(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_thaw_early(dev);
}

/**
 * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
 * @dev: Device to thaw.
 *
 * Thaw a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_thaw(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
}

/**
 * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain will be in the same power state as before the
 * hibernation the system is resuming from and start the device if necessary.
 */
static int pm_genpd_restore_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 *
	 * At this point suspended_count == 0 means we are being run for the
	 * first time for the given domain in the present cycle.
	 */
	if (genpd->suspended_count++ == 0) {
		/*
		 * The boot kernel might put the domain into arbitrary state,
		 * so make it appear as powered off to pm_genpd_sync_poweron(),
		 * so that it tries to power it on in case it was really off.
		 */
		genpd->status = GPD_STATE_POWER_OFF;
		if (genpd->suspend_power_off) {
			/*
			 * If the domain was off before the hibernation, make
			 * sure it will be off going forward.
			 */
			genpd_power_off(genpd, true);

			return 0;
		}
	}

	if (genpd->suspend_power_off)
		return 0;

	pm_genpd_sync_poweron(genpd, true);

	return genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void pm_genpd_complete(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool run_complete;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return;

	mutex_lock(&genpd->lock);

	run_complete = !genpd->suspend_power_off;
	if (--genpd->prepared_count == 0)
		genpd->suspend_power_off = false;

	mutex_unlock(&genpd->lock);

	if (run_complete) {
		pm_generic_complete(dev);
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
		pm_request_idle(dev);
	}
}

/**
 * genpd_syscore_switch - Switch power during system core suspend or resume.
 * @dev: Device that normally is marked as "always on" to switch power for.
 * @suspend: True to power the domain off, false to power it back on.
 *
 * This routine may only be called during the system core (syscore) suspend or
 * resume phase for devices whose "always on" flags are set.
 */
static void genpd_syscore_switch(struct device *dev, bool suspend)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd(dev);
	if (!pm_genpd_present(genpd))
		return;

	if (suspend) {
		genpd->suspended_count++;
		pm_genpd_sync_poweroff(genpd, false);
	} else {
		pm_genpd_sync_poweron(genpd, false);
		genpd->suspended_count--;
	}
}

void pm_genpd_syscore_poweroff(struct device *dev)
{
	genpd_syscore_switch(dev, true);
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff);

void pm_genpd_syscore_poweron(struct device *dev)
{
	genpd_syscore_switch(dev, false);
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);
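/*
 * For illustration only: a timekeeping device that must stay functional into
 * the syscore phase can bracket its syscore callbacks with the two helpers
 * above. The foo_* names are hypothetical:
 *
 *	static void foo_timer_suspend(struct clocksource *cs)
 *	{
 *		pm_genpd_syscore_poweroff(foo_timer_dev);
 *	}
 *
 *	static void foo_timer_resume(struct clocksource *cs)
 *	{
 *		pm_genpd_syscore_poweron(foo_timer_dev);
 *	}
 */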

#else /* !CONFIG_PM_SLEEP */

#define pm_genpd_prepare		NULL
#define pm_genpd_suspend		NULL
#define pm_genpd_suspend_late		NULL
#define pm_genpd_suspend_noirq		NULL
#define pm_genpd_resume_early		NULL
#define pm_genpd_resume_noirq		NULL
#define pm_genpd_resume			NULL
#define pm_genpd_freeze			NULL
#define pm_genpd_freeze_late		NULL
#define pm_genpd_freeze_noirq		NULL
#define pm_genpd_thaw_early		NULL
#define pm_genpd_thaw_noirq		NULL
#define pm_genpd_thaw			NULL
#define pm_genpd_restore_noirq		NULL
#define pm_genpd_complete		NULL

#endif /* CONFIG_PM_SLEEP */

static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
					struct generic_pm_domain *genpd,
					struct gpd_timing_data *td)
{
	struct generic_pm_domain_data *gpd_data;
	int ret;

	ret = dev_pm_get_subsys_data(dev);
	if (ret)
		return ERR_PTR(ret);

	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
	if (!gpd_data) {
		ret = -ENOMEM;
		goto err_put;
	}

	if (td)
		gpd_data->td = *td;

	gpd_data->base.dev = dev;
	gpd_data->td.constraint_changed = true;
	gpd_data->td.effective_constraint_ns = -1;
	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;

	spin_lock_irq(&dev->power.lock);

	if (dev->power.subsys_data->domain_data) {
		ret = -EINVAL;
		goto err_free;
	}

	dev->power.subsys_data->domain_data = &gpd_data->base;
	dev->pm_domain = &genpd->domain;

	spin_unlock_irq(&dev->power.lock);

	return gpd_data;

err_free:
	spin_unlock_irq(&dev->power.lock);
	kfree(gpd_data);
err_put:
	dev_pm_put_subsys_data(dev);
	return ERR_PTR(ret);
}

static void genpd_free_dev_data(struct device *dev,
				struct generic_pm_domain_data *gpd_data)
{
	spin_lock_irq(&dev->power.lock);

	dev->pm_domain = NULL;
	dev->power.subsys_data->domain_data = NULL;

	spin_unlock_irq(&dev->power.lock);

	kfree(gpd_data);
	dev_pm_put_subsys_data(dev);
}

/**
 * __pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			  struct gpd_timing_data *td)
{
	struct generic_pm_domain_data *gpd_data;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	gpd_data = genpd_alloc_dev_data(dev, genpd, td);
	if (IS_ERR(gpd_data))
		return PTR_ERR(gpd_data);

	mutex_lock(&genpd->lock);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
	if (ret)
		goto out;

	genpd->device_count++;
	genpd->max_off_time_changed = true;

	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);

out:
	mutex_unlock(&genpd->lock);

	if (ret)
		genpd_free_dev_data(dev, gpd_data);
	else
		dev_pm_qos_add_notifier(dev, &gpd_data->nb);

	return ret;
}
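/*
 * For illustration only: platform code would normally call this through the
 * pm_genpd_add_device() wrapper (which passes td == NULL) once both the
 * domain and the device exist. The foo_* names are hypothetical:
 *
 *	ret = pm_genpd_add_device(&foo_pd, &foo_pdev->dev);
 *	if (ret)
 *		dev_warn(&foo_pdev->dev, "failed to add to PM domain: %d\n",
 *			 ret);
 */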

/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @genpd: PM domain to remove the device from.
 * @dev: Device to be removed.
 */
int pm_genpd_remove_device(struct generic_pm_domain *genpd,
			   struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;
	struct pm_domain_data *pdd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (!genpd || genpd != pm_genpd_lookup_dev(dev))
		return -EINVAL;

	/* The above validation also means we have existing domain_data. */
	pdd = dev->power.subsys_data->domain_data;
	gpd_data = to_gpd_data(pdd);
	dev_pm_qos_remove_notifier(dev, &gpd_data->nb);

	mutex_lock(&genpd->lock);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	genpd->device_count--;
	genpd->max_off_time_changed = true;

	if (genpd->detach_dev)
		genpd->detach_dev(genpd, dev);

	list_del_init(&pdd->list_node);

	mutex_unlock(&genpd->lock);

	genpd_free_dev_data(dev, gpd_data);

	return 0;

out:
	mutex_unlock(&genpd->lock);
	dev_pm_qos_add_notifier(dev, &gpd_data->nb);

	return ret;
}

/**
 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @genpd: Master PM domain to add the subdomain to.
 * @subdomain: Subdomain to be added.
 */
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
			   struct generic_pm_domain *subdomain)
{
	struct gpd_link *link, *itr;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
	    || genpd == subdomain)
		return -EINVAL;

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	mutex_lock(&genpd->lock);
	mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);

	if (genpd->status == GPD_STATE_POWER_OFF
	    && subdomain->status != GPD_STATE_POWER_OFF) {
		ret = -EINVAL;
		goto out;
	}

	list_for_each_entry(itr, &genpd->master_links, master_node) {
		if (itr->slave == subdomain && itr->master == genpd) {
			ret = -EINVAL;
			goto out;
		}
	}

	link->master = genpd;
	list_add_tail(&link->master_node, &genpd->master_links);
	link->slave = subdomain;
	list_add_tail(&link->slave_node, &subdomain->slave_links);
	if (subdomain->status != GPD_STATE_POWER_OFF)
		genpd_sd_counter_inc(genpd);

out:
	mutex_unlock(&subdomain->lock);
	mutex_unlock(&genpd->lock);
	if (ret)
		kfree(link);
	return ret;
}
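/*
 * For illustration only (hypothetical domains): making "foo" a subdomain of
 * "soc" means "soc" becomes the master and cannot be powered off until "foo"
 * is off, as tracked by the sd_count handling above:
 *
 *	pm_genpd_init(&soc_pd, NULL, false);
 *	pm_genpd_init(&foo_pd, NULL, false);
 *	ret = pm_genpd_add_subdomain(&soc_pd, &foo_pd);
 */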

/**
 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @genpd: Master PM domain to remove the subdomain from.
 * @subdomain: Subdomain to be removed.
 */
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
			      struct generic_pm_domain *subdomain)
{
	struct gpd_link *link;
	int ret = -EINVAL;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
		return -EINVAL;

	mutex_lock(&genpd->lock);

	if (!list_empty(&subdomain->slave_links) || subdomain->device_count) {
		pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
			subdomain->name);
		ret = -EBUSY;
		goto out;
	}

	list_for_each_entry(link, &genpd->master_links, master_node) {
		if (link->slave != subdomain)
			continue;

		mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);

		list_del(&link->master_node);
		list_del(&link->slave_node);
		kfree(link);
		if (subdomain->status != GPD_STATE_POWER_OFF)
			genpd_sd_counter_dec(genpd);

		mutex_unlock(&subdomain->lock);

		ret = 0;
		break;
	}

out:
	mutex_unlock(&genpd->lock);

	return ret;
}

/* Default device callbacks for generic PM domains. */

/**
 * pm_genpd_default_save_state - Default "save device state" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_save_state(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_suspend;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_suspend;

	return cb ? cb(dev) : 0;
}

/**
 * pm_genpd_default_restore_state - Default PM domains "restore device state".
 * @dev: Device to handle.
 */
static int pm_genpd_default_restore_state(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_resume;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_resume;

	return cb ? cb(dev) : 0;
}

/**
 * pm_genpd_init - Initialize a generic I/O PM domain object.
 * @genpd: PM domain object to initialize.
 * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: Initial state of the domain (true if it starts powered off).
 */
void pm_genpd_init(struct generic_pm_domain *genpd,
		   struct dev_power_governor *gov, bool is_off)
{
	if (IS_ERR_OR_NULL(genpd))
		return;

	INIT_LIST_HEAD(&genpd->master_links);
	INIT_LIST_HEAD(&genpd->slave_links);
	INIT_LIST_HEAD(&genpd->dev_list);
	mutex_init(&genpd->lock);
	genpd->gov = gov;
	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
	atomic_set(&genpd->sd_count, 0);
	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
	genpd->device_count = 0;
	genpd->max_off_time_ns = -1;
	genpd->max_off_time_changed = true;
	genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
	genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
	genpd->domain.ops.prepare = pm_genpd_prepare;
	genpd->domain.ops.suspend = pm_genpd_suspend;
	genpd->domain.ops.suspend_late = pm_genpd_suspend_late;
	genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
	genpd->domain.ops.resume_early = pm_genpd_resume_early;
	genpd->domain.ops.resume = pm_genpd_resume;
	genpd->domain.ops.freeze = pm_genpd_freeze;
	genpd->domain.ops.freeze_late = pm_genpd_freeze_late;
	genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
	genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
	genpd->domain.ops.thaw_early = pm_genpd_thaw_early;
	genpd->domain.ops.thaw = pm_genpd_thaw;
	genpd->domain.ops.poweroff = pm_genpd_suspend;
	genpd->domain.ops.poweroff_late = pm_genpd_suspend_late;
	genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
	genpd->domain.ops.restore_early = pm_genpd_resume_early;
	genpd->domain.ops.restore = pm_genpd_resume;
	genpd->domain.ops.complete = pm_genpd_complete;
	genpd->dev_ops.save_state = pm_genpd_default_save_state;
	genpd->dev_ops.restore_state = pm_genpd_default_restore_state;

	if (genpd->flags & GENPD_FLAG_PM_CLK) {
		genpd->dev_ops.stop = pm_clk_suspend;
		genpd->dev_ops.start = pm_clk_resume;
	}

	mutex_lock(&gpd_list_lock);
	list_add(&genpd->gpd_list_node, &gpd_list);
	mutex_unlock(&gpd_list_lock);
}
EXPORT_SYMBOL_GPL(pm_genpd_init);
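/*
 * For illustration only: a minimal domain definition relying on the defaults
 * set up by pm_genpd_init() above. With GENPD_FLAG_PM_CLK, the stop/start
 * device callbacks are wired to the PM clock helpers. The structure and
 * callbacks are hypothetical; the domain is registered as initially off:
 *
 *	static struct generic_pm_domain foo_pd = {
 *		.name = "foo",
 *		.flags = GENPD_FLAG_PM_CLK,
 *		.power_on = foo_pd_power_on,
 *		.power_off = foo_pd_power_off,
 *	};
 *
 *	pm_genpd_init(&foo_pd, NULL, true);
 */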

#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
/*
 * Device Tree based PM domain providers.
 *
 * The code below implements generic device tree based PM domain providers that
 * bind device tree nodes with generic PM domains registered in the system.
 *
 * Any driver that registers generic PM domains and needs to support binding of
 * devices to these domains is supposed to register a PM domain provider, which
 * maps a PM domain specifier retrieved from the device tree to a PM domain.
 *
 * Two simple mapping functions have been provided for convenience:
 *  - __of_genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
 *  - __of_genpd_xlate_onecell() for mapping of multiple PM domains per node by
 *    index.
 */

/**
 * struct of_genpd_provider - PM domain provider registration structure
 * @link: Entry in global list of PM domain providers
 * @node: Pointer to device tree node of PM domain provider
 * @xlate: Provider-specific xlate callback mapping a set of specifier cells
 *         into a PM domain.
 * @data: context pointer to be passed into @xlate callback
 */
struct of_genpd_provider {
	struct list_head link;
	struct device_node *node;
	genpd_xlate_t xlate;
	void *data;
};

/* List of registered PM domain providers. */
static LIST_HEAD(of_genpd_providers);
/* Mutex to protect the list above. */
static DEFINE_MUTEX(of_genpd_mutex);

/**
 * __of_genpd_xlate_simple() - Xlate function for direct node-domain mapping
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct generic_pm_domain
 *
 * This is a generic xlate function that can be used to model PM domains that
 * have their own device tree nodes. The private data of xlate function needs
 * to be a valid pointer to struct generic_pm_domain.
 */
struct generic_pm_domain *__of_genpd_xlate_simple(
					struct of_phandle_args *genpdspec,
					void *data)
{
	if (genpdspec->args_count != 0)
		return ERR_PTR(-EINVAL);
	return data;
}
EXPORT_SYMBOL_GPL(__of_genpd_xlate_simple);
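/*
 * For illustration only (hypothetical names): a driver with one domain per
 * DT node would typically register it via the of_genpd_add_provider_simple()
 * wrapper, which passes __of_genpd_xlate_simple() as the xlate callback:
 *
 *	pm_genpd_init(&foo_pd, NULL, false);
 *	ret = of_genpd_add_provider_simple(np, &foo_pd);
 */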

/**
 * __of_genpd_xlate_onecell() - Xlate function using a single index.
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct genpd_onecell_data
 *
 * This is a generic xlate function that can be used to model simple PM domain
 * controllers that have one device tree node and provide multiple PM domains.
 * A single cell is used as an index into an array of PM domains specified in
 * the genpd_onecell_data struct when registering the provider.
 */
struct generic_pm_domain *__of_genpd_xlate_onecell(
					struct of_phandle_args *genpdspec,
					void *data)
{
	struct genpd_onecell_data *genpd_data = data;
	unsigned int idx = genpdspec->args[0];

	if (genpdspec->args_count != 1)
		return ERR_PTR(-EINVAL);

	if (idx >= genpd_data->num_domains) {
		pr_err("%s: invalid domain index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	if (!genpd_data->domains[idx])
		return ERR_PTR(-ENOENT);

	return genpd_data->domains[idx];
}
EXPORT_SYMBOL_GPL(__of_genpd_xlate_onecell);
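/*
 * For illustration only (hypothetical names): a controller exposing two
 * domains from a single DT node would fill in a genpd_onecell_data and
 * register it via the of_genpd_add_provider_onecell() wrapper, which passes
 * __of_genpd_xlate_onecell() as the xlate callback:
 *
 *	static struct generic_pm_domain *foo_domains[] = { &pd_a, &pd_b };
 *	static struct genpd_onecell_data foo_onecell = {
 *		.domains = foo_domains,
 *		.num_domains = ARRAY_SIZE(foo_domains),
 *	};
 *
 *	ret = of_genpd_add_provider_onecell(np, &foo_onecell);
 */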

/**
 * __of_genpd_add_provider() - Register a PM domain provider for a node
 * @np: Device node pointer associated with the PM domain provider.
 * @xlate: Callback for decoding PM domain from phandle arguments.
 * @data: Context pointer for @xlate callback.
 */
int __of_genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
			    void *data)
{
	struct of_genpd_provider *cp;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	cp->node = of_node_get(np);
	cp->data = data;
	cp->xlate = xlate;

	mutex_lock(&of_genpd_mutex);
	list_add(&cp->link, &of_genpd_providers);
	mutex_unlock(&of_genpd_mutex);
	pr_debug("Added domain provider from %s\n", np->full_name);

	return 0;
}
EXPORT_SYMBOL_GPL(__of_genpd_add_provider);

/**
 * of_genpd_del_provider() - Remove a previously registered PM domain provider
 * @np: Device node pointer associated with the PM domain provider
 */
void of_genpd_del_provider(struct device_node *np)
{
	struct of_genpd_provider *cp;

	mutex_lock(&of_genpd_mutex);
	list_for_each_entry(cp, &of_genpd_providers, link) {
		if (cp->node == np) {
			list_del(&cp->link);
			of_node_put(cp->node);
			kfree(cp);
			break;
		}
	}
	mutex_unlock(&of_genpd_mutex);
}
EXPORT_SYMBOL_GPL(of_genpd_del_provider);

/**
 * of_genpd_get_from_provider() - Look-up PM domain
 * @genpdspec: OF phandle args to use for look-up
 *
 * Looks for a PM domain provider under the node specified by @genpdspec and if
 * found, uses xlate function of the provider to map phandle args to a PM
 * domain.
 *
 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
 * on failure.
 */
struct generic_pm_domain *of_genpd_get_from_provider(
					struct of_phandle_args *genpdspec)
{
	struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
	struct of_genpd_provider *provider;

	mutex_lock(&of_genpd_mutex);

	/* Check if we have such a provider in our array */
	list_for_each_entry(provider, &of_genpd_providers, link) {
		if (provider->node == genpdspec->np)
			genpd = provider->xlate(genpdspec, provider->data);
		if (!IS_ERR(genpd))
			break;
	}

	mutex_unlock(&of_genpd_mutex);

	return genpd;
}
EXPORT_SYMBOL_GPL(of_genpd_get_from_provider);

/**
 * genpd_dev_pm_detach - Detach a device from its PM domain.
 * @dev: Device to detach.
 * @power_off: Currently not used
 *
 * Try to locate a corresponding generic PM domain, which the device was
 * attached to previously. If such is found, the device is detached from it.
 */
static void genpd_dev_pm_detach(struct device *dev, bool power_off)
{
	struct generic_pm_domain *pd;
	unsigned int i;
	int ret = 0;

	pd = pm_genpd_lookup_dev(dev);
	if (!pd)
		return;

	dev_dbg(dev, "removing from PM domain %s\n", pd->name);

	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
		ret = pm_genpd_remove_device(pd, dev);
		if (ret != -EAGAIN)
			break;

		mdelay(i);
		cond_resched();
	}

	if (ret < 0) {
		dev_err(dev, "failed to remove from PM domain %s: %d\n",
			pd->name, ret);
		return;
	}

	/* Check if PM domain can be powered off after removing this device. */
	genpd_queue_power_off_work(pd);
}

static void genpd_dev_pm_sync(struct device *dev)
{
	struct generic_pm_domain *pd;

	pd = dev_to_genpd(dev);
	if (IS_ERR(pd))
		return;

	genpd_queue_power_off_work(pd);
}

/**
 * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
 * @dev: Device to attach.
 *
 * Parse device's OF node to find a PM domain specifier. If such is found,
 * attaches the device to retrieved pm_domain ops.
 *
 * Both generic and legacy Samsung-specific DT bindings are supported to keep
 * backwards compatibility with existing DTBs.
 *
 * Returns 0 when the PM domain is successfully attached or a negative error
 * code otherwise. Note that if a power-domain exists for the device, but it
 * cannot be found or turned on, then -EPROBE_DEFER is returned to ensure that
 * the device is not probed and to re-try again later.
 */
int genpd_dev_pm_attach(struct device *dev)
{
	struct of_phandle_args pd_args;
	struct generic_pm_domain *pd;
	unsigned int i;
	int ret;

	if (!dev->of_node)
		return -ENODEV;

	if (dev->pm_domain)
		return -EEXIST;

	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
					"#power-domain-cells", 0, &pd_args);
	if (ret < 0) {
		if (ret != -ENOENT)
			return ret;

		/*
		 * Try legacy Samsung-specific bindings
		 * (for backwards compatibility of DT ABI)
		 */
		pd_args.args_count = 0;
		pd_args.np = of_parse_phandle(dev->of_node,
						"samsung,power-domain", 0);
		if (!pd_args.np)
			return -ENOENT;
	}

	pd = of_genpd_get_from_provider(&pd_args);
	of_node_put(pd_args.np);
	if (IS_ERR(pd)) {
		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
			__func__, PTR_ERR(pd));
		return -EPROBE_DEFER;
	}

	dev_dbg(dev, "adding to PM domain %s\n", pd->name);

	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
		ret = pm_genpd_add_device(pd, dev);
		if (ret != -EAGAIN)
			break;

		mdelay(i);
		cond_resched();
	}

	if (ret < 0) {
		dev_err(dev, "failed to add to PM domain %s: %d\n",
			pd->name, ret);
		goto out;
	}

	dev->pm_domain->detach = genpd_dev_pm_detach;
	dev->pm_domain->sync = genpd_dev_pm_sync;
	ret = genpd_poweron(pd);

out:
	return ret ? -EPROBE_DEFER : 0;
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
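/*
 * For illustration only: the generic binding parsed above, with hypothetical
 * nodes. The provider publishes #power-domain-cells and the consumer
 * references it through the "power-domains" property:
 *
 *	power: power-controller@12340000 {
 *		compatible = "foo,power-controller";
 *		#power-domain-cells = <0>;
 *	};
 *
 *	device@12350000 {
 *		compatible = "foo,device";
 *		power-domains = <&power>;
 *	};
 */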
#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */


/***        debugfs support        ***/

#ifdef CONFIG_PM_ADVANCED_DEBUG
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/kobject.h>
static struct dentry *pm_genpd_debugfs_dir;

/*
 * TODO: This function is a slightly modified version of rtpm_status_show
 * from sysfs.c, so generalize it.
 */
static void rtpm_status_str(struct seq_file *s, struct device *dev)
{
	static const char * const status_lookup[] = {
		[RPM_ACTIVE] = "active",
		[RPM_RESUMING] = "resuming",
		[RPM_SUSPENDED] = "suspended",
		[RPM_SUSPENDING] = "suspending"
	};
	const char *p = "";

	if (dev->power.runtime_error)
		p = "error";
	else if (dev->power.disable_depth)
		p = "unsupported";
	else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
		p = status_lookup[dev->power.runtime_status];
	else
		WARN_ON(1);

	seq_puts(s, p);
}

static int pm_genpd_summary_one(struct seq_file *s,
				struct generic_pm_domain *genpd)
{
	static const char * const status_lookup[] = {
		[GPD_STATE_ACTIVE] = "on",
		[GPD_STATE_POWER_OFF] = "off"
	};
	struct pm_domain_data *pm_data;
	const char *kobj_path;
	struct gpd_link *link;
	int ret;

	ret = mutex_lock_interruptible(&genpd->lock);
	if (ret)
		return -ERESTARTSYS;

	if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
		goto exit;
	seq_printf(s, "%-30s  %-15s ", genpd->name,
		   status_lookup[genpd->status]);

	/*
	 * Modifications on the list require holding locks on both
	 * master and slave, so we are safe.
	 * Also genpd->name is immutable.
	 */
	list_for_each_entry(link, &genpd->master_links, master_node) {
		seq_printf(s, "%s", link->slave->name);
		if (!list_is_last(&link->master_node, &genpd->master_links))
			seq_puts(s, ", ");
	}

	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
		kobj_path = kobject_get_path(&pm_data->dev->kobj, GFP_KERNEL);
		if (kobj_path == NULL)
			continue;

		seq_printf(s, "\n    %-50s  ", kobj_path);
		rtpm_status_str(s, pm_data->dev);
		kfree(kobj_path);
	}

	seq_puts(s, "\n");
exit:
	mutex_unlock(&genpd->lock);

	return 0;
}

static int pm_genpd_summary_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	seq_puts(s, "domain                          status          slaves\n");
	seq_puts(s, "    /device                                             runtime status\n");
	seq_puts(s, "----------------------------------------------------------------------\n");

	ret = mutex_lock_interruptible(&gpd_list_lock);
	if (ret)
		return -ERESTARTSYS;

	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
		ret = pm_genpd_summary_one(s, genpd);
		if (ret)
			break;
	}
	mutex_unlock(&gpd_list_lock);

	return ret;
}
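/*
 * For illustration only, based on the seq_printf() formats above: the
 * summary for a hypothetical "soc" domain with one "foo" subdomain and one
 * attached device might read:
 *
 *	domain                          status          slaves
 *	    /device                                             runtime status
 *	----------------------------------------------------------------------
 *	soc                             on              foo
 *	    /devices/platform/foo.0                             suspended
 */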

static int pm_genpd_summary_open(struct inode *inode, struct file *file)
{
	return single_open(file, pm_genpd_summary_show, NULL);
}

static const struct file_operations pm_genpd_summary_fops = {
	.open = pm_genpd_summary_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init pm_genpd_debug_init(void)
{
	struct dentry *d;

	pm_genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);

	if (!pm_genpd_debugfs_dir)
		return -ENOMEM;

	d = debugfs_create_file("pm_genpd_summary", S_IRUGO,
			pm_genpd_debugfs_dir, NULL, &pm_genpd_summary_fops);
	if (!d)
		return -ENOMEM;

	return 0;
}
late_initcall(pm_genpd_debug_init);

static void __exit pm_genpd_debug_exit(void)
{
	debugfs_remove_recursive(pm_genpd_debugfs_dir);
}
__exitcall(pm_genpd_debug_exit);
#endif /* CONFIG_PM_ADVANCED_DEBUG */