1 /*
2 * drivers/base/power/domain.c - Common code related to device power domains.
3 *
4 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
5 *
6 * This file is released under the GPLv2.
7 */
8
9 #include <linux/delay.h>
10 #include <linux/kernel.h>
11 #include <linux/io.h>
12 #include <linux/platform_device.h>
13 #include <linux/pm_runtime.h>
14 #include <linux/pm_domain.h>
15 #include <linux/pm_qos.h>
16 #include <linux/pm_clock.h>
17 #include <linux/slab.h>
18 #include <linux/err.h>
19 #include <linux/sched.h>
20 #include <linux/suspend.h>
21 #include <linux/export.h>
22
23 #define GENPD_RETRY_MAX_MS 250 /* Approximate */
24
25 #define GENPD_DEV_CALLBACK(genpd, type, callback, dev) \
26 ({ \
27 type (*__routine)(struct device *__d); \
28 type __ret = (type)0; \
29 \
30 __routine = genpd->dev_ops.callback; \
31 if (__routine) { \
32 __ret = __routine(dev); \
33 } \
34 __ret; \
35 })
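/*
 * Example (illustrative): GENPD_DEV_CALLBACK(genpd, int, stop, dev) calls
 * genpd->dev_ops.stop(dev) and yields its return value when that callback is
 * set, and evaluates to (int)0 when it is NULL.
 */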
36
37 static LIST_HEAD(gpd_list);
38 static DEFINE_MUTEX(gpd_list_lock);
39
40 /*
41 * Get the generic PM domain for a particular struct device.
42 * This validates the struct device pointer, the PM domain pointer,
43 * and checks that the PM domain pointer is a real generic PM domain.
44 * Any failure results in NULL being returned.
45 */
46 struct generic_pm_domain *pm_genpd_lookup_dev(struct device *dev)
47 {
48 struct generic_pm_domain *genpd = NULL, *gpd;
49
50 if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
51 return NULL;
52
53 mutex_lock(&gpd_list_lock);
54 list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
55 if (&gpd->domain == dev->pm_domain) {
56 genpd = gpd;
57 break;
58 }
59 }
60 mutex_unlock(&gpd_list_lock);
61
62 return genpd;
63 }
64
65 /*
66 * This should only be used where we are certain that the pm_domain
67 * attached to the device is a genpd domain.
68 */
69 static struct generic_pm_domain *dev_to_genpd(struct device *dev)
70 {
71 if (IS_ERR_OR_NULL(dev->pm_domain))
72 return ERR_PTR(-EINVAL);
73
74 return pd_to_genpd(dev->pm_domain);
75 }
76
77 static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
78 {
79 return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
80 }
81
82 static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
83 {
84 return GENPD_DEV_CALLBACK(genpd, int, start, dev);
85 }
86
87 static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
88 {
89 bool ret = false;
90
91 if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
92 ret = !!atomic_dec_and_test(&genpd->sd_count);
93
94 return ret;
95 }
96
97 static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
98 {
99 atomic_inc(&genpd->sd_count);
100 smp_mb__after_atomic();
101 }
102
103 static int genpd_power_on(struct generic_pm_domain *genpd, bool timed)
104 {
105 ktime_t time_start;
106 s64 elapsed_ns;
107 int ret;
108
109 if (!genpd->power_on)
110 return 0;
111
112 if (!timed)
113 return genpd->power_on(genpd);
114
115 time_start = ktime_get();
116 ret = genpd->power_on(genpd);
117 if (ret)
118 return ret;
119
120 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
121 if (elapsed_ns <= genpd->power_on_latency_ns)
122 return ret;
123
124 genpd->power_on_latency_ns = elapsed_ns;
125 genpd->max_off_time_changed = true;
126 pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
127 genpd->name, "on", elapsed_ns);
128
129 return ret;
130 }
131
132 static int genpd_power_off(struct generic_pm_domain *genpd, bool timed)
133 {
134 ktime_t time_start;
135 s64 elapsed_ns;
136 int ret;
137
138 if (!genpd->power_off)
139 return 0;
140
141 if (!timed)
142 return genpd->power_off(genpd);
143
144 time_start = ktime_get();
145 ret = genpd->power_off(genpd);
146 if (ret == -EBUSY)
147 return ret;
148
149 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
150 if (elapsed_ns <= genpd->power_off_latency_ns)
151 return ret;
152
153 genpd->power_off_latency_ns = elapsed_ns;
154 genpd->max_off_time_changed = true;
155 pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
156 genpd->name, "off", elapsed_ns);
157
158 return ret;
159 }
160
161 /**
162 * genpd_queue_power_off_work - Queue up the execution of genpd_poweroff().
163 * @genpd: PM domain to power off.
164 *
165 * Queue up the execution of genpd_poweroff() unless it's already been done
166 * before.
167 */
168 static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
169 {
170 queue_work(pm_wq, &genpd->power_off_work);
171 }
172
173 static int genpd_poweron(struct generic_pm_domain *genpd);
174
175 /**
176 * __genpd_poweron - Restore power to a given PM domain and its masters.
177 * @genpd: PM domain to power up.
178 *
179 * Restore power to @genpd and all of its masters so that it is possible to
180 * resume a device belonging to it.
181 */
182 static int __genpd_poweron(struct generic_pm_domain *genpd)
183 {
184 struct gpd_link *link;
185 int ret = 0;
186
187 if (genpd->status == GPD_STATE_ACTIVE
188 || (genpd->prepared_count > 0 && genpd->suspend_power_off))
189 return 0;
190
191 /*
192 * The list is guaranteed not to change while the loop below is being
193 * executed, unless one of the masters' .power_on() callbacks fiddles
194 * with it.
195 */
196 list_for_each_entry(link, &genpd->slave_links, slave_node) {
197 genpd_sd_counter_inc(link->master);
198
199 ret = genpd_poweron(link->master);
200 if (ret) {
201 genpd_sd_counter_dec(link->master);
202 goto err;
203 }
204 }
205
206 ret = genpd_power_on(genpd, true);
207 if (ret)
208 goto err;
209
210 genpd->status = GPD_STATE_ACTIVE;
211 return 0;
212
213 err:
214 list_for_each_entry_continue_reverse(link,
215 &genpd->slave_links,
216 slave_node) {
217 genpd_sd_counter_dec(link->master);
218 genpd_queue_power_off_work(link->master);
219 }
220
221 return ret;
222 }
223
224 /**
225 * genpd_poweron - Restore power to a given PM domain and its masters.
226 * @genpd: PM domain to power up.
227 */
228 static int genpd_poweron(struct generic_pm_domain *genpd)
229 {
230 int ret;
231
232 mutex_lock(&genpd->lock);
233 ret = __genpd_poweron(genpd);
234 mutex_unlock(&genpd->lock);
235 return ret;
236 }
237
238 static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
239 {
240 return GENPD_DEV_CALLBACK(genpd, int, save_state, dev);
241 }
242
243 static int genpd_restore_dev(struct generic_pm_domain *genpd,
244 struct device *dev)
245 {
246 return GENPD_DEV_CALLBACK(genpd, int, restore_state, dev);
247 }
248
249 static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
250 unsigned long val, void *ptr)
251 {
252 struct generic_pm_domain_data *gpd_data;
253 struct device *dev;
254
255 gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
256 dev = gpd_data->base.dev;
257
258 for (;;) {
259 struct generic_pm_domain *genpd;
260 struct pm_domain_data *pdd;
261
262 spin_lock_irq(&dev->power.lock);
263
264 pdd = dev->power.subsys_data ?
265 dev->power.subsys_data->domain_data : NULL;
266 if (pdd && pdd->dev) {
267 to_gpd_data(pdd)->td.constraint_changed = true;
268 genpd = dev_to_genpd(dev);
269 } else {
270 genpd = ERR_PTR(-ENODATA);
271 }
272
273 spin_unlock_irq(&dev->power.lock);
274
275 if (!IS_ERR(genpd)) {
276 mutex_lock(&genpd->lock);
277 genpd->max_off_time_changed = true;
278 mutex_unlock(&genpd->lock);
279 }
280
281 dev = dev->parent;
282 if (!dev || dev->power.ignore_children)
283 break;
284 }
285
286 return NOTIFY_DONE;
287 }
288
289 /**
290 * genpd_poweroff - Remove power from a given PM domain.
291 * @genpd: PM domain to power down.
292 * @is_async: PM domain is powered down from a scheduled work item
293 *
294 * If all of the @genpd's devices have been suspended and all of its subdomains
295 * have been powered down, remove power from @genpd.
296 */
297 static int genpd_poweroff(struct generic_pm_domain *genpd, bool is_async)
298 {
299 struct pm_domain_data *pdd;
300 struct gpd_link *link;
301 unsigned int not_suspended = 0;
302
303 /*
304 * Do not try to power off the domain in the following situations:
305 * (1) The domain is already in the "power off" state.
306 * (2) System suspend is in progress.
307 */
308 if (genpd->status == GPD_STATE_POWER_OFF
309 || genpd->prepared_count > 0)
310 return 0;
311
312 if (atomic_read(&genpd->sd_count) > 0)
313 return -EBUSY;
314
315 list_for_each_entry(pdd, &genpd->dev_list, list_node) {
316 enum pm_qos_flags_status stat;
317
318 stat = dev_pm_qos_flags(pdd->dev,
319 PM_QOS_FLAG_NO_POWER_OFF
320 | PM_QOS_FLAG_REMOTE_WAKEUP);
321 if (stat > PM_QOS_FLAGS_NONE)
322 return -EBUSY;
323
324 if (!pm_runtime_suspended(pdd->dev) || pdd->dev->power.irq_safe)
325 not_suspended++;
326 }
327
328 if (not_suspended > 1 || (not_suspended == 1 && is_async))
329 return -EBUSY;
330
331 if (genpd->gov && genpd->gov->power_down_ok) {
332 if (!genpd->gov->power_down_ok(&genpd->domain))
333 return -EAGAIN;
334 }
335
336 if (genpd->power_off) {
337 int ret;
338
339 if (atomic_read(&genpd->sd_count) > 0)
340 return -EBUSY;
341
342 /*
343 * If sd_count > 0 at this point, one of the subdomains hasn't
344 * managed to call genpd_poweron() for the master yet after
345 * incrementing it. In that case genpd_poweron() will wait
346 * for us to drop the lock, so we can call .power_off() and let
347 * the genpd_poweron() restore power for us (this shouldn't
348 * happen very often).
349 */
350 ret = genpd_power_off(genpd, true);
351 if (ret)
352 return ret;
353 }
354
355 genpd->status = GPD_STATE_POWER_OFF;
356
357 list_for_each_entry(link, &genpd->slave_links, slave_node) {
358 genpd_sd_counter_dec(link->master);
359 genpd_queue_power_off_work(link->master);
360 }
361
362 return 0;
363 }
364
365 /**
366 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
367 * @work: Work structure used for scheduling the execution of this function.
368 */
369 static void genpd_power_off_work_fn(struct work_struct *work)
370 {
371 struct generic_pm_domain *genpd;
372
373 genpd = container_of(work, struct generic_pm_domain, power_off_work);
374
375 mutex_lock(&genpd->lock);
376 genpd_poweroff(genpd, true);
377 mutex_unlock(&genpd->lock);
378 }
379
380 /**
381 * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
382 * @dev: Device to suspend.
383 *
384 * Carry out a runtime suspend of a device under the assumption that its
385 * pm_domain field points to the domain member of an object of type
386 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
387 */
388 static int pm_genpd_runtime_suspend(struct device *dev)
389 {
390 struct generic_pm_domain *genpd;
391 bool (*stop_ok)(struct device *__dev);
392 struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
393 ktime_t time_start;
394 s64 elapsed_ns;
395 int ret;
396
397 dev_dbg(dev, "%s()\n", __func__);
398
399 genpd = dev_to_genpd(dev);
400 if (IS_ERR(genpd))
401 return -EINVAL;
402
403 stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
404 if (stop_ok && !stop_ok(dev))
405 return -EBUSY;
406
407 /* Measure suspend latency. */
408 time_start = ktime_get();
409
410 ret = genpd_save_dev(genpd, dev);
411 if (ret)
412 return ret;
413
414 ret = genpd_stop_dev(genpd, dev);
415 if (ret) {
416 genpd_restore_dev(genpd, dev);
417 return ret;
418 }
419
420 /* Update suspend latency value if the measured time exceeds it. */
421 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
422 if (elapsed_ns > td->suspend_latency_ns) {
423 td->suspend_latency_ns = elapsed_ns;
424 dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
425 elapsed_ns);
426 genpd->max_off_time_changed = true;
427 td->constraint_changed = true;
428 }
429
430 /*
431 * If power.irq_safe is set, this routine will be run with interrupts
432 * off, so it can't use mutexes.
433 */
434 if (dev->power.irq_safe)
435 return 0;
436
437 mutex_lock(&genpd->lock);
438 genpd_poweroff(genpd, false);
439 mutex_unlock(&genpd->lock);
440
441 return 0;
442 }
443
444 /**
445 * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
446 * @dev: Device to resume.
447 *
448 * Carry out a runtime resume of a device under the assumption that its
449 * pm_domain field points to the domain member of an object of type
450 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
451 */
452 static int pm_genpd_runtime_resume(struct device *dev)
453 {
454 struct generic_pm_domain *genpd;
455 struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
456 ktime_t time_start;
457 s64 elapsed_ns;
458 int ret;
459 bool timed = true;
460
461 dev_dbg(dev, "%s()\n", __func__);
462
463 genpd = dev_to_genpd(dev);
464 if (IS_ERR(genpd))
465 return -EINVAL;
466
467 /* If power.irq_safe, the PM domain is never powered off. */
468 if (dev->power.irq_safe) {
469 timed = false;
470 goto out;
471 }
472
473 mutex_lock(&genpd->lock);
474 ret = __genpd_poweron(genpd);
475 mutex_unlock(&genpd->lock);
476
477 if (ret)
478 return ret;
479
480 out:
481 /* Measure resume latency. */
482 if (timed)
483 time_start = ktime_get();
484
485 genpd_start_dev(genpd, dev);
486 genpd_restore_dev(genpd, dev);
487
488 /* Update resume latency value if the measured time exceeds it. */
489 if (timed) {
490 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
491 if (elapsed_ns > td->resume_latency_ns) {
492 td->resume_latency_ns = elapsed_ns;
493 dev_dbg(dev, "resume latency exceeded, %lld ns\n",
494 elapsed_ns);
495 genpd->max_off_time_changed = true;
496 td->constraint_changed = true;
497 }
498 }
499
500 return 0;
501 }
502
503 static bool pd_ignore_unused;
504 static int __init pd_ignore_unused_setup(char *__unused)
505 {
506 pd_ignore_unused = true;
507 return 1;
508 }
509 __setup("pd_ignore_unused", pd_ignore_unused_setup);
510
511 /**
512 * genpd_poweroff_unused - Power off all PM domains with no devices in use.
513 */
514 static int __init genpd_poweroff_unused(void)
515 {
516 struct generic_pm_domain *genpd;
517
518 if (pd_ignore_unused) {
519 pr_warn("genpd: Not disabling unused power domains\n");
520 return 0;
521 }
522
523 mutex_lock(&gpd_list_lock);
524
525 list_for_each_entry(genpd, &gpd_list, gpd_list_node)
526 genpd_queue_power_off_work(genpd);
527
528 mutex_unlock(&gpd_list_lock);
529
530 return 0;
531 }
532 late_initcall(genpd_poweroff_unused);
533
534 #ifdef CONFIG_PM_SLEEP
535
536 /**
537 * pm_genpd_present - Check if the given PM domain has been initialized.
538 * @genpd: PM domain to check.
539 */
540 static bool pm_genpd_present(const struct generic_pm_domain *genpd)
541 {
542 const struct generic_pm_domain *gpd;
543
544 if (IS_ERR_OR_NULL(genpd))
545 return false;
546
547 list_for_each_entry(gpd, &gpd_list, gpd_list_node)
548 if (gpd == genpd)
549 return true;
550
551 return false;
552 }
553
554 static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
555 struct device *dev)
556 {
557 return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
558 }
559
560 /**
561 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
562 * @genpd: PM domain to power off, if possible.
563 * @timed: True if latency measurements are allowed.
564 *
565 * Check if the given PM domain can be powered off (during system suspend or
566 * hibernation) and do that if so. Also, in that case propagate to its masters.
567 *
568 * This function is only called in "noirq" and "syscore" stages of system power
569 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
570 * executed sequentially, so it is guaranteed that it will never run twice in
571 * parallel).
572 */
573 static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd,
574 bool timed)
575 {
576 struct gpd_link *link;
577
578 if (genpd->status == GPD_STATE_POWER_OFF)
579 return;
580
581 if (genpd->suspended_count != genpd->device_count
582 || atomic_read(&genpd->sd_count) > 0)
583 return;
584
585 genpd_power_off(genpd, timed);
586
587 genpd->status = GPD_STATE_POWER_OFF;
588
589 list_for_each_entry(link, &genpd->slave_links, slave_node) {
590 genpd_sd_counter_dec(link->master);
591 pm_genpd_sync_poweroff(link->master, timed);
592 }
593 }
594
595 /**
596 * pm_genpd_sync_poweron - Synchronously power on a PM domain and its masters.
597 * @genpd: PM domain to power on.
598 * @timed: True if latency measurements are allowed.
599 *
600 * This function is only called in "noirq" and "syscore" stages of system power
601 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
602 * executed sequentially, so it is guaranteed that it will never run twice in
603 * parallel).
604 */
605 static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd,
606 bool timed)
607 {
608 struct gpd_link *link;
609
610 if (genpd->status == GPD_STATE_ACTIVE)
611 return;
612
613 list_for_each_entry(link, &genpd->slave_links, slave_node) {
614 pm_genpd_sync_poweron(link->master, timed);
615 genpd_sd_counter_inc(link->master);
616 }
617
618 genpd_power_on(genpd, timed);
619
620 genpd->status = GPD_STATE_ACTIVE;
621 }
622
623 /**
624 * resume_needed - Check whether to resume a device before system suspend.
625 * @dev: Device to check.
626 * @genpd: PM domain the device belongs to.
627 *
628 * There are two cases in which a device that can wake up the system from sleep
629 * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
630 * to wake up the system and it has to remain active for this purpose while the
631 * system is in the sleep state and (2) if the device is not enabled to wake up
632 * the system from sleep states and it generally doesn't generate wakeup signals
633 * by itself (those signals are generated on its behalf by other parts of the
634 * system). In the latter case it may be necessary to reconfigure the device's
635 * wakeup settings during system suspend, because it may have been set up to
636 * signal remote wakeup from the system's working state as needed by runtime PM.
637 * Return 'true' in either of the above cases.
638 */
639 static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
640 {
641 bool active_wakeup;
642
643 if (!device_can_wakeup(dev))
644 return false;
645
646 active_wakeup = genpd_dev_active_wakeup(genpd, dev);
647 return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
648 }
649
650 /**
651 * pm_genpd_prepare - Start power transition of a device in a PM domain.
652 * @dev: Device to start the transition of.
653 *
654 * Start a power transition of a device (during a system-wide power transition)
655 * under the assumption that its pm_domain field points to the domain member of
656 * an object of type struct generic_pm_domain representing a PM domain
657 * consisting of I/O devices.
658 */
659 static int pm_genpd_prepare(struct device *dev)
660 {
661 struct generic_pm_domain *genpd;
662 int ret;
663
664 dev_dbg(dev, "%s()\n", __func__);
665
666 genpd = dev_to_genpd(dev);
667 if (IS_ERR(genpd))
668 return -EINVAL;
669
670 /*
671 * If a wakeup request is pending for the device, it should be woken up
672 * at this point and a system wakeup event should be reported if it's
673 * set up to wake up the system from sleep states.
674 */
675 pm_runtime_get_noresume(dev);
676 if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
677 pm_wakeup_event(dev, 0);
678
679 if (pm_wakeup_pending()) {
680 pm_runtime_put(dev);
681 return -EBUSY;
682 }
683
684 if (resume_needed(dev, genpd))
685 pm_runtime_resume(dev);
686
687 mutex_lock(&genpd->lock);
688
689 if (genpd->prepared_count++ == 0) {
690 genpd->suspended_count = 0;
691 genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
692 }
693
694 mutex_unlock(&genpd->lock);
695
696 if (genpd->suspend_power_off) {
697 pm_runtime_put_noidle(dev);
698 return 0;
699 }
700
701 /*
702 * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
703 * so genpd_poweron() will return immediately, but if the device
704 * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
705 * to make it operational.
706 */
707 pm_runtime_resume(dev);
708 __pm_runtime_disable(dev, false);
709
710 ret = pm_generic_prepare(dev);
711 if (ret) {
712 mutex_lock(&genpd->lock);
713
714 if (--genpd->prepared_count == 0)
715 genpd->suspend_power_off = false;
716
717 mutex_unlock(&genpd->lock);
718 pm_runtime_enable(dev);
719 }
720
721 pm_runtime_put(dev);
722 return ret;
723 }
724
725 /**
726 * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
727 * @dev: Device to suspend.
728 *
729 * Suspend a device under the assumption that its pm_domain field points to the
730 * domain member of an object of type struct generic_pm_domain representing
731 * a PM domain consisting of I/O devices.
732 */
733 static int pm_genpd_suspend(struct device *dev)
734 {
735 struct generic_pm_domain *genpd;
736
737 dev_dbg(dev, "%s()\n", __func__);
738
739 genpd = dev_to_genpd(dev);
740 if (IS_ERR(genpd))
741 return -EINVAL;
742
743 return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
744 }
745
746 /**
747 * pm_genpd_suspend_late - Late suspend of a device from an I/O PM domain.
748 * @dev: Device to suspend.
749 *
750 * Carry out a late suspend of a device under the assumption that its
751 * pm_domain field points to the domain member of an object of type
752 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
753 */
754 static int pm_genpd_suspend_late(struct device *dev)
755 {
756 struct generic_pm_domain *genpd;
757
758 dev_dbg(dev, "%s()\n", __func__);
759
760 genpd = dev_to_genpd(dev);
761 if (IS_ERR(genpd))
762 return -EINVAL;
763
764 return genpd->suspend_power_off ? 0 : pm_generic_suspend_late(dev);
765 }
766
767 /**
768 * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
769 * @dev: Device to suspend.
770 *
771 * Stop the device and remove power from the domain if all devices in it have
772 * been stopped.
773 */
774 static int pm_genpd_suspend_noirq(struct device *dev)
775 {
776 struct generic_pm_domain *genpd;
777
778 dev_dbg(dev, "%s()\n", __func__);
779
780 genpd = dev_to_genpd(dev);
781 if (IS_ERR(genpd))
782 return -EINVAL;
783
784 if (genpd->suspend_power_off
785 || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
786 return 0;
787
788 genpd_stop_dev(genpd, dev);
789
790 /*
791 * Since all of the "noirq" callbacks are executed sequentially, it is
792 * guaranteed that this function will never run twice in parallel for
793 * the same PM domain, so it is not necessary to use locking here.
794 */
795 genpd->suspended_count++;
796 pm_genpd_sync_poweroff(genpd, true);
797
798 return 0;
799 }
800
801 /**
802 * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
803 * @dev: Device to resume.
804 *
805 * Restore power to the device's PM domain, if necessary, and start the device.
806 */
807 static int pm_genpd_resume_noirq(struct device *dev)
808 {
809 struct generic_pm_domain *genpd;
810
811 dev_dbg(dev, "%s()\n", __func__);
812
813 genpd = dev_to_genpd(dev);
814 if (IS_ERR(genpd))
815 return -EINVAL;
816
817 if (genpd->suspend_power_off
818 || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
819 return 0;
820
821 /*
822 * Since all of the "noirq" callbacks are executed sequentially, it is
823 * guaranteed that this function will never run twice in parallel for
824 * the same PM domain, so it is not necessary to use locking here.
825 */
826 pm_genpd_sync_poweron(genpd, true);
827 genpd->suspended_count--;
828
829 return genpd_start_dev(genpd, dev);
830 }
831
832 /**
833 * pm_genpd_resume_early - Early resume of a device in an I/O PM domain.
834 * @dev: Device to resume.
835 *
836 * Carry out an early resume of a device under the assumption that its
837 * pm_domain field points to the domain member of an object of type
838 * struct generic_pm_domain representing a power domain consisting of I/O
839 * devices.
840 */
841 static int pm_genpd_resume_early(struct device *dev)
842 {
843 struct generic_pm_domain *genpd;
844
845 dev_dbg(dev, "%s()\n", __func__);
846
847 genpd = dev_to_genpd(dev);
848 if (IS_ERR(genpd))
849 return -EINVAL;
850
851 return genpd->suspend_power_off ? 0 : pm_generic_resume_early(dev);
852 }
853
854 /**
855 * pm_genpd_resume - Resume of device in an I/O PM domain.
856 * @dev: Device to resume.
857 *
858 * Resume a device under the assumption that its pm_domain field points to the
859 * domain member of an object of type struct generic_pm_domain representing
860 * a power domain consisting of I/O devices.
861 */
862 static int pm_genpd_resume(struct device *dev)
863 {
864 struct generic_pm_domain *genpd;
865
866 dev_dbg(dev, "%s()\n", __func__);
867
868 genpd = dev_to_genpd(dev);
869 if (IS_ERR(genpd))
870 return -EINVAL;
871
872 return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
873 }
874
875 /**
876 * pm_genpd_freeze - Freezing a device in an I/O PM domain.
877 * @dev: Device to freeze.
878 *
879 * Freeze a device under the assumption that its pm_domain field points to the
880 * domain member of an object of type struct generic_pm_domain representing
881 * a power domain consisting of I/O devices.
882 */
883 static int pm_genpd_freeze(struct device *dev)
884 {
885 struct generic_pm_domain *genpd;
886
887 dev_dbg(dev, "%s()\n", __func__);
888
889 genpd = dev_to_genpd(dev);
890 if (IS_ERR(genpd))
891 return -EINVAL;
892
893 return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
894 }
895
896 /**
897 * pm_genpd_freeze_late - Late freeze of a device in an I/O PM domain.
898 * @dev: Device to freeze.
899 *
900 * Carry out a late freeze of a device under the assumption that its
901 * pm_domain field points to the domain member of an object of type
902 * struct generic_pm_domain representing a power domain consisting of I/O
903 * devices.
904 */
905 static int pm_genpd_freeze_late(struct device *dev)
906 {
907 struct generic_pm_domain *genpd;
908
909 dev_dbg(dev, "%s()\n", __func__);
910
911 genpd = dev_to_genpd(dev);
912 if (IS_ERR(genpd))
913 return -EINVAL;
914
915 return genpd->suspend_power_off ? 0 : pm_generic_freeze_late(dev);
916 }
917
918 /**
919 * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
920 * @dev: Device to freeze.
921 *
922 * Complete the freezing of a device under the assumption that its
923 * pm_domain field points to the domain member of an object of type
924 * struct generic_pm_domain representing a power domain consisting of I/O
925 * devices.
926 */
927 static int pm_genpd_freeze_noirq(struct device *dev)
928 {
929 struct generic_pm_domain *genpd;
930
931 dev_dbg(dev, "%s()\n", __func__);
932
933 genpd = dev_to_genpd(dev);
934 if (IS_ERR(genpd))
935 return -EINVAL;
936
937 return genpd->suspend_power_off ? 0 : genpd_stop_dev(genpd, dev);
938 }
939
940 /**
941 * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
942 * @dev: Device to thaw.
943 *
944 * Start the device, unless power has been removed from the domain already
945 * before the system transition.
946 */
947 static int pm_genpd_thaw_noirq(struct device *dev)
948 {
949 struct generic_pm_domain *genpd;
950
951 dev_dbg(dev, "%s()\n", __func__);
952
953 genpd = dev_to_genpd(dev);
954 if (IS_ERR(genpd))
955 return -EINVAL;
956
957 return genpd->suspend_power_off ?
958 0 : genpd_start_dev(genpd, dev);
959 }
960
961 /**
962 * pm_genpd_thaw_early - Early thaw of device in an I/O PM domain.
963 * @dev: Device to thaw.
964 *
965 * Carry out an early thaw of a device under the assumption that its
966 * pm_domain field points to the domain member of an object of type
967 * struct generic_pm_domain representing a power domain consisting of I/O
968 * devices.
969 */
970 static int pm_genpd_thaw_early(struct device *dev)
971 {
972 struct generic_pm_domain *genpd;
973
974 dev_dbg(dev, "%s()\n", __func__);
975
976 genpd = dev_to_genpd(dev);
977 if (IS_ERR(genpd))
978 return -EINVAL;
979
980 return genpd->suspend_power_off ? 0 : pm_generic_thaw_early(dev);
981 }
982
983 /**
984 * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
985 * @dev: Device to thaw.
986 *
987 * Thaw a device under the assumption that its pm_domain field points to the
988 * domain member of an object of type struct generic_pm_domain representing
989 * a power domain consisting of I/O devices.
990 */
991 static int pm_genpd_thaw(struct device *dev)
992 {
993 struct generic_pm_domain *genpd;
994
995 dev_dbg(dev, "%s()\n", __func__);
996
997 genpd = dev_to_genpd(dev);
998 if (IS_ERR(genpd))
999 return -EINVAL;
1000
1001 return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
1002 }
1003
1004 /**
1005 * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
1006 * @dev: Device to resume.
1007 *
1008 * Make sure the domain will be in the same power state as before the
1009 * hibernation the system is resuming from and start the device if necessary.
1010 */
1011 static int pm_genpd_restore_noirq(struct device *dev)
1012 {
1013 struct generic_pm_domain *genpd;
1014
1015 dev_dbg(dev, "%s()\n", __func__);
1016
1017 genpd = dev_to_genpd(dev);
1018 if (IS_ERR(genpd))
1019 return -EINVAL;
1020
1021 /*
1022 * Since all of the "noirq" callbacks are executed sequentially, it is
1023 * guaranteed that this function will never run twice in parallel for
1024 * the same PM domain, so it is not necessary to use locking here.
1025 *
1026 * At this point suspended_count == 0 means we are being run for the
1027 * first time for the given domain in the present cycle.
1028 */
1029 if (genpd->suspended_count++ == 0) {
1030 /*
1031 * The boot kernel might put the domain into arbitrary state,
1032 * so make it appear as powered off to pm_genpd_sync_poweron(),
1033 * so that it tries to power it on in case it was really off.
1034 */
1035 genpd->status = GPD_STATE_POWER_OFF;
1036 if (genpd->suspend_power_off) {
1037 /*
1038 * If the domain was off before the hibernation, make
1039 * sure it will be off going forward.
1040 */
1041 genpd_power_off(genpd, true);
1042
1043 return 0;
1044 }
1045 }
1046
1047 if (genpd->suspend_power_off)
1048 return 0;
1049
1050 pm_genpd_sync_poweron(genpd, true);
1051
1052 return genpd_start_dev(genpd, dev);
1053 }
1054
1055 /**
1056 * pm_genpd_complete - Complete power transition of a device in a power domain.
1057 * @dev: Device to complete the transition of.
1058 *
1059 * Complete a power transition of a device (during a system-wide power
1060 * transition) under the assumption that its pm_domain field points to the
1061 * domain member of an object of type struct generic_pm_domain representing
1062 * a power domain consisting of I/O devices.
1063 */
1064 static void pm_genpd_complete(struct device *dev)
1065 {
1066 struct generic_pm_domain *genpd;
1067 bool run_complete;
1068
1069 dev_dbg(dev, "%s()\n", __func__);
1070
1071 genpd = dev_to_genpd(dev);
1072 if (IS_ERR(genpd))
1073 return;
1074
1075 mutex_lock(&genpd->lock);
1076
1077 run_complete = !genpd->suspend_power_off;
1078 if (--genpd->prepared_count == 0)
1079 genpd->suspend_power_off = false;
1080
1081 mutex_unlock(&genpd->lock);
1082
1083 if (run_complete) {
1084 pm_generic_complete(dev);
1085 pm_runtime_set_active(dev);
1086 pm_runtime_enable(dev);
1087 pm_request_idle(dev);
1088 }
1089 }
1090
1091 /**
1092 * genpd_syscore_switch - Switch power during system core suspend or resume.
1093 * @dev: Device that normally is marked as "always on" to switch power for.
1094 *
1095 * This routine may only be called during the system core (syscore) suspend or
1096 * resume phase for devices whose "always on" flags are set.
1097 */
1098 static void genpd_syscore_switch(struct device *dev, bool suspend)
1099 {
1100 struct generic_pm_domain *genpd;
1101
1102 genpd = dev_to_genpd(dev);
1103 if (!pm_genpd_present(genpd))
1104 return;
1105
1106 if (suspend) {
1107 genpd->suspended_count++;
1108 pm_genpd_sync_poweroff(genpd, false);
1109 } else {
1110 pm_genpd_sync_poweron(genpd, false);
1111 genpd->suspended_count--;
1112 }
1113 }
1114
1115 void pm_genpd_syscore_poweroff(struct device *dev)
1116 {
1117 genpd_syscore_switch(dev, true);
1118 }
1119 EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff);
1120
1121 void pm_genpd_syscore_poweron(struct device *dev)
1122 {
1123 genpd_syscore_switch(dev, false);
1124 }
1125 EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);
1126
1127 #else /* !CONFIG_PM_SLEEP */
1128
1129 #define pm_genpd_prepare NULL
1130 #define pm_genpd_suspend NULL
1131 #define pm_genpd_suspend_late NULL
1132 #define pm_genpd_suspend_noirq NULL
1133 #define pm_genpd_resume_early NULL
1134 #define pm_genpd_resume_noirq NULL
1135 #define pm_genpd_resume NULL
1136 #define pm_genpd_freeze NULL
1137 #define pm_genpd_freeze_late NULL
1138 #define pm_genpd_freeze_noirq NULL
1139 #define pm_genpd_thaw_early NULL
1140 #define pm_genpd_thaw_noirq NULL
1141 #define pm_genpd_thaw NULL
1142 #define pm_genpd_restore_noirq NULL
1143 #define pm_genpd_complete NULL
1144
1145 #endif /* CONFIG_PM_SLEEP */
1146
1147 static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
1148 struct generic_pm_domain *genpd,
1149 struct gpd_timing_data *td)
1150 {
1151 struct generic_pm_domain_data *gpd_data;
1152 int ret;
1153
1154 ret = dev_pm_get_subsys_data(dev);
1155 if (ret)
1156 return ERR_PTR(ret);
1157
1158 gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
1159 if (!gpd_data) {
1160 ret = -ENOMEM;
1161 goto err_put;
1162 }
1163
1164 if (td)
1165 gpd_data->td = *td;
1166
1167 gpd_data->base.dev = dev;
1168 gpd_data->td.constraint_changed = true;
1169 gpd_data->td.effective_constraint_ns = -1;
1170 gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
1171
1172 spin_lock_irq(&dev->power.lock);
1173
1174 if (dev->power.subsys_data->domain_data) {
1175 ret = -EINVAL;
1176 goto err_free;
1177 }
1178
1179 dev->power.subsys_data->domain_data = &gpd_data->base;
1180 dev->pm_domain = &genpd->domain;
1181
1182 spin_unlock_irq(&dev->power.lock);
1183
1184 return gpd_data;
1185
1186 err_free:
1187 spin_unlock_irq(&dev->power.lock);
1188 kfree(gpd_data);
1189 err_put:
1190 dev_pm_put_subsys_data(dev);
1191 return ERR_PTR(ret);
1192 }
1193
1194 static void genpd_free_dev_data(struct device *dev,
1195 struct generic_pm_domain_data *gpd_data)
1196 {
1197 spin_lock_irq(&dev->power.lock);
1198
1199 dev->pm_domain = NULL;
1200 dev->power.subsys_data->domain_data = NULL;
1201
1202 spin_unlock_irq(&dev->power.lock);
1203
1204 kfree(gpd_data);
1205 dev_pm_put_subsys_data(dev);
1206 }
1207
1208 /**
1209 * __pm_genpd_add_device - Add a device to an I/O PM domain.
1210 * @genpd: PM domain to add the device to.
1211 * @dev: Device to be added.
1212 * @td: Set of PM QoS timing parameters to attach to the device.
1213 */
1214 int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1215 struct gpd_timing_data *td)
1216 {
1217 struct generic_pm_domain_data *gpd_data;
1218 int ret = 0;
1219
1220 dev_dbg(dev, "%s()\n", __func__);
1221
1222 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
1223 return -EINVAL;
1224
1225 gpd_data = genpd_alloc_dev_data(dev, genpd, td);
1226 if (IS_ERR(gpd_data))
1227 return PTR_ERR(gpd_data);
1228
1229 mutex_lock(&genpd->lock);
1230
1231 if (genpd->prepared_count > 0) {
1232 ret = -EAGAIN;
1233 goto out;
1234 }
1235
1236 ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
1237 if (ret)
1238 goto out;
1239
1240 genpd->device_count++;
1241 genpd->max_off_time_changed = true;
1242
1243 list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
1244
1245 out:
1246 mutex_unlock(&genpd->lock);
1247
1248 if (ret)
1249 genpd_free_dev_data(dev, gpd_data);
1250 else
1251 dev_pm_qos_add_notifier(dev, &gpd_data->nb);
1252
1253 return ret;
1254 }
1255
1256 /**
1257 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
1258 * @genpd: PM domain to remove the device from.
1259 * @dev: Device to be removed.
1260 */
1261 int pm_genpd_remove_device(struct generic_pm_domain *genpd,
1262 struct device *dev)
1263 {
1264 struct generic_pm_domain_data *gpd_data;
1265 struct pm_domain_data *pdd;
1266 int ret = 0;
1267
1268 dev_dbg(dev, "%s()\n", __func__);
1269
1270 if (!genpd || genpd != pm_genpd_lookup_dev(dev))
1271 return -EINVAL;
1272
1273 /* The above validation also means we have existing domain_data. */
1274 pdd = dev->power.subsys_data->domain_data;
1275 gpd_data = to_gpd_data(pdd);
1276 dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
1277
1278 mutex_lock(&genpd->lock);
1279
1280 if (genpd->prepared_count > 0) {
1281 ret = -EAGAIN;
1282 goto out;
1283 }
1284
1285 genpd->device_count--;
1286 genpd->max_off_time_changed = true;
1287
1288 if (genpd->detach_dev)
1289 genpd->detach_dev(genpd, dev);
1290
1291 list_del_init(&pdd->list_node);
1292
1293 mutex_unlock(&genpd->lock);
1294
1295 genpd_free_dev_data(dev, gpd_data);
1296
1297 return 0;
1298
1299 out:
1300 mutex_unlock(&genpd->lock);
1301 dev_pm_qos_add_notifier(dev, &gpd_data->nb);
1302
1303 return ret;
1304 }
1305
1306 /**
1307 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1308 * @genpd: Master PM domain to add the subdomain to.
1309 * @subdomain: Subdomain to be added.
1310 */
1311 int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
1312 struct generic_pm_domain *subdomain)
1313 {
1314 struct gpd_link *link, *itr;
1315 int ret = 0;
1316
1317 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
1318 || genpd == subdomain)
1319 return -EINVAL;
1320
1321 link = kzalloc(sizeof(*link), GFP_KERNEL);
1322 if (!link)
1323 return -ENOMEM;
1324
1325 mutex_lock(&genpd->lock);
1326 mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
1327
1328 if (genpd->status == GPD_STATE_POWER_OFF
1329 && subdomain->status != GPD_STATE_POWER_OFF) {
1330 ret = -EINVAL;
1331 goto out;
1332 }
1333
1334 list_for_each_entry(itr, &genpd->master_links, master_node) {
1335 if (itr->slave == subdomain && itr->master == genpd) {
1336 ret = -EINVAL;
1337 goto out;
1338 }
1339 }
1340
1341 link->master = genpd;
1342 list_add_tail(&link->master_node, &genpd->master_links);
1343 link->slave = subdomain;
1344 list_add_tail(&link->slave_node, &subdomain->slave_links);
1345 if (subdomain->status != GPD_STATE_POWER_OFF)
1346 genpd_sd_counter_inc(genpd);
1347
1348 out:
1349 mutex_unlock(&subdomain->lock);
1350 mutex_unlock(&genpd->lock);
1351 if (ret)
1352 kfree(link);
1353 return ret;
1354 }
1355 EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
1356
1357 /**
1358 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
1359 * @genpd: Master PM domain to remove the subdomain from.
1360 * @subdomain: Subdomain to be removed.
1361 */
1362 int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
1363 struct generic_pm_domain *subdomain)
1364 {
1365 struct gpd_link *link;
1366 int ret = -EINVAL;
1367
1368 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
1369 return -EINVAL;
1370
1371 mutex_lock(&genpd->lock);
1372
1373 if (!list_empty(&subdomain->slave_links) || subdomain->device_count) {
1374 pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
1375 subdomain->name);
1376 ret = -EBUSY;
1377 goto out;
1378 }
1379
1380 list_for_each_entry(link, &genpd->master_links, master_node) {
1381 if (link->slave != subdomain)
1382 continue;
1383
1384 mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
1385
1386 list_del(&link->master_node);
1387 list_del(&link->slave_node);
1388 kfree(link);
1389 if (subdomain->status != GPD_STATE_POWER_OFF)
1390 genpd_sd_counter_dec(genpd);
1391
1392 mutex_unlock(&subdomain->lock);
1393
1394 ret = 0;
1395 break;
1396 }
1397
1398 out:
1399 mutex_unlock(&genpd->lock);
1400
1401 return ret;
1402 }
1403 EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
1404
1405 /* Default device callbacks for generic PM domains. */
1406
1407 /**
1408 * pm_genpd_default_save_state - Default "save device state" for PM domains.
1409 * @dev: Device to handle.
1410 */
1411 static int pm_genpd_default_save_state(struct device *dev)
1412 {
1413 int (*cb)(struct device *__dev);
1414
1415 if (dev->type && dev->type->pm)
1416 cb = dev->type->pm->runtime_suspend;
1417 else if (dev->class && dev->class->pm)
1418 cb = dev->class->pm->runtime_suspend;
1419 else if (dev->bus && dev->bus->pm)
1420 cb = dev->bus->pm->runtime_suspend;
1421 else
1422 cb = NULL;
1423
1424 if (!cb && dev->driver && dev->driver->pm)
1425 cb = dev->driver->pm->runtime_suspend;
1426
1427 return cb ? cb(dev) : 0;
1428 }
1429
1430 /**
1431 * pm_genpd_default_restore_state - Default PM domains "restore device state".
1432 * @dev: Device to handle.
1433 */
1434 static int pm_genpd_default_restore_state(struct device *dev)
1435 {
1436 int (*cb)(struct device *__dev);
1437
1438 if (dev->type && dev->type->pm)
1439 cb = dev->type->pm->runtime_resume;
1440 else if (dev->class && dev->class->pm)
1441 cb = dev->class->pm->runtime_resume;
1442 else if (dev->bus && dev->bus->pm)
1443 cb = dev->bus->pm->runtime_resume;
1444 else
1445 cb = NULL;
1446
1447 if (!cb && dev->driver && dev->driver->pm)
1448 cb = dev->driver->pm->runtime_resume;
1449
1450 return cb ? cb(dev) : 0;
1451 }
1452
1453 /**
1454 * pm_genpd_init - Initialize a generic I/O PM domain object.
1455 * @genpd: PM domain object to initialize.
1456 * @gov: PM domain governor to associate with the domain (may be NULL).
1457 * @is_off: Initial power state of the domain (true means powered off).
1458 */
1459 void pm_genpd_init(struct generic_pm_domain *genpd,
1460 struct dev_power_governor *gov, bool is_off)
1461 {
1462 if (IS_ERR_OR_NULL(genpd))
1463 return;
1464
1465 INIT_LIST_HEAD(&genpd->master_links);
1466 INIT_LIST_HEAD(&genpd->slave_links);
1467 INIT_LIST_HEAD(&genpd->dev_list);
1468 mutex_init(&genpd->lock);
1469 genpd->gov = gov;
1470 INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
1471 atomic_set(&genpd->sd_count, 0);
1472 genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
1473 genpd->device_count = 0;
1474 genpd->max_off_time_ns = -1;
1475 genpd->max_off_time_changed = true;
1476 genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
1477 genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
1478 genpd->domain.ops.prepare = pm_genpd_prepare;
1479 genpd->domain.ops.suspend = pm_genpd_suspend;
1480 genpd->domain.ops.suspend_late = pm_genpd_suspend_late;
1481 genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
1482 genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
1483 genpd->domain.ops.resume_early = pm_genpd_resume_early;
1484 genpd->domain.ops.resume = pm_genpd_resume;
1485 genpd->domain.ops.freeze = pm_genpd_freeze;
1486 genpd->domain.ops.freeze_late = pm_genpd_freeze_late;
1487 genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
1488 genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
1489 genpd->domain.ops.thaw_early = pm_genpd_thaw_early;
1490 genpd->domain.ops.thaw = pm_genpd_thaw;
1491 genpd->domain.ops.poweroff = pm_genpd_suspend;
1492 genpd->domain.ops.poweroff_late = pm_genpd_suspend_late;
1493 genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
1494 genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
1495 genpd->domain.ops.restore_early = pm_genpd_resume_early;
1496 genpd->domain.ops.restore = pm_genpd_resume;
1497 genpd->domain.ops.complete = pm_genpd_complete;
1498 genpd->dev_ops.save_state = pm_genpd_default_save_state;
1499 genpd->dev_ops.restore_state = pm_genpd_default_restore_state;
1500
1501 if (genpd->flags & GENPD_FLAG_PM_CLK) {
1502 genpd->dev_ops.stop = pm_clk_suspend;
1503 genpd->dev_ops.start = pm_clk_resume;
1504 }
1505
1506 mutex_lock(&gpd_list_lock);
1507 list_add(&genpd->gpd_list_node, &gpd_list);
1508 mutex_unlock(&gpd_list_lock);
1509 }
1510 EXPORT_SYMBOL_GPL(pm_genpd_init);
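/*
 * Example (illustrative; the "foo" names and callbacks are hypothetical):
 * a platform-specific driver typically supplies the power_on/power_off
 * handlers, initializes the domain and then adds devices or subdomains:
 *
 *	static int foo_pd_power_on(struct generic_pm_domain *domain)
 *	{
 *		return 0;	// poke the power controller here
 *	}
 *
 *	static int foo_pd_power_off(struct generic_pm_domain *domain)
 *	{
 *		return 0;
 *	}
 *
 *	static struct generic_pm_domain foo_pd = {
 *		.name = "foo-pd",
 *		.power_on = foo_pd_power_on,
 *		.power_off = foo_pd_power_off,
 *		.flags = GENPD_FLAG_PM_CLK,	// optional: stop/start via pm_clk
 *	};
 *
 *	pm_genpd_init(&foo_pd, NULL, true);	// or pass e.g. &simple_qos_governor
 *	pm_genpd_add_device(&foo_pd, dev);	// or pm_genpd_add_subdomain(...)
 */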
1511
1512 #ifdef CONFIG_PM_GENERIC_DOMAINS_OF
1513 /*
1514 * Device Tree based PM domain providers.
1515 *
1516 * The code below implements generic device tree based PM domain providers that
1517 * bind device tree nodes with generic PM domains registered in the system.
1518 *
1519 * Any driver that registers generic PM domains and needs to support binding of
1520 * devices to these domains is supposed to register a PM domain provider, which
1521 * maps a PM domain specifier retrieved from the device tree to a PM domain.
1522 *
1523 * Two simple mapping functions have been provided for convenience:
1524 * - __of_genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
1525 * - __of_genpd_xlate_onecell() for mapping of multiple PM domains per node by
1526 * index.
1527 */
1528 */
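/*
 * Example (illustrative; the "foo" compatible strings, node names and data
 * layout are hypothetical): a controller providing two PM domains could be
 * described in the device tree as
 *
 *	power: power-controller@12340000 {
 *		compatible = "foo,power-controller";
 *		reg = <0x12340000 0x1000>;
 *		#power-domain-cells = <1>;
 *	};
 *
 *	usb@12350000 {
 *		compatible = "foo,usb";
 *		reg = <0x12350000 0x1000>;
 *		power-domains = <&power 1>;
 *	};
 *
 * and its driver would register the provider roughly as
 *
 *	static struct generic_pm_domain foo_pd0, foo_pd1;
 *	static struct generic_pm_domain *foo_domains[] = { &foo_pd0, &foo_pd1 };
 *	static struct genpd_onecell_data foo_genpd_data = {
 *		.domains = foo_domains,
 *		.num_domains = ARRAY_SIZE(foo_domains),
 *	};
 *
 *	pm_genpd_init(&foo_pd0, NULL, true);
 *	pm_genpd_init(&foo_pd1, NULL, true);
 *	__of_genpd_add_provider(np, __of_genpd_xlate_onecell, &foo_genpd_data);
 */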
1529 /**
1530 * struct of_genpd_provider - PM domain provider registration structure
1531 * @link: Entry in global list of PM domain providers
1532 * @node: Pointer to device tree node of PM domain provider
1533 * @xlate: Provider-specific xlate callback mapping a set of specifier cells
1534 * into a PM domain.
1535 * @data: context pointer to be passed into @xlate callback
1536 */
1537 struct of_genpd_provider {
1538 struct list_head link;
1539 struct device_node *node;
1540 genpd_xlate_t xlate;
1541 void *data;
1542 };
1543
1544 /* List of registered PM domain providers. */
1545 static LIST_HEAD(of_genpd_providers);
1546 /* Mutex to protect the list above. */
1547 static DEFINE_MUTEX(of_genpd_mutex);
1548
1549 /**
1550 * __of_genpd_xlate_simple() - Xlate function for direct node-domain mapping
1551 * @genpdspec: OF phandle args to map into a PM domain
1552 * @data: xlate function private data - pointer to struct generic_pm_domain
1553 *
1554 * This is a generic xlate function that can be used to model PM domains that
1555 * have their own device tree nodes. The private data of xlate function needs
1556 * to be a valid pointer to struct generic_pm_domain.
1557 */
1558 struct generic_pm_domain *__of_genpd_xlate_simple(
1559 struct of_phandle_args *genpdspec,
1560 void *data)
1561 {
1562 if (genpdspec->args_count != 0)
1563 return ERR_PTR(-EINVAL);
1564 return data;
1565 }
1566 EXPORT_SYMBOL_GPL(__of_genpd_xlate_simple);
1567
1568 /**
1569 * __of_genpd_xlate_onecell() - Xlate function using a single index.
1570 * @genpdspec: OF phandle args to map into a PM domain
1571 * @data: xlate function private data - pointer to struct genpd_onecell_data
1572 *
1573 * This is a generic xlate function that can be used to model simple PM domain
1574 * controllers that have one device tree node and provide multiple PM domains.
1575 * A single cell is used as an index into an array of PM domains specified in
1576 * the genpd_onecell_data struct when registering the provider.
1577 */
1578 struct generic_pm_domain *__of_genpd_xlate_onecell(
1579 struct of_phandle_args *genpdspec,
1580 void *data)
1581 {
1582 struct genpd_onecell_data *genpd_data = data;
1583 unsigned int idx = genpdspec->args[0];
1584
1585 if (genpdspec->args_count != 1)
1586 return ERR_PTR(-EINVAL);
1587
1588 if (idx >= genpd_data->num_domains) {
1589 pr_err("%s: invalid domain index %u\n", __func__, idx);
1590 return ERR_PTR(-EINVAL);
1591 }
1592
1593 if (!genpd_data->domains[idx])
1594 return ERR_PTR(-ENOENT);
1595
1596 return genpd_data->domains[idx];
1597 }
1598 EXPORT_SYMBOL_GPL(__of_genpd_xlate_onecell);
1599
1600 /**
1601 * __of_genpd_add_provider() - Register a PM domain provider for a node
1602 * @np: Device node pointer associated with the PM domain provider.
1603 * @xlate: Callback for decoding PM domain from phandle arguments.
1604 * @data: Context pointer for @xlate callback.
1605 */
1606 int __of_genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
1607 void *data)
1608 {
1609 struct of_genpd_provider *cp;
1610
1611 cp = kzalloc(sizeof(*cp), GFP_KERNEL);
1612 if (!cp)
1613 return -ENOMEM;
1614
1615 cp->node = of_node_get(np);
1616 cp->data = data;
1617 cp->xlate = xlate;
1618
1619 mutex_lock(&of_genpd_mutex);
1620 list_add(&cp->link, &of_genpd_providers);
1621 mutex_unlock(&of_genpd_mutex);
1622 pr_debug("Added domain provider from %s\n", np->full_name);
1623
1624 return 0;
1625 }
1626 EXPORT_SYMBOL_GPL(__of_genpd_add_provider);
1627
1628 /**
1629 * of_genpd_del_provider() - Remove a previously registered PM domain provider
1630 * @np: Device node pointer associated with the PM domain provider
1631 */
1632 void of_genpd_del_provider(struct device_node *np)
1633 {
1634 struct of_genpd_provider *cp;
1635
1636 mutex_lock(&of_genpd_mutex);
1637 list_for_each_entry(cp, &of_genpd_providers, link) {
1638 if (cp->node == np) {
1639 list_del(&cp->link);
1640 of_node_put(cp->node);
1641 kfree(cp);
1642 break;
1643 }
1644 }
1645 mutex_unlock(&of_genpd_mutex);
1646 }
1647 EXPORT_SYMBOL_GPL(of_genpd_del_provider);
1648
1649 /**
1650 * of_genpd_get_from_provider() - Look-up PM domain
1651 * @genpdspec: OF phandle args to use for look-up
1652 *
1653 * Looks for a PM domain provider under the node specified by @genpdspec and if
1654 * found, uses xlate function of the provider to map phandle args to a PM
1655 * domain.
1656 *
1657 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
1658 * on failure.
1659 */
1660 struct generic_pm_domain *of_genpd_get_from_provider(
1661 struct of_phandle_args *genpdspec)
1662 {
1663 struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
1664 struct of_genpd_provider *provider;
1665
1666 mutex_lock(&of_genpd_mutex);
1667
1668 /* Check if we have such a provider in our array */
1669 list_for_each_entry(provider, &of_genpd_providers, link) {
1670 if (provider->node == genpdspec->np)
1671 genpd = provider->xlate(genpdspec, provider->data);
1672 if (!IS_ERR(genpd))
1673 break;
1674 }
1675
1676 mutex_unlock(&of_genpd_mutex);
1677
1678 return genpd;
1679 }
1680 EXPORT_SYMBOL_GPL(of_genpd_get_from_provider);
1681
1682 /**
1683 * genpd_dev_pm_detach - Detach a device from its PM domain.
1684 * @dev: Device to detach.
1685 * @power_off: Currently not used
1686 *
1687 * Try to locate the generic PM domain that the device was previously
1688 * attached to. If one is found, the device is detached from it.
1689 */
1690 static void genpd_dev_pm_detach(struct device *dev, bool power_off)
1691 {
1692 struct generic_pm_domain *pd;
1693 unsigned int i;
1694 int ret = 0;
1695
1696 pd = pm_genpd_lookup_dev(dev);
1697 if (!pd)
1698 return;
1699
1700 dev_dbg(dev, "removing from PM domain %s\n", pd->name);
1701
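/*
 * pm_genpd_remove_device() returns -EAGAIN while a system suspend transition
 * is in progress (prepared_count > 0), so retry with exponentially growing
 * delays of 1, 2, 4, ... ms, bounded (approximately) by GENPD_RETRY_MAX_MS.
 */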
1702 for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
1703 ret = pm_genpd_remove_device(pd, dev);
1704 if (ret != -EAGAIN)
1705 break;
1706
1707 mdelay(i);
1708 cond_resched();
1709 }
1710
1711 if (ret < 0) {
1712 dev_err(dev, "failed to remove from PM domain %s: %d",
1713 pd->name, ret);
1714 return;
1715 }
1716
1717 /* Check if PM domain can be powered off after removing this device. */
1718 genpd_queue_power_off_work(pd);
1719 }
1720
1721 static void genpd_dev_pm_sync(struct device *dev)
1722 {
1723 struct generic_pm_domain *pd;
1724
1725 pd = dev_to_genpd(dev);
1726 if (IS_ERR(pd))
1727 return;
1728
1729 genpd_queue_power_off_work(pd);
1730 }
1731
1732 /**
1733 * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
1734 * @dev: Device to attach.
1735 *
1736 * Parse the device's OF node to find a PM domain specifier. If one is found,
1737 * the device is attached to the retrieved pm_domain ops.
1738 *
1739 * Both generic and legacy Samsung-specific DT bindings are supported to keep
1740 * backwards compatibility with existing DTBs.
1741 *
1742 * Returns 0 on a successfully attached PM domain, or a negative error code.
1743 * Note that if a power-domain exists for the device but cannot be found or
1744 * turned on, -EPROBE_DEFER is returned so that the device is not probed and
1745 * the attach is retried later.
1746 */
1747 int genpd_dev_pm_attach(struct device *dev)
1748 {
1749 struct of_phandle_args pd_args;
1750 struct generic_pm_domain *pd;
1751 unsigned int i;
1752 int ret;
1753
1754 if (!dev->of_node)
1755 return -ENODEV;
1756
1757 if (dev->pm_domain)
1758 return -EEXIST;
1759
1760 ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
1761 "#power-domain-cells", 0, &pd_args);
1762 if (ret < 0) {
1763 if (ret != -ENOENT)
1764 return ret;
1765
1766 /*
1767 * Try legacy Samsung-specific bindings
1768 * (for backwards compatibility of DT ABI)
1769 */
1770 pd_args.args_count = 0;
1771 pd_args.np = of_parse_phandle(dev->of_node,
1772 "samsung,power-domain", 0);
1773 if (!pd_args.np)
1774 return -ENOENT;
1775 }
1776
1777 pd = of_genpd_get_from_provider(&pd_args);
1778 if (IS_ERR(pd)) {
1779 dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
1780 __func__, PTR_ERR(pd));
1781 of_node_put(dev->of_node);
1782 return -EPROBE_DEFER;
1783 }
1784
1785 dev_dbg(dev, "adding to PM domain %s\n", pd->name);
1786
1787 for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
1788 ret = pm_genpd_add_device(pd, dev);
1789 if (ret != -EAGAIN)
1790 break;
1791
1792 mdelay(i);
1793 cond_resched();
1794 }
1795
1796 if (ret < 0) {
1797 dev_err(dev, "failed to add to PM domain %s: %d",
1798 pd->name, ret);
1799 of_node_put(dev->of_node);
1800 goto out;
1801 }
1802
1803 dev->pm_domain->detach = genpd_dev_pm_detach;
1804 dev->pm_domain->sync = genpd_dev_pm_sync;
1805 ret = genpd_poweron(pd);
1806
1807 out:
1808 return ret ? -EPROBE_DEFER : 0;
1809 }
1810 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
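/*
 * Example (illustrative): genpd_dev_pm_attach() is normally reached from the
 * bus code (e.g. dev_pm_domain_attach() during platform device probe); once
 * the device is attached, drivers manage it through runtime PM as usual:
 *
 *	pm_runtime_enable(dev);
 *	ret = pm_runtime_get_sync(dev);	// may power the domain on
 *	...
 *	pm_runtime_put(dev);		// may allow the domain to power off
 *	pm_runtime_disable(dev);
 */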
1811 #endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
1812
1813
1814 /*** debugfs support ***/
1815
1816 #ifdef CONFIG_PM_ADVANCED_DEBUG
1817 #include <linux/pm.h>
1818 #include <linux/device.h>
1819 #include <linux/debugfs.h>
1820 #include <linux/seq_file.h>
1821 #include <linux/init.h>
1822 #include <linux/kobject.h>
1823 static struct dentry *pm_genpd_debugfs_dir;
1824
1825 /*
1826 * TODO: This function is a slightly modified version of rtpm_status_show
1827 * from sysfs.c, so generalize it.
1828 */
1829 static void rtpm_status_str(struct seq_file *s, struct device *dev)
1830 {
1831 static const char * const status_lookup[] = {
1832 [RPM_ACTIVE] = "active",
1833 [RPM_RESUMING] = "resuming",
1834 [RPM_SUSPENDED] = "suspended",
1835 [RPM_SUSPENDING] = "suspending"
1836 };
1837 const char *p = "";
1838
1839 if (dev->power.runtime_error)
1840 p = "error";
1841 else if (dev->power.disable_depth)
1842 p = "unsupported";
1843 else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
1844 p = status_lookup[dev->power.runtime_status];
1845 else
1846 WARN_ON(1);
1847
1848 seq_puts(s, p);
1849 }
1850
1851 static int pm_genpd_summary_one(struct seq_file *s,
1852 struct generic_pm_domain *genpd)
1853 {
1854 static const char * const status_lookup[] = {
1855 [GPD_STATE_ACTIVE] = "on",
1856 [GPD_STATE_POWER_OFF] = "off"
1857 };
1858 struct pm_domain_data *pm_data;
1859 const char *kobj_path;
1860 struct gpd_link *link;
1861 int ret;
1862
1863 ret = mutex_lock_interruptible(&genpd->lock);
1864 if (ret)
1865 return -ERESTARTSYS;
1866
1867 if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
1868 goto exit;
1869 seq_printf(s, "%-30s %-15s ", genpd->name, status_lookup[genpd->status]);
1870
1871 /*
1872 * Modifications on the list require holding locks on both
1873 * master and slave, so we are safe.
1874 * Also genpd->name is immutable.
1875 */
1876 list_for_each_entry(link, &genpd->master_links, master_node) {
1877 seq_printf(s, "%s", link->slave->name);
1878 if (!list_is_last(&link->master_node, &genpd->master_links))
1879 seq_puts(s, ", ");
1880 }
1881
1882 list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
1883 kobj_path = kobject_get_path(&pm_data->dev->kobj, GFP_KERNEL);
1884 if (kobj_path == NULL)
1885 continue;
1886
1887 seq_printf(s, "\n %-50s ", kobj_path);
1888 rtpm_status_str(s, pm_data->dev);
1889 kfree(kobj_path);
1890 }
1891
1892 seq_puts(s, "\n");
1893 exit:
1894 mutex_unlock(&genpd->lock);
1895
1896 return 0;
1897 }
1898
1899 static int pm_genpd_summary_show(struct seq_file *s, void *data)
1900 {
1901 struct generic_pm_domain *genpd;
1902 int ret = 0;
1903
1904 seq_puts(s, "domain status slaves\n");
1905 seq_puts(s, " /device runtime status\n");
1906 seq_puts(s, "----------------------------------------------------------------------\n");
1907
1908 ret = mutex_lock_interruptible(&gpd_list_lock);
1909 if (ret)
1910 return -ERESTARTSYS;
1911
1912 list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
1913 ret = pm_genpd_summary_one(s, genpd);
1914 if (ret)
1915 break;
1916 }
1917 mutex_unlock(&gpd_list_lock);
1918
1919 return ret;
1920 }
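/*
 * Example (illustrative; domain and device names are hypothetical) of the
 * output read from /sys/kernel/debug/pm_genpd/pm_genpd_summary:
 *
 *	domain                          status          slaves
 *	    /device                                             runtime status
 *	----------------------------------------------------------------------
 *	pd-a                            on              pd-b
 *	    /devices/platform/12350000.usb                      suspended
 */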
1921
1922 static int pm_genpd_summary_open(struct inode *inode, struct file *file)
1923 {
1924 return single_open(file, pm_genpd_summary_show, NULL);
1925 }
1926
1927 static const struct file_operations pm_genpd_summary_fops = {
1928 .open = pm_genpd_summary_open,
1929 .read = seq_read,
1930 .llseek = seq_lseek,
1931 .release = single_release,
1932 };
1933
1934 static int __init pm_genpd_debug_init(void)
1935 {
1936 struct dentry *d;
1937
1938 pm_genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
1939
1940 if (!pm_genpd_debugfs_dir)
1941 return -ENOMEM;
1942
1943 d = debugfs_create_file("pm_genpd_summary", S_IRUGO,
1944 pm_genpd_debugfs_dir, NULL, &pm_genpd_summary_fops);
1945 if (!d)
1946 return -ENOMEM;
1947
1948 return 0;
1949 }
1950 late_initcall(pm_genpd_debug_init);
1951
1952 static void __exit pm_genpd_debug_exit(void)
1953 {
1954 debugfs_remove_recursive(pm_genpd_debugfs_dir);
1955 }
1956 __exitcall(pm_genpd_debug_exit);
1957 #endif /* CONFIG_PM_ADVANCED_DEBUG */