/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 *
 * This file is released under the GPLv2.
 */

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/pm_clock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>

#define GENPD_RETRY_MAX_MS	250		/* Approximate */

#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d);			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback;			\
	if (__routine) {					\
		__ret = __routine(dev);				\
	}							\
	__ret;							\
})
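
/*
 * Illustrative note (editor's sketch, not upstream documentation):
 * GENPD_DEV_CALLBACK() dispatches to an optional per-domain device callback
 * and substitutes a zero result when the callback is absent.  For instance,
 * genpd_stop_dev() below expands roughly to:
 *
 *	int (*__routine)(struct device *__d) = genpd->dev_ops.stop;
 *	int __ret = 0;
 *
 *	if (__routine)
 *		__ret = __routine(dev);
 *	return __ret;
 */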

static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

/*
 * Get the generic PM domain for a particular struct device.
 * This validates the struct device pointer, the PM domain pointer,
 * and checks that the PM domain pointer is a real generic PM domain.
 * Any failure results in NULL being returned.
 */
struct generic_pm_domain *pm_genpd_lookup_dev(struct device *dev)
{
	struct generic_pm_domain *genpd = NULL, *gpd;

	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
		return NULL;

	mutex_lock(&gpd_list_lock);
	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
		if (&gpd->domain == dev->pm_domain) {
			genpd = gpd;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);

	return genpd;
}

/*
 * This should only be used where we are certain that the pm_domain
 * attached to the device is a genpd domain.
 */
static struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev->pm_domain))
		return ERR_PTR(-EINVAL);

	return pd_to_genpd(dev->pm_domain);
}

static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
}

static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}

static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
	bool ret = false;

	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
		ret = !!atomic_dec_and_test(&genpd->sd_count);

	return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic();
}

static int genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	if (!genpd->power_on)
		return 0;

	if (!timed)
		return genpd->power_on(genpd);

	time_start = ktime_get();
	ret = genpd->power_on(genpd);
	if (ret)
		return ret;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->power_on_latency_ns)
		return ret;

	genpd->power_on_latency_ns = elapsed_ns;
	genpd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "on", elapsed_ns);

	return ret;
}

static int genpd_power_off(struct generic_pm_domain *genpd, bool timed)
{
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	if (!genpd->power_off)
		return 0;

	if (!timed)
		return genpd->power_off(genpd);

	time_start = ktime_get();
	ret = genpd->power_off(genpd);
	if (ret == -EBUSY)
		return ret;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->power_off_latency_ns)
		return ret;

	genpd->power_off_latency_ns = elapsed_ns;
	genpd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "off", elapsed_ns);

	return ret;
}

/**
 * genpd_queue_power_off_work - Queue up the execution of genpd_poweroff().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of genpd_poweroff() unless it's already been done
 * before.
 */
static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	queue_work(pm_wq, &genpd->power_off_work);
}

static int genpd_poweron(struct generic_pm_domain *genpd);

/**
 * __genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 *
 * Restore power to @genpd and all of its masters so that it is possible to
 * resume a device belonging to it.
 */
static int __genpd_poweron(struct generic_pm_domain *genpd)
{
	struct gpd_link *link;
	int ret = 0;

	if (genpd->status == GPD_STATE_ACTIVE
	    || (genpd->prepared_count > 0 && genpd->suspend_power_off))
		return 0;

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the masters' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_inc(link->master);

		ret = genpd_poweron(link->master);
		if (ret) {
			genpd_sd_counter_dec(link->master);
			goto err;
		}
	}

	ret = genpd_power_on(genpd, true);
	if (ret)
		goto err;

	genpd->status = GPD_STATE_ACTIVE;
	return 0;

 err:
	list_for_each_entry_continue_reverse(link,
					&genpd->slave_links,
					slave_node) {
		genpd_sd_counter_dec(link->master);
		genpd_queue_power_off_work(link->master);
	}

	return ret;
}

/**
 * genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 */
static int genpd_poweron(struct generic_pm_domain *genpd)
{
	int ret;

	mutex_lock(&genpd->lock);
	ret = __genpd_poweron(genpd);
	mutex_unlock(&genpd->lock);
	return ret;
}

static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, save_state, dev);
}

static int genpd_restore_dev(struct generic_pm_domain *genpd,
			struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, restore_state, dev);
}

static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
				     unsigned long val, void *ptr)
{
	struct generic_pm_domain_data *gpd_data;
	struct device *dev;

	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
	dev = gpd_data->base.dev;

	for (;;) {
		struct generic_pm_domain *genpd;
		struct pm_domain_data *pdd;

		spin_lock_irq(&dev->power.lock);

		pdd = dev->power.subsys_data ?
				dev->power.subsys_data->domain_data : NULL;
		if (pdd && pdd->dev) {
			to_gpd_data(pdd)->td.constraint_changed = true;
			genpd = dev_to_genpd(dev);
		} else {
			genpd = ERR_PTR(-ENODATA);
		}

		spin_unlock_irq(&dev->power.lock);

		if (!IS_ERR(genpd)) {
			mutex_lock(&genpd->lock);
			genpd->max_off_time_changed = true;
			mutex_unlock(&genpd->lock);
		}

		dev = dev->parent;
		if (!dev || dev->power.ignore_children)
			break;
	}

	return NOTIFY_DONE;
}

/**
 * genpd_poweroff - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 * @is_async: True if the domain is being powered down from a scheduled work item.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, remove power from @genpd.
 */
static int genpd_poweroff(struct generic_pm_domain *genpd, bool is_async)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended = 0;

	/*
	 * Do not try to power off the domain in the following situations:
	 * (1) The domain is already in the "power off" state.
	 * (2) System suspend is in progress.
	 */
	if (genpd->status == GPD_STATE_POWER_OFF
	    || genpd->prepared_count > 0)
		return 0;

	if (atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		enum pm_qos_flags_status stat;

		stat = dev_pm_qos_flags(pdd->dev,
					PM_QOS_FLAG_NO_POWER_OFF
						| PM_QOS_FLAG_REMOTE_WAKEUP);
		if (stat > PM_QOS_FLAGS_NONE)
			return -EBUSY;

		if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
		    || pdd->dev->power.irq_safe))
			not_suspended++;
	}

	if (not_suspended > 1 || (not_suspended == 1 && is_async))
		return -EBUSY;

	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return -EAGAIN;
	}

	if (genpd->power_off) {
		int ret;

		if (atomic_read(&genpd->sd_count) > 0)
			return -EBUSY;

		/*
		 * If sd_count > 0 at this point, one of the subdomains hasn't
		 * managed to call genpd_poweron() for the master yet after
		 * incrementing it.  In that case genpd_poweron() will wait
		 * for us to drop the lock, so we can call .power_off() and let
		 * the genpd_poweron() restore power for us (this shouldn't
		 * happen very often).
		 */
		ret = genpd_power_off(genpd, true);
		if (ret)
			return ret;
	}

	genpd->status = GPD_STATE_POWER_OFF;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		genpd_queue_power_off_work(link->master);
	}

	return 0;
}
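
/*
 * Illustrative sketch (editor's note, hypothetical caller code): a driver
 * that must keep its domain powered can veto genpd_poweroff() through the
 * PM QoS flags that dev_pm_qos_flags() checks above.  Assuming a device
 * pointer "dev" and a caller-owned request, something along these lines is
 * expected:
 *
 *	struct dev_pm_qos_request req;
 *	int err;
 *
 *	err = dev_pm_qos_add_request(dev, &req, DEV_PM_QOS_FLAGS,
 *				     PM_QOS_FLAG_NO_POWER_OFF);
 *	...
 *	dev_pm_qos_remove_request(&req);
 */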

/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
	struct generic_pm_domain *genpd;

	genpd = container_of(work, struct generic_pm_domain, power_off_work);

	mutex_lock(&genpd->lock);
	genpd_poweroff(genpd, true);
	mutex_unlock(&genpd->lock);
}

/**
 * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool (*stop_ok)(struct device *__dev);
	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
	if (stop_ok && !stop_ok(dev))
		return -EBUSY;

	/* Measure suspend latency. */
	time_start = ktime_get();

	ret = genpd_save_dev(genpd, dev);
	if (ret)
		return ret;

	ret = genpd_stop_dev(genpd, dev);
	if (ret) {
		genpd_restore_dev(genpd, dev);
		return ret;
	}

	/* Update suspend latency value if the measured time exceeds it. */
	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns > td->suspend_latency_ns) {
		td->suspend_latency_ns = elapsed_ns;
		dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
			elapsed_ns);
		genpd->max_off_time_changed = true;
		td->constraint_changed = true;
	}

	/*
	 * If power.irq_safe is set, this routine will be run with interrupts
	 * off, so it can't use mutexes.
	 */
	if (dev->power.irq_safe)
		return 0;

	mutex_lock(&genpd->lock);
	genpd_poweroff(genpd, false);
	mutex_unlock(&genpd->lock);

	return 0;
}

/**
 * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;
	bool timed = true;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/* If power.irq_safe, the PM domain is never powered off. */
	if (dev->power.irq_safe) {
		timed = false;
		goto out;
	}

	mutex_lock(&genpd->lock);
	ret = __genpd_poweron(genpd);
	mutex_unlock(&genpd->lock);

	if (ret)
		return ret;

 out:
	/* Measure resume latency. */
	if (timed)
		time_start = ktime_get();

	genpd_start_dev(genpd, dev);
	genpd_restore_dev(genpd, dev);

	/* Update resume latency value if the measured time exceeds it. */
	if (timed) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->resume_latency_ns) {
			td->resume_latency_ns = elapsed_ns;
			dev_dbg(dev, "resume latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	return 0;
}

static bool pd_ignore_unused;
static int __init pd_ignore_unused_setup(char *__unused)
{
	pd_ignore_unused = true;
	return 1;
}
__setup("pd_ignore_unused", pd_ignore_unused_setup);
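
/*
 * Illustrative note (editor's sketch): passing "pd_ignore_unused" on the
 * kernel command line sets the flag above, so genpd_poweroff_unused() below
 * leaves all otherwise-unused PM domains powered on.  That is handy when
 * debugging power-domain bring-up, where powering off an unclaimed domain
 * can take the console or a bus controller down with it.
 */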

/**
 * genpd_poweroff_unused - Power off all PM domains with no devices in use.
 */
static int __init genpd_poweroff_unused(void)
{
	struct generic_pm_domain *genpd;

	if (pd_ignore_unused) {
		pr_warn("genpd: Not disabling unused power domains\n");
		return 0;
	}

	mutex_lock(&gpd_list_lock);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_queue_power_off_work(genpd);

	mutex_unlock(&gpd_list_lock);

	return 0;
}
late_initcall(genpd_poweroff_unused);

#ifdef CONFIG_PM_SLEEP

/**
 * pm_genpd_present - Check if the given PM domain has been initialized.
 * @genpd: PM domain to check.
 */
static bool pm_genpd_present(const struct generic_pm_domain *genpd)
{
	const struct generic_pm_domain *gpd;

	if (IS_ERR_OR_NULL(genpd))
		return false;

	list_for_each_entry(gpd, &gpd_list, gpd_list_node)
		if (gpd == genpd)
			return true;

	return false;
}

static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
				    struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
}

/**
 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
 * @genpd: PM domain to power off, if possible.
 * @timed: True if latency measurements are allowed.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so.  Also, in that case propagate to its masters.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 * executed sequentially, so it is guaranteed that it will never run twice in
 * parallel).
 */
static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd,
				   bool timed)
{
	struct gpd_link *link;

	if (genpd->status == GPD_STATE_POWER_OFF)
		return;

	if (genpd->suspended_count != genpd->device_count
	    || atomic_read(&genpd->sd_count) > 0)
		return;

	genpd_power_off(genpd, timed);

	genpd->status = GPD_STATE_POWER_OFF;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		pm_genpd_sync_poweroff(link->master, timed);
	}
}

/**
 * pm_genpd_sync_poweron - Synchronously power on a PM domain and its masters.
 * @genpd: PM domain to power on.
 * @timed: True if latency measurements are allowed.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 * executed sequentially, so it is guaranteed that it will never run twice in
 * parallel).
 */
static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd,
				  bool timed)
{
	struct gpd_link *link;

	if (genpd->status == GPD_STATE_ACTIVE)
		return;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		pm_genpd_sync_poweron(link->master, timed);
		genpd_sd_counter_inc(link->master);
	}

	genpd_power_on(genpd, timed);

	genpd->status = GPD_STATE_ACTIVE;
}

/**
 * resume_needed - Check whether to resume a device before system suspend.
 * @dev: Device to check.
 * @genpd: PM domain the device belongs to.
 *
 * There are two cases in which a device that can wake up the system from sleep
 * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
 * to wake up the system and it has to remain active for this purpose while the
 * system is in the sleep state and (2) if the device is not enabled to wake up
 * the system from sleep states and it generally doesn't generate wakeup signals
 * by itself (those signals are generated on its behalf by other parts of the
 * system).  In the latter case it may be necessary to reconfigure the device's
 * wakeup settings during system suspend, because it may have been set up to
 * signal remote wakeup from the system's working state as needed by runtime PM.
 * Return 'true' in either of the above cases.
 */
static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
{
	bool active_wakeup;

	if (!device_can_wakeup(dev))
		return false;

	active_wakeup = genpd_dev_active_wakeup(genpd, dev);
	return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
}

/**
 * pm_genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int pm_genpd_prepare(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * If a wakeup request is pending for the device, it should be woken up
	 * at this point and a system wakeup event should be reported if it's
	 * set up to wake up the system from sleep states.
	 */
	pm_runtime_get_noresume(dev);
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		pm_runtime_put(dev);
		return -EBUSY;
	}

	if (resume_needed(dev, genpd))
		pm_runtime_resume(dev);

	mutex_lock(&genpd->lock);

	if (genpd->prepared_count++ == 0) {
		genpd->suspended_count = 0;
		genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
	}

	mutex_unlock(&genpd->lock);

	if (genpd->suspend_power_off) {
		pm_runtime_put_noidle(dev);
		return 0;
	}

	/*
	 * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
	 * so genpd_poweron() will return immediately, but if the device
	 * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
	 * to make it operational.
	 */
	pm_runtime_resume(dev);
	__pm_runtime_disable(dev, false);

	ret = pm_generic_prepare(dev);
	if (ret) {
		mutex_lock(&genpd->lock);

		if (--genpd->prepared_count == 0)
			genpd->suspend_power_off = false;

		mutex_unlock(&genpd->lock);
		pm_runtime_enable(dev);
	}

	pm_runtime_put(dev);
	return ret;
}

/**
 * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Suspend a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
}

/**
 * pm_genpd_suspend_late - Late suspend of a device from an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a late suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend_late(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_suspend_late(dev);
}

/**
 * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int pm_genpd_suspend_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->suspend_power_off
	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
		return 0;

	genpd_stop_dev(genpd, dev);

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	genpd->suspended_count++;
	pm_genpd_sync_poweroff(genpd, true);

	return 0;
}

/**
 * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int pm_genpd_resume_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->suspend_power_off
	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
		return 0;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	pm_genpd_sync_poweron(genpd, true);
	genpd->suspended_count--;

	return genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_resume_early - Early resume of a device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out an early resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_resume_early(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_resume_early(dev);
}

/**
 * pm_genpd_resume - Resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Resume a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
}

/**
 * pm_genpd_freeze - Freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Freeze a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_freeze(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
}

/**
 * pm_genpd_freeze_late - Late freeze of a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_late(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_freeze_late(dev);
}

/**
 * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Complete the freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_stop_dev(genpd, dev);
}

/**
 * pm_genpd_thaw_noirq - Noirq thaw of a device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Start the device, unless power has been removed from the domain already
 * before the system transition.
 */
static int pm_genpd_thaw_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ?
		0 : genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_thaw_early - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Carry out an early thaw of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_thaw_early(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_thaw_early(dev);
}

/**
 * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
 * @dev: Device to thaw.
 *
 * Thaw a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_thaw(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
}

/**
 * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain will be in the same power state as before the
 * hibernation the system is resuming from and start the device if necessary.
 */
static int pm_genpd_restore_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 *
	 * At this point suspended_count == 0 means we are being run for the
	 * first time for the given domain in the present cycle.
	 */
	if (genpd->suspended_count++ == 0) {
		/*
		 * The boot kernel might put the domain into arbitrary state,
		 * so make it appear as powered off to pm_genpd_sync_poweron(),
		 * so that it tries to power it on in case it was really off.
		 */
		genpd->status = GPD_STATE_POWER_OFF;
		if (genpd->suspend_power_off) {
			/*
			 * If the domain was off before the hibernation, make
			 * sure it will be off going forward.
			 */
			genpd_power_off(genpd, true);

			return 0;
		}
	}

	if (genpd->suspend_power_off)
		return 0;

	pm_genpd_sync_poweron(genpd, true);

	return genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void pm_genpd_complete(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool run_complete;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return;

	mutex_lock(&genpd->lock);

	run_complete = !genpd->suspend_power_off;
	if (--genpd->prepared_count == 0)
		genpd->suspend_power_off = false;

	mutex_unlock(&genpd->lock);

	if (run_complete) {
		pm_generic_complete(dev);
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
		pm_request_idle(dev);
	}
}

/**
 * genpd_syscore_switch - Switch power during system core suspend or resume.
 * @dev: Device that normally is marked as "always on" to switch power for.
 * @suspend: Whether to power the domain off (true) or back on (false).
 *
 * This routine may only be called during the system core (syscore) suspend or
 * resume phase for devices whose "always on" flags are set.
 */
static void genpd_syscore_switch(struct device *dev, bool suspend)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd(dev);
	if (!pm_genpd_present(genpd))
		return;

	if (suspend) {
		genpd->suspended_count++;
		pm_genpd_sync_poweroff(genpd, false);
	} else {
		pm_genpd_sync_poweron(genpd, false);
		genpd->suspended_count--;
	}
}

void pm_genpd_syscore_poweroff(struct device *dev)
{
	genpd_syscore_switch(dev, true);
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff);

void pm_genpd_syscore_poweron(struct device *dev)
{
	genpd_syscore_switch(dev, false);
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);

#else /* !CONFIG_PM_SLEEP */

#define pm_genpd_prepare	NULL
#define pm_genpd_suspend	NULL
#define pm_genpd_suspend_late	NULL
#define pm_genpd_suspend_noirq	NULL
#define pm_genpd_resume_early	NULL
#define pm_genpd_resume_noirq	NULL
#define pm_genpd_resume		NULL
#define pm_genpd_freeze		NULL
#define pm_genpd_freeze_late	NULL
#define pm_genpd_freeze_noirq	NULL
#define pm_genpd_thaw_early	NULL
#define pm_genpd_thaw_noirq	NULL
#define pm_genpd_thaw		NULL
#define pm_genpd_restore_noirq	NULL
#define pm_genpd_complete	NULL

#endif /* CONFIG_PM_SLEEP */

static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
					struct generic_pm_domain *genpd,
					struct gpd_timing_data *td)
{
	struct generic_pm_domain_data *gpd_data;
	int ret;

	ret = dev_pm_get_subsys_data(dev);
	if (ret)
		return ERR_PTR(ret);

	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
	if (!gpd_data) {
		ret = -ENOMEM;
		goto err_put;
	}

	if (td)
		gpd_data->td = *td;

	gpd_data->base.dev = dev;
	gpd_data->td.constraint_changed = true;
	gpd_data->td.effective_constraint_ns = -1;
	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;

	spin_lock_irq(&dev->power.lock);

	if (dev->power.subsys_data->domain_data) {
		ret = -EINVAL;
		goto err_free;
	}

	dev->power.subsys_data->domain_data = &gpd_data->base;
	dev->pm_domain = &genpd->domain;

	spin_unlock_irq(&dev->power.lock);

	return gpd_data;

 err_free:
	spin_unlock_irq(&dev->power.lock);
	kfree(gpd_data);
 err_put:
	dev_pm_put_subsys_data(dev);
	return ERR_PTR(ret);
}

static void genpd_free_dev_data(struct device *dev,
				struct generic_pm_domain_data *gpd_data)
{
	spin_lock_irq(&dev->power.lock);

	dev->pm_domain = NULL;
	dev->power.subsys_data->domain_data = NULL;

	spin_unlock_irq(&dev->power.lock);

	kfree(gpd_data);
	dev_pm_put_subsys_data(dev);
}

/**
 * __pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			  struct gpd_timing_data *td)
{
	struct generic_pm_domain_data *gpd_data;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	gpd_data = genpd_alloc_dev_data(dev, genpd, td);
	if (IS_ERR(gpd_data))
		return PTR_ERR(gpd_data);

	mutex_lock(&genpd->lock);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
	if (ret)
		goto out;

	genpd->device_count++;
	genpd->max_off_time_changed = true;

	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);

 out:
	mutex_unlock(&genpd->lock);

	if (ret)
		genpd_free_dev_data(dev, gpd_data);
	else
		dev_pm_qos_add_notifier(dev, &gpd_data->nb);

	return ret;
}
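
/*
 * Illustrative note (editor's sketch): most callers do not pass timing data
 * directly; the pm_genpd_add_device() wrapper declared alongside this API in
 * include/linux/pm_domain.h (and used by genpd_dev_pm_attach() below) calls
 * this function with td == NULL, so the device starts out with default
 * gpd_timing_data.
 */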

/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @genpd: PM domain to remove the device from.
 * @dev: Device to be removed.
 */
int pm_genpd_remove_device(struct generic_pm_domain *genpd,
			   struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;
	struct pm_domain_data *pdd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (!genpd || genpd != pm_genpd_lookup_dev(dev))
		return -EINVAL;

	/* The above validation also means we have existing domain_data. */
	pdd = dev->power.subsys_data->domain_data;
	gpd_data = to_gpd_data(pdd);
	dev_pm_qos_remove_notifier(dev, &gpd_data->nb);

	mutex_lock(&genpd->lock);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	genpd->device_count--;
	genpd->max_off_time_changed = true;

	if (genpd->detach_dev)
		genpd->detach_dev(genpd, dev);

	list_del_init(&pdd->list_node);

	mutex_unlock(&genpd->lock);

	genpd_free_dev_data(dev, gpd_data);

	return 0;

 out:
	mutex_unlock(&genpd->lock);
	dev_pm_qos_add_notifier(dev, &gpd_data->nb);

	return ret;
}

/**
 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @genpd: Master PM domain to add the subdomain to.
 * @subdomain: Subdomain to be added.
 */
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
			   struct generic_pm_domain *subdomain)
{
	struct gpd_link *link;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
	    || genpd == subdomain)
		return -EINVAL;

	mutex_lock(&genpd->lock);
	mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);

	if (genpd->status == GPD_STATE_POWER_OFF
	    && subdomain->status != GPD_STATE_POWER_OFF) {
		ret = -EINVAL;
		goto out;
	}

	list_for_each_entry(link, &genpd->master_links, master_node) {
		if (link->slave == subdomain && link->master == genpd) {
			ret = -EINVAL;
			goto out;
		}
	}

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link) {
		ret = -ENOMEM;
		goto out;
	}
	link->master = genpd;
	list_add_tail(&link->master_node, &genpd->master_links);
	link->slave = subdomain;
	list_add_tail(&link->slave_node, &subdomain->slave_links);
	if (subdomain->status != GPD_STATE_POWER_OFF)
		genpd_sd_counter_inc(genpd);

 out:
	mutex_unlock(&subdomain->lock);
	mutex_unlock(&genpd->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
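
/*
 * Illustrative sketch (editor's note, hypothetical domain names): nesting
 * one domain inside another.  Given two domains already set up with
 * pm_genpd_init(), making "gpu_pd" a subdomain of "soc_pd" means the master
 * (soc_pd) must stay powered whenever gpu_pd is on:
 *
 *	ret = pm_genpd_add_subdomain(&soc_pd, &gpu_pd);
 *	if (ret)
 *		pr_err("failed to link domains: %d\n", ret);
 */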

/**
 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @genpd: Master PM domain to remove the subdomain from.
 * @subdomain: Subdomain to be removed.
 */
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
			      struct generic_pm_domain *subdomain)
{
	struct gpd_link *link;
	int ret = -EINVAL;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
		return -EINVAL;

	mutex_lock(&genpd->lock);

	if (!list_empty(&subdomain->slave_links) || subdomain->device_count) {
		pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
			subdomain->name);
		ret = -EBUSY;
		goto out;
	}

	list_for_each_entry(link, &genpd->master_links, master_node) {
		if (link->slave != subdomain)
			continue;

		mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);

		list_del(&link->master_node);
		list_del(&link->slave_node);
		kfree(link);
		if (subdomain->status != GPD_STATE_POWER_OFF)
			genpd_sd_counter_dec(genpd);

		mutex_unlock(&subdomain->lock);

		ret = 0;
		break;
	}

 out:
	mutex_unlock(&genpd->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);

/* Default device callbacks for generic PM domains. */

/**
 * pm_genpd_default_save_state - Default "save device state" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_save_state(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_suspend;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_suspend;

	return cb ? cb(dev) : 0;
}

/**
 * pm_genpd_default_restore_state - Default PM domains "restore device state".
 * @dev: Device to handle.
 */
static int pm_genpd_default_restore_state(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_resume;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_resume;

	return cb ? cb(dev) : 0;
}

/**
 * pm_genpd_init - Initialize a generic I/O PM domain object.
 * @genpd: PM domain object to initialize.
 * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: True if the domain is initially powered off.
 */
void pm_genpd_init(struct generic_pm_domain *genpd,
		   struct dev_power_governor *gov, bool is_off)
{
	if (IS_ERR_OR_NULL(genpd))
		return;

	INIT_LIST_HEAD(&genpd->master_links);
	INIT_LIST_HEAD(&genpd->slave_links);
	INIT_LIST_HEAD(&genpd->dev_list);
	mutex_init(&genpd->lock);
	genpd->gov = gov;
	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
	atomic_set(&genpd->sd_count, 0);
	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
	genpd->device_count = 0;
	genpd->max_off_time_ns = -1;
	genpd->max_off_time_changed = true;
	genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
	genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
	genpd->domain.ops.prepare = pm_genpd_prepare;
	genpd->domain.ops.suspend = pm_genpd_suspend;
	genpd->domain.ops.suspend_late = pm_genpd_suspend_late;
	genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
	genpd->domain.ops.resume_early = pm_genpd_resume_early;
	genpd->domain.ops.resume = pm_genpd_resume;
	genpd->domain.ops.freeze = pm_genpd_freeze;
	genpd->domain.ops.freeze_late = pm_genpd_freeze_late;
	genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
	genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
	genpd->domain.ops.thaw_early = pm_genpd_thaw_early;
	genpd->domain.ops.thaw = pm_genpd_thaw;
	genpd->domain.ops.poweroff = pm_genpd_suspend;
	genpd->domain.ops.poweroff_late = pm_genpd_suspend_late;
	genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
	genpd->domain.ops.restore_early = pm_genpd_resume_early;
	genpd->domain.ops.restore = pm_genpd_resume;
	genpd->domain.ops.complete = pm_genpd_complete;
	genpd->dev_ops.save_state = pm_genpd_default_save_state;
	genpd->dev_ops.restore_state = pm_genpd_default_restore_state;

	if (genpd->flags & GENPD_FLAG_PM_CLK) {
		genpd->dev_ops.stop = pm_clk_suspend;
		genpd->dev_ops.start = pm_clk_resume;
	}

	mutex_lock(&gpd_list_lock);
	list_add(&genpd->gpd_list_node, &gpd_list);
	mutex_unlock(&gpd_list_lock);
}
EXPORT_SYMBOL_GPL(pm_genpd_init);
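
/*
 * Illustrative sketch (editor's note, hypothetical names): a minimal SoC
 * driver fills in the power_on/power_off callbacks and a name, then
 * registers the domain:
 *
 *	static int my_pd_power_on(struct generic_pm_domain *domain)
 *	{
 *		return 0;	// flip the power switch on here
 *	}
 *
 *	static int my_pd_power_off(struct generic_pm_domain *domain)
 *	{
 *		return 0;	// flip the power switch off here
 *	}
 *
 *	static struct generic_pm_domain my_pd = {
 *		.name		= "my_pd",
 *		.power_on	= my_pd_power_on,
 *		.power_off	= my_pd_power_off,
 *	};
 *
 *	pm_genpd_init(&my_pd, NULL, true);	// domain starts powered off
 *
 * Passing NULL for @gov disables governor checks; a driver could instead
 * pass a governor such as the simple QoS one from domain_governor.c.
 */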

#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
/*
 * Device Tree based PM domain providers.
 *
 * The code below implements generic device tree based PM domain providers that
 * bind device tree nodes with generic PM domains registered in the system.
 *
 * Any driver that registers generic PM domains and needs to support binding of
 * devices to these domains is supposed to register a PM domain provider, which
 * maps a PM domain specifier retrieved from the device tree to a PM domain.
 *
 * Two simple mapping functions have been provided for convenience:
 *  - __of_genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
 *  - __of_genpd_xlate_onecell() for mapping of multiple PM domains per node by
 *    index.
 */

/**
 * struct of_genpd_provider - PM domain provider registration structure
 * @link: Entry in global list of PM domain providers
 * @node: Pointer to device tree node of PM domain provider
 * @xlate: Provider-specific xlate callback mapping a set of specifier cells
 *         into a PM domain.
 * @data: context pointer to be passed into @xlate callback
 */
struct of_genpd_provider {
	struct list_head link;
	struct device_node *node;
	genpd_xlate_t xlate;
	void *data;
};

/* List of registered PM domain providers. */
static LIST_HEAD(of_genpd_providers);
/* Mutex to protect the list above. */
static DEFINE_MUTEX(of_genpd_mutex);

/**
 * __of_genpd_xlate_simple() - Xlate function for direct node-domain mapping
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct generic_pm_domain
 *
 * This is a generic xlate function that can be used to model PM domains that
 * have their own device tree nodes. The private data of xlate function needs
 * to be a valid pointer to struct generic_pm_domain.
 */
struct generic_pm_domain *__of_genpd_xlate_simple(
					struct of_phandle_args *genpdspec,
					void *data)
{
	if (genpdspec->args_count != 0)
		return ERR_PTR(-EINVAL);
	return data;
}
EXPORT_SYMBOL_GPL(__of_genpd_xlate_simple);
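
/*
 * Illustrative sketch (editor's note): a provider whose DT node represents
 * exactly one domain registers itself with this xlate, reusing the
 * hypothetical "my_pd" from the pm_genpd_init() sketch above ("np" being
 * the provider's device node):
 *
 *	pm_genpd_init(&my_pd, NULL, true);
 *	ret = __of_genpd_add_provider(np, __of_genpd_xlate_simple, &my_pd);
 *
 * Consumers then reference it with a zero-cell specifier:
 *
 *	power: power-controller {
 *		#power-domain-cells = <0>;
 *	};
 *
 *	usb@12345000 {
 *		power-domains = <&power>;
 *	};
 */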

/**
 * __of_genpd_xlate_onecell() - Xlate function using a single index.
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct genpd_onecell_data
 *
 * This is a generic xlate function that can be used to model simple PM domain
 * controllers that have one device tree node and provide multiple PM domains.
 * A single cell is used as an index into an array of PM domains specified in
 * the genpd_onecell_data struct when registering the provider.
 */
struct generic_pm_domain *__of_genpd_xlate_onecell(
					struct of_phandle_args *genpdspec,
					void *data)
{
	struct genpd_onecell_data *genpd_data = data;
	unsigned int idx = genpdspec->args[0];

	if (genpdspec->args_count != 1)
		return ERR_PTR(-EINVAL);

	if (idx >= genpd_data->num_domains) {
		pr_err("%s: invalid domain index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	if (!genpd_data->domains[idx])
		return ERR_PTR(-ENOENT);

	return genpd_data->domains[idx];
}
EXPORT_SYMBOL_GPL(__of_genpd_xlate_onecell);
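
/*
 * Illustrative sketch (editor's note, hypothetical names): a controller
 * exposing several domains through one node fills in genpd_onecell_data and
 * registers with the one-cell xlate; consumers then select a domain by index:
 *
 *	static struct generic_pm_domain *my_domains[] = { &pd_a, &pd_b };
 *	static struct genpd_onecell_data my_onecell_data = {
 *		.domains	= my_domains,
 *		.num_domains	= ARRAY_SIZE(my_domains),
 *	};
 *
 *	ret = __of_genpd_add_provider(np, __of_genpd_xlate_onecell,
 *				      &my_onecell_data);
 *
 * With "#power-domain-cells = <1>" in the provider node, a consumer's
 * "power-domains = <&power 1>" maps to my_domains[1].
 */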

/**
 * __of_genpd_add_provider() - Register a PM domain provider for a node
 * @np: Device node pointer associated with the PM domain provider.
 * @xlate: Callback for decoding PM domain from phandle arguments.
 * @data: Context pointer for @xlate callback.
 */
int __of_genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
			void *data)
{
	struct of_genpd_provider *cp;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	cp->node = of_node_get(np);
	cp->data = data;
	cp->xlate = xlate;

	mutex_lock(&of_genpd_mutex);
	list_add(&cp->link, &of_genpd_providers);
	mutex_unlock(&of_genpd_mutex);
	pr_debug("Added domain provider from %s\n", np->full_name);

	return 0;
}
EXPORT_SYMBOL_GPL(__of_genpd_add_provider);

/**
 * of_genpd_del_provider() - Remove a previously registered PM domain provider
 * @np: Device node pointer associated with the PM domain provider
 */
void of_genpd_del_provider(struct device_node *np)
{
	struct of_genpd_provider *cp;

	mutex_lock(&of_genpd_mutex);
	list_for_each_entry(cp, &of_genpd_providers, link) {
		if (cp->node == np) {
			list_del(&cp->link);
			of_node_put(cp->node);
			kfree(cp);
			break;
		}
	}
	mutex_unlock(&of_genpd_mutex);
}
EXPORT_SYMBOL_GPL(of_genpd_del_provider);

/**
 * of_genpd_get_from_provider() - Look-up PM domain
 * @genpdspec: OF phandle args to use for look-up
 *
 * Looks for a PM domain provider under the node specified by @genpdspec and if
 * found, uses xlate function of the provider to map phandle args to a PM
 * domain.
 *
 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
 * on failure.
 */
struct generic_pm_domain *of_genpd_get_from_provider(
					struct of_phandle_args *genpdspec)
{
	struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
	struct of_genpd_provider *provider;

	mutex_lock(&of_genpd_mutex);

	/* Check if we have such a provider in our array */
	list_for_each_entry(provider, &of_genpd_providers, link) {
		if (provider->node == genpdspec->np)
			genpd = provider->xlate(genpdspec, provider->data);
		if (!IS_ERR(genpd))
			break;
	}

	mutex_unlock(&of_genpd_mutex);

	return genpd;
}
EXPORT_SYMBOL_GPL(of_genpd_get_from_provider);

/**
 * genpd_dev_pm_detach - Detach a device from its PM domain.
 * @dev: Device to detach.
 * @power_off: Currently not used
 *
 * Try to locate a corresponding generic PM domain, which the device was
 * attached to previously. If such is found, the device is detached from it.
 */
static void genpd_dev_pm_detach(struct device *dev, bool power_off)
{
	struct generic_pm_domain *pd;
	unsigned int i;
	int ret = 0;

	pd = pm_genpd_lookup_dev(dev);
	if (!pd)
		return;

	dev_dbg(dev, "removing from PM domain %s\n", pd->name);

	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
		ret = pm_genpd_remove_device(pd, dev);
		if (ret != -EAGAIN)
			break;

		mdelay(i);
		cond_resched();
	}

	if (ret < 0) {
		dev_err(dev, "failed to remove from PM domain %s: %d\n",
			pd->name, ret);
		return;
	}

	/* Check if PM domain can be powered off after removing this device. */
	genpd_queue_power_off_work(pd);
}

static void genpd_dev_pm_sync(struct device *dev)
{
	struct generic_pm_domain *pd;

	pd = dev_to_genpd(dev);
	if (IS_ERR(pd))
		return;

	genpd_queue_power_off_work(pd);
}

/**
 * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
 * @dev: Device to attach.
 *
 * Parse device's OF node to find a PM domain specifier. If such is found,
 * attaches the device to retrieved pm_domain ops.
 *
 * Both generic and legacy Samsung-specific DT bindings are supported to keep
 * backwards compatibility with existing DTBs.
 *
 * Returns 0 on successfully attached PM domain or negative error code. Note
 * that if a power-domain exists for the device, but it cannot be found or
 * turned on, then return -EPROBE_DEFER to ensure that the device is not
 * probed and to re-try again later.
 */
int genpd_dev_pm_attach(struct device *dev)
{
	struct of_phandle_args pd_args;
	struct generic_pm_domain *pd;
	unsigned int i;
	int ret;

	if (!dev->of_node)
		return -ENODEV;

	if (dev->pm_domain)
		return -EEXIST;

	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
					"#power-domain-cells", 0, &pd_args);
	if (ret < 0) {
		if (ret != -ENOENT)
			return ret;

		/*
		 * Try legacy Samsung-specific bindings
		 * (for backwards compatibility of DT ABI)
		 */
		pd_args.args_count = 0;
		pd_args.np = of_parse_phandle(dev->of_node,
						"samsung,power-domain", 0);
		if (!pd_args.np)
			return -ENOENT;
	}

	pd = of_genpd_get_from_provider(&pd_args);
	if (IS_ERR(pd)) {
		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
			__func__, PTR_ERR(pd));
		of_node_put(dev->of_node);
		return -EPROBE_DEFER;
	}

	dev_dbg(dev, "adding to PM domain %s\n", pd->name);

	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
		ret = pm_genpd_add_device(pd, dev);
		if (ret != -EAGAIN)
			break;

		mdelay(i);
		cond_resched();
	}

	if (ret < 0) {
		dev_err(dev, "failed to add to PM domain %s: %d\n",
			pd->name, ret);
		of_node_put(dev->of_node);
		goto out;
	}

	dev->pm_domain->detach = genpd_dev_pm_detach;
	dev->pm_domain->sync = genpd_dev_pm_sync;
	ret = genpd_poweron(pd);

 out:
	return ret ? -EPROBE_DEFER : 0;
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
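
/*
 * Illustrative note (editor's sketch): drivers do not normally call
 * genpd_dev_pm_attach() themselves; bus code is expected to invoke it at
 * probe time (for example, the platform bus reaches it through
 * dev_pm_domain_attach()), so a device with a "power-domains" property is
 * attached before its driver's probe() runs and detached again through the
 * ->detach hook installed above.
 */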
#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */


/***        debugfs support        ***/

#ifdef CONFIG_PM_ADVANCED_DEBUG
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/kobject.h>
static struct dentry *pm_genpd_debugfs_dir;

/*
 * TODO: This function is a slightly modified version of rtpm_status_show
 * from sysfs.c, so generalize it.
 */
static void rtpm_status_str(struct seq_file *s, struct device *dev)
{
	static const char * const status_lookup[] = {
		[RPM_ACTIVE] = "active",
		[RPM_RESUMING] = "resuming",
		[RPM_SUSPENDED] = "suspended",
		[RPM_SUSPENDING] = "suspending"
	};
	const char *p = "";

	if (dev->power.runtime_error)
		p = "error";
	else if (dev->power.disable_depth)
		p = "unsupported";
	else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
		p = status_lookup[dev->power.runtime_status];
	else
		WARN_ON(1);

	seq_puts(s, p);
}

static int pm_genpd_summary_one(struct seq_file *s,
				struct generic_pm_domain *genpd)
{
	static const char * const status_lookup[] = {
		[GPD_STATE_ACTIVE] = "on",
		[GPD_STATE_POWER_OFF] = "off"
	};
	struct pm_domain_data *pm_data;
	const char *kobj_path;
	struct gpd_link *link;
	int ret;

	ret = mutex_lock_interruptible(&genpd->lock);
	if (ret)
		return -ERESTARTSYS;

	if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
		goto exit;
	seq_printf(s, "%-30s  %-15s ", genpd->name, status_lookup[genpd->status]);

	/*
	 * Modifications on the list require holding locks on both
	 * master and slave, so we are safe.
	 * Also genpd->name is immutable.
	 */
	list_for_each_entry(link, &genpd->master_links, master_node) {
		seq_printf(s, "%s", link->slave->name);
		if (!list_is_last(&link->master_node, &genpd->master_links))
			seq_puts(s, ", ");
	}

	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
		kobj_path = kobject_get_path(&pm_data->dev->kobj, GFP_KERNEL);
		if (kobj_path == NULL)
			continue;

		seq_printf(s, "\n    %-50s  ", kobj_path);
		rtpm_status_str(s, pm_data->dev);
		kfree(kobj_path);
	}

	seq_puts(s, "\n");
exit:
	mutex_unlock(&genpd->lock);

	return 0;
}

static int pm_genpd_summary_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	seq_puts(s, "domain                          status          slaves\n");
	seq_puts(s, "    /device                                             runtime status\n");
	seq_puts(s, "----------------------------------------------------------------------\n");

	ret = mutex_lock_interruptible(&gpd_list_lock);
	if (ret)
		return -ERESTARTSYS;

	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
		ret = pm_genpd_summary_one(s, genpd);
		if (ret)
			break;
	}
	mutex_unlock(&gpd_list_lock);

	return ret;
}

static int pm_genpd_summary_open(struct inode *inode, struct file *file)
{
	return single_open(file, pm_genpd_summary_show, NULL);
}

static const struct file_operations pm_genpd_summary_fops = {
	.open = pm_genpd_summary_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init pm_genpd_debug_init(void)
{
	struct dentry *d;

	pm_genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);

	if (!pm_genpd_debugfs_dir)
		return -ENOMEM;

	d = debugfs_create_file("pm_genpd_summary", S_IRUGO,
			pm_genpd_debugfs_dir, NULL, &pm_genpd_summary_fops);
	if (!d)
		return -ENOMEM;

	return 0;
}
late_initcall(pm_genpd_debug_init);

static void __exit pm_genpd_debug_exit(void)
{
	debugfs_remove_recursive(pm_genpd_debugfs_dir);
}
__exitcall(pm_genpd_debug_exit);
#endif /* CONFIG_PM_ADVANCED_DEBUG */