1 /*
2 * drivers/base/power/domain.c - Common code related to device power domains.
3 *
4 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
5 *
6 * This file is released under the GPLv2.
7 */
8
9 #include <linux/delay.h>
10 #include <linux/kernel.h>
11 #include <linux/io.h>
12 #include <linux/platform_device.h>
13 #include <linux/pm_runtime.h>
14 #include <linux/pm_domain.h>
15 #include <linux/pm_qos.h>
16 #include <linux/pm_clock.h>
17 #include <linux/slab.h>
18 #include <linux/err.h>
19 #include <linux/sched.h>
20 #include <linux/suspend.h>
21 #include <linux/export.h>
22
23 #include "power.h"
24
25 #define GENPD_RETRY_MAX_MS 250 /* Approximate */
26
27 #define GENPD_DEV_CALLBACK(genpd, type, callback, dev) \
28 ({ \
29 type (*__routine)(struct device *__d); \
30 type __ret = (type)0; \
31 \
32 __routine = genpd->dev_ops.callback; \
33 if (__routine) { \
34 __ret = __routine(dev); \
35 } \
36 __ret; \
37 })
38
39 static LIST_HEAD(gpd_list);
40 static DEFINE_MUTEX(gpd_list_lock);
41
42 struct genpd_lock_ops {
43 void (*lock)(struct generic_pm_domain *genpd);
44 void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
45 int (*lock_interruptible)(struct generic_pm_domain *genpd);
46 void (*unlock)(struct generic_pm_domain *genpd);
47 };
48
49 static void genpd_lock_mtx(struct generic_pm_domain *genpd)
50 {
51 mutex_lock(&genpd->mlock);
52 }
53
54 static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
55 int depth)
56 {
57 mutex_lock_nested(&genpd->mlock, depth);
58 }
59
60 static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
61 {
62 return mutex_lock_interruptible(&genpd->mlock);
63 }
64
65 static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
66 {
67 return mutex_unlock(&genpd->mlock);
68 }
69
70 static const struct genpd_lock_ops genpd_mtx_ops = {
71 .lock = genpd_lock_mtx,
72 .lock_nested = genpd_lock_nested_mtx,
73 .lock_interruptible = genpd_lock_interruptible_mtx,
74 .unlock = genpd_unlock_mtx,
75 };
76
77 static void genpd_lock_spin(struct generic_pm_domain *genpd)
78 __acquires(&genpd->slock)
79 {
80 unsigned long flags;
81
82 spin_lock_irqsave(&genpd->slock, flags);
83 genpd->lock_flags = flags;
84 }
85
86 static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
87 int depth)
88 __acquires(&genpd->slock)
89 {
90 unsigned long flags;
91
92 spin_lock_irqsave_nested(&genpd->slock, flags, depth);
93 genpd->lock_flags = flags;
94 }
95
96 static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
97 __acquires(&genpd->slock)
98 {
99 unsigned long flags;
100
101 spin_lock_irqsave(&genpd->slock, flags);
102 genpd->lock_flags = flags;
103 return 0;
104 }
105
106 static void genpd_unlock_spin(struct generic_pm_domain *genpd)
107 __releases(&genpd->slock)
108 {
109 spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
110 }
111
112 static const struct genpd_lock_ops genpd_spin_ops = {
113 .lock = genpd_lock_spin,
114 .lock_nested = genpd_lock_nested_spin,
115 .lock_interruptible = genpd_lock_interruptible_spin,
116 .unlock = genpd_unlock_spin,
117 };
118
119 #define genpd_lock(p) p->lock_ops->lock(p)
120 #define genpd_lock_nested(p, d) p->lock_ops->lock_nested(p, d)
121 #define genpd_lock_interruptible(p) p->lock_ops->lock_interruptible(p)
122 #define genpd_unlock(p) p->lock_ops->unlock(p)
123
124 #define genpd_status_on(genpd) (genpd->status == GPD_STATE_ACTIVE)
125 #define genpd_is_irq_safe(genpd) (genpd->flags & GENPD_FLAG_IRQ_SAFE)
126 #define genpd_is_always_on(genpd) (genpd->flags & GENPD_FLAG_ALWAYS_ON)
127
128 static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
129 const struct generic_pm_domain *genpd)
130 {
131 bool ret;
132
133 ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);
134
135 /*
136 * Warn once if an IRQ safe device is attached to a no sleep domain, as
137 * to indicate a suboptimal configuration for PM. For an always on
138 * domain this isn't the case, thus don't warn.
139 */
140 if (ret && !genpd_is_always_on(genpd))
141 dev_warn_once(dev, "PM domain %s will not be powered off\n",
142 genpd->name);
143
144 return ret;
145 }
146
147 /*
148 * Get the generic PM domain for a particular struct device.
149 * This validates the struct device pointer, the PM domain pointer,
150 * and checks that the PM domain pointer is a real generic PM domain.
151 * Any failure results in NULL being returned.
152 */
153 static struct generic_pm_domain *genpd_lookup_dev(struct device *dev)
154 {
155 struct generic_pm_domain *genpd = NULL, *gpd;
156
157 if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
158 return NULL;
159
160 mutex_lock(&gpd_list_lock);
161 list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
162 if (&gpd->domain == dev->pm_domain) {
163 genpd = gpd;
164 break;
165 }
166 }
167 mutex_unlock(&gpd_list_lock);
168
169 return genpd;
170 }
171
172 /*
173 * This should only be used where we are certain that the pm_domain
174 * attached to the device is a genpd domain.
175 */
176 static struct generic_pm_domain *dev_to_genpd(struct device *dev)
177 {
178 if (IS_ERR_OR_NULL(dev->pm_domain))
179 return ERR_PTR(-EINVAL);
180
181 return pd_to_genpd(dev->pm_domain);
182 }
183
184 static int genpd_stop_dev(const struct generic_pm_domain *genpd,
185 struct device *dev)
186 {
187 return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
188 }
189
190 static int genpd_start_dev(const struct generic_pm_domain *genpd,
191 struct device *dev)
192 {
193 return GENPD_DEV_CALLBACK(genpd, int, start, dev);
194 }
195
196 static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
197 {
198 bool ret = false;
199
200 if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
201 ret = !!atomic_dec_and_test(&genpd->sd_count);
202
203 return ret;
204 }
205
206 static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
207 {
208 atomic_inc(&genpd->sd_count);
209 smp_mb__after_atomic();
210 }
211
212 static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
213 {
214 unsigned int state_idx = genpd->state_idx;
215 ktime_t time_start;
216 s64 elapsed_ns;
217 int ret;
218
219 if (!genpd->power_on)
220 return 0;
221
222 if (!timed)
223 return genpd->power_on(genpd);
224
225 time_start = ktime_get();
226 ret = genpd->power_on(genpd);
227 if (ret)
228 return ret;
229
230 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
231 if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
232 return ret;
233
234 genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
235 genpd->max_off_time_changed = true;
236 pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
237 genpd->name, "on", elapsed_ns);
238
239 return ret;
240 }
241
242 static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
243 {
244 unsigned int state_idx = genpd->state_idx;
245 ktime_t time_start;
246 s64 elapsed_ns;
247 int ret;
248
249 if (!genpd->power_off)
250 return 0;
251
252 if (!timed)
253 return genpd->power_off(genpd);
254
255 time_start = ktime_get();
256 ret = genpd->power_off(genpd);
257 if (ret == -EBUSY)
258 return ret;
259
260 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
261 if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
262 return ret;
263
264 genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
265 genpd->max_off_time_changed = true;
266 pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
267 genpd->name, "off", elapsed_ns);
268
269 return ret;
270 }
271
272 /**
273 * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
274 * @genpd: PM domain to power off.
275 *
276 * Queue up the execution of genpd_power_off() unless it's already been done
277 * before.
278 */
279 static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
280 {
281 queue_work(pm_wq, &genpd->power_off_work);
282 }
283
284 /**
285 * genpd_power_off - Remove power from a given PM domain.
286 * @genpd: PM domain to power down.
287 * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
289 * RPM status of the related device is in an intermediate state, not yet turned
289 * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not
290 * be RPM_SUSPENDED, while it tries to power off the PM domain.
291 *
292 * If all of the @genpd's devices have been suspended and all of its subdomains
293 * have been powered down, remove power from @genpd.
294 */
295 static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
296 unsigned int depth)
297 {
298 struct pm_domain_data *pdd;
299 struct gpd_link *link;
300 unsigned int not_suspended = 0;
301
302 /*
303 * Do not try to power off the domain in the following situations:
304 * (1) The domain is already in the "power off" state.
305 * (2) System suspend is in progress.
306 */
307 if (!genpd_status_on(genpd) || genpd->prepared_count > 0)
308 return 0;
309
310 /*
311 * Abort power off for the PM domain in the following situations:
312 * (1) The domain is configured as always on.
313 * (2) When the domain has a subdomain being powered on.
314 */
315 if (genpd_is_always_on(genpd) || atomic_read(&genpd->sd_count) > 0)
316 return -EBUSY;
317
318 list_for_each_entry(pdd, &genpd->dev_list, list_node) {
319 enum pm_qos_flags_status stat;
320
321 stat = dev_pm_qos_flags(pdd->dev,
322 PM_QOS_FLAG_NO_POWER_OFF
323 | PM_QOS_FLAG_REMOTE_WAKEUP);
324 if (stat > PM_QOS_FLAGS_NONE)
325 return -EBUSY;
326
327 /*
328 * Do not allow PM domain to be powered off, when an IRQ safe
329 * device is part of a non-IRQ safe domain.
330 */
331 if (!pm_runtime_suspended(pdd->dev) ||
332 irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd))
333 not_suspended++;
334 }
335
336 if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
337 return -EBUSY;
338
339 if (genpd->gov && genpd->gov->power_down_ok) {
340 if (!genpd->gov->power_down_ok(&genpd->domain))
341 return -EAGAIN;
342 }
343
344 if (genpd->power_off) {
345 int ret;
346
347 if (atomic_read(&genpd->sd_count) > 0)
348 return -EBUSY;
349
350 /*
351 * If sd_count > 0 at this point, one of the subdomains hasn't
352 * managed to call genpd_power_on() for the master yet after
353 * incrementing it. In that case genpd_power_on() will wait
354 * for us to drop the lock, so we can call .power_off() and let
355 * the genpd_power_on() restore power for us (this shouldn't
356 * happen very often).
357 */
358 ret = _genpd_power_off(genpd, true);
359 if (ret)
360 return ret;
361 }
362
363 genpd->status = GPD_STATE_POWER_OFF;
364
365 list_for_each_entry(link, &genpd->slave_links, slave_node) {
366 genpd_sd_counter_dec(link->master);
367 genpd_lock_nested(link->master, depth + 1);
368 genpd_power_off(link->master, false, depth + 1);
369 genpd_unlock(link->master);
370 }
371
372 return 0;
373 }
374
375 /**
376 * genpd_power_on - Restore power to a given PM domain and its masters.
377 * @genpd: PM domain to power up.
378 * @depth: nesting count for lockdep.
379 *
380 * Restore power to @genpd and all of its masters so that it is possible to
381 * resume a device belonging to it.
382 */
383 static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
384 {
385 struct gpd_link *link;
386 int ret = 0;
387
388 if (genpd_status_on(genpd))
389 return 0;
390
391 /*
392 * The list is guaranteed not to change while the loop below is being
393 * executed, unless one of the masters' .power_on() callbacks fiddles
394 * with it.
395 */
396 list_for_each_entry(link, &genpd->slave_links, slave_node) {
397 struct generic_pm_domain *master = link->master;
398
399 genpd_sd_counter_inc(master);
400
401 genpd_lock_nested(master, depth + 1);
402 ret = genpd_power_on(master, depth + 1);
403 genpd_unlock(master);
404
405 if (ret) {
406 genpd_sd_counter_dec(master);
407 goto err;
408 }
409 }
410
411 ret = _genpd_power_on(genpd, true);
412 if (ret)
413 goto err;
414
415 genpd->status = GPD_STATE_ACTIVE;
416 return 0;
417
418 err:
419 list_for_each_entry_continue_reverse(link,
420 &genpd->slave_links,
421 slave_node) {
422 genpd_sd_counter_dec(link->master);
423 genpd_lock_nested(link->master, depth + 1);
424 genpd_power_off(link->master, false, depth + 1);
425 genpd_unlock(link->master);
426 }
427
428 return ret;
429 }
430
431 static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
432 unsigned long val, void *ptr)
433 {
434 struct generic_pm_domain_data *gpd_data;
435 struct device *dev;
436
437 gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
438 dev = gpd_data->base.dev;
439
440 for (;;) {
441 struct generic_pm_domain *genpd;
442 struct pm_domain_data *pdd;
443
444 spin_lock_irq(&dev->power.lock);
445
446 pdd = dev->power.subsys_data ?
447 dev->power.subsys_data->domain_data : NULL;
448 if (pdd) {
449 to_gpd_data(pdd)->td.constraint_changed = true;
450 genpd = dev_to_genpd(dev);
451 } else {
452 genpd = ERR_PTR(-ENODATA);
453 }
454
455 spin_unlock_irq(&dev->power.lock);
456
457 if (!IS_ERR(genpd)) {
458 genpd_lock(genpd);
459 genpd->max_off_time_changed = true;
460 genpd_unlock(genpd);
461 }
462
463 dev = dev->parent;
464 if (!dev || dev->power.ignore_children)
465 break;
466 }
467
468 return NOTIFY_DONE;
469 }
470
471 /**
472 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
473 * @work: Work structure used for scheduling the execution of this function.
474 */
475 static void genpd_power_off_work_fn(struct work_struct *work)
476 {
477 struct generic_pm_domain *genpd;
478
479 genpd = container_of(work, struct generic_pm_domain, power_off_work);
480
481 genpd_lock(genpd);
482 genpd_power_off(genpd, false, 0);
483 genpd_unlock(genpd);
484 }
485
486 /**
487 * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
488 * @dev: Device to handle.
489 */
490 static int __genpd_runtime_suspend(struct device *dev)
491 {
492 int (*cb)(struct device *__dev);
493
494 if (dev->type && dev->type->pm)
495 cb = dev->type->pm->runtime_suspend;
496 else if (dev->class && dev->class->pm)
497 cb = dev->class->pm->runtime_suspend;
498 else if (dev->bus && dev->bus->pm)
499 cb = dev->bus->pm->runtime_suspend;
500 else
501 cb = NULL;
502
503 if (!cb && dev->driver && dev->driver->pm)
504 cb = dev->driver->pm->runtime_suspend;
505
506 return cb ? cb(dev) : 0;
507 }
508
509 /**
510 * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
511 * @dev: Device to handle.
512 */
513 static int __genpd_runtime_resume(struct device *dev)
514 {
515 int (*cb)(struct device *__dev);
516
517 if (dev->type && dev->type->pm)
518 cb = dev->type->pm->runtime_resume;
519 else if (dev->class && dev->class->pm)
520 cb = dev->class->pm->runtime_resume;
521 else if (dev->bus && dev->bus->pm)
522 cb = dev->bus->pm->runtime_resume;
523 else
524 cb = NULL;
525
526 if (!cb && dev->driver && dev->driver->pm)
527 cb = dev->driver->pm->runtime_resume;
528
529 return cb ? cb(dev) : 0;
530 }
531
532 /**
533 * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
534 * @dev: Device to suspend.
535 *
536 * Carry out a runtime suspend of a device under the assumption that its
537 * pm_domain field points to the domain member of an object of type
538 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
539 */
540 static int genpd_runtime_suspend(struct device *dev)
541 {
542 struct generic_pm_domain *genpd;
543 bool (*suspend_ok)(struct device *__dev);
544 struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
545 bool runtime_pm = pm_runtime_enabled(dev);
546 ktime_t time_start;
547 s64 elapsed_ns;
548 int ret;
549
550 dev_dbg(dev, "%s()\n", __func__);
551
552 genpd = dev_to_genpd(dev);
553 if (IS_ERR(genpd))
554 return -EINVAL;
555
556 /*
557 * A runtime PM centric subsystem/driver may re-use the runtime PM
558 * callbacks for other purposes than runtime PM. In those scenarios
559 * runtime PM is disabled. Under these circumstances, we shall skip
560 * validating/measuring the PM QoS latency.
561 */
562 suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
563 if (runtime_pm && suspend_ok && !suspend_ok(dev))
564 return -EBUSY;
565
566 /* Measure suspend latency. */
567 time_start = 0;
568 if (runtime_pm)
569 time_start = ktime_get();
570
571 ret = __genpd_runtime_suspend(dev);
572 if (ret)
573 return ret;
574
575 ret = genpd_stop_dev(genpd, dev);
576 if (ret) {
577 __genpd_runtime_resume(dev);
578 return ret;
579 }
580
581 /* Update suspend latency value if the measured time exceeds it. */
582 if (runtime_pm) {
583 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
584 if (elapsed_ns > td->suspend_latency_ns) {
585 td->suspend_latency_ns = elapsed_ns;
586 dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
587 elapsed_ns);
588 genpd->max_off_time_changed = true;
589 td->constraint_changed = true;
590 }
591 }
592
593 /*
594 * If power.irq_safe is set, this routine may be run with
595 * IRQs disabled, so suspend only if the PM domain also is irq_safe.
596 */
597 if (irq_safe_dev_in_no_sleep_domain(dev, genpd))
598 return 0;
599
600 genpd_lock(genpd);
601 genpd_power_off(genpd, true, 0);
602 genpd_unlock(genpd);
603
604 return 0;
605 }
606
607 /**
608 * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
609 * @dev: Device to resume.
610 *
611 * Carry out a runtime resume of a device under the assumption that its
612 * pm_domain field points to the domain member of an object of type
613 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
614 */
615 static int genpd_runtime_resume(struct device *dev)
616 {
617 struct generic_pm_domain *genpd;
618 struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
619 bool runtime_pm = pm_runtime_enabled(dev);
620 ktime_t time_start;
621 s64 elapsed_ns;
622 int ret;
623 bool timed = true;
624
625 dev_dbg(dev, "%s()\n", __func__);
626
627 genpd = dev_to_genpd(dev);
628 if (IS_ERR(genpd))
629 return -EINVAL;
630
631 /*
632 * As we don't power off a non IRQ safe domain, which holds
633 * an IRQ safe device, we don't need to restore power to it.
634 */
635 if (irq_safe_dev_in_no_sleep_domain(dev, genpd)) {
636 timed = false;
637 goto out;
638 }
639
640 genpd_lock(genpd);
641 ret = genpd_power_on(genpd, 0);
642 genpd_unlock(genpd);
643
644 if (ret)
645 return ret;
646
647 out:
648 /* Measure resume latency. */
649 time_start = 0;
650 if (timed && runtime_pm)
651 time_start = ktime_get();
652
653 ret = genpd_start_dev(genpd, dev);
654 if (ret)
655 goto err_poweroff;
656
657 ret = __genpd_runtime_resume(dev);
658 if (ret)
659 goto err_stop;
660
661 /* Update resume latency value if the measured time exceeds it. */
662 if (timed && runtime_pm) {
663 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
664 if (elapsed_ns > td->resume_latency_ns) {
665 td->resume_latency_ns = elapsed_ns;
666 dev_dbg(dev, "resume latency exceeded, %lld ns\n",
667 elapsed_ns);
668 genpd->max_off_time_changed = true;
669 td->constraint_changed = true;
670 }
671 }
672
673 return 0;
674
675 err_stop:
676 genpd_stop_dev(genpd, dev);
677 err_poweroff:
678 if (!pm_runtime_is_irq_safe(dev) ||
679 (pm_runtime_is_irq_safe(dev) && genpd_is_irq_safe(genpd))) {
680 genpd_lock(genpd);
681 genpd_power_off(genpd, true, 0);
682 genpd_unlock(genpd);
683 }
684
685 return ret;
686 }
687
688 static bool pd_ignore_unused;
689 static int __init pd_ignore_unused_setup(char *__unused)
690 {
691 pd_ignore_unused = true;
692 return 1;
693 }
694 __setup("pd_ignore_unused", pd_ignore_unused_setup);
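/*
 * For example, booting with "pd_ignore_unused" on the kernel command line sets
 * the flag above, so genpd_power_off_unused() below skips queueing power-off
 * work for otherwise unused PM domains at late init; this is typically used
 * when debugging suspect power-off callbacks.
 */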
695
696 /**
697 * genpd_power_off_unused - Power off all PM domains with no devices in use.
698 */
699 static int __init genpd_power_off_unused(void)
700 {
701 struct generic_pm_domain *genpd;
702
703 if (pd_ignore_unused) {
704 pr_warn("genpd: Not disabling unused power domains\n");
705 return 0;
706 }
707
708 mutex_lock(&gpd_list_lock);
709
710 list_for_each_entry(genpd, &gpd_list, gpd_list_node)
711 genpd_queue_power_off_work(genpd);
712
713 mutex_unlock(&gpd_list_lock);
714
715 return 0;
716 }
717 late_initcall(genpd_power_off_unused);
718
719 #if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_GENERIC_DOMAINS_OF)
720
721 /**
722 * pm_genpd_present - Check if the given PM domain has been initialized.
723 * @genpd: PM domain to check.
724 */
725 static bool pm_genpd_present(const struct generic_pm_domain *genpd)
726 {
727 const struct generic_pm_domain *gpd;
728
729 if (IS_ERR_OR_NULL(genpd))
730 return false;
731
732 list_for_each_entry(gpd, &gpd_list, gpd_list_node)
733 if (gpd == genpd)
734 return true;
735
736 return false;
737 }
738
739 #endif
740
741 #ifdef CONFIG_PM_SLEEP
742
743 static bool genpd_dev_active_wakeup(const struct generic_pm_domain *genpd,
744 struct device *dev)
745 {
746 return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
747 }
748
749 /**
750 * genpd_sync_power_off - Synchronously power off a PM domain and its masters.
751 * @genpd: PM domain to power off, if possible.
752 * @use_lock: use the lock.
753 * @depth: nesting count for lockdep.
754 *
755 * Check if the given PM domain can be powered off (during system suspend or
756 * hibernation) and do that if so. Also, in that case propagate to its masters.
757 *
758 * This function is only called in "noirq" and "syscore" stages of system power
759 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
760 * these cases the lock must be held.
761 */
762 static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
763 unsigned int depth)
764 {
765 struct gpd_link *link;
766
767 if (!genpd_status_on(genpd) || genpd_is_always_on(genpd))
768 return;
769
770 if (genpd->suspended_count != genpd->device_count
771 || atomic_read(&genpd->sd_count) > 0)
772 return;
773
774 /* Choose the deepest state when suspending */
775 genpd->state_idx = genpd->state_count - 1;
776 if (_genpd_power_off(genpd, false))
777 return;
778
779 genpd->status = GPD_STATE_POWER_OFF;
780
781 list_for_each_entry(link, &genpd->slave_links, slave_node) {
782 genpd_sd_counter_dec(link->master);
783
784 if (use_lock)
785 genpd_lock_nested(link->master, depth + 1);
786
787 genpd_sync_power_off(link->master, use_lock, depth + 1);
788
789 if (use_lock)
790 genpd_unlock(link->master);
791 }
792 }
793
794 /**
795 * genpd_sync_power_on - Synchronously power on a PM domain and its masters.
796 * @genpd: PM domain to power on.
797 * @use_lock: use the lock.
798 * @depth: nesting count for lockdep.
799 *
800 * This function is only called in "noirq" and "syscore" stages of system power
801 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
802 * these cases the lock must be held.
803 */
804 static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
805 unsigned int depth)
806 {
807 struct gpd_link *link;
808
809 if (genpd_status_on(genpd))
810 return;
811
812 list_for_each_entry(link, &genpd->slave_links, slave_node) {
813 genpd_sd_counter_inc(link->master);
814
815 if (use_lock)
816 genpd_lock_nested(link->master, depth + 1);
817
818 genpd_sync_power_on(link->master, use_lock, depth + 1);
819
820 if (use_lock)
821 genpd_unlock(link->master);
822 }
823
824 _genpd_power_on(genpd, false);
825
826 genpd->status = GPD_STATE_ACTIVE;
827 }
828
829 /**
830 * resume_needed - Check whether to resume a device before system suspend.
831 * @dev: Device to check.
832 * @genpd: PM domain the device belongs to.
833 *
834 * There are two cases in which a device that can wake up the system from sleep
835 * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
836 * to wake up the system and it has to remain active for this purpose while the
837 * system is in the sleep state and (2) if the device is not enabled to wake up
838 * the system from sleep states and it generally doesn't generate wakeup signals
839 * by itself (those signals are generated on its behalf by other parts of the
840 * system). In the latter case it may be necessary to reconfigure the device's
841 * wakeup settings during system suspend, because it may have been set up to
842 * signal remote wakeup from the system's working state as needed by runtime PM.
843 * Return 'true' in either of the above cases.
844 */
845 static bool resume_needed(struct device *dev,
846 const struct generic_pm_domain *genpd)
847 {
848 bool active_wakeup;
849
850 if (!device_can_wakeup(dev))
851 return false;
852
853 active_wakeup = genpd_dev_active_wakeup(genpd, dev);
854 return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
855 }
856
857 /**
858 * pm_genpd_prepare - Start power transition of a device in a PM domain.
859 * @dev: Device to start the transition of.
860 *
861 * Start a power transition of a device (during a system-wide power transition)
862 * under the assumption that its pm_domain field points to the domain member of
863 * an object of type struct generic_pm_domain representing a PM domain
864 * consisting of I/O devices.
865 */
866 static int pm_genpd_prepare(struct device *dev)
867 {
868 struct generic_pm_domain *genpd;
869 int ret;
870
871 dev_dbg(dev, "%s()\n", __func__);
872
873 genpd = dev_to_genpd(dev);
874 if (IS_ERR(genpd))
875 return -EINVAL;
876
877 /*
878 * If a wakeup request is pending for the device, it should be woken up
879 * at this point and a system wakeup event should be reported if it's
880 * set up to wake up the system from sleep states.
881 */
882 if (resume_needed(dev, genpd))
883 pm_runtime_resume(dev);
884
885 genpd_lock(genpd);
886
887 if (genpd->prepared_count++ == 0)
888 genpd->suspended_count = 0;
889
890 genpd_unlock(genpd);
891
892 ret = pm_generic_prepare(dev);
893 if (ret) {
894 genpd_lock(genpd);
895
896 genpd->prepared_count--;
897
898 genpd_unlock(genpd);
899 }
900
901 return ret;
902 }
903
904 /**
905 * genpd_finish_suspend - Completion of suspend or hibernation of device in an
906 * I/O pm domain.
907 * @dev: Device to suspend.
908 * @poweroff: Specifies if this is a poweroff_noirq or suspend_noirq callback.
909 *
910 * Stop the device and remove power from the domain if all devices in it have
911 * been stopped.
912 */
913 static int genpd_finish_suspend(struct device *dev, bool poweroff)
914 {
915 struct generic_pm_domain *genpd;
916 int ret;
917
918 genpd = dev_to_genpd(dev);
919 if (IS_ERR(genpd))
920 return -EINVAL;
921
922 if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
923 return 0;
924
925 if (poweroff)
926 ret = pm_generic_poweroff_noirq(dev);
927 else
928 ret = pm_generic_suspend_noirq(dev);
929 if (ret)
930 return ret;
931
932 if (genpd->dev_ops.stop && genpd->dev_ops.start) {
933 ret = pm_runtime_force_suspend(dev);
934 if (ret)
935 return ret;
936 }
937
938 genpd_lock(genpd);
939 genpd->suspended_count++;
940 genpd_sync_power_off(genpd, true, 0);
941 genpd_unlock(genpd);
942
943 return 0;
944 }
945
946 /**
947 * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
948 * @dev: Device to suspend.
949 *
950 * Stop the device and remove power from the domain if all devices in it have
951 * been stopped.
952 */
953 static int pm_genpd_suspend_noirq(struct device *dev)
954 {
955 dev_dbg(dev, "%s()\n", __func__);
956
957 return genpd_finish_suspend(dev, false);
958 }
959
960 /**
961 * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
962 * @dev: Device to resume.
963 *
964 * Restore power to the device's PM domain, if necessary, and start the device.
965 */
966 static int pm_genpd_resume_noirq(struct device *dev)
967 {
968 struct generic_pm_domain *genpd;
969 int ret = 0;
970
971 dev_dbg(dev, "%s()\n", __func__);
972
973 genpd = dev_to_genpd(dev);
974 if (IS_ERR(genpd))
975 return -EINVAL;
976
977 if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
978 return 0;
979
980 genpd_lock(genpd);
981 genpd_sync_power_on(genpd, true, 0);
982 genpd->suspended_count--;
983 genpd_unlock(genpd);
984
985 if (genpd->dev_ops.stop && genpd->dev_ops.start)
986 ret = pm_runtime_force_resume(dev);
987
988 ret = pm_generic_resume_noirq(dev);
989 if (ret)
990 return ret;
991
992 return ret;
993 }
994
995 /**
996 * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
997 * @dev: Device to freeze.
998 *
999 * Carry out a late freeze of a device under the assumption that its
1000 * pm_domain field points to the domain member of an object of type
1001 * struct generic_pm_domain representing a power domain consisting of I/O
1002 * devices.
1003 */
1004 static int pm_genpd_freeze_noirq(struct device *dev)
1005 {
1006 const struct generic_pm_domain *genpd;
1007 int ret = 0;
1008
1009 dev_dbg(dev, "%s()\n", __func__);
1010
1011 genpd = dev_to_genpd(dev);
1012 if (IS_ERR(genpd))
1013 return -EINVAL;
1014
1015 ret = pm_generic_freeze_noirq(dev);
1016 if (ret)
1017 return ret;
1018
1019 if (genpd->dev_ops.stop && genpd->dev_ops.start)
1020 ret = pm_runtime_force_suspend(dev);
1021
1022 return ret;
1023 }
1024
1025 /**
1026 * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
1027 * @dev: Device to thaw.
1028 *
1029 * Start the device, unless power has been removed from the domain already
1030 * before the system transition.
1031 */
1032 static int pm_genpd_thaw_noirq(struct device *dev)
1033 {
1034 const struct generic_pm_domain *genpd;
1035 int ret = 0;
1036
1037 dev_dbg(dev, "%s()\n", __func__);
1038
1039 genpd = dev_to_genpd(dev);
1040 if (IS_ERR(genpd))
1041 return -EINVAL;
1042
1043 if (genpd->dev_ops.stop && genpd->dev_ops.start) {
1044 ret = pm_runtime_force_resume(dev);
1045 if (ret)
1046 return ret;
1047 }
1048
1049 return pm_generic_thaw_noirq(dev);
1050 }
1051
1052 /**
1053 * pm_genpd_poweroff_noirq - Completion of hibernation of device in an
1054 * I/O PM domain.
1055 * @dev: Device to poweroff.
1056 *
1057 * Stop the device and remove power from the domain if all devices in it have
1058 * been stopped.
1059 */
1060 static int pm_genpd_poweroff_noirq(struct device *dev)
1061 {
1062 dev_dbg(dev, "%s()\n", __func__);
1063
1064 return genpd_finish_suspend(dev, true);
1065 }
1066
1067 /**
1068 * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
1069 * @dev: Device to resume.
1070 *
1071 * Make sure the domain will be in the same power state as before the
1072 * hibernation the system is resuming from and start the device if necessary.
1073 */
1074 static int pm_genpd_restore_noirq(struct device *dev)
1075 {
1076 struct generic_pm_domain *genpd;
1077 int ret = 0;
1078
1079 dev_dbg(dev, "%s()\n", __func__);
1080
1081 genpd = dev_to_genpd(dev);
1082 if (IS_ERR(genpd))
1083 return -EINVAL;
1084
1085 /*
1086 * At this point suspended_count == 0 means we are being run for the
1087 * first time for the given domain in the present cycle.
1088 */
1089 genpd_lock(genpd);
1090 if (genpd->suspended_count++ == 0)
1091 /*
1092 * The boot kernel might put the domain into arbitrary state,
1093 * so make it appear as powered off to genpd_sync_power_on(),
1094 * so that it tries to power it on in case it was really off.
1095 */
1096 genpd->status = GPD_STATE_POWER_OFF;
1097
1098 genpd_sync_power_on(genpd, true, 0);
1099 genpd_unlock(genpd);
1100
1101 if (genpd->dev_ops.stop && genpd->dev_ops.start) {
1102 ret = pm_runtime_force_resume(dev);
1103 if (ret)
1104 return ret;
1105 }
1106
1107 return pm_generic_restore_noirq(dev);
1108 }
1109
1110 /**
1111 * pm_genpd_complete - Complete power transition of a device in a power domain.
1112 * @dev: Device to complete the transition of.
1113 *
1114 * Complete a power transition of a device (during a system-wide power
1115 * transition) under the assumption that its pm_domain field points to the
1116 * domain member of an object of type struct generic_pm_domain representing
1117 * a power domain consisting of I/O devices.
1118 */
1119 static void pm_genpd_complete(struct device *dev)
1120 {
1121 struct generic_pm_domain *genpd;
1122
1123 dev_dbg(dev, "%s()\n", __func__);
1124
1125 genpd = dev_to_genpd(dev);
1126 if (IS_ERR(genpd))
1127 return;
1128
1129 pm_generic_complete(dev);
1130
1131 genpd_lock(genpd);
1132
1133 genpd->prepared_count--;
1134 if (!genpd->prepared_count)
1135 genpd_queue_power_off_work(genpd);
1136
1137 genpd_unlock(genpd);
1138 }
1139
1140 /**
1141 * genpd_syscore_switch - Switch power during system core suspend or resume.
1142 * @dev: Device that normally is marked as "always on" to switch power for.
1143 *
1144 * This routine may only be called during the system core (syscore) suspend or
1145 * resume phase for devices whose "always on" flags are set.
1146 */
1147 static void genpd_syscore_switch(struct device *dev, bool suspend)
1148 {
1149 struct generic_pm_domain *genpd;
1150
1151 genpd = dev_to_genpd(dev);
1152 if (!pm_genpd_present(genpd))
1153 return;
1154
1155 if (suspend) {
1156 genpd->suspended_count++;
1157 genpd_sync_power_off(genpd, false, 0);
1158 } else {
1159 genpd_sync_power_on(genpd, false, 0);
1160 genpd->suspended_count--;
1161 }
1162 }
1163
1164 void pm_genpd_syscore_poweroff(struct device *dev)
1165 {
1166 genpd_syscore_switch(dev, true);
1167 }
1168 EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff);
1169
1170 void pm_genpd_syscore_poweron(struct device *dev)
1171 {
1172 genpd_syscore_switch(dev, false);
1173 }
1174 EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);
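/*
 * Usage sketch with hypothetical foo_* names: a device that must keep working
 * through the syscore stage (a clock event device, for instance) and is marked
 * "always on", as the comment above requires, can drop and restore its
 * domain's power from its own syscore-stage callbacks:
 *
 *	static void foo_timer_suspend(void)
 *	{
 *		pm_genpd_syscore_poweroff(foo_timer_dev);
 *	}
 *
 *	static void foo_timer_resume(void)
 *	{
 *		pm_genpd_syscore_poweron(foo_timer_dev);
 *	}
 *
 * where foo_timer_dev is the struct device * that was added to the genpd.
 */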
1175
1176 #else /* !CONFIG_PM_SLEEP */
1177
1178 #define pm_genpd_prepare NULL
1179 #define pm_genpd_suspend_noirq NULL
1180 #define pm_genpd_resume_noirq NULL
1181 #define pm_genpd_freeze_noirq NULL
1182 #define pm_genpd_thaw_noirq NULL
1183 #define pm_genpd_poweroff_noirq NULL
1184 #define pm_genpd_restore_noirq NULL
1185 #define pm_genpd_complete NULL
1186
1187 #endif /* CONFIG_PM_SLEEP */
1188
1189 static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
1190 struct generic_pm_domain *genpd,
1191 struct gpd_timing_data *td)
1192 {
1193 struct generic_pm_domain_data *gpd_data;
1194 int ret;
1195
1196 ret = dev_pm_get_subsys_data(dev);
1197 if (ret)
1198 return ERR_PTR(ret);
1199
1200 gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
1201 if (!gpd_data) {
1202 ret = -ENOMEM;
1203 goto err_put;
1204 }
1205
1206 if (td)
1207 gpd_data->td = *td;
1208
1209 gpd_data->base.dev = dev;
1210 gpd_data->td.constraint_changed = true;
1211 gpd_data->td.effective_constraint_ns = -1;
1212 gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
1213
1214 spin_lock_irq(&dev->power.lock);
1215
1216 if (dev->power.subsys_data->domain_data) {
1217 ret = -EINVAL;
1218 goto err_free;
1219 }
1220
1221 dev->power.subsys_data->domain_data = &gpd_data->base;
1222
1223 spin_unlock_irq(&dev->power.lock);
1224
1225 dev_pm_domain_set(dev, &genpd->domain);
1226
1227 return gpd_data;
1228
1229 err_free:
1230 spin_unlock_irq(&dev->power.lock);
1231 kfree(gpd_data);
1232 err_put:
1233 dev_pm_put_subsys_data(dev);
1234 return ERR_PTR(ret);
1235 }
1236
1237 static void genpd_free_dev_data(struct device *dev,
1238 struct generic_pm_domain_data *gpd_data)
1239 {
1240 dev_pm_domain_set(dev, NULL);
1241
1242 spin_lock_irq(&dev->power.lock);
1243
1244 dev->power.subsys_data->domain_data = NULL;
1245
1246 spin_unlock_irq(&dev->power.lock);
1247
1248 kfree(gpd_data);
1249 dev_pm_put_subsys_data(dev);
1250 }
1251
1252 static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1253 struct gpd_timing_data *td)
1254 {
1255 struct generic_pm_domain_data *gpd_data;
1256 int ret = 0;
1257
1258 dev_dbg(dev, "%s()\n", __func__);
1259
1260 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
1261 return -EINVAL;
1262
1263 gpd_data = genpd_alloc_dev_data(dev, genpd, td);
1264 if (IS_ERR(gpd_data))
1265 return PTR_ERR(gpd_data);
1266
1267 genpd_lock(genpd);
1268
1269 if (genpd->prepared_count > 0) {
1270 ret = -EAGAIN;
1271 goto out;
1272 }
1273
1274 ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
1275 if (ret)
1276 goto out;
1277
1278 genpd->device_count++;
1279 genpd->max_off_time_changed = true;
1280
1281 list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
1282
1283 out:
1284 genpd_unlock(genpd);
1285
1286 if (ret)
1287 genpd_free_dev_data(dev, gpd_data);
1288 else
1289 dev_pm_qos_add_notifier(dev, &gpd_data->nb);
1290
1291 return ret;
1292 }
1293
1294 /**
1295 * __pm_genpd_add_device - Add a device to an I/O PM domain.
1296 * @genpd: PM domain to add the device to.
1297 * @dev: Device to be added.
1298 * @td: Set of PM QoS timing parameters to attach to the device.
1299 */
1300 int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1301 struct gpd_timing_data *td)
1302 {
1303 int ret;
1304
1305 mutex_lock(&gpd_list_lock);
1306 ret = genpd_add_device(genpd, dev, td);
1307 mutex_unlock(&gpd_list_lock);
1308
1309 return ret;
1310 }
1311 EXPORT_SYMBOL_GPL(__pm_genpd_add_device);
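/*
 * Usage sketch (hypothetical names): platform code that owns both the domain
 * and the device can bind them directly, without any device tree glue:
 *
 *	err = __pm_genpd_add_device(&foo_pd, &foo_platform_device->dev, NULL);
 *
 * A NULL gpd_timing_data leaves the per-device latencies at zero; the runtime
 * suspend/resume paths above then raise them from measured values as needed.
 */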
1312
1313 static int genpd_remove_device(struct generic_pm_domain *genpd,
1314 struct device *dev)
1315 {
1316 struct generic_pm_domain_data *gpd_data;
1317 struct pm_domain_data *pdd;
1318 int ret = 0;
1319
1320 dev_dbg(dev, "%s()\n", __func__);
1321
1322 pdd = dev->power.subsys_data->domain_data;
1323 gpd_data = to_gpd_data(pdd);
1324 dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
1325
1326 genpd_lock(genpd);
1327
1328 if (genpd->prepared_count > 0) {
1329 ret = -EAGAIN;
1330 goto out;
1331 }
1332
1333 genpd->device_count--;
1334 genpd->max_off_time_changed = true;
1335
1336 if (genpd->detach_dev)
1337 genpd->detach_dev(genpd, dev);
1338
1339 list_del_init(&pdd->list_node);
1340
1341 genpd_unlock(genpd);
1342
1343 genpd_free_dev_data(dev, gpd_data);
1344
1345 return 0;
1346
1347 out:
1348 genpd_unlock(genpd);
1349 dev_pm_qos_add_notifier(dev, &gpd_data->nb);
1350
1351 return ret;
1352 }
1353
1354 /**
1355 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
1356 * @genpd: PM domain to remove the device from.
1357 * @dev: Device to be removed.
1358 */
1359 int pm_genpd_remove_device(struct generic_pm_domain *genpd,
1360 struct device *dev)
1361 {
1362 if (!genpd || genpd != genpd_lookup_dev(dev))
1363 return -EINVAL;
1364
1365 return genpd_remove_device(genpd, dev);
1366 }
1367 EXPORT_SYMBOL_GPL(pm_genpd_remove_device);
1368
1369 static int genpd_add_subdomain(struct generic_pm_domain *genpd,
1370 struct generic_pm_domain *subdomain)
1371 {
1372 struct gpd_link *link, *itr;
1373 int ret = 0;
1374
1375 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
1376 || genpd == subdomain)
1377 return -EINVAL;
1378
1379 /*
1380 * If the domain can be powered on/off in an IRQ safe
1381 * context, ensure that the subdomain can also be
1382 * powered on/off in that context.
1383 */
1384 if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
1385 WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
1386 genpd->name, subdomain->name);
1387 return -EINVAL;
1388 }
1389
1390 link = kzalloc(sizeof(*link), GFP_KERNEL);
1391 if (!link)
1392 return -ENOMEM;
1393
1394 genpd_lock(subdomain);
1395 genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
1396
1397 if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) {
1398 ret = -EINVAL;
1399 goto out;
1400 }
1401
1402 list_for_each_entry(itr, &genpd->master_links, master_node) {
1403 if (itr->slave == subdomain && itr->master == genpd) {
1404 ret = -EINVAL;
1405 goto out;
1406 }
1407 }
1408
1409 link->master = genpd;
1410 list_add_tail(&link->master_node, &genpd->master_links);
1411 link->slave = subdomain;
1412 list_add_tail(&link->slave_node, &subdomain->slave_links);
1413 if (genpd_status_on(subdomain))
1414 genpd_sd_counter_inc(genpd);
1415
1416 out:
1417 genpd_unlock(genpd);
1418 genpd_unlock(subdomain);
1419 if (ret)
1420 kfree(link);
1421 return ret;
1422 }
1423
1424 /**
1425 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1426 * @genpd: Master PM domain to add the subdomain to.
1427 * @subdomain: Subdomain to be added.
1428 */
1429 int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
1430 struct generic_pm_domain *subdomain)
1431 {
1432 int ret;
1433
1434 mutex_lock(&gpd_list_lock);
1435 ret = genpd_add_subdomain(genpd, subdomain);
1436 mutex_unlock(&gpd_list_lock);
1437
1438 return ret;
1439 }
1440 EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
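/*
 * Usage sketch (hypothetical names): nesting one domain inside another so that
 * powering the child off can propagate up to the parent:
 *
 *	err = pm_genpd_add_subdomain(&foo_top_pd, &foo_gpu_pd);
 *
 * Both domains are expected to have gone through pm_genpd_init() first (their
 * locks are taken here), and a non-IRQ-safe parent cannot be given an IRQ-safe
 * subdomain, as the check in genpd_add_subdomain() above enforces.
 */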
1441
1442 /**
1443 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
1444 * @genpd: Master PM domain to remove the subdomain from.
1445 * @subdomain: Subdomain to be removed.
1446 */
1447 int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
1448 struct generic_pm_domain *subdomain)
1449 {
1450 struct gpd_link *l, *link;
1451 int ret = -EINVAL;
1452
1453 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
1454 return -EINVAL;
1455
1456 genpd_lock(subdomain);
1457 genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
1458
1459 if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
1460 pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
1461 subdomain->name);
1462 ret = -EBUSY;
1463 goto out;
1464 }
1465
1466 list_for_each_entry_safe(link, l, &genpd->master_links, master_node) {
1467 if (link->slave != subdomain)
1468 continue;
1469
1470 list_del(&link->master_node);
1471 list_del(&link->slave_node);
1472 kfree(link);
1473 if (genpd_status_on(subdomain))
1474 genpd_sd_counter_dec(genpd);
1475
1476 ret = 0;
1477 break;
1478 }
1479
1480 out:
1481 genpd_unlock(genpd);
1482 genpd_unlock(subdomain);
1483
1484 return ret;
1485 }
1486 EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
1487
1488 static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
1489 {
1490 struct genpd_power_state *state;
1491
1492 state = kzalloc(sizeof(*state), GFP_KERNEL);
1493 if (!state)
1494 return -ENOMEM;
1495
1496 genpd->states = state;
1497 genpd->state_count = 1;
1498 genpd->free = state;
1499
1500 return 0;
1501 }
1502
1503 static void genpd_lock_init(struct generic_pm_domain *genpd)
1504 {
1505 if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
1506 spin_lock_init(&genpd->slock);
1507 genpd->lock_ops = &genpd_spin_ops;
1508 } else {
1509 mutex_init(&genpd->mlock);
1510 genpd->lock_ops = &genpd_mtx_ops;
1511 }
1512 }
1513
1514 /**
1515 * pm_genpd_init - Initialize a generic I/O PM domain object.
1516 * @genpd: PM domain object to initialize.
1517 * @gov: PM domain governor to associate with the domain (may be NULL).
1518 * @is_off: Initial value of the domain's power_is_off field.
1519 *
1520 * Returns 0 on successful initialization, else a negative error code.
1521 */
1522 int pm_genpd_init(struct generic_pm_domain *genpd,
1523 struct dev_power_governor *gov, bool is_off)
1524 {
1525 int ret;
1526
1527 if (IS_ERR_OR_NULL(genpd))
1528 return -EINVAL;
1529
1530 INIT_LIST_HEAD(&genpd->master_links);
1531 INIT_LIST_HEAD(&genpd->slave_links);
1532 INIT_LIST_HEAD(&genpd->dev_list);
1533 genpd_lock_init(genpd);
1534 genpd->gov = gov;
1535 INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
1536 atomic_set(&genpd->sd_count, 0);
1537 genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
1538 genpd->device_count = 0;
1539 genpd->max_off_time_ns = -1;
1540 genpd->max_off_time_changed = true;
1541 genpd->provider = NULL;
1542 genpd->has_provider = false;
1543 genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
1544 genpd->domain.ops.runtime_resume = genpd_runtime_resume;
1545 genpd->domain.ops.prepare = pm_genpd_prepare;
1546 genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
1547 genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
1548 genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
1549 genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
1550 genpd->domain.ops.poweroff_noirq = pm_genpd_poweroff_noirq;
1551 genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
1552 genpd->domain.ops.complete = pm_genpd_complete;
1553
1554 if (genpd->flags & GENPD_FLAG_PM_CLK) {
1555 genpd->dev_ops.stop = pm_clk_suspend;
1556 genpd->dev_ops.start = pm_clk_resume;
1557 }
1558
1559 /* Always-on domains must be powered on at initialization. */
1560 if (genpd_is_always_on(genpd) && !genpd_status_on(genpd))
1561 return -EINVAL;
1562
1563 /* Use only one "off" state if there were no states declared */
1564 if (genpd->state_count == 0) {
1565 ret = genpd_set_default_power_state(genpd);
1566 if (ret)
1567 return ret;
1568 }
1569
1570 mutex_lock(&gpd_list_lock);
1571 list_add(&genpd->gpd_list_node, &gpd_list);
1572 mutex_unlock(&gpd_list_lock);
1573
1574 return 0;
1575 }
1576 EXPORT_SYMBOL_GPL(pm_genpd_init);
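/*
 * Usage sketch with hypothetical foo_* names: a SoC power controller driver
 * typically declares its domain statically and registers it as initially off:
 *
 *	static int foo_pd_power_on(struct generic_pm_domain *domain)
 *	{
 *		return foo_pmu_enable_island(FOO_ISLAND_ID);
 *	}
 *
 *	static int foo_pd_power_off(struct generic_pm_domain *domain)
 *	{
 *		return foo_pmu_disable_island(FOO_ISLAND_ID);
 *	}
 *
 *	static struct generic_pm_domain foo_pd = {
 *		.name		= "foo",
 *		.power_on	= foo_pd_power_on,
 *		.power_off	= foo_pd_power_off,
 *	};
 *
 *	err = pm_genpd_init(&foo_pd, NULL, true);
 *
 * A NULL governor plus is_off == true registers the domain without a governor
 * and in GPD_STATE_POWER_OFF; since no states[] array was supplied, a single
 * default "off" state is allocated by genpd_set_default_power_state().
 */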
1577
1578 static int genpd_remove(struct generic_pm_domain *genpd)
1579 {
1580 struct gpd_link *l, *link;
1581
1582 if (IS_ERR_OR_NULL(genpd))
1583 return -EINVAL;
1584
1585 genpd_lock(genpd);
1586
1587 if (genpd->has_provider) {
1588 genpd_unlock(genpd);
1589 pr_err("Provider present, unable to remove %s\n", genpd->name);
1590 return -EBUSY;
1591 }
1592
1593 if (!list_empty(&genpd->master_links) || genpd->device_count) {
1594 genpd_unlock(genpd);
1595 pr_err("%s: unable to remove %s\n", __func__, genpd->name);
1596 return -EBUSY;
1597 }
1598
1599 list_for_each_entry_safe(link, l, &genpd->slave_links, slave_node) {
1600 list_del(&link->master_node);
1601 list_del(&link->slave_node);
1602 kfree(link);
1603 }
1604
1605 list_del(&genpd->gpd_list_node);
1606 genpd_unlock(genpd);
1607 cancel_work_sync(&genpd->power_off_work);
1608 kfree(genpd->free);
1609 pr_debug("%s: removed %s\n", __func__, genpd->name);
1610
1611 return 0;
1612 }
1613
1614 /**
1615 * pm_genpd_remove - Remove a generic I/O PM domain
1616 * @genpd: Pointer to PM domain that is to be removed.
1617 *
1618 * To remove the PM domain, this function:
1619 * - Removes the PM domain as a subdomain to any parent domains,
1620 * if it was added.
1621 * - Removes the PM domain from the list of registered PM domains.
1622 *
1623 * The PM domain will only be removed, if the associated provider has
1624 * been removed, it is not a parent to any other PM domain and has no
1625 * devices associated with it.
1626 */
1627 int pm_genpd_remove(struct generic_pm_domain *genpd)
1628 {
1629 int ret;
1630
1631 mutex_lock(&gpd_list_lock);
1632 ret = genpd_remove(genpd);
1633 mutex_unlock(&gpd_list_lock);
1634
1635 return ret;
1636 }
1637 EXPORT_SYMBOL_GPL(pm_genpd_remove);
1638
1639 #ifdef CONFIG_PM_GENERIC_DOMAINS_OF
1640
1641 /*
1642 * Device Tree based PM domain providers.
1643 *
1644 * The code below implements generic device tree based PM domain providers that
1645 * bind device tree nodes with generic PM domains registered in the system.
1646 *
1647 * Any driver that registers generic PM domains and needs to support binding of
1648 * devices to these domains is supposed to register a PM domain provider, which
1649 * maps a PM domain specifier retrieved from the device tree to a PM domain.
1650 *
1651 * Two simple mapping functions have been provided for convenience:
1652 * - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
1653 * - genpd_xlate_onecell() for mapping of multiple PM domains per node by
1654 * index.
1655 */
1656
1657 /**
1658 * struct of_genpd_provider - PM domain provider registration structure
1659 * @link: Entry in global list of PM domain providers
1660 * @node: Pointer to device tree node of PM domain provider
1661 * @xlate: Provider-specific xlate callback mapping a set of specifier cells
1662 * into a PM domain.
1663 * @data: context pointer to be passed into @xlate callback
1664 */
1665 struct of_genpd_provider {
1666 struct list_head link;
1667 struct device_node *node;
1668 genpd_xlate_t xlate;
1669 void *data;
1670 };
1671
1672 /* List of registered PM domain providers. */
1673 static LIST_HEAD(of_genpd_providers);
1674 /* Mutex to protect the list above. */
1675 static DEFINE_MUTEX(of_genpd_mutex);
1676
1677 /**
1678 * genpd_xlate_simple() - Xlate function for direct node-domain mapping
1679 * @genpdspec: OF phandle args to map into a PM domain
1680 * @data: xlate function private data - pointer to struct generic_pm_domain
1681 *
1682 * This is a generic xlate function that can be used to model PM domains that
1683 * have their own device tree nodes. The private data of xlate function needs
1684 * to be a valid pointer to struct generic_pm_domain.
1685 */
1686 static struct generic_pm_domain *genpd_xlate_simple(
1687 struct of_phandle_args *genpdspec,
1688 void *data)
1689 {
1690 return data;
1691 }
1692
1693 /**
1694 * genpd_xlate_onecell() - Xlate function using a single index.
1695 * @genpdspec: OF phandle args to map into a PM domain
1696 * @data: xlate function private data - pointer to struct genpd_onecell_data
1697 *
1698 * This is a generic xlate function that can be used to model simple PM domain
1699 * controllers that have one device tree node and provide multiple PM domains.
1700 * A single cell is used as an index into an array of PM domains specified in
1701 * the genpd_onecell_data struct when registering the provider.
1702 */
1703 static struct generic_pm_domain *genpd_xlate_onecell(
1704 struct of_phandle_args *genpdspec,
1705 void *data)
1706 {
1707 struct genpd_onecell_data *genpd_data = data;
1708 unsigned int idx = genpdspec->args[0];
1709
1710 if (genpdspec->args_count != 1)
1711 return ERR_PTR(-EINVAL);
1712
1713 if (idx >= genpd_data->num_domains) {
1714 pr_err("%s: invalid domain index %u\n", __func__, idx);
1715 return ERR_PTR(-EINVAL);
1716 }
1717
1718 if (!genpd_data->domains[idx])
1719 return ERR_PTR(-ENOENT);
1720
1721 return genpd_data->domains[idx];
1722 }
1723
1724 /**
1725 * genpd_add_provider() - Register a PM domain provider for a node
1726 * @np: Device node pointer associated with the PM domain provider.
1727 * @xlate: Callback for decoding PM domain from phandle arguments.
1728 * @data: Context pointer for @xlate callback.
1729 */
1730 static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
1731 void *data)
1732 {
1733 struct of_genpd_provider *cp;
1734
1735 cp = kzalloc(sizeof(*cp), GFP_KERNEL);
1736 if (!cp)
1737 return -ENOMEM;
1738
1739 cp->node = of_node_get(np);
1740 cp->data = data;
1741 cp->xlate = xlate;
1742
1743 mutex_lock(&of_genpd_mutex);
1744 list_add(&cp->link, &of_genpd_providers);
1745 mutex_unlock(&of_genpd_mutex);
1746 pr_debug("Added domain provider from %s\n", np->full_name);
1747
1748 return 0;
1749 }
1750
1751 /**
1752 * of_genpd_add_provider_simple() - Register a simple PM domain provider
1753 * @np: Device node pointer associated with the PM domain provider.
1754 * @genpd: Pointer to PM domain associated with the PM domain provider.
1755 */
1756 int of_genpd_add_provider_simple(struct device_node *np,
1757 struct generic_pm_domain *genpd)
1758 {
1759 int ret = -EINVAL;
1760
1761 if (!np || !genpd)
1762 return -EINVAL;
1763
1764 mutex_lock(&gpd_list_lock);
1765
1766 if (pm_genpd_present(genpd)) {
1767 ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
1768 if (!ret) {
1769 genpd->provider = &np->fwnode;
1770 genpd->has_provider = true;
1771 }
1772 }
1773
1774 mutex_unlock(&gpd_list_lock);
1775
1776 return ret;
1777 }
1778 EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
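/*
 * Usage sketch (hypothetical names): a driver whose device tree node stands
 * for exactly one power domain registers itself as a provider right after
 * initializing the domain:
 *
 *	err = pm_genpd_init(&foo_pd, NULL, true);
 *	if (!err)
 *		err = of_genpd_add_provider_simple(pdev->dev.of_node, &foo_pd);
 *
 * Consumer nodes then point their "power-domains" phandle at this node, and
 * genpd_xlate_simple() above maps the specifier straight back to &foo_pd.
 */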
1779
1780 /**
1781 * of_genpd_add_provider_onecell() - Register a onecell PM domain provider
1782 * @np: Device node pointer associated with the PM domain provider.
1783 * @data: Pointer to the data associated with the PM domain provider.
1784 */
1785 int of_genpd_add_provider_onecell(struct device_node *np,
1786 struct genpd_onecell_data *data)
1787 {
1788 unsigned int i;
1789 int ret = -EINVAL;
1790
1791 if (!np || !data)
1792 return -EINVAL;
1793
1794 mutex_lock(&gpd_list_lock);
1795
1796 if (!data->xlate)
1797 data->xlate = genpd_xlate_onecell;
1798
1799 for (i = 0; i < data->num_domains; i++) {
1800 if (!data->domains[i])
1801 continue;
1802 if (!pm_genpd_present(data->domains[i]))
1803 goto error;
1804
1805 data->domains[i]->provider = &np->fwnode;
1806 data->domains[i]->has_provider = true;
1807 }
1808
1809 ret = genpd_add_provider(np, data->xlate, data);
1810 if (ret < 0)
1811 goto error;
1812
1813 mutex_unlock(&gpd_list_lock);
1814
1815 return 0;
1816
1817 error:
1818 while (i--) {
1819 if (!data->domains[i])
1820 continue;
1821 data->domains[i]->provider = NULL;
1822 data->domains[i]->has_provider = false;
1823 }
1824
1825 mutex_unlock(&gpd_list_lock);
1826
1827 return ret;
1828 }
1829 EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);
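/*
 * Usage sketch (hypothetical names): a controller exposing several domains
 * from one device tree node fills in a genpd_onecell_data and relies on the
 * default genpd_xlate_onecell() to map a one-cell specifier to an array index
 * (foo_pds[i].name, .power_on and .power_off are assumed to be set elsewhere):
 *
 *	static struct generic_pm_domain foo_pds[FOO_NR_DOMAINS];
 *	static struct generic_pm_domain *foo_domains[FOO_NR_DOMAINS];
 *	static struct genpd_onecell_data foo_pd_data = {
 *		.domains	= foo_domains,
 *		.num_domains	= ARRAY_SIZE(foo_domains),
 *	};
 *
 *	for (i = 0; i < FOO_NR_DOMAINS; i++) {
 *		pm_genpd_init(&foo_pds[i], NULL, true);
 *		foo_domains[i] = &foo_pds[i];
 *	}
 *
 *	err = of_genpd_add_provider_onecell(pdev->dev.of_node, &foo_pd_data);
 *
 * Leaving .xlate NULL makes the function above fall back to
 * genpd_xlate_onecell().
 */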
1830
1831 /**
1832 * of_genpd_del_provider() - Remove a previously registered PM domain provider
1833 * @np: Device node pointer associated with the PM domain provider
1834 */
1835 void of_genpd_del_provider(struct device_node *np)
1836 {
1837 struct of_genpd_provider *cp, *tmp;
1838 struct generic_pm_domain *gpd;
1839
1840 mutex_lock(&gpd_list_lock);
1841 mutex_lock(&of_genpd_mutex);
1842 list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
1843 if (cp->node == np) {
1844 /*
1845 * For each PM domain associated with the
1846 * provider, set the 'has_provider' to false
1847 * so that the PM domain can be safely removed.
1848 */
1849 list_for_each_entry(gpd, &gpd_list, gpd_list_node)
1850 if (gpd->provider == &np->fwnode)
1851 gpd->has_provider = false;
1852
1853 list_del(&cp->link);
1854 of_node_put(cp->node);
1855 kfree(cp);
1856 break;
1857 }
1858 }
1859 mutex_unlock(&of_genpd_mutex);
1860 mutex_unlock(&gpd_list_lock);
1861 }
1862 EXPORT_SYMBOL_GPL(of_genpd_del_provider);
1863
1864 /**
1865 * genpd_get_from_provider() - Look-up PM domain
1866 * @genpdspec: OF phandle args to use for look-up
1867 *
1868 * Looks for a PM domain provider under the node specified by @genpdspec and if
1869 * found, uses xlate function of the provider to map phandle args to a PM
1870 * domain.
1871 *
1872 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
1873 * on failure.
1874 */
1875 static struct generic_pm_domain *genpd_get_from_provider(
1876 struct of_phandle_args *genpdspec)
1877 {
1878 struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
1879 struct of_genpd_provider *provider;
1880
1881 if (!genpdspec)
1882 return ERR_PTR(-EINVAL);
1883
1884 mutex_lock(&of_genpd_mutex);
1885
1886 /* Check if we have such a provider in our array */
1887 list_for_each_entry(provider, &of_genpd_providers, link) {
1888 if (provider->node == genpdspec->np)
1889 genpd = provider->xlate(genpdspec, provider->data);
1890 if (!IS_ERR(genpd))
1891 break;
1892 }
1893
1894 mutex_unlock(&of_genpd_mutex);
1895
1896 return genpd;
1897 }
1898
1899 /**
1900 * of_genpd_add_device() - Add a device to an I/O PM domain
1901 * @genpdspec: OF phandle args to use for look-up PM domain
1902 * @dev: Device to be added.
1903 *
1904 * Looks-up an I/O PM domain based upon phandle args provided and adds
1905 * the device to the PM domain. Returns a negative error code on failure.
1906 */
1907 int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
1908 {
1909 struct generic_pm_domain *genpd;
1910 int ret;
1911
1912 mutex_lock(&gpd_list_lock);
1913
1914 genpd = genpd_get_from_provider(genpdspec);
1915 if (IS_ERR(genpd)) {
1916 ret = PTR_ERR(genpd);
1917 goto out;
1918 }
1919
1920 ret = genpd_add_device(genpd, dev, NULL);
1921
1922 out:
1923 mutex_unlock(&gpd_list_lock);
1924
1925 return ret;
1926 }
1927 EXPORT_SYMBOL_GPL(of_genpd_add_device);
1928
1929 /**
1930 * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1931 * @parent_spec: OF phandle args to use for parent PM domain look-up
1932 * @subdomain_spec: OF phandle args to use for subdomain look-up
1933 *
1934 * Looks-up a parent PM domain and subdomain based upon phandle args
1935 * provided and adds the subdomain to the parent PM domain. Returns a
1936 * negative error code on failure.
1937 */
1938 int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
1939 struct of_phandle_args *subdomain_spec)
1940 {
1941 struct generic_pm_domain *parent, *subdomain;
1942 int ret;
1943
1944 mutex_lock(&gpd_list_lock);
1945
1946 parent = genpd_get_from_provider(parent_spec);
1947 if (IS_ERR(parent)) {
1948 ret = PTR_ERR(parent);
1949 goto out;
1950 }
1951
1952 subdomain = genpd_get_from_provider(subdomain_spec);
1953 if (IS_ERR(subdomain)) {
1954 ret = PTR_ERR(subdomain);
1955 goto out;
1956 }
1957
1958 ret = genpd_add_subdomain(parent, subdomain);
1959
1960 out:
1961 mutex_unlock(&gpd_list_lock);
1962
1963 return ret;
1964 }
1965 EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);
1966
1967 /**
1968 * of_genpd_remove_last - Remove the last PM domain registered for a provider
1969 * @np: Pointer to the device node associated with the provider
1970 *
1971 * Find the last PM domain that was added by a particular provider and
1972 * remove this PM domain from the list of PM domains. The provider is
1973 * identified by the device node that is passed. The PM
1974 * domain will only be removed, if the provider associated with domain
1975 * has been removed.
1976 *
1977 * Returns a valid pointer to struct generic_pm_domain on success or
1978 * ERR_PTR() on failure.
1979 */
1980 struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
1981 {
1982 struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
1983 int ret;
1984
1985 if (IS_ERR_OR_NULL(np))
1986 return ERR_PTR(-EINVAL);
1987
1988 mutex_lock(&gpd_list_lock);
1989 list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) {
1990 if (gpd->provider == &np->fwnode) {
1991 ret = genpd_remove(gpd);
1992 genpd = ret ? ERR_PTR(ret) : gpd;
1993 break;
1994 }
1995 }
1996 mutex_unlock(&gpd_list_lock);
1997
1998 return genpd;
1999 }
2000 EXPORT_SYMBOL_GPL(of_genpd_remove_last);
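/*
 * Illustrative sketch (editor's addition): a provider driver can tear down
 * its domains on removal by first deleting the provider and then popping
 * the domains it registered, one by one, with of_genpd_remove_last().
 * "my_pd_remove" is a hypothetical platform driver remove callback.
 */
#if 0
static int my_pd_remove(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct generic_pm_domain *pd;

	/* The provider must be gone before its domains can be removed. */
	of_genpd_del_provider(np);

	/* Keep removing until no domain registered for @np remains. */
	do {
		pd = of_genpd_remove_last(np);
	} while (!IS_ERR(pd));

	return 0;
}
#endif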
2001
2002 /**
2003 * genpd_dev_pm_detach - Detach a device from its PM domain.
2004 * @dev: Device to detach.
2005 * @power_off: Currently not used
2006 *
2007 * Try to locate a corresponding generic PM domain, which the device was
2008 * attached to previously. If such is found, the device is detached from it.
2009 */
2010 static void genpd_dev_pm_detach(struct device *dev, bool power_off)
2011 {
2012 struct generic_pm_domain *pd;
2013 unsigned int i;
2014 int ret = 0;
2015
2016 pd = dev_to_genpd(dev);
2017 if (IS_ERR(pd))
2018 return;
2019
2020 dev_dbg(dev, "removing from PM domain %s\n", pd->name);
2021
2022 for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
2023 ret = genpd_remove_device(pd, dev);
2024 if (ret != -EAGAIN)
2025 break;
2026
2027 mdelay(i);
2028 cond_resched();
2029 }
2030
2031 if (ret < 0) {
2032 dev_err(dev, "failed to remove from PM domain %s: %d\n",
2033 pd->name, ret);
2034 return;
2035 }
2036
2037 /* Check if PM domain can be powered off after removing this device. */
2038 genpd_queue_power_off_work(pd);
2039 }
2040
2041 static void genpd_dev_pm_sync(struct device *dev)
2042 {
2043 struct generic_pm_domain *pd;
2044
2045 pd = dev_to_genpd(dev);
2046 if (IS_ERR(pd))
2047 return;
2048
2049 genpd_queue_power_off_work(pd);
2050 }
2051
2052 /**
2053 * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
2054 * @dev: Device to attach.
2055 *
2056 * Parses the device's OF node to find a PM domain specifier. If one is found,
2057 * the device is attached to the retrieved pm_domain ops.
2058 *
2059 * Both generic and legacy Samsung-specific DT bindings are supported to keep
2060 * backwards compatibility with existing DTBs.
2061 *
2062 * Returns 0 when the PM domain is successfully attached or a negative error
2063 * code otherwise. Note that if a power-domain exists for the device, but it
2064 * cannot be found or turned on, -EPROBE_DEFER is returned to ensure that the
2065 * device is not probed and the attach is retried later.
2066 */
2067 int genpd_dev_pm_attach(struct device *dev)
2068 {
2069 struct of_phandle_args pd_args;
2070 struct generic_pm_domain *pd;
2071 unsigned int i;
2072 int ret;
2073
2074 if (!dev->of_node)
2075 return -ENODEV;
2076
2077 if (dev->pm_domain)
2078 return -EEXIST;
2079
2080 ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
2081 "#power-domain-cells", 0, &pd_args);
2082 if (ret < 0) {
2083 if (ret != -ENOENT)
2084 return ret;
2085
2086 /*
2087 * Try legacy Samsung-specific bindings
2088 * (for backwards compatibility of DT ABI)
2089 */
2090 pd_args.args_count = 0;
2091 pd_args.np = of_parse_phandle(dev->of_node,
2092 "samsung,power-domain", 0);
2093 if (!pd_args.np)
2094 return -ENOENT;
2095 }
2096
2097 mutex_lock(&gpd_list_lock);
2098 pd = genpd_get_from_provider(&pd_args);
2099 of_node_put(pd_args.np);
2100 if (IS_ERR(pd)) {
2101 mutex_unlock(&gpd_list_lock);
2102 dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
2103 __func__, PTR_ERR(pd));
2104 return -EPROBE_DEFER;
2105 }
2106
2107 dev_dbg(dev, "adding to PM domain %s\n", pd->name);
2108
2109 for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
2110 ret = genpd_add_device(pd, dev, NULL);
2111 if (ret != -EAGAIN)
2112 break;
2113
2114 mdelay(i);
2115 cond_resched();
2116 }
2117 mutex_unlock(&gpd_list_lock);
2118
2119 if (ret < 0) {
2120 if (ret != -EPROBE_DEFER)
2121 dev_err(dev, "failed to add to PM domain %s: %d\n",
2122 pd->name, ret);
2123 goto out;
2124 }
2125
2126 dev->pm_domain->detach = genpd_dev_pm_detach;
2127 dev->pm_domain->sync = genpd_dev_pm_sync;
2128
2129 genpd_lock(pd);
2130 ret = genpd_power_on(pd, 0);
2131 genpd_unlock(pd);
2132 out:
2133 return ret ? -EPROBE_DEFER : 0;
2134 }
2135 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
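/*
 * Illustrative sketch (editor's addition): genpd_dev_pm_attach() is normally
 * reached through dev_pm_domain_attach() from bus code.  The sketch shows the
 * usual pattern of treating -EPROBE_DEFER as "retry the probe later";
 * "my_bus_probe" and "my_driver_probe" are hypothetical names.
 */
#if 0
static int my_bus_probe(struct device *dev)
{
	int ret;

	/* Attach to a genpd described by the "power-domains" DT property. */
	ret = genpd_dev_pm_attach(dev);
	if (ret == -EPROBE_DEFER)
		return ret;	/* domain not ready yet; probe is retried */

	/* Other errors mean "no usable domain"; the device may still probe. */
	return my_driver_probe(dev);
}
#endif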
2136
2137 static const struct of_device_id idle_state_match[] = {
2138 { .compatible = "domain-idle-state", },
2139 { }
2140 };
2141
2142 static int genpd_parse_state(struct genpd_power_state *genpd_state,
2143 struct device_node *state_node)
2144 {
2145 int err;
2146 u32 residency;
2147 u32 entry_latency, exit_latency;
2148
2149 err = of_property_read_u32(state_node, "entry-latency-us",
2150 &entry_latency);
2151 if (err) {
2152 pr_debug(" * %s missing entry-latency-us property\n",
2153 state_node->full_name);
2154 return -EINVAL;
2155 }
2156
2157 err = of_property_read_u32(state_node, "exit-latency-us",
2158 &exit_latency);
2159 if (err) {
2160 pr_debug(" * %s missing exit-latency-us property\n",
2161 state_node->full_name);
2162 return -EINVAL;
2163 }
2164
2165 err = of_property_read_u32(state_node, "min-residency-us", &residency);
2166 if (!err)
2167 genpd_state->residency_ns = 1000 * residency;
2168
2169 genpd_state->power_on_latency_ns = 1000 * exit_latency;
2170 genpd_state->power_off_latency_ns = 1000 * entry_latency;
2171 genpd_state->fwnode = &state_node->fwnode;
2172
2173 return 0;
2174 }
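/*
 * Illustrative sketch (editor's addition): genpd_parse_state() consumes a
 * DT node compatible with "domain-idle-state", with latencies given in
 * microseconds.  The node label and the values below are hypothetical:
 *
 *	DOMAIN_RET: domain-retention {
 *		compatible = "domain-idle-state";
 *		entry-latency-us = <1000>;
 *		exit-latency-us = <2000>;
 *		min-residency-us = <10000>;
 *	};
 *
 * The parsed latencies are converted to nanoseconds in genpd_power_state.
 */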
2175
2176 /**
2177 * of_genpd_parse_idle_states() - Return an array of idle states for the genpd.
2178 *
2179 * @dn: The genpd device node
2180 * @states: The pointer to which the state array will be saved.
2181 * @n: The count of elements in the array returned from this function.
2182 *
2183 * Returns the device states parsed from the OF node. The memory for the
2184 * states is allocated by this function, and it is the responsibility of the
2185 * caller to free the memory after use.
2186 */
2187 int of_genpd_parse_idle_states(struct device_node *dn,
2188 struct genpd_power_state **states, int *n)
2189 {
2190 struct genpd_power_state *st;
2191 struct device_node *np;
2192 int i = 0;
2193 int err, ret;
2194 int count;
2195 struct of_phandle_iterator it;
2196 const struct of_device_id *match_id;
2197
2198 count = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
2199 if (count <= 0)
2200 return -EINVAL;
2201
2202 st = kcalloc(count, sizeof(*st), GFP_KERNEL);
2203 if (!st)
2204 return -ENOMEM;
2205
2206 /* Loop over the phandles until all the requested entries are found */
2207 of_for_each_phandle(&it, err, dn, "domain-idle-states", NULL, 0) {
2208 np = it.node;
2209 match_id = of_match_node(idle_state_match, np);
2210 if (!match_id)
2211 continue;
2212 ret = genpd_parse_state(&st[i++], np);
2213 if (ret) {
2214 pr_err("Parsing idle state node %s failed with err %d\n",
2215        np->full_name, ret);
2217 of_node_put(np);
2218 kfree(st);
2219 return ret;
2220 }
2221 }
2222
2223 *n = i;
2224 if (!i)
2225 kfree(st);
2226 else
2227 *states = st;
2228
2229 return 0;
2230 }
2231 EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
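/*
 * Illustrative sketch (editor's addition): a provider with DT-described
 * domain idle states can parse them once and hand the array to the genpd
 * before registering it.  "my_pd" is the hypothetical domain from the
 * earlier sketches; error handling is trimmed for brevity.
 */
#if 0
static int my_pd_init_states(struct device_node *np)
{
	struct genpd_power_state *states;
	int nr_states, ret;

	ret = of_genpd_parse_idle_states(np, &states, &nr_states);
	if (ret)
		return ret;

	/* The genpd keeps using the array; it must stay allocated. */
	my_pd.states = states;
	my_pd.state_count = nr_states;

	return pm_genpd_init(&my_pd, NULL, false);
}
#endif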
2232
2233 #endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
2234
2235
2236 /*** debugfs support ***/
2237
2238 #ifdef CONFIG_DEBUG_FS
2239 #include <linux/pm.h>
2240 #include <linux/device.h>
2241 #include <linux/debugfs.h>
2242 #include <linux/seq_file.h>
2243 #include <linux/init.h>
2244 #include <linux/kobject.h>
2245 static struct dentry *pm_genpd_debugfs_dir;
2246
2247 /*
2248 * TODO: This function is a slightly modified version of rtpm_status_show
2249 * from sysfs.c, so generalize it.
2250 */
2251 static void rtpm_status_str(struct seq_file *s, struct device *dev)
2252 {
2253 static const char * const status_lookup[] = {
2254 [RPM_ACTIVE] = "active",
2255 [RPM_RESUMING] = "resuming",
2256 [RPM_SUSPENDED] = "suspended",
2257 [RPM_SUSPENDING] = "suspending"
2258 };
2259 const char *p = "";
2260
2261 if (dev->power.runtime_error)
2262 p = "error";
2263 else if (dev->power.disable_depth)
2264 p = "unsupported";
2265 else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
2266 p = status_lookup[dev->power.runtime_status];
2267 else
2268 WARN_ON(1);
2269
2270 seq_puts(s, p);
2271 }
2272
2273 static int pm_genpd_summary_one(struct seq_file *s,
2274 struct generic_pm_domain *genpd)
2275 {
2276 static const char * const status_lookup[] = {
2277 [GPD_STATE_ACTIVE] = "on",
2278 [GPD_STATE_POWER_OFF] = "off"
2279 };
2280 struct pm_domain_data *pm_data;
2281 const char *kobj_path;
2282 struct gpd_link *link;
2283 char state[16];
2284 int ret;
2285
2286 ret = genpd_lock_interruptible(genpd);
2287 if (ret)
2288 return -ERESTARTSYS;
2289
2290 if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
2291 goto exit;
2292 if (!genpd_status_on(genpd))
2293 snprintf(state, sizeof(state), "%s-%u",
2294 status_lookup[genpd->status], genpd->state_idx);
2295 else
2296 snprintf(state, sizeof(state), "%s",
2297 status_lookup[genpd->status]);
2298 seq_printf(s, "%-30s %-15s ", genpd->name, state);
2299
2300 /*
2301 * Modifications on the list require holding locks on both
2302 * master and slave, so we are safe.
2303 * Also genpd->name is immutable.
2304 */
2305 list_for_each_entry(link, &genpd->master_links, master_node) {
2306 seq_printf(s, "%s", link->slave->name);
2307 if (!list_is_last(&link->master_node, &genpd->master_links))
2308 seq_puts(s, ", ");
2309 }
2310
2311 list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
2312 kobj_path = kobject_get_path(&pm_data->dev->kobj,
2313 genpd_is_irq_safe(genpd) ?
2314 GFP_ATOMIC : GFP_KERNEL);
2315 if (kobj_path == NULL)
2316 continue;
2317
2318 seq_printf(s, "\n %-50s ", kobj_path);
2319 rtpm_status_str(s, pm_data->dev);
2320 kfree(kobj_path);
2321 }
2322
2323 seq_puts(s, "\n");
2324 exit:
2325 genpd_unlock(genpd);
2326
2327 return 0;
2328 }
2329
2330 static int pm_genpd_summary_show(struct seq_file *s, void *data)
2331 {
2332 struct generic_pm_domain *genpd;
2333 int ret = 0;
2334
2335 seq_puts(s, "domain                          status          slaves\n");
2336 seq_puts(s, "    /device                                             runtime status\n");
2337 seq_puts(s, "----------------------------------------------------------------------\n");
2338
2339 ret = mutex_lock_interruptible(&gpd_list_lock);
2340 if (ret)
2341 return -ERESTARTSYS;
2342
2343 list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
2344 ret = pm_genpd_summary_one(s, genpd);
2345 if (ret)
2346 break;
2347 }
2348 mutex_unlock(&gpd_list_lock);
2349
2350 return ret;
2351 }
2352
2353 static int pm_genpd_summary_open(struct inode *inode, struct file *file)
2354 {
2355 return single_open(file, pm_genpd_summary_show, NULL);
2356 }
2357
2358 static const struct file_operations pm_genpd_summary_fops = {
2359 .open = pm_genpd_summary_open,
2360 .read = seq_read,
2361 .llseek = seq_lseek,
2362 .release = single_release,
2363 };
2364
2365 static int __init pm_genpd_debug_init(void)
2366 {
2367 struct dentry *d;
2368
2369 pm_genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
2370
2371 if (!pm_genpd_debugfs_dir)
2372 return -ENOMEM;
2373
2374 d = debugfs_create_file("pm_genpd_summary", S_IRUGO,
2375 pm_genpd_debugfs_dir, NULL, &pm_genpd_summary_fops);
2376 if (!d)
2377 return -ENOMEM;
2378
2379 return 0;
2380 }
2381 late_initcall(pm_genpd_debug_init);
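/*
 * Illustrative sketch (editor's addition): with debugfs mounted at the usual
 * /sys/kernel/debug, the file created above appears as
 * /sys/kernel/debug/pm_genpd/pm_genpd_summary.  Given the format strings in
 * pm_genpd_summary_one(), its output looks roughly like this hypothetical
 * example (domain and device names invented):
 *
 *	domain                          status          slaves
 *	    /device                                             runtime status
 *	----------------------------------------------------------------------
 *	my-power-domain                 on              my-child-domain
 *	    /devices/platform/12340000.uart                     suspended
 */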
2382
2383 static void __exit pm_genpd_debug_exit(void)
2384 {
2385 debugfs_remove_recursive(pm_genpd_debugfs_dir);
2386 }
2387 __exitcall(pm_genpd_debug_exit);
2388 #endif /* CONFIG_DEBUG_FS */