drivers/base/power/domain.c (mirror_ubuntu-eoan-kernel.git, git.proxmox.com), at commit "PM / Domains: Avoid a potential deadlock"
1 /*
2 * drivers/base/power/domain.c - Common code related to device power domains.
3 *
4 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
5 *
6 * This file is released under the GPLv2.
7 */
8
9 #define pr_fmt(fmt) "PM: " fmt
10
11 #include <linux/delay.h>
12 #include <linux/kernel.h>
13 #include <linux/io.h>
14 #include <linux/platform_device.h>
15 #include <linux/pm_opp.h>
16 #include <linux/pm_runtime.h>
17 #include <linux/pm_domain.h>
18 #include <linux/pm_qos.h>
19 #include <linux/pm_clock.h>
20 #include <linux/slab.h>
21 #include <linux/err.h>
22 #include <linux/sched.h>
23 #include <linux/suspend.h>
24 #include <linux/export.h>
25
26 #include "power.h"
27
28 #define GENPD_RETRY_MAX_MS 250 /* Approximate */
29
30 #define GENPD_DEV_CALLBACK(genpd, type, callback, dev) \
31 ({ \
32 type (*__routine)(struct device *__d); \
33 type __ret = (type)0; \
34 \
35 __routine = genpd->dev_ops.callback; \
36 if (__routine) { \
37 __ret = __routine(dev); \
38 } \
39 __ret; \
40 })
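GENPD_DEV_CALLBACK above is a GCC statement expression, so the macro evaluates to its last expression: the callback's return value when one is installed, otherwise a zero of the requested type. As an illustrative sketch (not code from this file), the genpd_stop_dev() helper defined further down behaves as if it were written:

    static int genpd_stop_dev(const struct generic_pm_domain *genpd,
                              struct device *dev)
    {
            int (*__routine)(struct device *__d) = genpd->dev_ops.stop;

            /* No ->stop callback installed means "nothing to do", i.e. 0. */
            return __routine ? __routine(dev) : 0;
    }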
41
42 static LIST_HEAD(gpd_list);
43 static DEFINE_MUTEX(gpd_list_lock);
44
45 struct genpd_lock_ops {
46 void (*lock)(struct generic_pm_domain *genpd);
47 void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
48 int (*lock_interruptible)(struct generic_pm_domain *genpd);
49 void (*unlock)(struct generic_pm_domain *genpd);
50 };
51
52 static void genpd_lock_mtx(struct generic_pm_domain *genpd)
53 {
54 mutex_lock(&genpd->mlock);
55 }
56
57 static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
58 int depth)
59 {
60 mutex_lock_nested(&genpd->mlock, depth);
61 }
62
63 static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
64 {
65 return mutex_lock_interruptible(&genpd->mlock);
66 }
67
68 static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
69 {
70 return mutex_unlock(&genpd->mlock);
71 }
72
73 static const struct genpd_lock_ops genpd_mtx_ops = {
74 .lock = genpd_lock_mtx,
75 .lock_nested = genpd_lock_nested_mtx,
76 .lock_interruptible = genpd_lock_interruptible_mtx,
77 .unlock = genpd_unlock_mtx,
78 };
79
80 static void genpd_lock_spin(struct generic_pm_domain *genpd)
81 __acquires(&genpd->slock)
82 {
83 unsigned long flags;
84
85 spin_lock_irqsave(&genpd->slock, flags);
86 genpd->lock_flags = flags;
87 }
88
89 static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
90 int depth)
91 __acquires(&genpd->slock)
92 {
93 unsigned long flags;
94
95 spin_lock_irqsave_nested(&genpd->slock, flags, depth);
96 genpd->lock_flags = flags;
97 }
98
99 static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
100 __acquires(&genpd->slock)
101 {
102 unsigned long flags;
103
104 spin_lock_irqsave(&genpd->slock, flags);
105 genpd->lock_flags = flags;
106 return 0;
107 }
108
109 static void genpd_unlock_spin(struct generic_pm_domain *genpd)
110 __releases(&genpd->slock)
111 {
112 spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
113 }
114
115 static const struct genpd_lock_ops genpd_spin_ops = {
116 .lock = genpd_lock_spin,
117 .lock_nested = genpd_lock_nested_spin,
118 .lock_interruptible = genpd_lock_interruptible_spin,
119 .unlock = genpd_unlock_spin,
120 };
121
122 #define genpd_lock(p) p->lock_ops->lock(p)
123 #define genpd_lock_nested(p, d) p->lock_ops->lock_nested(p, d)
124 #define genpd_lock_interruptible(p) p->lock_ops->lock_interruptible(p)
125 #define genpd_unlock(p) p->lock_ops->unlock(p)
126
127 #define genpd_status_on(genpd) (genpd->status == GPD_STATE_ACTIVE)
128 #define genpd_is_irq_safe(genpd) (genpd->flags & GENPD_FLAG_IRQ_SAFE)
129 #define genpd_is_always_on(genpd) (genpd->flags & GENPD_FLAG_ALWAYS_ON)
130 #define genpd_is_active_wakeup(genpd) (genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
131
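Which lock_ops table a domain ends up with is decided once, when the domain is initialized (see genpd_lock_init() further down), based on GENPD_FLAG_IRQ_SAFE; callers always go through the genpd_lock()/genpd_unlock() wrappers above. A minimal sketch of the selection and use:

    /* Illustrative only; mirrors genpd_lock_init() later in this file. */
    if (genpd->flags & GENPD_FLAG_IRQ_SAFE)
            genpd->lock_ops = &genpd_spin_ops;      /* safe in atomic context */
    else
            genpd->lock_ops = &genpd_mtx_ops;       /* may sleep */

    genpd_lock(genpd);      /* dispatches to spin_lock_irqsave() or mutex_lock() */
    /* ... manipulate genpd state ... */
    genpd_unlock(genpd);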
132 static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
133 const struct generic_pm_domain *genpd)
134 {
135 bool ret;
136
137 ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);
138
139 /*
140 * Warn once if an IRQ safe device is attached to a no sleep domain,
141 * to indicate a suboptimal configuration for PM. For an always on
142 * domain this isn't the case, so don't warn.
143 */
144 if (ret && !genpd_is_always_on(genpd))
145 dev_warn_once(dev, "PM domain %s will not be powered off\n",
146 genpd->name);
147
148 return ret;
149 }
150
151 /*
152 * Get the generic PM domain for a particular struct device.
153 * This validates the struct device pointer, the PM domain pointer,
154 * and checks that the PM domain pointer is a real generic PM domain.
155 * Any failure results in NULL being returned.
156 */
157 static struct generic_pm_domain *genpd_lookup_dev(struct device *dev)
158 {
159 struct generic_pm_domain *genpd = NULL, *gpd;
160
161 if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
162 return NULL;
163
164 mutex_lock(&gpd_list_lock);
165 list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
166 if (&gpd->domain == dev->pm_domain) {
167 genpd = gpd;
168 break;
169 }
170 }
171 mutex_unlock(&gpd_list_lock);
172
173 return genpd;
174 }
175
176 /*
177 * This should only be used where we are certain that the pm_domain
178 * attached to the device is a genpd domain.
179 */
180 static struct generic_pm_domain *dev_to_genpd(struct device *dev)
181 {
182 if (IS_ERR_OR_NULL(dev->pm_domain))
183 return ERR_PTR(-EINVAL);
184
185 return pd_to_genpd(dev->pm_domain);
186 }
187
188 static int genpd_stop_dev(const struct generic_pm_domain *genpd,
189 struct device *dev)
190 {
191 return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
192 }
193
194 static int genpd_start_dev(const struct generic_pm_domain *genpd,
195 struct device *dev)
196 {
197 return GENPD_DEV_CALLBACK(genpd, int, start, dev);
198 }
199
200 static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
201 {
202 bool ret = false;
203
204 if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
205 ret = !!atomic_dec_and_test(&genpd->sd_count);
206
207 return ret;
208 }
209
210 static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
211 {
212 atomic_inc(&genpd->sd_count);
213 smp_mb__after_atomic();
214 }
215
216 #ifdef CONFIG_DEBUG_FS
217 static void genpd_update_accounting(struct generic_pm_domain *genpd)
218 {
219 ktime_t delta, now;
220
221 now = ktime_get();
222 delta = ktime_sub(now, genpd->accounting_time);
223
224 /*
225 * If genpd->status is active, the domain has just come
226 * out of the off state, so account the elapsed time as
227 * idle time; otherwise account it as on time.
228 */
229 if (genpd->status == GPD_STATE_ACTIVE) {
230 int state_idx = genpd->state_idx;
231
232 genpd->states[state_idx].idle_time =
233 ktime_add(genpd->states[state_idx].idle_time, delta);
234 } else {
235 genpd->on_time = ktime_add(genpd->on_time, delta);
236 }
237
238 genpd->accounting_time = now;
239 }
240 #else
241 static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
242 #endif
243
244 static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
245 unsigned int state)
246 {
247 struct generic_pm_domain_data *pd_data;
248 struct pm_domain_data *pdd;
249 struct gpd_link *link;
250
251 /* New requested state is the same as the max requested state */
252 if (state == genpd->performance_state)
253 return state;
254
255 /* New requested state is higher than the max requested state */
256 if (state > genpd->performance_state)
257 return state;
258
259 /* Traverse all devices within the domain */
260 list_for_each_entry(pdd, &genpd->dev_list, list_node) {
261 pd_data = to_gpd_data(pdd);
262
263 if (pd_data->performance_state > state)
264 state = pd_data->performance_state;
265 }
266
267 /*
268 * Traverse all sub-domains within the domain. This can be
269 * done without any additional locking as the link->performance_state
270 * field is protected by the master genpd->lock, which is already taken.
271 *
272 * Also note that link->performance_state (subdomain's performance state
273 * requirement to master domain) is different from
274 * link->slave->performance_state (current performance state requirement
275 * of the devices/sub-domains of the subdomain) and so can have a
276 * different value.
277 *
278 * Note that we also take vote from powered-off sub-domains into account
279 * as the same is done for devices right now.
280 */
281 list_for_each_entry(link, &genpd->master_links, master_node) {
282 if (link->performance_state > state)
283 state = link->performance_state;
284 }
285
286 return state;
287 }
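A hypothetical worked example of the re-evaluation above: suppose one device lowers its vote while the other votes in the domain stay put.

    /*
     * Hypothetical numbers, to make the aggregation concrete:
     *
     *   genpd->performance_state = 5           (current aggregate)
     *   new vote: 1 (neither equal to nor above 5, so rescan everything)
     *   device votes after the update: 1, 2, 3 -> running max = 3
     *   subdomain link votes: 5                -> running max = 5
     *
     * The function returns 5: lowering one vote did not change the
     * aggregate, so _genpd_set_performance_state() becomes a no-op.
     */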
288
289 static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
290 unsigned int state, int depth)
291 {
292 struct generic_pm_domain *master;
293 struct gpd_link *link;
294 int master_state, ret;
295
296 if (state == genpd->performance_state)
297 return 0;
298
299 /* Propagate to masters of genpd */
300 list_for_each_entry(link, &genpd->slave_links, slave_node) {
301 master = link->master;
302
303 if (!master->set_performance_state)
304 continue;
305
306 /* Find master's performance state */
307 ret = dev_pm_opp_xlate_performance_state(genpd->opp_table,
308 master->opp_table,
309 state);
310 if (unlikely(ret < 0))
311 goto err;
312
313 master_state = ret;
314
315 genpd_lock_nested(master, depth + 1);
316
317 link->prev_performance_state = link->performance_state;
318 link->performance_state = master_state;
319 master_state = _genpd_reeval_performance_state(master,
320 master_state);
321 ret = _genpd_set_performance_state(master, master_state, depth + 1);
322 if (ret)
323 link->performance_state = link->prev_performance_state;
324
325 genpd_unlock(master);
326
327 if (ret)
328 goto err;
329 }
330
331 ret = genpd->set_performance_state(genpd, state);
332 if (ret)
333 goto err;
334
335 genpd->performance_state = state;
336 return 0;
337
338 err:
339 /* Encountered an error, let's roll back */
340 list_for_each_entry_continue_reverse(link, &genpd->slave_links,
341 slave_node) {
342 master = link->master;
343
344 if (!master->set_performance_state)
345 continue;
346
347 genpd_lock_nested(master, depth + 1);
348
349 master_state = link->prev_performance_state;
350 link->performance_state = master_state;
351
352 master_state = _genpd_reeval_performance_state(master,
353 master_state);
354 if (_genpd_set_performance_state(master, master_state, depth + 1)) {
355 pr_err("%s: Failed to roll back to %d performance state\n",
356 master->name, master_state);
357 }
358
359 genpd_unlock(master);
360 }
361
362 return ret;
363 }
364
365 /**
366 * dev_pm_genpd_set_performance_state - Set performance state of device's power
367 * domain.
368 *
369 * @dev: Device for which the performance state needs to be set.
370 * @state: Target performance state of the device. This can be set to 0 when the
371 * device no longer has any performance state constraints (the device
372 * then no longer takes part in determining the target performance
373 * state of the genpd).
374 *
375 * Callers must guarantee that the genpd is not detached while this routine is
376 * running.
377 *
378 * Returns 0 on success and a negative error value on failure.
379 */
380 int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
381 {
382 struct generic_pm_domain *genpd;
383 struct generic_pm_domain_data *gpd_data;
384 unsigned int prev;
385 int ret;
386
387 genpd = dev_to_genpd(dev);
388 if (IS_ERR(genpd))
389 return -ENODEV;
390
391 if (unlikely(!genpd->set_performance_state))
392 return -EINVAL;
393
394 if (unlikely(!dev->power.subsys_data ||
395 !dev->power.subsys_data->domain_data)) {
396 WARN_ON(1);
397 return -EINVAL;
398 }
399
400 genpd_lock(genpd);
401
402 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
403 prev = gpd_data->performance_state;
404 gpd_data->performance_state = state;
405
406 state = _genpd_reeval_performance_state(genpd, state);
407 ret = _genpd_set_performance_state(genpd, state, 0);
408 if (ret)
409 gpd_data->performance_state = prev;
410
411 genpd_unlock(genpd);
412
413 return ret;
414 }
415 EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
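A hypothetical consumer driver (surrounding context and the level "3" are invented) would pair a nonzero vote with a zero vote once the constraint is gone; the zero removes the device from the aggregation entirely:

    /* Illustrative consumer usage inside some driver function. */
    ret = dev_pm_genpd_set_performance_state(dev, 3);
    if (ret)
            return ret;

    /* ... performance-sensitive work ... */

    dev_pm_genpd_set_performance_state(dev, 0);     /* drop the constraint */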
416
417 static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
418 {
419 unsigned int state_idx = genpd->state_idx;
420 ktime_t time_start;
421 s64 elapsed_ns;
422 int ret;
423
424 if (!genpd->power_on)
425 return 0;
426
427 if (!timed)
428 return genpd->power_on(genpd);
429
430 time_start = ktime_get();
431 ret = genpd->power_on(genpd);
432 if (ret)
433 return ret;
434
435 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
436 if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
437 return ret;
438
439 genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
440 genpd->max_off_time_changed = true;
441 pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
442 genpd->name, "on", elapsed_ns);
443
444 return ret;
445 }
446
447 static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
448 {
449 unsigned int state_idx = genpd->state_idx;
450 ktime_t time_start;
451 s64 elapsed_ns;
452 int ret;
453
454 if (!genpd->power_off)
455 return 0;
456
457 if (!timed)
458 return genpd->power_off(genpd);
459
460 time_start = ktime_get();
461 ret = genpd->power_off(genpd);
462 if (ret)
463 return ret;
464
465 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
466 if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
467 return 0;
468
469 genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
470 genpd->max_off_time_changed = true;
471 pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
472 genpd->name, "off", elapsed_ns);
473
474 return 0;
475 }
476
477 /**
478 * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
479 * @genpd: PM domain to power off.
480 *
481 * Queue up the execution of genpd_power_off() unless it's already been done
482 * before.
483 */
484 static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
485 {
486 queue_work(pm_wq, &genpd->power_off_work);
487 }
488
489 /**
490 * genpd_power_off - Remove power from a given PM domain.
491 * @genpd: PM domain to power down.
492 * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
493 * RPM status of the related device is in an intermediate state, not yet turned
494 * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not
495 * be RPM_SUSPENDED, while it tries to power off the PM domain.
496 *
497 * If all of the @genpd's devices have been suspended and all of its subdomains
498 * have been powered down, remove power from @genpd.
499 */
500 static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
501 unsigned int depth)
502 {
503 struct pm_domain_data *pdd;
504 struct gpd_link *link;
505 unsigned int not_suspended = 0;
506
507 /*
508 * Do not try to power off the domain in the following situations:
509 * (1) The domain is already in the "power off" state.
510 * (2) System suspend is in progress.
511 */
512 if (!genpd_status_on(genpd) || genpd->prepared_count > 0)
513 return 0;
514
515 /*
516 * Abort power off for the PM domain in the following situations:
517 * (1) The domain is configured as always on.
518 * (2) When the domain has a subdomain being powered on.
519 */
520 if (genpd_is_always_on(genpd) || atomic_read(&genpd->sd_count) > 0)
521 return -EBUSY;
522
523 list_for_each_entry(pdd, &genpd->dev_list, list_node) {
524 enum pm_qos_flags_status stat;
525
526 stat = dev_pm_qos_flags(pdd->dev, PM_QOS_FLAG_NO_POWER_OFF);
527 if (stat > PM_QOS_FLAGS_NONE)
528 return -EBUSY;
529
530 /*
531 * Do not allow PM domain to be powered off, when an IRQ safe
532 * device is part of a non-IRQ safe domain.
533 */
534 if (!pm_runtime_suspended(pdd->dev) ||
535 irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd))
536 not_suspended++;
537 }
538
539 if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
540 return -EBUSY;
541
542 if (genpd->gov && genpd->gov->power_down_ok) {
543 if (!genpd->gov->power_down_ok(&genpd->domain))
544 return -EAGAIN;
545 }
546
547 /* Default to shallowest state. */
548 if (!genpd->gov)
549 genpd->state_idx = 0;
550
551 if (genpd->power_off) {
552 int ret;
553
554 if (atomic_read(&genpd->sd_count) > 0)
555 return -EBUSY;
556
557 /*
558 * If sd_count > 0 at this point, one of the subdomains hasn't
559 * managed to call genpd_power_on() for the master yet after
560 * incrementing it. In that case genpd_power_on() will wait
561 * for us to drop the lock, so we can call .power_off() and let
562 * the genpd_power_on() restore power for us (this shouldn't
563 * happen very often).
564 */
565 ret = _genpd_power_off(genpd, true);
566 if (ret)
567 return ret;
568 }
569
570 genpd->status = GPD_STATE_POWER_OFF;
571 genpd_update_accounting(genpd);
572
573 list_for_each_entry(link, &genpd->slave_links, slave_node) {
574 genpd_sd_counter_dec(link->master);
575 genpd_lock_nested(link->master, depth + 1);
576 genpd_power_off(link->master, false, depth + 1);
577 genpd_unlock(link->master);
578 }
579
580 return 0;
581 }
582
583 /**
584 * genpd_power_on - Restore power to a given PM domain and its masters.
585 * @genpd: PM domain to power up.
586 * @depth: nesting count for lockdep.
587 *
588 * Restore power to @genpd and all of its masters so that it is possible to
589 * resume a device belonging to it.
590 */
591 static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
592 {
593 struct gpd_link *link;
594 int ret = 0;
595
596 if (genpd_status_on(genpd))
597 return 0;
598
599 /*
600 * The list is guaranteed not to change while the loop below is being
601 * executed, unless one of the masters' .power_on() callbacks fiddles
602 * with it.
603 */
604 list_for_each_entry(link, &genpd->slave_links, slave_node) {
605 struct generic_pm_domain *master = link->master;
606
607 genpd_sd_counter_inc(master);
608
609 genpd_lock_nested(master, depth + 1);
610 ret = genpd_power_on(master, depth + 1);
611 genpd_unlock(master);
612
613 if (ret) {
614 genpd_sd_counter_dec(master);
615 goto err;
616 }
617 }
618
619 ret = _genpd_power_on(genpd, true);
620 if (ret)
621 goto err;
622
623 genpd->status = GPD_STATE_ACTIVE;
624 genpd_update_accounting(genpd);
625
626 return 0;
627
628 err:
629 list_for_each_entry_continue_reverse(link,
630 &genpd->slave_links,
631 slave_node) {
632 genpd_sd_counter_dec(link->master);
633 genpd_lock_nested(link->master, depth + 1);
634 genpd_power_off(link->master, false, depth + 1);
635 genpd_unlock(link->master);
636 }
637
638 return ret;
639 }
640
641 static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
642 unsigned long val, void *ptr)
643 {
644 struct generic_pm_domain_data *gpd_data;
645 struct device *dev;
646
647 gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
648 dev = gpd_data->base.dev;
649
650 for (;;) {
651 struct generic_pm_domain *genpd;
652 struct pm_domain_data *pdd;
653
654 spin_lock_irq(&dev->power.lock);
655
656 pdd = dev->power.subsys_data ?
657 dev->power.subsys_data->domain_data : NULL;
658 if (pdd) {
659 to_gpd_data(pdd)->td.constraint_changed = true;
660 genpd = dev_to_genpd(dev);
661 } else {
662 genpd = ERR_PTR(-ENODATA);
663 }
664
665 spin_unlock_irq(&dev->power.lock);
666
667 if (!IS_ERR(genpd)) {
668 genpd_lock(genpd);
669 genpd->max_off_time_changed = true;
670 genpd_unlock(genpd);
671 }
672
673 dev = dev->parent;
674 if (!dev || dev->power.ignore_children)
675 break;
676 }
677
678 return NOTIFY_DONE;
679 }
680
681 /**
682 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
683 * @work: Work structure used for scheduling the execution of this function.
684 */
685 static void genpd_power_off_work_fn(struct work_struct *work)
686 {
687 struct generic_pm_domain *genpd;
688
689 genpd = container_of(work, struct generic_pm_domain, power_off_work);
690
691 genpd_lock(genpd);
692 genpd_power_off(genpd, false, 0);
693 genpd_unlock(genpd);
694 }
695
696 /**
697 * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
698 * @dev: Device to handle.
699 */
700 static int __genpd_runtime_suspend(struct device *dev)
701 {
702 int (*cb)(struct device *__dev);
703
704 if (dev->type && dev->type->pm)
705 cb = dev->type->pm->runtime_suspend;
706 else if (dev->class && dev->class->pm)
707 cb = dev->class->pm->runtime_suspend;
708 else if (dev->bus && dev->bus->pm)
709 cb = dev->bus->pm->runtime_suspend;
710 else
711 cb = NULL;
712
713 if (!cb && dev->driver && dev->driver->pm)
714 cb = dev->driver->pm->runtime_suspend;
715
716 return cb ? cb(dev) : 0;
717 }
718
719 /**
720 * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
721 * @dev: Device to handle.
722 */
723 static int __genpd_runtime_resume(struct device *dev)
724 {
725 int (*cb)(struct device *__dev);
726
727 if (dev->type && dev->type->pm)
728 cb = dev->type->pm->runtime_resume;
729 else if (dev->class && dev->class->pm)
730 cb = dev->class->pm->runtime_resume;
731 else if (dev->bus && dev->bus->pm)
732 cb = dev->bus->pm->runtime_resume;
733 else
734 cb = NULL;
735
736 if (!cb && dev->driver && dev->driver->pm)
737 cb = dev->driver->pm->runtime_resume;
738
739 return cb ? cb(dev) : 0;
740 }
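Both walkers above implement the usual PM callback precedence: device type beats class, class beats bus, and the driver's own callback is the fallback whenever that search yields NULL. With hypothetical shorthand pointers, the selection reduces to:

    /* Illustrative precedence only; type_pm etc. are invented shorthands
     * for the layers' dev_pm_ops (or NULL when a layer has none). */
    cb = type_pm  ? type_pm->runtime_resume  :
         class_pm ? class_pm->runtime_resume :
         bus_pm   ? bus_pm->runtime_resume   : NULL;
    if (!cb && driver_pm)
            cb = driver_pm->runtime_resume;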
741
742 /**
743 * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
744 * @dev: Device to suspend.
745 *
746 * Carry out a runtime suspend of a device under the assumption that its
747 * pm_domain field points to the domain member of an object of type
748 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
749 */
750 static int genpd_runtime_suspend(struct device *dev)
751 {
752 struct generic_pm_domain *genpd;
753 bool (*suspend_ok)(struct device *__dev);
754 struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
755 bool runtime_pm = pm_runtime_enabled(dev);
756 ktime_t time_start;
757 s64 elapsed_ns;
758 int ret;
759
760 dev_dbg(dev, "%s()\n", __func__);
761
762 genpd = dev_to_genpd(dev);
763 if (IS_ERR(genpd))
764 return -EINVAL;
765
766 /*
767 * A runtime PM centric subsystem/driver may re-use the runtime PM
768 * callbacks for other purposes than runtime PM. In those scenarios
769 * runtime PM is disabled. Under these circumstances, we shall skip
770 * validating/measuring the PM QoS latency.
771 */
772 suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
773 if (runtime_pm && suspend_ok && !suspend_ok(dev))
774 return -EBUSY;
775
776 /* Measure suspend latency. */
777 time_start = 0;
778 if (runtime_pm)
779 time_start = ktime_get();
780
781 ret = __genpd_runtime_suspend(dev);
782 if (ret)
783 return ret;
784
785 ret = genpd_stop_dev(genpd, dev);
786 if (ret) {
787 __genpd_runtime_resume(dev);
788 return ret;
789 }
790
791 /* Update suspend latency value if the measured time exceeds it. */
792 if (runtime_pm) {
793 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
794 if (elapsed_ns > td->suspend_latency_ns) {
795 td->suspend_latency_ns = elapsed_ns;
796 dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
797 elapsed_ns);
798 genpd->max_off_time_changed = true;
799 td->constraint_changed = true;
800 }
801 }
802
803 /*
804 * If power.irq_safe is set, this routine may be run with
805 * IRQs disabled, so suspend only if the PM domain also is irq_safe.
806 */
807 if (irq_safe_dev_in_no_sleep_domain(dev, genpd))
808 return 0;
809
810 genpd_lock(genpd);
811 genpd_power_off(genpd, true, 0);
812 genpd_unlock(genpd);
813
814 return 0;
815 }
816
817 /**
818 * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
819 * @dev: Device to resume.
820 *
821 * Carry out a runtime resume of a device under the assumption that its
822 * pm_domain field points to the domain member of an object of type
823 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
824 */
825 static int genpd_runtime_resume(struct device *dev)
826 {
827 struct generic_pm_domain *genpd;
828 struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
829 bool runtime_pm = pm_runtime_enabled(dev);
830 ktime_t time_start;
831 s64 elapsed_ns;
832 int ret;
833 bool timed = true;
834
835 dev_dbg(dev, "%s()\n", __func__);
836
837 genpd = dev_to_genpd(dev);
838 if (IS_ERR(genpd))
839 return -EINVAL;
840
841 /*
842 * As we don't power off a non-IRQ-safe domain that holds
843 * an IRQ-safe device, we don't need to restore power to it.
844 */
845 if (irq_safe_dev_in_no_sleep_domain(dev, genpd)) {
846 timed = false;
847 goto out;
848 }
849
850 genpd_lock(genpd);
851 ret = genpd_power_on(genpd, 0);
852 genpd_unlock(genpd);
853
854 if (ret)
855 return ret;
856
857 out:
858 /* Measure resume latency. */
859 time_start = 0;
860 if (timed && runtime_pm)
861 time_start = ktime_get();
862
863 ret = genpd_start_dev(genpd, dev);
864 if (ret)
865 goto err_poweroff;
866
867 ret = __genpd_runtime_resume(dev);
868 if (ret)
869 goto err_stop;
870
871 /* Update resume latency value if the measured time exceeds it. */
872 if (timed && runtime_pm) {
873 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
874 if (elapsed_ns > td->resume_latency_ns) {
875 td->resume_latency_ns = elapsed_ns;
876 dev_dbg(dev, "resume latency exceeded, %lld ns\n",
877 elapsed_ns);
878 genpd->max_off_time_changed = true;
879 td->constraint_changed = true;
880 }
881 }
882
883 return 0;
884
885 err_stop:
886 genpd_stop_dev(genpd, dev);
887 err_poweroff:
888 if (!pm_runtime_is_irq_safe(dev) ||
889 (pm_runtime_is_irq_safe(dev) && genpd_is_irq_safe(genpd))) {
890 genpd_lock(genpd);
891 genpd_power_off(genpd, true, 0);
892 genpd_unlock(genpd);
893 }
894
895 return ret;
896 }
897
898 static bool pd_ignore_unused;
899 static int __init pd_ignore_unused_setup(char *__unused)
900 {
901 pd_ignore_unused = true;
902 return 1;
903 }
904 __setup("pd_ignore_unused", pd_ignore_unused_setup);
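pd_ignore_unused is a kernel command-line parameter; booting with it makes the late initcall below skip powering off unused domains, which helps when bringing up a platform where a domain is still in use by hardware whose driver has not attached its device yet. An illustrative boot configuration (console and root device names invented):

    setenv bootargs "console=ttyS0 root=/dev/mmcblk0p2 pd_ignore_unused"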
905
906 /**
907 * genpd_power_off_unused - Power off all PM domains with no devices in use.
908 */
909 static int __init genpd_power_off_unused(void)
910 {
911 struct generic_pm_domain *genpd;
912
913 if (pd_ignore_unused) {
914 pr_warn("genpd: Not disabling unused power domains\n");
915 return 0;
916 }
917
918 mutex_lock(&gpd_list_lock);
919
920 list_for_each_entry(genpd, &gpd_list, gpd_list_node)
921 genpd_queue_power_off_work(genpd);
922
923 mutex_unlock(&gpd_list_lock);
924
925 return 0;
926 }
927 late_initcall(genpd_power_off_unused);
928
929 #if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_GENERIC_DOMAINS_OF)
930
931 static bool genpd_present(const struct generic_pm_domain *genpd)
932 {
933 const struct generic_pm_domain *gpd;
934
935 if (IS_ERR_OR_NULL(genpd))
936 return false;
937
938 list_for_each_entry(gpd, &gpd_list, gpd_list_node)
939 if (gpd == genpd)
940 return true;
941
942 return false;
943 }
944
945 #endif
946
947 #ifdef CONFIG_PM_SLEEP
948
949 /**
950 * genpd_sync_power_off - Synchronously power off a PM domain and its masters.
951 * @genpd: PM domain to power off, if possible.
952 * @use_lock: whether to hold the master domains' locks while propagating.
953 * @depth: nesting count for lockdep.
954 *
955 * Check if the given PM domain can be powered off (during system suspend or
956 * hibernation) and do that if so. Also, in that case propagate to its masters.
957 *
958 * This function is only called in "noirq" and "syscore" stages of system power
959 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
960 * these cases the lock must be held.
961 */
962 static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
963 unsigned int depth)
964 {
965 struct gpd_link *link;
966
967 if (!genpd_status_on(genpd) || genpd_is_always_on(genpd))
968 return;
969
970 if (genpd->suspended_count != genpd->device_count
971 || atomic_read(&genpd->sd_count) > 0)
972 return;
973
974 /* Choose the deepest state when suspending */
975 genpd->state_idx = genpd->state_count - 1;
976 if (_genpd_power_off(genpd, false))
977 return;
978
979 genpd->status = GPD_STATE_POWER_OFF;
980
981 list_for_each_entry(link, &genpd->slave_links, slave_node) {
982 genpd_sd_counter_dec(link->master);
983
984 if (use_lock)
985 genpd_lock_nested(link->master, depth + 1);
986
987 genpd_sync_power_off(link->master, use_lock, depth + 1);
988
989 if (use_lock)
990 genpd_unlock(link->master);
991 }
992 }
993
994 /**
995 * genpd_sync_power_on - Synchronously power on a PM domain and its masters.
996 * @genpd: PM domain to power on.
997 * @use_lock: whether to hold the master domains' locks while propagating.
998 * @depth: nesting count for lockdep.
999 *
1000 * This function is only called in "noirq" and "syscore" stages of system power
1001 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
1002 * these cases the lock must be held.
1003 */
1004 static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
1005 unsigned int depth)
1006 {
1007 struct gpd_link *link;
1008
1009 if (genpd_status_on(genpd))
1010 return;
1011
1012 list_for_each_entry(link, &genpd->slave_links, slave_node) {
1013 genpd_sd_counter_inc(link->master);
1014
1015 if (use_lock)
1016 genpd_lock_nested(link->master, depth + 1);
1017
1018 genpd_sync_power_on(link->master, use_lock, depth + 1);
1019
1020 if (use_lock)
1021 genpd_unlock(link->master);
1022 }
1023
1024 _genpd_power_on(genpd, false);
1025
1026 genpd->status = GPD_STATE_ACTIVE;
1027 }
1028
1029 /**
1030 * resume_needed - Check whether to resume a device before system suspend.
1031 * @dev: Device to check.
1032 * @genpd: PM domain the device belongs to.
1033 *
1034 * There are two cases in which a device that can wake up the system from sleep
1035 * states should be resumed by genpd_prepare(): (1) if the device is enabled
1036 * to wake up the system and it has to remain active for this purpose while the
1037 * system is in the sleep state and (2) if the device is not enabled to wake up
1038 * the system from sleep states and it generally doesn't generate wakeup signals
1039 * by itself (those signals are generated on its behalf by other parts of the
1040 * system). In the latter case it may be necessary to reconfigure the device's
1041 * wakeup settings during system suspend, because it may have been set up to
1042 * signal remote wakeup from the system's working state as needed by runtime PM.
1043 * Return 'true' in either of the above cases.
1044 */
1045 static bool resume_needed(struct device *dev,
1046 const struct generic_pm_domain *genpd)
1047 {
1048 bool active_wakeup;
1049
1050 if (!device_can_wakeup(dev))
1051 return false;
1052
1053 active_wakeup = genpd_is_active_wakeup(genpd);
1054 return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
1055 }
1056
1057 /**
1058 * genpd_prepare - Start power transition of a device in a PM domain.
1059 * @dev: Device to start the transition of.
1060 *
1061 * Start a power transition of a device (during a system-wide power transition)
1062 * under the assumption that its pm_domain field points to the domain member of
1063 * an object of type struct generic_pm_domain representing a PM domain
1064 * consisting of I/O devices.
1065 */
1066 static int genpd_prepare(struct device *dev)
1067 {
1068 struct generic_pm_domain *genpd;
1069 int ret;
1070
1071 dev_dbg(dev, "%s()\n", __func__);
1072
1073 genpd = dev_to_genpd(dev);
1074 if (IS_ERR(genpd))
1075 return -EINVAL;
1076
1077 /*
1078 * If a wakeup request is pending for the device, it should be woken up
1079 * at this point and a system wakeup event should be reported if it's
1080 * set up to wake up the system from sleep states.
1081 */
1082 if (resume_needed(dev, genpd))
1083 pm_runtime_resume(dev);
1084
1085 genpd_lock(genpd);
1086
1087 if (genpd->prepared_count++ == 0)
1088 genpd->suspended_count = 0;
1089
1090 genpd_unlock(genpd);
1091
1092 ret = pm_generic_prepare(dev);
1093 if (ret < 0) {
1094 genpd_lock(genpd);
1095
1096 genpd->prepared_count--;
1097
1098 genpd_unlock(genpd);
1099 }
1100
1101 /* Never return 1, as genpd doesn't cope with the direct_complete path. */
1102 return ret >= 0 ? 0 : ret;
1103 }
1104
1105 /**
1106 * genpd_finish_suspend - Completion of suspend or hibernation of device in an
1107 * I/O PM domain.
1108 * @dev: Device to suspend.
1109 * @poweroff: Specifies if this is a poweroff_noirq or suspend_noirq callback.
1110 *
1111 * Stop the device and remove power from the domain if all devices in it have
1112 * been stopped.
1113 */
1114 static int genpd_finish_suspend(struct device *dev, bool poweroff)
1115 {
1116 struct generic_pm_domain *genpd;
1117 int ret = 0;
1118
1119 genpd = dev_to_genpd(dev);
1120 if (IS_ERR(genpd))
1121 return -EINVAL;
1122
1123 if (poweroff)
1124 ret = pm_generic_poweroff_noirq(dev);
1125 else
1126 ret = pm_generic_suspend_noirq(dev);
1127 if (ret)
1128 return ret;
1129
1130 if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
1131 return 0;
1132
1133 if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1134 !pm_runtime_status_suspended(dev)) {
1135 ret = genpd_stop_dev(genpd, dev);
1136 if (ret) {
1137 if (poweroff)
1138 pm_generic_restore_noirq(dev);
1139 else
1140 pm_generic_resume_noirq(dev);
1141 return ret;
1142 }
1143 }
1144
1145 genpd_lock(genpd);
1146 genpd->suspended_count++;
1147 genpd_sync_power_off(genpd, true, 0);
1148 genpd_unlock(genpd);
1149
1150 return 0;
1151 }
1152
1153 /**
1154 * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
1155 * @dev: Device to suspend.
1156 *
1157 * Stop the device and remove power from the domain if all devices in it have
1158 * been stopped.
1159 */
1160 static int genpd_suspend_noirq(struct device *dev)
1161 {
1162 dev_dbg(dev, "%s()\n", __func__);
1163
1164 return genpd_finish_suspend(dev, false);
1165 }
1166
1167 /**
1168 * genpd_resume_noirq - Start of resume of device in an I/O PM domain.
1169 * @dev: Device to resume.
1170 *
1171 * Restore power to the device's PM domain, if necessary, and start the device.
1172 */
1173 static int genpd_resume_noirq(struct device *dev)
1174 {
1175 struct generic_pm_domain *genpd;
1176 int ret;
1177
1178 dev_dbg(dev, "%s()\n", __func__);
1179
1180 genpd = dev_to_genpd(dev);
1181 if (IS_ERR(genpd))
1182 return -EINVAL;
1183
1184 if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
1185 return pm_generic_resume_noirq(dev);
1186
1187 genpd_lock(genpd);
1188 genpd_sync_power_on(genpd, true, 0);
1189 genpd->suspended_count--;
1190 genpd_unlock(genpd);
1191
1192 if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1193 !pm_runtime_status_suspended(dev)) {
1194 ret = genpd_start_dev(genpd, dev);
1195 if (ret)
1196 return ret;
1197 }
1198
1199 return pm_generic_resume_noirq(dev);
1200 }
1201
1202 /**
1203 * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
1204 * @dev: Device to freeze.
1205 *
1206 * Carry out a late freeze of a device under the assumption that its
1207 * pm_domain field points to the domain member of an object of type
1208 * struct generic_pm_domain representing a power domain consisting of I/O
1209 * devices.
1210 */
1211 static int genpd_freeze_noirq(struct device *dev)
1212 {
1213 const struct generic_pm_domain *genpd;
1214 int ret = 0;
1215
1216 dev_dbg(dev, "%s()\n", __func__);
1217
1218 genpd = dev_to_genpd(dev);
1219 if (IS_ERR(genpd))
1220 return -EINVAL;
1221
1222 ret = pm_generic_freeze_noirq(dev);
1223 if (ret)
1224 return ret;
1225
1226 if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1227 !pm_runtime_status_suspended(dev))
1228 ret = genpd_stop_dev(genpd, dev);
1229
1230 return ret;
1231 }
1232
1233 /**
1234 * genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
1235 * @dev: Device to thaw.
1236 *
1237 * Start the device, unless power has been removed from the domain already
1238 * before the system transition.
1239 */
1240 static int genpd_thaw_noirq(struct device *dev)
1241 {
1242 const struct generic_pm_domain *genpd;
1243 int ret = 0;
1244
1245 dev_dbg(dev, "%s()\n", __func__);
1246
1247 genpd = dev_to_genpd(dev);
1248 if (IS_ERR(genpd))
1249 return -EINVAL;
1250
1251 if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1252 !pm_runtime_status_suspended(dev)) {
1253 ret = genpd_start_dev(genpd, dev);
1254 if (ret)
1255 return ret;
1256 }
1257
1258 return pm_generic_thaw_noirq(dev);
1259 }
1260
1261 /**
1262 * genpd_poweroff_noirq - Completion of hibernation of device in an
1263 * I/O PM domain.
1264 * @dev: Device to poweroff.
1265 *
1266 * Stop the device and remove power from the domain if all devices in it have
1267 * been stopped.
1268 */
1269 static int genpd_poweroff_noirq(struct device *dev)
1270 {
1271 dev_dbg(dev, "%s()\n", __func__);
1272
1273 return genpd_finish_suspend(dev, true);
1274 }
1275
1276 /**
1277 * genpd_restore_noirq - Start of restore of device in an I/O PM domain.
1278 * @dev: Device to resume.
1279 *
1280 * Make sure the domain will be in the same power state as before the
1281 * hibernation the system is resuming from and start the device if necessary.
1282 */
1283 static int genpd_restore_noirq(struct device *dev)
1284 {
1285 struct generic_pm_domain *genpd;
1286 int ret = 0;
1287
1288 dev_dbg(dev, "%s()\n", __func__);
1289
1290 genpd = dev_to_genpd(dev);
1291 if (IS_ERR(genpd))
1292 return -EINVAL;
1293
1294 /*
1295 * At this point suspended_count == 0 means we are being run for the
1296 * first time for the given domain in the present cycle.
1297 */
1298 genpd_lock(genpd);
1299 if (genpd->suspended_count++ == 0)
1300 /*
1301 * The boot kernel might put the domain into arbitrary state,
1302 * so make it appear as powered off to genpd_sync_power_on(),
1303 * so that it tries to power it on in case it was really off.
1304 */
1305 genpd->status = GPD_STATE_POWER_OFF;
1306
1307 genpd_sync_power_on(genpd, true, 0);
1308 genpd_unlock(genpd);
1309
1310 if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1311 !pm_runtime_status_suspended(dev)) {
1312 ret = genpd_start_dev(genpd, dev);
1313 if (ret)
1314 return ret;
1315 }
1316
1317 return pm_generic_restore_noirq(dev);
1318 }
1319
1320 /**
1321 * genpd_complete - Complete power transition of a device in a power domain.
1322 * @dev: Device to complete the transition of.
1323 *
1324 * Complete a power transition of a device (during a system-wide power
1325 * transition) under the assumption that its pm_domain field points to the
1326 * domain member of an object of type struct generic_pm_domain representing
1327 * a power domain consisting of I/O devices.
1328 */
1329 static void genpd_complete(struct device *dev)
1330 {
1331 struct generic_pm_domain *genpd;
1332
1333 dev_dbg(dev, "%s()\n", __func__);
1334
1335 genpd = dev_to_genpd(dev);
1336 if (IS_ERR(genpd))
1337 return;
1338
1339 pm_generic_complete(dev);
1340
1341 genpd_lock(genpd);
1342
1343 genpd->prepared_count--;
1344 if (!genpd->prepared_count)
1345 genpd_queue_power_off_work(genpd);
1346
1347 genpd_unlock(genpd);
1348 }
1349
1350 /**
1351 * genpd_syscore_switch - Switch power during system core suspend or resume.
1352 * @dev: Device that normally is marked as "always on" to switch power for.
1353 *
1354 * This routine may only be called during the system core (syscore) suspend or
1355 * resume phase for devices whose "always on" flags are set.
1356 */
1357 static void genpd_syscore_switch(struct device *dev, bool suspend)
1358 {
1359 struct generic_pm_domain *genpd;
1360
1361 genpd = dev_to_genpd(dev);
1362 if (!genpd_present(genpd))
1363 return;
1364
1365 if (suspend) {
1366 genpd->suspended_count++;
1367 genpd_sync_power_off(genpd, false, 0);
1368 } else {
1369 genpd_sync_power_on(genpd, false, 0);
1370 genpd->suspended_count--;
1371 }
1372 }
1373
1374 void pm_genpd_syscore_poweroff(struct device *dev)
1375 {
1376 genpd_syscore_switch(dev, true);
1377 }
1378 EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff);
1379
1380 void pm_genpd_syscore_poweron(struct device *dev)
1381 {
1382 genpd_syscore_switch(dev, false);
1383 }
1384 EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);
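The syscore helpers exist for "always on" devices, typically clocksources and clockevents, that must flip domain power directly at the very bottom of system suspend and resume, where normal PM callbacks no longer run. A hypothetical timer driver (all names invented) would use them like this:

    static struct device *my_timer_dev;     /* hypothetical timer's device */

    static void my_timer_suspend(struct clock_event_device *ced)
    {
            /* ... quiesce the timer hardware ... */
            pm_genpd_syscore_poweroff(my_timer_dev);
    }

    static void my_timer_resume(struct clock_event_device *ced)
    {
            pm_genpd_syscore_poweron(my_timer_dev);
            /* ... reprogram the timer hardware ... */
    }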
1385
1386 #else /* !CONFIG_PM_SLEEP */
1387
1388 #define genpd_prepare NULL
1389 #define genpd_suspend_noirq NULL
1390 #define genpd_resume_noirq NULL
1391 #define genpd_freeze_noirq NULL
1392 #define genpd_thaw_noirq NULL
1393 #define genpd_poweroff_noirq NULL
1394 #define genpd_restore_noirq NULL
1395 #define genpd_complete NULL
1396
1397 #endif /* CONFIG_PM_SLEEP */
1398
1399 static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
1400 struct gpd_timing_data *td)
1401 {
1402 struct generic_pm_domain_data *gpd_data;
1403 int ret;
1404
1405 ret = dev_pm_get_subsys_data(dev);
1406 if (ret)
1407 return ERR_PTR(ret);
1408
1409 gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
1410 if (!gpd_data) {
1411 ret = -ENOMEM;
1412 goto err_put;
1413 }
1414
1415 if (td)
1416 gpd_data->td = *td;
1417
1418 gpd_data->base.dev = dev;
1419 gpd_data->td.constraint_changed = true;
1420 gpd_data->td.effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
1421 gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
1422
1423 spin_lock_irq(&dev->power.lock);
1424
1425 if (dev->power.subsys_data->domain_data) {
1426 ret = -EINVAL;
1427 goto err_free;
1428 }
1429
1430 dev->power.subsys_data->domain_data = &gpd_data->base;
1431
1432 spin_unlock_irq(&dev->power.lock);
1433
1434 return gpd_data;
1435
1436 err_free:
1437 spin_unlock_irq(&dev->power.lock);
1438 kfree(gpd_data);
1439 err_put:
1440 dev_pm_put_subsys_data(dev);
1441 return ERR_PTR(ret);
1442 }
1443
1444 static void genpd_free_dev_data(struct device *dev,
1445 struct generic_pm_domain_data *gpd_data)
1446 {
1447 spin_lock_irq(&dev->power.lock);
1448
1449 dev->power.subsys_data->domain_data = NULL;
1450
1451 spin_unlock_irq(&dev->power.lock);
1452
1453 kfree(gpd_data);
1454 dev_pm_put_subsys_data(dev);
1455 }
1456
1457 static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1458 struct gpd_timing_data *td)
1459 {
1460 struct generic_pm_domain_data *gpd_data;
1461 int ret;
1462
1463 dev_dbg(dev, "%s()\n", __func__);
1464
1465 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
1466 return -EINVAL;
1467
1468 gpd_data = genpd_alloc_dev_data(dev, td);
1469 if (IS_ERR(gpd_data))
1470 return PTR_ERR(gpd_data);
1471
1472 ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
1473 if (ret)
1474 goto out;
1475
1476 genpd_lock(genpd);
1477
1478 dev_pm_domain_set(dev, &genpd->domain);
1479
1480 genpd->device_count++;
1481 genpd->max_off_time_changed = true;
1482
1483 list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
1484
1485 genpd_unlock(genpd);
1486 out:
1487 if (ret)
1488 genpd_free_dev_data(dev, gpd_data);
1489 else
1490 dev_pm_qos_add_notifier(dev, &gpd_data->nb);
1491
1492 return ret;
1493 }
1494
1495 /**
1496 * pm_genpd_add_device - Add a device to an I/O PM domain.
1497 * @genpd: PM domain to add the device to.
1498 * @dev: Device to be added.
1499 */
1500 int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
1501 {
1502 int ret;
1503
1504 mutex_lock(&gpd_list_lock);
1505 ret = genpd_add_device(genpd, dev, NULL);
1506 mutex_unlock(&gpd_list_lock);
1507
1508 return ret;
1509 }
1510 EXPORT_SYMBOL_GPL(pm_genpd_add_device);
1511
1512 static int genpd_remove_device(struct generic_pm_domain *genpd,
1513 struct device *dev)
1514 {
1515 struct generic_pm_domain_data *gpd_data;
1516 struct pm_domain_data *pdd;
1517 int ret = 0;
1518
1519 dev_dbg(dev, "%s()\n", __func__);
1520
1521 pdd = dev->power.subsys_data->domain_data;
1522 gpd_data = to_gpd_data(pdd);
1523 dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
1524
1525 genpd_lock(genpd);
1526
1527 if (genpd->prepared_count > 0) {
1528 ret = -EAGAIN;
1529 goto out;
1530 }
1531
1532 genpd->device_count--;
1533 genpd->max_off_time_changed = true;
1534
1535 dev_pm_domain_set(dev, NULL);
1536
1537 list_del_init(&pdd->list_node);
1538
1539 genpd_unlock(genpd);
1540
1541 if (genpd->detach_dev)
1542 genpd->detach_dev(genpd, dev);
1543
1544 genpd_free_dev_data(dev, gpd_data);
1545
1546 return 0;
1547
1548 out:
1549 genpd_unlock(genpd);
1550 dev_pm_qos_add_notifier(dev, &gpd_data->nb);
1551
1552 return ret;
1553 }
1554
1555 /**
1556 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
1557 * @dev: Device to be removed.
1558 */
1559 int pm_genpd_remove_device(struct device *dev)
1560 {
1561 struct generic_pm_domain *genpd = genpd_lookup_dev(dev);
1562
1563 if (!genpd)
1564 return -EINVAL;
1565
1566 return genpd_remove_device(genpd, dev);
1567 }
1568 EXPORT_SYMBOL_GPL(pm_genpd_remove_device);
1569
1570 static int genpd_add_subdomain(struct generic_pm_domain *genpd,
1571 struct generic_pm_domain *subdomain)
1572 {
1573 struct gpd_link *link, *itr;
1574 int ret = 0;
1575
1576 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
1577 || genpd == subdomain)
1578 return -EINVAL;
1579
1580 /*
1581 * If the domain can be powered on/off in an IRQ safe
1582 * context, ensure that the subdomain can also be
1583 * powered on/off in that context.
1584 */
1585 if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
1586 WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
1587 genpd->name, subdomain->name);
1588 return -EINVAL;
1589 }
1590
1591 link = kzalloc(sizeof(*link), GFP_KERNEL);
1592 if (!link)
1593 return -ENOMEM;
1594
1595 genpd_lock(subdomain);
1596 genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
1597
1598 if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) {
1599 ret = -EINVAL;
1600 goto out;
1601 }
1602
1603 list_for_each_entry(itr, &genpd->master_links, master_node) {
1604 if (itr->slave == subdomain && itr->master == genpd) {
1605 ret = -EINVAL;
1606 goto out;
1607 }
1608 }
1609
1610 link->master = genpd;
1611 list_add_tail(&link->master_node, &genpd->master_links);
1612 link->slave = subdomain;
1613 list_add_tail(&link->slave_node, &subdomain->slave_links);
1614 if (genpd_status_on(subdomain))
1615 genpd_sd_counter_inc(genpd);
1616
1617 out:
1618 genpd_unlock(genpd);
1619 genpd_unlock(subdomain);
1620 if (ret)
1621 kfree(link);
1622 return ret;
1623 }
1624
1625 /**
1626 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1627 * @genpd: Master PM domain to add the subdomain to.
1628 * @subdomain: Subdomain to be added.
1629 */
1630 int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
1631 struct generic_pm_domain *subdomain)
1632 {
1633 int ret;
1634
1635 mutex_lock(&gpd_list_lock);
1636 ret = genpd_add_subdomain(genpd, subdomain);
1637 mutex_unlock(&gpd_list_lock);
1638
1639 return ret;
1640 }
1641 EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
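Both domains passed to pm_genpd_add_subdomain() must already have been initialized with pm_genpd_init(). Once linked, powering on the subdomain increments the master's sd_count, which keeps the master from being powered off underneath it. A hypothetical two-level hierarchy (core_pd and gpu_pd are invented):

    /* Hypothetical: gpu_pd becomes a child of core_pd. */
    ret = pm_genpd_add_subdomain(&core_pd, &gpu_pd);
    if (ret)
            pr_err("failed to link gpu_pd under core_pd: %d\n", ret);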
1642
1643 /**
1644 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
1645 * @genpd: Master PM domain to remove the subdomain from.
1646 * @subdomain: Subdomain to be removed.
1647 */
1648 int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
1649 struct generic_pm_domain *subdomain)
1650 {
1651 struct gpd_link *l, *link;
1652 int ret = -EINVAL;
1653
1654 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
1655 return -EINVAL;
1656
1657 genpd_lock(subdomain);
1658 genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
1659
1660 if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
1661 pr_warn("%s: unable to remove subdomain %s\n",
1662 genpd->name, subdomain->name);
1663 ret = -EBUSY;
1664 goto out;
1665 }
1666
1667 list_for_each_entry_safe(link, l, &genpd->master_links, master_node) {
1668 if (link->slave != subdomain)
1669 continue;
1670
1671 list_del(&link->master_node);
1672 list_del(&link->slave_node);
1673 kfree(link);
1674 if (genpd_status_on(subdomain))
1675 genpd_sd_counter_dec(genpd);
1676
1677 ret = 0;
1678 break;
1679 }
1680
1681 out:
1682 genpd_unlock(genpd);
1683 genpd_unlock(subdomain);
1684
1685 return ret;
1686 }
1687 EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
1688
1689 static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
1690 {
1691 struct genpd_power_state *state;
1692
1693 state = kzalloc(sizeof(*state), GFP_KERNEL);
1694 if (!state)
1695 return -ENOMEM;
1696
1697 genpd->states = state;
1698 genpd->state_count = 1;
1699 genpd->free = state;
1700
1701 return 0;
1702 }
1703
1704 static void genpd_lock_init(struct generic_pm_domain *genpd)
1705 {
1706 if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
1707 spin_lock_init(&genpd->slock);
1708 genpd->lock_ops = &genpd_spin_ops;
1709 } else {
1710 mutex_init(&genpd->mlock);
1711 genpd->lock_ops = &genpd_mtx_ops;
1712 }
1713 }
1714
1715 /**
1716 * pm_genpd_init - Initialize a generic I/O PM domain object.
1717 * @genpd: PM domain object to initialize.
1718 * @gov: PM domain governor to associate with the domain (may be NULL).
1719 * @is_off: Whether the domain starts out in the powered-off state.
1720 *
1721 * Returns 0 on successful initialization, else a negative error code.
1722 */
1723 int pm_genpd_init(struct generic_pm_domain *genpd,
1724 struct dev_power_governor *gov, bool is_off)
1725 {
1726 int ret;
1727
1728 if (IS_ERR_OR_NULL(genpd))
1729 return -EINVAL;
1730
1731 INIT_LIST_HEAD(&genpd->master_links);
1732 INIT_LIST_HEAD(&genpd->slave_links);
1733 INIT_LIST_HEAD(&genpd->dev_list);
1734 genpd_lock_init(genpd);
1735 genpd->gov = gov;
1736 INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
1737 atomic_set(&genpd->sd_count, 0);
1738 genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
1739 genpd->device_count = 0;
1740 genpd->max_off_time_ns = -1;
1741 genpd->max_off_time_changed = true;
1742 genpd->provider = NULL;
1743 genpd->has_provider = false;
1744 genpd->accounting_time = ktime_get();
1745 genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
1746 genpd->domain.ops.runtime_resume = genpd_runtime_resume;
1747 genpd->domain.ops.prepare = genpd_prepare;
1748 genpd->domain.ops.suspend_noirq = genpd_suspend_noirq;
1749 genpd->domain.ops.resume_noirq = genpd_resume_noirq;
1750 genpd->domain.ops.freeze_noirq = genpd_freeze_noirq;
1751 genpd->domain.ops.thaw_noirq = genpd_thaw_noirq;
1752 genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
1753 genpd->domain.ops.restore_noirq = genpd_restore_noirq;
1754 genpd->domain.ops.complete = genpd_complete;
1755
1756 if (genpd->flags & GENPD_FLAG_PM_CLK) {
1757 genpd->dev_ops.stop = pm_clk_suspend;
1758 genpd->dev_ops.start = pm_clk_resume;
1759 }
1760
1761 /* Always-on domains must be powered on at initialization. */
1762 if (genpd_is_always_on(genpd) && !genpd_status_on(genpd))
1763 return -EINVAL;
1764
1765 /* Use only one "off" state if there were no states declared */
1766 if (genpd->state_count == 0) {
1767 ret = genpd_set_default_power_state(genpd);
1768 if (ret)
1769 return ret;
1770 } else if (!gov && genpd->state_count > 1) {
1771 pr_warn("%s: no governor for states\n", genpd->name);
1772 }
1773
1774 device_initialize(&genpd->dev);
1775 dev_set_name(&genpd->dev, "%s", genpd->name);
1776
1777 mutex_lock(&gpd_list_lock);
1778 list_add(&genpd->gpd_list_node, &gpd_list);
1779 mutex_unlock(&gpd_list_lock);
1780
1781 return 0;
1782 }
1783 EXPORT_SYMBOL_GPL(pm_genpd_init);
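Tying the pieces together, a minimal hypothetical provider looks like the sketch below; my_pd, my_pd_power_on() and my_pd_power_off() are invented stand-ins for real platform register accesses, and dev is assumed to be some consumer's struct device:

    static int my_pd_power_on(struct generic_pm_domain *domain)
    {
            /* enable clocks/regulators, deassert resets, etc. */
            return 0;
    }

    static int my_pd_power_off(struct generic_pm_domain *domain)
    {
            /* reverse of my_pd_power_on() */
            return 0;
    }

    static struct generic_pm_domain my_pd = {
            .name      = "my_pd",
            .power_on  = my_pd_power_on,
            .power_off = my_pd_power_off,
    };

    /* In the platform driver's probe path: */
    ret = pm_genpd_init(&my_pd, NULL, true);        /* start powered off */
    if (!ret)
            ret = pm_genpd_add_device(&my_pd, dev); /* attach a consumer */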
1784
1785 static int genpd_remove(struct generic_pm_domain *genpd)
1786 {
1787 struct gpd_link *l, *link;
1788
1789 if (IS_ERR_OR_NULL(genpd))
1790 return -EINVAL;
1791
1792 genpd_lock(genpd);
1793
1794 if (genpd->has_provider) {
1795 genpd_unlock(genpd);
1796 pr_err("Provider present, unable to remove %s\n", genpd->name);
1797 return -EBUSY;
1798 }
1799
1800 if (!list_empty(&genpd->master_links) || genpd->device_count) {
1801 genpd_unlock(genpd);
1802 pr_err("%s: unable to remove %s\n", __func__, genpd->name);
1803 return -EBUSY;
1804 }
1805
1806 list_for_each_entry_safe(link, l, &genpd->slave_links, slave_node) {
1807 list_del(&link->master_node);
1808 list_del(&link->slave_node);
1809 kfree(link);
1810 }
1811
1812 list_del(&genpd->gpd_list_node);
1813 genpd_unlock(genpd);
1814 cancel_work_sync(&genpd->power_off_work);
1815 kfree(genpd->free);
1816 pr_debug("%s: removed %s\n", __func__, genpd->name);
1817
1818 return 0;
1819 }
1820
1821 /**
1822 * pm_genpd_remove - Remove a generic I/O PM domain
1823 * @genpd: Pointer to PM domain that is to be removed.
1824 *
1825 * To remove the PM domain, this function:
1826 * - Removes the PM domain as a subdomain to any parent domains,
1827 * if it was added.
1828 * - Removes the PM domain from the list of registered PM domains.
1829 *
1830 * The PM domain will only be removed if the associated provider has
1831 * been removed, it is not a parent to any other PM domain, and it has
1832 * no devices associated with it.
1833 */
1834 int pm_genpd_remove(struct generic_pm_domain *genpd)
1835 {
1836 int ret;
1837
1838 mutex_lock(&gpd_list_lock);
1839 ret = genpd_remove(genpd);
1840 mutex_unlock(&gpd_list_lock);
1841
1842 return ret;
1843 }
1844 EXPORT_SYMBOL_GPL(pm_genpd_remove);
1845
1846 #ifdef CONFIG_PM_GENERIC_DOMAINS_OF
1847
1848 /*
1849 * Device Tree based PM domain providers.
1850 *
1851 * The code below implements generic device tree based PM domain providers that
1852 * bind device tree nodes with generic PM domains registered in the system.
1853 *
1854 * Any driver that registers generic PM domains and needs to support binding of
1855 * devices to these domains is supposed to register a PM domain provider, which
1856 * maps a PM domain specifier retrieved from the device tree to a PM domain.
1857 *
1858 * Two simple mapping functions have been provided for convenience:
1859 * - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
1860 * - genpd_xlate_onecell() for mapping of multiple PM domains per node by
1861 * index.
1862 */
1863
1864 /**
1865 * struct of_genpd_provider - PM domain provider registration structure
1866 * @link: Entry in global list of PM domain providers
1867 * @node: Pointer to device tree node of PM domain provider
1868 * @xlate: Provider-specific xlate callback mapping a set of specifier cells
1869 * into a PM domain.
1870 * @data: context pointer to be passed into @xlate callback
1871 */
1872 struct of_genpd_provider {
1873 struct list_head link;
1874 struct device_node *node;
1875 genpd_xlate_t xlate;
1876 void *data;
1877 };
1878
1879 /* List of registered PM domain providers. */
1880 static LIST_HEAD(of_genpd_providers);
1881 /* Mutex to protect the list above. */
1882 static DEFINE_MUTEX(of_genpd_mutex);
1883
1884 /**
1885 * genpd_xlate_simple() - Xlate function for direct node-domain mapping
1886 * @genpdspec: OF phandle args to map into a PM domain
1887 * @data: xlate function private data - pointer to struct generic_pm_domain
1888 *
1889 * This is a generic xlate function that can be used to model PM domains that
1890 * have their own device tree nodes. The private data of the xlate function
1891 * needs to be a valid pointer to struct generic_pm_domain.
1892 */
1893 static struct generic_pm_domain *genpd_xlate_simple(
1894 struct of_phandle_args *genpdspec,
1895 void *data)
1896 {
1897 return data;
1898 }
1899
1900 /**
1901 * genpd_xlate_onecell() - Xlate function using a single index.
1902 * @genpdspec: OF phandle args to map into a PM domain
1903 * @data: xlate function private data - pointer to struct genpd_onecell_data
1904 *
1905 * This is a generic xlate function that can be used to model simple PM domain
1906 * controllers that have one device tree node and provide multiple PM domains.
1907 * A single cell is used as an index into an array of PM domains specified in
1908 * the genpd_onecell_data struct when registering the provider.
1909 */
1910 static struct generic_pm_domain *genpd_xlate_onecell(
1911 struct of_phandle_args *genpdspec,
1912 void *data)
1913 {
1914 struct genpd_onecell_data *genpd_data = data;
1915 unsigned int idx = genpdspec->args[0];
1916
1917 if (genpdspec->args_count != 1)
1918 return ERR_PTR(-EINVAL);
1919
1920 if (idx >= genpd_data->num_domains) {
1921 pr_err("%s: invalid domain index %u\n", __func__, idx);
1922 return ERR_PTR(-EINVAL);
1923 }
1924
1925 if (!genpd_data->domains[idx])
1926 return ERR_PTR(-ENOENT);
1927
1928 return genpd_data->domains[idx];
1929 }
1930
1931 /**
1932 * genpd_add_provider() - Register a PM domain provider for a node
1933 * @np: Device node pointer associated with the PM domain provider.
1934 * @xlate: Callback for decoding PM domain from phandle arguments.
1935 * @data: Context pointer for @xlate callback.
1936 */
1937 static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
1938 void *data)
1939 {
1940 struct of_genpd_provider *cp;
1941
1942 cp = kzalloc(sizeof(*cp), GFP_KERNEL);
1943 if (!cp)
1944 return -ENOMEM;
1945
1946 cp->node = of_node_get(np);
1947 cp->data = data;
1948 cp->xlate = xlate;
1949
1950 mutex_lock(&of_genpd_mutex);
1951 list_add(&cp->link, &of_genpd_providers);
1952 mutex_unlock(&of_genpd_mutex);
1953 pr_debug("Added domain provider from %pOF\n", np);
1954
1955 return 0;
1956 }
1957
1958 /**
1959 * of_genpd_add_provider_simple() - Register a simple PM domain provider
1960 * @np: Device node pointer associated with the PM domain provider.
1961 * @genpd: Pointer to PM domain associated with the PM domain provider.
1962 */
1963 int of_genpd_add_provider_simple(struct device_node *np,
1964 struct generic_pm_domain *genpd)
1965 {
1966 int ret = -EINVAL;
1967
1968 if (!np || !genpd)
1969 return -EINVAL;
1970
1971 mutex_lock(&gpd_list_lock);
1972
1973 if (!genpd_present(genpd))
1974 goto unlock;
1975
1976 genpd->dev.of_node = np;
1977
1978 /* Parse genpd OPP table */
1979 if (genpd->set_performance_state) {
1980 ret = dev_pm_opp_of_add_table(&genpd->dev);
1981 if (ret) {
1982 dev_err(&genpd->dev, "Failed to add OPP table: %d\n",
1983 ret);
1984 goto unlock;
1985 }
1986
1987 /*
1988 * Save table for faster processing while setting performance
1989 * state.
1990 */
1991 genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
1992 WARN_ON(!genpd->opp_table);
1993 }
1994
1995 ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
1996 if (ret) {
1997 if (genpd->set_performance_state) {
1998 dev_pm_opp_put_opp_table(genpd->opp_table);
1999 dev_pm_opp_of_remove_table(&genpd->dev);
2000 }
2001
2002 goto unlock;
2003 }
2004
2005 genpd->provider = &np->fwnode;
2006 genpd->has_provider = true;
2007
2008 unlock:
2009 mutex_unlock(&gpd_list_lock);
2010
2011 return ret;
2012 }
2013 EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
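
/*
 * Example (illustrative sketch, not part of the original file): a SoC driver
 * controlling a single power domain could register it as a provider roughly
 * like this; "my_genpd", "my_power_on" and "my_power_off" are hypothetical:
 *
 *	static struct generic_pm_domain my_genpd = {
 *		.name = "my-pd",
 *		.power_on = my_power_on,
 *		.power_off = my_power_off,
 *	};
 *
 *	ret = pm_genpd_init(&my_genpd, NULL, true);
 *	if (!ret)
 *		ret = of_genpd_add_provider_simple(pdev->dev.of_node,
 *						   &my_genpd);
 */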
2014
2015 /**
2016 * of_genpd_add_provider_onecell() - Register a onecell PM domain provider
2017 * @np: Device node pointer associated with the PM domain provider.
2018 * @data: Pointer to the data associated with the PM domain provider.
2019 */
2020 int of_genpd_add_provider_onecell(struct device_node *np,
2021 struct genpd_onecell_data *data)
2022 {
2023 struct generic_pm_domain *genpd;
2024 unsigned int i;
2025 int ret = -EINVAL;
2026
2027 if (!np || !data)
2028 return -EINVAL;
2029
2030 mutex_lock(&gpd_list_lock);
2031
2032 if (!data->xlate)
2033 data->xlate = genpd_xlate_onecell;
2034
2035 for (i = 0; i < data->num_domains; i++) {
2036 genpd = data->domains[i];
2037
2038 if (!genpd)
2039 continue;
2040 if (!genpd_present(genpd))
2041 goto error;
2042
2043 genpd->dev.of_node = np;
2044
2045 /* Parse genpd OPP table */
2046 if (genpd->set_performance_state) {
2047 ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
2048 if (ret) {
2049 dev_err(&genpd->dev, "Failed to add OPP table for index %d: %d\n",
2050 i, ret);
2051 goto error;
2052 }
2053
2054 /*
2055 * Save table for faster processing while setting
2056 * performance state.
2057 */
2058 genpd->opp_table = dev_pm_opp_get_opp_table_indexed(&genpd->dev, i);
2059 WARN_ON(!genpd->opp_table);
2060 }
2061
2062 genpd->provider = &np->fwnode;
2063 genpd->has_provider = true;
2064 }
2065
2066 ret = genpd_add_provider(np, data->xlate, data);
2067 if (ret < 0)
2068 goto error;
2069
2070 mutex_unlock(&gpd_list_lock);
2071
2072 return 0;
2073
2074 error:
2075 while (i--) {
2076 genpd = data->domains[i];
2077
2078 if (!genpd)
2079 continue;
2080
2081 genpd->provider = NULL;
2082 genpd->has_provider = false;
2083
2084 if (genpd->set_performance_state) {
2085 dev_pm_opp_put_opp_table(genpd->opp_table);
2086 dev_pm_opp_of_remove_table(&genpd->dev);
2087 }
2088 }
2089
2090 mutex_unlock(&gpd_list_lock);
2091
2092 return ret;
2093 }
2094 EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);
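
/*
 * Example (illustrative sketch, not part of the original file): a controller
 * exposing several PM domains can register them through genpd_onecell_data;
 * the hypothetical "my_domains" entries must each have been initialized with
 * pm_genpd_init() beforehand:
 *
 *	static struct generic_pm_domain *my_domains[2];
 *
 *	static struct genpd_onecell_data my_onecell_data = {
 *		.domains = my_domains,
 *		.num_domains = ARRAY_SIZE(my_domains),
 *	};
 *
 *	ret = of_genpd_add_provider_onecell(np, &my_onecell_data);
 *
 * A consumer then selects one domain with a single index cell, e.g.
 * "power-domains = <&my_provider 1>;" in its device tree node.
 */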
2095
2096 /**
2097 * of_genpd_del_provider() - Remove a previously registered PM domain provider
2098 * @np: Device node pointer associated with the PM domain provider
2099 */
2100 void of_genpd_del_provider(struct device_node *np)
2101 {
2102 struct of_genpd_provider *cp, *tmp;
2103 struct generic_pm_domain *gpd;
2104
2105 mutex_lock(&gpd_list_lock);
2106 mutex_lock(&of_genpd_mutex);
2107 list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
2108 if (cp->node == np) {
2109 /*
2110 * For each PM domain associated with the
2111 * provider, set 'has_provider' to false
2112 * so that the PM domain can be safely removed.
2113 */
2114 list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
2115 if (gpd->provider == &np->fwnode) {
2116 gpd->has_provider = false;
2117
2118 if (!gpd->set_performance_state)
2119 continue;
2120
2121 dev_pm_opp_put_opp_table(gpd->opp_table);
2122 dev_pm_opp_of_remove_table(&gpd->dev);
2123 }
2124 }
2125
2126 list_del(&cp->link);
2127 of_node_put(cp->node);
2128 kfree(cp);
2129 break;
2130 }
2131 }
2132 mutex_unlock(&of_genpd_mutex);
2133 mutex_unlock(&gpd_list_lock);
2134 }
2135 EXPORT_SYMBOL_GPL(of_genpd_del_provider);
2136
2137 /**
2138 * genpd_get_from_provider() - Look-up PM domain
2139 * @genpdspec: OF phandle args to use for look-up
2140 *
2141 * Looks for a PM domain provider under the node specified by @genpdspec and,
2142 * if found, uses the xlate function of the provider to map the phandle args
2143 * to a PM domain.
2144 *
2145 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
2146 * on failure.
2147 */
2148 static struct generic_pm_domain *genpd_get_from_provider(
2149 struct of_phandle_args *genpdspec)
2150 {
2151 struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
2152 struct of_genpd_provider *provider;
2153
2154 if (!genpdspec)
2155 return ERR_PTR(-EINVAL);
2156
2157 mutex_lock(&of_genpd_mutex);
2158
2159 /* Check if we have such a provider in our list */
2160 list_for_each_entry(provider, &of_genpd_providers, link) {
2161 if (provider->node == genpdspec->np)
2162 genpd = provider->xlate(genpdspec, provider->data);
2163 if (!IS_ERR(genpd))
2164 break;
2165 }
2166
2167 mutex_unlock(&of_genpd_mutex);
2168
2169 return genpd;
2170 }
2171
2172 /**
2173 * of_genpd_add_device() - Add a device to an I/O PM domain
2174 * @genpdspec: OF phandle args to use for PM domain look-up
2175 * @dev: Device to be added.
2176 *
2177 * Looks up an I/O PM domain based upon the phandle args provided and adds
2178 * the device to the PM domain. Returns a negative error code on failure.
2179 */
2180 int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
2181 {
2182 struct generic_pm_domain *genpd;
2183 int ret;
2184
2185 mutex_lock(&gpd_list_lock);
2186
2187 genpd = genpd_get_from_provider(genpdspec);
2188 if (IS_ERR(genpd)) {
2189 ret = PTR_ERR(genpd);
2190 goto out;
2191 }
2192
2193 ret = genpd_add_device(genpd, dev, NULL);
2194
2195 out:
2196 mutex_unlock(&gpd_list_lock);
2197
2198 return ret;
2199 }
2200 EXPORT_SYMBOL_GPL(of_genpd_add_device);
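
/*
 * Example (illustrative sketch): resolving the specifier by hand, much like
 * __genpd_dev_pm_attach() below, before adding a device to the looked-up
 * domain:
 *
 *	struct of_phandle_args pd_args;
 *
 *	ret = of_parse_phandle_with_args(np, "power-domains",
 *					 "#power-domain-cells", 0, &pd_args);
 *	if (ret < 0)
 *		return ret;
 *
 *	ret = of_genpd_add_device(&pd_args, dev);
 *	of_node_put(pd_args.np);
 */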
2201
2202 /**
2203 * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
2204 * @parent_spec: OF phandle args to use for parent PM domain look-up
2205 * @subdomain_spec: OF phandle args to use for subdomain look-up
2206 *
2207 * Looks up a parent PM domain and a subdomain based upon the phandle
2208 * args provided and adds the subdomain to the parent PM domain. Returns a
2209 * negative error code on failure.
2210 */
2211 int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
2212 struct of_phandle_args *subdomain_spec)
2213 {
2214 struct generic_pm_domain *parent, *subdomain;
2215 int ret;
2216
2217 mutex_lock(&gpd_list_lock);
2218
2219 parent = genpd_get_from_provider(parent_spec);
2220 if (IS_ERR(parent)) {
2221 ret = PTR_ERR(parent);
2222 goto out;
2223 }
2224
2225 subdomain = genpd_get_from_provider(subdomain_spec);
2226 if (IS_ERR(subdomain)) {
2227 ret = PTR_ERR(subdomain);
2228 goto out;
2229 }
2230
2231 ret = genpd_add_subdomain(parent, subdomain);
2232
2233 out:
2234 mutex_unlock(&gpd_list_lock);
2235
2236 return ret;
2237 }
2238 EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);
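
/*
 * Example (illustrative sketch): with both specifiers already parsed into
 * struct of_phandle_args (e.g. via of_parse_phandle_with_args()), a platform
 * can link a child domain below its parent:
 *
 *	ret = of_genpd_add_subdomain(&parent_spec, &subdomain_spec);
 *	if (ret < 0)
 *		return ret;
 */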
2239
2240 /**
2241 * of_genpd_remove_last - Remove the last PM domain registered for a provider
2242 * @np: Pointer to the device node associated with the provider
2243 *
2244 * Finds the last PM domain that was added by a particular provider and
2245 * removes it from the list of PM domains. The provider is identified
2246 * by the device node @np that is passed in. The PM domain will only
2247 * be removed if the provider associated with the domain has been
2248 * removed.
2249 *
2250 * Returns a valid pointer to struct generic_pm_domain on success or
2251 * ERR_PTR() on failure.
2252 */
2253 struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
2254 {
2255 struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
2256 int ret;
2257
2258 if (IS_ERR_OR_NULL(np))
2259 return ERR_PTR(-EINVAL);
2260
2261 mutex_lock(&gpd_list_lock);
2262 list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) {
2263 if (gpd->provider == &np->fwnode) {
2264 ret = genpd_remove(gpd);
2265 genpd = ret ? ERR_PTR(ret) : gpd;
2266 break;
2267 }
2268 }
2269 mutex_unlock(&gpd_list_lock);
2270
2271 return genpd;
2272 }
2273 EXPORT_SYMBOL_GPL(of_genpd_remove_last);
2274
2275 static void genpd_release_dev(struct device *dev)
2276 {
2277 kfree(dev);
2278 }
2279
2280 static struct bus_type genpd_bus_type = {
2281 .name = "genpd",
2282 };
2283
2284 /**
2285 * genpd_dev_pm_detach - Detach a device from its PM domain.
2286 * @dev: Device to detach.
2287 * @power_off: Currently not used
2288 *
2289 * Try to locate the generic PM domain that the device was previously
2290 * attached to. If one is found, the device is detached from it.
2291 */
2292 static void genpd_dev_pm_detach(struct device *dev, bool power_off)
2293 {
2294 struct generic_pm_domain *pd;
2295 unsigned int i;
2296 int ret = 0;
2297
2298 pd = dev_to_genpd(dev);
2299 if (IS_ERR(pd))
2300 return;
2301
2302 dev_dbg(dev, "removing from PM domain %s\n", pd->name);
2303
2304 for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
2305 ret = genpd_remove_device(pd, dev);
2306 if (ret != -EAGAIN)
2307 break;
2308
2309 mdelay(i);
2310 cond_resched();
2311 }
2312
2313 if (ret < 0) {
2314 dev_err(dev, "failed to remove from PM domain %s: %d",
2315 pd->name, ret);
2316 return;
2317 }
2318
2319 /* Check if PM domain can be powered off after removing this device. */
2320 genpd_queue_power_off_work(pd);
2321
2322 /* Unregister the device if it was created by genpd. */
2323 if (dev->bus == &genpd_bus_type)
2324 device_unregister(dev);
2325 }
2326
2327 static void genpd_dev_pm_sync(struct device *dev)
2328 {
2329 struct generic_pm_domain *pd;
2330
2331 pd = dev_to_genpd(dev);
2332 if (IS_ERR(pd))
2333 return;
2334
2335 genpd_queue_power_off_work(pd);
2336 }
2337
2338 static int __genpd_dev_pm_attach(struct device *dev, struct device_node *np,
2339 unsigned int index, bool power_on)
2340 {
2341 struct of_phandle_args pd_args;
2342 struct generic_pm_domain *pd;
2343 int ret;
2344
2345 ret = of_parse_phandle_with_args(np, "power-domains",
2346 "#power-domain-cells", index, &pd_args);
2347 if (ret < 0)
2348 return ret;
2349
2350 mutex_lock(&gpd_list_lock);
2351 pd = genpd_get_from_provider(&pd_args);
2352 of_node_put(pd_args.np);
2353 if (IS_ERR(pd)) {
2354 mutex_unlock(&gpd_list_lock);
2355 dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
2356 __func__, PTR_ERR(pd));
2357 return driver_deferred_probe_check_state(dev);
2358 }
2359
2360 dev_dbg(dev, "adding to PM domain %s\n", pd->name);
2361
2362 ret = genpd_add_device(pd, dev, NULL);
2363 mutex_unlock(&gpd_list_lock);
2364
2365 if (ret < 0) {
2366 if (ret != -EPROBE_DEFER)
2367 dev_err(dev, "failed to add to PM domain %s: %d",
2368 pd->name, ret);
2369 return ret;
2370 }
2371
2372 dev->pm_domain->detach = genpd_dev_pm_detach;
2373 dev->pm_domain->sync = genpd_dev_pm_sync;
2374
2375 if (power_on) {
2376 genpd_lock(pd);
2377 ret = genpd_power_on(pd, 0);
2378 genpd_unlock(pd);
2379 }
2380
2381 if (ret)
2382 genpd_remove_device(pd, dev);
2383
2384 return ret ? -EPROBE_DEFER : 1;
2385 }
2386
2387 /**
2388 * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
2389 * @dev: Device to attach.
2390 *
2391 * Parses the device's OF node to find a PM domain specifier. If one is
2392 * found, attaches the device to the retrieved pm_domain ops.
2393 *
2394 * Returns 1 on a successfully attached PM domain, 0 when the device
2395 * doesn't need a PM domain or when multiple power-domains exist for it,
2396 * else a negative error code. Note that if a power-domain exists for the
2397 * device but cannot be found or turned on, -EPROBE_DEFER is returned to
2398 * ensure that the device is not probed and can be retried later.
2399 */
2400 int genpd_dev_pm_attach(struct device *dev)
2401 {
2402 if (!dev->of_node)
2403 return 0;
2404
2405 /*
2406 * Devices with multiple PM domains must be attached separately, as we
2407 * can only attach one PM domain per device.
2408 */
2409 if (of_count_phandle_with_args(dev->of_node, "power-domains",
2410 "#power-domain-cells") != 1)
2411 return 0;
2412
2413 return __genpd_dev_pm_attach(dev, dev->of_node, 0, true);
2414 }
2415 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
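
/*
 * Example (illustrative): genpd_dev_pm_attach() covers the common case of a
 * consumer with exactly one power domain, described in the device tree as:
 *
 *	my_device: device@40000000 {
 *		...
 *		power-domains = <&my_provider 0>;
 *	};
 *
 * Bus code normally attaches the domain (e.g. via dev_pm_domain_attach())
 * before probing the device, so drivers rarely call this directly.
 */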
2416
2417 /**
2418 * genpd_dev_pm_attach_by_id - Associate a device with one of its PM domains.
2419 * @dev: The device used to lookup the PM domain.
2420 * @index: The index of the PM domain.
2421 *
2422 * Parses the device's OF node to find a PM domain specifier at the provided
2423 * @index. If one is found, creates a virtual device and attaches it to the
2424 * retrieved pm_domain ops. To deal with detaching of the virtual device, the
2425 * ->detach() callback in the struct dev_pm_domain is assigned to
2426 * genpd_dev_pm_detach().
2427 *
2428 * Returns the created virtual device on a successfully attached PM domain,
2429 * NULL when the device doesn't need a PM domain, else an ERR_PTR() in case
2430 * of failures. If a power-domain exists for the device but cannot be found
2431 * or turned on, ERR_PTR(-EPROBE_DEFER) is returned to allow a later retry.
2432 */
2433 struct device *genpd_dev_pm_attach_by_id(struct device *dev,
2434 unsigned int index)
2435 {
2436 struct device *virt_dev;
2437 int num_domains;
2438 int ret;
2439
2440 if (!dev->of_node)
2441 return NULL;
2442
2443 /* Deal only with devices using multiple PM domains. */
2444 num_domains = of_count_phandle_with_args(dev->of_node, "power-domains",
2445 "#power-domain-cells");
2446 if (num_domains < 2 || index >= num_domains)
2447 return NULL;
2448
2449 /* Allocate and register device on the genpd bus. */
2450 virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL);
2451 if (!virt_dev)
2452 return ERR_PTR(-ENOMEM);
2453
2454 dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev));
2455 virt_dev->bus = &genpd_bus_type;
2456 virt_dev->release = genpd_release_dev;
2457
2458 ret = device_register(virt_dev);
2459 if (ret) {
2460 kfree(virt_dev);
2461 return ERR_PTR(ret);
2462 }
2463
2464 /* Try to attach the device to the PM domain at the specified index. */
2465 ret = __genpd_dev_pm_attach(virt_dev, dev->of_node, index, false);
2466 if (ret < 1) {
2467 device_unregister(virt_dev);
2468 return ret ? ERR_PTR(ret) : NULL;
2469 }
2470
2471 pm_runtime_enable(virt_dev);
2472 genpd_queue_power_off_work(dev_to_genpd(virt_dev));
2473
2474 return virt_dev;
2475 }
2476 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);
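
/*
 * Example (illustrative sketch): a driver for a device with multiple PM
 * domains can attach to one of them through the returned virtual device and
 * control it via runtime PM (a NULL return means the device does not use
 * multiple PM domains):
 *
 *	virt_dev = genpd_dev_pm_attach_by_id(dev, 1);
 *	if (IS_ERR(virt_dev))
 *		return PTR_ERR(virt_dev);
 *
 *	pm_runtime_get_sync(virt_dev);	(power the domain on)
 *	...
 *	pm_runtime_put(virt_dev);	(allow it to power off again)
 */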
2477
2478 /**
2479 * genpd_dev_pm_attach_by_name - Associate a device with one of its PM domains.
2480 * @dev: The device used to lookup the PM domain.
2481 * @name: The name of the PM domain.
2482 *
2483 * Parses the device's OF node to find a PM domain specifier using the
2484 * power-domain-names DT property. For further description see
2485 * genpd_dev_pm_attach_by_id().
2486 */
2487 struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name)
2488 {
2489 int index;
2490
2491 if (!dev->of_node)
2492 return NULL;
2493
2494 index = of_property_match_string(dev->of_node, "power-domain-names",
2495 name);
2496 if (index < 0)
2497 return NULL;
2498
2499 return genpd_dev_pm_attach_by_id(dev, index);
2500 }
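
/*
 * Example (illustrative): with a consumer node that also names its domains,
 *
 *	power-domains = <&my_provider 0>, <&my_provider 3>;
 *	power-domain-names = "bus", "core";
 *
 * genpd_dev_pm_attach_by_name(dev, "core") resolves the name to index 1 and
 * then behaves exactly like genpd_dev_pm_attach_by_id(dev, 1).
 */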
2501
2502 static const struct of_device_id idle_state_match[] = {
2503 { .compatible = "domain-idle-state", },
2504 { }
2505 };
2506
2507 static int genpd_parse_state(struct genpd_power_state *genpd_state,
2508 struct device_node *state_node)
2509 {
2510 int err;
2511 u32 residency;
2512 u32 entry_latency, exit_latency;
2513
2514 err = of_property_read_u32(state_node, "entry-latency-us",
2515 &entry_latency);
2516 if (err) {
2517 pr_debug(" * %pOF missing entry-latency-us property\n",
2518 state_node);
2519 return -EINVAL;
2520 }
2521
2522 err = of_property_read_u32(state_node, "exit-latency-us",
2523 &exit_latency);
2524 if (err) {
2525 pr_debug(" * %pOF missing exit-latency-us property\n",
2526 state_node);
2527 return -EINVAL;
2528 }
2529
2530 err = of_property_read_u32(state_node, "min-residency-us", &residency);
2531 if (!err)
2532 genpd_state->residency_ns = 1000 * residency;
2533
2534 genpd_state->power_on_latency_ns = 1000 * exit_latency;
2535 genpd_state->power_off_latency_ns = 1000 * entry_latency;
2536 genpd_state->fwnode = &state_node->fwnode;
2537
2538 return 0;
2539 }
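
/*
 * Example (illustrative): a device tree idle state node accepted by the
 * parser above, with latencies and residency given in microseconds:
 *
 *	DOMAIN_RET: domain-retention {
 *		compatible = "domain-idle-state";
 *		entry-latency-us = <20>;
 *		exit-latency-us = <40>;
 *		min-residency-us = <80>;
 *	};
 */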
2540
2541 static int genpd_iterate_idle_states(struct device_node *dn,
2542 struct genpd_power_state *states)
2543 {
2544 int ret;
2545 struct of_phandle_iterator it;
2546 struct device_node *np;
2547 int i = 0;
2548
2549 ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
2550 if (ret <= 0)
2551 return ret;
2552
2553 /* Loop over the phandles until all the requested entries are found */
2554 of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
2555 np = it.node;
2556 if (!of_match_node(idle_state_match, np))
2557 continue;
2558 if (states) {
2559 ret = genpd_parse_state(&states[i], np);
2560 if (ret) {
2561 pr_err("Parsing idle state node %pOF failed with err %d\n",
2562 np, ret);
2563 of_node_put(np);
2564 return ret;
2565 }
2566 }
2567 i++;
2568 }
2569
2570 return i;
2571 }
2572
2573 /**
2574 * of_genpd_parse_idle_states: Return array of idle states for the genpd.
2575 *
2576 * @dn: The genpd device node
2577 * @states: The pointer to which the state array will be saved.
2578 * @n: The count of elements in the array returned from this function.
2579 *
2580 * Returns the device states parsed from the OF node. The memory for the
2581 * states is allocated by this function and it is the caller's responsibility
2582 * to free it after use. Returns 0 whether or not compatible domain idle
2583 * states are found; in case of errors, a negative error code is returned.
2584 */
2585 int of_genpd_parse_idle_states(struct device_node *dn,
2586 struct genpd_power_state **states, int *n)
2587 {
2588 struct genpd_power_state *st;
2589 int ret;
2590
2591 ret = genpd_iterate_idle_states(dn, NULL);
2592 if (ret < 0)
2593 return ret;
2594
2595 if (!ret) {
2596 *states = NULL;
2597 *n = 0;
2598 return 0;
2599 }
2600
2601 st = kcalloc(ret, sizeof(*st), GFP_KERNEL);
2602 if (!st)
2603 return -ENOMEM;
2604
2605 ret = genpd_iterate_idle_states(dn, st);
2606 if (ret <= 0) {
2607 kfree(st);
2608 return ret < 0 ? ret : -EINVAL;
2609 }
2610
2611 *states = st;
2612 *n = ret;
2613
2614 return 0;
2615 }
2616 EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
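
/*
 * Example (illustrative sketch): a provider driver can hand the parsed
 * states to its genpd before calling pm_genpd_init(); "my_genpd" is
 * hypothetical:
 *
 *	struct genpd_power_state *states;
 *	int nr_states, ret;
 *
 *	ret = of_genpd_parse_idle_states(np, &states, &nr_states);
 *	if (ret < 0)
 *		return ret;
 *
 *	my_genpd.states = states;
 *	my_genpd.state_count = nr_states;
 */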
2617
2618 /**
2619 * pm_genpd_opp_to_performance_state - Gets performance state of the genpd from its OPP node.
2620 *
2621 * @genpd_dev: Genpd's device for which the performance-state needs to be found.
2622 * @opp: struct dev_pm_opp of the OPP for which we need to find performance
2623 * state.
2624 *
2625 * Returns the performance state encoded in the OPP of the genpd. This calls
2626 * the platform-specific genpd->opp_to_performance_state() callback to
2627 * translate a power domain OPP to a performance state.
2628 *
2629 * Returns the performance state on success and 0 on failure.
2630 */
2631 unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev,
2632 struct dev_pm_opp *opp)
2633 {
2634 struct generic_pm_domain *genpd;
2635 int state;
2636
2637 genpd = container_of(genpd_dev, struct generic_pm_domain, dev);
2638
2639 if (unlikely(!genpd->opp_to_performance_state))
2640 return 0;
2641
2642 genpd_lock(genpd);
2643 state = genpd->opp_to_performance_state(genpd, opp);
2644 genpd_unlock(genpd);
2645
2646 return state;
2647 }
2648 EXPORT_SYMBOL_GPL(pm_genpd_opp_to_performance_state);
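
/*
 * Example (illustrative sketch): a provider can implement the
 * ->opp_to_performance_state() callback by reading the OPP's "opp-level"
 * value, assuming its performance states are described that way in the
 * device tree:
 *
 *	static unsigned int
 *	my_opp_to_perf_state(struct generic_pm_domain *genpd,
 *			     struct dev_pm_opp *opp)
 *	{
 *		return dev_pm_opp_get_level(opp);
 *	}
 */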
2649
2650 static int __init genpd_bus_init(void)
2651 {
2652 return bus_register(&genpd_bus_type);
2653 }
2654 core_initcall(genpd_bus_init);
2655
2656 #endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
2657
2658
2659 /*** debugfs support ***/
2660
2661 #ifdef CONFIG_DEBUG_FS
2662 #include <linux/pm.h>
2663 #include <linux/device.h>
2664 #include <linux/debugfs.h>
2665 #include <linux/seq_file.h>
2666 #include <linux/init.h>
2667 #include <linux/kobject.h>
2668 static struct dentry *genpd_debugfs_dir;
2669
2670 /*
2671 * TODO: This function is a slightly modified version of rtpm_status_show
2672 * from sysfs.c, so generalize it.
2673 */
2674 static void rtpm_status_str(struct seq_file *s, struct device *dev)
2675 {
2676 static const char * const status_lookup[] = {
2677 [RPM_ACTIVE] = "active",
2678 [RPM_RESUMING] = "resuming",
2679 [RPM_SUSPENDED] = "suspended",
2680 [RPM_SUSPENDING] = "suspending"
2681 };
2682 const char *p = "";
2683
2684 if (dev->power.runtime_error)
2685 p = "error";
2686 else if (dev->power.disable_depth)
2687 p = "unsupported";
2688 else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
2689 p = status_lookup[dev->power.runtime_status];
2690 else
2691 WARN_ON(1);
2692
2693 seq_puts(s, p);
2694 }
2695
2696 static int genpd_summary_one(struct seq_file *s,
2697 struct generic_pm_domain *genpd)
2698 {
2699 static const char * const status_lookup[] = {
2700 [GPD_STATE_ACTIVE] = "on",
2701 [GPD_STATE_POWER_OFF] = "off"
2702 };
2703 struct pm_domain_data *pm_data;
2704 const char *kobj_path;
2705 struct gpd_link *link;
2706 char state[16];
2707 int ret;
2708
2709 ret = genpd_lock_interruptible(genpd);
2710 if (ret)
2711 return -ERESTARTSYS;
2712
2713 if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
2714 goto exit;
2715 if (!genpd_status_on(genpd))
2716 snprintf(state, sizeof(state), "%s-%u",
2717 status_lookup[genpd->status], genpd->state_idx);
2718 else
2719 snprintf(state, sizeof(state), "%s",
2720 status_lookup[genpd->status]);
2721 seq_printf(s, "%-30s %-15s ", genpd->name, state);
2722
2723 /*
2724 * Modifications on the list require holding locks on both
2725 * master and slave, so we are safe.
2726 * Also genpd->name is immutable.
2727 */
2728 list_for_each_entry(link, &genpd->master_links, master_node) {
2729 seq_printf(s, "%s", link->slave->name);
2730 if (!list_is_last(&link->master_node, &genpd->master_links))
2731 seq_puts(s, ", ");
2732 }
2733
2734 list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
2735 kobj_path = kobject_get_path(&pm_data->dev->kobj,
2736 genpd_is_irq_safe(genpd) ?
2737 GFP_ATOMIC : GFP_KERNEL);
2738 if (kobj_path == NULL)
2739 continue;
2740
2741 seq_printf(s, "\n %-50s ", kobj_path);
2742 rtpm_status_str(s, pm_data->dev);
2743 kfree(kobj_path);
2744 }
2745
2746 seq_puts(s, "\n");
2747 exit:
2748 genpd_unlock(genpd);
2749
2750 return 0;
2751 }
2752
2753 static int summary_show(struct seq_file *s, void *data)
2754 {
2755 struct generic_pm_domain *genpd;
2756 int ret = 0;
2757
2758 seq_puts(s, "domain status slaves\n");
2759 seq_puts(s, " /device runtime status\n");
2760 seq_puts(s, "----------------------------------------------------------------------\n");
2761
2762 ret = mutex_lock_interruptible(&gpd_list_lock);
2763 if (ret)
2764 return -ERESTARTSYS;
2765
2766 list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
2767 ret = genpd_summary_one(s, genpd);
2768 if (ret)
2769 break;
2770 }
2771 mutex_unlock(&gpd_list_lock);
2772
2773 return ret;
2774 }
2775
2776 static int status_show(struct seq_file *s, void *data)
2777 {
2778 static const char * const status_lookup[] = {
2779 [GPD_STATE_ACTIVE] = "on",
2780 [GPD_STATE_POWER_OFF] = "off"
2781 };
2782
2783 struct generic_pm_domain *genpd = s->private;
2784 int ret = 0;
2785
2786 ret = genpd_lock_interruptible(genpd);
2787 if (ret)
2788 return -ERESTARTSYS;
2789
2790 if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
2791 goto exit;
2792
2793 if (genpd->status == GPD_STATE_POWER_OFF)
2794 seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
2795 genpd->state_idx);
2796 else
2797 seq_printf(s, "%s\n", status_lookup[genpd->status]);
2798 exit:
2799 genpd_unlock(genpd);
2800 return ret;
2801 }
2802
2803 static int sub_domains_show(struct seq_file *s, void *data)
2804 {
2805 struct generic_pm_domain *genpd = s->private;
2806 struct gpd_link *link;
2807 int ret = 0;
2808
2809 ret = genpd_lock_interruptible(genpd);
2810 if (ret)
2811 return -ERESTARTSYS;
2812
2813 list_for_each_entry(link, &genpd->master_links, master_node)
2814 seq_printf(s, "%s\n", link->slave->name);
2815
2816 genpd_unlock(genpd);
2817 return ret;
2818 }
2819
2820 static int idle_states_show(struct seq_file *s, void *data)
2821 {
2822 struct generic_pm_domain *genpd = s->private;
2823 unsigned int i;
2824 int ret = 0;
2825
2826 ret = genpd_lock_interruptible(genpd);
2827 if (ret)
2828 return -ERESTARTSYS;
2829
2830 seq_puts(s, "State Time Spent(ms)\n");
2831
2832 for (i = 0; i < genpd->state_count; i++) {
2833 ktime_t delta = 0;
2834 s64 msecs;
2835
2836 if ((genpd->status == GPD_STATE_POWER_OFF) &&
2837 (genpd->state_idx == i))
2838 delta = ktime_sub(ktime_get(), genpd->accounting_time);
2839
2840 msecs = ktime_to_ms(
2841 ktime_add(genpd->states[i].idle_time, delta));
2842 seq_printf(s, "S%-13i %lld\n", i, msecs);
2843 }
2844
2845 genpd_unlock(genpd);
2846 return ret;
2847 }
2848
2849 static int active_time_show(struct seq_file *s, void *data)
2850 {
2851 struct generic_pm_domain *genpd = s->private;
2852 ktime_t delta = 0;
2853 int ret = 0;
2854
2855 ret = genpd_lock_interruptible(genpd);
2856 if (ret)
2857 return -ERESTARTSYS;
2858
2859 if (genpd->status == GPD_STATE_ACTIVE)
2860 delta = ktime_sub(ktime_get(), genpd->accounting_time);
2861
2862 seq_printf(s, "%lld ms\n", ktime_to_ms(
2863 ktime_add(genpd->on_time, delta)));
2864
2865 genpd_unlock(genpd);
2866 return ret;
2867 }
2868
2869 static int total_idle_time_show(struct seq_file *s, void *data)
2870 {
2871 struct generic_pm_domain *genpd = s->private;
2872 ktime_t delta = 0, total = 0;
2873 unsigned int i;
2874 int ret = 0;
2875
2876 ret = genpd_lock_interruptible(genpd);
2877 if (ret)
2878 return -ERESTARTSYS;
2879
2880 for (i = 0; i < genpd->state_count; i++) {
2881
2882 if ((genpd->status == GPD_STATE_POWER_OFF) &&
2883 (genpd->state_idx == i))
2884 delta = ktime_sub(ktime_get(), genpd->accounting_time);
2885
2886 total = ktime_add(total, genpd->states[i].idle_time);
2887 }
2888 total = ktime_add(total, delta);
2889
2890 seq_printf(s, "%lld ms\n", ktime_to_ms(total));
2891
2892 genpd_unlock(genpd);
2893 return ret;
2894 }
2895
2896
2897 static int devices_show(struct seq_file *s, void *data)
2898 {
2899 struct generic_pm_domain *genpd = s->private;
2900 struct pm_domain_data *pm_data;
2901 const char *kobj_path;
2902 int ret = 0;
2903
2904 ret = genpd_lock_interruptible(genpd);
2905 if (ret)
2906 return -ERESTARTSYS;
2907
2908 list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
2909 kobj_path = kobject_get_path(&pm_data->dev->kobj,
2910 genpd_is_irq_safe(genpd) ?
2911 GFP_ATOMIC : GFP_KERNEL);
2912 if (kobj_path == NULL)
2913 continue;
2914
2915 seq_printf(s, "%s\n", kobj_path);
2916 kfree(kobj_path);
2917 }
2918
2919 genpd_unlock(genpd);
2920 return ret;
2921 }
2922
2923 static int perf_state_show(struct seq_file *s, void *data)
2924 {
2925 struct generic_pm_domain *genpd = s->private;
2926
2927 if (genpd_lock_interruptible(genpd))
2928 return -ERESTARTSYS;
2929
2930 seq_printf(s, "%u\n", genpd->performance_state);
2931
2932 genpd_unlock(genpd);
2933 return 0;
2934 }
2935
2936 DEFINE_SHOW_ATTRIBUTE(summary);
2937 DEFINE_SHOW_ATTRIBUTE(status);
2938 DEFINE_SHOW_ATTRIBUTE(sub_domains);
2939 DEFINE_SHOW_ATTRIBUTE(idle_states);
2940 DEFINE_SHOW_ATTRIBUTE(active_time);
2941 DEFINE_SHOW_ATTRIBUTE(total_idle_time);
2942 DEFINE_SHOW_ATTRIBUTE(devices);
2943 DEFINE_SHOW_ATTRIBUTE(perf_state);
2944
2945 static int __init genpd_debug_init(void)
2946 {
2947 struct dentry *d;
2948 struct generic_pm_domain *genpd;
2949
2950 genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
2951
2952 debugfs_create_file("pm_genpd_summary", 0444, genpd_debugfs_dir,
2953 NULL, &summary_fops);
2954
2955 list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
2956 d = debugfs_create_dir(genpd->name, genpd_debugfs_dir);
2957
2958 debugfs_create_file("current_state", 0444,
2959 d, genpd, &status_fops);
2960 debugfs_create_file("sub_domains", 0444,
2961 d, genpd, &sub_domains_fops);
2962 debugfs_create_file("idle_states", 0444,
2963 d, genpd, &idle_states_fops);
2964 debugfs_create_file("active_time", 0444,
2965 d, genpd, &active_time_fops);
2966 debugfs_create_file("total_idle_time", 0444,
2967 d, genpd, &total_idle_time_fops);
2968 debugfs_create_file("devices", 0444,
2969 d, genpd, &devices_fops);
2970 if (genpd->set_performance_state)
2971 debugfs_create_file("perf_state", 0444,
2972 d, genpd, &perf_state_fops);
2973 }
2974
2975 return 0;
2976 }
2977 late_initcall(genpd_debug_init);
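
/*
 * With debugfs mounted at its usual location, the files created above appear
 * under /sys/kernel/debug/pm_genpd/, e.g. the global pm_genpd_summary file
 * and a per-domain <domain-name>/current_state.
 */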
2978
2979 static void __exit genpd_debug_exit(void)
2980 {
2981 debugfs_remove_recursive(genpd_debugfs_dir);
2982 }
2983 __exitcall(genpd_debug_exit);
2984 #endif /* CONFIG_DEBUG_FS */