]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blob - drivers/base/power/domain.c
cde5983de6c233dc09921d052c9a9ca135f08ed8
[mirror_ubuntu-zesty-kernel.git] / drivers / base / power / domain.c
1 /*
2 * drivers/base/power/domain.c - Common code related to device power domains.
3 *
4 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
5 *
6 * This file is released under the GPLv2.
7 */
8
9 #include <linux/init.h>
10 #include <linux/kernel.h>
11 #include <linux/io.h>
12 #include <linux/pm_runtime.h>
13 #include <linux/pm_domain.h>
14 #include <linux/pm_qos.h>
15 #include <linux/slab.h>
16 #include <linux/err.h>
17 #include <linux/sched.h>
18 #include <linux/suspend.h>
19 #include <linux/export.h>
20
/*
 * Invoke a device PM callback of return @type for @dev: the domain-wide
 * callback in genpd->dev_ops is preferred, with the per-device callback in
 * the device's generic_pm_domain_data as fallback.  Evaluates to (type)0
 * when neither callback is set.
 */
#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d);			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback;			\
	if (__routine) {					\
		__ret = __routine(dev);				\
	} else {						\
		__routine = dev_gpd_data(dev)->ops.callback;	\
		if (__routine)					\
			__ret = __routine(dev);			\
	}							\
	__ret;							\
})
36
/*
 * Like GENPD_DEV_CALLBACK(), but additionally measure how long the callback
 * took and, on success, record a new worst-case latency in the device's
 * gpd_timing_data @field, warning and flagging the change (so latency-based
 * decisions can be recomputed) whenever the previous maximum is exceeded.
 */
#define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name)	\
({										\
	ktime_t __start = ktime_get();						\
	type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev);		\
	s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start));		\
	struct gpd_timing_data *__td = &dev_gpd_data(dev)->td;			\
	if (!__retval && __elapsed > __td->field) {				\
		__td->field = __elapsed;					\
		dev_warn(dev, name " latency exceeded, new value %lld ns\n",	\
			__elapsed);						\
		genpd->max_off_time_changed = true;				\
		__td->constraint_changed = true;				\
	}									\
	__retval;								\
})
52
/* All generic PM domains in the system, protected by gpd_list_lock. */
static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);
55
56 #ifdef CONFIG_PM
57
58 struct generic_pm_domain *dev_to_genpd(struct device *dev)
59 {
60 if (IS_ERR_OR_NULL(dev->pm_domain))
61 return ERR_PTR(-EINVAL);
62
63 return pd_to_genpd(dev->pm_domain);
64 }
65
/* Stop @dev via the domain's ->stop callback, tracking its "stop" latency. */
static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev,
					stop_latency_ns, "stop");
}
71
/* Start @dev via the domain's ->start callback, tracking its "start" latency. */
static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev,
					start_latency_ns, "start");
}
77
/* Save @dev's state via ->save_state, tracking the state-save latency. */
static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
					save_state_latency_ns, "state save");
}
83
/* Restore @dev's state via ->restore_state, tracking the restore latency. */
static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
					restore_state_latency_ns,
					"state restore");
}
90
91 static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
92 {
93 bool ret = false;
94
95 if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
96 ret = !!atomic_dec_and_test(&genpd->sd_count);
97
98 return ret;
99 }
100
/*
 * Increment the subdomain count of @genpd.  The barrier orders the increment
 * against subsequent accesses — presumably pairing with the sd_count checks
 * in the power-off paths; confirm against those readers.
 */
static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic_inc();
}
106
/*
 * Acquire genpd->lock once @genpd has settled into a stable state.
 *
 * The lock is dropped and re-taken around each sleep so that whoever owns
 * the transient state (BUSY/REPEAT/WAIT_MASTER) can make progress; on
 * return the domain is either active or powered off and the lock is held.
 */
static void genpd_acquire_lock(struct generic_pm_domain *genpd)
{
	DEFINE_WAIT(wait);

	mutex_lock(&genpd->lock);
	/*
	 * Wait for the domain to transition into either the active,
	 * or the power off state.
	 */
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		if (genpd->status == GPD_STATE_ACTIVE
		    || genpd->status == GPD_STATE_POWER_OFF)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);
}
130
/* Counterpart of genpd_acquire_lock(). */
static void genpd_release_lock(struct generic_pm_domain *genpd)
{
	mutex_unlock(&genpd->lock);
}
135
136 static void genpd_set_active(struct generic_pm_domain *genpd)
137 {
138 if (genpd->resume_count == 0)
139 genpd->status = GPD_STATE_ACTIVE;
140 }
141
/**
 * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 *
 * Restore power to @genpd and all of its masters so that it is possible to
 * resume a device belonging to it.
 *
 * Must be called with genpd->lock held; drops and re-acquires that lock
 * while waiting for the domain's master and while powering masters up.
 */
int __pm_genpd_poweron(struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct gpd_link *link;
	DEFINE_WAIT(wait);
	int ret = 0;

	/* If the domain's master is being waited for, we have to wait too. */
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		if (genpd->status != GPD_STATE_WAIT_MASTER)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);

	/*
	 * Nothing to do if the domain is already active, or if it is to stay
	 * powered off for the whole system transition (prepared_count > 0
	 * with suspend_power_off set).
	 */
	if (genpd->status == GPD_STATE_ACTIVE
	    || (genpd->prepared_count > 0 && genpd->suspend_power_off))
		return 0;

	/* Any state other than "power off" means power is there already. */
	if (genpd->status != GPD_STATE_POWER_OFF) {
		genpd_set_active(genpd);
		return 0;
	}

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the masters' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_inc(link->master);
		genpd->status = GPD_STATE_WAIT_MASTER;

		mutex_unlock(&genpd->lock);

		ret = pm_genpd_poweron(link->master);

		mutex_lock(&genpd->lock);

		/*
		 * The "wait for parent" status is guaranteed not to change
		 * while the master is powering on.
		 */
		genpd->status = GPD_STATE_POWER_OFF;
		wake_up_all(&genpd->status_wait_queue);
		if (ret) {
			genpd_sd_counter_dec(link->master);
			goto err;
		}
	}

	if (genpd->power_on) {
		ktime_t time_start = ktime_get();
		s64 elapsed_ns;

		ret = genpd->power_on(genpd);
		if (ret)
			goto err;

		/* Record a new worst-case power-on latency when exceeded. */
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > genpd->power_on_latency_ns) {
			genpd->power_on_latency_ns = elapsed_ns;
			genpd->max_off_time_changed = true;
			if (genpd->name)
				pr_warning("%s: Power-on latency exceeded, "
					"new value %lld ns\n", genpd->name,
					elapsed_ns);
		}
	}

	genpd_set_active(genpd);

	return 0;

 err:
	/* Undo the subdomain-count increments done for the masters so far. */
	list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
		genpd_sd_counter_dec(link->master);

	return ret;
}
235
/**
 * pm_genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 *
 * Locking wrapper around __pm_genpd_poweron(): takes genpd->lock for the
 * duration of the power-up.
 */
int pm_genpd_poweron(struct generic_pm_domain *genpd)
{
	int ret;

	mutex_lock(&genpd->lock);
	ret = __pm_genpd_poweron(genpd);
	mutex_unlock(&genpd->lock);
	return ret;
}
249
250 #endif /* CONFIG_PM */
251
252 #ifdef CONFIG_PM_RUNTIME
253
/*
 * genpd_dev_pm_qos_notifier - Notifier callback run on PM QoS changes.
 *
 * Marks the timing/constraint data of the notifier's device — and of every
 * ancestor domain up to the first device with power.ignore_children set —
 * as changed, so that the cached latency decisions are re-evaluated.
 */
static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
				     unsigned long val, void *ptr)
{
	struct generic_pm_domain_data *gpd_data;
	struct device *dev;

	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);

	/* Bail out if the device backing this notifier is already gone. */
	mutex_lock(&gpd_data->lock);
	dev = gpd_data->base.dev;
	if (!dev) {
		mutex_unlock(&gpd_data->lock);
		return NOTIFY_DONE;
	}
	mutex_unlock(&gpd_data->lock);

	/* Walk up the device hierarchy, flagging each domain on the way. */
	for (;;) {
		struct generic_pm_domain *genpd;
		struct pm_domain_data *pdd;

		spin_lock_irq(&dev->power.lock);

		pdd = dev->power.subsys_data ?
				dev->power.subsys_data->domain_data : NULL;
		if (pdd) {
			to_gpd_data(pdd)->td.constraint_changed = true;
			genpd = dev_to_genpd(dev);
		} else {
			genpd = ERR_PTR(-ENODATA);
		}

		spin_unlock_irq(&dev->power.lock);

		if (!IS_ERR(genpd)) {
			mutex_lock(&genpd->lock);
			genpd->max_off_time_changed = true;
			mutex_unlock(&genpd->lock);
		}

		dev = dev->parent;
		if (!dev || dev->power.ignore_children)
			break;
	}

	return NOTIFY_DONE;
}
300
/**
 * __pm_genpd_save_device - Save the pre-suspend state of a device.
 * @pdd: Domain data of the device to save the state of.
 * @genpd: PM domain the device belongs to.
 *
 * Called with genpd->lock held; the lock is dropped around the device
 * callbacks and re-acquired afterwards.
 */
static int __pm_genpd_save_device(struct pm_domain_data *pdd,
				  struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
	struct device *dev = pdd->dev;
	int ret = 0;

	/* Nothing to do if the state has already been saved. */
	if (gpd_data->need_restore)
		return 0;

	mutex_unlock(&genpd->lock);

	/* The device must be operational while its state is being saved. */
	genpd_start_dev(genpd, dev);
	ret = genpd_save_dev(genpd, dev);
	genpd_stop_dev(genpd, dev);

	mutex_lock(&genpd->lock);

	if (!ret)
		gpd_data->need_restore = true;

	return ret;
}
330
/**
 * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
 * @pdd: Domain data of the device to restore the state of.
 * @genpd: PM domain the device belongs to.
 *
 * Called with genpd->lock held; the lock is dropped around the device
 * callbacks and re-acquired afterwards.
 */
static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
				      struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
	struct device *dev = pdd->dev;

	/* Nothing to do unless a saved state is pending restoration. */
	if (!gpd_data->need_restore)
		return;

	mutex_unlock(&genpd->lock);

	/* The device must be operational while its state is being restored. */
	genpd_start_dev(genpd, dev);
	genpd_restore_dev(genpd, dev);
	genpd_stop_dev(genpd, dev);

	mutex_lock(&genpd->lock);

	gpd_data->need_restore = false;
}
356
357 /**
358 * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
359 * @genpd: PM domain to check.
360 *
361 * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
362 * a "power off" operation, which means that a "power on" has occured in the
363 * meantime, or if its resume_count field is different from zero, which means
364 * that one of its devices has been resumed in the meantime.
365 */
366 static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
367 {
368 return genpd->status == GPD_STATE_WAIT_MASTER
369 || genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
370 }
371
372 /**
373 * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
374 * @genpd: PM domait to power off.
375 *
376 * Queue up the execution of pm_genpd_poweroff() unless it's already been done
377 * before.
378 */
379 void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
380 {
381 if (!work_pending(&genpd->power_off_work))
382 queue_work(pm_wq, &genpd->power_off_work);
383 }
384
/**
 * pm_genpd_poweroff - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, run the runtime suspend callbacks provided by all of
 * the @genpd's devices' drivers and remove power from @genpd.
 *
 * Must be called with genpd->lock held; the lock may be dropped and
 * re-acquired by __pm_genpd_save_device().
 */
static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended;
	int ret = 0;

 start:
	/*
	 * Do not try to power off the domain in the following situations:
	 * (1) The domain is already in the "power off" state.
	 * (2) The domain is waiting for its master to power up.
	 * (3) One of the domain's devices is being resumed right now.
	 * (4) System suspend is in progress.
	 */
	if (genpd->status == GPD_STATE_POWER_OFF
	    || genpd->status == GPD_STATE_WAIT_MASTER
	    || genpd->resume_count > 0 || genpd->prepared_count > 0)
		return 0;

	/* Subdomains still depend on this domain being powered. */
	if (atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	/*
	 * Count bound devices that are not ready for domain power-off:
	 * not runtime-suspended, irq_safe, or marked always_on.
	 */
	not_suspended = 0;
	list_for_each_entry(pdd, &genpd->dev_list, list_node)
		if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
		    || pdd->dev->power.irq_safe || to_gpd_data(pdd)->always_on))
			not_suspended++;

	/* in_progress devices are being suspended by our caller right now. */
	if (not_suspended > genpd->in_progress)
		return -EBUSY;

	if (genpd->poweroff_task) {
		/*
		 * Another instance of pm_genpd_poweroff() is executing
		 * callbacks, so tell it to start over and return.
		 */
		genpd->status = GPD_STATE_REPEAT;
		return 0;
	}

	/* Let the governor veto the power-off. */
	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return -EAGAIN;
	}

	genpd->status = GPD_STATE_BUSY;
	genpd->poweroff_task = current;

	/* Save device states; __pm_genpd_save_device() drops genpd->lock. */
	list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
		ret = atomic_read(&genpd->sd_count) == 0 ?
			__pm_genpd_save_device(pdd, genpd) : -EBUSY;

		if (genpd_abort_poweroff(genpd))
			goto out;

		if (ret) {
			genpd_set_active(genpd);
			goto out;
		}

		/* Someone asked us (see above) to redo all of the checks. */
		if (genpd->status == GPD_STATE_REPEAT) {
			genpd->poweroff_task = NULL;
			goto start;
		}
	}

	if (genpd->power_off) {
		ktime_t time_start;
		s64 elapsed_ns;

		if (atomic_read(&genpd->sd_count) > 0) {
			ret = -EBUSY;
			goto out;
		}

		time_start = ktime_get();

		/*
		 * If sd_count > 0 at this point, one of the subdomains hasn't
		 * managed to call pm_genpd_poweron() for the master yet after
		 * incrementing it.  In that case pm_genpd_poweron() will wait
		 * for us to drop the lock, so we can call .power_off() and let
		 * the pm_genpd_poweron() restore power for us (this shouldn't
		 * happen very often).
		 */
		ret = genpd->power_off(genpd);
		if (ret == -EBUSY) {
			genpd_set_active(genpd);
			goto out;
		}

		/* Record a new worst-case power-off latency when exceeded. */
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > genpd->power_off_latency_ns) {
			genpd->power_off_latency_ns = elapsed_ns;
			genpd->max_off_time_changed = true;
			if (genpd->name)
				pr_warning("%s: Power-off latency exceeded, "
					"new value %lld ns\n", genpd->name,
					elapsed_ns);
		}
	}

	genpd->status = GPD_STATE_POWER_OFF;

	/* Drop the masters' subdomain counts and let them power off too. */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		genpd_queue_power_off_work(link->master);
	}

 out:
	genpd->poweroff_task = NULL;
	wake_up_all(&genpd->status_wait_queue);
	return ret;
}
509
510 /**
511 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
512 * @work: Work structure used for scheduling the execution of this function.
513 */
514 static void genpd_power_off_work_fn(struct work_struct *work)
515 {
516 struct generic_pm_domain *genpd;
517
518 genpd = container_of(work, struct generic_pm_domain, power_off_work);
519
520 genpd_acquire_lock(genpd);
521 pm_genpd_poweroff(genpd);
522 genpd_release_lock(genpd);
523 }
524
/**
 * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool (*stop_ok)(struct device *__dev);
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	might_sleep_if(!genpd->dev_irq_safe);

	/* Devices marked always_on are never stopped at run time. */
	if (dev_gpd_data(dev)->always_on)
		return -EBUSY;

	/* Let the governor veto the stop. */
	stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
	if (stop_ok && !stop_ok(dev))
		return -EBUSY;

	ret = genpd_stop_dev(genpd, dev);
	if (ret)
		return ret;

	/*
	 * If power.irq_safe is set, this routine will be run with interrupts
	 * off, so it can't use mutexes.
	 */
	if (dev->power.irq_safe)
		return 0;

	mutex_lock(&genpd->lock);
	/* in_progress lets pm_genpd_poweroff() count this device as idle. */
	genpd->in_progress++;
	pm_genpd_poweroff(genpd);
	genpd->in_progress--;
	mutex_unlock(&genpd->lock);

	return 0;
}
573
/**
 * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	DEFINE_WAIT(wait);
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	might_sleep_if(!genpd->dev_irq_safe);

	/* If power.irq_safe, the PM domain is never powered off. */
	if (dev->power.irq_safe)
		goto out;

	mutex_lock(&genpd->lock);
	ret = __pm_genpd_poweron(genpd);
	if (ret) {
		mutex_unlock(&genpd->lock);
		return ret;
	}
	/* Block a concurrent power-off while this device is resuming. */
	genpd->status = GPD_STATE_BUSY;
	genpd->resume_count++;
	/* Wait for any other task's power-off callbacks to finish. */
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		/*
		 * If current is the powering off task, we have been called
		 * reentrantly from one of the device callbacks, so we should
		 * not wait.
		 */
		if (!genpd->poweroff_task || genpd->poweroff_task == current)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);
	__pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd);
	genpd->resume_count--;
	genpd_set_active(genpd);
	wake_up_all(&genpd->status_wait_queue);
	mutex_unlock(&genpd->lock);

 out:
	genpd_start_dev(genpd, dev);

	return 0;
}
636
637 /**
638 * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
639 */
640 void pm_genpd_poweroff_unused(void)
641 {
642 struct generic_pm_domain *genpd;
643
644 mutex_lock(&gpd_list_lock);
645
646 list_for_each_entry(genpd, &gpd_list, gpd_list_node)
647 genpd_queue_power_off_work(genpd);
648
649 mutex_unlock(&gpd_list_lock);
650 }
651
652 #else
653
/* Stubs used when CONFIG_PM_RUNTIME is not set. */
static inline int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
					    unsigned long val, void *ptr)
{
	return NOTIFY_DONE;
}

static inline void genpd_power_off_work_fn(struct work_struct *work) {}

/* No runtime PM callbacks without CONFIG_PM_RUNTIME. */
#define pm_genpd_runtime_suspend NULL
#define pm_genpd_runtime_resume NULL
664
665 #endif /* CONFIG_PM_RUNTIME */
666
667 #ifdef CONFIG_PM_SLEEP
668
/* Ask the ->active_wakeup callback whether @dev must stay powered for wakeup. */
static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
				    struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
}
674
/* Invoke the domain's (or device's) ->suspend callback for @dev. */
static int genpd_suspend_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, suspend, dev);
}
679
/* Invoke the domain's (or device's) ->suspend_late callback for @dev. */
static int genpd_suspend_late(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, suspend_late, dev);
}
684
/* Invoke the domain's (or device's) ->resume_early callback for @dev. */
static int genpd_resume_early(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, resume_early, dev);
}
689
/* Invoke the domain's (or device's) ->resume callback for @dev. */
static int genpd_resume_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, resume, dev);
}
694
/* Invoke the domain's (or device's) ->freeze callback for @dev. */
static int genpd_freeze_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, freeze, dev);
}
699
/* Invoke the domain's (or device's) ->freeze_late callback for @dev. */
static int genpd_freeze_late(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, freeze_late, dev);
}
704
/* Invoke the domain's (or device's) ->thaw_early callback for @dev. */
static int genpd_thaw_early(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, thaw_early, dev);
}
709
/* Invoke the domain's (or device's) ->thaw callback for @dev. */
static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, thaw, dev);
}
714
715 /**
716 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
717 * @genpd: PM domain to power off, if possible.
718 *
719 * Check if the given PM domain can be powered off (during system suspend or
720 * hibernation) and do that if so. Also, in that case propagate to its masters.
721 *
722 * This function is only called in "noirq" stages of system power transitions,
723 * so it need not acquire locks (all of the "noirq" callbacks are executed
724 * sequentially, so it is guaranteed that it will never run twice in parallel).
725 */
726 static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
727 {
728 struct gpd_link *link;
729
730 if (genpd->status == GPD_STATE_POWER_OFF)
731 return;
732
733 if (genpd->suspended_count != genpd->device_count
734 || atomic_read(&genpd->sd_count) > 0)
735 return;
736
737 if (genpd->power_off)
738 genpd->power_off(genpd);
739
740 genpd->status = GPD_STATE_POWER_OFF;
741
742 list_for_each_entry(link, &genpd->slave_links, slave_node) {
743 genpd_sd_counter_dec(link->master);
744 pm_genpd_sync_poweroff(link->master);
745 }
746 }
747
748 /**
749 * resume_needed - Check whether to resume a device before system suspend.
750 * @dev: Device to check.
751 * @genpd: PM domain the device belongs to.
752 *
753 * There are two cases in which a device that can wake up the system from sleep
754 * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
755 * to wake up the system and it has to remain active for this purpose while the
756 * system is in the sleep state and (2) if the device is not enabled to wake up
757 * the system from sleep states and it generally doesn't generate wakeup signals
758 * by itself (those signals are generated on its behalf by other parts of the
759 * system). In the latter case it may be necessary to reconfigure the device's
760 * wakeup settings during system suspend, because it may have been set up to
761 * signal remote wakeup from the system's working state as needed by runtime PM.
762 * Return 'true' in either of the above cases.
763 */
764 static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
765 {
766 bool active_wakeup;
767
768 if (!device_can_wakeup(dev))
769 return false;
770
771 active_wakeup = genpd_dev_active_wakeup(genpd, dev);
772 return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
773 }
774
/**
 * pm_genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int pm_genpd_prepare(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * If a wakeup request is pending for the device, it should be woken up
	 * at this point and a system wakeup event should be reported if it's
	 * set up to wake up the system from sleep states.
	 */
	pm_runtime_get_noresume(dev);
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		pm_runtime_put_sync(dev);
		return -EBUSY;
	}

	if (resume_needed(dev, genpd))
		pm_runtime_resume(dev);

	genpd_acquire_lock(genpd);

	/* First device of this domain to prepare: latch the "off" state. */
	if (genpd->prepared_count++ == 0) {
		genpd->suspended_count = 0;
		genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
	}

	genpd_release_lock(genpd);

	/*
	 * NOTE(review): suspend_power_off is read here without genpd->lock;
	 * presumably only prepare/complete modify it during a system
	 * transition — confirm.
	 */
	if (genpd->suspend_power_off) {
		pm_runtime_put_noidle(dev);
		return 0;
	}

	/*
	 * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
	 * so pm_genpd_poweron() will return immediately, but if the device
	 * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
	 * to make it operational.
	 */
	pm_runtime_resume(dev);
	__pm_runtime_disable(dev, false);

	ret = pm_generic_prepare(dev);
	if (ret) {
		/* Roll back the prepared count on failure. */
		mutex_lock(&genpd->lock);

		if (--genpd->prepared_count == 0)
			genpd->suspend_power_off = false;

		mutex_unlock(&genpd->lock);
		pm_runtime_enable(dev);
	}

	pm_runtime_put_sync(dev);
	return ret;
}
849
850 /**
851 * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
852 * @dev: Device to suspend.
853 *
854 * Suspend a device under the assumption that its pm_domain field points to the
855 * domain member of an object of type struct generic_pm_domain representing
856 * a PM domain consisting of I/O devices.
857 */
858 static int pm_genpd_suspend(struct device *dev)
859 {
860 struct generic_pm_domain *genpd;
861
862 dev_dbg(dev, "%s()\n", __func__);
863
864 genpd = dev_to_genpd(dev);
865 if (IS_ERR(genpd))
866 return -EINVAL;
867
868 return genpd->suspend_power_off ? 0 : genpd_suspend_dev(genpd, dev);
869 }
870
871 /**
872 * pm_genpd_suspend_late - Late suspend of a device from an I/O PM domain.
873 * @dev: Device to suspend.
874 *
875 * Carry out a late suspend of a device under the assumption that its
876 * pm_domain field points to the domain member of an object of type
877 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
878 */
879 static int pm_genpd_suspend_late(struct device *dev)
880 {
881 struct generic_pm_domain *genpd;
882
883 dev_dbg(dev, "%s()\n", __func__);
884
885 genpd = dev_to_genpd(dev);
886 if (IS_ERR(genpd))
887 return -EINVAL;
888
889 return genpd->suspend_power_off ? 0 : genpd_suspend_late(genpd, dev);
890 }
891
/**
 * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int pm_genpd_suspend_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * Leave the device operational if the transition is a no-op for the
	 * domain (suspend_power_off), if it must always stay on, or if it is
	 * on the wakeup path and configured for active wakeup.
	 */
	if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on
	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
		return 0;

	genpd_stop_dev(genpd, dev);

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	genpd->suspended_count++;
	pm_genpd_sync_poweroff(genpd);

	return 0;
}
925
/**
 * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int pm_genpd_resume_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/* Mirror the conditions under which suspend_noirq did nothing. */
	if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on
	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
		return 0;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	pm_genpd_poweron(genpd);
	genpd->suspended_count--;

	return genpd_start_dev(genpd, dev);
}
956
957 /**
958 * pm_genpd_resume_early - Early resume of a device in an I/O PM domain.
959 * @dev: Device to resume.
960 *
961 * Carry out an early resume of a device under the assumption that its
962 * pm_domain field points to the domain member of an object of type
963 * struct generic_pm_domain representing a power domain consisting of I/O
964 * devices.
965 */
966 static int pm_genpd_resume_early(struct device *dev)
967 {
968 struct generic_pm_domain *genpd;
969
970 dev_dbg(dev, "%s()\n", __func__);
971
972 genpd = dev_to_genpd(dev);
973 if (IS_ERR(genpd))
974 return -EINVAL;
975
976 return genpd->suspend_power_off ? 0 : genpd_resume_early(genpd, dev);
977 }
978
979 /**
980 * pm_genpd_resume - Resume of device in an I/O PM domain.
981 * @dev: Device to resume.
982 *
983 * Resume a device under the assumption that its pm_domain field points to the
984 * domain member of an object of type struct generic_pm_domain representing
985 * a power domain consisting of I/O devices.
986 */
987 static int pm_genpd_resume(struct device *dev)
988 {
989 struct generic_pm_domain *genpd;
990
991 dev_dbg(dev, "%s()\n", __func__);
992
993 genpd = dev_to_genpd(dev);
994 if (IS_ERR(genpd))
995 return -EINVAL;
996
997 return genpd->suspend_power_off ? 0 : genpd_resume_dev(genpd, dev);
998 }
999
1000 /**
1001 * pm_genpd_freeze - Freezing a device in an I/O PM domain.
1002 * @dev: Device to freeze.
1003 *
1004 * Freeze a device under the assumption that its pm_domain field points to the
1005 * domain member of an object of type struct generic_pm_domain representing
1006 * a power domain consisting of I/O devices.
1007 */
1008 static int pm_genpd_freeze(struct device *dev)
1009 {
1010 struct generic_pm_domain *genpd;
1011
1012 dev_dbg(dev, "%s()\n", __func__);
1013
1014 genpd = dev_to_genpd(dev);
1015 if (IS_ERR(genpd))
1016 return -EINVAL;
1017
1018 return genpd->suspend_power_off ? 0 : genpd_freeze_dev(genpd, dev);
1019 }
1020
1021 /**
1022 * pm_genpd_freeze_late - Late freeze of a device in an I/O PM domain.
1023 * @dev: Device to freeze.
1024 *
1025 * Carry out a late freeze of a device under the assumption that its
1026 * pm_domain field points to the domain member of an object of type
1027 * struct generic_pm_domain representing a power domain consisting of I/O
1028 * devices.
1029 */
1030 static int pm_genpd_freeze_late(struct device *dev)
1031 {
1032 struct generic_pm_domain *genpd;
1033
1034 dev_dbg(dev, "%s()\n", __func__);
1035
1036 genpd = dev_to_genpd(dev);
1037 if (IS_ERR(genpd))
1038 return -EINVAL;
1039
1040 return genpd->suspend_power_off ? 0 : genpd_freeze_late(genpd, dev);
1041 }
1042
1043 /**
1044 * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
1045 * @dev: Device to freeze.
1046 *
1047 * Carry out a late freeze of a device under the assumption that its
1048 * pm_domain field points to the domain member of an object of type
1049 * struct generic_pm_domain representing a power domain consisting of I/O
1050 * devices.
1051 */
1052 static int pm_genpd_freeze_noirq(struct device *dev)
1053 {
1054 struct generic_pm_domain *genpd;
1055
1056 dev_dbg(dev, "%s()\n", __func__);
1057
1058 genpd = dev_to_genpd(dev);
1059 if (IS_ERR(genpd))
1060 return -EINVAL;
1061
1062 return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ?
1063 0 : genpd_stop_dev(genpd, dev);
1064 }
1065
1066 /**
1067 * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
1068 * @dev: Device to thaw.
1069 *
1070 * Start the device, unless power has been removed from the domain already
1071 * before the system transition.
1072 */
1073 static int pm_genpd_thaw_noirq(struct device *dev)
1074 {
1075 struct generic_pm_domain *genpd;
1076
1077 dev_dbg(dev, "%s()\n", __func__);
1078
1079 genpd = dev_to_genpd(dev);
1080 if (IS_ERR(genpd))
1081 return -EINVAL;
1082
1083 return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ?
1084 0 : genpd_start_dev(genpd, dev);
1085 }
1086
1087 /**
1088 * pm_genpd_thaw_early - Early thaw of device in an I/O PM domain.
1089 * @dev: Device to thaw.
1090 *
1091 * Carry out an early thaw of a device under the assumption that its
1092 * pm_domain field points to the domain member of an object of type
1093 * struct generic_pm_domain representing a power domain consisting of I/O
1094 * devices.
1095 */
1096 static int pm_genpd_thaw_early(struct device *dev)
1097 {
1098 struct generic_pm_domain *genpd;
1099
1100 dev_dbg(dev, "%s()\n", __func__);
1101
1102 genpd = dev_to_genpd(dev);
1103 if (IS_ERR(genpd))
1104 return -EINVAL;
1105
1106 return genpd->suspend_power_off ? 0 : genpd_thaw_early(genpd, dev);
1107 }
1108
1109 /**
1110 * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
1111 * @dev: Device to thaw.
1112 *
1113 * Thaw a device under the assumption that its pm_domain field points to the
1114 * domain member of an object of type struct generic_pm_domain representing
1115 * a power domain consisting of I/O devices.
1116 */
1117 static int pm_genpd_thaw(struct device *dev)
1118 {
1119 struct generic_pm_domain *genpd;
1120
1121 dev_dbg(dev, "%s()\n", __func__);
1122
1123 genpd = dev_to_genpd(dev);
1124 if (IS_ERR(genpd))
1125 return -EINVAL;
1126
1127 return genpd->suspend_power_off ? 0 : genpd_thaw_dev(genpd, dev);
1128 }
1129
/**
 * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain will be in the same power state as before the
 * hibernation the system is resuming from and start the device if necessary.
 *
 * Returns 0 on success and -EINVAL if @dev is not attached to a genpd.
 */
static int pm_genpd_restore_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 *
	 * At this point suspended_count == 0 means we are being run for the
	 * first time for the given domain in the present cycle.
	 */
	if (genpd->suspended_count++ == 0) {
		/*
		 * The boot kernel might put the domain into arbitrary state,
		 * so make it appear as powered off to pm_genpd_poweron(), so
		 * that it tries to power it on in case it was really off.
		 */
		genpd->status = GPD_STATE_POWER_OFF;
		if (genpd->suspend_power_off) {
			/*
			 * If the domain was off before the hibernation, make
			 * sure it will be off going forward.
			 */
			if (genpd->power_off)
				genpd->power_off(genpd);

			return 0;
		}
	}

	/* The domain is intended to stay off across this transition. */
	if (genpd->suspend_power_off)
		return 0;

	pm_genpd_poweron(genpd);

	/* Always-on devices were never stopped, so do not restart them. */
	return dev_gpd_data(dev)->always_on ? 0 : genpd_start_dev(genpd, dev);
}
1181
/**
 * pm_genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void pm_genpd_complete(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool run_complete;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return;

	mutex_lock(&genpd->lock);

	/* Only complete devices whose domain actually went through suspend. */
	run_complete = !genpd->suspend_power_off;
	/* Last device to complete resets the domain's suspend state. */
	if (--genpd->prepared_count == 0)
		genpd->suspend_power_off = false;

	mutex_unlock(&genpd->lock);

	if (run_complete) {
		pm_generic_complete(dev);
		/*
		 * NOTE(review): presumably balances a runtime PM disable done
		 * in the prepare phase (not visible here) - confirm.
		 */
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
		pm_runtime_idle(dev);
	}
}
1217
1218 #else
1219
1220 #define pm_genpd_prepare NULL
1221 #define pm_genpd_suspend NULL
1222 #define pm_genpd_suspend_late NULL
1223 #define pm_genpd_suspend_noirq NULL
1224 #define pm_genpd_resume_early NULL
1225 #define pm_genpd_resume_noirq NULL
1226 #define pm_genpd_resume NULL
1227 #define pm_genpd_freeze NULL
1228 #define pm_genpd_freeze_late NULL
1229 #define pm_genpd_freeze_noirq NULL
1230 #define pm_genpd_thaw_early NULL
1231 #define pm_genpd_thaw_noirq NULL
1232 #define pm_genpd_thaw NULL
1233 #define pm_genpd_restore_noirq NULL
1234 #define pm_genpd_complete NULL
1235
1236 #endif /* CONFIG_PM_SLEEP */
1237
/**
 * __pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device (optional).
 *
 * Returns 0 on success, -EINVAL if the arguments are invalid, the domain is
 * powered off or already contains @dev, -EAGAIN if a system suspend
 * transition is in progress (prepared_count > 0), or -ENOMEM on allocation
 * failure.
 */
int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			  struct gpd_timing_data *td)
{
	struct generic_pm_domain_data *gpd_data;
	struct pm_domain_data *pdd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
	if (!gpd_data)
		return -ENOMEM;

	mutex_init(&gpd_data->lock);
	/* Get notified of PM QoS constraint changes for this device. */
	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
	dev_pm_qos_add_notifier(dev, &gpd_data->nb);

	genpd_acquire_lock(genpd);

	if (genpd->status == GPD_STATE_POWER_OFF) {
		ret = -EINVAL;
		goto out;
	}

	/* Devices must not be added while a system transition is in progress. */
	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	/* Reject the device if it is already a member of this domain. */
	list_for_each_entry(pdd, &genpd->dev_list, list_node)
		if (pdd->dev == dev) {
			ret = -EINVAL;
			goto out;
		}

	genpd->device_count++;
	/* Make the governor recompute the domain's off-time budget. */
	genpd->max_off_time_changed = true;

	dev_pm_get_subsys_data(dev);

	/* Publish the domain pointer and data under the device's power lock. */
	mutex_lock(&gpd_data->lock);
	spin_lock_irq(&dev->power.lock);
	dev->pm_domain = &genpd->domain;
	dev->power.subsys_data->domain_data = &gpd_data->base;
	gpd_data->base.dev = dev;
	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
	gpd_data->need_restore = false;
	if (td)
		gpd_data->td = *td;

	gpd_data->td.constraint_changed = true;
	/* -1: no effective PM QoS constraint has been computed yet. */
	gpd_data->td.effective_constraint_ns = -1;
	spin_unlock_irq(&dev->power.lock);
	mutex_unlock(&gpd_data->lock);

	genpd_release_lock(genpd);

	return 0;

 out:
	genpd_release_lock(genpd);

	/* Undo the notifier registration and free the unused domain data. */
	dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
	kfree(gpd_data);
	return ret;
}
1313
1314 /**
1315 * __pm_genpd_of_add_device - Add a device to an I/O PM domain.
1316 * @genpd_node: Device tree node pointer representing a PM domain to which the
1317 * the device is added to.
1318 * @dev: Device to be added.
1319 * @td: Set of PM QoS timing parameters to attach to the device.
1320 */
1321 int __pm_genpd_of_add_device(struct device_node *genpd_node, struct device *dev,
1322 struct gpd_timing_data *td)
1323 {
1324 struct generic_pm_domain *genpd = NULL, *gpd;
1325
1326 dev_dbg(dev, "%s()\n", __func__);
1327
1328 if (IS_ERR_OR_NULL(genpd_node) || IS_ERR_OR_NULL(dev))
1329 return -EINVAL;
1330
1331 mutex_lock(&gpd_list_lock);
1332 list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
1333 if (gpd->of_node == genpd_node) {
1334 genpd = gpd;
1335 break;
1336 }
1337 }
1338 mutex_unlock(&gpd_list_lock);
1339
1340 if (!genpd)
1341 return -EINVAL;
1342
1343 return __pm_genpd_add_device(genpd, dev, td);
1344 }
1345
/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @genpd: PM domain to remove the device from.
 * @dev: Device to be removed.
 *
 * Returns 0 on success, -EINVAL if @dev is not a member of @genpd, or
 * -EAGAIN if a system suspend transition is in progress.
 */
int pm_genpd_remove_device(struct generic_pm_domain *genpd,
			   struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;
	struct pm_domain_data *pdd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)
	    ||  IS_ERR_OR_NULL(dev->pm_domain)
	    ||  pd_to_genpd(dev->pm_domain) != genpd)
		return -EINVAL;

	genpd_acquire_lock(genpd);

	/* Devices must not be removed while a system transition is in progress. */
	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	genpd->device_count--;
	/* Make the governor recompute the domain's off-time budget. */
	genpd->max_off_time_changed = true;

	/* Detach the domain data from the device under its power lock. */
	spin_lock_irq(&dev->power.lock);
	dev->pm_domain = NULL;
	pdd = dev->power.subsys_data->domain_data;
	list_del_init(&pdd->list_node);
	dev->power.subsys_data->domain_data = NULL;
	spin_unlock_irq(&dev->power.lock);

	gpd_data = to_gpd_data(pdd);
	mutex_lock(&gpd_data->lock);
	pdd->dev = NULL;
	mutex_unlock(&gpd_data->lock);

	genpd_release_lock(genpd);

	/* Safe outside the domain lock: the data is no longer reachable. */
	dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
	kfree(gpd_data);
	dev_pm_put_subsys_data(dev);
	return 0;

 out:
	genpd_release_lock(genpd);

	return ret;
}
1399
1400 /**
1401 * pm_genpd_dev_always_on - Set/unset the "always on" flag for a given device.
1402 * @dev: Device to set/unset the flag for.
1403 * @val: The new value of the device's "always on" flag.
1404 */
1405 void pm_genpd_dev_always_on(struct device *dev, bool val)
1406 {
1407 struct pm_subsys_data *psd;
1408 unsigned long flags;
1409
1410 spin_lock_irqsave(&dev->power.lock, flags);
1411
1412 psd = dev_to_psd(dev);
1413 if (psd && psd->domain_data)
1414 to_gpd_data(psd->domain_data)->always_on = val;
1415
1416 spin_unlock_irqrestore(&dev->power.lock, flags);
1417 }
1418 EXPORT_SYMBOL_GPL(pm_genpd_dev_always_on);
1419
1420 /**
1421 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1422 * @genpd: Master PM domain to add the subdomain to.
1423 * @subdomain: Subdomain to be added.
1424 */
1425 int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
1426 struct generic_pm_domain *subdomain)
1427 {
1428 struct gpd_link *link;
1429 int ret = 0;
1430
1431 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
1432 return -EINVAL;
1433
1434 start:
1435 genpd_acquire_lock(genpd);
1436 mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
1437
1438 if (subdomain->status != GPD_STATE_POWER_OFF
1439 && subdomain->status != GPD_STATE_ACTIVE) {
1440 mutex_unlock(&subdomain->lock);
1441 genpd_release_lock(genpd);
1442 goto start;
1443 }
1444
1445 if (genpd->status == GPD_STATE_POWER_OFF
1446 && subdomain->status != GPD_STATE_POWER_OFF) {
1447 ret = -EINVAL;
1448 goto out;
1449 }
1450
1451 list_for_each_entry(link, &genpd->slave_links, slave_node) {
1452 if (link->slave == subdomain && link->master == genpd) {
1453 ret = -EINVAL;
1454 goto out;
1455 }
1456 }
1457
1458 link = kzalloc(sizeof(*link), GFP_KERNEL);
1459 if (!link) {
1460 ret = -ENOMEM;
1461 goto out;
1462 }
1463 link->master = genpd;
1464 list_add_tail(&link->master_node, &genpd->master_links);
1465 link->slave = subdomain;
1466 list_add_tail(&link->slave_node, &subdomain->slave_links);
1467 if (subdomain->status != GPD_STATE_POWER_OFF)
1468 genpd_sd_counter_inc(genpd);
1469
1470 out:
1471 mutex_unlock(&subdomain->lock);
1472 genpd_release_lock(genpd);
1473
1474 return ret;
1475 }
1476
/**
 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @genpd: Master PM domain to remove the subdomain from.
 * @subdomain: Subdomain to be removed.
 *
 * Returns 0 on success and -EINVAL if the arguments are invalid or no link
 * from @genpd to @subdomain exists.
 */
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
			      struct generic_pm_domain *subdomain)
{
	struct gpd_link *link;
	int ret = -EINVAL;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
		return -EINVAL;

 start:
	genpd_acquire_lock(genpd);

	/* Find the link in which @genpd is the master of @subdomain. */
	list_for_each_entry(link, &genpd->master_links, master_node) {
		if (link->slave != subdomain)
			continue;

		/* Master lock is held; nested annotation keeps lockdep happy. */
		mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);

		/*
		 * If the subdomain is mid-transition, drop both locks and
		 * retry until it settles in a stable state.
		 */
		if (subdomain->status != GPD_STATE_POWER_OFF
		    && subdomain->status != GPD_STATE_ACTIVE) {
			mutex_unlock(&subdomain->lock);
			genpd_release_lock(genpd);
			goto start;
		}

		list_del(&link->master_node);
		list_del(&link->slave_node);
		kfree(link);
		/* A powered-on subdomain was holding a count on the master. */
		if (subdomain->status != GPD_STATE_POWER_OFF)
			genpd_sd_counter_dec(genpd);

		mutex_unlock(&subdomain->lock);

		ret = 0;
		break;
	}

	genpd_release_lock(genpd);

	return ret;
}
1523
1524 /**
1525 * pm_genpd_add_callbacks - Add PM domain callbacks to a given device.
1526 * @dev: Device to add the callbacks to.
1527 * @ops: Set of callbacks to add.
1528 * @td: Timing data to add to the device along with the callbacks (optional).
1529 */
1530 int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops,
1531 struct gpd_timing_data *td)
1532 {
1533 struct pm_domain_data *pdd;
1534 int ret = 0;
1535
1536 if (!(dev && dev->power.subsys_data && ops))
1537 return -EINVAL;
1538
1539 pm_runtime_disable(dev);
1540 device_pm_lock();
1541
1542 pdd = dev->power.subsys_data->domain_data;
1543 if (pdd) {
1544 struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
1545
1546 gpd_data->ops = *ops;
1547 if (td)
1548 gpd_data->td = *td;
1549 } else {
1550 ret = -EINVAL;
1551 }
1552
1553 device_pm_unlock();
1554 pm_runtime_enable(dev);
1555
1556 return ret;
1557 }
1558 EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks);
1559
1560 /**
1561 * __pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device.
1562 * @dev: Device to remove the callbacks from.
1563 * @clear_td: If set, clear the device's timing data too.
1564 */
1565 int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
1566 {
1567 struct pm_domain_data *pdd;
1568 int ret = 0;
1569
1570 if (!(dev && dev->power.subsys_data))
1571 return -EINVAL;
1572
1573 pm_runtime_disable(dev);
1574 device_pm_lock();
1575
1576 pdd = dev->power.subsys_data->domain_data;
1577 if (pdd) {
1578 struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
1579
1580 gpd_data->ops = (struct gpd_dev_ops){ 0 };
1581 if (clear_td)
1582 gpd_data->td = (struct gpd_timing_data){ 0 };
1583 } else {
1584 ret = -EINVAL;
1585 }
1586
1587 device_pm_unlock();
1588 pm_runtime_enable(dev);
1589
1590 return ret;
1591 }
1592 EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);
1593
1594 /* Default device callbacks for generic PM domains. */
1595
1596 /**
1597 * pm_genpd_default_save_state - Default "save device state" for PM domians.
1598 * @dev: Device to handle.
1599 */
1600 static int pm_genpd_default_save_state(struct device *dev)
1601 {
1602 int (*cb)(struct device *__dev);
1603 struct device_driver *drv = dev->driver;
1604
1605 cb = dev_gpd_data(dev)->ops.save_state;
1606 if (cb)
1607 return cb(dev);
1608
1609 if (drv && drv->pm && drv->pm->runtime_suspend)
1610 return drv->pm->runtime_suspend(dev);
1611
1612 return 0;
1613 }
1614
1615 /**
1616 * pm_genpd_default_restore_state - Default PM domians "restore device state".
1617 * @dev: Device to handle.
1618 */
1619 static int pm_genpd_default_restore_state(struct device *dev)
1620 {
1621 int (*cb)(struct device *__dev);
1622 struct device_driver *drv = dev->driver;
1623
1624 cb = dev_gpd_data(dev)->ops.restore_state;
1625 if (cb)
1626 return cb(dev);
1627
1628 if (drv && drv->pm && drv->pm->runtime_resume)
1629 return drv->pm->runtime_resume(dev);
1630
1631 return 0;
1632 }
1633
1634 #ifdef CONFIG_PM_SLEEP
1635
1636 /**
1637 * pm_genpd_default_suspend - Default "device suspend" for PM domians.
1638 * @dev: Device to handle.
1639 */
1640 static int pm_genpd_default_suspend(struct device *dev)
1641 {
1642 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend;
1643
1644 return cb ? cb(dev) : pm_generic_suspend(dev);
1645 }
1646
1647 /**
1648 * pm_genpd_default_suspend_late - Default "late device suspend" for PM domians.
1649 * @dev: Device to handle.
1650 */
1651 static int pm_genpd_default_suspend_late(struct device *dev)
1652 {
1653 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late;
1654
1655 return cb ? cb(dev) : pm_generic_suspend_late(dev);
1656 }
1657
1658 /**
1659 * pm_genpd_default_resume_early - Default "early device resume" for PM domians.
1660 * @dev: Device to handle.
1661 */
1662 static int pm_genpd_default_resume_early(struct device *dev)
1663 {
1664 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume_early;
1665
1666 return cb ? cb(dev) : pm_generic_resume_early(dev);
1667 }
1668
1669 /**
1670 * pm_genpd_default_resume - Default "device resume" for PM domians.
1671 * @dev: Device to handle.
1672 */
1673 static int pm_genpd_default_resume(struct device *dev)
1674 {
1675 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume;
1676
1677 return cb ? cb(dev) : pm_generic_resume(dev);
1678 }
1679
1680 /**
1681 * pm_genpd_default_freeze - Default "device freeze" for PM domians.
1682 * @dev: Device to handle.
1683 */
1684 static int pm_genpd_default_freeze(struct device *dev)
1685 {
1686 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze;
1687
1688 return cb ? cb(dev) : pm_generic_freeze(dev);
1689 }
1690
1691 /**
1692 * pm_genpd_default_freeze_late - Default "late device freeze" for PM domians.
1693 * @dev: Device to handle.
1694 */
1695 static int pm_genpd_default_freeze_late(struct device *dev)
1696 {
1697 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late;
1698
1699 return cb ? cb(dev) : pm_generic_freeze_late(dev);
1700 }
1701
1702 /**
1703 * pm_genpd_default_thaw_early - Default "early device thaw" for PM domians.
1704 * @dev: Device to handle.
1705 */
1706 static int pm_genpd_default_thaw_early(struct device *dev)
1707 {
1708 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early;
1709
1710 return cb ? cb(dev) : pm_generic_thaw_early(dev);
1711 }
1712
1713 /**
1714 * pm_genpd_default_thaw - Default "device thaw" for PM domians.
1715 * @dev: Device to handle.
1716 */
1717 static int pm_genpd_default_thaw(struct device *dev)
1718 {
1719 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw;
1720
1721 return cb ? cb(dev) : pm_generic_thaw(dev);
1722 }
1723
1724 #else /* !CONFIG_PM_SLEEP */
1725
1726 #define pm_genpd_default_suspend NULL
1727 #define pm_genpd_default_suspend_late NULL
1728 #define pm_genpd_default_resume_early NULL
1729 #define pm_genpd_default_resume NULL
1730 #define pm_genpd_default_freeze NULL
1731 #define pm_genpd_default_freeze_late NULL
1732 #define pm_genpd_default_thaw_early NULL
1733 #define pm_genpd_default_thaw NULL
1734
1735 #endif /* !CONFIG_PM_SLEEP */
1736
/**
 * pm_genpd_init - Initialize a generic I/O PM domain object.
 * @genpd: PM domain object to initialize.
 * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: Initial value of the domain's power_is_off field.
 *
 * Fills in @genpd's lists, locks, counters and callback tables and adds it
 * to the global list of registered PM domains.
 */
void pm_genpd_init(struct generic_pm_domain *genpd,
		   struct dev_power_governor *gov, bool is_off)
{
	if (IS_ERR_OR_NULL(genpd))
		return;

	INIT_LIST_HEAD(&genpd->master_links);
	INIT_LIST_HEAD(&genpd->slave_links);
	INIT_LIST_HEAD(&genpd->dev_list);
	mutex_init(&genpd->lock);
	genpd->gov = gov;
	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
	genpd->in_progress = 0;
	atomic_set(&genpd->sd_count, 0);
	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
	init_waitqueue_head(&genpd->status_wait_queue);
	genpd->poweroff_task = NULL;
	genpd->resume_count = 0;
	genpd->device_count = 0;
	/* -1: no maximum off-time constraint has been computed yet. */
	genpd->max_off_time_ns = -1;
	genpd->max_off_time_changed = true;
	/* Runtime PM callbacks dispatched through the dev_pm_domain. */
	genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
	genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
	genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
	/* System sleep (suspend/resume) callbacks. */
	genpd->domain.ops.prepare = pm_genpd_prepare;
	genpd->domain.ops.suspend = pm_genpd_suspend;
	genpd->domain.ops.suspend_late = pm_genpd_suspend_late;
	genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
	genpd->domain.ops.resume_early = pm_genpd_resume_early;
	genpd->domain.ops.resume = pm_genpd_resume;
	/* Hibernation (freeze/thaw) callbacks. */
	genpd->domain.ops.freeze = pm_genpd_freeze;
	genpd->domain.ops.freeze_late = pm_genpd_freeze_late;
	genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
	genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
	genpd->domain.ops.thaw_early = pm_genpd_thaw_early;
	genpd->domain.ops.thaw = pm_genpd_thaw;
	/* Poweroff reuses the suspend path; restore reuses the resume path. */
	genpd->domain.ops.poweroff = pm_genpd_suspend;
	genpd->domain.ops.poweroff_late = pm_genpd_suspend_late;
	genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
	genpd->domain.ops.restore_early = pm_genpd_resume_early;
	genpd->domain.ops.restore = pm_genpd_resume;
	genpd->domain.ops.complete = pm_genpd_complete;
	/* Default per-device callbacks; devices may override them via ops. */
	genpd->dev_ops.save_state = pm_genpd_default_save_state;
	genpd->dev_ops.restore_state = pm_genpd_default_restore_state;
	genpd->dev_ops.suspend = pm_genpd_default_suspend;
	genpd->dev_ops.suspend_late = pm_genpd_default_suspend_late;
	genpd->dev_ops.resume_early = pm_genpd_default_resume_early;
	genpd->dev_ops.resume = pm_genpd_default_resume;
	genpd->dev_ops.freeze = pm_genpd_default_freeze;
	genpd->dev_ops.freeze_late = pm_genpd_default_freeze_late;
	genpd->dev_ops.thaw_early = pm_genpd_default_thaw_early;
	genpd->dev_ops.thaw = pm_genpd_default_thaw;
	/* Make the new domain visible to lookups (e.g. by OF node). */
	mutex_lock(&gpd_list_lock);
	list_add(&genpd->gpd_list_node, &gpd_list);
	mutex_unlock(&gpd_list_lock);
}