]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/base/power/domain.c
ARM: shmobile: Convert to genpd flags for PM clocks for R-mobile
[mirror_ubuntu-artful-kernel.git] / drivers / base / power / domain.c
CommitLineData
f721889f
RW
1/*
2 * drivers/base/power/domain.c - Common code related to device power domains.
3 *
4 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
5 *
6 * This file is released under the GPLv2.
7 */
8
f721889f
RW
9#include <linux/kernel.h>
10#include <linux/io.h>
aa42240a 11#include <linux/platform_device.h>
f721889f
RW
12#include <linux/pm_runtime.h>
13#include <linux/pm_domain.h>
6ff7bb0d 14#include <linux/pm_qos.h>
c11f6f5b 15#include <linux/pm_clock.h>
f721889f
RW
16#include <linux/slab.h>
17#include <linux/err.h>
17b75eca
RW
18#include <linux/sched.h>
19#include <linux/suspend.h>
d5e4cbfe
RW
20#include <linux/export.h>
21
/*
 * Invoke the optional per-device operation @callback from @genpd's dev_ops
 * for @dev.  Evaluates to the callback's return value, or to (type)0 when
 * the domain does not implement the callback.
 */
#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d); 			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback; 			\
	if (__routine) {					\
		__ret = __routine(dev);				\
	}							\
	__ret;							\
})
f721889f 33
0140d8bd
RW
/*
 * Like GENPD_DEV_CALLBACK(), but also measures how long the callback took.
 * On success, if the measured time exceeds the worst case recorded so far
 * in the device's timing data (@field), the new maximum is stored and the
 * domain/device QoS constraints are flagged for re-evaluation.
 */
#define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name)	\
({										\
	ktime_t __start = ktime_get();						\
	type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev);		\
	s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start));		\
	struct gpd_timing_data *__td = &dev_gpd_data(dev)->td;			\
	if (!__retval && __elapsed > __td->field) {				\
		__td->field = __elapsed;					\
		dev_dbg(dev, name " latency exceeded, new value %lld ns\n",	\
			__elapsed);						\
		genpd->max_off_time_changed = true;				\
		__td->constraint_changed = true;				\
	}									\
	__retval;								\
})
49
5125bbf3
RW
50static LIST_HEAD(gpd_list);
51static DEFINE_MUTEX(gpd_list_lock);
52
8bc0251d
RW
53static struct generic_pm_domain *pm_genpd_lookup_name(const char *domain_name)
54{
55 struct generic_pm_domain *genpd = NULL, *gpd;
56
57 if (IS_ERR_OR_NULL(domain_name))
58 return NULL;
59
60 mutex_lock(&gpd_list_lock);
61 list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
62 if (!strcmp(gpd->name, domain_name)) {
63 genpd = gpd;
64 break;
65 }
66 }
67 mutex_unlock(&gpd_list_lock);
68 return genpd;
69}
70
b02c999a 71struct generic_pm_domain *dev_to_genpd(struct device *dev)
5248051b
RW
72{
73 if (IS_ERR_OR_NULL(dev->pm_domain))
74 return ERR_PTR(-EINVAL);
75
596ba34b 76 return pd_to_genpd(dev->pm_domain);
5248051b 77}
f721889f 78
d5e4cbfe
RW
/* Run the domain's .stop() callback for @dev, tracking worst-case latency. */
static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev,
					stop_latency_ns, "stop");
}
84
/* Run the domain's .start() callback for @dev, tracking worst-case latency. */
static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev,
					start_latency_ns, "start");
}
90
c4bb3160 91static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
f721889f 92{
c4bb3160
RW
93 bool ret = false;
94
95 if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
96 ret = !!atomic_dec_and_test(&genpd->sd_count);
97
98 return ret;
99}
100
/* Take one reference on the subdomain counter of @genpd. */
static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	/* Full barrier: order the increment before subsequent accesses. */
	smp_mb__after_atomic();
}
106
17b75eca
RW
/*
 * genpd_acquire_lock - Lock @genpd once it has settled into a stable state.
 *
 * Takes genpd->lock; if the domain is in a transient state (neither active
 * nor powered off), the lock is dropped and the caller sleeps on the status
 * wait queue until the transition completes, then the lock is re-taken.
 */
static void genpd_acquire_lock(struct generic_pm_domain *genpd)
{
	DEFINE_WAIT(wait);

	mutex_lock(&genpd->lock);
	/*
	 * Wait for the domain to transition into either the active,
	 * or the power off state.
	 */
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		if (genpd->status == GPD_STATE_ACTIVE
		    || genpd->status == GPD_STATE_POWER_OFF)
			break;
		/* Drop the lock so the transition in flight can finish. */
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);
}
130
/* Counterpart of genpd_acquire_lock(). */
static void genpd_release_lock(struct generic_pm_domain *genpd)
{
	mutex_unlock(&genpd->lock);
}
135
c6d22b37
RW
/* Mark @genpd active, unless a device resume is currently in progress. */
static void genpd_set_active(struct generic_pm_domain *genpd)
{
	if (genpd->resume_count == 0)
		genpd->status = GPD_STATE_ACTIVE;
}
141
cbc9ef02
RW
/*
 * genpd_recalc_cpu_exit_latency - Refresh the cpuidle exit latency of @genpd.
 *
 * The exit latency of the associated idle state is the domain's power-on
 * latency (converted from ns to us) on top of the idle state's original
 * exit latency saved at attach time.  No-op for domains without cpuidle data.
 */
static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd)
{
	s64 usecs64;

	if (!genpd->cpuidle_data)
		return;

	usecs64 = genpd->power_on_latency_ns;
	/* do_div() divides in place: usecs64 becomes microseconds. */
	do_div(usecs64, NSEC_PER_USEC);
	usecs64 += genpd->cpuidle_data->saved_exit_latency;
	genpd->cpuidle_data->idle_state->exit_latency = usecs64;
}
154
c8f0ea45
GU
/*
 * genpd_power_on - Invoke the domain's .power_on() callback, tracking latency.
 *
 * Returns 0 if the domain has no .power_on() callback, or the callback's
 * error code on failure.  On success, a new worst-case power-on latency is
 * recorded when this invocation took longer than any previous one, and the
 * change is propagated to governors and cpuidle.
 */
static int genpd_power_on(struct generic_pm_domain *genpd)
{
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	if (!genpd->power_on)
		return 0;

	time_start = ktime_get();
	ret = genpd->power_on(genpd);
	if (ret)
		return ret;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->power_on_latency_ns)
		return ret;

	/* New worst case: remember it and let governors re-evaluate. */
	genpd->power_on_latency_ns = elapsed_ns;
	genpd->max_off_time_changed = true;
	genpd_recalc_cpu_exit_latency(genpd);
	pr_warn("%s: Power-%s latency exceeded, new value %lld ns\n",
		genpd->name, "on", elapsed_ns);

	return ret;
}
181
/*
 * genpd_power_off - Invoke the domain's .power_off() callback, tracking latency.
 *
 * Returns 0 if the domain has no .power_off() callback.  Only -EBUSY aborts
 * immediately (the domain must stay on); any other result still updates the
 * recorded worst-case power-off latency below.
 */
static int genpd_power_off(struct generic_pm_domain *genpd)
{
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	if (!genpd->power_off)
		return 0;

	time_start = ktime_get();
	ret = genpd->power_off(genpd);
	if (ret == -EBUSY)
		return ret;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->power_off_latency_ns)
		return ret;

	/* New worst case: remember it and let governors re-evaluate. */
	genpd->power_off_latency_ns = elapsed_ns;
	genpd->max_off_time_changed = true;
	pr_warn("%s: Power-%s latency exceeded, new value %lld ns\n",
		genpd->name, "off", elapsed_ns);

	return ret;
}
207
5248051b 208/**
5063ce15 209 * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
5248051b
RW
210 * @genpd: PM domain to power up.
211 *
5063ce15 212 * Restore power to @genpd and all of its masters so that it is possible to
5248051b
RW
213 * resume a device belonging to it.
214 */
static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct gpd_link *link;
	DEFINE_WAIT(wait);
	int ret = 0;

	/* If the domain's master is being waited for, we have to wait too. */
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		if (genpd->status != GPD_STATE_WAIT_MASTER)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);

	/*
	 * Nothing to do if the domain is already active, or if it is kept
	 * off on purpose for the duration of a system suspend.
	 */
	if (genpd->status == GPD_STATE_ACTIVE
	    || (genpd->prepared_count > 0 && genpd->suspend_power_off))
		return 0;

	/* Transient (busy/repeat) state: power is on, just mark it active. */
	if (genpd->status != GPD_STATE_POWER_OFF) {
		genpd_set_active(genpd);
		return 0;
	}

	/*
	 * cpuidle-managed domain: instead of calling .power_on(), disable
	 * the associated idle state so cpuidle no longer turns the domain
	 * off behind our back.
	 */
	if (genpd->cpuidle_data) {
		cpuidle_pause_and_lock();
		genpd->cpuidle_data->idle_state->disabled = true;
		cpuidle_resume_and_unlock();
		goto out;
	}

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the masters' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_inc(link->master);
		genpd->status = GPD_STATE_WAIT_MASTER;

		mutex_unlock(&genpd->lock);

		ret = pm_genpd_poweron(link->master);

		mutex_lock(&genpd->lock);

		/*
		 * The "wait for parent" status is guaranteed not to change
		 * while the master is powering on.
		 */
		genpd->status = GPD_STATE_POWER_OFF;
		wake_up_all(&genpd->status_wait_queue);
		if (ret) {
			genpd_sd_counter_dec(link->master);
			goto err;
		}
	}

	ret = genpd_power_on(genpd);
	if (ret)
		goto err;

 out:
	genpd_set_active(genpd);

	return 0;

 err:
	/* Undo the master references taken before the failing iteration. */
	list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
		genpd_sd_counter_dec(link->master);

	return ret;
}
294
295/**
5063ce15 296 * pm_genpd_poweron - Restore power to a given PM domain and its masters.
3f241775
RW
297 * @genpd: PM domain to power up.
298 */
int pm_genpd_poweron(struct generic_pm_domain *genpd)
{
	int ret;

	/* __pm_genpd_poweron() may drop and re-take the lock while waiting. */
	mutex_lock(&genpd->lock);
	ret = __pm_genpd_poweron(genpd);
	mutex_unlock(&genpd->lock);
	return ret;
}
308
8bc0251d
RW
309/**
310 * pm_genpd_name_poweron - Restore power to a given PM domain and its masters.
311 * @domain_name: Name of the PM domain to power up.
312 */
313int pm_genpd_name_poweron(const char *domain_name)
314{
315 struct generic_pm_domain *genpd;
316
317 genpd = pm_genpd_lookup_name(domain_name);
318 return genpd ? pm_genpd_poweron(genpd) : -EINVAL;
319}
320
5248051b
RW
321#ifdef CONFIG_PM_RUNTIME
322
b3d3b9fb
SK
/* Run the domain's .start() callback for @dev without latency accounting. */
static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd,
				     struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}
328
8e9afafd
RW
/* Run the domain's .save_state() callback for @dev, tracking latency. */
static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
					save_state_latency_ns, "state save");
}
334
/* Run the domain's .restore_state() callback for @dev, tracking latency. */
static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
					restore_state_latency_ns,
					"state restore");
}
341
6ff7bb0d
RW
/*
 * genpd_dev_pm_qos_notifier - PM QoS notification callback.
 *
 * Called when a device's PM QoS constraints change.  Flags the timing
 * constraints of the device (and of each ancestor that does not ignore its
 * children) as changed, so the affected domains re-evaluate their maximum
 * off time.
 */
static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
				     unsigned long val, void *ptr)
{
	struct generic_pm_domain_data *gpd_data;
	struct device *dev;

	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);

	/* Bail out if the domain data is no longer bound to a device. */
	mutex_lock(&gpd_data->lock);
	dev = gpd_data->base.dev;
	if (!dev) {
		mutex_unlock(&gpd_data->lock);
		return NOTIFY_DONE;
	}
	mutex_unlock(&gpd_data->lock);

	/* Walk up the device hierarchy, flagging each affected domain. */
	for (;;) {
		struct generic_pm_domain *genpd;
		struct pm_domain_data *pdd;

		spin_lock_irq(&dev->power.lock);

		pdd = dev->power.subsys_data ?
				dev->power.subsys_data->domain_data : NULL;
		if (pdd && pdd->dev) {
			to_gpd_data(pdd)->td.constraint_changed = true;
			genpd = dev_to_genpd(dev);
		} else {
			genpd = ERR_PTR(-ENODATA);
		}

		spin_unlock_irq(&dev->power.lock);

		if (!IS_ERR(genpd)) {
			mutex_lock(&genpd->lock);
			genpd->max_off_time_changed = true;
			mutex_unlock(&genpd->lock);
		}

		dev = dev->parent;
		if (!dev || dev->power.ignore_children)
			break;
	}

	return NOTIFY_DONE;
}
388
f721889f
RW
389/**
390 * __pm_genpd_save_device - Save the pre-suspend state of a device.
4605ab65 391 * @pdd: Domain data of the device to save the state of.
f721889f
RW
392 * @genpd: PM domain the device belongs to.
393 */
static int __pm_genpd_save_device(struct pm_domain_data *pdd,
				  struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
	struct device *dev = pdd->dev;
	int ret = 0;

	/* need_restore > 0: state already saved, nothing to do. */
	if (gpd_data->need_restore > 0)
		return 0;

	/*
	 * If the value of the need_restore flag is still unknown at this point,
	 * we trust that pm_genpd_poweroff() has verified that the device is
	 * already runtime PM suspended.
	 */
	if (gpd_data->need_restore < 0) {
		gpd_data->need_restore = 1;
		return 0;
	}

	/* Drop the lock: the device callbacks below may sleep or re-enter. */
	mutex_unlock(&genpd->lock);

	genpd_start_dev(genpd, dev);
	ret = genpd_save_dev(genpd, dev);
	genpd_stop_dev(genpd, dev);

	mutex_lock(&genpd->lock);

	if (!ret)
		gpd_data->need_restore = 1;

	return ret;
}
428
429/**
430 * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
4605ab65 431 * @pdd: Domain data of the device to restore the state of.
f721889f
RW
432 * @genpd: PM domain the device belongs to.
433 */
static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
				      struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
	struct device *dev = pdd->dev;
	/* Snapshot the flag before dropping the lock that protects it. */
	int need_restore = gpd_data->need_restore;

	gpd_data->need_restore = 0;
	mutex_unlock(&genpd->lock);

	genpd_start_dev(genpd, dev);

	/*
	 * Call genpd_restore_dev() for recently added devices too (need_restore
	 * is negative then).
	 */
	if (need_restore)
		genpd_restore_dev(genpd, dev);

	mutex_lock(&genpd->lock);
}
456
c6d22b37
RW
457/**
458 * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
459 * @genpd: PM domain to check.
460 *
461 * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
462 * a "power off" operation, which means that a "power on" has occured in the
463 * meantime, or if its resume_count field is different from zero, which means
464 * that one of its devices has been resumed in the meantime.
465 */
466static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
467{
17877eb5 468 return genpd->status == GPD_STATE_WAIT_MASTER
3f241775 469 || genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
c6d22b37
RW
470}
471
56375fd4
RW
472/**
473 * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
474 * @genpd: PM domait to power off.
475 *
476 * Queue up the execution of pm_genpd_poweroff() unless it's already been done
477 * before.
478 */
/* Schedule pm_genpd_poweroff() for @genpd on the PM workqueue (pm_wq). */
static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	queue_work(pm_wq, &genpd->power_off_work);
}
483
f721889f
RW
484/**
485 * pm_genpd_poweroff - Remove power from a given PM domain.
486 * @genpd: PM domain to power down.
487 *
488 * If all of the @genpd's devices have been suspended and all of its subdomains
489 * have been powered down, run the runtime suspend callbacks provided by all of
490 * the @genpd's devices' drivers and remove power from @genpd.
491 */
static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended;
	int ret = 0;

 start:
	/*
	 * Do not try to power off the domain in the following situations:
	 * (1) The domain is already in the "power off" state.
	 * (2) The domain is waiting for its master to power up.
	 * (3) One of the domain's devices is being resumed right now.
	 * (4) System suspend is in progress.
	 */
	if (genpd->status == GPD_STATE_POWER_OFF
	    || genpd->status == GPD_STATE_WAIT_MASTER
	    || genpd->resume_count > 0 || genpd->prepared_count > 0)
		return 0;

	/* Subdomains still reference us: cannot power off. */
	if (atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	not_suspended = 0;
	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		enum pm_qos_flags_status stat;

		/* QoS flags may forbid removing power from this device. */
		stat = dev_pm_qos_flags(pdd->dev,
					PM_QOS_FLAG_NO_POWER_OFF
						| PM_QOS_FLAG_REMOTE_WAKEUP);
		if (stat > PM_QOS_FLAGS_NONE)
			return -EBUSY;

		if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
		    || pdd->dev->power.irq_safe))
			not_suspended++;
	}

	/* in_progress accounts for suspends currently running on this path. */
	if (not_suspended > genpd->in_progress)
		return -EBUSY;

	if (genpd->poweroff_task) {
		/*
		 * Another instance of pm_genpd_poweroff() is executing
		 * callbacks, so tell it to start over and return.
		 */
		genpd->status = GPD_STATE_REPEAT;
		return 0;
	}

	/* Let the governor veto the power off. */
	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return -EAGAIN;
	}

	genpd->status = GPD_STATE_BUSY;
	genpd->poweroff_task = current;

	/* Save device state; the helpers drop and re-take genpd->lock. */
	list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
		ret = atomic_read(&genpd->sd_count) == 0 ?
			__pm_genpd_save_device(pdd, genpd) : -EBUSY;

		if (genpd_abort_poweroff(genpd))
			goto out;

		if (ret) {
			genpd_set_active(genpd);
			goto out;
		}

		/* Another poweroff requested a restart while we slept. */
		if (genpd->status == GPD_STATE_REPEAT) {
			genpd->poweroff_task = NULL;
			goto start;
		}
	}

	if (genpd->cpuidle_data) {
		/*
		 * If cpuidle_data is set, cpuidle should turn the domain off
		 * when the CPU in it is idle.  In that case we don't decrement
		 * the subdomain counts of the master domains, so that power is
		 * not removed from the current domain prematurely as a result
		 * of cutting off the masters' power.
		 */
		genpd->status = GPD_STATE_POWER_OFF;
		cpuidle_pause_and_lock();
		genpd->cpuidle_data->idle_state->disabled = false;
		cpuidle_resume_and_unlock();
		goto out;
	}

	if (genpd->power_off) {
		if (atomic_read(&genpd->sd_count) > 0) {
			ret = -EBUSY;
			goto out;
		}

		/*
		 * If sd_count > 0 at this point, one of the subdomains hasn't
		 * managed to call pm_genpd_poweron() for the master yet after
		 * incrementing it.  In that case pm_genpd_poweron() will wait
		 * for us to drop the lock, so we can call .power_off() and let
		 * the pm_genpd_poweron() restore power for us (this shouldn't
		 * happen very often).
		 */
		ret = genpd_power_off(genpd);
		if (ret == -EBUSY) {
			genpd_set_active(genpd);
			goto out;
		}
	}

	genpd->status = GPD_STATE_POWER_OFF;

	/* Release our masters and let them try to power off, too. */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		genpd_queue_power_off_work(link->master);
	}

 out:
	genpd->poweroff_task = NULL;
	wake_up_all(&genpd->status_wait_queue);
	return ret;
}
617
618/**
619 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
620 * @work: Work structure used for scheduling the execution of this function.
621 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
	struct generic_pm_domain *genpd;

	genpd = container_of(work, struct generic_pm_domain, power_off_work);

	/* Wait for a stable state before attempting the power off. */
	genpd_acquire_lock(genpd);
	pm_genpd_poweroff(genpd);
	genpd_release_lock(genpd);
}
632
633/**
634 * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
635 * @dev: Device to suspend.
636 *
637 * Carry out a runtime suspend of a device under the assumption that its
638 * pm_domain field points to the domain member of an object of type
639 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
640 */
static int pm_genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data;
	bool (*stop_ok)(struct device *__dev);
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/* Let the governor veto stopping the device. */
	stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
	if (stop_ok && !stop_ok(dev))
		return -EBUSY;

	ret = genpd_stop_dev(genpd, dev);
	if (ret)
		return ret;

	/*
	 * If power.irq_safe is set, this routine will be run with interrupts
	 * off, so it can't use mutexes.
	 */
	if (dev->power.irq_safe)
		return 0;

	mutex_lock(&genpd->lock);

	/*
	 * If we have an unknown state of the need_restore flag, it means none
	 * of the runtime PM callbacks has been invoked yet. Let's update the
	 * flag to reflect that the current state is active.
	 */
	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	if (gpd_data->need_restore < 0)
		gpd_data->need_restore = 0;

	genpd->in_progress++;
	pm_genpd_poweroff(genpd);
	genpd->in_progress--;
	mutex_unlock(&genpd->lock);

	return 0;
}
687
f721889f
RW
688/**
689 * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
690 * @dev: Device to resume.
691 *
692 * Carry out a runtime resume of a device under the assumption that its
693 * pm_domain field points to the domain member of an object of type
694 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
695 */
static int pm_genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	DEFINE_WAIT(wait);
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/* If power.irq_safe, the PM domain is never powered off. */
	if (dev->power.irq_safe)
		return genpd_start_dev_no_timing(genpd, dev);

	mutex_lock(&genpd->lock);
	ret = __pm_genpd_poweron(genpd);
	if (ret) {
		mutex_unlock(&genpd->lock);
		return ret;
	}
	genpd->status = GPD_STATE_BUSY;
	genpd->resume_count++;
	/* Wait for any concurrent poweroff to give up or finish. */
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		/*
		 * If current is the powering off task, we have been called
		 * reentrantly from one of the device callbacks, so we should
		 * not wait.
		 */
		if (!genpd->poweroff_task || genpd->poweroff_task == current)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);
	__pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd);
	genpd->resume_count--;
	genpd_set_active(genpd);
	wake_up_all(&genpd->status_wait_queue);
	mutex_unlock(&genpd->lock);

	return 0;
}
745
39ac5ba5
TB
/* Set via the "pd_ignore_unused" kernel command line option. */
static bool pd_ignore_unused;
static int __init pd_ignore_unused_setup(char *__unused)
{
	pd_ignore_unused = true;
	return 1;
}
__setup("pd_ignore_unused", pd_ignore_unused_setup);
753
17f2ae7f
RW
754/**
755 * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
756 */
void pm_genpd_poweroff_unused(void)
{
	struct generic_pm_domain *genpd;

	/* Honor the "pd_ignore_unused" command line option. */
	if (pd_ignore_unused) {
		pr_warn("genpd: Not disabling unused power domains\n");
		return;
	}

	mutex_lock(&gpd_list_lock);

	/* Queue the poweroff asynchronously for every registered domain. */
	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_queue_power_off_work(genpd);

	mutex_unlock(&gpd_list_lock);
}
773
2fe71dcd
UH
/* Power off unused domains once late in boot, after drivers have probed. */
static int __init genpd_poweroff_unused(void)
{
	pm_genpd_poweroff_unused();
	return 0;
}
late_initcall(genpd_poweroff_unused);
780
f721889f
RW
781#else
782
6ff7bb0d
RW
/* !CONFIG_PM_RUNTIME stub: QoS notifications are ignored. */
static inline int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
					    unsigned long val, void *ptr)
{
	return NOTIFY_DONE;
}
788
d971f0b0
UH
/* !CONFIG_PM_RUNTIME stub: nothing to queue. */
static inline void
genpd_queue_power_off_work(struct generic_pm_domain *genpd) {}
791
f721889f
RW
/* !CONFIG_PM_RUNTIME stubs: runtime PM callbacks are not provided. */
static inline void genpd_power_off_work_fn(struct work_struct *work) {}

#define pm_genpd_runtime_suspend NULL
#define pm_genpd_runtime_resume NULL
796
797#endif /* CONFIG_PM_RUNTIME */
798
596ba34b
RW
799#ifdef CONFIG_PM_SLEEP
800
77f827de
RW
801/**
802 * pm_genpd_present - Check if the given PM domain has been initialized.
803 * @genpd: PM domain to check.
804 */
895b31f3 805static bool pm_genpd_present(const struct generic_pm_domain *genpd)
77f827de 806{
895b31f3 807 const struct generic_pm_domain *gpd;
77f827de
RW
808
809 if (IS_ERR_OR_NULL(genpd))
810 return false;
811
812 list_for_each_entry(gpd, &gpd_list, gpd_list_node)
813 if (gpd == genpd)
814 return true;
815
816 return false;
817}
818
d5e4cbfe
RW
/* Ask the domain whether @dev must stay active to signal system wakeup. */
static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
				    struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
}
824
596ba34b 825/**
5063ce15 826 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
596ba34b
RW
827 * @genpd: PM domain to power off, if possible.
828 *
829 * Check if the given PM domain can be powered off (during system suspend or
5063ce15 830 * hibernation) and do that if so. Also, in that case propagate to its masters.
596ba34b 831 *
77f827de
RW
832 * This function is only called in "noirq" and "syscore" stages of system power
833 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
834 * executed sequentially, so it is guaranteed that it will never run twice in
835 * parallel).
596ba34b
RW
836 */
static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
{
	struct gpd_link *link;

	if (genpd->status == GPD_STATE_POWER_OFF)
		return;

	/* Stay on while any device is unsuspended or a subdomain needs us. */
	if (genpd->suspended_count != genpd->device_count
	    || atomic_read(&genpd->sd_count) > 0)
		return;

	genpd_power_off(genpd);

	genpd->status = GPD_STATE_POWER_OFF;

	/* Release the masters and recurse into them. */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		pm_genpd_sync_poweroff(link->master);
	}
}
857
802d8b49
RW
858/**
859 * pm_genpd_sync_poweron - Synchronously power on a PM domain and its masters.
860 * @genpd: PM domain to power on.
861 *
77f827de
RW
862 * This function is only called in "noirq" and "syscore" stages of system power
863 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
864 * executed sequentially, so it is guaranteed that it will never run twice in
865 * parallel).
802d8b49
RW
866 */
static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd)
{
	struct gpd_link *link;

	if (genpd->status != GPD_STATE_POWER_OFF)
		return;

	/* Power on the masters first and take a reference on each. */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		pm_genpd_sync_poweron(link->master);
		genpd_sd_counter_inc(link->master);
	}

	genpd_power_on(genpd);

	genpd->status = GPD_STATE_ACTIVE;
}
883
4ecd6e65
RW
884/**
885 * resume_needed - Check whether to resume a device before system suspend.
886 * @dev: Device to check.
887 * @genpd: PM domain the device belongs to.
888 *
889 * There are two cases in which a device that can wake up the system from sleep
890 * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
891 * to wake up the system and it has to remain active for this purpose while the
892 * system is in the sleep state and (2) if the device is not enabled to wake up
893 * the system from sleep states and it generally doesn't generate wakeup signals
894 * by itself (those signals are generated on its behalf by other parts of the
895 * system). In the latter case it may be necessary to reconfigure the device's
896 * wakeup settings during system suspend, because it may have been set up to
897 * signal remote wakeup from the system's working state as needed by runtime PM.
898 * Return 'true' in either of the above cases.
899 */
900static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
901{
902 bool active_wakeup;
903
904 if (!device_can_wakeup(dev))
905 return false;
906
d5e4cbfe 907 active_wakeup = genpd_dev_active_wakeup(genpd, dev);
4ecd6e65
RW
908 return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
909}
910
596ba34b
RW
911/**
912 * pm_genpd_prepare - Start power transition of a device in a PM domain.
913 * @dev: Device to start the transition of.
914 *
915 * Start a power transition of a device (during a system-wide power transition)
916 * under the assumption that its pm_domain field points to the domain member of
917 * an object of type struct generic_pm_domain representing a PM domain
918 * consisting of I/O devices.
919 */
static int pm_genpd_prepare(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * If a wakeup request is pending for the device, it should be woken up
	 * at this point and a system wakeup event should be reported if it's
	 * set up to wake up the system from sleep states.
	 */
	pm_runtime_get_noresume(dev);
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		pm_runtime_put(dev);
		return -EBUSY;
	}

	if (resume_needed(dev, genpd))
		pm_runtime_resume(dev);

	genpd_acquire_lock(genpd);

	/* First device prepared decides whether the domain stays off. */
	if (genpd->prepared_count++ == 0) {
		genpd->suspended_count = 0;
		genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
	}

	genpd_release_lock(genpd);

	if (genpd->suspend_power_off) {
		pm_runtime_put_noidle(dev);
		return 0;
	}

	/*
	 * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
	 * so pm_genpd_poweron() will return immediately, but if the device
	 * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
	 * to make it operational.
	 */
	pm_runtime_resume(dev);
	__pm_runtime_disable(dev, false);

	ret = pm_generic_prepare(dev);
	if (ret) {
		/* Roll back the prepared_count taken above. */
		mutex_lock(&genpd->lock);

		if (--genpd->prepared_count == 0)
			genpd->suspend_power_off = false;

		mutex_unlock(&genpd->lock);
		pm_runtime_enable(dev);
	}

	pm_runtime_put(dev);
	return ret;
}
985
986/**
987 * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
988 * @dev: Device to suspend.
989 *
990 * Suspend a device under the assumption that its pm_domain field points to the
991 * domain member of an object of type struct generic_pm_domain representing
992 * a PM domain consisting of I/O devices.
993 */
994static int pm_genpd_suspend(struct device *dev)
995{
996 struct generic_pm_domain *genpd;
997
998 dev_dbg(dev, "%s()\n", __func__);
999
1000 genpd = dev_to_genpd(dev);
1001 if (IS_ERR(genpd))
1002 return -EINVAL;
1003
1e0407ca 1004 return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
596ba34b
RW
1005}
1006
1007/**
0496c8ae 1008 * pm_genpd_suspend_late - Late suspend of a device from an I/O PM domain.
596ba34b
RW
1009 * @dev: Device to suspend.
1010 *
1011 * Carry out a late suspend of a device under the assumption that its
1012 * pm_domain field points to the domain member of an object of type
1013 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
1014 */
0496c8ae 1015static int pm_genpd_suspend_late(struct device *dev)
596ba34b
RW
1016{
1017 struct generic_pm_domain *genpd;
596ba34b
RW
1018
1019 dev_dbg(dev, "%s()\n", __func__);
1020
1021 genpd = dev_to_genpd(dev);
1022 if (IS_ERR(genpd))
1023 return -EINVAL;
1024
1e0407ca 1025 return genpd->suspend_power_off ? 0 : pm_generic_suspend_late(dev);
0496c8ae 1026}
596ba34b 1027
0496c8ae
RW
1028/**
1029 * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
1030 * @dev: Device to suspend.
1031 *
1032 * Stop the device and remove power from the domain if all devices in it have
1033 * been stopped.
1034 */
1035static int pm_genpd_suspend_noirq(struct device *dev)
1036{
1037 struct generic_pm_domain *genpd;
1038
1039 dev_dbg(dev, "%s()\n", __func__);
1040
1041 genpd = dev_to_genpd(dev);
1042 if (IS_ERR(genpd))
1043 return -EINVAL;
596ba34b 1044
dbf37414 1045 if (genpd->suspend_power_off
0496c8ae 1046 || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
d4f2d87a
RW
1047 return 0;
1048
d5e4cbfe 1049 genpd_stop_dev(genpd, dev);
596ba34b
RW
1050
1051 /*
1052 * Since all of the "noirq" callbacks are executed sequentially, it is
1053 * guaranteed that this function will never run twice in parallel for
1054 * the same PM domain, so it is not necessary to use locking here.
1055 */
1056 genpd->suspended_count++;
1057 pm_genpd_sync_poweroff(genpd);
1058
1059 return 0;
1060}
1061
1062/**
0496c8ae 1063 * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
596ba34b
RW
1064 * @dev: Device to resume.
1065 *
0496c8ae 1066 * Restore power to the device's PM domain, if necessary, and start the device.
596ba34b
RW
1067 */
1068static int pm_genpd_resume_noirq(struct device *dev)
1069{
1070 struct generic_pm_domain *genpd;
1071
1072 dev_dbg(dev, "%s()\n", __func__);
1073
1074 genpd = dev_to_genpd(dev);
1075 if (IS_ERR(genpd))
1076 return -EINVAL;
1077
dbf37414 1078 if (genpd->suspend_power_off
cc85b207 1079 || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
596ba34b
RW
1080 return 0;
1081
1082 /*
1083 * Since all of the "noirq" callbacks are executed sequentially, it is
1084 * guaranteed that this function will never run twice in parallel for
1085 * the same PM domain, so it is not necessary to use locking here.
1086 */
802d8b49 1087 pm_genpd_sync_poweron(genpd);
596ba34b 1088 genpd->suspended_count--;
596ba34b 1089
0496c8ae 1090 return genpd_start_dev(genpd, dev);
596ba34b
RW
1091}
1092
1093/**
0496c8ae
RW
1094 * pm_genpd_resume_early - Early resume of a device in an I/O PM domain.
1095 * @dev: Device to resume.
1096 *
1097 * Carry out an early resume of a device under the assumption that its
1098 * pm_domain field points to the domain member of an object of type
1099 * struct generic_pm_domain representing a power domain consisting of I/O
1100 * devices.
1101 */
1102static int pm_genpd_resume_early(struct device *dev)
1103{
1104 struct generic_pm_domain *genpd;
1105
1106 dev_dbg(dev, "%s()\n", __func__);
1107
1108 genpd = dev_to_genpd(dev);
1109 if (IS_ERR(genpd))
1110 return -EINVAL;
1111
1e0407ca 1112 return genpd->suspend_power_off ? 0 : pm_generic_resume_early(dev);
0496c8ae
RW
1113}
1114
1115/**
1116 * pm_genpd_resume - Resume of device in an I/O PM domain.
596ba34b
RW
1117 * @dev: Device to resume.
1118 *
1119 * Resume a device under the assumption that its pm_domain field points to the
1120 * domain member of an object of type struct generic_pm_domain representing
1121 * a power domain consisting of I/O devices.
1122 */
1123static int pm_genpd_resume(struct device *dev)
1124{
1125 struct generic_pm_domain *genpd;
1126
1127 dev_dbg(dev, "%s()\n", __func__);
1128
1129 genpd = dev_to_genpd(dev);
1130 if (IS_ERR(genpd))
1131 return -EINVAL;
1132
1e0407ca 1133 return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
596ba34b
RW
1134}
1135
1136/**
0496c8ae 1137 * pm_genpd_freeze - Freezing a device in an I/O PM domain.
596ba34b
RW
1138 * @dev: Device to freeze.
1139 *
1140 * Freeze a device under the assumption that its pm_domain field points to the
1141 * domain member of an object of type struct generic_pm_domain representing
1142 * a power domain consisting of I/O devices.
1143 */
1144static int pm_genpd_freeze(struct device *dev)
1145{
1146 struct generic_pm_domain *genpd;
1147
1148 dev_dbg(dev, "%s()\n", __func__);
1149
1150 genpd = dev_to_genpd(dev);
1151 if (IS_ERR(genpd))
1152 return -EINVAL;
1153
1e0407ca 1154 return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
596ba34b
RW
1155}
1156
1157/**
0496c8ae
RW
1158 * pm_genpd_freeze_late - Late freeze of a device in an I/O PM domain.
1159 * @dev: Device to freeze.
1160 *
1161 * Carry out a late freeze of a device under the assumption that its
1162 * pm_domain field points to the domain member of an object of type
1163 * struct generic_pm_domain representing a power domain consisting of I/O
1164 * devices.
1165 */
1166static int pm_genpd_freeze_late(struct device *dev)
1167{
1168 struct generic_pm_domain *genpd;
1169
1170 dev_dbg(dev, "%s()\n", __func__);
1171
1172 genpd = dev_to_genpd(dev);
1173 if (IS_ERR(genpd))
1174 return -EINVAL;
1175
1e0407ca 1176 return genpd->suspend_power_off ? 0 : pm_generic_freeze_late(dev);
0496c8ae
RW
1177}
1178
1179/**
1180 * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
596ba34b
RW
1181 * @dev: Device to freeze.
1182 *
1183 * Carry out a late freeze of a device under the assumption that its
1184 * pm_domain field points to the domain member of an object of type
1185 * struct generic_pm_domain representing a power domain consisting of I/O
1186 * devices.
1187 */
1188static int pm_genpd_freeze_noirq(struct device *dev)
1189{
1190 struct generic_pm_domain *genpd;
596ba34b
RW
1191
1192 dev_dbg(dev, "%s()\n", __func__);
1193
1194 genpd = dev_to_genpd(dev);
1195 if (IS_ERR(genpd))
1196 return -EINVAL;
1197
dbf37414 1198 return genpd->suspend_power_off ? 0 : genpd_stop_dev(genpd, dev);
0496c8ae 1199}
596ba34b 1200
0496c8ae
RW
1201/**
1202 * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
1203 * @dev: Device to thaw.
1204 *
1205 * Start the device, unless power has been removed from the domain already
1206 * before the system transition.
1207 */
1208static int pm_genpd_thaw_noirq(struct device *dev)
1209{
1210 struct generic_pm_domain *genpd;
596ba34b 1211
0496c8ae 1212 dev_dbg(dev, "%s()\n", __func__);
596ba34b 1213
0496c8ae
RW
1214 genpd = dev_to_genpd(dev);
1215 if (IS_ERR(genpd))
1216 return -EINVAL;
1217
dbf37414 1218 return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev);
596ba34b
RW
1219}
1220
1221/**
0496c8ae 1222 * pm_genpd_thaw_early - Early thaw of device in an I/O PM domain.
596ba34b
RW
1223 * @dev: Device to thaw.
1224 *
1225 * Carry out an early thaw of a device under the assumption that its
1226 * pm_domain field points to the domain member of an object of type
1227 * struct generic_pm_domain representing a power domain consisting of I/O
1228 * devices.
1229 */
0496c8ae 1230static int pm_genpd_thaw_early(struct device *dev)
596ba34b
RW
1231{
1232 struct generic_pm_domain *genpd;
1233
1234 dev_dbg(dev, "%s()\n", __func__);
1235
1236 genpd = dev_to_genpd(dev);
1237 if (IS_ERR(genpd))
1238 return -EINVAL;
1239
1e0407ca 1240 return genpd->suspend_power_off ? 0 : pm_generic_thaw_early(dev);
596ba34b
RW
1241}
1242
1243/**
1244 * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
1245 * @dev: Device to thaw.
1246 *
1247 * Thaw a device under the assumption that its pm_domain field points to the
1248 * domain member of an object of type struct generic_pm_domain representing
1249 * a power domain consisting of I/O devices.
1250 */
1251static int pm_genpd_thaw(struct device *dev)
1252{
1253 struct generic_pm_domain *genpd;
1254
1255 dev_dbg(dev, "%s()\n", __func__);
1256
1257 genpd = dev_to_genpd(dev);
1258 if (IS_ERR(genpd))
1259 return -EINVAL;
1260
1e0407ca 1261 return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
596ba34b
RW
1262}
1263
/**
 * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain will be in the same power state as before the
 * hibernation the system is resuming from and start the device if necessary.
 */
static int pm_genpd_restore_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 *
	 * At this point suspended_count == 0 means we are being run for the
	 * first time for the given domain in the present cycle.
	 */
	if (genpd->suspended_count++ == 0) {
		/*
		 * The boot kernel might put the domain into arbitrary state,
		 * so make it appear as powered off to pm_genpd_sync_poweron(),
		 * so that it tries to power it on in case it was really off.
		 */
		genpd->status = GPD_STATE_POWER_OFF;
		if (genpd->suspend_power_off) {
			/*
			 * If the domain was off before the hibernation, make
			 * sure it will be off going forward.
			 */
			genpd_power_off(genpd);

			return 0;
		}
	}

	/* Domain was off for the whole cycle: leave the device alone. */
	if (genpd->suspend_power_off)
		return 0;

	pm_genpd_sync_poweron(genpd);

	return genpd_start_dev(genpd, dev);
}
1314
/**
 * pm_genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void pm_genpd_complete(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool run_complete;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return;

	mutex_lock(&genpd->lock);

	/*
	 * Sample suspend_power_off under the lock; the last device to
	 * complete (prepared_count reaching 0) clears the flag for the
	 * next transition.
	 */
	run_complete = !genpd->suspend_power_off;
	if (--genpd->prepared_count == 0)
		genpd->suspend_power_off = false;

	mutex_unlock(&genpd->lock);

	if (run_complete) {
		pm_generic_complete(dev);
		/* Re-enable runtime PM, balancing what the prepare phase did. */
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
		pm_request_idle(dev);
	}
}
1350
77f827de 1351/**
d47e6464 1352 * genpd_syscore_switch - Switch power during system core suspend or resume.
77f827de
RW
1353 * @dev: Device that normally is marked as "always on" to switch power for.
1354 *
1355 * This routine may only be called during the system core (syscore) suspend or
1356 * resume phase for devices whose "always on" flags are set.
1357 */
d47e6464 1358static void genpd_syscore_switch(struct device *dev, bool suspend)
77f827de
RW
1359{
1360 struct generic_pm_domain *genpd;
1361
1362 genpd = dev_to_genpd(dev);
1363 if (!pm_genpd_present(genpd))
1364 return;
1365
1366 if (suspend) {
1367 genpd->suspended_count++;
1368 pm_genpd_sync_poweroff(genpd);
1369 } else {
1370 pm_genpd_sync_poweron(genpd);
1371 genpd->suspended_count--;
1372 }
1373}
d47e6464
UH
1374
/* Remove power from @dev's "always on" domain during syscore suspend. */
void pm_genpd_syscore_poweroff(struct device *dev)
{
	genpd_syscore_switch(dev, true);
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff);
1380
/* Restore power to @dev's "always on" domain during syscore resume. */
void pm_genpd_syscore_poweron(struct device *dev)
{
	genpd_syscore_switch(dev, false);
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);
77f827de 1386
#else

/* !CONFIG_PM_SLEEP: system sleep callbacks are not used, stub them out. */

#define pm_genpd_prepare		NULL
#define pm_genpd_suspend		NULL
#define pm_genpd_suspend_late		NULL
#define pm_genpd_suspend_noirq		NULL
#define pm_genpd_resume_early		NULL
#define pm_genpd_resume_noirq		NULL
#define pm_genpd_resume			NULL
#define pm_genpd_freeze			NULL
#define pm_genpd_freeze_late		NULL
#define pm_genpd_freeze_noirq		NULL
#define pm_genpd_thaw_early		NULL
#define pm_genpd_thaw_noirq		NULL
#define pm_genpd_thaw			NULL
#define pm_genpd_restore_noirq		NULL
#define pm_genpd_complete		NULL

#endif /* CONFIG_PM_SLEEP */
1406
1d5fcfec
RW
1407static struct generic_pm_domain_data *__pm_genpd_alloc_dev_data(struct device *dev)
1408{
1409 struct generic_pm_domain_data *gpd_data;
1410
1411 gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
1412 if (!gpd_data)
1413 return NULL;
1414
1415 mutex_init(&gpd_data->lock);
1416 gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
1417 dev_pm_qos_add_notifier(dev, &gpd_data->nb);
1418 return gpd_data;
1419}
1420
/* Undo __pm_genpd_alloc_dev_data(): drop the QoS notifier and free the data. */
static void __pm_genpd_free_dev_data(struct device *dev,
				     struct generic_pm_domain_data *gpd_data)
{
	dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
	kfree(gpd_data);
}
1427
/**
 * __pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 *
 * Returns 0 on success, -EAGAIN while a system transition is in progress,
 * -EINVAL if the device is already in the domain, or -ENOMEM.
 */
int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			  struct gpd_timing_data *td)
{
	struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL;
	struct pm_domain_data *pdd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	/* Allocate before taking the domain lock (allocation may sleep). */
	gpd_data_new = __pm_genpd_alloc_dev_data(dev);
	if (!gpd_data_new)
		return -ENOMEM;

	genpd_acquire_lock(genpd);

	/* Devices must not be added in the middle of a system transition. */
	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	list_for_each_entry(pdd, &genpd->dev_list, list_node)
		if (pdd->dev == dev) {
			ret = -EINVAL;
			goto out;
		}

	ret = dev_pm_get_subsys_data(dev);
	if (ret)
		goto out;

	genpd->device_count++;
	genpd->max_off_time_changed = true;

	spin_lock_irq(&dev->power.lock);

	dev->pm_domain = &genpd->domain;
	/* Reuse existing domain data if the device already has some. */
	if (dev->power.subsys_data->domain_data) {
		gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	} else {
		gpd_data = gpd_data_new;
		dev->power.subsys_data->domain_data = &gpd_data->base;
	}
	gpd_data->refcount++;
	if (td)
		gpd_data->td = *td;

	spin_unlock_irq(&dev->power.lock);

	if (genpd->attach_dev)
		genpd->attach_dev(genpd, dev);

	mutex_lock(&gpd_data->lock);
	gpd_data->base.dev = dev;
	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
	/* -1: "don't know yet" whether a state restore is needed. */
	gpd_data->need_restore = -1;
	gpd_data->td.constraint_changed = true;
	gpd_data->td.effective_constraint_ns = -1;
	mutex_unlock(&gpd_data->lock);

 out:
	genpd_release_lock(genpd);

	/* Free the preallocated data if an existing one was reused (or on error). */
	if (gpd_data != gpd_data_new)
		__pm_genpd_free_dev_data(dev, gpd_data_new);

	return ret;
}
1504
/**
 * __pm_genpd_name_add_device - Find I/O PM domain and add a device to it.
 * @domain_name: Name of the PM domain to add the device to.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_name_add_device(const char *domain_name, struct device *dev,
			       struct gpd_timing_data *td)
{
	/* Lookup failure yields NULL, which __pm_genpd_add_device() rejects. */
	return __pm_genpd_add_device(pm_genpd_lookup_name(domain_name), dev, td);
}
1516
/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @genpd: PM domain to remove the device from.
 * @dev: Device to be removed.
 *
 * Returns 0 on success, -EAGAIN while a system transition is in progress,
 * or -EINVAL if @dev does not belong to @genpd.
 */
int pm_genpd_remove_device(struct generic_pm_domain *genpd,
			   struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;
	struct pm_domain_data *pdd;
	bool remove = false;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)
	    ||  IS_ERR_OR_NULL(dev->pm_domain)
	    ||  pd_to_genpd(dev->pm_domain) != genpd)
		return -EINVAL;

	genpd_acquire_lock(genpd);

	/* Devices must not be removed in the middle of a system transition. */
	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	genpd->device_count--;
	genpd->max_off_time_changed = true;

	if (genpd->detach_dev)
		genpd->detach_dev(genpd, dev);

	spin_lock_irq(&dev->power.lock);

	dev->pm_domain = NULL;
	pdd = dev->power.subsys_data->domain_data;
	list_del_init(&pdd->list_node);
	gpd_data = to_gpd_data(pdd);
	/* Free the domain data only when the last reference is dropped. */
	if (--gpd_data->refcount == 0) {
		dev->power.subsys_data->domain_data = NULL;
		remove = true;
	}

	spin_unlock_irq(&dev->power.lock);

	mutex_lock(&gpd_data->lock);
	pdd->dev = NULL;
	mutex_unlock(&gpd_data->lock);

	genpd_release_lock(genpd);

	dev_pm_put_subsys_data(dev);
	if (remove)
		__pm_genpd_free_dev_data(dev, gpd_data);

	return 0;

 out:
	genpd_release_lock(genpd);

	return ret;
}
1580
ca1d72f0
RW
1581/**
1582 * pm_genpd_dev_need_restore - Set/unset the device's "need restore" flag.
1583 * @dev: Device to set/unset the flag for.
1584 * @val: The new value of the device's "need restore" flag.
1585 */
1586void pm_genpd_dev_need_restore(struct device *dev, bool val)
1587{
1588 struct pm_subsys_data *psd;
1589 unsigned long flags;
1590
1591 spin_lock_irqsave(&dev->power.lock, flags);
1592
1593 psd = dev_to_psd(dev);
1594 if (psd && psd->domain_data)
67732cd3 1595 to_gpd_data(psd->domain_data)->need_restore = val ? 1 : 0;
ca1d72f0
RW
1596
1597 spin_unlock_irqrestore(&dev->power.lock, flags);
1598}
1599EXPORT_SYMBOL_GPL(pm_genpd_dev_need_restore);
1600
/**
 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @genpd: Master PM domain to add the subdomain to.
 * @subdomain: Subdomain to be added.
 *
 * Returns 0 on success, -EINVAL on invalid arguments, a duplicate link, or
 * an off master with an on subdomain, or -ENOMEM.
 */
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
			   struct generic_pm_domain *subdomain)
{
	struct gpd_link *link;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
	    || genpd == subdomain)
		return -EINVAL;

 start:
	genpd_acquire_lock(genpd);
	mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);

	/*
	 * If the subdomain is in a transient state, drop both locks and
	 * retry so as not to block its state machine.
	 */
	if (subdomain->status != GPD_STATE_POWER_OFF
	    && subdomain->status != GPD_STATE_ACTIVE) {
		mutex_unlock(&subdomain->lock);
		genpd_release_lock(genpd);
		goto start;
	}

	/* A powered-off master cannot have a powered-on subdomain. */
	if (genpd->status == GPD_STATE_POWER_OFF
	    &&  subdomain->status != GPD_STATE_POWER_OFF) {
		ret = -EINVAL;
		goto out;
	}

	list_for_each_entry(link, &genpd->master_links, master_node) {
		if (link->slave == subdomain && link->master == genpd) {
			ret = -EINVAL;
			goto out;
		}
	}

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link) {
		ret = -ENOMEM;
		goto out;
	}
	link->master = genpd;
	list_add_tail(&link->master_node, &genpd->master_links);
	link->slave = subdomain;
	list_add_tail(&link->slave_node, &subdomain->slave_links);
	/* An active subdomain keeps the master powered. */
	if (subdomain->status != GPD_STATE_POWER_OFF)
		genpd_sd_counter_inc(genpd);

 out:
	mutex_unlock(&subdomain->lock);
	genpd_release_lock(genpd);

	return ret;
}
1658
fb7268be
RW
1659/**
1660 * pm_genpd_add_subdomain_names - Add a subdomain to an I/O PM domain.
1661 * @master_name: Name of the master PM domain to add the subdomain to.
1662 * @subdomain_name: Name of the subdomain to be added.
1663 */
1664int pm_genpd_add_subdomain_names(const char *master_name,
1665 const char *subdomain_name)
1666{
1667 struct generic_pm_domain *master = NULL, *subdomain = NULL, *gpd;
1668
1669 if (IS_ERR_OR_NULL(master_name) || IS_ERR_OR_NULL(subdomain_name))
1670 return -EINVAL;
1671
1672 mutex_lock(&gpd_list_lock);
1673 list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
1674 if (!master && !strcmp(gpd->name, master_name))
1675 master = gpd;
1676
1677 if (!subdomain && !strcmp(gpd->name, subdomain_name))
1678 subdomain = gpd;
1679
1680 if (master && subdomain)
1681 break;
1682 }
1683 mutex_unlock(&gpd_list_lock);
1684
1685 return pm_genpd_add_subdomain(master, subdomain);
1686}
1687
/**
 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @genpd: Master PM domain to remove the subdomain from.
 * @subdomain: Subdomain to be removed.
 *
 * Returns 0 on success or -EINVAL if no such link exists.
 */
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
			      struct generic_pm_domain *subdomain)
{
	struct gpd_link *link;
	int ret = -EINVAL;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
		return -EINVAL;

 start:
	genpd_acquire_lock(genpd);

	list_for_each_entry(link, &genpd->master_links, master_node) {
		if (link->slave != subdomain)
			continue;

		mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);

		/*
		 * If the subdomain is in a transient state, drop both locks
		 * and retry so as not to block its state machine.
		 */
		if (subdomain->status != GPD_STATE_POWER_OFF
		    && subdomain->status != GPD_STATE_ACTIVE) {
			mutex_unlock(&subdomain->lock);
			genpd_release_lock(genpd);
			goto start;
		}

		list_del(&link->master_node);
		list_del(&link->slave_node);
		kfree(link);
		/* Balance the count taken when the active subdomain was added. */
		if (subdomain->status != GPD_STATE_POWER_OFF)
			genpd_sd_counter_dec(genpd);

		mutex_unlock(&subdomain->lock);

		ret = 0;
		break;
	}

	genpd_release_lock(genpd);

	return ret;
}
1734
/**
 * pm_genpd_attach_cpuidle - Connect the given PM domain with cpuidle.
 * @genpd: PM domain to be connected with cpuidle.
 * @state: cpuidle state this domain can disable/enable.
 *
 * Make a PM domain behave as though it contained a CPU core, that is, instead
 * of calling its power down routine it will enable the given cpuidle state so
 * that the cpuidle subsystem can power it down (if possible and desirable).
 */
int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
{
	struct cpuidle_driver *cpuidle_drv;
	struct gpd_cpuidle_data *cpuidle_data;
	struct cpuidle_state *idle_state;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || state < 0)
		return -EINVAL;

	genpd_acquire_lock(genpd);

	if (genpd->cpuidle_data) {
		ret = -EEXIST;
		goto out;
	}
	cpuidle_data = kzalloc(sizeof(*cpuidle_data), GFP_KERNEL);
	if (!cpuidle_data) {
		ret = -ENOMEM;
		goto out;
	}
	cpuidle_drv = cpuidle_driver_ref();
	if (!cpuidle_drv) {
		ret = -ENODEV;
		goto err_drv;
	}
	if (cpuidle_drv->state_count <= state) {
		ret = -EINVAL;
		goto err;
	}
	idle_state = &cpuidle_drv->states[state];
	/* The state must currently be disabled so this domain can own it. */
	if (!idle_state->disabled) {
		ret = -EAGAIN;
		goto err;
	}
	cpuidle_data->idle_state = idle_state;
	/* Remember the original latency so detach can restore it. */
	cpuidle_data->saved_exit_latency = idle_state->exit_latency;
	genpd->cpuidle_data = cpuidle_data;
	genpd_recalc_cpu_exit_latency(genpd);

 out:
	genpd_release_lock(genpd);
	return ret;

 err:
	cpuidle_driver_unref();

 err_drv:
	kfree(cpuidle_data);
	goto out;
}
1795
/**
 * pm_genpd_name_attach_cpuidle - Find PM domain and connect cpuidle to it.
 * @name: Name of the domain to connect to cpuidle.
 * @state: cpuidle state this domain can manipulate.
 */
int pm_genpd_name_attach_cpuidle(const char *name, int state)
{
	/* Lookup failure yields NULL, rejected by pm_genpd_attach_cpuidle(). */
	return pm_genpd_attach_cpuidle(pm_genpd_lookup_name(name), state);
}
1805
/**
 * pm_genpd_detach_cpuidle - Remove the cpuidle connection from a PM domain.
 * @genpd: PM domain to remove the cpuidle connection from.
 *
 * Remove the cpuidle connection set up by pm_genpd_attach_cpuidle() from the
 * given PM domain.
 */
int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
{
	struct gpd_cpuidle_data *cpuidle_data;
	struct cpuidle_state *idle_state;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd))
		return -EINVAL;

	genpd_acquire_lock(genpd);

	cpuidle_data = genpd->cpuidle_data;
	if (!cpuidle_data) {
		ret = -ENODEV;
		goto out;
	}
	idle_state = cpuidle_data->idle_state;
	/* The state is expected to still be owned (disabled) by this domain. */
	if (!idle_state->disabled) {
		ret = -EAGAIN;
		goto out;
	}
	/* Restore the latency saved at attach time and drop the driver ref. */
	idle_state->exit_latency = cpuidle_data->saved_exit_latency;
	cpuidle_driver_unref();
	genpd->cpuidle_data = NULL;
	kfree(cpuidle_data);

 out:
	genpd_release_lock(genpd);
	return ret;
}
1843
/**
 * pm_genpd_name_detach_cpuidle - Find PM domain and disconnect cpuidle from it.
 * @name: Name of the domain to disconnect cpuidle from.
 */
int pm_genpd_name_detach_cpuidle(const char *name)
{
	/* Lookup failure yields NULL, rejected by pm_genpd_detach_cpuidle(). */
	return pm_genpd_detach_cpuidle(pm_genpd_lookup_name(name));
}
1852
d23b9b00
RW
1853/* Default device callbacks for generic PM domains. */
1854
ecf00475 1855/**
12e10bb6 1856 * pm_genpd_default_save_state - Default "save device state" for PM domains.
ecf00475
RW
1857 * @dev: Device to handle.
1858 */
1859static int pm_genpd_default_save_state(struct device *dev)
1860{
1861 int (*cb)(struct device *__dev);
ecf00475 1862
0b589741
RW
1863 if (dev->type && dev->type->pm)
1864 cb = dev->type->pm->runtime_suspend;
1865 else if (dev->class && dev->class->pm)
1866 cb = dev->class->pm->runtime_suspend;
1867 else if (dev->bus && dev->bus->pm)
1868 cb = dev->bus->pm->runtime_suspend;
1869 else
1870 cb = NULL;
ecf00475 1871
0b589741
RW
1872 if (!cb && dev->driver && dev->driver->pm)
1873 cb = dev->driver->pm->runtime_suspend;
1874
1875 return cb ? cb(dev) : 0;
ecf00475
RW
1876}
1877
1878/**
12e10bb6 1879 * pm_genpd_default_restore_state - Default PM domains "restore device state".
ecf00475
RW
1880 * @dev: Device to handle.
1881 */
1882static int pm_genpd_default_restore_state(struct device *dev)
1883{
1884 int (*cb)(struct device *__dev);
ecf00475 1885
0b589741
RW
1886 if (dev->type && dev->type->pm)
1887 cb = dev->type->pm->runtime_resume;
1888 else if (dev->class && dev->class->pm)
1889 cb = dev->class->pm->runtime_resume;
1890 else if (dev->bus && dev->bus->pm)
1891 cb = dev->bus->pm->runtime_resume;
1892 else
1893 cb = NULL;
ecf00475 1894
0b589741
RW
1895 if (!cb && dev->driver && dev->driver->pm)
1896 cb = dev->driver->pm->runtime_resume;
1897
1898 return cb ? cb(dev) : 0;
ecf00475
RW
1899}
1900
/**
 * pm_genpd_init - Initialize a generic I/O PM domain object.
 * @genpd: PM domain object to initialize.
 * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: Initial value of the domain's power_is_off field.
 */
void pm_genpd_init(struct generic_pm_domain *genpd,
		   struct dev_power_governor *gov, bool is_off)
{
	if (IS_ERR_OR_NULL(genpd))
		return;

	INIT_LIST_HEAD(&genpd->master_links);
	INIT_LIST_HEAD(&genpd->slave_links);
	INIT_LIST_HEAD(&genpd->dev_list);
	mutex_init(&genpd->lock);
	genpd->gov = gov;
	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
	genpd->in_progress = 0;
	atomic_set(&genpd->sd_count, 0);
	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
	init_waitqueue_head(&genpd->status_wait_queue);
	genpd->poweroff_task = NULL;
	genpd->resume_count = 0;
	genpd->device_count = 0;
	genpd->max_off_time_ns = -1;	/* -1: no off-time constraint yet */
	genpd->max_off_time_changed = true;
	/* Wire up the dev_pm_ops callbacks implemented in this file. */
	genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
	genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
	genpd->domain.ops.prepare = pm_genpd_prepare;
	genpd->domain.ops.suspend = pm_genpd_suspend;
	genpd->domain.ops.suspend_late = pm_genpd_suspend_late;
	genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
	genpd->domain.ops.resume_early = pm_genpd_resume_early;
	genpd->domain.ops.resume = pm_genpd_resume;
	genpd->domain.ops.freeze = pm_genpd_freeze;
	genpd->domain.ops.freeze_late = pm_genpd_freeze_late;
	genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
	genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
	genpd->domain.ops.thaw_early = pm_genpd_thaw_early;
	genpd->domain.ops.thaw = pm_genpd_thaw;
	/* The poweroff/restore phases reuse the suspend/resume handlers. */
	genpd->domain.ops.poweroff = pm_genpd_suspend;
	genpd->domain.ops.poweroff_late = pm_genpd_suspend_late;
	genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
	genpd->domain.ops.restore_early = pm_genpd_resume_early;
	genpd->domain.ops.restore = pm_genpd_resume;
	genpd->domain.ops.complete = pm_genpd_complete;
	genpd->dev_ops.save_state = pm_genpd_default_save_state;
	genpd->dev_ops.restore_state = pm_genpd_default_restore_state;

	/* Optionally manage device clocks through the PM clock framework. */
	if (genpd->flags & GENPD_FLAG_PM_CLK) {
		genpd->dev_ops.stop = pm_clk_suspend;
		genpd->dev_ops.start = pm_clk_resume;
	}

	mutex_lock(&gpd_list_lock);
	list_add(&genpd->gpd_list_node, &gpd_list);
	mutex_unlock(&gpd_list_lock);
}
aa42240a
TF
1962
1963#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
1964/*
1965 * Device Tree based PM domain providers.
1966 *
1967 * The code below implements generic device tree based PM domain providers that
1968 * bind device tree nodes with generic PM domains registered in the system.
1969 *
1970 * Any driver that registers generic PM domains and needs to support binding of
1971 * devices to these domains is supposed to register a PM domain provider, which
1972 * maps a PM domain specifier retrieved from the device tree to a PM domain.
1973 *
1974 * Two simple mapping functions have been provided for convenience:
1975 * - __of_genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
1976 * - __of_genpd_xlate_onecell() for mapping of multiple PM domains per node by
1977 * index.
1978 */
1979
/**
 * struct of_genpd_provider - PM domain provider registration structure
 * @link: Entry in global list of PM domain providers
 * @node: Pointer to device tree node of PM domain provider
 * @xlate: Provider-specific xlate callback mapping a set of specifier cells
 *         into a PM domain.
 * @data: context pointer to be passed into @xlate callback
 *
 * One instance is allocated per of_genpd_add_provider() call and lives on
 * the of_genpd_providers list (protected by of_genpd_mutex) until the
 * matching of_genpd_del_provider() call frees it.
 */
struct of_genpd_provider {
	struct list_head link;
	struct device_node *node;
	genpd_xlate_t xlate;
	void *data;
};
1994
1995/* List of registered PM domain providers. */
1996static LIST_HEAD(of_genpd_providers);
1997/* Mutex to protect the list above. */
1998static DEFINE_MUTEX(of_genpd_mutex);
1999
2000/**
2001 * __of_genpd_xlate_simple() - Xlate function for direct node-domain mapping
2002 * @genpdspec: OF phandle args to map into a PM domain
2003 * @data: xlate function private data - pointer to struct generic_pm_domain
2004 *
2005 * This is a generic xlate function that can be used to model PM domains that
2006 * have their own device tree nodes. The private data of xlate function needs
2007 * to be a valid pointer to struct generic_pm_domain.
2008 */
2009struct generic_pm_domain *__of_genpd_xlate_simple(
2010 struct of_phandle_args *genpdspec,
2011 void *data)
2012{
2013 if (genpdspec->args_count != 0)
2014 return ERR_PTR(-EINVAL);
2015 return data;
2016}
2017EXPORT_SYMBOL_GPL(__of_genpd_xlate_simple);
2018
2019/**
2020 * __of_genpd_xlate_onecell() - Xlate function using a single index.
2021 * @genpdspec: OF phandle args to map into a PM domain
2022 * @data: xlate function private data - pointer to struct genpd_onecell_data
2023 *
2024 * This is a generic xlate function that can be used to model simple PM domain
2025 * controllers that have one device tree node and provide multiple PM domains.
2026 * A single cell is used as an index into an array of PM domains specified in
2027 * the genpd_onecell_data struct when registering the provider.
2028 */
2029struct generic_pm_domain *__of_genpd_xlate_onecell(
2030 struct of_phandle_args *genpdspec,
2031 void *data)
2032{
2033 struct genpd_onecell_data *genpd_data = data;
2034 unsigned int idx = genpdspec->args[0];
2035
2036 if (genpdspec->args_count != 1)
2037 return ERR_PTR(-EINVAL);
2038
2039 if (idx >= genpd_data->num_domains) {
2040 pr_err("%s: invalid domain index %u\n", __func__, idx);
2041 return ERR_PTR(-EINVAL);
2042 }
2043
2044 if (!genpd_data->domains[idx])
2045 return ERR_PTR(-ENOENT);
2046
2047 return genpd_data->domains[idx];
2048}
2049EXPORT_SYMBOL_GPL(__of_genpd_xlate_onecell);
2050
2051/**
2052 * __of_genpd_add_provider() - Register a PM domain provider for a node
2053 * @np: Device node pointer associated with the PM domain provider.
2054 * @xlate: Callback for decoding PM domain from phandle arguments.
2055 * @data: Context pointer for @xlate callback.
2056 */
2057int __of_genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
2058 void *data)
2059{
2060 struct of_genpd_provider *cp;
2061
2062 cp = kzalloc(sizeof(*cp), GFP_KERNEL);
2063 if (!cp)
2064 return -ENOMEM;
2065
2066 cp->node = of_node_get(np);
2067 cp->data = data;
2068 cp->xlate = xlate;
2069
2070 mutex_lock(&of_genpd_mutex);
2071 list_add(&cp->link, &of_genpd_providers);
2072 mutex_unlock(&of_genpd_mutex);
2073 pr_debug("Added domain provider from %s\n", np->full_name);
2074
2075 return 0;
2076}
2077EXPORT_SYMBOL_GPL(__of_genpd_add_provider);
2078
2079/**
2080 * of_genpd_del_provider() - Remove a previously registered PM domain provider
2081 * @np: Device node pointer associated with the PM domain provider
2082 */
2083void of_genpd_del_provider(struct device_node *np)
2084{
2085 struct of_genpd_provider *cp;
2086
2087 mutex_lock(&of_genpd_mutex);
2088 list_for_each_entry(cp, &of_genpd_providers, link) {
2089 if (cp->node == np) {
2090 list_del(&cp->link);
2091 of_node_put(cp->node);
2092 kfree(cp);
2093 break;
2094 }
2095 }
2096 mutex_unlock(&of_genpd_mutex);
2097}
2098EXPORT_SYMBOL_GPL(of_genpd_del_provider);
2099
2100/**
2101 * of_genpd_get_from_provider() - Look-up PM domain
2102 * @genpdspec: OF phandle args to use for look-up
2103 *
2104 * Looks for a PM domain provider under the node specified by @genpdspec and if
2105 * found, uses xlate function of the provider to map phandle args to a PM
2106 * domain.
2107 *
2108 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
2109 * on failure.
2110 */
2111static struct generic_pm_domain *of_genpd_get_from_provider(
2112 struct of_phandle_args *genpdspec)
2113{
2114 struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
2115 struct of_genpd_provider *provider;
2116
2117 mutex_lock(&of_genpd_mutex);
2118
2119 /* Check if we have such a provider in our array */
2120 list_for_each_entry(provider, &of_genpd_providers, link) {
2121 if (provider->node == genpdspec->np)
2122 genpd = provider->xlate(genpdspec, provider->data);
2123 if (!IS_ERR(genpd))
2124 break;
2125 }
2126
2127 mutex_unlock(&of_genpd_mutex);
2128
2129 return genpd;
2130}
2131
2132/**
2133 * genpd_dev_pm_detach - Detach a device from its PM domain.
2134 * @dev: Device to attach.
2135 * @power_off: Currently not used
2136 *
2137 * Try to locate a corresponding generic PM domain, which the device was
2138 * attached to previously. If such is found, the device is detached from it.
2139 */
2140static void genpd_dev_pm_detach(struct device *dev, bool power_off)
2141{
2142 struct generic_pm_domain *pd = NULL, *gpd;
2143 int ret = 0;
2144
2145 if (!dev->pm_domain)
2146 return;
2147
2148 mutex_lock(&gpd_list_lock);
2149 list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
2150 if (&gpd->domain == dev->pm_domain) {
2151 pd = gpd;
2152 break;
2153 }
2154 }
2155 mutex_unlock(&gpd_list_lock);
2156
2157 if (!pd)
2158 return;
2159
2160 dev_dbg(dev, "removing from PM domain %s\n", pd->name);
2161
2162 while (1) {
2163 ret = pm_genpd_remove_device(pd, dev);
2164 if (ret != -EAGAIN)
2165 break;
2166 cond_resched();
2167 }
2168
2169 if (ret < 0) {
2170 dev_err(dev, "failed to remove from PM domain %s: %d",
2171 pd->name, ret);
2172 return;
2173 }
2174
2175 /* Check if PM domain can be powered off after removing this device. */
2176 genpd_queue_power_off_work(pd);
2177}
2178
2179/**
2180 * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
2181 * @dev: Device to attach.
2182 *
2183 * Parse device's OF node to find a PM domain specifier. If such is found,
2184 * attaches the device to retrieved pm_domain ops.
2185 *
2186 * Both generic and legacy Samsung-specific DT bindings are supported to keep
2187 * backwards compatibility with existing DTBs.
2188 *
2189 * Returns 0 on successfully attached PM domain or negative error code.
2190 */
2191int genpd_dev_pm_attach(struct device *dev)
2192{
2193 struct of_phandle_args pd_args;
2194 struct generic_pm_domain *pd;
2195 int ret;
2196
2197 if (!dev->of_node)
2198 return -ENODEV;
2199
2200 if (dev->pm_domain)
2201 return -EEXIST;
2202
2203 ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
2204 "#power-domain-cells", 0, &pd_args);
2205 if (ret < 0) {
2206 if (ret != -ENOENT)
2207 return ret;
2208
2209 /*
2210 * Try legacy Samsung-specific bindings
2211 * (for backwards compatibility of DT ABI)
2212 */
2213 pd_args.args_count = 0;
2214 pd_args.np = of_parse_phandle(dev->of_node,
2215 "samsung,power-domain", 0);
2216 if (!pd_args.np)
2217 return -ENOENT;
2218 }
2219
2220 pd = of_genpd_get_from_provider(&pd_args);
2221 if (IS_ERR(pd)) {
2222 dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
2223 __func__, PTR_ERR(pd));
2224 of_node_put(dev->of_node);
2225 return PTR_ERR(pd);
2226 }
2227
2228 dev_dbg(dev, "adding to PM domain %s\n", pd->name);
2229
2230 while (1) {
2231 ret = pm_genpd_add_device(pd, dev);
2232 if (ret != -EAGAIN)
2233 break;
2234 cond_resched();
2235 }
2236
2237 if (ret < 0) {
2238 dev_err(dev, "failed to add to PM domain %s: %d",
2239 pd->name, ret);
2240 of_node_put(dev->of_node);
2241 return ret;
2242 }
2243
2244 dev->pm_domain->detach = genpd_dev_pm_detach;
2ed12769 2245 pm_genpd_poweron(pd);
aa42240a
TF
2246
2247 return 0;
2248}
2249EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
2250#endif
2bd5306a
MM
2251
2252
2253/*** debugfs support ***/
2254
2255#ifdef CONFIG_PM_ADVANCED_DEBUG
2256#include <linux/pm.h>
2257#include <linux/device.h>
2258#include <linux/debugfs.h>
2259#include <linux/seq_file.h>
2260#include <linux/init.h>
2261#include <linux/kobject.h>
2262static struct dentry *pm_genpd_debugfs_dir;
2263
2264/*
2265 * TODO: This function is a slightly modified version of rtpm_status_show
2266 * from sysfs.c, but dependencies between PM_GENERIC_DOMAINS and PM_RUNTIME
2267 * are too loose to generalize it.
2268 */
2269#ifdef CONFIG_PM_RUNTIME
2270static void rtpm_status_str(struct seq_file *s, struct device *dev)
2271{
2272 static const char * const status_lookup[] = {
2273 [RPM_ACTIVE] = "active",
2274 [RPM_RESUMING] = "resuming",
2275 [RPM_SUSPENDED] = "suspended",
2276 [RPM_SUSPENDING] = "suspending"
2277 };
2278 const char *p = "";
2279
2280 if (dev->power.runtime_error)
2281 p = "error";
2282 else if (dev->power.disable_depth)
2283 p = "unsupported";
2284 else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
2285 p = status_lookup[dev->power.runtime_status];
2286 else
2287 WARN_ON(1);
2288
2289 seq_puts(s, p);
2290}
2291#else
/* Without runtime PM support, every device is reported as "active". */
static void rtpm_status_str(struct seq_file *s, struct device *dev)
{
	seq_puts(s, "active");
}
2296#endif
2297
/*
 * Emit one summary row for @gpd: its name and power status, the names of
 * its slave domains, then one indented line per member device with that
 * device's runtime PM status.
 *
 * Returns 0 on success (including the WARN case, which emits nothing for
 * this domain) or -ERESTARTSYS if interrupted while acquiring gpd->lock.
 */
static int pm_genpd_summary_one(struct seq_file *s,
		struct generic_pm_domain *gpd)
{
	static const char * const status_lookup[] = {
		[GPD_STATE_ACTIVE] = "on",
		[GPD_STATE_WAIT_MASTER] = "wait-master",
		[GPD_STATE_BUSY] = "busy",
		[GPD_STATE_REPEAT] = "off-in-progress",
		[GPD_STATE_POWER_OFF] = "off"
	};
	struct pm_domain_data *pm_data;
	const char *kobj_path;
	struct gpd_link *link;
	int ret;

	ret = mutex_lock_interruptible(&gpd->lock);
	if (ret)
		return -ERESTARTSYS;

	/* Unknown status value: warn once and skip this domain entirely. */
	if (WARN_ON(gpd->status >= ARRAY_SIZE(status_lookup)))
		goto exit;
	seq_printf(s, "%-30s %-15s ", gpd->name, status_lookup[gpd->status]);

	/*
	 * Modifications on the list require holding locks on both
	 * master and slave, so we are safe.
	 * Also gpd->name is immutable.
	 */
	list_for_each_entry(link, &gpd->master_links, master_node) {
		seq_printf(s, "%s", link->slave->name);
		if (!list_is_last(&link->master_node, &gpd->master_links))
			seq_puts(s, ", ");
	}

	list_for_each_entry(pm_data, &gpd->dev_list, list_node) {
		/* kobject_get_path() allocates; skip the device on failure. */
		kobj_path = kobject_get_path(&pm_data->dev->kobj, GFP_KERNEL);
		if (kobj_path == NULL)
			continue;

		seq_printf(s, "\n %-50s ", kobj_path);
		rtpm_status_str(s, pm_data->dev);
		kfree(kobj_path);
	}

	seq_puts(s, "\n");
exit:
	mutex_unlock(&gpd->lock);

	return 0;
}
2348
2349static int pm_genpd_summary_show(struct seq_file *s, void *data)
2350{
2351 struct generic_pm_domain *gpd;
2352 int ret = 0;
2353
2354 seq_puts(s, " domain status slaves\n");
2355 seq_puts(s, " /device runtime status\n");
2356 seq_puts(s, "----------------------------------------------------------------------\n");
2357
2358 ret = mutex_lock_interruptible(&gpd_list_lock);
2359 if (ret)
2360 return -ERESTARTSYS;
2361
2362 list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
2363 ret = pm_genpd_summary_one(s, gpd);
2364 if (ret)
2365 break;
2366 }
2367 mutex_unlock(&gpd_list_lock);
2368
2369 return ret;
2370}
2371
/* debugfs open hook: single_open() with no private data needed. */
static int pm_genpd_summary_open(struct inode *inode, struct file *file)
{
	return single_open(file, pm_genpd_summary_show, NULL);
}
2376
/* File operations for the read-only pm_genpd_summary debugfs file. */
static const struct file_operations pm_genpd_summary_fops = {
	.open = pm_genpd_summary_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
2383
2384static int __init pm_genpd_debug_init(void)
2385{
2386 struct dentry *d;
2387
2388 pm_genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
2389
2390 if (!pm_genpd_debugfs_dir)
2391 return -ENOMEM;
2392
2393 d = debugfs_create_file("pm_genpd_summary", S_IRUGO,
2394 pm_genpd_debugfs_dir, NULL, &pm_genpd_summary_fops);
2395 if (!d)
2396 return -ENOMEM;
2397
2398 return 0;
2399}
2400late_initcall(pm_genpd_debug_init);
2401
/* Tear down the whole pm_genpd debugfs tree (safe if it was never created). */
static void __exit pm_genpd_debug_exit(void)
{
	debugfs_remove_recursive(pm_genpd_debugfs_dir);
}
__exitcall(pm_genpd_debug_exit);
2407#endif /* CONFIG_PM_ADVANCED_DEBUG */