/*
 * Rockchip Generic power domain support.
 *
 * Copyright (c) 2015 ROCKCHIP, Co. Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/err.h>
#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
#include <dt-bindings/power/rk3288-power.h>
#include <dt-bindings/power/rk3328-power.h>
#include <dt-bindings/power/rk3366-power.h>
#include <dt-bindings/power/rk3368-power.h>
#include <dt-bindings/power/rk3399-power.h>

struct rockchip_domain_info {
	int pwr_mask;
	int status_mask;
	int req_mask;
	int idle_mask;
	int ack_mask;
	bool active_wakeup;
	int pwr_w_mask;
	int req_w_mask;
};

struct rockchip_pmu_info {
	u32 pwr_offset;
	u32 status_offset;
	u32 req_offset;
	u32 idle_offset;
	u32 ack_offset;

	u32 core_pwrcnt_offset;
	u32 gpu_pwrcnt_offset;

	unsigned int core_power_transition_time;
	unsigned int gpu_power_transition_time;

	int num_domains;
	const struct rockchip_domain_info *domain_info;
};

#define MAX_QOS_REGS_NUM	5
#define QOS_PRIORITY		0x08
#define QOS_MODE		0x0c
#define QOS_BANDWIDTH		0x10
#define QOS_SATURATION		0x14
#define QOS_EXTCONTROL		0x18

struct rockchip_pm_domain {
	struct generic_pm_domain genpd;
	const struct rockchip_domain_info *info;
	struct rockchip_pmu *pmu;
	int num_qos;
	struct regmap **qos_regmap;
	u32 *qos_save_regs[MAX_QOS_REGS_NUM];
	int num_clks;
	struct clk *clks[];
};

struct rockchip_pmu {
	struct device *dev;
	struct regmap *regmap;
	const struct rockchip_pmu_info *info;
	struct mutex mutex; /* mutex lock for pmu */
	struct genpd_onecell_data genpd_data;
	struct generic_pm_domain *domains[];
};

#define to_rockchip_pd(gpd) container_of(gpd, struct rockchip_pm_domain, genpd)

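/*
 * DOMAIN() describes a power domain by the bit positions of its power,
 * status, idle-request, idle-status and idle-ack fields; passing -1
 * leaves the corresponding mask empty.  DOMAIN_M() is the variant for
 * PMUs whose power and request registers take write-enable masks in the
 * upper 16 bits, so individual bits can be set without a
 * read-modify-write.
 */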
#define DOMAIN(pwr, status, req, idle, ack, wakeup)	\
{							\
	.pwr_mask = (pwr >= 0) ? BIT(pwr) : 0,		\
	.status_mask = (status >= 0) ? BIT(status) : 0,	\
	.req_mask = (req >= 0) ? BIT(req) : 0,		\
	.idle_mask = (idle >= 0) ? BIT(idle) : 0,	\
	.ack_mask = (ack >= 0) ? BIT(ack) : 0,		\
	.active_wakeup = wakeup,			\
}

#define DOMAIN_M(pwr, status, req, idle, ack, wakeup)	\
{							\
	.pwr_w_mask = (pwr >= 0) ? BIT(pwr + 16) : 0,	\
	.pwr_mask = (pwr >= 0) ? BIT(pwr) : 0,		\
	.status_mask = (status >= 0) ? BIT(status) : 0,	\
	.req_w_mask = (req >= 0) ? BIT(req + 16) : 0,	\
	.req_mask = (req >= 0) ? BIT(req) : 0,		\
	.idle_mask = (idle >= 0) ? BIT(idle) : 0,	\
	.ack_mask = (ack >= 0) ? BIT(ack) : 0,		\
	.active_wakeup = wakeup,			\
}

#define DOMAIN_RK3288(pwr, status, req, wakeup)		\
	DOMAIN(pwr, status, req, req, (req) + 16, wakeup)

#define DOMAIN_RK3328(pwr, status, req, wakeup)		\
	DOMAIN_M(pwr, pwr, req, (req) + 10, req, wakeup)

#define DOMAIN_RK3368(pwr, status, req, wakeup)		\
	DOMAIN(pwr, status, req, (req) + 16, req, wakeup)

#define DOMAIN_RK3399(pwr, status, req, wakeup)		\
	DOMAIN(pwr, status, req, req, req, wakeup)

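/* Return true when every idle-status bit of this domain is asserted. */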
static bool rockchip_pmu_domain_is_idle(struct rockchip_pm_domain *pd)
{
	struct rockchip_pmu *pmu = pd->pmu;
	const struct rockchip_domain_info *pd_info = pd->info;
	unsigned int val;

	regmap_read(pmu->regmap, pmu->info->idle_offset, &val);
	return (val & pd_info->idle_mask) == pd_info->idle_mask;
}

static unsigned int rockchip_pmu_read_ack(struct rockchip_pmu *pmu)
{
	unsigned int val;

	regmap_read(pmu->regmap, pmu->info->ack_offset, &val);
	return val;
}

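/*
 * Ask the PMU to move the domain's bus interface (NIU) into or out of
 * the idle state, then poll until both the acknowledge bits and the
 * idle status report the requested state.
 */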
static int rockchip_pmu_set_idle_request(struct rockchip_pm_domain *pd,
					 bool idle)
{
	const struct rockchip_domain_info *pd_info = pd->info;
	struct generic_pm_domain *genpd = &pd->genpd;
	struct rockchip_pmu *pmu = pd->pmu;
	unsigned int target_ack;
	unsigned int val;
	bool is_idle;
	int ret;

	if (pd_info->req_mask == 0)
		return 0;
	else if (pd_info->req_w_mask)
		regmap_write(pmu->regmap, pmu->info->req_offset,
			     idle ? (pd_info->req_mask | pd_info->req_w_mask) :
			     pd_info->req_w_mask);
	else
		regmap_update_bits(pmu->regmap, pmu->info->req_offset,
				   pd_info->req_mask, idle ? -1U : 0);

	dsb(sy);

	/* Wait until idle_ack = 1 */
	target_ack = idle ? pd_info->ack_mask : 0;
	ret = readx_poll_timeout_atomic(rockchip_pmu_read_ack, pmu, val,
					(val & pd_info->ack_mask) == target_ack,
					0, 10000);
	if (ret) {
		dev_err(pmu->dev,
			"failed to get ack on domain '%s', val=0x%x\n",
			genpd->name, val);
		return ret;
	}

	ret = readx_poll_timeout_atomic(rockchip_pmu_domain_is_idle, pd,
					is_idle, is_idle == idle, 0, 10000);
	if (ret) {
		dev_err(pmu->dev,
			"failed to set idle on domain '%s', val=%d\n",
			genpd->name, is_idle);
		return ret;
	}

	return 0;
}

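/* Save the domain's QoS controller registers before it is powered down. */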
static int rockchip_pmu_save_qos(struct rockchip_pm_domain *pd)
{
	int i;

	for (i = 0; i < pd->num_qos; i++) {
		regmap_read(pd->qos_regmap[i],
			    QOS_PRIORITY,
			    &pd->qos_save_regs[0][i]);
		regmap_read(pd->qos_regmap[i],
			    QOS_MODE,
			    &pd->qos_save_regs[1][i]);
		regmap_read(pd->qos_regmap[i],
			    QOS_BANDWIDTH,
			    &pd->qos_save_regs[2][i]);
		regmap_read(pd->qos_regmap[i],
			    QOS_SATURATION,
			    &pd->qos_save_regs[3][i]);
		regmap_read(pd->qos_regmap[i],
			    QOS_EXTCONTROL,
			    &pd->qos_save_regs[4][i]);
	}
	return 0;
}

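/* Restore the QoS controller registers captured by rockchip_pmu_save_qos(). */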
static int rockchip_pmu_restore_qos(struct rockchip_pm_domain *pd)
{
	int i;

	for (i = 0; i < pd->num_qos; i++) {
		regmap_write(pd->qos_regmap[i],
			     QOS_PRIORITY,
			     pd->qos_save_regs[0][i]);
		regmap_write(pd->qos_regmap[i],
			     QOS_MODE,
			     pd->qos_save_regs[1][i]);
		regmap_write(pd->qos_regmap[i],
			     QOS_BANDWIDTH,
			     pd->qos_save_regs[2][i]);
		regmap_write(pd->qos_regmap[i],
			     QOS_SATURATION,
			     pd->qos_save_regs[3][i]);
		regmap_write(pd->qos_regmap[i],
			     QOS_EXTCONTROL,
			     pd->qos_save_regs[4][i]);
	}

	return 0;
}

static bool rockchip_pmu_domain_is_on(struct rockchip_pm_domain *pd)
{
	struct rockchip_pmu *pmu = pd->pmu;
	unsigned int val;

	/* check idle status for idle-only domains */
	if (pd->info->status_mask == 0)
		return !rockchip_pmu_domain_is_idle(pd);

	regmap_read(pmu->regmap, pmu->info->status_offset, &val);

	/* 1'b0: power on, 1'b1: power off */
	return !(val & pd->info->status_mask);
}

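/*
 * Flip the domain's power-control bit in the PMU and poll the status
 * register until the hardware reports the requested state.
 */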
static void rockchip_do_pmu_set_power_domain(struct rockchip_pm_domain *pd,
					     bool on)
{
	struct rockchip_pmu *pmu = pd->pmu;
	struct generic_pm_domain *genpd = &pd->genpd;
	bool is_on;

	if (pd->info->pwr_mask == 0)
		return;
	else if (pd->info->pwr_w_mask)
		regmap_write(pmu->regmap, pmu->info->pwr_offset,
			     on ? pd->info->pwr_mask :
			     (pd->info->pwr_mask | pd->info->pwr_w_mask));
	else
		regmap_update_bits(pmu->regmap, pmu->info->pwr_offset,
				   pd->info->pwr_mask, on ? 0 : -1U);

	dsb(sy);

	if (readx_poll_timeout_atomic(rockchip_pmu_domain_is_on, pd, is_on,
				      is_on == on, 0, 10000)) {
		dev_err(pmu->dev,
			"failed to set domain '%s', val=%d\n",
			genpd->name, is_on);
		return;
	}
}

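/*
 * Common power-transition path: with the domain clocks temporarily
 * enabled, save the QoS state and idle the bus interface before
 * powering down, or leave idle and restore the QoS state after
 * powering up.
 */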
static int rockchip_pd_power(struct rockchip_pm_domain *pd, bool power_on)
{
	int i;

	mutex_lock(&pd->pmu->mutex);

	if (rockchip_pmu_domain_is_on(pd) != power_on) {
		for (i = 0; i < pd->num_clks; i++)
			clk_enable(pd->clks[i]);

		if (!power_on) {
			rockchip_pmu_save_qos(pd);

			/* if powering down, idle request to NIU first */
			rockchip_pmu_set_idle_request(pd, true);
		}

		rockchip_do_pmu_set_power_domain(pd, power_on);

		if (power_on) {
			/* if powering up, leave idle mode */
			rockchip_pmu_set_idle_request(pd, false);

			rockchip_pmu_restore_qos(pd);
		}

		for (i = pd->num_clks - 1; i >= 0; i--)
			clk_disable(pd->clks[i]);
	}

	mutex_unlock(&pd->pmu->mutex);
	return 0;
}

static int rockchip_pd_power_on(struct generic_pm_domain *domain)
{
	struct rockchip_pm_domain *pd = to_rockchip_pd(domain);

	return rockchip_pd_power(pd, true);
}

static int rockchip_pd_power_off(struct generic_pm_domain *domain)
{
	struct rockchip_pm_domain *pd = to_rockchip_pd(domain);

	return rockchip_pd_power(pd, false);
}

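/* Set up pm_clk handling for a device that joins this power domain. */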
static int rockchip_pd_attach_dev(struct generic_pm_domain *genpd,
				  struct device *dev)
{
	struct clk *clk;
	int i;
	int error;

	dev_dbg(dev, "attaching to power domain '%s'\n", genpd->name);

	error = pm_clk_create(dev);
	if (error) {
		dev_err(dev, "pm_clk_create failed %d\n", error);
		return error;
	}

	i = 0;
	while ((clk = of_clk_get(dev->of_node, i++)) && !IS_ERR(clk)) {
		dev_dbg(dev, "adding clock '%pC' to list of PM clocks\n", clk);
		error = pm_clk_add_clk(dev, clk);
		if (error) {
			dev_err(dev, "pm_clk_add_clk failed %d\n", error);
			clk_put(clk);
			pm_clk_destroy(dev);
			return error;
		}
	}

	return 0;
}

static void rockchip_pd_detach_dev(struct generic_pm_domain *genpd,
				   struct device *dev)
{
	dev_dbg(dev, "detaching from power domain '%s'\n", genpd->name);

	pm_clk_destroy(dev);
}

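/*
 * Register one power-domain node from the device tree: look up the SoC
 * domain description by the node's "reg" id, prepare its clocks, map
 * the optional "pm_qos" syscons, power the domain on and initialize it
 * as a generic power domain.
 */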
static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
				      struct device_node *node)
{
	const struct rockchip_domain_info *pd_info;
	struct rockchip_pm_domain *pd;
	struct device_node *qos_node;
	struct clk *clk;
	int clk_cnt;
	int i, j;
	u32 id;
	int error;

	error = of_property_read_u32(node, "reg", &id);
	if (error) {
		dev_err(pmu->dev,
			"%s: failed to retrieve domain id (reg): %d\n",
			node->name, error);
		return -EINVAL;
	}

	if (id >= pmu->info->num_domains) {
		dev_err(pmu->dev, "%s: invalid domain id %d\n",
			node->name, id);
		return -EINVAL;
	}

	pd_info = &pmu->info->domain_info[id];
	if (!pd_info) {
		dev_err(pmu->dev, "%s: undefined domain id %d\n",
			node->name, id);
		return -EINVAL;
	}

	clk_cnt = of_count_phandle_with_args(node, "clocks", "#clock-cells");
	pd = devm_kzalloc(pmu->dev,
			  sizeof(*pd) + clk_cnt * sizeof(pd->clks[0]),
			  GFP_KERNEL);
	if (!pd)
		return -ENOMEM;

	pd->info = pd_info;
	pd->pmu = pmu;

	for (i = 0; i < clk_cnt; i++) {
		clk = of_clk_get(node, i);
		if (IS_ERR(clk)) {
			error = PTR_ERR(clk);
			dev_err(pmu->dev,
				"%s: failed to get clk at index %d: %d\n",
				node->name, i, error);
			goto err_out;
		}

		error = clk_prepare(clk);
		if (error) {
			dev_err(pmu->dev,
				"%s: failed to prepare clk %pC (index %d): %d\n",
				node->name, clk, i, error);
			clk_put(clk);
			goto err_out;
		}

		pd->clks[pd->num_clks++] = clk;

		dev_dbg(pmu->dev, "added clock '%pC' to domain '%s'\n",
			clk, node->name);
	}

	pd->num_qos = of_count_phandle_with_args(node, "pm_qos", NULL);

	if (pd->num_qos > 0) {
		pd->qos_regmap = devm_kcalloc(pmu->dev, pd->num_qos,
					      sizeof(*pd->qos_regmap),
					      GFP_KERNEL);
		if (!pd->qos_regmap) {
			error = -ENOMEM;
			goto err_out;
		}

		for (j = 0; j < MAX_QOS_REGS_NUM; j++) {
			pd->qos_save_regs[j] = devm_kcalloc(pmu->dev,
							    pd->num_qos,
							    sizeof(u32),
							    GFP_KERNEL);
			if (!pd->qos_save_regs[j]) {
				error = -ENOMEM;
				goto err_out;
			}
		}

		for (j = 0; j < pd->num_qos; j++) {
			qos_node = of_parse_phandle(node, "pm_qos", j);
			if (!qos_node) {
				error = -ENODEV;
				goto err_out;
			}
			pd->qos_regmap[j] = syscon_node_to_regmap(qos_node);
			if (IS_ERR(pd->qos_regmap[j])) {
				error = -ENODEV;
				of_node_put(qos_node);
				goto err_out;
			}
			of_node_put(qos_node);
		}
	}

	error = rockchip_pd_power(pd, true);
	if (error) {
		dev_err(pmu->dev,
			"failed to power on domain '%s': %d\n",
			node->name, error);
		goto err_out;
	}

	pd->genpd.name = node->name;
	pd->genpd.power_off = rockchip_pd_power_off;
	pd->genpd.power_on = rockchip_pd_power_on;
	pd->genpd.attach_dev = rockchip_pd_attach_dev;
	pd->genpd.detach_dev = rockchip_pd_detach_dev;
	pd->genpd.flags = GENPD_FLAG_PM_CLK;
	if (pd_info->active_wakeup)
		pd->genpd.flags |= GENPD_FLAG_ACTIVE_WAKEUP;
	pm_genpd_init(&pd->genpd, NULL, false);

	pmu->genpd_data.domains[id] = &pd->genpd;
	return 0;

err_out:
	while (--i >= 0) {
		clk_unprepare(pd->clks[i]);
		clk_put(pd->clks[i]);
	}
	return error;
}

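/* Undo rockchip_pm_add_one_domain(): remove the genpd and drop its clocks. */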
static void rockchip_pm_remove_one_domain(struct rockchip_pm_domain *pd)
{
	int i, ret;

	/*
	 * We're in the error cleanup already, so we only complain,
	 * but won't emit another error on top of the original one.
	 */
	ret = pm_genpd_remove(&pd->genpd);
	if (ret < 0)
		dev_err(pd->pmu->dev, "failed to remove domain '%s' : %d - state may be inconsistent\n",
			pd->genpd.name, ret);

	for (i = 0; i < pd->num_clks; i++) {
		clk_unprepare(pd->clks[i]);
		clk_put(pd->clks[i]);
	}

	/* protect the zeroing of pd->num_clks */
	mutex_lock(&pd->pmu->mutex);
	pd->num_clks = 0;
	mutex_unlock(&pd->pmu->mutex);

	/* devm will free our memory */
}

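/* Tear down every domain that was registered before a probe error. */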
static void rockchip_pm_domain_cleanup(struct rockchip_pmu *pmu)
{
	struct generic_pm_domain *genpd;
	struct rockchip_pm_domain *pd;
	int i;

	for (i = 0; i < pmu->genpd_data.num_domains; i++) {
		genpd = pmu->genpd_data.domains[i];
		if (genpd) {
			pd = to_rockchip_pd(genpd);
			rockchip_pm_remove_one_domain(pd);
		}
	}

	/* devm will free our memory */
}

static void rockchip_configure_pd_cnt(struct rockchip_pmu *pmu,
				      u32 domain_reg_offset,
				      unsigned int count)
{
	/* First configure domain power down transition count ... */
	regmap_write(pmu->regmap, domain_reg_offset, count);
	/* ... and then power up count. */
	regmap_write(pmu->regmap, domain_reg_offset + 4, count);
}

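/*
 * Recursively register the child nodes of @parent as power domains and
 * link each one to its parent domain with pm_genpd_add_subdomain().
 */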
static int rockchip_pm_add_subdomain(struct rockchip_pmu *pmu,
				     struct device_node *parent)
{
	struct device_node *np;
	struct generic_pm_domain *child_domain, *parent_domain;
	int error;

	for_each_child_of_node(parent, np) {
		u32 idx;

		error = of_property_read_u32(parent, "reg", &idx);
		if (error) {
			dev_err(pmu->dev,
				"%s: failed to retrieve domain id (reg): %d\n",
				parent->name, error);
			goto err_out;
		}
		parent_domain = pmu->genpd_data.domains[idx];

		error = rockchip_pm_add_one_domain(pmu, np);
		if (error) {
			dev_err(pmu->dev, "failed to handle node %s: %d\n",
				np->name, error);
			goto err_out;
		}

		error = of_property_read_u32(np, "reg", &idx);
		if (error) {
			dev_err(pmu->dev,
				"%s: failed to retrieve domain id (reg): %d\n",
				np->name, error);
			goto err_out;
		}
		child_domain = pmu->genpd_data.domains[idx];

		error = pm_genpd_add_subdomain(parent_domain, child_domain);
		if (error) {
			dev_err(pmu->dev, "%s failed to add subdomain %s: %d\n",
				parent_domain->name, child_domain->name, error);
			goto err_out;
		} else {
			dev_dbg(pmu->dev, "%s add subdomain: %s\n",
				parent_domain->name, child_domain->name);
		}

		rockchip_pm_add_subdomain(pmu, np);
	}

	return 0;

err_out:
	of_node_put(np);
	return error;
}

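/*
 * Probe: look up the per-SoC PMU description, map the parent syscon
 * regmap, register every power-domain child node (and its subdomains)
 * and expose them through a onecell genpd provider.
 */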
static int rockchip_pm_domain_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct device_node *node;
	struct device *parent;
	struct rockchip_pmu *pmu;
	const struct of_device_id *match;
	const struct rockchip_pmu_info *pmu_info;
	int error;

	if (!np) {
		dev_err(dev, "device tree node not found\n");
		return -ENODEV;
	}

	match = of_match_device(dev->driver->of_match_table, dev);
	if (!match || !match->data) {
		dev_err(dev, "missing pmu data\n");
		return -EINVAL;
	}

	pmu_info = match->data;

	pmu = devm_kzalloc(dev,
			   sizeof(*pmu) +
				pmu_info->num_domains * sizeof(pmu->domains[0]),
			   GFP_KERNEL);
	if (!pmu)
		return -ENOMEM;

	pmu->dev = &pdev->dev;
	mutex_init(&pmu->mutex);

	pmu->info = pmu_info;

	pmu->genpd_data.domains = pmu->domains;
	pmu->genpd_data.num_domains = pmu_info->num_domains;

	parent = dev->parent;
	if (!parent) {
		dev_err(dev, "no parent for syscon devices\n");
		return -ENODEV;
	}

	pmu->regmap = syscon_node_to_regmap(parent->of_node);
	if (IS_ERR(pmu->regmap)) {
		dev_err(dev, "no regmap available\n");
		return PTR_ERR(pmu->regmap);
	}

	/*
	 * Configure power up and down transition delays for CORE
	 * and GPU domains.
	 */
	if (pmu_info->core_power_transition_time)
		rockchip_configure_pd_cnt(pmu, pmu_info->core_pwrcnt_offset,
					  pmu_info->core_power_transition_time);
	if (pmu_info->gpu_pwrcnt_offset)
		rockchip_configure_pd_cnt(pmu, pmu_info->gpu_pwrcnt_offset,
					  pmu_info->gpu_power_transition_time);

	error = -ENODEV;

	for_each_available_child_of_node(np, node) {
		error = rockchip_pm_add_one_domain(pmu, node);
		if (error) {
			dev_err(dev, "failed to handle node %s: %d\n",
				node->name, error);
			of_node_put(node);
			goto err_out;
		}

		error = rockchip_pm_add_subdomain(pmu, node);
		if (error < 0) {
			dev_err(dev, "failed to handle subdomain node %s: %d\n",
				node->name, error);
			of_node_put(node);
			goto err_out;
		}
	}

	if (error) {
		dev_dbg(dev, "no power domains defined\n");
		goto err_out;
	}

	error = of_genpd_add_provider_onecell(np, &pmu->genpd_data);
	if (error) {
		dev_err(dev, "failed to add provider: %d\n", error);
		goto err_out;
	}

	return 0;

err_out:
	rockchip_pm_domain_cleanup(pmu);
	return error;
}

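/*
 * Per-SoC domain tables; the DOMAIN_RK* arguments are the
 * (pwr, status, req, wakeup) values, with -1 marking a field the
 * domain does not implement.
 */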
static const struct rockchip_domain_info rk3288_pm_domains[] = {
	[RK3288_PD_VIO]		= DOMAIN_RK3288(7, 7, 4, false),
	[RK3288_PD_HEVC]	= DOMAIN_RK3288(14, 10, 9, false),
	[RK3288_PD_VIDEO]	= DOMAIN_RK3288(8, 8, 3, false),
	[RK3288_PD_GPU]		= DOMAIN_RK3288(9, 9, 2, false),
};

static const struct rockchip_domain_info rk3328_pm_domains[] = {
	[RK3328_PD_CORE]	= DOMAIN_RK3328(-1, 0, 0, false),
	[RK3328_PD_GPU]		= DOMAIN_RK3328(-1, 1, 1, false),
	[RK3328_PD_BUS]		= DOMAIN_RK3328(-1, 2, 2, true),
	[RK3328_PD_MSCH]	= DOMAIN_RK3328(-1, 3, 3, true),
	[RK3328_PD_PERI]	= DOMAIN_RK3328(-1, 4, 4, true),
	[RK3328_PD_VIDEO]	= DOMAIN_RK3328(-1, 5, 5, false),
	[RK3328_PD_HEVC]	= DOMAIN_RK3328(-1, 6, 6, false),
	[RK3328_PD_VIO]		= DOMAIN_RK3328(-1, 8, 8, false),
	[RK3328_PD_VPU]		= DOMAIN_RK3328(-1, 9, 9, false),
};

static const struct rockchip_domain_info rk3366_pm_domains[] = {
	[RK3366_PD_PERI]	= DOMAIN_RK3368(10, 10, 6, true),
	[RK3366_PD_VIO]		= DOMAIN_RK3368(14, 14, 8, false),
	[RK3366_PD_VIDEO]	= DOMAIN_RK3368(13, 13, 7, false),
	[RK3366_PD_RKVDEC]	= DOMAIN_RK3368(11, 11, 7, false),
	[RK3366_PD_WIFIBT]	= DOMAIN_RK3368(8, 8, 9, false),
	[RK3366_PD_VPU]		= DOMAIN_RK3368(12, 12, 7, false),
	[RK3366_PD_GPU]		= DOMAIN_RK3368(15, 15, 2, false),
};

static const struct rockchip_domain_info rk3368_pm_domains[] = {
	[RK3368_PD_PERI]	= DOMAIN_RK3368(13, 12, 6, true),
	[RK3368_PD_VIO]		= DOMAIN_RK3368(15, 14, 8, false),
	[RK3368_PD_VIDEO]	= DOMAIN_RK3368(14, 13, 7, false),
	[RK3368_PD_GPU_0]	= DOMAIN_RK3368(16, 15, 2, false),
	[RK3368_PD_GPU_1]	= DOMAIN_RK3368(17, 16, 2, false),
};

static const struct rockchip_domain_info rk3399_pm_domains[] = {
	[RK3399_PD_TCPD0]	= DOMAIN_RK3399(8, 8, -1, false),
	[RK3399_PD_TCPD1]	= DOMAIN_RK3399(9, 9, -1, false),
	[RK3399_PD_CCI]		= DOMAIN_RK3399(10, 10, -1, true),
	[RK3399_PD_CCI0]	= DOMAIN_RK3399(-1, -1, 15, true),
	[RK3399_PD_CCI1]	= DOMAIN_RK3399(-1, -1, 16, true),
	[RK3399_PD_PERILP]	= DOMAIN_RK3399(11, 11, 1, true),
	[RK3399_PD_PERIHP]	= DOMAIN_RK3399(12, 12, 2, true),
	[RK3399_PD_CENTER]	= DOMAIN_RK3399(13, 13, 14, true),
	[RK3399_PD_VIO]		= DOMAIN_RK3399(14, 14, 17, false),
	[RK3399_PD_GPU]		= DOMAIN_RK3399(15, 15, 0, false),
	[RK3399_PD_VCODEC]	= DOMAIN_RK3399(16, 16, 3, false),
	[RK3399_PD_VDU]		= DOMAIN_RK3399(17, 17, 4, false),
	[RK3399_PD_RGA]		= DOMAIN_RK3399(18, 18, 5, false),
	[RK3399_PD_IEP]		= DOMAIN_RK3399(19, 19, 6, false),
	[RK3399_PD_VO]		= DOMAIN_RK3399(20, 20, -1, false),
	[RK3399_PD_VOPB]	= DOMAIN_RK3399(-1, -1, 7, false),
	[RK3399_PD_VOPL]	= DOMAIN_RK3399(-1, -1, 8, false),
	[RK3399_PD_ISP0]	= DOMAIN_RK3399(22, 22, 9, false),
	[RK3399_PD_ISP1]	= DOMAIN_RK3399(23, 23, 10, false),
	[RK3399_PD_HDCP]	= DOMAIN_RK3399(24, 24, 11, false),
	[RK3399_PD_GMAC]	= DOMAIN_RK3399(25, 25, 23, true),
	[RK3399_PD_EMMC]	= DOMAIN_RK3399(26, 26, 24, true),
	[RK3399_PD_USB3]	= DOMAIN_RK3399(27, 27, 12, true),
	[RK3399_PD_EDP]		= DOMAIN_RK3399(28, 28, 22, false),
	[RK3399_PD_GIC]		= DOMAIN_RK3399(29, 29, 27, true),
	[RK3399_PD_SD]		= DOMAIN_RK3399(30, 30, 28, true),
	[RK3399_PD_SDIOAUDIO]	= DOMAIN_RK3399(31, 31, 29, true),
};

static const struct rockchip_pmu_info rk3288_pmu = {
	.pwr_offset = 0x08,
	.status_offset = 0x0c,
	.req_offset = 0x10,
	.idle_offset = 0x14,
	.ack_offset = 0x14,

	.core_pwrcnt_offset = 0x34,
	.gpu_pwrcnt_offset = 0x3c,

	.core_power_transition_time = 24, /* 1us */
	.gpu_power_transition_time = 24, /* 1us */

	.num_domains = ARRAY_SIZE(rk3288_pm_domains),
	.domain_info = rk3288_pm_domains,
};

static const struct rockchip_pmu_info rk3328_pmu = {
	.req_offset = 0x414,
	.idle_offset = 0x484,
	.ack_offset = 0x484,

	.num_domains = ARRAY_SIZE(rk3328_pm_domains),
	.domain_info = rk3328_pm_domains,
};

static const struct rockchip_pmu_info rk3366_pmu = {
	.pwr_offset = 0x0c,
	.status_offset = 0x10,
	.req_offset = 0x3c,
	.idle_offset = 0x40,
	.ack_offset = 0x40,

	.core_pwrcnt_offset = 0x48,
	.gpu_pwrcnt_offset = 0x50,

	.core_power_transition_time = 24,
	.gpu_power_transition_time = 24,

	.num_domains = ARRAY_SIZE(rk3366_pm_domains),
	.domain_info = rk3366_pm_domains,
};

static const struct rockchip_pmu_info rk3368_pmu = {
	.pwr_offset = 0x0c,
	.status_offset = 0x10,
	.req_offset = 0x3c,
	.idle_offset = 0x40,
	.ack_offset = 0x40,

	.core_pwrcnt_offset = 0x48,
	.gpu_pwrcnt_offset = 0x50,

	.core_power_transition_time = 24,
	.gpu_power_transition_time = 24,

	.num_domains = ARRAY_SIZE(rk3368_pm_domains),
	.domain_info = rk3368_pm_domains,
};

static const struct rockchip_pmu_info rk3399_pmu = {
	.pwr_offset = 0x14,
	.status_offset = 0x18,
	.req_offset = 0x60,
	.idle_offset = 0x64,
	.ack_offset = 0x68,

	/* ARM Trusted Firmware manages power transition times */

	.num_domains = ARRAY_SIZE(rk3399_pm_domains),
	.domain_info = rk3399_pm_domains,
};

static const struct of_device_id rockchip_pm_domain_dt_match[] = {
	{
		.compatible = "rockchip,rk3288-power-controller",
		.data = (void *)&rk3288_pmu,
	},
	{
		.compatible = "rockchip,rk3328-power-controller",
		.data = (void *)&rk3328_pmu,
	},
	{
		.compatible = "rockchip,rk3366-power-controller",
		.data = (void *)&rk3366_pmu,
	},
	{
		.compatible = "rockchip,rk3368-power-controller",
		.data = (void *)&rk3368_pmu,
	},
	{
		.compatible = "rockchip,rk3399-power-controller",
		.data = (void *)&rk3399_pmu,
	},
	{ /* sentinel */ },
};

static struct platform_driver rockchip_pm_domain_driver = {
	.probe = rockchip_pm_domain_probe,
	.driver = {
		.name   = "rockchip-pm-domain",
		.of_match_table = rockchip_pm_domain_dt_match,
		/*
		 * We can't forcibly eject devices from a power domain,
		 * so we can't really remove power domains once they
		 * were added.
		 */
		.suppress_bind_attrs = true,
	},
};

static int __init rockchip_pm_domain_drv_register(void)
{
	return platform_driver_register(&rockchip_pm_domain_driver);
}
postcore_initcall(rockchip_pm_domain_drv_register);