]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blob - drivers/bus/ti-sysc.c
Merge branch 'i2c/for-5.13' of git://git.kernel.org/pub/scm/linux/kernel/git/wsa...
[mirror_ubuntu-jammy-kernel.git] / drivers / bus / ti-sysc.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * ti-sysc.c - Texas Instruments sysc interconnect target driver
4 */
5
6 #include <linux/io.h>
7 #include <linux/clk.h>
8 #include <linux/clkdev.h>
9 #include <linux/delay.h>
10 #include <linux/list.h>
11 #include <linux/module.h>
12 #include <linux/platform_device.h>
13 #include <linux/pm_domain.h>
14 #include <linux/pm_runtime.h>
15 #include <linux/reset.h>
16 #include <linux/of_address.h>
17 #include <linux/of_platform.h>
18 #include <linux/slab.h>
19 #include <linux/sys_soc.h>
20 #include <linux/iopoll.h>
21
22 #include <linux/platform_data/ti-sysc.h>
23
24 #include <dt-bindings/bus/ti-sysc.h>
25
/*
 * Feature-disable bits; presumably read from a SoC control register to
 * detect fused-off ISP/IVA/SGX on secure devices -- TODO confirm users.
 */
#define DIS_ISP		BIT(2)
#define DIS_IVA		BIT(1)
#define DIS_SGX		BIT(0)

/* Build one soc_device_attribute table entry keyed by machine name */
#define SOC_FLAG(match, flag)	{ .machine = match, .data = (void *)(flag), }

/* Poll timeout in usec for OCP softreset, see sysc_wait_softreset() */
#define MAX_MODULE_SOFTRESET_WAIT		10000
/*
 * SoC generations this driver distinguishes. SOC_UNKNOWN must stay
 * first so zero-initialized data defaults to it.
 */
enum sysc_soc {
	SOC_UNKNOWN,
	SOC_2420,
	SOC_2430,
	SOC_3430,
	SOC_3630,
	SOC_4430,
	SOC_4460,
	SOC_4470,
	SOC_5430,
	SOC_AM3,
	SOC_AM4,
	SOC_DRA7,
};
48
/* One module base address, listed on sysc_soc_info.disabled_modules */
struct sysc_address {
	unsigned long base;
	struct list_head node;
};

/* Per-SoC data shared by all interconnect target module instances */
struct sysc_soc_info {
	unsigned long general_purpose:1;	/* GP vs secure device -- TODO confirm where set */
	enum sysc_soc soc;
	struct mutex list_lock; /* disabled modules list lock */
	struct list_head disabled_modules;
};
60
/*
 * Clock slots for a module: fck and ick always occupy the two fixed
 * slots, optional clocks follow from SYSC_OPTFCK0. Indexes must match
 * clock_names[] below.
 */
enum sysc_clocks {
	SYSC_FCK,
	SYSC_ICK,
	SYSC_OPTFCK0,
	SYSC_OPTFCK1,
	SYSC_OPTFCK2,
	SYSC_OPTFCK3,
	SYSC_OPTFCK4,
	SYSC_OPTFCK5,
	SYSC_OPTFCK6,
	SYSC_OPTFCK7,
	SYSC_MAX_CLOCKS,
};
74
/* Singleton per-SoC data, shared by all driver instances */
static struct sysc_soc_info *sysc_soc;
/* Register resource names, indexed by enum sysc_registers */
static const char * const reg_names[] = { "rev", "sysc", "syss", };
/* Clock role names, indexed by enum sysc_clocks */
static const char * const clock_names[SYSC_MAX_CLOCKS] = {
	"fck", "ick", "opt0", "opt1", "opt2", "opt3", "opt4",
	"opt5", "opt6", "opt7",
};

/* Two-bit sysconfig register fields */
#define SYSC_IDLEMODE_MASK		3
#define SYSC_CLOCKACTIVITY_MASK		3
84
/**
 * struct sysc - TI sysc interconnect target module registers and capabilities
 * @dev: struct device pointer
 * @module_pa: physical address of the interconnect target module
 * @module_size: size of the interconnect target module
 * @module_va: virtual address of the interconnect target module
 * @offsets: register offsets from module base
 * @mdata: ti-sysc to hwmod translation data for a module
 * @clocks: clocks used by the interconnect target module
 * @clock_roles: clock role names for the found clocks
 * @nr_clocks: number of clocks used by the interconnect target module
 * @rsts: resets used by the interconnect target module
 * @legacy_mode: configured for legacy mode if set
 * @cap: interconnect target module capabilities
 * @cfg: interconnect target module configuration
 * @cookie: data used by legacy platform callbacks
 * @name: name if available
 * @revision: interconnect target module revision
 * @enabled: sysc runtime enabled status
 * @needs_resume: runtime resume needed on resume from suspend
 * @child_needs_resume: runtime resume needed for child on resume from suspend
 * @idle_work: work structure used to perform delayed idle on a module
 * @pre_reset_quirk: module specific pre-reset quirk
 * @post_reset_quirk: module specific post-reset quirk
 * @reset_done_quirk: module specific reset done quirk
 * @module_enable_quirk: module specific enable quirk
 * @module_disable_quirk: module specific disable quirk
 * @module_unlock_quirk: module specific sysconfig unlock quirk
 * @module_lock_quirk: module specific sysconfig lock quirk
 */
struct sysc {
	struct device *dev;
	u64 module_pa;
	u32 module_size;
	void __iomem *module_va;
	int offsets[SYSC_MAX_REGS];
	struct ti_sysc_module_data *mdata;
	struct clk **clocks;
	const char **clock_roles;
	int nr_clocks;
	struct reset_control *rsts;
	const char *legacy_mode;
	const struct sysc_capabilities *cap;
	struct sysc_config cfg;
	struct ti_sysc_cookie cookie;
	const char *name;
	u32 revision;
	unsigned int enabled:1;
	unsigned int needs_resume:1;
	unsigned int child_needs_resume:1;
	struct delayed_work idle_work;
	void (*pre_reset_quirk)(struct sysc *sysc);
	void (*post_reset_quirk)(struct sysc *sysc);
	void (*reset_done_quirk)(struct sysc *sysc);
	void (*module_enable_quirk)(struct sysc *sysc);
	void (*module_disable_quirk)(struct sysc *sysc);
	void (*module_unlock_quirk)(struct sysc *sysc);
	void (*module_lock_quirk)(struct sysc *sysc);
};
145
146 static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np,
147 bool is_child);
148
149 static void sysc_write(struct sysc *ddata, int offset, u32 value)
150 {
151 if (ddata->cfg.quirks & SYSC_QUIRK_16BIT) {
152 writew_relaxed(value & 0xffff, ddata->module_va + offset);
153
154 /* Only i2c revision has LO and HI register with stride of 4 */
155 if (ddata->offsets[SYSC_REVISION] >= 0 &&
156 offset == ddata->offsets[SYSC_REVISION]) {
157 u16 hi = value >> 16;
158
159 writew_relaxed(hi, ddata->module_va + offset + 4);
160 }
161
162 return;
163 }
164
165 writel_relaxed(value, ddata->module_va + offset);
166 }
167
168 static u32 sysc_read(struct sysc *ddata, int offset)
169 {
170 if (ddata->cfg.quirks & SYSC_QUIRK_16BIT) {
171 u32 val;
172
173 val = readw_relaxed(ddata->module_va + offset);
174
175 /* Only i2c revision has LO and HI register with stride of 4 */
176 if (ddata->offsets[SYSC_REVISION] >= 0 &&
177 offset == ddata->offsets[SYSC_REVISION]) {
178 u16 tmp = readw_relaxed(ddata->module_va + offset + 4);
179
180 val |= tmp << 16;
181 }
182
183 return val;
184 }
185
186 return readl_relaxed(ddata->module_va + offset);
187 }
188
189 static bool sysc_opt_clks_needed(struct sysc *ddata)
190 {
191 return !!(ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_NEEDED);
192 }
193
194 static u32 sysc_read_revision(struct sysc *ddata)
195 {
196 int offset = ddata->offsets[SYSC_REVISION];
197
198 if (offset < 0)
199 return 0;
200
201 return sysc_read(ddata, offset);
202 }
203
204 static u32 sysc_read_sysconfig(struct sysc *ddata)
205 {
206 int offset = ddata->offsets[SYSC_SYSCONFIG];
207
208 if (offset < 0)
209 return 0;
210
211 return sysc_read(ddata, offset);
212 }
213
214 static u32 sysc_read_sysstatus(struct sysc *ddata)
215 {
216 int offset = ddata->offsets[SYSC_SYSSTATUS];
217
218 if (offset < 0)
219 return 0;
220
221 return sysc_read(ddata, offset);
222 }
223
/*
 * Poll on reset status. Returns 0 when reset is done, when the module
 * has no softreset bit, or when neither status source is available;
 * otherwise the poll timeout error.
 */
static int sysc_wait_softreset(struct sysc *ddata)
{
	u32 sysc_mask, syss_done, rstval;
	int syss_offset, error = 0;

	/* Nothing to wait for if the module has no softreset bit */
	if (ddata->cap->regbits->srst_shift < 0)
		return 0;

	syss_offset = ddata->offsets[SYSC_SYSSTATUS];
	sysc_mask = BIT(ddata->cap->regbits->srst_shift);

	/* Some modules signal reset done with 0 instead of syss_mask */
	if (ddata->cfg.quirks & SYSS_QUIRK_RESETDONE_INVERTED)
		syss_done = 0;
	else
		syss_done = ddata->cfg.syss_mask;

	/* Prefer sysstatus; fall back to the sysconfig softreset bit */
	if (syss_offset >= 0) {
		error = readx_poll_timeout_atomic(sysc_read_sysstatus, ddata,
				rstval, (rstval & ddata->cfg.syss_mask) ==
				syss_done, 100, MAX_MODULE_SOFTRESET_WAIT);

	} else if (ddata->cfg.quirks & SYSC_QUIRK_RESET_STATUS) {
		error = readx_poll_timeout_atomic(sysc_read_sysconfig, ddata,
				rstval, !(rstval & sysc_mask),
				100, MAX_MODULE_SOFTRESET_WAIT);
	}

	return error;
}
254
/*
 * Register a clkdev alias for a clock found on a child node so that
 * clk_get() on this device can find it by role name. Returns 0 if the
 * alias already exists or was added, or a negative error code.
 */
static int sysc_add_named_clock_from_child(struct sysc *ddata,
					   const char *name,
					   const char *optfck_name)
{
	struct device_node *np = ddata->dev->of_node;
	struct device_node *child;
	struct clk_lookup *cl;
	struct clk *clock;
	const char *n;

	if (name)
		n = name;
	else
		n = optfck_name;

	/* Does the clock alias already exist? */
	clock = of_clk_get_by_name(np, n);
	if (!IS_ERR(clock)) {
		clk_put(clock);

		return 0;
	}

	/* Only the first available child is considered */
	child = of_get_next_available_child(np, NULL);
	if (!child)
		return -ENODEV;

	clock = devm_get_clk_from_child(ddata->dev, child, name);
	if (IS_ERR(clock))
		return PTR_ERR(clock);

	/*
	 * Use clkdev_add() instead of clkdev_alloc() to avoid the MAX_DEV_ID
	 * limit for clk_get(). If cl ever needs to be freed, it should be done
	 * with clkdev_drop().
	 */
	cl = kzalloc(sizeof(*cl), GFP_KERNEL);
	if (!cl)
		return -ENOMEM;

	cl->con_id = n;
	cl->dev_id = dev_name(ddata->dev);
	cl->clk = clock;
	clkdev_add(cl);

	/* Drop our reference; devm keeps the clock alive for the lookup */
	clk_put(clock);

	return 0;
}
304
/*
 * Add an externally-controlled optional clock for modules with the
 * SYSC_QUIRK_EXT_OPT_CLOCK quirk. The clock is taken from a child node
 * and registered under an optional-clock role name.
 */
static int sysc_init_ext_opt_clock(struct sysc *ddata, const char *name)
{
	const char *optfck_name;
	int error, index;

	/* Place the clock in the first optional slot, or append */
	if (ddata->nr_clocks < SYSC_OPTFCK0)
		index = SYSC_OPTFCK0;
	else
		index = ddata->nr_clocks;

	if (name)
		optfck_name = name;
	else
		optfck_name = clock_names[index];

	error = sysc_add_named_clock_from_child(ddata, name, optfck_name);
	if (error)
		return error;

	ddata->clock_roles[index] = optfck_name;
	ddata->nr_clocks++;

	return 0;
}
329
/*
 * Look up and prepare one clock by role name. "fck"/"ick" prefixes map
 * to the fixed slots; anything else goes into the first free optional
 * slot. The clock is left prepared but not enabled.
 */
static int sysc_get_one_clock(struct sysc *ddata, const char *name)
{
	int error, i, index = -ENODEV;

	/* Match only the first three characters, e.g. "fck1" is still an fck */
	if (!strncmp(clock_names[SYSC_FCK], name, 3))
		index = SYSC_FCK;
	else if (!strncmp(clock_names[SYSC_ICK], name, 3))
		index = SYSC_ICK;

	if (index < 0) {
		for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) {
			if (!ddata->clocks[i]) {
				index = i;
				break;
			}
		}
	}

	if (index < 0) {
		dev_err(ddata->dev, "clock %s not added\n", name);
		return index;
	}

	ddata->clocks[index] = devm_clk_get(ddata->dev, name);
	if (IS_ERR(ddata->clocks[index])) {
		dev_err(ddata->dev, "clock get error for %s: %li\n",
			name, PTR_ERR(ddata->clocks[index]));

		return PTR_ERR(ddata->clocks[index]);
	}

	error = clk_prepare(ddata->clocks[index]);
	if (error) {
		dev_err(ddata->dev, "clock prepare error for %s: %i\n",
			name, error);

		return error;
	}

	return 0;
}
371
/*
 * Parse "clock-names" from the device tree and get all module clocks.
 * Populates ddata->clock_roles and ddata->clocks; enforces at most one
 * fck and one ick and at most SYSC_MAX_CLOCKS clocks total.
 */
static int sysc_get_clocks(struct sysc *ddata)
{
	struct device_node *np = ddata->dev->of_node;
	struct property *prop;
	const char *name;
	int nr_fck = 0, nr_ick = 0, i, error = 0;

	ddata->clock_roles = devm_kcalloc(ddata->dev,
					  SYSC_MAX_CLOCKS,
					  sizeof(*ddata->clock_roles),
					  GFP_KERNEL);
	if (!ddata->clock_roles)
		return -ENOMEM;

	/* Count fck/ick instances while recording each role name */
	of_property_for_each_string(np, "clock-names", prop, name) {
		if (!strncmp(clock_names[SYSC_FCK], name, 3))
			nr_fck++;
		if (!strncmp(clock_names[SYSC_ICK], name, 3))
			nr_ick++;
		ddata->clock_roles[ddata->nr_clocks] = name;
		ddata->nr_clocks++;
	}

	if (ddata->nr_clocks < 1)
		return 0;

	if ((ddata->cfg.quirks & SYSC_QUIRK_EXT_OPT_CLOCK)) {
		error = sysc_init_ext_opt_clock(ddata, NULL);
		if (error)
			return error;
	}

	if (ddata->nr_clocks > SYSC_MAX_CLOCKS) {
		dev_err(ddata->dev, "too many clocks for %pOF\n", np);

		return -EINVAL;
	}

	if (nr_fck > 1 || nr_ick > 1) {
		dev_err(ddata->dev, "max one fck and ick for %pOF\n", np);

		return -EINVAL;
	}

	/* Always add a slot for main clocks fck and ick even if unused */
	if (!nr_fck)
		ddata->nr_clocks++;
	if (!nr_ick)
		ddata->nr_clocks++;

	ddata->clocks = devm_kcalloc(ddata->dev,
				     ddata->nr_clocks, sizeof(*ddata->clocks),
				     GFP_KERNEL);
	if (!ddata->clocks)
		return -ENOMEM;

	/* Get and prepare every clock that has a role name */
	for (i = 0; i < SYSC_MAX_CLOCKS; i++) {
		const char *name = ddata->clock_roles[i];

		if (!name)
			continue;

		error = sysc_get_one_clock(ddata, name);
		if (error)
			return error;
	}

	return 0;
}
441
/*
 * Enable the main clocks (fck and ick). On failure, clocks enabled so
 * far are disabled again in reverse order before returning the error.
 */
static int sysc_enable_main_clocks(struct sysc *ddata)
{
	struct clk *clock;
	int i, error;

	if (!ddata->clocks)
		return 0;

	for (i = 0; i < SYSC_OPTFCK0; i++) {
		clock = ddata->clocks[i];

		/* Main clocks may not have ick */
		if (IS_ERR_OR_NULL(clock))
			continue;

		error = clk_enable(clock);
		if (error)
			goto err_disable;
	}

	return 0;

err_disable:
	/* Unwind only the clocks that were successfully enabled above */
	for (i--; i >= 0; i--) {
		clock = ddata->clocks[i];

		/* Main clocks may not have ick */
		if (IS_ERR_OR_NULL(clock))
			continue;

		clk_disable(clock);
	}

	return error;
}
477
478 static void sysc_disable_main_clocks(struct sysc *ddata)
479 {
480 struct clk *clock;
481 int i;
482
483 if (!ddata->clocks)
484 return;
485
486 for (i = 0; i < SYSC_OPTFCK0; i++) {
487 clock = ddata->clocks[i];
488 if (IS_ERR_OR_NULL(clock))
489 continue;
490
491 clk_disable(clock);
492 }
493 }
494
/*
 * Enable the optional clocks. Optional clocks are assumed contiguous,
 * so the first missing slot ends the loop. On failure, clocks enabled
 * so far are disabled again in reverse order.
 */
static int sysc_enable_opt_clocks(struct sysc *ddata)
{
	struct clk *clock;
	int i, error;

	if (!ddata->clocks || ddata->nr_clocks < SYSC_OPTFCK0 + 1)
		return 0;

	for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) {
		clock = ddata->clocks[i];

		/* Assume no holes for opt clocks */
		if (IS_ERR_OR_NULL(clock))
			return 0;

		error = clk_enable(clock);
		if (error)
			goto err_disable;
	}

	return 0;

err_disable:
	/* Unwind only the opt clocks that were successfully enabled */
	for (i--; i >= 0; i--) {
		clock = ddata->clocks[i];
		if (IS_ERR_OR_NULL(clock))
			continue;

		clk_disable(clock);
	}

	return error;
}
528
529 static void sysc_disable_opt_clocks(struct sysc *ddata)
530 {
531 struct clk *clock;
532 int i;
533
534 if (!ddata->clocks || ddata->nr_clocks < SYSC_OPTFCK0 + 1)
535 return;
536
537 for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) {
538 clock = ddata->clocks[i];
539
540 /* Assume no holes for opt clocks */
541 if (IS_ERR_OR_NULL(clock))
542 return;
543
544 clk_disable(clock);
545 }
546 }
547
548 static void sysc_clkdm_deny_idle(struct sysc *ddata)
549 {
550 struct ti_sysc_platform_data *pdata;
551
552 if (ddata->legacy_mode || (ddata->cfg.quirks & SYSC_QUIRK_CLKDM_NOAUTO))
553 return;
554
555 pdata = dev_get_platdata(ddata->dev);
556 if (pdata && pdata->clkdm_deny_idle)
557 pdata->clkdm_deny_idle(ddata->dev, &ddata->cookie);
558 }
559
560 static void sysc_clkdm_allow_idle(struct sysc *ddata)
561 {
562 struct ti_sysc_platform_data *pdata;
563
564 if (ddata->legacy_mode || (ddata->cfg.quirks & SYSC_QUIRK_CLKDM_NOAUTO))
565 return;
566
567 pdata = dev_get_platdata(ddata->dev);
568 if (pdata && pdata->clkdm_allow_idle)
569 pdata->clkdm_allow_idle(ddata->dev, &ddata->cookie);
570 }
571
/**
 * sysc_init_resets - init rstctrl reset line if configured
 * @ddata: device driver data
 *
 * The reset is optional and shared, so a missing "rstctrl" line leaves
 * ddata->rsts NULL and returns 0. See sysc_rstctrl_reset_deassert().
 */
static int sysc_init_resets(struct sysc *ddata)
{
	ddata->rsts =
		devm_reset_control_get_optional_shared(ddata->dev, "rstctrl");

	return PTR_ERR_OR_ZERO(ddata->rsts);
}
585
/**
 * sysc_parse_and_check_child_range - parses module IO region from ranges
 * @ddata: device driver data
 *
 * In general we only need rev, syss, and sysc registers and not the whole
 * module range. But we do want the offsets for these registers from the
 * module base. This allows us to check them against the legacy hwmod
 * platform data. Let's also check the ranges are configured properly.
 */
static int sysc_parse_and_check_child_range(struct sysc *ddata)
{
	struct device_node *np = ddata->dev->of_node;
	const __be32 *ranges;
	u32 nr_addr, nr_size;
	int len, error;

	ranges = of_get_property(np, "ranges", &len);
	if (!ranges) {
		dev_err(ddata->dev, "missing ranges for %pOF\n", np);

		return -ENOENT;
	}

	/* Convert byte length to cell count */
	len /= sizeof(*ranges);

	if (len < 3) {
		dev_err(ddata->dev, "incomplete ranges for %pOF\n", np);

		return -EINVAL;
	}

	error = of_property_read_u32(np, "#address-cells", &nr_addr);
	if (error)
		return -ENOENT;

	error = of_property_read_u32(np, "#size-cells", &nr_size);
	if (error)
		return -ENOENT;

	/* Only single-cell child address and size are supported */
	if (nr_addr != 1 || nr_size != 1) {
		dev_err(ddata->dev, "invalid ranges for %pOF\n", np);

		return -EINVAL;
	}

	/* Skip the child address cell, translate the parent bus address */
	ranges++;
	ddata->module_pa = of_translate_address(np, ranges++);
	ddata->module_size = be32_to_cpup(ranges);

	return 0;
}
637
/* Interconnect instances to probe before l4_per instances */
static struct resource early_bus_ranges[] = {
	/* am3/4 l4_wkup */
	{ .start = 0x44c00000, .end = 0x44c00000 + 0x300000, },
	/* omap4/5 and dra7 l4_cfg */
	{ .start = 0x4a000000, .end = 0x4a000000 + 0x300000, },
	/* omap4 l4_wkup */
	{ .start = 0x4a300000, .end = 0x4a300000 + 0x30000, },
	/* omap5 and dra7 l4_wkup without dra7 dcan segment */
	{ .start = 0x4ae00000, .end = 0x4ae00000 + 0x30000, },
};

/* Remaining deferral budget for non-critical modules, see below */
static atomic_t sysc_defer = ATOMIC_INIT(10);
651
/**
 * sysc_defer_non_critical - defer non_critical interconnect probing
 * @ddata: device driver data
 *
 * We want to probe l4_cfg and l4_wkup interconnect instances before any
 * l4_per instances as l4_per instances depend on resources on l4_cfg and
 * l4_wkup interconnects. Modules outside the early ranges are deferred
 * until either a critical-range module probes (which clears the budget)
 * or the deferral budget in sysc_defer runs out.
 */
static int sysc_defer_non_critical(struct sysc *ddata)
{
	struct resource *res;
	int i;

	/* Budget exhausted or an early module already probed: stop deferring */
	if (!atomic_read(&sysc_defer))
		return 0;

	for (i = 0; i < ARRAY_SIZE(early_bus_ranges); i++) {
		res = &early_bus_ranges[i];
		if (ddata->module_pa >= res->start &&
		    ddata->module_pa <= res->end) {
			/* Critical module found, no need to defer anymore */
			atomic_set(&sysc_defer, 0);

			return 0;
		}
	}

	atomic_dec_if_positive(&sysc_defer);

	return -EPROBE_DEFER;
}
682
/* Resolved "stdout-path" node, or ERR_PTR(-ENODEV) once lookup failed */
static struct device_node *stdout_path;

/*
 * Resolve /chosen "stdout-path" once and cache the result so the
 * console module can be detected in sysc_check_quirk_stdout().
 */
static void sysc_init_stdout_path(struct sysc *ddata)
{
	struct device_node *np = NULL;
	const char *uart;

	/* Earlier lookup already failed */
	if (IS_ERR(stdout_path))
		return;

	/* Already resolved */
	if (stdout_path)
		return;

	np = of_find_node_by_path("/chosen");
	if (!np)
		goto err;

	/* NOTE(review): the /chosen node reference is never put -- confirm */
	uart = of_get_property(np, "stdout-path", NULL);
	if (!uart)
		goto err;

	np = of_find_node_by_path(uart);
	if (!np)
		goto err;

	stdout_path = np;

	return;

err:
	/* Remember the failure so we don't retry on every probe */
	stdout_path = ERR_PTR(-ENODEV);
}
715
716 static void sysc_check_quirk_stdout(struct sysc *ddata,
717 struct device_node *np)
718 {
719 sysc_init_stdout_path(ddata);
720 if (np != stdout_path)
721 return;
722
723 ddata->cfg.quirks |= SYSC_QUIRK_NO_IDLE_ON_INIT |
724 SYSC_QUIRK_NO_RESET_ON_INIT;
725 }
726
727 /**
728 * sysc_check_one_child - check child configuration
729 * @ddata: device driver data
730 * @np: child device node
731 *
732 * Let's avoid messy situations where we have new interconnect target
733 * node but children have "ti,hwmods". These belong to the interconnect
734 * target node and are managed by this driver.
735 */
736 static void sysc_check_one_child(struct sysc *ddata,
737 struct device_node *np)
738 {
739 const char *name;
740
741 name = of_get_property(np, "ti,hwmods", NULL);
742 if (name && !of_device_is_compatible(np, "ti,sysc"))
743 dev_warn(ddata->dev, "really a child ti,hwmods property?");
744
745 sysc_check_quirk_stdout(ddata, np);
746 sysc_parse_dts_quirks(ddata, np, true);
747 }
748
749 static void sysc_check_children(struct sysc *ddata)
750 {
751 struct device_node *child;
752
753 for_each_child_of_node(ddata->dev->of_node, child)
754 sysc_check_one_child(ddata, child);
755 }
756
757 /*
758 * So far only I2C uses 16-bit read access with clockactivity with revision
759 * in two registers with stride of 4. We can detect this based on the rev
760 * register size to configure things far enough to be able to properly read
761 * the revision register.
762 */
763 static void sysc_check_quirk_16bit(struct sysc *ddata, struct resource *res)
764 {
765 if (resource_size(res) == 8)
766 ddata->cfg.quirks |= SYSC_QUIRK_16BIT | SYSC_QUIRK_USE_CLOCKACT;
767 }
768
/**
 * sysc_parse_one - parses the interconnect target module registers
 * @ddata: device driver data
 * @reg: register to parse
 *
 * A missing named resource is not an error: the offset is recorded as
 * -ENODEV so later code can skip the register.
 */
static int sysc_parse_one(struct sysc *ddata, enum sysc_registers reg)
{
	struct resource *res;
	const char *name;

	switch (reg) {
	case SYSC_REVISION:
	case SYSC_SYSCONFIG:
	case SYSC_SYSSTATUS:
		name = reg_names[reg];
		break;
	default:
		return -EINVAL;
	}

	res = platform_get_resource_byname(to_platform_device(ddata->dev),
					   IORESOURCE_MEM, name);
	if (!res) {
		/* Register not implemented by this module */
		ddata->offsets[reg] = -ENODEV;

		return 0;
	}

	/* Store offset relative to the module base */
	ddata->offsets[reg] = res->start - ddata->module_pa;
	if (reg == SYSC_REVISION)
		sysc_check_quirk_16bit(ddata, res);

	return 0;
}
803
804 static int sysc_parse_registers(struct sysc *ddata)
805 {
806 int i, error;
807
808 for (i = 0; i < SYSC_MAX_REGS; i++) {
809 error = sysc_parse_one(ddata, i);
810 if (error)
811 return error;
812 }
813
814 return 0;
815 }
816
817 /**
818 * sysc_check_registers - check for misconfigured register overlaps
819 * @ddata: device driver data
820 */
821 static int sysc_check_registers(struct sysc *ddata)
822 {
823 int i, j, nr_regs = 0, nr_matches = 0;
824
825 for (i = 0; i < SYSC_MAX_REGS; i++) {
826 if (ddata->offsets[i] < 0)
827 continue;
828
829 if (ddata->offsets[i] > (ddata->module_size - 4)) {
830 dev_err(ddata->dev, "register outside module range");
831
832 return -EINVAL;
833 }
834
835 for (j = 0; j < SYSC_MAX_REGS; j++) {
836 if (ddata->offsets[j] < 0)
837 continue;
838
839 if (ddata->offsets[i] == ddata->offsets[j])
840 nr_matches++;
841 }
842 nr_regs++;
843 }
844
845 if (nr_matches > nr_regs) {
846 dev_err(ddata->dev, "overlapping registers: (%i/%i)",
847 nr_regs, nr_matches);
848
849 return -EINVAL;
850 }
851
852 return 0;
853 }
854
/**
 * sysc_ioremap - ioremap register space for the interconnect target module
 * @ddata: device driver data
 *
 * Note that the interconnect target module registers can be anywhere
 * within the interconnect target module range. For example, SGX has
 * them at offset 0x1fc00 in the 32MB module address space. And cpsw
 * has them at offset 0x1200 in the CPSW_WR child. Usually the
 * interconnect target module registers are at the beginning of
 * the module range though.
 */
static int sysc_ioremap(struct sysc *ddata)
{
	int size;

	/* No known registers: map the whole module range */
	if (ddata->offsets[SYSC_REVISION] < 0 &&
	    ddata->offsets[SYSC_SYSCONFIG] < 0 &&
	    ddata->offsets[SYSC_SYSSTATUS] < 0) {
		size = ddata->module_size;
	} else {
		/* Map up to the highest register offset, at least 1KB */
		size = max3(ddata->offsets[SYSC_REVISION],
			    ddata->offsets[SYSC_SYSCONFIG],
			    ddata->offsets[SYSC_SYSSTATUS]);

		if (size < SZ_1K)
			size = SZ_1K;

		/* Clamp so the final register still fits the module */
		if ((size + sizeof(u32)) > ddata->module_size)
			size = ddata->module_size;
	}

	ddata->module_va = devm_ioremap(ddata->dev,
					ddata->module_pa,
					size + sizeof(u32));
	if (!ddata->module_va)
		return -EIO;

	return 0;
}
894
/**
 * sysc_map_and_check_registers - ioremap and check device registers
 * @ddata: device driver data
 *
 * Parses the module range from "ranges", optionally defers probing,
 * checks children, then parses, maps and validates the module
 * registers. A node without a "reg" property has nothing to map.
 */
static int sysc_map_and_check_registers(struct sysc *ddata)
{
	struct device_node *np = ddata->dev->of_node;
	int error;

	error = sysc_parse_and_check_child_range(ddata);
	if (error)
		return error;

	/* May return -EPROBE_DEFER for non-critical interconnects */
	error = sysc_defer_non_critical(ddata);
	if (error)
		return error;

	sysc_check_children(ddata);

	/* Nothing to map without a reg property */
	if (!of_get_property(np, "reg", NULL))
		return 0;

	error = sysc_parse_registers(ddata);
	if (error)
		return error;

	error = sysc_ioremap(ddata);
	if (error)
		return error;

	error = sysc_check_registers(ddata);
	if (error)
		return error;

	return 0;
}
931
932 /**
933 * sysc_show_rev - read and show interconnect target module revision
934 * @bufp: buffer to print the information to
935 * @ddata: device driver data
936 */
937 static int sysc_show_rev(char *bufp, struct sysc *ddata)
938 {
939 int len;
940
941 if (ddata->offsets[SYSC_REVISION] < 0)
942 return sprintf(bufp, ":NA");
943
944 len = sprintf(bufp, ":%08x", ddata->revision);
945
946 return len;
947 }
948
949 static int sysc_show_reg(struct sysc *ddata,
950 char *bufp, enum sysc_registers reg)
951 {
952 if (ddata->offsets[reg] < 0)
953 return sprintf(bufp, ":NA");
954
955 return sprintf(bufp, ":%x", ddata->offsets[reg]);
956 }
957
958 static int sysc_show_name(char *bufp, struct sysc *ddata)
959 {
960 if (!ddata->name)
961 return 0;
962
963 return sprintf(bufp, ":%s", ddata->name);
964 }
965
/**
 * sysc_show_registers - show information about interconnect target module
 * @ddata: device driver data
 *
 * Emits a single dev_dbg line with the module base address, size,
 * register offsets, revision and name.
 */
static void sysc_show_registers(struct sysc *ddata)
{
	/* NOTE(review): assumes the concatenated pieces fit in 128 bytes */
	char buf[128];
	char *bufp = buf;
	int i;

	for (i = 0; i < SYSC_MAX_REGS; i++)
		bufp += sysc_show_reg(ddata, bufp, i);

	bufp += sysc_show_rev(bufp, ddata);
	bufp += sysc_show_name(bufp, ddata);

	dev_dbg(ddata->dev, "%llx:%x%s\n",
		ddata->module_pa, ddata->module_size,
		buf);
}
986
/**
 * sysc_write_sysconfig - handle sysconfig quirks for register write
 * @ddata: device driver data
 * @value: register value
 *
 * Some modules protect sysconfig behind a lock register; the unlock and
 * lock quirk callbacks bracket the actual write when set.
 */
static void sysc_write_sysconfig(struct sysc *ddata, u32 value)
{
	if (ddata->module_unlock_quirk)
		ddata->module_unlock_quirk(ddata);

	sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], value);

	if (ddata->module_lock_quirk)
		ddata->module_lock_quirk(ddata);
}
1002
/* Mask covering a single idle-mode field in sysconfig */
#define SYSC_IDLE_MASK	(SYSC_NR_IDLEMODES - 1)
/* CLOCKACTIVITY value keeping ick active, see sysc_enable_module() */
#define SYSC_CLOCACT_ICK	2
1005
/*
 * Enable the module: wait for any automatic OCP softreset to finish,
 * then program CLOCKACTIVITY, SIDLE, MIDLE and AUTOIDLE into sysconfig.
 * Caller needs to manage sysc_clkdm_deny_idle() and
 * sysc_clkdm_allow_idle().
 */
static int sysc_enable_module(struct device *dev)
{
	struct sysc *ddata;
	const struct sysc_regbits *regbits;
	u32 reg, idlemodes, best_mode;
	int error;

	ddata = dev_get_drvdata(dev);

	/*
	 * Some modules like DSS reset automatically on idle. Enable optional
	 * reset clocks and wait for OCP softreset to complete.
	 */
	if (ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_IN_RESET) {
		error = sysc_enable_opt_clocks(ddata);
		if (error) {
			dev_err(ddata->dev,
				"Optional clocks failed for enable: %i\n",
				error);
			return error;
		}
	}
	/*
	 * Some modules like i2c and hdq1w have unusable reset status unless
	 * the module reset quirk is enabled. Skip status check on enable.
	 */
	if (!(ddata->cfg.quirks & SYSC_MODULE_QUIRK_ENA_RESETDONE)) {
		error = sysc_wait_softreset(ddata);
		if (error)
			dev_warn(ddata->dev, "OCP softreset timed out\n");
	}
	if (ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_IN_RESET)
		sysc_disable_opt_clocks(ddata);

	/*
	 * Some subsystem private interconnects, like DSS top level module,
	 * need only the automatic OCP softreset handling with no sysconfig
	 * register bits to configure.
	 */
	if (ddata->offsets[SYSC_SYSCONFIG] == -ENODEV)
		return 0;

	regbits = ddata->cap->regbits;
	reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);

	/*
	 * Set CLOCKACTIVITY, we only use it for ick. And we only configure it
	 * based on the SYSC_QUIRK_USE_CLOCKACT flag, not based on the hardware
	 * capabilities. See the old HWMOD_SET_DEFAULT_CLOCKACT flag.
	 */
	if (regbits->clkact_shift >= 0 &&
	    (ddata->cfg.quirks & SYSC_QUIRK_USE_CLOCKACT))
		reg |= SYSC_CLOCACT_ICK << regbits->clkact_shift;

	/* Set SIDLE mode */
	idlemodes = ddata->cfg.sidlemodes;
	if (!idlemodes || regbits->sidle_shift < 0)
		goto set_midle;

	if (ddata->cfg.quirks & (SYSC_QUIRK_SWSUP_SIDLE |
				 SYSC_QUIRK_SWSUP_SIDLE_ACT)) {
		/* Software-supervised idle: force no-idle while enabled */
		best_mode = SYSC_IDLE_NO;
	} else {
		/* Pick the highest supported idle mode bit */
		best_mode = fls(ddata->cfg.sidlemodes) - 1;
		if (best_mode > SYSC_IDLE_MASK) {
			dev_err(dev, "%s: invalid sidlemode\n", __func__);
			return -EINVAL;
		}

		/* Set WAKEUP */
		if (regbits->enwkup_shift >= 0 &&
		    ddata->cfg.sysc_val & BIT(regbits->enwkup_shift))
			reg |= BIT(regbits->enwkup_shift);
	}

	reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift);
	reg |= best_mode << regbits->sidle_shift;
	sysc_write_sysconfig(ddata, reg);

set_midle:
	/* Set MIDLE mode */
	idlemodes = ddata->cfg.midlemodes;
	if (!idlemodes || regbits->midle_shift < 0)
		goto set_autoidle;

	best_mode = fls(ddata->cfg.midlemodes) - 1;
	if (best_mode > SYSC_IDLE_MASK) {
		dev_err(dev, "%s: invalid midlemode\n", __func__);
		return -EINVAL;
	}

	if (ddata->cfg.quirks & SYSC_QUIRK_SWSUP_MSTANDBY)
		best_mode = SYSC_IDLE_NO;

	reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift);
	reg |= best_mode << regbits->midle_shift;
	sysc_write_sysconfig(ddata, reg);

set_autoidle:
	/* Autoidle bit must enabled separately if available */
	if (regbits->autoidle_shift >= 0 &&
	    ddata->cfg.sysc_val & BIT(regbits->autoidle_shift)) {
		reg |= 1 << regbits->autoidle_shift;
		sysc_write_sysconfig(ddata, reg);
	}

	/* Flush posted write */
	sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);

	if (ddata->module_enable_quirk)
		ddata->module_enable_quirk(ddata);

	return 0;
}
1121
1122 static int sysc_best_idle_mode(u32 idlemodes, u32 *best_mode)
1123 {
1124 if (idlemodes & BIT(SYSC_IDLE_SMART_WKUP))
1125 *best_mode = SYSC_IDLE_SMART_WKUP;
1126 else if (idlemodes & BIT(SYSC_IDLE_SMART))
1127 *best_mode = SYSC_IDLE_SMART;
1128 else if (idlemodes & BIT(SYSC_IDLE_FORCE))
1129 *best_mode = SYSC_IDLE_FORCE;
1130 else
1131 return -EINVAL;
1132
1133 return 0;
1134 }
1135
/*
 * Disable the module by programming the deepest supported MIDLE and
 * SIDLE modes (or force-idle for software-supervised quirks) into
 * sysconfig. Caller needs to manage sysc_clkdm_deny_idle() and
 * sysc_clkdm_allow_idle().
 */
static int sysc_disable_module(struct device *dev)
{
	struct sysc *ddata;
	const struct sysc_regbits *regbits;
	u32 reg, idlemodes, best_mode;
	int ret;

	ddata = dev_get_drvdata(dev);
	/* Nothing to program without a sysconfig register */
	if (ddata->offsets[SYSC_SYSCONFIG] == -ENODEV)
		return 0;

	if (ddata->module_disable_quirk)
		ddata->module_disable_quirk(ddata);

	regbits = ddata->cap->regbits;
	reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);

	/* Set MIDLE mode */
	idlemodes = ddata->cfg.midlemodes;
	if (!idlemodes || regbits->midle_shift < 0)
		goto set_sidle;

	ret = sysc_best_idle_mode(idlemodes, &best_mode);
	if (ret) {
		dev_err(dev, "%s: invalid midlemode\n", __func__);
		return ret;
	}

	/* Software-supervised standby: force idle on disable */
	if (ddata->cfg.quirks & (SYSC_QUIRK_SWSUP_MSTANDBY) ||
	    ddata->cfg.quirks & (SYSC_QUIRK_FORCE_MSTANDBY))
		best_mode = SYSC_IDLE_FORCE;

	reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift);
	reg |= best_mode << regbits->midle_shift;
	sysc_write_sysconfig(ddata, reg);

set_sidle:
	/* Set SIDLE mode */
	idlemodes = ddata->cfg.sidlemodes;
	if (!idlemodes || regbits->sidle_shift < 0)
		return 0;

	if (ddata->cfg.quirks & SYSC_QUIRK_SWSUP_SIDLE) {
		best_mode = SYSC_IDLE_FORCE;
	} else {
		ret = sysc_best_idle_mode(idlemodes, &best_mode);
		if (ret) {
			dev_err(dev, "%s: invalid sidlemode\n", __func__);
			return ret;
		}
	}

	reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift);
	reg |= best_mode << regbits->sidle_shift;
	if (regbits->autoidle_shift >= 0 &&
	    ddata->cfg.sysc_val & BIT(regbits->autoidle_shift))
		reg |= 1 << regbits->autoidle_shift;
	sysc_write_sysconfig(ddata, reg);

	/* Flush posted write */
	sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);

	return 0;
}
1201
/*
 * Idle the module through the legacy hwmod platform callback and assert
 * its reset. NOTE(review): a failing idle_module callback is logged but
 * 0 is still returned -- presumably deliberate best-effort, confirm.
 */
static int __maybe_unused sysc_runtime_suspend_legacy(struct device *dev,
						      struct sysc *ddata)
{
	struct ti_sysc_platform_data *pdata;
	int error;

	pdata = dev_get_platdata(ddata->dev);
	if (!pdata)
		return 0;

	if (!pdata->idle_module)
		return -ENODEV;

	error = pdata->idle_module(dev, &ddata->cookie);
	if (error)
		dev_err(dev, "%s: could not idle: %i\n",
			__func__, error);

	reset_control_assert(ddata->rsts);

	return 0;
}
1224
1225 static int __maybe_unused sysc_runtime_resume_legacy(struct device *dev,
1226 struct sysc *ddata)
1227 {
1228 struct ti_sysc_platform_data *pdata;
1229 int error;
1230
1231 pdata = dev_get_platdata(ddata->dev);
1232 if (!pdata)
1233 return 0;
1234
1235 if (!pdata->enable_module)
1236 return -ENODEV;
1237
1238 error = pdata->enable_module(dev, &ddata->cookie);
1239 if (error)
1240 dev_err(dev, "%s: could not enable: %i\n",
1241 __func__, error);
1242
1243 reset_control_deassert(ddata->rsts);
1244
1245 return 0;
1246 }
1247
/*
 * Runtime suspend: idle the target module, gate its clocks and assert
 * the rstctrl reset. Clockdomain autoidle is blocked while the module
 * is being reconfigured and re-allowed on all exit paths.
 */
static int __maybe_unused sysc_runtime_suspend(struct device *dev)
{
	struct sysc *ddata;
	int error = 0;

	ddata = dev_get_drvdata(dev);

	/* Nothing to do if already suspended */
	if (!ddata->enabled)
		return 0;

	sysc_clkdm_deny_idle(ddata);

	/* Idle the module first, via pdata callbacks or sysconfig */
	if (ddata->legacy_mode) {
		error = sysc_runtime_suspend_legacy(dev, ddata);
		if (error)
			goto err_allow_idle;
	} else {
		error = sysc_disable_module(dev);
		if (error)
			goto err_allow_idle;
	}

	/* Only gate clocks after the module has idled */
	sysc_disable_main_clocks(ddata);

	if (sysc_opt_clks_needed(ddata))
		sysc_disable_opt_clocks(ddata);

	ddata->enabled = false;

err_allow_idle:
	sysc_clkdm_allow_idle(ddata);

	reset_control_assert(ddata->rsts);

	return error;
}
1284
/*
 * Runtime resume: ungate clocks, deassert reset and enable the target
 * module, in the reverse order of sysc_runtime_suspend(). On error the
 * already acquired resources are released via the labels below.
 */
static int __maybe_unused sysc_runtime_resume(struct device *dev)
{
	struct sysc *ddata;
	int error = 0;

	ddata = dev_get_drvdata(dev);

	/* Nothing to do if already enabled */
	if (ddata->enabled)
		return 0;


	sysc_clkdm_deny_idle(ddata);

	/* Optional clocks must be on before the main clocks and reset */
	if (sysc_opt_clks_needed(ddata)) {
		error = sysc_enable_opt_clocks(ddata);
		if (error)
			goto err_allow_idle;
	}

	error = sysc_enable_main_clocks(ddata);
	if (error)
		goto err_opt_clocks;

	reset_control_deassert(ddata->rsts);

	/* Enable the module itself, via pdata callbacks or sysconfig */
	if (ddata->legacy_mode) {
		error = sysc_runtime_resume_legacy(dev, ddata);
		if (error)
			goto err_main_clocks;
	} else {
		error = sysc_enable_module(dev);
		if (error)
			goto err_main_clocks;
	}

	ddata->enabled = true;

	sysc_clkdm_allow_idle(ddata);

	return 0;

err_main_clocks:
	sysc_disable_main_clocks(ddata);
err_opt_clocks:
	if (sysc_opt_clks_needed(ddata))
		sysc_disable_opt_clocks(ddata);
err_allow_idle:
	sysc_clkdm_allow_idle(ddata);

	return error;
}
1336
1337 static int __maybe_unused sysc_noirq_suspend(struct device *dev)
1338 {
1339 struct sysc *ddata;
1340
1341 ddata = dev_get_drvdata(dev);
1342
1343 if (ddata->cfg.quirks &
1344 (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE))
1345 return 0;
1346
1347 return pm_runtime_force_suspend(dev);
1348 }
1349
1350 static int __maybe_unused sysc_noirq_resume(struct device *dev)
1351 {
1352 struct sysc *ddata;
1353
1354 ddata = dev_get_drvdata(dev);
1355
1356 if (ddata->cfg.quirks &
1357 (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE))
1358 return 0;
1359
1360 return pm_runtime_force_resume(dev);
1361 }
1362
/* Runtime PM drives the module; noirq hooks cover system suspend */
static const struct dev_pm_ops sysc_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(sysc_noirq_suspend, sysc_noirq_resume)
	SET_RUNTIME_PM_OPS(sysc_runtime_suspend,
			   sysc_runtime_resume,
			   NULL)
};
1369
/* Module revision register based quirks */
struct sysc_revision_quirk {
	const char *name;	/* detected module name */
	u32 base;		/* module physical base, 0 matches any */
	int rev_offset;		/* revision register offset, -ENODEV if none */
	int sysc_offset;	/* sysconfig register offset, -ENODEV if none */
	int syss_offset;	/* sysstatus register offset, -ENODEV if none */
	u32 revision;		/* expected revision register value */
	u32 revision_mask;	/* mask applied before comparing revision */
	u32 quirks;		/* SYSC_QUIRK_* and SYSC_MODULE_QUIRK_* flags */
};
1381
/* Convenience initializer for struct sysc_revision_quirk table entries */
#define SYSC_QUIRK(optname, optbase, optrev, optsysc, optsyss,		\
		   optrev_val, optrevmask, optquirkmask)		\
	{								\
		.name = (optname),					\
		.base = (optbase),					\
		.rev_offset = (optrev),					\
		.sysc_offset = (optsysc),				\
		.syss_offset = (optsyss),				\
		.revision = (optrev_val),				\
		.revision_mask = (optrevmask),				\
		.quirks = (optquirkmask),				\
	}
1394
static const struct sysc_revision_quirk sysc_revision_quirks[] = {
	/* These drivers need to be fixed to not use pm_runtime_irq_safe() */
	SYSC_QUIRK("gpio", 0, 0, 0x10, 0x114, 0x50600801, 0xffff00ff,
		   SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_OPT_CLKS_IN_RESET),
	SYSC_QUIRK("sham", 0, 0x100, 0x110, 0x114, 0x40000c03, 0xffffffff,
		   SYSC_QUIRK_LEGACY_IDLE),
	SYSC_QUIRK("smartreflex", 0, -ENODEV, 0x24, -ENODEV, 0x00000000, 0xffffffff,
		   SYSC_QUIRK_LEGACY_IDLE),
	SYSC_QUIRK("smartreflex", 0, -ENODEV, 0x38, -ENODEV, 0x00000000, 0xffffffff,
		   SYSC_QUIRK_LEGACY_IDLE),
	SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000046, 0xffffffff,
		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
	SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff,
		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
	/* Uarts on omap4 and later */
	SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x50411e03, 0xffff00ff,
		   SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
	SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47422e03, 0xffffffff,
		   SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),

	/* Quirks that need to be set based on the module address */
	SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -ENODEV, 0x50000800, 0xffffffff,
		   SYSC_QUIRK_EXT_OPT_CLOCK | SYSC_QUIRK_NO_RESET_ON_INIT |
		   SYSC_QUIRK_SWSUP_SIDLE),

	/* Quirks that need to be set based on detected module */
	SYSC_QUIRK("aess", 0, 0, 0x10, -ENODEV, 0x40000000, 0xffffffff,
		   SYSC_MODULE_QUIRK_AESS),
	SYSC_QUIRK("dcan", 0x48480000, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff,
		   SYSC_QUIRK_CLKDM_NOAUTO),
	SYSC_QUIRK("dss", 0x4832a000, 0, 0x10, 0x14, 0x00000020, 0xffffffff,
		   SYSC_QUIRK_OPT_CLKS_IN_RESET | SYSC_MODULE_QUIRK_DSS_RESET),
	SYSC_QUIRK("dss", 0x58000000, 0, -ENODEV, 0x14, 0x00000040, 0xffffffff,
		   SYSC_QUIRK_OPT_CLKS_IN_RESET | SYSC_MODULE_QUIRK_DSS_RESET),
	SYSC_QUIRK("dss", 0x58000000, 0, -ENODEV, 0x14, 0x00000061, 0xffffffff,
		   SYSC_QUIRK_OPT_CLKS_IN_RESET | SYSC_MODULE_QUIRK_DSS_RESET),
	SYSC_QUIRK("dwc3", 0x48880000, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff,
		   SYSC_QUIRK_CLKDM_NOAUTO),
	SYSC_QUIRK("dwc3", 0x488c0000, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff,
		   SYSC_QUIRK_CLKDM_NOAUTO),
	SYSC_QUIRK("gpmc", 0, 0, 0x10, 0x14, 0x00000060, 0xffffffff,
		   SYSC_QUIRK_GPMC_DEBUG),
	SYSC_QUIRK("hdmi", 0, 0, 0x10, -ENODEV, 0x50030200, 0xffffffff,
		   SYSC_QUIRK_OPT_CLKS_NEEDED),
	SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x00000006, 0xffffffff,
		   SYSC_MODULE_QUIRK_HDQ1W | SYSC_MODULE_QUIRK_ENA_RESETDONE),
	SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x0000000a, 0xffffffff,
		   SYSC_MODULE_QUIRK_HDQ1W | SYSC_MODULE_QUIRK_ENA_RESETDONE),
	SYSC_QUIRK("i2c", 0, 0, 0x20, 0x10, 0x00000036, 0x000000ff,
		   SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
	SYSC_QUIRK("i2c", 0, 0, 0x20, 0x10, 0x0000003c, 0x000000ff,
		   SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
	SYSC_QUIRK("i2c", 0, 0, 0x20, 0x10, 0x00000040, 0x000000ff,
		   SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
	SYSC_QUIRK("i2c", 0, 0, 0x10, 0x90, 0x5040000a, 0xfffff0f0,
		   SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
	SYSC_QUIRK("gpu", 0x50000000, 0x14, -ENODEV, -ENODEV, 0x00010201, 0xffffffff, 0),
	SYSC_QUIRK("gpu", 0x50000000, 0xfe00, 0xfe10, -ENODEV, 0x40000000 , 0xffffffff,
		   SYSC_MODULE_QUIRK_SGX),
	SYSC_QUIRK("lcdc", 0, 0, 0x54, -ENODEV, 0x4f201000, 0xffffffff,
		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
	SYSC_QUIRK("rtc", 0, 0x74, 0x78, -ENODEV, 0x4eb01908, 0xffff00f0,
		   SYSC_MODULE_QUIRK_RTC_UNLOCK),
	SYSC_QUIRK("tptc", 0, 0, 0x10, -ENODEV, 0x40006c00, 0xffffefff,
		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
	SYSC_QUIRK("tptc", 0, 0, -ENODEV, -ENODEV, 0x40007c00, 0xffffffff,
		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
	SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, 0x14, 0x50700100, 0xffffffff,
		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
	SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -ENODEV, 0x50700101, 0xffffffff,
		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
	SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000050,
		   0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
	SYSC_QUIRK("usb_otg_hs", 0, 0, 0x10, -ENODEV, 0x4ea2080d, 0xffffffff,
		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
	SYSC_QUIRK("wdt", 0, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0,
		   SYSC_MODULE_QUIRK_WDT),
	/* PRUSS on am3, am4 and am5 */
	SYSC_QUIRK("pruss", 0, 0x26000, 0x26004, -ENODEV, 0x47000000, 0xff000000,
		   SYSC_MODULE_QUIRK_PRUSS),
	/* Watchdog on am3 and am4 */
	SYSC_QUIRK("wdt", 0x44e35000, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0,
		   SYSC_MODULE_QUIRK_WDT | SYSC_QUIRK_SWSUP_SIDLE),

#ifdef DEBUG
	/* Detection-only entries (quirks = 0), compiled in for DEBUG only */
	SYSC_QUIRK("adc", 0, 0, 0x10, -ENODEV, 0x47300001, 0xffffffff, 0),
	SYSC_QUIRK("atl", 0, 0, -ENODEV, -ENODEV, 0x0a070100, 0xffffffff, 0),
	SYSC_QUIRK("cm", 0, 0, -ENODEV, -ENODEV, 0x40000301, 0xffffffff, 0),
	SYSC_QUIRK("control", 0, 0, 0x10, -ENODEV, 0x40000900, 0xffffffff, 0),
	SYSC_QUIRK("cpgmac", 0, 0x1200, 0x1208, 0x1204, 0x4edb1902,
		   0xffff00f0, 0),
	SYSC_QUIRK("dcan", 0, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff, 0),
	SYSC_QUIRK("dcan", 0, 0x20, -ENODEV, -ENODEV, 0x4edb1902, 0xffffffff, 0),
	SYSC_QUIRK("dispc", 0x4832a400, 0, 0x10, 0x14, 0x00000030, 0xffffffff, 0),
	SYSC_QUIRK("dispc", 0x58001000, 0, 0x10, 0x14, 0x00000040, 0xffffffff, 0),
	SYSC_QUIRK("dispc", 0x58001000, 0, 0x10, 0x14, 0x00000051, 0xffffffff, 0),
	SYSC_QUIRK("dmic", 0, 0, 0x10, -ENODEV, 0x50010000, 0xffffffff, 0),
	SYSC_QUIRK("dsi", 0x58004000, 0, 0x10, 0x14, 0x00000030, 0xffffffff, 0),
	SYSC_QUIRK("dsi", 0x58005000, 0, 0x10, 0x14, 0x00000030, 0xffffffff, 0),
	SYSC_QUIRK("dsi", 0x58005000, 0, 0x10, 0x14, 0x00000040, 0xffffffff, 0),
	SYSC_QUIRK("dsi", 0x58009000, 0, 0x10, 0x14, 0x00000040, 0xffffffff, 0),
	SYSC_QUIRK("dwc3", 0, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff, 0),
	SYSC_QUIRK("d2d", 0x4a0b6000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
	SYSC_QUIRK("d2d", 0x4a0cd000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
	SYSC_QUIRK("elm", 0x48080000, 0, 0x10, 0x14, 0x00000020, 0xffffffff, 0),
	SYSC_QUIRK("emif", 0, 0, -ENODEV, -ENODEV, 0x40441403, 0xffff0fff, 0),
	SYSC_QUIRK("emif", 0, 0, -ENODEV, -ENODEV, 0x50440500, 0xffffffff, 0),
	SYSC_QUIRK("epwmss", 0, 0, 0x4, -ENODEV, 0x47400001, 0xffffffff, 0),
	SYSC_QUIRK("gpu", 0, 0x1fc00, 0x1fc10, -ENODEV, 0, 0, 0),
	SYSC_QUIRK("gpu", 0, 0xfe00, 0xfe10, -ENODEV, 0x40000000 , 0xffffffff, 0),
	SYSC_QUIRK("hdmi", 0, 0, 0x10, -ENODEV, 0x50031d00, 0xffffffff, 0),
	SYSC_QUIRK("hsi", 0, 0, 0x10, 0x14, 0x50043101, 0xffffffff, 0),
	SYSC_QUIRK("iss", 0, 0, 0x10, -ENODEV, 0x40000101, 0xffffffff, 0),
	SYSC_QUIRK("keypad", 0x4a31c000, 0, 0x10, 0x14, 0x00000020, 0xffffffff, 0),
	SYSC_QUIRK("mcasp", 0, 0, 0x4, -ENODEV, 0x44306302, 0xffffffff, 0),
	SYSC_QUIRK("mcasp", 0, 0, 0x4, -ENODEV, 0x44307b02, 0xffffffff, 0),
	SYSC_QUIRK("mcbsp", 0, -ENODEV, 0x8c, -ENODEV, 0, 0, 0),
	SYSC_QUIRK("mcspi", 0, 0, 0x10, -ENODEV, 0x40300a0b, 0xffff00ff, 0),
	SYSC_QUIRK("mcspi", 0, 0, 0x110, 0x114, 0x40300a0b, 0xffffffff, 0),
	SYSC_QUIRK("mailbox", 0, 0, 0x10, -ENODEV, 0x00000400, 0xffffffff, 0),
	SYSC_QUIRK("m3", 0, 0, -ENODEV, -ENODEV, 0x5f580105, 0x0fff0f00, 0),
	SYSC_QUIRK("ocp2scp", 0, 0, 0x10, 0x14, 0x50060005, 0xfffffff0, 0),
	SYSC_QUIRK("ocp2scp", 0, 0, -ENODEV, -ENODEV, 0x50060007, 0xffffffff, 0),
	SYSC_QUIRK("padconf", 0, 0, 0x10, -ENODEV, 0x4fff0800, 0xffffffff, 0),
	SYSC_QUIRK("padconf", 0, 0, -ENODEV, -ENODEV, 0x40001100, 0xffffffff, 0),
	SYSC_QUIRK("pcie", 0x51000000, -ENODEV, -ENODEV, -ENODEV, 0, 0, 0),
	SYSC_QUIRK("pcie", 0x51800000, -ENODEV, -ENODEV, -ENODEV, 0, 0, 0),
	SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x40000100, 0xffffffff, 0),
	SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x00004102, 0xffffffff, 0),
	SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x40000400, 0xffffffff, 0),
	SYSC_QUIRK("rfbi", 0x4832a800, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
	SYSC_QUIRK("rfbi", 0x58002000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
	SYSC_QUIRK("sata", 0, 0xfc, 0x1100, -ENODEV, 0x5e412000, 0xffffffff, 0),
	SYSC_QUIRK("scm", 0, 0, 0x10, -ENODEV, 0x40000900, 0xffffffff, 0),
	SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x4e8b0100, 0xffffffff, 0),
	SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x4f000100, 0xffffffff, 0),
	SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x40000900, 0xffffffff, 0),
	SYSC_QUIRK("scrm", 0, 0, -ENODEV, -ENODEV, 0x00000010, 0xffffffff, 0),
	SYSC_QUIRK("sdio", 0, 0, 0x10, -ENODEV, 0x40202301, 0xffff0ff0, 0),
	SYSC_QUIRK("sdio", 0, 0x2fc, 0x110, 0x114, 0x31010000, 0xffffffff, 0),
	SYSC_QUIRK("sdma", 0, 0, 0x2c, 0x28, 0x00010900, 0xffffffff, 0),
	SYSC_QUIRK("slimbus", 0, 0, 0x10, -ENODEV, 0x40000902, 0xffffffff, 0),
	SYSC_QUIRK("slimbus", 0, 0, 0x10, -ENODEV, 0x40002903, 0xffffffff, 0),
	SYSC_QUIRK("spinlock", 0, 0, 0x10, -ENODEV, 0x50020000, 0xffffffff, 0),
	SYSC_QUIRK("rng", 0, 0x1fe0, 0x1fe4, -ENODEV, 0x00000020, 0xffffffff, 0),
	SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000013, 0xffffffff, 0),
	SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000015, 0xffffffff, 0),
	/* Some timers on omap4 and later */
	SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x50002100, 0xffffffff, 0),
	SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x4fff1301, 0xffff00ff, 0),
	SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000040, 0xffffffff, 0),
	SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000011, 0xffffffff, 0),
	SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000060, 0xffffffff, 0),
	SYSC_QUIRK("tpcc", 0, 0, -ENODEV, -ENODEV, 0x40014c00, 0xffffffff, 0),
	SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000004, 0xffffffff, 0),
	SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000008, 0xffffffff, 0),
	SYSC_QUIRK("venc", 0x58003000, 0, -ENODEV, -ENODEV, 0x00000002, 0xffffffff, 0),
	SYSC_QUIRK("vfpe", 0, 0, 0x104, -ENODEV, 0x4d001200, 0xffffffff, 0),
#endif
};
1555
/*
 * Early quirks based on module base and register offsets only that are
 * needed before the module revision can be read
 */
static void sysc_init_early_quirks(struct sysc *ddata)
{
	const struct sysc_revision_quirk *q;
	int i;

	for (i = 0; i < ARRAY_SIZE(sysc_revision_quirks); i++) {
		q = &sysc_revision_quirks[i];

		/* Only address-based entries apply before revision is known */
		if (!q->base)
			continue;

		if (q->base != ddata->module_pa)
			continue;

		if (q->rev_offset != ddata->offsets[SYSC_REVISION])
			continue;

		if (q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG])
			continue;

		if (q->syss_offset != ddata->offsets[SYSC_SYSSTATUS])
			continue;

		/* All offsets match: adopt the entry's name and quirks */
		ddata->name = q->name;
		ddata->cfg.quirks |= q->quirks;
	}
}
1587
/* Quirks that also consider the revision register value */
static void sysc_init_revision_quirks(struct sysc *ddata)
{
	const struct sysc_revision_quirk *q;
	int i;

	for (i = 0; i < ARRAY_SIZE(sysc_revision_quirks); i++) {
		q = &sysc_revision_quirks[i];

		/* A zero base matches any module address */
		if (q->base && q->base != ddata->module_pa)
			continue;

		if (q->rev_offset != ddata->offsets[SYSC_REVISION])
			continue;

		if (q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG])
			continue;

		if (q->syss_offset != ddata->offsets[SYSC_SYSSTATUS])
			continue;

		/* Match on exact revision or on the masked revision */
		if (q->revision == ddata->revision ||
		    (q->revision & q->revision_mask) ==
		    (ddata->revision & q->revision_mask)) {
			ddata->name = q->name;
			ddata->cfg.quirks |= q->quirks;
		}
	}
}
1617
/*
 * DSS needs dispc outputs disabled to reset modules. Returns mask of
 * enabled DSS interrupts. Eventually we may be able to do this on
 * dispc init rather than top-level DSS init.
 */
static u32 sysc_quirk_dispc(struct sysc *ddata, int dispc_offset,
			    bool disable)
{
	bool lcd_en, digit_en, lcd2_en = false, lcd3_en = false;
	const int lcd_en_mask = BIT(0), digit_en_mask = BIT(1);
	int manager_count;
	bool framedonetv_irq = true;
	u32 val, irq_mask = 0;

	/* Number of dispc managers and TV FRAMEDONE support vary per SoC */
	switch (sysc_soc->soc) {
	case SOC_2420 ... SOC_3630:
		manager_count = 2;
		framedonetv_irq = false;
		break;
	case SOC_4430 ... SOC_4470:
		manager_count = 3;
		break;
	case SOC_5430:
	case SOC_DRA7:
		manager_count = 4;
		break;
	case SOC_AM4:
		manager_count = 1;
		framedonetv_irq = false;
		break;
	case SOC_UNKNOWN:
	default:
		return 0;
	}

	/* Remap the whole module range to be able to reset dispc outputs */
	devm_iounmap(ddata->dev, ddata->module_va);
	ddata->module_va = devm_ioremap(ddata->dev,
					ddata->module_pa,
					ddata->module_size);
	if (!ddata->module_va)
		return -EIO;	/*
				 * NOTE(review): negative errno in a u32
				 * return; callers only test for non-zero
				 * irq_mask — confirm this is intended
				 */

	/* DISP_CONTROL */
	val = sysc_read(ddata, dispc_offset + 0x40);
	lcd_en = val & lcd_en_mask;
	digit_en = val & digit_en_mask;
	if (lcd_en)
		irq_mask |= BIT(0);	/* FRAMEDONE */
	if (digit_en) {
		if (framedonetv_irq)
			irq_mask |= BIT(24);	/* FRAMEDONETV */
		else
			irq_mask |= BIT(2) | BIT(3);	/* EVSYNC bits */
	}
	/* Bitwise & on 0/1 bools behaves as logical AND here */
	if (disable & (lcd_en | digit_en))
		sysc_write(ddata, dispc_offset + 0x40,
			   val & ~(lcd_en_mask | digit_en_mask));

	if (manager_count <= 2)
		return irq_mask;

	/* DISPC_CONTROL2 */
	val = sysc_read(ddata, dispc_offset + 0x238);
	lcd2_en = val & lcd_en_mask;
	if (lcd2_en)
		irq_mask |= BIT(22);	/* FRAMEDONE2 */
	if (disable && lcd2_en)
		sysc_write(ddata, dispc_offset + 0x238,
			   val & ~lcd_en_mask);

	if (manager_count <= 3)
		return irq_mask;

	/* DISPC_CONTROL3 */
	val = sysc_read(ddata, dispc_offset + 0x848);
	lcd3_en = val & lcd_en_mask;
	if (lcd3_en)
		irq_mask |= BIT(30);	/* FRAMEDONE3 */
	if (disable && lcd3_en)
		sysc_write(ddata, dispc_offset + 0x848,
			   val & ~lcd_en_mask);

	return irq_mask;
}
1703
1704 /* DSS needs child outputs disabled and SDI registers cleared for reset */
1705 static void sysc_pre_reset_quirk_dss(struct sysc *ddata)
1706 {
1707 const int dispc_offset = 0x1000;
1708 int error;
1709 u32 irq_mask, val;
1710
1711 /* Get enabled outputs */
1712 irq_mask = sysc_quirk_dispc(ddata, dispc_offset, false);
1713 if (!irq_mask)
1714 return;
1715
1716 /* Clear IRQSTATUS */
1717 sysc_write(ddata, dispc_offset + 0x18, irq_mask);
1718
1719 /* Disable outputs */
1720 val = sysc_quirk_dispc(ddata, dispc_offset, true);
1721
1722 /* Poll IRQSTATUS */
1723 error = readl_poll_timeout(ddata->module_va + dispc_offset + 0x18,
1724 val, val != irq_mask, 100, 50);
1725 if (error)
1726 dev_warn(ddata->dev, "%s: timed out %08x !+ %08x\n",
1727 __func__, val, irq_mask);
1728
1729 if (sysc_soc->soc == SOC_3430) {
1730 /* Clear DSS_SDI_CONTROL */
1731 sysc_write(ddata, 0x44, 0);
1732
1733 /* Clear DSS_PLL_CONTROL */
1734 sysc_write(ddata, 0x48, 0);
1735 }
1736
1737 /* Clear DSS_CONTROL to switch DSS clock sources to PRCM if not */
1738 sysc_write(ddata, 0x40, 0);
1739 }
1740
1741 /* 1-wire needs module's internal clocks enabled for reset */
1742 static void sysc_pre_reset_quirk_hdq1w(struct sysc *ddata)
1743 {
1744 int offset = 0x0c; /* HDQ_CTRL_STATUS */
1745 u16 val;
1746
1747 val = sysc_read(ddata, offset);
1748 val |= BIT(5);
1749 sysc_write(ddata, offset, val);
1750 }
1751
/* AESS (Audio Engine SubSystem) needs autogating set after enable */
static void sysc_module_enable_quirk_aess(struct sysc *ddata)
{
	const int autogating = 0x7c;	/* AESS_AUTO_GATING_ENABLE */

	sysc_write(ddata, autogating, 1);
}
1759
1760 /* I2C needs to be disabled for reset */
1761 static void sysc_clk_quirk_i2c(struct sysc *ddata, bool enable)
1762 {
1763 int offset;
1764 u16 val;
1765
1766 /* I2C_CON, omap2/3 is different from omap4 and later */
1767 if ((ddata->revision & 0xffffff00) == 0x001f0000)
1768 offset = 0x24;
1769 else
1770 offset = 0xa4;
1771
1772 /* I2C_EN */
1773 val = sysc_read(ddata, offset);
1774 if (enable)
1775 val |= BIT(15);
1776 else
1777 val &= ~BIT(15);
1778 sysc_write(ddata, offset, val);
1779 }
1780
/* Disable I2C before triggering softreset, see sysc_clk_quirk_i2c() */
static void sysc_pre_reset_quirk_i2c(struct sysc *ddata)
{
	sysc_clk_quirk_i2c(ddata, false);
}
1785
/* Re-enable I2C after softreset, see sysc_clk_quirk_i2c() */
static void sysc_post_reset_quirk_i2c(struct sysc *ddata)
{
	sysc_clk_quirk_i2c(ddata, true);
}
1790
/* RTC on am3 and 4 needs to be unlocked and locked for sysconfig */
static void sysc_quirk_rtc(struct sysc *ddata, bool lock)
{
	u32 val, kick0_val = 0, kick1_val = 0;
	unsigned long flags;
	int error;

	/* Writing zeros locks; the magic kick values unlock */
	if (!lock) {
		kick0_val = 0x83e70b13;
		kick1_val = 0x95a4f1e0;
	}

	/* IRQs off so the kick writes land within the non-busy window */
	local_irq_save(flags);
	/* RTC_STATUS BUSY bit may stay active for 1/32768 seconds (~30 usec) */
	error = readl_poll_timeout_atomic(ddata->module_va + 0x44, val,
					  !(val & BIT(0)), 100, 50);
	if (error)
		dev_warn(ddata->dev, "rtc busy timeout\n");
	/* Now we have ~15 microseconds to read/write various registers */
	sysc_write(ddata, 0x6c, kick0_val);
	sysc_write(ddata, 0x70, kick1_val);
	local_irq_restore(flags);
}
1814
/* Unlock the RTC so its sysconfig register can be written */
static void sysc_module_unlock_quirk_rtc(struct sysc *ddata)
{
	sysc_quirk_rtc(ddata, false);
}
1819
/* Re-lock the RTC after sysconfig updates */
static void sysc_module_lock_quirk_rtc(struct sysc *ddata)
{
	sysc_quirk_rtc(ddata, true);
}
1824
/* 36xx SGX needs a quirk for to bypass OCP IPG interrupt logic */
static void sysc_module_enable_quirk_sgx(struct sysc *ddata)
{
	const int debug_config = 0xff08;	/* OCP_DEBUG_CONFIG */

	/* Set THALIA_INT_BYPASS (bit 31) */
	sysc_write(ddata, debug_config, BIT(31));
}
1833
/* Watchdog timer needs a disable sequence after reset */
static void sysc_reset_done_quirk_wdt(struct sysc *ddata)
{
	int wps, spr, error;
	u32 val;

	wps = 0x34;	/* posted write status register offset */
	spr = 0x48;	/* start/stop register offset */

	/* First half of the stop sequence; wait for bit 4 to clear */
	sysc_write(ddata, spr, 0xaaaa);
	error = readl_poll_timeout(ddata->module_va + wps, val,
				   !(val & 0x10), 100,
				   MAX_MODULE_SOFTRESET_WAIT);
	if (error)
		dev_warn(ddata->dev, "wdt disable step1 failed\n");

	/* Second half of the stop sequence */
	sysc_write(ddata, spr, 0x5555);
	error = readl_poll_timeout(ddata->module_va + wps, val,
				   !(val & 0x10), 100,
				   MAX_MODULE_SOFTRESET_WAIT);
	if (error)
		dev_warn(ddata->dev, "wdt disable step2 failed\n");
}
1857
1858 /* PRUSS needs to set MSTANDBY_INIT inorder to idle properly */
1859 static void sysc_module_disable_quirk_pruss(struct sysc *ddata)
1860 {
1861 u32 reg;
1862
1863 reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
1864 reg |= SYSC_PRUSS_STANDBY_INIT;
1865 sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
1866 }
1867
/*
 * Install the quirk handler callbacks for a detected module. Only
 * applies to non-legacy mode modules with a detected name. Note that
 * several branches return early, so a module gets at most one group
 * of handlers.
 */
static void sysc_init_module_quirks(struct sysc *ddata)
{
	if (ddata->legacy_mode || !ddata->name)
		return;

	if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_HDQ1W) {
		ddata->pre_reset_quirk = sysc_pre_reset_quirk_hdq1w;

		return;
	}

#ifdef CONFIG_OMAP_GPMC_DEBUG
	/* GPMC debugging requires keeping bootloader GPMC state */
	if (ddata->cfg.quirks & SYSC_QUIRK_GPMC_DEBUG) {
		ddata->cfg.quirks |= SYSC_QUIRK_NO_RESET_ON_INIT;

		return;
	}
#endif

	if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_I2C) {
		ddata->pre_reset_quirk = sysc_pre_reset_quirk_i2c;
		ddata->post_reset_quirk = sysc_post_reset_quirk_i2c;

		return;
	}

	if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_AESS)
		ddata->module_enable_quirk = sysc_module_enable_quirk_aess;

	if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_DSS_RESET)
		ddata->pre_reset_quirk = sysc_pre_reset_quirk_dss;

	if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_RTC_UNLOCK) {
		ddata->module_unlock_quirk = sysc_module_unlock_quirk_rtc;
		ddata->module_lock_quirk = sysc_module_lock_quirk_rtc;

		return;
	}

	if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_SGX)
		ddata->module_enable_quirk = sysc_module_enable_quirk_sgx;

	if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_WDT) {
		ddata->reset_done_quirk = sysc_reset_done_quirk_wdt;
		/* The same disable sequence is needed on module disable */
		ddata->module_disable_quirk = sysc_reset_done_quirk_wdt;
	}

	if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_PRUSS)
		ddata->module_disable_quirk = sysc_module_disable_quirk_pruss;
}
1918
/*
 * Hand the module's fck/ick to the legacy platform code so it can look
 * up the clockdomain. Only used when platform data provides an
 * init_clockdomain callback.
 */
static int sysc_clockdomain_init(struct sysc *ddata)
{
	struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev);
	struct clk *fck = NULL, *ick = NULL;
	int error;

	if (!pdata || !pdata->init_clockdomain)
		return 0;

	/*
	 * NOTE(review): nr_clocks > 2 matches no case and passes NULL
	 * clocks to the callback — confirm that is intended.
	 */
	switch (ddata->nr_clocks) {
	case 2:
		ick = ddata->clocks[SYSC_ICK];
		fallthrough;
	case 1:
		fck = ddata->clocks[SYSC_FCK];
		break;
	case 0:
		return 0;
	}

	/* -ENODEV from the callback means no clockdomain, not a failure */
	error = pdata->init_clockdomain(ddata->dev, fck, ick, &ddata->cookie);
	if (!error || error == -ENODEV)
		return 0;

	return error;
}
1945
1946 /*
1947 * Note that pdata->init_module() typically does a reset first. After
1948 * pdata->init_module() is done, PM runtime can be used for the interconnect
1949 * target module.
1950 */
1951 static int sysc_legacy_init(struct sysc *ddata)
1952 {
1953 struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev);
1954 int error;
1955
1956 if (!pdata || !pdata->init_module)
1957 return 0;
1958
1959 error = pdata->init_module(ddata->dev, ddata->mdata, &ddata->cookie);
1960 if (error == -EEXIST)
1961 error = 0;
1962
1963 return error;
1964 }
1965
/*
 * Note that the caller must ensure the interconnect target module is enabled
 * before calling reset. Otherwise reset will not complete.
 */
static int sysc_reset(struct sysc *ddata)
{
	int sysc_offset, sysc_val, error;
	u32 sysc_mask;

	sysc_offset = ddata->offsets[SYSC_SYSCONFIG];

	/* Nothing to do for legacy mode, no SOFTRESET bit, or opt-out quirk */
	if (ddata->legacy_mode ||
	    ddata->cap->regbits->srst_shift < 0 ||
	    ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)
		return 0;

	sysc_mask = BIT(ddata->cap->regbits->srst_shift);

	if (ddata->pre_reset_quirk)
		ddata->pre_reset_quirk(ddata);

	/* Trigger SOFTRESET if the module has a sysconfig register */
	if (sysc_offset >= 0) {
		sysc_val = sysc_read_sysconfig(ddata);
		sysc_val |= sysc_mask;
		sysc_write(ddata, sysc_offset, sysc_val);
	}

	/* Some modules need a fixed delay before reset status is valid */
	if (ddata->cfg.srst_udelay)
		usleep_range(ddata->cfg.srst_udelay,
			     ddata->cfg.srst_udelay * 2);

	if (ddata->post_reset_quirk)
		ddata->post_reset_quirk(ddata);

	error = sysc_wait_softreset(ddata);
	if (error)
		dev_warn(ddata->dev, "OCP softreset timed out\n");

	if (ddata->reset_done_quirk)
		ddata->reset_done_quirk(ddata);

	return error;
}
2009
/*
 * At this point the module is configured enough to read the revision but
 * module may not be completely configured yet to use PM runtime. Enable
 * all clocks directly during init to configure the quirks needed for PM
 * runtime based on the revision register.
 */
static int sysc_init_module(struct sysc *ddata)
{
	bool rstctrl_deasserted = false;
	int error = 0;

	error = sysc_clockdomain_init(ddata);
	if (error)
		return error;

	sysc_clkdm_deny_idle(ddata);

	/*
	 * Always enable clocks. The bootloader may or may not have enabled
	 * the related clocks.
	 */
	error = sysc_enable_opt_clocks(ddata);
	if (error)
		/* NOTE(review): returns with clkdm idle still denied — confirm */
		return error;

	error = sysc_enable_main_clocks(ddata);
	if (error)
		goto err_opt_clocks;

	if (!(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)) {
		error = reset_control_deassert(ddata->rsts);
		if (error)
			goto err_main_clocks;
		rstctrl_deasserted = true;
	}

	/* Quirks can only be finalized once the revision is readable */
	ddata->revision = sysc_read_revision(ddata);
	sysc_init_revision_quirks(ddata);
	sysc_init_module_quirks(ddata);

	if (ddata->legacy_mode) {
		error = sysc_legacy_init(ddata);
		if (error)
			goto err_main_clocks;
	}

	if (!ddata->legacy_mode) {
		error = sysc_enable_module(ddata->dev);
		if (error)
			goto err_main_clocks;
	}

	error = sysc_reset(ddata);
	if (error)
		dev_err(ddata->dev, "Reset failed with %d\n", error);

	if (error && !ddata->legacy_mode)
		sysc_disable_module(ddata->dev);

	/* On success clocks stay enabled; unwind happens only on error */
err_main_clocks:
	if (error)
		sysc_disable_main_clocks(ddata);
err_opt_clocks:
	/* No re-enable of clockdomain autoidle to prevent module autoidle */
	if (error) {
		sysc_disable_opt_clocks(ddata);
		sysc_clkdm_allow_idle(ddata);
	}

	if (error && rstctrl_deasserted &&
	    !(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT))
		reset_control_assert(ddata->rsts);

	return error;
}
2085
2086 static int sysc_init_sysc_mask(struct sysc *ddata)
2087 {
2088 struct device_node *np = ddata->dev->of_node;
2089 int error;
2090 u32 val;
2091
2092 error = of_property_read_u32(np, "ti,sysc-mask", &val);
2093 if (error)
2094 return 0;
2095
2096 ddata->cfg.sysc_val = val & ddata->cap->sysc_mask;
2097
2098 return 0;
2099 }
2100
/*
 * Parse a devicetree list of allowed idlemodes into a bitmask.
 * Returns -EINVAL for values outside SYSC_NR_IDLEMODES.
 */
static int sysc_init_idlemode(struct sysc *ddata, u8 *idlemodes,
			      const char *name)
{
	struct device_node *np = ddata->dev->of_node;
	struct property *prop;
	const __be32 *p;
	u32 val;

	of_property_for_each_u32(np, name, prop, p, val) {
		if (val >= SYSC_NR_IDLEMODES) {
			dev_err(ddata->dev, "invalid idlemode: %i\n", val);
			return -EINVAL;
		}
		/* Each listed mode becomes one bit in the mask */
		*idlemodes |= (1 << val);
	}

	return 0;
}
2119
2120 static int sysc_init_idlemodes(struct sysc *ddata)
2121 {
2122 int error;
2123
2124 error = sysc_init_idlemode(ddata, &ddata->cfg.midlemodes,
2125 "ti,sysc-midle");
2126 if (error)
2127 return error;
2128
2129 error = sysc_init_idlemode(ddata, &ddata->cfg.sidlemodes,
2130 "ti,sysc-sidle");
2131 if (error)
2132 return error;
2133
2134 return 0;
2135 }
2136
/*
 * Only some devices on omap4 and later have SYSCONFIG reset done
 * bit. We can detect this if there is no SYSSTATUS at all, or the
 * SYSTATUS bit 0 is not used. Note that some SYSSTATUS registers
 * have multiple bits for the child devices like OHCI and EHCI.
 * Depends on SYSC being parsed first.
 */
static int sysc_init_syss_mask(struct sysc *ddata)
{
	struct device_node *np = ddata->dev->of_node;
	int error;
	u32 val;

	error = of_property_read_u32(np, "ti,syss-mask", &val);
	if (error) {
		/* No SYSSTATUS: rely on the SYSCONFIG reset done bit */
		if ((ddata->cap->type == TI_SYSC_OMAP4 ||
		     ddata->cap->type == TI_SYSC_OMAP4_TIMER) &&
		    (ddata->cfg.sysc_val & SYSC_OMAP4_SOFTRESET))
			ddata->cfg.quirks |= SYSC_QUIRK_RESET_STATUS;

		return 0;
	}

	/* SYSSTATUS bit 0 unused: also fall back to SYSCONFIG status */
	if (!(val & 1) && (ddata->cfg.sysc_val & SYSC_OMAP4_SOFTRESET))
		ddata->cfg.quirks |= SYSC_QUIRK_RESET_STATUS;

	ddata->cfg.syss_mask = val;

	return 0;
}
2167
/*
 * Many child device drivers need to have fck and opt clocks available
 * to get the clock rate for device internal configuration etc.
 */
static int sysc_child_add_named_clock(struct sysc *ddata,
				      struct device *child,
				      const char *name)
{
	struct clk *clk;
	struct clk_lookup *l;
	int error = 0;

	if (!name)
		return 0;

	/* Nothing to do if the child can already look up the clock */
	clk = clk_get(child, name);
	if (!IS_ERR(clk)) {
		error = -EEXIST;
		goto put_clk;
	}

	clk = clk_get(ddata->dev, name);
	if (IS_ERR(clk))
		return -ENODEV;

	/* Alias the parent's clock into the child's namespace */
	l = clkdev_create(clk, name, dev_name(child));
	if (!l)
		error = -ENOMEM;
put_clk:
	clk_put(clk);

	return error;
}
2201
2202 static int sysc_child_add_clocks(struct sysc *ddata,
2203 struct device *child)
2204 {
2205 int i, error;
2206
2207 for (i = 0; i < ddata->nr_clocks; i++) {
2208 error = sysc_child_add_named_clock(ddata,
2209 child,
2210 ddata->clock_roles[i]);
2211 if (error && error != -EEXIST) {
2212 dev_err(ddata->dev, "could not add child clock %s: %i\n",
2213 ddata->clock_roles[i], error);
2214
2215 return error;
2216 }
2217 }
2218
2219 return 0;
2220 }
2221
/* Tag for ti-sysc devices so children can find their interconnect parent */
static struct device_type sysc_device_type = {
};
2224
2225 static struct sysc *sysc_child_to_parent(struct device *dev)
2226 {
2227 struct device *parent = dev->parent;
2228
2229 if (!parent || parent->type != &sysc_device_type)
2230 return NULL;
2231
2232 return dev_get_drvdata(parent);
2233 }
2234
/*
 * Runtime suspend a child, then idle the interconnect target module too.
 * NOTE(review): ddata is used unchecked; assumes this is only installed
 * via sysc_child_pm_domain on devices that have a ti-sysc parent.
 */
static int __maybe_unused sysc_child_runtime_suspend(struct device *dev)
{
	struct sysc *ddata;
	int error;

	ddata = sysc_child_to_parent(dev);

	error = pm_generic_runtime_suspend(dev);
	if (error)
		return error;

	/* Parent already idle, nothing more to do */
	if (!ddata->enabled)
		return 0;

	return sysc_runtime_suspend(ddata->dev);
}
2251
/*
 * Resume the interconnect target module first if needed, then runtime
 * resume the child. A parent resume failure is logged but not fatal.
 */
static int __maybe_unused sysc_child_runtime_resume(struct device *dev)
{
	struct sysc *ddata;
	int error;

	ddata = sysc_child_to_parent(dev);

	if (!ddata->enabled) {
		error = sysc_runtime_resume(ddata->dev);
		if (error < 0)
			dev_err(ddata->dev,
				"%s error: %i\n", __func__, error);
	}

	return pm_generic_runtime_resume(dev);
}
2268
2269 #ifdef CONFIG_PM_SLEEP
/*
 * System suspend (noirq phase) for a child: after the generic noirq
 * suspend, force-idle the child and the interconnect target module if
 * they were not already runtime suspended. Remember that we did so in
 * child_needs_resume so the resume path can undo it.
 */
static int sysc_child_suspend_noirq(struct device *dev)
{
	struct sysc *ddata;
	int error;

	ddata = sysc_child_to_parent(dev);

	dev_dbg(ddata->dev, "%s %s\n", __func__,
		ddata->name ? ddata->name : "");

	error = pm_generic_suspend_noirq(dev);
	if (error) {
		dev_err(dev, "%s error at %i: %i\n",
			__func__, __LINE__, error);

		return error;
	}

	if (!pm_runtime_status_suspended(dev)) {
		error = pm_generic_runtime_suspend(dev);
		if (error) {
			/* Child is busy: not an error, leave it enabled */
			dev_dbg(dev, "%s busy at %i: %i\n",
				__func__, __LINE__, error);

			return 0;
		}

		error = sysc_runtime_suspend(ddata->dev);
		if (error) {
			dev_err(dev, "%s error at %i: %i\n",
				__func__, __LINE__, error);

			return error;
		}

		/* Tell sysc_child_resume_noirq() to resume the module */
		ddata->child_needs_resume = true;
	}

	return 0;
}
2310
/*
 * System resume (noirq phase) for a child: if the suspend path forced
 * the module idle, runtime resume both the module and the child before
 * running the generic noirq resume.
 */
static int sysc_child_resume_noirq(struct device *dev)
{
	struct sysc *ddata;
	int error;

	ddata = sysc_child_to_parent(dev);

	dev_dbg(ddata->dev, "%s %s\n", __func__,
		ddata->name ? ddata->name : "");

	if (ddata->child_needs_resume) {
		ddata->child_needs_resume = false;

		/* Resume errors are logged but resume continues regardless */
		error = sysc_runtime_resume(ddata->dev);
		if (error)
			dev_err(ddata->dev,
				"%s runtime resume error: %i\n",
				__func__, error);

		error = pm_generic_runtime_resume(dev);
		if (error)
			dev_err(ddata->dev,
				"%s generic runtime resume: %i\n",
				__func__, error);
	}

	return pm_generic_resume_noirq(dev);
}
2339 #endif
2340
/* PM domain installed on children for SYSC_QUIRK_LEGACY_IDLE handling */
static struct dev_pm_domain sysc_child_pm_domain = {
	.ops = {
		SET_RUNTIME_PM_OPS(sysc_child_runtime_suspend,
				   sysc_child_runtime_resume,
				   NULL)
		USE_PLATFORM_PM_SLEEP_OPS
		SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(sysc_child_suspend_noirq,
					      sysc_child_resume_noirq)
	}
};
2351
2352 /**
2353 * sysc_legacy_idle_quirk - handle children in omap_device compatible way
2354 * @ddata: device driver data
2355 * @child: child device driver
2356 *
2357 * Allow idle for child devices as done with _od_runtime_suspend().
2358 * Otherwise many child devices will not idle because of the permanent
2359 * parent usecount set in pm_runtime_irq_safe().
2360 *
2361 * Note that the long term solution is to just modify the child device
2362 * drivers to not set pm_runtime_irq_safe() and then this can be just
2363 * dropped.
2364 */
2365 static void sysc_legacy_idle_quirk(struct sysc *ddata, struct device *child)
2366 {
2367 if (ddata->cfg.quirks & SYSC_QUIRK_LEGACY_IDLE)
2368 dev_pm_domain_set(child, &sysc_child_pm_domain);
2369 }
2370
/*
 * Bus notifier: when a child platform device is added under a ti-sysc
 * parent, alias the module clocks for it and apply the legacy idle
 * quirk if needed.
 */
static int sysc_notifier_call(struct notifier_block *nb,
			      unsigned long event, void *device)
{
	struct device *dev = device;
	struct sysc *ddata;
	int error;

	ddata = sysc_child_to_parent(dev);
	if (!ddata)
		return NOTIFY_DONE;

	switch (event) {
	case BUS_NOTIFY_ADD_DEVICE:
		error = sysc_child_add_clocks(ddata, dev);
		if (error)
			/*
			 * NOTE(review): returns a raw -errno rather than
			 * notifier_from_errno(error); callers of the
			 * notifier chain may not decode this — confirm
			 * against the notifier API before changing.
			 */
			return error;
		sysc_legacy_idle_quirk(ddata, dev);
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block sysc_nb = {
	.notifier_call = sysc_notifier_call,
};
2399
/* Device tree configured quirks */
struct sysc_dts_quirk {
	const char *name;	/* dt property name */
	u32 mask;		/* quirk flag(s) to set when present */
};

static const struct sysc_dts_quirk sysc_dts_quirks[] = {
	{ .name = "ti,no-idle-on-init",
	  .mask = SYSC_QUIRK_NO_IDLE_ON_INIT, },
	{ .name = "ti,no-reset-on-init",
	  .mask = SYSC_QUIRK_NO_RESET_ON_INIT, },
	{ .name = "ti,no-idle",
	  .mask = SYSC_QUIRK_NO_IDLE, },
};
2414
2415 static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np,
2416 bool is_child)
2417 {
2418 const struct property *prop;
2419 int i, len;
2420
2421 for (i = 0; i < ARRAY_SIZE(sysc_dts_quirks); i++) {
2422 const char *name = sysc_dts_quirks[i].name;
2423
2424 prop = of_get_property(np, name, &len);
2425 if (!prop)
2426 continue;
2427
2428 ddata->cfg.quirks |= sysc_dts_quirks[i].mask;
2429 if (is_child) {
2430 dev_warn(ddata->dev,
2431 "dts flag should be at module level for %s\n",
2432 name);
2433 }
2434 }
2435 }
2436
/* Parse legacy mode, quirk flags and the softreset delay from dt */
static int sysc_init_dts_quirks(struct sysc *ddata)
{
	struct device_node *np = ddata->dev->of_node;
	int error;
	u32 val;

	/* Presence of "ti,hwmods" selects legacy platform data mode */
	ddata->legacy_mode = of_get_property(np, "ti,hwmods", NULL);

	sysc_parse_dts_quirks(ddata, np, false);
	error = of_property_read_u32(np, "ti,sysc-delay-us", &val);
	if (!error) {
		if (val > 255) {
			dev_warn(ddata->dev, "bad ti,sysc-delay-us: %i\n",
				 val);
		}

		/* Stored as u8: values over 255 are warned about and truncated */
		ddata->cfg.srst_udelay = (u8)val;
	}

	return 0;
}
2458
2459 static void sysc_unprepare(struct sysc *ddata)
2460 {
2461 int i;
2462
2463 if (!ddata->clocks)
2464 return;
2465
2466 for (i = 0; i < SYSC_MAX_CLOCKS; i++) {
2467 if (!IS_ERR_OR_NULL(ddata->clocks[i]))
2468 clk_unprepare(ddata->clocks[i]);
2469 }
2470 }
2471
/*
 * Common sysc register bits found on omap2, also known as type1.
 * A shift of -ENODEV marks a bitfield that does not exist for the type.
 */
static const struct sysc_regbits sysc_regbits_omap2 = {
	.dmadisable_shift = -ENODEV,
	.midle_shift = 12,
	.sidle_shift = 3,
	.clkact_shift = 8,
	.emufree_shift = 5,
	.enwkup_shift = 2,
	.srst_shift = 1,
	.autoidle_shift = 0,
};

/* Capabilities for generic omap2 (type1) interconnect target modules */
static const struct sysc_capabilities sysc_omap2 = {
	.type = TI_SYSC_OMAP2,
	.sysc_mask = SYSC_OMAP2_CLOCKACTIVITY | SYSC_OMAP2_EMUFREE |
		     SYSC_OMAP2_ENAWAKEUP | SYSC_OMAP2_SOFTRESET |
		     SYSC_OMAP2_AUTOIDLE,
	.regbits = &sysc_regbits_omap2,
};
2493
/* All omap2 and 3 timers, and timers 1, 2 & 10 on omap 4 and 5 */
static const struct sysc_capabilities sysc_omap2_timer = {
	.type = TI_SYSC_OMAP2_TIMER,
	.sysc_mask = SYSC_OMAP2_CLOCKACTIVITY | SYSC_OMAP2_EMUFREE |
		     SYSC_OMAP2_ENAWAKEUP | SYSC_OMAP2_SOFTRESET |
		     SYSC_OMAP2_AUTOIDLE,
	.regbits = &sysc_regbits_omap2,
	/* Timers need the CLOCKACTIVITY bits managed while enabled */
	.mod_quirks = SYSC_QUIRK_USE_CLOCKACT,
};
2503
/*
 * SHAM2 (SHA1/MD5) sysc found on omap3, a variant of sysc_regbits_omap2
 * with different sidle position
 */
static const struct sysc_regbits sysc_regbits_omap3_sham = {
	.dmadisable_shift = -ENODEV,
	.midle_shift = -ENODEV,
	.sidle_shift = 4,
	.clkact_shift = -ENODEV,
	.enwkup_shift = -ENODEV,
	.srst_shift = 1,
	.autoidle_shift = 0,
	.emufree_shift = -ENODEV,
};

/* Capabilities for the omap3 SHA1/MD5 accelerator module */
static const struct sysc_capabilities sysc_omap3_sham = {
	.type = TI_SYSC_OMAP3_SHAM,
	.sysc_mask = SYSC_OMAP2_SOFTRESET | SYSC_OMAP2_AUTOIDLE,
	.regbits = &sysc_regbits_omap3_sham,
};
2524
/*
 * AES register bits found on omap3 and later, a variant of
 * sysc_regbits_omap2 with different sidle position
 */
static const struct sysc_regbits sysc_regbits_omap3_aes = {
	.dmadisable_shift = -ENODEV,
	.midle_shift = -ENODEV,
	.sidle_shift = 6,
	.clkact_shift = -ENODEV,
	.enwkup_shift = -ENODEV,
	.srst_shift = 1,
	.autoidle_shift = 0,
	.emufree_shift = -ENODEV,
};

/* Capabilities for the omap3+ AES accelerator module */
static const struct sysc_capabilities sysc_omap3_aes = {
	.type = TI_SYSC_OMAP3_AES,
	.sysc_mask = SYSC_OMAP2_SOFTRESET | SYSC_OMAP2_AUTOIDLE,
	.regbits = &sysc_regbits_omap3_aes,
};
2545
/*
 * Common sysc register bits found on omap4, also known as type2
 */
static const struct sysc_regbits sysc_regbits_omap4 = {
	.dmadisable_shift = 16,
	.midle_shift = 4,
	.sidle_shift = 2,
	.clkact_shift = -ENODEV,
	.enwkup_shift = -ENODEV,
	.emufree_shift = 1,
	.srst_shift = 0,
	.autoidle_shift = -ENODEV,
};

/* Capabilities for generic omap4 (type2) interconnect target modules */
static const struct sysc_capabilities sysc_omap4 = {
	.type = TI_SYSC_OMAP4,
	.sysc_mask = SYSC_OMAP4_DMADISABLE | SYSC_OMAP4_FREEEMU |
		     SYSC_OMAP4_SOFTRESET,
	.regbits = &sysc_regbits_omap4,
};

/* Capabilities for omap4 style timer modules */
static const struct sysc_capabilities sysc_omap4_timer = {
	.type = TI_SYSC_OMAP4_TIMER,
	.sysc_mask = SYSC_OMAP4_DMADISABLE | SYSC_OMAP4_FREEEMU |
		     SYSC_OMAP4_SOFTRESET,
	.regbits = &sysc_regbits_omap4,
};
2573
/*
 * Common sysc register bits found on omap4, also known as type3
 */
static const struct sysc_regbits sysc_regbits_omap4_simple = {
	.dmadisable_shift = -ENODEV,
	.midle_shift = 2,
	.sidle_shift = 0,
	.clkact_shift = -ENODEV,
	.enwkup_shift = -ENODEV,
	.srst_shift = -ENODEV,
	.emufree_shift = -ENODEV,
	.autoidle_shift = -ENODEV,
};

/* Capabilities for simple (type3) modules with only idlemode bits */
static const struct sysc_capabilities sysc_omap4_simple = {
	.type = TI_SYSC_OMAP4_SIMPLE,
	.regbits = &sysc_regbits_omap4_simple,
};
2592
/*
 * SmartReflex sysc found on omap34xx
 */
static const struct sysc_regbits sysc_regbits_omap34xx_sr = {
	.dmadisable_shift = -ENODEV,
	.midle_shift = -ENODEV,
	.sidle_shift = -ENODEV,
	.clkact_shift = 20,
	.enwkup_shift = -ENODEV,
	.srst_shift = -ENODEV,
	.emufree_shift = -ENODEV,
	.autoidle_shift = -ENODEV,
};

/* Capabilities for omap34xx SmartReflex; registers must not be cached */
static const struct sysc_capabilities sysc_34xx_sr = {
	.type = TI_SYSC_OMAP34XX_SR,
	.sysc_mask = SYSC_OMAP2_CLOCKACTIVITY,
	.regbits = &sysc_regbits_omap34xx_sr,
	.mod_quirks = SYSC_QUIRK_USE_CLOCKACT | SYSC_QUIRK_UNCACHED |
		      SYSC_QUIRK_LEGACY_IDLE,
};
2614
/*
 * SmartReflex sysc found on omap36xx and later
 */
static const struct sysc_regbits sysc_regbits_omap36xx_sr = {
	.dmadisable_shift = -ENODEV,
	.midle_shift = -ENODEV,
	.sidle_shift = 24,
	.clkact_shift = -ENODEV,
	.enwkup_shift = 26,
	.srst_shift = -ENODEV,
	.emufree_shift = -ENODEV,
	.autoidle_shift = -ENODEV,
};

/* Capabilities for omap36xx SmartReflex; registers must not be cached */
static const struct sysc_capabilities sysc_36xx_sr = {
	.type = TI_SYSC_OMAP36XX_SR,
	.sysc_mask = SYSC_OMAP3_SR_ENAWAKEUP,
	.regbits = &sysc_regbits_omap36xx_sr,
	.mod_quirks = SYSC_QUIRK_UNCACHED | SYSC_QUIRK_LEGACY_IDLE,
};

/* omap4 SmartReflex reuses the omap36xx register bit layout */
static const struct sysc_capabilities sysc_omap4_sr = {
	.type = TI_SYSC_OMAP4_SR,
	.regbits = &sysc_regbits_omap36xx_sr,
	.mod_quirks = SYSC_QUIRK_LEGACY_IDLE,
};
2641
/*
 * McASP register bits found on omap4 and later
 */
static const struct sysc_regbits sysc_regbits_omap4_mcasp = {
	.dmadisable_shift = -ENODEV,
	.midle_shift = -ENODEV,
	.sidle_shift = 0,
	.clkact_shift = -ENODEV,
	.enwkup_shift = -ENODEV,
	.srst_shift = -ENODEV,
	.emufree_shift = -ENODEV,
	.autoidle_shift = -ENODEV,
};

/* Capabilities for omap4 McASP; optional clocks must be kept enabled */
static const struct sysc_capabilities sysc_omap4_mcasp = {
	.type = TI_SYSC_OMAP4_MCASP,
	.regbits = &sysc_regbits_omap4_mcasp,
	.mod_quirks = SYSC_QUIRK_OPT_CLKS_NEEDED,
};

/*
 * McASP found on dra7 and later
 */
static const struct sysc_capabilities sysc_dra7_mcasp = {
	.type = TI_SYSC_OMAP4_SIMPLE,
	.regbits = &sysc_regbits_omap4_simple,
	.mod_quirks = SYSC_QUIRK_OPT_CLKS_NEEDED,
};
2670
/*
 * FS USB host found on omap4 and later
 */
static const struct sysc_regbits sysc_regbits_omap4_usb_host_fs = {
	.dmadisable_shift = -ENODEV,
	.midle_shift = -ENODEV,
	.sidle_shift = 24,
	.clkact_shift = -ENODEV,
	.enwkup_shift = 26,
	.srst_shift = -ENODEV,
	.emufree_shift = -ENODEV,
	.autoidle_shift = -ENODEV,
};

/* Capabilities for the omap4 full-speed USB host module */
static const struct sysc_capabilities sysc_omap4_usb_host_fs = {
	.type = TI_SYSC_OMAP4_USB_HOST_FS,
	.sysc_mask = SYSC_OMAP2_ENAWAKEUP,
	.regbits = &sysc_regbits_omap4_usb_host_fs,
};
2690
/* MCAN register bits found on dra7 */
static const struct sysc_regbits sysc_regbits_dra7_mcan = {
	.dmadisable_shift = -ENODEV,
	.midle_shift = -ENODEV,
	.sidle_shift = -ENODEV,
	.clkact_shift = -ENODEV,
	.enwkup_shift = 4,
	.srst_shift = 0,
	.emufree_shift = -ENODEV,
	.autoidle_shift = -ENODEV,
};

/* Capabilities for dra7 MCAN; its reset done bit reads inverted */
static const struct sysc_capabilities sysc_dra7_mcan = {
	.type = TI_SYSC_DRA7_MCAN,
	.sysc_mask = SYSC_DRA7_MCAN_ENAWAKEUP | SYSC_OMAP4_SOFTRESET,
	.regbits = &sysc_regbits_dra7_mcan,
	.mod_quirks = SYSS_QUIRK_RESETDONE_INVERTED,
};
2708
/*
 * PRUSS found on some AM33xx, AM437x and AM57xx SoCs
 */
static const struct sysc_capabilities sysc_pruss = {
	.type = TI_SYSC_PRUSS,
	.sysc_mask = SYSC_PRUSS_STANDBY_INIT | SYSC_PRUSS_SUB_MWAIT,
	.regbits = &sysc_regbits_omap4_simple,
	.mod_quirks = SYSC_MODULE_QUIRK_PRUSS,
};
2718
/*
 * Build the module data handed to legacy platform data callbacks.
 * Only populated when booting in "ti,hwmods" legacy mode; otherwise
 * an empty mdata is allocated so the callbacks have something to pass.
 */
static int sysc_init_pdata(struct sysc *ddata)
{
	struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev);
	struct ti_sysc_module_data *mdata;

	/* No platform data means no legacy callbacks, nothing to do */
	if (!pdata)
		return 0;

	mdata = devm_kzalloc(ddata->dev, sizeof(*mdata), GFP_KERNEL);
	if (!mdata)
		return -ENOMEM;

	if (ddata->legacy_mode) {
		mdata->name = ddata->legacy_mode;
		mdata->module_pa = ddata->module_pa;
		mdata->module_size = ddata->module_size;
		mdata->offsets = ddata->offsets;
		mdata->nr_offsets = SYSC_MAX_REGS;
		mdata->cap = ddata->cap;
		mdata->cfg = &ddata->cfg;
	}

	ddata->mdata = mdata;

	return 0;
}
2745
2746 static int sysc_init_match(struct sysc *ddata)
2747 {
2748 const struct sysc_capabilities *cap;
2749
2750 cap = of_device_get_match_data(ddata->dev);
2751 if (!cap)
2752 return -EINVAL;
2753
2754 ddata->cap = cap;
2755 if (ddata->cap)
2756 ddata->cfg.quirks |= ddata->cap->mod_quirks;
2757
2758 return 0;
2759 }
2760
/*
 * Deferred idle work scheduled from probe for modules with NO_IDLE,
 * NO_IDLE_ON_INIT or NO_RESET_ON_INIT quirks, balancing the clock and
 * PM runtime usage counts that were left held during init.
 */
static void ti_sysc_idle(struct work_struct *work)
{
	struct sysc *ddata;

	ddata = container_of(work, struct sysc, idle_work.work);

	/*
	 * One time decrement of clock usage counts if left on from init.
	 * Note that we disable opt clocks unconditionally in this case
	 * as they are enabled unconditionally during init without
	 * considering sysc_opt_clks_needed() at that point.
	 */
	if (ddata->cfg.quirks & (SYSC_QUIRK_NO_IDLE |
				 SYSC_QUIRK_NO_IDLE_ON_INIT)) {
		sysc_disable_main_clocks(ddata);
		sysc_disable_opt_clocks(ddata);
		sysc_clkdm_allow_idle(ddata);
	}

	/* Keep permanent PM runtime usage count for SYSC_QUIRK_NO_IDLE */
	if (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE)
		return;

	/*
	 * Decrement PM runtime usage count for SYSC_QUIRK_NO_IDLE_ON_INIT
	 * and SYSC_QUIRK_NO_RESET_ON_INIT
	 */
	if (pm_runtime_active(ddata->dev))
		pm_runtime_put_sync(ddata->dev);
}
2791
/*
 * SoC model and features detection. Only needed for SoCs that need
 * special handling for quirks, no need to list others.
 * Machine glob patterns are matched by soc_device_match().
 */
static const struct soc_device_attribute sysc_soc_match[] = {
	SOC_FLAG("OMAP242*", SOC_2420),
	SOC_FLAG("OMAP243*", SOC_2430),
	SOC_FLAG("OMAP3[45]*", SOC_3430),
	SOC_FLAG("OMAP3[67]*", SOC_3630),
	SOC_FLAG("OMAP443*", SOC_4430),
	SOC_FLAG("OMAP446*", SOC_4460),
	SOC_FLAG("OMAP447*", SOC_4470),
	SOC_FLAG("OMAP54*", SOC_5430),
	SOC_FLAG("AM433", SOC_AM3),
	SOC_FLAG("AM43*", SOC_AM4),
	SOC_FLAG("DRA7*", SOC_DRA7),

	{ /* sentinel */ },
};
2811
/*
 * List of SoCs variants with disabled features. By default we assume all
 * devices in the device tree are available so no need to list those SoCs.
 * The flag data marks which accelerators are fused off on each variant.
 */
static const struct soc_device_attribute sysc_soc_feat_match[] = {
	/* OMAP3430/3530 and AM3517 variants with some accelerators disabled */
	SOC_FLAG("AM3505", DIS_SGX),
	SOC_FLAG("OMAP3525", DIS_SGX),
	SOC_FLAG("OMAP3515", DIS_IVA | DIS_SGX),
	SOC_FLAG("OMAP3503", DIS_ISP | DIS_IVA | DIS_SGX),

	/* OMAP3630/DM3730 variants with some accelerators disabled */
	SOC_FLAG("AM3703", DIS_IVA | DIS_SGX),
	SOC_FLAG("DM3725", DIS_SGX),
	SOC_FLAG("OMAP3611", DIS_ISP | DIS_IVA | DIS_SGX),
	SOC_FLAG("OMAP3615/AM3715", DIS_IVA),
	SOC_FLAG("OMAP3621", DIS_ISP),

	{ /* sentinel */ },
};
2832
2833 static int sysc_add_disabled(unsigned long base)
2834 {
2835 struct sysc_address *disabled_module;
2836
2837 disabled_module = kzalloc(sizeof(*disabled_module), GFP_KERNEL);
2838 if (!disabled_module)
2839 return -ENOMEM;
2840
2841 disabled_module->base = base;
2842
2843 mutex_lock(&sysc_soc->list_lock);
2844 list_add(&disabled_module->node, &sysc_soc->disabled_modules);
2845 mutex_unlock(&sysc_soc->list_lock);
2846
2847 return 0;
2848 }
2849
2850 /*
2851 * One time init to detect the booted SoC and disable unavailable features.
2852 * Note that we initialize static data shared across all ti-sysc instances
2853 * so ddata is only used for SoC type. This can be called from module_init
2854 * once we no longer need to rely on platform data.
2855 */
2856 static int sysc_init_soc(struct sysc *ddata)
2857 {
2858 const struct soc_device_attribute *match;
2859 struct ti_sysc_platform_data *pdata;
2860 unsigned long features = 0;
2861 struct device_node *np;
2862
2863 if (sysc_soc)
2864 return 0;
2865
2866 sysc_soc = kzalloc(sizeof(*sysc_soc), GFP_KERNEL);
2867 if (!sysc_soc)
2868 return -ENOMEM;
2869
2870 mutex_init(&sysc_soc->list_lock);
2871 INIT_LIST_HEAD(&sysc_soc->disabled_modules);
2872 sysc_soc->general_purpose = true;
2873
2874 pdata = dev_get_platdata(ddata->dev);
2875 if (pdata && pdata->soc_type_gp)
2876 sysc_soc->general_purpose = pdata->soc_type_gp();
2877
2878 match = soc_device_match(sysc_soc_match);
2879 if (match && match->data)
2880 sysc_soc->soc = (int)match->data;
2881
2882 /*
2883 * Check and warn about possible old incomplete dtb. We now want to see
2884 * simple-pm-bus instead of simple-bus in the dtb for genpd using SoCs.
2885 */
2886 switch (sysc_soc->soc) {
2887 case SOC_AM3:
2888 case SOC_AM4:
2889 case SOC_4430 ... SOC_4470:
2890 case SOC_5430:
2891 case SOC_DRA7:
2892 np = of_find_node_by_path("/ocp");
2893 WARN_ONCE(np && of_device_is_compatible(np, "simple-bus"),
2894 "ti-sysc: Incomplete old dtb, please update\n");
2895 break;
2896 default:
2897 break;
2898 }
2899
2900 /* Ignore devices that are not available on HS and EMU SoCs */
2901 if (!sysc_soc->general_purpose) {
2902 switch (sysc_soc->soc) {
2903 case SOC_3430 ... SOC_3630:
2904 sysc_add_disabled(0x48304000); /* timer12 */
2905 break;
2906 default:
2907 break;
2908 }
2909 }
2910
2911 match = soc_device_match(sysc_soc_feat_match);
2912 if (!match)
2913 return 0;
2914
2915 if (match->data)
2916 features = (unsigned long)match->data;
2917
2918 /*
2919 * Add disabled devices to the list based on the module base.
2920 * Note that this must be done before we attempt to access the
2921 * device and have module revision checks working.
2922 */
2923 if (features & DIS_ISP)
2924 sysc_add_disabled(0x480bd400);
2925 if (features & DIS_IVA)
2926 sysc_add_disabled(0x5d000000);
2927 if (features & DIS_SGX)
2928 sysc_add_disabled(0x50000000);
2929
2930 return 0;
2931 }
2932
2933 static void sysc_cleanup_soc(void)
2934 {
2935 struct sysc_address *disabled_module;
2936 struct list_head *pos, *tmp;
2937
2938 if (!sysc_soc)
2939 return;
2940
2941 mutex_lock(&sysc_soc->list_lock);
2942 list_for_each_safe(pos, tmp, &sysc_soc->disabled_modules) {
2943 disabled_module = list_entry(pos, struct sysc_address, node);
2944 list_del(pos);
2945 kfree(disabled_module);
2946 }
2947 mutex_unlock(&sysc_soc->list_lock);
2948 }
2949
2950 static int sysc_check_disabled_devices(struct sysc *ddata)
2951 {
2952 struct sysc_address *disabled_module;
2953 struct list_head *pos;
2954 int error = 0;
2955
2956 mutex_lock(&sysc_soc->list_lock);
2957 list_for_each(pos, &sysc_soc->disabled_modules) {
2958 disabled_module = list_entry(pos, struct sysc_address, node);
2959 if (ddata->module_pa == disabled_module->base) {
2960 dev_dbg(ddata->dev, "module disabled for this SoC\n");
2961 error = -ENODEV;
2962 break;
2963 }
2964 }
2965 mutex_unlock(&sysc_soc->list_lock);
2966
2967 return error;
2968 }
2969
2970 /*
2971 * Ignore timers tagged with no-reset and no-idle. These are likely in use,
2972 * for example by drivers/clocksource/timer-ti-dm-systimer.c. If more checks
2973 * are needed, we could also look at the timer register configuration.
2974 */
2975 static int sysc_check_active_timer(struct sysc *ddata)
2976 {
2977 if (ddata->cap->type != TI_SYSC_OMAP2_TIMER &&
2978 ddata->cap->type != TI_SYSC_OMAP4_TIMER)
2979 return 0;
2980
2981 if ((ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT) &&
2982 (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE))
2983 return -ENXIO;
2984
2985 return 0;
2986 }
2987
/* Child bus types to descend into when populating child devices */
static const struct of_device_id sysc_match_table[] = {
	{ .compatible = "simple-bus", },
	{ /* sentinel */ },
};
2992
/*
 * Probe an interconnect target module: parse dt and quirks, map and
 * sanity check registers, enable the module, then populate its child
 * devices. The init order below is significant: quirks and register
 * maps must exist before the module is first touched.
 */
static int sysc_probe(struct platform_device *pdev)
{
	struct ti_sysc_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct sysc *ddata;
	int error;

	ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
	if (!ddata)
		return -ENOMEM;

	/* Mark all register offsets as missing until parsed from dt */
	ddata->offsets[SYSC_REVISION] = -ENODEV;
	ddata->offsets[SYSC_SYSCONFIG] = -ENODEV;
	ddata->offsets[SYSC_SYSSTATUS] = -ENODEV;
	ddata->dev = &pdev->dev;
	platform_set_drvdata(pdev, ddata);

	/* One-time SoC detection shared by all instances */
	error = sysc_init_soc(ddata);
	if (error)
		return error;

	error = sysc_init_match(ddata);
	if (error)
		return error;

	error = sysc_init_dts_quirks(ddata);
	if (error)
		return error;

	error = sysc_map_and_check_registers(ddata);
	if (error)
		return error;

	error = sysc_init_sysc_mask(ddata);
	if (error)
		return error;

	error = sysc_init_idlemodes(ddata);
	if (error)
		return error;

	error = sysc_init_syss_mask(ddata);
	if (error)
		return error;

	error = sysc_init_pdata(ddata);
	if (error)
		return error;

	sysc_init_early_quirks(ddata);

	/* Bail out for modules fused off or claimed on this SoC variant */
	error = sysc_check_disabled_devices(ddata);
	if (error)
		return error;

	error = sysc_check_active_timer(ddata);
	if (error)
		return error;

	error = sysc_get_clocks(ddata);
	if (error)
		return error;

	error = sysc_init_resets(ddata);
	if (error)
		goto unprepare;

	error = sysc_init_module(ddata);
	if (error)
		goto unprepare;

	pm_runtime_enable(ddata->dev);
	error = pm_runtime_get_sync(ddata->dev);
	if (error < 0) {
		pm_runtime_put_noidle(ddata->dev);
		pm_runtime_disable(ddata->dev);
		goto unprepare;
	}

	/* Balance use counts as PM runtime should have enabled these all */
	if (!(ddata->cfg.quirks &
	      (SYSC_QUIRK_NO_IDLE | SYSC_QUIRK_NO_IDLE_ON_INIT))) {
		sysc_disable_main_clocks(ddata);
		sysc_disable_opt_clocks(ddata);
		sysc_clkdm_allow_idle(ddata);
	}

	if (!(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT))
		reset_control_assert(ddata->rsts);

	sysc_show_registers(ddata);

	/* Tag the device so children can find us via sysc_child_to_parent() */
	ddata->dev->type = &sysc_device_type;
	error = of_platform_populate(ddata->dev->of_node, sysc_match_table,
				     pdata ? pdata->auxdata : NULL,
				     ddata->dev);
	if (error)
		goto err;

	INIT_DELAYED_WORK(&ddata->idle_work, ti_sysc_idle);

	/* At least earlycon won't survive without deferred idle */
	if (ddata->cfg.quirks & (SYSC_QUIRK_NO_IDLE |
				 SYSC_QUIRK_NO_IDLE_ON_INIT |
				 SYSC_QUIRK_NO_RESET_ON_INIT)) {
		schedule_delayed_work(&ddata->idle_work, 3000);
	} else {
		pm_runtime_put(&pdev->dev);
	}

	return 0;

err:
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
unprepare:
	sysc_unprepare(ddata);

	return error;
}
3112
/*
 * Remove the module: cancel deferred idle, depopulate children and
 * shut PM runtime down. Note that remove always reports success even
 * if resuming the module for teardown failed.
 */
static int sysc_remove(struct platform_device *pdev)
{
	struct sysc *ddata = platform_get_drvdata(pdev);
	int error;

	cancel_delayed_work_sync(&ddata->idle_work);

	error = pm_runtime_get_sync(ddata->dev);
	if (error < 0) {
		pm_runtime_put_noidle(ddata->dev);
		pm_runtime_disable(ddata->dev);
		goto unprepare;
	}

	of_platform_depopulate(&pdev->dev);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	/* Put the module back in reset unless it is already held there */
	if (!reset_control_status(ddata->rsts))
		reset_control_assert(ddata->rsts);

unprepare:
	sysc_unprepare(ddata);

	return 0;
}
3140
/* Compatible strings mapped to the capabilities declared above */
static const struct of_device_id sysc_match[] = {
	{ .compatible = "ti,sysc-omap2", .data = &sysc_omap2, },
	{ .compatible = "ti,sysc-omap2-timer", .data = &sysc_omap2_timer, },
	{ .compatible = "ti,sysc-omap4", .data = &sysc_omap4, },
	{ .compatible = "ti,sysc-omap4-timer", .data = &sysc_omap4_timer, },
	{ .compatible = "ti,sysc-omap4-simple", .data = &sysc_omap4_simple, },
	{ .compatible = "ti,sysc-omap3430-sr", .data = &sysc_34xx_sr, },
	{ .compatible = "ti,sysc-omap3630-sr", .data = &sysc_36xx_sr, },
	{ .compatible = "ti,sysc-omap4-sr", .data = &sysc_omap4_sr, },
	{ .compatible = "ti,sysc-omap3-sham", .data = &sysc_omap3_sham, },
	{ .compatible = "ti,sysc-omap-aes", .data = &sysc_omap3_aes, },
	{ .compatible = "ti,sysc-mcasp", .data = &sysc_omap4_mcasp, },
	{ .compatible = "ti,sysc-dra7-mcasp", .data = &sysc_dra7_mcasp, },
	{ .compatible = "ti,sysc-usb-host-fs",
	  .data = &sysc_omap4_usb_host_fs, },
	{ .compatible = "ti,sysc-dra7-mcan", .data = &sysc_dra7_mcan, },
	{ .compatible = "ti,sysc-pruss", .data = &sysc_pruss, },
	{ },
};
MODULE_DEVICE_TABLE(of, sysc_match);
3161
/* Platform driver for ti-sysc interconnect target modules */
static struct platform_driver sysc_driver = {
	.probe		= sysc_probe,
	.remove		= sysc_remove,
	.driver         = {
		.name   = "ti-sysc",
		.of_match_table	= sysc_match,
		.pm = &sysc_pm_ops,
	},
};
3171
3172 static int __init sysc_init(void)
3173 {
3174 bus_register_notifier(&platform_bus_type, &sysc_nb);
3175
3176 return platform_driver_register(&sysc_driver);
3177 }
3178 module_init(sysc_init);
3179
/* Module exit: tear down in reverse order of sysc_init() */
static void __exit sysc_exit(void)
{
	bus_unregister_notifier(&platform_bus_type, &sysc_nb);
	platform_driver_unregister(&sysc_driver);
	/* Free the shared SoC data last, after all instances are gone */
	sysc_cleanup_soc();
}
module_exit(sysc_exit);
3187
3188 MODULE_DESCRIPTION("TI sysc interconnect target driver");
3189 MODULE_LICENSE("GPL v2");