drivers/clk/clk-qoriq.c
/*
 * Copyright 2013 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * clock driver for Freescale QorIQ SoCs.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/fsl/guts.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of.h>
#include <linux/slab.h>

#define PLL_DIV1	0
#define PLL_DIV2	1
#define PLL_DIV3	2
#define PLL_DIV4	3

#define PLATFORM_PLL	0
#define CGA_PLL1	1
#define CGA_PLL2	2
#define CGA_PLL3	3
#define CGA_PLL4	4	/* only on clockgen-1.0, which lacks CGB */
#define CGB_PLL1	4
#define CGB_PLL2	5

struct clockgen_pll_div {
	struct clk *clk;
	char name[32];
};

struct clockgen_pll {
	struct clockgen_pll_div div[4];
};

#define CLKSEL_VALID	1
#define CLKSEL_80PCT	2	/* Only allowed if PLL <= 80% of max cpu freq */

struct clockgen_sourceinfo {
	u32 flags;	/* CLKSEL_xxx */
	int pll;	/* CGx_PLLn */
	int div;	/* PLL_DIVn */
};

#define NUM_MUX_PARENTS	16

struct clockgen_muxinfo {
	struct clockgen_sourceinfo clksel[NUM_MUX_PARENTS];
};

#define NUM_HWACCEL	5
#define NUM_CMUX	8

struct clockgen;

/*
 * If this flag is set, the cmux frequency must be >= the platform PLL
 * frequency; if it is not set, the cmux frequency must be >= platform
 * PLL / 2.
 */
#define CG_CMUX_GE_PLAT		1

#define CG_PLL_8BIT		2	/* PLLCnGSR[CFG] is 8 bits, not 6 */
#define CG_VER3			4	/* version 3 cg: reg layout different */
#define CG_LITTLE_ENDIAN	8

struct clockgen_chipinfo {
	const char *compat, *guts_compat;
	const struct clockgen_muxinfo *cmux_groups[2];
	const struct clockgen_muxinfo *hwaccel[NUM_HWACCEL];
	void (*init_periph)(struct clockgen *cg);
	int cmux_to_group[NUM_CMUX]; /* -1 terminates if fewer than NUM_CMUX */
	u32 pll_mask;	/* 1 << n bit set if PLL n is valid */
	u32 flags;	/* CG_xxx */
};

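/*
 * Run-time state for the clockgen block.  A single static instance is
 * used below, as this driver expects at most one clockgen node per SoC.
 */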
struct clockgen {
	struct device_node *node;
	void __iomem *regs;
	struct clockgen_chipinfo info; /* mutable copy */
	struct clk *sysclk, *coreclk;
	struct clockgen_pll pll[6];
	struct clk *cmux[NUM_CMUX];
	struct clk *hwaccel[NUM_HWACCEL];
	struct clk *fman[2];
	struct ccsr_guts __iomem *guts;
};

static struct clockgen clockgen;

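/*
 * Register accessors that honour the endianness of the clockgen block,
 * selected via the CG_LITTLE_ENDIAN chip flag.
 */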
static void cg_out(struct clockgen *cg, u32 val, u32 __iomem *reg)
{
	if (cg->info.flags & CG_LITTLE_ENDIAN)
		iowrite32(val, reg);
	else
		iowrite32be(val, reg);
}

static u32 cg_in(struct clockgen *cg, u32 __iomem *reg)
{
	u32 val;

	if (cg->info.flags & CG_LITTLE_ENDIAN)
		val = ioread32(reg);
	else
		val = ioread32be(reg);

	return val;
}

static const struct clockgen_muxinfo p2041_cmux_grp1 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
	}
};

static const struct clockgen_muxinfo p2041_cmux_grp2 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo p5020_cmux_grp1 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV1 },
	}
};

static const struct clockgen_muxinfo p5020_cmux_grp2 = {
	{
		[0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo p5040_cmux_grp1 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo p5040_cmux_grp2 = {
	{
		[0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo p4080_cmux_grp1 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		[8] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL3, PLL_DIV1 },
	}
};

static const struct clockgen_muxinfo p4080_cmux_grp2 = {
	{
		[0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 },
		[8] = { CLKSEL_VALID, CGA_PLL3, PLL_DIV1 },
		[9] = { CLKSEL_VALID, CGA_PLL3, PLL_DIV2 },
		[12] = { CLKSEL_VALID, CGA_PLL4, PLL_DIV1 },
		[13] = { CLKSEL_VALID, CGA_PLL4, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo t1023_cmux = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo t1040_cmux = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
	}
};


static const struct clockgen_muxinfo clockgen2_cmux_cga = {
	{
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL3, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL3, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL3, PLL_DIV4 },
	},
};

static const struct clockgen_muxinfo clockgen2_cmux_cga12 = {
	{
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
	},
};

static const struct clockgen_muxinfo clockgen2_cmux_cgb = {
	{
		{ CLKSEL_VALID, CGB_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGB_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGB_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGB_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGB_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGB_PLL2, PLL_DIV4 },
	},
};

static const struct clockgen_muxinfo ls1043a_hwa1 = {
	{
		{},
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
		{},
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo ls1043a_hwa2 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo ls1046a_hwa1 = {
	{
		{},
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo ls1046a_hwa2 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
		{},
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
	},
};

static const struct clockgen_muxinfo ls1012a_cmux = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{},
		[2] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo t1023_hwa1 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo t1023_hwa2 = {
	{
		[6] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
	},
};

static const struct clockgen_muxinfo t2080_hwa1 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo t2080_hwa2 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo t4240_hwa1 = {
	{
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo t4240_hwa4 = {
	{
		[2] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV2 },
		[3] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV3 },
		[4] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV4 },
		[5] = { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		[6] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV2 },
	},
};

static const struct clockgen_muxinfo t4240_hwa5 = {
	{
		[2] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV2 },
		[3] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV3 },
		[4] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV4 },
		[5] = { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		[6] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV2 },
		[7] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV3 },
	},
};

#define RCWSR7_FM1_CLK_SEL	0x40000000
#define RCWSR7_FM2_CLK_SEL	0x20000000
#define RCWSR7_HWA_ASYNC_DIV	0x04000000

static void __init p2041_init_periph(struct clockgen *cg)
{
	u32 reg;

	reg = ioread32be(&cg->guts->rcwsr[7]);

	if (reg & RCWSR7_FM1_CLK_SEL)
		cg->fman[0] = cg->pll[CGA_PLL2].div[PLL_DIV2].clk;
	else
		cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
}

static void __init p4080_init_periph(struct clockgen *cg)
{
	u32 reg;

	reg = ioread32be(&cg->guts->rcwsr[7]);

	if (reg & RCWSR7_FM1_CLK_SEL)
		cg->fman[0] = cg->pll[CGA_PLL3].div[PLL_DIV2].clk;
	else
		cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;

	if (reg & RCWSR7_FM2_CLK_SEL)
		cg->fman[1] = cg->pll[CGA_PLL3].div[PLL_DIV2].clk;
	else
		cg->fman[1] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
}

static void __init p5020_init_periph(struct clockgen *cg)
{
	u32 reg;
	int div = PLL_DIV2;

	reg = ioread32be(&cg->guts->rcwsr[7]);
	if (reg & RCWSR7_HWA_ASYNC_DIV)
		div = PLL_DIV4;

	if (reg & RCWSR7_FM1_CLK_SEL)
		cg->fman[0] = cg->pll[CGA_PLL2].div[div].clk;
	else
		cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
}

static void __init p5040_init_periph(struct clockgen *cg)
{
	u32 reg;
	int div = PLL_DIV2;

	reg = ioread32be(&cg->guts->rcwsr[7]);
	if (reg & RCWSR7_HWA_ASYNC_DIV)
		div = PLL_DIV4;

	if (reg & RCWSR7_FM1_CLK_SEL)
		cg->fman[0] = cg->pll[CGA_PLL3].div[div].clk;
	else
		cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;

	if (reg & RCWSR7_FM2_CLK_SEL)
		cg->fman[1] = cg->pll[CGA_PLL3].div[div].clk;
	else
		cg->fman[1] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
}

static void __init t1023_init_periph(struct clockgen *cg)
{
	cg->fman[0] = cg->hwaccel[1];
}

static void __init t1040_init_periph(struct clockgen *cg)
{
	cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk;
}

static void __init t2080_init_periph(struct clockgen *cg)
{
	cg->fman[0] = cg->hwaccel[0];
}

static void __init t4240_init_periph(struct clockgen *cg)
{
	cg->fman[0] = cg->hwaccel[3];
	cg->fman[1] = cg->hwaccel[4];
}

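/*
 * Per-SoC description table, matched against the clockgen node's
 * "compatible" string (or against "fsl,ls1021a-clockgen" for the old
 * ls1021a device-tree workaround) in clockgen_init().
 */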
static const struct clockgen_chipinfo chipinfo[] = {
	{
		.compat = "fsl,b4420-clockgen",
		.guts_compat = "fsl,b4860-device-config",
		.init_periph = t2080_init_periph,
		.cmux_groups = {
			&clockgen2_cmux_cga12, &clockgen2_cmux_cgb
		},
		.hwaccel = {
			&t2080_hwa1
		},
		.cmux_to_group = {
			0, 1, 1, 1, -1
		},
		.pll_mask = 0x3f,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,b4860-clockgen",
		.guts_compat = "fsl,b4860-device-config",
		.init_periph = t2080_init_periph,
		.cmux_groups = {
			&clockgen2_cmux_cga12, &clockgen2_cmux_cgb
		},
		.hwaccel = {
			&t2080_hwa1
		},
		.cmux_to_group = {
			0, 1, 1, 1, -1
		},
		.pll_mask = 0x3f,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,ls1021a-clockgen",
		.cmux_groups = {
			&t1023_cmux
		},
		.cmux_to_group = {
			0, -1
		},
		.pll_mask = 0x03,
	},
	{
		.compat = "fsl,ls1043a-clockgen",
		.init_periph = t2080_init_periph,
		.cmux_groups = {
			&t1040_cmux
		},
		.hwaccel = {
			&ls1043a_hwa1, &ls1043a_hwa2
		},
		.cmux_to_group = {
			0, -1
		},
		.pll_mask = 0x07,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,ls1046a-clockgen",
		.init_periph = t2080_init_periph,
		.cmux_groups = {
			&t1040_cmux
		},
		.hwaccel = {
			&ls1046a_hwa1, &ls1046a_hwa2
		},
		.cmux_to_group = {
			0, -1
		},
		.pll_mask = 0x07,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,ls1012a-clockgen",
		.cmux_groups = {
			&ls1012a_cmux
		},
		.cmux_to_group = {
			0, -1
		},
		.pll_mask = 0x03,
	},
	{
		.compat = "fsl,ls2080a-clockgen",
		.cmux_groups = {
			&clockgen2_cmux_cga12, &clockgen2_cmux_cgb
		},
		.cmux_to_group = {
			0, 0, 1, 1, -1
		},
		.pll_mask = 0x37,
		.flags = CG_VER3 | CG_LITTLE_ENDIAN,
	},
	{
		.compat = "fsl,p2041-clockgen",
		.guts_compat = "fsl,qoriq-device-config-1.0",
		.init_periph = p2041_init_periph,
		.cmux_groups = {
			&p2041_cmux_grp1, &p2041_cmux_grp2
		},
		.cmux_to_group = {
			0, 0, 1, 1, -1
		},
		.pll_mask = 0x07,
	},
	{
		.compat = "fsl,p3041-clockgen",
		.guts_compat = "fsl,qoriq-device-config-1.0",
		.init_periph = p2041_init_periph,
		.cmux_groups = {
			&p2041_cmux_grp1, &p2041_cmux_grp2
		},
		.cmux_to_group = {
			0, 0, 1, 1, -1
		},
		.pll_mask = 0x07,
	},
	{
		.compat = "fsl,p4080-clockgen",
		.guts_compat = "fsl,qoriq-device-config-1.0",
		.init_periph = p4080_init_periph,
		.cmux_groups = {
			&p4080_cmux_grp1, &p4080_cmux_grp2
		},
		.cmux_to_group = {
			0, 0, 0, 0, 1, 1, 1, 1
		},
		.pll_mask = 0x1f,
	},
	{
		.compat = "fsl,p5020-clockgen",
		.guts_compat = "fsl,qoriq-device-config-1.0",
		.init_periph = p5020_init_periph,
		.cmux_groups = {
			&p2041_cmux_grp1, &p2041_cmux_grp2
		},
		.cmux_to_group = {
			0, 1, -1
		},
		.pll_mask = 0x07,
	},
	{
		.compat = "fsl,p5040-clockgen",
		.guts_compat = "fsl,p5040-device-config",
		.init_periph = p5040_init_periph,
		.cmux_groups = {
			&p5040_cmux_grp1, &p5040_cmux_grp2
		},
		.cmux_to_group = {
			0, 0, 1, 1, -1
		},
		.pll_mask = 0x0f,
	},
	{
		.compat = "fsl,t1023-clockgen",
		.guts_compat = "fsl,t1023-device-config",
		.init_periph = t1023_init_periph,
		.cmux_groups = {
			&t1023_cmux
		},
		.hwaccel = {
			&t1023_hwa1, &t1023_hwa2
		},
		.cmux_to_group = {
			0, 0, -1
		},
		.pll_mask = 0x03,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,t1040-clockgen",
		.guts_compat = "fsl,t1040-device-config",
		.init_periph = t1040_init_periph,
		.cmux_groups = {
			&t1040_cmux
		},
		.cmux_to_group = {
			0, 0, 0, 0, -1
		},
		.pll_mask = 0x07,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,t2080-clockgen",
		.guts_compat = "fsl,t2080-device-config",
		.init_periph = t2080_init_periph,
		.cmux_groups = {
			&clockgen2_cmux_cga12
		},
		.hwaccel = {
			&t2080_hwa1, &t2080_hwa2
		},
		.cmux_to_group = {
			0, -1
		},
		.pll_mask = 0x07,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,t4240-clockgen",
		.guts_compat = "fsl,t4240-device-config",
		.init_periph = t4240_init_periph,
		.cmux_groups = {
			&clockgen2_cmux_cga, &clockgen2_cmux_cgb
		},
		.hwaccel = {
			&t4240_hwa1, NULL, NULL, &t4240_hwa4, &t4240_hwa5
		},
		.cmux_to_group = {
			0, 0, 1, -1
		},
		.pll_mask = 0x3f,
		.flags = CG_PLL_8BIT,
	},
	{},
};

struct mux_hwclock {
	struct clk_hw hw;
	struct clockgen *cg;
	const struct clockgen_muxinfo *info;
	u32 __iomem *reg;
	u8 parent_to_clksel[NUM_MUX_PARENTS];
	s8 clksel_to_parent[NUM_MUX_PARENTS];
	int num_parents;
};

#define to_mux_hwclock(p)	container_of(p, struct mux_hwclock, hw)
#define CLKSEL_MASK		0x78000000
#define CLKSEL_SHIFT		27

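/*
 * The mux parent is selected through the 4-bit CLKSEL field
 * (CLKSEL_MASK/CLKSEL_SHIFT) of the mux control register.
 */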
static int mux_set_parent(struct clk_hw *hw, u8 idx)
{
	struct mux_hwclock *hwc = to_mux_hwclock(hw);
	u32 clksel;

	if (idx >= hwc->num_parents)
		return -EINVAL;

	clksel = hwc->parent_to_clksel[idx];
	cg_out(hwc->cg, (clksel << CLKSEL_SHIFT) & CLKSEL_MASK, hwc->reg);

	return 0;
}

static u8 mux_get_parent(struct clk_hw *hw)
{
	struct mux_hwclock *hwc = to_mux_hwclock(hw);
	u32 clksel;
	s8 ret;

	clksel = (cg_in(hwc->cg, hwc->reg) & CLKSEL_MASK) >> CLKSEL_SHIFT;

	ret = hwc->clksel_to_parent[clksel];
	if (ret < 0) {
		pr_err("%s: mux at %p has bad clksel\n", __func__, hwc->reg);
		return 0;
	}

	return ret;
}

static const struct clk_ops cmux_ops = {
	.get_parent = mux_get_parent,
	.set_parent = mux_set_parent,
};

/*
 * Don't allow setting for now, as the clock options haven't been
 * sanitized for additional restrictions.
 */
static const struct clk_ops hwaccel_ops = {
	.get_parent = mux_get_parent,
};

static const struct clockgen_pll_div *get_pll_div(struct clockgen *cg,
						  struct mux_hwclock *hwc,
						  int idx)
{
	int pll, div;

	if (!(hwc->info->clksel[idx].flags & CLKSEL_VALID))
		return NULL;

	pll = hwc->info->clksel[idx].pll;
	div = hwc->info->clksel[idx].div;

	return &cg->pll[pll].div[div];
}

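/*
 * Build the mux's parent list from its clksel table, skipping entries
 * that are invalid or whose rate falls outside [min_rate, max_rate]
 * (or above pct80_rate for CLKSEL_80PCT entries), then register the
 * clock with the given ops.
 */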
static struct clk * __init create_mux_common(struct clockgen *cg,
					     struct mux_hwclock *hwc,
					     const struct clk_ops *ops,
					     unsigned long min_rate,
					     unsigned long max_rate,
					     unsigned long pct80_rate,
					     const char *fmt, int idx)
{
	struct clk_init_data init = {};
	struct clk *clk;
	const struct clockgen_pll_div *div;
	const char *parent_names[NUM_MUX_PARENTS];
	char name[32];
	int i, j;

	snprintf(name, sizeof(name), fmt, idx);

	for (i = 0, j = 0; i < NUM_MUX_PARENTS; i++) {
		unsigned long rate;

		hwc->clksel_to_parent[i] = -1;

		div = get_pll_div(cg, hwc, i);
		if (!div)
			continue;

		rate = clk_get_rate(div->clk);

		if (hwc->info->clksel[i].flags & CLKSEL_80PCT &&
		    rate > pct80_rate)
			continue;
		if (rate < min_rate)
			continue;
		if (rate > max_rate)
			continue;

		parent_names[j] = div->name;
		hwc->parent_to_clksel[j] = i;
		hwc->clksel_to_parent[i] = j;
		j++;
	}

	init.name = name;
	init.ops = ops;
	init.parent_names = parent_names;
	init.num_parents = hwc->num_parents = j;
	init.flags = 0;
	hwc->hw.init = &init;
	hwc->cg = cg;

	clk = clk_register(NULL, &hwc->hw);
	if (IS_ERR(clk)) {
		pr_err("%s: Couldn't register %s: %ld\n", __func__, name,
		       PTR_ERR(clk));
		kfree(hwc);
		return NULL;
	}

	return clk;
}

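/*
 * Core-cluster mux control registers are laid out at a 0x20 stride from
 * the start of the block, or from offset 0x70000 on CG_VER3 chips.
 */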
static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
{
	struct mux_hwclock *hwc;
	const struct clockgen_pll_div *div;
	unsigned long plat_rate, min_rate;
	u64 max_rate, pct80_rate;
	u32 clksel;

	hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
	if (!hwc)
		return NULL;

	if (cg->info.flags & CG_VER3)
		hwc->reg = cg->regs + 0x70000 + 0x20 * idx;
	else
		hwc->reg = cg->regs + 0x20 * idx;

	hwc->info = cg->info.cmux_groups[cg->info.cmux_to_group[idx]];

	/*
	 * Find the rate for the default clksel, and treat it as the
	 * maximum rated core frequency.  If this is an incorrect
	 * assumption, certain clock options (possibly including the
	 * default clksel) may be inappropriately excluded on certain
	 * chips.
	 */
	clksel = (cg_in(cg, hwc->reg) & CLKSEL_MASK) >> CLKSEL_SHIFT;
	div = get_pll_div(cg, hwc, clksel);
	if (!div) {
		kfree(hwc);
		return NULL;
	}

	max_rate = clk_get_rate(div->clk);
	pct80_rate = max_rate * 8;
	do_div(pct80_rate, 10);

	plat_rate = clk_get_rate(cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk);

	if (cg->info.flags & CG_CMUX_GE_PLAT)
		min_rate = plat_rate;
	else
		min_rate = plat_rate / 2;

	return create_mux_common(cg, hwc, &cmux_ops, min_rate, max_rate,
				 pct80_rate, "cg-cmux%d", idx);
}

static struct clk * __init create_one_hwaccel(struct clockgen *cg, int idx)
{
	struct mux_hwclock *hwc;

	hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
	if (!hwc)
		return NULL;

	hwc->reg = cg->regs + 0x20 * idx + 0x10;
	hwc->info = cg->info.hwaccel[idx];

	return create_mux_common(cg, hwc, &hwaccel_ops, 0, ULONG_MAX, 0,
				 "cg-hwaccel%d", idx);
}

static void __init create_muxes(struct clockgen *cg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cg->cmux); i++) {
		if (cg->info.cmux_to_group[i] < 0)
			break;
		if (cg->info.cmux_to_group[i] >=
		    ARRAY_SIZE(cg->info.cmux_groups)) {
			WARN_ON_ONCE(1);
			continue;
		}

		cg->cmux[i] = create_one_cmux(cg, i);
	}

	for (i = 0; i < ARRAY_SIZE(cg->hwaccel); i++) {
		if (!cg->info.hwaccel[i])
			continue;

		cg->hwaccel[i] = create_one_hwaccel(cg, i);
	}
}

static void __init clockgen_init(struct device_node *np);

/*
 * Legacy nodes may get probed before the parent clockgen node.
 * It is assumed that device trees with legacy nodes will not
 * contain a "clocks" property -- otherwise the input clocks may
 * not be initialized at this point.
 */
static void __init legacy_init_clockgen(struct device_node *np)
{
	if (!clockgen.node)
		clockgen_init(of_get_parent(np));
}

/* Legacy node */
static void __init core_mux_init(struct device_node *np)
{
	struct clk *clk;
	struct resource res;
	int idx, rc;

	legacy_init_clockgen(np);

	if (of_address_to_resource(np, 0, &res))
		return;

	idx = (res.start & 0xf0) >> 5;
	clk = clockgen.cmux[idx];

	rc = of_clk_add_provider(np, of_clk_src_simple_get, clk);
	if (rc) {
		pr_err("%s: Couldn't register clk provider for node %s: %d\n",
		       __func__, np->name, rc);
		return;
	}
}

static struct clk __init
*sysclk_from_fixed(struct device_node *node, const char *name)
{
	u32 rate;

	if (of_property_read_u32(node, "clock-frequency", &rate))
		return ERR_PTR(-ENODEV);

	return clk_register_fixed_rate(NULL, name, NULL, 0, rate);
}

static struct clk __init *input_clock(const char *name, struct clk *clk)
{
	const char *input_name;

	/* Register the input clock under the desired name. */
	input_name = __clk_get_name(clk);
	clk = clk_register_fixed_factor(NULL, name, input_name,
					0, 1, 1);
	if (IS_ERR(clk))
		pr_err("%s: Couldn't register %s: %ld\n", __func__, name,
		       PTR_ERR(clk));

	return clk;
}

static struct clk __init *input_clock_by_name(const char *name,
					      const char *dtname)
{
	struct clk *clk;

	clk = of_clk_get_by_name(clockgen.node, dtname);
	if (IS_ERR(clk))
		return clk;

	return input_clock(name, clk);
}

static struct clk __init *input_clock_by_index(const char *name, int idx)
{
	struct clk *clk;

	clk = of_clk_get(clockgen.node, 0);
	if (IS_ERR(clk))
		return clk;

	return input_clock(name, clk);
}

static struct clk * __init create_sysclk(const char *name)
{
	struct device_node *sysclk;
	struct clk *clk;

	clk = sysclk_from_fixed(clockgen.node, name);
	if (!IS_ERR(clk))
		return clk;

	clk = input_clock_by_name(name, "sysclk");
	if (!IS_ERR(clk))
		return clk;

	clk = input_clock_by_index(name, 0);
	if (!IS_ERR(clk))
		return clk;

	sysclk = of_get_child_by_name(clockgen.node, "sysclk");
	if (sysclk) {
		clk = sysclk_from_fixed(sysclk, name);
		if (!IS_ERR(clk))
			return clk;
	}

	pr_err("%s: No input sysclk\n", __func__);
	return NULL;
}

static struct clk * __init create_coreclk(const char *name)
{
	struct clk *clk;

	clk = input_clock_by_name(name, "coreclk");
	if (!IS_ERR(clk))
		return clk;

	/*
	 * This indicates a mix of legacy nodes with the new coreclk
	 * mechanism, which should never happen.  If this error occurs,
	 * don't use the wrong input clock just because coreclk isn't
	 * ready yet.
	 */
	if (WARN_ON(PTR_ERR(clk) == -EPROBE_DEFER))
		return clk;

	return NULL;
}

/* Legacy node */
static void __init sysclk_init(struct device_node *node)
{
	struct clk *clk;

	legacy_init_clockgen(node);

	clk = clockgen.sysclk;
	if (clk)
		of_clk_add_provider(node, of_clk_src_simple_get, clk);
}

#define PLL_KILL BIT(31)

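/*
 * Each enabled PLL is exposed as a set of fixed-factor clocks named
 * "cg-pllN-divM", one per supported divider, derived from the PLL
 * multiplier read back from hardware.
 */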
static void __init create_one_pll(struct clockgen *cg, int idx)
{
	u32 __iomem *reg;
	u32 mult;
	struct clockgen_pll *pll = &cg->pll[idx];
	const char *input = "cg-sysclk";
	int i;

	if (!(cg->info.pll_mask & (1 << idx)))
		return;

	if (cg->coreclk && idx != PLATFORM_PLL) {
		if (IS_ERR(cg->coreclk))
			return;

		input = "cg-coreclk";
	}

	if (cg->info.flags & CG_VER3) {
		switch (idx) {
		case PLATFORM_PLL:
			reg = cg->regs + 0x60080;
			break;
		case CGA_PLL1:
			reg = cg->regs + 0x80;
			break;
		case CGA_PLL2:
			reg = cg->regs + 0xa0;
			break;
		case CGB_PLL1:
			reg = cg->regs + 0x10080;
			break;
		case CGB_PLL2:
			reg = cg->regs + 0x100a0;
			break;
		default:
			WARN_ONCE(1, "index %d\n", idx);
			return;
		}
	} else {
		if (idx == PLATFORM_PLL)
			reg = cg->regs + 0xc00;
		else
			reg = cg->regs + 0x800 + 0x20 * (idx - 1);
	}

	/* Read the PLL multiplier ratio */
	mult = cg_in(cg, reg);

	/* Check if this PLL is disabled */
	if (mult & PLL_KILL) {
		pr_debug("%s(): pll %p disabled\n", __func__, reg);
		return;
	}

	if ((cg->info.flags & CG_VER3) ||
	    ((cg->info.flags & CG_PLL_8BIT) && idx != PLATFORM_PLL))
		mult = (mult & GENMASK(8, 1)) >> 1;
	else
		mult = (mult & GENMASK(6, 1)) >> 1;

	for (i = 0; i < ARRAY_SIZE(pll->div); i++) {
		struct clk *clk;

		snprintf(pll->div[i].name, sizeof(pll->div[i].name),
			 "cg-pll%d-div%d", idx, i + 1);

		clk = clk_register_fixed_factor(NULL,
				pll->div[i].name, input, 0, mult, i + 1);
		if (IS_ERR(clk)) {
			pr_err("%s: %s: register failed %ld\n",
			       __func__, pll->div[i].name, PTR_ERR(clk));
			continue;
		}

		pll->div[i].clk = clk;
	}
}

static void __init create_plls(struct clockgen *cg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cg->pll); i++)
		create_one_pll(cg, i);
}

static void __init legacy_pll_init(struct device_node *np, int idx)
{
	struct clockgen_pll *pll;
	struct clk_onecell_data *onecell_data;
	struct clk **subclks;
	int count, rc;

	legacy_init_clockgen(np);

	pll = &clockgen.pll[idx];
	count = of_property_count_strings(np, "clock-output-names");

	BUILD_BUG_ON(ARRAY_SIZE(pll->div) < 4);
	subclks = kcalloc(4, sizeof(struct clk *), GFP_KERNEL);
	if (!subclks)
		return;

	onecell_data = kmalloc(sizeof(*onecell_data), GFP_KERNEL);
	if (!onecell_data)
		goto err_clks;

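	/*
	 * With three or fewer output names, the legacy binding's third
	 * output is the divide-by-4 clock (div[3]) rather than div[2].
	 */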
	if (count <= 3) {
		subclks[0] = pll->div[0].clk;
		subclks[1] = pll->div[1].clk;
		subclks[2] = pll->div[3].clk;
	} else {
		subclks[0] = pll->div[0].clk;
		subclks[1] = pll->div[1].clk;
		subclks[2] = pll->div[2].clk;
		subclks[3] = pll->div[3].clk;
	}

	onecell_data->clks = subclks;
	onecell_data->clk_num = count;

	rc = of_clk_add_provider(np, of_clk_src_onecell_get, onecell_data);
	if (rc) {
		pr_err("%s: Couldn't register clk provider for node %s: %d\n",
		       __func__, np->name, rc);
		goto err_cell;
	}

	return;
err_cell:
	kfree(onecell_data);
err_clks:
	kfree(subclks);
}

/* Legacy node */
static void __init pltfrm_pll_init(struct device_node *np)
{
	legacy_pll_init(np, PLATFORM_PLL);
}

/* Legacy node */
static void __init core_pll_init(struct device_node *np)
{
	struct resource res;
	int idx;

	if (of_address_to_resource(np, 0, &res))
		return;

	if ((res.start & 0xfff) == 0xc00) {
		/*
		 * ls1021a devtree labels the platform PLL
		 * with the core PLL compatible
		 */
		pltfrm_pll_init(np);
	} else {
		idx = (res.start & 0xf0) >> 5;
		legacy_pll_init(np, CGA_PLL1 + idx);
	}
}

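/*
 * Clock lookup for consumers.  The two specifier cells are <type index>:
 * 0 = sysclk, 1 = cmux, 2 = hwaccel, 3 = fman, 4 = platform PLL divider,
 * 5 = coreclk.  For example, clocks = <&clockgen 4 0> refers to the
 * platform PLL divide-by-1 output.
 */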
static struct clk *clockgen_clk_get(struct of_phandle_args *clkspec, void *data)
{
	struct clockgen *cg = data;
	struct clk *clk;
	struct clockgen_pll *pll;
	u32 type, idx;

	if (clkspec->args_count < 2) {
		pr_err("%s: insufficient phandle args\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	type = clkspec->args[0];
	idx = clkspec->args[1];

	switch (type) {
	case 0:
		if (idx != 0)
			goto bad_args;
		clk = cg->sysclk;
		break;
	case 1:
		if (idx >= ARRAY_SIZE(cg->cmux))
			goto bad_args;
		clk = cg->cmux[idx];
		break;
	case 2:
		if (idx >= ARRAY_SIZE(cg->hwaccel))
			goto bad_args;
		clk = cg->hwaccel[idx];
		break;
	case 3:
		if (idx >= ARRAY_SIZE(cg->fman))
			goto bad_args;
		clk = cg->fman[idx];
		break;
	case 4:
		pll = &cg->pll[PLATFORM_PLL];
		if (idx >= ARRAY_SIZE(pll->div))
			goto bad_args;
		clk = pll->div[idx].clk;
		break;
	case 5:
		if (idx != 0)
			goto bad_args;
		clk = cg->coreclk;
		if (IS_ERR(clk))
			clk = NULL;
		break;
	default:
		goto bad_args;
	}

	if (!clk)
		return ERR_PTR(-ENOENT);
	return clk;

bad_args:
	pr_err("%s: Bad phandle args %u %u\n", __func__, type, idx);
	return ERR_PTR(-EINVAL);
}

#ifdef CONFIG_PPC
#include <asm/mpc85xx.h>

static const u32 a4510_svrs[] __initconst = {
	(SVR_P2040 << 8) | 0x10,	/* P2040 1.0 */
	(SVR_P2040 << 8) | 0x11,	/* P2040 1.1 */
	(SVR_P2041 << 8) | 0x10,	/* P2041 1.0 */
	(SVR_P2041 << 8) | 0x11,	/* P2041 1.1 */
	(SVR_P3041 << 8) | 0x10,	/* P3041 1.0 */
	(SVR_P3041 << 8) | 0x11,	/* P3041 1.1 */
	(SVR_P4040 << 8) | 0x20,	/* P4040 2.0 */
	(SVR_P4080 << 8) | 0x20,	/* P4080 2.0 */
	(SVR_P5010 << 8) | 0x10,	/* P5010 1.0 */
	(SVR_P5010 << 8) | 0x20,	/* P5010 2.0 */
	(SVR_P5020 << 8) | 0x10,	/* P5020 1.0 */
	(SVR_P5021 << 8) | 0x10,	/* P5021 1.0 */
	(SVR_P5040 << 8) | 0x10,	/* P5040 1.0 */
};

#define SVR_SECURITY	0x80000	/* The Security (E) bit */

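/*
 * SoC revisions affected by erratum A-004510, on which the cmux frequency
 * must not drop below the platform PLL rate (see CG_CMUX_GE_PLAT).
 */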
static bool __init has_erratum_a4510(void)
{
	u32 svr = mfspr(SPRN_SVR);
	int i;

	svr &= ~SVR_SECURITY;

	for (i = 0; i < ARRAY_SIZE(a4510_svrs); i++) {
		if (svr == a4510_svrs[i])
			return true;
	}

	return false;
}
#else
static bool __init has_erratum_a4510(void)
{
	return false;
}
#endif

static void __init clockgen_init(struct device_node *np)
{
	int i, ret;
	bool is_old_ls1021a = false;

	/* May have already been called by a legacy probe */
	if (clockgen.node)
		return;

	clockgen.node = np;
	clockgen.regs = of_iomap(np, 0);
	if (!clockgen.regs &&
	    of_device_is_compatible(of_root, "fsl,ls1021a")) {
		/* Compatibility hack for old, broken device trees */
		clockgen.regs = ioremap(0x1ee1000, 0x1000);
		is_old_ls1021a = true;
	}
	if (!clockgen.regs) {
		pr_err("%s(): %s: of_iomap() failed\n", __func__, np->name);
		return;
	}

	for (i = 0; i < ARRAY_SIZE(chipinfo); i++) {
		if (of_device_is_compatible(np, chipinfo[i].compat))
			break;
		if (is_old_ls1021a &&
		    !strcmp(chipinfo[i].compat, "fsl,ls1021a-clockgen"))
			break;
	}

	if (i == ARRAY_SIZE(chipinfo)) {
		pr_err("%s: unknown clockgen node %s\n", __func__,
		       np->full_name);
		goto err;
	}
	clockgen.info = chipinfo[i];

	if (clockgen.info.guts_compat) {
		struct device_node *guts;

		guts = of_find_compatible_node(NULL, NULL,
					       clockgen.info.guts_compat);
		if (guts) {
			clockgen.guts = of_iomap(guts, 0);
			if (!clockgen.guts) {
				pr_err("%s: Couldn't map %s regs\n", __func__,
				       guts->full_name);
			}
		}

	}

	if (has_erratum_a4510())
		clockgen.info.flags |= CG_CMUX_GE_PLAT;

	clockgen.sysclk = create_sysclk("cg-sysclk");
	clockgen.coreclk = create_coreclk("cg-coreclk");
	create_plls(&clockgen);
	create_muxes(&clockgen);

	if (clockgen.info.init_periph)
		clockgen.info.init_periph(&clockgen);

	ret = of_clk_add_provider(np, clockgen_clk_get, &clockgen);
	if (ret) {
		pr_err("%s: Couldn't register clk provider for node %s: %d\n",
		       __func__, np->name, ret);
	}

	return;
err:
	iounmap(clockgen.regs);
	clockgen.regs = NULL;
}

CLK_OF_DECLARE(qoriq_clockgen_1, "fsl,qoriq-clockgen-1.0", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_2, "fsl,qoriq-clockgen-2.0", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1012a, "fsl,ls1012a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1021a, "fsl,ls1021a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1043a, "fsl,ls1043a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1046a, "fsl,ls1046a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls2080a, "fsl,ls2080a-clockgen", clockgen_init);

/* Legacy nodes */
CLK_OF_DECLARE(qoriq_sysclk_1, "fsl,qoriq-sysclk-1.0", sysclk_init);
CLK_OF_DECLARE(qoriq_sysclk_2, "fsl,qoriq-sysclk-2.0", sysclk_init);
CLK_OF_DECLARE(qoriq_core_pll_1, "fsl,qoriq-core-pll-1.0", core_pll_init);
CLK_OF_DECLARE(qoriq_core_pll_2, "fsl,qoriq-core-pll-2.0", core_pll_init);
CLK_OF_DECLARE(qoriq_core_mux_1, "fsl,qoriq-core-mux-1.0", core_mux_init);
CLK_OF_DECLARE(qoriq_core_mux_2, "fsl,qoriq-core-mux-2.0", core_mux_init);
CLK_OF_DECLARE(qoriq_pltfrm_pll_1, "fsl,qoriq-platform-pll-1.0", pltfrm_pll_init);
CLK_OF_DECLARE(qoriq_pltfrm_pll_2, "fsl,qoriq-platform-pll-2.0", pltfrm_pll_init);