/*
 * Copyright (C) 2014 STMicroelectronics R&D Ltd
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

/*
 * Authors:
 * Stephen Gallimore <stephen.gallimore@st.com>,
 * Pankaj Dev <pankaj.dev@st.com>.
 */

#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>

#include "clkgen.h"

/*
 * Maximum input clock to the PLL before we divide it down by 2,
 * although in practice this has never been seen on real systems.
 */
#define QUADFS_NDIV_THRESHOLD 30000000

#define PLL_BW_GOODREF		(0L)
#define PLL_BW_VBADREF		(1L)
#define PLL_BW_BADREF		(2L)
#define PLL_BW_VGOODREF		(3L)

#define QUADFS_MAX_CHAN 4

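/*
 * One set of FS660C32 hardware settings: ndiv programs the PLL multiplier
 * (VCO = input * (ndiv + 16)), mdiv/pe give the coarse/fine frequency
 * adjustment of a channel, sdiv selects the output divider (divide by
 * 2^sdiv) and nsdiv controls the optional divide-by-3 (active low, see the
 * fsynth documentation further down).
 */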
struct stm_fs {
	unsigned long ndiv;
	unsigned long mdiv;
	unsigned long pe;
	unsigned long sdiv;
	unsigned long nsdiv;
};

struct clkgen_quadfs_data {
	bool reset_present;
	bool bwfilter_present;
	bool lockstatus_present;
	bool powerup_polarity;
	bool standby_polarity;
	bool nsdiv_present;
	bool nrst_present;
	struct clkgen_field ndiv;
	struct clkgen_field ref_bw;
	struct clkgen_field nreset;
	struct clkgen_field npda;
	struct clkgen_field lock_status;

	struct clkgen_field nrst[QUADFS_MAX_CHAN];
	struct clkgen_field nsb[QUADFS_MAX_CHAN];
	struct clkgen_field en[QUADFS_MAX_CHAN];
	struct clkgen_field mdiv[QUADFS_MAX_CHAN];
	struct clkgen_field pe[QUADFS_MAX_CHAN];
	struct clkgen_field sdiv[QUADFS_MAX_CHAN];
	struct clkgen_field nsdiv[QUADFS_MAX_CHAN];

	const struct clk_ops *pll_ops;
	int (*get_params)(unsigned long, unsigned long, struct stm_fs *);
	int (*get_rate)(unsigned long, const struct stm_fs *,
			unsigned long *);
};

static const struct clk_ops st_quadfs_pll_c32_ops;
static const struct clk_ops st_quadfs_fs660c32_ops;

static int clk_fs660c32_dig_get_params(unsigned long input,
		unsigned long output, struct stm_fs *fs);
static int clk_fs660c32_dig_get_rate(unsigned long, const struct stm_fs *,
		unsigned long *);

static const struct clkgen_quadfs_data st_fs660c32_C = {
	.nrst_present = true,
	.nrst = { CLKGEN_FIELD(0x2f0, 0x1, 0),
		  CLKGEN_FIELD(0x2f0, 0x1, 1),
		  CLKGEN_FIELD(0x2f0, 0x1, 2),
		  CLKGEN_FIELD(0x2f0, 0x1, 3) },
	.npda = CLKGEN_FIELD(0x2f0, 0x1, 12),
	.nsb = { CLKGEN_FIELD(0x2f0, 0x1, 8),
		 CLKGEN_FIELD(0x2f0, 0x1, 9),
		 CLKGEN_FIELD(0x2f0, 0x1, 10),
		 CLKGEN_FIELD(0x2f0, 0x1, 11) },
	.nsdiv_present = true,
	.nsdiv = { CLKGEN_FIELD(0x304, 0x1, 24),
		   CLKGEN_FIELD(0x308, 0x1, 24),
		   CLKGEN_FIELD(0x30c, 0x1, 24),
		   CLKGEN_FIELD(0x310, 0x1, 24) },
	.mdiv = { CLKGEN_FIELD(0x304, 0x1f, 15),
		  CLKGEN_FIELD(0x308, 0x1f, 15),
		  CLKGEN_FIELD(0x30c, 0x1f, 15),
		  CLKGEN_FIELD(0x310, 0x1f, 15) },
	.en = { CLKGEN_FIELD(0x2fc, 0x1, 0),
		CLKGEN_FIELD(0x2fc, 0x1, 1),
		CLKGEN_FIELD(0x2fc, 0x1, 2),
		CLKGEN_FIELD(0x2fc, 0x1, 3) },
	.ndiv = CLKGEN_FIELD(0x2f4, 0x7, 16),
	.pe = { CLKGEN_FIELD(0x304, 0x7fff, 0),
		CLKGEN_FIELD(0x308, 0x7fff, 0),
		CLKGEN_FIELD(0x30c, 0x7fff, 0),
		CLKGEN_FIELD(0x310, 0x7fff, 0) },
	.sdiv = { CLKGEN_FIELD(0x304, 0xf, 20),
		  CLKGEN_FIELD(0x308, 0xf, 20),
		  CLKGEN_FIELD(0x30c, 0xf, 20),
		  CLKGEN_FIELD(0x310, 0xf, 20) },
	.lockstatus_present = true,
	.lock_status = CLKGEN_FIELD(0x2f0, 0x1, 24),
	.powerup_polarity = 1,
	.standby_polarity = 1,
	.pll_ops = &st_quadfs_pll_c32_ops,
	.get_params = clk_fs660c32_dig_get_params,
	.get_rate = clk_fs660c32_dig_get_rate,
};

static const struct clkgen_quadfs_data st_fs660c32_D = {
	.nrst_present = true,
	.nrst = { CLKGEN_FIELD(0x2a0, 0x1, 0),
		  CLKGEN_FIELD(0x2a0, 0x1, 1),
		  CLKGEN_FIELD(0x2a0, 0x1, 2),
		  CLKGEN_FIELD(0x2a0, 0x1, 3) },
	.ndiv = CLKGEN_FIELD(0x2a4, 0x7, 16),
	.pe = { CLKGEN_FIELD(0x2b4, 0x7fff, 0),
		CLKGEN_FIELD(0x2b8, 0x7fff, 0),
		CLKGEN_FIELD(0x2bc, 0x7fff, 0),
		CLKGEN_FIELD(0x2c0, 0x7fff, 0) },
	.sdiv = { CLKGEN_FIELD(0x2b4, 0xf, 20),
		  CLKGEN_FIELD(0x2b8, 0xf, 20),
		  CLKGEN_FIELD(0x2bc, 0xf, 20),
		  CLKGEN_FIELD(0x2c0, 0xf, 20) },
	.npda = CLKGEN_FIELD(0x2a0, 0x1, 12),
	.nsb = { CLKGEN_FIELD(0x2a0, 0x1, 8),
		 CLKGEN_FIELD(0x2a0, 0x1, 9),
		 CLKGEN_FIELD(0x2a0, 0x1, 10),
		 CLKGEN_FIELD(0x2a0, 0x1, 11) },
	.nsdiv_present = true,
	.nsdiv = { CLKGEN_FIELD(0x2b4, 0x1, 24),
		   CLKGEN_FIELD(0x2b8, 0x1, 24),
		   CLKGEN_FIELD(0x2bc, 0x1, 24),
		   CLKGEN_FIELD(0x2c0, 0x1, 24) },
	.mdiv = { CLKGEN_FIELD(0x2b4, 0x1f, 15),
		  CLKGEN_FIELD(0x2b8, 0x1f, 15),
		  CLKGEN_FIELD(0x2bc, 0x1f, 15),
		  CLKGEN_FIELD(0x2c0, 0x1f, 15) },
	.en = { CLKGEN_FIELD(0x2ac, 0x1, 0),
		CLKGEN_FIELD(0x2ac, 0x1, 1),
		CLKGEN_FIELD(0x2ac, 0x1, 2),
		CLKGEN_FIELD(0x2ac, 0x1, 3) },
	.lockstatus_present = true,
	.lock_status = CLKGEN_FIELD(0x2A0, 0x1, 24),
	.powerup_polarity = 1,
	.standby_polarity = 1,
	.pll_ops = &st_quadfs_pll_c32_ops,
	.get_params = clk_fs660c32_dig_get_params,
	.get_rate = clk_fs660c32_dig_get_rate,
};

/**
 * DOC: A Frequency Synthesizer that multiplies its input clock by a fixed factor
 *
 * Traits of this clock:
 * prepare - clk_(un)prepare only ensures parent is (un)prepared
 * enable - clk_enable and clk_disable are functional & control the Fsyn
 * rate - inherits rate from parent. set_rate/round_rate/recalc_rate are functional
 * parent - fixed parent. No clk_set_parent support
 */

/**
 * struct st_clk_quadfs_pll - A pll which outputs a fixed multiplier of
 *                            its parent clock, found inside a type of
 *                            ST quad channel frequency synthesizer block
 *
 * @hw: handle between common and hardware-specific interfaces.
 * @regs_base: base address of the configuration registers.
 * @lock: spinlock protecting register access.
 * @data: hardware-specific description of this quadfs variant.
 * @ndiv: cached ndiv control value, programmed on enable and set_rate.
 *
 */
struct st_clk_quadfs_pll {
	struct clk_hw hw;
	void __iomem *regs_base;
	spinlock_t *lock;
	struct clkgen_quadfs_data *data;
	u32 ndiv;
};

#define to_quadfs_pll(_hw) container_of(_hw, struct st_clk_quadfs_pll, hw)

static int quadfs_pll_enable(struct clk_hw *hw)
{
	struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw);
	unsigned long flags = 0, timeout = jiffies + msecs_to_jiffies(10);

	if (pll->lock)
		spin_lock_irqsave(pll->lock, flags);

	/*
	 * Bring block out of reset if we have reset control.
	 */
	if (pll->data->reset_present)
		CLKGEN_WRITE(pll, nreset, 1);

	/*
	 * Use a fixed input clock noise bandwidth filter for the moment
	 */
	if (pll->data->bwfilter_present)
		CLKGEN_WRITE(pll, ref_bw, PLL_BW_GOODREF);

	CLKGEN_WRITE(pll, ndiv, pll->ndiv);

	/*
	 * Power up the PLL
	 */
	CLKGEN_WRITE(pll, npda, !pll->data->powerup_polarity);

	if (pll->lock)
		spin_unlock_irqrestore(pll->lock, flags);

	if (pll->data->lockstatus_present)
		while (!CLKGEN_READ(pll, lock_status)) {
			if (time_after(jiffies, timeout))
				return -ETIMEDOUT;
			cpu_relax();
		}

	return 0;
}

static void quadfs_pll_disable(struct clk_hw *hw)
{
	struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw);
	unsigned long flags = 0;

	if (pll->lock)
		spin_lock_irqsave(pll->lock, flags);

	/*
	 * Powerdown the PLL and then put block into soft reset if we have
	 * reset control.
	 */
	CLKGEN_WRITE(pll, npda, pll->data->powerup_polarity);

	if (pll->data->reset_present)
		CLKGEN_WRITE(pll, nreset, 0);

	if (pll->lock)
		spin_unlock_irqrestore(pll->lock, flags);
}

static int quadfs_pll_is_enabled(struct clk_hw *hw)
{
	struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw);
	u32 npda = CLKGEN_READ(pll, npda);

	return pll->data->powerup_polarity ? !npda : !!npda;
}

static int clk_fs660c32_vco_get_rate(unsigned long input, struct stm_fs *fs,
		unsigned long *rate)
{
	unsigned long nd = fs->ndiv + 16; /* ndiv value */

	*rate = input * nd;

	return 0;
}

static unsigned long quadfs_pll_fs660c32_recalc_rate(struct clk_hw *hw,
		unsigned long parent_rate)
{
	struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw);
	unsigned long rate = 0;
	struct stm_fs params;

	params.ndiv = CLKGEN_READ(pll, ndiv);
	if (clk_fs660c32_vco_get_rate(parent_rate, &params, &rate))
		pr_err("%s:%s error calculating rate\n",
		       clk_hw_get_name(hw), __func__);

	pll->ndiv = params.ndiv;

	return rate;
}

static int clk_fs660c32_vco_get_params(unsigned long input,
		unsigned long output, struct stm_fs *fs)
{
	/*
	 * Formula:
	 *   VCO frequency = (fin x ndiv) / pdiv
	 *   ndiv = VCOfreq * pdiv / fin
	 */
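	/*
	 * Illustrative example (values not taken from any datasheet): with
	 * fin = 30 MHz and a requested VCO of 600 MHz, pdiv = 1 gives
	 * ndiv = 600000 * 1 / 30000 = 20, stored as 20 - 16 = 4 in the
	 * register; reading it back, 30 MHz * (4 + 16) = 600 MHz.
	 */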
	unsigned long pdiv = 1, n;

	/* Output clock range: 384 MHz to 660 MHz */
	if (output < 384000000 || output > 660000000)
		return -EINVAL;

	if (input > 40000000)
		/*
		 * This means that PDIV would be 2 instead of 1.
		 * Not supported today.
		 */
		return -EINVAL;

	input /= 1000;
	output /= 1000;

	n = output * pdiv / input;
	if (n < 16)
		n = 16;
	fs->ndiv = n - 16; /* Converting formula value to reg value */

	return 0;
}

static long quadfs_pll_fs660c32_round_rate(struct clk_hw *hw,
					   unsigned long rate,
					   unsigned long *prate)
{
	struct stm_fs params;

	if (clk_fs660c32_vco_get_params(*prate, rate, &params))
		return rate;

	clk_fs660c32_vco_get_rate(*prate, &params, &rate);

	pr_debug("%s: %s new rate %ld [ndiv=%u]\n",
		 __func__, clk_hw_get_name(hw),
		 rate, (unsigned int)params.ndiv);

	return rate;
}

static int quadfs_pll_fs660c32_set_rate(struct clk_hw *hw, unsigned long rate,
					unsigned long parent_rate)
{
	struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw);
	struct stm_fs params;
	unsigned long hwrate = 0;
	unsigned long flags = 0;
	int ret;

	if (!rate || !parent_rate)
		return -EINVAL;

	ret = clk_fs660c32_vco_get_params(parent_rate, rate, &params);
	if (ret)
		return ret;

	clk_fs660c32_vco_get_rate(parent_rate, &params, &hwrate);

	pr_debug("%s: %s new rate %lu [ndiv=0x%x]\n",
		 __func__, clk_hw_get_name(hw),
		 hwrate, (unsigned int)params.ndiv);

	if (!hwrate)
		return -EINVAL;

	pll->ndiv = params.ndiv;

	if (pll->lock)
		spin_lock_irqsave(pll->lock, flags);

	CLKGEN_WRITE(pll, ndiv, pll->ndiv);

	if (pll->lock)
		spin_unlock_irqrestore(pll->lock, flags);

	return 0;
}

static const struct clk_ops st_quadfs_pll_c32_ops = {
	.enable = quadfs_pll_enable,
	.disable = quadfs_pll_disable,
	.is_enabled = quadfs_pll_is_enabled,
	.recalc_rate = quadfs_pll_fs660c32_recalc_rate,
	.round_rate = quadfs_pll_fs660c32_round_rate,
	.set_rate = quadfs_pll_fs660c32_set_rate,
};

static struct clk * __init st_clk_register_quadfs_pll(
		const char *name, const char *parent_name,
		struct clkgen_quadfs_data *quadfs, void __iomem *reg,
		spinlock_t *lock)
{
	struct st_clk_quadfs_pll *pll;
	struct clk *clk;
	struct clk_init_data init;

	/*
	 * Sanity check required pointers.
	 */
	if (WARN_ON(!name || !parent_name))
		return ERR_PTR(-EINVAL);

	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
	if (!pll)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = quadfs->pll_ops;
	init.flags = CLK_IS_BASIC | CLK_GET_RATE_NOCACHE;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	pll->data = quadfs;
	pll->regs_base = reg;
	pll->lock = lock;
	pll->hw.init = &init;

	clk = clk_register(NULL, &pll->hw);

	if (IS_ERR(clk))
		kfree(pll);

	return clk;
}

/**
 * DOC: A digital frequency synthesizer
 *
 * Traits of this clock:
 * prepare - clk_(un)prepare only ensures parent is (un)prepared
 * enable - clk_enable and clk_disable are functional
 * rate - set rate is functional
 * parent - fixed parent. No clk_set_parent support
 */

/**
 * struct st_clk_quadfs_fsynth - One clock output from a four channel digital
 *                               frequency synthesizer (fsynth) block.
 *
 * @hw: handle between common and hardware-specific interfaces
 *
 * @nsb: regmap field in the output control register for the digital
 *       standby of this fsynth channel. This control is active low so
 *       the channel is in standby when the control bit is cleared.
 *
 * @nsdiv: regmap field in the output control register for the optional
 *         divide by 3 of this fsynth channel. This control is active low
 *         so the divide by 3 is active when the control bit is cleared
 *         and the divide is bypassed when the bit is set.
 */
struct st_clk_quadfs_fsynth {
	struct clk_hw hw;
	void __iomem *regs_base;
	spinlock_t *lock;
	struct clkgen_quadfs_data *data;

	u32 chan;
	/*
	 * Cached hardware values from set_rate so we can program the
	 * hardware in enable. There are two reasons for this:
	 *
	 * 1. The registers may not be writable until the parent has been
	 *    enabled.
	 *
	 * 2. It restores the clock rate when a driver does an enable
	 *    on PM restore, after a suspend to RAM has lost the hardware
	 *    setup.
	 */
	u32 md;
	u32 pe;
	u32 sdiv;
	u32 nsdiv;
};

#define to_quadfs_fsynth(_hw) \
	container_of(_hw, struct st_clk_quadfs_fsynth, hw)

static void quadfs_fsynth_program_enable(struct st_clk_quadfs_fsynth *fs)
{
	/*
	 * Pulse the program enable register lsb to make the hardware take
	 * notice of the new md/pe values with a glitchless transition.
	 */
	CLKGEN_WRITE(fs, en[fs->chan], 1);
	CLKGEN_WRITE(fs, en[fs->chan], 0);
}

static void quadfs_fsynth_program_rate(struct st_clk_quadfs_fsynth *fs)
{
	unsigned long flags = 0;

	/*
	 * Ensure the md/pe parameters are ignored while we are
	 * reprogramming them so we can get a glitchless change
	 * when fine tuning the speed of a running clock.
	 */
	CLKGEN_WRITE(fs, en[fs->chan], 0);

	CLKGEN_WRITE(fs, mdiv[fs->chan], fs->md);
	CLKGEN_WRITE(fs, pe[fs->chan], fs->pe);
	CLKGEN_WRITE(fs, sdiv[fs->chan], fs->sdiv);

	if (fs->lock)
		spin_lock_irqsave(fs->lock, flags);

	if (fs->data->nsdiv_present)
		CLKGEN_WRITE(fs, nsdiv[fs->chan], fs->nsdiv);

	if (fs->lock)
		spin_unlock_irqrestore(fs->lock, flags);
}

static int quadfs_fsynth_enable(struct clk_hw *hw)
{
	struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
	unsigned long flags = 0;

	pr_debug("%s: %s\n", __func__, clk_hw_get_name(hw));

	quadfs_fsynth_program_rate(fs);

	if (fs->lock)
		spin_lock_irqsave(fs->lock, flags);

	CLKGEN_WRITE(fs, nsb[fs->chan], !fs->data->standby_polarity);

	if (fs->data->nrst_present)
		CLKGEN_WRITE(fs, nrst[fs->chan], 0);

	if (fs->lock)
		spin_unlock_irqrestore(fs->lock, flags);

	quadfs_fsynth_program_enable(fs);

	return 0;
}

static void quadfs_fsynth_disable(struct clk_hw *hw)
{
	struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
	unsigned long flags = 0;

	pr_debug("%s: %s\n", __func__, clk_hw_get_name(hw));

	if (fs->lock)
		spin_lock_irqsave(fs->lock, flags);

	CLKGEN_WRITE(fs, nsb[fs->chan], fs->data->standby_polarity);

	if (fs->lock)
		spin_unlock_irqrestore(fs->lock, flags);
}

static int quadfs_fsynth_is_enabled(struct clk_hw *hw)
{
	struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
	u32 nsb = CLKGEN_READ(fs, nsb[fs->chan]);

	pr_debug("%s: %s enable bit = 0x%x\n",
		 __func__, clk_hw_get_name(hw), nsb);

	return fs->data->standby_polarity ? !nsb : !!nsb;
}

#define P20 (uint64_t)(1 << 20)

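/*
 * Channel output frequency, with s = 2^sdiv and ns = 1 or 3:
 *
 *   rate = (input * 2^20 * 32) / ((2^20 * (32 + mdiv) + 32 * pe) * s * ns)
 *
 * Illustrative sanity check (not from a datasheet): mdiv = 0, pe = 0,
 * sdiv = 2 and nsdiv = 1 reduce this to input / 4, so a 600 MHz VCO
 * gives a 150 MHz output.
 */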
static int clk_fs660c32_dig_get_rate(unsigned long input,
		const struct stm_fs *fs, unsigned long *rate)
{
	unsigned long s = (1 << fs->sdiv);
	unsigned long ns;
	uint64_t res;

	/*
	 * 'nsdiv' is a register value ('BIN') which is translated
	 * to a decimal value according to following rules.
	 *
	 *     nsdiv       ns.dec
	 *       0           3
	 *       1           1
	 */
	ns = (fs->nsdiv == 1) ? 1 : 3;

	res = (P20 * (32 + fs->mdiv) + 32 * fs->pe) * s * ns;
	*rate = (unsigned long)div64_u64(input * P20 * 32, res);

	return 0;
}

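/*
 * For a candidate mdiv (m) and sdiv (si), compute the pe value that would
 * hit the requested output and, if the resulting rate deviates less from
 * the target than the best solution so far, record the candidate in *fs.
 * Returns 1 when the required pe does not fit in the 15-bit field.
 */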
static int clk_fs660c32_get_pe(int m, int si, unsigned long *deviation,
		signed long input, unsigned long output, uint64_t *p,
		struct stm_fs *fs)
{
	unsigned long new_freq, new_deviation;
	struct stm_fs fs_tmp;
	uint64_t val;

	val = (uint64_t)output << si;

	*p = (uint64_t)input * P20 - (32LL + (uint64_t)m) * val * (P20 / 32LL);

	*p = div64_u64(*p, val);

	if (*p > 32767LL)
		return 1;

	fs_tmp.mdiv = (unsigned long) m;
	fs_tmp.pe = (unsigned long)*p;
	fs_tmp.sdiv = si;
	fs_tmp.nsdiv = 1;

	clk_fs660c32_dig_get_rate(input, &fs_tmp, &new_freq);

	new_deviation = abs(output - new_freq);

	if (new_deviation < *deviation) {
		fs->mdiv = m;
		fs->pe = (unsigned long)*p;
		fs->sdiv = si;
		fs->nsdiv = 1;
		*deviation = new_deviation;
	}
	return 0;
}

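/*
 * Find the (mdiv, pe, sdiv) triple that best approximates the requested
 * output: for each sdiv (0..8) the mdiv boundaries 0 and 31 are probed
 * first to skip hopeless ranges, then mdiv 1..30 are searched, and finally
 * pe is fine tuned by +/-2 around the best value found.
 */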
static int clk_fs660c32_dig_get_params(unsigned long input,
		unsigned long output, struct stm_fs *fs)
{
	int si;	/* sdiv_reg (8 downto 0) */
	int m;	/* md value */
	unsigned long new_freq, new_deviation;
	/* initial condition to say: "infinite deviation" */
	unsigned long deviation = ~0;
	uint64_t p, p1, p2;	/* pe value */
	int r1, r2;

	struct stm_fs fs_tmp;

	for (si = 0; (si <= 8) && deviation; si++) {

		/* Boundary test to avoid useless iteration */
		r1 = clk_fs660c32_get_pe(0, si, &deviation,
				input, output, &p1, fs);
		r2 = clk_fs660c32_get_pe(31, si, &deviation,
				input, output, &p2, fs);

		/* No solution */
		if (r1 && r2 && (p1 > p2))
			continue;

		/* Try to find best deviation */
		for (m = 1; (m < 31) && deviation; m++)
			clk_fs660c32_get_pe(m, si, &deviation,
					input, output, &p, fs);
	}

	if (deviation == ~0) /* No solution found */
		return -1;

	/* pe fine tuning if deviation not 0: +/- 2 around computed pe value */
	if (deviation) {
		fs_tmp.mdiv = fs->mdiv;
		fs_tmp.sdiv = fs->sdiv;
		fs_tmp.nsdiv = fs->nsdiv;

		if (fs->pe > 2)
			p2 = fs->pe - 2;
		else
			p2 = 0;

		for (; p2 < 32768ll && (p2 <= (fs->pe + 2)); p2++) {
			fs_tmp.pe = (unsigned long)p2;

			clk_fs660c32_dig_get_rate(input, &fs_tmp, &new_freq);

			new_deviation = abs(output - new_freq);

			/* Check if this is a better solution */
			if (new_deviation < deviation) {
				fs->pe = (unsigned long)p2;
				deviation = new_deviation;
			}
		}
	}
	return 0;
}

static int quadfs_fsynt_get_hw_value_for_recalc(struct st_clk_quadfs_fsynth *fs,
		struct stm_fs *params)
{
	/*
	 * Get the initial hardware values for recalc_rate
	 */
	params->mdiv = CLKGEN_READ(fs, mdiv[fs->chan]);
	params->pe = CLKGEN_READ(fs, pe[fs->chan]);
	params->sdiv = CLKGEN_READ(fs, sdiv[fs->chan]);

	if (fs->data->nsdiv_present)
		params->nsdiv = CLKGEN_READ(fs, nsdiv[fs->chan]);
	else
		params->nsdiv = 1;

	/*
	 * If all of the above are zero then assume no clock rate has been
	 * programmed yet.
	 */
	if (!params->mdiv && !params->pe && !params->sdiv)
		return 1;

	fs->md = params->mdiv;
	fs->pe = params->pe;
	fs->sdiv = params->sdiv;
	fs->nsdiv = params->nsdiv;

	return 0;
}

static long quadfs_find_best_rate(struct clk_hw *hw, unsigned long drate,
		unsigned long prate, struct stm_fs *params)
{
	struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
	int (*clk_fs_get_rate)(unsigned long,
			const struct stm_fs *, unsigned long *);
	int (*clk_fs_get_params)(unsigned long, unsigned long, struct stm_fs *);
	unsigned long rate = 0;

	clk_fs_get_rate = fs->data->get_rate;
	clk_fs_get_params = fs->data->get_params;

	if (!clk_fs_get_params(prate, drate, params))
		clk_fs_get_rate(prate, params, &rate);

	return rate;
}

static unsigned long quadfs_recalc_rate(struct clk_hw *hw,
		unsigned long parent_rate)
{
	struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
	unsigned long rate = 0;
	struct stm_fs params;
	int (*clk_fs_get_rate)(unsigned long,
			const struct stm_fs *, unsigned long *);

	clk_fs_get_rate = fs->data->get_rate;

	if (quadfs_fsynt_get_hw_value_for_recalc(fs, &params))
		return 0;

	if (clk_fs_get_rate(parent_rate, &params, &rate)) {
		pr_err("%s:%s error calculating rate\n",
		       clk_hw_get_name(hw), __func__);
	}

	pr_debug("%s:%s rate %lu\n", clk_hw_get_name(hw), __func__, rate);

	return rate;
}

static long quadfs_round_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long *prate)
{
	struct stm_fs params;

	rate = quadfs_find_best_rate(hw, rate, *prate, &params);

	pr_debug("%s: %s new rate %ld [sdiv=0x%x,md=0x%x,pe=0x%x,nsdiv3=%u]\n",
		 __func__, clk_hw_get_name(hw),
		 rate, (unsigned int)params.sdiv, (unsigned int)params.mdiv,
		 (unsigned int)params.pe, (unsigned int)params.nsdiv);

	return rate;
}

static void quadfs_program_and_enable(struct st_clk_quadfs_fsynth *fs,
		struct stm_fs *params)
{
	fs->md = params->mdiv;
	fs->pe = params->pe;
	fs->sdiv = params->sdiv;
	fs->nsdiv = params->nsdiv;

	/*
	 * In some integrations you can only change the fsynth programming when
	 * the parent entity containing it is enabled.
	 */
	quadfs_fsynth_program_rate(fs);
	quadfs_fsynth_program_enable(fs);
}

static int quadfs_set_rate(struct clk_hw *hw, unsigned long rate,
			   unsigned long parent_rate)
{
	struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
	struct stm_fs params;
	long hwrate;

	if (!rate || !parent_rate)
		return -EINVAL;

	memset(&params, 0, sizeof(struct stm_fs));

	hwrate = quadfs_find_best_rate(hw, rate, parent_rate, &params);
	if (!hwrate)
		return -EINVAL;

	quadfs_program_and_enable(fs, &params);

	return 0;
}

static const struct clk_ops st_quadfs_ops = {
	.enable = quadfs_fsynth_enable,
	.disable = quadfs_fsynth_disable,
	.is_enabled = quadfs_fsynth_is_enabled,
	.round_rate = quadfs_round_rate,
	.set_rate = quadfs_set_rate,
	.recalc_rate = quadfs_recalc_rate,
};

static struct clk * __init st_clk_register_quadfs_fsynth(
		const char *name, const char *parent_name,
		struct clkgen_quadfs_data *quadfs, void __iomem *reg, u32 chan,
		unsigned long flags, spinlock_t *lock)
{
	struct st_clk_quadfs_fsynth *fs;
	struct clk *clk;
	struct clk_init_data init;

	/*
	 * Sanity check required pointers, note that nsdiv3 is optional.
	 */
	if (WARN_ON(!name || !parent_name))
		return ERR_PTR(-EINVAL);

	fs = kzalloc(sizeof(*fs), GFP_KERNEL);
	if (!fs)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &st_quadfs_ops;
	init.flags = flags | CLK_GET_RATE_NOCACHE | CLK_IS_BASIC;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	fs->data = quadfs;
	fs->regs_base = reg;
	fs->chan = chan;
	fs->lock = lock;
	fs->hw.init = &init;

	clk = clk_register(NULL, &fs->hw);

	if (IS_ERR(clk))
		kfree(fs);

	return clk;
}

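/*
 * Register one fsynth output clock per entry in the node's
 * "clock-output-names" property (empty names mark unused channels) and
 * expose them through a onecell clock provider.
 */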
static void __init st_of_create_quadfs_fsynths(
		struct device_node *np, const char *pll_name,
		struct clkgen_quadfs_data *quadfs, void __iomem *reg,
		spinlock_t *lock)
{
	struct clk_onecell_data *clk_data;
	int fschan;

	clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
	if (!clk_data)
		return;

	clk_data->clk_num = QUADFS_MAX_CHAN;
	clk_data->clks = kzalloc(QUADFS_MAX_CHAN * sizeof(struct clk *),
				 GFP_KERNEL);

	if (!clk_data->clks) {
		kfree(clk_data);
		return;
	}

	for (fschan = 0; fschan < QUADFS_MAX_CHAN; fschan++) {
		struct clk *clk;
		const char *clk_name;
		unsigned long flags = 0;

		if (of_property_read_string_index(np, "clock-output-names",
						  fschan, &clk_name)) {
			break;
		}

		/*
		 * If we read an empty clock name then the channel is unused
		 */
		if (*clk_name == '\0')
			continue;

		of_clk_detect_critical(np, fschan, &flags);

		clk = st_clk_register_quadfs_fsynth(clk_name, pll_name,
						    quadfs, reg, fschan,
						    flags, lock);

		/*
		 * If there was an error registering this clock output, skip
		 * it and move on to the next channel.
		 */
		if (!IS_ERR(clk)) {
			clk_data->clks[fschan] = clk;
			pr_debug("%s: parent %s rate %u\n",
				 __clk_get_name(clk),
				 __clk_get_name(clk_get_parent(clk)),
				 (unsigned int)clk_get_rate(clk));
		}
	}

	of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);
}

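/*
 * Common DT setup: map the quadfs registers, register the internal PLL
 * (named "<node>.pll") fed by the node's clock input, then register the
 * four fsynth channel outputs on top of it.
 */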
static void __init st_of_quadfs_setup(struct device_node *np,
		struct clkgen_quadfs_data *data)
{
	struct clk *clk;
	const char *pll_name, *clk_parent_name;
	void __iomem *reg;
	spinlock_t *lock;

	reg = of_iomap(np, 0);
	if (!reg)
		return;

	clk_parent_name = of_clk_get_parent_name(np, 0);
	if (!clk_parent_name)
		return;

	pll_name = kasprintf(GFP_KERNEL, "%s.pll", np->name);
	if (!pll_name)
		return;

	lock = kzalloc(sizeof(*lock), GFP_KERNEL);
	if (!lock)
		goto err_exit;

	spin_lock_init(lock);

	clk = st_clk_register_quadfs_pll(pll_name, clk_parent_name, data,
					 reg, lock);
	if (IS_ERR(clk))
		goto err_exit;
	else
		pr_debug("%s: parent %s rate %u\n",
			 __clk_get_name(clk),
			 __clk_get_name(clk_get_parent(clk)),
			 (unsigned int)clk_get_rate(clk));

	st_of_create_quadfs_fsynths(np, pll_name, data, reg, lock);

err_exit:
	kfree(pll_name); /* No longer need local copy of the PLL name */
}

static void __init st_of_quadfs660C_setup(struct device_node *np)
{
	st_of_quadfs_setup(np, (struct clkgen_quadfs_data *) &st_fs660c32_C);
}
CLK_OF_DECLARE(quadfs660C, "st,quadfs-pll", st_of_quadfs660C_setup);

static void __init st_of_quadfs660D_setup(struct device_node *np)
{
	st_of_quadfs_setup(np, (struct clkgen_quadfs_data *) &st_fs660c32_D);
}
CLK_OF_DECLARE(quadfs660D, "st,quadfs", st_of_quadfs660D_setup);