/*
 * drivers/clk/rockchip/clk.c — Rockchip clock driver.
 * (Mirror of the Ubuntu bionic kernel tree, via git.proxmox.com.)
 */
1 /*
2 * Copyright (c) 2014 MundoReader S.L.
3 * Author: Heiko Stuebner <heiko@sntech.de>
4 *
5 * based on
6 *
7 * samsung/clk.c
8 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
9 * Copyright (c) 2013 Linaro Ltd.
10 * Author: Thomas Abraham <thomas.ab@samsung.com>
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 */
22
23 #include <linux/slab.h>
24 #include <linux/clk.h>
25 #include <linux/clk-provider.h>
26 #include <linux/mfd/syscon.h>
27 #include <linux/regmap.h>
28 #include <linux/reboot.h>
29 #include "clk.h"
30
31 /**
32 * Register a clock branch.
33 * Most clock branches have a form like
34 *
35 * src1 --|--\
36 * |M |--[GATE]-[DIV]-
37 * src2 --|--/
38 *
39 * sometimes without one of those components.
40 */
41 static struct clk *rockchip_clk_register_branch(const char *name,
42 const char *const *parent_names, u8 num_parents, void __iomem *base,
43 int muxdiv_offset, u8 mux_shift, u8 mux_width, u8 mux_flags,
44 u8 div_shift, u8 div_width, u8 div_flags,
45 struct clk_div_table *div_table, int gate_offset,
46 u8 gate_shift, u8 gate_flags, unsigned long flags,
47 spinlock_t *lock)
48 {
49 struct clk *clk;
50 struct clk_mux *mux = NULL;
51 struct clk_gate *gate = NULL;
52 struct clk_divider *div = NULL;
53 const struct clk_ops *mux_ops = NULL, *div_ops = NULL,
54 *gate_ops = NULL;
55
56 if (num_parents > 1) {
57 mux = kzalloc(sizeof(*mux), GFP_KERNEL);
58 if (!mux)
59 return ERR_PTR(-ENOMEM);
60
61 mux->reg = base + muxdiv_offset;
62 mux->shift = mux_shift;
63 mux->mask = BIT(mux_width) - 1;
64 mux->flags = mux_flags;
65 mux->lock = lock;
66 mux_ops = (mux_flags & CLK_MUX_READ_ONLY) ? &clk_mux_ro_ops
67 : &clk_mux_ops;
68 }
69
70 if (gate_offset >= 0) {
71 gate = kzalloc(sizeof(*gate), GFP_KERNEL);
72 if (!gate)
73 goto err_gate;
74
75 gate->flags = gate_flags;
76 gate->reg = base + gate_offset;
77 gate->bit_idx = gate_shift;
78 gate->lock = lock;
79 gate_ops = &clk_gate_ops;
80 }
81
82 if (div_width > 0) {
83 div = kzalloc(sizeof(*div), GFP_KERNEL);
84 if (!div)
85 goto err_div;
86
87 div->flags = div_flags;
88 div->reg = base + muxdiv_offset;
89 div->shift = div_shift;
90 div->width = div_width;
91 div->lock = lock;
92 div->table = div_table;
93 div_ops = (div_flags & CLK_DIVIDER_READ_ONLY)
94 ? &clk_divider_ro_ops
95 : &clk_divider_ops;
96 }
97
98 clk = clk_register_composite(NULL, name, parent_names, num_parents,
99 mux ? &mux->hw : NULL, mux_ops,
100 div ? &div->hw : NULL, div_ops,
101 gate ? &gate->hw : NULL, gate_ops,
102 flags);
103
104 return clk;
105 err_div:
106 kfree(gate);
107 err_gate:
108 kfree(mux);
109 return ERR_PTR(-ENOMEM);
110 }
111
/*
 * Book-keeping for one fractional-divider branch and its optional child
 * mux, used by the rate-change notifier below to temporarily re-parent
 * the mux while the fractional divider's rate is being changed.
 */
struct rockchip_clk_frac {
	struct notifier_block clk_nb;		/* rate-change notifier on the divider */
	struct clk_fractional_divider div;
	struct clk_gate gate;

	struct clk_mux mux;			/* optional child mux fed by the divider */
	const struct clk_ops *mux_ops;
	int mux_frac_idx;			/* parent index of the divider in the mux, -1 if absent */

	bool rate_change_remuxed;		/* true while temporarily remuxed by PRE_RATE_CHANGE */
	int rate_change_idx;			/* parent index to restore on POST_RATE_CHANGE */
};

/* map an embedded notifier_block back to its rockchip_clk_frac */
#define to_rockchip_clk_frac_nb(nb) \
			container_of(nb, struct rockchip_clk_frac, clk_nb)
127
128 static int rockchip_clk_frac_notifier_cb(struct notifier_block *nb,
129 unsigned long event, void *data)
130 {
131 struct clk_notifier_data *ndata = data;
132 struct rockchip_clk_frac *frac = to_rockchip_clk_frac_nb(nb);
133 struct clk_mux *frac_mux = &frac->mux;
134 int ret = 0;
135
136 pr_debug("%s: event %lu, old_rate %lu, new_rate: %lu\n",
137 __func__, event, ndata->old_rate, ndata->new_rate);
138 if (event == PRE_RATE_CHANGE) {
139 frac->rate_change_idx = frac->mux_ops->get_parent(&frac_mux->hw);
140 if (frac->rate_change_idx != frac->mux_frac_idx) {
141 frac->mux_ops->set_parent(&frac_mux->hw, frac->mux_frac_idx);
142 frac->rate_change_remuxed = 1;
143 }
144 } else if (event == POST_RATE_CHANGE) {
145 /*
146 * The POST_RATE_CHANGE notifier runs directly after the
147 * divider clock is set in clk_change_rate, so we'll have
148 * remuxed back to the original parent before clk_change_rate
149 * reaches the mux itself.
150 */
151 if (frac->rate_change_remuxed) {
152 frac->mux_ops->set_parent(&frac_mux->hw, frac->rate_change_idx);
153 frac->rate_change_remuxed = 0;
154 }
155 }
156
157 return notifier_from_errno(ret);
158 }
159
/*
 * Register a fractional-divider branch: a 16.16 fractional divider with
 * an optional gate, plus an optional child mux that can select this
 * divider as one of its parents.  When such a child mux exists, a
 * clk notifier temporarily remuxes to the divider during rate changes
 * (see rockchip_clk_frac_notifier_cb).
 *
 * Returns the divider's clk (not the child mux's) or an ERR_PTR.
 */
static struct clk *rockchip_clk_register_frac_branch(const char *name,
		const char *const *parent_names, u8 num_parents,
		void __iomem *base, int muxdiv_offset, u8 div_flags,
		int gate_offset, u8 gate_shift, u8 gate_flags,
		unsigned long flags, struct rockchip_clk_branch *child,
		spinlock_t *lock)
{
	struct rockchip_clk_frac *frac;
	struct clk *clk;
	struct clk_gate *gate = NULL;
	struct clk_fractional_divider *div = NULL;
	const struct clk_ops *div_ops = NULL, *gate_ops = NULL;

	/* the fractional divider register is mandatory */
	if (muxdiv_offset < 0)
		return ERR_PTR(-EINVAL);

	/* only a mux child is supported downstream of the divider */
	if (child && child->branch_type != branch_mux) {
		pr_err("%s: fractional child clock for %s can only be a mux\n",
		       __func__, name);
		return ERR_PTR(-EINVAL);
	}

	frac = kzalloc(sizeof(*frac), GFP_KERNEL);
	if (!frac)
		return ERR_PTR(-ENOMEM);

	/* negative gate_offset means "no gate" */
	if (gate_offset >= 0) {
		gate = &frac->gate;
		gate->flags = gate_flags;
		gate->reg = base + gate_offset;
		gate->bit_idx = gate_shift;
		gate->lock = lock;
		gate_ops = &clk_gate_ops;
	}

	/* 16-bit numerator in bits [31:16], 16-bit denominator in [15:0] */
	div = &frac->div;
	div->flags = div_flags;
	div->reg = base + muxdiv_offset;
	div->mshift = 16;
	div->mwidth = 16;
	div->mmask = GENMASK(div->mwidth - 1, 0) << div->mshift;
	div->nshift = 0;
	div->nwidth = 16;
	div->nmask = GENMASK(div->nwidth - 1, 0) << div->nshift;
	div->lock = lock;
	div_ops = &clk_fractional_divider_ops;

	clk = clk_register_composite(NULL, name, parent_names, num_parents,
				     NULL, NULL,
				     &div->hw, div_ops,
				     gate ? &gate->hw : NULL, gate_ops,
				     flags | CLK_SET_RATE_UNGATE);
	if (IS_ERR(clk)) {
		kfree(frac);
		return clk;
	}

	if (child) {
		struct clk_mux *frac_mux = &frac->mux;
		struct clk_init_data init;
		struct clk *mux_clk;
		int i, ret;

		/* find which mux parent slot feeds from this divider */
		frac->mux_frac_idx = -1;
		for (i = 0; i < child->num_parents; i++) {
			if (!strcmp(name, child->parent_names[i])) {
				pr_debug("%s: found fractional parent in mux at pos %d\n",
					 __func__, i);
				frac->mux_frac_idx = i;
				break;
			}
		}

		frac->mux_ops = &clk_mux_ops;
		frac->clk_nb.notifier_call = rockchip_clk_frac_notifier_cb;

		frac_mux->reg = base + child->muxdiv_offset;
		frac_mux->shift = child->mux_shift;
		frac_mux->mask = BIT(child->mux_width) - 1;
		frac_mux->flags = child->mux_flags;
		frac_mux->lock = lock;
		frac_mux->hw.init = &init;

		/* CLK_SET_RATE_PARENT so a rate set on the mux reaches the divider */
		init.name = child->name;
		init.flags = child->flags | CLK_SET_RATE_PARENT;
		init.ops = frac->mux_ops;
		init.parent_names = child->parent_names;
		init.num_parents = child->num_parents;

		/*
		 * NOTE(review): on mux registration failure the already
		 * registered divider clk is returned as-is and the failure
		 * is silent — presumably intentional best-effort; confirm.
		 */
		mux_clk = clk_register(NULL, &frac_mux->hw);
		if (IS_ERR(mux_clk))
			return clk;

		rockchip_clk_add_lookup(mux_clk, child->id);

		/* notifier on the fraction divider to catch rate changes */
		if (frac->mux_frac_idx >= 0) {
			ret = clk_notifier_register(clk, &frac->clk_nb);
			if (ret)
				pr_err("%s: failed to register clock notifier for %s\n",
				       __func__, name);
		} else {
			pr_warn("%s: could not find %s as parent of %s, rate changes may not work\n",
				__func__, name, child->name);
		}
	}

	return clk;
}
269
270 static struct clk *rockchip_clk_register_factor_branch(const char *name,
271 const char *const *parent_names, u8 num_parents,
272 void __iomem *base, unsigned int mult, unsigned int div,
273 int gate_offset, u8 gate_shift, u8 gate_flags,
274 unsigned long flags, spinlock_t *lock)
275 {
276 struct clk *clk;
277 struct clk_gate *gate = NULL;
278 struct clk_fixed_factor *fix = NULL;
279
280 /* without gate, register a simple factor clock */
281 if (gate_offset == 0) {
282 return clk_register_fixed_factor(NULL, name,
283 parent_names[0], flags, mult,
284 div);
285 }
286
287 gate = kzalloc(sizeof(*gate), GFP_KERNEL);
288 if (!gate)
289 return ERR_PTR(-ENOMEM);
290
291 gate->flags = gate_flags;
292 gate->reg = base + gate_offset;
293 gate->bit_idx = gate_shift;
294 gate->lock = lock;
295
296 fix = kzalloc(sizeof(*fix), GFP_KERNEL);
297 if (!fix) {
298 kfree(gate);
299 return ERR_PTR(-ENOMEM);
300 }
301
302 fix->mult = mult;
303 fix->div = div;
304
305 clk = clk_register_composite(NULL, name, parent_names, num_parents,
306 NULL, NULL,
307 &fix->hw, &clk_fixed_factor_ops,
308 &gate->hw, &clk_gate_ops, flags);
309 if (IS_ERR(clk)) {
310 kfree(fix);
311 kfree(gate);
312 }
313
314 return clk;
315 }
316
/* lock protecting shared mux/div/gate registers of all branches */
static DEFINE_SPINLOCK(clk_lock);
/* id -> struct clk lookup table backing the of_clk onecell provider */
static struct clk **clk_table;
/* ioremapped base of the clock & reset unit (CRU) register block */
static void __iomem *reg_base;
static struct clk_onecell_data clk_data;
/* CRU device-tree node, used to resolve the "rockchip,grf" phandle */
static struct device_node *cru_node;
/* lazily-resolved GRF regmap; starts as ERR_PTR(-EPROBE_DEFER) */
static struct regmap *grf;
323
324 void __init rockchip_clk_init(struct device_node *np, void __iomem *base,
325 unsigned long nr_clks)
326 {
327 reg_base = base;
328 cru_node = np;
329 grf = ERR_PTR(-EPROBE_DEFER);
330
331 clk_table = kcalloc(nr_clks, sizeof(struct clk *), GFP_KERNEL);
332 if (!clk_table)
333 pr_err("%s: could not allocate clock lookup table\n", __func__);
334
335 clk_data.clks = clk_table;
336 clk_data.clk_num = nr_clks;
337 of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
338 }
339
340 struct regmap *rockchip_clk_get_grf(void)
341 {
342 if (IS_ERR(grf))
343 grf = syscon_regmap_lookup_by_phandle(cru_node, "rockchip,grf");
344 return grf;
345 }
346
347 void rockchip_clk_add_lookup(struct clk *clk, unsigned int id)
348 {
349 if (clk_table && id)
350 clk_table[id] = clk;
351 }
352
353 void __init rockchip_clk_register_plls(struct rockchip_pll_clock *list,
354 unsigned int nr_pll, int grf_lock_offset)
355 {
356 struct clk *clk;
357 int idx;
358
359 for (idx = 0; idx < nr_pll; idx++, list++) {
360 clk = rockchip_clk_register_pll(list->type, list->name,
361 list->parent_names, list->num_parents,
362 reg_base, list->con_offset, grf_lock_offset,
363 list->lock_shift, list->mode_offset,
364 list->mode_shift, list->rate_table,
365 list->pll_flags, &clk_lock);
366 if (IS_ERR(clk)) {
367 pr_err("%s: failed to register clock %s\n", __func__,
368 list->name);
369 continue;
370 }
371
372 rockchip_clk_add_lookup(clk, list->id);
373 }
374 }
375
376 void __init rockchip_clk_register_branches(
377 struct rockchip_clk_branch *list,
378 unsigned int nr_clk)
379 {
380 struct clk *clk = NULL;
381 unsigned int idx;
382 unsigned long flags;
383
384 for (idx = 0; idx < nr_clk; idx++, list++) {
385 flags = list->flags;
386
387 /* catch simple muxes */
388 switch (list->branch_type) {
389 case branch_mux:
390 clk = clk_register_mux(NULL, list->name,
391 list->parent_names, list->num_parents,
392 flags, reg_base + list->muxdiv_offset,
393 list->mux_shift, list->mux_width,
394 list->mux_flags, &clk_lock);
395 break;
396 case branch_divider:
397 if (list->div_table)
398 clk = clk_register_divider_table(NULL,
399 list->name, list->parent_names[0],
400 flags, reg_base + list->muxdiv_offset,
401 list->div_shift, list->div_width,
402 list->div_flags, list->div_table,
403 &clk_lock);
404 else
405 clk = clk_register_divider(NULL, list->name,
406 list->parent_names[0], flags,
407 reg_base + list->muxdiv_offset,
408 list->div_shift, list->div_width,
409 list->div_flags, &clk_lock);
410 break;
411 case branch_fraction_divider:
412 clk = rockchip_clk_register_frac_branch(list->name,
413 list->parent_names, list->num_parents,
414 reg_base, list->muxdiv_offset, list->div_flags,
415 list->gate_offset, list->gate_shift,
416 list->gate_flags, flags, list->child,
417 &clk_lock);
418 break;
419 case branch_gate:
420 flags |= CLK_SET_RATE_PARENT;
421
422 clk = clk_register_gate(NULL, list->name,
423 list->parent_names[0], flags,
424 reg_base + list->gate_offset,
425 list->gate_shift, list->gate_flags, &clk_lock);
426 break;
427 case branch_composite:
428 clk = rockchip_clk_register_branch(list->name,
429 list->parent_names, list->num_parents,
430 reg_base, list->muxdiv_offset, list->mux_shift,
431 list->mux_width, list->mux_flags,
432 list->div_shift, list->div_width,
433 list->div_flags, list->div_table,
434 list->gate_offset, list->gate_shift,
435 list->gate_flags, flags, &clk_lock);
436 break;
437 case branch_mmc:
438 clk = rockchip_clk_register_mmc(
439 list->name,
440 list->parent_names, list->num_parents,
441 reg_base + list->muxdiv_offset,
442 list->div_shift
443 );
444 break;
445 case branch_inverter:
446 clk = rockchip_clk_register_inverter(
447 list->name, list->parent_names,
448 list->num_parents,
449 reg_base + list->muxdiv_offset,
450 list->div_shift, list->div_flags, &clk_lock);
451 break;
452 case branch_factor:
453 clk = rockchip_clk_register_factor_branch(
454 list->name, list->parent_names,
455 list->num_parents, reg_base,
456 list->div_shift, list->div_width,
457 list->gate_offset, list->gate_shift,
458 list->gate_flags, flags, &clk_lock);
459 break;
460 }
461
462 /* none of the cases above matched */
463 if (!clk) {
464 pr_err("%s: unknown clock type %d\n",
465 __func__, list->branch_type);
466 continue;
467 }
468
469 if (IS_ERR(clk)) {
470 pr_err("%s: failed to register clock %s: %ld\n",
471 __func__, list->name, PTR_ERR(clk));
472 continue;
473 }
474
475 rockchip_clk_add_lookup(clk, list->id);
476 }
477 }
478
479 void __init rockchip_clk_register_armclk(unsigned int lookup_id,
480 const char *name, const char *const *parent_names,
481 u8 num_parents,
482 const struct rockchip_cpuclk_reg_data *reg_data,
483 const struct rockchip_cpuclk_rate_table *rates,
484 int nrates)
485 {
486 struct clk *clk;
487
488 clk = rockchip_clk_register_cpuclk(name, parent_names, num_parents,
489 reg_data, rates, nrates, reg_base,
490 &clk_lock);
491 if (IS_ERR(clk)) {
492 pr_err("%s: failed to register clock %s: %ld\n",
493 __func__, name, PTR_ERR(clk));
494 return;
495 }
496
497 rockchip_clk_add_lookup(clk, lookup_id);
498 }
499
500 void __init rockchip_clk_protect_critical(const char *const clocks[],
501 int nclocks)
502 {
503 int i;
504
505 /* Protect the clocks that needs to stay on */
506 for (i = 0; i < nclocks; i++) {
507 struct clk *clk = __clk_lookup(clocks[i]);
508
509 if (clk)
510 clk_prepare_enable(clk);
511 }
512 }
513
/* CRU register offset written to trigger the reset (set at registration) */
static unsigned int reg_restart;
/* optional SoC-specific hook to run before triggering the reset */
static void (*cb_restart)(void);

/*
 * Restart handler: run the optional callback, then write the reset
 * trigger value to the configured CRU register.
 */
static int rockchip_restart_notify(struct notifier_block *this,
				   unsigned long mode, void *cmd)
{
	if (cb_restart)
		cb_restart();

	/* 0xfdb9 — presumably the CRU's global-reset magic value; confirm per SoC TRM */
	writel(0xfdb9, reg_base + reg_restart);
	return NOTIFY_DONE;
}
525
/* restart handler; priority 128 is the framework's default priority */
static struct notifier_block rockchip_restart_handler = {
	.notifier_call = rockchip_restart_notify,
	.priority = 128,
};
530
531 void __init rockchip_register_restart_notifier(unsigned int reg, void (*cb)(void))
532 {
533 int ret;
534
535 reg_restart = reg;
536 cb_restart = cb;
537 ret = register_restart_handler(&rockchip_restart_handler);
538 if (ret)
539 pr_err("%s: cannot register restart handler, %d\n",
540 __func__, ret);
541 }