/*
 * mmp mix(div and mux) clock operation source file
 *
 * Copyright (C) 2014 Marvell
 * Chao Xie <chao.xie@marvell.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/clk-provider.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/err.h>

#include "clk.h"

/*
 * The mix clock is a clock that combines a mux and a divider.
 * Because the div field and the mux field need to be set at the
 * same time, it cannot be split into two separate clock types.
 */
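/*
 * Illustrative only (not part of the driver): a minimal sketch of how a
 * platform clock driver might describe and register a mix clock with
 * mmp_clk_register_mix().  The parent names, register address and field
 * layout below are hypothetical, and only config fields that this file
 * actually consumes are shown.
 *
 *	static const char *sdh_mix_parents[] = {"pll1_416", "pll1_624"};
 *	static u32 sdh_mix_mux_table[] = {0x0, 0x1};
 *	static DEFINE_SPINLOCK(sdh_mix_lock);
 *
 *	struct mmp_clk_mix_config cfg = {
 *		.reg_info = {
 *			.reg_clk_ctrl = apmu_base + 0x54, (hypothetical iomem)
 *			.width_div = 3, .shift_div = 10,
 *			.width_mux = 2, .shift_mux = 8,
 *			.bit_fc = 31, (valid FC bit, no sel reg => TYPE_V2)
 *		},
 *		.mux_table = sdh_mix_mux_table,
 *	};
 *
 *	clk = mmp_clk_register_mix(NULL, "sdh_mix", sdh_mix_parents,
 *				ARRAY_SIZE(sdh_mix_parents), 0,
 *				&cfg, &sdh_mix_lock);
 */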

#define to_clk_mix(hw) container_of(hw, struct mmp_clk_mix, hw)

/* Largest divider that the div field can express */
static unsigned int _get_maxdiv(struct mmp_clk_mix *mix)
{
	unsigned int div_mask = (1 << mix->reg_info.width_div) - 1;
	unsigned int maxdiv = 0;
	struct clk_div_table *clkt;

	if (mix->div_flags & CLK_DIVIDER_ONE_BASED)
		return div_mask;
	if (mix->div_flags & CLK_DIVIDER_POWER_OF_TWO)
		return 1 << div_mask;
	if (mix->div_table) {
		for (clkt = mix->div_table; clkt->div; clkt++)
			if (clkt->div > maxdiv)
				maxdiv = clkt->div;
		return maxdiv;
	}
	return div_mask + 1;
}

/* Translate a div field value into the divider it represents */
static unsigned int _get_div(struct mmp_clk_mix *mix, unsigned int val)
{
	struct clk_div_table *clkt;

	if (mix->div_flags & CLK_DIVIDER_ONE_BASED)
		return val;
	if (mix->div_flags & CLK_DIVIDER_POWER_OF_TWO)
		return 1 << val;
	if (mix->div_table) {
		for (clkt = mix->div_table; clkt->div; clkt++)
			if (clkt->val == val)
				return clkt->div;
		if (clkt->div == 0)
			return 0;
	}
	return val + 1;
}

/* Translate a mux field value into a parent index */
static unsigned int _get_mux(struct mmp_clk_mix *mix, unsigned int val)
{
	int num_parents = __clk_get_num_parents(mix->hw.clk);
	int i;

	if (mix->mux_flags & CLK_MUX_INDEX_BIT)
		return ffs(val) - 1;
	if (mix->mux_flags & CLK_MUX_INDEX_ONE)
		return val - 1;
	if (mix->mux_table) {
		for (i = 0; i < num_parents; i++)
			if (mix->mux_table[i] == val)
				return i;
		if (i == num_parents)
			return 0;
	}

	return val;
}

/* Translate a divider into the div field value that selects it */
static unsigned int _get_div_val(struct mmp_clk_mix *mix, unsigned int div)
{
	struct clk_div_table *clkt;

	if (mix->div_flags & CLK_DIVIDER_ONE_BASED)
		return div;
	if (mix->div_flags & CLK_DIVIDER_POWER_OF_TWO)
		return __ffs(div);
	if (mix->div_table) {
		for (clkt = mix->div_table; clkt->div; clkt++)
			if (clkt->div == div)
				return clkt->val;
		if (clkt->div == 0)
			return 0;
	}

	return div - 1;
}

/* Translate a parent index into the mux field value that selects it */
static unsigned int _get_mux_val(struct mmp_clk_mix *mix, unsigned int mux)
{
	if (mix->mux_table)
		return mix->mux_table[mux];

	return mux;
}

/*
 * Validate the platform-supplied rate table against the current parent
 * rates: entries whose rate is not an integer fraction of the parent
 * rate are marked invalid, the rest get their divisor computed.
 */
static void _filter_clk_table(struct mmp_clk_mix *mix,
				struct mmp_clk_mix_clk_table *table,
				unsigned int table_size)
{
	int i;
	struct mmp_clk_mix_clk_table *item;
	struct clk *parent, *clk;
	unsigned long parent_rate;

	clk = mix->hw.clk;

	for (i = 0; i < table_size; i++) {
		item = &table[i];
		parent = clk_get_parent_by_index(clk, item->parent_index);
		parent_rate = __clk_get_rate(parent);
		if (parent_rate % item->rate) {
			item->valid = 0;
		} else {
			item->divisor = parent_rate / item->rate;
			item->valid = 1;
		}
	}
}

/*
 * Program the mux and/or div fields.  How the new value is applied
 * depends on the register layout:
 *   V1: a single control register, written directly;
 *   V2: the control register also carries a frequency-change (FC)
 *       request bit, which is set and then polled until it clears;
 *   V3: mux/div live in a separate select register and the FC bit in
 *       the control register triggers the change.
 */
static int _set_rate(struct mmp_clk_mix *mix, u32 mux_val, u32 div_val,
			unsigned int change_mux, unsigned int change_div)
{
	struct mmp_clk_mix_reg_info *ri = &mix->reg_info;
	u8 width, shift;
	u32 mux_div, fc_req;
	int ret, timeout = 50;
	unsigned long flags = 0;

	if (!change_mux && !change_div)
		return -EINVAL;

	if (mix->lock)
		spin_lock_irqsave(mix->lock, flags);

	if (mix->type == MMP_CLK_MIX_TYPE_V1
		|| mix->type == MMP_CLK_MIX_TYPE_V2)
		mux_div = readl(ri->reg_clk_ctrl);
	else
		mux_div = readl(ri->reg_clk_sel);

	if (change_div) {
		width = ri->width_div;
		shift = ri->shift_div;
		mux_div &= ~MMP_CLK_BITS_MASK(width, shift);
		mux_div |= MMP_CLK_BITS_SET_VAL(div_val, width, shift);
	}

	if (change_mux) {
		width = ri->width_mux;
		shift = ri->shift_mux;
		mux_div &= ~MMP_CLK_BITS_MASK(width, shift);
		mux_div |= MMP_CLK_BITS_SET_VAL(mux_val, width, shift);
	}

	if (mix->type == MMP_CLK_MIX_TYPE_V1) {
		writel(mux_div, ri->reg_clk_ctrl);
	} else if (mix->type == MMP_CLK_MIX_TYPE_V2) {
		mux_div |= (1 << ri->bit_fc);
		writel(mux_div, ri->reg_clk_ctrl);

		do {
			fc_req = readl(ri->reg_clk_ctrl);
			timeout--;
			if (!(fc_req & (1 << ri->bit_fc)))
				break;
		} while (timeout);

		if (timeout == 0) {
			pr_err("%s:%s cannot do frequency change\n",
				__func__, __clk_get_name(mix->hw.clk));
			ret = -EBUSY;
			goto error;
		}
	} else {
		fc_req = readl(ri->reg_clk_ctrl);
		fc_req |= 1 << ri->bit_fc;
		writel(fc_req, ri->reg_clk_ctrl);
		writel(mux_div, ri->reg_clk_sel);
		fc_req &= ~(1 << ri->bit_fc);
	}

	ret = 0;
error:
	if (mix->lock)
		spin_unlock_irqrestore(mix->lock, flags);

	return ret;
}

static long mmp_clk_mix_determine_rate(struct clk_hw *hw, unsigned long rate,
					unsigned long min_rate,
					unsigned long max_rate,
					unsigned long *best_parent_rate,
					struct clk_hw **best_parent_clk)
{
	struct mmp_clk_mix *mix = to_clk_mix(hw);
	struct mmp_clk_mix_clk_table *item;
	struct clk *parent, *parent_best, *mix_clk;
	unsigned long parent_rate, mix_rate, mix_rate_best, parent_rate_best;
	unsigned long gap, gap_best;
	u32 div_val_max;
	unsigned int div;
	int i, j;

	mix_clk = hw->clk;

	parent = NULL;
	mix_rate_best = 0;
	parent_rate_best = 0;
	gap_best = rate;
	parent_best = NULL;

	if (mix->table) {
		for (i = 0; i < mix->table_size; i++) {
			item = &mix->table[i];
			if (item->valid == 0)
				continue;
			parent = clk_get_parent_by_index(mix_clk,
							item->parent_index);
			parent_rate = __clk_get_rate(parent);
			mix_rate = parent_rate / item->divisor;
			gap = abs(mix_rate - rate);
			if (parent_best == NULL || gap < gap_best) {
				parent_best = parent;
				parent_rate_best = parent_rate;
				mix_rate_best = mix_rate;
				gap_best = gap;
				if (gap_best == 0)
					goto found;
			}
		}
	} else {
		for (i = 0; i < __clk_get_num_parents(mix_clk); i++) {
			parent = clk_get_parent_by_index(mix_clk, i);
			parent_rate = __clk_get_rate(parent);
			div_val_max = _get_maxdiv(mix);
			for (j = 0; j < div_val_max; j++) {
				div = _get_div(mix, j);
				mix_rate = parent_rate / div;
				gap = abs(mix_rate - rate);
				if (parent_best == NULL || gap < gap_best) {
					parent_best = parent;
					parent_rate_best = parent_rate;
					mix_rate_best = mix_rate;
					gap_best = gap;
					if (gap_best == 0)
						goto found;
				}
			}
		}
	}

found:
	*best_parent_rate = parent_rate_best;
	*best_parent_clk = __clk_get_hw(parent_best);

	return mix_rate_best;
}

static int mmp_clk_mix_set_rate_and_parent(struct clk_hw *hw,
						unsigned long rate,
						unsigned long parent_rate,
						u8 index)
{
	struct mmp_clk_mix *mix = to_clk_mix(hw);
	unsigned int div;
	u32 div_val, mux_val;

	div = parent_rate / rate;
	div_val = _get_div_val(mix, div);
	mux_val = _get_mux_val(mix, index);

	return _set_rate(mix, mux_val, div_val, 1, 1);
}

static u8 mmp_clk_mix_get_parent(struct clk_hw *hw)
{
	struct mmp_clk_mix *mix = to_clk_mix(hw);
	struct mmp_clk_mix_reg_info *ri = &mix->reg_info;
	unsigned long flags = 0;
	u32 mux_div = 0;
	u8 width, shift;
	u32 mux_val;

	if (mix->lock)
		spin_lock_irqsave(mix->lock, flags);

	if (mix->type == MMP_CLK_MIX_TYPE_V1
		|| mix->type == MMP_CLK_MIX_TYPE_V2)
		mux_div = readl(ri->reg_clk_ctrl);
	else
		mux_div = readl(ri->reg_clk_sel);

	if (mix->lock)
		spin_unlock_irqrestore(mix->lock, flags);

	width = mix->reg_info.width_mux;
	shift = mix->reg_info.shift_mux;

	mux_val = MMP_CLK_BITS_GET_VAL(mux_div, width, shift);

	return _get_mux(mix, mux_val);
}

static unsigned long mmp_clk_mix_recalc_rate(struct clk_hw *hw,
					unsigned long parent_rate)
{
	struct mmp_clk_mix *mix = to_clk_mix(hw);
	struct mmp_clk_mix_reg_info *ri = &mix->reg_info;
	unsigned long flags = 0;
	u32 mux_div = 0;
	u8 width, shift;
	unsigned int div;

	if (mix->lock)
		spin_lock_irqsave(mix->lock, flags);

	if (mix->type == MMP_CLK_MIX_TYPE_V1
		|| mix->type == MMP_CLK_MIX_TYPE_V2)
		mux_div = readl(ri->reg_clk_ctrl);
	else
		mux_div = readl(ri->reg_clk_sel);

	if (mix->lock)
		spin_unlock_irqrestore(mix->lock, flags);

	width = mix->reg_info.width_div;
	shift = mix->reg_info.shift_div;

	div = _get_div(mix, MMP_CLK_BITS_GET_VAL(mux_div, width, shift));

	return parent_rate / div;
}

static int mmp_clk_set_parent(struct clk_hw *hw, u8 index)
{
	struct mmp_clk_mix *mix = to_clk_mix(hw);
	struct mmp_clk_mix_clk_table *item;
	int i;
	u32 div_val, mux_val;

	if (mix->table) {
		for (i = 0; i < mix->table_size; i++) {
			item = &mix->table[i];
			if (item->valid == 0)
				continue;
			if (item->parent_index == index)
				break;
		}
		if (i < mix->table_size) {
			div_val = _get_div_val(mix, item->divisor);
			mux_val = _get_mux_val(mix, item->parent_index);
		} else
			return -EINVAL;
	} else {
		mux_val = _get_mux_val(mix, index);
		div_val = 0;
	}

	return _set_rate(mix, mux_val, div_val, 1, div_val ? 1 : 0);
}

static int mmp_clk_set_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long best_parent_rate)
{
	struct mmp_clk_mix *mix = to_clk_mix(hw);
	struct mmp_clk_mix_clk_table *item;
	unsigned long parent_rate;
	unsigned int best_divisor;
	struct clk *mix_clk, *parent;
	int i;

	best_divisor = best_parent_rate / rate;

	mix_clk = hw->clk;
	if (mix->table) {
		for (i = 0; i < mix->table_size; i++) {
			item = &mix->table[i];
			if (item->valid == 0)
				continue;
			parent = clk_get_parent_by_index(mix_clk,
							item->parent_index);
			parent_rate = __clk_get_rate(parent);
			if (parent_rate == best_parent_rate
				&& item->divisor == best_divisor)
				break;
		}
		if (i < mix->table_size)
			return _set_rate(mix,
					_get_mux_val(mix, item->parent_index),
					_get_div_val(mix, item->divisor),
					1, 1);
		else
			return -EINVAL;
	} else {
		for (i = 0; i < __clk_get_num_parents(mix_clk); i++) {
			parent = clk_get_parent_by_index(mix_clk, i);
			parent_rate = __clk_get_rate(parent);
			if (parent_rate == best_parent_rate)
				break;
		}
		if (i < __clk_get_num_parents(mix_clk))
			return _set_rate(mix, _get_mux_val(mix, i),
					_get_div_val(mix, best_divisor), 1, 1);
		else
			return -EINVAL;
	}
}

static void mmp_clk_mix_init(struct clk_hw *hw)
{
	struct mmp_clk_mix *mix = to_clk_mix(hw);

	if (mix->table)
		_filter_clk_table(mix, mix->table, mix->table_size);
}

const struct clk_ops mmp_clk_mix_ops = {
	.determine_rate = mmp_clk_mix_determine_rate,
	.set_rate_and_parent = mmp_clk_mix_set_rate_and_parent,
	.set_rate = mmp_clk_set_rate,
	.set_parent = mmp_clk_set_parent,
	.get_parent = mmp_clk_mix_get_parent,
	.recalc_rate = mmp_clk_mix_recalc_rate,
	.init = mmp_clk_mix_init,
};

struct clk *mmp_clk_register_mix(struct device *dev,
					const char *name,
					const char **parent_names,
					u8 num_parents,
					unsigned long flags,
					struct mmp_clk_mix_config *config,
					spinlock_t *lock)
{
	struct mmp_clk_mix *mix;
	struct clk *clk;
	struct clk_init_data init;
	size_t table_bytes;

	mix = kzalloc(sizeof(*mix), GFP_KERNEL);
	if (!mix) {
		pr_err("%s:%s: could not allocate mmp mix clk\n",
			__func__, name);
		return ERR_PTR(-ENOMEM);
	}

	init.name = name;
	init.flags = flags | CLK_GET_RATE_NOCACHE;
	init.parent_names = parent_names;
	init.num_parents = num_parents;
	init.ops = &mmp_clk_mix_ops;

	memcpy(&mix->reg_info, &config->reg_info, sizeof(config->reg_info));
	if (config->table) {
		table_bytes = sizeof(*config->table) * config->table_size;
		mix->table = kzalloc(table_bytes, GFP_KERNEL);
		if (!mix->table) {
			pr_err("%s:%s: could not allocate mmp mix table\n",
				__func__, name);
			kfree(mix);
			return ERR_PTR(-ENOMEM);
		}
		memcpy(mix->table, config->table, table_bytes);
		mix->table_size = config->table_size;
	}

	if (config->mux_table) {
		table_bytes = sizeof(u32) * num_parents;
		mix->mux_table = kzalloc(table_bytes, GFP_KERNEL);
		if (!mix->mux_table) {
			pr_err("%s:%s: could not allocate mmp mix mux-table\n",
				__func__, name);
			kfree(mix->table);
			kfree(mix);
			return ERR_PTR(-ENOMEM);
		}
		memcpy(mix->mux_table, config->mux_table, table_bytes);
	}

	mix->div_flags = config->div_flags;
	mix->mux_flags = config->mux_flags;
	mix->lock = lock;
	mix->hw.init = &init;

	/*
	 * Pick the register layout: no valid FC bit means V1, a separate
	 * select register means V3, otherwise the FC bit lives in the
	 * control register (V2).
	 */
	if (config->reg_info.bit_fc >= 32)
		mix->type = MMP_CLK_MIX_TYPE_V1;
	else if (config->reg_info.reg_clk_sel)
		mix->type = MMP_CLK_MIX_TYPE_V3;
	else
		mix->type = MMP_CLK_MIX_TYPE_V2;
	clk = clk_register(dev, &mix->hw);

	if (IS_ERR(clk)) {
		kfree(mix->mux_table);
		kfree(mix->table);
		kfree(mix);
	}

	return clk;
}