/*
 * OMAP APLL clock support
 *
 * Copyright (C) 2013 Texas Instruments, Inc.
 *
 * J Keerthy <j-keerthy@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/clk/ti.h>
#include <linux/delay.h>

#define APLL_FORCE_LOCK		0x1
#define APLL_AUTO_IDLE		0x2
#define MAX_APLL_WAIT_TRIES	1000000

#undef pr_fmt
#define pr_fmt(fmt) "%s: " fmt, __func__

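/**
 * dra7_apll_enable - force-lock a DRA7 APLL
 * @hw: struct clk_hw for the APLL
 *
 * Programs APLL_FORCE_LOCK into the enable field of the control register
 * and polls the idle status register until the lock is reported. Returns
 * 0 if the APLL locks (or was already locked), -EINVAL if no dpll_data is
 * attached, or -EBUSY if the lock is not seen within MAX_APLL_WAIT_TRIES
 * polls.
 */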
static int dra7_apll_enable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	int r = 0, i = 0;
	struct dpll_data *ad;
	const char *clk_name;
	u8 state = 1;
	u32 v;

	ad = clk->dpll_data;
	if (!ad)
		return -EINVAL;

	clk_name = __clk_get_name(clk->hw.clk);

	state <<= __ffs(ad->idlest_mask);

	/* Check if the APLL is already locked */
	v = ti_clk_ll_ops->clk_readl(ad->idlest_reg);

	if ((v & ad->idlest_mask) == state)
		return r;

	v = ti_clk_ll_ops->clk_readl(ad->control_reg);
	v &= ~ad->enable_mask;
	v |= APLL_FORCE_LOCK << __ffs(ad->enable_mask);
	ti_clk_ll_ops->clk_writel(v, ad->control_reg);

	/* Poll the idle status register until the APLL reports lock */
	while (1) {
		v = ti_clk_ll_ops->clk_readl(ad->idlest_reg);
		if ((v & ad->idlest_mask) == state)
			break;
		if (i >= MAX_APLL_WAIT_TRIES)
			break;
		i++;
		udelay(1);
	}

	if (i == MAX_APLL_WAIT_TRIES) {
		pr_warn("clock: %s failed transition to '%s'\n",
			clk_name, (state) ? "locked" : "bypassed");
		r = -EBUSY;
	} else {
		pr_debug("clock: %s transition to '%s' in %d loops\n",
			 clk_name, (state) ? "locked" : "bypassed", i);
	}

	return r;
}

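/**
 * dra7_apll_disable - put a DRA7 APLL into auto-idle mode
 * @hw: struct clk_hw for the APLL
 *
 * Programs APLL_AUTO_IDLE into the enable field of the control register
 * so that the hardware can idle the APLL.
 */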
static void dra7_apll_disable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	struct dpll_data *ad;
	u8 state = 1;
	u32 v;

	ad = clk->dpll_data;

	state <<= __ffs(ad->idlest_mask);

	v = ti_clk_ll_ops->clk_readl(ad->control_reg);
	v &= ~ad->enable_mask;
	v |= APLL_AUTO_IDLE << __ffs(ad->enable_mask);
	ti_clk_ll_ops->clk_writel(v, ad->control_reg);
}

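/**
 * dra7_apll_is_enabled - check whether a DRA7 APLL is enabled
 * @hw: struct clk_hw for the APLL
 *
 * Returns 0 if the enable field of the control register reads
 * APLL_AUTO_IDLE, 1 otherwise.
 */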
static int dra7_apll_is_enabled(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	struct dpll_data *ad;
	u32 v;

	ad = clk->dpll_data;

	v = ti_clk_ll_ops->clk_readl(ad->control_reg);
	v &= ad->enable_mask;

	v >>= __ffs(ad->enable_mask);

	return v == APLL_AUTO_IDLE ? 0 : 1;
}

static u8 dra7_init_apll_parent(struct clk_hw *hw)
{
	return 0;
}

static const struct clk_ops apll_ck_ops = {
	.enable		= &dra7_apll_enable,
	.disable	= &dra7_apll_disable,
	.is_enabled	= &dra7_apll_is_enabled,
	.get_parent	= &dra7_init_apll_parent,
};

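/**
 * omap_clk_register_apll - register an APLL with the clock framework
 * @hw: partially initialized clk_hw for the APLL
 * @node: device tree node for the APLL
 *
 * Looks up the reference and bypass clocks, registers the APLL and adds
 * it as a DT clock provider. If the parent clocks are not yet available,
 * the registration is queued for a later retry via ti_clk_retry_init().
 */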
static void __init omap_clk_register_apll(struct clk_hw *hw,
					  struct device_node *node)
{
	struct clk_hw_omap *clk_hw = to_clk_hw_omap(hw);
	struct dpll_data *ad = clk_hw->dpll_data;
	struct clk *clk;

	ad->clk_ref = of_clk_get(node, 0);
	ad->clk_bypass = of_clk_get(node, 1);

	if (IS_ERR(ad->clk_ref) || IS_ERR(ad->clk_bypass)) {
		pr_debug("clk-ref or clk-bypass for %s not ready, retry\n",
			 node->name);
		if (!ti_clk_retry_init(node, hw, omap_clk_register_apll))
			return;

		goto cleanup;
	}

	clk = clk_register(NULL, &clk_hw->hw);
	if (!IS_ERR(clk)) {
		of_clk_add_provider(node, of_clk_src_simple_get, clk);
		kfree(clk_hw->hw.init->parent_names);
		kfree(clk_hw->hw.init);
		return;
	}

cleanup:
	kfree(clk_hw->dpll_data);
	kfree(clk_hw->hw.init->parent_names);
	kfree(clk_hw->hw.init);
	kfree(clk_hw);
}

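/**
 * of_dra7_apll_setup - parse a ti,dra7-apll-clock DT node
 * @node: device tree node for the APLL
 *
 * Allocates the dpll_data, clk_hw_omap and clk_init_data structures,
 * fills them from the DT node (parent names, control and idlest
 * registers) and hands the result to omap_clk_register_apll().
 */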
static void __init of_dra7_apll_setup(struct device_node *node)
{
	struct dpll_data *ad = NULL;
	struct clk_hw_omap *clk_hw = NULL;
	struct clk_init_data *init = NULL;
	const char **parent_names = NULL;
	int i;

	ad = kzalloc(sizeof(*ad), GFP_KERNEL);
	clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
	init = kzalloc(sizeof(*init), GFP_KERNEL);
	if (!ad || !clk_hw || !init)
		goto cleanup;

	clk_hw->dpll_data = ad;
	clk_hw->hw.init = init;
	clk_hw->flags = MEMMAP_ADDRESSING;

	init->name = node->name;
	init->ops = &apll_ck_ops;

	init->num_parents = of_clk_get_parent_count(node);
	if (init->num_parents < 1) {
		pr_err("dra7 apll %s must have parent(s)\n", node->name);
		goto cleanup;
	}

	parent_names = kzalloc(sizeof(char *) * init->num_parents, GFP_KERNEL);
	if (!parent_names)
		goto cleanup;

	for (i = 0; i < init->num_parents; i++)
		parent_names[i] = of_clk_get_parent_name(node, i);

	init->parent_names = parent_names;

	ad->control_reg = ti_clk_get_reg_addr(node, 0);
	ad->idlest_reg = ti_clk_get_reg_addr(node, 1);

	if (!ad->control_reg || !ad->idlest_reg)
		goto cleanup;

	ad->idlest_mask = 0x1;
	ad->enable_mask = 0x3;

	omap_clk_register_apll(&clk_hw->hw, node);
	return;

cleanup:
	kfree(parent_names);
	kfree(ad);
	kfree(clk_hw);
	kfree(init);
}
CLK_OF_DECLARE(dra7_apll_clock, "ti,dra7-apll-clock", of_dra7_apll_setup);

#define OMAP2_EN_APLL_LOCKED	0x3
#define OMAP2_EN_APLL_STOPPED	0x0

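/**
 * omap2_apll_is_enabled - check whether an OMAP2 APLL is locked
 * @hw: struct clk_hw for the APLL
 *
 * Returns 1 if the enable field of the control register reads
 * OMAP2_EN_APLL_LOCKED, 0 otherwise.
 */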
static int omap2_apll_is_enabled(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	struct dpll_data *ad = clk->dpll_data;
	u32 v;

	v = ti_clk_ll_ops->clk_readl(ad->control_reg);
	v &= ad->enable_mask;

	v >>= __ffs(ad->enable_mask);

	return v == OMAP2_EN_APLL_LOCKED ? 1 : 0;
}

static unsigned long omap2_apll_recalc(struct clk_hw *hw,
				       unsigned long parent_rate)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);

	if (omap2_apll_is_enabled(hw))
		return clk->fixed_rate;

	return 0;
}

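/**
 * omap2_apll_enable - lock an OMAP2 APLL
 * @hw: struct clk_hw for the APLL
 *
 * Programs OMAP2_EN_APLL_LOCKED into the enable field of the control
 * register and polls the idle status register until the APLL reports
 * lock. Returns 0 on success or -EBUSY if the lock is not seen within
 * MAX_APLL_WAIT_TRIES polls.
 */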
static int omap2_apll_enable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	struct dpll_data *ad = clk->dpll_data;
	u32 v;
	int i = 0;

	v = ti_clk_ll_ops->clk_readl(ad->control_reg);
	v &= ~ad->enable_mask;
	v |= OMAP2_EN_APLL_LOCKED << __ffs(ad->enable_mask);
	ti_clk_ll_ops->clk_writel(v, ad->control_reg);

	/* Poll the idle status register until the APLL reports lock */
	while (1) {
		v = ti_clk_ll_ops->clk_readl(ad->idlest_reg);
		if (v & ad->idlest_mask)
			break;
		if (i >= MAX_APLL_WAIT_TRIES)
			break;
		i++;
		udelay(1);
	}

	if (i == MAX_APLL_WAIT_TRIES) {
		pr_warn("%s failed to transition to locked\n",
			__clk_get_name(clk->hw.clk));
		return -EBUSY;
	}

	return 0;
}

static void omap2_apll_disable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	struct dpll_data *ad = clk->dpll_data;
	u32 v;

	v = ti_clk_ll_ops->clk_readl(ad->control_reg);
	v &= ~ad->enable_mask;
	v |= OMAP2_EN_APLL_STOPPED << __ffs(ad->enable_mask);
	ti_clk_ll_ops->clk_writel(v, ad->control_reg);
}

static struct clk_ops omap2_apll_ops = {
	.enable		= &omap2_apll_enable,
	.disable	= &omap2_apll_disable,
	.is_enabled	= &omap2_apll_is_enabled,
	.recalc_rate	= &omap2_apll_recalc,
};

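/**
 * omap2_apll_set_autoidle - program the autoidle field of an OMAP2 APLL
 * @clk: struct clk_hw_omap for the APLL
 * @val: autoidle mode to program
 *
 * Read-modify-writes the autoidle field with @val; used by the
 * allow_idle/deny_idle hwops below.
 */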
static void omap2_apll_set_autoidle(struct clk_hw_omap *clk, u32 val)
{
	struct dpll_data *ad = clk->dpll_data;
	u32 v;

	v = ti_clk_ll_ops->clk_readl(ad->autoidle_reg);
	v &= ~ad->autoidle_mask;
	v |= val << __ffs(ad->autoidle_mask);
	ti_clk_ll_ops->clk_writel(v, ad->autoidle_reg);
}

#define OMAP2_APLL_AUTOIDLE_LOW_POWER_STOP	0x3
#define OMAP2_APLL_AUTOIDLE_DISABLE		0x0

static void omap2_apll_allow_idle(struct clk_hw_omap *clk)
{
	omap2_apll_set_autoidle(clk, OMAP2_APLL_AUTOIDLE_LOW_POWER_STOP);
}

static void omap2_apll_deny_idle(struct clk_hw_omap *clk)
{
	omap2_apll_set_autoidle(clk, OMAP2_APLL_AUTOIDLE_DISABLE);
}

static struct clk_hw_omap_ops omap2_apll_hwops = {
	.allow_idle	= &omap2_apll_allow_idle,
	.deny_idle	= &omap2_apll_deny_idle,
};

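/**
 * of_omap2_apll_setup - parse a ti,omap2-apll-clock DT node
 * @node: device tree node for the APLL
 *
 * Allocates and fills the dpll_data, clk_hw_omap and clk_init_data
 * structures from the DT node (parent, fixed rate, bit shifts and
 * register addresses) and registers the resulting clock.
 */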
static void __init of_omap2_apll_setup(struct device_node *node)
{
	struct dpll_data *ad = NULL;
	struct clk_hw_omap *clk_hw = NULL;
	struct clk_init_data *init = NULL;
	struct clk *clk;
	const char *parent_name;
	u32 val;

	ad = kzalloc(sizeof(*ad), GFP_KERNEL);
	clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
	init = kzalloc(sizeof(*init), GFP_KERNEL);

	if (!ad || !clk_hw || !init)
		goto cleanup;

	clk_hw->dpll_data = ad;
	clk_hw->hw.init = init;
	init->ops = &omap2_apll_ops;
	init->name = node->name;
	clk_hw->ops = &omap2_apll_hwops;

	init->num_parents = of_clk_get_parent_count(node);
	if (init->num_parents != 1) {
		pr_err("%s must have one parent\n", node->name);
		goto cleanup;
	}

	parent_name = of_clk_get_parent_name(node, 0);
	init->parent_names = &parent_name;

	if (of_property_read_u32(node, "ti,clock-frequency", &val)) {
		pr_err("%s missing clock-frequency\n", node->name);
		goto cleanup;
	}
	clk_hw->fixed_rate = val;

	if (of_property_read_u32(node, "ti,bit-shift", &val)) {
		pr_err("%s missing bit-shift\n", node->name);
		goto cleanup;
	}

	clk_hw->enable_bit = val;
	ad->enable_mask = 0x3 << val;
	ad->autoidle_mask = 0x3 << val;

	if (of_property_read_u32(node, "ti,idlest-shift", &val)) {
		pr_err("%s missing idlest-shift\n", node->name);
		goto cleanup;
	}

	ad->idlest_mask = 1 << val;

	ad->control_reg = ti_clk_get_reg_addr(node, 0);
	ad->autoidle_reg = ti_clk_get_reg_addr(node, 1);
	ad->idlest_reg = ti_clk_get_reg_addr(node, 2);

	if (!ad->control_reg || !ad->autoidle_reg || !ad->idlest_reg)
		goto cleanup;

	clk = clk_register(NULL, &clk_hw->hw);
	if (!IS_ERR(clk)) {
		of_clk_add_provider(node, of_clk_src_simple_get, clk);
		kfree(init);
		return;
	}
cleanup:
	kfree(ad);
	kfree(clk_hw);
	kfree(init);
}
CLK_OF_DECLARE(omap2_apll_clock, "ti,omap2-apll-clock",
	       of_omap2_apll_setup);