drivers/clk/ti/apll.c
/*
 * OMAP APLL clock support
 *
 * Copyright (C) 2013 Texas Instruments, Inc.
 *
 * J Keerthy <j-keerthy@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/clk/ti.h>
#include <linux/delay.h>

#include "clock.h"

#define APLL_FORCE_LOCK 0x1
#define APLL_AUTO_IDLE 0x2
#define MAX_APLL_WAIT_TRIES 1000000

#undef pr_fmt
#define pr_fmt(fmt) "%s: " fmt, __func__

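/*
 * dra7_apll_enable - force-lock a DRA7 APLL
 *
 * Program APLL_FORCE_LOCK into the enable field of the control register,
 * then poll the idlest register until the lock status bit is set or
 * MAX_APLL_WAIT_TRIES iterations (1 us apart) have elapsed.  Returns 0 on
 * success or if the APLL is already locked, -EINVAL if no dpll_data is
 * attached, and -EBUSY on timeout.
 */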
static int dra7_apll_enable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	int r = 0, i = 0;
	struct dpll_data *ad;
	const char *clk_name;
	u8 state = 1;
	u32 v;

	ad = clk->dpll_data;
	if (!ad)
		return -EINVAL;

	clk_name = clk_hw_get_name(&clk->hw);

	state <<= __ffs(ad->idlest_mask);

	/* Check if the APLL is already locked */
	v = ti_clk_ll_ops->clk_readl(&ad->idlest_reg);

	if ((v & ad->idlest_mask) == state)
		return r;

	v = ti_clk_ll_ops->clk_readl(&ad->control_reg);
	v &= ~ad->enable_mask;
	v |= APLL_FORCE_LOCK << __ffs(ad->enable_mask);
	ti_clk_ll_ops->clk_writel(v, &ad->control_reg);

	while (1) {
		v = ti_clk_ll_ops->clk_readl(&ad->idlest_reg);
		if ((v & ad->idlest_mask) == state)
			break;
		if (i >= MAX_APLL_WAIT_TRIES)
			break;
		i++;
		udelay(1);
	}

	if (i == MAX_APLL_WAIT_TRIES) {
		pr_warn("clock: %s failed transition to '%s'\n",
			clk_name, (state) ? "locked" : "bypassed");
		r = -EBUSY;
	} else {
		pr_debug("clock: %s transition to '%s' in %d loops\n",
			 clk_name, (state) ? "locked" : "bypassed", i);
	}

	return r;
}

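/* Put the APLL into auto-idle by writing APLL_AUTO_IDLE to its enable field. */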
static void dra7_apll_disable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	struct dpll_data *ad;
	u8 state = 1;
	u32 v;

	ad = clk->dpll_data;

	state <<= __ffs(ad->idlest_mask);

	v = ti_clk_ll_ops->clk_readl(&ad->control_reg);
	v &= ~ad->enable_mask;
	v |= APLL_AUTO_IDLE << __ffs(ad->enable_mask);
	ti_clk_ll_ops->clk_writel(v, &ad->control_reg);
}

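/*
 * The enable field reads back APLL_AUTO_IDLE once the APLL has been put
 * into auto-idle, so any other value is reported as "enabled".
 */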
static int dra7_apll_is_enabled(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	struct dpll_data *ad;
	u32 v;

	ad = clk->dpll_data;

	v = ti_clk_ll_ops->clk_readl(&ad->control_reg);
	v &= ad->enable_mask;

	v >>= __ffs(ad->enable_mask);

	return v == APLL_AUTO_IDLE ? 0 : 1;
}

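/* The APLL always reports its reference clock (parent index 0) as parent. */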
static u8 dra7_init_apll_parent(struct clk_hw *hw)
{
	return 0;
}

static const struct clk_ops apll_ck_ops = {
	.enable = &dra7_apll_enable,
	.disable = &dra7_apll_disable,
	.is_enabled = &dra7_apll_is_enabled,
	.get_parent = &dra7_init_apll_parent,
};

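/*
 * omap_clk_register_apll - register a DRA7 APLL once its parents exist
 *
 * Resolve the reference (index 0) and bypass (index 1) parent clocks from
 * the device tree.  If either parent is not yet available, queue the node
 * for a later retry via ti_clk_retry_init() instead of failing outright;
 * only give up (and free the allocations) when the retry cannot be queued.
 */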
static void __init omap_clk_register_apll(struct clk_hw *hw,
					  struct device_node *node)
{
	struct clk_hw_omap *clk_hw = to_clk_hw_omap(hw);
	struct dpll_data *ad = clk_hw->dpll_data;
	struct clk *clk;

	clk = of_clk_get(node, 0);
	if (IS_ERR(clk)) {
		pr_debug("clk-ref for %s not ready, retry\n",
			 node->name);
		if (!ti_clk_retry_init(node, hw, omap_clk_register_apll))
			return;

		goto cleanup;
	}

	ad->clk_ref = __clk_get_hw(clk);

	clk = of_clk_get(node, 1);
	if (IS_ERR(clk)) {
		pr_debug("clk-bypass for %s not ready, retry\n",
			 node->name);
		if (!ti_clk_retry_init(node, hw, omap_clk_register_apll))
			return;

		goto cleanup;
	}

	ad->clk_bypass = __clk_get_hw(clk);

	clk = ti_clk_register(NULL, &clk_hw->hw, node->name);
	if (!IS_ERR(clk)) {
		of_clk_add_provider(node, of_clk_src_simple_get, clk);
		kfree(clk_hw->hw.init->parent_names);
		kfree(clk_hw->hw.init);
		return;
	}

cleanup:
	kfree(clk_hw->dpll_data);
	kfree(clk_hw->hw.init->parent_names);
	kfree(clk_hw->hw.init);
	kfree(clk_hw);
}

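/*
 * of_dra7_apll_setup - parse a "ti,dra7-apll-clock" node
 *
 * Allocate the dpll_data/clk_hw_omap/clk_init_data triplet, collect the
 * parent clock names, and map the control (reg index 0) and idlest
 * (reg index 1) registers before handing off to omap_clk_register_apll().
 */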
static void __init of_dra7_apll_setup(struct device_node *node)
{
	struct dpll_data *ad = NULL;
	struct clk_hw_omap *clk_hw = NULL;
	struct clk_init_data *init = NULL;
	const char **parent_names = NULL;
	int ret;

	ad = kzalloc(sizeof(*ad), GFP_KERNEL);
	clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
	init = kzalloc(sizeof(*init), GFP_KERNEL);
	if (!ad || !clk_hw || !init)
		goto cleanup;

	clk_hw->dpll_data = ad;
	clk_hw->hw.init = init;

	init->name = node->name;
	init->ops = &apll_ck_ops;

	init->num_parents = of_clk_get_parent_count(node);
	if (init->num_parents < 1) {
		pr_err("dra7 apll %s must have parent(s)\n", node->name);
		goto cleanup;
	}

	parent_names = kzalloc(sizeof(char *) * init->num_parents, GFP_KERNEL);
	if (!parent_names)
		goto cleanup;

	of_clk_parent_fill(node, parent_names, init->num_parents);

	init->parent_names = parent_names;

	ret = ti_clk_get_reg_addr(node, 0, &ad->control_reg);
	ret |= ti_clk_get_reg_addr(node, 1, &ad->idlest_reg);

	if (ret)
		goto cleanup;

	ad->idlest_mask = 0x1;
	ad->enable_mask = 0x3;

	omap_clk_register_apll(&clk_hw->hw, node);
	return;

cleanup:
	kfree(parent_names);
	kfree(ad);
	kfree(clk_hw);
	kfree(init);
}
CLK_OF_DECLARE(dra7_apll_clock, "ti,dra7-apll-clock", of_dra7_apll_setup);
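/*
 * Illustrative device tree node matched by the setup above (clock names
 * and register offsets are placeholders, not taken from a real board file):
 *
 *	apll_pcie_ck: apll_pcie_ck {
 *		#clock-cells = <0>;
 *		compatible = "ti,dra7-apll-clock";
 *		clocks = <&apll_pcie_in_clk_mux>, <&dpll_pcie_ref_ck>;
 *		reg = <0x021c>, <0x0220>;
 *	};
 *
 * clocks[0] is the reference clock and clocks[1] the bypass clock;
 * reg[0] is the control register and reg[1] the idlest register.
 */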

#define OMAP2_EN_APLL_LOCKED 0x3
#define OMAP2_EN_APLL_STOPPED 0x0

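/* The OMAP2 APLL counts as enabled only when its enable field reads back OMAP2_EN_APLL_LOCKED. */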
static int omap2_apll_is_enabled(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	struct dpll_data *ad = clk->dpll_data;
	u32 v;

	v = ti_clk_ll_ops->clk_readl(&ad->control_reg);
	v &= ad->enable_mask;

	v >>= __ffs(ad->enable_mask);

	return v == OMAP2_EN_APLL_LOCKED ? 1 : 0;
}

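/* The APLL runs at its fixed rate when locked and is reported as 0 Hz otherwise. */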
static unsigned long omap2_apll_recalc(struct clk_hw *hw,
				       unsigned long parent_rate)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);

	if (omap2_apll_is_enabled(hw))
		return clk->fixed_rate;

	return 0;
}

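/*
 * omap2_apll_enable - lock an OMAP2 APLL
 *
 * Write OMAP2_EN_APLL_LOCKED into the enable field of the control register
 * and poll the idlest register until the lock bit is set, giving up with
 * -EBUSY after MAX_APLL_WAIT_TRIES iterations (1 us apart).
 */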
static int omap2_apll_enable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	struct dpll_data *ad = clk->dpll_data;
	u32 v;
	int i = 0;

	v = ti_clk_ll_ops->clk_readl(&ad->control_reg);
	v &= ~ad->enable_mask;
	v |= OMAP2_EN_APLL_LOCKED << __ffs(ad->enable_mask);
	ti_clk_ll_ops->clk_writel(v, &ad->control_reg);

	while (1) {
		v = ti_clk_ll_ops->clk_readl(&ad->idlest_reg);
		if (v & ad->idlest_mask)
			break;
		if (i >= MAX_APLL_WAIT_TRIES)
			break;
		i++;
		udelay(1);
	}

	if (i == MAX_APLL_WAIT_TRIES) {
		pr_warn("%s failed to transition to locked\n",
			clk_hw_get_name(&clk->hw));
		return -EBUSY;
	}

	return 0;
}

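/* Stop the APLL by writing OMAP2_EN_APLL_STOPPED into the enable field. */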
static void omap2_apll_disable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	struct dpll_data *ad = clk->dpll_data;
	u32 v;

	v = ti_clk_ll_ops->clk_readl(&ad->control_reg);
	v &= ~ad->enable_mask;
	v |= OMAP2_EN_APLL_STOPPED << __ffs(ad->enable_mask);
	ti_clk_ll_ops->clk_writel(v, &ad->control_reg);
}

static struct clk_ops omap2_apll_ops = {
	.enable = &omap2_apll_enable,
	.disable = &omap2_apll_disable,
	.is_enabled = &omap2_apll_is_enabled,
	.recalc_rate = &omap2_apll_recalc,
};

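/*
 * Read-modify-write the autoidle field of the APLL autoidle register.
 * Used by the allow_idle/deny_idle hwops below.
 */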
static void omap2_apll_set_autoidle(struct clk_hw_omap *clk, u32 val)
{
	struct dpll_data *ad = clk->dpll_data;
	u32 v;

	v = ti_clk_ll_ops->clk_readl(&ad->autoidle_reg);
	v &= ~ad->autoidle_mask;
	v |= val << __ffs(ad->autoidle_mask);
	ti_clk_ll_ops->clk_writel(v, &ad->autoidle_reg);
}

#define OMAP2_APLL_AUTOIDLE_LOW_POWER_STOP 0x3
#define OMAP2_APLL_AUTOIDLE_DISABLE 0x0

static void omap2_apll_allow_idle(struct clk_hw_omap *clk)
{
	omap2_apll_set_autoidle(clk, OMAP2_APLL_AUTOIDLE_LOW_POWER_STOP);
}

static void omap2_apll_deny_idle(struct clk_hw_omap *clk)
{
	omap2_apll_set_autoidle(clk, OMAP2_APLL_AUTOIDLE_DISABLE);
}

static const struct clk_hw_omap_ops omap2_apll_hwops = {
	.allow_idle = &omap2_apll_allow_idle,
	.deny_idle = &omap2_apll_deny_idle,
};

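/*
 * of_omap2_apll_setup - parse a "ti,omap2-apll-clock" node
 *
 * The node must have exactly one parent and provide ti,clock-frequency,
 * ti,bit-shift and ti,idlest-shift properties plus three register ranges:
 * control (reg index 0), autoidle (reg index 1) and idlest (reg index 2).
 */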
static void __init of_omap2_apll_setup(struct device_node *node)
{
	struct dpll_data *ad = NULL;
	struct clk_hw_omap *clk_hw = NULL;
	struct clk_init_data *init = NULL;
	struct clk *clk;
	const char *parent_name;
	u32 val;
	int ret;

	ad = kzalloc(sizeof(*ad), GFP_KERNEL);
	clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
	init = kzalloc(sizeof(*init), GFP_KERNEL);

	if (!ad || !clk_hw || !init)
		goto cleanup;

	clk_hw->dpll_data = ad;
	clk_hw->hw.init = init;
	init->ops = &omap2_apll_ops;
	init->name = node->name;
	clk_hw->ops = &omap2_apll_hwops;

	init->num_parents = of_clk_get_parent_count(node);
	if (init->num_parents != 1) {
		pr_err("%s must have one parent\n", node->name);
		goto cleanup;
	}

	parent_name = of_clk_get_parent_name(node, 0);
	init->parent_names = &parent_name;

	if (of_property_read_u32(node, "ti,clock-frequency", &val)) {
		pr_err("%s missing clock-frequency\n", node->name);
		goto cleanup;
	}
	clk_hw->fixed_rate = val;

	if (of_property_read_u32(node, "ti,bit-shift", &val)) {
		pr_err("%s missing bit-shift\n", node->name);
		goto cleanup;
	}

	clk_hw->enable_bit = val;
	ad->enable_mask = 0x3 << val;
	ad->autoidle_mask = 0x3 << val;

	if (of_property_read_u32(node, "ti,idlest-shift", &val)) {
		pr_err("%s missing idlest-shift\n", node->name);
		goto cleanup;
	}

	ad->idlest_mask = 1 << val;

	ret = ti_clk_get_reg_addr(node, 0, &ad->control_reg);
	ret |= ti_clk_get_reg_addr(node, 1, &ad->autoidle_reg);
	ret |= ti_clk_get_reg_addr(node, 2, &ad->idlest_reg);

	if (ret)
		goto cleanup;

	clk = clk_register(NULL, &clk_hw->hw);
	if (!IS_ERR(clk)) {
		of_clk_add_provider(node, of_clk_src_simple_get, clk);
		kfree(init);
		return;
	}
cleanup:
	kfree(ad);
	kfree(clk_hw);
	kfree(init);
}
CLK_OF_DECLARE(omap2_apll_clock, "ti,omap2-apll-clock",
	       of_omap2_apll_setup);
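/*
 * Illustrative device tree node for the OMAP2 binding above (clock name,
 * parent and offsets are placeholders, not taken from a real board file);
 * the reg entries are the control, autoidle and idlest registers in order:
 *
 *	apll96_ck: apll96_ck {
 *		#clock-cells = <0>;
 *		compatible = "ti,omap2-apll-clock";
 *		clocks = <&sys_ck>;
 *		ti,bit-shift = <2>;
 *		ti,idlest-shift = <8>;
 *		ti,clock-frequency = <96000000>;
 *		reg = <0x0500>, <0x0520>, <0x0530>;
 *	};
 */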