// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
 *
 * Standard functionality for the common clock API.  See Documentation/driver-api/clk.rst
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clk/clk-conf.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/clkdev.h>

#include "clk.h"

static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);

static struct task_struct *prepare_owner;
static struct task_struct *enable_owner;

static int prepare_refcnt;
static int enable_refcnt;

static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);

/***    private data structures    ***/

struct clk_core {
	const char		*name;
	const struct clk_ops	*ops;
	struct clk_hw		*hw;
	struct module		*owner;
	struct device		*dev;
	struct clk_core		*parent;
	const char		**parent_names;
	struct clk_core		**parents;
	u8			num_parents;
	u8			new_parent_index;
	unsigned long		rate;
	unsigned long		req_rate;
	unsigned long		new_rate;
	struct clk_core		*new_parent;
	struct clk_core		*new_child;
	unsigned long		flags;
	bool			orphan;
	bool			rpm_enabled;
	unsigned int		enable_count;
	unsigned int		prepare_count;
	unsigned int		protect_count;
	unsigned long		min_rate;
	unsigned long		max_rate;
	unsigned long		accuracy;
	int			phase;
	struct clk_duty		duty;
	struct hlist_head	children;
	struct hlist_node	child_node;
	struct hlist_head	clks;
	unsigned int		notifier_count;
#ifdef CONFIG_DEBUG_FS
	struct dentry		*dentry;
	struct hlist_node	debug_node;
#endif
	struct kref		ref;
};

#define CREATE_TRACE_POINTS
#include <trace/events/clk.h>

struct clk {
	struct clk_core	*core;
	struct device *dev;
	const char *dev_id;
	const char *con_id;
	unsigned long min_rate;
	unsigned long max_rate;
	unsigned int exclusive_count;
	struct hlist_node clks_node;
};

/***           runtime pm          ***/
static int clk_pm_runtime_get(struct clk_core *core)
{
	int ret;

	if (!core->rpm_enabled)
		return 0;

	ret = pm_runtime_get_sync(core->dev);
	return ret < 0 ? ret : 0;
}

static void clk_pm_runtime_put(struct clk_core *core)
{
	if (!core->rpm_enabled)
		return;

	pm_runtime_put_sync(core->dev);
}

/***           locking             ***/
static void clk_prepare_lock(void)
{
	if (!mutex_trylock(&prepare_lock)) {
		if (prepare_owner == current) {
			prepare_refcnt++;
			return;
		}
		mutex_lock(&prepare_lock);
	}
	WARN_ON_ONCE(prepare_owner != NULL);
	WARN_ON_ONCE(prepare_refcnt != 0);
	prepare_owner = current;
	prepare_refcnt = 1;
}

static void clk_prepare_unlock(void)
{
	WARN_ON_ONCE(prepare_owner != current);
	WARN_ON_ONCE(prepare_refcnt == 0);

	if (--prepare_refcnt)
		return;
	prepare_owner = NULL;
	mutex_unlock(&prepare_lock);
}

static unsigned long clk_enable_lock(void)
	__acquires(enable_lock)
{
	unsigned long flags;

	/*
	 * On UP systems, spin_trylock_irqsave() always returns true, even if
	 * we already hold the lock. So, in that case, we rely only on
	 * reference counting.
	 */
	if (!IS_ENABLED(CONFIG_SMP) ||
	    !spin_trylock_irqsave(&enable_lock, flags)) {
		if (enable_owner == current) {
			enable_refcnt++;
			__acquire(enable_lock);
			if (!IS_ENABLED(CONFIG_SMP))
				local_save_flags(flags);
			return flags;
		}
		spin_lock_irqsave(&enable_lock, flags);
	}
	WARN_ON_ONCE(enable_owner != NULL);
	WARN_ON_ONCE(enable_refcnt != 0);
	enable_owner = current;
	enable_refcnt = 1;
	return flags;
}

static void clk_enable_unlock(unsigned long flags)
	__releases(enable_lock)
{
	WARN_ON_ONCE(enable_owner != current);
	WARN_ON_ONCE(enable_refcnt == 0);

	if (--enable_refcnt) {
		__release(enable_lock);
		return;
	}
	enable_owner = NULL;
	spin_unlock_irqrestore(&enable_lock, flags);
}
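
/*
 * Illustrative sketch (not from the original file): both locks above are
 * reentrant per task, so framework paths that already hold the prepare lock
 * may call helpers that take it again:
 *
 *	clk_prepare_lock();
 *	clk_prepare_lock();	// same task: only bumps prepare_refcnt
 *	clk_prepare_unlock();
 *	clk_prepare_unlock();	// refcount hits zero, mutex is released
 */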

static bool clk_core_rate_is_protected(struct clk_core *core)
{
	return core->protect_count;
}

static bool clk_core_is_prepared(struct clk_core *core)
{
	bool ret = false;

	/*
	 * .is_prepared is optional for clocks that can prepare
	 * fall back to software usage counter if it is missing
	 */
	if (!core->ops->is_prepared)
		return core->prepare_count;

	if (!clk_pm_runtime_get(core)) {
		ret = core->ops->is_prepared(core->hw);
		clk_pm_runtime_put(core);
	}

	return ret;
}

static bool clk_core_is_enabled(struct clk_core *core)
{
	bool ret = false;

	/*
	 * .is_enabled is only mandatory for clocks that gate
	 * fall back to software usage counter if .is_enabled is missing
	 */
	if (!core->ops->is_enabled)
		return core->enable_count;

	/*
	 * Check if clock controller's device is runtime active before
	 * calling .is_enabled callback. If not, assume that clock is
	 * disabled, because we might be called from atomic context, from
	 * which pm_runtime_get() is not allowed.
	 * This function is called mainly from clk_disable_unused_subtree,
	 * which ensures proper runtime pm activation of controller before
	 * taking enable spinlock, but the below check is needed if one tries
	 * to call it from other places.
	 */
	if (core->rpm_enabled) {
		pm_runtime_get_noresume(core->dev);
		if (!pm_runtime_active(core->dev)) {
			ret = false;
			goto done;
		}
	}

	ret = core->ops->is_enabled(core->hw);
done:
	if (core->rpm_enabled)
		pm_runtime_put(core->dev);

	return ret;
}

/***	helper functions   ***/

const char *__clk_get_name(const struct clk *clk)
{
	return !clk ? NULL : clk->core->name;
}
EXPORT_SYMBOL_GPL(__clk_get_name);

const char *clk_hw_get_name(const struct clk_hw *hw)
{
	return hw->core->name;
}
EXPORT_SYMBOL_GPL(clk_hw_get_name);

struct clk_hw *__clk_get_hw(struct clk *clk)
{
	return !clk ? NULL : clk->core->hw;
}
EXPORT_SYMBOL_GPL(__clk_get_hw);

unsigned int clk_hw_get_num_parents(const struct clk_hw *hw)
{
	return hw->core->num_parents;
}
EXPORT_SYMBOL_GPL(clk_hw_get_num_parents);

struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw)
{
	return hw->core->parent ? hw->core->parent->hw : NULL;
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent);

static struct clk_core *__clk_lookup_subtree(const char *name,
					     struct clk_core *core)
{
	struct clk_core *child;
	struct clk_core *ret;

	if (!strcmp(core->name, name))
		return core;

	hlist_for_each_entry(child, &core->children, child_node) {
		ret = __clk_lookup_subtree(name, child);
		if (ret)
			return ret;
	}

	return NULL;
}

static struct clk_core *clk_core_lookup(const char *name)
{
	struct clk_core *root_clk;
	struct clk_core *ret;

	if (!name)
		return NULL;

	/* search the 'proper' clk tree first */
	hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	/* if not found, then search the orphan tree */
	hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	return NULL;
}

static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core,
						     u8 index)
{
	if (!core || index >= core->num_parents)
		return NULL;

	if (!core->parents[index])
		core->parents[index] =
			clk_core_lookup(core->parent_names[index]);

	return core->parents[index];
}

struct clk_hw *
clk_hw_get_parent_by_index(const struct clk_hw *hw, unsigned int index)
{
	struct clk_core *parent;

	parent = clk_core_get_parent_by_index(hw->core, index);

	return !parent ? NULL : parent->hw;
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent_by_index);

unsigned int __clk_get_enable_count(struct clk *clk)
{
	return !clk ? 0 : clk->core->enable_count;
}

static unsigned long clk_core_get_rate_nolock(struct clk_core *core)
{
	if (!core)
		return 0;

	if (!core->num_parents || core->parent)
		return core->rate;

	/*
	 * Clk must have a parent because num_parents > 0 but the parent isn't
	 * known yet. Best to return 0 as the rate of this clk until we can
	 * properly recalc the rate based on the parent's rate.
	 */
	return 0;
}

unsigned long clk_hw_get_rate(const struct clk_hw *hw)
{
	return clk_core_get_rate_nolock(hw->core);
}
EXPORT_SYMBOL_GPL(clk_hw_get_rate);

static unsigned long __clk_get_accuracy(struct clk_core *core)
{
	if (!core)
		return 0;

	return core->accuracy;
}

unsigned long __clk_get_flags(struct clk *clk)
{
	return !clk ? 0 : clk->core->flags;
}
EXPORT_SYMBOL_GPL(__clk_get_flags);

unsigned long clk_hw_get_flags(const struct clk_hw *hw)
{
	return hw->core->flags;
}
EXPORT_SYMBOL_GPL(clk_hw_get_flags);

bool clk_hw_is_prepared(const struct clk_hw *hw)
{
	return clk_core_is_prepared(hw->core);
}
EXPORT_SYMBOL_GPL(clk_hw_is_prepared);

bool clk_hw_rate_is_protected(const struct clk_hw *hw)
{
	return clk_core_rate_is_protected(hw->core);
}
EXPORT_SYMBOL_GPL(clk_hw_rate_is_protected);

bool clk_hw_is_enabled(const struct clk_hw *hw)
{
	return clk_core_is_enabled(hw->core);
}
EXPORT_SYMBOL_GPL(clk_hw_is_enabled);

bool __clk_is_enabled(struct clk *clk)
{
	if (!clk)
		return false;

	return clk_core_is_enabled(clk->core);
}
EXPORT_SYMBOL_GPL(__clk_is_enabled);

static bool mux_is_better_rate(unsigned long rate, unsigned long now,
			   unsigned long best, unsigned long flags)
{
	if (flags & CLK_MUX_ROUND_CLOSEST)
		return abs(now - rate) < abs(best - rate);

	return now <= rate && now > best;
}

int clk_mux_determine_rate_flags(struct clk_hw *hw,
				 struct clk_rate_request *req,
				 unsigned long flags)
{
	struct clk_core *core = hw->core, *parent, *best_parent = NULL;
	int i, num_parents, ret;
	unsigned long best = 0;
	struct clk_rate_request parent_req = *req;

	/* if NO_REPARENT flag set, pass through to current parent */
	if (core->flags & CLK_SET_RATE_NO_REPARENT) {
		parent = core->parent;
		if (core->flags & CLK_SET_RATE_PARENT) {
			ret = __clk_determine_rate(parent ? parent->hw : NULL,
						   &parent_req);
			if (ret)
				return ret;

			best = parent_req.rate;
		} else if (parent) {
			best = clk_core_get_rate_nolock(parent);
		} else {
			best = clk_core_get_rate_nolock(core);
		}

		goto out;
	}

	/* find the parent that can provide the fastest rate <= rate */
	num_parents = core->num_parents;
	for (i = 0; i < num_parents; i++) {
		parent = clk_core_get_parent_by_index(core, i);
		if (!parent)
			continue;

		if (core->flags & CLK_SET_RATE_PARENT) {
			parent_req = *req;
			ret = __clk_determine_rate(parent->hw, &parent_req);
			if (ret)
				continue;
		} else {
			parent_req.rate = clk_core_get_rate_nolock(parent);
		}

		if (mux_is_better_rate(req->rate, parent_req.rate,
				       best, flags)) {
			best_parent = parent;
			best = parent_req.rate;
		}
	}

	if (!best_parent)
		return -EINVAL;

out:
	if (best_parent)
		req->best_parent_hw = best_parent->hw;
	req->best_parent_rate = best;
	req->rate = best;

	return 0;
}
EXPORT_SYMBOL_GPL(clk_mux_determine_rate_flags);

struct clk *__clk_lookup(const char *name)
{
	struct clk_core *core = clk_core_lookup(name);

	return !core ? NULL : core->hw->clk;
}

static void clk_core_get_boundaries(struct clk_core *core,
				    unsigned long *min_rate,
				    unsigned long *max_rate)
{
	struct clk *clk_user;

	*min_rate = core->min_rate;
	*max_rate = core->max_rate;

	hlist_for_each_entry(clk_user, &core->clks, clks_node)
		*min_rate = max(*min_rate, clk_user->min_rate);

	hlist_for_each_entry(clk_user, &core->clks, clks_node)
		*max_rate = min(*max_rate, clk_user->max_rate);
}

void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
			   unsigned long max_rate)
{
	hw->core->min_rate = min_rate;
	hw->core->max_rate = max_rate;
}
EXPORT_SYMBOL_GPL(clk_hw_set_rate_range);
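
/*
 * Illustrative sketch (assumed driver code, not part of this file): a clock
 * provider can constrain the rates consumers may request on one of its
 * clocks, typically from its registration/probe path:
 *
 *	clk_hw_set_rate_range(&my_pll_hw, 600000000, 1200000000);
 *
 * "my_pll_hw" is a hypothetical struct clk_hw embedded in the provider.
 */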

/*
 * __clk_mux_determine_rate - clk_ops::determine_rate implementation for a mux type clk
 * @hw: mux type clk to determine rate on
 * @req: rate request, also used to return preferred parent and frequencies
 *
 * Helper for finding best parent to provide a given frequency. This can be used
 * directly as a determine_rate callback (e.g. for a mux), or from a more
 * complex clock that may combine a mux with other operations.
 *
 * Returns: 0 on success, -EERROR value on error
 */
int __clk_mux_determine_rate(struct clk_hw *hw,
			     struct clk_rate_request *req)
{
	return clk_mux_determine_rate_flags(hw, req, 0);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);

int __clk_mux_determine_rate_closest(struct clk_hw *hw,
				     struct clk_rate_request *req)
{
	return clk_mux_determine_rate_flags(hw, req, CLK_MUX_ROUND_CLOSEST);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);
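
/*
 * Illustrative sketch (assumed driver code, not part of this file): a simple
 * mux provider can plug the helper above straight into its clk_ops:
 *
 *	static const struct clk_ops my_mux_ops = {
 *		.get_parent	= my_mux_get_parent,
 *		.set_parent	= my_mux_set_parent,
 *		.determine_rate	= __clk_mux_determine_rate,
 *	};
 *
 * "my_mux_*" are hypothetical callbacks; only .determine_rate comes from
 * this file.
 */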

/***        clk api        ***/

static void clk_core_rate_unprotect(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (WARN(core->protect_count == 0,
	    "%s already unprotected\n", core->name))
		return;

	if (--core->protect_count > 0)
		return;

	clk_core_rate_unprotect(core->parent);
}

static int clk_core_rate_nuke_protect(struct clk_core *core)
{
	int ret;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return -EINVAL;

	if (core->protect_count == 0)
		return 0;

	ret = core->protect_count;
	core->protect_count = 1;
	clk_core_rate_unprotect(core);

	return ret;
}

/**
 * clk_rate_exclusive_put - release exclusivity over clock rate control
 * @clk: the clk over which the exclusivity is released
 *
 * clk_rate_exclusive_put() completes a critical section during which a clock
 * consumer cannot tolerate any other consumer making any operation on the
 * clock which could result in a rate change or rate glitch. Exclusive clocks
 * cannot have their rate changed, either directly or indirectly due to changes
 * further up the parent chain of clocks. As a result, clocks up parent chain
 * also get under exclusive control of the calling consumer.
 *
 * If exclusivity is claimed more than once on clock, even by the same consumer,
 * the rate effectively gets locked as exclusivity can't be preempted.
 *
 * Calls to clk_rate_exclusive_put() must be balanced with calls to
 * clk_rate_exclusive_get(). Calls to this function may sleep, and do not return
 * error status.
 */
void clk_rate_exclusive_put(struct clk *clk)
{
	if (!clk)
		return;

	clk_prepare_lock();

	/*
	 * if there is something wrong with this consumer protect count, stop
	 * here before messing with the provider
	 */
	if (WARN_ON(clk->exclusive_count <= 0))
		goto out;

	clk_core_rate_unprotect(clk->core);
	clk->exclusive_count--;
out:
	clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_rate_exclusive_put);

static void clk_core_rate_protect(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (core->protect_count == 0)
		clk_core_rate_protect(core->parent);

	core->protect_count++;
}

static void clk_core_rate_restore_protect(struct clk_core *core, int count)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (count == 0)
		return;

	clk_core_rate_protect(core);
	core->protect_count = count;
}

/**
 * clk_rate_exclusive_get - get exclusivity over the clk rate control
 * @clk: the clk over which the exclusivity of rate control is requested
 *
 * clk_rate_exclusive_get() begins a critical section during which a clock
 * consumer cannot tolerate any other consumer making any operation on the
 * clock which could result in a rate change or rate glitch. Exclusive clocks
 * cannot have their rate changed, either directly or indirectly due to changes
 * further up the parent chain of clocks. As a result, clocks up parent chain
 * also get under exclusive control of the calling consumer.
 *
 * If exclusivity is claimed more than once on clock, even by the same consumer,
 * the rate effectively gets locked as exclusivity can't be preempted.
 *
 * Calls to clk_rate_exclusive_get() should be balanced with calls to
 * clk_rate_exclusive_put(). Calls to this function may sleep.
 * Returns 0 on success, -EERROR otherwise
 */
int clk_rate_exclusive_get(struct clk *clk)
{
	if (!clk)
		return 0;

	clk_prepare_lock();
	clk_core_rate_protect(clk->core);
	clk->exclusive_count++;
	clk_prepare_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(clk_rate_exclusive_get);
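
/*
 * Illustrative sketch (assumed consumer code, not part of this file): a
 * consumer that cannot tolerate rate glitches brackets the sensitive region
 * with the exclusive get/put pair:
 *
 *	ret = clk_rate_exclusive_get(clk);
 *	if (ret)
 *		return ret;
 *	// the rate of "clk" and its parents is now locked against others
 *	do_latency_critical_work();
 *	clk_rate_exclusive_put(clk);
 *
 * "do_latency_critical_work()" is a hypothetical placeholder.
 */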

static void clk_core_unprepare(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (WARN(core->prepare_count == 0,
	    "%s already unprepared\n", core->name))
		return;

	if (WARN(core->prepare_count == 1 && core->flags & CLK_IS_CRITICAL,
	    "Unpreparing critical %s\n", core->name))
		return;

	if (core->flags & CLK_SET_RATE_GATE)
		clk_core_rate_unprotect(core);

	if (--core->prepare_count > 0)
		return;

	WARN(core->enable_count > 0, "Unpreparing enabled %s\n", core->name);

	trace_clk_unprepare(core);

	if (core->ops->unprepare)
		core->ops->unprepare(core->hw);

	clk_pm_runtime_put(core);

	trace_clk_unprepare_complete(core);
	clk_core_unprepare(core->parent);
}

static void clk_core_unprepare_lock(struct clk_core *core)
{
	clk_prepare_lock();
	clk_core_unprepare(core);
	clk_prepare_unlock();
}

/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: the clk being unprepared
 *
 * clk_unprepare may sleep, which differentiates it from clk_disable. In a
 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
 * if the operation may sleep. One example is a clk which is accessed over
 * I2C. In the complex case a clk gate operation may require a fast and a slow
 * part. It is this reason that clk_unprepare and clk_disable are not mutually
 * exclusive. In fact clk_disable must be called before clk_unprepare.
 */
void clk_unprepare(struct clk *clk)
{
	if (IS_ERR_OR_NULL(clk))
		return;

	clk_core_unprepare_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_unprepare);

static int clk_core_prepare(struct clk_core *core)
{
	int ret = 0;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	if (core->prepare_count == 0) {
		ret = clk_pm_runtime_get(core);
		if (ret)
			return ret;

		ret = clk_core_prepare(core->parent);
		if (ret)
			goto runtime_put;

		trace_clk_prepare(core);

		if (core->ops->prepare)
			ret = core->ops->prepare(core->hw);

		trace_clk_prepare_complete(core);

		if (ret)
			goto unprepare;
	}

	core->prepare_count++;

	/*
	 * CLK_SET_RATE_GATE is a special case of clock protection
	 * Instead of a consumer claiming exclusive rate control, it is
	 * actually the provider which prevents any consumer from making any
	 * operation which could result in a rate change or rate glitch while
	 * the clock is prepared.
	 */
	if (core->flags & CLK_SET_RATE_GATE)
		clk_core_rate_protect(core);

	return 0;
unprepare:
	clk_core_unprepare(core->parent);
runtime_put:
	clk_pm_runtime_put(core);
	return ret;
}

static int clk_core_prepare_lock(struct clk_core *core)
{
	int ret;

	clk_prepare_lock();
	ret = clk_core_prepare(core);
	clk_prepare_unlock();

	return ret;
}

/**
 * clk_prepare - prepare a clock source
 * @clk: the clk being prepared
 *
 * clk_prepare may sleep, which differentiates it from clk_enable. In a simple
 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
 * operation may sleep. One example is a clk which is accessed over I2C. In
 * the complex case a clk ungate operation may require a fast and a slow part.
 * It is this reason that clk_prepare and clk_enable are not mutually
 * exclusive. In fact clk_prepare must be called before clk_enable.
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_prepare(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_prepare_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_prepare);
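
/*
 * Illustrative sketch (assumed consumer code, not part of this file): the
 * usual consumer sequence pairs the sleepable and atomic halves in order:
 *
 *	ret = clk_prepare(clk);		// may sleep, e.g. for an I2C clock
 *	if (ret)
 *		return ret;
 *	ret = clk_enable(clk);		// must not sleep
 *	if (ret) {
 *		clk_unprepare(clk);
 *		return ret;
 *	}
 *	...
 *	clk_disable(clk);
 *	clk_unprepare(clk);
 */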

static void clk_core_disable(struct clk_core *core)
{
	lockdep_assert_held(&enable_lock);

	if (!core)
		return;

	if (WARN(core->enable_count == 0, "%s already disabled\n", core->name))
		return;

	if (WARN(core->enable_count == 1 && core->flags & CLK_IS_CRITICAL,
	    "Disabling critical %s\n", core->name))
		return;

	if (--core->enable_count > 0)
		return;

	trace_clk_disable_rcuidle(core);

	if (core->ops->disable)
		core->ops->disable(core->hw);

	trace_clk_disable_complete_rcuidle(core);

	clk_core_disable(core->parent);
}

static void clk_core_disable_lock(struct clk_core *core)
{
	unsigned long flags;

	flags = clk_enable_lock();
	clk_core_disable(core);
	clk_enable_unlock(flags);
}

/**
 * clk_disable - gate a clock
 * @clk: the clk being gated
 *
 * clk_disable must not sleep, which differentiates it from clk_unprepare. In
 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
 * clk if the operation is fast and will never sleep. One example is a
 * SoC-internal clk which is controlled via simple register writes. In the
 * complex case a clk gate operation may require a fast and a slow part. It is
 * this reason that clk_unprepare and clk_disable are not mutually exclusive.
 * In fact clk_disable must be called before clk_unprepare.
 */
void clk_disable(struct clk *clk)
{
	if (IS_ERR_OR_NULL(clk))
		return;

	clk_core_disable_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_disable);

static int clk_core_enable(struct clk_core *core)
{
	int ret = 0;

	lockdep_assert_held(&enable_lock);

	if (!core)
		return 0;

	if (WARN(core->prepare_count == 0,
	    "Enabling unprepared %s\n", core->name))
		return -ESHUTDOWN;

	if (core->enable_count == 0) {
		ret = clk_core_enable(core->parent);

		if (ret)
			return ret;

		trace_clk_enable_rcuidle(core);

		if (core->ops->enable)
			ret = core->ops->enable(core->hw);

		trace_clk_enable_complete_rcuidle(core);

		if (ret) {
			clk_core_disable(core->parent);
			return ret;
		}
	}

	core->enable_count++;
	return 0;
}

static int clk_core_enable_lock(struct clk_core *core)
{
	unsigned long flags;
	int ret;

	flags = clk_enable_lock();
	ret = clk_core_enable(core);
	clk_enable_unlock(flags);

	return ret;
}

/**
 * clk_gate_restore_context - restore context for poweroff
 * @hw: the clk_hw pointer of clock whose state is to be restored
 *
 * The clock gate restore context function enables or disables
 * the gate clocks based on the enable_count. This is done in cases
 * where the clock context is lost and based on the enable_count
 * the clock either needs to be enabled/disabled. This
 * helps restore the state of gate clocks.
 */
void clk_gate_restore_context(struct clk_hw *hw)
{
	struct clk_core *core = hw->core;

	if (core->enable_count)
		core->ops->enable(hw);
	else
		core->ops->disable(hw);
}
EXPORT_SYMBOL_GPL(clk_gate_restore_context);
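
/*
 * Illustrative sketch (assumed driver code, not part of this file): a gate
 * clock provider can reuse the helper above as its .restore_context op:
 *
 *	static const struct clk_ops my_gate_ops = {
 *		.enable		 = my_gate_enable,
 *		.disable	 = my_gate_disable,
 *		.restore_context = clk_gate_restore_context,
 *	};
 *
 * "my_gate_*" are hypothetical callbacks.
 */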

static int clk_core_save_context(struct clk_core *core)
{
	struct clk_core *child;
	int ret = 0;

	hlist_for_each_entry(child, &core->children, child_node) {
		ret = clk_core_save_context(child);
		if (ret < 0)
			return ret;
	}

	if (core->ops && core->ops->save_context)
		ret = core->ops->save_context(core->hw);

	return ret;
}

static void clk_core_restore_context(struct clk_core *core)
{
	struct clk_core *child;

	if (core->ops && core->ops->restore_context)
		core->ops->restore_context(core->hw);

	hlist_for_each_entry(child, &core->children, child_node)
		clk_core_restore_context(child);
}

/**
 * clk_save_context - save clock context for poweroff
 *
 * Saves the context of the clock register for powerstates in which the
 * contents of the registers will be lost. Occurs deep within the suspend
 * code. Returns 0 on success.
 */
int clk_save_context(void)
{
	struct clk_core *clk;
	int ret;

	hlist_for_each_entry(clk, &clk_root_list, child_node) {
		ret = clk_core_save_context(clk);
		if (ret < 0)
			return ret;
	}

	hlist_for_each_entry(clk, &clk_orphan_list, child_node) {
		ret = clk_core_save_context(clk);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(clk_save_context);

/**
 * clk_restore_context - restore clock context after poweroff
 *
 * Restore the saved clock context upon resume.
 *
 */
void clk_restore_context(void)
{
	struct clk_core *core;

	hlist_for_each_entry(core, &clk_root_list, child_node)
		clk_core_restore_context(core);

	hlist_for_each_entry(core, &clk_orphan_list, child_node)
		clk_core_restore_context(core);
}
EXPORT_SYMBOL_GPL(clk_restore_context);
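
/*
 * Illustrative sketch (assumed platform code, not part of this file): a
 * platform that loses clock-controller register state in deep sleep can wire
 * these helpers into its suspend/resume path:
 *
 *	static int my_platform_suspend_noirq(struct device *dev)
 *	{
 *		return clk_save_context();
 *	}
 *
 *	static int my_platform_resume_noirq(struct device *dev)
 *	{
 *		clk_restore_context();
 *		return 0;
 *	}
 *
 * "my_platform_*" are hypothetical callbacks.
 */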

/**
 * clk_enable - ungate a clock
 * @clk: the clk being ungated
 *
 * clk_enable must not sleep, which differentiates it from clk_prepare. In a
 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
 * if the operation will never sleep. One example is a SoC-internal clk which
 * is controlled via simple register writes. In the complex case a clk ungate
 * operation may require a fast and a slow part. It is this reason that
 * clk_enable and clk_prepare are not mutually exclusive. In fact clk_prepare
 * must be called before clk_enable. Returns 0 on success, -EERROR
 * otherwise.
 */
int clk_enable(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_enable_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_enable);
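
/*
 * Illustrative sketch (assumed consumer code, not part of this file): once a
 * clock has been prepared (e.g. at probe time), the enable/disable half may
 * be toggled from atomic context such as an interrupt handler:
 *
 *	spin_lock_irqsave(&my_lock, flags);
 *	if (clk_enable(clk) == 0)
 *		start_transfer();
 *	spin_unlock_irqrestore(&my_lock, flags);
 *
 * "my_lock" and "start_transfer()" are hypothetical placeholders.
 */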

static int clk_core_prepare_enable(struct clk_core *core)
{
	int ret;

	ret = clk_core_prepare_lock(core);
	if (ret)
		return ret;

	ret = clk_core_enable_lock(core);
	if (ret)
		clk_core_unprepare_lock(core);

	return ret;
}

static void clk_core_disable_unprepare(struct clk_core *core)
{
	clk_core_disable_lock(core);
	clk_core_unprepare_lock(core);
}

static void clk_unprepare_unused_subtree(struct clk_core *core)
{
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	hlist_for_each_entry(child, &core->children, child_node)
		clk_unprepare_unused_subtree(child);

	if (core->prepare_count)
		return;

	if (core->flags & CLK_IGNORE_UNUSED)
		return;

	if (clk_pm_runtime_get(core))
		return;

	if (clk_core_is_prepared(core)) {
		trace_clk_unprepare(core);
		if (core->ops->unprepare_unused)
			core->ops->unprepare_unused(core->hw);
		else if (core->ops->unprepare)
			core->ops->unprepare(core->hw);
		trace_clk_unprepare_complete(core);
	}

	clk_pm_runtime_put(core);
}

static void clk_disable_unused_subtree(struct clk_core *core)
{
	struct clk_core *child;
	unsigned long flags;

	lockdep_assert_held(&prepare_lock);

	hlist_for_each_entry(child, &core->children, child_node)
		clk_disable_unused_subtree(child);

	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_prepare_enable(core->parent);

	if (clk_pm_runtime_get(core))
		goto unprepare_out;

	flags = clk_enable_lock();

	if (core->enable_count)
		goto unlock_out;

	if (core->flags & CLK_IGNORE_UNUSED)
		goto unlock_out;

	/*
	 * some gate clocks have special needs during the disable-unused
	 * sequence. call .disable_unused if available, otherwise fall
	 * back to .disable
	 */
	if (clk_core_is_enabled(core)) {
		trace_clk_disable(core);
		if (core->ops->disable_unused)
			core->ops->disable_unused(core->hw);
		else if (core->ops->disable)
			core->ops->disable(core->hw);
		trace_clk_disable_complete(core);
	}

unlock_out:
	clk_enable_unlock(flags);
	clk_pm_runtime_put(core);
unprepare_out:
	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_disable_unprepare(core->parent);
}

static bool clk_ignore_unused;
static int __init clk_ignore_unused_setup(char *__unused)
{
	clk_ignore_unused = true;
	return 1;
}
__setup("clk_ignore_unused", clk_ignore_unused_setup);
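
/*
 * Illustrative note (not from the original file): passing "clk_ignore_unused"
 * on the kernel command line sets the flag above, so the late_initcall below
 * leaves otherwise-unused clocks ungated, e.g.:
 *
 *	console=ttyS0,115200 clk_ignore_unused
 */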

static int clk_disable_unused(void)
{
	struct clk_core *core;

	if (clk_ignore_unused) {
		pr_warn("clk: Not disabling unused clocks\n");
		return 0;
	}

	clk_prepare_lock();

	hlist_for_each_entry(core, &clk_root_list, child_node)
		clk_disable_unused_subtree(core);

	hlist_for_each_entry(core, &clk_orphan_list, child_node)
		clk_disable_unused_subtree(core);

	hlist_for_each_entry(core, &clk_root_list, child_node)
		clk_unprepare_unused_subtree(core);

	hlist_for_each_entry(core, &clk_orphan_list, child_node)
		clk_unprepare_unused_subtree(core);

	clk_prepare_unlock();

	return 0;
}
late_initcall_sync(clk_disable_unused);

static int clk_core_determine_round_nolock(struct clk_core *core,
					   struct clk_rate_request *req)
{
	long rate;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	/*
	 * At this point, core protection will be disabled if
	 * - if the provider is not protected at all
	 * - if the calling consumer is the only one which has exclusivity
	 *   over the provider
	 */
	if (clk_core_rate_is_protected(core)) {
		req->rate = core->rate;
	} else if (core->ops->determine_rate) {
		return core->ops->determine_rate(core->hw, req);
	} else if (core->ops->round_rate) {
		rate = core->ops->round_rate(core->hw, req->rate,
					     &req->best_parent_rate);
		if (rate < 0)
			return rate;

		req->rate = rate;
	} else {
		return -EINVAL;
	}

	return 0;
}

static void clk_core_init_rate_req(struct clk_core * const core,
				   struct clk_rate_request *req)
{
	struct clk_core *parent;

	if (WARN_ON(!core || !req))
		return;

	parent = core->parent;
	if (parent) {
		req->best_parent_hw = parent->hw;
		req->best_parent_rate = parent->rate;
	} else {
		req->best_parent_hw = NULL;
		req->best_parent_rate = 0;
	}
}

static bool clk_core_can_round(struct clk_core * const core)
{
	if (core->ops->determine_rate || core->ops->round_rate)
		return true;

	return false;
}

static int clk_core_round_rate_nolock(struct clk_core *core,
				      struct clk_rate_request *req)
{
	lockdep_assert_held(&prepare_lock);

	if (!core) {
		req->rate = 0;
		return 0;
	}

	clk_core_init_rate_req(core, req);

	if (clk_core_can_round(core))
		return clk_core_determine_round_nolock(core, req);
	else if (core->flags & CLK_SET_RATE_PARENT)
		return clk_core_round_rate_nolock(core->parent, req);

	req->rate = core->rate;
	return 0;
}

/**
 * __clk_determine_rate - get the closest rate actually supported by a clock
 * @hw: determine the rate of this clock
 * @req: target rate request
 *
 * Useful for clk_ops such as .set_rate and .determine_rate.
 */
int __clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	if (!hw) {
		req->rate = 0;
		return 0;
	}

	return clk_core_round_rate_nolock(hw->core, req);
}
EXPORT_SYMBOL_GPL(__clk_determine_rate);

unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate)
{
	int ret;
	struct clk_rate_request req;

	clk_core_get_boundaries(hw->core, &req.min_rate, &req.max_rate);
	req.rate = rate;

	ret = clk_core_round_rate_nolock(hw->core, &req);
	if (ret)
		return 0;

	return req.rate;
}
EXPORT_SYMBOL_GPL(clk_hw_round_rate);

/**
 * clk_round_rate - round the given rate for a clk
 * @clk: the clk for which we are rounding a rate
 * @rate: the rate which is to be rounded
 *
 * Takes in a rate as input and rounds it to a rate that the clk can actually
 * use which is then returned. If clk doesn't support round_rate operation
 * then the parent rate is returned.
 */
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	struct clk_rate_request req;
	int ret;

	if (!clk)
		return 0;

	clk_prepare_lock();

	if (clk->exclusive_count)
		clk_core_rate_unprotect(clk->core);

	clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate);
	req.rate = rate;

	ret = clk_core_round_rate_nolock(clk->core, &req);

	if (clk->exclusive_count)
		clk_core_rate_protect(clk->core);

	clk_prepare_unlock();

	if (ret)
		return ret;

	return req.rate;
}
EXPORT_SYMBOL_GPL(clk_round_rate);
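
/*
 * Illustrative sketch (assumed consumer code, not part of this file): a
 * consumer can preview what rate the clock would actually run at before
 * committing to it with clk_set_rate():
 *
 *	long rounded = clk_round_rate(clk, 48000000);
 *
 *	if (rounded > 0 && is_acceptable(rounded))
 *		ret = clk_set_rate(clk, rounded);
 *
 * "is_acceptable()" is a hypothetical policy check; clk_set_rate() is part
 * of the consumer API defined later in this file.
 */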

/**
 * __clk_notify - call clk notifier chain
 * @core: clk that is changing rate
 * @msg: clk notifier type (see include/linux/clk.h)
 * @old_rate: old clk rate
 * @new_rate: new clk rate
 *
 * Triggers a notifier call chain on the clk rate-change notification
 * for 'clk'. Passes a pointer to the struct clk and the previous
 * and current rates to the notifier callback. Intended to be called by
 * internal clock code only. Returns NOTIFY_DONE from the last driver
 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
 * a driver returns that.
 */
static int __clk_notify(struct clk_core *core, unsigned long msg,
		unsigned long old_rate, unsigned long new_rate)
{
	struct clk_notifier *cn;
	struct clk_notifier_data cnd;
	int ret = NOTIFY_DONE;

	cnd.old_rate = old_rate;
	cnd.new_rate = new_rate;

	list_for_each_entry(cn, &clk_notifier_list, node) {
		if (cn->clk->core == core) {
			cnd.clk = cn->clk;
			ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
					&cnd);
			if (ret & NOTIFY_STOP_MASK)
				return ret;
		}
	}

	return ret;
}
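
/*
 * Illustrative sketch (assumed consumer code, not part of this file): the
 * chain above is what ultimately invokes callbacks registered with
 * clk_notifier_register() (declared in linux/clk.h, defined later in this
 * file), e.g.:
 *
 *	static int my_rate_notifier(struct notifier_block *nb,
 *				    unsigned long event, void *data)
 *	{
 *		struct clk_notifier_data *ndata = data;
 *
 *		if (event == POST_RATE_CHANGE)
 *			my_adjust_dividers(ndata->new_rate);
 *		return NOTIFY_OK;
 *	}
 *
 * "my_adjust_dividers()" is a hypothetical placeholder.
 */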

/**
 * __clk_recalc_accuracies
 * @core: first clk in the subtree
 *
 * Walks the subtree of clks starting with clk and recalculates accuracies as
 * it goes. Note that if a clk does not implement the .recalc_accuracy
 * callback then it is assumed that the clock will take on the accuracy of its
 * parent.
 */
static void __clk_recalc_accuracies(struct clk_core *core)
{
	unsigned long parent_accuracy = 0;
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	if (core->parent)
		parent_accuracy = core->parent->accuracy;

	if (core->ops->recalc_accuracy)
		core->accuracy = core->ops->recalc_accuracy(core->hw,
							parent_accuracy);
	else
		core->accuracy = parent_accuracy;

	hlist_for_each_entry(child, &core->children, child_node)
		__clk_recalc_accuracies(child);
}

static long clk_core_get_accuracy(struct clk_core *core)
{
	unsigned long accuracy;

	clk_prepare_lock();
	if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE))
		__clk_recalc_accuracies(core);

	accuracy = __clk_get_accuracy(core);
	clk_prepare_unlock();

	return accuracy;
}

/**
 * clk_get_accuracy - return the accuracy of clk
 * @clk: the clk whose accuracy is being returned
 *
 * Simply returns the cached accuracy of the clk, unless
 * CLK_GET_ACCURACY_NOCACHE flag is set, which means a recalc_rate will be
 * issued.
 * If clk is NULL then returns 0.
 */
long clk_get_accuracy(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_get_accuracy(clk->core);
}
EXPORT_SYMBOL_GPL(clk_get_accuracy);

static unsigned long clk_recalc(struct clk_core *core,
				unsigned long parent_rate)
{
	unsigned long rate = parent_rate;

	if (core->ops->recalc_rate && !clk_pm_runtime_get(core)) {
		rate = core->ops->recalc_rate(core->hw, parent_rate);
		clk_pm_runtime_put(core);
	}
	return rate;
}

/**
 * __clk_recalc_rates
 * @core: first clk in the subtree
 * @msg: notification type (see include/linux/clk.h)
 *
 * Walks the subtree of clks starting with clk and recalculates rates as it
 * goes. Note that if a clk does not implement the .recalc_rate callback then
 * it is assumed that the clock will take on the rate of its parent.
 *
 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
 * if necessary.
 */
static void __clk_recalc_rates(struct clk_core *core, unsigned long msg)
{
	unsigned long old_rate;
	unsigned long parent_rate = 0;
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	old_rate = core->rate;

	if (core->parent)
		parent_rate = core->parent->rate;

	core->rate = clk_recalc(core, parent_rate);

	/*
	 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
	 * & ABORT_RATE_CHANGE notifiers
	 */
	if (core->notifier_count && msg)
		__clk_notify(core, msg, old_rate, core->rate);

	hlist_for_each_entry(child, &core->children, child_node)
		__clk_recalc_rates(child, msg);
}

static unsigned long clk_core_get_rate(struct clk_core *core)
{
	unsigned long rate;

	clk_prepare_lock();

	if (core && (core->flags & CLK_GET_RATE_NOCACHE))
		__clk_recalc_rates(core, 0);

	rate = clk_core_get_rate_nolock(core);
	clk_prepare_unlock();

	return rate;
}

/**
 * clk_get_rate - return the rate of clk
 * @clk: the clk whose rate is being returned
 *
 * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
 * is set, which means a recalc_rate will be issued.
 * If clk is NULL then returns 0.
 */
unsigned long clk_get_rate(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_get_rate(clk->core);
}
EXPORT_SYMBOL_GPL(clk_get_rate);
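
/*
 * Illustrative sketch (assumed consumer code, not part of this file): unless
 * CLK_GET_RATE_NOCACHE is set on the clock, this simply reports the cached
 * rate, so it is cheap to call when sizing dividers or baud rates:
 *
 *	unsigned long rate = clk_get_rate(clk);
 *	unsigned long div = DIV_ROUND_UP(rate, target_hz);
 *
 * "target_hz" is a hypothetical consumer parameter.
 */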

static int clk_fetch_parent_index(struct clk_core *core,
				  struct clk_core *parent)
{
	int i;

	if (!parent)
		return -EINVAL;

	for (i = 0; i < core->num_parents; i++) {
		if (core->parents[i] == parent)
			return i;

		if (core->parents[i])
			continue;

		/* Fallback to comparing globally unique names */
		if (!strcmp(parent->name, core->parent_names[i])) {
			core->parents[i] = parent;
			return i;
		}
	}

	return -EINVAL;
}

/*
 * Update the orphan status of @core and all its children.
 */
static void clk_core_update_orphan_status(struct clk_core *core, bool is_orphan)
{
	struct clk_core *child;

	core->orphan = is_orphan;

	hlist_for_each_entry(child, &core->children, child_node)
		clk_core_update_orphan_status(child, is_orphan);
}

static void clk_reparent(struct clk_core *core, struct clk_core *new_parent)
{
	bool was_orphan = core->orphan;

	hlist_del(&core->child_node);

	if (new_parent) {
		bool becomes_orphan = new_parent->orphan;

		/* avoid duplicate POST_RATE_CHANGE notifications */
		if (new_parent->new_child == core)
			new_parent->new_child = NULL;

		hlist_add_head(&core->child_node, &new_parent->children);

		if (was_orphan != becomes_orphan)
			clk_core_update_orphan_status(core, becomes_orphan);
	} else {
		hlist_add_head(&core->child_node, &clk_orphan_list);
		if (!was_orphan)
			clk_core_update_orphan_status(core, true);
	}

	core->parent = new_parent;
}

static struct clk_core *__clk_set_parent_before(struct clk_core *core,
					struct clk_core *parent)
{
	unsigned long flags;
	struct clk_core *old_parent = core->parent;

	/*
	 * 1. enable parents for CLK_OPS_PARENT_ENABLE clock
	 *
	 * 2. Migrate prepare state between parents and prevent race with
	 * clk_enable().
	 *
	 * If the clock is not prepared, then a race with
	 * clk_enable/disable() is impossible since we already have the
	 * prepare lock (future calls to clk_enable() need to be preceded by
	 * a clk_prepare()).
	 *
	 * If the clock is prepared, migrate the prepared state to the new
	 * parent and also protect against a race with clk_enable() by
	 * forcing the clock and the new parent on. This ensures that all
	 * future calls to clk_enable() are practically NOPs with respect to
	 * hardware and software states.
	 *
	 * See also: Comment for clk_set_parent() below.
	 */

	/* enable old_parent & parent if CLK_OPS_PARENT_ENABLE is set */
	if (core->flags & CLK_OPS_PARENT_ENABLE) {
		clk_core_prepare_enable(old_parent);
		clk_core_prepare_enable(parent);
	}

	/* migrate prepare count if > 0 */
	if (core->prepare_count) {
		clk_core_prepare_enable(parent);
		clk_core_enable_lock(core);
	}

	/* update the clk tree topology */
	flags = clk_enable_lock();
	clk_reparent(core, parent);
	clk_enable_unlock(flags);

	return old_parent;
}

static void __clk_set_parent_after(struct clk_core *core,
				   struct clk_core *parent,
				   struct clk_core *old_parent)
{
	/*
	 * Finish the migration of prepare state and undo the changes done
	 * for preventing a race with clk_enable().
	 */
	if (core->prepare_count) {
		clk_core_disable_lock(core);
		clk_core_disable_unprepare(old_parent);
	}

	/* re-balance ref counting if CLK_OPS_PARENT_ENABLE is set */
	if (core->flags & CLK_OPS_PARENT_ENABLE) {
		clk_core_disable_unprepare(parent);
		clk_core_disable_unprepare(old_parent);
	}
}

static int __clk_set_parent(struct clk_core *core, struct clk_core *parent,
			    u8 p_index)
{
	unsigned long flags;
	int ret = 0;
	struct clk_core *old_parent;

4dff95dc | 1651 | old_parent = __clk_set_parent_before(core, parent); |
b2476490 | 1652 | |
4dff95dc | 1653 | trace_clk_set_parent(core, parent); |
b2476490 | 1654 | |
4dff95dc SB |
1655 | /* change clock input source */ |
1656 | if (parent && core->ops->set_parent) | |
1657 | ret = core->ops->set_parent(core->hw, p_index); | |
dfc202ea | 1658 | |
4dff95dc | 1659 | trace_clk_set_parent_complete(core, parent); |
dfc202ea | 1660 | |
4dff95dc SB |
1661 | if (ret) { |
1662 | flags = clk_enable_lock(); | |
1663 | clk_reparent(core, old_parent); | |
1664 | clk_enable_unlock(flags); | |
c660b2eb | 1665 | __clk_set_parent_after(core, old_parent, parent); |
dfc202ea | 1666 | |
4dff95dc | 1667 | return ret; |
b2476490 MT |
1668 | } |
1669 | ||
4dff95dc SB |
1670 | __clk_set_parent_after(core, parent, old_parent); |
1671 | ||
b2476490 MT |
1672 | return 0; |
1673 | } | |
1674 | ||
1675 | /** | |
4dff95dc SB |
1676 | * __clk_speculate_rates |
1677 | * @core: first clk in the subtree | |
1678 | * @parent_rate: the "future" rate of clk's parent | |
b2476490 | 1679 | * |
4dff95dc SB |
1680 | * Walks the subtree of clks starting with clk, speculating rates as it |
1681 | * goes and firing off PRE_RATE_CHANGE notifications as necessary. | |
1682 | * | |
1683 | * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending | |
1684 | * pre-rate change notifications and returns early if no clks in the | |
1685 | * subtree have subscribed to the notifications. Note that if a clk does not | |
1686 | * implement the .recalc_rate callback then it is assumed that the clock will | |
1687 | * take on the rate of its parent. | |
b2476490 | 1688 | */ |
4dff95dc SB |
1689 | static int __clk_speculate_rates(struct clk_core *core, |
1690 | unsigned long parent_rate) | |
b2476490 | 1691 | { |
4dff95dc SB |
1692 | struct clk_core *child; |
1693 | unsigned long new_rate; | |
1694 | int ret = NOTIFY_DONE; | |
b2476490 | 1695 | |
4dff95dc | 1696 | lockdep_assert_held(&prepare_lock); |
864e160a | 1697 | |
4dff95dc SB |
1698 | new_rate = clk_recalc(core, parent_rate); |
1699 | ||
1700 | /* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */ | |
1701 | if (core->notifier_count) | |
1702 | ret = __clk_notify(core, PRE_RATE_CHANGE, core->rate, new_rate); | |
1703 | ||
1704 | if (ret & NOTIFY_STOP_MASK) { | |
1705 | pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n", | |
1706 | __func__, core->name, ret); | |
1707 | goto out; | |
1708 | } | |
1709 | ||
1710 | hlist_for_each_entry(child, &core->children, child_node) { | |
1711 | ret = __clk_speculate_rates(child, new_rate); | |
1712 | if (ret & NOTIFY_STOP_MASK) | |
1713 | break; | |
1714 | } | |
b2476490 | 1715 | |
4dff95dc | 1716 | out: |
b2476490 MT |
1717 | return ret; |
1718 | } | |
b2476490 | 1719 | |
4dff95dc SB |
1720 | static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate, |
1721 | struct clk_core *new_parent, u8 p_index) | |
b2476490 | 1722 | { |
4dff95dc | 1723 | struct clk_core *child; |
b2476490 | 1724 | |
4dff95dc SB |
1725 | core->new_rate = new_rate; |
1726 | core->new_parent = new_parent; | |
1727 | core->new_parent_index = p_index; | |
1728 | /* include clk in new parent's PRE_RATE_CHANGE notifications */ | |
1729 | core->new_child = NULL; | |
1730 | if (new_parent && new_parent != core->parent) | |
1731 | new_parent->new_child = core; | |
496eadf8 | 1732 | |
4dff95dc SB |
1733 | hlist_for_each_entry(child, &core->children, child_node) { |
1734 | child->new_rate = clk_recalc(child, new_rate); | |
1735 | clk_calc_subtree(child, child->new_rate, NULL, 0); | |
1736 | } | |
1737 | } | |
b2476490 | 1738 | |
4dff95dc SB |
1739 | /* |
1740 | * calculate the new rates returning the topmost clock that has to be | |
1741 | * changed. | |
1742 | */ | |
1743 | static struct clk_core *clk_calc_new_rates(struct clk_core *core, | |
1744 | unsigned long rate) | |
1745 | { | |
1746 | struct clk_core *top = core; | |
1747 | struct clk_core *old_parent, *parent; | |
4dff95dc SB |
1748 | unsigned long best_parent_rate = 0; |
1749 | unsigned long new_rate; | |
1750 | unsigned long min_rate; | |
1751 | unsigned long max_rate; | |
1752 | int p_index = 0; | |
1753 | long ret; | |
1754 | ||
1755 | /* sanity */ | |
1756 | if (IS_ERR_OR_NULL(core)) | |
1757 | return NULL; | |
1758 | ||
1759 | /* save parent rate, if it exists */ | |
1760 | parent = old_parent = core->parent; | |
71472c0c | 1761 | if (parent) |
4dff95dc | 1762 | best_parent_rate = parent->rate; |
71472c0c | 1763 | |
4dff95dc SB |
1764 | clk_core_get_boundaries(core, &min_rate, &max_rate); |
1765 | ||
1766 | /* find the closest rate and parent clk/rate */ | |
0f6cc2b8 | 1767 | if (clk_core_can_round(core)) { |
0817b62c BB |
1768 | struct clk_rate_request req; |
1769 | ||
1770 | req.rate = rate; | |
1771 | req.min_rate = min_rate; | |
1772 | req.max_rate = max_rate; | |
0817b62c | 1773 | |
0f6cc2b8 JB |
1774 | clk_core_init_rate_req(core, &req); |
1775 | ||
1776 | ret = clk_core_determine_round_nolock(core, &req); | |
4dff95dc SB |
1777 | if (ret < 0) |
1778 | return NULL; | |
1c8e6004 | 1779 | |
0817b62c BB |
1780 | best_parent_rate = req.best_parent_rate; |
1781 | new_rate = req.rate; | |
1782 | parent = req.best_parent_hw ? req.best_parent_hw->core : NULL; | |
035a61c3 | 1783 | |
4dff95dc SB |
1784 | if (new_rate < min_rate || new_rate > max_rate) |
1785 | return NULL; | |
1786 | } else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) { | |
1787 | /* pass-through clock without adjustable parent */ | |
1788 | core->new_rate = core->rate; | |
1789 | return NULL; | |
1790 | } else { | |
1791 | /* pass-through clock with adjustable parent */ | |
1792 | top = clk_calc_new_rates(parent, rate); | |
1793 | new_rate = parent->new_rate; | |
1794 | goto out; | |
1795 | } | |
1c8e6004 | 1796 | |
4dff95dc SB |
1797 | /* some clocks must be gated to change parent */ |
1798 | if (parent != old_parent && | |
1799 | (core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) { | |
1800 | pr_debug("%s: %s not gated but wants to reparent\n", | |
1801 | __func__, core->name); | |
1802 | return NULL; | |
1803 | } | |
b2476490 | 1804 | |
4dff95dc SB |
1805 | /* try finding the new parent index */ |
1806 | if (parent && core->num_parents > 1) { | |
1807 | p_index = clk_fetch_parent_index(core, parent); | |
1808 | if (p_index < 0) { | |
1809 | pr_debug("%s: clk %s can not be parent of clk %s\n", | |
1810 | __func__, parent->name, core->name); | |
1811 | return NULL; | |
1812 | } | |
1813 | } | |
b2476490 | 1814 | |
4dff95dc SB |
1815 | if ((core->flags & CLK_SET_RATE_PARENT) && parent && |
1816 | best_parent_rate != parent->rate) | |
1817 | top = clk_calc_new_rates(parent, best_parent_rate); | |
035a61c3 | 1818 | |
4dff95dc SB |
1819 | out: |
1820 | clk_calc_subtree(core, new_rate, parent, p_index); | |
b2476490 | 1821 | |
4dff95dc | 1822 | return top; |
b2476490 | 1823 | } |
b2476490 | 1824 | |
4dff95dc SB |
1825 | /* |
1826 | * Notify about rate changes in a subtree. Always walk down the whole tree | |
1827 | * so that in case of an error we can walk down the whole tree again and | |
1828 | * abort the change. | |
b2476490 | 1829 | */ |
4dff95dc SB |
1830 | static struct clk_core *clk_propagate_rate_change(struct clk_core *core, |
1831 | unsigned long event) | |
b2476490 | 1832 | { |
4dff95dc | 1833 | struct clk_core *child, *tmp_clk, *fail_clk = NULL; |
b2476490 MT |
1834 | int ret = NOTIFY_DONE; |
1835 | ||
4dff95dc SB |
1836 | if (core->rate == core->new_rate) |
1837 | return NULL; | |
b2476490 | 1838 | |
4dff95dc SB |
1839 | if (core->notifier_count) { |
1840 | ret = __clk_notify(core, event, core->rate, core->new_rate); | |
1841 | if (ret & NOTIFY_STOP_MASK) | |
1842 | fail_clk = core; | |
b2476490 MT |
1843 | } |
1844 | ||
4dff95dc SB |
1845 | hlist_for_each_entry(child, &core->children, child_node) { |
1846 | /* Skip children who will be reparented to another clock */ | |
1847 | if (child->new_parent && child->new_parent != core) | |
1848 | continue; | |
1849 | tmp_clk = clk_propagate_rate_change(child, event); | |
1850 | if (tmp_clk) | |
1851 | fail_clk = tmp_clk; | |
1852 | } | |
5279fc40 | 1853 | |
4dff95dc SB |
1854 | /* handle the new child who might not be in core->children yet */ |
1855 | if (core->new_child) { | |
1856 | tmp_clk = clk_propagate_rate_change(core->new_child, event); | |
1857 | if (tmp_clk) | |
1858 | fail_clk = tmp_clk; | |
1859 | } | |
5279fc40 | 1860 | |
4dff95dc | 1861 | return fail_clk; |
5279fc40 BB |
1862 | } |
1863 | ||
4dff95dc SB |
1864 | /* |
1865 | * walk down a subtree and set the new rates notifying the rate | |
1866 | * change on the way | |
1867 | */ | |
1868 | static void clk_change_rate(struct clk_core *core) | |
035a61c3 | 1869 | { |
4dff95dc SB |
1870 | struct clk_core *child; |
1871 | struct hlist_node *tmp; | |
1872 | unsigned long old_rate; | |
1873 | unsigned long best_parent_rate = 0; | |
1874 | bool skip_set_rate = false; | |
1875 | struct clk_core *old_parent; | |
fc8726a2 | 1876 | struct clk_core *parent = NULL; |
035a61c3 | 1877 | |
4dff95dc | 1878 | old_rate = core->rate; |
035a61c3 | 1879 | |
fc8726a2 DA |
1880 | if (core->new_parent) { |
1881 | parent = core->new_parent; | |
4dff95dc | 1882 | best_parent_rate = core->new_parent->rate; |
fc8726a2 DA |
1883 | } else if (core->parent) { |
1884 | parent = core->parent; | |
4dff95dc | 1885 | best_parent_rate = core->parent->rate; |
fc8726a2 | 1886 | } |
035a61c3 | 1887 | |
588fb54b MS |
1888 | if (clk_pm_runtime_get(core)) |
1889 | return; | |
1890 | ||
2eb8c710 HS |
1891 | if (core->flags & CLK_SET_RATE_UNGATE) { |
1892 | unsigned long flags; | |
1893 | ||
1894 | clk_core_prepare(core); | |
1895 | flags = clk_enable_lock(); | |
1896 | clk_core_enable(core); | |
1897 | clk_enable_unlock(flags); | |
1898 | } | |
1899 | ||
4dff95dc SB |
1900 | if (core->new_parent && core->new_parent != core->parent) { |
1901 | old_parent = __clk_set_parent_before(core, core->new_parent); | |
1902 | trace_clk_set_parent(core, core->new_parent); | |
5279fc40 | 1903 | |
4dff95dc SB |
1904 | if (core->ops->set_rate_and_parent) { |
1905 | skip_set_rate = true; | |
1906 | core->ops->set_rate_and_parent(core->hw, core->new_rate, | |
1907 | best_parent_rate, | |
1908 | core->new_parent_index); | |
1909 | } else if (core->ops->set_parent) { | |
1910 | core->ops->set_parent(core->hw, core->new_parent_index); | |
1911 | } | |
5279fc40 | 1912 | |
4dff95dc SB |
1913 | trace_clk_set_parent_complete(core, core->new_parent); |
1914 | __clk_set_parent_after(core, core->new_parent, old_parent); | |
1915 | } | |
8f2c2db1 | 1916 | |
fc8726a2 DA |
1917 | if (core->flags & CLK_OPS_PARENT_ENABLE) |
1918 | clk_core_prepare_enable(parent); | |
1919 | ||
4dff95dc | 1920 | trace_clk_set_rate(core, core->new_rate); |
b2476490 | 1921 | |
4dff95dc SB |
1922 | if (!skip_set_rate && core->ops->set_rate) |
1923 | core->ops->set_rate(core->hw, core->new_rate, best_parent_rate); | |
496eadf8 | 1924 | |
4dff95dc | 1925 | trace_clk_set_rate_complete(core, core->new_rate); |
b2476490 | 1926 | |
4dff95dc | 1927 | core->rate = clk_recalc(core, best_parent_rate); |
b2476490 | 1928 | |
2eb8c710 HS |
1929 | if (core->flags & CLK_SET_RATE_UNGATE) { |
1930 | unsigned long flags; | |
1931 | ||
1932 | flags = clk_enable_lock(); | |
1933 | clk_core_disable(core); | |
1934 | clk_enable_unlock(flags); | |
1935 | clk_core_unprepare(core); | |
1936 | } | |
1937 | ||
fc8726a2 DA |
1938 | if (core->flags & CLK_OPS_PARENT_ENABLE) |
1939 | clk_core_disable_unprepare(parent); | |
1940 | ||
4dff95dc SB |
1941 | if (core->notifier_count && old_rate != core->rate) |
1942 | __clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate); | |
b2476490 | 1943 | |
85e88fab MT |
1944 | if (core->flags & CLK_RECALC_NEW_RATES) |
1945 | (void)clk_calc_new_rates(core, core->new_rate); | |
d8d91987 | 1946 | |
b2476490 | 1947 | /* |
4dff95dc SB |
1948 | * Use safe iteration, as change_rate can actually swap parents |
1949 | * for certain clock types. | |
b2476490 | 1950 | */ |
4dff95dc SB |
1951 | hlist_for_each_entry_safe(child, tmp, &core->children, child_node) { |
1952 | /* Skip children who will be reparented to another clock */ | |
1953 | if (child->new_parent && child->new_parent != core) | |
1954 | continue; | |
1955 | clk_change_rate(child); | |
1956 | } | |
b2476490 | 1957 | |
4dff95dc SB |
1958 | /* handle the new child who might not be in core->children yet */ |
1959 | if (core->new_child) | |
1960 | clk_change_rate(core->new_child); | |
588fb54b MS |
1961 | |
1962 | clk_pm_runtime_put(core); | |
b2476490 MT |
1963 | } |
1964 | ||
ca5e089a JB |
1965 | static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core, |
1966 | unsigned long req_rate) | |
1967 | { | |
e55a839a | 1968 | int ret, cnt; |
ca5e089a JB |
1969 | struct clk_rate_request req; |
1970 | ||
1971 | lockdep_assert_held(&prepare_lock); | |
1972 | ||
1973 | if (!core) | |
1974 | return 0; | |
1975 | ||
e55a839a JB |
1976 | /* simulate what the rate would be if it could be freely set */ |
1977 | cnt = clk_core_rate_nuke_protect(core); | |
1978 | if (cnt < 0) | |
1979 | return cnt; | |
1980 | ||
ca5e089a JB |
1981 | clk_core_get_boundaries(core, &req.min_rate, &req.max_rate); |
1982 | req.rate = req_rate; | |
1983 | ||
1984 | ret = clk_core_round_rate_nolock(core, &req); | |
1985 | ||
e55a839a JB |
1986 | /* restore the protection */ |
1987 | clk_core_rate_restore_protect(core, cnt); | |
1988 | ||
ca5e089a | 1989 | return ret ? 0 : req.rate; |
b2476490 MT |
1990 | } |
1991 | ||
4dff95dc SB |
1992 | static int clk_core_set_rate_nolock(struct clk_core *core, |
1993 | unsigned long req_rate) | |
a093bde2 | 1994 | { |
4dff95dc | 1995 | struct clk_core *top, *fail_clk; |
ca5e089a | 1996 | unsigned long rate; |
9a34b453 | 1997 | int ret = 0; |
a093bde2 | 1998 | |
4dff95dc SB |
1999 | if (!core) |
2000 | return 0; | |
a093bde2 | 2001 | |
ca5e089a JB |
2002 | rate = clk_core_req_round_rate_nolock(core, req_rate); |
2003 | ||
4dff95dc SB |
2004 | /* bail early if nothing to do */ |
2005 | if (rate == clk_core_get_rate_nolock(core)) | |
2006 | return 0; | |
a093bde2 | 2007 | |
e55a839a JB |
2008 | /* fail on a direct rate set of a protected provider */ |
2009 | if (clk_core_rate_is_protected(core)) | |
2010 | return -EBUSY; | |
2011 | ||
4dff95dc | 2012 | /* calculate new rates and get the topmost changed clock */ |
ca5e089a | 2013 | top = clk_calc_new_rates(core, req_rate); |
4dff95dc SB |
2014 | if (!top) |
2015 | return -EINVAL; | |
2016 | ||
9a34b453 MS |
2017 | ret = clk_pm_runtime_get(core); |
2018 | if (ret) | |
2019 | return ret; | |
2020 | ||
4dff95dc SB |
2021 | /* notify that we are about to change rates */ |
2022 | fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE); | |
2023 | if (fail_clk) { | |
2024 | pr_debug("%s: failed to set %s rate\n", __func__, | |
2025 | fail_clk->name); | |
2026 | clk_propagate_rate_change(top, ABORT_RATE_CHANGE); | |
9a34b453 MS |
2027 | ret = -EBUSY; |
2028 | goto err; | |
4dff95dc SB |
2029 | } |
2030 | ||
2031 | /* change the rates */ | |
2032 | clk_change_rate(top); | |
2033 | ||
2034 | core->req_rate = req_rate; | |
9a34b453 MS |
2035 | err: |
2036 | clk_pm_runtime_put(core); | |
4dff95dc | 2037 | |
9a34b453 | 2038 | return ret; |
a093bde2 | 2039 | } |
035a61c3 TV |
2040 | |
2041 | /** | |
4dff95dc SB |
2042 | * clk_set_rate - specify a new rate for clk |
2043 | * @clk: the clk whose rate is being changed | |
2044 | * @rate: the new rate for clk | |
035a61c3 | 2045 | * |
4dff95dc SB |
2046 | * In the simplest case clk_set_rate will only adjust the rate of clk. |
2047 | * | |
2048 | * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to | |
2049 | * propagate up to clk's parent; whether or not this happens depends on the | |
2050 | * outcome of clk's .round_rate implementation. If *parent_rate is unchanged | |
2051 | * after calling .round_rate then upstream parent propagation is ignored. If | |
2052 | * *parent_rate comes back with a new rate for clk's parent then we propagate | |
2053 | * up to clk's parent and set its rate. Upward propagation will continue | |
2054 | * until either a clk does not support the CLK_SET_RATE_PARENT flag or | |
2055 | * .round_rate stops requesting changes to clk's parent_rate. | |
2056 | * | |
2057 | * Rate changes are accomplished via tree traversal that also recalculates the | |
2058 | * rates for the clocks and fires off POST_RATE_CHANGE notifiers. | |
2059 | * | |
2060 | * Returns 0 on success, a negative errno otherwise. | |
035a61c3 | 2061 | */ |
4dff95dc | 2062 | int clk_set_rate(struct clk *clk, unsigned long rate) |
035a61c3 | 2063 | { |
4dff95dc SB |
2064 | int ret; |
2065 | ||
035a61c3 TV |
2066 | if (!clk) |
2067 | return 0; | |
2068 | ||
4dff95dc SB |
2069 | /* prevent racing with updates to the clock topology */ |
2070 | clk_prepare_lock(); | |
da0f0b2c | 2071 | |
55e9b8b7 JB |
2072 | if (clk->exclusive_count) |
2073 | clk_core_rate_unprotect(clk->core); | |
2074 | ||
4dff95dc | 2075 | ret = clk_core_set_rate_nolock(clk->core, rate); |
da0f0b2c | 2076 | |
55e9b8b7 JB |
2077 | if (clk->exclusive_count) |
2078 | clk_core_rate_protect(clk->core); | |
2079 | ||
4dff95dc | 2080 | clk_prepare_unlock(); |
4935b22c | 2081 | |
4dff95dc | 2082 | return ret; |
4935b22c | 2083 | } |
4dff95dc | 2084 | EXPORT_SYMBOL_GPL(clk_set_rate); |
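/*
 * Example: a sketch of a consumer requesting a new rate and reading back
 * what the framework actually programmed. The 100 MHz target is
 * hypothetical; the achieved rate depends on .round_rate/.determine_rate.
 */
static int example_set_rate(struct clk *clk)
{
	int ret;

	ret = clk_set_rate(clk, 100000000);
	if (ret)
		return ret;

	pr_info("requested 100 MHz, got %lu Hz\n", clk_get_rate(clk));

	return 0;
}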
4935b22c | 2085 | |
55e9b8b7 JB |
2086 | /** |
2087 | * clk_set_rate_exclusive - specify a new rate and get exclusive control | |
2088 | * @clk: the clk whose rate is being changed | |
2089 | * @rate: the new rate for clk | |
2090 | * | |
2091 | * This is a combination of clk_set_rate() and clk_rate_exclusive_get() | |
2092 | * within a critical section | |
2093 | * | |
2094 | * This can be used initially to ensure that at least 1 consumer is | |
2095 | * satisfied when several consumers are competing for exclusivity over the | |
2096 | * same clock provider. | |
2097 | * | |
2098 | * The exclusivity is not applied if setting the rate failed. | |
2099 | * | |
2100 | * Calls to clk_rate_exclusive_get() should be balanced with calls to | |
2101 | * clk_rate_exclusive_put(). | |
2102 | * | |
2103 | * Returns 0 on success, a negative errno otherwise. | |
2104 | */ | |
2105 | int clk_set_rate_exclusive(struct clk *clk, unsigned long rate) | |
2106 | { | |
2107 | int ret; | |
2108 | ||
2109 | if (!clk) | |
2110 | return 0; | |
2111 | ||
2112 | /* prevent racing with updates to the clock topology */ | |
2113 | clk_prepare_lock(); | |
2114 | ||
2115 | /* | |
2116 | * The temporary protection removal is deliberately not done here, | |
2117 | * because this function is meant to be used instead of clk_rate_protect, | |
2118 | * i.e. before the consumer code path protects the clock provider. | |
2119 | */ | |
2120 | ||
2121 | ret = clk_core_set_rate_nolock(clk->core, rate); | |
2122 | if (!ret) { | |
2123 | clk_core_rate_protect(clk->core); | |
2124 | clk->exclusive_count++; | |
2125 | } | |
2126 | ||
2127 | clk_prepare_unlock(); | |
2128 | ||
2129 | return ret; | |
2130 | } | |
2131 | EXPORT_SYMBOL_GPL(clk_set_rate_exclusive); | |
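/*
 * Example: a sketch of the exclusive-rate pattern. On success the
 * exclusivity taken by clk_set_rate_exclusive() must later be dropped with
 * clk_rate_exclusive_put(); the 19.2 MHz value is hypothetical.
 */
static int example_pin_rate(struct clk *clk)
{
	int ret;

	ret = clk_set_rate_exclusive(clk, 19200000);
	if (ret)
		return ret;

	/* ... work that relies on the pinned rate ... */

	clk_rate_exclusive_put(clk);

	return 0;
}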
2132 | ||
4dff95dc SB |
2133 | /** |
2134 | * clk_set_rate_range - set a rate range for a clock source | |
2135 | * @clk: clock source | |
2136 | * @min: desired minimum clock rate in Hz, inclusive | |
2137 | * @max: desired maximum clock rate in Hz, inclusive | |
2138 | * | |
2139 | * Returns success (0) or negative errno. | |
2140 | */ | |
2141 | int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max) | |
4935b22c | 2142 | { |
4dff95dc | 2143 | int ret = 0; |
6562fbcf | 2144 | unsigned long old_min, old_max, rate; |
4935b22c | 2145 | |
4dff95dc SB |
2146 | if (!clk) |
2147 | return 0; | |
903efc55 | 2148 | |
4dff95dc SB |
2149 | if (min > max) { |
2150 | pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n", | |
2151 | __func__, clk->core->name, clk->dev_id, clk->con_id, | |
2152 | min, max); | |
2153 | return -EINVAL; | |
903efc55 | 2154 | } |
4935b22c | 2155 | |
4dff95dc | 2156 | clk_prepare_lock(); |
4935b22c | 2157 | |
55e9b8b7 JB |
2158 | if (clk->exclusive_count) |
2159 | clk_core_rate_unprotect(clk->core); | |
2160 | ||
6562fbcf JB |
2161 | /* Save the current values in case we need to rollback the change */ |
2162 | old_min = clk->min_rate; | |
2163 | old_max = clk->max_rate; | |
2164 | clk->min_rate = min; | |
2165 | clk->max_rate = max; | |
2166 | ||
2167 | rate = clk_core_get_rate_nolock(clk->core); | |
2168 | if (rate < min || rate > max) { | |
2169 | /* | |
2170 | * FIXME: | |
2171 | * We are in a bit of trouble here: the current rate is outside | |
2172 | * the requested range. We are going to try to request the appropriate | |
2173 | * range boundary but there is a catch. It may fail for the | |
2174 | * usual reason (clock broken, clock protected, etc) but also | |
2175 | * because: | |
2176 | * - round_rate() was not favorable and fell on the wrong | |
2177 | * side of the boundary | |
2178 | * - the determine_rate() callback does not really check for | |
2179 | * this corner case when determining the rate | |
2180 | */ | |
2181 | ||
2182 | if (rate < min) | |
2183 | rate = min; | |
2184 | else | |
2185 | rate = max; | |
2186 | ||
2187 | ret = clk_core_set_rate_nolock(clk->core, rate); | |
2188 | if (ret) { | |
2189 | /* rollback the changes */ | |
2190 | clk->min_rate = old_min; | |
2191 | clk->max_rate = old_max; | |
2192 | } | |
4935b22c JH |
2193 | } |
2194 | ||
55e9b8b7 JB |
2195 | if (clk->exclusive_count) |
2196 | clk_core_rate_protect(clk->core); | |
2197 | ||
4dff95dc | 2198 | clk_prepare_unlock(); |
4935b22c | 2199 | |
4dff95dc | 2200 | return ret; |
3fa2252b | 2201 | } |
4dff95dc | 2202 | EXPORT_SYMBOL_GPL(clk_set_rate_range); |
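/*
 * Example: a sketch of constraining a clock to an inclusive range. The
 * 50-200 MHz bounds are hypothetical; if the current rate already lies
 * within them, no rate change is issued.
 */
static int example_limit_rate(struct clk *clk)
{
	return clk_set_rate_range(clk, 50000000, 200000000);
}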
3fa2252b | 2203 | |
4dff95dc SB |
2204 | /** |
2205 | * clk_set_min_rate - set a minimum clock rate for a clock source | |
2206 | * @clk: clock source | |
2207 | * @rate: desired minimum clock rate in Hz, inclusive | |
2208 | * | |
2209 | * Returns success (0) or negative errno. | |
2210 | */ | |
2211 | int clk_set_min_rate(struct clk *clk, unsigned long rate) | |
3fa2252b | 2212 | { |
4dff95dc SB |
2213 | if (!clk) |
2214 | return 0; | |
2215 | ||
2216 | return clk_set_rate_range(clk, rate, clk->max_rate); | |
3fa2252b | 2217 | } |
4dff95dc | 2218 | EXPORT_SYMBOL_GPL(clk_set_min_rate); |
3fa2252b | 2219 | |
4dff95dc SB |
2220 | /** |
2221 | * clk_set_max_rate - set a maximum clock rate for a clock source | |
2222 | * @clk: clock source | |
2223 | * @rate: desired maximum clock rate in Hz, inclusive | |
2224 | * | |
2225 | * Returns success (0) or negative errno. | |
2226 | */ | |
2227 | int clk_set_max_rate(struct clk *clk, unsigned long rate) | |
3fa2252b | 2228 | { |
4dff95dc SB |
2229 | if (!clk) |
2230 | return 0; | |
4935b22c | 2231 | |
4dff95dc | 2232 | return clk_set_rate_range(clk, clk->min_rate, rate); |
4935b22c | 2233 | } |
4dff95dc | 2234 | EXPORT_SYMBOL_GPL(clk_set_max_rate); |
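/*
 * Example: the single-ended helpers above each move one bound and keep the
 * other as previously set. A sketch with hypothetical limits:
 */
static int example_bound_rate(struct clk *clk)
{
	int ret;

	/* never run below 100 MHz ... */
	ret = clk_set_min_rate(clk, 100000000);
	if (ret)
		return ret;

	/* ... and never above 400 MHz */
	return clk_set_max_rate(clk, 400000000);
}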
4935b22c | 2235 | |
b2476490 | 2236 | /** |
4dff95dc SB |
2237 | * clk_get_parent - return the parent of a clk |
2238 | * @clk: the clk whose parent gets returned | |
b2476490 | 2239 | * |
4dff95dc | 2240 | * Simply returns clk->parent. Returns NULL if clk is NULL. |
b2476490 | 2241 | */ |
4dff95dc | 2242 | struct clk *clk_get_parent(struct clk *clk) |
b2476490 | 2243 | { |
4dff95dc | 2244 | struct clk *parent; |
b2476490 | 2245 | |
fc4a05d4 SB |
2246 | if (!clk) |
2247 | return NULL; | |
2248 | ||
4dff95dc | 2249 | clk_prepare_lock(); |
fc4a05d4 SB |
2250 | /* TODO: Create a per-user clk and change callers to call clk_put */ |
2251 | parent = !clk->core->parent ? NULL : clk->core->parent->hw->clk; | |
4dff95dc | 2252 | clk_prepare_unlock(); |
496eadf8 | 2253 | |
4dff95dc SB |
2254 | return parent; |
2255 | } | |
2256 | EXPORT_SYMBOL_GPL(clk_get_parent); | |
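/*
 * Example: a sketch that walks up the tree from a consumer handle by
 * calling clk_get_parent() until the root (NULL parent) is reached. Per
 * the TODO above, the returned handle is not a per-user clk, so it is not
 * clk_put() here.
 */
static unsigned int example_tree_depth(struct clk *clk)
{
	unsigned int depth = 0;

	while (clk) {
		clk = clk_get_parent(clk);
		depth++;
	}

	return depth;
}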
b2476490 | 2257 | |
4dff95dc SB |
2258 | static struct clk_core *__clk_init_parent(struct clk_core *core) |
2259 | { | |
5146e0b0 | 2260 | u8 index = 0; |
4dff95dc | 2261 | |
2430a94d | 2262 | if (core->num_parents > 1 && core->ops->get_parent) |
5146e0b0 | 2263 | index = core->ops->get_parent(core->hw); |
b2476490 | 2264 | |
5146e0b0 | 2265 | return clk_core_get_parent_by_index(core, index); |
b2476490 MT |
2266 | } |
2267 | ||
4dff95dc SB |
2268 | static void clk_core_reparent(struct clk_core *core, |
2269 | struct clk_core *new_parent) | |
b2476490 | 2270 | { |
4dff95dc SB |
2271 | clk_reparent(core, new_parent); |
2272 | __clk_recalc_accuracies(core); | |
2273 | __clk_recalc_rates(core, POST_RATE_CHANGE); | |
b2476490 MT |
2274 | } |
2275 | ||
42c86547 TV |
2276 | void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent) |
2277 | { | |
2278 | if (!hw) | |
2279 | return; | |
2280 | ||
2281 | clk_core_reparent(hw->core, !new_parent ? NULL : new_parent->core); | |
2282 | } | |
2283 | ||
4dff95dc SB |
2284 | /** |
2285 | * clk_has_parent - check if a clock is a possible parent for another | |
2286 | * @clk: clock source | |
2287 | * @parent: parent clock source | |
2288 | * | |
2289 | * This function can be used in drivers that need to check that a clock can be | |
2290 | * the parent of another without actually changing the parent. | |
2291 | * | |
2292 | * Returns true if @parent is a possible parent for @clk, false otherwise. | |
b2476490 | 2293 | */ |
4dff95dc | 2294 | bool clk_has_parent(struct clk *clk, struct clk *parent) |
b2476490 | 2295 | { |
4dff95dc | 2296 | struct clk_core *core, *parent_core; |
b2476490 | 2297 | |
4dff95dc SB |
2298 | /* NULL clocks should be nops, so return success if either is NULL. */ |
2299 | if (!clk || !parent) | |
2300 | return true; | |
7452b219 | 2301 | |
4dff95dc SB |
2302 | core = clk->core; |
2303 | parent_core = parent->core; | |
71472c0c | 2304 | |
4dff95dc SB |
2305 | /* Optimize for the case where the parent is already the parent. */ |
2306 | if (core->parent == parent_core) | |
2307 | return true; | |
1c8e6004 | 2308 | |
d6347445 YX |
2309 | return match_string(core->parent_names, core->num_parents, |
2310 | parent_core->name) >= 0; | |
4dff95dc SB |
2311 | } |
2312 | EXPORT_SYMBOL_GPL(clk_has_parent); | |
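/*
 * Example: a sketch of validating a candidate parent before attempting a
 * reparent; both handles are assumed to come from clk_get().
 */
static int example_try_reparent(struct clk *clk, struct clk *candidate)
{
	if (!clk_has_parent(clk, candidate))
		return -EINVAL;

	return clk_set_parent(clk, candidate);
}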
03bc10ab | 2313 | |
91baa9ff JB |
2314 | static int clk_core_set_parent_nolock(struct clk_core *core, |
2315 | struct clk_core *parent) | |
4dff95dc SB |
2316 | { |
2317 | int ret = 0; | |
2318 | int p_index = 0; | |
2319 | unsigned long p_rate = 0; | |
2320 | ||
91baa9ff JB |
2321 | lockdep_assert_held(&prepare_lock); |
2322 | ||
4dff95dc SB |
2323 | if (!core) |
2324 | return 0; | |
2325 | ||
4dff95dc | 2326 | if (core->parent == parent) |
91baa9ff | 2327 | return 0; |
4dff95dc SB |
2328 | |
2329 | /* verify ops for multi-parent clks */ | |
91baa9ff JB |
2330 | if (core->num_parents > 1 && !core->ops->set_parent) |
2331 | return -EPERM; | |
7452b219 | 2332 | |
4dff95dc | 2333 | /* check that we are allowed to re-parent if the clock is in use */ |
91baa9ff JB |
2334 | if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) |
2335 | return -EBUSY; | |
b2476490 | 2336 | |
e55a839a JB |
2337 | if (clk_core_rate_is_protected(core)) |
2338 | return -EBUSY; | |
b2476490 | 2339 | |
71472c0c | 2340 | /* try finding the new parent index */ |
4dff95dc | 2341 | if (parent) { |
d6968fca | 2342 | p_index = clk_fetch_parent_index(core, parent); |
f1c8b2ed | 2343 | if (p_index < 0) { |
71472c0c | 2344 | pr_debug("%s: clk %s can not be parent of clk %s\n", |
4dff95dc | 2345 | __func__, parent->name, core->name); |
91baa9ff | 2346 | return p_index; |
71472c0c | 2347 | } |
e8f0e68e | 2348 | p_rate = parent->rate; |
b2476490 MT |
2349 | } |
2350 | ||
9a34b453 MS |
2351 | ret = clk_pm_runtime_get(core); |
2352 | if (ret) | |
91baa9ff | 2353 | return ret; |
9a34b453 | 2354 | |
4dff95dc SB |
2355 | /* propagate PRE_RATE_CHANGE notifications */ |
2356 | ret = __clk_speculate_rates(core, p_rate); | |
b2476490 | 2357 | |
4dff95dc SB |
2358 | /* abort if a driver objects */ |
2359 | if (ret & NOTIFY_STOP_MASK) | |
9a34b453 | 2360 | goto runtime_put; |
b2476490 | 2361 | |
4dff95dc SB |
2362 | /* do the re-parent */ |
2363 | ret = __clk_set_parent(core, parent, p_index); | |
b2476490 | 2364 | |
4dff95dc SB |
2365 | /* propagate rate and accuracy recalculation accordingly */ | |
2366 | if (ret) { | |
2367 | __clk_recalc_rates(core, ABORT_RATE_CHANGE); | |
2368 | } else { | |
2369 | __clk_recalc_rates(core, POST_RATE_CHANGE); | |
2370 | __clk_recalc_accuracies(core); | |
b2476490 MT |
2371 | } |
2372 | ||
9a34b453 MS |
2373 | runtime_put: |
2374 | clk_pm_runtime_put(core); | |
71472c0c | 2375 | |
4dff95dc SB |
2376 | return ret; |
2377 | } | |
b2476490 | 2378 | |
4dff95dc SB |
2379 | /** |
2380 | * clk_set_parent - switch the parent of a mux clk | |
2381 | * @clk: the mux clk whose input we are switching | |
2382 | * @parent: the new input to clk | |
2383 | * | |
2384 | * Re-parent clk to use parent as its new input source. If clk is in | |
2385 | * prepared state, the clk will get enabled for the duration of this call. If | |
2386 | * that's not acceptable for a specific clk (Eg: the consumer can't handle | |
2387 | * that, the reparenting is glitchy in hardware, etc), use the | |
2388 | * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared. | |
2389 | * | |
2390 | * After successfully changing clk's parent clk_set_parent will update the | |
2391 | * clk topology, sysfs topology and propagate rate recalculation via | |
2392 | * __clk_recalc_rates. | |
2393 | * | |
2394 | * Returns 0 on success, a negative errno otherwise. | |
2395 | */ | |
2396 | int clk_set_parent(struct clk *clk, struct clk *parent) | |
2397 | { | |
91baa9ff JB |
2398 | int ret; |
2399 | ||
4dff95dc SB |
2400 | if (!clk) |
2401 | return 0; | |
2402 | ||
91baa9ff | 2403 | clk_prepare_lock(); |
55e9b8b7 JB |
2404 | |
2405 | if (clk->exclusive_count) | |
2406 | clk_core_rate_unprotect(clk->core); | |
2407 | ||
91baa9ff JB |
2408 | ret = clk_core_set_parent_nolock(clk->core, |
2409 | parent ? parent->core : NULL); | |
55e9b8b7 JB |
2410 | |
2411 | if (clk->exclusive_count) | |
2412 | clk_core_rate_protect(clk->core); | |
2413 | ||
91baa9ff JB |
2414 | clk_prepare_unlock(); |
2415 | ||
2416 | return ret; | |
b2476490 | 2417 | } |
4dff95dc | 2418 | EXPORT_SYMBOL_GPL(clk_set_parent); |
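/*
 * Example: a sketch of switching a mux between two hypothetical inputs
 * (a slow always-on oscillator and a fast PLL) and reading back the rate
 * that the re-parent propagated through __clk_recalc_rates().
 */
static int example_switch_mux(struct clk *mux, struct clk *pll,
			      struct clk *osc, bool fast)
{
	int ret;

	ret = clk_set_parent(mux, fast ? pll : osc);
	if (ret)
		return ret;

	pr_info("mux now runs at %lu Hz\n", clk_get_rate(mux));

	return 0;
}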
b2476490 | 2419 | |
9e4d04ad JB |
2420 | static int clk_core_set_phase_nolock(struct clk_core *core, int degrees) |
2421 | { | |
2422 | int ret = -EINVAL; | |
2423 | ||
2424 | lockdep_assert_held(&prepare_lock); | |
2425 | ||
2426 | if (!core) | |
2427 | return 0; | |
2428 | ||
e55a839a JB |
2429 | if (clk_core_rate_is_protected(core)) |
2430 | return -EBUSY; | |
2431 | ||
9e4d04ad JB |
2432 | trace_clk_set_phase(core, degrees); |
2433 | ||
7f95beea | 2434 | if (core->ops->set_phase) { |
9e4d04ad | 2435 | ret = core->ops->set_phase(core->hw, degrees); |
7f95beea SL |
2436 | if (!ret) |
2437 | core->phase = degrees; | |
2438 | } | |
9e4d04ad JB |
2439 | |
2440 | trace_clk_set_phase_complete(core, degrees); | |
2441 | ||
2442 | return ret; | |
2443 | } | |
2444 | ||
4dff95dc SB |
2445 | /** |
2446 | * clk_set_phase - adjust the phase shift of a clock signal | |
2447 | * @clk: clock signal source | |
2448 | * @degrees: number of degrees the signal is shifted | |
2449 | * | |
2450 | * Shifts the phase of a clock signal by the specified | |
2451 | * degrees. Returns 0 on success, a negative errno otherwise. | |
2452 | * | |
2453 | * This function makes no distinction about the input or reference | |
2454 | * signal that we adjust the clock signal phase against. For example, | |
2455 | * with phase-locked loop clock signal generators we may shift phase with | |
2456 | * respect to the feedback clock signal input, but for other cases the | |
2457 | * clock phase may be shifted with respect to some other, unspecified | |
2458 | * signal. | |
2459 | * | |
2460 | * Additionally the concept of phase shift does not propagate through | |
2461 | * the clock tree hierarchy, which sets it apart from clock rates and | |
2462 | * clock accuracy. A parent clock phase attribute does not have an | |
2463 | * impact on the phase attribute of a child clock. | |
b2476490 | 2464 | */ |
4dff95dc | 2465 | int clk_set_phase(struct clk *clk, int degrees) |
b2476490 | 2466 | { |
9e4d04ad | 2467 | int ret; |
b2476490 | 2468 | |
4dff95dc SB |
2469 | if (!clk) |
2470 | return 0; | |
b2476490 | 2471 | |
4dff95dc SB |
2472 | /* sanity check degrees */ |
2473 | degrees %= 360; | |
2474 | if (degrees < 0) | |
2475 | degrees += 360; | |
bf47b4fd | 2476 | |
4dff95dc | 2477 | clk_prepare_lock(); |
3fa2252b | 2478 | |
55e9b8b7 JB |
2479 | if (clk->exclusive_count) |
2480 | clk_core_rate_unprotect(clk->core); | |
3fa2252b | 2481 | |
9e4d04ad | 2482 | ret = clk_core_set_phase_nolock(clk->core, degrees); |
3fa2252b | 2483 | |
55e9b8b7 JB |
2484 | if (clk->exclusive_count) |
2485 | clk_core_rate_protect(clk->core); | |
b2476490 | 2486 | |
4dff95dc | 2487 | clk_prepare_unlock(); |
dfc202ea | 2488 | |
4dff95dc SB |
2489 | return ret; |
2490 | } | |
2491 | EXPORT_SYMBOL_GPL(clk_set_phase); | |
b2476490 | 2492 | |
4dff95dc SB |
2493 | static int clk_core_get_phase(struct clk_core *core) |
2494 | { | |
2495 | int ret; | |
b2476490 | 2496 | |
4dff95dc | 2497 | clk_prepare_lock(); |
1f9c63e8 SL |
2498 | /* Always try to update cached phase if possible */ |
2499 | if (core->ops->get_phase) | |
2500 | core->phase = core->ops->get_phase(core->hw); | |
4dff95dc SB |
2501 | ret = core->phase; |
2502 | clk_prepare_unlock(); | |
71472c0c | 2503 | |
4dff95dc | 2504 | return ret; |
b2476490 MT |
2505 | } |
2506 | ||
4dff95dc SB |
2507 | /** |
2508 | * clk_get_phase - return the phase shift of a clock signal | |
2509 | * @clk: clock signal source | |
2510 | * | |
2511 | * Returns the phase shift of a clock node in degrees, otherwise returns | |
2512 | * a negative errno. | |
2513 | */ | |
2514 | int clk_get_phase(struct clk *clk) | |
1c8e6004 | 2515 | { |
4dff95dc | 2516 | if (!clk) |
1c8e6004 TV |
2517 | return 0; |
2518 | ||
4dff95dc SB |
2519 | return clk_core_get_phase(clk->core); |
2520 | } | |
2521 | EXPORT_SYMBOL_GPL(clk_get_phase); | |
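/*
 * Example: a sketch of shifting a clock signal by a quarter period and
 * reading the cached phase back; the 90 degree value is hypothetical.
 */
static int example_quarter_phase(struct clk *clk)
{
	int ret;

	ret = clk_set_phase(clk, 90);
	if (ret)
		return ret;

	pr_info("phase is now %d degrees\n", clk_get_phase(clk));

	return 0;
}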
1c8e6004 | 2522 | |
9fba738a JB |
2523 | static void clk_core_reset_duty_cycle_nolock(struct clk_core *core) |
2524 | { | |
2525 | /* Assume a default value of 50% */ | |
2526 | core->duty.num = 1; | |
2527 | core->duty.den = 2; | |
2528 | } | |
2529 | ||
2530 | static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core); | |
2531 | ||
2532 | static int clk_core_update_duty_cycle_nolock(struct clk_core *core) | |
2533 | { | |
2534 | struct clk_duty *duty = &core->duty; | |
2535 | int ret = 0; | |
2536 | ||
2537 | if (!core->ops->get_duty_cycle) | |
2538 | return clk_core_update_duty_cycle_parent_nolock(core); | |
2539 | ||
2540 | ret = core->ops->get_duty_cycle(core->hw, duty); | |
2541 | if (ret) | |
2542 | goto reset; | |
2543 | ||
2544 | /* Don't trust the clock provider too much */ | |
2545 | if (duty->den == 0 || duty->num > duty->den) { | |
2546 | ret = -EINVAL; | |
2547 | goto reset; | |
2548 | } | |
2549 | ||
2550 | return 0; | |
2551 | ||
2552 | reset: | |
2553 | clk_core_reset_duty_cycle_nolock(core); | |
2554 | return ret; | |
2555 | } | |
2556 | ||
2557 | static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core) | |
2558 | { | |
2559 | int ret = 0; | |
2560 | ||
2561 | if (core->parent && | |
2562 | core->flags & CLK_DUTY_CYCLE_PARENT) { | |
2563 | ret = clk_core_update_duty_cycle_nolock(core->parent); | |
2564 | memcpy(&core->duty, &core->parent->duty, sizeof(core->duty)); | |
2565 | } else { | |
2566 | clk_core_reset_duty_cycle_nolock(core); | |
2567 | } | |
2568 | ||
2569 | return ret; | |
2570 | } | |
2571 | ||
2572 | static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core, | |
2573 | struct clk_duty *duty); | |
2574 | ||
2575 | static int clk_core_set_duty_cycle_nolock(struct clk_core *core, | |
2576 | struct clk_duty *duty) | |
2577 | { | |
2578 | int ret; | |
2579 | ||
2580 | lockdep_assert_held(&prepare_lock); | |
2581 | ||
2582 | if (clk_core_rate_is_protected(core)) | |
2583 | return -EBUSY; | |
2584 | ||
2585 | trace_clk_set_duty_cycle(core, duty); | |
2586 | ||
2587 | if (!core->ops->set_duty_cycle) | |
2588 | return clk_core_set_duty_cycle_parent_nolock(core, duty); | |
2589 | ||
2590 | ret = core->ops->set_duty_cycle(core->hw, duty); | |
2591 | if (!ret) | |
2592 | memcpy(&core->duty, duty, sizeof(*duty)); | |
2593 | ||
2594 | trace_clk_set_duty_cycle_complete(core, duty); | |
2595 | ||
2596 | return ret; | |
2597 | } | |
2598 | ||
2599 | static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core, | |
2600 | struct clk_duty *duty) | |
2601 | { | |
2602 | int ret = 0; | |
2603 | ||
2604 | if (core->parent && | |
2605 | core->flags & (CLK_DUTY_CYCLE_PARENT | CLK_SET_RATE_PARENT)) { | |
2606 | ret = clk_core_set_duty_cycle_nolock(core->parent, duty); | |
2607 | memcpy(&core->duty, &core->parent->duty, sizeof(core->duty)); | |
2608 | } | |
2609 | ||
2610 | return ret; | |
2611 | } | |
2612 | ||
2613 | /** | |
2614 | * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal | |
2615 | * @clk: clock signal source | |
2616 | * @num: numerator of the duty cycle ratio to be applied | |
2617 | * @den: denominator of the duty cycle ratio to be applied | |
2618 | * | |
2619 | * Apply the duty cycle ratio if the ratio is valid and the clock can | |
2620 | * perform this operation | |
2621 | * | |
2622 | * Returns (0) on success, a negative errno otherwise. | |
2623 | */ | |
2624 | int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den) | |
2625 | { | |
2626 | int ret; | |
2627 | struct clk_duty duty; | |
2628 | ||
2629 | if (!clk) | |
2630 | return 0; | |
2631 | ||
2632 | /* sanity check the ratio */ | |
2633 | if (den == 0 || num > den) | |
2634 | return -EINVAL; | |
2635 | ||
2636 | duty.num = num; | |
2637 | duty.den = den; | |
2638 | ||
2639 | clk_prepare_lock(); | |
2640 | ||
2641 | if (clk->exclusive_count) | |
2642 | clk_core_rate_unprotect(clk->core); | |
2643 | ||
2644 | ret = clk_core_set_duty_cycle_nolock(clk->core, &duty); | |
2645 | ||
2646 | if (clk->exclusive_count) | |
2647 | clk_core_rate_protect(clk->core); | |
2648 | ||
2649 | clk_prepare_unlock(); | |
2650 | ||
2651 | return ret; | |
2652 | } | |
2653 | EXPORT_SYMBOL_GPL(clk_set_duty_cycle); | |
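/*
 * Example: a sketch requesting a 1/4 (25%) duty cycle. The ratio is
 * hypothetical and only takes effect if the provider (or a parent, with
 * CLK_DUTY_CYCLE_PARENT) implements .set_duty_cycle.
 */
static int example_quarter_duty(struct clk *clk)
{
	return clk_set_duty_cycle(clk, 1, 4);
}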
2654 | ||
2655 | static int clk_core_get_scaled_duty_cycle(struct clk_core *core, | |
2656 | unsigned int scale) | |
2657 | { | |
2658 | struct clk_duty *duty = &core->duty; | |
2659 | int ret; | |
2660 | ||
2661 | clk_prepare_lock(); | |
2662 | ||
2663 | ret = clk_core_update_duty_cycle_nolock(core); | |
2664 | if (!ret) | |
2665 | ret = mult_frac(scale, duty->num, duty->den); | |
2666 | ||
2667 | clk_prepare_unlock(); | |
2668 | ||
2669 | return ret; | |
2670 | } | |
2671 | ||
2672 | /** | |
2673 | * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal | |
2674 | * @clk: clock signal source | |
2675 | * @scale: scaling factor to be applied to represent the ratio as an integer | |
2676 | * | |
2677 | * Returns the duty cycle ratio of a clock node multiplied by the provided | |
2678 | * scaling factor, or negative errno on error. | |
2679 | */ | |
2680 | int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale) | |
2681 | { | |
2682 | if (!clk) | |
2683 | return 0; | |
2684 | ||
2685 | return clk_core_get_scaled_duty_cycle(clk->core, scale); | |
2686 | } | |
2687 | EXPORT_SYMBOL_GPL(clk_get_scaled_duty_cycle); | |
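/*
 * Example: a sketch of reading the duty cycle back as an integer. With a
 * scale of 100 the result is a percentage, so a 1/2 ratio reads as 50.
 */
static void example_show_duty(struct clk *clk)
{
	int pct = clk_get_scaled_duty_cycle(clk, 100);

	if (pct >= 0)
		pr_info("duty cycle is %d%%\n", pct);
}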
2688 | ||
4dff95dc SB |
2689 | /** |
2690 | * clk_is_match - check if two clk's point to the same hardware clock | |
2691 | * @p: clk compared against q | |
2692 | * @q: clk compared against p | |
2693 | * | |
2694 | * Returns true if the two struct clk pointers both point to the same hardware | |
2695 | * clock node. Put differently, returns true if struct clk *p and struct clk *q | |
2696 | * share the same struct clk_core object. | |
2697 | * | |
2698 | * Returns false otherwise. Note that two NULL clks are treated as matching. | |
2699 | */ | |
2700 | bool clk_is_match(const struct clk *p, const struct clk *q) | |
2701 | { | |
2702 | /* trivial case: identical struct clk's or both NULL */ | |
2703 | if (p == q) | |
2704 | return true; | |
1c8e6004 | 2705 | |
3fe003f9 | 2706 | /* true if clk->core pointers match. Avoid dereferencing garbage */ |
4dff95dc SB |
2707 | if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q)) |
2708 | if (p->core == q->core) | |
2709 | return true; | |
1c8e6004 | 2710 | |
4dff95dc SB |
2711 | return false; |
2712 | } | |
2713 | EXPORT_SYMBOL_GPL(clk_is_match); | |
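/*
 * Example: a sketch of using clk_is_match() to detect that two consumer
 * handles, possibly obtained under different con_ids, drive the same
 * hardware clock.
 */
static void example_check_shared(struct clk *a, struct clk *b)
{
	if (clk_is_match(a, b))
		pr_info("handles refer to the same hardware clock\n");
}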
1c8e6004 | 2714 | |
4dff95dc | 2715 | /*** debugfs support ***/ |
1c8e6004 | 2716 | |
4dff95dc SB |
2717 | #ifdef CONFIG_DEBUG_FS |
2718 | #include <linux/debugfs.h> | |
1c8e6004 | 2719 | |
4dff95dc SB |
2720 | static struct dentry *rootdir; |
2721 | static int inited = 0; | |
2722 | static DEFINE_MUTEX(clk_debug_lock); | |
2723 | static HLIST_HEAD(clk_debug_list); | |
1c8e6004 | 2724 | |
4dff95dc SB |
2725 | static struct hlist_head *all_lists[] = { |
2726 | &clk_root_list, | |
2727 | &clk_orphan_list, | |
2728 | NULL, | |
2729 | }; | |
2730 | ||
2731 | static struct hlist_head *orphan_list[] = { | |
2732 | &clk_orphan_list, | |
2733 | NULL, | |
2734 | }; | |
2735 | ||
2736 | static void clk_summary_show_one(struct seq_file *s, struct clk_core *c, | |
2737 | int level) | |
b2476490 | 2738 | { |
4dff95dc SB |
2739 | if (!c) |
2740 | return; | |
b2476490 | 2741 | |
9fba738a | 2742 | seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu %5d %6d\n", |
4dff95dc SB |
2743 | level * 3 + 1, "", |
2744 | 30 - level * 3, c->name, | |
e55a839a JB |
2745 | c->enable_count, c->prepare_count, c->protect_count, |
2746 | clk_core_get_rate(c), clk_core_get_accuracy(c), | |
9fba738a JB |
2747 | clk_core_get_phase(c), |
2748 | clk_core_get_scaled_duty_cycle(c, 100000)); | |
4dff95dc | 2749 | } |
89ac8d7a | 2750 | |
4dff95dc SB |
2751 | static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c, |
2752 | int level) | |
2753 | { | |
2754 | struct clk_core *child; | |
b2476490 | 2755 | |
4dff95dc SB |
2756 | if (!c) |
2757 | return; | |
b2476490 | 2758 | |
4dff95dc | 2759 | clk_summary_show_one(s, c, level); |
0e1c0301 | 2760 | |
4dff95dc SB |
2761 | hlist_for_each_entry(child, &c->children, child_node) |
2762 | clk_summary_show_subtree(s, child, level + 1); | |
1c8e6004 | 2763 | } |
b2476490 | 2764 | |
4dff95dc | 2765 | static int clk_summary_show(struct seq_file *s, void *data) |
1c8e6004 | 2766 | { |
4dff95dc SB |
2767 | struct clk_core *c; |
2768 | struct hlist_head **lists = (struct hlist_head **)s->private; | |
1c8e6004 | 2769 | |
9fba738a JB |
2770 | seq_puts(s, " enable prepare protect duty\n"); |
2771 | seq_puts(s, " clock count count count rate accuracy phase cycle\n"); | |
2772 | seq_puts(s, "---------------------------------------------------------------------------------------------\n"); | |
b2476490 | 2773 | |
1c8e6004 TV |
2774 | clk_prepare_lock(); |
2775 | ||
4dff95dc SB |
2776 | for (; *lists; lists++) |
2777 | hlist_for_each_entry(c, *lists, child_node) | |
2778 | clk_summary_show_subtree(s, c, 0); | |
b2476490 | 2779 | |
eab89f69 | 2780 | clk_prepare_unlock(); |
b2476490 | 2781 | |
4dff95dc | 2782 | return 0; |
b2476490 | 2783 | } |
fec0ef3f | 2784 | DEFINE_SHOW_ATTRIBUTE(clk_summary); |
b2476490 | 2785 | |
4dff95dc SB |
2786 | static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level) |
2787 | { | |
2788 | if (!c) | |
2789 | return; | |
b2476490 | 2790 | |
7cb81136 | 2791 | /* This should be JSON format, i.e. elements separated with a comma */ |
4dff95dc SB |
2792 | seq_printf(s, "\"%s\": { ", c->name); |
2793 | seq_printf(s, "\"enable_count\": %d,", c->enable_count); | |
2794 | seq_printf(s, "\"prepare_count\": %d,", c->prepare_count); | |
e55a839a | 2795 | seq_printf(s, "\"protect_count\": %d,", c->protect_count); |
7cb81136 SW |
2796 | seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c)); |
2797 | seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c)); | |
c6e90997 | 2798 | seq_printf(s, "\"phase\": %d,", clk_core_get_phase(c)); |
9fba738a JB |
2799 | seq_printf(s, "\"duty_cycle\": %u", |
2800 | clk_core_get_scaled_duty_cycle(c, 100000)); | |
b2476490 | 2801 | } |
b2476490 | 2802 | |
4dff95dc | 2803 | static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level) |
b2476490 | 2804 | { |
4dff95dc | 2805 | struct clk_core *child; |
b2476490 | 2806 | |
4dff95dc SB |
2807 | if (!c) |
2808 | return; | |
b2476490 | 2809 | |
4dff95dc | 2810 | clk_dump_one(s, c, level); |
b2476490 | 2811 | |
4dff95dc | 2812 | hlist_for_each_entry(child, &c->children, child_node) { |
4d327586 | 2813 | seq_putc(s, ','); |
4dff95dc | 2814 | clk_dump_subtree(s, child, level + 1); |
b2476490 MT |
2815 | } |
2816 | ||
4d327586 | 2817 | seq_putc(s, '}'); |
b2476490 MT |
2818 | } |
2819 | ||
fec0ef3f | 2820 | static int clk_dump_show(struct seq_file *s, void *data) |
4e88f3de | 2821 | { |
4dff95dc SB |
2822 | struct clk_core *c; |
2823 | bool first_node = true; | |
2824 | struct hlist_head **lists = (struct hlist_head **)s->private; | |
4e88f3de | 2825 | |
4d327586 | 2826 | seq_putc(s, '{'); |
4dff95dc | 2827 | clk_prepare_lock(); |
035a61c3 | 2828 | |
4dff95dc SB |
2829 | for (; *lists; lists++) { |
2830 | hlist_for_each_entry(c, *lists, child_node) { | |
2831 | if (!first_node) | |
4d327586 | 2832 | seq_putc(s, ','); |
4dff95dc SB |
2833 | first_node = false; |
2834 | clk_dump_subtree(s, c, 0); | |
2835 | } | |
2836 | } | |
4e88f3de | 2837 | |
4dff95dc | 2838 | clk_prepare_unlock(); |
4e88f3de | 2839 | |
70e9f4dd | 2840 | seq_puts(s, "}\n"); |
4dff95dc | 2841 | return 0; |
4e88f3de | 2842 | } |
fec0ef3f | 2843 | DEFINE_SHOW_ATTRIBUTE(clk_dump); |
89ac8d7a | 2844 | |
a6059ab9 GU |
2845 | static const struct { |
2846 | unsigned long flag; | |
2847 | const char *name; | |
2848 | } clk_flags[] = { | |
40dd71c7 | 2849 | #define ENTRY(f) { f, #f } |
a6059ab9 GU |
2850 | ENTRY(CLK_SET_RATE_GATE), |
2851 | ENTRY(CLK_SET_PARENT_GATE), | |
2852 | ENTRY(CLK_SET_RATE_PARENT), | |
2853 | ENTRY(CLK_IGNORE_UNUSED), | |
a6059ab9 GU |
2854 | ENTRY(CLK_GET_RATE_NOCACHE), |
2855 | ENTRY(CLK_SET_RATE_NO_REPARENT), | |
2856 | ENTRY(CLK_GET_ACCURACY_NOCACHE), | |
2857 | ENTRY(CLK_RECALC_NEW_RATES), | |
2858 | ENTRY(CLK_SET_RATE_UNGATE), | |
2859 | ENTRY(CLK_IS_CRITICAL), | |
2860 | ENTRY(CLK_OPS_PARENT_ENABLE), | |
9fba738a | 2861 | ENTRY(CLK_DUTY_CYCLE_PARENT), |
a6059ab9 GU |
2862 | #undef ENTRY |
2863 | }; | |
2864 | ||
fec0ef3f | 2865 | static int clk_flags_show(struct seq_file *s, void *data) |
a6059ab9 GU |
2866 | { |
2867 | struct clk_core *core = s->private; | |
2868 | unsigned long flags = core->flags; | |
2869 | unsigned int i; | |
2870 | ||
2871 | for (i = 0; flags && i < ARRAY_SIZE(clk_flags); i++) { | |
2872 | if (flags & clk_flags[i].flag) { | |
2873 | seq_printf(s, "%s\n", clk_flags[i].name); | |
2874 | flags &= ~clk_flags[i].flag; | |
2875 | } | |
2876 | } | |
2877 | if (flags) { | |
2878 | /* Unknown flags */ | |
2879 | seq_printf(s, "0x%lx\n", flags); | |
2880 | } | |
2881 | ||
2882 | return 0; | |
2883 | } | |
fec0ef3f | 2884 | DEFINE_SHOW_ATTRIBUTE(clk_flags); |
a6059ab9 | 2885 | |
fec0ef3f | 2886 | static int possible_parents_show(struct seq_file *s, void *data) |
92031575 PDS |
2887 | { |
2888 | struct clk_core *core = s->private; | |
2889 | int i; | |
2890 | ||
2891 | for (i = 0; i < core->num_parents - 1; i++) | |
2892 | seq_printf(s, "%s ", core->parent_names[i]); | |
2893 | ||
2894 | seq_printf(s, "%s\n", core->parent_names[i]); | |
2895 | ||
2896 | return 0; | |
2897 | } | |
fec0ef3f | 2898 | DEFINE_SHOW_ATTRIBUTE(possible_parents); |
92031575 | 2899 | |
9fba738a JB |
2900 | static int clk_duty_cycle_show(struct seq_file *s, void *data) |
2901 | { | |
2902 | struct clk_core *core = s->private; | |
2903 | struct clk_duty *duty = &core->duty; | |
2904 | ||
2905 | seq_printf(s, "%u/%u\n", duty->num, duty->den); | |
2906 | ||
2907 | return 0; | |
2908 | } | |
2909 | DEFINE_SHOW_ATTRIBUTE(clk_duty_cycle); | |
2910 | ||
8a26bbbb | 2911 | static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry) |
4dff95dc | 2912 | { |
8a26bbbb | 2913 | struct dentry *root; |
b61c43c0 | 2914 | |
8a26bbbb GKH |
2915 | if (!core || !pdentry) |
2916 | return; | |
b2476490 | 2917 | |
8a26bbbb GKH |
2918 | root = debugfs_create_dir(core->name, pdentry); |
2919 | core->dentry = root; | |
92031575 | 2920 | |
8a26bbbb GKH |
2921 | debugfs_create_ulong("clk_rate", 0444, root, &core->rate); |
2922 | debugfs_create_ulong("clk_accuracy", 0444, root, &core->accuracy); | |
2923 | debugfs_create_u32("clk_phase", 0444, root, &core->phase); | |
2924 | debugfs_create_file("clk_flags", 0444, root, core, &clk_flags_fops); | |
2925 | debugfs_create_u32("clk_prepare_count", 0444, root, &core->prepare_count); | |
2926 | debugfs_create_u32("clk_enable_count", 0444, root, &core->enable_count); | |
2927 | debugfs_create_u32("clk_protect_count", 0444, root, &core->protect_count); | |
2928 | debugfs_create_u32("clk_notifier_count", 0444, root, &core->notifier_count); | |
9fba738a JB |
2929 | debugfs_create_file("clk_duty_cycle", 0444, root, core, |
2930 | &clk_duty_cycle_fops); | |
b2476490 | 2931 | |
8a26bbbb GKH |
2932 | if (core->num_parents > 1) |
2933 | debugfs_create_file("clk_possible_parents", 0444, root, core, | |
2934 | &possible_parents_fops); | |
b2476490 | 2935 | |
8a26bbbb GKH |
2936 | if (core->ops->debug_init) |
2937 | core->ops->debug_init(core->hw, core->dentry); | |
b2476490 | 2938 | } |
035a61c3 TV |
2939 | |
2940 | /** | |
6e5ab41b SB |
2941 | * clk_debug_register - add a clk node to the debugfs clk directory |
2942 | * @core: the clk being added to the debugfs clk directory | |
035a61c3 | 2943 | * |
6e5ab41b SB |
2944 | * Dynamically adds a clk to the debugfs clk directory if debugfs has been |
2945 | * initialized. Otherwise it bails out early since the debugfs clk directory | |
4dff95dc | 2946 | * will be created lazily by clk_debug_init as part of a late_initcall. |
035a61c3 | 2947 | */ |
8a26bbbb | 2948 | static void clk_debug_register(struct clk_core *core) |
035a61c3 | 2949 | { |
4dff95dc SB |
2950 | mutex_lock(&clk_debug_lock); |
2951 | hlist_add_head(&core->debug_node, &clk_debug_list); | |
db3188fa | 2952 | if (inited) |
8a26bbbb | 2953 | clk_debug_create_one(core, rootdir); |
4dff95dc | 2954 | mutex_unlock(&clk_debug_lock); |
035a61c3 | 2955 | } |
b2476490 | 2956 | |
4dff95dc | 2957 | /** |
6e5ab41b SB |
2958 | * clk_debug_unregister - remove a clk node from the debugfs clk directory |
2959 | * @core: the clk being removed from the debugfs clk directory | |
e59c5371 | 2960 | * |
6e5ab41b SB |
2961 | * Dynamically removes a clk and all its child nodes from the |
2962 | * debugfs clk directory if clk->dentry points to debugfs created by | |
706d5c73 | 2963 | * clk_debug_register in __clk_core_init. |
e59c5371 | 2964 | */ |
4dff95dc | 2965 | static void clk_debug_unregister(struct clk_core *core) |
e59c5371 | 2966 | { |
4dff95dc SB |
2967 | mutex_lock(&clk_debug_lock); |
2968 | hlist_del_init(&core->debug_node); | |
2969 | debugfs_remove_recursive(core->dentry); | |
2970 | core->dentry = NULL; | |
2971 | mutex_unlock(&clk_debug_lock); | |
2972 | } | |
e59c5371 | 2973 | |
4dff95dc | 2974 | /** |
6e5ab41b | 2975 | * clk_debug_init - lazily populate the debugfs clk directory |
4dff95dc | 2976 | * |
6e5ab41b SB |
2977 | * clks are often initialized very early during boot before memory can be |
2978 | * dynamically allocated and well before debugfs is set up. This function | |
2979 | * populates the debugfs clk directory once at boot-time when we know that | |
2980 | * debugfs is set up. It should only be called once at boot-time; all other clks | |
2981 | * added dynamically will be done so with clk_debug_register. | |
4dff95dc SB |
2982 | */ |
2983 | static int __init clk_debug_init(void) | |
2984 | { | |
2985 | struct clk_core *core; | |
dfc202ea | 2986 | |
4dff95dc | 2987 | rootdir = debugfs_create_dir("clk", NULL); |
e59c5371 | 2988 | |
8a26bbbb GKH |
2989 | debugfs_create_file("clk_summary", 0444, rootdir, &all_lists, |
2990 | &clk_summary_fops); | |
2991 | debugfs_create_file("clk_dump", 0444, rootdir, &all_lists, | |
2992 | &clk_dump_fops); | |
2993 | debugfs_create_file("clk_orphan_summary", 0444, rootdir, &orphan_list, | |
2994 | &clk_summary_fops); | |
2995 | debugfs_create_file("clk_orphan_dump", 0444, rootdir, &orphan_list, | |
2996 | &clk_dump_fops); | |
e59c5371 | 2997 | |
4dff95dc SB |
2998 | mutex_lock(&clk_debug_lock); |
2999 | hlist_for_each_entry(core, &clk_debug_list, debug_node) | |
3000 | clk_debug_create_one(core, rootdir); | |
e59c5371 | 3001 | |
4dff95dc SB |
3002 | inited = 1; |
3003 | mutex_unlock(&clk_debug_lock); | |
e59c5371 | 3004 | |
4dff95dc SB |
3005 | return 0; |
3006 | } | |
3007 | late_initcall(clk_debug_init); | |
3008 | #else | |
8a26bbbb | 3009 | static inline void clk_debug_register(struct clk_core *core) { } |
4dff95dc SB |
3010 | static inline void clk_debug_reparent(struct clk_core *core, |
3011 | struct clk_core *new_parent) | |
035a61c3 | 3012 | { |
035a61c3 | 3013 | } |
4dff95dc | 3014 | static inline void clk_debug_unregister(struct clk_core *core) |
3d3801ef | 3015 | { |
3d3801ef | 3016 | } |
4dff95dc | 3017 | #endif |
3d3801ef | 3018 | |
b2476490 | 3019 | /** |
be45ebf2 | 3020 | * __clk_core_init - initialize the data structures in a struct clk_core |
d35c80c2 | 3021 | * @core: clk_core being initialized |
b2476490 | 3022 | * |
035a61c3 | 3023 | * Initializes the lists in struct clk_core, queries the hardware for the |
b2476490 | 3024 | * parent and rate and sets them both. |
b2476490 | 3025 | */ |
be45ebf2 | 3026 | static int __clk_core_init(struct clk_core *core) |
b2476490 | 3027 | { |
9a34b453 | 3028 | int i, ret; |
035a61c3 | 3029 | struct clk_core *orphan; |
b67bfe0d | 3030 | struct hlist_node *tmp2; |
1c8e6004 | 3031 | unsigned long rate; |
b2476490 | 3032 | |
d35c80c2 | 3033 | if (!core) |
d1302a36 | 3034 | return -EINVAL; |
b2476490 | 3035 | |
eab89f69 | 3036 | clk_prepare_lock(); |
b2476490 | 3037 | |
9a34b453 MS |
3038 | ret = clk_pm_runtime_get(core); |
3039 | if (ret) | |
3040 | goto unlock; | |
3041 | ||
b2476490 | 3042 | /* check to see if a clock with this name is already registered */ |
d6968fca | 3043 | if (clk_core_lookup(core->name)) { |
d1302a36 | 3044 | pr_debug("%s: clk %s already initialized\n", |
d6968fca | 3045 | __func__, core->name); |
d1302a36 | 3046 | ret = -EEXIST; |
b2476490 | 3047 | goto out; |
d1302a36 | 3048 | } |
b2476490 | 3049 | |
5fb94e9c | 3050 | /* check that clk_ops are sane. See Documentation/driver-api/clk.rst */ |
d6968fca SB |
3051 | if (core->ops->set_rate && |
3052 | !((core->ops->round_rate || core->ops->determine_rate) && | |
3053 | core->ops->recalc_rate)) { | |
c44fccb5 MY |
3054 | pr_err("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n", |
3055 | __func__, core->name); | |
d1302a36 | 3056 | ret = -EINVAL; |
d4d7e3dd MT |
3057 | goto out; |
3058 | } | |
3059 | ||
d6968fca | 3060 | if (core->ops->set_parent && !core->ops->get_parent) { |
c44fccb5 MY |
3061 | pr_err("%s: %s must implement .get_parent & .set_parent\n", |
3062 | __func__, core->name); | |
d1302a36 | 3063 | ret = -EINVAL; |
d4d7e3dd MT |
3064 | goto out; |
3065 | } | |
3066 | ||
3c8e77dd MY |
3067 | if (core->num_parents > 1 && !core->ops->get_parent) { |
3068 | pr_err("%s: %s must implement .get_parent as it has multiple parents\n", |
3069 | __func__, core->name); | |
3070 | ret = -EINVAL; | |
3071 | goto out; | |
3072 | } | |
3073 | ||
d6968fca SB |
3074 | if (core->ops->set_rate_and_parent && |
3075 | !(core->ops->set_parent && core->ops->set_rate)) { | |
c44fccb5 | 3076 | pr_err("%s: %s must implement .set_parent & .set_rate\n", |
d6968fca | 3077 | __func__, core->name); |
3fa2252b SB |
3078 | ret = -EINVAL; |
3079 | goto out; | |
3080 | } | |
3081 | ||
b2476490 | 3082 | /* throw a WARN if any entries in parent_names are NULL */ |
d6968fca SB |
3083 | for (i = 0; i < core->num_parents; i++) |
3084 | WARN(!core->parent_names[i], | |
b2476490 | 3085 | "%s: invalid NULL in %s's .parent_names\n", |
d6968fca | 3086 | __func__, core->name); |
b2476490 | 3087 | |
d6968fca | 3088 | core->parent = __clk_init_parent(core); |
b2476490 MT |
3089 | |
3090 | /* | |
706d5c73 SB |
3091 | * Populate core->parent if parent has already been clk_core_init'd. If |
3092 | * parent has not yet been clk_core_init'd then place clk in the orphan | |
47b0eeb3 | 3093 | * list. If clk doesn't have any parents then place it in the root |
b2476490 MT |
3094 | * clk list. |
3095 | * | |
3096 | * Every time a new clk is clk_init'd then we walk the list of orphan | |
3097 | * clocks and re-parent any that are children of the clock currently | |
3098 | * being clk_init'd. | |
3099 | */ | |
e6500344 | 3100 | if (core->parent) { |
d6968fca SB |
3101 | hlist_add_head(&core->child_node, |
3102 | &core->parent->children); | |
e6500344 | 3103 | core->orphan = core->parent->orphan; |
47b0eeb3 | 3104 | } else if (!core->num_parents) { |
d6968fca | 3105 | hlist_add_head(&core->child_node, &clk_root_list); |
e6500344 HS |
3106 | core->orphan = false; |
3107 | } else { | |
d6968fca | 3108 | hlist_add_head(&core->child_node, &clk_orphan_list); |
e6500344 HS |
3109 | core->orphan = true; |
3110 | } | |
b2476490 | 3111 | |
541debae JB |
3112 | /* |
3113 | * optional platform-specific magic | |
3114 | * | |
3115 | * The .init callback is not used by any of the basic clock types, but | |
3116 | * exists for weird hardware that must perform initialization magic. | |
3117 | * Please consider other ways of solving initialization problems before | |
3118 | * using this callback, as its use is discouraged. | |
3119 | */ | |
3120 | if (core->ops->init) | |
3121 | core->ops->init(core->hw); | |
3122 | ||
5279fc40 BB |
3123 | /* |
3124 | * Set clk's accuracy. The preferred method is to use | |
3125 | * .recalc_accuracy. For simple clocks and lazy developers the default | |
3126 | * fallback is to use the parent's accuracy. If a clock doesn't have a | |
3127 | * parent (or is orphaned) then accuracy is set to zero (perfect | |
3128 | * clock). | |
3129 | */ | |
d6968fca SB |
3130 | if (core->ops->recalc_accuracy) |
3131 | core->accuracy = core->ops->recalc_accuracy(core->hw, | |
3132 | __clk_get_accuracy(core->parent)); | |
3133 | else if (core->parent) | |
3134 | core->accuracy = core->parent->accuracy; | |
5279fc40 | 3135 | else |
d6968fca | 3136 | core->accuracy = 0; |
5279fc40 | 3137 | |
9824cf73 MR |
3138 | /* |
3139 | * Set clk's phase. | |
3140 | * Since a phase is by definition relative to its parent, just | |
3141 | * query the current clock phase, or just assume it's in phase. | |
3142 | */ | |
d6968fca SB |
3143 | if (core->ops->get_phase) |
3144 | core->phase = core->ops->get_phase(core->hw); | |
9824cf73 | 3145 | else |
d6968fca | 3146 | core->phase = 0; |
9824cf73 | 3147 | |
9fba738a JB |
3148 | /* |
3149 | * Set clk's duty cycle. | |
3150 | */ | |
3151 | clk_core_update_duty_cycle_nolock(core); | |
3152 | ||
b2476490 MT |
3153 | /* |
3154 | * Set clk's rate. The preferred method is to use .recalc_rate. For | |
3155 | * simple clocks and lazy developers the default fallback is to use the | |
3156 | * parent's rate. If a clock doesn't have a parent (or is orphaned) | |
3157 | * then rate is set to zero. | |
3158 | */ | |
d6968fca SB |
3159 | if (core->ops->recalc_rate) |
3160 | rate = core->ops->recalc_rate(core->hw, | |
3161 | clk_core_get_rate_nolock(core->parent)); | |
3162 | else if (core->parent) | |
3163 | rate = core->parent->rate; | |
b2476490 | 3164 | else |
1c8e6004 | 3165 | rate = 0; |
d6968fca | 3166 | core->rate = core->req_rate = rate; |
b2476490 | 3167 | |
99652a46 JB |
3168 | /* |
3169 | * Enable CLK_IS_CRITICAL clocks so newly added critical clocks | |
3170 | * don't get accidentally disabled when walking the orphan tree and | |
3171 | * reparenting clocks | |
3172 | */ | |
3173 | if (core->flags & CLK_IS_CRITICAL) { | |
3174 | unsigned long flags; | |
3175 | ||
3176 | clk_core_prepare(core); | |
3177 | ||
3178 | flags = clk_enable_lock(); | |
3179 | clk_core_enable(core); | |
3180 | clk_enable_unlock(flags); | |
3181 | } | |
3182 | ||
b2476490 | 3183 | /* |
0e8f6e49 MY |
3184 | * walk the list of orphan clocks and reparent any for which a parent |
3185 | * can now be found. |
b2476490 | 3186 | */ |
b67bfe0d | 3187 | hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) { |
0e8f6e49 | 3188 | struct clk_core *parent = __clk_init_parent(orphan); |
1f61e5f1 | 3189 | |
904e6ead | 3190 | /* |
99652a46 JB |
3191 | * We need to use __clk_set_parent_before() and _after() to |
3192 | * properly migrate any prepare/enable count of the orphan |
3193 | * clock. This is important for CLK_IS_CRITICAL clocks, which | |
3194 | * are enabled during init but might not have a parent yet. | |
904e6ead MT |
3195 | */ |
3196 | if (parent) { | |
f8f8f1d0 | 3197 | /* update the clk tree topology */ |
99652a46 JB |
3198 | __clk_set_parent_before(orphan, parent); |
3199 | __clk_set_parent_after(orphan, parent, NULL); | |
904e6ead MT |
3200 | __clk_recalc_accuracies(orphan); |
3201 | __clk_recalc_rates(orphan, 0); | |
3202 | } | |
0e8f6e49 | 3203 | } |
b2476490 | 3204 | |
d6968fca | 3205 | kref_init(&core->ref); |
b2476490 | 3206 | out: |
9a34b453 MS |
3207 | clk_pm_runtime_put(core); |
3208 | unlock: | |
eab89f69 | 3209 | clk_prepare_unlock(); |
b2476490 | 3210 | |
89f7e9de | 3211 | if (!ret) |
d6968fca | 3212 | clk_debug_register(core); |
89f7e9de | 3213 | |
d1302a36 | 3214 | return ret; |
b2476490 MT |
3215 | } |
3216 | ||
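To illustrate the sanity checks enforced in __clk_core_init() above, here is a minimal, hypothetical set of clk_ops that would pass them for a rate-adjustable clock: providing .set_rate requires .recalc_rate plus either .round_rate or .determine_rate. All my_div_* names are invented for this sketch and are not taken from any real driver.

/* hypothetical fixed divide-by-2 clock, for illustration only */
static unsigned long my_div_recalc_rate(struct clk_hw *hw,
					unsigned long parent_rate)
{
	return parent_rate / 2;
}

static long my_div_round_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long *parent_rate)
{
	return *parent_rate / 2;
}

static int my_div_set_rate(struct clk_hw *hw, unsigned long rate,
			   unsigned long parent_rate)
{
	return 0;	/* nothing to program in this sketch */
}

static const struct clk_ops my_div_ops = {
	.recalc_rate	= my_div_recalc_rate,	/* required alongside .set_rate */
	.round_rate	= my_div_round_rate,	/* .determine_rate would also satisfy the check */
	.set_rate	= my_div_set_rate,
};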
1df4046a SB |
3217 | /** |
3218 | * clk_core_link_consumer - Add a clk consumer to the list of consumers in a clk_core | |
3219 | * @core: clk to add consumer to | |
3220 | * @clk: consumer to link to a clk | |
3221 | */ | |
3222 | static void clk_core_link_consumer(struct clk_core *core, struct clk *clk) | |
3223 | { | |
3224 | clk_prepare_lock(); | |
3225 | hlist_add_head(&clk->clks_node, &core->clks); | |
3226 | clk_prepare_unlock(); | |
3227 | } | |
3228 | ||
3229 | /** | |
3230 | * clk_core_unlink_consumer - Remove a clk consumer from the list of consumers in a clk_core | |
3231 | * @clk: consumer to unlink | |
3232 | */ | |
3233 | static void clk_core_unlink_consumer(struct clk *clk) | |
3234 | { | |
3235 | lockdep_assert_held(&prepare_lock); | |
3236 | hlist_del(&clk->clks_node); | |
3237 | } | |
3238 | ||
3239 | /** | |
3240 | * alloc_clk - Allocate a clk consumer, but leave it unlinked to the clk_core | |
3241 | * @core: clk to allocate a consumer for | |
3242 | * @dev_id: string describing device name | |
3243 | * @con_id: connection ID string on device | |
3244 | * | |
3245 | * Returns: clk consumer left unlinked from the consumer list | |
3246 | */ | |
3247 | static struct clk *alloc_clk(struct clk_core *core, const char *dev_id, | |
035a61c3 | 3248 | const char *con_id) |
0197b3ea | 3249 | { |
0197b3ea SK |
3250 | struct clk *clk; |
3251 | ||
035a61c3 TV |
3252 | clk = kzalloc(sizeof(*clk), GFP_KERNEL); |
3253 | if (!clk) | |
3254 | return ERR_PTR(-ENOMEM); | |
3255 | ||
1df4046a | 3256 | clk->core = core; |
035a61c3 | 3257 | clk->dev_id = dev_id; |
253160a8 | 3258 | clk->con_id = kstrdup_const(con_id, GFP_KERNEL); |
1c8e6004 TV |
3259 | clk->max_rate = ULONG_MAX; |
3260 | ||
0197b3ea SK |
3261 | return clk; |
3262 | } | |
035a61c3 | 3263 | |
1df4046a SB |
3264 | /** |
3265 | * free_clk - Free a clk consumer | |
3266 | * @clk: clk consumer to free | |
3267 | * | |
3268 | * Note, this assumes the clk has been unlinked from the clk_core consumer | |
3269 | * list. | |
3270 | */ | |
3271 | static void free_clk(struct clk *clk) | |
1c8e6004 | 3272 | { |
253160a8 | 3273 | kfree_const(clk->con_id); |
1c8e6004 TV |
3274 | kfree(clk); |
3275 | } | |
0197b3ea | 3276 | |
1df4046a SB |
3277 | /** |
3278 | * clk_hw_create_clk: Allocate and link a clk consumer to a clk_core given | |
3279 | * a clk_hw | |
efa85048 | 3280 | * @dev: clk consumer device |
1df4046a SB |
3281 | * @hw: clk_hw associated with the clk being consumed |
3282 | * @dev_id: string describing device name | |
3283 | * @con_id: connection ID string on device | |
3284 | * | |
3285 | * This is the main function used to create a clk pointer for use by clk | |
3286 | * consumers. It connects a consumer to the clk_core and clk_hw structures | |
3287 | * used by the framework and clk provider respectively. | |
3288 | */ | |
efa85048 | 3289 | struct clk *clk_hw_create_clk(struct device *dev, struct clk_hw *hw, |
1df4046a SB |
3290 | const char *dev_id, const char *con_id) |
3291 | { | |
3292 | struct clk *clk; | |
3293 | struct clk_core *core; | |
3294 | ||
3295 | /* This is to allow this function to be chained to others */ | |
3296 | if (IS_ERR_OR_NULL(hw)) | |
3297 | return ERR_CAST(hw); | |
3298 | ||
3299 | core = hw->core; | |
3300 | clk = alloc_clk(core, dev_id, con_id); | |
3301 | if (IS_ERR(clk)) | |
3302 | return clk; | |
efa85048 | 3303 | clk->dev = dev; |
1df4046a SB |
3304 | |
3305 | if (!try_module_get(core->owner)) { | |
3306 | free_clk(clk); | |
3307 | return ERR_PTR(-ENOENT); | |
3308 | } | |
3309 | ||
3310 | kref_get(&core->ref); | |
3311 | clk_core_link_consumer(core, clk); | |
3312 | ||
3313 | return clk; | |
3314 | } | |
3315 | ||
293ba3b4 SB |
3316 | /** |
3317 | * clk_register - allocate a new clock, register it and return an opaque cookie | |
3318 | * @dev: device that is registering this clock | |
3319 | * @hw: link to hardware-specific clock data | |
3320 | * | |
9fe9b7ab SB |
3321 | * clk_register is the *deprecated* interface for populating the clock tree with |
3322 | * new clock nodes. Use clk_hw_register() instead. | |
3323 | * | |
3324 | * Returns: a pointer to the newly allocated struct clk which | |
a59a5163 | 3325 | * cannot be dereferenced by driver code but may be used in conjunction with the |
293ba3b4 SB |
3326 | * rest of the clock API. In the event of an error clk_register will return an |
3327 | * error code; drivers must test for an error code after calling clk_register. | |
3328 | */ | |
3329 | struct clk *clk_register(struct device *dev, struct clk_hw *hw) | |
b2476490 | 3330 | { |
d1302a36 | 3331 | int i, ret; |
d6968fca | 3332 | struct clk_core *core; |
293ba3b4 | 3333 | |
d6968fca SB |
3334 | core = kzalloc(sizeof(*core), GFP_KERNEL); |
3335 | if (!core) { | |
293ba3b4 SB |
3336 | ret = -ENOMEM; |
3337 | goto fail_out; | |
3338 | } | |
b2476490 | 3339 | |
d6968fca SB |
3340 | core->name = kstrdup_const(hw->init->name, GFP_KERNEL); |
3341 | if (!core->name) { | |
0197b3ea SK |
3342 | ret = -ENOMEM; |
3343 | goto fail_name; | |
3344 | } | |
29fd2a34 JB |
3345 | |
3346 | if (WARN_ON(!hw->init->ops)) { | |
3347 | ret = -EINVAL; | |
3348 | goto fail_ops; | |
3349 | } | |
d6968fca | 3350 | core->ops = hw->init->ops; |
29fd2a34 | 3351 | |
9a34b453 | 3352 | if (dev && pm_runtime_enabled(dev)) |
24478839 MR |
3353 | core->rpm_enabled = true; |
3354 | core->dev = dev; | |
ac2df527 | 3355 | if (dev && dev->driver) |
d6968fca SB |
3356 | core->owner = dev->driver->owner; |
3357 | core->hw = hw; | |
3358 | core->flags = hw->init->flags; | |
3359 | core->num_parents = hw->init->num_parents; | |
9783c0d9 SB |
3360 | core->min_rate = 0; |
3361 | core->max_rate = ULONG_MAX; | |
d6968fca | 3362 | hw->core = core; |
b2476490 | 3363 | |
d1302a36 | 3364 | /* allocate local copy in case parent_names is __initdata */ |
d6968fca | 3365 | core->parent_names = kcalloc(core->num_parents, sizeof(char *), |
96a7ed90 | 3366 | GFP_KERNEL); |
d1302a36 | 3367 | |
d6968fca | 3368 | if (!core->parent_names) { |
d1302a36 MT |
3369 | ret = -ENOMEM; |
3370 | goto fail_parent_names; | |
3371 | } | |
3372 | ||
3373 | ||
3374 | /* copy each string name in case parent_names is __initdata */ | |
d6968fca SB |
3375 | for (i = 0; i < core->num_parents; i++) { |
3376 | core->parent_names[i] = kstrdup_const(hw->init->parent_names[i], | |
0197b3ea | 3377 | GFP_KERNEL); |
d6968fca | 3378 | if (!core->parent_names[i]) { |
d1302a36 MT |
3379 | ret = -ENOMEM; |
3380 | goto fail_parent_names_copy; | |
3381 | } | |
3382 | } | |
3383 | ||
176d1169 MY |
3384 | /* avoid unnecessary string look-ups of clk_core's possible parents. */ |
3385 | core->parents = kcalloc(core->num_parents, sizeof(*core->parents), | |
3386 | GFP_KERNEL); | |
3387 | if (!core->parents) { | |
3388 | ret = -ENOMEM; | |
3389 | goto fail_parents; | |
3390 | } |
3391 | ||
d6968fca | 3392 | INIT_HLIST_HEAD(&core->clks); |
1c8e6004 | 3393 | |
1df4046a SB |
3394 | /* |
3395 | * Don't call clk_hw_create_clk() here because that would pin the | |
3396 | * provider module to itself and prevent it from ever being removed. | |
3397 | */ | |
3398 | hw->clk = alloc_clk(core, NULL, NULL); | |
035a61c3 | 3399 | if (IS_ERR(hw->clk)) { |
035a61c3 | 3400 | ret = PTR_ERR(hw->clk); |
176d1169 | 3401 | goto fail_parents; |
035a61c3 TV |
3402 | } |
3403 | ||
1df4046a SB |
3404 | clk_core_link_consumer(hw->core, hw->clk); |
3405 | ||
be45ebf2 | 3406 | ret = __clk_core_init(core); |
d1302a36 | 3407 | if (!ret) |
035a61c3 | 3408 | return hw->clk; |
b2476490 | 3409 | |
1df4046a SB |
3410 | clk_prepare_lock(); |
3411 | clk_core_unlink_consumer(hw->clk); | |
3412 | clk_prepare_unlock(); | |
3413 | ||
3414 | free_clk(hw->clk); | |
035a61c3 | 3415 | hw->clk = NULL; |
b2476490 | 3416 | |
176d1169 MY |
3417 | fail_parents: |
3418 | kfree(core->parents); | |
d1302a36 MT |
3419 | fail_parent_names_copy: |
3420 | while (--i >= 0) | |
d6968fca SB |
3421 | kfree_const(core->parent_names[i]); |
3422 | kfree(core->parent_names); | |
d1302a36 | 3423 | fail_parent_names: |
29fd2a34 | 3424 | fail_ops: |
d6968fca | 3425 | kfree_const(core->name); |
0197b3ea | 3426 | fail_name: |
d6968fca | 3427 | kfree(core); |
d1302a36 MT |
3428 | fail_out: |
3429 | return ERR_PTR(ret); | |
b2476490 MT |
3430 | } |
3431 | EXPORT_SYMBOL_GPL(clk_register); | |
3432 | ||
4143804c SB |
3433 | /** |
3434 | * clk_hw_register - register a clk_hw and return an error code | |
3435 | * @dev: device that is registering this clock | |
3436 | * @hw: link to hardware-specific clock data | |
3437 | * | |
3438 | * clk_hw_register is the primary interface for populating the clock tree with | |
3439 | * new clock nodes. It returns an integer equal to zero indicating success or | |
3440 | * less than zero indicating failure. Drivers must test for an error code after | |
3441 | * calling clk_hw_register(). | |
3442 | */ | |
3443 | int clk_hw_register(struct device *dev, struct clk_hw *hw) | |
3444 | { | |
3445 | return PTR_ERR_OR_ZERO(clk_register(dev, hw)); | |
3446 | } | |
3447 | EXPORT_SYMBOL_GPL(clk_hw_register); | |
3448 | ||
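As a hedged illustration of the clk_hw_register() interface documented above, a hypothetical provider driver could register a simple gate-style clock as sketched below. The names my_gate_ops, my_hw, "my_gate" and the parent "osc24m" are invented for this example; the stubbed ops do no real hardware programming.

#include <linux/clk-provider.h>
#include <linux/platform_device.h>

static int my_gate_enable(struct clk_hw *hw) { return 0; }	/* stub */
static void my_gate_disable(struct clk_hw *hw) { }		/* stub */

static const struct clk_ops my_gate_ops = {
	.enable  = my_gate_enable,
	.disable = my_gate_disable,
};

static struct clk_hw my_hw;

static int my_probe(struct platform_device *pdev)
{
	static const char * const parents[] = { "osc24m" };
	static const struct clk_init_data init = {
		.name		= "my_gate",
		.ops		= &my_gate_ops,
		.parent_names	= parents,
		.num_parents	= ARRAY_SIZE(parents),
	};

	my_hw.init = &init;

	/* returns 0 on success or a negative errno on failure */
	return clk_hw_register(&pdev->dev, &my_hw);
}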
6e5ab41b | 3449 | /* Free memory allocated for a clock. */ |
fcb0ee6a SN |
3450 | static void __clk_release(struct kref *ref) |
3451 | { | |
d6968fca SB |
3452 | struct clk_core *core = container_of(ref, struct clk_core, ref); |
3453 | int i = core->num_parents; | |
fcb0ee6a | 3454 | |
496eadf8 KK |
3455 | lockdep_assert_held(&prepare_lock); |
3456 | ||
d6968fca | 3457 | kfree(core->parents); |
fcb0ee6a | 3458 | while (--i >= 0) |
d6968fca | 3459 | kfree_const(core->parent_names[i]); |
fcb0ee6a | 3460 | |
d6968fca SB |
3461 | kfree(core->parent_names); |
3462 | kfree_const(core->name); | |
3463 | kfree(core); | |
fcb0ee6a SN |
3464 | } |
3465 | ||
3466 | /* | |
3467 | * Empty clk_ops for unregistered clocks. These are used temporarily | |
3468 | * after clk_unregister() was called on a clock and until the last clock |
3469 | * consumer calls clk_put() and the struct clk object is freed. | |
3470 | */ | |
3471 | static int clk_nodrv_prepare_enable(struct clk_hw *hw) | |
3472 | { | |
3473 | return -ENXIO; | |
3474 | } | |
3475 | ||
3476 | static void clk_nodrv_disable_unprepare(struct clk_hw *hw) | |
3477 | { | |
3478 | WARN_ON_ONCE(1); | |
3479 | } | |
3480 | ||
3481 | static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate, | |
3482 | unsigned long parent_rate) | |
3483 | { | |
3484 | return -ENXIO; | |
3485 | } | |
3486 | ||
3487 | static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index) | |
3488 | { | |
3489 | return -ENXIO; | |
3490 | } | |
3491 | ||
3492 | static const struct clk_ops clk_nodrv_ops = { | |
3493 | .enable = clk_nodrv_prepare_enable, | |
3494 | .disable = clk_nodrv_disable_unprepare, | |
3495 | .prepare = clk_nodrv_prepare_enable, | |
3496 | .unprepare = clk_nodrv_disable_unprepare, | |
3497 | .set_rate = clk_nodrv_set_rate, | |
3498 | .set_parent = clk_nodrv_set_parent, | |
3499 | }; | |
3500 | ||
1df5c939 MB |
3501 | /** |
3502 | * clk_unregister - unregister a currently registered clock | |
3503 | * @clk: clock to unregister | |
1df5c939 | 3504 | */ |
fcb0ee6a SN |
3505 | void clk_unregister(struct clk *clk) |
3506 | { | |
3507 | unsigned long flags; | |
3508 | ||
6314b679 SB |
3509 | if (!clk || WARN_ON_ONCE(IS_ERR(clk))) |
3510 | return; | |
3511 | ||
035a61c3 | 3512 | clk_debug_unregister(clk->core); |
fcb0ee6a SN |
3513 | |
3514 | clk_prepare_lock(); | |
3515 | ||
035a61c3 TV |
3516 | if (clk->core->ops == &clk_nodrv_ops) { |
3517 | pr_err("%s: unregistered clock: %s\n", __func__, | |
3518 | clk->core->name); | |
4106a3d9 | 3519 | goto unlock; |
fcb0ee6a SN |
3520 | } |
3521 | /* | |
3522 | * Assign empty clock ops for consumers that might still hold | |
3523 | * a reference to this clock. | |
3524 | */ | |
3525 | flags = clk_enable_lock(); | |
035a61c3 | 3526 | clk->core->ops = &clk_nodrv_ops; |
fcb0ee6a SN |
3527 | clk_enable_unlock(flags); |
3528 | ||
035a61c3 TV |
3529 | if (!hlist_empty(&clk->core->children)) { |
3530 | struct clk_core *child; | |
874f224c | 3531 | struct hlist_node *t; |
fcb0ee6a SN |
3532 | |
3533 | /* Reparent all children to the orphan list. */ | |
035a61c3 TV |
3534 | hlist_for_each_entry_safe(child, t, &clk->core->children, |
3535 | child_node) | |
91baa9ff | 3536 | clk_core_set_parent_nolock(child, NULL); |
fcb0ee6a SN |
3537 | } |
3538 | ||
035a61c3 | 3539 | hlist_del_init(&clk->core->child_node); |
fcb0ee6a | 3540 | |
035a61c3 | 3541 | if (clk->core->prepare_count) |
fcb0ee6a | 3542 | pr_warn("%s: unregistering prepared clock: %s\n", |
035a61c3 | 3543 | __func__, clk->core->name); |
e55a839a JB |
3544 | |
3545 | if (clk->core->protect_count) | |
3546 | pr_warn("%s: unregistering protected clock: %s\n", | |
3547 | __func__, clk->core->name); | |
3548 | ||
035a61c3 | 3549 | kref_put(&clk->core->ref, __clk_release); |
4106a3d9 | 3550 | unlock: |
fcb0ee6a SN |
3551 | clk_prepare_unlock(); |
3552 | } | |
1df5c939 MB |
3553 | EXPORT_SYMBOL_GPL(clk_unregister); |
3554 | ||
4143804c SB |
3555 | /** |
3556 | * clk_hw_unregister - unregister a currently registered clk_hw | |
3557 | * @hw: hardware-specific clock data to unregister | |
3558 | */ | |
3559 | void clk_hw_unregister(struct clk_hw *hw) | |
3560 | { | |
3561 | clk_unregister(hw->clk); | |
3562 | } | |
3563 | EXPORT_SYMBOL_GPL(clk_hw_unregister); | |
3564 | ||
46c8773a SB |
3565 | static void devm_clk_release(struct device *dev, void *res) |
3566 | { | |
293ba3b4 | 3567 | clk_unregister(*(struct clk **)res); |
46c8773a SB |
3568 | } |
3569 | ||
4143804c SB |
3570 | static void devm_clk_hw_release(struct device *dev, void *res) |
3571 | { | |
3572 | clk_hw_unregister(*(struct clk_hw **)res); | |
3573 | } | |
3574 | ||
46c8773a SB |
3575 | /** |
3576 | * devm_clk_register - resource managed clk_register() | |
3577 | * @dev: device that is registering this clock | |
3578 | * @hw: link to hardware-specific clock data | |
3579 | * | |
9fe9b7ab SB |
3580 | * Managed clk_register(). This function is *deprecated*, use devm_clk_hw_register() instead. |
3581 | * | |
3582 | * Clocks returned from this function are automatically clk_unregister()ed on | |
3583 | * driver detach. See clk_register() for more information. | |
46c8773a SB |
3584 | */ |
3585 | struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw) | |
3586 | { | |
3587 | struct clk *clk; | |
293ba3b4 | 3588 | struct clk **clkp; |
46c8773a | 3589 | |
293ba3b4 SB |
3590 | clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL); |
3591 | if (!clkp) | |
46c8773a SB |
3592 | return ERR_PTR(-ENOMEM); |
3593 | ||
293ba3b4 SB |
3594 | clk = clk_register(dev, hw); |
3595 | if (!IS_ERR(clk)) { | |
3596 | *clkp = clk; | |
3597 | devres_add(dev, clkp); | |
46c8773a | 3598 | } else { |
293ba3b4 | 3599 | devres_free(clkp); |
46c8773a SB |
3600 | } |
3601 | ||
3602 | return clk; | |
3603 | } | |
3604 | EXPORT_SYMBOL_GPL(devm_clk_register); | |
3605 | ||
4143804c SB |
3606 | /** |
3607 | * devm_clk_hw_register - resource managed clk_hw_register() | |
3608 | * @dev: device that is registering this clock | |
3609 | * @hw: link to hardware-specific clock data | |
3610 | * | |
c47265ad | 3611 | * Managed clk_hw_register(). Clocks registered by this function are |
4143804c SB |
3612 | * automatically clk_hw_unregister()ed on driver detach. See clk_hw_register() |
3613 | * for more information. | |
3614 | */ | |
3615 | int devm_clk_hw_register(struct device *dev, struct clk_hw *hw) | |
3616 | { | |
3617 | struct clk_hw **hwp; | |
3618 | int ret; | |
3619 | ||
3620 | hwp = devres_alloc(devm_clk_hw_release, sizeof(*hwp), GFP_KERNEL); | |
3621 | if (!hwp) | |
3622 | return -ENOMEM; | |
3623 | ||
3624 | ret = clk_hw_register(dev, hw); | |
3625 | if (!ret) { | |
3626 | *hwp = hw; | |
3627 | devres_add(dev, hwp); | |
3628 | } else { | |
3629 | devres_free(hwp); | |
3630 | } | |
3631 | ||
3632 | return ret; | |
3633 | } | |
3634 | EXPORT_SYMBOL_GPL(devm_clk_hw_register); | |
3635 | ||
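For the managed variant, a brief sketch of a probe function is shown below; my_hw is assumed to have its .init pointing at a populated clk_init_data, as in the earlier clk_hw_register() example, and the clock is unregistered automatically on driver detach.

static struct clk_hw my_hw;	/* .init assumed to reference a populated clk_init_data */

static int my_probe(struct platform_device *pdev)
{
	/* no clk_hw_unregister() needed in a remove() path */
	return devm_clk_hw_register(&pdev->dev, &my_hw);
}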
46c8773a SB |
3636 | static int devm_clk_match(struct device *dev, void *res, void *data) |
3637 | { | |
3638 | struct clk *c = res; | |
3639 | if (WARN_ON(!c)) | |
3640 | return 0; | |
3641 | return c == data; | |
3642 | } | |
3643 | ||
4143804c SB |
3644 | static int devm_clk_hw_match(struct device *dev, void *res, void *data) |
3645 | { | |
3646 | struct clk_hw *hw = res; | |
3647 | ||
3648 | if (WARN_ON(!hw)) | |
3649 | return 0; | |
3650 | return hw == data; | |
3651 | } | |
3652 | ||
46c8773a SB |
3653 | /** |
3654 | * devm_clk_unregister - resource managed clk_unregister() | |
3655 | * @clk: clock to unregister | |
3656 | * | |
3657 | * Deallocate a clock allocated with devm_clk_register(). Normally | |
3658 | * this function will not need to be called and the resource management | |
3659 | * code will ensure that the resource is freed. | |
3660 | */ | |
3661 | void devm_clk_unregister(struct device *dev, struct clk *clk) | |
3662 | { | |
3663 | WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk)); | |
3664 | } | |
3665 | EXPORT_SYMBOL_GPL(devm_clk_unregister); | |
3666 | ||
4143804c SB |
3667 | /** |
3668 | * devm_clk_hw_unregister - resource managed clk_hw_unregister() | |
3669 | * @dev: device that is unregistering the hardware-specific clock data | |
3670 | * @hw: link to hardware-specific clock data | |
3671 | * | |
3672 | * Unregister a clk_hw registered with devm_clk_hw_register(). Normally | |
3673 | * this function will not need to be called and the resource management | |
3674 | * code will ensure that the resource is freed. | |
3675 | */ | |
3676 | void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw) | |
3677 | { | |
3678 | WARN_ON(devres_release(dev, devm_clk_hw_release, devm_clk_hw_match, | |
3679 | hw)); | |
3680 | } | |
3681 | EXPORT_SYMBOL_GPL(devm_clk_hw_unregister); | |
3682 | ||
ac2df527 SN |
3683 | /* |
3684 | * clkdev helpers | |
3685 | */ | |
ac2df527 SN |
3686 | |
3687 | void __clk_put(struct clk *clk) | |
3688 | { | |
10cdfe54 TV |
3689 | struct module *owner; |
3690 | ||
00efcb1c | 3691 | if (!clk || WARN_ON_ONCE(IS_ERR(clk))) |
ac2df527 SN |
3692 | return; |
3693 | ||
fcb0ee6a | 3694 | clk_prepare_lock(); |
1c8e6004 | 3695 | |
55e9b8b7 JB |
3696 | /* |
3697 | * Before calling clk_put, all calls to clk_rate_exclusive_get() from a |
3698 | * given consumer should have been balanced with calls to |
3699 | * clk_rate_exclusive_put() by that same consumer. |
3700 | */ | |
3701 | if (WARN_ON(clk->exclusive_count)) { | |
3702 | /* We voiced our concern, let's sanitize the situation */ | |
3703 | clk->core->protect_count -= (clk->exclusive_count - 1); | |
3704 | clk_core_rate_unprotect(clk->core); | |
3705 | clk->exclusive_count = 0; | |
3706 | } | |
3707 | ||
50595f8b | 3708 | hlist_del(&clk->clks_node); |
ec02ace8 TV |
3709 | if (clk->min_rate > clk->core->req_rate || |
3710 | clk->max_rate < clk->core->req_rate) | |
3711 | clk_core_set_rate_nolock(clk->core, clk->core->req_rate); | |
3712 | ||
1c8e6004 TV |
3713 | owner = clk->core->owner; |
3714 | kref_put(&clk->core->ref, __clk_release); | |
3715 | ||
fcb0ee6a SN |
3716 | clk_prepare_unlock(); |
3717 | ||
10cdfe54 | 3718 | module_put(owner); |
035a61c3 | 3719 | |
1df4046a | 3720 | free_clk(clk); |
ac2df527 SN |
3721 | } |
3722 | ||
b2476490 MT |
3723 | /*** clk rate change notifiers ***/ |
3724 | ||
3725 | /** | |
3726 | * clk_notifier_register - add a clk rate change notifier | |
3727 | * @clk: struct clk * to watch | |
3728 | * @nb: struct notifier_block * with callback info | |
3729 | * | |
3730 | * Request notification when clk's rate changes. This uses an SRCU | |
3731 | * notifier because we want it to block and notifier unregistrations are | |
3732 | * uncommon. The callbacks associated with the notifier must not | |
3733 | * re-enter the clk framework by calling any top-level clk APIs; |
3734 | * doing so would attempt to take the prepare_lock mutex recursively. |
3735 | * | |
198bb594 MY |
3736 | * In all notification cases (pre, post and abort rate change) the original |
3737 | * clock rate is passed to the callback via struct clk_notifier_data.old_rate | |
3738 | * and the new frequency is passed via struct clk_notifier_data.new_rate. | |
b2476490 | 3739 | * |
b2476490 MT |
3740 | * clk_notifier_register() must be called from non-atomic context. |
3741 | * Returns -EINVAL if called with null arguments, -ENOMEM upon | |
3742 | * allocation failure; otherwise, passes along the return value of | |
3743 | * srcu_notifier_chain_register(). | |
3744 | */ | |
3745 | int clk_notifier_register(struct clk *clk, struct notifier_block *nb) | |
3746 | { | |
3747 | struct clk_notifier *cn; | |
3748 | int ret = -ENOMEM; | |
3749 | ||
3750 | if (!clk || !nb) | |
3751 | return -EINVAL; | |
3752 | ||
eab89f69 | 3753 | clk_prepare_lock(); |
b2476490 MT |
3754 | |
3755 | /* search the list of notifiers for this clk */ | |
3756 | list_for_each_entry(cn, &clk_notifier_list, node) | |
3757 | if (cn->clk == clk) | |
3758 | break; | |
3759 | ||
3760 | /* if clk wasn't in the notifier list, allocate new clk_notifier */ | |
3761 | if (cn->clk != clk) { | |
1808a320 | 3762 | cn = kzalloc(sizeof(*cn), GFP_KERNEL); |
b2476490 MT |
3763 | if (!cn) |
3764 | goto out; | |
3765 | ||
3766 | cn->clk = clk; | |
3767 | srcu_init_notifier_head(&cn->notifier_head); | |
3768 | ||
3769 | list_add(&cn->node, &clk_notifier_list); | |
3770 | } | |
3771 | ||
3772 | ret = srcu_notifier_chain_register(&cn->notifier_head, nb); | |
3773 | ||
035a61c3 | 3774 | clk->core->notifier_count++; |
b2476490 MT |
3775 | |
3776 | out: | |
eab89f69 | 3777 | clk_prepare_unlock(); |
b2476490 MT |
3778 | |
3779 | return ret; | |
3780 | } | |
3781 | EXPORT_SYMBOL_GPL(clk_notifier_register); | |
3782 | ||
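A hedged example of a rate-change notifier as described above: the callback receives a struct clk_notifier_data carrying old_rate and new_rate, and the clk to watch is assumed to have been obtained elsewhere via clk_get(). The my_* names are invented for this sketch.

#include <linux/clk.h>
#include <linux/notifier.h>

static int my_rate_notifier(struct notifier_block *nb, unsigned long event,
			    void *data)
{
	struct clk_notifier_data *ndata = data;

	switch (event) {
	case PRE_RATE_CHANGE:
		/* e.g. raise a supply before the rate goes up */
		pr_debug("rate will change: %lu -> %lu\n",
			 ndata->old_rate, ndata->new_rate);
		return NOTIFY_OK;
	case POST_RATE_CHANGE:
	case ABORT_RATE_CHANGE:
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block my_nb = {
	.notifier_call = my_rate_notifier,
};

/* in probe, with a struct clk *clk already obtained via clk_get(): */
/*	ret = clk_notifier_register(clk, &my_nb); */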
3783 | /** | |
3784 | * clk_notifier_unregister - remove a clk rate change notifier | |
3785 | * @clk: struct clk * | |
3786 | * @nb: struct notifier_block * with callback info | |
3787 | * | |
3788 | * Request no further notification for changes to 'clk' and frees memory | |
3789 | * allocated in clk_notifier_register. | |
3790 | * | |
3791 | * Returns -EINVAL if called with null arguments; otherwise, passes | |
3792 | * along the return value of srcu_notifier_chain_unregister(). | |
3793 | */ | |
3794 | int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb) | |
3795 | { | |
3796 | struct clk_notifier *cn = NULL; | |
3797 | int ret = -EINVAL; | |
3798 | ||
3799 | if (!clk || !nb) | |
3800 | return -EINVAL; | |
3801 | ||
eab89f69 | 3802 | clk_prepare_lock(); |
b2476490 MT |
3803 | |
3804 | list_for_each_entry(cn, &clk_notifier_list, node) | |
3805 | if (cn->clk == clk) | |
3806 | break; | |
3807 | ||
3808 | if (cn->clk == clk) { | |
3809 | ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb); | |
3810 | ||
035a61c3 | 3811 | clk->core->notifier_count--; |
b2476490 MT |
3812 | |
3813 | /* XXX the notifier code should handle this better */ | |
3814 | if (!cn->notifier_head.head) { | |
3815 | srcu_cleanup_notifier_head(&cn->notifier_head); | |
72b5322f | 3816 | list_del(&cn->node); |
b2476490 MT |
3817 | kfree(cn); |
3818 | } | |
3819 | ||
3820 | } else { | |
3821 | ret = -ENOENT; | |
3822 | } | |
3823 | ||
eab89f69 | 3824 | clk_prepare_unlock(); |
b2476490 MT |
3825 | |
3826 | return ret; | |
3827 | } | |
3828 | EXPORT_SYMBOL_GPL(clk_notifier_unregister); | |
766e6a4e GL |
3829 | |
3830 | #ifdef CONFIG_OF | |
3831 | /** | |
3832 | * struct of_clk_provider - Clock provider registration structure | |
3833 | * @link: Entry in global list of clock providers | |
3834 | * @node: Pointer to device tree node of clock provider | |
3835 | * @get: Get clock callback. Returns NULL or a struct clk for the given clock specifier |
3836 | * @get_hw: Get clk_hw callback. Returns NULL, an ERR_PTR or a struct clk_hw for the given clock specifier |
3837 | * @data: context pointer to be passed into @get or @get_hw callback |
3838 | */ | |
3839 | struct of_clk_provider { | |
3840 | struct list_head link; | |
3841 | ||
3842 | struct device_node *node; | |
3843 | struct clk *(*get)(struct of_phandle_args *clkspec, void *data); | |
0861e5b8 | 3844 | struct clk_hw *(*get_hw)(struct of_phandle_args *clkspec, void *data); |
766e6a4e GL |
3845 | void *data; |
3846 | }; | |
3847 | ||
f2f6c255 PG |
3848 | static const struct of_device_id __clk_of_table_sentinel |
3849 | __used __section(__clk_of_table_end); | |
3850 | ||
766e6a4e | 3851 | static LIST_HEAD(of_clk_providers); |
d6782c26 SN |
3852 | static DEFINE_MUTEX(of_clk_mutex); |
3853 | ||
766e6a4e GL |
3854 | struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec, |
3855 | void *data) | |
3856 | { | |
3857 | return data; | |
3858 | } | |
3859 | EXPORT_SYMBOL_GPL(of_clk_src_simple_get); | |
3860 | ||
0861e5b8 SB |
3861 | struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec, void *data) |
3862 | { | |
3863 | return data; | |
3864 | } | |
3865 | EXPORT_SYMBOL_GPL(of_clk_hw_simple_get); | |
3866 | ||
494bfec9 SG |
3867 | struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data) |
3868 | { | |
3869 | struct clk_onecell_data *clk_data = data; | |
3870 | unsigned int idx = clkspec->args[0]; | |
3871 | ||
3872 | if (idx >= clk_data->clk_num) { | |
7e96353c | 3873 | pr_err("%s: invalid clock index %u\n", __func__, idx); |
494bfec9 SG |
3874 | return ERR_PTR(-EINVAL); |
3875 | } | |
3876 | ||
3877 | return clk_data->clks[idx]; | |
3878 | } | |
3879 | EXPORT_SYMBOL_GPL(of_clk_src_onecell_get); | |
3880 | ||
0861e5b8 SB |
3881 | struct clk_hw * |
3882 | of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data) | |
3883 | { | |
3884 | struct clk_hw_onecell_data *hw_data = data; | |
3885 | unsigned int idx = clkspec->args[0]; | |
3886 | ||
3887 | if (idx >= hw_data->num) { | |
3888 | pr_err("%s: invalid index %u\n", __func__, idx); | |
3889 | return ERR_PTR(-EINVAL); | |
3890 | } | |
3891 | ||
3892 | return hw_data->hws[idx]; | |
3893 | } | |
3894 | EXPORT_SYMBOL_GPL(of_clk_hw_onecell_get); | |
3895 | ||
766e6a4e GL |
3896 | /** |
3897 | * of_clk_add_provider() - Register a clock provider for a node | |
3898 | * @np: Device node pointer associated with clock provider | |
3899 | * @clk_src_get: callback for decoding clock | |
3900 | * @data: context pointer for @clk_src_get callback. | |
9fe9b7ab SB |
3901 | * |
3902 | * This function is *deprecated*. Use of_clk_add_hw_provider() instead. | |
766e6a4e GL |
3903 | */ |
3904 | int of_clk_add_provider(struct device_node *np, | |
3905 | struct clk *(*clk_src_get)(struct of_phandle_args *clkspec, | |
3906 | void *data), | |
3907 | void *data) | |
3908 | { | |
3909 | struct of_clk_provider *cp; | |
86be408b | 3910 | int ret; |
766e6a4e | 3911 | |
1808a320 | 3912 | cp = kzalloc(sizeof(*cp), GFP_KERNEL); |
766e6a4e GL |
3913 | if (!cp) |
3914 | return -ENOMEM; | |
3915 | ||
3916 | cp->node = of_node_get(np); | |
3917 | cp->data = data; | |
3918 | cp->get = clk_src_get; | |
3919 | ||
d6782c26 | 3920 | mutex_lock(&of_clk_mutex); |
766e6a4e | 3921 | list_add(&cp->link, &of_clk_providers); |
d6782c26 | 3922 | mutex_unlock(&of_clk_mutex); |
16673931 | 3923 | pr_debug("Added clock from %pOF\n", np); |
766e6a4e | 3924 | |
86be408b SN |
3925 | ret = of_clk_set_defaults(np, true); |
3926 | if (ret < 0) | |
3927 | of_clk_del_provider(np); | |
3928 | ||
3929 | return ret; | |
766e6a4e GL |
3930 | } |
3931 | EXPORT_SYMBOL_GPL(of_clk_add_provider); | |
3932 | ||
0861e5b8 SB |
3933 | /** |
3934 | * of_clk_add_hw_provider() - Register a clock provider for a node | |
3935 | * @np: Device node pointer associated with clock provider | |
3936 | * @get: callback for decoding clk_hw | |
3937 | * @data: context pointer for @get callback. | |
3938 | */ | |
3939 | int of_clk_add_hw_provider(struct device_node *np, | |
3940 | struct clk_hw *(*get)(struct of_phandle_args *clkspec, | |
3941 | void *data), | |
3942 | void *data) | |
3943 | { | |
3944 | struct of_clk_provider *cp; | |
3945 | int ret; | |
3946 | ||
3947 | cp = kzalloc(sizeof(*cp), GFP_KERNEL); | |
3948 | if (!cp) | |
3949 | return -ENOMEM; | |
3950 | ||
3951 | cp->node = of_node_get(np); | |
3952 | cp->data = data; | |
3953 | cp->get_hw = get; | |
3954 | ||
3955 | mutex_lock(&of_clk_mutex); | |
3956 | list_add(&cp->link, &of_clk_providers); | |
3957 | mutex_unlock(&of_clk_mutex); | |
16673931 | 3958 | pr_debug("Added clk_hw provider from %pOF\n", np); |
0861e5b8 SB |
3959 | |
3960 | ret = of_clk_set_defaults(np, true); | |
3961 | if (ret < 0) | |
3962 | of_clk_del_provider(np); | |
3963 | ||
3964 | return ret; | |
3965 | } | |
3966 | EXPORT_SYMBOL_GPL(of_clk_add_hw_provider); | |
3967 | ||
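A sketch of how a provider with several outputs might pair of_clk_add_hw_provider() with of_clk_hw_onecell_get(), under the assumption that the individual clk_hw pointers are registered elsewhere; my_hw_data, my_clks_init and the clock count are invented for this example.

#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/slab.h>

static struct clk_hw_onecell_data *my_hw_data;

static void __init my_clks_init(struct device_node *np)
{
	const int num = 2;	/* number of clocks exposed by this node */

	my_hw_data = kzalloc(sizeof(*my_hw_data) +
			     num * sizeof(*my_hw_data->hws), GFP_KERNEL);
	if (!my_hw_data)
		return;
	my_hw_data->num = num;

	/* my_hw_data->hws[0..num-1] would be filled with registered clk_hw pointers */

	if (of_clk_add_hw_provider(np, of_clk_hw_onecell_get, my_hw_data))
		pr_err("%pOF: failed to add clk provider\n", np);
}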
aa795c41 SB |
3968 | static void devm_of_clk_release_provider(struct device *dev, void *res) |
3969 | { | |
3970 | of_clk_del_provider(*(struct device_node **)res); | |
3971 | } | |
3972 | ||
05502bf9 MV |
3973 | /* |
3974 | * We allow a child device to use its parent device as the clock provider node | |
3975 | * for cases like MFD sub-devices where the child device driver wants to use | |
3976 | * devm_*() APIs but not list the device in DT as a sub-node. | |
3977 | */ | |
3978 | static struct device_node *get_clk_provider_node(struct device *dev) | |
3979 | { | |
3980 | struct device_node *np, *parent_np; | |
3981 | ||
3982 | np = dev->of_node; | |
3983 | parent_np = dev->parent ? dev->parent->of_node : NULL; | |
3984 | ||
3985 | if (!of_find_property(np, "#clock-cells", NULL)) | |
3986 | if (of_find_property(parent_np, "#clock-cells", NULL)) | |
3987 | np = parent_np; | |
3988 | ||
3989 | return np; | |
3990 | } | |
3991 | ||
e45838b5 MV |
3992 | /** |
3993 | * devm_of_clk_add_hw_provider() - Managed clk provider node registration | |
3994 | * @dev: Device acting as the clock provider (used for DT node and lifetime) | |
3995 | * @get: callback for decoding clk_hw | |
3996 | * @data: context pointer for @get callback | |
3997 | * | |
05502bf9 MV |
3998 | * Registers a clock provider for the given device's node. If the device has no |
3999 | * DT node, or if its node lacks clock provider information (#clock-cells), |
4000 | * then the parent device's node is scanned for this information. If the parent |
4001 | * node has #clock-cells then it is used for the registration. The provider is |
4002 | * automatically released at device exit. |
e45838b5 MV |
4003 | * |
4004 | * Return: 0 on success or an errno on failure. | |
4005 | */ | |
aa795c41 SB |
4006 | int devm_of_clk_add_hw_provider(struct device *dev, |
4007 | struct clk_hw *(*get)(struct of_phandle_args *clkspec, | |
4008 | void *data), | |
4009 | void *data) | |
4010 | { | |
4011 | struct device_node **ptr, *np; | |
4012 | int ret; | |
4013 | ||
4014 | ptr = devres_alloc(devm_of_clk_release_provider, sizeof(*ptr), | |
4015 | GFP_KERNEL); | |
4016 | if (!ptr) | |
4017 | return -ENOMEM; | |
4018 | ||
05502bf9 | 4019 | np = get_clk_provider_node(dev); |
aa795c41 SB |
4020 | ret = of_clk_add_hw_provider(np, get, data); |
4021 | if (!ret) { | |
4022 | *ptr = np; | |
4023 | devres_add(dev, ptr); | |
4024 | } else { | |
4025 | devres_free(ptr); | |
4026 | } | |
4027 | ||
4028 | return ret; | |
4029 | } | |
4030 | EXPORT_SYMBOL_GPL(devm_of_clk_add_hw_provider); | |
4031 | ||
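For a single-output provider, the managed registration can be as small as the sketch below; my_hw is assumed to have been registered already (for example with devm_clk_hw_register()), and of_clk_hw_simple_get() simply hands that clk_hw back to consumers. The function and variable names are hypothetical.

static struct clk_hw my_hw;	/* assumed registered with devm_clk_hw_register() */

static int my_provider_probe(struct platform_device *pdev)
{
	/* expose the single clk_hw; the provider is removed on driver detach */
	return devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_simple_get,
					   &my_hw);
}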
766e6a4e GL |
4032 | /** |
4033 | * of_clk_del_provider() - Remove a previously registered clock provider | |
4034 | * @np: Device node pointer associated with clock provider | |
4035 | */ | |
4036 | void of_clk_del_provider(struct device_node *np) | |
4037 | { | |
4038 | struct of_clk_provider *cp; | |
4039 | ||
d6782c26 | 4040 | mutex_lock(&of_clk_mutex); |
766e6a4e GL |
4041 | list_for_each_entry(cp, &of_clk_providers, link) { |
4042 | if (cp->node == np) { | |
4043 | list_del(&cp->link); | |
4044 | of_node_put(cp->node); | |
4045 | kfree(cp); | |
4046 | break; | |
4047 | } | |
4048 | } | |
d6782c26 | 4049 | mutex_unlock(&of_clk_mutex); |
766e6a4e GL |
4050 | } |
4051 | EXPORT_SYMBOL_GPL(of_clk_del_provider); | |
4052 | ||
aa795c41 SB |
4053 | static int devm_clk_provider_match(struct device *dev, void *res, void *data) |
4054 | { | |
4055 | struct device_node **np = res; | |
4056 | ||
4057 | if (WARN_ON(!np || !*np)) | |
4058 | return 0; | |
4059 | ||
4060 | return *np == data; | |
4061 | } | |
4062 | ||
e45838b5 MV |
4063 | /** |
4064 | * devm_of_clk_del_provider() - Remove clock provider registered using devm | |
4065 | * @dev: Device to whose lifetime the clock provider was bound | |
4066 | */ | |
aa795c41 SB |
4067 | void devm_of_clk_del_provider(struct device *dev) |
4068 | { | |
4069 | int ret; | |
05502bf9 | 4070 | struct device_node *np = get_clk_provider_node(dev); |
aa795c41 SB |
4071 | |
4072 | ret = devres_release(dev, devm_of_clk_release_provider, | |
05502bf9 | 4073 | devm_clk_provider_match, np); |
aa795c41 SB |
4074 | |
4075 | WARN_ON(ret); | |
4076 | } | |
4077 | EXPORT_SYMBOL(devm_of_clk_del_provider); | |
4078 | ||
5dc7e842 SB |
4079 | /* |
4080 | * Beware the return values when np is valid, but no clock provider is found. | |
4081 | * If name == NULL, the function returns -ENOENT. | |
4082 | * If name != NULL, the function returns -EINVAL. This is because | |
4083 | * of_parse_phandle_with_args() is called even if of_property_match_string() | |
4084 | * returns an error. | |
4085 | */ | |
cf13f289 SB |
4086 | static int of_parse_clkspec(const struct device_node *np, int index, |
4087 | const char *name, struct of_phandle_args *out_args) | |
4472287a SB |
4088 | { |
4089 | int ret = -ENOENT; | |
4090 | ||
4091 | /* Walk up the tree of devices looking for a clock property that matches */ | |
4092 | while (np) { | |
4093 | /* | |
4094 | * For named clocks, first look up the name in the | |
4095 | * "clock-names" property. If it cannot be found, then index | |
4096 | * will be an error code and of_parse_phandle_with_args() will | |
4097 | * return -EINVAL. | |
4098 | */ | |
4099 | if (name) | |
4100 | index = of_property_match_string(np, "clock-names", name); | |
4101 | ret = of_parse_phandle_with_args(np, "clocks", "#clock-cells", | |
4102 | index, out_args); | |
4103 | if (!ret) | |
4104 | break; | |
4105 | if (name && index >= 0) | |
4106 | break; | |
4107 | ||
4108 | /* | |
4109 | * No matching clock found on this node. If the parent node | |
4110 | * has a "clock-ranges" property, then we can try one of its | |
4111 | * clocks. | |
4112 | */ | |
4113 | np = np->parent; | |
4114 | if (np && !of_get_property(np, "clock-ranges", NULL)) | |
4115 | break; | |
4116 | index = 0; | |
4117 | } | |
4118 | ||
4119 | return ret; | |
4120 | } | |
4121 | ||
0861e5b8 SB |
4122 | static struct clk_hw * |
4123 | __of_clk_get_hw_from_provider(struct of_clk_provider *provider, | |
4124 | struct of_phandle_args *clkspec) | |
4125 | { | |
4126 | struct clk *clk; | |
0861e5b8 | 4127 | |
74002fcd SB |
4128 | if (provider->get_hw) |
4129 | return provider->get_hw(clkspec, provider->data); | |
0861e5b8 | 4130 | |
74002fcd SB |
4131 | clk = provider->get(clkspec, provider->data); |
4132 | if (IS_ERR(clk)) | |
4133 | return ERR_CAST(clk); | |
4134 | return __clk_get_hw(clk); | |
0861e5b8 SB |
4135 | } |
4136 | ||
cf13f289 SB |
4137 | static struct clk_hw * |
4138 | of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec) | |
766e6a4e GL |
4139 | { |
4140 | struct of_clk_provider *provider; | |
1df4046a | 4141 | struct clk_hw *hw = ERR_PTR(-EPROBE_DEFER); |
766e6a4e | 4142 | |
306c342f SB |
4143 | if (!clkspec) |
4144 | return ERR_PTR(-EINVAL); | |
4145 | ||
306c342f | 4146 | mutex_lock(&of_clk_mutex); |
766e6a4e | 4147 | list_for_each_entry(provider, &of_clk_providers, link) { |
f155d15b | 4148 | if (provider->node == clkspec->np) { |
0861e5b8 | 4149 | hw = __of_clk_get_hw_from_provider(provider, clkspec); |
1df4046a SB |
4150 | if (!IS_ERR(hw)) |
4151 | break; | |
73e0e496 | 4152 | } |
766e6a4e | 4153 | } |
306c342f | 4154 | mutex_unlock(&of_clk_mutex); |
d6782c26 | 4155 | |
4472287a | 4156 | return hw; |
d6782c26 SN |
4157 | } |
4158 | ||
306c342f SB |
4159 | /** |
4160 | * of_clk_get_from_provider() - Lookup a clock from a clock provider | |
4161 | * @clkspec: pointer to a clock specifier data structure | |
4162 | * | |
4163 | * This function looks up a struct clk from the registered list of clock | |
4164 | * providers; the input is a clock specifier data structure as returned |
4165 | * from the of_parse_phandle_with_args() function call. | |
4166 | */ | |
d6782c26 SN |
4167 | struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec) |
4168 | { | |
4472287a SB |
4169 | struct clk_hw *hw = of_clk_get_hw_from_clkspec(clkspec); |
4170 | ||
efa85048 | 4171 | return clk_hw_create_clk(NULL, hw, NULL, __func__); |
766e6a4e | 4172 | } |
fb4dd222 | 4173 | EXPORT_SYMBOL_GPL(of_clk_get_from_provider); |
766e6a4e | 4174 | |
cf13f289 SB |
4175 | struct clk_hw *of_clk_get_hw(struct device_node *np, int index, |
4176 | const char *con_id) | |
4177 | { | |
4178 | int ret; | |
4179 | struct clk_hw *hw; | |
4180 | struct of_phandle_args clkspec; | |
4181 | ||
4182 | ret = of_parse_clkspec(np, index, con_id, &clkspec); | |
4183 | if (ret) | |
4184 | return ERR_PTR(ret); | |
4185 | ||
4186 | hw = of_clk_get_hw_from_clkspec(&clkspec); | |
4187 | of_node_put(clkspec.np); | |
4188 | ||
4189 | return hw; | |
4190 | } | |
4191 | ||
4192 | static struct clk *__of_clk_get(struct device_node *np, | |
4193 | int index, const char *dev_id, | |
4194 | const char *con_id) | |
4195 | { | |
4196 | struct clk_hw *hw = of_clk_get_hw(np, index, con_id); | |
4197 | ||
4198 | return clk_hw_create_clk(NULL, hw, dev_id, con_id); | |
4199 | } | |
4200 | ||
4201 | struct clk *of_clk_get(struct device_node *np, int index) | |
4202 | { | |
4203 | return __of_clk_get(np, index, np->full_name, NULL); | |
4204 | } | |
4205 | EXPORT_SYMBOL(of_clk_get); | |
4206 | ||
4207 | /** | |
4208 | * of_clk_get_by_name() - Parse and lookup a clock referenced by a device node | |
4209 | * @np: pointer to clock consumer node | |
4210 | * @name: name of consumer's clock input, or NULL for the first clock reference | |
4211 | * | |
4212 | * This function parses the clocks and clock-names properties, | |
4213 | * and uses them to look up the struct clk from the registered list of clock | |
4214 | * providers. | |
4215 | */ | |
4216 | struct clk *of_clk_get_by_name(struct device_node *np, const char *name) | |
4217 | { | |
4218 | if (!np) | |
4219 | return ERR_PTR(-ENOENT); | |
4220 | ||
65cf20ad | 4221 | return __of_clk_get(np, 0, np->full_name, name); |
cf13f289 SB |
4222 | } |
4223 | EXPORT_SYMBOL(of_clk_get_by_name); | |
4224 | ||
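A consumer-side sketch of of_clk_get_by_name(), assuming a hypothetical device node that lists two clocks by name; many drivers would use devm_clk_get() instead, but the lookup path through clocks/clock-names is the same.

/*
 * Assuming a consumer node such as:
 *	clocks = <&cru 10>, <&cru 11>;
 *	clock-names = "bus", "core";
 */
static int my_consumer_probe(struct platform_device *pdev)
{
	struct clk *core_clk;

	core_clk = of_clk_get_by_name(pdev->dev.of_node, "core");
	if (IS_ERR(core_clk))
		return PTR_ERR(core_clk);

	/* ... clk_prepare_enable(core_clk), etc.; clk_put() when done ... */
	return 0;
}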
929e7f3b SB |
4225 | /** |
4226 | * of_clk_get_parent_count() - Count the number of clocks a device node has | |
4227 | * @np: device node to count | |
4228 | * | |
4229 | * Returns: The number of clocks that are possible parents of this node | |
4230 | */ | |
4231 | unsigned int of_clk_get_parent_count(struct device_node *np) | |
f6102742 | 4232 | { |
929e7f3b SB |
4233 | int count; |
4234 | ||
4235 | count = of_count_phandle_with_args(np, "clocks", "#clock-cells"); | |
4236 | if (count < 0) | |
4237 | return 0; | |
4238 | ||
4239 | return count; | |
f6102742 MT |
4240 | } |
4241 | EXPORT_SYMBOL_GPL(of_clk_get_parent_count); | |
4242 | ||
766e6a4e GL |
4243 | const char *of_clk_get_parent_name(struct device_node *np, int index) |
4244 | { | |
4245 | struct of_phandle_args clkspec; | |
7a0fc1a3 | 4246 | struct property *prop; |
766e6a4e | 4247 | const char *clk_name; |
7a0fc1a3 BD |
4248 | const __be32 *vp; |
4249 | u32 pv; | |
766e6a4e | 4250 | int rc; |
7a0fc1a3 | 4251 | int count; |
0a4807c2 | 4252 | struct clk *clk; |
766e6a4e | 4253 | |
766e6a4e GL |
4254 | rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index, |
4255 | &clkspec); | |
4256 | if (rc) | |
4257 | return NULL; | |
4258 | ||
7a0fc1a3 BD |
4259 | index = clkspec.args_count ? clkspec.args[0] : 0; |
4260 | count = 0; | |
4261 | ||
4262 | /* if there is an indices property, use it to transfer the index | |
4263 | * specified into an array offset for the clock-output-names property. | |
4264 | */ | |
4265 | of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) { | |
4266 | if (index == pv) { | |
4267 | index = count; | |
4268 | break; | |
4269 | } | |
4270 | count++; | |
4271 | } | |
8da411cc MY |
4272 | /* We went off the end of 'clock-indices' without finding it */ |
4273 | if (prop && !vp) | |
4274 | return NULL; | |
7a0fc1a3 | 4275 | |
766e6a4e | 4276 | if (of_property_read_string_index(clkspec.np, "clock-output-names", |
7a0fc1a3 | 4277 | index, |
0a4807c2 SB |
4278 | &clk_name) < 0) { |
4279 | /* | |
4280 | * Best effort to get the name if the clock has been | |
4281 | * registered with the framework. If the clock isn't | |
4282 | * registered, we return the node name as the name of | |
4283 | * the clock as long as #clock-cells = 0. | |
4284 | */ | |
4285 | clk = of_clk_get_from_provider(&clkspec); | |
4286 | if (IS_ERR(clk)) { | |
4287 | if (clkspec.args_count == 0) | |
4288 | clk_name = clkspec.np->name; | |
4289 | else | |
4290 | clk_name = NULL; | |
4291 | } else { | |
4292 | clk_name = __clk_get_name(clk); | |
4293 | clk_put(clk); | |
4294 | } | |
4295 | } | |
4296 | ||
766e6a4e GL |
4297 | |
4298 | of_node_put(clkspec.np); | |
4299 | return clk_name; | |
4300 | } | |
4301 | EXPORT_SYMBOL_GPL(of_clk_get_parent_name); | |
4302 | ||
2e61dfb3 DN |
4303 | /** |
4304 | * of_clk_parent_fill() - Fill @parents with names of @np's parents and return | |
4305 | * number of parents | |
4306 | * @np: Device node pointer associated with clock provider | |
4307 | * @parents: pointer to char array that holds the parents' names |
4308 | * @size: size of the @parents array | |
4309 | * | |
4310 | * Return: number of parents for the clock node. | |
4311 | */ | |
4312 | int of_clk_parent_fill(struct device_node *np, const char **parents, | |
4313 | unsigned int size) | |
4314 | { | |
4315 | unsigned int i = 0; | |
4316 | ||
4317 | while (i < size && (parents[i] = of_clk_get_parent_name(np, i)) != NULL) | |
4318 | i++; | |
4319 | ||
4320 | return i; | |
4321 | } | |
4322 | EXPORT_SYMBOL_GPL(of_clk_parent_fill); | |
4323 | ||
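A short sketch of of_clk_parent_fill() being used by a hypothetical mux setup function: the parent names are collected from the node's "clocks" phandles and can then feed a clk_init_data. The my_* names and the array size of 4 are invented for the example.

static const char *my_parents[4];

static void __init my_mux_setup(struct device_node *np)
{
	int num_parents;

	/* collect up to 4 parent names from the "clocks" phandles of @np */
	num_parents = of_clk_parent_fill(np, my_parents, ARRAY_SIZE(my_parents));

	/*
	 * my_parents[0..num_parents-1] can now be handed to clk_init_data as
	 * .parent_names / .num_parents when registering a mux clock.
	 */
}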
1771b10d | 4324 | struct clock_provider { |
a5970433 | 4325 | void (*clk_init_cb)(struct device_node *); |
1771b10d GC |
4326 | struct device_node *np; |
4327 | struct list_head node; | |
4328 | }; | |
4329 | ||
1771b10d GC |
4330 | /* |
4331 | * This function looks for a parent clock. If there is one, then it | |
4332 | * checks that the provider for this parent clock was initialized, in | |
4333 | * this case the parent clock will be ready. | |
4334 | */ | |
4335 | static int parent_ready(struct device_node *np) | |
4336 | { | |
4337 | int i = 0; | |
4338 | ||
4339 | while (true) { | |
4340 | struct clk *clk = of_clk_get(np, i); | |
4341 | ||
4342 | /* this parent is ready we can check the next one */ | |
4343 | if (!IS_ERR(clk)) { | |
4344 | clk_put(clk); | |
4345 | i++; | |
4346 | continue; | |
4347 | } | |
4348 | ||
4349 | /* at least one parent is not ready, we exit now */ | |
4350 | if (PTR_ERR(clk) == -EPROBE_DEFER) | |
4351 | return 0; | |
4352 | ||
4353 | * Here we assume that the device tree is written |
4354 | * correctly, so any other error means that there are |
4355 | * no more parents. Since we haven't exited yet, the |
4356 | * previous parents are ready. If the clock has no |
4357 | * parents at all, there is nothing to wait for and we |
4358 | * can consider it ready. |
4359 | * consider their absence as being ready | |
4360 | */ | |
4361 | return 1; | |
4362 | } | |
4363 | } | |
4364 | ||
d56f8994 LJ |
4365 | /** |
4366 | * of_clk_detect_critical() - set CLK_IS_CRITICAL flag from Device Tree | |
4367 | * @np: Device node pointer associated with clock provider | |
4368 | * @index: clock index | |
f7ae7503 | 4369 | * @flags: pointer to top-level framework flags |
d56f8994 LJ |
4370 | * |
4371 | * Detects if the clock-critical property exists and, if so, sets the | |
4372 | * corresponding CLK_IS_CRITICAL flag. | |
4373 | * | |
4374 | * Do not use this function. It exists only for legacy Device Tree | |
4375 | * bindings, such as the outdated one-clock-per-node style. |
4376 | * Those bindings typically put all clock data into .dts and the Linux | |
4377 | * driver has no clock data, thus making it impossible to set this flag | |
4378 | * correctly from the driver. Only those drivers may call | |
4379 | * of_clk_detect_critical from their setup functions. | |
4380 | * | |
4381 | * Return: error code or zero on success | |
4382 | */ | |
4383 | int of_clk_detect_critical(struct device_node *np, | |
4384 | int index, unsigned long *flags) | |
4385 | { | |
4386 | struct property *prop; | |
4387 | const __be32 *cur; | |
4388 | uint32_t idx; | |
4389 | ||
4390 | if (!np || !flags) | |
4391 | return -EINVAL; | |
4392 | ||
4393 | of_property_for_each_u32(np, "clock-critical", prop, cur, idx) | |
4394 | if (index == idx) | |
4395 | *flags |= CLK_IS_CRITICAL; | |
4396 | ||
4397 | return 0; | |
4398 | } | |
4399 | ||
766e6a4e GL |
4400 | /** |
4401 | * of_clk_init() - Scan and init clock providers from the DT | |
4402 | * @matches: array of compatible values and init functions for providers. | |
4403 | * | |
1771b10d | 4404 | * This function scans the device tree for matching clock providers |
e5ca8fb4 | 4405 | * and calls their initialization functions. It also does it by trying |
1771b10d | 4406 | * to follow the dependencies. |
766e6a4e GL |
4407 | */ |
4408 | void __init of_clk_init(const struct of_device_id *matches) | |
4409 | { | |
7f7ed584 | 4410 | const struct of_device_id *match; |
766e6a4e | 4411 | struct device_node *np; |
1771b10d GC |
4412 | struct clock_provider *clk_provider, *next; |
4413 | bool is_init_done; | |
4414 | bool force = false; | |
2573a02a | 4415 | LIST_HEAD(clk_provider_list); |
766e6a4e | 4416 | |
f2f6c255 | 4417 | if (!matches) |
819b4861 | 4418 | matches = &__clk_of_table; |
f2f6c255 | 4419 | |
1771b10d | 4420 | /* First prepare the list of the clocks providers */ |
7f7ed584 | 4421 | for_each_matching_node_and_match(np, matches, &match) { |
2e3b19f1 SB |
4422 | struct clock_provider *parent; |
4423 | ||
3e5dd6f6 GU |
4424 | if (!of_device_is_available(np)) |
4425 | continue; | |
4426 | ||
2e3b19f1 SB |
4427 | parent = kzalloc(sizeof(*parent), GFP_KERNEL); |
4428 | if (!parent) { | |
4429 | list_for_each_entry_safe(clk_provider, next, | |
4430 | &clk_provider_list, node) { | |
4431 | list_del(&clk_provider->node); | |
6bc9d9d6 | 4432 | of_node_put(clk_provider->np); |
2e3b19f1 SB |
4433 | kfree(clk_provider); |
4434 | } | |
6bc9d9d6 | 4435 | of_node_put(np); |
2e3b19f1 SB |
4436 | return; |
4437 | } | |
1771b10d GC |
4438 | |
4439 | parent->clk_init_cb = match->data; | |
6bc9d9d6 | 4440 | parent->np = of_node_get(np); |
3f6d439f | 4441 | list_add_tail(&parent->node, &clk_provider_list); |
1771b10d GC |
4442 | } |
4443 | ||
4444 | while (!list_empty(&clk_provider_list)) { | |
4445 | is_init_done = false; | |
4446 | list_for_each_entry_safe(clk_provider, next, | |
4447 | &clk_provider_list, node) { | |
4448 | if (force || parent_ready(clk_provider->np)) { | |
86be408b | 4449 | |
989eafd0 RRD |
4450 | /* Don't populate platform devices */ |
4451 | of_node_set_flag(clk_provider->np, | |
4452 | OF_POPULATED); | |
4453 | ||
1771b10d | 4454 | clk_provider->clk_init_cb(clk_provider->np); |
86be408b SN |
4455 | of_clk_set_defaults(clk_provider->np, true); |
4456 | ||
1771b10d | 4457 | list_del(&clk_provider->node); |
6bc9d9d6 | 4458 | of_node_put(clk_provider->np); |
1771b10d GC |
4459 | kfree(clk_provider); |
4460 | is_init_done = true; | |
4461 | } | |
4462 | } | |
4463 | ||
4464 | /* | |
e5ca8fb4 | 4465 | * We didn't manage to initialize any of the |
1771b10d GC |
4466 | * remaining providers during the last loop, so now we |
4467 | * initialize all the remaining ones unconditionally, |
4468 | * in case their parent clocks are optional |
4469 | */ | |
4470 | if (!is_init_done) | |
4471 | force = true; | |
766e6a4e GL |
4472 | } |
4473 | } | |
4474 | #endif |
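Providers picked up by of_clk_init() are declared with CLK_OF_DECLARE(), which adds an entry to the __clk_of_table scanned above. A hedged sketch of such an early provider, using invented names ("vendor,my-osc", my_osc_init) and the fixed-rate helper from the basic clock types:

static void __init my_osc_init(struct device_node *np)
{
	struct clk_hw *hw;
	u32 rate = 0;

	of_property_read_u32(np, "clock-frequency", &rate);

	hw = clk_hw_register_fixed_rate(NULL, np->name, NULL, 0, rate);
	if (IS_ERR(hw))
		return;

	of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
}
CLK_OF_DECLARE(my_osc, "vendor,my-osc", my_osc_init);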