// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
 *
 * Standard functionality for the common clock API. See Documentation/driver-api/clk.rst
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clk/clk-conf.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/clkdev.h>

#include "clk.h"

static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);

static struct task_struct *prepare_owner;
static struct task_struct *enable_owner;

static int prepare_refcnt;
static int enable_refcnt;

static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);

/*** private data structures ***/

struct clk_parent_map {
	const struct clk_hw *hw;
	struct clk_core *core;
	const char *fw_name;
	const char *name;
	int index;
};

struct clk_core {
	const char *name;
	const struct clk_ops *ops;
	struct clk_hw *hw;
	struct module *owner;
	struct device *dev;
	struct device_node *of_node;
	struct clk_core *parent;
	struct clk_parent_map *parents;
	u8 num_parents;
	u8 new_parent_index;
	unsigned long rate;
	unsigned long req_rate;
	unsigned long new_rate;
	struct clk_core *new_parent;
	struct clk_core *new_child;
	unsigned long flags;
	bool orphan;
	bool rpm_enabled;
	unsigned int enable_count;
	unsigned int prepare_count;
	unsigned int protect_count;
	unsigned long min_rate;
	unsigned long max_rate;
	unsigned long accuracy;
	int phase;
	struct clk_duty duty;
	struct hlist_head children;
	struct hlist_node child_node;
	struct hlist_head clks;
	unsigned int notifier_count;
#ifdef CONFIG_DEBUG_FS
	struct dentry *dentry;
	struct hlist_node debug_node;
#endif
	struct kref ref;
};

#define CREATE_TRACE_POINTS
#include <trace/events/clk.h>

struct clk {
	struct clk_core *core;
	struct device *dev;
	const char *dev_id;
	const char *con_id;
	unsigned long min_rate;
	unsigned long max_rate;
	unsigned int exclusive_count;
	struct hlist_node clks_node;
};

/*** runtime pm ***/
static int clk_pm_runtime_get(struct clk_core *core)
{
	int ret;

	if (!core->rpm_enabled)
		return 0;

	ret = pm_runtime_get_sync(core->dev);
	return ret < 0 ? ret : 0;
}

static void clk_pm_runtime_put(struct clk_core *core)
{
	if (!core->rpm_enabled)
		return;

	pm_runtime_put_sync(core->dev);
}

/*** locking ***/
static void clk_prepare_lock(void)
{
	if (!mutex_trylock(&prepare_lock)) {
		if (prepare_owner == current) {
			prepare_refcnt++;
			return;
		}
		mutex_lock(&prepare_lock);
	}
	WARN_ON_ONCE(prepare_owner != NULL);
	WARN_ON_ONCE(prepare_refcnt != 0);
	prepare_owner = current;
	prepare_refcnt = 1;
}

static void clk_prepare_unlock(void)
{
	WARN_ON_ONCE(prepare_owner != current);
	WARN_ON_ONCE(prepare_refcnt == 0);

	if (--prepare_refcnt)
		return;
	prepare_owner = NULL;
	mutex_unlock(&prepare_lock);
}

static unsigned long clk_enable_lock(void)
	__acquires(enable_lock)
{
	unsigned long flags;

	/*
	 * On UP systems, spin_trylock_irqsave() always returns true, even if
	 * we already hold the lock. So, in that case, we rely only on
	 * reference counting.
	 */
	if (!IS_ENABLED(CONFIG_SMP) ||
	    !spin_trylock_irqsave(&enable_lock, flags)) {
		if (enable_owner == current) {
			enable_refcnt++;
			__acquire(enable_lock);
			if (!IS_ENABLED(CONFIG_SMP))
				local_save_flags(flags);
			return flags;
		}
		spin_lock_irqsave(&enable_lock, flags);
	}
	WARN_ON_ONCE(enable_owner != NULL);
	WARN_ON_ONCE(enable_refcnt != 0);
	enable_owner = current;
	enable_refcnt = 1;
	return flags;
}

static void clk_enable_unlock(unsigned long flags)
	__releases(enable_lock)
{
	WARN_ON_ONCE(enable_owner != current);
	WARN_ON_ONCE(enable_refcnt == 0);

	if (--enable_refcnt) {
		__release(enable_lock);
		return;
	}
	enable_owner = NULL;
	spin_unlock_irqrestore(&enable_lock, flags);
}

static bool clk_core_rate_is_protected(struct clk_core *core)
{
	return core->protect_count;
}

static bool clk_core_is_prepared(struct clk_core *core)
{
	bool ret = false;

	/*
	 * .is_prepared is optional for clocks that can prepare
	 * fall back to software usage counter if it is missing
	 */
	if (!core->ops->is_prepared)
		return core->prepare_count;

	if (!clk_pm_runtime_get(core)) {
		ret = core->ops->is_prepared(core->hw);
		clk_pm_runtime_put(core);
	}

	return ret;
}

static bool clk_core_is_enabled(struct clk_core *core)
{
	bool ret = false;

	/*
	 * .is_enabled is only mandatory for clocks that gate
	 * fall back to software usage counter if .is_enabled is missing
	 */
	if (!core->ops->is_enabled)
		return core->enable_count;

	/*
	 * Check if clock controller's device is runtime active before
	 * calling .is_enabled callback. If not, assume that clock is
	 * disabled, because we might be called from atomic context, from
	 * which pm_runtime_get() is not allowed.
	 * This function is called mainly from clk_disable_unused_subtree,
	 * which ensures proper runtime pm activation of controller before
	 * taking enable spinlock, but the below check is needed if one tries
	 * to call it from other places.
	 */
	if (core->rpm_enabled) {
		pm_runtime_get_noresume(core->dev);
		if (!pm_runtime_active(core->dev)) {
			ret = false;
			goto done;
		}
	}

	ret = core->ops->is_enabled(core->hw);
done:
	if (core->rpm_enabled)
		pm_runtime_put(core->dev);

	return ret;
}

/*** helper functions ***/

const char *__clk_get_name(const struct clk *clk)
{
	return !clk ? NULL : clk->core->name;
}
EXPORT_SYMBOL_GPL(__clk_get_name);

const char *clk_hw_get_name(const struct clk_hw *hw)
{
	return hw->core->name;
}
EXPORT_SYMBOL_GPL(clk_hw_get_name);

struct clk_hw *__clk_get_hw(struct clk *clk)
{
	return !clk ? NULL : clk->core->hw;
}
EXPORT_SYMBOL_GPL(__clk_get_hw);

unsigned int clk_hw_get_num_parents(const struct clk_hw *hw)
{
	return hw->core->num_parents;
}
EXPORT_SYMBOL_GPL(clk_hw_get_num_parents);

struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw)
{
	return hw->core->parent ? hw->core->parent->hw : NULL;
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent);

static struct clk_core *__clk_lookup_subtree(const char *name,
					     struct clk_core *core)
{
	struct clk_core *child;
	struct clk_core *ret;

	if (!strcmp(core->name, name))
		return core;

	hlist_for_each_entry(child, &core->children, child_node) {
		ret = __clk_lookup_subtree(name, child);
		if (ret)
			return ret;
	}

	return NULL;
}

static struct clk_core *clk_core_lookup(const char *name)
{
	struct clk_core *root_clk;
	struct clk_core *ret;

	if (!name)
		return NULL;

	/* search the 'proper' clk tree first */
	hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	/* if not found, then search the orphan tree */
	hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	return NULL;
}

#ifdef CONFIG_OF
static int of_parse_clkspec(const struct device_node *np, int index,
			    const char *name, struct of_phandle_args *out_args);
static struct clk_hw *
of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec);
#else
static inline int of_parse_clkspec(const struct device_node *np, int index,
				   const char *name,
				   struct of_phandle_args *out_args)
{
	return -ENOENT;
}
static inline struct clk_hw *
of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
{
	return ERR_PTR(-ENOENT);
}
#endif

/**
 * clk_core_get - Find the clk_core parent of a clk
 * @core: clk to find parent of
 * @p_index: parent index to search for
 *
 * This is the preferred method for clk providers to find the parent of a
 * clk when that parent is external to the clk controller. The parent_names
 * array is indexed and treated as a local name matching a string in the device
 * node's 'clock-names' property or as the 'con_id' matching the device's
 * dev_name() in a clk_lookup. This allows clk providers to use their own
 * namespace instead of looking for a globally unique parent string.
 *
 * For example the following DT snippet would allow a clock registered by the
 * clock-controller@c001 that has a clk_init_data::parent_data array
 * with 'xtal' in the 'name' member to find the clock provided by the
 * clock-controller@f00abcd without needing to get the globally unique name of
 * the xtal clk.
 *
 *	parent: clock-controller@f00abcd {
 *		reg = <0xf00abcd 0xabcd>;
 *		#clock-cells = <0>;
 *	};
 *
 *	clock-controller@c001 {
 *		reg = <0xc001 0xf00d>;
 *		clocks = <&parent>;
 *		clock-names = "xtal";
 *		#clock-cells = <1>;
 *	};
 *
 * Returns: -ENOENT when the provider can't be found or the clk doesn't
 * exist in the provider or the name can't be found in the DT node or
 * in a clkdev lookup. NULL when the provider knows about the clk but it
 * isn't provided on this system.
 * A valid clk_core pointer when the clk can be found in the provider.
 */
static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
{
	const char *name = core->parents[p_index].fw_name;
	int index = core->parents[p_index].index;
	struct clk_hw *hw = ERR_PTR(-ENOENT);
	struct device *dev = core->dev;
	const char *dev_id = dev ? dev_name(dev) : NULL;
	struct device_node *np = core->of_node;
	struct of_phandle_args clkspec;

	if (np && (name || index >= 0) &&
	    !of_parse_clkspec(np, index, name, &clkspec)) {
		hw = of_clk_get_hw_from_clkspec(&clkspec);
		of_node_put(clkspec.np);
	} else if (name) {
		/*
		 * If the DT search above couldn't find the provider fallback to
		 * looking up via clkdev based clk_lookups.
		 */
		hw = clk_find_hw(dev_id, name);
	}

	if (IS_ERR(hw))
		return ERR_CAST(hw);

	return hw->core;
}
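
/*
 * Illustrative sketch (not part of this file's logic): a provider matching
 * the DT example in the clk_core_get() comment above could describe the
 * external parent via clk_init_data::parent_data, so that the "xtal" lookup
 * goes through DT first and only falls back to the global namespace. The
 * identifiers my_mux_ops and "my_mux" are hypothetical.
 *
 *	static const struct clk_parent_data mux_parents[] = {
 *		{ .fw_name = "xtal", .name = "xtal" },
 *	};
 *
 *	static const struct clk_init_data init = {
 *		.name = "my_mux",
 *		.ops = &my_mux_ops,
 *		.parent_data = mux_parents,
 *		.num_parents = ARRAY_SIZE(mux_parents),
 *	};
 */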
409 | ||
410 | static void clk_core_fill_parent_index(struct clk_core *core, u8 index) | |
411 | { | |
412 | struct clk_parent_map *entry = &core->parents[index]; | |
413 | struct clk_core *parent = ERR_PTR(-ENOENT); | |
414 | ||
415 | if (entry->hw) { | |
416 | parent = entry->hw->core; | |
417 | /* | |
418 | * We have a direct reference but it isn't registered yet? | |
419 | * Orphan it and let clk_reparent() update the orphan status | |
420 | * when the parent is registered. | |
421 | */ | |
422 | if (!parent) | |
423 | parent = ERR_PTR(-EPROBE_DEFER); | |
424 | } else { | |
425 | parent = clk_core_get(core, index); | |
426 | if (IS_ERR(parent) && PTR_ERR(parent) == -ENOENT && entry->name) | |
427 | parent = clk_core_lookup(entry->name); | |
428 | } | |
429 | ||
430 | /* Only cache it if it's not an error */ | |
431 | if (!IS_ERR(parent)) | |
432 | entry->core = parent; | |
433 | } | |
434 | ||
435 | static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core, | |
436 | u8 index) | |
437 | { | |
438 | if (!core || index >= core->num_parents || !core->parents) | |
439 | return NULL; | |
440 | ||
441 | if (!core->parents[index].core) | |
442 | clk_core_fill_parent_index(core, index); | |
443 | ||
444 | return core->parents[index].core; | |
445 | } | |
446 | ||
447 | struct clk_hw * | |
448 | clk_hw_get_parent_by_index(const struct clk_hw *hw, unsigned int index) | |
449 | { | |
450 | struct clk_core *parent; | |
451 | ||
452 | parent = clk_core_get_parent_by_index(hw->core, index); | |
453 | ||
454 | return !parent ? NULL : parent->hw; | |
455 | } | |
456 | EXPORT_SYMBOL_GPL(clk_hw_get_parent_by_index); | |
457 | ||
458 | unsigned int __clk_get_enable_count(struct clk *clk) | |
459 | { | |
460 | return !clk ? 0 : clk->core->enable_count; | |
461 | } | |
462 | ||
463 | static unsigned long clk_core_get_rate_nolock(struct clk_core *core) | |
464 | { | |
465 | if (!core) | |
466 | return 0; | |
467 | ||
468 | if (!core->num_parents || core->parent) | |
469 | return core->rate; | |
470 | ||
471 | /* | |
472 | * Clk must have a parent because num_parents > 0 but the parent isn't | |
473 | * known yet. Best to return 0 as the rate of this clk until we can | |
474 | * properly recalc the rate based on the parent's rate. | |
475 | */ | |
476 | return 0; | |
477 | } | |
478 | ||
479 | unsigned long clk_hw_get_rate(const struct clk_hw *hw) | |
480 | { | |
481 | return clk_core_get_rate_nolock(hw->core); | |
482 | } | |
483 | EXPORT_SYMBOL_GPL(clk_hw_get_rate); | |
484 | ||
485 | static unsigned long __clk_get_accuracy(struct clk_core *core) | |
486 | { | |
487 | if (!core) | |
488 | return 0; | |
489 | ||
490 | return core->accuracy; | |
491 | } | |
492 | ||
493 | unsigned long __clk_get_flags(struct clk *clk) | |
494 | { | |
495 | return !clk ? 0 : clk->core->flags; | |
496 | } | |
497 | EXPORT_SYMBOL_GPL(__clk_get_flags); | |
498 | ||
499 | unsigned long clk_hw_get_flags(const struct clk_hw *hw) | |
500 | { | |
501 | return hw->core->flags; | |
502 | } | |
503 | EXPORT_SYMBOL_GPL(clk_hw_get_flags); | |
504 | ||
505 | bool clk_hw_is_prepared(const struct clk_hw *hw) | |
506 | { | |
507 | return clk_core_is_prepared(hw->core); | |
508 | } | |
509 | EXPORT_SYMBOL_GPL(clk_hw_is_prepared); | |
510 | ||
511 | bool clk_hw_rate_is_protected(const struct clk_hw *hw) | |
512 | { | |
513 | return clk_core_rate_is_protected(hw->core); | |
514 | } | |
515 | EXPORT_SYMBOL_GPL(clk_hw_rate_is_protected); | |
516 | ||
517 | bool clk_hw_is_enabled(const struct clk_hw *hw) | |
518 | { | |
519 | return clk_core_is_enabled(hw->core); | |
520 | } | |
521 | EXPORT_SYMBOL_GPL(clk_hw_is_enabled); | |
522 | ||
523 | bool __clk_is_enabled(struct clk *clk) | |
524 | { | |
525 | if (!clk) | |
526 | return false; | |
527 | ||
528 | return clk_core_is_enabled(clk->core); | |
529 | } | |
530 | EXPORT_SYMBOL_GPL(__clk_is_enabled); | |
531 | ||
532 | static bool mux_is_better_rate(unsigned long rate, unsigned long now, | |
533 | unsigned long best, unsigned long flags) | |
534 | { | |
535 | if (flags & CLK_MUX_ROUND_CLOSEST) | |
536 | return abs(now - rate) < abs(best - rate); | |
537 | ||
538 | return now <= rate && now > best; | |
539 | } | |
540 | ||
541 | int clk_mux_determine_rate_flags(struct clk_hw *hw, | |
542 | struct clk_rate_request *req, | |
543 | unsigned long flags) | |
544 | { | |
545 | struct clk_core *core = hw->core, *parent, *best_parent = NULL; | |
546 | int i, num_parents, ret; | |
547 | unsigned long best = 0; | |
548 | struct clk_rate_request parent_req = *req; | |
549 | ||
550 | /* if NO_REPARENT flag set, pass through to current parent */ | |
551 | if (core->flags & CLK_SET_RATE_NO_REPARENT) { | |
552 | parent = core->parent; | |
553 | if (core->flags & CLK_SET_RATE_PARENT) { | |
554 | ret = __clk_determine_rate(parent ? parent->hw : NULL, | |
555 | &parent_req); | |
556 | if (ret) | |
557 | return ret; | |
558 | ||
559 | best = parent_req.rate; | |
560 | } else if (parent) { | |
561 | best = clk_core_get_rate_nolock(parent); | |
562 | } else { | |
563 | best = clk_core_get_rate_nolock(core); | |
564 | } | |
565 | ||
566 | goto out; | |
567 | } | |
568 | ||
569 | /* find the parent that can provide the fastest rate <= rate */ | |
570 | num_parents = core->num_parents; | |
571 | for (i = 0; i < num_parents; i++) { | |
572 | parent = clk_core_get_parent_by_index(core, i); | |
573 | if (!parent) | |
574 | continue; | |
575 | ||
576 | if (core->flags & CLK_SET_RATE_PARENT) { | |
577 | parent_req = *req; | |
578 | ret = __clk_determine_rate(parent->hw, &parent_req); | |
579 | if (ret) | |
580 | continue; | |
581 | } else { | |
582 | parent_req.rate = clk_core_get_rate_nolock(parent); | |
583 | } | |
584 | ||
585 | if (mux_is_better_rate(req->rate, parent_req.rate, | |
586 | best, flags)) { | |
587 | best_parent = parent; | |
588 | best = parent_req.rate; | |
589 | } | |
590 | } | |
591 | ||
592 | if (!best_parent) | |
593 | return -EINVAL; | |
594 | ||
595 | out: | |
596 | if (best_parent) | |
597 | req->best_parent_hw = best_parent->hw; | |
598 | req->best_parent_rate = best; | |
599 | req->rate = best; | |
600 | ||
601 | return 0; | |
602 | } | |
603 | EXPORT_SYMBOL_GPL(clk_mux_determine_rate_flags); | |
604 | ||
605 | struct clk *__clk_lookup(const char *name) | |
606 | { | |
607 | struct clk_core *core = clk_core_lookup(name); | |
608 | ||
609 | return !core ? NULL : core->hw->clk; | |
610 | } | |
611 | ||
612 | static void clk_core_get_boundaries(struct clk_core *core, | |
613 | unsigned long *min_rate, | |
614 | unsigned long *max_rate) | |
615 | { | |
616 | struct clk *clk_user; | |
617 | ||
618 | *min_rate = core->min_rate; | |
619 | *max_rate = core->max_rate; | |
620 | ||
621 | hlist_for_each_entry(clk_user, &core->clks, clks_node) | |
622 | *min_rate = max(*min_rate, clk_user->min_rate); | |
623 | ||
624 | hlist_for_each_entry(clk_user, &core->clks, clks_node) | |
625 | *max_rate = min(*max_rate, clk_user->max_rate); | |
626 | } | |
627 | ||
628 | void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate, | |
629 | unsigned long max_rate) | |
630 | { | |
631 | hw->core->min_rate = min_rate; | |
632 | hw->core->max_rate = max_rate; | |
633 | } | |
634 | EXPORT_SYMBOL_GPL(clk_hw_set_rate_range); | |
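
/*
 * Illustrative sketch (assumed names): a provider typically bounds what
 * consumers may request right after registering its hardware clock, e.g.:
 *
 *	ret = devm_clk_hw_register(dev, &my_hw);
 *	if (!ret)
 *		clk_hw_set_rate_range(&my_hw, 1000000, 200000000);
 */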
635 | ||
636 | /* | |
637 | * __clk_mux_determine_rate - clk_ops::determine_rate implementation for a mux type clk | |
638 | * @hw: mux type clk to determine rate on | |
639 | * @req: rate request, also used to return preferred parent and frequencies | |
640 | * | |
641 | * Helper for finding best parent to provide a given frequency. This can be used | |
642 | * directly as a determine_rate callback (e.g. for a mux), or from a more | |
643 | * complex clock that may combine a mux with other operations. | |
644 | * | |
645 | * Returns: 0 on success, -EERROR value on error | |
646 | */ | |
647 | int __clk_mux_determine_rate(struct clk_hw *hw, | |
648 | struct clk_rate_request *req) | |
649 | { | |
650 | return clk_mux_determine_rate_flags(hw, req, 0); | |
651 | } | |
652 | EXPORT_SYMBOL_GPL(__clk_mux_determine_rate); | |
653 | ||
654 | int __clk_mux_determine_rate_closest(struct clk_hw *hw, | |
655 | struct clk_rate_request *req) | |
656 | { | |
657 | return clk_mux_determine_rate_flags(hw, req, CLK_MUX_ROUND_CLOSEST); | |
658 | } | |
659 | EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest); | |
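
/*
 * Illustrative sketch: a mux provider can plug these helpers directly into
 * its clk_ops as the .determine_rate implementation (the other callbacks
 * named below are hypothetical). Use __clk_mux_determine_rate_closest when
 * the nearest rate, above or below the request, is preferred.
 *
 *	static const struct clk_ops my_mux_ops = {
 *		.determine_rate = __clk_mux_determine_rate,
 *		.get_parent = my_mux_get_parent,
 *		.set_parent = my_mux_set_parent,
 *	};
 */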
660 | ||
661 | /*** clk api ***/ | |
662 | ||
663 | static void clk_core_rate_unprotect(struct clk_core *core) | |
664 | { | |
665 | lockdep_assert_held(&prepare_lock); | |
666 | ||
667 | if (!core) | |
668 | return; | |
669 | ||
670 | if (WARN(core->protect_count == 0, | |
671 | "%s already unprotected\n", core->name)) | |
672 | return; | |
673 | ||
674 | if (--core->protect_count > 0) | |
675 | return; | |
676 | ||
677 | clk_core_rate_unprotect(core->parent); | |
678 | } | |
679 | ||
680 | static int clk_core_rate_nuke_protect(struct clk_core *core) | |
681 | { | |
682 | int ret; | |
683 | ||
684 | lockdep_assert_held(&prepare_lock); | |
685 | ||
686 | if (!core) | |
687 | return -EINVAL; | |
688 | ||
689 | if (core->protect_count == 0) | |
690 | return 0; | |
691 | ||
692 | ret = core->protect_count; | |
693 | core->protect_count = 1; | |
694 | clk_core_rate_unprotect(core); | |
695 | ||
696 | return ret; | |
697 | } | |
698 | ||
699 | /** | |
700 | * clk_rate_exclusive_put - release exclusivity over clock rate control | |
701 | * @clk: the clk over which the exclusivity is released | |
702 | * | |
703 | * clk_rate_exclusive_put() completes a critical section during which a clock | |
704 | * consumer cannot tolerate any other consumer making any operation on the | |
705 | * clock which could result in a rate change or rate glitch. Exclusive clocks | |
706 | * cannot have their rate changed, either directly or indirectly due to changes | |
707 | * further up the parent chain of clocks. As a result, clocks up parent chain | |
708 | * also get under exclusive control of the calling consumer. | |
709 | * | |
710 | * If exlusivity is claimed more than once on clock, even by the same consumer, | |
711 | * the rate effectively gets locked as exclusivity can't be preempted. | |
712 | * | |
713 | * Calls to clk_rate_exclusive_put() must be balanced with calls to | |
714 | * clk_rate_exclusive_get(). Calls to this function may sleep, and do not return | |
715 | * error status. | |
716 | */ | |
717 | void clk_rate_exclusive_put(struct clk *clk) | |
718 | { | |
719 | if (!clk) | |
720 | return; | |
721 | ||
722 | clk_prepare_lock(); | |
723 | ||
724 | /* | |
725 | * if there is something wrong with this consumer protect count, stop | |
726 | * here before messing with the provider | |
727 | */ | |
728 | if (WARN_ON(clk->exclusive_count <= 0)) | |
729 | goto out; | |
730 | ||
731 | clk_core_rate_unprotect(clk->core); | |
732 | clk->exclusive_count--; | |
733 | out: | |
734 | clk_prepare_unlock(); | |
735 | } | |
736 | EXPORT_SYMBOL_GPL(clk_rate_exclusive_put); | |
737 | ||
738 | static void clk_core_rate_protect(struct clk_core *core) | |
739 | { | |
740 | lockdep_assert_held(&prepare_lock); | |
741 | ||
742 | if (!core) | |
743 | return; | |
744 | ||
745 | if (core->protect_count == 0) | |
746 | clk_core_rate_protect(core->parent); | |
747 | ||
748 | core->protect_count++; | |
749 | } | |
750 | ||
751 | static void clk_core_rate_restore_protect(struct clk_core *core, int count) | |
752 | { | |
753 | lockdep_assert_held(&prepare_lock); | |
754 | ||
755 | if (!core) | |
756 | return; | |
757 | ||
758 | if (count == 0) | |
759 | return; | |
760 | ||
761 | clk_core_rate_protect(core); | |
762 | core->protect_count = count; | |
763 | } | |
764 | ||
765 | /** | |
766 | * clk_rate_exclusive_get - get exclusivity over the clk rate control | |
767 | * @clk: the clk over which the exclusity of rate control is requested | |
768 | * | |
769 | * clk_rate_exlusive_get() begins a critical section during which a clock | |
770 | * consumer cannot tolerate any other consumer making any operation on the | |
771 | * clock which could result in a rate change or rate glitch. Exclusive clocks | |
772 | * cannot have their rate changed, either directly or indirectly due to changes | |
773 | * further up the parent chain of clocks. As a result, clocks up parent chain | |
774 | * also get under exclusive control of the calling consumer. | |
775 | * | |
776 | * If exlusivity is claimed more than once on clock, even by the same consumer, | |
777 | * the rate effectively gets locked as exclusivity can't be preempted. | |
778 | * | |
779 | * Calls to clk_rate_exclusive_get() should be balanced with calls to | |
780 | * clk_rate_exclusive_put(). Calls to this function may sleep. | |
781 | * Returns 0 on success, -EERROR otherwise | |
782 | */ | |
783 | int clk_rate_exclusive_get(struct clk *clk) | |
784 | { | |
785 | if (!clk) | |
786 | return 0; | |
787 | ||
788 | clk_prepare_lock(); | |
789 | clk_core_rate_protect(clk->core); | |
790 | clk->exclusive_count++; | |
791 | clk_prepare_unlock(); | |
792 | ||
793 | return 0; | |
794 | } | |
795 | EXPORT_SYMBOL_GPL(clk_rate_exclusive_get); | |
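
/*
 * Illustrative sketch of balanced exclusive-rate usage by a consumer ("clk"
 * is a handle obtained from clk_get(); do_glitch_sensitive_work() is
 * hypothetical):
 *
 *	ret = clk_rate_exclusive_get(clk);
 *	if (ret)
 *		return ret;
 *	do_glitch_sensitive_work();	// no other consumer can change the rate here
 *	clk_rate_exclusive_put(clk);
 */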
796 | ||
797 | static void clk_core_unprepare(struct clk_core *core) | |
798 | { | |
799 | lockdep_assert_held(&prepare_lock); | |
800 | ||
801 | if (!core) | |
802 | return; | |
803 | ||
804 | if (WARN(core->prepare_count == 0, | |
805 | "%s already unprepared\n", core->name)) | |
806 | return; | |
807 | ||
808 | if (WARN(core->prepare_count == 1 && core->flags & CLK_IS_CRITICAL, | |
809 | "Unpreparing critical %s\n", core->name)) | |
810 | return; | |
811 | ||
812 | if (core->flags & CLK_SET_RATE_GATE) | |
813 | clk_core_rate_unprotect(core); | |
814 | ||
815 | if (--core->prepare_count > 0) | |
816 | return; | |
817 | ||
818 | WARN(core->enable_count > 0, "Unpreparing enabled %s\n", core->name); | |
819 | ||
820 | trace_clk_unprepare(core); | |
821 | ||
822 | if (core->ops->unprepare) | |
823 | core->ops->unprepare(core->hw); | |
824 | ||
825 | clk_pm_runtime_put(core); | |
826 | ||
827 | trace_clk_unprepare_complete(core); | |
828 | clk_core_unprepare(core->parent); | |
829 | } | |
830 | ||
831 | static void clk_core_unprepare_lock(struct clk_core *core) | |
832 | { | |
833 | clk_prepare_lock(); | |
834 | clk_core_unprepare(core); | |
835 | clk_prepare_unlock(); | |
836 | } | |
837 | ||
838 | /** | |
839 | * clk_unprepare - undo preparation of a clock source | |
840 | * @clk: the clk being unprepared | |
841 | * | |
842 | * clk_unprepare may sleep, which differentiates it from clk_disable. In a | |
843 | * simple case, clk_unprepare can be used instead of clk_disable to gate a clk | |
844 | * if the operation may sleep. One example is a clk which is accessed over | |
845 | * I2c. In the complex case a clk gate operation may require a fast and a slow | |
846 | * part. It is this reason that clk_unprepare and clk_disable are not mutually | |
847 | * exclusive. In fact clk_disable must be called before clk_unprepare. | |
848 | */ | |
849 | void clk_unprepare(struct clk *clk) | |
850 | { | |
851 | if (IS_ERR_OR_NULL(clk)) | |
852 | return; | |
853 | ||
854 | clk_core_unprepare_lock(clk->core); | |
855 | } | |
856 | EXPORT_SYMBOL_GPL(clk_unprepare); | |
857 | ||
858 | static int clk_core_prepare(struct clk_core *core) | |
859 | { | |
860 | int ret = 0; | |
861 | ||
862 | lockdep_assert_held(&prepare_lock); | |
863 | ||
864 | if (!core) | |
865 | return 0; | |
866 | ||
867 | if (core->prepare_count == 0) { | |
868 | ret = clk_pm_runtime_get(core); | |
869 | if (ret) | |
870 | return ret; | |
871 | ||
872 | ret = clk_core_prepare(core->parent); | |
873 | if (ret) | |
874 | goto runtime_put; | |
875 | ||
876 | trace_clk_prepare(core); | |
877 | ||
878 | if (core->ops->prepare) | |
879 | ret = core->ops->prepare(core->hw); | |
880 | ||
881 | trace_clk_prepare_complete(core); | |
882 | ||
883 | if (ret) | |
884 | goto unprepare; | |
885 | } | |
886 | ||
887 | core->prepare_count++; | |
888 | ||
889 | /* | |
890 | * CLK_SET_RATE_GATE is a special case of clock protection | |
891 | * Instead of a consumer claiming exclusive rate control, it is | |
892 | * actually the provider which prevents any consumer from making any | |
893 | * operation which could result in a rate change or rate glitch while | |
894 | * the clock is prepared. | |
895 | */ | |
896 | if (core->flags & CLK_SET_RATE_GATE) | |
897 | clk_core_rate_protect(core); | |
898 | ||
899 | return 0; | |
900 | unprepare: | |
901 | clk_core_unprepare(core->parent); | |
902 | runtime_put: | |
903 | clk_pm_runtime_put(core); | |
904 | return ret; | |
905 | } | |
906 | ||
907 | static int clk_core_prepare_lock(struct clk_core *core) | |
908 | { | |
909 | int ret; | |
910 | ||
911 | clk_prepare_lock(); | |
912 | ret = clk_core_prepare(core); | |
913 | clk_prepare_unlock(); | |
914 | ||
915 | return ret; | |
916 | } | |
917 | ||
918 | /** | |
919 | * clk_prepare - prepare a clock source | |
920 | * @clk: the clk being prepared | |
921 | * | |
922 | * clk_prepare may sleep, which differentiates it from clk_enable. In a simple | |
923 | * case, clk_prepare can be used instead of clk_enable to ungate a clk if the | |
924 | * operation may sleep. One example is a clk which is accessed over I2c. In | |
925 | * the complex case a clk ungate operation may require a fast and a slow part. | |
926 | * It is this reason that clk_prepare and clk_enable are not mutually | |
927 | * exclusive. In fact clk_prepare must be called before clk_enable. | |
928 | * Returns 0 on success, -EERROR otherwise. | |
929 | */ | |
930 | int clk_prepare(struct clk *clk) | |
931 | { | |
932 | if (!clk) | |
933 | return 0; | |
934 | ||
935 | return clk_core_prepare_lock(clk->core); | |
936 | } | |
937 | EXPORT_SYMBOL_GPL(clk_prepare); | |
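
/*
 * Illustrative consumer pattern (assumed driver context): do the sleepable
 * half in process context, then gate/ungate on the fast path:
 *
 *	ret = clk_prepare(clk);		// may sleep, e.g. an I2C register write
 *	if (ret)
 *		return ret;
 *	ret = clk_enable(clk);		// never sleeps
 *	if (ret)
 *		clk_unprepare(clk);
 */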
938 | ||
939 | static void clk_core_disable(struct clk_core *core) | |
940 | { | |
941 | lockdep_assert_held(&enable_lock); | |
942 | ||
943 | if (!core) | |
944 | return; | |
945 | ||
946 | if (WARN(core->enable_count == 0, "%s already disabled\n", core->name)) | |
947 | return; | |
948 | ||
949 | if (WARN(core->enable_count == 1 && core->flags & CLK_IS_CRITICAL, | |
950 | "Disabling critical %s\n", core->name)) | |
951 | return; | |
952 | ||
953 | if (--core->enable_count > 0) | |
954 | return; | |
955 | ||
956 | trace_clk_disable_rcuidle(core); | |
957 | ||
958 | if (core->ops->disable) | |
959 | core->ops->disable(core->hw); | |
960 | ||
961 | trace_clk_disable_complete_rcuidle(core); | |
962 | ||
963 | clk_core_disable(core->parent); | |
964 | } | |
965 | ||
966 | static void clk_core_disable_lock(struct clk_core *core) | |
967 | { | |
968 | unsigned long flags; | |
969 | ||
970 | flags = clk_enable_lock(); | |
971 | clk_core_disable(core); | |
972 | clk_enable_unlock(flags); | |
973 | } | |
974 | ||
975 | /** | |
976 | * clk_disable - gate a clock | |
977 | * @clk: the clk being gated | |
978 | * | |
979 | * clk_disable must not sleep, which differentiates it from clk_unprepare. In | |
980 | * a simple case, clk_disable can be used instead of clk_unprepare to gate a | |
981 | * clk if the operation is fast and will never sleep. One example is a | |
982 | * SoC-internal clk which is controlled via simple register writes. In the | |
983 | * complex case a clk gate operation may require a fast and a slow part. It is | |
984 | * this reason that clk_unprepare and clk_disable are not mutually exclusive. | |
985 | * In fact clk_disable must be called before clk_unprepare. | |
986 | */ | |
987 | void clk_disable(struct clk *clk) | |
988 | { | |
989 | if (IS_ERR_OR_NULL(clk)) | |
990 | return; | |
991 | ||
992 | clk_core_disable_lock(clk->core); | |
993 | } | |
994 | EXPORT_SYMBOL_GPL(clk_disable); | |
995 | ||
996 | static int clk_core_enable(struct clk_core *core) | |
997 | { | |
998 | int ret = 0; | |
999 | ||
1000 | lockdep_assert_held(&enable_lock); | |
1001 | ||
1002 | if (!core) | |
1003 | return 0; | |
1004 | ||
1005 | if (WARN(core->prepare_count == 0, | |
1006 | "Enabling unprepared %s\n", core->name)) | |
1007 | return -ESHUTDOWN; | |
1008 | ||
1009 | if (core->enable_count == 0) { | |
1010 | ret = clk_core_enable(core->parent); | |
1011 | ||
1012 | if (ret) | |
1013 | return ret; | |
1014 | ||
1015 | trace_clk_enable_rcuidle(core); | |
1016 | ||
1017 | if (core->ops->enable) | |
1018 | ret = core->ops->enable(core->hw); | |
1019 | ||
1020 | trace_clk_enable_complete_rcuidle(core); | |
1021 | ||
1022 | if (ret) { | |
1023 | clk_core_disable(core->parent); | |
1024 | return ret; | |
1025 | } | |
1026 | } | |
1027 | ||
1028 | core->enable_count++; | |
1029 | return 0; | |
1030 | } | |
1031 | ||
1032 | static int clk_core_enable_lock(struct clk_core *core) | |
1033 | { | |
1034 | unsigned long flags; | |
1035 | int ret; | |
1036 | ||
1037 | flags = clk_enable_lock(); | |
1038 | ret = clk_core_enable(core); | |
1039 | clk_enable_unlock(flags); | |
1040 | ||
1041 | return ret; | |
1042 | } | |
1043 | ||
1044 | /** | |
1045 | * clk_gate_restore_context - restore context for poweroff | |
1046 | * @hw: the clk_hw pointer of clock whose state is to be restored | |
1047 | * | |
1048 | * The clock gate restore context function enables or disables | |
1049 | * the gate clocks based on the enable_count. This is done in cases | |
1050 | * where the clock context is lost and based on the enable_count | |
1051 | * the clock either needs to be enabled/disabled. This | |
1052 | * helps restore the state of gate clocks. | |
1053 | */ | |
1054 | void clk_gate_restore_context(struct clk_hw *hw) | |
1055 | { | |
1056 | struct clk_core *core = hw->core; | |
1057 | ||
1058 | if (core->enable_count) | |
1059 | core->ops->enable(hw); | |
1060 | else | |
1061 | core->ops->disable(hw); | |
1062 | } | |
1063 | EXPORT_SYMBOL_GPL(clk_gate_restore_context); | |
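
/*
 * Illustrative sketch: a gate provider whose registers are lost in deep
 * power states can reuse this helper as its .restore_context op (the other
 * callbacks named below are hypothetical):
 *
 *	static const struct clk_ops my_gate_ops = {
 *		.enable = my_gate_enable,
 *		.disable = my_gate_disable,
 *		.restore_context = clk_gate_restore_context,
 *	};
 */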
1064 | ||
1065 | static int clk_core_save_context(struct clk_core *core) | |
1066 | { | |
1067 | struct clk_core *child; | |
1068 | int ret = 0; | |
1069 | ||
1070 | hlist_for_each_entry(child, &core->children, child_node) { | |
1071 | ret = clk_core_save_context(child); | |
1072 | if (ret < 0) | |
1073 | return ret; | |
1074 | } | |
1075 | ||
1076 | if (core->ops && core->ops->save_context) | |
1077 | ret = core->ops->save_context(core->hw); | |
1078 | ||
1079 | return ret; | |
1080 | } | |
1081 | ||
1082 | static void clk_core_restore_context(struct clk_core *core) | |
1083 | { | |
1084 | struct clk_core *child; | |
1085 | ||
1086 | if (core->ops && core->ops->restore_context) | |
1087 | core->ops->restore_context(core->hw); | |
1088 | ||
1089 | hlist_for_each_entry(child, &core->children, child_node) | |
1090 | clk_core_restore_context(child); | |
1091 | } | |
1092 | ||
1093 | /** | |
1094 | * clk_save_context - save clock context for poweroff | |
1095 | * | |
1096 | * Saves the context of the clock register for powerstates in which the | |
1097 | * contents of the registers will be lost. Occurs deep within the suspend | |
1098 | * code. Returns 0 on success. | |
1099 | */ | |
1100 | int clk_save_context(void) | |
1101 | { | |
1102 | struct clk_core *clk; | |
1103 | int ret; | |
1104 | ||
1105 | hlist_for_each_entry(clk, &clk_root_list, child_node) { | |
1106 | ret = clk_core_save_context(clk); | |
1107 | if (ret < 0) | |
1108 | return ret; | |
1109 | } | |
1110 | ||
1111 | hlist_for_each_entry(clk, &clk_orphan_list, child_node) { | |
1112 | ret = clk_core_save_context(clk); | |
1113 | if (ret < 0) | |
1114 | return ret; | |
1115 | } | |
1116 | ||
1117 | return 0; | |
1118 | } | |
1119 | EXPORT_SYMBOL_GPL(clk_save_context); | |
1120 | ||
1121 | /** | |
1122 | * clk_restore_context - restore clock context after poweroff | |
1123 | * | |
1124 | * Restore the saved clock context upon resume. | |
1125 | * | |
1126 | */ | |
1127 | void clk_restore_context(void) | |
1128 | { | |
1129 | struct clk_core *core; | |
1130 | ||
1131 | hlist_for_each_entry(core, &clk_root_list, child_node) | |
1132 | clk_core_restore_context(core); | |
1133 | ||
1134 | hlist_for_each_entry(core, &clk_orphan_list, child_node) | |
1135 | clk_core_restore_context(core); | |
1136 | } | |
1137 | EXPORT_SYMBOL_GPL(clk_restore_context); | |
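
/*
 * Illustrative sketch: platform power management code would pair the two
 * walks above across a context-losing power state (function names are
 * hypothetical):
 *
 *	static int my_platform_suspend(void)
 *	{
 *		return clk_save_context();
 *	}
 *
 *	static void my_platform_resume(void)
 *	{
 *		clk_restore_context();
 *	}
 */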
1138 | ||
1139 | /** | |
1140 | * clk_enable - ungate a clock | |
1141 | * @clk: the clk being ungated | |
1142 | * | |
1143 | * clk_enable must not sleep, which differentiates it from clk_prepare. In a | |
1144 | * simple case, clk_enable can be used instead of clk_prepare to ungate a clk | |
1145 | * if the operation will never sleep. One example is a SoC-internal clk which | |
1146 | * is controlled via simple register writes. In the complex case a clk ungate | |
1147 | * operation may require a fast and a slow part. It is this reason that | |
1148 | * clk_enable and clk_prepare are not mutually exclusive. In fact clk_prepare | |
1149 | * must be called before clk_enable. Returns 0 on success, -EERROR | |
1150 | * otherwise. | |
1151 | */ | |
1152 | int clk_enable(struct clk *clk) | |
1153 | { | |
1154 | if (!clk) | |
1155 | return 0; | |
1156 | ||
1157 | return clk_core_enable_lock(clk->core); | |
1158 | } | |
1159 | EXPORT_SYMBOL_GPL(clk_enable); | |
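
/*
 * Illustrative sketch: because clk_enable()/clk_disable() never sleep, a
 * driver may gate a clk it prepared at probe time from atomic context such
 * as an interrupt handler (names below are hypothetical):
 *
 *	static irqreturn_t my_irq_handler(int irq, void *data)
 *	{
 *		struct my_priv *priv = data;
 *
 *		clk_enable(priv->clk);	// clk_prepare() was done in probe
 *		my_do_fast_work(priv);
 *		clk_disable(priv->clk);
 *
 *		return IRQ_HANDLED;
 *	}
 */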
1160 | ||
1161 | static int clk_core_prepare_enable(struct clk_core *core) | |
1162 | { | |
1163 | int ret; | |
1164 | ||
1165 | ret = clk_core_prepare_lock(core); | |
1166 | if (ret) | |
1167 | return ret; | |
1168 | ||
1169 | ret = clk_core_enable_lock(core); | |
1170 | if (ret) | |
1171 | clk_core_unprepare_lock(core); | |
1172 | ||
1173 | return ret; | |
1174 | } | |
1175 | ||
1176 | static void clk_core_disable_unprepare(struct clk_core *core) | |
1177 | { | |
1178 | clk_core_disable_lock(core); | |
1179 | clk_core_unprepare_lock(core); | |
1180 | } | |
1181 | ||
1182 | static void clk_unprepare_unused_subtree(struct clk_core *core) | |
1183 | { | |
1184 | struct clk_core *child; | |
1185 | ||
1186 | lockdep_assert_held(&prepare_lock); | |
1187 | ||
1188 | hlist_for_each_entry(child, &core->children, child_node) | |
1189 | clk_unprepare_unused_subtree(child); | |
1190 | ||
1191 | if (core->prepare_count) | |
1192 | return; | |
1193 | ||
1194 | if (core->flags & CLK_IGNORE_UNUSED) | |
1195 | return; | |
1196 | ||
1197 | if (clk_pm_runtime_get(core)) | |
1198 | return; | |
1199 | ||
1200 | if (clk_core_is_prepared(core)) { | |
1201 | trace_clk_unprepare(core); | |
1202 | if (core->ops->unprepare_unused) | |
1203 | core->ops->unprepare_unused(core->hw); | |
1204 | else if (core->ops->unprepare) | |
1205 | core->ops->unprepare(core->hw); | |
1206 | trace_clk_unprepare_complete(core); | |
1207 | } | |
1208 | ||
1209 | clk_pm_runtime_put(core); | |
1210 | } | |
1211 | ||
1212 | static void clk_disable_unused_subtree(struct clk_core *core) | |
1213 | { | |
1214 | struct clk_core *child; | |
1215 | unsigned long flags; | |
1216 | ||
1217 | lockdep_assert_held(&prepare_lock); | |
1218 | ||
1219 | hlist_for_each_entry(child, &core->children, child_node) | |
1220 | clk_disable_unused_subtree(child); | |
1221 | ||
1222 | if (core->flags & CLK_OPS_PARENT_ENABLE) | |
1223 | clk_core_prepare_enable(core->parent); | |
1224 | ||
1225 | if (clk_pm_runtime_get(core)) | |
1226 | goto unprepare_out; | |
1227 | ||
1228 | flags = clk_enable_lock(); | |
1229 | ||
1230 | if (core->enable_count) | |
1231 | goto unlock_out; | |
1232 | ||
1233 | if (core->flags & CLK_IGNORE_UNUSED) | |
1234 | goto unlock_out; | |
1235 | ||
1236 | /* | |
1237 | * some gate clocks have special needs during the disable-unused | |
1238 | * sequence. call .disable_unused if available, otherwise fall | |
1239 | * back to .disable | |
1240 | */ | |
1241 | if (clk_core_is_enabled(core)) { | |
1242 | trace_clk_disable(core); | |
1243 | if (core->ops->disable_unused) | |
1244 | core->ops->disable_unused(core->hw); | |
1245 | else if (core->ops->disable) | |
1246 | core->ops->disable(core->hw); | |
1247 | trace_clk_disable_complete(core); | |
1248 | } | |
1249 | ||
1250 | unlock_out: | |
1251 | clk_enable_unlock(flags); | |
1252 | clk_pm_runtime_put(core); | |
1253 | unprepare_out: | |
1254 | if (core->flags & CLK_OPS_PARENT_ENABLE) | |
1255 | clk_core_disable_unprepare(core->parent); | |
1256 | } | |
1257 | ||
1258 | static bool clk_ignore_unused; | |
1259 | static int __init clk_ignore_unused_setup(char *__unused) | |
1260 | { | |
1261 | clk_ignore_unused = true; | |
1262 | return 1; | |
1263 | } | |
1264 | __setup("clk_ignore_unused", clk_ignore_unused_setup); | |
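
/*
 * Illustrative usage: booting with "clk_ignore_unused" on the kernel command
 * line skips the gate-the-leftovers pass below, which can help when bringing
 * up a platform whose consumer drivers do not claim their clocks yet.
 */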
1265 | ||
1266 | static int clk_disable_unused(void) | |
1267 | { | |
1268 | struct clk_core *core; | |
1269 | ||
1270 | if (clk_ignore_unused) { | |
1271 | pr_warn("clk: Not disabling unused clocks\n"); | |
1272 | return 0; | |
1273 | } | |
1274 | ||
1275 | clk_prepare_lock(); | |
1276 | ||
1277 | hlist_for_each_entry(core, &clk_root_list, child_node) | |
1278 | clk_disable_unused_subtree(core); | |
1279 | ||
1280 | hlist_for_each_entry(core, &clk_orphan_list, child_node) | |
1281 | clk_disable_unused_subtree(core); | |
1282 | ||
1283 | hlist_for_each_entry(core, &clk_root_list, child_node) | |
1284 | clk_unprepare_unused_subtree(core); | |
1285 | ||
1286 | hlist_for_each_entry(core, &clk_orphan_list, child_node) | |
1287 | clk_unprepare_unused_subtree(core); | |
1288 | ||
1289 | clk_prepare_unlock(); | |
1290 | ||
1291 | return 0; | |
1292 | } | |
1293 | late_initcall_sync(clk_disable_unused); | |
1294 | ||
1295 | static int clk_core_determine_round_nolock(struct clk_core *core, | |
1296 | struct clk_rate_request *req) | |
1297 | { | |
1298 | long rate; | |
1299 | ||
1300 | lockdep_assert_held(&prepare_lock); | |
1301 | ||
1302 | if (!core) | |
1303 | return 0; | |
1304 | ||
1305 | /* | |
1306 | * At this point, core protection will be disabled if | |
1307 | * - if the provider is not protected at all | |
1308 | * - if the calling consumer is the only one which has exclusivity | |
1309 | * over the provider | |
1310 | */ | |
1311 | if (clk_core_rate_is_protected(core)) { | |
1312 | req->rate = core->rate; | |
1313 | } else if (core->ops->determine_rate) { | |
1314 | return core->ops->determine_rate(core->hw, req); | |
1315 | } else if (core->ops->round_rate) { | |
1316 | rate = core->ops->round_rate(core->hw, req->rate, | |
1317 | &req->best_parent_rate); | |
1318 | if (rate < 0) | |
1319 | return rate; | |
1320 | ||
1321 | req->rate = rate; | |
1322 | } else { | |
1323 | return -EINVAL; | |
1324 | } | |
1325 | ||
1326 | return 0; | |
1327 | } | |
1328 | ||
1329 | static void clk_core_init_rate_req(struct clk_core * const core, | |
1330 | struct clk_rate_request *req) | |
1331 | { | |
1332 | struct clk_core *parent; | |
1333 | ||
1334 | if (WARN_ON(!core || !req)) | |
1335 | return; | |
1336 | ||
1337 | parent = core->parent; | |
1338 | if (parent) { | |
1339 | req->best_parent_hw = parent->hw; | |
1340 | req->best_parent_rate = parent->rate; | |
1341 | } else { | |
1342 | req->best_parent_hw = NULL; | |
1343 | req->best_parent_rate = 0; | |
1344 | } | |
1345 | } | |
1346 | ||
1347 | static bool clk_core_can_round(struct clk_core * const core) | |
1348 | { | |
1349 | return core->ops->determine_rate || core->ops->round_rate; | |
1350 | } | |
1351 | ||
1352 | static int clk_core_round_rate_nolock(struct clk_core *core, | |
1353 | struct clk_rate_request *req) | |
1354 | { | |
1355 | lockdep_assert_held(&prepare_lock); | |
1356 | ||
1357 | if (!core) { | |
1358 | req->rate = 0; | |
1359 | return 0; | |
1360 | } | |
1361 | ||
1362 | clk_core_init_rate_req(core, req); | |
1363 | ||
1364 | if (clk_core_can_round(core)) | |
1365 | return clk_core_determine_round_nolock(core, req); | |
1366 | else if (core->flags & CLK_SET_RATE_PARENT) | |
1367 | return clk_core_round_rate_nolock(core->parent, req); | |
1368 | ||
1369 | req->rate = core->rate; | |
1370 | return 0; | |
1371 | } | |
1372 | ||
1373 | /** | |
1374 | * __clk_determine_rate - get the closest rate actually supported by a clock | |
1375 | * @hw: determine the rate of this clock | |
1376 | * @req: target rate request | |
1377 | * | |
1378 | * Useful for clk_ops such as .set_rate and .determine_rate. | |
1379 | */ | |
1380 | int __clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req) | |
1381 | { | |
1382 | if (!hw) { | |
1383 | req->rate = 0; | |
1384 | return 0; | |
1385 | } | |
1386 | ||
1387 | return clk_core_round_rate_nolock(hw->core, req); | |
1388 | } | |
1389 | EXPORT_SYMBOL_GPL(__clk_determine_rate); | |
1390 | ||
1391 | unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate) | |
1392 | { | |
1393 | int ret; | |
1394 | struct clk_rate_request req; | |
1395 | ||
1396 | clk_core_get_boundaries(hw->core, &req.min_rate, &req.max_rate); | |
1397 | req.rate = rate; | |
1398 | ||
1399 | ret = clk_core_round_rate_nolock(hw->core, &req); | |
1400 | if (ret) | |
1401 | return 0; | |
1402 | ||
1403 | return req.rate; | |
1404 | } | |
1405 | EXPORT_SYMBOL_GPL(clk_hw_round_rate); | |
1406 | ||
1407 | /** | |
1408 | * clk_round_rate - round the given rate for a clk | |
1409 | * @clk: the clk for which we are rounding a rate | |
1410 | * @rate: the rate which is to be rounded | |
1411 | * | |
1412 | * Takes in a rate as input and rounds it to a rate that the clk can actually | |
1413 | * use which is then returned. If clk doesn't support round_rate operation | |
1414 | * then the parent rate is returned. | |
1415 | */ | |
1416 | long clk_round_rate(struct clk *clk, unsigned long rate) | |
1417 | { | |
1418 | struct clk_rate_request req; | |
1419 | int ret; | |
1420 | ||
1421 | if (!clk) | |
1422 | return 0; | |
1423 | ||
1424 | clk_prepare_lock(); | |
1425 | ||
1426 | if (clk->exclusive_count) | |
1427 | clk_core_rate_unprotect(clk->core); | |
1428 | ||
1429 | clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate); | |
1430 | req.rate = rate; | |
1431 | ||
1432 | ret = clk_core_round_rate_nolock(clk->core, &req); | |
1433 | ||
1434 | if (clk->exclusive_count) | |
1435 | clk_core_rate_protect(clk->core); | |
1436 | ||
1437 | clk_prepare_unlock(); | |
1438 | ||
1439 | if (ret) | |
1440 | return ret; | |
1441 | ||
1442 | return req.rate; | |
1443 | } | |
1444 | EXPORT_SYMBOL_GPL(clk_round_rate); | |
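
/*
 * Illustrative consumer pattern: probe what the clk would actually deliver
 * before committing to it (the 48 MHz target and rate_is_acceptable() are
 * hypothetical):
 *
 *	long rounded = clk_round_rate(clk, 48000000);
 *
 *	if (rounded > 0 && rate_is_acceptable(rounded))
 *		ret = clk_set_rate(clk, rounded);
 */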
1445 | ||
1446 | /** | |
1447 | * __clk_notify - call clk notifier chain | |
1448 | * @core: clk that is changing rate | |
1449 | * @msg: clk notifier type (see include/linux/clk.h) | |
1450 | * @old_rate: old clk rate | |
1451 | * @new_rate: new clk rate | |
1452 | * | |
1453 | * Triggers a notifier call chain on the clk rate-change notification | |
1454 | * for 'clk'. Passes a pointer to the struct clk and the previous | |
1455 | * and current rates to the notifier callback. Intended to be called by | |
1456 | * internal clock code only. Returns NOTIFY_DONE from the last driver | |
1457 | * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if | |
1458 | * a driver returns that. | |
1459 | */ | |
1460 | static int __clk_notify(struct clk_core *core, unsigned long msg, | |
1461 | unsigned long old_rate, unsigned long new_rate) | |
1462 | { | |
1463 | struct clk_notifier *cn; | |
1464 | struct clk_notifier_data cnd; | |
1465 | int ret = NOTIFY_DONE; | |
1466 | ||
1467 | cnd.old_rate = old_rate; | |
1468 | cnd.new_rate = new_rate; | |
1469 | ||
1470 | list_for_each_entry(cn, &clk_notifier_list, node) { | |
1471 | if (cn->clk->core == core) { | |
1472 | cnd.clk = cn->clk; | |
1473 | ret = srcu_notifier_call_chain(&cn->notifier_head, msg, | |
1474 | &cnd); | |
1475 | if (ret & NOTIFY_STOP_MASK) | |
1476 | return ret; | |
1477 | } | |
1478 | } | |
1479 | ||
1480 | return ret; | |
1481 | } | |
1482 | ||
1483 | /** | |
1484 | * __clk_recalc_accuracies | |
1485 | * @core: first clk in the subtree | |
1486 | * | |
1487 | * Walks the subtree of clks starting with clk and recalculates accuracies as | |
1488 | * it goes. Note that if a clk does not implement the .recalc_accuracy | |
1489 | * callback then it is assumed that the clock will take on the accuracy of its | |
1490 | * parent. | |
1491 | */ | |
1492 | static void __clk_recalc_accuracies(struct clk_core *core) | |
1493 | { | |
1494 | unsigned long parent_accuracy = 0; | |
1495 | struct clk_core *child; | |
1496 | ||
1497 | lockdep_assert_held(&prepare_lock); | |
1498 | ||
1499 | if (core->parent) | |
1500 | parent_accuracy = core->parent->accuracy; | |
1501 | ||
1502 | if (core->ops->recalc_accuracy) | |
1503 | core->accuracy = core->ops->recalc_accuracy(core->hw, | |
1504 | parent_accuracy); | |
1505 | else | |
1506 | core->accuracy = parent_accuracy; | |
1507 | ||
1508 | hlist_for_each_entry(child, &core->children, child_node) | |
1509 | __clk_recalc_accuracies(child); | |
1510 | } | |
1511 | ||
1512 | static long clk_core_get_accuracy(struct clk_core *core) | |
1513 | { | |
1514 | unsigned long accuracy; | |
1515 | ||
1516 | clk_prepare_lock(); | |
1517 | if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE)) | |
1518 | __clk_recalc_accuracies(core); | |
1519 | ||
1520 | accuracy = __clk_get_accuracy(core); | |
1521 | clk_prepare_unlock(); | |
1522 | ||
1523 | return accuracy; | |
1524 | } | |
1525 | ||
1526 | /** | |
1527 | * clk_get_accuracy - return the accuracy of clk | |
1528 | * @clk: the clk whose accuracy is being returned | |
1529 | * | |
1530 | * Simply returns the cached accuracy of the clk, unless | |
1531 | * CLK_GET_ACCURACY_NOCACHE flag is set, which means a recalc_rate will be | |
1532 | * issued. | |
1533 | * If clk is NULL then returns 0. | |
1534 | */ | |
1535 | long clk_get_accuracy(struct clk *clk) | |
1536 | { | |
1537 | if (!clk) | |
1538 | return 0; | |
1539 | ||
1540 | return clk_core_get_accuracy(clk->core); | |
1541 | } | |
1542 | EXPORT_SYMBOL_GPL(clk_get_accuracy); | |
1543 | ||
1544 | static unsigned long clk_recalc(struct clk_core *core, | |
1545 | unsigned long parent_rate) | |
1546 | { | |
1547 | unsigned long rate = parent_rate; | |
1548 | ||
1549 | if (core->ops->recalc_rate && !clk_pm_runtime_get(core)) { | |
1550 | rate = core->ops->recalc_rate(core->hw, parent_rate); | |
1551 | clk_pm_runtime_put(core); | |
1552 | } | |
1553 | return rate; | |
1554 | } | |
1555 | ||
1556 | /** | |
1557 | * __clk_recalc_rates | |
1558 | * @core: first clk in the subtree | |
1559 | * @msg: notification type (see include/linux/clk.h) | |
1560 | * | |
1561 | * Walks the subtree of clks starting with clk and recalculates rates as it | |
1562 | * goes. Note that if a clk does not implement the .recalc_rate callback then | |
1563 | * it is assumed that the clock will take on the rate of its parent. | |
1564 | * | |
1565 | * clk_recalc_rates also propagates the POST_RATE_CHANGE notification, | |
1566 | * if necessary. | |
1567 | */ | |
1568 | static void __clk_recalc_rates(struct clk_core *core, unsigned long msg) | |
1569 | { | |
1570 | unsigned long old_rate; | |
1571 | unsigned long parent_rate = 0; | |
1572 | struct clk_core *child; | |
1573 | ||
1574 | lockdep_assert_held(&prepare_lock); | |
1575 | ||
1576 | old_rate = core->rate; | |
1577 | ||
1578 | if (core->parent) | |
1579 | parent_rate = core->parent->rate; | |
1580 | ||
1581 | core->rate = clk_recalc(core, parent_rate); | |
1582 | ||
1583 | /* | |
1584 | * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE | |
1585 | * & ABORT_RATE_CHANGE notifiers | |
1586 | */ | |
1587 | if (core->notifier_count && msg) | |
1588 | __clk_notify(core, msg, old_rate, core->rate); | |
1589 | ||
1590 | hlist_for_each_entry(child, &core->children, child_node) | |
1591 | __clk_recalc_rates(child, msg); | |
1592 | } | |
1593 | ||
1594 | static unsigned long clk_core_get_rate(struct clk_core *core) | |
1595 | { | |
1596 | unsigned long rate; | |
1597 | ||
1598 | clk_prepare_lock(); | |
1599 | ||
1600 | if (core && (core->flags & CLK_GET_RATE_NOCACHE)) | |
1601 | __clk_recalc_rates(core, 0); | |
1602 | ||
1603 | rate = clk_core_get_rate_nolock(core); | |
1604 | clk_prepare_unlock(); | |
1605 | ||
1606 | return rate; | |
1607 | } | |
1608 | ||
1609 | /** | |
1610 | * clk_get_rate - return the rate of clk | |
1611 | * @clk: the clk whose rate is being returned | |
1612 | * | |
1613 | * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag | |
1614 | * is set, which means a recalc_rate will be issued. | |
1615 | * If clk is NULL then returns 0. | |
1616 | */ | |
1617 | unsigned long clk_get_rate(struct clk *clk) | |
1618 | { | |
1619 | if (!clk) | |
1620 | return 0; | |
1621 | ||
1622 | return clk_core_get_rate(clk->core); | |
1623 | } | |
1624 | EXPORT_SYMBOL_GPL(clk_get_rate); | |
1625 | ||
1626 | static int clk_fetch_parent_index(struct clk_core *core, | |
1627 | struct clk_core *parent) | |
1628 | { | |
1629 | int i; | |
1630 | ||
1631 | if (!parent) | |
1632 | return -EINVAL; | |
1633 | ||
1634 | for (i = 0; i < core->num_parents; i++) { | |
1635 | /* Found it first try! */ | |
1636 | if (core->parents[i].core == parent) | |
1637 | return i; | |
1638 | ||
1639 | /* Something else is here, so keep looking */ | |
1640 | if (core->parents[i].core) | |
1641 | continue; | |
1642 | ||
1643 | /* Maybe core hasn't been cached but the hw is all we know? */ | |
1644 | if (core->parents[i].hw) { | |
1645 | if (core->parents[i].hw == parent->hw) | |
1646 | break; | |
1647 | ||
1648 | /* Didn't match, but we're expecting a clk_hw */ | |
1649 | continue; | |
1650 | } | |
1651 | ||
1652 | /* Maybe it hasn't been cached (clk_set_parent() path) */ | |
1653 | if (parent == clk_core_get(core, i)) | |
1654 | break; | |
1655 | ||
1656 | /* Fallback to comparing globally unique names */ | |
1657 | if (core->parents[i].name && | |
1658 | !strcmp(parent->name, core->parents[i].name)) | |
1659 | break; | |
1660 | } | |
1661 | ||
1662 | if (i == core->num_parents) | |
1663 | return -EINVAL; | |
1664 | ||
1665 | core->parents[i].core = parent; | |
1666 | return i; | |
1667 | } | |
1668 | ||
1669 | /* | |
1670 | * Update the orphan status of @core and all its children. | |
1671 | */ | |
1672 | static void clk_core_update_orphan_status(struct clk_core *core, bool is_orphan) | |
1673 | { | |
1674 | struct clk_core *child; | |
1675 | ||
1676 | core->orphan = is_orphan; | |
1677 | ||
1678 | hlist_for_each_entry(child, &core->children, child_node) | |
1679 | clk_core_update_orphan_status(child, is_orphan); | |
1680 | } | |
1681 | ||
1682 | static void clk_reparent(struct clk_core *core, struct clk_core *new_parent) | |
1683 | { | |
1684 | bool was_orphan = core->orphan; | |
1685 | ||
1686 | hlist_del(&core->child_node); | |
1687 | ||
1688 | if (new_parent) { | |
1689 | bool becomes_orphan = new_parent->orphan; | |
1690 | ||
1691 | /* avoid duplicate POST_RATE_CHANGE notifications */ | |
1692 | if (new_parent->new_child == core) | |
1693 | new_parent->new_child = NULL; | |
1694 | ||
1695 | hlist_add_head(&core->child_node, &new_parent->children); | |
1696 | ||
1697 | if (was_orphan != becomes_orphan) | |
1698 | clk_core_update_orphan_status(core, becomes_orphan); | |
1699 | } else { | |
1700 | hlist_add_head(&core->child_node, &clk_orphan_list); | |
1701 | if (!was_orphan) | |
1702 | clk_core_update_orphan_status(core, true); | |
1703 | } | |
1704 | ||
1705 | core->parent = new_parent; | |
1706 | } | |
1707 | ||
1708 | static struct clk_core *__clk_set_parent_before(struct clk_core *core, | |
1709 | struct clk_core *parent) | |
1710 | { | |
1711 | unsigned long flags; | |
1712 | struct clk_core *old_parent = core->parent; | |
1713 | ||
1714 | /* | |
1715 | * 1. enable parents for CLK_OPS_PARENT_ENABLE clock | |
1716 | * | |
1717 | * 2. Migrate prepare state between parents and prevent race with | |
1718 | * clk_enable(). | |
1719 | * | |
1720 | * If the clock is not prepared, then a race with | |
1721 | * clk_enable/disable() is impossible since we already have the | |
1722 | * prepare lock (future calls to clk_enable() need to be preceded by | |
1723 | * a clk_prepare()). | |
1724 | * | |
1725 | * If the clock is prepared, migrate the prepared state to the new | |
1726 | * parent and also protect against a race with clk_enable() by | |
1727 | * forcing the clock and the new parent on. This ensures that all | |
1728 | * future calls to clk_enable() are practically NOPs with respect to | |
1729 | * hardware and software states. | |
1730 | * | |
1731 | * See also: Comment for clk_set_parent() below. | |
1732 | */ | |
1733 | ||
1734 | /* enable old_parent & parent if CLK_OPS_PARENT_ENABLE is set */ | |
1735 | if (core->flags & CLK_OPS_PARENT_ENABLE) { | |
1736 | clk_core_prepare_enable(old_parent); | |
1737 | clk_core_prepare_enable(parent); | |
1738 | } | |
1739 | ||
1740 | /* migrate prepare count if > 0 */ | |
1741 | if (core->prepare_count) { | |
1742 | clk_core_prepare_enable(parent); | |
1743 | clk_core_enable_lock(core); | |
1744 | } | |
1745 | ||
1746 | /* update the clk tree topology */ | |
1747 | flags = clk_enable_lock(); | |
1748 | clk_reparent(core, parent); | |
1749 | clk_enable_unlock(flags); | |
1750 | ||
1751 | return old_parent; | |
1752 | } | |
1753 | ||
1754 | static void __clk_set_parent_after(struct clk_core *core, | |
1755 | struct clk_core *parent, | |
1756 | struct clk_core *old_parent) | |
1757 | { | |
1758 | /* | |
1759 | * Finish the migration of prepare state and undo the changes done | |
1760 | * for preventing a race with clk_enable(). | |
1761 | */ | |
1762 | if (core->prepare_count) { | |
1763 | clk_core_disable_lock(core); | |
1764 | clk_core_disable_unprepare(old_parent); | |
1765 | } | |
1766 | ||
1767 | /* re-balance ref counting if CLK_OPS_PARENT_ENABLE is set */ | |
1768 | if (core->flags & CLK_OPS_PARENT_ENABLE) { | |
1769 | clk_core_disable_unprepare(parent); | |
1770 | clk_core_disable_unprepare(old_parent); | |
1771 | } | |
1772 | } | |
1773 | ||
1774 | static int __clk_set_parent(struct clk_core *core, struct clk_core *parent, | |
1775 | u8 p_index) | |
1776 | { | |
1777 | unsigned long flags; | |
1778 | int ret = 0; | |
1779 | struct clk_core *old_parent; | |
1780 | ||
1781 | old_parent = __clk_set_parent_before(core, parent); | |
1782 | ||
1783 | trace_clk_set_parent(core, parent); | |
1784 | ||
1785 | /* change clock input source */ | |
1786 | if (parent && core->ops->set_parent) | |
1787 | ret = core->ops->set_parent(core->hw, p_index); | |
1788 | ||
1789 | trace_clk_set_parent_complete(core, parent); | |
1790 | ||
1791 | if (ret) { | |
1792 | flags = clk_enable_lock(); | |
1793 | clk_reparent(core, old_parent); | |
1794 | clk_enable_unlock(flags); | |
1795 | __clk_set_parent_after(core, old_parent, parent); | |
1796 | ||
1797 | return ret; | |
1798 | } | |
1799 | ||
1800 | __clk_set_parent_after(core, parent, old_parent); | |
1801 | ||
1802 | return 0; | |
1803 | } | |
1804 | ||
1805 | /** | |
1806 | * __clk_speculate_rates | |
1807 | * @core: first clk in the subtree | |
1808 | * @parent_rate: the "future" rate of clk's parent | |
1809 | * | |
1810 | * Walks the subtree of clks starting with clk, speculating rates as it | |
1811 | * goes and firing off PRE_RATE_CHANGE notifications as necessary. | |
1812 | * | |
1813 | * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending | |
1814 | * pre-rate change notifications and returns early if no clks in the | |
1815 | * subtree have subscribed to the notifications. Note that if a clk does not | |
1816 | * implement the .recalc_rate callback then it is assumed that the clock will | |
1817 | * take on the rate of its parent. | |
1818 | */ | |
1819 | static int __clk_speculate_rates(struct clk_core *core, | |
1820 | unsigned long parent_rate) | |
1821 | { | |
1822 | struct clk_core *child; | |
1823 | unsigned long new_rate; | |
1824 | int ret = NOTIFY_DONE; | |
1825 | ||
1826 | lockdep_assert_held(&prepare_lock); | |
1827 | ||
1828 | new_rate = clk_recalc(core, parent_rate); | |
1829 | ||
1830 | /* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */ | |
1831 | if (core->notifier_count) | |
1832 | ret = __clk_notify(core, PRE_RATE_CHANGE, core->rate, new_rate); | |
1833 | ||
1834 | if (ret & NOTIFY_STOP_MASK) { | |
1835 | pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n", | |
1836 | __func__, core->name, ret); | |
1837 | goto out; | |
1838 | } | |
1839 | ||
1840 | hlist_for_each_entry(child, &core->children, child_node) { | |
1841 | ret = __clk_speculate_rates(child, new_rate); | |
1842 | if (ret & NOTIFY_STOP_MASK) | |
1843 | break; | |
1844 | } | |
1845 | ||
1846 | out: | |
1847 | return ret; | |
1848 | } | |
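 | /* | |
 | * The speculation above is what gives consumer notifiers a chance to veto | |
 | * a rate change before any hardware is touched: returning NOTIFY_BAD from | |
 | * a PRE_RATE_CHANGE callback aborts the change. A minimal sketch of such | |
 | * a notifier, registered with clk_notifier_register(clk, &nb); the 1 GHz | |
 | * limit and all names are hypothetical: | |
 | * | |
 | *	static int my_rate_limit_notify(struct notifier_block *nb, | |
 | *					unsigned long event, void *data) | |
 | *	{ | |
 | *		struct clk_notifier_data *ndata = data; | |
 | * | |
 | *		if (event == PRE_RATE_CHANGE && ndata->new_rate > 1000000000UL) | |
 | *			return NOTIFY_BAD; | |
 | * | |
 | *		return NOTIFY_OK; | |
 | *	} | |
 | */ | |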
1849 | ||
1850 | static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate, | |
1851 | struct clk_core *new_parent, u8 p_index) | |
1852 | { | |
1853 | struct clk_core *child; | |
1854 | ||
1855 | core->new_rate = new_rate; | |
1856 | core->new_parent = new_parent; | |
1857 | core->new_parent_index = p_index; | |
1858 | /* include clk in new parent's PRE_RATE_CHANGE notifications */ | |
1859 | core->new_child = NULL; | |
1860 | if (new_parent && new_parent != core->parent) | |
1861 | new_parent->new_child = core; | |
1862 | ||
1863 | hlist_for_each_entry(child, &core->children, child_node) { | |
1864 | child->new_rate = clk_recalc(child, new_rate); | |
1865 | clk_calc_subtree(child, child->new_rate, NULL, 0); | |
1866 | } | |
1867 | } | |
1868 | ||
1869 | /* | |
1870 | * calculate the new rates returning the topmost clock that has to be | |
1871 | * changed. | |
1872 | */ | |
1873 | static struct clk_core *clk_calc_new_rates(struct clk_core *core, | |
1874 | unsigned long rate) | |
1875 | { | |
1876 | struct clk_core *top = core; | |
1877 | struct clk_core *old_parent, *parent; | |
1878 | unsigned long best_parent_rate = 0; | |
1879 | unsigned long new_rate; | |
1880 | unsigned long min_rate; | |
1881 | unsigned long max_rate; | |
1882 | int p_index = 0; | |
1883 | long ret; | |
1884 | ||
1885 | /* sanity */ | |
1886 | if (IS_ERR_OR_NULL(core)) | |
1887 | return NULL; | |
1888 | ||
1889 | /* save parent rate, if it exists */ | |
1890 | parent = old_parent = core->parent; | |
1891 | if (parent) | |
1892 | best_parent_rate = parent->rate; | |
1893 | ||
1894 | clk_core_get_boundaries(core, &min_rate, &max_rate); | |
1895 | ||
1896 | /* find the closest rate and parent clk/rate */ | |
1897 | if (clk_core_can_round(core)) { | |
1898 | struct clk_rate_request req; | |
1899 | ||
1900 | req.rate = rate; | |
1901 | req.min_rate = min_rate; | |
1902 | req.max_rate = max_rate; | |
1903 | ||
1904 | clk_core_init_rate_req(core, &req); | |
1905 | ||
1906 | ret = clk_core_determine_round_nolock(core, &req); | |
1907 | if (ret < 0) | |
1908 | return NULL; | |
1909 | ||
1910 | best_parent_rate = req.best_parent_rate; | |
1911 | new_rate = req.rate; | |
1912 | parent = req.best_parent_hw ? req.best_parent_hw->core : NULL; | |
1913 | ||
1914 | if (new_rate < min_rate || new_rate > max_rate) | |
1915 | return NULL; | |
1916 | } else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) { | |
1917 | /* pass-through clock without adjustable parent */ | |
1918 | core->new_rate = core->rate; | |
1919 | return NULL; | |
1920 | } else { | |
1921 | /* pass-through clock with adjustable parent */ | |
1922 | top = clk_calc_new_rates(parent, rate); | |
1923 | new_rate = parent->new_rate; | |
1924 | goto out; | |
1925 | } | |
1926 | ||
1927 | /* some clocks must be gated to change parent */ | |
1928 | if (parent != old_parent && | |
1929 | (core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) { | |
1930 | pr_debug("%s: %s not gated but wants to reparent\n", | |
1931 | __func__, core->name); | |
1932 | return NULL; | |
1933 | } | |
1934 | ||
1935 | /* try finding the new parent index */ | |
1936 | if (parent && core->num_parents > 1) { | |
1937 | p_index = clk_fetch_parent_index(core, parent); | |
1938 | if (p_index < 0) { | |
1939 | pr_debug("%s: clk %s can not be parent of clk %s\n", | |
1940 | __func__, parent->name, core->name); | |
1941 | return NULL; | |
1942 | } | |
1943 | } | |
1944 | ||
1945 | if ((core->flags & CLK_SET_RATE_PARENT) && parent && | |
1946 | best_parent_rate != parent->rate) | |
1947 | top = clk_calc_new_rates(parent, best_parent_rate); | |
1948 | ||
1949 | out: | |
1950 | clk_calc_subtree(core, new_rate, parent, p_index); | |
1951 | ||
1952 | return top; | |
1953 | } | |
1954 | ||
1955 | /* | |
1956 | * Notify about rate changes in a subtree. Always walk down the whole tree | |
1957 | * so that in case of an error we can walk down the whole tree again and | |
1958 | * abort the change. | |
1959 | */ | |
1960 | static struct clk_core *clk_propagate_rate_change(struct clk_core *core, | |
1961 | unsigned long event) | |
1962 | { | |
1963 | struct clk_core *child, *tmp_clk, *fail_clk = NULL; | |
1964 | int ret = NOTIFY_DONE; | |
1965 | ||
1966 | if (core->rate == core->new_rate) | |
1967 | return NULL; | |
1968 | ||
1969 | if (core->notifier_count) { | |
1970 | ret = __clk_notify(core, event, core->rate, core->new_rate); | |
1971 | if (ret & NOTIFY_STOP_MASK) | |
1972 | fail_clk = core; | |
1973 | } | |
1974 | ||
1975 | hlist_for_each_entry(child, &core->children, child_node) { | |
1976 | /* Skip children who will be reparented to another clock */ | |
1977 | if (child->new_parent && child->new_parent != core) | |
1978 | continue; | |
1979 | tmp_clk = clk_propagate_rate_change(child, event); | |
1980 | if (tmp_clk) | |
1981 | fail_clk = tmp_clk; | |
1982 | } | |
1983 | ||
1984 | /* handle the new child who might not be in core->children yet */ | |
1985 | if (core->new_child) { | |
1986 | tmp_clk = clk_propagate_rate_change(core->new_child, event); | |
1987 | if (tmp_clk) | |
1988 | fail_clk = tmp_clk; | |
1989 | } | |
1990 | ||
1991 | return fail_clk; | |
1992 | } | |
1993 | ||
1994 | /* | |
1995 | * walk down a subtree and set the new rates notifying the rate | |
1996 | * change on the way | |
1997 | */ | |
1998 | static void clk_change_rate(struct clk_core *core) | |
1999 | { | |
2000 | struct clk_core *child; | |
2001 | struct hlist_node *tmp; | |
2002 | unsigned long old_rate; | |
2003 | unsigned long best_parent_rate = 0; | |
2004 | bool skip_set_rate = false; | |
2005 | struct clk_core *old_parent; | |
2006 | struct clk_core *parent = NULL; | |
2007 | ||
2008 | old_rate = core->rate; | |
2009 | ||
2010 | if (core->new_parent) { | |
2011 | parent = core->new_parent; | |
2012 | best_parent_rate = core->new_parent->rate; | |
2013 | } else if (core->parent) { | |
2014 | parent = core->parent; | |
2015 | best_parent_rate = core->parent->rate; | |
2016 | } | |
2017 | ||
2018 | if (clk_pm_runtime_get(core)) | |
2019 | return; | |
2020 | ||
2021 | if (core->flags & CLK_SET_RATE_UNGATE) { | |
2022 | unsigned long flags; | |
2023 | ||
2024 | clk_core_prepare(core); | |
2025 | flags = clk_enable_lock(); | |
2026 | clk_core_enable(core); | |
2027 | clk_enable_unlock(flags); | |
2028 | } | |
2029 | ||
2030 | if (core->new_parent && core->new_parent != core->parent) { | |
2031 | old_parent = __clk_set_parent_before(core, core->new_parent); | |
2032 | trace_clk_set_parent(core, core->new_parent); | |
2033 | ||
2034 | if (core->ops->set_rate_and_parent) { | |
2035 | skip_set_rate = true; | |
2036 | core->ops->set_rate_and_parent(core->hw, core->new_rate, | |
2037 | best_parent_rate, | |
2038 | core->new_parent_index); | |
2039 | } else if (core->ops->set_parent) { | |
2040 | core->ops->set_parent(core->hw, core->new_parent_index); | |
2041 | } | |
2042 | ||
2043 | trace_clk_set_parent_complete(core, core->new_parent); | |
2044 | __clk_set_parent_after(core, core->new_parent, old_parent); | |
2045 | } | |
2046 | ||
2047 | if (core->flags & CLK_OPS_PARENT_ENABLE) | |
2048 | clk_core_prepare_enable(parent); | |
2049 | ||
2050 | trace_clk_set_rate(core, core->new_rate); | |
2051 | ||
2052 | if (!skip_set_rate && core->ops->set_rate) | |
2053 | core->ops->set_rate(core->hw, core->new_rate, best_parent_rate); | |
2054 | ||
2055 | trace_clk_set_rate_complete(core, core->new_rate); | |
2056 | ||
2057 | core->rate = clk_recalc(core, best_parent_rate); | |
2058 | ||
2059 | if (core->flags & CLK_SET_RATE_UNGATE) { | |
2060 | unsigned long flags; | |
2061 | ||
2062 | flags = clk_enable_lock(); | |
2063 | clk_core_disable(core); | |
2064 | clk_enable_unlock(flags); | |
2065 | clk_core_unprepare(core); | |
2066 | } | |
2067 | ||
2068 | if (core->flags & CLK_OPS_PARENT_ENABLE) | |
2069 | clk_core_disable_unprepare(parent); | |
2070 | ||
2071 | if (core->notifier_count && old_rate != core->rate) | |
2072 | __clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate); | |
2073 | ||
2074 | if (core->flags & CLK_RECALC_NEW_RATES) | |
2075 | (void)clk_calc_new_rates(core, core->new_rate); | |
2076 | ||
2077 | /* | |
2078 | * Use safe iteration, as change_rate can actually swap parents | |
2079 | * for certain clock types. | |
2080 | */ | |
2081 | hlist_for_each_entry_safe(child, tmp, &core->children, child_node) { | |
2082 | /* Skip children who will be reparented to another clock */ | |
2083 | if (child->new_parent && child->new_parent != core) | |
2084 | continue; | |
2085 | clk_change_rate(child); | |
2086 | } | |
2087 | ||
2088 | /* handle the new child who might not be in core->children yet */ | |
2089 | if (core->new_child) | |
2090 | clk_change_rate(core->new_child); | |
2091 | ||
2092 | clk_pm_runtime_put(core); | |
2093 | } | |
2094 | ||
2095 | static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core, | |
2096 | unsigned long req_rate) | |
2097 | { | |
2098 | int ret, cnt; | |
2099 | struct clk_rate_request req; | |
2100 | ||
2101 | lockdep_assert_held(&prepare_lock); | |
2102 | ||
2103 | if (!core) | |
2104 | return 0; | |
2105 | ||
2106 | /* simulate what the rate would be if it could be freely set */ | |
2107 | cnt = clk_core_rate_nuke_protect(core); | |
2108 | if (cnt < 0) | |
2109 | return cnt; | |
2110 | ||
2111 | clk_core_get_boundaries(core, &req.min_rate, &req.max_rate); | |
2112 | req.rate = req_rate; | |
2113 | ||
2114 | ret = clk_core_round_rate_nolock(core, &req); | |
2115 | ||
2116 | /* restore the protection */ | |
2117 | clk_core_rate_restore_protect(core, cnt); | |
2118 | ||
2119 | return ret ? 0 : req.rate; | |
2120 | } | |
2121 | ||
2122 | static int clk_core_set_rate_nolock(struct clk_core *core, | |
2123 | unsigned long req_rate) | |
2124 | { | |
2125 | struct clk_core *top, *fail_clk; | |
2126 | unsigned long rate; | |
2127 | int ret = 0; | |
2128 | ||
2129 | if (!core) | |
2130 | return 0; | |
2131 | ||
2132 | rate = clk_core_req_round_rate_nolock(core, req_rate); | |
2133 | ||
2134 | /* bail early if nothing to do */ | |
2135 | if (rate == clk_core_get_rate_nolock(core)) | |
2136 | return 0; | |
2137 | ||
2138 | /* fail on a direct rate set of a protected provider */ | |
2139 | if (clk_core_rate_is_protected(core)) | |
2140 | return -EBUSY; | |
2141 | ||
2142 | /* calculate new rates and get the topmost changed clock */ | |
2143 | top = clk_calc_new_rates(core, req_rate); | |
2144 | if (!top) | |
2145 | return -EINVAL; | |
2146 | ||
2147 | ret = clk_pm_runtime_get(core); | |
2148 | if (ret) | |
2149 | return ret; | |
2150 | ||
2151 | /* notify that we are about to change rates */ | |
2152 | fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE); | |
2153 | if (fail_clk) { | |
2154 | pr_debug("%s: failed to set %s rate\n", __func__, | |
2155 | fail_clk->name); | |
2156 | clk_propagate_rate_change(top, ABORT_RATE_CHANGE); | |
2157 | ret = -EBUSY; | |
2158 | goto err; | |
2159 | } | |
2160 | ||
2161 | /* change the rates */ | |
2162 | clk_change_rate(top); | |
2163 | ||
2164 | core->req_rate = req_rate; | |
2165 | err: | |
2166 | clk_pm_runtime_put(core); | |
2167 | ||
2168 | return ret; | |
2169 | } | |
2170 | ||
2171 | /** | |
2172 | * clk_set_rate - specify a new rate for clk | |
2173 | * @clk: the clk whose rate is being changed | |
2174 | * @rate: the new rate for clk | |
2175 | * | |
2176 | * In the simplest case clk_set_rate will only adjust the rate of clk. | |
2177 | * | |
2178 | * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to | |
2179 | * propagate up to clk's parent; whether or not this happens depends on the | |
2180 | * outcome of clk's .round_rate implementation. If *parent_rate is unchanged | |
2181 | * after calling .round_rate then upstream parent propagation is ignored. If | |
2182 | * *parent_rate comes back with a new rate for clk's parent then we propagate | |
2183 | * up to clk's parent and set its rate. Upward propagation will continue | |
2184 | * until either a clk does not support the CLK_SET_RATE_PARENT flag or | |
2185 | * .round_rate stops requesting changes to clk's parent_rate. | |
2186 | * | |
2187 | * Rate changes are accomplished via tree traversal that also recalculates the | |
2188 | * rates for the clocks and fires off POST_RATE_CHANGE notifiers. | |
2189 | * | |
2190 | * Returns 0 on success, a negative error code otherwise. | |
2191 | */ | |
2192 | int clk_set_rate(struct clk *clk, unsigned long rate) | |
2193 | { | |
2194 | int ret; | |
2195 | ||
2196 | if (!clk) | |
2197 | return 0; | |
2198 | ||
2199 | /* prevent racing with updates to the clock topology */ | |
2200 | clk_prepare_lock(); | |
2201 | ||
2202 | if (clk->exclusive_count) | |
2203 | clk_core_rate_unprotect(clk->core); | |
2204 | ||
2205 | ret = clk_core_set_rate_nolock(clk->core, rate); | |
2206 | ||
2207 | if (clk->exclusive_count) | |
2208 | clk_core_rate_protect(clk->core); | |
2209 | ||
2210 | clk_prepare_unlock(); | |
2211 | ||
2212 | return ret; | |
2213 | } | |
2214 | EXPORT_SYMBOL_GPL(clk_set_rate); | |
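 | /* | |
 | * Example (illustrative sketch): negotiating a rate before committing to | |
 | * it. clk_round_rate() reports what clk_set_rate() would deliver without | |
 | * touching the hardware; the 48 MHz target is hypothetical. | |
 | * | |
 | *	long rounded = clk_round_rate(clk, 48000000); | |
 | * | |
 | *	if (rounded > 0) | |
 | *		ret = clk_set_rate(clk, rounded); | |
 | */ | |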
2215 | ||
2216 | /** | |
2217 | * clk_set_rate_exclusive - specify a new rate and get exclusive control | |
2218 | * @clk: the clk whose rate is being changed | |
2219 | * @rate: the new rate for clk | |
2220 | * | |
2221 | * This is a combination of clk_set_rate() and clk_rate_exclusive_get() | |
2222 | * within a critical section | |
2223 | * | |
2224 | * This can be used initially to ensure that at least one consumer is | |
2225 | * satisfied when several consumers are competing for exclusivity over the | |
2226 | * same clock provider. | |
2227 | * | |
2228 | * The exclusivity is not applied if setting the rate failed. | |
2229 | * | |
2230 | * Calls to clk_rate_exclusive_get() should be balanced with calls to | |
2231 | * clk_rate_exclusive_put(). | |
2232 | * | |
2233 | * Returns 0 on success, a negative error code otherwise. | |
2234 | */ | |
2235 | int clk_set_rate_exclusive(struct clk *clk, unsigned long rate) | |
2236 | { | |
2237 | int ret; | |
2238 | ||
2239 | if (!clk) | |
2240 | return 0; | |
2241 | ||
2242 | /* prevent racing with updates to the clock topology */ | |
2243 | clk_prepare_lock(); | |
2244 | ||
2245 | /* | |
2246 | * The temporary protection removal is deliberately absent here: this | |
2247 | * function is meant to be used instead of clk_rate_protect, so the | |
2248 | * clock provider is protected before the consumer code path runs. | |
2249 | */ | |
2250 | ||
2251 | ret = clk_core_set_rate_nolock(clk->core, rate); | |
2252 | if (!ret) { | |
2253 | clk_core_rate_protect(clk->core); | |
2254 | clk->exclusive_count++; | |
2255 | } | |
2256 | ||
2257 | clk_prepare_unlock(); | |
2258 | ||
2259 | return ret; | |
2260 | } | |
2261 | EXPORT_SYMBOL_GPL(clk_set_rate_exclusive); | |
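 | /* | |
 | * Example (illustrative sketch): pinning an audio master clock for the | |
 | * lifetime of a stream; the mclk handle and the rate are hypothetical. | |
 | * While the stream runs, no other consumer can change mclk's rate; the | |
 | * pin is dropped with clk_rate_exclusive_put() when the stream stops. | |
 | * | |
 | *	ret = clk_set_rate_exclusive(mclk, 24576000); | |
 | *	if (ret) | |
 | *		return ret; | |
 | */ | |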
2262 | ||
2263 | /** | |
2264 | * clk_set_rate_range - set a rate range for a clock source | |
2265 | * @clk: clock source | |
2266 | * @min: desired minimum clock rate in Hz, inclusive | |
2267 | * @max: desired maximum clock rate in Hz, inclusive | |
2268 | * | |
2269 | * Returns success (0) or negative errno. | |
2270 | */ | |
2271 | int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max) | |
2272 | { | |
2273 | int ret = 0; | |
2274 | unsigned long old_min, old_max, rate; | |
2275 | ||
2276 | if (!clk) | |
2277 | return 0; | |
2278 | ||
2279 | if (min > max) { | |
2280 | pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n", | |
2281 | __func__, clk->core->name, clk->dev_id, clk->con_id, | |
2282 | min, max); | |
2283 | return -EINVAL; | |
2284 | } | |
2285 | ||
2286 | clk_prepare_lock(); | |
2287 | ||
2288 | if (clk->exclusive_count) | |
2289 | clk_core_rate_unprotect(clk->core); | |
2290 | ||
2291 | /* Save the current values in case we need to rollback the change */ | |
2292 | old_min = clk->min_rate; | |
2293 | old_max = clk->max_rate; | |
2294 | clk->min_rate = min; | |
2295 | clk->max_rate = max; | |
2296 | ||
2297 | rate = clk_core_get_rate_nolock(clk->core); | |
2298 | if (rate < min || rate > max) { | |
2299 | /* | |
2300 | * FIXME: | |
2301 | * We are in a bit of trouble here: the current rate is outside | |
2302 | * the requested range. We will try to request an appropriate | |
2303 | * range boundary, but there is a catch. It may fail for the | |
2304 | * usual reasons (clock broken, clock protected, etc) but also | |
2305 | * because: | |
2306 | * - round_rate() was not favorable and fell on the wrong | |
2307 | * side of the boundary | |
2308 | * - the determine_rate() callback does not really check for | |
2309 | * this corner case when determining the rate | |
2310 | */ | |
2311 | ||
2312 | if (rate < min) | |
2313 | rate = min; | |
2314 | else | |
2315 | rate = max; | |
2316 | ||
2317 | ret = clk_core_set_rate_nolock(clk->core, rate); | |
2318 | if (ret) { | |
2319 | /* rollback the changes */ | |
2320 | clk->min_rate = old_min; | |
2321 | clk->max_rate = old_max; | |
2322 | } | |
2323 | } | |
2324 | ||
2325 | if (clk->exclusive_count) | |
2326 | clk_core_rate_protect(clk->core); | |
2327 | ||
2328 | clk_prepare_unlock(); | |
2329 | ||
2330 | return ret; | |
2331 | } | |
2332 | EXPORT_SYMBOL_GPL(clk_set_rate_range); | |
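 | /* | |
 | * Example (illustrative sketch): constraining a clock to a safe operating | |
 | * window; the gpu_clk handle and both bounds are hypothetical. | |
 | * | |
 | *	ret = clk_set_rate_range(gpu_clk, 200000000, 800000000); | |
 | */ | |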
2333 | ||
2334 | /** | |
2335 | * clk_set_min_rate - set a minimum clock rate for a clock source | |
2336 | * @clk: clock source | |
2337 | * @rate: desired minimum clock rate in Hz, inclusive | |
2338 | * | |
2339 | * Returns success (0) or negative errno. | |
2340 | */ | |
2341 | int clk_set_min_rate(struct clk *clk, unsigned long rate) | |
2342 | { | |
2343 | if (!clk) | |
2344 | return 0; | |
2345 | ||
2346 | return clk_set_rate_range(clk, rate, clk->max_rate); | |
2347 | } | |
2348 | EXPORT_SYMBOL_GPL(clk_set_min_rate); | |
2349 | ||
2350 | /** | |
2351 | * clk_set_max_rate - set a maximum clock rate for a clock source | |
2352 | * @clk: clock source | |
2353 | * @rate: desired maximum clock rate in Hz, inclusive | |
2354 | * | |
2355 | * Returns success (0) or negative errno. | |
2356 | */ | |
2357 | int clk_set_max_rate(struct clk *clk, unsigned long rate) | |
2358 | { | |
2359 | if (!clk) | |
2360 | return 0; | |
2361 | ||
2362 | return clk_set_rate_range(clk, clk->min_rate, rate); | |
2363 | } | |
2364 | EXPORT_SYMBOL_GPL(clk_set_max_rate); | |
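 | /* | |
 | * Example (illustrative sketch): the two wrappers above adjust one bound | |
 | * and keep the other, so a thermal driver might cap a clock while leaving | |
 | * its floor untouched; the cpu_clk handle and the cap are hypothetical. | |
 | * | |
 | *	ret = clk_set_max_rate(cpu_clk, 400000000); | |
 | */ | |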
2365 | ||
2366 | /** | |
2367 | * clk_get_parent - return the parent of a clk | |
2368 | * @clk: the clk whose parent gets returned | |
2369 | * | |
2370 | * Simply returns clk->parent. Returns NULL if clk is NULL. | |
2371 | */ | |
2372 | struct clk *clk_get_parent(struct clk *clk) | |
2373 | { | |
2374 | struct clk *parent; | |
2375 | ||
2376 | if (!clk) | |
2377 | return NULL; | |
2378 | ||
2379 | clk_prepare_lock(); | |
2380 | /* TODO: Create a per-user clk and change callers to call clk_put */ | |
2381 | parent = !clk->core->parent ? NULL : clk->core->parent->hw->clk; | |
2382 | clk_prepare_unlock(); | |
2383 | ||
2384 | return parent; | |
2385 | } | |
2386 | EXPORT_SYMBOL_GPL(clk_get_parent); | |
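 | /* | |
 | * Example (illustrative sketch): walking one level up the tree; clk is | |
 | * assumed to be a valid consumer handle. | |
 | * | |
 | *	struct clk *parent = clk_get_parent(clk); | |
 | * | |
 | *	if (parent) | |
 | *		pr_debug("parent rate: %lu Hz\n", clk_get_rate(parent)); | |
 | */ | |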
2387 | ||
2388 | static struct clk_core *__clk_init_parent(struct clk_core *core) | |
2389 | { | |
2390 | u8 index = 0; | |
2391 | ||
2392 | if (core->num_parents > 1 && core->ops->get_parent) | |
2393 | index = core->ops->get_parent(core->hw); | |
2394 | ||
2395 | return clk_core_get_parent_by_index(core, index); | |
2396 | } | |
2397 | ||
2398 | static void clk_core_reparent(struct clk_core *core, | |
2399 | struct clk_core *new_parent) | |
2400 | { | |
2401 | clk_reparent(core, new_parent); | |
2402 | __clk_recalc_accuracies(core); | |
2403 | __clk_recalc_rates(core, POST_RATE_CHANGE); | |
2404 | } | |
2405 | ||
2406 | void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent) | |
2407 | { | |
2408 | if (!hw) | |
2409 | return; | |
2410 | ||
2411 | clk_core_reparent(hw->core, !new_parent ? NULL : new_parent->core); | |
2412 | } | |
2413 | ||
2414 | /** | |
2415 | * clk_has_parent - check if a clock is a possible parent for another | |
2416 | * @clk: clock source | |
2417 | * @parent: parent clock source | |
2418 | * | |
2419 | * This function can be used in drivers that need to check that a clock can be | |
2420 | * the parent of another without actually changing the parent. | |
2421 | * | |
2422 | * Returns true if @parent is a possible parent for @clk, false otherwise. | |
2423 | */ | |
2424 | bool clk_has_parent(struct clk *clk, struct clk *parent) | |
2425 | { | |
2426 | struct clk_core *core, *parent_core; | |
2427 | int i; | |
2428 | ||
2429 | /* NULL clocks should be nops, so return success if either is NULL. */ | |
2430 | if (!clk || !parent) | |
2431 | return true; | |
2432 | ||
2433 | core = clk->core; | |
2434 | parent_core = parent->core; | |
2435 | ||
2436 | /* Optimize for the case where the parent is already the parent. */ | |
2437 | if (core->parent == parent_core) | |
2438 | return true; | |
2439 | ||
2440 | for (i = 0; i < core->num_parents; i++) | |
2441 | if (!strcmp(core->parents[i].name, parent_core->name)) | |
2442 | return true; | |
2443 | ||
2444 | return false; | |
2445 | } | |
2446 | EXPORT_SYMBOL_GPL(clk_has_parent); | |
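 | /* | |
 | * Example (illustrative sketch): probing whether a mux can be fed from a | |
 | * low-power oscillator before switching; mux_clk and osc32k are | |
 | * hypothetical handles. | |
 | * | |
 | *	if (clk_has_parent(mux_clk, osc32k)) | |
 | *		ret = clk_set_parent(mux_clk, osc32k); | |
 | */ | |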
2447 | ||
2448 | static int clk_core_set_parent_nolock(struct clk_core *core, | |
2449 | struct clk_core *parent) | |
2450 | { | |
2451 | int ret = 0; | |
2452 | int p_index = 0; | |
2453 | unsigned long p_rate = 0; | |
2454 | ||
2455 | lockdep_assert_held(&prepare_lock); | |
2456 | ||
2457 | if (!core) | |
2458 | return 0; | |
2459 | ||
2460 | if (core->parent == parent) | |
2461 | return 0; | |
2462 | ||
2463 | /* verify ops for multi-parent clks */ | |
2464 | if (core->num_parents > 1 && !core->ops->set_parent) | |
2465 | return -EPERM; | |
2466 | ||
2467 | /* check that we are allowed to re-parent if the clock is in use */ | |
2468 | if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) | |
2469 | return -EBUSY; | |
2470 | ||
2471 | if (clk_core_rate_is_protected(core)) | |
2472 | return -EBUSY; | |
2473 | ||
2474 | /* try finding the new parent index */ | |
2475 | if (parent) { | |
2476 | p_index = clk_fetch_parent_index(core, parent); | |
2477 | if (p_index < 0) { | |
2478 | pr_debug("%s: clk %s can not be parent of clk %s\n", | |
2479 | __func__, parent->name, core->name); | |
2480 | return p_index; | |
2481 | } | |
2482 | p_rate = parent->rate; | |
2483 | } | |
2484 | ||
2485 | ret = clk_pm_runtime_get(core); | |
2486 | if (ret) | |
2487 | return ret; | |
2488 | ||
2489 | /* propagate PRE_RATE_CHANGE notifications */ | |
2490 | ret = __clk_speculate_rates(core, p_rate); | |
2491 | ||
2492 | /* abort if a driver objects */ | |
2493 | if (ret & NOTIFY_STOP_MASK) | |
2494 | goto runtime_put; | |
2495 | ||
2496 | /* do the re-parent */ | |
2497 | ret = __clk_set_parent(core, parent, p_index); | |
2498 | ||
2499 | /* propagate rate and accuracy recalculation accordingly */ | |
2500 | if (ret) { | |
2501 | __clk_recalc_rates(core, ABORT_RATE_CHANGE); | |
2502 | } else { | |
2503 | __clk_recalc_rates(core, POST_RATE_CHANGE); | |
2504 | __clk_recalc_accuracies(core); | |
2505 | } | |
2506 | ||
2507 | runtime_put: | |
2508 | clk_pm_runtime_put(core); | |
2509 | ||
2510 | return ret; | |
2511 | } | |
2512 | ||
2513 | /** | |
2514 | * clk_set_parent - switch the parent of a mux clk | |
2515 | * @clk: the mux clk whose input we are switching | |
2516 | * @parent: the new input to clk | |
2517 | * | |
2518 | * Re-parent clk to use parent as its new input source. If clk is in | |
2519 | * prepared state, the clk will get enabled for the duration of this call. If | |
2520 | * that's not acceptable for a specific clk (e.g. the consumer can't handle | |
2521 | * that, the reparenting is glitchy in hardware, etc), use the | |
2522 | * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared. | |
2523 | * | |
2524 | * After successfully changing clk's parent clk_set_parent will update the | |
2525 | * clk topology, sysfs topology and propagate rate recalculation via | |
2526 | * __clk_recalc_rates. | |
2527 | * | |
2528 | * Returns 0 on success, a negative error code otherwise. | |
2529 | */ | |
2530 | int clk_set_parent(struct clk *clk, struct clk *parent) | |
2531 | { | |
2532 | int ret; | |
2533 | ||
2534 | if (!clk) | |
2535 | return 0; | |
2536 | ||
2537 | clk_prepare_lock(); | |
2538 | ||
2539 | if (clk->exclusive_count) | |
2540 | clk_core_rate_unprotect(clk->core); | |
2541 | ||
2542 | ret = clk_core_set_parent_nolock(clk->core, | |
2543 | parent ? parent->core : NULL); | |
2544 | ||
2545 | if (clk->exclusive_count) | |
2546 | clk_core_rate_protect(clk->core); | |
2547 | ||
2548 | clk_prepare_unlock(); | |
2549 | ||
2550 | return ret; | |
2551 | } | |
2552 | EXPORT_SYMBOL_GPL(clk_set_parent); | |
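 | /* | |
 | * Example (illustrative sketch): parking a peripheral on a crystal before | |
 | * its PLL is reprogrammed, then checking the resulting rate; uart_clk and | |
 | * xtal_clk are hypothetical handles. | |
 | * | |
 | *	ret = clk_set_parent(uart_clk, xtal_clk); | |
 | *	if (!ret) | |
 | *		pr_debug("uart now at %lu Hz\n", clk_get_rate(uart_clk)); | |
 | */ | |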
2553 | ||
2554 | static int clk_core_set_phase_nolock(struct clk_core *core, int degrees) | |
2555 | { | |
2556 | int ret = -EINVAL; | |
2557 | ||
2558 | lockdep_assert_held(&prepare_lock); | |
2559 | ||
2560 | if (!core) | |
2561 | return 0; | |
2562 | ||
2563 | if (clk_core_rate_is_protected(core)) | |
2564 | return -EBUSY; | |
2565 | ||
2566 | trace_clk_set_phase(core, degrees); | |
2567 | ||
2568 | if (core->ops->set_phase) { | |
2569 | ret = core->ops->set_phase(core->hw, degrees); | |
2570 | if (!ret) | |
2571 | core->phase = degrees; | |
2572 | } | |
2573 | ||
2574 | trace_clk_set_phase_complete(core, degrees); | |
2575 | ||
2576 | return ret; | |
2577 | } | |
2578 | ||
2579 | /** | |
2580 | * clk_set_phase - adjust the phase shift of a clock signal | |
2581 | * @clk: clock signal source | |
2582 | * @degrees: number of degrees the signal is shifted | |
2583 | * | |
2584 | * Shifts the phase of a clock signal by the specified | |
2585 | * degrees. Returns 0 on success, a negative error code otherwise. | |
2586 | * | |
2587 | * This function makes no distinction about the input or reference | |
2588 | * signal that we adjust the clock signal phase against. For example, with | |
2589 | * phase-locked-loop clock signal generators we may shift phase with | |
2590 | * respect to the feedback clock signal input, but in other cases the | |
2591 | * clock phase may be shifted with respect to some other, unspecified | |
2592 | * signal. | |
2593 | * | |
2594 | * Additionally the concept of phase shift does not propagate through | |
2595 | * the clock tree hierarchy, which sets it apart from clock rates and | |
2596 | * clock accuracy. A parent clock phase attribute does not have an | |
2597 | * impact on the phase attribute of a child clock. | |
2598 | */ | |
2599 | int clk_set_phase(struct clk *clk, int degrees) | |
2600 | { | |
2601 | int ret; | |
2602 | ||
2603 | if (!clk) | |
2604 | return 0; | |
2605 | ||
2606 | /* sanity check degrees */ | |
2607 | degrees %= 360; | |
2608 | if (degrees < 0) | |
2609 | degrees += 360; | |
2610 | ||
2611 | clk_prepare_lock(); | |
2612 | ||
2613 | if (clk->exclusive_count) | |
2614 | clk_core_rate_unprotect(clk->core); | |
2615 | ||
2616 | ret = clk_core_set_phase_nolock(clk->core, degrees); | |
2617 | ||
2618 | if (clk->exclusive_count) | |
2619 | clk_core_rate_protect(clk->core); | |
2620 | ||
2621 | clk_prepare_unlock(); | |
2622 | ||
2623 | return ret; | |
2624 | } | |
2625 | EXPORT_SYMBOL_GPL(clk_set_phase); | |
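 | /* | |
 | * Example (illustrative sketch): an MMC host shifting its sample clock by | |
 | * a quarter period after tuning; the sample_clk handle and the 90 degree | |
 | * value are hypothetical. | |
 | * | |
 | *	ret = clk_set_phase(sample_clk, 90); | |
 | */ | |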
2626 | ||
2627 | static int clk_core_get_phase(struct clk_core *core) | |
2628 | { | |
2629 | int ret; | |
2630 | ||
2631 | clk_prepare_lock(); | |
2632 | /* Always try to update cached phase if possible */ | |
2633 | if (core->ops->get_phase) | |
2634 | core->phase = core->ops->get_phase(core->hw); | |
2635 | ret = core->phase; | |
2636 | clk_prepare_unlock(); | |
2637 | ||
2638 | return ret; | |
2639 | } | |
2640 | ||
2641 | /** | |
2642 | * clk_get_phase - return the phase shift of a clock signal | |
2643 | * @clk: clock signal source | |
2644 | * | |
2645 | * Returns the phase shift of a clock node in degrees, otherwise returns | |
2646 | * a negative error code. | |
2647 | */ | |
2648 | int clk_get_phase(struct clk *clk) | |
2649 | { | |
2650 | if (!clk) | |
2651 | return 0; | |
2652 | ||
2653 | return clk_core_get_phase(clk->core); | |
2654 | } | |
2655 | EXPORT_SYMBOL_GPL(clk_get_phase); | |
2656 | ||
2657 | static void clk_core_reset_duty_cycle_nolock(struct clk_core *core) | |
2658 | { | |
2659 | /* Assume a default value of 50% */ | |
2660 | core->duty.num = 1; | |
2661 | core->duty.den = 2; | |
2662 | } | |
2663 | ||
2664 | static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core); | |
2665 | ||
2666 | static int clk_core_update_duty_cycle_nolock(struct clk_core *core) | |
2667 | { | |
2668 | struct clk_duty *duty = &core->duty; | |
2669 | int ret = 0; | |
2670 | ||
2671 | if (!core->ops->get_duty_cycle) | |
2672 | return clk_core_update_duty_cycle_parent_nolock(core); | |
2673 | ||
2674 | ret = core->ops->get_duty_cycle(core->hw, duty); | |
2675 | if (ret) | |
2676 | goto reset; | |
2677 | ||
2678 | /* Don't trust the clock provider too much */ | |
2679 | if (duty->den == 0 || duty->num > duty->den) { | |
2680 | ret = -EINVAL; | |
2681 | goto reset; | |
2682 | } | |
2683 | ||
2684 | return 0; | |
2685 | ||
2686 | reset: | |
2687 | clk_core_reset_duty_cycle_nolock(core); | |
2688 | return ret; | |
2689 | } | |
2690 | ||
2691 | static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core) | |
2692 | { | |
2693 | int ret = 0; | |
2694 | ||
2695 | if (core->parent && | |
2696 | core->flags & CLK_DUTY_CYCLE_PARENT) { | |
2697 | ret = clk_core_update_duty_cycle_nolock(core->parent); | |
2698 | memcpy(&core->duty, &core->parent->duty, sizeof(core->duty)); | |
2699 | } else { | |
2700 | clk_core_reset_duty_cycle_nolock(core); | |
2701 | } | |
2702 | ||
2703 | return ret; | |
2704 | } | |
2705 | ||
2706 | static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core, | |
2707 | struct clk_duty *duty); | |
2708 | ||
2709 | static int clk_core_set_duty_cycle_nolock(struct clk_core *core, | |
2710 | struct clk_duty *duty) | |
2711 | { | |
2712 | int ret; | |
2713 | ||
2714 | lockdep_assert_held(&prepare_lock); | |
2715 | ||
2716 | if (clk_core_rate_is_protected(core)) | |
2717 | return -EBUSY; | |
2718 | ||
2719 | trace_clk_set_duty_cycle(core, duty); | |
2720 | ||
2721 | if (!core->ops->set_duty_cycle) | |
2722 | return clk_core_set_duty_cycle_parent_nolock(core, duty); | |
2723 | ||
2724 | ret = core->ops->set_duty_cycle(core->hw, duty); | |
2725 | if (!ret) | |
2726 | memcpy(&core->duty, duty, sizeof(*duty)); | |
2727 | ||
2728 | trace_clk_set_duty_cycle_complete(core, duty); | |
2729 | ||
2730 | return ret; | |
2731 | } | |
2732 | ||
2733 | static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core, | |
2734 | struct clk_duty *duty) | |
2735 | { | |
2736 | int ret = 0; | |
2737 | ||
2738 | if (core->parent && | |
2739 | core->flags & (CLK_DUTY_CYCLE_PARENT | CLK_SET_RATE_PARENT)) { | |
2740 | ret = clk_core_set_duty_cycle_nolock(core->parent, duty); | |
2741 | memcpy(&core->duty, &core->parent->duty, sizeof(core->duty)); | |
2742 | } | |
2743 | ||
2744 | return ret; | |
2745 | } | |
2746 | ||
2747 | /** | |
2748 | * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal | |
2749 | * @clk: clock signal source | |
2750 | * @num: numerator of the duty cycle ratio to be applied | |
2751 | * @den: denominator of the duty cycle ratio to be applied | |
2752 | * | |
2753 | * Apply the duty cycle ratio if the ratio is valid and the clock can | |
2754 | * perform this operation | |
2755 | * | |
2756 | * Returns (0) on success, a negative errno otherwise. | |
2757 | */ | |
2758 | int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den) | |
2759 | { | |
2760 | int ret; | |
2761 | struct clk_duty duty; | |
2762 | ||
2763 | if (!clk) | |
2764 | return 0; | |
2765 | ||
2766 | /* sanity check the ratio */ | |
2767 | if (den == 0 || num > den) | |
2768 | return -EINVAL; | |
2769 | ||
2770 | duty.num = num; | |
2771 | duty.den = den; | |
2772 | ||
2773 | clk_prepare_lock(); | |
2774 | ||
2775 | if (clk->exclusive_count) | |
2776 | clk_core_rate_unprotect(clk->core); | |
2777 | ||
2778 | ret = clk_core_set_duty_cycle_nolock(clk->core, &duty); | |
2779 | ||
2780 | if (clk->exclusive_count) | |
2781 | clk_core_rate_protect(clk->core); | |
2782 | ||
2783 | clk_prepare_unlock(); | |
2784 | ||
2785 | return ret; | |
2786 | } | |
2787 | EXPORT_SYMBOL_GPL(clk_set_duty_cycle); | |
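 | /* | |
 | * Example (illustrative sketch): requesting a one-third duty cycle on a | |
 | * clock output; the pwm_clk handle is hypothetical. | |
 | * | |
 | *	ret = clk_set_duty_cycle(pwm_clk, 1, 3); | |
 | */ | |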
2788 | ||
2789 | static int clk_core_get_scaled_duty_cycle(struct clk_core *core, | |
2790 | unsigned int scale) | |
2791 | { | |
2792 | struct clk_duty *duty = &core->duty; | |
2793 | int ret; | |
2794 | ||
2795 | clk_prepare_lock(); | |
2796 | ||
2797 | ret = clk_core_update_duty_cycle_nolock(core); | |
2798 | if (!ret) | |
2799 | ret = mult_frac(scale, duty->num, duty->den); | |
2800 | ||
2801 | clk_prepare_unlock(); | |
2802 | ||
2803 | return ret; | |
2804 | } | |
2805 | ||
2806 | /** | |
2807 | * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal | |
2808 | * @clk: clock signal source | |
2809 | * @scale: scaling factor to be applied to represent the ratio as an integer | |
2810 | * | |
2811 | * Returns the duty cycle ratio of a clock node multiplied by the provided | |
2812 | * scaling factor, or negative errno on error. | |
2813 | */ | |
2814 | int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale) | |
2815 | { | |
2816 | if (!clk) | |
2817 | return 0; | |
2818 | ||
2819 | return clk_core_get_scaled_duty_cycle(clk->core, scale); | |
2820 | } | |
2821 | EXPORT_SYMBOL_GPL(clk_get_scaled_duty_cycle); | |
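 | /* | |
 | * Example (illustrative sketch): with scale = 100 the result is a | |
 | * percentage, so the default 1/2 duty cycle reads back as 50. | |
 | * | |
 | *	int pct = clk_get_scaled_duty_cycle(clk, 100); | |
 | */ | |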
2822 | ||
2823 | /** | |
2824 | * clk_is_match - check if two clk's point to the same hardware clock | |
2825 | * @p: clk compared against q | |
2826 | * @q: clk compared against p | |
2827 | * | |
2828 | * Returns true if the two struct clk pointers both point to the same hardware | |
2829 | * clock node. Put differently, returns true if struct clk *p and struct clk *q | |
2830 | * share the same struct clk_core object. | |
2831 | * | |
2832 | * Returns false otherwise. Note that two NULL clks are treated as matching. | |
2833 | */ | |
2834 | bool clk_is_match(const struct clk *p, const struct clk *q) | |
2835 | { | |
2836 | /* trivial case: identical struct clk's or both NULL */ | |
2837 | if (p == q) | |
2838 | return true; | |
2839 | ||
2840 | /* true if clk->core pointers match. Avoid dereferencing garbage */ | |
2841 | if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q)) | |
2842 | if (p->core == q->core) | |
2843 | return true; | |
2844 | ||
2845 | return false; | |
2846 | } | |
2847 | EXPORT_SYMBOL_GPL(clk_is_match); | |
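 | /* | |
 | * Example (illustrative sketch): two handles obtained independently may | |
 | * still drive the same hardware node; clk_a and clk_b are hypothetical. | |
 | * | |
 | *	if (clk_is_match(clk_a, clk_b)) | |
 | *		pr_debug("both handles share one clk_core\n"); | |
 | */ | |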
2848 | ||
2849 | /*** debugfs support ***/ | |
2850 | ||
2851 | #ifdef CONFIG_DEBUG_FS | |
2852 | #include <linux/debugfs.h> | |
2853 | ||
2854 | static struct dentry *rootdir; | |
2855 | static int inited = 0; | |
2856 | static DEFINE_MUTEX(clk_debug_lock); | |
2857 | static HLIST_HEAD(clk_debug_list); | |
2858 | ||
2859 | static struct hlist_head *all_lists[] = { | |
2860 | &clk_root_list, | |
2861 | &clk_orphan_list, | |
2862 | NULL, | |
2863 | }; | |
2864 | ||
2865 | static struct hlist_head *orphan_list[] = { | |
2866 | &clk_orphan_list, | |
2867 | NULL, | |
2868 | }; | |
2869 | ||
2870 | static void clk_summary_show_one(struct seq_file *s, struct clk_core *c, | |
2871 | int level) | |
2872 | { | |
2873 | if (!c) | |
2874 | return; | |
2875 | ||
2876 | seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu %5d %6d\n", | |
2877 | level * 3 + 1, "", | |
2878 | 30 - level * 3, c->name, | |
2879 | c->enable_count, c->prepare_count, c->protect_count, | |
2880 | clk_core_get_rate(c), clk_core_get_accuracy(c), | |
2881 | clk_core_get_phase(c), | |
2882 | clk_core_get_scaled_duty_cycle(c, 100000)); | |
2883 | } | |
2884 | ||
2885 | static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c, | |
2886 | int level) | |
2887 | { | |
2888 | struct clk_core *child; | |
2889 | ||
2890 | if (!c) | |
2891 | return; | |
2892 | ||
2893 | clk_summary_show_one(s, c, level); | |
2894 | ||
2895 | hlist_for_each_entry(child, &c->children, child_node) | |
2896 | clk_summary_show_subtree(s, child, level + 1); | |
2897 | } | |
2898 | ||
2899 | static int clk_summary_show(struct seq_file *s, void *data) | |
2900 | { | |
2901 | struct clk_core *c; | |
2902 | struct hlist_head **lists = (struct hlist_head **)s->private; | |
2903 | ||
2904 | seq_puts(s, " enable prepare protect duty\n"); | |
2905 | seq_puts(s, " clock count count count rate accuracy phase cycle\n"); | |
2906 | seq_puts(s, "---------------------------------------------------------------------------------------------\n"); | |
2907 | ||
2908 | clk_prepare_lock(); | |
2909 | ||
2910 | for (; *lists; lists++) | |
2911 | hlist_for_each_entry(c, *lists, child_node) | |
2912 | clk_summary_show_subtree(s, c, 0); | |
2913 | ||
2914 | clk_prepare_unlock(); | |
2915 | ||
2916 | return 0; | |
2917 | } | |
2918 | DEFINE_SHOW_ATTRIBUTE(clk_summary); | |
2919 | ||
2920 | static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level) | |
2921 | { | |
2922 | if (!c) | |
2923 | return; | |
2924 | ||
2925 | /* This should be JSON format, i.e. elements separated with a comma */ | |
2926 | seq_printf(s, "\"%s\": { ", c->name); | |
2927 | seq_printf(s, "\"enable_count\": %d,", c->enable_count); | |
2928 | seq_printf(s, "\"prepare_count\": %d,", c->prepare_count); | |
2929 | seq_printf(s, "\"protect_count\": %d,", c->protect_count); | |
2930 | seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c)); | |
2931 | seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c)); | |
2932 | seq_printf(s, "\"phase\": %d,", clk_core_get_phase(c)); | |
2933 | seq_printf(s, "\"duty_cycle\": %u", | |
2934 | clk_core_get_scaled_duty_cycle(c, 100000)); | |
2935 | } | |
2936 | ||
2937 | static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level) | |
2938 | { | |
2939 | struct clk_core *child; | |
2940 | ||
2941 | if (!c) | |
2942 | return; | |
2943 | ||
2944 | clk_dump_one(s, c, level); | |
2945 | ||
2946 | hlist_for_each_entry(child, &c->children, child_node) { | |
2947 | seq_putc(s, ','); | |
2948 | clk_dump_subtree(s, child, level + 1); | |
2949 | } | |
2950 | ||
2951 | seq_putc(s, '}'); | |
2952 | } | |
2953 | ||
2954 | static int clk_dump_show(struct seq_file *s, void *data) | |
2955 | { | |
2956 | struct clk_core *c; | |
2957 | bool first_node = true; | |
2958 | struct hlist_head **lists = (struct hlist_head **)s->private; | |
2959 | ||
2960 | seq_putc(s, '{'); | |
2961 | clk_prepare_lock(); | |
2962 | ||
2963 | for (; *lists; lists++) { | |
2964 | hlist_for_each_entry(c, *lists, child_node) { | |
2965 | if (!first_node) | |
2966 | seq_putc(s, ','); | |
2967 | first_node = false; | |
2968 | clk_dump_subtree(s, c, 0); | |
2969 | } | |
2970 | } | |
2971 | ||
2972 | clk_prepare_unlock(); | |
2973 | ||
2974 | seq_puts(s, "}\n"); | |
2975 | return 0; | |
2976 | } | |
2977 | DEFINE_SHOW_ATTRIBUTE(clk_dump); | |
2978 | ||
2979 | static const struct { | |
2980 | unsigned long flag; | |
2981 | const char *name; | |
2982 | } clk_flags[] = { | |
2983 | #define ENTRY(f) { f, #f } | |
2984 | ENTRY(CLK_SET_RATE_GATE), | |
2985 | ENTRY(CLK_SET_PARENT_GATE), | |
2986 | ENTRY(CLK_SET_RATE_PARENT), | |
2987 | ENTRY(CLK_IGNORE_UNUSED), | |
2988 | ENTRY(CLK_GET_RATE_NOCACHE), | |
2989 | ENTRY(CLK_SET_RATE_NO_REPARENT), | |
2990 | ENTRY(CLK_GET_ACCURACY_NOCACHE), | |
2991 | ENTRY(CLK_RECALC_NEW_RATES), | |
2992 | ENTRY(CLK_SET_RATE_UNGATE), | |
2993 | ENTRY(CLK_IS_CRITICAL), | |
2994 | ENTRY(CLK_OPS_PARENT_ENABLE), | |
2995 | ENTRY(CLK_DUTY_CYCLE_PARENT), | |
2996 | #undef ENTRY | |
2997 | }; | |
2998 | ||
2999 | static int clk_flags_show(struct seq_file *s, void *data) | |
3000 | { | |
3001 | struct clk_core *core = s->private; | |
3002 | unsigned long flags = core->flags; | |
3003 | unsigned int i; | |
3004 | ||
3005 | for (i = 0; flags && i < ARRAY_SIZE(clk_flags); i++) { | |
3006 | if (flags & clk_flags[i].flag) { | |
3007 | seq_printf(s, "%s\n", clk_flags[i].name); | |
3008 | flags &= ~clk_flags[i].flag; | |
3009 | } | |
3010 | } | |
3011 | if (flags) { | |
3012 | /* Unknown flags */ | |
3013 | seq_printf(s, "0x%lx\n", flags); | |
3014 | } | |
3015 | ||
3016 | return 0; | |
3017 | } | |
3018 | DEFINE_SHOW_ATTRIBUTE(clk_flags); | |
3019 | ||
3020 | static void possible_parent_show(struct seq_file *s, struct clk_core *core, | |
3021 | unsigned int i, char terminator) | |
3022 | { | |
3023 | struct clk_core *parent; | |
3024 | ||
3025 | /* | |
3026 | * Go through the following options to fetch a parent's name. | |
3027 | * | |
3028 | * 1. Fetch the registered parent clock and use its name | |
3029 | * 2. Use the global (fallback) name if specified | |
3030 | * 3. Use the local fw_name if provided | |
3031 | * 4. Fetch parent clock's clock-output-name if DT index was set | |
3032 | * | |
3033 | * This may still fail in some cases, such as when the parent is | |
3034 | * specified directly via a struct clk_hw pointer, but it isn't | |
3035 | * registered (yet). | |
3036 | */ | |
3037 | parent = clk_core_get_parent_by_index(core, i); | |
3038 | if (parent) | |
3039 | seq_printf(s, "%s", parent->name); | |
3040 | else if (core->parents[i].name) | |
3041 | seq_printf(s, "%s", core->parents[i].name); | |
3042 | else if (core->parents[i].fw_name) | |
3043 | seq_printf(s, "<%s>(fw)", core->parents[i].fw_name); | |
3044 | else if (core->parents[i].index >= 0) | |
3045 | seq_printf(s, "%s", | |
3046 | of_clk_get_parent_name(core->of_node, | |
3047 | core->parents[i].index)); | |
3048 | else | |
3049 | seq_puts(s, "(missing)"); | |
3050 | ||
3051 | seq_putc(s, terminator); | |
3052 | } | |
3053 | ||
3054 | static int possible_parents_show(struct seq_file *s, void *data) | |
3055 | { | |
3056 | struct clk_core *core = s->private; | |
3057 | int i; | |
3058 | ||
3059 | for (i = 0; i < core->num_parents - 1; i++) | |
3060 | possible_parent_show(s, core, i, ' '); | |
3061 | ||
3062 | possible_parent_show(s, core, i, '\n'); | |
3063 | ||
3064 | return 0; | |
3065 | } | |
3066 | DEFINE_SHOW_ATTRIBUTE(possible_parents); | |
3067 | ||
3068 | static int current_parent_show(struct seq_file *s, void *data) | |
3069 | { | |
3070 | struct clk_core *core = s->private; | |
3071 | ||
3072 | if (core->parent) | |
3073 | seq_printf(s, "%s\n", core->parent->name); | |
3074 | ||
3075 | return 0; | |
3076 | } | |
3077 | DEFINE_SHOW_ATTRIBUTE(current_parent); | |
3078 | ||
3079 | static int clk_duty_cycle_show(struct seq_file *s, void *data) | |
3080 | { | |
3081 | struct clk_core *core = s->private; | |
3082 | struct clk_duty *duty = &core->duty; | |
3083 | ||
3084 | seq_printf(s, "%u/%u\n", duty->num, duty->den); | |
3085 | ||
3086 | return 0; | |
3087 | } | |
3088 | DEFINE_SHOW_ATTRIBUTE(clk_duty_cycle); | |
3089 | ||
3090 | static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry) | |
3091 | { | |
3092 | struct dentry *root; | |
3093 | ||
3094 | if (!core || !pdentry) | |
3095 | return; | |
3096 | ||
3097 | root = debugfs_create_dir(core->name, pdentry); | |
3098 | core->dentry = root; | |
3099 | ||
3100 | debugfs_create_ulong("clk_rate", 0444, root, &core->rate); | |
3101 | debugfs_create_ulong("clk_accuracy", 0444, root, &core->accuracy); | |
3102 | debugfs_create_u32("clk_phase", 0444, root, &core->phase); | |
3103 | debugfs_create_file("clk_flags", 0444, root, core, &clk_flags_fops); | |
3104 | debugfs_create_u32("clk_prepare_count", 0444, root, &core->prepare_count); | |
3105 | debugfs_create_u32("clk_enable_count", 0444, root, &core->enable_count); | |
3106 | debugfs_create_u32("clk_protect_count", 0444, root, &core->protect_count); | |
3107 | debugfs_create_u32("clk_notifier_count", 0444, root, &core->notifier_count); | |
3108 | debugfs_create_file("clk_duty_cycle", 0444, root, core, | |
3109 | &clk_duty_cycle_fops); | |
3110 | ||
3111 | if (core->num_parents > 0) | |
3112 | debugfs_create_file("clk_parent", 0444, root, core, | |
3113 | &current_parent_fops); | |
3114 | ||
3115 | if (core->num_parents > 1) | |
3116 | debugfs_create_file("clk_possible_parents", 0444, root, core, | |
3117 | &possible_parents_fops); | |
3118 | ||
3119 | if (core->ops->debug_init) | |
3120 | core->ops->debug_init(core->hw, core->dentry); | |
3121 | } | |
3122 | ||
3123 | /** | |
3124 | * clk_debug_register - add a clk node to the debugfs clk directory | |
3125 | * @core: the clk being added to the debugfs clk directory | |
3126 | * | |
3127 | * Dynamically adds a clk to the debugfs clk directory if debugfs has been | |
3128 | * initialized. Otherwise it bails out early since the debugfs clk directory | |
3129 | * will be created lazily by clk_debug_init as part of a late_initcall. | |
3130 | */ | |
3131 | static void clk_debug_register(struct clk_core *core) | |
3132 | { | |
3133 | mutex_lock(&clk_debug_lock); | |
3134 | hlist_add_head(&core->debug_node, &clk_debug_list); | |
3135 | if (inited) | |
3136 | clk_debug_create_one(core, rootdir); | |
3137 | mutex_unlock(&clk_debug_lock); | |
3138 | } | |
3139 | ||
3140 | /** | |
3141 | * clk_debug_unregister - remove a clk node from the debugfs clk directory | |
3142 | * @core: the clk being removed from the debugfs clk directory | |
3143 | * | |
3144 | * Dynamically removes a clk and all its child nodes from the | |
3145 | * debugfs clk directory if clk->dentry points to debugfs created by | |
3146 | * clk_debug_register in __clk_core_init. | |
3147 | */ | |
3148 | static void clk_debug_unregister(struct clk_core *core) | |
3149 | { | |
3150 | mutex_lock(&clk_debug_lock); | |
3151 | hlist_del_init(&core->debug_node); | |
3152 | debugfs_remove_recursive(core->dentry); | |
3153 | core->dentry = NULL; | |
3154 | mutex_unlock(&clk_debug_lock); | |
3155 | } | |
3156 | ||
3157 | /** | |
3158 | * clk_debug_init - lazily populate the debugfs clk directory | |
3159 | * | |
3160 | * clks are often initialized very early during boot before memory can be | |
3161 | * dynamically allocated and well before debugfs is set up. This function | |
3162 | * populates the debugfs clk directory once at boot-time when we know that | |
3163 | * debugfs is set up. It should only be called once at boot-time; all other | |
3164 | * clks added dynamically will be registered with clk_debug_register. | |
3165 | */ | |
3166 | static int __init clk_debug_init(void) | |
3167 | { | |
3168 | struct clk_core *core; | |
3169 | ||
3170 | rootdir = debugfs_create_dir("clk", NULL); | |
3171 | ||
3172 | debugfs_create_file("clk_summary", 0444, rootdir, &all_lists, | |
3173 | &clk_summary_fops); | |
3174 | debugfs_create_file("clk_dump", 0444, rootdir, &all_lists, | |
3175 | &clk_dump_fops); | |
3176 | debugfs_create_file("clk_orphan_summary", 0444, rootdir, &orphan_list, | |
3177 | &clk_summary_fops); | |
3178 | debugfs_create_file("clk_orphan_dump", 0444, rootdir, &orphan_list, | |
3179 | &clk_dump_fops); | |
3180 | ||
3181 | mutex_lock(&clk_debug_lock); | |
3182 | hlist_for_each_entry(core, &clk_debug_list, debug_node) | |
3183 | clk_debug_create_one(core, rootdir); | |
3184 | ||
3185 | inited = 1; | |
3186 | mutex_unlock(&clk_debug_lock); | |
3187 | ||
3188 | return 0; | |
3189 | } | |
3190 | late_initcall(clk_debug_init); | |
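 | /* | |
 | * With CONFIG_DEBUG_FS enabled, the tree created above can be inspected | |
 | * from userspace, typically (the debugfs mount point may differ): | |
 | * | |
 | *	cat /sys/kernel/debug/clk/clk_summary | |
 | *	cat /sys/kernel/debug/clk/clk_dump | |
 | */ | |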
3191 | #else | |
3192 | static inline void clk_debug_register(struct clk_core *core) { } | |
3193 | static inline void clk_debug_reparent(struct clk_core *core, | |
3194 | struct clk_core *new_parent) | |
3195 | { | |
3196 | } | |
3197 | static inline void clk_debug_unregister(struct clk_core *core) | |
3198 | { | |
3199 | } | |
3200 | #endif | |
3201 | ||
3202 | /** | |
3203 | * __clk_core_init - initialize the data structures in a struct clk_core | |
3204 | * @core: clk_core being initialized | |
3205 | * | |
3206 | * Initializes the lists in struct clk_core, queries the hardware for the | |
3207 | * parent and rate and sets them both. | |
3208 | */ | |
3209 | static int __clk_core_init(struct clk_core *core) | |
3210 | { | |
3211 | int ret; | |
3212 | struct clk_core *orphan; | |
3213 | struct hlist_node *tmp2; | |
3214 | unsigned long rate; | |
3215 | ||
3216 | if (!core) | |
3217 | return -EINVAL; | |
3218 | ||
3219 | clk_prepare_lock(); | |
3220 | ||
3221 | ret = clk_pm_runtime_get(core); | |
3222 | if (ret) | |
3223 | goto unlock; | |
3224 | ||
3225 | /* check to see if a clock with this name is already registered */ | |
3226 | if (clk_core_lookup(core->name)) { | |
3227 | pr_debug("%s: clk %s already initialized\n", | |
3228 | __func__, core->name); | |
3229 | ret = -EEXIST; | |
3230 | goto out; | |
3231 | } | |
3232 | ||
3233 | /* check that clk_ops are sane. See Documentation/driver-api/clk.rst */ | |
3234 | if (core->ops->set_rate && | |
3235 | !((core->ops->round_rate || core->ops->determine_rate) && | |
3236 | core->ops->recalc_rate)) { | |
3237 | pr_err("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n", | |
3238 | __func__, core->name); | |
3239 | ret = -EINVAL; | |
3240 | goto out; | |
3241 | } | |
3242 | ||
3243 | if (core->ops->set_parent && !core->ops->get_parent) { | |
3244 | pr_err("%s: %s must implement .get_parent & .set_parent\n", | |
3245 | __func__, core->name); | |
3246 | ret = -EINVAL; | |
3247 | goto out; | |
3248 | } | |
3249 | ||
3250 | if (core->num_parents > 1 && !core->ops->get_parent) { | |
3251 | pr_err("%s: %s must implement .get_parent as it has multiple parents\n", | |
3252 | __func__, core->name); | |
3253 | ret = -EINVAL; | |
3254 | goto out; | |
3255 | } | |
3256 | ||
3257 | if (core->ops->set_rate_and_parent && | |
3258 | !(core->ops->set_parent && core->ops->set_rate)) { | |
3259 | pr_err("%s: %s must implement .set_parent & .set_rate\n", | |
3260 | __func__, core->name); | |
3261 | ret = -EINVAL; | |
3262 | goto out; | |
3263 | } | |
3264 | ||
3265 | core->parent = __clk_init_parent(core); | |
3266 | ||
3267 | /* | |
3268 | * Populate core->parent if parent has already been clk_core_init'd. If | |
3269 | * parent has not yet been clk_core_init'd then place clk in the orphan | |
3270 | * list. If clk doesn't have any parents then place it in the root | |
3271 | * clk list. | |
3272 | * | |
3273 | * Every time a new clk is clk_init'd then we walk the list of orphan | |
3274 | * clocks and re-parent any that are children of the clock currently | |
3275 | * being clk_init'd. | |
3276 | */ | |
3277 | if (core->parent) { | |
3278 | hlist_add_head(&core->child_node, | |
3279 | &core->parent->children); | |
3280 | core->orphan = core->parent->orphan; | |
3281 | } else if (!core->num_parents) { | |
3282 | hlist_add_head(&core->child_node, &clk_root_list); | |
3283 | core->orphan = false; | |
3284 | } else { | |
3285 | hlist_add_head(&core->child_node, &clk_orphan_list); | |
3286 | core->orphan = true; | |
3287 | } | |
3288 | ||
3289 | /* | |
3290 | * optional platform-specific magic | |
3291 | * | |
3292 | * The .init callback is not used by any of the basic clock types, but | |
3293 | * exists for weird hardware that must perform initialization magic. | |
3294 | * Please consider other ways of solving initialization problems before | |
3295 | * using this callback, as its use is discouraged. | |
3296 | */ | |
3297 | if (core->ops->init) | |
3298 | core->ops->init(core->hw); | |
3299 | ||
3300 | /* | |
3301 | * Set clk's accuracy. The preferred method is to use | |
3302 | * .recalc_accuracy. For simple clocks and lazy developers the default | |
3303 | * fallback is to use the parent's accuracy. If a clock doesn't have a | |
3304 | * parent (or is orphaned) then accuracy is set to zero (perfect | |
3305 | * clock). | |
3306 | */ | |
3307 | if (core->ops->recalc_accuracy) | |
3308 | core->accuracy = core->ops->recalc_accuracy(core->hw, | |
3309 | __clk_get_accuracy(core->parent)); | |
3310 | else if (core->parent) | |
3311 | core->accuracy = core->parent->accuracy; | |
3312 | else | |
3313 | core->accuracy = 0; | |
3314 | ||
3315 | /* | |
3316 | * Set clk's phase. | |
3317 | * Since a phase is by definition relative to its parent, just | |
3318 | * query the current clock phase, or assume the clock is in phase. | |
3319 | */ | |
3320 | if (core->ops->get_phase) | |
3321 | core->phase = core->ops->get_phase(core->hw); | |
3322 | else | |
3323 | core->phase = 0; | |
3324 | ||
3325 | /* | |
3326 | * Set clk's duty cycle. | |
3327 | */ | |
3328 | clk_core_update_duty_cycle_nolock(core); | |
3329 | ||
3330 | /* | |
3331 | * Set clk's rate. The preferred method is to use .recalc_rate. For | |
3332 | * simple clocks and lazy developers the default fallback is to use the | |
3333 | * parent's rate. If a clock doesn't have a parent (or is orphaned) | |
3334 | * then rate is set to zero. | |
3335 | */ | |
3336 | if (core->ops->recalc_rate) | |
3337 | rate = core->ops->recalc_rate(core->hw, | |
3338 | clk_core_get_rate_nolock(core->parent)); | |
3339 | else if (core->parent) | |
3340 | rate = core->parent->rate; | |
3341 | else | |
3342 | rate = 0; | |
3343 | core->rate = core->req_rate = rate; | |
3344 | ||
3345 | /* | |
3346 | * Enable CLK_IS_CRITICAL clocks so newly added critical clocks | |
3347 | * don't get accidentally disabled when walking the orphan tree and | |
3348 | * reparenting clocks | |
3349 | */ | |
3350 | if (core->flags & CLK_IS_CRITICAL) { | |
3351 | unsigned long flags; | |
3352 | ||
3353 | clk_core_prepare(core); | |
3354 | ||
3355 | flags = clk_enable_lock(); | |
3356 | clk_core_enable(core); | |
3357 | clk_enable_unlock(flags); | |
3358 | } | |
3359 | ||
3360 | /* | |
3361 | * Walk the list of orphan clocks and reparent any that can now find | |
3362 | * a parent. | |
3363 | */ | |
3364 | hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) { | |
3365 | struct clk_core *parent = __clk_init_parent(orphan); | |
3366 | ||
3367 | /* | |
3368 | * We need to use __clk_set_parent_before() and _after() to | |
3369 | * properly migrate any prepare/enable count of the orphan | |
3370 | * clock. This is important for CLK_IS_CRITICAL clocks, which | |
3371 | * are enabled during init but might not have a parent yet. | |
3372 | */ | |
3373 | if (parent) { | |
3374 | /* update the clk tree topology */ | |
3375 | __clk_set_parent_before(orphan, parent); | |
3376 | __clk_set_parent_after(orphan, parent, NULL); | |
3377 | __clk_recalc_accuracies(orphan); | |
3378 | __clk_recalc_rates(orphan, 0); | |
3379 | } | |
3380 | } | |
3381 | ||
3382 | kref_init(&core->ref); | |
3383 | out: | |
3384 | clk_pm_runtime_put(core); | |
3385 | unlock: | |
3386 | clk_prepare_unlock(); | |
3387 | ||
3388 | if (!ret) | |
3389 | clk_debug_register(core); | |
3390 | ||
3391 | return ret; | |
3392 | } | |
3393 | ||
3394 | /** | |
3395 | * clk_core_link_consumer - Add a clk consumer to the list of consumers in a clk_core | |
3396 | * @core: clk to add consumer to | |
3397 | * @clk: consumer to link to a clk | |
3398 | */ | |
3399 | static void clk_core_link_consumer(struct clk_core *core, struct clk *clk) | |
3400 | { | |
3401 | clk_prepare_lock(); | |
3402 | hlist_add_head(&clk->clks_node, &core->clks); | |
3403 | clk_prepare_unlock(); | |
3404 | } | |
3405 | ||
3406 | /** | |
3407 | * clk_core_unlink_consumer - Remove a clk consumer from the list of consumers in a clk_core | |
3408 | * @clk: consumer to unlink | |
3409 | */ | |
3410 | static void clk_core_unlink_consumer(struct clk *clk) | |
3411 | { | |
3412 | lockdep_assert_held(&prepare_lock); | |
3413 | hlist_del(&clk->clks_node); | |
3414 | } | |
3415 | ||
3416 | /** | |
3417 | * alloc_clk - Allocate a clk consumer, but leave it unlinked from the clk_core | |
3418 | * @core: clk to allocate a consumer for | |
3419 | * @dev_id: string describing device name | |
3420 | * @con_id: connection ID string on device | |
3421 | * | |
3422 | * Returns: clk consumer left unlinked from the consumer list | |
3423 | */ | |
3424 | static struct clk *alloc_clk(struct clk_core *core, const char *dev_id, | |
3425 | const char *con_id) | |
3426 | { | |
3427 | struct clk *clk; | |
3428 | ||
3429 | clk = kzalloc(sizeof(*clk), GFP_KERNEL); | |
3430 | if (!clk) | |
3431 | return ERR_PTR(-ENOMEM); | |
3432 | ||
3433 | clk->core = core; | |
3434 | clk->dev_id = dev_id; | |
3435 | clk->con_id = kstrdup_const(con_id, GFP_KERNEL); | |
3436 | clk->max_rate = ULONG_MAX; | |
3437 | ||
3438 | return clk; | |
3439 | } | |
3440 | ||
3441 | /** | |
3442 | * free_clk - Free a clk consumer | |
3443 | * @clk: clk consumer to free | |
3444 | * | |
3445 | * Note, this assumes the clk has been unlinked from the clk_core consumer | |
3446 | * list. | |
3447 | */ | |
3448 | static void free_clk(struct clk *clk) | |
3449 | { | |
3450 | kfree_const(clk->con_id); | |
3451 | kfree(clk); | |
3452 | } | |
3453 | ||
3454 | /** | |
3455 | * clk_hw_create_clk - Allocate and link a clk consumer to a clk_core given | |
3456 | * a clk_hw | |
3457 | * @dev: clk consumer device | |
3458 | * @hw: clk_hw associated with the clk being consumed | |
3459 | * @dev_id: string describing device name | |
3460 | * @con_id: connection ID string on device | |
3461 | * | |
3462 | * This is the main function used to create a clk pointer for use by clk | |
3463 | * consumers. It connects a consumer to the clk_core and clk_hw structures | |
3464 | * used by the framework and clk provider respectively. | |
3465 | */ | |
3466 | struct clk *clk_hw_create_clk(struct device *dev, struct clk_hw *hw, | |
3467 | const char *dev_id, const char *con_id) | |
3468 | { | |
3469 | struct clk *clk; | |
3470 | struct clk_core *core; | |
3471 | ||
3472 | /* This is to allow this function to be chained to others */ | |
3473 | if (IS_ERR_OR_NULL(hw)) | |
3474 | return ERR_CAST(hw); | |
3475 | ||
3476 | core = hw->core; | |
3477 | clk = alloc_clk(core, dev_id, con_id); | |
3478 | if (IS_ERR(clk)) | |
3479 | return clk; | |
3480 | clk->dev = dev; | |
3481 | ||
3482 | if (!try_module_get(core->owner)) { | |
3483 | free_clk(clk); | |
3484 | return ERR_PTR(-ENOENT); | |
3485 | } | |
3486 | ||
3487 | kref_get(&core->ref); | |
3488 | clk_core_link_consumer(core, clk); | |
3489 | ||
3490 | return clk; | |
3491 | } | |
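||
| /* | |
|  * A minimal usage sketch (hypothetical names): framework-internal | |
|  * callers such as clkdev hand out consumer handles roughly like this, | |
|  * assuming "my_hw" was already registered with clk_hw_register(): | |
|  * | |
|  *	struct clk *clk; | |
|  * | |
|  *	clk = clk_hw_create_clk(dev, &my_hw, dev_name(dev), NULL); | |
|  *	if (IS_ERR(clk)) | |
|  *		return PTR_ERR(clk); | |
|  */ | |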
3492 | ||
3493 | static int clk_cpy_name(const char **dst_p, const char *src, bool must_exist) | |
3494 | { | |
3495 | const char *dst; | |
3496 | ||
3497 | if (!src) { | |
3498 | if (must_exist) | |
3499 | return -EINVAL; | |
3500 | return 0; | |
3501 | } | |
3502 | ||
3503 | *dst_p = dst = kstrdup_const(src, GFP_KERNEL); | |
3504 | if (!dst) | |
3505 | return -ENOMEM; | |
3506 | ||
3507 | return 0; | |
3508 | } | |
3509 | ||
3510 | static int clk_core_populate_parent_map(struct clk_core *core) | |
3511 | { | |
3512 | const struct clk_init_data *init = core->hw->init; | |
3513 | u8 num_parents = init->num_parents; | |
3514 | const char * const *parent_names = init->parent_names; | |
3515 | const struct clk_hw **parent_hws = init->parent_hws; | |
3516 | const struct clk_parent_data *parent_data = init->parent_data; | |
3517 | int i, ret = 0; | |
3518 | struct clk_parent_map *parents, *parent; | |
3519 | ||
3520 | if (!num_parents) | |
3521 | return 0; | |
3522 | ||
3523 | /* | |
3524 | * Avoid unnecessary string look-ups of clk_core's possible parents by | |
3525 | * having a cache of names/clk_hw pointers to clk_core pointers. | |
3526 | */ | |
3527 | parents = kcalloc(num_parents, sizeof(*parents), GFP_KERNEL); | |
3528 | core->parents = parents; | |
3529 | if (!parents) | |
3530 | return -ENOMEM; | |
3531 | ||
3532 | /* Copy everything over because it might be __initdata */ | |
3533 | for (i = 0, parent = parents; i < num_parents; i++, parent++) { | |
3534 | parent->index = -1; | |
3535 | if (parent_names) { | |
3536 | /* throw a WARN if any entries are NULL */ | |
3537 | WARN(!parent_names[i], | |
3538 | "%s: invalid NULL in %s's .parent_names\n", | |
3539 | __func__, core->name); | |
3540 | ret = clk_cpy_name(&parent->name, parent_names[i], | |
3541 | true); | |
3542 | } else if (parent_data) { | |
3543 | parent->hw = parent_data[i].hw; | |
3544 | parent->index = parent_data[i].index; | |
3545 | ret = clk_cpy_name(&parent->fw_name, | |
3546 | parent_data[i].fw_name, false); | |
3547 | if (!ret) | |
3548 | ret = clk_cpy_name(&parent->name, | |
3549 | parent_data[i].name, | |
3550 | false); | |
3551 | } else if (parent_hws) { | |
3552 | parent->hw = parent_hws[i]; | |
3553 | } else { | |
3554 | ret = -EINVAL; | |
3555 | WARN(1, "Must specify parents if num_parents > 0\n"); | |
3556 | } | |
3557 | ||
3558 | if (ret) { | |
3559 | do { | |
3560 | kfree_const(parents[i].name); | |
3561 | kfree_const(parents[i].fw_name); | |
3562 | } while (--i >= 0); | |
3563 | kfree(parents); | |
3564 | ||
3565 | return ret; | |
3566 | } | |
3567 | } | |
3568 | ||
3569 | return 0; | |
3570 | } | |
3571 | ||
3572 | static void clk_core_free_parent_map(struct clk_core *core) | |
3573 | { | |
3574 | int i = core->num_parents; | |
3575 | ||
3576 | if (!core->num_parents) | |
3577 | return; | |
3578 | ||
3579 | while (--i >= 0) { | |
3580 | kfree_const(core->parents[i].name); | |
3581 | kfree_const(core->parents[i].fw_name); | |
3582 | } | |
3583 | ||
3584 | kfree(core->parents); | |
3585 | } | |
3586 | ||
3587 | static struct clk * | |
3588 | __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw) | |
3589 | { | |
3590 | int ret; | |
3591 | struct clk_core *core; | |
3592 | ||
3593 | core = kzalloc(sizeof(*core), GFP_KERNEL); | |
3594 | if (!core) { | |
3595 | ret = -ENOMEM; | |
3596 | goto fail_out; | |
3597 | } | |
3598 | ||
3599 | core->name = kstrdup_const(hw->init->name, GFP_KERNEL); | |
3600 | if (!core->name) { | |
3601 | ret = -ENOMEM; | |
3602 | goto fail_name; | |
3603 | } | |
3604 | ||
3605 | if (WARN_ON(!hw->init->ops)) { | |
3606 | ret = -EINVAL; | |
3607 | goto fail_ops; | |
3608 | } | |
3609 | core->ops = hw->init->ops; | |
3610 | ||
3611 | if (dev && pm_runtime_enabled(dev)) | |
3612 | core->rpm_enabled = true; | |
3613 | core->dev = dev; | |
3614 | core->of_node = np; | |
3615 | if (dev && dev->driver) | |
3616 | core->owner = dev->driver->owner; | |
3617 | core->hw = hw; | |
3618 | core->flags = hw->init->flags; | |
3619 | core->num_parents = hw->init->num_parents; | |
3620 | core->min_rate = 0; | |
3621 | core->max_rate = ULONG_MAX; | |
3622 | hw->core = core; | |
3623 | ||
3624 | ret = clk_core_populate_parent_map(core); | |
3625 | if (ret) | |
3626 | goto fail_parents; | |
3627 | ||
3628 | INIT_HLIST_HEAD(&core->clks); | |
3629 | ||
3630 | /* | |
3631 | * Don't call clk_hw_create_clk() here because that would pin the | |
3632 | * provider module to itself and prevent it from ever being removed. | |
3633 | */ | |
3634 | hw->clk = alloc_clk(core, NULL, NULL); | |
3635 | if (IS_ERR(hw->clk)) { | |
3636 | ret = PTR_ERR(hw->clk); | |
3637 | goto fail_create_clk; | |
3638 | } | |
3639 | ||
3640 | clk_core_link_consumer(hw->core, hw->clk); | |
3641 | ||
3642 | ret = __clk_core_init(core); | |
3643 | if (!ret) | |
3644 | return hw->clk; | |
3645 | ||
3646 | clk_prepare_lock(); | |
3647 | clk_core_unlink_consumer(hw->clk); | |
3648 | clk_prepare_unlock(); | |
3649 | ||
3650 | free_clk(hw->clk); | |
3651 | hw->clk = NULL; | |
3652 | ||
3653 | fail_create_clk: | |
3654 | clk_core_free_parent_map(core); | |
3655 | fail_parents: | |
3656 | fail_ops: | |
3657 | kfree_const(core->name); | |
3658 | fail_name: | |
3659 | kfree(core); | |
3660 | fail_out: | |
3661 | return ERR_PTR(ret); | |
3662 | } | |
3663 | ||
3664 | /** | |
3665 | * clk_register - allocate a new clock, register it and return an opaque cookie | |
3666 | * @dev: device that is registering this clock | |
3667 | * @hw: link to hardware-specific clock data | |
3668 | * | |
3669 | * clk_register is the *deprecated* interface for populating the clock tree with | |
3670 | * new clock nodes. Use clk_hw_register() instead. | |
3671 | * | |
3672 | * Returns: a pointer to the newly allocated struct clk which | |
3673 | * cannot be dereferenced by driver code but may be used in conjunction with the | |
3674 | * rest of the clock API. In the event of an error clk_register will return an | |
3675 | * error code; drivers must test for an error code after calling clk_register. | |
3676 | */ | |
3677 | struct clk *clk_register(struct device *dev, struct clk_hw *hw) | |
3678 | { | |
3679 | return __clk_register(dev, dev_of_node(dev), hw); | |
3680 | } | |
3681 | EXPORT_SYMBOL_GPL(clk_register); | |
3682 | ||
3683 | /** | |
3684 | * clk_hw_register - register a clk_hw and return an error code | |
3685 | * @dev: device that is registering this clock | |
3686 | * @hw: link to hardware-specific clock data | |
3687 | * | |
3688 | * clk_hw_register is the primary interface for populating the clock tree with | |
3689 | * new clock nodes. It returns an integer equal to zero indicating success or | |
3690 | * less than zero indicating failure. Drivers must test for an error code after | |
3691 | * calling clk_hw_register(). | |
3692 | */ | |
3693 | int clk_hw_register(struct device *dev, struct clk_hw *hw) | |
3694 | { | |
3695 | return PTR_ERR_OR_ZERO(__clk_register(dev, dev_of_node(dev), hw)); | |
3696 | } | |
3697 | EXPORT_SYMBOL_GPL(clk_hw_register); | |
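||
| /* | |
|  * A minimal registration sketch (hypothetical driver; the ops, parent | |
|  * names and probe function are illustrative, not part of this file): | |
|  * fill in a struct clk_init_data, point the clk_hw at it, and register. | |
|  * | |
|  *	static const struct clk_ops my_gate_ops = { ... }; | |
|  *	static const char * const my_parents[] = { "pll1" }; | |
|  *	static struct clk_hw my_hw; | |
|  * | |
|  *	static int my_probe(struct platform_device *pdev) | |
|  *	{ | |
|  *		struct clk_init_data init = { | |
|  *			.name = "my_gate", | |
|  *			.ops = &my_gate_ops, | |
|  *			.parent_names = my_parents, | |
|  *			.num_parents = ARRAY_SIZE(my_parents), | |
|  *		}; | |
|  * | |
|  *		my_hw.init = &init; | |
|  *		return clk_hw_register(&pdev->dev, &my_hw); | |
|  *	} | |
|  */ | |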
3698 | ||
3699 | /** | |
3700 | * of_clk_hw_register - register a clk_hw and return an error code | |
3701 | * @node: device_node of device that is registering this clock | |
3702 | * @hw: link to hardware-specific clock data | |
3703 | * | |
3704 | * of_clk_hw_register() is the primary interface for populating the clock tree | |
3705 | * with new clock nodes when a struct device is not available, but a struct | |
3706 | * device_node is. It returns an integer equal to zero indicating success or | |
3707 | * less than zero indicating failure. Drivers must test for an error code after | |
3708 | * calling of_clk_hw_register(). | |
3709 | */ | |
3710 | int of_clk_hw_register(struct device_node *node, struct clk_hw *hw) | |
3711 | { | |
3712 | return PTR_ERR_OR_ZERO(__clk_register(NULL, node, hw)); | |
3713 | } | |
3714 | EXPORT_SYMBOL_GPL(of_clk_hw_register); | |
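||
| /* | |
|  * Sketch for providers that only have a device_node, e.g. early | |
|  * CLK_OF_DECLARE-style setup ("my_hw" is assumed to be initialized as | |
|  * in the clk_hw_register() sketch above): | |
|  * | |
|  *	static void __init my_clk_setup(struct device_node *np) | |
|  *	{ | |
|  *		if (of_clk_hw_register(np, &my_hw)) | |
|  *			pr_err("%pOF: failed to register clock\n", np); | |
|  *	} | |
|  */ | |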
3715 | ||
3716 | /* Free memory allocated for a clock. */ | |
3717 | static void __clk_release(struct kref *ref) | |
3718 | { | |
3719 | struct clk_core *core = container_of(ref, struct clk_core, ref); | |
3720 | ||
3721 | lockdep_assert_held(&prepare_lock); | |
3722 | ||
3723 | clk_core_free_parent_map(core); | |
3724 | kfree_const(core->name); | |
3725 | kfree(core); | |
3726 | } | |
3727 | ||
3728 | /* | |
3729 | * Empty clk_ops for unregistered clocks. These are used temporarily | |
3730 | * after clk_unregister() has been called on a clock, until the last | |
3731 | * consumer calls clk_put() and the struct clk object is freed. | |
3732 | */ | |
3733 | static int clk_nodrv_prepare_enable(struct clk_hw *hw) | |
3734 | { | |
3735 | return -ENXIO; | |
3736 | } | |
3737 | ||
3738 | static void clk_nodrv_disable_unprepare(struct clk_hw *hw) | |
3739 | { | |
3740 | WARN_ON_ONCE(1); | |
3741 | } | |
3742 | ||
3743 | static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate, | |
3744 | unsigned long parent_rate) | |
3745 | { | |
3746 | return -ENXIO; | |
3747 | } | |
3748 | ||
3749 | static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index) | |
3750 | { | |
3751 | return -ENXIO; | |
3752 | } | |
3753 | ||
3754 | static const struct clk_ops clk_nodrv_ops = { | |
3755 | .enable = clk_nodrv_prepare_enable, | |
3756 | .disable = clk_nodrv_disable_unprepare, | |
3757 | .prepare = clk_nodrv_prepare_enable, | |
3758 | .unprepare = clk_nodrv_disable_unprepare, | |
3759 | .set_rate = clk_nodrv_set_rate, | |
3760 | .set_parent = clk_nodrv_set_parent, | |
3761 | }; | |
3762 | ||
3763 | /** | |
3764 | * clk_unregister - unregister a currently registered clock | |
3765 | * @clk: clock to unregister | |
3766 | */ | |
3767 | void clk_unregister(struct clk *clk) | |
3768 | { | |
3769 | unsigned long flags; | |
3770 | ||
3771 | if (!clk || WARN_ON_ONCE(IS_ERR(clk))) | |
3772 | return; | |
3773 | ||
3774 | clk_debug_unregister(clk->core); | |
3775 | ||
3776 | clk_prepare_lock(); | |
3777 | ||
3778 | if (clk->core->ops == &clk_nodrv_ops) { | |
3779 | pr_err("%s: unregistered clock: %s\n", __func__, | |
3780 | clk->core->name); | |
3781 | goto unlock; | |
3782 | } | |
3783 | /* | |
3784 | * Assign empty clock ops for consumers that might still hold | |
3785 | * a reference to this clock. | |
3786 | */ | |
3787 | flags = clk_enable_lock(); | |
3788 | clk->core->ops = &clk_nodrv_ops; | |
3789 | clk_enable_unlock(flags); | |
3790 | ||
3791 | if (!hlist_empty(&clk->core->children)) { | |
3792 | struct clk_core *child; | |
3793 | struct hlist_node *t; | |
3794 | ||
3795 | /* Reparent all children to the orphan list. */ | |
3796 | hlist_for_each_entry_safe(child, t, &clk->core->children, | |
3797 | child_node) | |
3798 | clk_core_set_parent_nolock(child, NULL); | |
3799 | } | |
3800 | ||
3801 | hlist_del_init(&clk->core->child_node); | |
3802 | ||
3803 | if (clk->core->prepare_count) | |
3804 | pr_warn("%s: unregistering prepared clock: %s\n", | |
3805 | __func__, clk->core->name); | |
3806 | ||
3807 | if (clk->core->protect_count) | |
3808 | pr_warn("%s: unregistering protected clock: %s\n", | |
3809 | __func__, clk->core->name); | |
3810 | ||
3811 | kref_put(&clk->core->ref, __clk_release); | |
3812 | unlock: | |
3813 | clk_prepare_unlock(); | |
3814 | } | |
3815 | EXPORT_SYMBOL_GPL(clk_unregister); | |
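||
| /* | |
|  * Illustrative teardown for a non-devres provider (the driver data | |
|  * layout is hypothetical): unregister the clock before the provider | |
|  * goes away, so lingering consumers hit the nodrv ops above instead | |
|  * of stale callbacks. | |
|  * | |
|  *	static int my_remove(struct platform_device *pdev) | |
|  *	{ | |
|  *		struct my_priv *priv = platform_get_drvdata(pdev); | |
|  * | |
|  *		clk_unregister(priv->clk); | |
|  *		return 0; | |
|  *	} | |
|  */ | |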
3816 | ||
3817 | /** | |
3818 | * clk_hw_unregister - unregister a currently registered clk_hw | |
3819 | * @hw: hardware-specific clock data to unregister | |
3820 | */ | |
3821 | void clk_hw_unregister(struct clk_hw *hw) | |
3822 | { | |
3823 | clk_unregister(hw->clk); | |
3824 | } | |
3825 | EXPORT_SYMBOL_GPL(clk_hw_unregister); | |
3826 | ||
3827 | static void devm_clk_release(struct device *dev, void *res) | |
3828 | { | |
3829 | clk_unregister(*(struct clk **)res); | |
3830 | } | |
3831 | ||
3832 | static void devm_clk_hw_release(struct device *dev, void *res) | |
3833 | { | |
3834 | clk_hw_unregister(*(struct clk_hw **)res); | |
3835 | } | |
3836 | ||
3837 | /** | |
3838 | * devm_clk_register - resource managed clk_register() | |
3839 | * @dev: device that is registering this clock | |
3840 | * @hw: link to hardware-specific clock data | |
3841 | * | |
3842 | * Managed clk_register(). This function is *deprecated*; use devm_clk_hw_register() instead. | |
3843 | * | |
3844 | * Clocks returned from this function are automatically clk_unregister()ed on | |
3845 | * driver detach. See clk_register() for more information. | |
3846 | */ | |
3847 | struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw) | |
3848 | { | |
3849 | struct clk *clk; | |
3850 | struct clk **clkp; | |
3851 | ||
3852 | clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL); | |
3853 | if (!clkp) | |
3854 | return ERR_PTR(-ENOMEM); | |
3855 | ||
3856 | clk = clk_register(dev, hw); | |
3857 | if (!IS_ERR(clk)) { | |
3858 | *clkp = clk; | |
3859 | devres_add(dev, clkp); | |
3860 | } else { | |
3861 | devres_free(clkp); | |
3862 | } | |
3863 | ||
3864 | return clk; | |
3865 | } | |
3866 | EXPORT_SYMBOL_GPL(devm_clk_register); | |
3867 | ||
3868 | /** | |
3869 | * devm_clk_hw_register - resource managed clk_hw_register() | |
3870 | * @dev: device that is registering this clock | |
3871 | * @hw: link to hardware-specific clock data | |
3872 | * | |
3873 | * Managed clk_hw_register(). Clocks registered by this function are | |
3874 | * automatically clk_hw_unregister()ed on driver detach. See clk_hw_register() | |
3875 | * for more information. | |
3876 | */ | |
3877 | int devm_clk_hw_register(struct device *dev, struct clk_hw *hw) | |
3878 | { | |
3879 | struct clk_hw **hwp; | |
3880 | int ret; | |
3881 | ||
3882 | hwp = devres_alloc(devm_clk_hw_release, sizeof(*hwp), GFP_KERNEL); | |
3883 | if (!hwp) | |
3884 | return -ENOMEM; | |
3885 | ||
3886 | ret = clk_hw_register(dev, hw); | |
3887 | if (!ret) { | |
3888 | *hwp = hw; | |
3889 | devres_add(dev, hwp); | |
3890 | } else { | |
3891 | devres_free(hwp); | |
3892 | } | |
3893 | ||
3894 | return ret; | |
3895 | } | |
3896 | EXPORT_SYMBOL_GPL(devm_clk_hw_register); | |
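||
| /* | |
|  * With the devres variant the unregister bookkeeping above collapses | |
|  * into a single call in probe (sketch; "my_hw" set up as in the | |
|  * clk_hw_register() example): | |
|  * | |
|  *	return devm_clk_hw_register(&pdev->dev, &my_hw); | |
|  */ | |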
3897 | ||
3898 | static int devm_clk_match(struct device *dev, void *res, void *data) | |
3899 | { | |
3900 | struct clk *c = res; | |
3901 | if (WARN_ON(!c)) | |
3902 | return 0; | |
3903 | return c == data; | |
3904 | } | |
3905 | ||
3906 | static int devm_clk_hw_match(struct device *dev, void *res, void *data) | |
3907 | { | |
3908 | struct clk_hw *hw = res; | |
3909 | ||
3910 | if (WARN_ON(!hw)) | |
3911 | return 0; | |
3912 | return hw == data; | |
3913 | } | |
3914 | ||
3915 | /** | |
3916 | * devm_clk_unregister - resource managed clk_unregister() | |
| * @dev: device that is unregistering the clock | |
3917 | * @clk: clock to unregister | |
3918 | * | |
3919 | * Deallocate a clock allocated with devm_clk_register(). Normally | |
3920 | * this function will not need to be called and the resource management | |
3921 | * code will ensure that the resource is freed. | |
3922 | */ | |
3923 | void devm_clk_unregister(struct device *dev, struct clk *clk) | |
3924 | { | |
3925 | WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk)); | |
3926 | } | |
3927 | EXPORT_SYMBOL_GPL(devm_clk_unregister); | |
3928 | ||
3929 | /** | |
3930 | * devm_clk_hw_unregister - resource managed clk_hw_unregister() | |
3931 | * @dev: device that is unregistering the hardware-specific clock data | |
3932 | * @hw: link to hardware-specific clock data | |
3933 | * | |
3934 | * Unregister a clk_hw registered with devm_clk_hw_register(). Normally | |
3935 | * this function will not need to be called and the resource management | |
3936 | * code will ensure that the resource is freed. | |
3937 | */ | |
3938 | void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw) | |
3939 | { | |
3940 | WARN_ON(devres_release(dev, devm_clk_hw_release, devm_clk_hw_match, | |
3941 | hw)); | |
3942 | } | |
3943 | EXPORT_SYMBOL_GPL(devm_clk_hw_unregister); | |
3944 | ||
3945 | /* | |
3946 | * clkdev helpers | |
3947 | */ | |
3948 | ||
3949 | void __clk_put(struct clk *clk) | |
3950 | { | |
3951 | struct module *owner; | |
3952 | ||
3953 | if (!clk || WARN_ON_ONCE(IS_ERR(clk))) | |
3954 | return; | |
3955 | ||
3956 | clk_prepare_lock(); | |
3957 | ||
3958 | /* | |
3959 | * Before calling clk_put, all calls to clk_rate_exclusive_get() from a | |
3960 | * given user should be balanced with calls to clk_rate_exclusive_put() | |
3961 | * by that same consumer. | |
3962 | */ | |
3963 | if (WARN_ON(clk->exclusive_count)) { | |
3964 | /* We voiced our concern, let's sanitize the situation */ | |
3965 | clk->core->protect_count -= (clk->exclusive_count - 1); | |
3966 | clk_core_rate_unprotect(clk->core); | |
3967 | clk->exclusive_count = 0; | |
3968 | } | |
3969 | ||
3970 | hlist_del(&clk->clks_node); | |
3971 | if (clk->min_rate > clk->core->req_rate || | |
3972 | clk->max_rate < clk->core->req_rate) | |
3973 | clk_core_set_rate_nolock(clk->core, clk->core->req_rate); | |
3974 | ||
3975 | owner = clk->core->owner; | |
3976 | kref_put(&clk->core->ref, __clk_release); | |
3977 | ||
3978 | clk_prepare_unlock(); | |
3979 | ||
3980 | module_put(owner); | |
3981 | ||
3982 | free_clk(clk); | |
3983 | } | |
3984 | ||
3985 | /*** clk rate change notifiers ***/ | |
3986 | ||
3987 | /** | |
3988 | * clk_notifier_register - add a clk rate change notifier | |
3989 | * @clk: struct clk * to watch | |
3990 | * @nb: struct notifier_block * with callback info | |
3991 | * | |
3992 | * Request notification when clk's rate changes. This uses an SRCU | |
3993 | * notifier because we want it to block and notifier unregistrations are | |
3994 | * uncommon. The callbacks associated with the notifier must not | |
3995 | * re-enter the clk framework by calling any top-level clk APIs; | |
3996 | * doing so would deadlock on the prepare_lock mutex. | |
3997 | * | |
3998 | * In all notification cases (pre, post and abort rate change) the original | |
3999 | * clock rate is passed to the callback via struct clk_notifier_data.old_rate | |
4000 | * and the new frequency is passed via struct clk_notifier_data.new_rate. | |
4001 | * | |
4002 | * clk_notifier_register() must be called from non-atomic context. | |
4003 | * Returns -EINVAL if called with null arguments, -ENOMEM upon | |
4004 | * allocation failure; otherwise, passes along the return value of | |
4005 | * srcu_notifier_chain_register(). | |
4006 | */ | |
4007 | int clk_notifier_register(struct clk *clk, struct notifier_block *nb) | |
4008 | { | |
4009 | struct clk_notifier *cn; | |
4010 | int ret = -ENOMEM; | |
4011 | ||
4012 | if (!clk || !nb) | |
4013 | return -EINVAL; | |
4014 | ||
4015 | clk_prepare_lock(); | |
4016 | ||
4017 | /* search the list of notifiers for this clk */ | |
4018 | list_for_each_entry(cn, &clk_notifier_list, node) | |
4019 | if (cn->clk == clk) | |
4020 | goto found; | |
4021 | ||
4022 | /* if clk wasn't in the notifier list, allocate new clk_notifier */ | |
4023 | cn = kzalloc(sizeof(*cn), GFP_KERNEL); | |
4024 | if (!cn) | |
4025 | goto out; | |
4026 | ||
4027 | cn->clk = clk; | |
4028 | srcu_init_notifier_head(&cn->notifier_head); | |
4029 | list_add(&cn->node, &clk_notifier_list); | |
4030 | ||
4031 | /* the loop cursor is only a valid clk_notifier if the loop broke early */ | |
4032 | found: | |
4033 | ret = srcu_notifier_chain_register(&cn->notifier_head, nb); | |
4035 | ||
4036 | clk->core->notifier_count++; | |
4037 | ||
4038 | out: | |
4039 | clk_prepare_unlock(); | |
4040 | ||
4041 | return ret; | |
4042 | } | |
4043 | EXPORT_SYMBOL_GPL(clk_notifier_register); | |
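||
| /* | |
|  * A sketch of a rate-change notifier (hypothetical consumer). The | |
|  * callback must not call back into top-level clk APIs, so it only | |
|  * inspects the clk_notifier_data it is handed: | |
|  * | |
|  *	static int my_rate_cb(struct notifier_block *nb, | |
|  *			      unsigned long event, void *data) | |
|  *	{ | |
|  *		struct clk_notifier_data *ndata = data; | |
|  * | |
|  *		if (event == PRE_RATE_CHANGE) | |
|  *			pr_debug("rate: %lu -> %lu\n", | |
|  *				 ndata->old_rate, ndata->new_rate); | |
|  *		return NOTIFY_OK; | |
|  *	} | |
|  * | |
|  *	static struct notifier_block my_nb = { .notifier_call = my_rate_cb }; | |
|  * | |
|  *	ret = clk_notifier_register(clk, &my_nb); | |
|  */ | |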
4044 | ||
4045 | /** | |
4046 | * clk_notifier_unregister - remove a clk rate change notifier | |
4047 | * @clk: struct clk * | |
4048 | * @nb: struct notifier_block * with callback info | |
4049 | * | |
4050 | * Request no further notification for changes to 'clk' and frees memory | |
4051 | * allocated in clk_notifier_register. | |
4052 | * | |
4053 | * Returns -EINVAL if called with null arguments; otherwise, passes | |
4054 | * along the return value of srcu_notifier_chain_unregister(). | |
4055 | */ | |
4056 | int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb) | |
4057 | { | |
4058 | struct clk_notifier *cn; | |
4059 | int ret = -ENOENT; | |
4060 | ||
4061 | if (!clk || !nb) | |
4062 | return -EINVAL; | |
4063 | ||
4064 | clk_prepare_lock(); | |
4065 | ||
4066 | /* | |
4067 |  * Do all the work inside the loop body: the cursor is only a valid | |
4068 |  * clk_notifier while the traversal has not run off the end of the list. | |
4069 |  */ | |
4070 | list_for_each_entry(cn, &clk_notifier_list, node) { | |
4071 | if (cn->clk == clk) { | |
4072 | ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb); | |
4073 | ||
4074 | clk->core->notifier_count--; | |
4075 | ||
4076 | /* XXX the notifier code should handle this better */ | |
4077 | if (!cn->notifier_head.head) { | |
4078 | srcu_cleanup_notifier_head(&cn->notifier_head); | |
4079 | list_del(&cn->node); | |
4080 | kfree(cn); | |
4081 | } | |
4082 | break; | |
4083 | } | |
4084 | } | |
4085 | ||
4086 | clk_prepare_unlock(); | |
4087 | ||
4088 | return ret; | |
4089 | } | |
4090 | EXPORT_SYMBOL_GPL(clk_notifier_unregister); | |
4091 | ||
4092 | #ifdef CONFIG_OF | |
4093 | /** | |
4094 | * struct of_clk_provider - Clock provider registration structure | |
4095 | * @link: Entry in global list of clock providers | |
4096 | * @node: Pointer to device tree node of clock provider | |
4097 | * @get: Get clock callback. Returns NULL or a struct clk for the | |
4098 | * given clock specifier | |
| * @get_hw: Get clk_hw callback. Returns NULL, ERR_PTR or a | |
| * struct clk_hw for the given clock specifier | |
4099 | * @data: context pointer to be passed into @get or @get_hw callback | |
4100 | */ | |
4101 | struct of_clk_provider { | |
4102 | struct list_head link; | |
4103 | ||
4104 | struct device_node *node; | |
4105 | struct clk *(*get)(struct of_phandle_args *clkspec, void *data); | |
4106 | struct clk_hw *(*get_hw)(struct of_phandle_args *clkspec, void *data); | |
4107 | void *data; | |
4108 | }; | |
4109 | ||
4110 | extern struct of_device_id __clk_of_table; | |
4111 | static const struct of_device_id __clk_of_table_sentinel | |
4112 | __used __section(__clk_of_table_end); | |
4113 | ||
4114 | static LIST_HEAD(of_clk_providers); | |
4115 | static DEFINE_MUTEX(of_clk_mutex); | |
4116 | ||
4117 | struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec, | |
4118 | void *data) | |
4119 | { | |
4120 | return data; | |
4121 | } | |
4122 | EXPORT_SYMBOL_GPL(of_clk_src_simple_get); | |
4123 | ||
4124 | struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec, void *data) | |
4125 | { | |
4126 | return data; | |
4127 | } | |
4128 | EXPORT_SYMBOL_GPL(of_clk_hw_simple_get); | |
4129 | ||
4130 | struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data) | |
4131 | { | |
4132 | struct clk_onecell_data *clk_data = data; | |
4133 | unsigned int idx = clkspec->args[0]; | |
4134 | ||
4135 | if (idx >= clk_data->clk_num) { | |
4136 | pr_err("%s: invalid clock index %u\n", __func__, idx); | |
4137 | return ERR_PTR(-EINVAL); | |
4138 | } | |
4139 | ||
4140 | return clk_data->clks[idx]; | |
4141 | } | |
4142 | EXPORT_SYMBOL_GPL(of_clk_src_onecell_get); | |
4143 | ||
4144 | struct clk_hw * | |
4145 | of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data) | |
4146 | { | |
4147 | struct clk_hw_onecell_data *hw_data = data; | |
4148 | unsigned int idx = clkspec->args[0]; | |
4149 | ||
4150 | if (idx >= hw_data->num) { | |
4151 | pr_err("%s: invalid index %u\n", __func__, idx); | |
4152 | return ERR_PTR(-EINVAL); | |
4153 | } | |
4154 | ||
4155 | return hw_data->hws[idx]; | |
4156 | } | |
4157 | EXPORT_SYMBOL_GPL(of_clk_hw_onecell_get); | |
4158 | ||
4159 | /** | |
4160 | * of_clk_add_provider() - Register a clock provider for a node | |
4161 | * @np: Device node pointer associated with clock provider | |
4162 | * @clk_src_get: callback for decoding clock | |
4163 | * @data: context pointer for @clk_src_get callback. | |
4164 | * | |
4165 | * This function is *deprecated*. Use of_clk_add_hw_provider() instead. | |
4166 | */ | |
4167 | int of_clk_add_provider(struct device_node *np, | |
4168 | struct clk *(*clk_src_get)(struct of_phandle_args *clkspec, | |
4169 | void *data), | |
4170 | void *data) | |
4171 | { | |
4172 | struct of_clk_provider *cp; | |
4173 | int ret; | |
4174 | ||
4175 | cp = kzalloc(sizeof(*cp), GFP_KERNEL); | |
4176 | if (!cp) | |
4177 | return -ENOMEM; | |
4178 | ||
4179 | cp->node = of_node_get(np); | |
4180 | cp->data = data; | |
4181 | cp->get = clk_src_get; | |
4182 | ||
4183 | mutex_lock(&of_clk_mutex); | |
4184 | list_add(&cp->link, &of_clk_providers); | |
4185 | mutex_unlock(&of_clk_mutex); | |
4186 | pr_debug("Added clock from %pOF\n", np); | |
4187 | ||
4188 | ret = of_clk_set_defaults(np, true); | |
4189 | if (ret < 0) | |
4190 | of_clk_del_provider(np); | |
4191 | ||
4192 | return ret; | |
4193 | } | |
4194 | EXPORT_SYMBOL_GPL(of_clk_add_provider); | |
4195 | ||
4196 | /** | |
4197 | * of_clk_add_hw_provider() - Register a clock provider for a node | |
4198 | * @np: Device node pointer associated with clock provider | |
4199 | * @get: callback for decoding clk_hw | |
4200 | * @data: context pointer for @get callback. | |
4201 | */ | |
4202 | int of_clk_add_hw_provider(struct device_node *np, | |
4203 | struct clk_hw *(*get)(struct of_phandle_args *clkspec, | |
4204 | void *data), | |
4205 | void *data) | |
4206 | { | |
4207 | struct of_clk_provider *cp; | |
4208 | int ret; | |
4209 | ||
4210 | cp = kzalloc(sizeof(*cp), GFP_KERNEL); | |
4211 | if (!cp) | |
4212 | return -ENOMEM; | |
4213 | ||
4214 | cp->node = of_node_get(np); | |
4215 | cp->data = data; | |
4216 | cp->get_hw = get; | |
4217 | ||
4218 | mutex_lock(&of_clk_mutex); | |
4219 | list_add(&cp->link, &of_clk_providers); | |
4220 | mutex_unlock(&of_clk_mutex); | |
4221 | pr_debug("Added clk_hw provider from %pOF\n", np); | |
4222 | ||
4223 | ret = of_clk_set_defaults(np, true); | |
4224 | if (ret < 0) | |
4225 | of_clk_del_provider(np); | |
4226 | ||
4227 | return ret; | |
4228 | } | |
4229 | EXPORT_SYMBOL_GPL(of_clk_add_hw_provider); | |
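||
| /* | |
|  * Typical pairing (sketch; NR_CLKS and the fill step are | |
|  * illustrative): expose a clk_hw array through the stock | |
|  * of_clk_hw_onecell_get() decoder so DT cells index straight into it. | |
|  * | |
|  *	struct clk_hw_onecell_data *hw_data; | |
|  * | |
|  *	hw_data = kzalloc(struct_size(hw_data, hws, NR_CLKS), GFP_KERNEL); | |
|  *	hw_data->num = NR_CLKS; | |
|  *	(fill hw_data->hws[i] from clk_hw_register() calls) | |
|  * | |
|  *	ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, hw_data); | |
|  */ | |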
4230 | ||
4231 | static void devm_of_clk_release_provider(struct device *dev, void *res) | |
4232 | { | |
4233 | of_clk_del_provider(*(struct device_node **)res); | |
4234 | } | |
4235 | ||
4236 | /* | |
4237 | * We allow a child device to use its parent device as the clock provider node | |
4238 | * for cases like MFD sub-devices where the child device driver wants to use | |
4239 | * devm_*() APIs but the child is not listed as a sub-node in DT. | |
4240 | */ | |
4241 | static struct device_node *get_clk_provider_node(struct device *dev) | |
4242 | { | |
4243 | struct device_node *np, *parent_np; | |
4244 | ||
4245 | np = dev->of_node; | |
4246 | parent_np = dev->parent ? dev->parent->of_node : NULL; | |
4247 | ||
4248 | if (!of_find_property(np, "#clock-cells", NULL)) | |
4249 | if (of_find_property(parent_np, "#clock-cells", NULL)) | |
4250 | np = parent_np; | |
4251 | ||
4252 | return np; | |
4253 | } | |
4254 | ||
4255 | /** | |
4256 | * devm_of_clk_add_hw_provider() - Managed clk provider node registration | |
4257 | * @dev: Device acting as the clock provider (used for DT node and lifetime) | |
4258 | * @get: callback for decoding clk_hw | |
4259 | * @data: context pointer for @get callback | |
4260 | * | |
4261 | * Registers a clock provider for the given device's node. If the device has no | |
4262 | * DT node, or if its node lacks clock provider information (#clock-cells), | |
4263 | * then the parent device's node is scanned for this information. If the parent | |
4264 | * node has a #clock-cells property, it is used for the registration. The | |
4265 | * provider is automatically released at device exit. | |
4266 | * | |
4267 | * Return: 0 on success or an errno on failure. | |
4268 | */ | |
4269 | int devm_of_clk_add_hw_provider(struct device *dev, | |
4270 | struct clk_hw *(*get)(struct of_phandle_args *clkspec, | |
4271 | void *data), | |
4272 | void *data) | |
4273 | { | |
4274 | struct device_node **ptr, *np; | |
4275 | int ret; | |
4276 | ||
4277 | ptr = devres_alloc(devm_of_clk_release_provider, sizeof(*ptr), | |
4278 | GFP_KERNEL); | |
4279 | if (!ptr) | |
4280 | return -ENOMEM; | |
4281 | ||
4282 | np = get_clk_provider_node(dev); | |
4283 | ret = of_clk_add_hw_provider(np, get, data); | |
4284 | if (!ret) { | |
4285 | *ptr = np; | |
4286 | devres_add(dev, ptr); | |
4287 | } else { | |
4288 | devres_free(ptr); | |
4289 | } | |
4290 | ||
4291 | return ret; | |
4292 | } | |
4293 | EXPORT_SYMBOL_GPL(devm_of_clk_add_hw_provider); | |
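||
| /* | |
|  * Devres sketch: same decoder as above, but the provider is removed | |
|  * automatically on driver detach ("hw_data" as in the previous | |
|  * example): | |
|  * | |
|  *	ret = devm_of_clk_add_hw_provider(&pdev->dev, | |
|  *					  of_clk_hw_onecell_get, hw_data); | |
|  */ | |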
4294 | ||
4295 | /** | |
4296 | * of_clk_del_provider() - Remove a previously registered clock provider | |
4297 | * @np: Device node pointer associated with clock provider | |
4298 | */ | |
4299 | void of_clk_del_provider(struct device_node *np) | |
4300 | { | |
4301 | struct of_clk_provider *cp; | |
4302 | ||
4303 | mutex_lock(&of_clk_mutex); | |
4304 | list_for_each_entry(cp, &of_clk_providers, link) { | |
4305 | if (cp->node == np) { | |
4306 | list_del(&cp->link); | |
4307 | of_node_put(cp->node); | |
4308 | kfree(cp); | |
4309 | break; | |
4310 | } | |
4311 | } | |
4312 | mutex_unlock(&of_clk_mutex); | |
4313 | } | |
4314 | EXPORT_SYMBOL_GPL(of_clk_del_provider); | |
4315 | ||
4316 | static int devm_clk_provider_match(struct device *dev, void *res, void *data) | |
4317 | { | |
4318 | struct device_node **np = res; | |
4319 | ||
4320 | if (WARN_ON(!np || !*np)) | |
4321 | return 0; | |
4322 | ||
4323 | return *np == data; | |
4324 | } | |
4325 | ||
4326 | /** | |
4327 | * devm_of_clk_del_provider() - Remove clock provider registered using devm | |
4328 | * @dev: Device to whose lifetime the clock provider was bound | |
4329 | */ | |
4330 | void devm_of_clk_del_provider(struct device *dev) | |
4331 | { | |
4332 | int ret; | |
4333 | struct device_node *np = get_clk_provider_node(dev); | |
4334 | ||
4335 | ret = devres_release(dev, devm_of_clk_release_provider, | |
4336 | devm_clk_provider_match, np); | |
4337 | ||
4338 | WARN_ON(ret); | |
4339 | } | |
4340 | EXPORT_SYMBOL(devm_of_clk_del_provider); | |
4341 | ||
4342 | /* | |
4343 | * Beware the return values when np is valid, but no clock provider is found. | |
4344 | * If name == NULL, the function returns -ENOENT. | |
4345 | * If name != NULL, the function returns -EINVAL. This is because | |
4346 | * of_parse_phandle_with_args() is called even if of_property_match_string() | |
4347 | * returns an error. | |
4348 | */ | |
4349 | static int of_parse_clkspec(const struct device_node *np, int index, | |
4350 | const char *name, struct of_phandle_args *out_args) | |
4351 | { | |
4352 | int ret = -ENOENT; | |
4353 | ||
4354 | /* Walk up the tree of devices looking for a clock property that matches */ | |
4355 | while (np) { | |
4356 | /* | |
4357 | * For named clocks, first look up the name in the | |
4358 | * "clock-names" property. If it cannot be found, then index | |
4359 | * will be an error code and of_parse_phandle_with_args() will | |
4360 | * return -EINVAL. | |
4361 | */ | |
4362 | if (name) | |
4363 | index = of_property_match_string(np, "clock-names", name); | |
4364 | ret = of_parse_phandle_with_args(np, "clocks", "#clock-cells", | |
4365 | index, out_args); | |
4366 | if (!ret) | |
4367 | break; | |
4368 | if (name && index >= 0) | |
4369 | break; | |
4370 | ||
4371 | /* | |
4372 | * No matching clock found on this node. If the parent node | |
4373 | * has a "clock-ranges" property, then we can try one of its | |
4374 | * clocks. | |
4375 | */ | |
4376 | np = np->parent; | |
4377 | if (np && !of_get_property(np, "clock-ranges", NULL)) | |
4378 | break; | |
4379 | index = 0; | |
4380 | } | |
4381 | ||
4382 | return ret; | |
4383 | } | |
4384 | ||
4385 | static struct clk_hw * | |
4386 | __of_clk_get_hw_from_provider(struct of_clk_provider *provider, | |
4387 | struct of_phandle_args *clkspec) | |
4388 | { | |
4389 | struct clk *clk; | |
4390 | ||
4391 | if (provider->get_hw) | |
4392 | return provider->get_hw(clkspec, provider->data); | |
4393 | ||
4394 | clk = provider->get(clkspec, provider->data); | |
4395 | if (IS_ERR(clk)) | |
4396 | return ERR_CAST(clk); | |
4397 | return __clk_get_hw(clk); | |
4398 | } | |
4399 | ||
4400 | static struct clk_hw * | |
4401 | of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec) | |
4402 | { | |
4403 | struct of_clk_provider *provider; | |
4404 | struct clk_hw *hw = ERR_PTR(-EPROBE_DEFER); | |
4405 | ||
4406 | if (!clkspec) | |
4407 | return ERR_PTR(-EINVAL); | |
4408 | ||
4409 | mutex_lock(&of_clk_mutex); | |
4410 | list_for_each_entry(provider, &of_clk_providers, link) { | |
4411 | if (provider->node == clkspec->np) { | |
4412 | hw = __of_clk_get_hw_from_provider(provider, clkspec); | |
4413 | if (!IS_ERR(hw)) | |
4414 | break; | |
4415 | } | |
4416 | } | |
4417 | mutex_unlock(&of_clk_mutex); | |
4418 | ||
4419 | return hw; | |
4420 | } | |
4421 | ||
4422 | /** | |
4423 | * of_clk_get_from_provider() - Lookup a clock from a clock provider | |
4424 | * @clkspec: pointer to a clock specifier data structure | |
4425 | * | |
4426 | * This function looks up a struct clk from the registered list of clock | |
4427 | * providers. The input is a clock specifier data structure as returned | |
4428 | * by the of_parse_phandle_with_args() function call. | |
4429 | */ | |
4430 | struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec) | |
4431 | { | |
4432 | struct clk_hw *hw = of_clk_get_hw_from_clkspec(clkspec); | |
4433 | ||
4434 | return clk_hw_create_clk(NULL, hw, NULL, __func__); | |
4435 | } | |
4436 | EXPORT_SYMBOL_GPL(of_clk_get_from_provider); | |
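||
| /* | |
|  * Sketch of the expected calling pattern: resolve a "clocks" phandle | |
|  * into a clkspec first, then look the clock up here. | |
|  * | |
|  *	struct of_phandle_args clkspec; | |
|  *	struct clk *clk; | |
|  * | |
|  *	if (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", | |
|  *					0, &clkspec)) { | |
|  *		clk = of_clk_get_from_provider(&clkspec); | |
|  *		of_node_put(clkspec.np); | |
|  *	} | |
|  */ | |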
4437 | ||
4438 | struct clk_hw *of_clk_get_hw(struct device_node *np, int index, | |
4439 | const char *con_id) | |
4440 | { | |
4441 | int ret; | |
4442 | struct clk_hw *hw; | |
4443 | struct of_phandle_args clkspec; | |
4444 | ||
4445 | ret = of_parse_clkspec(np, index, con_id, &clkspec); | |
4446 | if (ret) | |
4447 | return ERR_PTR(ret); | |
4448 | ||
4449 | hw = of_clk_get_hw_from_clkspec(&clkspec); | |
4450 | of_node_put(clkspec.np); | |
4451 | ||
4452 | return hw; | |
4453 | } | |
4454 | ||
4455 | static struct clk *__of_clk_get(struct device_node *np, | |
4456 | int index, const char *dev_id, | |
4457 | const char *con_id) | |
4458 | { | |
4459 | struct clk_hw *hw = of_clk_get_hw(np, index, con_id); | |
4460 | ||
4461 | return clk_hw_create_clk(NULL, hw, dev_id, con_id); | |
4462 | } | |
4463 | ||
4464 | struct clk *of_clk_get(struct device_node *np, int index) | |
4465 | { | |
4466 | return __of_clk_get(np, index, np->full_name, NULL); | |
4467 | } | |
4468 | EXPORT_SYMBOL(of_clk_get); | |
4469 | ||
4470 | /** | |
4471 | * of_clk_get_by_name() - Parse and lookup a clock referenced by a device node | |
4472 | * @np: pointer to clock consumer node | |
4473 | * @name: name of consumer's clock input, or NULL for the first clock reference | |
4474 | * | |
4475 | * This function parses the clocks and clock-names properties, | |
4476 | * and uses them to look up the struct clk from the registered list of clock | |
4477 | * providers. | |
4478 | */ | |
4479 | struct clk *of_clk_get_by_name(struct device_node *np, const char *name) | |
4480 | { | |
4481 | if (!np) | |
4482 | return ERR_PTR(-ENOENT); | |
4483 | ||
4484 | return __of_clk_get(np, 0, np->full_name, name); | |
4485 | } | |
4486 | EXPORT_SYMBOL(of_clk_get_by_name); | |
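||
| /* | |
|  * Consumer-side sketch, assuming a DT node along the lines of | |
|  * clocks = <&pclk>, <&baudclk>; clock-names = "pclk", "baud"; | |
|  * | |
|  *	struct clk *clk = of_clk_get_by_name(np, "baud"); | |
|  * | |
|  *	if (IS_ERR(clk)) | |
|  *		return PTR_ERR(clk); | |
|  */ | |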
4487 | ||
4488 | /** | |
4489 | * of_clk_get_parent_count() - Count the number of clocks a device node has | |
4490 | * @np: device node to count | |
4491 | * | |
4492 | * Returns: The number of clocks that are possible parents of this node | |
4493 | */ | |
4494 | unsigned int of_clk_get_parent_count(struct device_node *np) | |
4495 | { | |
4496 | int count; | |
4497 | ||
4498 | count = of_count_phandle_with_args(np, "clocks", "#clock-cells"); | |
4499 | if (count < 0) | |
4500 | return 0; | |
4501 | ||
4502 | return count; | |
4503 | } | |
4504 | EXPORT_SYMBOL_GPL(of_clk_get_parent_count); | |
4505 | ||
4506 | const char *of_clk_get_parent_name(struct device_node *np, int index) | |
4507 | { | |
4508 | struct of_phandle_args clkspec; | |
4509 | struct property *prop; | |
4510 | const char *clk_name; | |
4511 | const __be32 *vp; | |
4512 | u32 pv; | |
4513 | int rc; | |
4514 | int count; | |
4515 | struct clk *clk; | |
4516 | ||
4517 | rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index, | |
4518 | &clkspec); | |
4519 | if (rc) | |
4520 | return NULL; | |
4521 | ||
4522 | index = clkspec.args_count ? clkspec.args[0] : 0; | |
4523 | count = 0; | |
4524 | ||
4525 | /* | |
|  * If there is an indices property, use it to translate the index | |
4526 |  * specified into an array offset for the clock-output-names property. | |
4527 | */ | |
4528 | of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) { | |
4529 | if (index == pv) { | |
4530 | index = count; | |
4531 | break; | |
4532 | } | |
4533 | count++; | |
4534 | } | |
4535 | /* We went off the end of 'clock-indices' without finding it */ | |
4536 | if (prop && !vp) | |
4537 | return NULL; | |
4538 | ||
4539 | if (of_property_read_string_index(clkspec.np, "clock-output-names", | |
4540 | index, | |
4541 | &clk_name) < 0) { | |
4542 | /* | |
4543 | * Best effort to get the name if the clock has been | |
4544 | * registered with the framework. If the clock isn't | |
4545 | * registered, we return the node name as the name of | |
4546 | * the clock as long as #clock-cells = 0. | |
4547 | */ | |
4548 | clk = of_clk_get_from_provider(&clkspec); | |
4549 | if (IS_ERR(clk)) { | |
4550 | if (clkspec.args_count == 0) | |
4551 | clk_name = clkspec.np->name; | |
4552 | else | |
4553 | clk_name = NULL; | |
4554 | } else { | |
4555 | clk_name = __clk_get_name(clk); | |
4556 | clk_put(clk); | |
4557 | } | |
4558 | } | |
4559 | ||
4561 | of_node_put(clkspec.np); | |
4562 | return clk_name; | |
4563 | } | |
4564 | EXPORT_SYMBOL_GPL(of_clk_get_parent_name); | |
4565 | ||
4566 | /** | |
4567 | * of_clk_parent_fill() - Fill @parents with names of @np's parents and return | |
4568 | * number of parents | |
4569 | * @np: Device node pointer associated with clock provider | |
4570 | * @parents: pointer to char array that holds the parents' names | |
4571 | * @size: size of the @parents array | |
4572 | * | |
4573 | * Return: number of parents for the clock node. | |
4574 | */ | |
4575 | int of_clk_parent_fill(struct device_node *np, const char **parents, | |
4576 | unsigned int size) | |
4577 | { | |
4578 | unsigned int i = 0; | |
4579 | ||
4580 | while (i < size && (parents[i] = of_clk_get_parent_name(np, i)) != NULL) | |
4581 | i++; | |
4582 | ||
4583 | return i; | |
4584 | } | |
4585 | EXPORT_SYMBOL_GPL(of_clk_parent_fill); | |
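||
| /* | |
|  * Provider-side sketch: collect the DT parent names before building | |
|  * a clk_init_data (the fixed size of 2 is illustrative): | |
|  * | |
|  *	const char *parents[2]; | |
|  *	int num_parents = of_clk_parent_fill(np, parents, 2); | |
|  */ | |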
4586 | ||
4587 | struct clock_provider { | |
4588 | void (*clk_init_cb)(struct device_node *); | |
4589 | struct device_node *np; | |
4590 | struct list_head node; | |
4591 | }; | |
4592 | ||
4593 | /* | |
4594 | * This function looks for a parent clock. If there is one, then it | |
4595 | * checks that the provider for this parent clock was initialized, in | |
4596 | * which case the parent clock will be ready. | |
4597 | */ | |
4598 | static int parent_ready(struct device_node *np) | |
4599 | { | |
4600 | int i = 0; | |
4601 | ||
4602 | while (true) { | |
4603 | struct clk *clk = of_clk_get(np, i); | |
4604 | ||
4605 | /* this parent is ready, we can check the next one */ | |
4606 | if (!IS_ERR(clk)) { | |
4607 | clk_put(clk); | |
4608 | i++; | |
4609 | continue; | |
4610 | } | |
4611 | ||
4612 | /* at least one parent is not ready, we exit now */ | |
4613 | if (PTR_ERR(clk) == -EPROBE_DEFER) | |
4614 | return 0; | |
4615 | ||
4616 | /* | |
4617 | * Here we assume that the device tree is written | |
4618 | * correctly, so an error means that there are no | |
4619 | * more parents. Since we did not exit earlier, the | |
4620 | * previous parents are ready. And if a clock has no | |
4621 | * parents at all, there is nothing to wait for, so | |
4622 | * we can treat their absence as being ready. | |
4623 | */ | |
4624 | return 1; | |
4625 | } | |
4626 | } | |
4627 | ||
4628 | /** | |
4629 | * of_clk_detect_critical() - set CLK_IS_CRITICAL flag from Device Tree | |
4630 | * @np: Device node pointer associated with clock provider | |
4631 | * @index: clock index | |
4632 | * @flags: pointer to top-level framework flags | |
4633 | * | |
4634 | * Detects if the clock-critical property exists and, if so, sets the | |
4635 | * corresponding CLK_IS_CRITICAL flag. | |
4636 | * | |
4637 | * Do not use this function. It exists only for legacy Device Tree | |
4638 | * bindings, such as the outdated one-clock-per-node style. | |
4639 | * Those bindings typically put all clock data into .dts and the Linux | |
4640 | * driver has no clock data, thus making it impossible to set this flag | |
4641 | * correctly from the driver. Only those drivers may call | |
4642 | * of_clk_detect_critical from their setup functions. | |
4643 | * | |
4644 | * Return: error code or zero on success | |
4645 | */ | |
4646 | int of_clk_detect_critical(struct device_node *np, | |
4647 | int index, unsigned long *flags) | |
4648 | { | |
4649 | struct property *prop; | |
4650 | const __be32 *cur; | |
4651 | uint32_t idx; | |
4652 | ||
4653 | if (!np || !flags) | |
4654 | return -EINVAL; | |
4655 | ||
4656 | of_property_for_each_u32(np, "clock-critical", prop, cur, idx) | |
4657 | if (index == idx) | |
4658 | *flags |= CLK_IS_CRITICAL; | |
4659 | ||
4660 | return 0; | |
4661 | } | |
4662 | ||
4663 | /** | |
4664 | * of_clk_init() - Scan and init clock providers from the DT | |
4665 | * @matches: array of compatible values and init functions for providers. | |
4666 | * | |
4667 | * This function scans the device tree for matching clock providers | |
4668 | * and calls their initialization functions, in an order that tries | |
4669 | * to follow the dependencies between providers. | |
4670 | */ | |
4671 | void __init of_clk_init(const struct of_device_id *matches) | |
4672 | { | |
4673 | const struct of_device_id *match; | |
4674 | struct device_node *np; | |
4675 | struct clock_provider *clk_provider, *next; | |
4676 | bool is_init_done; | |
4677 | bool force = false; | |
4678 | LIST_HEAD(clk_provider_list); | |
4679 | ||
4680 | if (!matches) | |
4681 | matches = &__clk_of_table; | |
4682 | ||
4683 | /* First prepare the list of the clock providers */ | |
4684 | for_each_matching_node_and_match(np, matches, &match) { | |
4685 | struct clock_provider *parent; | |
4686 | ||
4687 | if (!of_device_is_available(np)) | |
4688 | continue; | |
4689 | ||
4690 | parent = kzalloc(sizeof(*parent), GFP_KERNEL); | |
4691 | if (!parent) { | |
4692 | list_for_each_entry_safe(clk_provider, next, | |
4693 | &clk_provider_list, node) { | |
4694 | list_del(&clk_provider->node); | |
4695 | of_node_put(clk_provider->np); | |
4696 | kfree(clk_provider); | |
4697 | } | |
4698 | of_node_put(np); | |
4699 | return; | |
4700 | } | |
4701 | ||
4702 | parent->clk_init_cb = match->data; | |
4703 | parent->np = of_node_get(np); | |
4704 | list_add_tail(&parent->node, &clk_provider_list); | |
4705 | } | |
4706 | ||
4707 | while (!list_empty(&clk_provider_list)) { | |
4708 | is_init_done = false; | |
4709 | list_for_each_entry_safe(clk_provider, next, | |
4710 | &clk_provider_list, node) { | |
4711 | if (force || parent_ready(clk_provider->np)) { | |
4712 | ||
4713 | /* Don't populate platform devices */ | |
4714 | of_node_set_flag(clk_provider->np, | |
4715 | OF_POPULATED); | |
4716 | ||
4717 | clk_provider->clk_init_cb(clk_provider->np); | |
4718 | of_clk_set_defaults(clk_provider->np, true); | |
4719 | ||
4720 | list_del(&clk_provider->node); | |
4721 | of_node_put(clk_provider->np); | |
4722 | kfree(clk_provider); | |
4723 | is_init_done = true; | |
4724 | } | |
4725 | } | |
4726 | ||
4727 | /* | |
4728 | * We didn't manage to initialize any of the | |
4729 | * remaining providers during the last pass, so | |
4730 | * initialize all the remaining ones unconditionally, | |
4731 | * in case a clock parent was not mandatory. | |
4732 | */ | |
4733 | if (!is_init_done) | |
4734 | force = true; | |
4735 | } | |
4736 | } | |
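||
| /* | |
|  * Providers normally never call of_clk_init() directly; they hook | |
|  * into __clk_of_table with CLK_OF_DECLARE and the architecture's | |
|  * platform or timer init invokes of_clk_init(NULL). Sketch: | |
|  * | |
|  *	static void __init my_clk_setup(struct device_node *np) { ... } | |
|  *	CLK_OF_DECLARE(my_clk, "vendor,my-clock", my_clk_setup); | |
|  */ | |
||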
4737 | #endif |