/*
 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Standard functionality for the common clock API. See Documentation/clk.txt
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clk/clk-conf.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/clkdev.h>
#include <linux/stringify.h>

#include "clk.h"

static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);

static struct task_struct *prepare_owner;
static struct task_struct *enable_owner;

static int prepare_refcnt;
static int enable_refcnt;

static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);

/*** private data structures ***/

struct clk_core {
	const char *name;
	const struct clk_ops *ops;
	struct clk_hw *hw;
	struct module *owner;
	struct device *dev;
	struct clk_core *parent;
	const char **parent_names;
	struct clk_core **parents;
	u8 num_parents;
	u8 new_parent_index;
	unsigned long rate;
	unsigned long req_rate;
	unsigned long new_rate;
	struct clk_core *new_parent;
	struct clk_core *new_child;
	unsigned long flags;
	bool orphan;
	unsigned int enable_count;
	unsigned int prepare_count;
	unsigned int protect_count;
	unsigned long min_rate;
	unsigned long max_rate;
	unsigned long accuracy;
	int phase;
	struct hlist_head children;
	struct hlist_node child_node;
	struct hlist_head clks;
	unsigned int notifier_count;
#ifdef CONFIG_DEBUG_FS
	struct dentry *dentry;
	struct hlist_node debug_node;
#endif
	struct kref ref;
};

#define CREATE_TRACE_POINTS
#include <trace/events/clk.h>

struct clk {
	struct clk_core *core;
	const char *dev_id;
	const char *con_id;
	unsigned long min_rate;
	unsigned long max_rate;
	unsigned int exclusive_count;
	struct hlist_node clks_node;
};

/*** runtime pm ***/
static int clk_pm_runtime_get(struct clk_core *core)
{
	int ret = 0;

	if (!core->dev)
		return 0;

	ret = pm_runtime_get_sync(core->dev);
	return ret < 0 ? ret : 0;
}

static void clk_pm_runtime_put(struct clk_core *core)
{
	if (!core->dev)
		return;

	pm_runtime_put_sync(core->dev);
}

/*** locking ***/
static void clk_prepare_lock(void)
{
	if (!mutex_trylock(&prepare_lock)) {
		if (prepare_owner == current) {
			prepare_refcnt++;
			return;
		}
		mutex_lock(&prepare_lock);
	}
	WARN_ON_ONCE(prepare_owner != NULL);
	WARN_ON_ONCE(prepare_refcnt != 0);
	prepare_owner = current;
	prepare_refcnt = 1;
}

static void clk_prepare_unlock(void)
{
	WARN_ON_ONCE(prepare_owner != current);
	WARN_ON_ONCE(prepare_refcnt == 0);

	if (--prepare_refcnt)
		return;
	prepare_owner = NULL;
	mutex_unlock(&prepare_lock);
}

static unsigned long clk_enable_lock(void)
	__acquires(enable_lock)
{
	unsigned long flags;

	/*
	 * On UP systems, spin_trylock_irqsave() always returns true, even if
	 * we already hold the lock. So, in that case, we rely only on
	 * reference counting.
	 */
	if (!IS_ENABLED(CONFIG_SMP) ||
	    !spin_trylock_irqsave(&enable_lock, flags)) {
		if (enable_owner == current) {
			enable_refcnt++;
			__acquire(enable_lock);
			if (!IS_ENABLED(CONFIG_SMP))
				local_save_flags(flags);
			return flags;
		}
		spin_lock_irqsave(&enable_lock, flags);
	}
	WARN_ON_ONCE(enable_owner != NULL);
	WARN_ON_ONCE(enable_refcnt != 0);
	enable_owner = current;
	enable_refcnt = 1;
	return flags;
}

static void clk_enable_unlock(unsigned long flags)
	__releases(enable_lock)
{
	WARN_ON_ONCE(enable_owner != current);
	WARN_ON_ONCE(enable_refcnt == 0);

	if (--enable_refcnt) {
		__release(enable_lock);
		return;
	}
	enable_owner = NULL;
	spin_unlock_irqrestore(&enable_lock, flags);
}
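
/*
 * Illustrative sketch (not part of the original file): both locks above are
 * reentrant for the owning task, so framework code that already holds
 * prepare_lock may call helpers that take it again without deadlocking:
 *
 *	clk_prepare_lock();	// refcnt 1, owner = current
 *	clk_prepare_lock();	// same task: refcnt 2, no deadlock
 *	clk_prepare_unlock();	// refcnt 1, mutex still held
 *	clk_prepare_unlock();	// refcnt 0, mutex released
 */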

static bool clk_core_rate_is_protected(struct clk_core *core)
{
	return core->protect_count;
}

static bool clk_core_is_prepared(struct clk_core *core)
{
	bool ret = false;

	/*
	 * .is_prepared is optional for clocks that can prepare
	 * fall back to software usage counter if it is missing
	 */
	if (!core->ops->is_prepared)
		return core->prepare_count;

	if (!clk_pm_runtime_get(core)) {
		ret = core->ops->is_prepared(core->hw);
		clk_pm_runtime_put(core);
	}

	return ret;
}

static bool clk_core_is_enabled(struct clk_core *core)
{
	bool ret = false;

	/*
	 * .is_enabled is only mandatory for clocks that gate
	 * fall back to software usage counter if .is_enabled is missing
	 */
	if (!core->ops->is_enabled)
		return core->enable_count;

	/*
	 * Check if clock controller's device is runtime active before
	 * calling .is_enabled callback. If not, assume that clock is
	 * disabled, because we might be called from atomic context, from
	 * which pm_runtime_get() is not allowed.
	 * This function is called mainly from clk_disable_unused_subtree,
	 * which ensures proper runtime pm activation of controller before
	 * taking enable spinlock, but the below check is needed if one tries
	 * to call it from other places.
	 */
	if (core->dev) {
		pm_runtime_get_noresume(core->dev);
		if (!pm_runtime_active(core->dev)) {
			ret = false;
			goto done;
		}
	}

	ret = core->ops->is_enabled(core->hw);
done:
	if (core->dev)
		pm_runtime_put(core->dev);

	return ret;
}

/*** helper functions ***/

const char *__clk_get_name(const struct clk *clk)
{
	return !clk ? NULL : clk->core->name;
}
EXPORT_SYMBOL_GPL(__clk_get_name);

const char *clk_hw_get_name(const struct clk_hw *hw)
{
	return hw->core->name;
}
EXPORT_SYMBOL_GPL(clk_hw_get_name);

struct clk_hw *__clk_get_hw(struct clk *clk)
{
	return !clk ? NULL : clk->core->hw;
}
EXPORT_SYMBOL_GPL(__clk_get_hw);

unsigned int clk_hw_get_num_parents(const struct clk_hw *hw)
{
	return hw->core->num_parents;
}
EXPORT_SYMBOL_GPL(clk_hw_get_num_parents);

struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw)
{
	return hw->core->parent ? hw->core->parent->hw : NULL;
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent);

static struct clk_core *__clk_lookup_subtree(const char *name,
					     struct clk_core *core)
{
	struct clk_core *child;
	struct clk_core *ret;

	if (!strcmp(core->name, name))
		return core;

	hlist_for_each_entry(child, &core->children, child_node) {
		ret = __clk_lookup_subtree(name, child);
		if (ret)
			return ret;
	}

	return NULL;
}

static struct clk_core *clk_core_lookup(const char *name)
{
	struct clk_core *root_clk;
	struct clk_core *ret;

	if (!name)
		return NULL;

	/* search the 'proper' clk tree first */
	hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	/* if not found, then search the orphan tree */
	hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	return NULL;
}

static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core,
						     u8 index)
{
	if (!core || index >= core->num_parents)
		return NULL;

	if (!core->parents[index])
		core->parents[index] =
			clk_core_lookup(core->parent_names[index]);

	return core->parents[index];
}

struct clk_hw *
clk_hw_get_parent_by_index(const struct clk_hw *hw, unsigned int index)
{
	struct clk_core *parent;

	parent = clk_core_get_parent_by_index(hw->core, index);

	return !parent ? NULL : parent->hw;
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent_by_index);

unsigned int __clk_get_enable_count(struct clk *clk)
{
	return !clk ? 0 : clk->core->enable_count;
}

static unsigned long clk_core_get_rate_nolock(struct clk_core *core)
{
	unsigned long ret;

	if (!core) {
		ret = 0;
		goto out;
	}

	ret = core->rate;

	if (!core->num_parents)
		goto out;

	if (!core->parent)
		ret = 0;

out:
	return ret;
}

unsigned long clk_hw_get_rate(const struct clk_hw *hw)
{
	return clk_core_get_rate_nolock(hw->core);
}
EXPORT_SYMBOL_GPL(clk_hw_get_rate);

static unsigned long __clk_get_accuracy(struct clk_core *core)
{
	if (!core)
		return 0;

	return core->accuracy;
}

unsigned long __clk_get_flags(struct clk *clk)
{
	return !clk ? 0 : clk->core->flags;
}
EXPORT_SYMBOL_GPL(__clk_get_flags);

unsigned long clk_hw_get_flags(const struct clk_hw *hw)
{
	return hw->core->flags;
}
EXPORT_SYMBOL_GPL(clk_hw_get_flags);

bool clk_hw_is_prepared(const struct clk_hw *hw)
{
	return clk_core_is_prepared(hw->core);
}

bool clk_hw_rate_is_protected(const struct clk_hw *hw)
{
	return clk_core_rate_is_protected(hw->core);
}

bool clk_hw_is_enabled(const struct clk_hw *hw)
{
	return clk_core_is_enabled(hw->core);
}

bool __clk_is_enabled(struct clk *clk)
{
	if (!clk)
		return false;

	return clk_core_is_enabled(clk->core);
}
EXPORT_SYMBOL_GPL(__clk_is_enabled);

static bool mux_is_better_rate(unsigned long rate, unsigned long now,
			       unsigned long best, unsigned long flags)
{
	if (flags & CLK_MUX_ROUND_CLOSEST)
		return abs(now - rate) < abs(best - rate);

	return now <= rate && now > best;
}

int clk_mux_determine_rate_flags(struct clk_hw *hw,
				 struct clk_rate_request *req,
				 unsigned long flags)
{
	struct clk_core *core = hw->core, *parent, *best_parent = NULL;
	int i, num_parents, ret;
	unsigned long best = 0;
	struct clk_rate_request parent_req = *req;

	/* if NO_REPARENT flag set, pass through to current parent */
	if (core->flags & CLK_SET_RATE_NO_REPARENT) {
		parent = core->parent;
		if (core->flags & CLK_SET_RATE_PARENT) {
			ret = __clk_determine_rate(parent ? parent->hw : NULL,
						   &parent_req);
			if (ret)
				return ret;

			best = parent_req.rate;
		} else if (parent) {
			best = clk_core_get_rate_nolock(parent);
		} else {
			best = clk_core_get_rate_nolock(core);
		}

		goto out;
	}

	/* find the parent that can provide the fastest rate <= rate */
	num_parents = core->num_parents;
	for (i = 0; i < num_parents; i++) {
		parent = clk_core_get_parent_by_index(core, i);
		if (!parent)
			continue;

		if (core->flags & CLK_SET_RATE_PARENT) {
			parent_req = *req;
			ret = __clk_determine_rate(parent->hw, &parent_req);
			if (ret)
				continue;
		} else {
			parent_req.rate = clk_core_get_rate_nolock(parent);
		}

		if (mux_is_better_rate(req->rate, parent_req.rate,
				       best, flags)) {
			best_parent = parent;
			best = parent_req.rate;
		}
	}

	if (!best_parent)
		return -EINVAL;

out:
	if (best_parent)
		req->best_parent_hw = best_parent->hw;
	req->best_parent_rate = best;
	req->rate = best;

	return 0;
}
EXPORT_SYMBOL_GPL(clk_mux_determine_rate_flags);

struct clk *__clk_lookup(const char *name)
{
	struct clk_core *core = clk_core_lookup(name);

	return !core ? NULL : core->hw->clk;
}

static void clk_core_get_boundaries(struct clk_core *core,
				    unsigned long *min_rate,
				    unsigned long *max_rate)
{
	struct clk *clk_user;

	*min_rate = core->min_rate;
	*max_rate = core->max_rate;

	hlist_for_each_entry(clk_user, &core->clks, clks_node)
		*min_rate = max(*min_rate, clk_user->min_rate);

	hlist_for_each_entry(clk_user, &core->clks, clks_node)
		*max_rate = min(*max_rate, clk_user->max_rate);
}

void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
			   unsigned long max_rate)
{
	hw->core->min_rate = min_rate;
	hw->core->max_rate = max_rate;
}
EXPORT_SYMBOL_GPL(clk_hw_set_rate_range);
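
/*
 * Illustrative sketch (not part of the original file): a provider driver can
 * clamp the rates the framework may pick for its clk right after registering
 * it. The driver, hw instance and PLL limits below are all made up.
 *
 *	static int foo_pll_probe(struct platform_device *pdev)
 *	{
 *		struct clk_hw *hw = &foo_pll.hw;	// hypothetical provider
 *		int ret = devm_clk_hw_register(&pdev->dev, hw);
 *
 *		if (ret)
 *			return ret;
 *		clk_hw_set_rate_range(hw, 600000000, 1600000000);
 *		return 0;
 *	}
 */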

/*
 * Helper for finding best parent to provide a given frequency. This can be used
 * directly as a determine_rate callback (e.g. for a mux), or from a more
 * complex clock that may combine a mux with other operations.
 */
int __clk_mux_determine_rate(struct clk_hw *hw,
			     struct clk_rate_request *req)
{
	return clk_mux_determine_rate_flags(hw, req, 0);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);
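
/*
 * Illustrative sketch (not part of the original file): wiring the helper in
 * directly as a mux's .determine_rate, as the comment above describes. All
 * names other than __clk_mux_determine_rate are made up.
 *
 *	static const struct clk_ops foo_mux_ops = {
 *		.determine_rate = __clk_mux_determine_rate,
 *		.set_parent = foo_mux_set_parent,	// hypothetical
 *		.get_parent = foo_mux_get_parent,	// hypothetical
 *	};
 */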

int __clk_mux_determine_rate_closest(struct clk_hw *hw,
				     struct clk_rate_request *req)
{
	return clk_mux_determine_rate_flags(hw, req, CLK_MUX_ROUND_CLOSEST);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);

/*** clk api ***/

static void clk_core_rate_unprotect(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (WARN(core->protect_count == 0,
	    "%s already unprotected\n", core->name))
		return;

	if (--core->protect_count > 0)
		return;

	clk_core_rate_unprotect(core->parent);
}

static int clk_core_rate_nuke_protect(struct clk_core *core)
{
	int ret;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return -EINVAL;

	if (core->protect_count == 0)
		return 0;

	ret = core->protect_count;
	core->protect_count = 1;
	clk_core_rate_unprotect(core);

	return ret;
}

/**
 * clk_rate_exclusive_put - release exclusivity over clock rate control
 * @clk: the clk over which the exclusivity is released
 *
 * clk_rate_exclusive_put() completes a critical section during which a clock
 * consumer cannot tolerate any other consumer making any operation on the
 * clock which could result in a rate change or rate glitch. Exclusive clocks
 * cannot have their rate changed, either directly or indirectly due to changes
 * further up the parent chain of clocks. As a result, clocks up the parent
 * chain also get under exclusive control of the calling consumer.
 *
 * If exclusivity is claimed more than once on a clock, even by the same
 * consumer, the rate effectively gets locked as exclusivity can't be
 * preempted.
 *
 * Calls to clk_rate_exclusive_put() must be balanced with calls to
 * clk_rate_exclusive_get(). Calls to this function may sleep, and do not return
 * error status.
 */
void clk_rate_exclusive_put(struct clk *clk)
{
	if (!clk)
		return;

	clk_prepare_lock();

	/*
	 * if there is something wrong with this consumer protect count, stop
	 * here before messing with the provider
	 */
	if (WARN_ON(clk->exclusive_count <= 0))
		goto out;

	clk_core_rate_unprotect(clk->core);
	clk->exclusive_count--;
out:
	clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_rate_exclusive_put);

static void clk_core_rate_protect(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (core->protect_count == 0)
		clk_core_rate_protect(core->parent);

	core->protect_count++;
}

static void clk_core_rate_restore_protect(struct clk_core *core, int count)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (count == 0)
		return;

	clk_core_rate_protect(core);
	core->protect_count = count;
}

/**
 * clk_rate_exclusive_get - get exclusivity over the clk rate control
 * @clk: the clk over which the exclusivity of rate control is requested
 *
 * clk_rate_exclusive_get() begins a critical section during which a clock
 * consumer cannot tolerate any other consumer making any operation on the
 * clock which could result in a rate change or rate glitch. Exclusive clocks
 * cannot have their rate changed, either directly or indirectly due to changes
 * further up the parent chain of clocks. As a result, clocks up the parent
 * chain also get under exclusive control of the calling consumer.
 *
 * If exclusivity is claimed more than once on a clock, even by the same
 * consumer, the rate effectively gets locked as exclusivity can't be
 * preempted.
 *
 * Calls to clk_rate_exclusive_get() should be balanced with calls to
 * clk_rate_exclusive_put(). Calls to this function may sleep.
 * Returns 0 on success, a negative errno otherwise.
 */
int clk_rate_exclusive_get(struct clk *clk)
{
	if (!clk)
		return 0;

	clk_prepare_lock();
	clk_core_rate_protect(clk->core);
	clk->exclusive_count++;
	clk_prepare_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(clk_rate_exclusive_get);
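
/*
 * Illustrative sketch (not part of the original file): a consumer that must
 * not see its rate glitch while a transfer is in flight, using the balanced
 * get/put pair documented above. The helper names are made up.
 *
 *	ret = clk_rate_exclusive_get(baud_clk);
 *	if (ret)
 *		return ret;
 *	foo_start_transfer(dev);		// hypothetical
 *	...
 *	clk_rate_exclusive_put(baud_clk);	// balanced with the get
 */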

static void clk_core_unprepare(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (WARN(core->prepare_count == 0,
	    "%s already unprepared\n", core->name))
		return;

	if (WARN(core->prepare_count == 1 && core->flags & CLK_IS_CRITICAL,
	    "Unpreparing critical %s\n", core->name))
		return;

	if (--core->prepare_count > 0)
		return;

	WARN(core->enable_count > 0, "Unpreparing enabled %s\n", core->name);

	trace_clk_unprepare(core);

	if (core->ops->unprepare)
		core->ops->unprepare(core->hw);

	clk_pm_runtime_put(core);

	trace_clk_unprepare_complete(core);
	clk_core_unprepare(core->parent);
}

static void clk_core_unprepare_lock(struct clk_core *core)
{
	clk_prepare_lock();
	clk_core_unprepare(core);
	clk_prepare_unlock();
}

/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: the clk being unprepared
 *
 * clk_unprepare may sleep, which differentiates it from clk_disable. In a
 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
 * if the operation may sleep. One example is a clk which is accessed over
 * I2C. In the complex case a clk gate operation may require a fast and a slow
 * part. It is for this reason that clk_unprepare and clk_disable are not
 * mutually exclusive. In fact clk_disable must be called before clk_unprepare.
 */
void clk_unprepare(struct clk *clk)
{
	if (IS_ERR_OR_NULL(clk))
		return;

	clk_core_unprepare_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_unprepare);

static int clk_core_prepare(struct clk_core *core)
{
	int ret = 0;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	if (core->prepare_count == 0) {
		ret = clk_pm_runtime_get(core);
		if (ret)
			return ret;

		ret = clk_core_prepare(core->parent);
		if (ret)
			goto runtime_put;

		trace_clk_prepare(core);

		if (core->ops->prepare)
			ret = core->ops->prepare(core->hw);

		trace_clk_prepare_complete(core);

		if (ret)
			goto unprepare;
	}

	core->prepare_count++;

	return 0;
unprepare:
	clk_core_unprepare(core->parent);
runtime_put:
	clk_pm_runtime_put(core);
	return ret;
}

static int clk_core_prepare_lock(struct clk_core *core)
{
	int ret;

	clk_prepare_lock();
	ret = clk_core_prepare(core);
	clk_prepare_unlock();

	return ret;
}

/**
 * clk_prepare - prepare a clock source
 * @clk: the clk being prepared
 *
 * clk_prepare may sleep, which differentiates it from clk_enable. In a simple
 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
 * operation may sleep. One example is a clk which is accessed over I2C. In
 * the complex case a clk ungate operation may require a fast and a slow part.
 * It is for this reason that clk_prepare and clk_enable are not mutually
 * exclusive. In fact clk_prepare must be called before clk_enable.
 * Returns 0 on success, a negative errno otherwise.
 */
int clk_prepare(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_prepare_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_prepare);

static void clk_core_disable(struct clk_core *core)
{
	lockdep_assert_held(&enable_lock);

	if (!core)
		return;

	if (WARN(core->enable_count == 0, "%s already disabled\n", core->name))
		return;

	if (WARN(core->enable_count == 1 && core->flags & CLK_IS_CRITICAL,
	    "Disabling critical %s\n", core->name))
		return;

	if (--core->enable_count > 0)
		return;

	trace_clk_disable_rcuidle(core);

	if (core->ops->disable)
		core->ops->disable(core->hw);

	trace_clk_disable_complete_rcuidle(core);

	clk_core_disable(core->parent);
}

static void clk_core_disable_lock(struct clk_core *core)
{
	unsigned long flags;

	flags = clk_enable_lock();
	clk_core_disable(core);
	clk_enable_unlock(flags);
}

/**
 * clk_disable - gate a clock
 * @clk: the clk being gated
 *
 * clk_disable must not sleep, which differentiates it from clk_unprepare. In
 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
 * clk if the operation is fast and will never sleep. One example is a
 * SoC-internal clk which is controlled via simple register writes. In the
 * complex case a clk gate operation may require a fast and a slow part. It is
 * for this reason that clk_unprepare and clk_disable are not mutually
 * exclusive. In fact clk_disable must be called before clk_unprepare.
 */
void clk_disable(struct clk *clk)
{
	if (IS_ERR_OR_NULL(clk))
		return;

	clk_core_disable_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_disable);

static int clk_core_enable(struct clk_core *core)
{
	int ret = 0;

	lockdep_assert_held(&enable_lock);

	if (!core)
		return 0;

	if (WARN(core->prepare_count == 0,
	    "Enabling unprepared %s\n", core->name))
		return -ESHUTDOWN;

	if (core->enable_count == 0) {
		ret = clk_core_enable(core->parent);

		if (ret)
			return ret;

		trace_clk_enable_rcuidle(core);

		if (core->ops->enable)
			ret = core->ops->enable(core->hw);

		trace_clk_enable_complete_rcuidle(core);

		if (ret) {
			clk_core_disable(core->parent);
			return ret;
		}
	}

	core->enable_count++;
	return 0;
}

static int clk_core_enable_lock(struct clk_core *core)
{
	unsigned long flags;
	int ret;

	flags = clk_enable_lock();
	ret = clk_core_enable(core);
	clk_enable_unlock(flags);

	return ret;
}

/**
 * clk_enable - ungate a clock
 * @clk: the clk being ungated
 *
 * clk_enable must not sleep, which differentiates it from clk_prepare. In a
 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
 * if the operation will never sleep. One example is a SoC-internal clk which
 * is controlled via simple register writes. In the complex case a clk ungate
 * operation may require a fast and a slow part. It is for this reason that
 * clk_enable and clk_prepare are not mutually exclusive. In fact clk_prepare
 * must be called before clk_enable. Returns 0 on success, a negative errno
 * otherwise.
 */
int clk_enable(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_enable_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_enable);
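
/*
 * Illustrative sketch (not part of the original file): the canonical consumer
 * sequence the four kernel-docs above describe. clk_prepare() (may sleep)
 * must precede clk_enable() (atomic-safe), and teardown runs in reverse.
 *
 *	ret = clk_prepare(clk);		// sleepable context only
 *	if (ret)
 *		return ret;
 *	ret = clk_enable(clk);		// fast path, may be atomic
 *	if (ret) {
 *		clk_unprepare(clk);
 *		return ret;
 *	}
 *	...
 *	clk_disable(clk);		// must come before clk_unprepare()
 *	clk_unprepare(clk);
 */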

static int clk_core_prepare_enable(struct clk_core *core)
{
	int ret;

	ret = clk_core_prepare_lock(core);
	if (ret)
		return ret;

	ret = clk_core_enable_lock(core);
	if (ret)
		clk_core_unprepare_lock(core);

	return ret;
}

static void clk_core_disable_unprepare(struct clk_core *core)
{
	clk_core_disable_lock(core);
	clk_core_unprepare_lock(core);
}

static void clk_unprepare_unused_subtree(struct clk_core *core)
{
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	hlist_for_each_entry(child, &core->children, child_node)
		clk_unprepare_unused_subtree(child);

	if (core->prepare_count)
		return;

	if (core->flags & CLK_IGNORE_UNUSED)
		return;

	if (clk_pm_runtime_get(core))
		return;

	if (clk_core_is_prepared(core)) {
		trace_clk_unprepare(core);
		if (core->ops->unprepare_unused)
			core->ops->unprepare_unused(core->hw);
		else if (core->ops->unprepare)
			core->ops->unprepare(core->hw);
		trace_clk_unprepare_complete(core);
	}

	clk_pm_runtime_put(core);
}

static void clk_disable_unused_subtree(struct clk_core *core)
{
	struct clk_core *child;
	unsigned long flags;

	lockdep_assert_held(&prepare_lock);

	hlist_for_each_entry(child, &core->children, child_node)
		clk_disable_unused_subtree(child);

	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_prepare_enable(core->parent);

	if (clk_pm_runtime_get(core))
		goto unprepare_out;

	flags = clk_enable_lock();

	if (core->enable_count)
		goto unlock_out;

	if (core->flags & CLK_IGNORE_UNUSED)
		goto unlock_out;

	/*
	 * some gate clocks have special needs during the disable-unused
	 * sequence. call .disable_unused if available, otherwise fall
	 * back to .disable
	 */
	if (clk_core_is_enabled(core)) {
		trace_clk_disable(core);
		if (core->ops->disable_unused)
			core->ops->disable_unused(core->hw);
		else if (core->ops->disable)
			core->ops->disable(core->hw);
		trace_clk_disable_complete(core);
	}

unlock_out:
	clk_enable_unlock(flags);
	clk_pm_runtime_put(core);
unprepare_out:
	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_disable_unprepare(core->parent);
}

static bool clk_ignore_unused;
static int __init clk_ignore_unused_setup(char *__unused)
{
	clk_ignore_unused = true;
	return 1;
}
__setup("clk_ignore_unused", clk_ignore_unused_setup);

static int clk_disable_unused(void)
{
	struct clk_core *core;

	if (clk_ignore_unused) {
		pr_warn("clk: Not disabling unused clocks\n");
		return 0;
	}

	clk_prepare_lock();

	hlist_for_each_entry(core, &clk_root_list, child_node)
		clk_disable_unused_subtree(core);

	hlist_for_each_entry(core, &clk_orphan_list, child_node)
		clk_disable_unused_subtree(core);

	hlist_for_each_entry(core, &clk_root_list, child_node)
		clk_unprepare_unused_subtree(core);

	hlist_for_each_entry(core, &clk_orphan_list, child_node)
		clk_unprepare_unused_subtree(core);

	clk_prepare_unlock();

	return 0;
}
late_initcall_sync(clk_disable_unused);

static int clk_core_determine_round_nolock(struct clk_core *core,
					   struct clk_rate_request *req)
{
	long rate;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	/*
	 * At this point, core protection will be disabled if
	 * - the provider is not protected at all
	 * - the calling consumer is the only one which has exclusivity
	 *   over the provider
	 */
	if (clk_core_rate_is_protected(core)) {
		req->rate = core->rate;
	} else if (core->ops->determine_rate) {
		return core->ops->determine_rate(core->hw, req);
	} else if (core->ops->round_rate) {
		rate = core->ops->round_rate(core->hw, req->rate,
					     &req->best_parent_rate);
		if (rate < 0)
			return rate;

		req->rate = rate;
	} else {
		return -EINVAL;
	}

	return 0;
}

static void clk_core_init_rate_req(struct clk_core * const core,
				   struct clk_rate_request *req)
{
	struct clk_core *parent;

	if (WARN_ON(!core || !req))
		return;

	parent = core->parent;
	if (parent) {
		req->best_parent_hw = parent->hw;
		req->best_parent_rate = parent->rate;
	} else {
		req->best_parent_hw = NULL;
		req->best_parent_rate = 0;
	}
}

static bool clk_core_can_round(struct clk_core * const core)
{
	if (core->ops->determine_rate || core->ops->round_rate)
		return true;

	return false;
}

static int clk_core_round_rate_nolock(struct clk_core *core,
				      struct clk_rate_request *req)
{
	lockdep_assert_held(&prepare_lock);

	if (!core) {
		req->rate = 0;
		return 0;
	}

	clk_core_init_rate_req(core, req);

	if (clk_core_can_round(core))
		return clk_core_determine_round_nolock(core, req);
	else if (core->flags & CLK_SET_RATE_PARENT)
		return clk_core_round_rate_nolock(core->parent, req);

	req->rate = core->rate;
	return 0;
}

/**
 * __clk_determine_rate - get the closest rate actually supported by a clock
 * @hw: determine the rate of this clock
 * @req: target rate request
 *
 * Useful for clk_ops such as .set_rate and .determine_rate.
 */
int __clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	if (!hw) {
		req->rate = 0;
		return 0;
	}

	return clk_core_round_rate_nolock(hw->core, req);
}
EXPORT_SYMBOL_GPL(__clk_determine_rate);

unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate)
{
	int ret;
	struct clk_rate_request req;

	clk_core_get_boundaries(hw->core, &req.min_rate, &req.max_rate);
	req.rate = rate;

	ret = clk_core_round_rate_nolock(hw->core, &req);
	if (ret)
		return 0;

	return req.rate;
}
EXPORT_SYMBOL_GPL(clk_hw_round_rate);
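
/*
 * Illustrative sketch (not part of the original file): a provider asking,
 * from within its own ops, what rate the framework could actually deliver
 * before programming hardware. The driver names are made up.
 *
 *	static int foo_div_set_rate(struct clk_hw *hw, unsigned long rate,
 *				    unsigned long parent_rate)
 *	{
 *		unsigned long real = clk_hw_round_rate(hw, rate);
 *
 *		if (!real)		// 0 means the request could not be rounded
 *			return -EINVAL;
 *		return foo_program_div(hw, real, parent_rate);	// hypothetical
 *	}
 */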

/**
 * clk_round_rate - round the given rate for a clk
 * @clk: the clk for which we are rounding a rate
 * @rate: the rate which is to be rounded
 *
 * Takes in a rate as input and rounds it to a rate that the clk can actually
 * use, which is then returned. If clk doesn't support the round_rate
 * operation then the parent rate is returned.
 */
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	struct clk_rate_request req;
	int ret;

	if (!clk)
		return 0;

	clk_prepare_lock();

	if (clk->exclusive_count)
		clk_core_rate_unprotect(clk->core);

	clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate);
	req.rate = rate;

	ret = clk_core_round_rate_nolock(clk->core, &req);

	if (clk->exclusive_count)
		clk_core_rate_protect(clk->core);

	clk_prepare_unlock();

	if (ret)
		return ret;

	return req.rate;
}
EXPORT_SYMBOL_GPL(clk_round_rate);
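
/*
 * Illustrative sketch (not part of the original file): a consumer probing
 * what it would actually get before committing with clk_set_rate(). The
 * target rate and tolerance check are made up.
 *
 *	long rounded = clk_round_rate(clk, 48000000);
 *
 *	if (rounded < 0)
 *		return rounded;
 *	if (abs(rounded - 48000000) > 1000000)	// too far off for this IP
 *		return -ERANGE;
 *	return clk_set_rate(clk, rounded);
 */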

/**
 * __clk_notify - call clk notifier chain
 * @core: clk that is changing rate
 * @msg: clk notifier type (see include/linux/clk.h)
 * @old_rate: old clk rate
 * @new_rate: new clk rate
 *
 * Triggers a notifier call chain on the clk rate-change notification
 * for 'clk'. Passes a pointer to the struct clk and the previous
 * and current rates to the notifier callback. Intended to be called by
 * internal clock code only. Returns NOTIFY_DONE from the last driver
 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
 * a driver returns that.
 */
static int __clk_notify(struct clk_core *core, unsigned long msg,
			unsigned long old_rate, unsigned long new_rate)
{
	struct clk_notifier *cn;
	struct clk_notifier_data cnd;
	int ret = NOTIFY_DONE;

	cnd.old_rate = old_rate;
	cnd.new_rate = new_rate;

	list_for_each_entry(cn, &clk_notifier_list, node) {
		if (cn->clk->core == core) {
			cnd.clk = cn->clk;
			ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
						       &cnd);
			if (ret & NOTIFY_STOP_MASK)
				return ret;
		}
	}

	return ret;
}
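
/*
 * Illustrative sketch (not part of the original file): the consumer-side
 * callback these messages end up in, registered via clk_notifier_register().
 * Everything except the message names, the notifier return codes and
 * struct clk_notifier_data is made up.
 *
 *	static int foo_clk_notify(struct notifier_block *nb,
 *				  unsigned long msg, void *data)
 *	{
 *		struct clk_notifier_data *cnd = data;
 *
 *		switch (msg) {
 *		case PRE_RATE_CHANGE:
 *			return foo_can_handle(cnd->new_rate) ?	// hypothetical
 *				NOTIFY_OK : NOTIFY_BAD;	// NOTIFY_BAD aborts
 *		case POST_RATE_CHANGE:
 *			foo_reconfigure(cnd->new_rate);		// hypothetical
 *			return NOTIFY_OK;
 *		default:
 *			return NOTIFY_DONE;
 *		}
 *	}
 */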

/**
 * __clk_recalc_accuracies
 * @core: first clk in the subtree
 *
 * Walks the subtree of clks starting with clk and recalculates accuracies as
 * it goes. Note that if a clk does not implement the .recalc_accuracy
 * callback then it is assumed that the clock will take on the accuracy of its
 * parent.
 */
static void __clk_recalc_accuracies(struct clk_core *core)
{
	unsigned long parent_accuracy = 0;
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	if (core->parent)
		parent_accuracy = core->parent->accuracy;

	if (core->ops->recalc_accuracy)
		core->accuracy = core->ops->recalc_accuracy(core->hw,
							    parent_accuracy);
	else
		core->accuracy = parent_accuracy;

	hlist_for_each_entry(child, &core->children, child_node)
		__clk_recalc_accuracies(child);
}

static long clk_core_get_accuracy(struct clk_core *core)
{
	unsigned long accuracy;

	clk_prepare_lock();
	if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE))
		__clk_recalc_accuracies(core);

	accuracy = __clk_get_accuracy(core);
	clk_prepare_unlock();

	return accuracy;
}

/**
 * clk_get_accuracy - return the accuracy of clk
 * @clk: the clk whose accuracy is being returned
 *
 * Simply returns the cached accuracy of the clk, unless the
 * CLK_GET_ACCURACY_NOCACHE flag is set, which means an accuracy
 * recalculation will be issued.
 * If clk is NULL then returns 0.
 */
long clk_get_accuracy(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_get_accuracy(clk->core);
}
EXPORT_SYMBOL_GPL(clk_get_accuracy);

static unsigned long clk_recalc(struct clk_core *core,
				unsigned long parent_rate)
{
	unsigned long rate = parent_rate;

	if (core->ops->recalc_rate && !clk_pm_runtime_get(core)) {
		rate = core->ops->recalc_rate(core->hw, parent_rate);
		clk_pm_runtime_put(core);
	}
	return rate;
}

/**
 * __clk_recalc_rates
 * @core: first clk in the subtree
 * @msg: notification type (see include/linux/clk.h)
 *
 * Walks the subtree of clks starting with clk and recalculates rates as it
 * goes. Note that if a clk does not implement the .recalc_rate callback then
 * it is assumed that the clock will take on the rate of its parent.
 *
 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
 * if necessary.
 */
static void __clk_recalc_rates(struct clk_core *core, unsigned long msg)
{
	unsigned long old_rate;
	unsigned long parent_rate = 0;
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	old_rate = core->rate;

	if (core->parent)
		parent_rate = core->parent->rate;

	core->rate = clk_recalc(core, parent_rate);

	/*
	 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
	 * & ABORT_RATE_CHANGE notifiers
	 */
	if (core->notifier_count && msg)
		__clk_notify(core, msg, old_rate, core->rate);

	hlist_for_each_entry(child, &core->children, child_node)
		__clk_recalc_rates(child, msg);
}

static unsigned long clk_core_get_rate(struct clk_core *core)
{
	unsigned long rate;

	clk_prepare_lock();

	if (core && (core->flags & CLK_GET_RATE_NOCACHE))
		__clk_recalc_rates(core, 0);

	rate = clk_core_get_rate_nolock(core);
	clk_prepare_unlock();

	return rate;
}

/**
 * clk_get_rate - return the rate of clk
 * @clk: the clk whose rate is being returned
 *
 * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
 * is set, which means a recalc_rate will be issued.
 * If clk is NULL then returns 0.
 */
unsigned long clk_get_rate(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_get_rate(clk->core);
}
EXPORT_SYMBOL_GPL(clk_get_rate);
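
/*
 * Illustrative sketch (not part of the original file): reading a rate from a
 * consumer. With CLK_GET_RATE_NOCACHE set on the provider, the call above
 * re-reads hardware instead of returning the cached value.
 *
 *	unsigned long hz = clk_get_rate(clk);
 *
 *	if (!hz)
 *		dev_warn(dev, "clock rate unknown\n");
 */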

static int clk_fetch_parent_index(struct clk_core *core,
				  struct clk_core *parent)
{
	int i;

	if (!parent)
		return -EINVAL;

	for (i = 0; i < core->num_parents; i++)
		if (clk_core_get_parent_by_index(core, i) == parent)
			return i;

	return -EINVAL;
}

/*
 * Update the orphan status of @core and all its children.
 */
static void clk_core_update_orphan_status(struct clk_core *core, bool is_orphan)
{
	struct clk_core *child;

	core->orphan = is_orphan;

	hlist_for_each_entry(child, &core->children, child_node)
		clk_core_update_orphan_status(child, is_orphan);
}

static void clk_reparent(struct clk_core *core, struct clk_core *new_parent)
{
	bool was_orphan = core->orphan;

	hlist_del(&core->child_node);

	if (new_parent) {
		bool becomes_orphan = new_parent->orphan;

		/* avoid duplicate POST_RATE_CHANGE notifications */
		if (new_parent->new_child == core)
			new_parent->new_child = NULL;

		hlist_add_head(&core->child_node, &new_parent->children);

		if (was_orphan != becomes_orphan)
			clk_core_update_orphan_status(core, becomes_orphan);
	} else {
		hlist_add_head(&core->child_node, &clk_orphan_list);
		if (!was_orphan)
			clk_core_update_orphan_status(core, true);
	}

	core->parent = new_parent;
}

static struct clk_core *__clk_set_parent_before(struct clk_core *core,
						struct clk_core *parent)
{
	unsigned long flags;
	struct clk_core *old_parent = core->parent;

	/*
	 * 1. enable parents for CLK_OPS_PARENT_ENABLE clock
	 *
	 * 2. Migrate prepare state between parents and prevent race with
	 * clk_enable().
	 *
	 * If the clock is not prepared, then a race with
	 * clk_enable/disable() is impossible since we already have the
	 * prepare lock (future calls to clk_enable() need to be preceded by
	 * a clk_prepare()).
	 *
	 * If the clock is prepared, migrate the prepared state to the new
	 * parent and also protect against a race with clk_enable() by
	 * forcing the clock and the new parent on. This ensures that all
	 * future calls to clk_enable() are practically NOPs with respect to
	 * hardware and software states.
	 *
	 * See also: Comment for clk_set_parent() below.
	 */

	/* enable old_parent & parent if CLK_OPS_PARENT_ENABLE is set */
	if (core->flags & CLK_OPS_PARENT_ENABLE) {
		clk_core_prepare_enable(old_parent);
		clk_core_prepare_enable(parent);
	}

	/* migrate prepare count if > 0 */
	if (core->prepare_count) {
		clk_core_prepare_enable(parent);
		clk_core_enable_lock(core);
	}

	/* update the clk tree topology */
	flags = clk_enable_lock();
	clk_reparent(core, parent);
	clk_enable_unlock(flags);

	return old_parent;
}

static void __clk_set_parent_after(struct clk_core *core,
				   struct clk_core *parent,
				   struct clk_core *old_parent)
{
	/*
	 * Finish the migration of prepare state and undo the changes done
	 * for preventing a race with clk_enable().
	 */
	if (core->prepare_count) {
		clk_core_disable_lock(core);
		clk_core_disable_unprepare(old_parent);
	}

	/* re-balance ref counting if CLK_OPS_PARENT_ENABLE is set */
	if (core->flags & CLK_OPS_PARENT_ENABLE) {
		clk_core_disable_unprepare(parent);
		clk_core_disable_unprepare(old_parent);
	}
}

static int __clk_set_parent(struct clk_core *core, struct clk_core *parent,
			    u8 p_index)
{
	unsigned long flags;
	int ret = 0;
	struct clk_core *old_parent;

	old_parent = __clk_set_parent_before(core, parent);

	trace_clk_set_parent(core, parent);

	/* change clock input source */
	if (parent && core->ops->set_parent)
		ret = core->ops->set_parent(core->hw, p_index);

	trace_clk_set_parent_complete(core, parent);

	if (ret) {
		flags = clk_enable_lock();
		clk_reparent(core, old_parent);
		clk_enable_unlock(flags);
		__clk_set_parent_after(core, old_parent, parent);

		return ret;
	}

	__clk_set_parent_after(core, parent, old_parent);

	return 0;
}

/**
 * __clk_speculate_rates
 * @core: first clk in the subtree
 * @parent_rate: the "future" rate of clk's parent
 *
 * Walks the subtree of clks starting with clk, speculating rates as it
 * goes and firing off PRE_RATE_CHANGE notifications as necessary.
 *
 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
 * pre-rate change notifications and returns early if no clks in the
 * subtree have subscribed to the notifications. Note that if a clk does not
 * implement the .recalc_rate callback then it is assumed that the clock will
 * take on the rate of its parent.
 */
static int __clk_speculate_rates(struct clk_core *core,
				 unsigned long parent_rate)
{
	struct clk_core *child;
	unsigned long new_rate;
	int ret = NOTIFY_DONE;

	lockdep_assert_held(&prepare_lock);

	new_rate = clk_recalc(core, parent_rate);

	/* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
	if (core->notifier_count)
		ret = __clk_notify(core, PRE_RATE_CHANGE, core->rate, new_rate);

	if (ret & NOTIFY_STOP_MASK) {
		pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n",
			 __func__, core->name, ret);
		goto out;
	}

	hlist_for_each_entry(child, &core->children, child_node) {
		ret = __clk_speculate_rates(child, new_rate);
		if (ret & NOTIFY_STOP_MASK)
			break;
	}

out:
	return ret;
}

static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate,
			     struct clk_core *new_parent, u8 p_index)
{
	struct clk_core *child;

	core->new_rate = new_rate;
	core->new_parent = new_parent;
	core->new_parent_index = p_index;
	/* include clk in new parent's PRE_RATE_CHANGE notifications */
	core->new_child = NULL;
	if (new_parent && new_parent != core->parent)
		new_parent->new_child = core;

	hlist_for_each_entry(child, &core->children, child_node) {
		child->new_rate = clk_recalc(child, new_rate);
		clk_calc_subtree(child, child->new_rate, NULL, 0);
	}
}

/*
 * calculate the new rates returning the topmost clock that has to be
 * changed.
 */
static struct clk_core *clk_calc_new_rates(struct clk_core *core,
					   unsigned long rate)
{
	struct clk_core *top = core;
	struct clk_core *old_parent, *parent;
	unsigned long best_parent_rate = 0;
	unsigned long new_rate;
	unsigned long min_rate;
	unsigned long max_rate;
	int p_index = 0;
	long ret;

	/* sanity */
	if (IS_ERR_OR_NULL(core))
		return NULL;

	/* save parent rate, if it exists */
	parent = old_parent = core->parent;
	if (parent)
		best_parent_rate = parent->rate;

	clk_core_get_boundaries(core, &min_rate, &max_rate);

	/* find the closest rate and parent clk/rate */
	if (clk_core_can_round(core)) {
		struct clk_rate_request req;

		req.rate = rate;
		req.min_rate = min_rate;
		req.max_rate = max_rate;

		clk_core_init_rate_req(core, &req);

		ret = clk_core_determine_round_nolock(core, &req);
		if (ret < 0)
			return NULL;

		best_parent_rate = req.best_parent_rate;
		new_rate = req.rate;
		parent = req.best_parent_hw ? req.best_parent_hw->core : NULL;

		if (new_rate < min_rate || new_rate > max_rate)
			return NULL;
	} else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) {
		/* pass-through clock without adjustable parent */
		core->new_rate = core->rate;
		return NULL;
	} else {
		/* pass-through clock with adjustable parent */
		top = clk_calc_new_rates(parent, rate);
		new_rate = parent->new_rate;
		goto out;
	}

	/* some clocks must be gated to change parent */
	if (parent != old_parent &&
	    (core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
		pr_debug("%s: %s not gated but wants to reparent\n",
			 __func__, core->name);
		return NULL;
	}

	/* try finding the new parent index */
	if (parent && core->num_parents > 1) {
		p_index = clk_fetch_parent_index(core, parent);
		if (p_index < 0) {
			pr_debug("%s: clk %s can not be parent of clk %s\n",
				 __func__, parent->name, core->name);
			return NULL;
		}
	}

	if ((core->flags & CLK_SET_RATE_PARENT) && parent &&
	    best_parent_rate != parent->rate)
		top = clk_calc_new_rates(parent, best_parent_rate);

out:
	clk_calc_subtree(core, new_rate, parent, p_index);

	return top;
}

/*
 * Notify about rate changes in a subtree. Always walk down the whole tree
 * so that in case of an error we can walk down the whole tree again and
 * abort the change.
 */
static struct clk_core *clk_propagate_rate_change(struct clk_core *core,
						  unsigned long event)
{
	struct clk_core *child, *tmp_clk, *fail_clk = NULL;
	int ret = NOTIFY_DONE;

	if (core->rate == core->new_rate)
		return NULL;

	if (core->notifier_count) {
		ret = __clk_notify(core, event, core->rate, core->new_rate);
		if (ret & NOTIFY_STOP_MASK)
			fail_clk = core;
	}

	hlist_for_each_entry(child, &core->children, child_node) {
		/* Skip children who will be reparented to another clock */
		if (child->new_parent && child->new_parent != core)
			continue;
		tmp_clk = clk_propagate_rate_change(child, event);
		if (tmp_clk)
			fail_clk = tmp_clk;
	}

	/* handle the new child who might not be in core->children yet */
	if (core->new_child) {
		tmp_clk = clk_propagate_rate_change(core->new_child, event);
		if (tmp_clk)
			fail_clk = tmp_clk;
	}

	return fail_clk;
}

/*
 * walk down a subtree and set the new rates notifying the rate
 * change on the way
 */
static void clk_change_rate(struct clk_core *core)
{
	struct clk_core *child;
	struct hlist_node *tmp;
	unsigned long old_rate;
	unsigned long best_parent_rate = 0;
	bool skip_set_rate = false;
	struct clk_core *old_parent;
	struct clk_core *parent = NULL;

	old_rate = core->rate;

	if (core->new_parent) {
		parent = core->new_parent;
		best_parent_rate = core->new_parent->rate;
	} else if (core->parent) {
		parent = core->parent;
		best_parent_rate = core->parent->rate;
	}

	if (clk_pm_runtime_get(core))
		return;

	if (core->flags & CLK_SET_RATE_UNGATE) {
		unsigned long flags;

		clk_core_prepare(core);
		flags = clk_enable_lock();
		clk_core_enable(core);
		clk_enable_unlock(flags);
	}

	if (core->new_parent && core->new_parent != core->parent) {
		old_parent = __clk_set_parent_before(core, core->new_parent);
		trace_clk_set_parent(core, core->new_parent);

		if (core->ops->set_rate_and_parent) {
			skip_set_rate = true;
			core->ops->set_rate_and_parent(core->hw, core->new_rate,
					best_parent_rate,
					core->new_parent_index);
		} else if (core->ops->set_parent) {
			core->ops->set_parent(core->hw, core->new_parent_index);
		}

		trace_clk_set_parent_complete(core, core->new_parent);
		__clk_set_parent_after(core, core->new_parent, old_parent);
	}

	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_prepare_enable(parent);

	trace_clk_set_rate(core, core->new_rate);

	if (!skip_set_rate && core->ops->set_rate)
		core->ops->set_rate(core->hw, core->new_rate, best_parent_rate);

	trace_clk_set_rate_complete(core, core->new_rate);

	core->rate = clk_recalc(core, best_parent_rate);

	if (core->flags & CLK_SET_RATE_UNGATE) {
		unsigned long flags;

		flags = clk_enable_lock();
		clk_core_disable(core);
		clk_enable_unlock(flags);
		clk_core_unprepare(core);
	}

	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_disable_unprepare(parent);

	if (core->notifier_count && old_rate != core->rate)
		__clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate);

	if (core->flags & CLK_RECALC_NEW_RATES)
		(void)clk_calc_new_rates(core, core->new_rate);

	/*
	 * Use safe iteration, as change_rate can actually swap parents
	 * for certain clock types.
	 */
	hlist_for_each_entry_safe(child, tmp, &core->children, child_node) {
		/* Skip children who will be reparented to another clock */
		if (child->new_parent && child->new_parent != core)
			continue;
		clk_change_rate(child);
	}

	/* handle the new child who might not be in core->children yet */
	if (core->new_child)
		clk_change_rate(core->new_child);

	clk_pm_runtime_put(core);
}

ca5e089a
JB
1844static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core,
1845 unsigned long req_rate)
1846{
e55a839a 1847 int ret, cnt;
ca5e089a
JB
1848 struct clk_rate_request req;
1849
1850 lockdep_assert_held(&prepare_lock);
1851
1852 if (!core)
1853 return 0;
1854
e55a839a
JB
1855 /* simulate what the rate would be if it could be freely set */
1856 cnt = clk_core_rate_nuke_protect(core);
1857 if (cnt < 0)
1858 return cnt;
1859
ca5e089a
JB
1860 clk_core_get_boundaries(core, &req.min_rate, &req.max_rate);
1861 req.rate = req_rate;
1862
1863 ret = clk_core_round_rate_nolock(core, &req);
1864
e55a839a
JB
1865 /* restore the protection */
1866 clk_core_rate_restore_protect(core, cnt);
1867
ca5e089a 1868 return ret ? 0 : req.rate;
b2476490
MT
1869}
1870
4dff95dc
SB
1871static int clk_core_set_rate_nolock(struct clk_core *core,
1872 unsigned long req_rate)
a093bde2 1873{
4dff95dc 1874 struct clk_core *top, *fail_clk;
ca5e089a 1875 unsigned long rate;
9a34b453 1876 int ret = 0;
a093bde2 1877
4dff95dc
SB
1878 if (!core)
1879 return 0;
a093bde2 1880
ca5e089a
JB
1881 rate = clk_core_req_round_rate_nolock(core, req_rate);
1882
4dff95dc
SB
1883 /* bail early if nothing to do */
1884 if (rate == clk_core_get_rate_nolock(core))
1885 return 0;
a093bde2 1886
e55a839a
JB
1887 /* fail on a direct rate set of a protected provider */
1888 if (clk_core_rate_is_protected(core))
1889 return -EBUSY;
1890
4dff95dc
SB
1891 if ((core->flags & CLK_SET_RATE_GATE) && core->prepare_count)
1892 return -EBUSY;
a093bde2 1893
4dff95dc 1894 /* calculate new rates and get the topmost changed clock */
ca5e089a 1895 top = clk_calc_new_rates(core, req_rate);
4dff95dc
SB
1896 if (!top)
1897 return -EINVAL;
1898
9a34b453
MS
1899 ret = clk_pm_runtime_get(core);
1900 if (ret)
1901 return ret;
1902
4dff95dc
SB
1903 /* notify that we are about to change rates */
1904 fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
1905 if (fail_clk) {
1906 pr_debug("%s: failed to set %s rate\n", __func__,
1907 fail_clk->name);
1908 clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
9a34b453
MS
1909 ret = -EBUSY;
1910 goto err;
4dff95dc
SB
1911 }
1912
1913 /* change the rates */
1914 clk_change_rate(top);
1915
1916 core->req_rate = req_rate;
9a34b453
MS
1917err:
1918 clk_pm_runtime_put(core);
4dff95dc 1919
9a34b453 1920 return ret;
a093bde2 1921}
035a61c3
TV
1922
1923/**
4dff95dc
SB
1924 * clk_set_rate - specify a new rate for clk
1925 * @clk: the clk whose rate is being changed
1926 * @rate: the new rate for clk
035a61c3 1927 *
4dff95dc
SB
1928 * In the simplest case clk_set_rate will only adjust the rate of clk.
1929 *
1930 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
1931 * propagate up to clk's parent; whether or not this happens depends on the
1932 * outcome of clk's .round_rate implementation. If *parent_rate is unchanged
1933 * after calling .round_rate then upstream parent propagation is ignored. If
1934 * *parent_rate comes back with a new rate for clk's parent then we propagate
1935 * up to clk's parent and set its rate. Upward propagation will continue
1936 * until either a clk does not support the CLK_SET_RATE_PARENT flag or
1937 * .round_rate stops requesting changes to clk's parent_rate.
1938 *
1939 * Rate changes are accomplished via tree traversal that also recalculates the
1940 * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
1941 *
 1942 * Returns 0 on success, a negative errno otherwise.
035a61c3 1943 */
4dff95dc 1944int clk_set_rate(struct clk *clk, unsigned long rate)
035a61c3 1945{
4dff95dc
SB
1946 int ret;
1947
035a61c3
TV
1948 if (!clk)
1949 return 0;
1950
4dff95dc
SB
1951 /* prevent racing with updates to the clock topology */
1952 clk_prepare_lock();
da0f0b2c 1953
55e9b8b7
JB
1954 if (clk->exclusive_count)
1955 clk_core_rate_unprotect(clk->core);
1956
4dff95dc 1957 ret = clk_core_set_rate_nolock(clk->core, rate);
da0f0b2c 1958
55e9b8b7
JB
1959 if (clk->exclusive_count)
1960 clk_core_rate_protect(clk->core);
1961
4dff95dc 1962 clk_prepare_unlock();
4935b22c 1963
4dff95dc 1964 return ret;
4935b22c 1965}
4dff95dc 1966EXPORT_SYMBOL_GPL(clk_set_rate);
4935b22c 1967
55e9b8b7
JB
1968/**
1969 * clk_set_rate_exclusive - specify a new rate get exclusive control
1970 * @clk: the clk whose rate is being changed
1971 * @rate: the new rate for clk
1972 *
1973 * This is a combination of clk_set_rate() and clk_rate_exclusive_get()
 1974 * within a critical section.
 1975 *
 1976 * This can be used initially to ensure that at least one consumer is
 1977 * satisfied when several consumers are competing for exclusivity over the
1978 * same clock provider.
1979 *
1980 * The exclusivity is not applied if setting the rate failed.
1981 *
1982 * Calls to clk_rate_exclusive_get() should be balanced with calls to
1983 * clk_rate_exclusive_put().
1984 *
 1985 * Returns 0 on success, a negative errno otherwise.
1986 */
1987int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
1988{
1989 int ret;
1990
1991 if (!clk)
1992 return 0;
1993
1994 /* prevent racing with updates to the clock topology */
1995 clk_prepare_lock();
1996
1997 /*
 1998 	 * The temporary protection removal is not done here, on purpose:
 1999 	 * this function is meant to be used instead of clk_rate_protect(),
 2000 	 * i.e. before the consumer code path protects the clock provider.
2001 */
2002
2003 ret = clk_core_set_rate_nolock(clk->core, rate);
2004 if (!ret) {
2005 clk_core_rate_protect(clk->core);
2006 clk->exclusive_count++;
2007 }
2008
2009 clk_prepare_unlock();
2010
2011 return ret;
2012}
2013EXPORT_SYMBOL_GPL(clk_set_rate_exclusive);
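
/*
 * Illustrative sketch, not part of the original file: set a rate and take
 * exclusivity in one critical section, then drop the claim once done. The
 * 100 MHz value is hypothetical.
 */
static int __maybe_unused clk_set_rate_exclusive_example(struct clk *clk)
{
	int ret;

	ret = clk_set_rate_exclusive(clk, 100000000);
	if (ret)
		return ret;

	/* ... the rate is now protected against other consumers ... */

	clk_rate_exclusive_put(clk);
	return 0;
}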
2014
4dff95dc
SB
2015/**
2016 * clk_set_rate_range - set a rate range for a clock source
2017 * @clk: clock source
2018 * @min: desired minimum clock rate in Hz, inclusive
2019 * @max: desired maximum clock rate in Hz, inclusive
2020 *
2021 * Returns success (0) or negative errno.
2022 */
2023int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
4935b22c 2024{
4dff95dc 2025 int ret = 0;
6562fbcf 2026 unsigned long old_min, old_max, rate;
4935b22c 2027
4dff95dc
SB
2028 if (!clk)
2029 return 0;
903efc55 2030
4dff95dc
SB
2031 if (min > max) {
2032 pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n",
2033 __func__, clk->core->name, clk->dev_id, clk->con_id,
2034 min, max);
2035 return -EINVAL;
903efc55 2036 }
4935b22c 2037
4dff95dc 2038 clk_prepare_lock();
4935b22c 2039
55e9b8b7
JB
2040 if (clk->exclusive_count)
2041 clk_core_rate_unprotect(clk->core);
2042
6562fbcf
JB
2043 /* Save the current values in case we need to rollback the change */
2044 old_min = clk->min_rate;
2045 old_max = clk->max_rate;
2046 clk->min_rate = min;
2047 clk->max_rate = max;
2048
2049 rate = clk_core_get_rate_nolock(clk->core);
2050 if (rate < min || rate > max) {
2051 /*
2052 * FIXME:
 2053 		 * We are in a bit of trouble here: the current rate is outside the
 2054 		 * requested range. We will try to request the appropriate
 2055 		 * range boundary but there is a catch. It may fail for the
 2056 		 * usual reasons (clock broken, clock protected, etc) but also
2057 * because:
2058 * - round_rate() was not favorable and fell on the wrong
2059 * side of the boundary
2060 * - the determine_rate() callback does not really check for
2061 * this corner case when determining the rate
2062 */
2063
2064 if (rate < min)
2065 rate = min;
2066 else
2067 rate = max;
2068
2069 ret = clk_core_set_rate_nolock(clk->core, rate);
2070 if (ret) {
2071 /* rollback the changes */
2072 clk->min_rate = old_min;
2073 clk->max_rate = old_max;
2074 }
4935b22c
JH
2075 }
2076
55e9b8b7
JB
2077 if (clk->exclusive_count)
2078 clk_core_rate_protect(clk->core);
2079
4dff95dc 2080 clk_prepare_unlock();
4935b22c 2081
4dff95dc 2082 return ret;
3fa2252b 2083}
4dff95dc 2084EXPORT_SYMBOL_GPL(clk_set_rate_range);
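
/*
 * Illustrative sketch, not part of the original file: constrain a clock to a
 * band rather than one exact rate; the framework re-rates the clock if the
 * current rate falls outside [min, max]. The bounds are hypothetical.
 */
static int __maybe_unused clk_set_rate_range_example(struct clk *clk)
{
	/* equivalent to clk_set_min_rate() plus clk_set_max_rate() below */
	return clk_set_rate_range(clk, 100000000, 200000000);
}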
3fa2252b 2085
4dff95dc
SB
2086/**
2087 * clk_set_min_rate - set a minimum clock rate for a clock source
2088 * @clk: clock source
2089 * @rate: desired minimum clock rate in Hz, inclusive
2090 *
2091 * Returns success (0) or negative errno.
2092 */
2093int clk_set_min_rate(struct clk *clk, unsigned long rate)
3fa2252b 2094{
4dff95dc
SB
2095 if (!clk)
2096 return 0;
2097
2098 return clk_set_rate_range(clk, rate, clk->max_rate);
3fa2252b 2099}
4dff95dc 2100EXPORT_SYMBOL_GPL(clk_set_min_rate);
3fa2252b 2101
4dff95dc
SB
2102/**
2103 * clk_set_max_rate - set a maximum clock rate for a clock source
2104 * @clk: clock source
2105 * @rate: desired maximum clock rate in Hz, inclusive
2106 *
2107 * Returns success (0) or negative errno.
2108 */
2109int clk_set_max_rate(struct clk *clk, unsigned long rate)
3fa2252b 2110{
4dff95dc
SB
2111 if (!clk)
2112 return 0;
4935b22c 2113
4dff95dc 2114 return clk_set_rate_range(clk, clk->min_rate, rate);
4935b22c 2115}
4dff95dc 2116EXPORT_SYMBOL_GPL(clk_set_max_rate);
4935b22c 2117
b2476490 2118/**
4dff95dc
SB
2119 * clk_get_parent - return the parent of a clk
2120 * @clk: the clk whose parent gets returned
b2476490 2121 *
4dff95dc 2122 * Simply returns clk->parent. Returns NULL if clk is NULL.
b2476490 2123 */
4dff95dc 2124struct clk *clk_get_parent(struct clk *clk)
b2476490 2125{
4dff95dc 2126 struct clk *parent;
b2476490 2127
fc4a05d4
SB
2128 if (!clk)
2129 return NULL;
2130
4dff95dc 2131 clk_prepare_lock();
fc4a05d4
SB
2132 /* TODO: Create a per-user clk and change callers to call clk_put */
2133 parent = !clk->core->parent ? NULL : clk->core->parent->hw->clk;
4dff95dc 2134 clk_prepare_unlock();
496eadf8 2135
4dff95dc
SB
2136 return parent;
2137}
2138EXPORT_SYMBOL_GPL(clk_get_parent);
b2476490 2139
4dff95dc
SB
2140static struct clk_core *__clk_init_parent(struct clk_core *core)
2141{
5146e0b0 2142 u8 index = 0;
4dff95dc 2143
2430a94d 2144 if (core->num_parents > 1 && core->ops->get_parent)
5146e0b0 2145 index = core->ops->get_parent(core->hw);
b2476490 2146
5146e0b0 2147 return clk_core_get_parent_by_index(core, index);
b2476490
MT
2148}
2149
4dff95dc
SB
2150static void clk_core_reparent(struct clk_core *core,
2151 struct clk_core *new_parent)
b2476490 2152{
4dff95dc
SB
2153 clk_reparent(core, new_parent);
2154 __clk_recalc_accuracies(core);
2155 __clk_recalc_rates(core, POST_RATE_CHANGE);
b2476490
MT
2156}
2157
42c86547
TV
2158void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent)
2159{
2160 if (!hw)
2161 return;
2162
2163 clk_core_reparent(hw->core, !new_parent ? NULL : new_parent->core);
2164}
2165
4dff95dc
SB
2166/**
2167 * clk_has_parent - check if a clock is a possible parent for another
2168 * @clk: clock source
2169 * @parent: parent clock source
2170 *
2171 * This function can be used in drivers that need to check that a clock can be
2172 * the parent of another without actually changing the parent.
2173 *
2174 * Returns true if @parent is a possible parent for @clk, false otherwise.
b2476490 2175 */
4dff95dc 2176bool clk_has_parent(struct clk *clk, struct clk *parent)
b2476490 2177{
4dff95dc 2178 struct clk_core *core, *parent_core;
b2476490 2179
4dff95dc
SB
2180 /* NULL clocks should be nops, so return success if either is NULL. */
2181 if (!clk || !parent)
2182 return true;
7452b219 2183
4dff95dc
SB
2184 core = clk->core;
2185 parent_core = parent->core;
71472c0c 2186
4dff95dc
SB
 2187 	/* Optimize for the case where @parent is already the current parent. */
2188 if (core->parent == parent_core)
2189 return true;
1c8e6004 2190
d6347445
YX
2191 return match_string(core->parent_names, core->num_parents,
2192 parent_core->name) >= 0;
4dff95dc
SB
2193}
2194EXPORT_SYMBOL_GPL(clk_has_parent);
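
/*
 * Illustrative sketch, not part of the original file: validate a candidate
 * input for a mux before committing to the switch.
 */
static int __maybe_unused clk_has_parent_example(struct clk *mux,
						 struct clk *candidate)
{
	if (!clk_has_parent(mux, candidate))
		return -EINVAL;

	return clk_set_parent(mux, candidate);
}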
03bc10ab 2195
91baa9ff
JB
2196static int clk_core_set_parent_nolock(struct clk_core *core,
2197 struct clk_core *parent)
4dff95dc
SB
2198{
2199 int ret = 0;
2200 int p_index = 0;
2201 unsigned long p_rate = 0;
2202
91baa9ff
JB
2203 lockdep_assert_held(&prepare_lock);
2204
4dff95dc
SB
2205 if (!core)
2206 return 0;
2207
4dff95dc 2208 if (core->parent == parent)
91baa9ff 2209 return 0;
4dff95dc
SB
2210
 2211 	/* verify ops for multi-parent clks */
91baa9ff
JB
2212 if (core->num_parents > 1 && !core->ops->set_parent)
2213 return -EPERM;
7452b219 2214
4dff95dc 2215 /* check that we are allowed to re-parent if the clock is in use */
91baa9ff
JB
2216 if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count)
2217 return -EBUSY;
b2476490 2218
e55a839a
JB
2219 if (clk_core_rate_is_protected(core))
2220 return -EBUSY;
b2476490 2221
71472c0c 2222 /* try finding the new parent index */
4dff95dc 2223 if (parent) {
d6968fca 2224 p_index = clk_fetch_parent_index(core, parent);
f1c8b2ed 2225 if (p_index < 0) {
71472c0c 2226 			pr_debug("%s: clk %s cannot be parent of clk %s\n",
4dff95dc 2227 __func__, parent->name, core->name);
91baa9ff 2228 return p_index;
71472c0c 2229 }
e8f0e68e 2230 p_rate = parent->rate;
b2476490
MT
2231 }
2232
9a34b453
MS
2233 ret = clk_pm_runtime_get(core);
2234 if (ret)
91baa9ff 2235 return ret;
9a34b453 2236
4dff95dc
SB
2237 /* propagate PRE_RATE_CHANGE notifications */
2238 ret = __clk_speculate_rates(core, p_rate);
b2476490 2239
4dff95dc
SB
2240 /* abort if a driver objects */
2241 if (ret & NOTIFY_STOP_MASK)
9a34b453 2242 goto runtime_put;
b2476490 2243
4dff95dc
SB
2244 /* do the re-parent */
2245 ret = __clk_set_parent(core, parent, p_index);
b2476490 2246
4dff95dc
SB
 2247 	/* propagate rate and accuracy recalculation accordingly */
2248 if (ret) {
2249 __clk_recalc_rates(core, ABORT_RATE_CHANGE);
2250 } else {
2251 __clk_recalc_rates(core, POST_RATE_CHANGE);
2252 __clk_recalc_accuracies(core);
b2476490
MT
2253 }
2254
9a34b453
MS
2255runtime_put:
2256 clk_pm_runtime_put(core);
71472c0c 2257
4dff95dc
SB
2258 return ret;
2259}
b2476490 2260
4dff95dc
SB
2261/**
2262 * clk_set_parent - switch the parent of a mux clk
2263 * @clk: the mux clk whose input we are switching
2264 * @parent: the new input to clk
2265 *
 2266 * Re-parent clk to use parent as its new input source. If clk is in the
 2267 * prepared state, the clk will get enabled for the duration of this call. If
 2268 * that's not acceptable for a specific clk (e.g. the consumer can't handle
 2269 * that, the reparenting is glitchy in hardware, etc), use the
2270 * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
2271 *
 2272 * After successfully changing clk's parent, clk_set_parent will update the
2273 * clk topology, sysfs topology and propagate rate recalculation via
2274 * __clk_recalc_rates.
2275 *
 2276 * Returns 0 on success, a negative errno otherwise.
2277 */
2278int clk_set_parent(struct clk *clk, struct clk *parent)
2279{
91baa9ff
JB
2280 int ret;
2281
4dff95dc
SB
2282 if (!clk)
2283 return 0;
2284
91baa9ff 2285 clk_prepare_lock();
55e9b8b7
JB
2286
2287 if (clk->exclusive_count)
2288 clk_core_rate_unprotect(clk->core);
2289
91baa9ff
JB
2290 ret = clk_core_set_parent_nolock(clk->core,
2291 parent ? parent->core : NULL);
55e9b8b7
JB
2292
2293 if (clk->exclusive_count)
2294 clk_core_rate_protect(clk->core);
2295
91baa9ff
JB
2296 clk_prepare_unlock();
2297
2298 return ret;
b2476490 2299}
4dff95dc 2300EXPORT_SYMBOL_GPL(clk_set_parent);
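
/*
 * Illustrative sketch, not part of the original file: switch a mux input and
 * confirm the topology change through clk_get_parent()/clk_is_match().
 */
static int __maybe_unused clk_set_parent_example(struct clk *mux,
						 struct clk *new_parent)
{
	int ret;

	ret = clk_set_parent(mux, new_parent);
	if (ret)
		return ret;

	/* rates have been recalculated; verify the new parent took effect */
	return clk_is_match(clk_get_parent(mux), new_parent) ? 0 : -EIO;
}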
b2476490 2301
9e4d04ad
JB
2302static int clk_core_set_phase_nolock(struct clk_core *core, int degrees)
2303{
2304 int ret = -EINVAL;
2305
2306 lockdep_assert_held(&prepare_lock);
2307
2308 if (!core)
2309 return 0;
2310
e55a839a
JB
2311 if (clk_core_rate_is_protected(core))
2312 return -EBUSY;
2313
9e4d04ad
JB
2314 trace_clk_set_phase(core, degrees);
2315
7f95beea 2316 if (core->ops->set_phase) {
9e4d04ad 2317 ret = core->ops->set_phase(core->hw, degrees);
7f95beea
SL
2318 if (!ret)
2319 core->phase = degrees;
2320 }
9e4d04ad
JB
2321
2322 trace_clk_set_phase_complete(core, degrees);
2323
2324 return ret;
2325}
2326
4dff95dc
SB
2327/**
2328 * clk_set_phase - adjust the phase shift of a clock signal
2329 * @clk: clock signal source
2330 * @degrees: number of degrees the signal is shifted
2331 *
2332 * Shifts the phase of a clock signal by the specified
 2333 * degrees. Returns 0 on success, a negative errno otherwise.
2334 *
 2335 * This function makes no distinction about the input or reference
 2336 * signal that we adjust the clock signal phase against. For example,
 2337 * for phase-locked-loop clock signal generators we may shift phase with
 2338 * respect to the feedback clock signal input, but in other cases the
2339 * clock phase may be shifted with respect to some other, unspecified
2340 * signal.
2341 *
2342 * Additionally the concept of phase shift does not propagate through
2343 * the clock tree hierarchy, which sets it apart from clock rates and
2344 * clock accuracy. A parent clock phase attribute does not have an
2345 * impact on the phase attribute of a child clock.
b2476490 2346 */
4dff95dc 2347int clk_set_phase(struct clk *clk, int degrees)
b2476490 2348{
9e4d04ad 2349 int ret;
b2476490 2350
4dff95dc
SB
2351 if (!clk)
2352 return 0;
b2476490 2353
4dff95dc
SB
2354 /* sanity check degrees */
2355 degrees %= 360;
2356 if (degrees < 0)
2357 degrees += 360;
bf47b4fd 2358
4dff95dc 2359 clk_prepare_lock();
3fa2252b 2360
55e9b8b7
JB
2361 if (clk->exclusive_count)
2362 clk_core_rate_unprotect(clk->core);
3fa2252b 2363
9e4d04ad 2364 ret = clk_core_set_phase_nolock(clk->core, degrees);
3fa2252b 2365
55e9b8b7
JB
2366 if (clk->exclusive_count)
2367 clk_core_rate_protect(clk->core);
b2476490 2368
4dff95dc 2369 clk_prepare_unlock();
dfc202ea 2370
4dff95dc
SB
2371 return ret;
2372}
2373EXPORT_SYMBOL_GPL(clk_set_phase);
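
/*
 * Illustrative sketch, not part of the original file: program a 90 degree
 * shift (e.g. for a sampling clock) and read it back. Note clk_set_phase()
 * itself folds out-of-range input into [0, 359].
 */
static int __maybe_unused clk_set_phase_example(struct clk *clk)
{
	int ret = clk_set_phase(clk, 90);

	if (ret)
		return ret;

	return clk_get_phase(clk) == 90 ? 0 : -EIO;
}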
b2476490 2374
4dff95dc
SB
2375static int clk_core_get_phase(struct clk_core *core)
2376{
2377 int ret;
b2476490 2378
4dff95dc 2379 clk_prepare_lock();
1f9c63e8
SL
2380 /* Always try to update cached phase if possible */
2381 if (core->ops->get_phase)
2382 core->phase = core->ops->get_phase(core->hw);
4dff95dc
SB
2383 ret = core->phase;
2384 clk_prepare_unlock();
71472c0c 2385
4dff95dc 2386 return ret;
b2476490
MT
2387}
2388
4dff95dc
SB
2389/**
2390 * clk_get_phase - return the phase shift of a clock signal
2391 * @clk: clock signal source
2392 *
 2393 * Returns the phase shift of a clock node in degrees, otherwise returns
 2394 * a negative errno.
2395 */
2396int clk_get_phase(struct clk *clk)
1c8e6004 2397{
4dff95dc 2398 if (!clk)
1c8e6004
TV
2399 return 0;
2400
4dff95dc
SB
2401 return clk_core_get_phase(clk->core);
2402}
2403EXPORT_SYMBOL_GPL(clk_get_phase);
1c8e6004 2404
4dff95dc
SB
2405/**
2406 * clk_is_match - check if two clk's point to the same hardware clock
2407 * @p: clk compared against q
2408 * @q: clk compared against p
2409 *
2410 * Returns true if the two struct clk pointers both point to the same hardware
2411 * clock node. Put differently, returns true if struct clk *p and struct clk *q
2412 * share the same struct clk_core object.
2413 *
2414 * Returns false otherwise. Note that two NULL clks are treated as matching.
2415 */
2416bool clk_is_match(const struct clk *p, const struct clk *q)
2417{
2418 /* trivial case: identical struct clk's or both NULL */
2419 if (p == q)
2420 return true;
1c8e6004 2421
3fe003f9 2422 /* true if clk->core pointers match. Avoid dereferencing garbage */
4dff95dc
SB
2423 if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q))
2424 if (p->core == q->core)
2425 return true;
1c8e6004 2426
4dff95dc
SB
2427 return false;
2428}
2429EXPORT_SYMBOL_GPL(clk_is_match);
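
/*
 * Illustrative sketch, not part of the original file: two clk_get() calls
 * return distinct struct clk handles even for the same hardware clock, so
 * comparing the pointers is wrong; clk_is_match() compares the underlying
 * clk_core instead. The "bus" con_id is hypothetical.
 */
static bool __maybe_unused clks_share_hw_example(struct device *dev)
{
	struct clk *a = devm_clk_get(dev, "bus");
	struct clk *b = devm_clk_get(dev, "bus");

	if (IS_ERR(a) || IS_ERR(b))
		return false;

	/* a != b as pointers, yet both may wrap the same clk_core */
	return clk_is_match(a, b);
}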
1c8e6004 2430
4dff95dc 2431/*** debugfs support ***/
1c8e6004 2432
4dff95dc
SB
2433#ifdef CONFIG_DEBUG_FS
2434#include <linux/debugfs.h>
1c8e6004 2435
4dff95dc
SB
2436static struct dentry *rootdir;
2437static int inited = 0;
2438static DEFINE_MUTEX(clk_debug_lock);
2439static HLIST_HEAD(clk_debug_list);
1c8e6004 2440
4dff95dc
SB
2441static struct hlist_head *all_lists[] = {
2442 &clk_root_list,
2443 &clk_orphan_list,
2444 NULL,
2445};
2446
2447static struct hlist_head *orphan_list[] = {
2448 &clk_orphan_list,
2449 NULL,
2450};
2451
2452static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
2453 int level)
b2476490 2454{
4dff95dc
SB
2455 if (!c)
2456 return;
b2476490 2457
c5ce26ed 2458 seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu %-3d\n",
4dff95dc
SB
2459 level * 3 + 1, "",
2460 30 - level * 3, c->name,
e55a839a
JB
2461 c->enable_count, c->prepare_count, c->protect_count,
2462 clk_core_get_rate(c), clk_core_get_accuracy(c),
2463 clk_core_get_phase(c));
4dff95dc 2464}
89ac8d7a 2465
4dff95dc
SB
2466static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
2467 int level)
2468{
2469 struct clk_core *child;
b2476490 2470
4dff95dc
SB
2471 if (!c)
2472 return;
b2476490 2473
4dff95dc 2474 clk_summary_show_one(s, c, level);
0e1c0301 2475
4dff95dc
SB
2476 hlist_for_each_entry(child, &c->children, child_node)
2477 clk_summary_show_subtree(s, child, level + 1);
1c8e6004 2478}
b2476490 2479
4dff95dc 2480static int clk_summary_show(struct seq_file *s, void *data)
1c8e6004 2481{
4dff95dc
SB
2482 struct clk_core *c;
2483 struct hlist_head **lists = (struct hlist_head **)s->private;
1c8e6004 2484
c5ce26ed
JB
 2485 	seq_puts(s, "                                 enable  prepare  protect                                \n");
 2486 	seq_puts(s, "   clock                          count    count    count        rate   accuracy   phase\n");
4dff95dc 2487 seq_puts(s, "----------------------------------------------------------------------------------------\n");
b2476490 2488
1c8e6004
TV
2489 clk_prepare_lock();
2490
4dff95dc
SB
2491 for (; *lists; lists++)
2492 hlist_for_each_entry(c, *lists, child_node)
2493 clk_summary_show_subtree(s, c, 0);
b2476490 2494
eab89f69 2495 clk_prepare_unlock();
b2476490 2496
4dff95dc 2497 return 0;
b2476490 2498}
fec0ef3f 2499DEFINE_SHOW_ATTRIBUTE(clk_summary);
b2476490 2500
4dff95dc
SB
2501static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
2502{
2503 if (!c)
2504 return;
b2476490 2505
7cb81136 2506 /* This should be JSON format, i.e. elements separated with a comma */
4dff95dc
SB
2507 seq_printf(s, "\"%s\": { ", c->name);
2508 seq_printf(s, "\"enable_count\": %d,", c->enable_count);
2509 seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
e55a839a 2510 seq_printf(s, "\"protect_count\": %d,", c->protect_count);
7cb81136
SW
2511 seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c));
2512 seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c));
4dff95dc 2513 seq_printf(s, "\"phase\": %d", clk_core_get_phase(c));
b2476490 2514}
b2476490 2515
4dff95dc 2516static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level)
b2476490 2517{
4dff95dc 2518 struct clk_core *child;
b2476490 2519
4dff95dc
SB
2520 if (!c)
2521 return;
b2476490 2522
4dff95dc 2523 clk_dump_one(s, c, level);
b2476490 2524
4dff95dc 2525 hlist_for_each_entry(child, &c->children, child_node) {
4d327586 2526 seq_putc(s, ',');
4dff95dc 2527 clk_dump_subtree(s, child, level + 1);
b2476490
MT
2528 }
2529
4d327586 2530 seq_putc(s, '}');
b2476490
MT
2531}
2532
fec0ef3f 2533static int clk_dump_show(struct seq_file *s, void *data)
4e88f3de 2534{
4dff95dc
SB
2535 struct clk_core *c;
2536 bool first_node = true;
2537 struct hlist_head **lists = (struct hlist_head **)s->private;
4e88f3de 2538
4d327586 2539 seq_putc(s, '{');
4dff95dc 2540 clk_prepare_lock();
035a61c3 2541
4dff95dc
SB
2542 for (; *lists; lists++) {
2543 hlist_for_each_entry(c, *lists, child_node) {
2544 if (!first_node)
4d327586 2545 seq_putc(s, ',');
4dff95dc
SB
2546 first_node = false;
2547 clk_dump_subtree(s, c, 0);
2548 }
2549 }
4e88f3de 2550
4dff95dc 2551 clk_prepare_unlock();
4e88f3de 2552
70e9f4dd 2553 seq_puts(s, "}\n");
4dff95dc 2554 return 0;
4e88f3de 2555}
fec0ef3f 2556DEFINE_SHOW_ATTRIBUTE(clk_dump);
89ac8d7a 2557
a6059ab9
GU
2558static const struct {
2559 unsigned long flag;
2560 const char *name;
2561} clk_flags[] = {
2562#define ENTRY(f) { f, __stringify(f) }
2563 ENTRY(CLK_SET_RATE_GATE),
2564 ENTRY(CLK_SET_PARENT_GATE),
2565 ENTRY(CLK_SET_RATE_PARENT),
2566 ENTRY(CLK_IGNORE_UNUSED),
2567 ENTRY(CLK_IS_BASIC),
2568 ENTRY(CLK_GET_RATE_NOCACHE),
2569 ENTRY(CLK_SET_RATE_NO_REPARENT),
2570 ENTRY(CLK_GET_ACCURACY_NOCACHE),
2571 ENTRY(CLK_RECALC_NEW_RATES),
2572 ENTRY(CLK_SET_RATE_UNGATE),
2573 ENTRY(CLK_IS_CRITICAL),
2574 ENTRY(CLK_OPS_PARENT_ENABLE),
2575#undef ENTRY
2576};
2577
fec0ef3f 2578static int clk_flags_show(struct seq_file *s, void *data)
a6059ab9
GU
2579{
2580 struct clk_core *core = s->private;
2581 unsigned long flags = core->flags;
2582 unsigned int i;
2583
2584 for (i = 0; flags && i < ARRAY_SIZE(clk_flags); i++) {
2585 if (flags & clk_flags[i].flag) {
2586 seq_printf(s, "%s\n", clk_flags[i].name);
2587 flags &= ~clk_flags[i].flag;
2588 }
2589 }
2590 if (flags) {
2591 /* Unknown flags */
2592 seq_printf(s, "0x%lx\n", flags);
2593 }
2594
2595 return 0;
2596}
fec0ef3f 2597DEFINE_SHOW_ATTRIBUTE(clk_flags);
a6059ab9 2598
fec0ef3f 2599static int possible_parents_show(struct seq_file *s, void *data)
92031575
PDS
2600{
2601 struct clk_core *core = s->private;
2602 int i;
2603
2604 for (i = 0; i < core->num_parents - 1; i++)
2605 seq_printf(s, "%s ", core->parent_names[i]);
2606
2607 seq_printf(s, "%s\n", core->parent_names[i]);
2608
2609 return 0;
2610}
fec0ef3f 2611DEFINE_SHOW_ATTRIBUTE(possible_parents);
92031575 2612
8a26bbbb 2613static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
4dff95dc 2614{
8a26bbbb 2615 struct dentry *root;
b61c43c0 2616
8a26bbbb
GKH
2617 if (!core || !pdentry)
2618 return;
b2476490 2619
8a26bbbb
GKH
2620 root = debugfs_create_dir(core->name, pdentry);
2621 core->dentry = root;
92031575 2622
8a26bbbb
GKH
2623 debugfs_create_ulong("clk_rate", 0444, root, &core->rate);
2624 debugfs_create_ulong("clk_accuracy", 0444, root, &core->accuracy);
2625 debugfs_create_u32("clk_phase", 0444, root, &core->phase);
2626 debugfs_create_file("clk_flags", 0444, root, core, &clk_flags_fops);
2627 debugfs_create_u32("clk_prepare_count", 0444, root, &core->prepare_count);
2628 debugfs_create_u32("clk_enable_count", 0444, root, &core->enable_count);
2629 debugfs_create_u32("clk_protect_count", 0444, root, &core->protect_count);
2630 debugfs_create_u32("clk_notifier_count", 0444, root, &core->notifier_count);
b2476490 2631
8a26bbbb
GKH
2632 if (core->num_parents > 1)
2633 debugfs_create_file("clk_possible_parents", 0444, root, core,
2634 &possible_parents_fops);
b2476490 2635
8a26bbbb
GKH
2636 if (core->ops->debug_init)
2637 core->ops->debug_init(core->hw, core->dentry);
b2476490 2638}
035a61c3
TV
2639
2640/**
6e5ab41b
SB
2641 * clk_debug_register - add a clk node to the debugfs clk directory
2642 * @core: the clk being added to the debugfs clk directory
035a61c3 2643 *
6e5ab41b
SB
2644 * Dynamically adds a clk to the debugfs clk directory if debugfs has been
2645 * initialized. Otherwise it bails out early since the debugfs clk directory
4dff95dc 2646 * will be created lazily by clk_debug_init as part of a late_initcall.
035a61c3 2647 */
8a26bbbb 2648static void clk_debug_register(struct clk_core *core)
035a61c3 2649{
4dff95dc
SB
2650 mutex_lock(&clk_debug_lock);
2651 hlist_add_head(&core->debug_node, &clk_debug_list);
db3188fa 2652 if (inited)
8a26bbbb 2653 clk_debug_create_one(core, rootdir);
4dff95dc 2654 mutex_unlock(&clk_debug_lock);
035a61c3 2655}
b2476490 2656
4dff95dc 2657 /**
6e5ab41b
SB
2658 * clk_debug_unregister - remove a clk node from the debugfs clk directory
2659 * @core: the clk being removed from the debugfs clk directory
e59c5371 2660 *
6e5ab41b
SB
2661 * Dynamically removes a clk and all its child nodes from the
2662 * debugfs clk directory if clk->dentry points to debugfs created by
706d5c73 2663 * clk_debug_register in __clk_core_init.
e59c5371 2664 */
4dff95dc 2665static void clk_debug_unregister(struct clk_core *core)
e59c5371 2666{
4dff95dc
SB
2667 mutex_lock(&clk_debug_lock);
2668 hlist_del_init(&core->debug_node);
2669 debugfs_remove_recursive(core->dentry);
2670 core->dentry = NULL;
2671 mutex_unlock(&clk_debug_lock);
2672}
e59c5371 2673
4dff95dc 2674/**
6e5ab41b 2675 * clk_debug_init - lazily populate the debugfs clk directory
4dff95dc 2676 *
6e5ab41b
SB
2677 * clks are often initialized very early during boot before memory can be
 2678 * dynamically allocated and well before debugfs is set up. This function
 2679 * populates the debugfs clk directory once at boot-time when we know that
 2680 * debugfs is set up. It should only be called once at boot-time; all other
 2681 * clks added dynamically will be registered with clk_debug_register.
4dff95dc
SB
2682 */
2683static int __init clk_debug_init(void)
2684{
2685 struct clk_core *core;
dfc202ea 2686
4dff95dc 2687 rootdir = debugfs_create_dir("clk", NULL);
e59c5371 2688
8a26bbbb
GKH
2689 debugfs_create_file("clk_summary", 0444, rootdir, &all_lists,
2690 &clk_summary_fops);
2691 debugfs_create_file("clk_dump", 0444, rootdir, &all_lists,
2692 &clk_dump_fops);
2693 debugfs_create_file("clk_orphan_summary", 0444, rootdir, &orphan_list,
2694 &clk_summary_fops);
2695 debugfs_create_file("clk_orphan_dump", 0444, rootdir, &orphan_list,
2696 &clk_dump_fops);
e59c5371 2697
4dff95dc
SB
2698 mutex_lock(&clk_debug_lock);
2699 hlist_for_each_entry(core, &clk_debug_list, debug_node)
2700 clk_debug_create_one(core, rootdir);
e59c5371 2701
4dff95dc
SB
2702 inited = 1;
2703 mutex_unlock(&clk_debug_lock);
e59c5371 2704
4dff95dc
SB
2705 return 0;
2706}
2707late_initcall(clk_debug_init);
2708#else
8a26bbbb 2709static inline void clk_debug_register(struct clk_core *core) { }
4dff95dc
SB
2710static inline void clk_debug_reparent(struct clk_core *core,
2711 struct clk_core *new_parent)
035a61c3 2712{
035a61c3 2713}
4dff95dc 2714static inline void clk_debug_unregister(struct clk_core *core)
3d3801ef 2715{
3d3801ef 2716}
4dff95dc 2717#endif
3d3801ef 2718
b2476490 2719/**
be45ebf2 2720 * __clk_core_init - initialize the data structures in a struct clk_core
d35c80c2 2721 * @core: clk_core being initialized
b2476490 2722 *
035a61c3 2723 * Initializes the lists in struct clk_core, queries the hardware for the
b2476490 2724 * parent and rate and sets them both.
b2476490 2725 */
be45ebf2 2726static int __clk_core_init(struct clk_core *core)
b2476490 2727{
9a34b453 2728 int i, ret;
035a61c3 2729 struct clk_core *orphan;
b67bfe0d 2730 struct hlist_node *tmp2;
1c8e6004 2731 unsigned long rate;
b2476490 2732
d35c80c2 2733 if (!core)
d1302a36 2734 return -EINVAL;
b2476490 2735
eab89f69 2736 clk_prepare_lock();
b2476490 2737
9a34b453
MS
2738 ret = clk_pm_runtime_get(core);
2739 if (ret)
2740 goto unlock;
2741
b2476490 2742 /* check to see if a clock with this name is already registered */
d6968fca 2743 if (clk_core_lookup(core->name)) {
d1302a36 2744 pr_debug("%s: clk %s already initialized\n",
d6968fca 2745 __func__, core->name);
d1302a36 2746 ret = -EEXIST;
b2476490 2747 goto out;
d1302a36 2748 }
b2476490 2749
d4d7e3dd 2750 /* check that clk_ops are sane. See Documentation/clk.txt */
d6968fca
SB
2751 if (core->ops->set_rate &&
2752 !((core->ops->round_rate || core->ops->determine_rate) &&
2753 core->ops->recalc_rate)) {
c44fccb5
MY
2754 pr_err("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
2755 __func__, core->name);
d1302a36 2756 ret = -EINVAL;
d4d7e3dd
MT
2757 goto out;
2758 }
2759
d6968fca 2760 if (core->ops->set_parent && !core->ops->get_parent) {
c44fccb5
MY
2761 pr_err("%s: %s must implement .get_parent & .set_parent\n",
2762 __func__, core->name);
d1302a36 2763 ret = -EINVAL;
d4d7e3dd
MT
2764 goto out;
2765 }
2766
3c8e77dd
MY
2767 if (core->num_parents > 1 && !core->ops->get_parent) {
2768 pr_err("%s: %s must implement .get_parent as it has multi parents\n",
2769 __func__, core->name);
2770 ret = -EINVAL;
2771 goto out;
2772 }
2773
d6968fca
SB
2774 if (core->ops->set_rate_and_parent &&
2775 !(core->ops->set_parent && core->ops->set_rate)) {
c44fccb5 2776 pr_err("%s: %s must implement .set_parent & .set_rate\n",
d6968fca 2777 __func__, core->name);
3fa2252b
SB
2778 ret = -EINVAL;
2779 goto out;
2780 }
2781
b2476490 2782 /* throw a WARN if any entries in parent_names are NULL */
d6968fca
SB
2783 for (i = 0; i < core->num_parents; i++)
2784 WARN(!core->parent_names[i],
b2476490 2785 "%s: invalid NULL in %s's .parent_names\n",
d6968fca 2786 __func__, core->name);
b2476490 2787
d6968fca 2788 core->parent = __clk_init_parent(core);
b2476490
MT
2789
2790 /*
706d5c73
SB
2791 * Populate core->parent if parent has already been clk_core_init'd. If
2792 * parent has not yet been clk_core_init'd then place clk in the orphan
47b0eeb3 2793 * list. If clk doesn't have any parents then place it in the root
b2476490
MT
2794 * clk list.
2795 *
2796 * Every time a new clk is clk_init'd then we walk the list of orphan
2797 * clocks and re-parent any that are children of the clock currently
2798 * being clk_init'd.
2799 */
e6500344 2800 if (core->parent) {
d6968fca
SB
2801 hlist_add_head(&core->child_node,
2802 &core->parent->children);
e6500344 2803 core->orphan = core->parent->orphan;
47b0eeb3 2804 } else if (!core->num_parents) {
d6968fca 2805 hlist_add_head(&core->child_node, &clk_root_list);
e6500344
HS
2806 core->orphan = false;
2807 } else {
d6968fca 2808 hlist_add_head(&core->child_node, &clk_orphan_list);
e6500344
HS
2809 core->orphan = true;
2810 }
b2476490 2811
541debae
JB
2812 /*
2813 * optional platform-specific magic
2814 *
2815 * The .init callback is not used by any of the basic clock types, but
2816 * exists for weird hardware that must perform initialization magic.
2817 * Please consider other ways of solving initialization problems before
2818 * using this callback, as its use is discouraged.
2819 */
2820 if (core->ops->init)
2821 core->ops->init(core->hw);
2822
5279fc40
BB
2823 /*
2824 * Set clk's accuracy. The preferred method is to use
2825 * .recalc_accuracy. For simple clocks and lazy developers the default
2826 * fallback is to use the parent's accuracy. If a clock doesn't have a
2827 * parent (or is orphaned) then accuracy is set to zero (perfect
2828 * clock).
2829 */
d6968fca
SB
2830 if (core->ops->recalc_accuracy)
2831 core->accuracy = core->ops->recalc_accuracy(core->hw,
2832 __clk_get_accuracy(core->parent));
2833 else if (core->parent)
2834 core->accuracy = core->parent->accuracy;
5279fc40 2835 else
d6968fca 2836 core->accuracy = 0;
5279fc40 2837
9824cf73
MR
2838 /*
2839 * Set clk's phase.
2840 * Since a phase is by definition relative to its parent, just
2841 * query the current clock phase, or just assume it's in phase.
2842 */
d6968fca
SB
2843 if (core->ops->get_phase)
2844 core->phase = core->ops->get_phase(core->hw);
9824cf73 2845 else
d6968fca 2846 core->phase = 0;
9824cf73 2847
b2476490
MT
2848 /*
2849 * Set clk's rate. The preferred method is to use .recalc_rate. For
2850 * simple clocks and lazy developers the default fallback is to use the
2851 * parent's rate. If a clock doesn't have a parent (or is orphaned)
2852 * then rate is set to zero.
2853 */
d6968fca
SB
2854 if (core->ops->recalc_rate)
2855 rate = core->ops->recalc_rate(core->hw,
2856 clk_core_get_rate_nolock(core->parent));
2857 else if (core->parent)
2858 rate = core->parent->rate;
b2476490 2859 else
1c8e6004 2860 rate = 0;
d6968fca 2861 core->rate = core->req_rate = rate;
b2476490 2862
99652a46
JB
2863 /*
2864 * Enable CLK_IS_CRITICAL clocks so newly added critical clocks
2865 * don't get accidentally disabled when walking the orphan tree and
2866 * reparenting clocks
2867 */
2868 if (core->flags & CLK_IS_CRITICAL) {
2869 unsigned long flags;
2870
2871 clk_core_prepare(core);
2872
2873 flags = clk_enable_lock();
2874 clk_core_enable(core);
2875 clk_enable_unlock(flags);
2876 }
2877
b2476490 2878 /*
0e8f6e49
MY
 2879 	 * walk the list of orphan clocks and reparent any that have newly
 2880 	 * found a parent.
b2476490 2881 */
b67bfe0d 2882 hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
0e8f6e49 2883 struct clk_core *parent = __clk_init_parent(orphan);
1f61e5f1 2884
904e6ead 2885 /*
99652a46
JB
 2886 		 * We need to use __clk_set_parent_before() and _after() in order
 2887 		 * to properly migrate any prepare/enable count of the orphan
2888 * clock. This is important for CLK_IS_CRITICAL clocks, which
2889 * are enabled during init but might not have a parent yet.
904e6ead
MT
2890 */
2891 if (parent) {
f8f8f1d0 2892 /* update the clk tree topology */
99652a46
JB
2893 __clk_set_parent_before(orphan, parent);
2894 __clk_set_parent_after(orphan, parent, NULL);
904e6ead
MT
2895 __clk_recalc_accuracies(orphan);
2896 __clk_recalc_rates(orphan, 0);
2897 }
0e8f6e49 2898 }
b2476490 2899
d6968fca 2900 kref_init(&core->ref);
b2476490 2901out:
9a34b453
MS
2902 clk_pm_runtime_put(core);
2903unlock:
eab89f69 2904 clk_prepare_unlock();
b2476490 2905
89f7e9de 2906 if (!ret)
d6968fca 2907 clk_debug_register(core);
89f7e9de 2908
d1302a36 2909 return ret;
b2476490
MT
2910}
2911
035a61c3
TV
2912struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id,
2913 const char *con_id)
0197b3ea 2914{
0197b3ea
SK
2915 struct clk *clk;
2916
035a61c3 2917 /* This is to allow this function to be chained to others */
c1de1357 2918 if (IS_ERR_OR_NULL(hw))
8a23133c 2919 return ERR_CAST(hw);
0197b3ea 2920
035a61c3
TV
2921 clk = kzalloc(sizeof(*clk), GFP_KERNEL);
2922 if (!clk)
2923 return ERR_PTR(-ENOMEM);
2924
2925 clk->core = hw->core;
2926 clk->dev_id = dev_id;
253160a8 2927 clk->con_id = kstrdup_const(con_id, GFP_KERNEL);
1c8e6004
TV
2928 clk->max_rate = ULONG_MAX;
2929
2930 clk_prepare_lock();
50595f8b 2931 hlist_add_head(&clk->clks_node, &hw->core->clks);
1c8e6004 2932 clk_prepare_unlock();
0197b3ea
SK
2933
2934 return clk;
2935}
035a61c3 2936
73e0e496 2937void __clk_free_clk(struct clk *clk)
1c8e6004
TV
2938{
2939 clk_prepare_lock();
50595f8b 2940 hlist_del(&clk->clks_node);
1c8e6004
TV
2941 clk_prepare_unlock();
2942
253160a8 2943 kfree_const(clk->con_id);
1c8e6004
TV
2944 kfree(clk);
2945}
0197b3ea 2946
293ba3b4
SB
2947/**
2948 * clk_register - allocate a new clock, register it and return an opaque cookie
2949 * @dev: device that is registering this clock
2950 * @hw: link to hardware-specific clock data
2951 *
2952 * clk_register is the primary interface for populating the clock tree with new
2953 * clock nodes. It returns a pointer to the newly allocated struct clk which
a59a5163 2954 * cannot be dereferenced by driver code but may be used in conjunction with the
293ba3b4
SB
2955 * rest of the clock API. In the event of an error clk_register will return an
2956 * error code; drivers must test for an error code after calling clk_register.
2957 */
2958struct clk *clk_register(struct device *dev, struct clk_hw *hw)
b2476490 2959{
d1302a36 2960 int i, ret;
d6968fca 2961 struct clk_core *core;
293ba3b4 2962
d6968fca
SB
2963 core = kzalloc(sizeof(*core), GFP_KERNEL);
2964 if (!core) {
293ba3b4
SB
2965 ret = -ENOMEM;
2966 goto fail_out;
2967 }
b2476490 2968
d6968fca
SB
2969 core->name = kstrdup_const(hw->init->name, GFP_KERNEL);
2970 if (!core->name) {
0197b3ea
SK
2971 ret = -ENOMEM;
2972 goto fail_name;
2973 }
29fd2a34
JB
2974
2975 if (WARN_ON(!hw->init->ops)) {
2976 ret = -EINVAL;
2977 goto fail_ops;
2978 }
d6968fca 2979 core->ops = hw->init->ops;
29fd2a34 2980
9a34b453
MS
2981 if (dev && pm_runtime_enabled(dev))
2982 core->dev = dev;
ac2df527 2983 if (dev && dev->driver)
d6968fca
SB
2984 core->owner = dev->driver->owner;
2985 core->hw = hw;
2986 core->flags = hw->init->flags;
2987 core->num_parents = hw->init->num_parents;
9783c0d9
SB
2988 core->min_rate = 0;
2989 core->max_rate = ULONG_MAX;
d6968fca 2990 hw->core = core;
b2476490 2991
d1302a36 2992 /* allocate local copy in case parent_names is __initdata */
d6968fca 2993 core->parent_names = kcalloc(core->num_parents, sizeof(char *),
96a7ed90 2994 GFP_KERNEL);
d1302a36 2995
d6968fca 2996 if (!core->parent_names) {
d1302a36
MT
2997 ret = -ENOMEM;
2998 goto fail_parent_names;
2999 }
3000
3001
3002 /* copy each string name in case parent_names is __initdata */
d6968fca
SB
3003 for (i = 0; i < core->num_parents; i++) {
3004 core->parent_names[i] = kstrdup_const(hw->init->parent_names[i],
0197b3ea 3005 GFP_KERNEL);
d6968fca 3006 if (!core->parent_names[i]) {
d1302a36
MT
3007 ret = -ENOMEM;
3008 goto fail_parent_names_copy;
3009 }
3010 }
3011
176d1169
MY
3012 /* avoid unnecessary string look-ups of clk_core's possible parents. */
3013 core->parents = kcalloc(core->num_parents, sizeof(*core->parents),
3014 GFP_KERNEL);
3015 if (!core->parents) {
3016 ret = -ENOMEM;
3017 goto fail_parents;
 3018 	}
3019
d6968fca 3020 INIT_HLIST_HEAD(&core->clks);
1c8e6004 3021
035a61c3
TV
3022 hw->clk = __clk_create_clk(hw, NULL, NULL);
3023 if (IS_ERR(hw->clk)) {
035a61c3 3024 ret = PTR_ERR(hw->clk);
176d1169 3025 goto fail_parents;
035a61c3
TV
3026 }
3027
be45ebf2 3028 ret = __clk_core_init(core);
d1302a36 3029 if (!ret)
035a61c3 3030 return hw->clk;
b2476490 3031
1c8e6004 3032 __clk_free_clk(hw->clk);
035a61c3 3033 hw->clk = NULL;
b2476490 3034
176d1169
MY
3035fail_parents:
3036 kfree(core->parents);
d1302a36
MT
3037fail_parent_names_copy:
3038 while (--i >= 0)
d6968fca
SB
3039 kfree_const(core->parent_names[i]);
3040 kfree(core->parent_names);
d1302a36 3041fail_parent_names:
29fd2a34 3042fail_ops:
d6968fca 3043 kfree_const(core->name);
0197b3ea 3044fail_name:
d6968fca 3045 kfree(core);
d1302a36
MT
3046fail_out:
3047 return ERR_PTR(ret);
b2476490
MT
3048}
3049EXPORT_SYMBOL_GPL(clk_register);
3050
4143804c
SB
3051/**
3052 * clk_hw_register - register a clk_hw and return an error code
3053 * @dev: device that is registering this clock
3054 * @hw: link to hardware-specific clock data
3055 *
3056 * clk_hw_register is the primary interface for populating the clock tree with
3057 * new clock nodes. It returns an integer equal to zero indicating success or
3058 * less than zero indicating failure. Drivers must test for an error code after
3059 * calling clk_hw_register().
3060 */
3061int clk_hw_register(struct device *dev, struct clk_hw *hw)
3062{
3063 return PTR_ERR_OR_ZERO(clk_register(dev, hw));
3064}
3065EXPORT_SYMBOL_GPL(clk_hw_register);
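
/*
 * Illustrative sketch, not part of the original file: the minimal provider
 * boilerplate clk_hw_register() expects. The foo_* names, the "osc24m"
 * parent and the divide-by-two ops are hypothetical; a real divider would
 * normally use the basic clk-divider type.
 */
struct foo_clk {
	struct clk_hw hw;
};

static unsigned long foo_recalc_rate(struct clk_hw *hw,
				     unsigned long parent_rate)
{
	return parent_rate / 2;	/* hypothetical fixed divide-by-two */
}

static const struct clk_ops foo_clk_ops = {
	.recalc_rate = foo_recalc_rate,
};

static int __maybe_unused foo_clk_register(struct device *dev,
					   struct foo_clk *foo)
{
	static const char * const parents[] = { "osc24m" };
	struct clk_init_data init = {
		.name = "foo_div2",
		.ops = &foo_clk_ops,
		.parent_names = parents,
		.num_parents = ARRAY_SIZE(parents),
	};

	/* init data is copied during registration, so stack storage is fine */
	foo->hw.init = &init;
	return clk_hw_register(dev, &foo->hw);
}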
3066
6e5ab41b 3067/* Free memory allocated for a clock. */
fcb0ee6a
SN
3068static void __clk_release(struct kref *ref)
3069{
d6968fca
SB
3070 struct clk_core *core = container_of(ref, struct clk_core, ref);
3071 int i = core->num_parents;
fcb0ee6a 3072
496eadf8
KK
3073 lockdep_assert_held(&prepare_lock);
3074
d6968fca 3075 kfree(core->parents);
fcb0ee6a 3076 while (--i >= 0)
d6968fca 3077 kfree_const(core->parent_names[i]);
fcb0ee6a 3078
d6968fca
SB
3079 kfree(core->parent_names);
3080 kfree_const(core->name);
3081 kfree(core);
fcb0ee6a
SN
3082}
3083
3084/*
3085 * Empty clk_ops for unregistered clocks. These are used temporarily
 3086 * after clk_unregister() was called on a clock and until the last clock
3087 * consumer calls clk_put() and the struct clk object is freed.
3088 */
3089static int clk_nodrv_prepare_enable(struct clk_hw *hw)
3090{
3091 return -ENXIO;
3092}
3093
3094static void clk_nodrv_disable_unprepare(struct clk_hw *hw)
3095{
3096 WARN_ON_ONCE(1);
3097}
3098
3099static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate,
3100 unsigned long parent_rate)
3101{
3102 return -ENXIO;
3103}
3104
3105static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index)
3106{
3107 return -ENXIO;
3108}
3109
3110static const struct clk_ops clk_nodrv_ops = {
3111 .enable = clk_nodrv_prepare_enable,
3112 .disable = clk_nodrv_disable_unprepare,
3113 .prepare = clk_nodrv_prepare_enable,
3114 .unprepare = clk_nodrv_disable_unprepare,
3115 .set_rate = clk_nodrv_set_rate,
3116 .set_parent = clk_nodrv_set_parent,
3117};
3118
1df5c939
MB
3119/**
3120 * clk_unregister - unregister a currently registered clock
3121 * @clk: clock to unregister
1df5c939 3122 */
fcb0ee6a
SN
3123void clk_unregister(struct clk *clk)
3124{
3125 unsigned long flags;
3126
6314b679
SB
3127 if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
3128 return;
3129
035a61c3 3130 clk_debug_unregister(clk->core);
fcb0ee6a
SN
3131
3132 clk_prepare_lock();
3133
035a61c3
TV
3134 if (clk->core->ops == &clk_nodrv_ops) {
3135 pr_err("%s: unregistered clock: %s\n", __func__,
3136 clk->core->name);
4106a3d9 3137 goto unlock;
fcb0ee6a
SN
3138 }
3139 /*
3140 * Assign empty clock ops for consumers that might still hold
3141 * a reference to this clock.
3142 */
3143 flags = clk_enable_lock();
035a61c3 3144 clk->core->ops = &clk_nodrv_ops;
fcb0ee6a
SN
3145 clk_enable_unlock(flags);
3146
035a61c3
TV
3147 if (!hlist_empty(&clk->core->children)) {
3148 struct clk_core *child;
874f224c 3149 struct hlist_node *t;
fcb0ee6a
SN
3150
3151 /* Reparent all children to the orphan list. */
035a61c3
TV
3152 hlist_for_each_entry_safe(child, t, &clk->core->children,
3153 child_node)
91baa9ff 3154 clk_core_set_parent_nolock(child, NULL);
fcb0ee6a
SN
3155 }
3156
035a61c3 3157 hlist_del_init(&clk->core->child_node);
fcb0ee6a 3158
035a61c3 3159 if (clk->core->prepare_count)
fcb0ee6a 3160 pr_warn("%s: unregistering prepared clock: %s\n",
035a61c3 3161 __func__, clk->core->name);
e55a839a
JB
3162
3163 if (clk->core->protect_count)
3164 pr_warn("%s: unregistering protected clock: %s\n",
3165 __func__, clk->core->name);
3166
035a61c3 3167 kref_put(&clk->core->ref, __clk_release);
4106a3d9 3168unlock:
fcb0ee6a
SN
3169 clk_prepare_unlock();
3170}
1df5c939
MB
3171EXPORT_SYMBOL_GPL(clk_unregister);
3172
4143804c
SB
3173/**
3174 * clk_hw_unregister - unregister a currently registered clk_hw
3175 * @hw: hardware-specific clock data to unregister
3176 */
3177void clk_hw_unregister(struct clk_hw *hw)
3178{
3179 clk_unregister(hw->clk);
3180}
3181EXPORT_SYMBOL_GPL(clk_hw_unregister);
3182
46c8773a
SB
3183static void devm_clk_release(struct device *dev, void *res)
3184{
293ba3b4 3185 clk_unregister(*(struct clk **)res);
46c8773a
SB
3186}
3187
4143804c
SB
3188static void devm_clk_hw_release(struct device *dev, void *res)
3189{
3190 clk_hw_unregister(*(struct clk_hw **)res);
3191}
3192
46c8773a
SB
3193/**
3194 * devm_clk_register - resource managed clk_register()
3195 * @dev: device that is registering this clock
3196 * @hw: link to hardware-specific clock data
3197 *
3198 * Managed clk_register(). Clocks returned from this function are
3199 * automatically clk_unregister()ed on driver detach. See clk_register() for
3200 * more information.
3201 */
3202struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
3203{
3204 struct clk *clk;
293ba3b4 3205 struct clk **clkp;
46c8773a 3206
293ba3b4
SB
3207 clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
3208 if (!clkp)
46c8773a
SB
3209 return ERR_PTR(-ENOMEM);
3210
293ba3b4
SB
3211 clk = clk_register(dev, hw);
3212 if (!IS_ERR(clk)) {
3213 *clkp = clk;
3214 devres_add(dev, clkp);
46c8773a 3215 } else {
293ba3b4 3216 devres_free(clkp);
46c8773a
SB
3217 }
3218
3219 return clk;
3220}
3221EXPORT_SYMBOL_GPL(devm_clk_register);
3222
4143804c
SB
3223/**
3224 * devm_clk_hw_register - resource managed clk_hw_register()
3225 * @dev: device that is registering this clock
3226 * @hw: link to hardware-specific clock data
3227 *
c47265ad 3228 * Managed clk_hw_register(). Clocks registered by this function are
4143804c
SB
3229 * automatically clk_hw_unregister()ed on driver detach. See clk_hw_register()
3230 * for more information.
3231 */
3232int devm_clk_hw_register(struct device *dev, struct clk_hw *hw)
3233{
3234 struct clk_hw **hwp;
3235 int ret;
3236
3237 hwp = devres_alloc(devm_clk_hw_release, sizeof(*hwp), GFP_KERNEL);
3238 if (!hwp)
3239 return -ENOMEM;
3240
3241 ret = clk_hw_register(dev, hw);
3242 if (!ret) {
3243 *hwp = hw;
3244 devres_add(dev, hwp);
3245 } else {
3246 devres_free(hwp);
3247 }
3248
3249 return ret;
3250}
3251EXPORT_SYMBOL_GPL(devm_clk_hw_register);
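
/*
 * Illustrative sketch, not part of the original file: the devres variant of
 * the registration above; no explicit clk_hw_unregister() is needed since
 * the clk_hw is dropped automatically on driver detach.
 */
static int __maybe_unused foo_devm_clk_register(struct device *dev,
						struct clk_hw *hw)
{
	return devm_clk_hw_register(dev, hw);
}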
3252
46c8773a
SB
3253static int devm_clk_match(struct device *dev, void *res, void *data)
3254{
3255 struct clk *c = res;
3256 if (WARN_ON(!c))
3257 return 0;
3258 return c == data;
3259}
3260
4143804c
SB
3261static int devm_clk_hw_match(struct device *dev, void *res, void *data)
3262{
3263 struct clk_hw *hw = res;
3264
3265 if (WARN_ON(!hw))
3266 return 0;
3267 return hw == data;
3268}
3269
46c8773a
SB
3270/**
3271 * devm_clk_unregister - resource managed clk_unregister()
3272 * @clk: clock to unregister
3273 *
3274 * Deallocate a clock allocated with devm_clk_register(). Normally
3275 * this function will not need to be called and the resource management
3276 * code will ensure that the resource is freed.
3277 */
3278void devm_clk_unregister(struct device *dev, struct clk *clk)
3279{
3280 WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk));
3281}
3282EXPORT_SYMBOL_GPL(devm_clk_unregister);
3283
4143804c
SB
3284/**
3285 * devm_clk_hw_unregister - resource managed clk_hw_unregister()
3286 * @dev: device that is unregistering the hardware-specific clock data
3287 * @hw: link to hardware-specific clock data
3288 *
3289 * Unregister a clk_hw registered with devm_clk_hw_register(). Normally
3290 * this function will not need to be called and the resource management
3291 * code will ensure that the resource is freed.
3292 */
3293void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw)
3294{
3295 WARN_ON(devres_release(dev, devm_clk_hw_release, devm_clk_hw_match,
3296 hw));
3297}
3298EXPORT_SYMBOL_GPL(devm_clk_hw_unregister);
3299
ac2df527
SN
3300/*
3301 * clkdev helpers
3302 */
3303int __clk_get(struct clk *clk)
3304{
035a61c3
TV
3305 struct clk_core *core = !clk ? NULL : clk->core;
3306
3307 if (core) {
3308 if (!try_module_get(core->owner))
00efcb1c 3309 return 0;
ac2df527 3310
035a61c3 3311 kref_get(&core->ref);
00efcb1c 3312 }
ac2df527
SN
3313 return 1;
3314}
3315
3316void __clk_put(struct clk *clk)
3317{
10cdfe54
TV
3318 struct module *owner;
3319
00efcb1c 3320 if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
ac2df527
SN
3321 return;
3322
fcb0ee6a 3323 clk_prepare_lock();
1c8e6004 3324
55e9b8b7
JB
3325 /*
3326 * Before calling clk_put, all calls to clk_rate_exclusive_get() from a
3327 * given user should be balanced with calls to clk_rate_exclusive_put()
3328 * and by that same consumer
3329 */
3330 if (WARN_ON(clk->exclusive_count)) {
3331 /* We voiced our concern, let's sanitize the situation */
3332 clk->core->protect_count -= (clk->exclusive_count - 1);
3333 clk_core_rate_unprotect(clk->core);
3334 clk->exclusive_count = 0;
3335 }
3336
50595f8b 3337 hlist_del(&clk->clks_node);
ec02ace8
TV
3338 if (clk->min_rate > clk->core->req_rate ||
3339 clk->max_rate < clk->core->req_rate)
3340 clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
3341
1c8e6004
TV
3342 owner = clk->core->owner;
3343 kref_put(&clk->core->ref, __clk_release);
3344
fcb0ee6a
SN
3345 clk_prepare_unlock();
3346
10cdfe54 3347 module_put(owner);
035a61c3 3348
035a61c3 3349 kfree(clk);
ac2df527
SN
3350}
3351
b2476490
MT
3352/*** clk rate change notifiers ***/
3353
3354/**
3355 * clk_notifier_register - add a clk rate change notifier
3356 * @clk: struct clk * to watch
3357 * @nb: struct notifier_block * with callback info
3358 *
3359 * Request notification when clk's rate changes. This uses an SRCU
3360 * notifier because we want it to block and notifier unregistrations are
 3361 * uncommon. The callbacks associated with the notifier must not
 3362 * re-enter into the clk framework by calling any top-level clk APIs;
 3363 * doing so would take the prepare_lock mutex again and deadlock.
3364 *
198bb594
MY
3365 * In all notification cases (pre, post and abort rate change) the original
3366 * clock rate is passed to the callback via struct clk_notifier_data.old_rate
3367 * and the new frequency is passed via struct clk_notifier_data.new_rate.
b2476490 3368 *
b2476490
MT
3369 * clk_notifier_register() must be called from non-atomic context.
3370 * Returns -EINVAL if called with null arguments, -ENOMEM upon
3371 * allocation failure; otherwise, passes along the return value of
3372 * srcu_notifier_chain_register().
3373 */
3374int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
3375{
3376 struct clk_notifier *cn;
3377 int ret = -ENOMEM;
3378
3379 if (!clk || !nb)
3380 return -EINVAL;
3381
eab89f69 3382 clk_prepare_lock();
b2476490
MT
3383
3384 /* search the list of notifiers for this clk */
3385 list_for_each_entry(cn, &clk_notifier_list, node)
3386 if (cn->clk == clk)
3387 break;
3388
3389 /* if clk wasn't in the notifier list, allocate new clk_notifier */
3390 if (cn->clk != clk) {
1808a320 3391 cn = kzalloc(sizeof(*cn), GFP_KERNEL);
b2476490
MT
3392 if (!cn)
3393 goto out;
3394
3395 cn->clk = clk;
3396 srcu_init_notifier_head(&cn->notifier_head);
3397
3398 list_add(&cn->node, &clk_notifier_list);
3399 }
3400
3401 ret = srcu_notifier_chain_register(&cn->notifier_head, nb);
3402
035a61c3 3403 clk->core->notifier_count++;
b2476490
MT
3404
3405out:
eab89f69 3406 clk_prepare_unlock();
b2476490
MT
3407
3408 return ret;
3409}
3410EXPORT_SYMBOL_GPL(clk_notifier_register);
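
/*
 * Illustrative sketch, not part of the original file: a rate-change notifier
 * callback. @data carries a struct clk_notifier_data with old_rate/new_rate;
 * returning NOTIFY_BAD from PRE_RATE_CHANGE vetoes the change. The 200 MHz
 * limit is hypothetical.
 */
static int __maybe_unused foo_clk_notifier_cb(struct notifier_block *nb,
					      unsigned long event, void *data)
{
	struct clk_notifier_data *ndata = data;

	switch (event) {
	case PRE_RATE_CHANGE:
		/* e.g. reject rates the IP block cannot follow */
		if (ndata->new_rate > 200000000)
			return NOTIFY_BAD;
		return NOTIFY_OK;
	case POST_RATE_CHANGE:
	case ABORT_RATE_CHANGE:
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block __maybe_unused foo_clk_nb = {
	.notifier_call = foo_clk_notifier_cb,
};

/* registered from a consumer with: clk_notifier_register(clk, &foo_clk_nb); */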
3411
3412/**
3413 * clk_notifier_unregister - remove a clk rate change notifier
3414 * @clk: struct clk *
3415 * @nb: struct notifier_block * with callback info
3416 *
 3417 * Request no further notification for changes to 'clk' and free the memory
3418 * allocated in clk_notifier_register.
3419 *
3420 * Returns -EINVAL if called with null arguments; otherwise, passes
3421 * along the return value of srcu_notifier_chain_unregister().
3422 */
3423int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
3424{
3425 struct clk_notifier *cn = NULL;
3426 int ret = -EINVAL;
3427
3428 if (!clk || !nb)
3429 return -EINVAL;
3430
eab89f69 3431 clk_prepare_lock();
b2476490
MT
3432
3433 list_for_each_entry(cn, &clk_notifier_list, node)
3434 if (cn->clk == clk)
3435 break;
3436
3437 if (cn->clk == clk) {
3438 ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
3439
035a61c3 3440 clk->core->notifier_count--;
b2476490
MT
3441
3442 /* XXX the notifier code should handle this better */
3443 if (!cn->notifier_head.head) {
3444 srcu_cleanup_notifier_head(&cn->notifier_head);
72b5322f 3445 list_del(&cn->node);
b2476490
MT
3446 kfree(cn);
3447 }
3448
3449 } else {
3450 ret = -ENOENT;
3451 }
3452
eab89f69 3453 clk_prepare_unlock();
b2476490
MT
3454
3455 return ret;
3456}
3457EXPORT_SYMBOL_GPL(clk_notifier_unregister);
766e6a4e
GL
3458
3459#ifdef CONFIG_OF
3460/**
3461 * struct of_clk_provider - Clock provider registration structure
3462 * @link: Entry in global list of clock providers
3463 * @node: Pointer to device tree node of clock provider
3464 * @get: Get clock callback. Returns NULL or a struct clk for the
3465 * given clock specifier
3466 * @data: context pointer to be passed into @get callback
3467 */
3468struct of_clk_provider {
3469 struct list_head link;
3470
3471 struct device_node *node;
3472 struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
0861e5b8 3473 struct clk_hw *(*get_hw)(struct of_phandle_args *clkspec, void *data);
766e6a4e
GL
3474 void *data;
3475};
3476
f2f6c255
PG
3477static const struct of_device_id __clk_of_table_sentinel
3478 __used __section(__clk_of_table_end);
3479
766e6a4e 3480static LIST_HEAD(of_clk_providers);
d6782c26
SN
3481static DEFINE_MUTEX(of_clk_mutex);
3482
766e6a4e
GL
3483struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
3484 void *data)
3485{
3486 return data;
3487}
3488EXPORT_SYMBOL_GPL(of_clk_src_simple_get);
3489
0861e5b8
SB
3490struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec, void *data)
3491{
3492 return data;
3493}
3494EXPORT_SYMBOL_GPL(of_clk_hw_simple_get);
3495
494bfec9
SG
3496struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
3497{
3498 struct clk_onecell_data *clk_data = data;
3499 unsigned int idx = clkspec->args[0];
3500
3501 if (idx >= clk_data->clk_num) {
7e96353c 3502 pr_err("%s: invalid clock index %u\n", __func__, idx);
494bfec9
SG
3503 return ERR_PTR(-EINVAL);
3504 }
3505
3506 return clk_data->clks[idx];
3507}
3508EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);
3509
0861e5b8
SB
3510struct clk_hw *
3511of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data)
3512{
3513 struct clk_hw_onecell_data *hw_data = data;
3514 unsigned int idx = clkspec->args[0];
3515
3516 if (idx >= hw_data->num) {
3517 pr_err("%s: invalid index %u\n", __func__, idx);
3518 return ERR_PTR(-EINVAL);
3519 }
3520
3521 return hw_data->hws[idx];
3522}
3523EXPORT_SYMBOL_GPL(of_clk_hw_onecell_get);

/**
 * of_clk_add_provider() - Register a clock provider for a node
 * @np: Device node pointer associated with clock provider
 * @clk_src_get: callback for decoding clock
 * @data: context pointer for @clk_src_get callback.
 *
 * Return: 0 on success, or a negative errno on failure.
 */
int of_clk_add_provider(struct device_node *np,
			struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
						   void *data),
			void *data)
{
	struct of_clk_provider *cp;
	int ret;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	cp->node = of_node_get(np);
	cp->data = data;
	cp->get = clk_src_get;

	mutex_lock(&of_clk_mutex);
	list_add(&cp->link, &of_clk_providers);
	mutex_unlock(&of_clk_mutex);
	pr_debug("Added clock from %pOF\n", np);

	ret = of_clk_set_defaults(np, true);
	if (ret < 0)
		of_clk_del_provider(np);

	return ret;
}
EXPORT_SYMBOL_GPL(of_clk_add_provider);

/**
 * of_clk_add_hw_provider() - Register a clock provider for a node
 * @np: Device node pointer associated with clock provider
 * @get: callback for decoding clk_hw
 * @data: context pointer for @get callback.
 *
 * Return: 0 on success, or a negative errno on failure.
 */
int of_clk_add_hw_provider(struct device_node *np,
			   struct clk_hw *(*get)(struct of_phandle_args *clkspec,
						 void *data),
			   void *data)
{
	struct of_clk_provider *cp;
	int ret;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	cp->node = of_node_get(np);
	cp->data = data;
	cp->get_hw = get;

	mutex_lock(&of_clk_mutex);
	list_add(&cp->link, &of_clk_providers);
	mutex_unlock(&of_clk_mutex);
	pr_debug("Added clk_hw provider from %pOF\n", np);

	ret = of_clk_set_defaults(np, true);
	if (ret < 0)
		of_clk_del_provider(np);

	return ret;
}
EXPORT_SYMBOL_GPL(of_clk_add_hw_provider);

static void devm_of_clk_release_provider(struct device *dev, void *res)
{
	of_clk_del_provider(*(struct device_node **)res);
}

int devm_of_clk_add_hw_provider(struct device *dev,
				struct clk_hw *(*get)(struct of_phandle_args *clkspec,
						      void *data),
				void *data)
{
	struct device_node **ptr, *np;
	int ret;

	ptr = devres_alloc(devm_of_clk_release_provider, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	np = dev->of_node;
	ret = of_clk_add_hw_provider(np, get, data);
	if (!ret) {
		*ptr = np;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_of_clk_add_hw_provider);
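
/*
 * Example (illustrative sketch): the devres variant from a platform
 * driver's probe, so the provider is dropped automatically when the
 * device is unbound.  Assumes linux/platform_device.h and reuses the
 * hypothetical baz_alloc_hw_data() helper sketched above.
 */
static int qux_probe(struct platform_device *pdev)
{
	struct clk_hw_onecell_data *hw_data;

	/* hypothetical three-output controller */
	hw_data = baz_alloc_hw_data(&pdev->dev, 3);
	if (!hw_data)
		return -ENOMEM;

	/* No matching removal needed; devres handles it on unbind */
	return devm_of_clk_add_hw_provider(&pdev->dev,
					   of_clk_hw_onecell_get, hw_data);
}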

/**
 * of_clk_del_provider() - Remove a previously registered clock provider
 * @np: Device node pointer associated with clock provider
 */
void of_clk_del_provider(struct device_node *np)
{
	struct of_clk_provider *cp;

	mutex_lock(&of_clk_mutex);
	list_for_each_entry(cp, &of_clk_providers, link) {
		if (cp->node == np) {
			list_del(&cp->link);
			of_node_put(cp->node);
			kfree(cp);
			break;
		}
	}
	mutex_unlock(&of_clk_mutex);
}
EXPORT_SYMBOL_GPL(of_clk_del_provider);
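
/*
 * Example (illustrative sketch): drivers that used the non-devres
 * of_clk_add_provider()/of_clk_add_hw_provider() must unregister by
 * hand, typically from the driver's .remove callback; the driver
 * and callback below are hypothetical.
 */
static int qux_remove(struct platform_device *pdev)
{
	of_clk_del_provider(pdev->dev.of_node);
	return 0;
}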

static int devm_clk_provider_match(struct device *dev, void *res, void *data)
{
	struct device_node **np = res;

	if (WARN_ON(!np || !*np))
		return 0;

	return *np == data;
}

void devm_of_clk_del_provider(struct device *dev)
{
	int ret;

	ret = devres_release(dev, devm_of_clk_release_provider,
			     devm_clk_provider_match, dev->of_node);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_of_clk_del_provider);

static struct clk_hw *
__of_clk_get_hw_from_provider(struct of_clk_provider *provider,
			      struct of_phandle_args *clkspec)
{
	struct clk *clk;

	if (provider->get_hw)
		return provider->get_hw(clkspec, provider->data);

	clk = provider->get(clkspec, provider->data);
	if (IS_ERR(clk))
		return ERR_CAST(clk);
	return __clk_get_hw(clk);
}

struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
				       const char *dev_id, const char *con_id)
{
	struct of_clk_provider *provider;
	struct clk *clk = ERR_PTR(-EPROBE_DEFER);
	struct clk_hw *hw;

	if (!clkspec)
		return ERR_PTR(-EINVAL);

	/* Check if we have such a provider in our array */
	mutex_lock(&of_clk_mutex);
	list_for_each_entry(provider, &of_clk_providers, link) {
		if (provider->node == clkspec->np) {
			hw = __of_clk_get_hw_from_provider(provider, clkspec);
			clk = __clk_create_clk(hw, dev_id, con_id);
		}

		if (!IS_ERR(clk)) {
			if (!__clk_get(clk)) {
				__clk_free_clk(clk);
				clk = ERR_PTR(-ENOENT);
			}

			break;
		}
	}
	mutex_unlock(&of_clk_mutex);

	return clk;
}

/**
 * of_clk_get_from_provider() - Lookup a clock from a clock provider
 * @clkspec: pointer to a clock specifier data structure
 *
 * This function looks up a struct clk from the registered list of clock
 * providers; the input is a clock specifier data structure as returned
 * from the of_parse_phandle_with_args() function call.
 */
struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
	return __of_clk_get_from_provider(clkspec, NULL, __func__);
}
EXPORT_SYMBOL_GPL(of_clk_get_from_provider);
766e6a4e 3728
929e7f3b
SB
3729/**
3730 * of_clk_get_parent_count() - Count the number of clocks a device node has
3731 * @np: device node to count
3732 *
3733 * Returns: The number of clocks that are possible parents of this node
3734 */
3735unsigned int of_clk_get_parent_count(struct device_node *np)
f6102742 3736{
929e7f3b
SB
3737 int count;
3738
3739 count = of_count_phandle_with_args(np, "clocks", "#clock-cells");
3740 if (count < 0)
3741 return 0;
3742
3743 return count;
f6102742
MT
3744}
3745EXPORT_SYMBOL_GPL(of_clk_get_parent_count);

const char *of_clk_get_parent_name(struct device_node *np, int index)
{
	struct of_phandle_args clkspec;
	struct property *prop;
	const char *clk_name;
	const __be32 *vp;
	u32 pv;
	int rc;
	int count;
	struct clk *clk;

	rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
					&clkspec);
	if (rc)
		return NULL;

	index = clkspec.args_count ? clkspec.args[0] : 0;
	count = 0;

	/*
	 * If there is a "clock-indices" property, use it to translate the
	 * requested index into an array offset for the "clock-output-names"
	 * property.
	 */
	of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) {
		if (index == pv) {
			index = count;
			break;
		}
		count++;
	}
	/* We went off the end of 'clock-indices' without finding it */
	if (prop && !vp)
		return NULL;

	if (of_property_read_string_index(clkspec.np, "clock-output-names",
					  index,
					  &clk_name) < 0) {
		/*
		 * Best effort to get the name if the clock has been
		 * registered with the framework. If the clock isn't
		 * registered, we return the node name as the name of
		 * the clock as long as #clock-cells = 0.
		 */
		clk = of_clk_get_from_provider(&clkspec);
		if (IS_ERR(clk)) {
			if (clkspec.args_count == 0)
				clk_name = clkspec.np->name;
			else
				clk_name = NULL;
		} else {
			clk_name = __clk_get_name(clk);
			clk_put(clk);
		}
	}

	of_node_put(clkspec.np);
	return clk_name;
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_name);
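
/*
 * Example (illustrative sketch): a hypothetical DT fragment showing
 * how "clock-indices" is resolved above.  For a consumer with
 * clocks = <&osc 2>, of_clk_get_parent_name(consumer, 0) maps the
 * specifier value 2 through "clock-indices" to array offset 1 and
 * returns "out_b" from "clock-output-names":
 *
 *	osc: clock-controller@0 {
 *		#clock-cells = <1>;
 *		clock-indices = <0>, <2>;
 *		clock-output-names = "out_a", "out_b";
 *	};
 */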

/**
 * of_clk_parent_fill() - Fill @parents with names of @np's parents and return
 * number of parents
 * @np: Device node pointer associated with clock provider
 * @parents: pointer to char array that holds the parents' names
 * @size: size of the @parents array
 *
 * Return: number of parents for the clock node.
 */
int of_clk_parent_fill(struct device_node *np, const char **parents,
		       unsigned int size)
{
	unsigned int i = 0;

	while (i < size && (parents[i] = of_clk_get_parent_name(np, i)) != NULL)
		i++;

	return i;
}
EXPORT_SYMBOL_GPL(of_clk_parent_fill);
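
/*
 * Example (illustrative sketch): collecting a mux's parent names from
 * DT before registering it, pairing of_clk_get_parent_count() with
 * of_clk_parent_fill().  The setup function is hypothetical and the
 * mux registration itself is elided.
 */
static void __init quux_mux_setup(struct device_node *np)
{
	unsigned int num_parents = of_clk_get_parent_count(np);
	const char **parent_names;

	if (!num_parents)
		return;

	parent_names = kcalloc(num_parents, sizeof(*parent_names),
			       GFP_KERNEL);
	if (!parent_names)
		return;

	of_clk_parent_fill(np, parent_names, num_parents);

	/* ... register a mux clock using parent_names here ... */

	kfree(parent_names);
}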

struct clock_provider {
	void (*clk_init_cb)(struct device_node *);
	struct device_node *np;
	struct list_head node;
};

/*
 * This function looks for a parent clock. If there is one, then it
 * checks that the provider for this parent clock was initialized, in
 * which case the parent clock will be ready.
 */
static int parent_ready(struct device_node *np)
{
	int i = 0;

	while (true) {
		struct clk *clk = of_clk_get(np, i);

		/* this parent is ready, we can check the next one */
		if (!IS_ERR(clk)) {
			clk_put(clk);
			i++;
			continue;
		}

		/* at least one parent is not ready, we exit now */
		if (PTR_ERR(clk) == -EPROBE_DEFER)
			return 0;

		/*
		 * Here we assume that the device tree is written
		 * correctly, so any other error means that there are
		 * no more parents. As we didn't exit earlier, the
		 * parents seen so far are ready; and if there are no
		 * clock parents at all, there is nothing to wait for,
		 * so we consider their absence as being ready.
		 */
		return 1;
	}
}

/**
 * of_clk_detect_critical() - set CLK_IS_CRITICAL flag from Device Tree
 * @np: Device node pointer associated with clock provider
 * @index: clock index
 * @flags: pointer to top-level framework flags
 *
 * Detects if the clock-critical property exists and, if so, sets the
 * corresponding CLK_IS_CRITICAL flag.
 *
 * Do not use this function. It exists only for legacy Device Tree
 * bindings, such as the outdated one-clock-per-node style, which put
 * all clock data into .dts and leave the Linux driver with no clock
 * data, making it impossible to set this flag correctly from the
 * driver. Only those drivers may call of_clk_detect_critical() from
 * their setup functions.
 *
 * Return: error code or zero on success
 */
int of_clk_detect_critical(struct device_node *np,
			   int index, unsigned long *flags)
{
	struct property *prop;
	const __be32 *cur;
	uint32_t idx;

	if (!np || !flags)
		return -EINVAL;

	of_property_for_each_u32(np, "clock-critical", prop, cur, idx)
		if (index == idx)
			*flags |= CLK_IS_CRITICAL;

	return 0;
}
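
/*
 * Example (illustrative sketch): a legacy one-clock-per-node setup
 * function honouring "clock-critical".  The node usage, 32 kHz rate
 * and registration below are hypothetical.
 */
static void __init legacy_osc_setup(struct device_node *np)
{
	unsigned long flags = 0;
	struct clk *clk;

	if (of_clk_detect_critical(np, 0, &flags))
		return;

	/* CLK_IS_CRITICAL, if detected, is passed on to the core */
	clk = clk_register_fixed_rate(NULL, np->name, NULL, flags, 32768);
	if (!IS_ERR(clk))
		of_clk_add_provider(np, of_clk_src_simple_get, clk);
}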

/**
 * of_clk_init() - Scan and init clock providers from the DT
 * @matches: array of compatible values and init functions for providers.
 *
 * This function scans the device tree for matching clock providers
 * and calls their initialization functions, trying to follow parent
 * dependencies so that a provider's parents are initialized first.
 */
void __init of_clk_init(const struct of_device_id *matches)
{
	const struct of_device_id *match;
	struct device_node *np;
	struct clock_provider *clk_provider, *next;
	bool is_init_done;
	bool force = false;
	LIST_HEAD(clk_provider_list);

	if (!matches)
		matches = &__clk_of_table;

	/* First prepare the list of the clocks providers */
	for_each_matching_node_and_match(np, matches, &match) {
		struct clock_provider *parent;

		if (!of_device_is_available(np))
			continue;

		parent = kzalloc(sizeof(*parent), GFP_KERNEL);
		if (!parent) {
			list_for_each_entry_safe(clk_provider, next,
						 &clk_provider_list, node) {
				list_del(&clk_provider->node);
				of_node_put(clk_provider->np);
				kfree(clk_provider);
			}
			of_node_put(np);
			return;
		}

		parent->clk_init_cb = match->data;
		parent->np = of_node_get(np);
		list_add_tail(&parent->node, &clk_provider_list);
	}

	while (!list_empty(&clk_provider_list)) {
		is_init_done = false;
		list_for_each_entry_safe(clk_provider, next,
					 &clk_provider_list, node) {
			if (force || parent_ready(clk_provider->np)) {

				/* Don't populate platform devices */
				of_node_set_flag(clk_provider->np,
						 OF_POPULATED);

				clk_provider->clk_init_cb(clk_provider->np);
				of_clk_set_defaults(clk_provider->np, true);

				list_del(&clk_provider->node);
				of_node_put(clk_provider->np);
				kfree(clk_provider);
				is_init_done = true;
			}
		}

		/*
		 * We didn't manage to initialize any of the
		 * remaining providers during the last loop, so now we
		 * initialize all the remaining ones unconditionally
		 * in case the clock parent was not mandatory
		 */
		if (!is_init_done)
			force = true;
	}
}

#endif