git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/clk/clk.c
clk: change clk_ops' ->determine_rate() prototype
[mirror_ubuntu-bionic-kernel.git] / drivers / clk / clk.c
b2476490
MT
1/*
2 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
3 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * Standard functionality for the common clock API. See Documentation/clk.txt
10 */
11
b09d6d99 12#include <linux/clk-provider.h>
86be408b 13#include <linux/clk/clk-conf.h>
b2476490
MT
14#include <linux/module.h>
15#include <linux/mutex.h>
16#include <linux/spinlock.h>
17#include <linux/err.h>
18#include <linux/list.h>
19#include <linux/slab.h>
766e6a4e 20#include <linux/of.h>
46c8773a 21#include <linux/device.h>
f2f6c255 22#include <linux/init.h>
533ddeb1 23#include <linux/sched.h>
562ef0b0 24#include <linux/clkdev.h>
b2476490 25
d6782c26
SN
26#include "clk.h"
27
b2476490
MT
28static DEFINE_SPINLOCK(enable_lock);
29static DEFINE_MUTEX(prepare_lock);
30
533ddeb1
MT
31static struct task_struct *prepare_owner;
32static struct task_struct *enable_owner;
33
34static int prepare_refcnt;
35static int enable_refcnt;
36
b2476490
MT
37static HLIST_HEAD(clk_root_list);
38static HLIST_HEAD(clk_orphan_list);
39static LIST_HEAD(clk_notifier_list);
40
b09d6d99
MT
41/*** private data structures ***/
42
43struct clk_core {
44 const char *name;
45 const struct clk_ops *ops;
46 struct clk_hw *hw;
47 struct module *owner;
48 struct clk_core *parent;
49 const char **parent_names;
50 struct clk_core **parents;
51 u8 num_parents;
52 u8 new_parent_index;
53 unsigned long rate;
1c8e6004 54 unsigned long req_rate;
b09d6d99
MT
55 unsigned long new_rate;
56 struct clk_core *new_parent;
57 struct clk_core *new_child;
58 unsigned long flags;
59 unsigned int enable_count;
60 unsigned int prepare_count;
61 unsigned long accuracy;
62 int phase;
63 struct hlist_head children;
64 struct hlist_node child_node;
1c8e6004 65 struct hlist_head clks;
b09d6d99
MT
66 unsigned int notifier_count;
67#ifdef CONFIG_DEBUG_FS
68 struct dentry *dentry;
8c9a8a8f 69 struct hlist_node debug_node;
b09d6d99
MT
70#endif
71 struct kref ref;
72};
73
dfc202ea
SB
74#define CREATE_TRACE_POINTS
75#include <trace/events/clk.h>
76
b09d6d99
MT
77struct clk {
78 struct clk_core *core;
79 const char *dev_id;
80 const char *con_id;
1c8e6004
TV
81 unsigned long min_rate;
82 unsigned long max_rate;
50595f8b 83 struct hlist_node clks_node;
b09d6d99
MT
84};
85
eab89f69
MT
86/*** locking ***/
87static void clk_prepare_lock(void)
88{
533ddeb1
MT
89 if (!mutex_trylock(&prepare_lock)) {
90 if (prepare_owner == current) {
91 prepare_refcnt++;
92 return;
93 }
94 mutex_lock(&prepare_lock);
95 }
96 WARN_ON_ONCE(prepare_owner != NULL);
97 WARN_ON_ONCE(prepare_refcnt != 0);
98 prepare_owner = current;
99 prepare_refcnt = 1;
eab89f69
MT
100}
101
102static void clk_prepare_unlock(void)
103{
533ddeb1
MT
104 WARN_ON_ONCE(prepare_owner != current);
105 WARN_ON_ONCE(prepare_refcnt == 0);
106
107 if (--prepare_refcnt)
108 return;
109 prepare_owner = NULL;
eab89f69
MT
110 mutex_unlock(&prepare_lock);
111}
112
113static unsigned long clk_enable_lock(void)
114{
115 unsigned long flags;
533ddeb1
MT
116
117 if (!spin_trylock_irqsave(&enable_lock, flags)) {
118 if (enable_owner == current) {
119 enable_refcnt++;
120 return flags;
121 }
122 spin_lock_irqsave(&enable_lock, flags);
123 }
124 WARN_ON_ONCE(enable_owner != NULL);
125 WARN_ON_ONCE(enable_refcnt != 0);
126 enable_owner = current;
127 enable_refcnt = 1;
eab89f69
MT
128 return flags;
129}
130
131static void clk_enable_unlock(unsigned long flags)
132{
533ddeb1
MT
133 WARN_ON_ONCE(enable_owner != current);
134 WARN_ON_ONCE(enable_refcnt == 0);
135
136 if (--enable_refcnt)
137 return;
138 enable_owner = NULL;
eab89f69
MT
139 spin_unlock_irqrestore(&enable_lock, flags);
140}
141
4dff95dc
SB
142static bool clk_core_is_prepared(struct clk_core *core)
143{
144 /*
145 * .is_prepared is optional for clocks that can prepare;
146 * fall back to the software usage counter if it is missing
147 */
148 if (!core->ops->is_prepared)
149 return core->prepare_count;
b2476490 150
4dff95dc
SB
151 return core->ops->is_prepared(core->hw);
152}
b2476490 153
4dff95dc
SB
154static bool clk_core_is_enabled(struct clk_core *core)
155{
156 /*
157 * .is_enabled is only mandatory for clocks that gate;
158 * fall back to the software usage counter if .is_enabled is missing
159 */
160 if (!core->ops->is_enabled)
161 return core->enable_count;
6b44c854 162
4dff95dc
SB
163 return core->ops->is_enabled(core->hw);
164}
6b44c854 165
4dff95dc 166static void clk_unprepare_unused_subtree(struct clk_core *core)
1af599df 167{
4dff95dc
SB
168 struct clk_core *child;
169
170 lockdep_assert_held(&prepare_lock);
171
172 hlist_for_each_entry(child, &core->children, child_node)
173 clk_unprepare_unused_subtree(child);
174
175 if (core->prepare_count)
1af599df
PG
176 return;
177
4dff95dc
SB
178 if (core->flags & CLK_IGNORE_UNUSED)
179 return;
180
181 if (clk_core_is_prepared(core)) {
182 trace_clk_unprepare(core);
183 if (core->ops->unprepare_unused)
184 core->ops->unprepare_unused(core->hw);
185 else if (core->ops->unprepare)
186 core->ops->unprepare(core->hw);
187 trace_clk_unprepare_complete(core);
188 }
1af599df
PG
189}
190
4dff95dc 191static void clk_disable_unused_subtree(struct clk_core *core)
1af599df 192{
035a61c3 193 struct clk_core *child;
4dff95dc 194 unsigned long flags;
1af599df 195
4dff95dc 196 lockdep_assert_held(&prepare_lock);
1af599df 197
4dff95dc
SB
198 hlist_for_each_entry(child, &core->children, child_node)
199 clk_disable_unused_subtree(child);
1af599df 200
4dff95dc
SB
201 flags = clk_enable_lock();
202
203 if (core->enable_count)
204 goto unlock_out;
205
206 if (core->flags & CLK_IGNORE_UNUSED)
207 goto unlock_out;
208
209 /*
210 * some gate clocks have special needs during the disable-unused
211 * sequence. call .disable_unused if available, otherwise fall
212 * back to .disable
213 */
214 if (clk_core_is_enabled(core)) {
215 trace_clk_disable(core);
216 if (core->ops->disable_unused)
217 core->ops->disable_unused(core->hw);
218 else if (core->ops->disable)
219 core->ops->disable(core->hw);
220 trace_clk_disable_complete(core);
221 }
222
223unlock_out:
224 clk_enable_unlock(flags);
1af599df
PG
225}
226
4dff95dc
SB
227static bool clk_ignore_unused;
228static int __init clk_ignore_unused_setup(char *__unused)
1af599df 229{
4dff95dc
SB
230 clk_ignore_unused = true;
231 return 1;
232}
233__setup("clk_ignore_unused", clk_ignore_unused_setup);
1af599df 234
4dff95dc
SB
235static int clk_disable_unused(void)
236{
237 struct clk_core *core;
238
239 if (clk_ignore_unused) {
240 pr_warn("clk: Not disabling unused clocks\n");
241 return 0;
242 }
1af599df 243
eab89f69 244 clk_prepare_lock();
1af599df 245
4dff95dc
SB
246 hlist_for_each_entry(core, &clk_root_list, child_node)
247 clk_disable_unused_subtree(core);
248
249 hlist_for_each_entry(core, &clk_orphan_list, child_node)
250 clk_disable_unused_subtree(core);
251
252 hlist_for_each_entry(core, &clk_root_list, child_node)
253 clk_unprepare_unused_subtree(core);
254
255 hlist_for_each_entry(core, &clk_orphan_list, child_node)
256 clk_unprepare_unused_subtree(core);
1af599df 257
eab89f69 258 clk_prepare_unlock();
1af599df
PG
259
260 return 0;
261}
4dff95dc 262late_initcall_sync(clk_disable_unused);
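/*
 * Example (illustrative): a platform that relies on firmware-configured
 * clocks can keep the late_initcall above from gating them by passing the
 * clk_ignore_unused parameter handled by the __setup() hook, e.g. on a
 * hypothetical kernel command line:
 *
 *	console=ttyS0,115200 root=/dev/mmcblk0p2 clk_ignore_unused
 *
 * Only clk_ignore_unused is relevant here; the other arguments are
 * placeholders.
 */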
1af599df 263
4dff95dc 264/*** helper functions ***/
1af599df 265
4dff95dc 266const char *__clk_get_name(struct clk *clk)
1af599df 267{
4dff95dc 268 return !clk ? NULL : clk->core->name;
1af599df 269}
4dff95dc 270EXPORT_SYMBOL_GPL(__clk_get_name);
1af599df 271
4dff95dc
SB
272struct clk_hw *__clk_get_hw(struct clk *clk)
273{
274 return !clk ? NULL : clk->core->hw;
275}
276EXPORT_SYMBOL_GPL(__clk_get_hw);
1af599df 277
4dff95dc 278u8 __clk_get_num_parents(struct clk *clk)
bddca894 279{
4dff95dc
SB
280 return !clk ? 0 : clk->core->num_parents;
281}
282EXPORT_SYMBOL_GPL(__clk_get_num_parents);
bddca894 283
4dff95dc
SB
284struct clk *__clk_get_parent(struct clk *clk)
285{
286 if (!clk)
287 return NULL;
288
289 /* TODO: Create a per-user clk and change callers to call clk_put */
290 return !clk->core->parent ? NULL : clk->core->parent->hw->clk;
bddca894 291}
4dff95dc 292EXPORT_SYMBOL_GPL(__clk_get_parent);
bddca894 293
4dff95dc
SB
294static struct clk_core *__clk_lookup_subtree(const char *name,
295 struct clk_core *core)
bddca894 296{
035a61c3 297 struct clk_core *child;
4dff95dc 298 struct clk_core *ret;
bddca894 299
4dff95dc
SB
300 if (!strcmp(core->name, name))
301 return core;
bddca894 302
4dff95dc
SB
303 hlist_for_each_entry(child, &core->children, child_node) {
304 ret = __clk_lookup_subtree(name, child);
305 if (ret)
306 return ret;
bddca894
PG
307 }
308
4dff95dc 309 return NULL;
bddca894
PG
310}
311
4dff95dc 312static struct clk_core *clk_core_lookup(const char *name)
bddca894 313{
4dff95dc
SB
314 struct clk_core *root_clk;
315 struct clk_core *ret;
bddca894 316
4dff95dc
SB
317 if (!name)
318 return NULL;
bddca894 319
4dff95dc
SB
320 /* search the 'proper' clk tree first */
321 hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
322 ret = __clk_lookup_subtree(name, root_clk);
323 if (ret)
324 return ret;
bddca894
PG
325 }
326
4dff95dc
SB
327 /* if not found, then search the orphan tree */
328 hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
329 ret = __clk_lookup_subtree(name, root_clk);
330 if (ret)
331 return ret;
332 }
bddca894 333
4dff95dc 334 return NULL;
bddca894
PG
335}
336
4dff95dc
SB
337static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core,
338 u8 index)
bddca894 339{
4dff95dc
SB
340 if (!core || index >= core->num_parents)
341 return NULL;
342 else if (!core->parents)
343 return clk_core_lookup(core->parent_names[index]);
344 else if (!core->parents[index])
345 return core->parents[index] =
346 clk_core_lookup(core->parent_names[index]);
347 else
348 return core->parents[index];
bddca894
PG
349}
350
4dff95dc 351struct clk *clk_get_parent_by_index(struct clk *clk, u8 index)
b2476490 352{
4dff95dc 353 struct clk_core *parent;
b2476490 354
4dff95dc
SB
355 if (!clk)
356 return NULL;
b2476490 357
4dff95dc 358 parent = clk_core_get_parent_by_index(clk->core, index);
5279fc40 359
4dff95dc
SB
360 return !parent ? NULL : parent->hw->clk;
361}
362EXPORT_SYMBOL_GPL(clk_get_parent_by_index);
e59c5371 363
4dff95dc
SB
364unsigned int __clk_get_enable_count(struct clk *clk)
365{
366 return !clk ? 0 : clk->core->enable_count;
367}
b2476490 368
4dff95dc
SB
369static unsigned long clk_core_get_rate_nolock(struct clk_core *core)
370{
371 unsigned long ret;
b2476490 372
4dff95dc
SB
373 if (!core) {
374 ret = 0;
375 goto out;
376 }
b2476490 377
4dff95dc 378 ret = core->rate;
b2476490 379
4dff95dc
SB
380 if (core->flags & CLK_IS_ROOT)
381 goto out;
c646cbf1 382
4dff95dc
SB
383 if (!core->parent)
384 ret = 0;
b2476490 385
b2476490
MT
386out:
387 return ret;
388}
389
4dff95dc 390unsigned long __clk_get_rate(struct clk *clk)
b2476490 391{
4dff95dc
SB
392 if (!clk)
393 return 0;
6314b679 394
4dff95dc
SB
395 return clk_core_get_rate_nolock(clk->core);
396}
397EXPORT_SYMBOL_GPL(__clk_get_rate);
b2476490 398
4dff95dc
SB
399static unsigned long __clk_get_accuracy(struct clk_core *core)
400{
401 if (!core)
402 return 0;
b2476490 403
4dff95dc 404 return core->accuracy;
b2476490
MT
405}
406
4dff95dc 407unsigned long __clk_get_flags(struct clk *clk)
fcb0ee6a 408{
4dff95dc 409 return !clk ? 0 : clk->core->flags;
fcb0ee6a 410}
4dff95dc 411EXPORT_SYMBOL_GPL(__clk_get_flags);
fcb0ee6a 412
4dff95dc 413bool __clk_is_prepared(struct clk *clk)
fb2b3c9f 414{
4dff95dc
SB
415 if (!clk)
416 return false;
fb2b3c9f 417
4dff95dc 418 return clk_core_is_prepared(clk->core);
fb2b3c9f 419}
fb2b3c9f 420
4dff95dc 421bool __clk_is_enabled(struct clk *clk)
b2476490 422{
4dff95dc
SB
423 if (!clk)
424 return false;
b2476490 425
4dff95dc
SB
426 return clk_core_is_enabled(clk->core);
427}
428EXPORT_SYMBOL_GPL(__clk_is_enabled);
b2476490 429
4dff95dc
SB
430static bool mux_is_better_rate(unsigned long rate, unsigned long now,
431 unsigned long best, unsigned long flags)
432{
433 if (flags & CLK_MUX_ROUND_CLOSEST)
434 return abs(now - rate) < abs(best - rate);
1af599df 435
4dff95dc
SB
436 return now <= rate && now > best;
437}
bddca894 438
0817b62c
BB
439static int
440clk_mux_determine_rate_flags(struct clk_hw *hw, struct clk_rate_request *req,
4dff95dc
SB
441 unsigned long flags)
442{
443 struct clk_core *core = hw->core, *parent, *best_parent = NULL;
0817b62c
BB
444 int i, num_parents, ret;
445 unsigned long best = 0;
446 struct clk_rate_request parent_req = *req;
b2476490 447
4dff95dc
SB
448 /* if NO_REPARENT flag set, pass through to current parent */
449 if (core->flags & CLK_SET_RATE_NO_REPARENT) {
450 parent = core->parent;
0817b62c
BB
451 if (core->flags & CLK_SET_RATE_PARENT) {
452 ret = __clk_determine_rate(parent ? parent->hw : NULL,
453 &parent_req);
454 if (ret)
455 return ret;
456
457 best = parent_req.rate;
458 } else if (parent) {
4dff95dc 459 best = clk_core_get_rate_nolock(parent);
0817b62c 460 } else {
4dff95dc 461 best = clk_core_get_rate_nolock(core);
0817b62c
BB
462 }
463
4dff95dc
SB
464 goto out;
465 }
b2476490 466
4dff95dc
SB
467 /* find the parent that can provide the fastest rate <= rate */
468 num_parents = core->num_parents;
469 for (i = 0; i < num_parents; i++) {
470 parent = clk_core_get_parent_by_index(core, i);
471 if (!parent)
472 continue;
0817b62c
BB
473
474 if (core->flags & CLK_SET_RATE_PARENT) {
475 parent_req = *req;
476 ret = __clk_determine_rate(parent->hw, &parent_req);
477 if (ret)
478 continue;
479 } else {
480 parent_req.rate = clk_core_get_rate_nolock(parent);
481 }
482
483 if (mux_is_better_rate(req->rate, parent_req.rate,
484 best, flags)) {
4dff95dc 485 best_parent = parent;
0817b62c 486 best = parent_req.rate;
4dff95dc
SB
487 }
488 }
b2476490 489
4dff95dc
SB
490out:
491 if (best_parent)
0817b62c
BB
492 req->best_parent_hw = best_parent->hw;
493 req->best_parent_rate = best;
494 req->rate = best;
b2476490 495
0817b62c 496 return 0;
b33d212f 497}
4dff95dc
SB
498
499struct clk *__clk_lookup(const char *name)
fcb0ee6a 500{
4dff95dc
SB
501 struct clk_core *core = clk_core_lookup(name);
502
503 return !core ? NULL : core->hw->clk;
fcb0ee6a 504}
b2476490 505
4dff95dc
SB
506static void clk_core_get_boundaries(struct clk_core *core,
507 unsigned long *min_rate,
508 unsigned long *max_rate)
1c155b3d 509{
4dff95dc 510 struct clk *clk_user;
1c155b3d 511
4dff95dc
SB
512 *min_rate = 0;
513 *max_rate = ULONG_MAX;
496eadf8 514
4dff95dc
SB
515 hlist_for_each_entry(clk_user, &core->clks, clks_node)
516 *min_rate = max(*min_rate, clk_user->min_rate);
1c155b3d 517
4dff95dc
SB
518 hlist_for_each_entry(clk_user, &core->clks, clks_node)
519 *max_rate = min(*max_rate, clk_user->max_rate);
520}
1c155b3d 521
4dff95dc
SB
522/*
523 * Helper for finding best parent to provide a given frequency. This can be used
524 * directly as a determine_rate callback (e.g. for a mux), or from a more
525 * complex clock that may combine a mux with other operations.
526 */
0817b62c
BB
527int __clk_mux_determine_rate(struct clk_hw *hw,
528 struct clk_rate_request *req)
4dff95dc 529{
0817b62c 530 return clk_mux_determine_rate_flags(hw, req, 0);
1c155b3d 531}
4dff95dc 532EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);
1c155b3d 533
0817b62c
BB
534int __clk_mux_determine_rate_closest(struct clk_hw *hw,
535 struct clk_rate_request *req)
b2476490 536{
0817b62c 537 return clk_mux_determine_rate_flags(hw, req, CLK_MUX_ROUND_CLOSEST);
4dff95dc
SB
538}
539EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);
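/*
 * Example (illustrative sketch, not part of this file): a simple mux
 * provider can plug __clk_mux_determine_rate() straight into its clk_ops,
 * using the struct clk_rate_request based prototype introduced by this
 * commit. The foo_mux_* callbacks below are hypothetical driver code:
 *
 *	static const struct clk_ops foo_mux_ops = {
 *		.get_parent	= foo_mux_get_parent,
 *		.set_parent	= foo_mux_set_parent,
 *		.determine_rate	= __clk_mux_determine_rate,
 *	};
 *
 * With CLK_SET_RATE_PARENT set on the mux, the helper also asks each
 * candidate parent (via __clk_determine_rate) what rate it could provide.
 */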
b2476490 540
4dff95dc 541/*** clk api ***/
496eadf8 542
4dff95dc
SB
543static void clk_core_unprepare(struct clk_core *core)
544{
a6334725
SB
545 lockdep_assert_held(&prepare_lock);
546
4dff95dc
SB
547 if (!core)
548 return;
b2476490 549
4dff95dc
SB
550 if (WARN_ON(core->prepare_count == 0))
551 return;
b2476490 552
4dff95dc
SB
553 if (--core->prepare_count > 0)
554 return;
b2476490 555
4dff95dc 556 WARN_ON(core->enable_count > 0);
b2476490 557
4dff95dc 558 trace_clk_unprepare(core);
b2476490 559
4dff95dc
SB
560 if (core->ops->unprepare)
561 core->ops->unprepare(core->hw);
562
563 trace_clk_unprepare_complete(core);
564 clk_core_unprepare(core->parent);
b2476490
MT
565}
566
4dff95dc
SB
567/**
568 * clk_unprepare - undo preparation of a clock source
569 * @clk: the clk being unprepared
570 *
571 * clk_unprepare may sleep, which differentiates it from clk_disable. In a
572 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
573 * if the operation may sleep. One example is a clk which is accessed over
574 * I2C. In the complex case a clk gate operation may require a fast and a slow
575 * part. It is for this reason that clk_unprepare and clk_disable are not
576 * mutually exclusive. In fact clk_disable must be called before clk_unprepare.
577 */
578void clk_unprepare(struct clk *clk)
1e435256 579{
4dff95dc
SB
580 if (IS_ERR_OR_NULL(clk))
581 return;
582
583 clk_prepare_lock();
584 clk_core_unprepare(clk->core);
585 clk_prepare_unlock();
1e435256 586}
4dff95dc 587EXPORT_SYMBOL_GPL(clk_unprepare);
1e435256 588
4dff95dc 589static int clk_core_prepare(struct clk_core *core)
b2476490 590{
4dff95dc 591 int ret = 0;
b2476490 592
a6334725
SB
593 lockdep_assert_held(&prepare_lock);
594
4dff95dc 595 if (!core)
1e435256 596 return 0;
1e435256 597
4dff95dc
SB
598 if (core->prepare_count == 0) {
599 ret = clk_core_prepare(core->parent);
600 if (ret)
601 return ret;
b2476490 602
4dff95dc 603 trace_clk_prepare(core);
b2476490 604
4dff95dc
SB
605 if (core->ops->prepare)
606 ret = core->ops->prepare(core->hw);
b2476490 607
4dff95dc 608 trace_clk_prepare_complete(core);
1c155b3d 609
4dff95dc
SB
610 if (ret) {
611 clk_core_unprepare(core->parent);
612 return ret;
613 }
614 }
1c155b3d 615
4dff95dc 616 core->prepare_count++;
b2476490
MT
617
618 return 0;
619}
b2476490 620
4dff95dc
SB
621/**
622 * clk_prepare - prepare a clock source
623 * @clk: the clk being prepared
624 *
625 * clk_prepare may sleep, which differentiates it from clk_enable. In a simple
626 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
627 * operation may sleep. One example is a clk which is accessed over I2C. In
628 * the complex case a clk ungate operation may require a fast and a slow part.
629 * It is for this reason that clk_prepare and clk_enable are not mutually
630 * exclusive. In fact clk_prepare must be called before clk_enable.
631 * Returns 0 on success, -EERROR otherwise.
632 */
633int clk_prepare(struct clk *clk)
b2476490 634{
4dff95dc 635 int ret;
b2476490 636
4dff95dc
SB
637 if (!clk)
638 return 0;
b2476490 639
4dff95dc
SB
640 clk_prepare_lock();
641 ret = clk_core_prepare(clk->core);
642 clk_prepare_unlock();
643
644 return ret;
b2476490 645}
4dff95dc 646EXPORT_SYMBOL_GPL(clk_prepare);
b2476490 647
4dff95dc 648static void clk_core_disable(struct clk_core *core)
b2476490 649{
a6334725
SB
650 lockdep_assert_held(&enable_lock);
651
4dff95dc
SB
652 if (!core)
653 return;
035a61c3 654
4dff95dc
SB
655 if (WARN_ON(core->enable_count == 0))
656 return;
b2476490 657
4dff95dc
SB
658 if (--core->enable_count > 0)
659 return;
035a61c3 660
4dff95dc 661 trace_clk_disable(core);
035a61c3 662
4dff95dc
SB
663 if (core->ops->disable)
664 core->ops->disable(core->hw);
035a61c3 665
4dff95dc 666 trace_clk_disable_complete(core);
035a61c3 667
4dff95dc 668 clk_core_disable(core->parent);
035a61c3 669}
7ef3dcc8 670
4dff95dc
SB
671/**
672 * clk_disable - gate a clock
673 * @clk: the clk being gated
674 *
675 * clk_disable must not sleep, which differentiates it from clk_unprepare. In
676 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
677 * clk if the operation is fast and will never sleep. One example is a
678 * SoC-internal clk which is controlled via simple register writes. In the
679 * complex case a clk gate operation may require a fast and a slow part. It is
680 * for this reason that clk_unprepare and clk_disable are not mutually exclusive.
681 * In fact clk_disable must be called before clk_unprepare.
682 */
683void clk_disable(struct clk *clk)
b2476490 684{
4dff95dc
SB
685 unsigned long flags;
686
687 if (IS_ERR_OR_NULL(clk))
688 return;
689
690 flags = clk_enable_lock();
691 clk_core_disable(clk->core);
692 clk_enable_unlock(flags);
b2476490 693}
4dff95dc 694EXPORT_SYMBOL_GPL(clk_disable);
b2476490 695
4dff95dc 696static int clk_core_enable(struct clk_core *core)
b2476490 697{
4dff95dc 698 int ret = 0;
b2476490 699
a6334725
SB
700 lockdep_assert_held(&enable_lock);
701
4dff95dc
SB
702 if (!core)
703 return 0;
b2476490 704
4dff95dc
SB
705 if (WARN_ON(core->prepare_count == 0))
706 return -ESHUTDOWN;
b2476490 707
4dff95dc
SB
708 if (core->enable_count == 0) {
709 ret = clk_core_enable(core->parent);
b2476490 710
4dff95dc
SB
711 if (ret)
712 return ret;
b2476490 713
4dff95dc 714 trace_clk_enable(core);
035a61c3 715
4dff95dc
SB
716 if (core->ops->enable)
717 ret = core->ops->enable(core->hw);
035a61c3 718
4dff95dc
SB
719 trace_clk_enable_complete(core);
720
721 if (ret) {
722 clk_core_disable(core->parent);
723 return ret;
724 }
725 }
726
727 core->enable_count++;
728 return 0;
035a61c3 729}
b2476490 730
4dff95dc
SB
731/**
732 * clk_enable - ungate a clock
733 * @clk: the clk being ungated
734 *
735 * clk_enable must not sleep, which differentiates it from clk_prepare. In a
736 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
737 * if the operation will never sleep. One example is a SoC-internal clk which
738 * is controlled via simple register writes. In the complex case a clk ungate
739 * operation may require a fast and a slow part. It is for this reason that
740 * clk_enable and clk_prepare are not mutually exclusive. In fact clk_prepare
741 * must be called before clk_enable. Returns 0 on success, -EERROR
742 * otherwise.
743 */
744int clk_enable(struct clk *clk)
5279fc40 745{
4dff95dc
SB
746 unsigned long flags;
747 int ret;
748
749 if (!clk)
5279fc40
BB
750 return 0;
751
4dff95dc
SB
752 flags = clk_enable_lock();
753 ret = clk_core_enable(clk->core);
754 clk_enable_unlock(flags);
5279fc40 755
4dff95dc 756 return ret;
b2476490 757}
4dff95dc 758EXPORT_SYMBOL_GPL(clk_enable);
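/*
 * Example (illustrative consumer-side sketch): the prepare/enable pairing
 * described in the comments above, as a driver would typically use it.
 * The "uart" connection id and pdev are hypothetical and error handling is
 * abbreviated:
 *
 *	struct clk *clk = devm_clk_get(&pdev->dev, "uart");
 *
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *
 *	ret = clk_prepare(clk);		sleepable step, process context only
 *	if (ret)
 *		return ret;
 *
 *	ret = clk_enable(clk);		fast step, may run in atomic context
 *	if (ret) {
 *		clk_unprepare(clk);
 *		return ret;
 *	}
 *
 *	...
 *
 *	clk_disable(clk);		must precede clk_unprepare()
 *	clk_unprepare(clk);
 *
 * The clk_prepare_enable()/clk_disable_unprepare() helpers in linux/clk.h
 * combine the two steps when the caller can always sleep.
 */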
b2476490 759
0817b62c
BB
760static int clk_core_round_rate_nolock(struct clk_core *core,
761 struct clk_rate_request *req)
3d6ee287 762{
4dff95dc 763 struct clk_core *parent;
0817b62c 764 long rate;
4dff95dc
SB
765
766 lockdep_assert_held(&prepare_lock);
3d6ee287 767
d6968fca 768 if (!core)
4dff95dc 769 return 0;
3d6ee287 770
4dff95dc 771 parent = core->parent;
0817b62c
BB
772 if (parent) {
773 req->best_parent_hw = parent->hw;
774 req->best_parent_rate = parent->rate;
775 } else {
776 req->best_parent_hw = NULL;
777 req->best_parent_rate = 0;
778 }
3d6ee287 779
4dff95dc 780 if (core->ops->determine_rate) {
0817b62c
BB
781 return core->ops->determine_rate(core->hw, req);
782 } else if (core->ops->round_rate) {
783 rate = core->ops->round_rate(core->hw, req->rate,
784 &req->best_parent_rate);
785 if (rate < 0)
786 return rate;
787
788 req->rate = rate;
789 } else if (core->flags & CLK_SET_RATE_PARENT) {
790 return clk_core_round_rate_nolock(parent, req);
791 } else {
792 req->rate = core->rate;
793 }
794
795 return 0;
3d6ee287
UH
796}
797
4dff95dc
SB
798/**
799 * __clk_determine_rate - get the closest rate actually supported by a clock
800 * @hw: determine the rate of this clock
801 * @req: rate request to fill in; req->rate is the target rate, and
802 * req->min_rate and req->max_rate bound the range that the returned
803 * rate must fall within
804 *
6e5ab41b 805 * Useful for clk_ops such as .set_rate and .determine_rate.
4dff95dc 806 */
0817b62c 807int __clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
035a61c3 808{
0817b62c
BB
809 if (!hw) {
810 req->rate = 0;
4dff95dc 811 return 0;
0817b62c 812 }
035a61c3 813
0817b62c 814 return clk_core_round_rate_nolock(hw->core, req);
035a61c3 815}
4dff95dc 816EXPORT_SYMBOL_GPL(__clk_determine_rate);
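/*
 * Example (illustrative provider-side sketch): a .determine_rate callback
 * for a hypothetical power-of-two divider, written against the
 * clk_rate_request prototype introduced by this commit. The framework
 * fills in req->best_parent_rate before calling it (see
 * clk_core_round_rate_nolock above):
 *
 *	static int foo_div_determine_rate(struct clk_hw *hw,
 *					  struct clk_rate_request *req)
 *	{
 *		unsigned long parent_rate = req->best_parent_rate;
 *		unsigned long best = parent_rate;
 *		int div;
 *
 *		for (div = 1; div <= 8; div <<= 1)
 *			if (parent_rate / div >= req->rate)
 *				best = parent_rate / div;
 *
 *		if (best < req->min_rate || best > req->max_rate)
 *			return -EINVAL;
 *
 *		req->rate = best;
 *		return 0;
 *	}
 *
 * A clock that can also retune its parent would instead adjust the request
 * and pass it up with __clk_determine_rate(req->best_parent_hw, req).
 */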
035a61c3 817
4dff95dc
SB
818/**
819 * __clk_round_rate - round the given rate for a clk
820 * @clk: round the rate of this clock
821 * @rate: the rate which is to be rounded
822 *
6e5ab41b 823 * Useful for clk_ops such as .set_rate
4dff95dc
SB
824 */
825unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
b2476490 826{
0817b62c
BB
827 struct clk_rate_request req;
828 int ret;
b2476490 829
4dff95dc
SB
830 if (!clk)
831 return 0;
b2476490 832
0817b62c
BB
833 clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate);
834 req.rate = rate;
835
836 ret = clk_core_round_rate_nolock(clk->core, &req);
837 if (ret)
838 return 0;
b2476490 839
0817b62c 840 return req.rate;
b2476490 841}
4dff95dc 842EXPORT_SYMBOL_GPL(__clk_round_rate);
035a61c3 843
4dff95dc
SB
844/**
845 * clk_round_rate - round the given rate for a clk
846 * @clk: the clk for which we are rounding a rate
847 * @rate: the rate which is to be rounded
848 *
849 * Takes in a rate as input and rounds it to a rate that the clk can actually
850 * use which is then returned. If clk doesn't support round_rate operation
851 * then the parent rate is returned.
852 */
853long clk_round_rate(struct clk *clk, unsigned long rate)
035a61c3 854{
4dff95dc
SB
855 unsigned long ret;
856
035a61c3 857 if (!clk)
4dff95dc 858 return 0;
035a61c3 859
4dff95dc
SB
860 clk_prepare_lock();
861 ret = __clk_round_rate(clk, rate);
862 clk_prepare_unlock();
863
864 return ret;
035a61c3 865}
4dff95dc 866EXPORT_SYMBOL_GPL(clk_round_rate);
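/*
 * Example (illustrative): a consumer can use clk_round_rate() to check what
 * the hardware would actually deliver before committing with clk_set_rate().
 * The clock handle and the 48 MHz target are hypothetical:
 *
 *	long rounded = clk_round_rate(uart_clk, 48000000);
 *
 *	if (rounded > 0)
 *		ret = clk_set_rate(uart_clk, rounded);
 */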
b2476490 867
4dff95dc
SB
868/**
869 * __clk_notify - call clk notifier chain
870 * @core: clk that is changing rate
871 * @msg: clk notifier type (see include/linux/clk.h)
872 * @old_rate: old clk rate
873 * @new_rate: new clk rate
874 *
875 * Triggers a notifier call chain on the clk rate-change notification
876 * for 'clk'. Passes a pointer to the struct clk and the previous
877 * and current rates to the notifier callback. Intended to be called by
878 * internal clock code only. Returns NOTIFY_DONE from the last driver
879 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
880 * a driver returns that.
881 */
882static int __clk_notify(struct clk_core *core, unsigned long msg,
883 unsigned long old_rate, unsigned long new_rate)
b2476490 884{
4dff95dc
SB
885 struct clk_notifier *cn;
886 struct clk_notifier_data cnd;
887 int ret = NOTIFY_DONE;
b2476490 888
4dff95dc
SB
889 cnd.old_rate = old_rate;
890 cnd.new_rate = new_rate;
b2476490 891
4dff95dc
SB
892 list_for_each_entry(cn, &clk_notifier_list, node) {
893 if (cn->clk->core == core) {
894 cnd.clk = cn->clk;
895 ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
896 &cnd);
897 }
b2476490
MT
898 }
899
4dff95dc 900 return ret;
b2476490
MT
901}
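/*
 * Example (illustrative consumer-side sketch): a rate-change notifier as it
 * would be invoked through the chain above. Registration uses
 * clk_notifier_register() from linux/clk.h; the threshold and names are
 * hypothetical:
 *
 *	static int foo_clk_notifier_cb(struct notifier_block *nb,
 *				       unsigned long event, void *data)
 *	{
 *		struct clk_notifier_data *cnd = data;
 *
 *		switch (event) {
 *		case PRE_RATE_CHANGE:
 *			if (cnd->new_rate > 200000000)
 *				return NOTIFY_STOP;	reject the change
 *			break;
 *		case POST_RATE_CHANGE:
 *		case ABORT_RATE_CHANGE:
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_clk_notifier_cb,
 *	};
 *
 *	ret = clk_notifier_register(clk, &foo_nb);
 */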
902
4dff95dc
SB
903/**
904 * __clk_recalc_accuracies
905 * @core: first clk in the subtree
906 *
907 * Walks the subtree of clks starting with clk and recalculates accuracies as
908 * it goes. Note that if a clk does not implement the .recalc_accuracy
6e5ab41b 909 * callback then it is assumed that the clock will take on the accuracy of its
4dff95dc 910 * parent.
4dff95dc
SB
911 */
912static void __clk_recalc_accuracies(struct clk_core *core)
b2476490 913{
4dff95dc
SB
914 unsigned long parent_accuracy = 0;
915 struct clk_core *child;
b2476490 916
4dff95dc 917 lockdep_assert_held(&prepare_lock);
b2476490 918
4dff95dc
SB
919 if (core->parent)
920 parent_accuracy = core->parent->accuracy;
b2476490 921
4dff95dc
SB
922 if (core->ops->recalc_accuracy)
923 core->accuracy = core->ops->recalc_accuracy(core->hw,
924 parent_accuracy);
925 else
926 core->accuracy = parent_accuracy;
b2476490 927
4dff95dc
SB
928 hlist_for_each_entry(child, &core->children, child_node)
929 __clk_recalc_accuracies(child);
b2476490
MT
930}
931
4dff95dc 932static long clk_core_get_accuracy(struct clk_core *core)
e366fdd7 933{
4dff95dc 934 unsigned long accuracy;
15a02c1f 935
4dff95dc
SB
936 clk_prepare_lock();
937 if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE))
938 __clk_recalc_accuracies(core);
15a02c1f 939
4dff95dc
SB
940 accuracy = __clk_get_accuracy(core);
941 clk_prepare_unlock();
e366fdd7 942
4dff95dc 943 return accuracy;
e366fdd7 944}
15a02c1f 945
4dff95dc
SB
946/**
947 * clk_get_accuracy - return the accuracy of clk
948 * @clk: the clk whose accuracy is being returned
949 *
950 * Simply returns the cached accuracy of the clk, unless
951 * CLK_GET_ACCURACY_NOCACHE flag is set, which means a recalc_accuracy will be
952 * issued.
953 * If clk is NULL then returns 0.
954 */
955long clk_get_accuracy(struct clk *clk)
035a61c3 956{
4dff95dc
SB
957 if (!clk)
958 return 0;
035a61c3 959
4dff95dc 960 return clk_core_get_accuracy(clk->core);
035a61c3 961}
4dff95dc 962EXPORT_SYMBOL_GPL(clk_get_accuracy);
035a61c3 963
4dff95dc
SB
964static unsigned long clk_recalc(struct clk_core *core,
965 unsigned long parent_rate)
1c8e6004 966{
4dff95dc
SB
967 if (core->ops->recalc_rate)
968 return core->ops->recalc_rate(core->hw, parent_rate);
969 return parent_rate;
1c8e6004
TV
970}
971
4dff95dc
SB
972/**
973 * __clk_recalc_rates
974 * @core: first clk in the subtree
975 * @msg: notification type (see include/linux/clk.h)
976 *
977 * Walks the subtree of clks starting with clk and recalculates rates as it
978 * goes. Note that if a clk does not implement the .recalc_rate callback then
979 * it is assumed that the clock will take on the rate of its parent.
980 *
981 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
982 * if necessary.
15a02c1f 983 */
4dff95dc 984static void __clk_recalc_rates(struct clk_core *core, unsigned long msg)
15a02c1f 985{
4dff95dc
SB
986 unsigned long old_rate;
987 unsigned long parent_rate = 0;
988 struct clk_core *child;
e366fdd7 989
4dff95dc 990 lockdep_assert_held(&prepare_lock);
15a02c1f 991
4dff95dc 992 old_rate = core->rate;
b2476490 993
4dff95dc
SB
994 if (core->parent)
995 parent_rate = core->parent->rate;
b2476490 996
4dff95dc 997 core->rate = clk_recalc(core, parent_rate);
b2476490 998
4dff95dc
SB
999 /*
1000 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
1001 * & ABORT_RATE_CHANGE notifiers
1002 */
1003 if (core->notifier_count && msg)
1004 __clk_notify(core, msg, old_rate, core->rate);
b2476490 1005
4dff95dc
SB
1006 hlist_for_each_entry(child, &core->children, child_node)
1007 __clk_recalc_rates(child, msg);
1008}
b2476490 1009
4dff95dc
SB
1010static unsigned long clk_core_get_rate(struct clk_core *core)
1011{
1012 unsigned long rate;
dfc202ea 1013
4dff95dc 1014 clk_prepare_lock();
b2476490 1015
4dff95dc
SB
1016 if (core && (core->flags & CLK_GET_RATE_NOCACHE))
1017 __clk_recalc_rates(core, 0);
1018
1019 rate = clk_core_get_rate_nolock(core);
1020 clk_prepare_unlock();
1021
1022 return rate;
b2476490
MT
1023}
1024
1025/**
4dff95dc
SB
1026 * clk_get_rate - return the rate of clk
1027 * @clk: the clk whose rate is being returned
b2476490 1028 *
4dff95dc
SB
1029 * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
1030 * is set, which means a recalc_rate will be issued.
1031 * If clk is NULL then returns 0.
b2476490 1032 */
4dff95dc 1033unsigned long clk_get_rate(struct clk *clk)
b2476490 1034{
4dff95dc
SB
1035 if (!clk)
1036 return 0;
63589e92 1037
4dff95dc 1038 return clk_core_get_rate(clk->core);
b2476490 1039}
4dff95dc 1040EXPORT_SYMBOL_GPL(clk_get_rate);
b2476490 1041
4dff95dc
SB
1042static int clk_fetch_parent_index(struct clk_core *core,
1043 struct clk_core *parent)
b2476490 1044{
4dff95dc 1045 int i;
b2476490 1046
4dff95dc
SB
1047 if (!core->parents) {
1048 core->parents = kcalloc(core->num_parents,
1049 sizeof(struct clk *), GFP_KERNEL);
1050 if (!core->parents)
1051 return -ENOMEM;
1052 }
dfc202ea 1053
4dff95dc
SB
1054 /*
1055 * find index of new parent clock using cached parent ptrs,
1056 * or if not yet cached, use string name comparison and cache
1057 * them now to avoid future calls to clk_core_lookup.
1058 */
1059 for (i = 0; i < core->num_parents; i++) {
1060 if (core->parents[i] == parent)
1061 return i;
dfc202ea 1062
4dff95dc
SB
1063 if (core->parents[i])
1064 continue;
dfc202ea 1065
4dff95dc
SB
1066 if (!strcmp(core->parent_names[i], parent->name)) {
1067 core->parents[i] = clk_core_lookup(parent->name);
1068 return i;
b2476490
MT
1069 }
1070 }
1071
4dff95dc 1072 return -EINVAL;
b2476490
MT
1073}
1074
4dff95dc 1075static void clk_reparent(struct clk_core *core, struct clk_core *new_parent)
b2476490 1076{
4dff95dc 1077 hlist_del(&core->child_node);
035a61c3 1078
4dff95dc
SB
1079 if (new_parent) {
1080 /* avoid duplicate POST_RATE_CHANGE notifications */
1081 if (new_parent->new_child == core)
1082 new_parent->new_child = NULL;
b2476490 1083
4dff95dc
SB
1084 hlist_add_head(&core->child_node, &new_parent->children);
1085 } else {
1086 hlist_add_head(&core->child_node, &clk_orphan_list);
1087 }
dfc202ea 1088
4dff95dc 1089 core->parent = new_parent;
035a61c3
TV
1090}
1091
4dff95dc
SB
1092static struct clk_core *__clk_set_parent_before(struct clk_core *core,
1093 struct clk_core *parent)
b2476490
MT
1094{
1095 unsigned long flags;
4dff95dc 1096 struct clk_core *old_parent = core->parent;
b2476490 1097
4dff95dc
SB
1098 /*
1099 * Migrate prepare state between parents and prevent race with
1100 * clk_enable().
1101 *
1102 * If the clock is not prepared, then a race with
1103 * clk_enable/disable() is impossible since we already have the
1104 * prepare lock (future calls to clk_enable() need to be preceded by
1105 * a clk_prepare()).
1106 *
1107 * If the clock is prepared, migrate the prepared state to the new
1108 * parent and also protect against a race with clk_enable() by
1109 * forcing the clock and the new parent on. This ensures that all
1110 * future calls to clk_enable() are practically NOPs with respect to
1111 * hardware and software states.
1112 *
1113 * See also: Comment for clk_set_parent() below.
1114 */
1115 if (core->prepare_count) {
1116 clk_core_prepare(parent);
d2a5d46b 1117 flags = clk_enable_lock();
4dff95dc
SB
1118 clk_core_enable(parent);
1119 clk_core_enable(core);
d2a5d46b 1120 clk_enable_unlock(flags);
4dff95dc 1121 }
63589e92 1122
4dff95dc 1123 /* update the clk tree topology */
eab89f69 1124 flags = clk_enable_lock();
4dff95dc 1125 clk_reparent(core, parent);
eab89f69 1126 clk_enable_unlock(flags);
4dff95dc
SB
1127
1128 return old_parent;
b2476490 1129}
b2476490 1130
4dff95dc
SB
1131static void __clk_set_parent_after(struct clk_core *core,
1132 struct clk_core *parent,
1133 struct clk_core *old_parent)
b2476490 1134{
d2a5d46b
DA
1135 unsigned long flags;
1136
4dff95dc
SB
1137 /*
1138 * Finish the migration of prepare state and undo the changes done
1139 * for preventing a race with clk_enable().
1140 */
1141 if (core->prepare_count) {
d2a5d46b 1142 flags = clk_enable_lock();
4dff95dc
SB
1143 clk_core_disable(core);
1144 clk_core_disable(old_parent);
d2a5d46b 1145 clk_enable_unlock(flags);
4dff95dc
SB
1146 clk_core_unprepare(old_parent);
1147 }
1148}
b2476490 1149
4dff95dc
SB
1150static int __clk_set_parent(struct clk_core *core, struct clk_core *parent,
1151 u8 p_index)
1152{
1153 unsigned long flags;
1154 int ret = 0;
1155 struct clk_core *old_parent;
b2476490 1156
4dff95dc 1157 old_parent = __clk_set_parent_before(core, parent);
b2476490 1158
4dff95dc 1159 trace_clk_set_parent(core, parent);
b2476490 1160
4dff95dc
SB
1161 /* change clock input source */
1162 if (parent && core->ops->set_parent)
1163 ret = core->ops->set_parent(core->hw, p_index);
dfc202ea 1164
4dff95dc 1165 trace_clk_set_parent_complete(core, parent);
dfc202ea 1166
4dff95dc
SB
1167 if (ret) {
1168 flags = clk_enable_lock();
1169 clk_reparent(core, old_parent);
1170 clk_enable_unlock(flags);
dfc202ea 1171
4dff95dc 1172 if (core->prepare_count) {
d2a5d46b 1173 flags = clk_enable_lock();
4dff95dc
SB
1174 clk_core_disable(core);
1175 clk_core_disable(parent);
d2a5d46b 1176 clk_enable_unlock(flags);
4dff95dc 1177 clk_core_unprepare(parent);
b2476490 1178 }
4dff95dc 1179 return ret;
b2476490
MT
1180 }
1181
4dff95dc
SB
1182 __clk_set_parent_after(core, parent, old_parent);
1183
b2476490
MT
1184 return 0;
1185}
1186
1187/**
4dff95dc
SB
1188 * __clk_speculate_rates
1189 * @core: first clk in the subtree
1190 * @parent_rate: the "future" rate of clk's parent
b2476490 1191 *
4dff95dc
SB
1192 * Walks the subtree of clks starting with clk, speculating rates as it
1193 * goes and firing off PRE_RATE_CHANGE notifications as necessary.
1194 *
1195 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
1196 * pre-rate change notifications and returns early if no clks in the
1197 * subtree have subscribed to the notifications. Note that if a clk does not
1198 * implement the .recalc_rate callback then it is assumed that the clock will
1199 * take on the rate of its parent.
b2476490 1200 */
4dff95dc
SB
1201static int __clk_speculate_rates(struct clk_core *core,
1202 unsigned long parent_rate)
b2476490 1203{
4dff95dc
SB
1204 struct clk_core *child;
1205 unsigned long new_rate;
1206 int ret = NOTIFY_DONE;
b2476490 1207
4dff95dc 1208 lockdep_assert_held(&prepare_lock);
864e160a 1209
4dff95dc
SB
1210 new_rate = clk_recalc(core, parent_rate);
1211
1212 /* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
1213 if (core->notifier_count)
1214 ret = __clk_notify(core, PRE_RATE_CHANGE, core->rate, new_rate);
1215
1216 if (ret & NOTIFY_STOP_MASK) {
1217 pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n",
1218 __func__, core->name, ret);
1219 goto out;
1220 }
1221
1222 hlist_for_each_entry(child, &core->children, child_node) {
1223 ret = __clk_speculate_rates(child, new_rate);
1224 if (ret & NOTIFY_STOP_MASK)
1225 break;
1226 }
b2476490 1227
4dff95dc 1228out:
b2476490
MT
1229 return ret;
1230}
b2476490 1231
4dff95dc
SB
1232static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate,
1233 struct clk_core *new_parent, u8 p_index)
b2476490 1234{
4dff95dc 1235 struct clk_core *child;
b2476490 1236
4dff95dc
SB
1237 core->new_rate = new_rate;
1238 core->new_parent = new_parent;
1239 core->new_parent_index = p_index;
1240 /* include clk in new parent's PRE_RATE_CHANGE notifications */
1241 core->new_child = NULL;
1242 if (new_parent && new_parent != core->parent)
1243 new_parent->new_child = core;
496eadf8 1244
4dff95dc
SB
1245 hlist_for_each_entry(child, &core->children, child_node) {
1246 child->new_rate = clk_recalc(child, new_rate);
1247 clk_calc_subtree(child, child->new_rate, NULL, 0);
1248 }
1249}
b2476490 1250
4dff95dc
SB
1251/*
1252 * calculate the new rates returning the topmost clock that has to be
1253 * changed.
1254 */
1255static struct clk_core *clk_calc_new_rates(struct clk_core *core,
1256 unsigned long rate)
1257{
1258 struct clk_core *top = core;
1259 struct clk_core *old_parent, *parent;
4dff95dc
SB
1260 unsigned long best_parent_rate = 0;
1261 unsigned long new_rate;
1262 unsigned long min_rate;
1263 unsigned long max_rate;
1264 int p_index = 0;
1265 long ret;
1266
1267 /* sanity */
1268 if (IS_ERR_OR_NULL(core))
1269 return NULL;
1270
1271 /* save parent rate, if it exists */
1272 parent = old_parent = core->parent;
71472c0c 1273 if (parent)
4dff95dc 1274 best_parent_rate = parent->rate;
71472c0c 1275
4dff95dc
SB
1276 clk_core_get_boundaries(core, &min_rate, &max_rate);
1277
1278 /* find the closest rate and parent clk/rate */
d6968fca 1279 if (core->ops->determine_rate) {
0817b62c
BB
1280 struct clk_rate_request req;
1281
1282 req.rate = rate;
1283 req.min_rate = min_rate;
1284 req.max_rate = max_rate;
1285 if (parent) {
1286 req.best_parent_hw = parent->hw;
1287 req.best_parent_rate = parent->rate;
1288 } else {
1289 req.best_parent_hw = NULL;
1290 req.best_parent_rate = 0;
1291 }
1292
1293 ret = core->ops->determine_rate(core->hw, &req);
4dff95dc
SB
1294 if (ret < 0)
1295 return NULL;
1c8e6004 1296
0817b62c
BB
1297 best_parent_rate = req.best_parent_rate;
1298 new_rate = req.rate;
1299 parent = req.best_parent_hw ? req.best_parent_hw->core : NULL;
4dff95dc
SB
1300 } else if (core->ops->round_rate) {
1301 ret = core->ops->round_rate(core->hw, rate,
0817b62c 1302 &best_parent_rate);
4dff95dc
SB
1303 if (ret < 0)
1304 return NULL;
035a61c3 1305
4dff95dc
SB
1306 new_rate = ret;
1307 if (new_rate < min_rate || new_rate > max_rate)
1308 return NULL;
1309 } else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) {
1310 /* pass-through clock without adjustable parent */
1311 core->new_rate = core->rate;
1312 return NULL;
1313 } else {
1314 /* pass-through clock with adjustable parent */
1315 top = clk_calc_new_rates(parent, rate);
1316 new_rate = parent->new_rate;
1317 goto out;
1318 }
1c8e6004 1319
4dff95dc
SB
1320 /* some clocks must be gated to change parent */
1321 if (parent != old_parent &&
1322 (core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
1323 pr_debug("%s: %s not gated but wants to reparent\n",
1324 __func__, core->name);
1325 return NULL;
1326 }
b2476490 1327
4dff95dc
SB
1328 /* try finding the new parent index */
1329 if (parent && core->num_parents > 1) {
1330 p_index = clk_fetch_parent_index(core, parent);
1331 if (p_index < 0) {
1332 pr_debug("%s: clk %s can not be parent of clk %s\n",
1333 __func__, parent->name, core->name);
1334 return NULL;
1335 }
1336 }
b2476490 1337
4dff95dc
SB
1338 if ((core->flags & CLK_SET_RATE_PARENT) && parent &&
1339 best_parent_rate != parent->rate)
1340 top = clk_calc_new_rates(parent, best_parent_rate);
035a61c3 1341
4dff95dc
SB
1342out:
1343 clk_calc_subtree(core, new_rate, parent, p_index);
b2476490 1344
4dff95dc 1345 return top;
b2476490 1346}
b2476490 1347
4dff95dc
SB
1348/*
1349 * Notify about rate changes in a subtree. Always walk down the whole tree
1350 * so that in case of an error we can walk down the whole tree again and
1351 * abort the change.
b2476490 1352 */
4dff95dc
SB
1353static struct clk_core *clk_propagate_rate_change(struct clk_core *core,
1354 unsigned long event)
b2476490 1355{
4dff95dc 1356 struct clk_core *child, *tmp_clk, *fail_clk = NULL;
b2476490
MT
1357 int ret = NOTIFY_DONE;
1358
4dff95dc
SB
1359 if (core->rate == core->new_rate)
1360 return NULL;
b2476490 1361
4dff95dc
SB
1362 if (core->notifier_count) {
1363 ret = __clk_notify(core, event, core->rate, core->new_rate);
1364 if (ret & NOTIFY_STOP_MASK)
1365 fail_clk = core;
b2476490
MT
1366 }
1367
4dff95dc
SB
1368 hlist_for_each_entry(child, &core->children, child_node) {
1369 /* Skip children who will be reparented to another clock */
1370 if (child->new_parent && child->new_parent != core)
1371 continue;
1372 tmp_clk = clk_propagate_rate_change(child, event);
1373 if (tmp_clk)
1374 fail_clk = tmp_clk;
1375 }
5279fc40 1376
4dff95dc
SB
1377 /* handle the new child who might not be in core->children yet */
1378 if (core->new_child) {
1379 tmp_clk = clk_propagate_rate_change(core->new_child, event);
1380 if (tmp_clk)
1381 fail_clk = tmp_clk;
1382 }
5279fc40 1383
4dff95dc 1384 return fail_clk;
5279fc40
BB
1385}
1386
4dff95dc
SB
1387/*
1388 * walk down a subtree and set the new rates notifying the rate
1389 * change on the way
1390 */
1391static void clk_change_rate(struct clk_core *core)
035a61c3 1392{
4dff95dc
SB
1393 struct clk_core *child;
1394 struct hlist_node *tmp;
1395 unsigned long old_rate;
1396 unsigned long best_parent_rate = 0;
1397 bool skip_set_rate = false;
1398 struct clk_core *old_parent;
035a61c3 1399
4dff95dc 1400 old_rate = core->rate;
035a61c3 1401
4dff95dc
SB
1402 if (core->new_parent)
1403 best_parent_rate = core->new_parent->rate;
1404 else if (core->parent)
1405 best_parent_rate = core->parent->rate;
035a61c3 1406
4dff95dc
SB
1407 if (core->new_parent && core->new_parent != core->parent) {
1408 old_parent = __clk_set_parent_before(core, core->new_parent);
1409 trace_clk_set_parent(core, core->new_parent);
5279fc40 1410
4dff95dc
SB
1411 if (core->ops->set_rate_and_parent) {
1412 skip_set_rate = true;
1413 core->ops->set_rate_and_parent(core->hw, core->new_rate,
1414 best_parent_rate,
1415 core->new_parent_index);
1416 } else if (core->ops->set_parent) {
1417 core->ops->set_parent(core->hw, core->new_parent_index);
1418 }
5279fc40 1419
4dff95dc
SB
1420 trace_clk_set_parent_complete(core, core->new_parent);
1421 __clk_set_parent_after(core, core->new_parent, old_parent);
1422 }
8f2c2db1 1423
4dff95dc 1424 trace_clk_set_rate(core, core->new_rate);
b2476490 1425
4dff95dc
SB
1426 if (!skip_set_rate && core->ops->set_rate)
1427 core->ops->set_rate(core->hw, core->new_rate, best_parent_rate);
496eadf8 1428
4dff95dc 1429 trace_clk_set_rate_complete(core, core->new_rate);
b2476490 1430
4dff95dc 1431 core->rate = clk_recalc(core, best_parent_rate);
b2476490 1432
4dff95dc
SB
1433 if (core->notifier_count && old_rate != core->rate)
1434 __clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate);
b2476490 1435
85e88fab
MT
1436 if (core->flags & CLK_RECALC_NEW_RATES)
1437 (void)clk_calc_new_rates(core, core->new_rate);
d8d91987 1438
b2476490 1439 /*
4dff95dc
SB
1440 * Use safe iteration, as change_rate can actually swap parents
1441 * for certain clock types.
b2476490 1442 */
4dff95dc
SB
1443 hlist_for_each_entry_safe(child, tmp, &core->children, child_node) {
1444 /* Skip children who will be reparented to another clock */
1445 if (child->new_parent && child->new_parent != core)
1446 continue;
1447 clk_change_rate(child);
1448 }
b2476490 1449
4dff95dc
SB
1450 /* handle the new child who might not be in core->children yet */
1451 if (core->new_child)
1452 clk_change_rate(core->new_child);
b2476490
MT
1453}
1454
4dff95dc
SB
1455static int clk_core_set_rate_nolock(struct clk_core *core,
1456 unsigned long req_rate)
a093bde2 1457{
4dff95dc
SB
1458 struct clk_core *top, *fail_clk;
1459 unsigned long rate = req_rate;
1460 int ret = 0;
a093bde2 1461
4dff95dc
SB
1462 if (!core)
1463 return 0;
a093bde2 1464
4dff95dc
SB
1465 /* bail early if nothing to do */
1466 if (rate == clk_core_get_rate_nolock(core))
1467 return 0;
a093bde2 1468
4dff95dc
SB
1469 if ((core->flags & CLK_SET_RATE_GATE) && core->prepare_count)
1470 return -EBUSY;
a093bde2 1471
4dff95dc
SB
1472 /* calculate new rates and get the topmost changed clock */
1473 top = clk_calc_new_rates(core, rate);
1474 if (!top)
1475 return -EINVAL;
1476
1477 /* notify that we are about to change rates */
1478 fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
1479 if (fail_clk) {
1480 pr_debug("%s: failed to set %s rate\n", __func__,
1481 fail_clk->name);
1482 clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
1483 return -EBUSY;
1484 }
1485
1486 /* change the rates */
1487 clk_change_rate(top);
1488
1489 core->req_rate = req_rate;
1490
1491 return ret;
a093bde2 1492}
035a61c3
TV
1493
1494/**
4dff95dc
SB
1495 * clk_set_rate - specify a new rate for clk
1496 * @clk: the clk whose rate is being changed
1497 * @rate: the new rate for clk
035a61c3 1498 *
4dff95dc
SB
1499 * In the simplest case clk_set_rate will only adjust the rate of clk.
1500 *
1501 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
1502 * propagate up to clk's parent; whether or not this happens depends on the
1503 * outcome of clk's .round_rate implementation. If *parent_rate is unchanged
1504 * after calling .round_rate then upstream parent propagation is ignored. If
1505 * *parent_rate comes back with a new rate for clk's parent then we propagate
1506 * up to clk's parent and set its rate. Upward propagation will continue
1507 * until either a clk does not support the CLK_SET_RATE_PARENT flag or
1508 * .round_rate stops requesting changes to clk's parent_rate.
1509 *
1510 * Rate changes are accomplished via tree traversal that also recalculates the
1511 * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
1512 *
1513 * Returns 0 on success, -EERROR otherwise.
035a61c3 1514 */
4dff95dc 1515int clk_set_rate(struct clk *clk, unsigned long rate)
035a61c3 1516{
4dff95dc
SB
1517 int ret;
1518
035a61c3
TV
1519 if (!clk)
1520 return 0;
1521
4dff95dc
SB
1522 /* prevent racing with updates to the clock topology */
1523 clk_prepare_lock();
da0f0b2c 1524
4dff95dc 1525 ret = clk_core_set_rate_nolock(clk->core, rate);
da0f0b2c 1526
4dff95dc 1527 clk_prepare_unlock();
4935b22c 1528
4dff95dc 1529 return ret;
4935b22c 1530}
4dff95dc 1531EXPORT_SYMBOL_GPL(clk_set_rate);
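/*
 * Example (illustrative): the CLK_SET_RATE_PARENT behaviour described above,
 * seen from the provider side. A hypothetical gate registered with that flag
 * lets clk_set_rate() on the gate walk up and retune its parent PLL:
 *
 *	static const char *foo_gate_parents[] = { "foo_pll" };
 *
 *	static struct clk_init_data foo_gate_init = {
 *		.name		= "foo_gate",
 *		.ops		= &foo_gate_ops,
 *		.parent_names	= foo_gate_parents,
 *		.num_parents	= 1,
 *		.flags		= CLK_SET_RATE_PARENT,
 *	};
 *
 * A consumer calling clk_set_rate(gate_clk, 300000000) then ends up changing
 * "foo_pll" whenever the gate's .round_rate/.determine_rate requests a new
 * parent rate, exactly as the walk in clk_calc_new_rates() implements.
 */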
4935b22c 1532
4dff95dc
SB
1533/**
1534 * clk_set_rate_range - set a rate range for a clock source
1535 * @clk: clock source
1536 * @min: desired minimum clock rate in Hz, inclusive
1537 * @max: desired maximum clock rate in Hz, inclusive
1538 *
1539 * Returns success (0) or negative errno.
1540 */
1541int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
4935b22c 1542{
4dff95dc 1543 int ret = 0;
4935b22c 1544
4dff95dc
SB
1545 if (!clk)
1546 return 0;
903efc55 1547
4dff95dc
SB
1548 if (min > max) {
1549 pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n",
1550 __func__, clk->core->name, clk->dev_id, clk->con_id,
1551 min, max);
1552 return -EINVAL;
903efc55 1553 }
4935b22c 1554
4dff95dc 1555 clk_prepare_lock();
4935b22c 1556
4dff95dc
SB
1557 if (min != clk->min_rate || max != clk->max_rate) {
1558 clk->min_rate = min;
1559 clk->max_rate = max;
1560 ret = clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
4935b22c
JH
1561 }
1562
4dff95dc 1563 clk_prepare_unlock();
4935b22c 1564
4dff95dc 1565 return ret;
3fa2252b 1566}
4dff95dc 1567EXPORT_SYMBOL_GPL(clk_set_rate_range);
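/*
 * Example (illustrative): a consumer constraining a clock to a window; the
 * frequencies are hypothetical. Per-user min/max values are aggregated in
 * clk_core_get_boundaries() above, so several consumers can hold different
 * ranges on the same clock at once:
 *
 *	ret = clk_set_rate_range(clk, 100000000, 400000000);
 *
 * or, equivalently, one bound at a time:
 *
 *	ret = clk_set_min_rate(clk, 100000000);
 *	ret = clk_set_max_rate(clk, 400000000);
 */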
3fa2252b 1568
4dff95dc
SB
1569/**
1570 * clk_set_min_rate - set a minimum clock rate for a clock source
1571 * @clk: clock source
1572 * @rate: desired minimum clock rate in Hz, inclusive
1573 *
1574 * Returns success (0) or negative errno.
1575 */
1576int clk_set_min_rate(struct clk *clk, unsigned long rate)
3fa2252b 1577{
4dff95dc
SB
1578 if (!clk)
1579 return 0;
1580
1581 return clk_set_rate_range(clk, rate, clk->max_rate);
3fa2252b 1582}
4dff95dc 1583EXPORT_SYMBOL_GPL(clk_set_min_rate);
3fa2252b 1584
4dff95dc
SB
1585/**
1586 * clk_set_max_rate - set a maximum clock rate for a clock source
1587 * @clk: clock source
1588 * @rate: desired maximum clock rate in Hz, inclusive
1589 *
1590 * Returns success (0) or negative errno.
1591 */
1592int clk_set_max_rate(struct clk *clk, unsigned long rate)
3fa2252b 1593{
4dff95dc
SB
1594 if (!clk)
1595 return 0;
4935b22c 1596
4dff95dc 1597 return clk_set_rate_range(clk, clk->min_rate, rate);
4935b22c 1598}
4dff95dc 1599EXPORT_SYMBOL_GPL(clk_set_max_rate);
4935b22c 1600
b2476490 1601/**
4dff95dc
SB
1602 * clk_get_parent - return the parent of a clk
1603 * @clk: the clk whose parent gets returned
b2476490 1604 *
4dff95dc 1605 * Simply returns clk->parent. Returns NULL if clk is NULL.
b2476490 1606 */
4dff95dc 1607struct clk *clk_get_parent(struct clk *clk)
b2476490 1608{
4dff95dc 1609 struct clk *parent;
b2476490 1610
4dff95dc
SB
1611 clk_prepare_lock();
1612 parent = __clk_get_parent(clk);
1613 clk_prepare_unlock();
496eadf8 1614
4dff95dc
SB
1615 return parent;
1616}
1617EXPORT_SYMBOL_GPL(clk_get_parent);
b2476490 1618
4dff95dc
SB
1619/*
1620 * .get_parent is mandatory for clocks with multiple possible parents. It is
1621 * optional for single-parent clocks. Always call .get_parent if it is
1622 * available and WARN if it is missing for multi-parent clocks.
1623 *
1624 * For single-parent clocks without .get_parent, first check to see if the
1625 * .parents array exists, and if so use it to avoid an expensive tree
1626 * traversal. If .parents does not exist then walk the tree.
1627 */
1628static struct clk_core *__clk_init_parent(struct clk_core *core)
1629{
1630 struct clk_core *ret = NULL;
1631 u8 index;
b2476490 1632
4dff95dc
SB
1633 /* handle the trivial cases */
1634
1635 if (!core->num_parents)
b2476490
MT
1636 goto out;
1637
4dff95dc
SB
1638 if (core->num_parents == 1) {
1639 if (IS_ERR_OR_NULL(core->parent))
1640 core->parent = clk_core_lookup(core->parent_names[0]);
1641 ret = core->parent;
1642 goto out;
b2476490
MT
1643 }
1644
4dff95dc
SB
1645 if (!core->ops->get_parent) {
1646 WARN(!core->ops->get_parent,
1647 "%s: multi-parent clocks must implement .get_parent\n",
1648 __func__);
1649 goto out;
1650 };
1651
1652 /*
1653 * Do our best to cache parent clocks in core->parents. This prevents
1654 * unnecessary and expensive lookups. We don't set core->parent here;
1655 * that is done by the calling function.
1656 */
1657
1658 index = core->ops->get_parent(core->hw);
1659
1660 if (!core->parents)
1661 core->parents =
1662 kcalloc(core->num_parents, sizeof(struct clk *),
1663 GFP_KERNEL);
1664
1665 ret = clk_core_get_parent_by_index(core, index);
1666
b2476490
MT
1667out:
1668 return ret;
1669}
1670
4dff95dc
SB
1671static void clk_core_reparent(struct clk_core *core,
1672 struct clk_core *new_parent)
b2476490 1673{
4dff95dc
SB
1674 clk_reparent(core, new_parent);
1675 __clk_recalc_accuracies(core);
1676 __clk_recalc_rates(core, POST_RATE_CHANGE);
b2476490
MT
1677}
1678
42c86547
TV
1679void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent)
1680{
1681 if (!hw)
1682 return;
1683
1684 clk_core_reparent(hw->core, !new_parent ? NULL : new_parent->core);
1685}
1686
4dff95dc
SB
1687/**
1688 * clk_has_parent - check if a clock is a possible parent for another
1689 * @clk: clock source
1690 * @parent: parent clock source
1691 *
1692 * This function can be used in drivers that need to check that a clock can be
1693 * the parent of another without actually changing the parent.
1694 *
1695 * Returns true if @parent is a possible parent for @clk, false otherwise.
b2476490 1696 */
4dff95dc 1697bool clk_has_parent(struct clk *clk, struct clk *parent)
b2476490 1698{
4dff95dc
SB
1699 struct clk_core *core, *parent_core;
1700 unsigned int i;
b2476490 1701
4dff95dc
SB
1702 /* NULL clocks should be nops, so return success if either is NULL. */
1703 if (!clk || !parent)
1704 return true;
7452b219 1705
4dff95dc
SB
1706 core = clk->core;
1707 parent_core = parent->core;
71472c0c 1708
4dff95dc
SB
1709 /* Optimize for the case where the parent is already the parent. */
1710 if (core->parent == parent_core)
1711 return true;
1c8e6004 1712
4dff95dc
SB
1713 for (i = 0; i < core->num_parents; i++)
1714 if (strcmp(core->parent_names[i], parent_core->name) == 0)
1715 return true;
03bc10ab 1716
4dff95dc
SB
1717 return false;
1718}
1719EXPORT_SYMBOL_GPL(clk_has_parent);
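/*
 * Example (illustrative): probing which of two candidate parents is usable
 * before attempting a switch; the clock names are hypothetical and error
 * handling is omitted:
 *
 *	struct clk *fast = devm_clk_get(dev, "pll_fast");
 *	struct clk *slow = devm_clk_get(dev, "osc_slow");
 *	struct clk *target;
 *
 *	if (clk_has_parent(mux_clk, fast))
 *		target = fast;
 *	else if (clk_has_parent(mux_clk, slow))
 *		target = slow;
 *	else
 *		return -EINVAL;
 */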
03bc10ab 1720
4dff95dc
SB
1721static int clk_core_set_parent(struct clk_core *core, struct clk_core *parent)
1722{
1723 int ret = 0;
1724 int p_index = 0;
1725 unsigned long p_rate = 0;
1726
1727 if (!core)
1728 return 0;
1729
1730 /* prevent racing with updates to the clock topology */
1731 clk_prepare_lock();
1732
1733 if (core->parent == parent)
1734 goto out;
1735
1736 /* verify ops for multi-parent clks */
1737 if ((core->num_parents > 1) && (!core->ops->set_parent)) {
1738 ret = -ENOSYS;
63f5c3b2 1739 goto out;
7452b219
MT
1740 }
1741
4dff95dc
SB
1742 /* check that we are allowed to re-parent if the clock is in use */
1743 if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
1744 ret = -EBUSY;
1745 goto out;
b2476490
MT
1746 }
1747
71472c0c 1748 /* try finding the new parent index */
4dff95dc 1749 if (parent) {
d6968fca 1750 p_index = clk_fetch_parent_index(core, parent);
4dff95dc 1751 p_rate = parent->rate;
f1c8b2ed 1752 if (p_index < 0) {
71472c0c 1753 pr_debug("%s: clk %s can not be parent of clk %s\n",
4dff95dc
SB
1754 __func__, parent->name, core->name);
1755 ret = p_index;
1756 goto out;
71472c0c 1757 }
b2476490
MT
1758 }
1759
4dff95dc
SB
1760 /* propagate PRE_RATE_CHANGE notifications */
1761 ret = __clk_speculate_rates(core, p_rate);
b2476490 1762
4dff95dc
SB
1763 /* abort if a driver objects */
1764 if (ret & NOTIFY_STOP_MASK)
1765 goto out;
b2476490 1766
4dff95dc
SB
1767 /* do the re-parent */
1768 ret = __clk_set_parent(core, parent, p_index);
b2476490 1769
4dff95dc
SB
1770 /* propagate rate and accuracy recalculation accordingly */
1771 if (ret) {
1772 __clk_recalc_rates(core, ABORT_RATE_CHANGE);
1773 } else {
1774 __clk_recalc_rates(core, POST_RATE_CHANGE);
1775 __clk_recalc_accuracies(core);
b2476490
MT
1776 }
1777
4dff95dc
SB
1778out:
1779 clk_prepare_unlock();
71472c0c 1780
4dff95dc
SB
1781 return ret;
1782}
b2476490 1783
4dff95dc
SB
1784/**
1785 * clk_set_parent - switch the parent of a mux clk
1786 * @clk: the mux clk whose input we are switching
1787 * @parent: the new input to clk
1788 *
1789 * Re-parent clk to use parent as its new input source. If clk is in
1790 * prepared state, the clk will get enabled for the duration of this call. If
1791 * that's not acceptable for a specific clk (Eg: the consumer can't handle
1792 * that, the reparenting is glitchy in hardware, etc), use the
1793 * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
1794 *
1795 * After successfully changing clk's parent clk_set_parent will update the
1796 * clk topology, sysfs topology and propagate rate recalculation via
1797 * __clk_recalc_rates.
1798 *
1799 * Returns 0 on success, a negative error code otherwise.
1800 */
1801int clk_set_parent(struct clk *clk, struct clk *parent)
1802{
1803 if (!clk)
1804 return 0;
1805
1806 return clk_core_set_parent(clk->core, parent ? parent->core : NULL);
b2476490 1807}
4dff95dc 1808EXPORT_SYMBOL_GPL(clk_set_parent);
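/*
 * Illustrative example (editor's sketch, not part of the original source):
 * re-parenting a mux from consumer code. If the clock was registered with
 * CLK_SET_PARENT_GATE, the consumer must unprepare it first. The clock
 * handles are hypothetical.
 *
 *	clk_disable_unprepare(mux_clk);
 *	ret = clk_set_parent(mux_clk, osc_clk);
 *	if (!ret)
 *		ret = clk_prepare_enable(mux_clk);
 */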
b2476490 1809
4dff95dc
SB
1810/**
1811 * clk_set_phase - adjust the phase shift of a clock signal
1812 * @clk: clock signal source
1813 * @degrees: number of degrees the signal is shifted
1814 *
1815 * Shifts the phase of a clock signal by the specified
1816 * degrees. Returns 0 on success, a negative error code otherwise.
1817 *
1818 * This function makes no distinction about the input or reference
1819 * signal that we adjust the clock signal phase against. For example,
1820 * with phase locked-loop clock signal generators we may shift phase with
1821 * respect to feedback clock signal input, but for other cases the
1822 * clock phase may be shifted with respect to some other, unspecified
1823 * signal.
1824 *
1825 * Additionally the concept of phase shift does not propagate through
1826 * the clock tree hierarchy, which sets it apart from clock rates and
1827 * clock accuracy. A parent clock phase attribute does not have an
1828 * impact on the phase attribute of a child clock.
b2476490 1829 */
4dff95dc 1830int clk_set_phase(struct clk *clk, int degrees)
b2476490 1831{
4dff95dc 1832 int ret = -EINVAL;
b2476490 1833
4dff95dc
SB
1834 if (!clk)
1835 return 0;
b2476490 1836
4dff95dc
SB
1837 /* sanity check degrees */
1838 degrees %= 360;
1839 if (degrees < 0)
1840 degrees += 360;
bf47b4fd 1841
4dff95dc 1842 clk_prepare_lock();
3fa2252b 1843
4dff95dc 1844 trace_clk_set_phase(clk->core, degrees);
3fa2252b 1845
4dff95dc
SB
1846 if (clk->core->ops->set_phase)
1847 ret = clk->core->ops->set_phase(clk->core->hw, degrees);
3fa2252b 1848
4dff95dc 1849 trace_clk_set_phase_complete(clk->core, degrees);
dfc202ea 1850
4dff95dc
SB
1851 if (!ret)
1852 clk->core->phase = degrees;
b2476490 1853
4dff95dc 1854 clk_prepare_unlock();
dfc202ea 1855
4dff95dc
SB
1856 return ret;
1857}
1858EXPORT_SYMBOL_GPL(clk_set_phase);
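/*
 * Illustrative example (editor's sketch, not part of the original source):
 * shifting the sample clock of a hypothetical MMC controller by 90 degrees
 * and reading the programmed phase back.
 *
 *	ret = clk_set_phase(sample_clk, 90);
 *	if (!ret)
 *		dev_dbg(dev, "sample phase: %d\n", clk_get_phase(sample_clk));
 */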
b2476490 1859
4dff95dc
SB
1860static int clk_core_get_phase(struct clk_core *core)
1861{
1862 int ret;
b2476490 1863
4dff95dc
SB
1864 clk_prepare_lock();
1865 ret = core->phase;
1866 clk_prepare_unlock();
71472c0c 1867
4dff95dc 1868 return ret;
b2476490
MT
1869}
1870
4dff95dc
SB
1871/**
1872 * clk_get_phase - return the phase shift of a clock signal
1873 * @clk: clock signal source
1874 *
1875 * Returns the phase shift of a clock node in degrees, otherwise returns
1876 * a negative error code.
1877 */
1878int clk_get_phase(struct clk *clk)
1c8e6004 1879{
4dff95dc 1880 if (!clk)
1c8e6004
TV
1881 return 0;
1882
4dff95dc
SB
1883 return clk_core_get_phase(clk->core);
1884}
1885EXPORT_SYMBOL_GPL(clk_get_phase);
1c8e6004 1886
4dff95dc
SB
1887/**
1888 * clk_is_match - check if two clk's point to the same hardware clock
1889 * @p: clk compared against q
1890 * @q: clk compared against p
1891 *
1892 * Returns true if the two struct clk pointers both point to the same hardware
1893 * clock node. Put differently, returns true if struct clk *p and struct clk *q
1894 * share the same struct clk_core object.
1895 *
1896 * Returns false otherwise. Note that two NULL clks are treated as matching.
1897 */
1898bool clk_is_match(const struct clk *p, const struct clk *q)
1899{
1900 /* trivial case: identical struct clk's or both NULL */
1901 if (p == q)
1902 return true;
1c8e6004 1903
4dff95dc
SB
1904 /* true if clk->core pointers match. Avoid derefing garbage */
1905 if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q))
1906 if (p->core == q->core)
1907 return true;
1c8e6004 1908
4dff95dc
SB
1909 return false;
1910}
1911EXPORT_SYMBOL_GPL(clk_is_match);
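/*
 * Illustrative example (editor's sketch, not part of the original source):
 * two handles obtained through different lookup paths may still refer to
 * the same hardware clock; clk_is_match() detects this safely.
 *
 *	struct clk *a = clk_get(dev, "bus");
 *	struct clk *b = of_clk_get(dev->of_node, 0);
 *
 *	if (clk_is_match(a, b))
 *		dev_dbg(dev, "both handles share one clk_core\n");
 */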
1c8e6004 1912
4dff95dc 1913/*** debugfs support ***/
1c8e6004 1914
4dff95dc
SB
1915#ifdef CONFIG_DEBUG_FS
1916#include <linux/debugfs.h>
1c8e6004 1917
4dff95dc
SB
1918static struct dentry *rootdir;
1919static int inited = 0;
1920static DEFINE_MUTEX(clk_debug_lock);
1921static HLIST_HEAD(clk_debug_list);
1c8e6004 1922
4dff95dc
SB
1923static struct hlist_head *all_lists[] = {
1924 &clk_root_list,
1925 &clk_orphan_list,
1926 NULL,
1927};
1928
1929static struct hlist_head *orphan_list[] = {
1930 &clk_orphan_list,
1931 NULL,
1932};
1933
1934static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
1935 int level)
b2476490 1936{
4dff95dc
SB
1937 if (!c)
1938 return;
b2476490 1939
4dff95dc
SB
1940 seq_printf(s, "%*s%-*s %11d %12d %11lu %10lu %-3d\n",
1941 level * 3 + 1, "",
1942 30 - level * 3, c->name,
1943 c->enable_count, c->prepare_count, clk_core_get_rate(c),
1944 clk_core_get_accuracy(c), clk_core_get_phase(c));
1945}
89ac8d7a 1946
4dff95dc
SB
1947static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
1948 int level)
1949{
1950 struct clk_core *child;
b2476490 1951
4dff95dc
SB
1952 if (!c)
1953 return;
b2476490 1954
4dff95dc 1955 clk_summary_show_one(s, c, level);
0e1c0301 1956
4dff95dc
SB
1957 hlist_for_each_entry(child, &c->children, child_node)
1958 clk_summary_show_subtree(s, child, level + 1);
1c8e6004 1959}
b2476490 1960
4dff95dc 1961static int clk_summary_show(struct seq_file *s, void *data)
1c8e6004 1962{
4dff95dc
SB
1963 struct clk_core *c;
1964 struct hlist_head **lists = (struct hlist_head **)s->private;
1c8e6004 1965
4dff95dc
SB
1966 seq_puts(s, " clock enable_cnt prepare_cnt rate accuracy phase\n");
1967 seq_puts(s, "----------------------------------------------------------------------------------------\n");
b2476490 1968
1c8e6004
TV
1969 clk_prepare_lock();
1970
4dff95dc
SB
1971 for (; *lists; lists++)
1972 hlist_for_each_entry(c, *lists, child_node)
1973 clk_summary_show_subtree(s, c, 0);
b2476490 1974
eab89f69 1975 clk_prepare_unlock();
b2476490 1976
4dff95dc 1977 return 0;
b2476490 1978}
1c8e6004 1979
1c8e6004 1980
4dff95dc 1981static int clk_summary_open(struct inode *inode, struct file *file)
1c8e6004 1982{
4dff95dc 1983 return single_open(file, clk_summary_show, inode->i_private);
1c8e6004 1984}
b2476490 1985
4dff95dc
SB
1986static const struct file_operations clk_summary_fops = {
1987 .open = clk_summary_open,
1988 .read = seq_read,
1989 .llseek = seq_lseek,
1990 .release = single_release,
1991};
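/*
 * Example of the resulting /sys/kernel/debug/clk/clk_summary layout
 * (column widths condensed, clock names and values illustrative):
 *
 *	   clock        enable_cnt  prepare_cnt      rate  accuracy  phase
 *	--------------------------------------------------------------------
 *	 osc24m                  1            1  24000000         0      0
 *	    pll1                 1            1 792000000         0      0
 */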
b2476490 1992
4dff95dc
SB
1993static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
1994{
1995 if (!c)
1996 return;
b2476490 1997
7cb81136 1998 /* This should be JSON format, i.e. elements separated with a comma */
4dff95dc
SB
1999 seq_printf(s, "\"%s\": { ", c->name);
2000 seq_printf(s, "\"enable_count\": %d,", c->enable_count);
2001 seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
7cb81136
SW
2002 seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c));
2003 seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c));
4dff95dc 2004 seq_printf(s, "\"phase\": %d", clk_core_get_phase(c));
b2476490 2005}
b2476490 2006
4dff95dc 2007static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level)
b2476490 2008{
4dff95dc 2009 struct clk_core *child;
b2476490 2010
4dff95dc
SB
2011 if (!c)
2012 return;
b2476490 2013
4dff95dc 2014 clk_dump_one(s, c, level);
b2476490 2015
4dff95dc
SB
2016 hlist_for_each_entry(child, &c->children, child_node) {
2017 seq_printf(s, ",");
2018 clk_dump_subtree(s, child, level + 1);
b2476490
MT
2019 }
2020
4dff95dc 2021 seq_printf(s, "}");
b2476490
MT
2022}
2023
4dff95dc 2024static int clk_dump(struct seq_file *s, void *data)
4e88f3de 2025{
4dff95dc
SB
2026 struct clk_core *c;
2027 bool first_node = true;
2028 struct hlist_head **lists = (struct hlist_head **)s->private;
4e88f3de 2029
4dff95dc 2030 seq_printf(s, "{");
4e88f3de 2031
4dff95dc 2032 clk_prepare_lock();
035a61c3 2033
4dff95dc
SB
2034 for (; *lists; lists++) {
2035 hlist_for_each_entry(c, *lists, child_node) {
2036 if (!first_node)
2037 seq_puts(s, ",");
2038 first_node = false;
2039 clk_dump_subtree(s, c, 0);
2040 }
2041 }
4e88f3de 2042
4dff95dc 2043 clk_prepare_unlock();
4e88f3de 2044
70e9f4dd 2045 seq_puts(s, "}\n");
4dff95dc 2046 return 0;
4e88f3de 2047}
4e88f3de 2048
4dff95dc
SB
2049
2050static int clk_dump_open(struct inode *inode, struct file *file)
b2476490 2051{
4dff95dc
SB
2052 return single_open(file, clk_dump, inode->i_private);
2053}
b2476490 2054
4dff95dc
SB
2055static const struct file_operations clk_dump_fops = {
2056 .open = clk_dump_open,
2057 .read = seq_read,
2058 .llseek = seq_lseek,
2059 .release = single_release,
2060};
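/*
 * Example of the output of /sys/kernel/debug/clk/clk_dump (clock names and
 * values illustrative). Note that child nodes are emitted inside their
 * parent's object, directly after the parent's own fields:
 *
 *	{"osc24m": { "enable_count": 1,"prepare_count": 1,"rate": 24000000,
 *	"accuracy": 0,"phase": 0,"pll1": { "enable_count": 1,"prepare_count": 1,
 *	"rate": 792000000,"accuracy": 0,"phase": 0}}}
 */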
89ac8d7a 2061
4dff95dc
SB
2062static int clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
2063{
2064 struct dentry *d;
2065 int ret = -ENOMEM;
b2476490 2066
4dff95dc
SB
2067 if (!core || !pdentry) {
2068 ret = -EINVAL;
b2476490 2069 goto out;
4dff95dc 2070 }
b2476490 2071
4dff95dc
SB
2072 d = debugfs_create_dir(core->name, pdentry);
2073 if (!d)
b61c43c0 2074 goto out;
b61c43c0 2075
4dff95dc
SB
2076 core->dentry = d;
2077
2078 d = debugfs_create_u32("clk_rate", S_IRUGO, core->dentry,
2079 (u32 *)&core->rate);
2080 if (!d)
2081 goto err_out;
2082
2083 d = debugfs_create_u32("clk_accuracy", S_IRUGO, core->dentry,
2084 (u32 *)&core->accuracy);
2085 if (!d)
2086 goto err_out;
2087
2088 d = debugfs_create_u32("clk_phase", S_IRUGO, core->dentry,
2089 (u32 *)&core->phase);
2090 if (!d)
2091 goto err_out;
031dcc9b 2092
4dff95dc
SB
2093 d = debugfs_create_x32("clk_flags", S_IRUGO, core->dentry,
2094 (u32 *)&core->flags);
2095 if (!d)
2096 goto err_out;
031dcc9b 2097
4dff95dc
SB
2098 d = debugfs_create_u32("clk_prepare_count", S_IRUGO, core->dentry,
2099 (u32 *)&core->prepare_count);
2100 if (!d)
2101 goto err_out;
b2476490 2102
4dff95dc
SB
2103 d = debugfs_create_u32("clk_enable_count", S_IRUGO, core->dentry,
2104 (u32 *)&core->enable_count);
2105 if (!d)
2106 goto err_out;
b2476490 2107
4dff95dc
SB
2108 d = debugfs_create_u32("clk_notifier_count", S_IRUGO, core->dentry,
2109 (u32 *)&core->notifier_count);
2110 if (!d)
2111 goto err_out;
b2476490 2112
4dff95dc
SB
2113 if (core->ops->debug_init) {
2114 ret = core->ops->debug_init(core->hw, core->dentry);
2115 if (ret)
2116 goto err_out;
5279fc40 2117 }
b2476490 2118
4dff95dc
SB
2119 ret = 0;
2120 goto out;
b2476490 2121
4dff95dc
SB
2122err_out:
2123 debugfs_remove_recursive(core->dentry);
2124 core->dentry = NULL;
2125out:
b2476490
MT
2126 return ret;
2127}
035a61c3
TV
2128
2129/**
6e5ab41b
SB
2130 * clk_debug_register - add a clk node to the debugfs clk directory
2131 * @core: the clk being added to the debugfs clk directory
035a61c3 2132 *
6e5ab41b
SB
2133 * Dynamically adds a clk to the debugfs clk directory if debugfs has been
2134 * initialized. Otherwise it bails out early since the debugfs clk directory
4dff95dc 2135 * will be created lazily by clk_debug_init as part of a late_initcall.
035a61c3 2136 */
4dff95dc 2137static int clk_debug_register(struct clk_core *core)
035a61c3 2138{
4dff95dc 2139 int ret = 0;
035a61c3 2140
4dff95dc
SB
2141 mutex_lock(&clk_debug_lock);
2142 hlist_add_head(&core->debug_node, &clk_debug_list);
2143
2144 if (!inited)
2145 goto unlock;
2146
2147 ret = clk_debug_create_one(core, rootdir);
2148unlock:
2149 mutex_unlock(&clk_debug_lock);
2150
2151 return ret;
035a61c3 2152}
b2476490 2153
4dff95dc 2154 /**
6e5ab41b
SB
2155 * clk_debug_unregister - remove a clk node from the debugfs clk directory
2156 * @core: the clk being removed from the debugfs clk directory
e59c5371 2157 *
6e5ab41b
SB
2158 * Dynamically removes a clk and all its child nodes from the
2159 * debugfs clk directory if clk->dentry points to debugfs created by
4dff95dc 2160 * clk_debug_register in __clk_init.
e59c5371 2161 */
4dff95dc 2162static void clk_debug_unregister(struct clk_core *core)
e59c5371 2163{
4dff95dc
SB
2164 mutex_lock(&clk_debug_lock);
2165 hlist_del_init(&core->debug_node);
2166 debugfs_remove_recursive(core->dentry);
2167 core->dentry = NULL;
2168 mutex_unlock(&clk_debug_lock);
2169}
e59c5371 2170
4dff95dc
SB
2171struct dentry *clk_debugfs_add_file(struct clk_hw *hw, char *name, umode_t mode,
2172 void *data, const struct file_operations *fops)
2173{
2174 struct dentry *d = NULL;
e59c5371 2175
4dff95dc
SB
2176 if (hw->core->dentry)
2177 d = debugfs_create_file(name, mode, hw->core->dentry, data,
2178 fops);
e59c5371 2179
4dff95dc
SB
2180 return d;
2181}
2182EXPORT_SYMBOL_GPL(clk_debugfs_add_file);
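/*
 * Illustrative example (editor's sketch, not part of the original source):
 * a clock provider adding an extra debugfs file next to the per-clk entries
 * created above. "foo_regdump_fops" is a hypothetical file_operations.
 *
 *	clk_debugfs_add_file(&foo->hw, "regdump", S_IRUGO, foo,
 *			     &foo_regdump_fops);
 */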
e59c5371 2183
4dff95dc 2184/**
6e5ab41b 2185 * clk_debug_init - lazily populate the debugfs clk directory
4dff95dc 2186 *
6e5ab41b
SB
2187 * clks are often initialized very early during boot before memory can be
2188 * dynamically allocated and well before debugfs is setup. This function
2189 * populates the debugfs clk directory once at boot-time when we know that
2190 * debugfs is set up. It should only be called once at boot-time; all other clks
2191 * added dynamically will be registered with clk_debug_register.
4dff95dc
SB
2192 */
2193static int __init clk_debug_init(void)
2194{
2195 struct clk_core *core;
2196 struct dentry *d;
dfc202ea 2197
4dff95dc 2198 rootdir = debugfs_create_dir("clk", NULL);
e59c5371 2199
4dff95dc
SB
2200 if (!rootdir)
2201 return -ENOMEM;
dfc202ea 2202
4dff95dc
SB
2203 d = debugfs_create_file("clk_summary", S_IRUGO, rootdir, &all_lists,
2204 &clk_summary_fops);
2205 if (!d)
2206 return -ENOMEM;
e59c5371 2207
4dff95dc
SB
2208 d = debugfs_create_file("clk_dump", S_IRUGO, rootdir, &all_lists,
2209 &clk_dump_fops);
2210 if (!d)
2211 return -ENOMEM;
e59c5371 2212
4dff95dc
SB
2213 d = debugfs_create_file("clk_orphan_summary", S_IRUGO, rootdir,
2214 &orphan_list, &clk_summary_fops);
2215 if (!d)
2216 return -ENOMEM;
e59c5371 2217
4dff95dc
SB
2218 d = debugfs_create_file("clk_orphan_dump", S_IRUGO, rootdir,
2219 &orphan_list, &clk_dump_fops);
2220 if (!d)
2221 return -ENOMEM;
e59c5371 2222
4dff95dc
SB
2223 mutex_lock(&clk_debug_lock);
2224 hlist_for_each_entry(core, &clk_debug_list, debug_node)
2225 clk_debug_create_one(core, rootdir);
e59c5371 2226
4dff95dc
SB
2227 inited = 1;
2228 mutex_unlock(&clk_debug_lock);
e59c5371 2229
4dff95dc
SB
2230 return 0;
2231}
2232late_initcall(clk_debug_init);
2233#else
2234static inline int clk_debug_register(struct clk_core *core) { return 0; }
2235static inline void clk_debug_reparent(struct clk_core *core,
2236 struct clk_core *new_parent)
035a61c3 2237{
035a61c3 2238}
4dff95dc 2239static inline void clk_debug_unregister(struct clk_core *core)
3d3801ef 2240{
3d3801ef 2241}
4dff95dc 2242#endif
3d3801ef 2243
b2476490
MT
2244/**
2245 * __clk_init - initialize the data structures in a struct clk
2246 * @dev: device initializing this clk, placeholder for now
2247 * @clk: clk being initialized
2248 *
035a61c3 2249 * Initializes the lists in struct clk_core, queries the hardware for the
b2476490 2250 * parent and rate and sets them both.
b2476490 2251 */
b09d6d99 2252static int __clk_init(struct device *dev, struct clk *clk_user)
b2476490 2253{
d1302a36 2254 int i, ret = 0;
035a61c3 2255 struct clk_core *orphan;
b67bfe0d 2256 struct hlist_node *tmp2;
d6968fca 2257 struct clk_core *core;
1c8e6004 2258 unsigned long rate;
b2476490 2259
035a61c3 2260 if (!clk_user)
d1302a36 2261 return -EINVAL;
b2476490 2262
d6968fca 2263 core = clk_user->core;
035a61c3 2264
eab89f69 2265 clk_prepare_lock();
b2476490
MT
2266
2267 /* check to see if a clock with this name is already registered */
d6968fca 2268 if (clk_core_lookup(core->name)) {
d1302a36 2269 pr_debug("%s: clk %s already initialized\n",
d6968fca 2270 __func__, core->name);
d1302a36 2271 ret = -EEXIST;
b2476490 2272 goto out;
d1302a36 2273 }
b2476490 2274
d4d7e3dd 2275 /* check that clk_ops are sane. See Documentation/clk.txt */
d6968fca
SB
2276 if (core->ops->set_rate &&
2277 !((core->ops->round_rate || core->ops->determine_rate) &&
2278 core->ops->recalc_rate)) {
71472c0c 2279 pr_warning("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
d6968fca 2280 __func__, core->name);
d1302a36 2281 ret = -EINVAL;
d4d7e3dd
MT
2282 goto out;
2283 }
2284
d6968fca 2285 if (core->ops->set_parent && !core->ops->get_parent) {
d4d7e3dd 2286 pr_warning("%s: %s must implement .get_parent & .set_parent\n",
d6968fca 2287 __func__, core->name);
d1302a36 2288 ret = -EINVAL;
d4d7e3dd
MT
2289 goto out;
2290 }
2291
d6968fca
SB
2292 if (core->ops->set_rate_and_parent &&
2293 !(core->ops->set_parent && core->ops->set_rate)) {
3fa2252b 2294 pr_warn("%s: %s must implement .set_parent & .set_rate\n",
d6968fca 2295 __func__, core->name);
3fa2252b
SB
2296 ret = -EINVAL;
2297 goto out;
2298 }
2299
b2476490 2300 /* throw a WARN if any entries in parent_names are NULL */
d6968fca
SB
2301 for (i = 0; i < core->num_parents; i++)
2302 WARN(!core->parent_names[i],
b2476490 2303 "%s: invalid NULL in %s's .parent_names\n",
d6968fca 2304 __func__, core->name);
b2476490
MT
2305
2306 /*
2307 * Allocate an array of struct clk *'s to avoid unnecessary string
2308 * look-ups of clk's possible parents. This can fail for clocks passed
d6968fca 2309 * in to clk_init during early boot; thus any access to core->parents[]
b2476490
MT
2310 * must always check for a NULL pointer and try to populate it if
2311 * necessary.
2312 *
d6968fca
SB
2313 * If core->parents is not NULL we skip this entire block. This allows
2314 * for clock drivers to statically initialize core->parents.
b2476490 2315 */
d6968fca
SB
2316 if (core->num_parents > 1 && !core->parents) {
2317 core->parents = kcalloc(core->num_parents, sizeof(struct clk *),
96a7ed90 2318 GFP_KERNEL);
b2476490 2319 /*
035a61c3 2320 * clk_core_lookup returns NULL for parents that have not been
b2476490
MT
2321 * clk_init'd; thus any access to clk->parents[] must check
2322 * for a NULL pointer. We can always perform lazy lookups for
2323 * missing parents later on.
2324 */
d6968fca
SB
2325 if (core->parents)
2326 for (i = 0; i < core->num_parents; i++)
2327 core->parents[i] =
2328 clk_core_lookup(core->parent_names[i]);
b2476490
MT
2329 }
2330
d6968fca 2331 core->parent = __clk_init_parent(core);
b2476490
MT
2332
2333 /*
d6968fca 2334 * Populate core->parent if parent has already been __clk_init'd. If
b2476490
MT
2335 * parent has not yet been __clk_init'd then place clk in the orphan
2336 * list. If clk has set the CLK_IS_ROOT flag then place it in the root
2337 * clk list.
2338 *
2339 * Every time a new clk is clk_init'd then we walk the list of orphan
2340 * clocks and re-parent any that are children of the clock currently
2341 * being clk_init'd.
2342 */
d6968fca
SB
2343 if (core->parent)
2344 hlist_add_head(&core->child_node,
2345 &core->parent->children);
2346 else if (core->flags & CLK_IS_ROOT)
2347 hlist_add_head(&core->child_node, &clk_root_list);
b2476490 2348 else
d6968fca 2349 hlist_add_head(&core->child_node, &clk_orphan_list);
b2476490 2350
5279fc40
BB
2351 /*
2352 * Set clk's accuracy. The preferred method is to use
2353 * .recalc_accuracy. For simple clocks and lazy developers the default
2354 * fallback is to use the parent's accuracy. If a clock doesn't have a
2355 * parent (or is orphaned) then accuracy is set to zero (perfect
2356 * clock).
2357 */
d6968fca
SB
2358 if (core->ops->recalc_accuracy)
2359 core->accuracy = core->ops->recalc_accuracy(core->hw,
2360 __clk_get_accuracy(core->parent));
2361 else if (core->parent)
2362 core->accuracy = core->parent->accuracy;
5279fc40 2363 else
d6968fca 2364 core->accuracy = 0;
5279fc40 2365
9824cf73
MR
2366 /*
2367 * Set clk's phase.
2368 * Since a phase is by definition relative to its parent, just
2369 * query the current clock phase, or just assume it's in phase.
2370 */
d6968fca
SB
2371 if (core->ops->get_phase)
2372 core->phase = core->ops->get_phase(core->hw);
9824cf73 2373 else
d6968fca 2374 core->phase = 0;
9824cf73 2375
b2476490
MT
2376 /*
2377 * Set clk's rate. The preferred method is to use .recalc_rate. For
2378 * simple clocks and lazy developers the default fallback is to use the
2379 * parent's rate. If a clock doesn't have a parent (or is orphaned)
2380 * then rate is set to zero.
2381 */
d6968fca
SB
2382 if (core->ops->recalc_rate)
2383 rate = core->ops->recalc_rate(core->hw,
2384 clk_core_get_rate_nolock(core->parent));
2385 else if (core->parent)
2386 rate = core->parent->rate;
b2476490 2387 else
1c8e6004 2388 rate = 0;
d6968fca 2389 core->rate = core->req_rate = rate;
b2476490
MT
2390
2391 /*
2392 * walk the list of orphan clocks and reparent any that are children of
2393 * this clock
2394 */
b67bfe0d 2395 hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
12d29886 2396 if (orphan->num_parents && orphan->ops->get_parent) {
1f61e5f1 2397 i = orphan->ops->get_parent(orphan->hw);
d6968fca
SB
2398 if (!strcmp(core->name, orphan->parent_names[i]))
2399 clk_core_reparent(orphan, core);
1f61e5f1
MF
2400 continue;
2401 }
2402
b2476490 2403 for (i = 0; i < orphan->num_parents; i++)
d6968fca
SB
2404 if (!strcmp(core->name, orphan->parent_names[i])) {
2405 clk_core_reparent(orphan, core);
b2476490
MT
2406 break;
2407 }
1f61e5f1 2408 }
b2476490
MT
2409
2410 /*
2411 * optional platform-specific magic
2412 *
2413 * The .init callback is not used by any of the basic clock types, but
2414 * exists for weird hardware that must perform initialization magic.
2415 * Please consider other ways of solving initialization problems before
24ee1a08 2416 * using this callback, as its use is discouraged.
b2476490 2417 */
d6968fca
SB
2418 if (core->ops->init)
2419 core->ops->init(core->hw);
b2476490 2420
d6968fca 2421 kref_init(&core->ref);
b2476490 2422out:
eab89f69 2423 clk_prepare_unlock();
b2476490 2424
89f7e9de 2425 if (!ret)
d6968fca 2426 clk_debug_register(core);
89f7e9de 2427
d1302a36 2428 return ret;
b2476490
MT
2429}
2430
035a61c3
TV
2431struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id,
2432 const char *con_id)
0197b3ea 2433{
0197b3ea
SK
2434 struct clk *clk;
2435
035a61c3
TV
2436 /* This is to allow this function to be chained to others */
2437 if (!hw || IS_ERR(hw))
2438 return (struct clk *) hw;
0197b3ea 2439
035a61c3
TV
2440 clk = kzalloc(sizeof(*clk), GFP_KERNEL);
2441 if (!clk)
2442 return ERR_PTR(-ENOMEM);
2443
2444 clk->core = hw->core;
2445 clk->dev_id = dev_id;
2446 clk->con_id = con_id;
1c8e6004
TV
2447 clk->max_rate = ULONG_MAX;
2448
2449 clk_prepare_lock();
50595f8b 2450 hlist_add_head(&clk->clks_node, &hw->core->clks);
1c8e6004 2451 clk_prepare_unlock();
0197b3ea
SK
2452
2453 return clk;
2454}
035a61c3 2455
73e0e496 2456void __clk_free_clk(struct clk *clk)
1c8e6004
TV
2457{
2458 clk_prepare_lock();
50595f8b 2459 hlist_del(&clk->clks_node);
1c8e6004
TV
2460 clk_prepare_unlock();
2461
2462 kfree(clk);
2463}
0197b3ea 2464
293ba3b4
SB
2465/**
2466 * clk_register - allocate a new clock, register it and return an opaque cookie
2467 * @dev: device that is registering this clock
2468 * @hw: link to hardware-specific clock data
2469 *
2470 * clk_register is the primary interface for populating the clock tree with new
2471 * clock nodes. It returns a pointer to the newly allocated struct clk which
a59a5163 2472 * cannot be dereferenced by driver code but may be used in conjunction with the
293ba3b4
SB
2473 * rest of the clock API. In the event of an error clk_register will return an
2474 * error code; drivers must test for an error code after calling clk_register.
2475 */
2476struct clk *clk_register(struct device *dev, struct clk_hw *hw)
b2476490 2477{
d1302a36 2478 int i, ret;
d6968fca 2479 struct clk_core *core;
293ba3b4 2480
d6968fca
SB
2481 core = kzalloc(sizeof(*core), GFP_KERNEL);
2482 if (!core) {
293ba3b4
SB
2483 ret = -ENOMEM;
2484 goto fail_out;
2485 }
b2476490 2486
d6968fca
SB
2487 core->name = kstrdup_const(hw->init->name, GFP_KERNEL);
2488 if (!core->name) {
0197b3ea
SK
2489 ret = -ENOMEM;
2490 goto fail_name;
2491 }
d6968fca 2492 core->ops = hw->init->ops;
ac2df527 2493 if (dev && dev->driver)
d6968fca
SB
2494 core->owner = dev->driver->owner;
2495 core->hw = hw;
2496 core->flags = hw->init->flags;
2497 core->num_parents = hw->init->num_parents;
2498 hw->core = core;
b2476490 2499
d1302a36 2500 /* allocate local copy in case parent_names is __initdata */
d6968fca 2501 core->parent_names = kcalloc(core->num_parents, sizeof(char *),
96a7ed90 2502 GFP_KERNEL);
d1302a36 2503
d6968fca 2504 if (!core->parent_names) {
d1302a36
MT
2505 ret = -ENOMEM;
2506 goto fail_parent_names;
2507 }
2508
2509
2510 /* copy each string name in case parent_names is __initdata */
d6968fca
SB
2511 for (i = 0; i < core->num_parents; i++) {
2512 core->parent_names[i] = kstrdup_const(hw->init->parent_names[i],
0197b3ea 2513 GFP_KERNEL);
d6968fca 2514 if (!core->parent_names[i]) {
d1302a36
MT
2515 ret = -ENOMEM;
2516 goto fail_parent_names_copy;
2517 }
2518 }
2519
d6968fca 2520 INIT_HLIST_HEAD(&core->clks);
1c8e6004 2521
035a61c3
TV
2522 hw->clk = __clk_create_clk(hw, NULL, NULL);
2523 if (IS_ERR(hw->clk)) {
035a61c3
TV
2524 ret = PTR_ERR(hw->clk);
2525 goto fail_parent_names_copy;
2526 }
2527
2528 ret = __clk_init(dev, hw->clk);
d1302a36 2529 if (!ret)
035a61c3 2530 return hw->clk;
b2476490 2531
1c8e6004 2532 __clk_free_clk(hw->clk);
035a61c3 2533 hw->clk = NULL;
b2476490 2534
d1302a36
MT
2535fail_parent_names_copy:
2536 while (--i >= 0)
d6968fca
SB
2537 kfree_const(core->parent_names[i]);
2538 kfree(core->parent_names);
d1302a36 2539fail_parent_names:
d6968fca 2540 kfree_const(core->name);
0197b3ea 2541fail_name:
d6968fca 2542 kfree(core);
d1302a36
MT
2543fail_out:
2544 return ERR_PTR(ret);
b2476490
MT
2545}
2546EXPORT_SYMBOL_GPL(clk_register);
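/*
 * Illustrative example (editor's sketch, not part of the original source):
 * minimal registration of a single-parent clock. Everything prefixed with
 * "foo_" is hypothetical provider code.
 *
 *	static const char *foo_parents[] = { "osc24m" };
 *
 *	static struct clk_init_data foo_init = {
 *		.name = "foo_gate",
 *		.ops = &foo_gate_ops,
 *		.parent_names = foo_parents,
 *		.num_parents = ARRAY_SIZE(foo_parents),
 *	};
 *
 *	foo->hw.init = &foo_init;
 *	clk = clk_register(dev, &foo->hw);
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 */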
2547
6e5ab41b 2548/* Free memory allocated for a clock. */
fcb0ee6a
SN
2549static void __clk_release(struct kref *ref)
2550{
d6968fca
SB
2551 struct clk_core *core = container_of(ref, struct clk_core, ref);
2552 int i = core->num_parents;
fcb0ee6a 2553
496eadf8
KK
2554 lockdep_assert_held(&prepare_lock);
2555
d6968fca 2556 kfree(core->parents);
fcb0ee6a 2557 while (--i >= 0)
d6968fca 2558 kfree_const(core->parent_names[i]);
fcb0ee6a 2559
d6968fca
SB
2560 kfree(core->parent_names);
2561 kfree_const(core->name);
2562 kfree(core);
fcb0ee6a
SN
2563}
2564
2565/*
2566 * Empty clk_ops for unregistered clocks. These are used temporarily
2567 * after clk_unregister() was called on a clock and until last clock
2568 * consumer calls clk_put() and the struct clk object is freed.
2569 */
2570static int clk_nodrv_prepare_enable(struct clk_hw *hw)
2571{
2572 return -ENXIO;
2573}
2574
2575static void clk_nodrv_disable_unprepare(struct clk_hw *hw)
2576{
2577 WARN_ON_ONCE(1);
2578}
2579
2580static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate,
2581 unsigned long parent_rate)
2582{
2583 return -ENXIO;
2584}
2585
2586static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index)
2587{
2588 return -ENXIO;
2589}
2590
2591static const struct clk_ops clk_nodrv_ops = {
2592 .enable = clk_nodrv_prepare_enable,
2593 .disable = clk_nodrv_disable_unprepare,
2594 .prepare = clk_nodrv_prepare_enable,
2595 .unprepare = clk_nodrv_disable_unprepare,
2596 .set_rate = clk_nodrv_set_rate,
2597 .set_parent = clk_nodrv_set_parent,
2598};
2599
1df5c939
MB
2600/**
2601 * clk_unregister - unregister a currently registered clock
2602 * @clk: clock to unregister
1df5c939 2603 */
fcb0ee6a
SN
2604void clk_unregister(struct clk *clk)
2605{
2606 unsigned long flags;
2607
6314b679
SB
2608 if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
2609 return;
2610
035a61c3 2611 clk_debug_unregister(clk->core);
fcb0ee6a
SN
2612
2613 clk_prepare_lock();
2614
035a61c3
TV
2615 if (clk->core->ops == &clk_nodrv_ops) {
2616 pr_err("%s: unregistered clock: %s\n", __func__,
2617 clk->core->name);
6314b679 2618 goto unlock;
fcb0ee6a
SN
2619 }
2620 /*
2621 * Assign empty clock ops for consumers that might still hold
2622 * a reference to this clock.
2623 */
2624 flags = clk_enable_lock();
035a61c3 2625 clk->core->ops = &clk_nodrv_ops;
fcb0ee6a
SN
2626 clk_enable_unlock(flags);
2627
035a61c3
TV
2628 if (!hlist_empty(&clk->core->children)) {
2629 struct clk_core *child;
874f224c 2630 struct hlist_node *t;
fcb0ee6a
SN
2631
2632 /* Reparent all children to the orphan list. */
035a61c3
TV
2633 hlist_for_each_entry_safe(child, t, &clk->core->children,
2634 child_node)
2635 clk_core_set_parent(child, NULL);
fcb0ee6a
SN
2636 }
2637
035a61c3 2638 hlist_del_init(&clk->core->child_node);
fcb0ee6a 2639
035a61c3 2640 if (clk->core->prepare_count)
fcb0ee6a 2641 pr_warn("%s: unregistering prepared clock: %s\n",
035a61c3
TV
2642 __func__, clk->core->name);
2643 kref_put(&clk->core->ref, __clk_release);
6314b679 2644
fcb0ee6a
SN
2645unlock:
 clk_prepare_unlock();
2646}
1df5c939
MB
2647EXPORT_SYMBOL_GPL(clk_unregister);
2648
46c8773a
SB
2649static void devm_clk_release(struct device *dev, void *res)
2650{
293ba3b4 2651 clk_unregister(*(struct clk **)res);
46c8773a
SB
2652}
2653
2654/**
2655 * devm_clk_register - resource managed clk_register()
2656 * @dev: device that is registering this clock
2657 * @hw: link to hardware-specific clock data
2658 *
2659 * Managed clk_register(). Clocks returned from this function are
2660 * automatically clk_unregister()ed on driver detach. See clk_register() for
2661 * more information.
2662 */
2663struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
2664{
2665 struct clk *clk;
293ba3b4 2666 struct clk **clkp;
46c8773a 2667
293ba3b4
SB
2668 clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
2669 if (!clkp)
46c8773a
SB
2670 return ERR_PTR(-ENOMEM);
2671
293ba3b4
SB
2672 clk = clk_register(dev, hw);
2673 if (!IS_ERR(clk)) {
2674 *clkp = clk;
2675 devres_add(dev, clkp);
46c8773a 2676 } else {
293ba3b4 2677 devres_free(clkp);
46c8773a
SB
2678 }
2679
2680 return clk;
2681}
2682EXPORT_SYMBOL_GPL(devm_clk_register);
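/*
 * Illustrative example (editor's sketch, not part of the original source):
 * in a platform driver probe the managed variant removes the explicit
 * clk_unregister() from the error and remove paths.
 *
 *	clk = devm_clk_register(&pdev->dev, &foo->hw);
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 */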
2683
2684static int devm_clk_match(struct device *dev, void *res, void *data)
2685{
2686 struct clk *c = res;
2687 if (WARN_ON(!c))
2688 return 0;
2689 return c == data;
2690}
2691
2692/**
2693 * devm_clk_unregister - resource managed clk_unregister()
2694 * @dev: device that registered the clock
 * @clk: clock to unregister
2695 *
2696 * Deallocate a clock allocated with devm_clk_register(). Normally
2697 * this function will not need to be called and the resource management
2698 * code will ensure that the resource is freed.
2699 */
2700void devm_clk_unregister(struct device *dev, struct clk *clk)
2701{
2702 WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk));
2703}
2704EXPORT_SYMBOL_GPL(devm_clk_unregister);
2705
ac2df527
SN
2706/*
2707 * clkdev helpers
2708 */
2709int __clk_get(struct clk *clk)
2710{
035a61c3
TV
2711 struct clk_core *core = !clk ? NULL : clk->core;
2712
2713 if (core) {
2714 if (!try_module_get(core->owner))
00efcb1c 2715 return 0;
ac2df527 2716
035a61c3 2717 kref_get(&core->ref);
00efcb1c 2718 }
ac2df527
SN
2719 return 1;
2720}
2721
2722void __clk_put(struct clk *clk)
2723{
10cdfe54
TV
2724 struct module *owner;
2725
00efcb1c 2726 if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
ac2df527
SN
2727 return;
2728
fcb0ee6a 2729 clk_prepare_lock();
1c8e6004 2730
50595f8b 2731 hlist_del(&clk->clks_node);
ec02ace8
TV
2732 if (clk->min_rate > clk->core->req_rate ||
2733 clk->max_rate < clk->core->req_rate)
2734 clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
2735
1c8e6004
TV
2736 owner = clk->core->owner;
2737 kref_put(&clk->core->ref, __clk_release);
2738
fcb0ee6a
SN
2739 clk_prepare_unlock();
2740
10cdfe54 2741 module_put(owner);
035a61c3 2742
035a61c3 2743 kfree(clk);
ac2df527
SN
2744}
2745
b2476490
MT
2746/*** clk rate change notifiers ***/
2747
2748/**
2749 * clk_notifier_register - add a clk rate change notifier
2750 * @clk: struct clk * to watch
2751 * @nb: struct notifier_block * with callback info
2752 *
2753 * Request notification when clk's rate changes. This uses an SRCU
2754 * notifier because we want it to block and notifier unregistrations are
2755 * uncommon. The callbacks associated with the notifier must not
2756 * re-enter into the clk framework by calling any top-level clk APIs;
2757 * this will cause a nested prepare_lock mutex.
2758 *
5324fda7
SB
2759 * In all notification cases (pre, post and abort rate change) the
2760 * original clock rate is passed to the callback via struct
2761 * clk_notifier_data.old_rate and the new frequency is passed via struct
b2476490
MT
2762 * clk_notifier_data.new_rate.
2763 *
b2476490
MT
2764 * clk_notifier_register() must be called from non-atomic context.
2765 * Returns -EINVAL if called with null arguments, -ENOMEM upon
2766 * allocation failure; otherwise, passes along the return value of
2767 * srcu_notifier_chain_register().
2768 */
2769int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
2770{
2771 struct clk_notifier *cn;
2772 int ret = -ENOMEM;
2773
2774 if (!clk || !nb)
2775 return -EINVAL;
2776
eab89f69 2777 clk_prepare_lock();
b2476490
MT
2778
2779 /* search the list of notifiers for this clk */
2780 list_for_each_entry(cn, &clk_notifier_list, node)
2781 if (cn->clk == clk)
2782 break;
2783
2784 /* if clk wasn't in the notifier list, allocate new clk_notifier */
2785 if (cn->clk != clk) {
2786 cn = kzalloc(sizeof(struct clk_notifier), GFP_KERNEL);
2787 if (!cn)
2788 goto out;
2789
2790 cn->clk = clk;
2791 srcu_init_notifier_head(&cn->notifier_head);
2792
2793 list_add(&cn->node, &clk_notifier_list);
2794 }
2795
2796 ret = srcu_notifier_chain_register(&cn->notifier_head, nb);
2797
035a61c3 2798 clk->core->notifier_count++;
b2476490
MT
2799
2800out:
eab89f69 2801 clk_prepare_unlock();
b2476490
MT
2802
2803 return ret;
2804}
2805EXPORT_SYMBOL_GPL(clk_notifier_register);
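/*
 * Illustrative example (editor's sketch, not part of the original source):
 * a consumer reprogramming its dividers after a rate change. The callback,
 * handle and helper names are hypothetical.
 *
 *	static int foo_clk_notify(struct notifier_block *nb,
 *				  unsigned long event, void *data)
 *	{
 *		struct clk_notifier_data *cnd = data;
 *
 *		if (event == POST_RATE_CHANGE)
 *			foo_reprogram_dividers(cnd->new_rate);
 *
 *		return NOTIFY_OK;
 *	}
 *
 *	foo->nb.notifier_call = foo_clk_notify;
 *	ret = clk_notifier_register(foo->clk, &foo->nb);
 */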
2806
2807/**
2808 * clk_notifier_unregister - remove a clk rate change notifier
2809 * @clk: struct clk *
2810 * @nb: struct notifier_block * with callback info
2811 *
2812 * Request no further notification for changes to 'clk' and frees memory
2813 * allocated in clk_notifier_register.
2814 *
2815 * Returns -EINVAL if called with null arguments; otherwise, passes
2816 * along the return value of srcu_notifier_chain_unregister().
2817 */
2818int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
2819{
2820 struct clk_notifier *cn = NULL;
2821 int ret = -EINVAL;
2822
2823 if (!clk || !nb)
2824 return -EINVAL;
2825
eab89f69 2826 clk_prepare_lock();
b2476490
MT
2827
2828 list_for_each_entry(cn, &clk_notifier_list, node)
2829 if (cn->clk == clk)
2830 break;
2831
2832 if (cn->clk == clk) {
2833 ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
2834
035a61c3 2835 clk->core->notifier_count--;
b2476490
MT
2836
2837 /* XXX the notifier code should handle this better */
2838 if (!cn->notifier_head.head) {
2839 srcu_cleanup_notifier_head(&cn->notifier_head);
72b5322f 2840 list_del(&cn->node);
b2476490
MT
2841 kfree(cn);
2842 }
2843
2844 } else {
2845 ret = -ENOENT;
2846 }
2847
eab89f69 2848 clk_prepare_unlock();
b2476490
MT
2849
2850 return ret;
2851}
2852EXPORT_SYMBOL_GPL(clk_notifier_unregister);
766e6a4e
GL
2853
2854#ifdef CONFIG_OF
2855/**
2856 * struct of_clk_provider - Clock provider registration structure
2857 * @link: Entry in global list of clock providers
2858 * @node: Pointer to device tree node of clock provider
2859 * @get: Get clock callback. Returns NULL or a struct clk for the
2860 * given clock specifier
2861 * @data: context pointer to be passed into @get callback
2862 */
2863struct of_clk_provider {
2864 struct list_head link;
2865
2866 struct device_node *node;
2867 struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
2868 void *data;
2869};
2870
f2f6c255
PG
2871static const struct of_device_id __clk_of_table_sentinel
2872 __used __section(__clk_of_table_end);
2873
766e6a4e 2874static LIST_HEAD(of_clk_providers);
d6782c26
SN
2875static DEFINE_MUTEX(of_clk_mutex);
2876
766e6a4e
GL
2877struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
2878 void *data)
2879{
2880 return data;
2881}
2882EXPORT_SYMBOL_GPL(of_clk_src_simple_get);
2883
494bfec9
SG
2884struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
2885{
2886 struct clk_onecell_data *clk_data = data;
2887 unsigned int idx = clkspec->args[0];
2888
2889 if (idx >= clk_data->clk_num) {
2890 pr_err("%s: invalid clock index %d\n", __func__, idx);
2891 return ERR_PTR(-EINVAL);
2892 }
2893
2894 return clk_data->clks[idx];
2895}
2896EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);
2897
766e6a4e
GL
2898/**
2899 * of_clk_add_provider() - Register a clock provider for a node
2900 * @np: Device node pointer associated with clock provider
2901 * @clk_src_get: callback for decoding clock
2902 * @data: context pointer for @clk_src_get callback.
2903 */
2904int of_clk_add_provider(struct device_node *np,
2905 struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
2906 void *data),
2907 void *data)
2908{
2909 struct of_clk_provider *cp;
86be408b 2910 int ret;
766e6a4e
GL
2911
2912 cp = kzalloc(sizeof(struct of_clk_provider), GFP_KERNEL);
2913 if (!cp)
2914 return -ENOMEM;
2915
2916 cp->node = of_node_get(np);
2917 cp->data = data;
2918 cp->get = clk_src_get;
2919
d6782c26 2920 mutex_lock(&of_clk_mutex);
766e6a4e 2921 list_add(&cp->link, &of_clk_providers);
d6782c26 2922 mutex_unlock(&of_clk_mutex);
766e6a4e
GL
2923 pr_debug("Added clock from %s\n", np->full_name);
2924
86be408b
SN
2925 ret = of_clk_set_defaults(np, true);
2926 if (ret < 0)
2927 of_clk_del_provider(np);
2928
2929 return ret;
766e6a4e
GL
2930}
2931EXPORT_SYMBOL_GPL(of_clk_add_provider);
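/*
 * Illustrative example (editor's sketch, not part of the original source):
 * a provider with a single output registering itself for consumers that use
 * "#clock-cells = <0>". Providers with several outputs typically fill a
 * struct clk_onecell_data and pass of_clk_src_onecell_get instead.
 *
 *	clk = clk_register(dev, &foo->hw);
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *
 *	return of_clk_add_provider(np, of_clk_src_simple_get, clk);
 */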
2932
2933/**
2934 * of_clk_del_provider() - Remove a previously registered clock provider
2935 * @np: Device node pointer associated with clock provider
2936 */
2937void of_clk_del_provider(struct device_node *np)
2938{
2939 struct of_clk_provider *cp;
2940
d6782c26 2941 mutex_lock(&of_clk_mutex);
766e6a4e
GL
2942 list_for_each_entry(cp, &of_clk_providers, link) {
2943 if (cp->node == np) {
2944 list_del(&cp->link);
2945 of_node_put(cp->node);
2946 kfree(cp);
2947 break;
2948 }
2949 }
d6782c26 2950 mutex_unlock(&of_clk_mutex);
766e6a4e
GL
2951}
2952EXPORT_SYMBOL_GPL(of_clk_del_provider);
2953
73e0e496
SB
2954struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
2955 const char *dev_id, const char *con_id)
766e6a4e
GL
2956{
2957 struct of_clk_provider *provider;
a34cd466 2958 struct clk *clk = ERR_PTR(-EPROBE_DEFER);
766e6a4e 2959
306c342f
SB
2960 if (!clkspec)
2961 return ERR_PTR(-EINVAL);
2962
766e6a4e 2963 /* Check if we have such a provider in our array */
306c342f 2964 mutex_lock(&of_clk_mutex);
766e6a4e
GL
2965 list_for_each_entry(provider, &of_clk_providers, link) {
2966 if (provider->node == clkspec->np)
2967 clk = provider->get(clkspec, provider->data);
73e0e496
SB
2968 if (!IS_ERR(clk)) {
2969 clk = __clk_create_clk(__clk_get_hw(clk), dev_id,
2970 con_id);
2971
2972 if (!IS_ERR(clk) && !__clk_get(clk)) {
2973 __clk_free_clk(clk);
2974 clk = ERR_PTR(-ENOENT);
2975 }
2976
766e6a4e 2977 break;
73e0e496 2978 }
766e6a4e 2979 }
306c342f 2980 mutex_unlock(&of_clk_mutex);
d6782c26
SN
2981
2982 return clk;
2983}
2984
306c342f
SB
2985/**
2986 * of_clk_get_from_provider() - Lookup a clock from a clock provider
2987 * @clkspec: pointer to a clock specifier data structure
2988 *
2989 * This function looks up a struct clk from the registered list of clock
2990 * providers; the input is a clock specifier data structure as returned
2991 * from the of_parse_phandle_with_args() function call.
2992 */
d6782c26
SN
2993struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
2994{
306c342f 2995 return __of_clk_get_from_provider(clkspec, NULL, __func__);
766e6a4e
GL
2996}
2997
f6102742
MT
2998int of_clk_get_parent_count(struct device_node *np)
2999{
3000 return of_count_phandle_with_args(np, "clocks", "#clock-cells");
3001}
3002EXPORT_SYMBOL_GPL(of_clk_get_parent_count);
3003
766e6a4e
GL
3004const char *of_clk_get_parent_name(struct device_node *np, int index)
3005{
3006 struct of_phandle_args clkspec;
7a0fc1a3 3007 struct property *prop;
766e6a4e 3008 const char *clk_name;
7a0fc1a3
BD
3009 const __be32 *vp;
3010 u32 pv;
766e6a4e 3011 int rc;
7a0fc1a3 3012 int count;
766e6a4e
GL
3013
3014 if (index < 0)
3015 return NULL;
3016
3017 rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
3018 &clkspec);
3019 if (rc)
3020 return NULL;
3021
7a0fc1a3
BD
3022 index = clkspec.args_count ? clkspec.args[0] : 0;
3023 count = 0;
3024
3025 /* if there is a clock-indices property, use it to translate the index
3026 * specified into an array offset for the clock-output-names property.
3027 */
3028 of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) {
3029 if (index == pv) {
3030 index = count;
3031 break;
3032 }
3033 count++;
3034 }
3035
766e6a4e 3036 if (of_property_read_string_index(clkspec.np, "clock-output-names",
7a0fc1a3 3037 index,
766e6a4e
GL
3038 &clk_name) < 0)
3039 clk_name = clkspec.np->name;
3040
3041 of_node_put(clkspec.np);
3042 return clk_name;
3043}
3044EXPORT_SYMBOL_GPL(of_clk_get_parent_name);
3045
2e61dfb3
DN
3046/**
3047 * of_clk_parent_fill() - Fill @parents with names of @np's parents and return
3048 * number of parents
3049 * @np: Device node pointer associated with clock provider
3050 * @parents: pointer to char array that holds the parents' names
3051 * @size: size of the @parents array
3052 *
3053 * Return: number of parents for the clock node.
3054 */
3055int of_clk_parent_fill(struct device_node *np, const char **parents,
3056 unsigned int size)
3057{
3058 unsigned int i = 0;
3059
3060 while (i < size && (parents[i] = of_clk_get_parent_name(np, i)) != NULL)
3061 i++;
3062
3063 return i;
3064}
3065EXPORT_SYMBOL_GPL(of_clk_parent_fill);
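/*
 * Illustrative example (editor's sketch, not part of the original source):
 * collecting parent names from the "clocks" property when registering a mux.
 * The array size is hypothetical.
 *
 *	const char *parents[4];
 *	int num_parents;
 *
 *	num_parents = of_clk_parent_fill(np, parents, ARRAY_SIZE(parents));
 */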
3066
1771b10d
GC
3067struct clock_provider {
3068 of_clk_init_cb_t clk_init_cb;
3069 struct device_node *np;
3070 struct list_head node;
3071};
3072
3073static LIST_HEAD(clk_provider_list);
3074
3075/*
3076 * This function looks for the parent clocks of a node. For each parent
3077 * found, it checks that the corresponding provider has already been
3078 * initialized; in that case the parent clock is considered ready.
3079 */
3080static int parent_ready(struct device_node *np)
3081{
3082 int i = 0;
3083
3084 while (true) {
3085 struct clk *clk = of_clk_get(np, i);
3086
3087 /* this parent is ready we can check the next one */
3088 if (!IS_ERR(clk)) {
3089 clk_put(clk);
3090 i++;
3091 continue;
3092 }
3093
3094 /* at least one parent is not ready, we exit now */
3095 if (PTR_ERR(clk) == -EPROBE_DEFER)
3096 return 0;
3097
3098 /*
3099 * Here we assume that the device tree is
3100 * written correctly, so any other error means that
3101 * there is no more parent. Since we did not exit
3102 * earlier, all the previous parents are ready. If
3103 * there is no clock parent at all, there is nothing
3104 * to wait for, so we can consider the clock ready.
3105 */
3106 return 1;
3107 }
3108}
3109
766e6a4e
GL
3110/**
3111 * of_clk_init() - Scan and init clock providers from the DT
3112 * @matches: array of compatible values and init functions for providers.
3113 *
1771b10d 3114 * This function scans the device tree for matching clock providers
e5ca8fb4 3115 * and calls their initialization functions. It does so while trying
1771b10d 3116 * to follow the dependencies between providers.
766e6a4e
GL
3117 */
3118void __init of_clk_init(const struct of_device_id *matches)
3119{
7f7ed584 3120 const struct of_device_id *match;
766e6a4e 3121 struct device_node *np;
1771b10d
GC
3122 struct clock_provider *clk_provider, *next;
3123 bool is_init_done;
3124 bool force = false;
766e6a4e 3125
f2f6c255 3126 if (!matches)
819b4861 3127 matches = &__clk_of_table;
f2f6c255 3128
1771b10d 3129 /* First prepare the list of the clocks providers */
7f7ed584 3130 for_each_matching_node_and_match(np, matches, &match) {
1771b10d
GC
3131 struct clock_provider *parent =
3132 kzalloc(sizeof(struct clock_provider), GFP_KERNEL);
3133
3134 parent->clk_init_cb = match->data;
3135 parent->np = np;
3f6d439f 3136 list_add_tail(&parent->node, &clk_provider_list);
1771b10d
GC
3137 }
3138
3139 while (!list_empty(&clk_provider_list)) {
3140 is_init_done = false;
3141 list_for_each_entry_safe(clk_provider, next,
3142 &clk_provider_list, node) {
3143 if (force || parent_ready(clk_provider->np)) {
86be408b 3144
1771b10d 3145 clk_provider->clk_init_cb(clk_provider->np);
86be408b
SN
3146 of_clk_set_defaults(clk_provider->np, true);
3147
1771b10d
GC
3148 list_del(&clk_provider->node);
3149 kfree(clk_provider);
3150 is_init_done = true;
3151 }
3152 }
3153
3154 /*
e5ca8fb4 3155 * We didn't manage to initialize any of the
1771b10d
GC
3156 * remaining providers during the last loop, so now we
3157 * initialize all the remaining ones unconditionally
3158 * in case the clock parent was not mandatory
3159 */
3160 if (!is_init_done)
3161 force = true;
766e6a4e
GL
3162 }
3163}
3164#endif