/*
 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Standard functionality for the common clock API.  See Documentation/clk.txt
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clk/clk-conf.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/clkdev.h>

#include "clk.h"

static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);

static struct task_struct *prepare_owner;
static struct task_struct *enable_owner;

static int prepare_refcnt;
static int enable_refcnt;

static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);

/***    private data structures    ***/

struct clk_core {
	const char		*name;
	const struct clk_ops	*ops;
	struct clk_hw		*hw;
	struct module		*owner;
	struct device		*dev;
	struct clk_core		*parent;
	const char		**parent_names;
	struct clk_core		**parents;
	u8			num_parents;
	u8			new_parent_index;
	unsigned long		rate;
	unsigned long		req_rate;
	unsigned long		new_rate;
	struct clk_core		*new_parent;
	struct clk_core		*new_child;
	unsigned long		flags;
	bool			orphan;
	unsigned int		enable_count;
	unsigned int		prepare_count;
	unsigned long		min_rate;
	unsigned long		max_rate;
	unsigned long		accuracy;
	int			phase;
	struct hlist_head	children;
	struct hlist_node	child_node;
	struct hlist_head	clks;
	unsigned int		notifier_count;
#ifdef CONFIG_DEBUG_FS
	struct dentry		*dentry;
	struct hlist_node	debug_node;
#endif
	struct kref		ref;
};

#define CREATE_TRACE_POINTS
#include <trace/events/clk.h>

struct clk {
	struct clk_core	*core;
	const char *dev_id;
	const char *con_id;
	unsigned long min_rate;
	unsigned long max_rate;
	struct hlist_node clks_node;
};

/***           runtime pm          ***/
static int clk_pm_runtime_get(struct clk_core *core)
{
	int ret = 0;

	if (!core->dev)
		return 0;

	ret = pm_runtime_get_sync(core->dev);
	return ret < 0 ? ret : 0;
}

static void clk_pm_runtime_put(struct clk_core *core)
{
	if (!core->dev)
		return;

	pm_runtime_put_sync(core->dev);
}

/***           locking             ***/
static void clk_prepare_lock(void)
{
	if (!mutex_trylock(&prepare_lock)) {
		if (prepare_owner == current) {
			prepare_refcnt++;
			return;
		}
		mutex_lock(&prepare_lock);
	}
	WARN_ON_ONCE(prepare_owner != NULL);
	WARN_ON_ONCE(prepare_refcnt != 0);
	prepare_owner = current;
	prepare_refcnt = 1;
}

static void clk_prepare_unlock(void)
{
	WARN_ON_ONCE(prepare_owner != current);
	WARN_ON_ONCE(prepare_refcnt == 0);

	if (--prepare_refcnt)
		return;
	prepare_owner = NULL;
	mutex_unlock(&prepare_lock);
}

static unsigned long clk_enable_lock(void)
	__acquires(enable_lock)
{
	unsigned long flags;

	if (!spin_trylock_irqsave(&enable_lock, flags)) {
		if (enable_owner == current) {
			enable_refcnt++;
			__acquire(enable_lock);
			return flags;
		}
		spin_lock_irqsave(&enable_lock, flags);
	}
	WARN_ON_ONCE(enable_owner != NULL);
	WARN_ON_ONCE(enable_refcnt != 0);
	enable_owner = current;
	enable_refcnt = 1;
	return flags;
}

static void clk_enable_unlock(unsigned long flags)
	__releases(enable_lock)
{
	WARN_ON_ONCE(enable_owner != current);
	WARN_ON_ONCE(enable_refcnt == 0);

	if (--enable_refcnt) {
		__release(enable_lock);
		return;
	}
	enable_owner = NULL;
	spin_unlock_irqrestore(&enable_lock, flags);
}

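/*
 * Editor's note (sketch, not in the original source): the owner/refcnt
 * bookkeeping above makes both locks recursive for the task that already
 * holds them, so a clk_ops callback running under the prepare mutex may
 * safely re-enter the framework:
 *
 *	clk_prepare_lock();
 *	clk_prepare_lock();	prepare_refcnt is now 2, no deadlock
 *	clk_prepare_unlock();
 *	clk_prepare_unlock();	refcnt reaches 0, mutex is released
 */
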
static bool clk_core_is_prepared(struct clk_core *core)
{
	bool ret = false;

	/*
	 * .is_prepared is optional for clocks that can prepare.
	 * Fall back to the software usage counter if it is missing.
	 */
	if (!core->ops->is_prepared)
		return core->prepare_count;

	if (!clk_pm_runtime_get(core)) {
		ret = core->ops->is_prepared(core->hw);
		clk_pm_runtime_put(core);
	}

	return ret;
}

static bool clk_core_is_enabled(struct clk_core *core)
{
	bool ret = false;

	/*
	 * .is_enabled is only mandatory for clocks that gate.
	 * Fall back to the software usage counter if .is_enabled is missing.
	 */
	if (!core->ops->is_enabled)
		return core->enable_count;

	/*
	 * Check if clock controller's device is runtime active before
	 * calling .is_enabled callback. If not, assume that clock is
	 * disabled, because we might be called from atomic context, from
	 * which pm_runtime_get() is not allowed.
	 * This function is called mainly from clk_disable_unused_subtree,
	 * which ensures proper runtime pm activation of controller before
	 * taking enable spinlock, but the below check is needed if one tries
	 * to call it from other places.
	 */
	if (core->dev) {
		pm_runtime_get_noresume(core->dev);
		if (!pm_runtime_active(core->dev)) {
			ret = false;
			goto done;
		}
	}

	ret = core->ops->is_enabled(core->hw);
done:
	if (core->dev)
		pm_runtime_put(core->dev);

	return ret;
}

/***	helper functions   ***/

const char *__clk_get_name(const struct clk *clk)
{
	return !clk ? NULL : clk->core->name;
}
EXPORT_SYMBOL_GPL(__clk_get_name);

const char *clk_hw_get_name(const struct clk_hw *hw)
{
	return hw->core->name;
}
EXPORT_SYMBOL_GPL(clk_hw_get_name);

struct clk_hw *__clk_get_hw(struct clk *clk)
{
	return !clk ? NULL : clk->core->hw;
}
EXPORT_SYMBOL_GPL(__clk_get_hw);

unsigned int clk_hw_get_num_parents(const struct clk_hw *hw)
{
	return hw->core->num_parents;
}
EXPORT_SYMBOL_GPL(clk_hw_get_num_parents);

struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw)
{
	return hw->core->parent ? hw->core->parent->hw : NULL;
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent);

static struct clk_core *__clk_lookup_subtree(const char *name,
					     struct clk_core *core)
{
	struct clk_core *child;
	struct clk_core *ret;

	if (!strcmp(core->name, name))
		return core;

	hlist_for_each_entry(child, &core->children, child_node) {
		ret = __clk_lookup_subtree(name, child);
		if (ret)
			return ret;
	}

	return NULL;
}

static struct clk_core *clk_core_lookup(const char *name)
{
	struct clk_core *root_clk;
	struct clk_core *ret;

	if (!name)
		return NULL;

	/* search the 'proper' clk tree first */
	hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	/* if not found, then search the orphan tree */
	hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	return NULL;
}

static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core,
						     u8 index)
{
	if (!core || index >= core->num_parents)
		return NULL;

	if (!core->parents[index])
		core->parents[index] =
				clk_core_lookup(core->parent_names[index]);

	return core->parents[index];
}

struct clk_hw *
clk_hw_get_parent_by_index(const struct clk_hw *hw, unsigned int index)
{
	struct clk_core *parent;

	parent = clk_core_get_parent_by_index(hw->core, index);

	return !parent ? NULL : parent->hw;
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent_by_index);

unsigned int __clk_get_enable_count(struct clk *clk)
{
	return !clk ? 0 : clk->core->enable_count;
}

static unsigned long clk_core_get_rate_nolock(struct clk_core *core)
{
	unsigned long ret;

	if (!core) {
		ret = 0;
		goto out;
	}

	ret = core->rate;

	if (!core->num_parents)
		goto out;

	if (!core->parent)
		ret = 0;

out:
	return ret;
}

unsigned long clk_hw_get_rate(const struct clk_hw *hw)
{
	return clk_core_get_rate_nolock(hw->core);
}
EXPORT_SYMBOL_GPL(clk_hw_get_rate);

static unsigned long __clk_get_accuracy(struct clk_core *core)
{
	if (!core)
		return 0;

	return core->accuracy;
}

unsigned long __clk_get_flags(struct clk *clk)
{
	return !clk ? 0 : clk->core->flags;
}
EXPORT_SYMBOL_GPL(__clk_get_flags);

unsigned long clk_hw_get_flags(const struct clk_hw *hw)
{
	return hw->core->flags;
}
EXPORT_SYMBOL_GPL(clk_hw_get_flags);

bool clk_hw_is_prepared(const struct clk_hw *hw)
{
	return clk_core_is_prepared(hw->core);
}

bool clk_hw_is_enabled(const struct clk_hw *hw)
{
	return clk_core_is_enabled(hw->core);
}

bool __clk_is_enabled(struct clk *clk)
{
	if (!clk)
		return false;

	return clk_core_is_enabled(clk->core);
}
EXPORT_SYMBOL_GPL(__clk_is_enabled);

static bool mux_is_better_rate(unsigned long rate, unsigned long now,
			       unsigned long best, unsigned long flags)
{
	if (flags & CLK_MUX_ROUND_CLOSEST)
		return abs(now - rate) < abs(best - rate);

	return now <= rate && now > best;
}

int clk_mux_determine_rate_flags(struct clk_hw *hw,
				 struct clk_rate_request *req,
				 unsigned long flags)
{
	struct clk_core *core = hw->core, *parent, *best_parent = NULL;
	int i, num_parents, ret;
	unsigned long best = 0;
	struct clk_rate_request parent_req = *req;

	/* if NO_REPARENT flag set, pass through to current parent */
	if (core->flags & CLK_SET_RATE_NO_REPARENT) {
		parent = core->parent;
		if (core->flags & CLK_SET_RATE_PARENT) {
			ret = __clk_determine_rate(parent ? parent->hw : NULL,
						   &parent_req);
			if (ret)
				return ret;

			best = parent_req.rate;
		} else if (parent) {
			best = clk_core_get_rate_nolock(parent);
		} else {
			best = clk_core_get_rate_nolock(core);
		}

		goto out;
	}

	/* find the parent that can provide the fastest rate <= rate */
	num_parents = core->num_parents;
	for (i = 0; i < num_parents; i++) {
		parent = clk_core_get_parent_by_index(core, i);
		if (!parent)
			continue;

		if (core->flags & CLK_SET_RATE_PARENT) {
			parent_req = *req;
			ret = __clk_determine_rate(parent->hw, &parent_req);
			if (ret)
				continue;
		} else {
			parent_req.rate = clk_core_get_rate_nolock(parent);
		}

		if (mux_is_better_rate(req->rate, parent_req.rate,
				       best, flags)) {
			best_parent = parent;
			best = parent_req.rate;
		}
	}

	if (!best_parent)
		return -EINVAL;

out:
	if (best_parent)
		req->best_parent_hw = best_parent->hw;
	req->best_parent_rate = best;
	req->rate = best;

	return 0;
}
EXPORT_SYMBOL_GPL(clk_mux_determine_rate_flags);

struct clk *__clk_lookup(const char *name)
{
	struct clk_core *core = clk_core_lookup(name);

	return !core ? NULL : core->hw->clk;
}

static void clk_core_get_boundaries(struct clk_core *core,
				    unsigned long *min_rate,
				    unsigned long *max_rate)
{
	struct clk *clk_user;

	*min_rate = core->min_rate;
	*max_rate = core->max_rate;

	hlist_for_each_entry(clk_user, &core->clks, clks_node)
		*min_rate = max(*min_rate, clk_user->min_rate);

	hlist_for_each_entry(clk_user, &core->clks, clks_node)
		*max_rate = min(*max_rate, clk_user->max_rate);
}

void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
			   unsigned long max_rate)
{
	hw->core->min_rate = min_rate;
	hw->core->max_rate = max_rate;
}
EXPORT_SYMBOL_GPL(clk_hw_set_rate_range);

/*
 * Helper for finding best parent to provide a given frequency. This can be used
 * directly as a determine_rate callback (e.g. for a mux), or from a more
 * complex clock that may combine a mux with other operations.
 */
int __clk_mux_determine_rate(struct clk_hw *hw,
			     struct clk_rate_request *req)
{
	return clk_mux_determine_rate_flags(hw, req, 0);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);

int __clk_mux_determine_rate_closest(struct clk_hw *hw,
				     struct clk_rate_request *req)
{
	return clk_mux_determine_rate_flags(hw, req, CLK_MUX_ROUND_CLOSEST);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);

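/*
 * Editor's example (sketch, hypothetical driver code): a basic mux can use
 * the helper above directly as its .determine_rate callback; the my_mux_*
 * callbacks are assumed to be supplied by the driver:
 *
 *	static const struct clk_ops my_mux_ops = {
 *		.get_parent	= my_mux_get_parent,
 *		.set_parent	= my_mux_set_parent,
 *		.determine_rate	= __clk_mux_determine_rate,
 *	};
 */
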
/***        clk api        ***/

static void clk_core_unprepare(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (WARN_ON(core->prepare_count == 0))
		return;

	if (WARN_ON(core->prepare_count == 1 && core->flags & CLK_IS_CRITICAL))
		return;

	if (--core->prepare_count > 0)
		return;

	WARN_ON(core->enable_count > 0);

	trace_clk_unprepare(core);

	if (core->ops->unprepare)
		core->ops->unprepare(core->hw);

	clk_pm_runtime_put(core);

	trace_clk_unprepare_complete(core);
	clk_core_unprepare(core->parent);
}

static void clk_core_unprepare_lock(struct clk_core *core)
{
	clk_prepare_lock();
	clk_core_unprepare(core);
	clk_prepare_unlock();
}

/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: the clk being unprepared
 *
 * clk_unprepare may sleep, which differentiates it from clk_disable. In a
 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
 * if the operation may sleep. One example is a clk which is accessed over
 * I2C. In the complex case a clk gate operation may require a fast and a slow
 * part. It is for this reason that clk_unprepare and clk_disable are not
 * mutually exclusive. In fact clk_disable must be called before clk_unprepare.
 */
void clk_unprepare(struct clk *clk)
{
	if (IS_ERR_OR_NULL(clk))
		return;

	clk_core_unprepare_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_unprepare);

static int clk_core_prepare(struct clk_core *core)
{
	int ret = 0;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	if (core->prepare_count == 0) {
		ret = clk_pm_runtime_get(core);
		if (ret)
			return ret;

		ret = clk_core_prepare(core->parent);
		if (ret)
			goto runtime_put;

		trace_clk_prepare(core);

		if (core->ops->prepare)
			ret = core->ops->prepare(core->hw);

		trace_clk_prepare_complete(core);

		if (ret)
			goto unprepare;
	}

	core->prepare_count++;

	return 0;
unprepare:
	clk_core_unprepare(core->parent);
runtime_put:
	clk_pm_runtime_put(core);
	return ret;
}

static int clk_core_prepare_lock(struct clk_core *core)
{
	int ret;

	clk_prepare_lock();
	ret = clk_core_prepare(core);
	clk_prepare_unlock();

	return ret;
}

/**
 * clk_prepare - prepare a clock source
 * @clk: the clk being prepared
 *
 * clk_prepare may sleep, which differentiates it from clk_enable. In a simple
 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
 * operation may sleep. One example is a clk which is accessed over I2C. In
 * the complex case a clk ungate operation may require a fast and a slow part.
 * It is for this reason that clk_prepare and clk_enable are not mutually
 * exclusive. In fact clk_prepare must be called before clk_enable.
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_prepare(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_prepare_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_prepare);

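/*
 * Editor's example (sketch): a consumer that only needs the sleepable half
 * of the gating sequence, e.g. around a slow bus transfer, where "clk" was
 * obtained earlier with clk_get():
 *
 *	ret = clk_prepare(clk);
 *	if (ret)
 *		return ret;
 *	... access the device ...
 *	clk_unprepare(clk);
 */
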
static void clk_core_disable(struct clk_core *core)
{
	lockdep_assert_held(&enable_lock);

	if (!core)
		return;

	if (WARN_ON(core->enable_count == 0))
		return;

	if (WARN_ON(core->enable_count == 1 && core->flags & CLK_IS_CRITICAL))
		return;

	if (--core->enable_count > 0)
		return;

	trace_clk_disable_rcuidle(core);

	if (core->ops->disable)
		core->ops->disable(core->hw);

	trace_clk_disable_complete_rcuidle(core);

	clk_core_disable(core->parent);
}

static void clk_core_disable_lock(struct clk_core *core)
{
	unsigned long flags;

	flags = clk_enable_lock();
	clk_core_disable(core);
	clk_enable_unlock(flags);
}

/**
 * clk_disable - gate a clock
 * @clk: the clk being gated
 *
 * clk_disable must not sleep, which differentiates it from clk_unprepare. In
 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
 * clk if the operation is fast and will never sleep. One example is a
 * SoC-internal clk which is controlled via simple register writes. In the
 * complex case a clk gate operation may require a fast and a slow part. It is
 * for this reason that clk_unprepare and clk_disable are not mutually
 * exclusive. In fact clk_disable must be called before clk_unprepare.
 */
void clk_disable(struct clk *clk)
{
	if (IS_ERR_OR_NULL(clk))
		return;

	clk_core_disable_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_disable);

static int clk_core_enable(struct clk_core *core)
{
	int ret = 0;

	lockdep_assert_held(&enable_lock);

	if (!core)
		return 0;

	if (WARN_ON(core->prepare_count == 0))
		return -ESHUTDOWN;

	if (core->enable_count == 0) {
		ret = clk_core_enable(core->parent);

		if (ret)
			return ret;

		trace_clk_enable_rcuidle(core);

		if (core->ops->enable)
			ret = core->ops->enable(core->hw);

		trace_clk_enable_complete_rcuidle(core);

		if (ret) {
			clk_core_disable(core->parent);
			return ret;
		}
	}

	core->enable_count++;
	return 0;
}

static int clk_core_enable_lock(struct clk_core *core)
{
	unsigned long flags;
	int ret;

	flags = clk_enable_lock();
	ret = clk_core_enable(core);
	clk_enable_unlock(flags);

	return ret;
}

/**
 * clk_enable - ungate a clock
 * @clk: the clk being ungated
 *
 * clk_enable must not sleep, which differentiates it from clk_prepare. In a
 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
 * if the operation will never sleep. One example is a SoC-internal clk which
 * is controlled via simple register writes. In the complex case a clk ungate
 * operation may require a fast and a slow part. It is for this reason that
 * clk_enable and clk_prepare are not mutually exclusive. In fact clk_prepare
 * must be called before clk_enable. Returns 0 on success, -EERROR
 * otherwise.
 */
int clk_enable(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_enable_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_enable);

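/*
 * Editor's example (sketch): the canonical consumer sequence; clk_prepare()
 * runs in sleepable context first, after which clk_enable()/clk_disable()
 * may be called even from atomic context:
 *
 *	ret = clk_prepare(clk);
 *	if (ret)
 *		return ret;
 *	ret = clk_enable(clk);
 *	if (ret) {
 *		clk_unprepare(clk);
 *		return ret;
 *	}
 *	...
 *	clk_disable(clk);
 *	clk_unprepare(clk);
 */
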
static int clk_core_prepare_enable(struct clk_core *core)
{
	int ret;

	ret = clk_core_prepare_lock(core);
	if (ret)
		return ret;

	ret = clk_core_enable_lock(core);
	if (ret)
		clk_core_unprepare_lock(core);

	return ret;
}

static void clk_core_disable_unprepare(struct clk_core *core)
{
	clk_core_disable_lock(core);
	clk_core_unprepare_lock(core);
}

static void clk_unprepare_unused_subtree(struct clk_core *core)
{
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	hlist_for_each_entry(child, &core->children, child_node)
		clk_unprepare_unused_subtree(child);

	if (core->prepare_count)
		return;

	if (core->flags & CLK_IGNORE_UNUSED)
		return;

	if (clk_pm_runtime_get(core))
		return;

	if (clk_core_is_prepared(core)) {
		trace_clk_unprepare(core);
		if (core->ops->unprepare_unused)
			core->ops->unprepare_unused(core->hw);
		else if (core->ops->unprepare)
			core->ops->unprepare(core->hw);
		trace_clk_unprepare_complete(core);
	}

	clk_pm_runtime_put(core);
}

static void clk_disable_unused_subtree(struct clk_core *core)
{
	struct clk_core *child;
	unsigned long flags;

	lockdep_assert_held(&prepare_lock);

	hlist_for_each_entry(child, &core->children, child_node)
		clk_disable_unused_subtree(child);

	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_prepare_enable(core->parent);

	if (clk_pm_runtime_get(core))
		goto unprepare_out;

	flags = clk_enable_lock();

	if (core->enable_count)
		goto unlock_out;

	if (core->flags & CLK_IGNORE_UNUSED)
		goto unlock_out;

	/*
	 * some gate clocks have special needs during the disable-unused
	 * sequence. call .disable_unused if available, otherwise fall
	 * back to .disable
	 */
	if (clk_core_is_enabled(core)) {
		trace_clk_disable(core);
		if (core->ops->disable_unused)
			core->ops->disable_unused(core->hw);
		else if (core->ops->disable)
			core->ops->disable(core->hw);
		trace_clk_disable_complete(core);
	}

unlock_out:
	clk_enable_unlock(flags);
	clk_pm_runtime_put(core);
unprepare_out:
	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_disable_unprepare(core->parent);
}

static bool clk_ignore_unused;
static int __init clk_ignore_unused_setup(char *__unused)
{
	clk_ignore_unused = true;
	return 1;
}
__setup("clk_ignore_unused", clk_ignore_unused_setup);

static int clk_disable_unused(void)
{
	struct clk_core *core;

	if (clk_ignore_unused) {
		pr_warn("clk: Not disabling unused clocks\n");
		return 0;
	}

	clk_prepare_lock();

	hlist_for_each_entry(core, &clk_root_list, child_node)
		clk_disable_unused_subtree(core);

	hlist_for_each_entry(core, &clk_orphan_list, child_node)
		clk_disable_unused_subtree(core);

	hlist_for_each_entry(core, &clk_root_list, child_node)
		clk_unprepare_unused_subtree(core);

	hlist_for_each_entry(core, &clk_orphan_list, child_node)
		clk_unprepare_unused_subtree(core);

	clk_prepare_unlock();

	return 0;
}
late_initcall_sync(clk_disable_unused);

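/*
 * Editor's note: booting with "clk_ignore_unused" on the kernel command
 * line skips the sweep above and leaves unused clocks ungated, which can
 * help during board bring-up when consumers are not wired up yet.
 */
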
static int clk_core_round_rate_nolock(struct clk_core *core,
				      struct clk_rate_request *req)
{
	struct clk_core *parent;
	long rate;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	parent = core->parent;
	if (parent) {
		req->best_parent_hw = parent->hw;
		req->best_parent_rate = parent->rate;
	} else {
		req->best_parent_hw = NULL;
		req->best_parent_rate = 0;
	}

	if (core->ops->determine_rate) {
		return core->ops->determine_rate(core->hw, req);
	} else if (core->ops->round_rate) {
		rate = core->ops->round_rate(core->hw, req->rate,
					     &req->best_parent_rate);
		if (rate < 0)
			return rate;

		req->rate = rate;
	} else if (core->flags & CLK_SET_RATE_PARENT) {
		return clk_core_round_rate_nolock(parent, req);
	} else {
		req->rate = core->rate;
	}

	return 0;
}

/**
 * __clk_determine_rate - get the closest rate actually supported by a clock
 * @hw: determine the rate of this clock
 * @req: target rate request
 *
 * Useful for clk_ops such as .set_rate and .determine_rate.
 */
int __clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	if (!hw) {
		req->rate = 0;
		return 0;
	}

	return clk_core_round_rate_nolock(hw->core, req);
}
EXPORT_SYMBOL_GPL(__clk_determine_rate);

unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate)
{
	int ret;
	struct clk_rate_request req;

	clk_core_get_boundaries(hw->core, &req.min_rate, &req.max_rate);
	req.rate = rate;

	ret = clk_core_round_rate_nolock(hw->core, &req);
	if (ret)
		return 0;

	return req.rate;
}
EXPORT_SYMBOL_GPL(clk_hw_round_rate);

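/*
 * Editor's example (sketch, provider-side code): a provider can probe what
 * the framework would settle on before committing to a rate; a return of 0
 * signals that rounding failed:
 *
 *	unsigned long got = clk_hw_round_rate(hw, 48000000);
 *	if (got)
 *		... got is the rate the clk would actually run at ...
 */
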
/**
 * clk_round_rate - round the given rate for a clk
 * @clk: the clk for which we are rounding a rate
 * @rate: the rate which is to be rounded
 *
 * Takes in a rate as input and rounds it to a rate that the clk can actually
 * use, which is then returned. If clk doesn't support the round_rate
 * operation then the parent rate is returned.
 */
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	struct clk_rate_request req;
	int ret;

	if (!clk)
		return 0;

	clk_prepare_lock();

	clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate);
	req.rate = rate;

	ret = clk_core_round_rate_nolock(clk->core, &req);
	clk_prepare_unlock();

	if (ret)
		return ret;

	return req.rate;
}
EXPORT_SYMBOL_GPL(clk_round_rate);

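/*
 * Editor's example (sketch): a consumer checking that a rate is achievable
 * before asking for it:
 *
 *	long r = clk_round_rate(clk, 74250000);
 *	if (r > 0)
 *		ret = clk_set_rate(clk, r);
 */
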
/**
 * __clk_notify - call clk notifier chain
 * @core: clk that is changing rate
 * @msg: clk notifier type (see include/linux/clk.h)
 * @old_rate: old clk rate
 * @new_rate: new clk rate
 *
 * Triggers a notifier call chain on the clk rate-change notification
 * for 'clk'.  Passes a pointer to the struct clk and the previous
 * and current rates to the notifier callback.  Intended to be called by
 * internal clock code only.  Returns NOTIFY_DONE from the last driver
 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
 * a driver returns that.
 */
static int __clk_notify(struct clk_core *core, unsigned long msg,
		unsigned long old_rate, unsigned long new_rate)
{
	struct clk_notifier *cn;
	struct clk_notifier_data cnd;
	int ret = NOTIFY_DONE;

	cnd.old_rate = old_rate;
	cnd.new_rate = new_rate;

	list_for_each_entry(cn, &clk_notifier_list, node) {
		if (cn->clk->core == core) {
			cnd.clk = cn->clk;
			ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
					&cnd);
			if (ret & NOTIFY_STOP_MASK)
				return ret;
		}
	}

	return ret;
}

/**
 * __clk_recalc_accuracies
 * @core: first clk in the subtree
 *
 * Walks the subtree of clks starting with clk and recalculates accuracies as
 * it goes.  Note that if a clk does not implement the .recalc_accuracy
 * callback then it is assumed that the clock will take on the accuracy of its
 * parent.
 */
static void __clk_recalc_accuracies(struct clk_core *core)
{
	unsigned long parent_accuracy = 0;
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	if (core->parent)
		parent_accuracy = core->parent->accuracy;

	if (core->ops->recalc_accuracy)
		core->accuracy = core->ops->recalc_accuracy(core->hw,
							    parent_accuracy);
	else
		core->accuracy = parent_accuracy;

	hlist_for_each_entry(child, &core->children, child_node)
		__clk_recalc_accuracies(child);
}

static long clk_core_get_accuracy(struct clk_core *core)
{
	unsigned long accuracy;

	clk_prepare_lock();
	if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE))
		__clk_recalc_accuracies(core);

	accuracy = __clk_get_accuracy(core);
	clk_prepare_unlock();

	return accuracy;
}

/**
 * clk_get_accuracy - return the accuracy of clk
 * @clk: the clk whose accuracy is being returned
 *
 * Simply returns the cached accuracy of the clk, unless
 * CLK_GET_ACCURACY_NOCACHE flag is set, which means a recalc_rate will be
 * issued.
 * If clk is NULL then returns 0.
 */
long clk_get_accuracy(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_get_accuracy(clk->core);
}
EXPORT_SYMBOL_GPL(clk_get_accuracy);

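/*
 * Editor's example (sketch): accuracy is expressed in parts per billion, so
 * the worst-case frequency error of a 24 MHz source can be estimated as:
 *
 *	long ppb = clk_get_accuracy(clk);
 *	u64 err_hz = div64_u64((u64)24000000 * ppb, 1000000000ULL);
 */
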
static unsigned long clk_recalc(struct clk_core *core,
				unsigned long parent_rate)
{
	unsigned long rate = parent_rate;

	if (core->ops->recalc_rate && !clk_pm_runtime_get(core)) {
		rate = core->ops->recalc_rate(core->hw, parent_rate);
		clk_pm_runtime_put(core);
	}
	return rate;
}

/**
 * __clk_recalc_rates
 * @core: first clk in the subtree
 * @msg: notification type (see include/linux/clk.h)
 *
 * Walks the subtree of clks starting with clk and recalculates rates as it
 * goes.  Note that if a clk does not implement the .recalc_rate callback then
 * it is assumed that the clock will take on the rate of its parent.
 *
 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
 * if necessary.
 */
static void __clk_recalc_rates(struct clk_core *core, unsigned long msg)
{
	unsigned long old_rate;
	unsigned long parent_rate = 0;
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	old_rate = core->rate;

	if (core->parent)
		parent_rate = core->parent->rate;

	core->rate = clk_recalc(core, parent_rate);

	/*
	 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
	 * & ABORT_RATE_CHANGE notifiers
	 */
	if (core->notifier_count && msg)
		__clk_notify(core, msg, old_rate, core->rate);

	hlist_for_each_entry(child, &core->children, child_node)
		__clk_recalc_rates(child, msg);
}

static unsigned long clk_core_get_rate(struct clk_core *core)
{
	unsigned long rate;

	clk_prepare_lock();

	if (core && (core->flags & CLK_GET_RATE_NOCACHE))
		__clk_recalc_rates(core, 0);

	rate = clk_core_get_rate_nolock(core);
	clk_prepare_unlock();

	return rate;
}

/**
 * clk_get_rate - return the rate of clk
 * @clk: the clk whose rate is being returned
 *
 * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
 * is set, which means a recalc_rate will be issued.
 * If clk is NULL then returns 0.
 */
unsigned long clk_get_rate(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_get_rate(clk->core);
}
EXPORT_SYMBOL_GPL(clk_get_rate);

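/*
 * Editor's example (sketch): deriving a divisor from the cached rate,
 * assuming "clk" feeds a hypothetical 16x-oversampling baud generator:
 *
 *	unsigned long rate = clk_get_rate(clk);
 *	unsigned int div = DIV_ROUND_UP(rate, 115200 * 16);
 */
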
static int clk_fetch_parent_index(struct clk_core *core,
				  struct clk_core *parent)
{
	int i;

	if (!parent)
		return -EINVAL;

	for (i = 0; i < core->num_parents; i++)
		if (clk_core_get_parent_by_index(core, i) == parent)
			return i;

	return -EINVAL;
}

/*
 * Update the orphan status of @core and all its children.
 */
static void clk_core_update_orphan_status(struct clk_core *core, bool is_orphan)
{
	struct clk_core *child;

	core->orphan = is_orphan;

	hlist_for_each_entry(child, &core->children, child_node)
		clk_core_update_orphan_status(child, is_orphan);
}

static void clk_reparent(struct clk_core *core, struct clk_core *new_parent)
{
	bool was_orphan = core->orphan;

	hlist_del(&core->child_node);

	if (new_parent) {
		bool becomes_orphan = new_parent->orphan;

		/* avoid duplicate POST_RATE_CHANGE notifications */
		if (new_parent->new_child == core)
			new_parent->new_child = NULL;

		hlist_add_head(&core->child_node, &new_parent->children);

		if (was_orphan != becomes_orphan)
			clk_core_update_orphan_status(core, becomes_orphan);
	} else {
		hlist_add_head(&core->child_node, &clk_orphan_list);
		if (!was_orphan)
			clk_core_update_orphan_status(core, true);
	}

	core->parent = new_parent;
}

static struct clk_core *__clk_set_parent_before(struct clk_core *core,
					struct clk_core *parent)
{
	unsigned long flags;
	struct clk_core *old_parent = core->parent;

	/*
	 * 1. enable parents for CLK_OPS_PARENT_ENABLE clock
	 *
	 * 2. Migrate prepare state between parents and prevent race with
	 * clk_enable().
	 *
	 * If the clock is not prepared, then a race with
	 * clk_enable/disable() is impossible since we already have the
	 * prepare lock (future calls to clk_enable() need to be preceded by
	 * a clk_prepare()).
	 *
	 * If the clock is prepared, migrate the prepared state to the new
	 * parent and also protect against a race with clk_enable() by
	 * forcing the clock and the new parent on.  This ensures that all
	 * future calls to clk_enable() are practically NOPs with respect to
	 * hardware and software states.
	 *
	 * See also: Comment for clk_set_parent() below.
	 */

	/* enable old_parent & parent if CLK_OPS_PARENT_ENABLE is set */
	if (core->flags & CLK_OPS_PARENT_ENABLE) {
		clk_core_prepare_enable(old_parent);
		clk_core_prepare_enable(parent);
	}

	/* migrate prepare count if > 0 */
	if (core->prepare_count) {
		clk_core_prepare_enable(parent);
		clk_core_enable_lock(core);
	}

	/* update the clk tree topology */
	flags = clk_enable_lock();
	clk_reparent(core, parent);
	clk_enable_unlock(flags);

	return old_parent;
}

static void __clk_set_parent_after(struct clk_core *core,
				   struct clk_core *parent,
				   struct clk_core *old_parent)
{
	/*
	 * Finish the migration of prepare state and undo the changes done
	 * for preventing a race with clk_enable().
	 */
	if (core->prepare_count) {
		clk_core_disable_lock(core);
		clk_core_disable_unprepare(old_parent);
	}

	/* re-balance ref counting if CLK_OPS_PARENT_ENABLE is set */
	if (core->flags & CLK_OPS_PARENT_ENABLE) {
		clk_core_disable_unprepare(parent);
		clk_core_disable_unprepare(old_parent);
	}
}

static int __clk_set_parent(struct clk_core *core, struct clk_core *parent,
			    u8 p_index)
{
	unsigned long flags;
	int ret = 0;
	struct clk_core *old_parent;

	old_parent = __clk_set_parent_before(core, parent);

	trace_clk_set_parent(core, parent);

	/* change clock input source */
	if (parent && core->ops->set_parent)
		ret = core->ops->set_parent(core->hw, p_index);

	trace_clk_set_parent_complete(core, parent);

	if (ret) {
		flags = clk_enable_lock();
		clk_reparent(core, old_parent);
		clk_enable_unlock(flags);
		__clk_set_parent_after(core, old_parent, parent);

		return ret;
	}

	__clk_set_parent_after(core, parent, old_parent);

	return 0;
}

/**
 * __clk_speculate_rates
 * @core: first clk in the subtree
 * @parent_rate: the "future" rate of clk's parent
 *
 * Walks the subtree of clks starting with clk, speculating rates as it
 * goes and firing off PRE_RATE_CHANGE notifications as necessary.
 *
 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
 * pre-rate change notifications and returns early if no clks in the
 * subtree have subscribed to the notifications.  Note that if a clk does not
 * implement the .recalc_rate callback then it is assumed that the clock will
 * take on the rate of its parent.
 */
static int __clk_speculate_rates(struct clk_core *core,
				 unsigned long parent_rate)
{
	struct clk_core *child;
	unsigned long new_rate;
	int ret = NOTIFY_DONE;

	lockdep_assert_held(&prepare_lock);

	new_rate = clk_recalc(core, parent_rate);

	/* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
	if (core->notifier_count)
		ret = __clk_notify(core, PRE_RATE_CHANGE, core->rate, new_rate);

	if (ret & NOTIFY_STOP_MASK) {
		pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n",
				__func__, core->name, ret);
		goto out;
	}

	hlist_for_each_entry(child, &core->children, child_node) {
		ret = __clk_speculate_rates(child, new_rate);
		if (ret & NOTIFY_STOP_MASK)
			break;
	}

out:
	return ret;
}

static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate,
			     struct clk_core *new_parent, u8 p_index)
{
	struct clk_core *child;

	core->new_rate = new_rate;
	core->new_parent = new_parent;
	core->new_parent_index = p_index;
	/* include clk in new parent's PRE_RATE_CHANGE notifications */
	core->new_child = NULL;
	if (new_parent && new_parent != core->parent)
		new_parent->new_child = core;

	hlist_for_each_entry(child, &core->children, child_node) {
		child->new_rate = clk_recalc(child, new_rate);
		clk_calc_subtree(child, child->new_rate, NULL, 0);
	}
}

/*
 * calculate the new rates returning the topmost clock that has to be
 * changed.
 */
static struct clk_core *clk_calc_new_rates(struct clk_core *core,
					   unsigned long rate)
{
	struct clk_core *top = core;
	struct clk_core *old_parent, *parent;
	unsigned long best_parent_rate = 0;
	unsigned long new_rate;
	unsigned long min_rate;
	unsigned long max_rate;
	int p_index = 0;
	long ret;

	/* sanity */
	if (IS_ERR_OR_NULL(core))
		return NULL;

	/* save parent rate, if it exists */
	parent = old_parent = core->parent;
	if (parent)
		best_parent_rate = parent->rate;

	clk_core_get_boundaries(core, &min_rate, &max_rate);

	/* find the closest rate and parent clk/rate */
	if (core->ops->determine_rate) {
		struct clk_rate_request req;

		req.rate = rate;
		req.min_rate = min_rate;
		req.max_rate = max_rate;
		if (parent) {
			req.best_parent_hw = parent->hw;
			req.best_parent_rate = parent->rate;
		} else {
			req.best_parent_hw = NULL;
			req.best_parent_rate = 0;
		}

		ret = core->ops->determine_rate(core->hw, &req);
		if (ret < 0)
			return NULL;

		best_parent_rate = req.best_parent_rate;
		new_rate = req.rate;
		parent = req.best_parent_hw ? req.best_parent_hw->core : NULL;
	} else if (core->ops->round_rate) {
		ret = core->ops->round_rate(core->hw, rate,
					    &best_parent_rate);
		if (ret < 0)
			return NULL;

		new_rate = ret;
		if (new_rate < min_rate || new_rate > max_rate)
			return NULL;
	} else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) {
		/* pass-through clock without adjustable parent */
		core->new_rate = core->rate;
		return NULL;
	} else {
		/* pass-through clock with adjustable parent */
		top = clk_calc_new_rates(parent, rate);
		new_rate = parent->new_rate;
		goto out;
	}

	/* some clocks must be gated to change parent */
	if (parent != old_parent &&
	    (core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
		pr_debug("%s: %s not gated but wants to reparent\n",
			 __func__, core->name);
		return NULL;
	}

	/* try finding the new parent index */
	if (parent && core->num_parents > 1) {
		p_index = clk_fetch_parent_index(core, parent);
		if (p_index < 0) {
			pr_debug("%s: clk %s can not be parent of clk %s\n",
				 __func__, parent->name, core->name);
			return NULL;
		}
	}

	if ((core->flags & CLK_SET_RATE_PARENT) && parent &&
	    best_parent_rate != parent->rate)
		top = clk_calc_new_rates(parent, best_parent_rate);

out:
	clk_calc_subtree(core, new_rate, parent, p_index);

	return top;
}

/*
 * Notify about rate changes in a subtree. Always walk down the whole tree
 * so that in case of an error we can walk down the whole tree again and
 * abort the change.
 */
static struct clk_core *clk_propagate_rate_change(struct clk_core *core,
						  unsigned long event)
{
	struct clk_core *child, *tmp_clk, *fail_clk = NULL;
	int ret = NOTIFY_DONE;

	if (core->rate == core->new_rate)
		return NULL;

	if (core->notifier_count) {
		ret = __clk_notify(core, event, core->rate, core->new_rate);
		if (ret & NOTIFY_STOP_MASK)
			fail_clk = core;
	}

	hlist_for_each_entry(child, &core->children, child_node) {
		/* Skip children who will be reparented to another clock */
		if (child->new_parent && child->new_parent != core)
			continue;
		tmp_clk = clk_propagate_rate_change(child, event);
		if (tmp_clk)
			fail_clk = tmp_clk;
	}

	/* handle the new child who might not be in core->children yet */
	if (core->new_child) {
		tmp_clk = clk_propagate_rate_change(core->new_child, event);
		if (tmp_clk)
			fail_clk = tmp_clk;
	}

	return fail_clk;
}

/*
 * walk down a subtree and set the new rates notifying the rate
 * change on the way
 */
static void clk_change_rate(struct clk_core *core)
{
	struct clk_core *child;
	struct hlist_node *tmp;
	unsigned long old_rate;
	unsigned long best_parent_rate = 0;
	bool skip_set_rate = false;
	struct clk_core *old_parent;
	struct clk_core *parent = NULL;

	old_rate = core->rate;

	if (core->new_parent) {
		parent = core->new_parent;
		best_parent_rate = core->new_parent->rate;
	} else if (core->parent) {
		parent = core->parent;
		best_parent_rate = core->parent->rate;
	}

	if (clk_pm_runtime_get(core))
		return;

	if (core->flags & CLK_SET_RATE_UNGATE) {
		unsigned long flags;

		clk_core_prepare(core);
		flags = clk_enable_lock();
		clk_core_enable(core);
		clk_enable_unlock(flags);
	}

	if (core->new_parent && core->new_parent != core->parent) {
		old_parent = __clk_set_parent_before(core, core->new_parent);
		trace_clk_set_parent(core, core->new_parent);

		if (core->ops->set_rate_and_parent) {
			skip_set_rate = true;
			core->ops->set_rate_and_parent(core->hw, core->new_rate,
					best_parent_rate,
					core->new_parent_index);
		} else if (core->ops->set_parent) {
			core->ops->set_parent(core->hw, core->new_parent_index);
		}

		trace_clk_set_parent_complete(core, core->new_parent);
		__clk_set_parent_after(core, core->new_parent, old_parent);
	}

	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_prepare_enable(parent);

	trace_clk_set_rate(core, core->new_rate);

	if (!skip_set_rate && core->ops->set_rate)
		core->ops->set_rate(core->hw, core->new_rate, best_parent_rate);

	trace_clk_set_rate_complete(core, core->new_rate);

	core->rate = clk_recalc(core, best_parent_rate);

	if (core->flags & CLK_SET_RATE_UNGATE) {
		unsigned long flags;

		flags = clk_enable_lock();
		clk_core_disable(core);
		clk_enable_unlock(flags);
		clk_core_unprepare(core);
	}

	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_disable_unprepare(parent);

	if (core->notifier_count && old_rate != core->rate)
		__clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate);

	if (core->flags & CLK_RECALC_NEW_RATES)
		(void)clk_calc_new_rates(core, core->new_rate);

	/*
	 * Use safe iteration, as change_rate can actually swap parents
	 * for certain clock types.
	 */
	hlist_for_each_entry_safe(child, tmp, &core->children, child_node) {
		/* Skip children who will be reparented to another clock */
		if (child->new_parent && child->new_parent != core)
			continue;
		clk_change_rate(child);
	}

	/* handle the new child who might not be in core->children yet */
	if (core->new_child)
		clk_change_rate(core->new_child);

	clk_pm_runtime_put(core);
}

static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core,
						     unsigned long req_rate)
{
	int ret;
	struct clk_rate_request req;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	clk_core_get_boundaries(core, &req.min_rate, &req.max_rate);
	req.rate = req_rate;

	ret = clk_core_round_rate_nolock(core, &req);

	return ret ? 0 : req.rate;
}

static int clk_core_set_rate_nolock(struct clk_core *core,
				    unsigned long req_rate)
{
	struct clk_core *top, *fail_clk;
	unsigned long rate;
	int ret = 0;

	if (!core)
		return 0;

	rate = clk_core_req_round_rate_nolock(core, req_rate);

	/* bail early if nothing to do */
	if (rate == clk_core_get_rate_nolock(core))
		return 0;

	if ((core->flags & CLK_SET_RATE_GATE) && core->prepare_count)
		return -EBUSY;

	/* calculate new rates and get the topmost changed clock */
	top = clk_calc_new_rates(core, req_rate);
	if (!top)
		return -EINVAL;

	ret = clk_pm_runtime_get(core);
	if (ret)
		return ret;

	/* notify that we are about to change rates */
	fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
	if (fail_clk) {
		pr_debug("%s: failed to set %s rate\n", __func__,
				fail_clk->name);
		clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
		ret = -EBUSY;
		goto err;
	}

	/* change the rates */
	clk_change_rate(top);

	core->req_rate = req_rate;
err:
	clk_pm_runtime_put(core);

	return ret;
}

/**
 * clk_set_rate - specify a new rate for clk
 * @clk: the clk whose rate is being changed
 * @rate: the new rate for clk
 *
 * In the simplest case clk_set_rate will only adjust the rate of clk.
 *
 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
 * propagate up to clk's parent; whether or not this happens depends on the
 * outcome of clk's .round_rate implementation.  If *parent_rate is unchanged
 * after calling .round_rate then upstream parent propagation is ignored.  If
 * *parent_rate comes back with a new rate for clk's parent then we propagate
 * up to clk's parent and set its rate.  Upward propagation will continue
 * until either a clk does not support the CLK_SET_RATE_PARENT flag or
 * .round_rate stops requesting changes to clk's parent_rate.
 *
 * Rate changes are accomplished via tree traversal that also recalculates the
 * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
 *
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_set_rate(struct clk *clk, unsigned long rate)
{
	int ret;

	if (!clk)
		return 0;

	/* prevent racing with updates to the clock topology */
	clk_prepare_lock();

	ret = clk_core_set_rate_nolock(clk->core, rate);

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate);

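/*
 * Editor's example (sketch): requesting a new rate and reading back what
 * the hardware actually achieved, since the two may legitimately differ:
 *
 *	ret = clk_set_rate(clk, 100000000);
 *	if (!ret)
 *		actual = clk_get_rate(clk);
 */
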
/**
 * clk_set_rate_range - set a rate range for a clock source
 * @clk: clock source
 * @min: desired minimum clock rate in Hz, inclusive
 * @max: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
{
	int ret = 0;

	if (!clk)
		return 0;

	if (min > max) {
		pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n",
		       __func__, clk->core->name, clk->dev_id, clk->con_id,
		       min, max);
		return -EINVAL;
	}

	clk_prepare_lock();

	if (min != clk->min_rate || max != clk->max_rate) {
		clk->min_rate = min;
		clk->max_rate = max;
		ret = clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
	}

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate_range);

/**
 * clk_set_min_rate - set a minimum clock rate for a clock source
 * @clk: clock source
 * @rate: desired minimum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_min_rate(struct clk *clk, unsigned long rate)
{
	if (!clk)
		return 0;

	return clk_set_rate_range(clk, rate, clk->max_rate);
}
EXPORT_SYMBOL_GPL(clk_set_min_rate);

/**
 * clk_set_max_rate - set a maximum clock rate for a clock source
 * @clk: clock source
 * @rate: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_max_rate(struct clk *clk, unsigned long rate)
{
	if (!clk)
		return 0;

	return clk_set_rate_range(clk, clk->min_rate, rate);
}
EXPORT_SYMBOL_GPL(clk_set_max_rate);

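/*
 * Editor's example (sketch): pinning a clock into an operating window; the
 * per-user bounds set here are aggregated in clk_core_get_boundaries():
 *
 *	ret = clk_set_rate_range(clk, 200000000, 400000000);
 *	...
 *	ret = clk_set_min_rate(clk, 300000000);	narrows only the minimum
 */
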
b2476490 1820/**
4dff95dc
SB
1821 * clk_get_parent - return the parent of a clk
1822 * @clk: the clk whose parent gets returned
b2476490 1823 *
4dff95dc 1824 * Simply returns clk->parent. Returns NULL if clk is NULL.
b2476490 1825 */
4dff95dc 1826struct clk *clk_get_parent(struct clk *clk)
b2476490 1827{
4dff95dc 1828 struct clk *parent;
b2476490 1829
fc4a05d4
SB
1830 if (!clk)
1831 return NULL;
1832
4dff95dc 1833 clk_prepare_lock();
fc4a05d4
SB
1834 /* TODO: Create a per-user clk and change callers to call clk_put */
1835 parent = !clk->core->parent ? NULL : clk->core->parent->hw->clk;
4dff95dc 1836 clk_prepare_unlock();
496eadf8 1837
4dff95dc
SB
1838 return parent;
1839}
1840EXPORT_SYMBOL_GPL(clk_get_parent);
b2476490 1841
4dff95dc
SB
1842static struct clk_core *__clk_init_parent(struct clk_core *core)
1843{
5146e0b0 1844 u8 index = 0;
4dff95dc 1845
2430a94d 1846 if (core->num_parents > 1 && core->ops->get_parent)
5146e0b0 1847 index = core->ops->get_parent(core->hw);
b2476490 1848
5146e0b0 1849 return clk_core_get_parent_by_index(core, index);
b2476490
MT
1850}
1851
4dff95dc
SB
1852static void clk_core_reparent(struct clk_core *core,
1853 struct clk_core *new_parent)
b2476490 1854{
4dff95dc
SB
1855 clk_reparent(core, new_parent);
1856 __clk_recalc_accuracies(core);
1857 __clk_recalc_rates(core, POST_RATE_CHANGE);
b2476490
MT
1858}
1859
42c86547
TV
1860void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent)
1861{
1862 if (!hw)
1863 return;
1864
1865 clk_core_reparent(hw->core, !new_parent ? NULL : new_parent->core);
1866}
1867
4dff95dc
SB
1868/**
1869 * clk_has_parent - check if a clock is a possible parent for another
1870 * @clk: clock source
1871 * @parent: parent clock source
1872 *
1873 * This function can be used in drivers that need to check that a clock can be
1874 * the parent of another without actually changing the parent.
1875 *
1876 * Returns true if @parent is a possible parent for @clk, false otherwise.
b2476490 1877 */
4dff95dc 1878bool clk_has_parent(struct clk *clk, struct clk *parent)
b2476490 1879{
4dff95dc
SB
1880 struct clk_core *core, *parent_core;
1881 unsigned int i;
b2476490 1882
4dff95dc
SB
1883 /* NULL clocks should be nops, so return success if either is NULL. */
1884 if (!clk || !parent)
1885 return true;
7452b219 1886
4dff95dc
SB
1887 core = clk->core;
1888 parent_core = parent->core;
71472c0c 1889
4dff95dc
SB
1890 /* Optimize for the case where the parent is already the parent. */
1891 if (core->parent == parent_core)
1892 return true;
1c8e6004 1893
4dff95dc
SB
1894 for (i = 0; i < core->num_parents; i++)
1895 if (strcmp(core->parent_names[i], parent_core->name) == 0)
1896 return true;
03bc10ab 1897
4dff95dc
SB
1898 return false;
1899}
1900EXPORT_SYMBOL_GPL(clk_has_parent);
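/*
 * Illustrative sketch, not part of clk.c: checking mux topology with
 * clk_has_parent() before attempting the switch; both clocks are
 * assumed to come from earlier clk_get() calls.
 */
static int foo_try_reparent(struct clk *mux, struct clk *pll)
{
	if (!clk_has_parent(mux, pll))
		return -EINVAL;	/* pll is not a possible input of mux */

	return clk_set_parent(mux, pll);
}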
03bc10ab 1901
4dff95dc
SB
1902static int clk_core_set_parent(struct clk_core *core, struct clk_core *parent)
1903{
1904 int ret = 0;
1905 int p_index = 0;
1906 unsigned long p_rate = 0;
1907
1908 if (!core)
1909 return 0;
1910
1911 /* prevent racing with updates to the clock topology */
1912 clk_prepare_lock();
1913
1914 if (core->parent == parent)
1915 goto out;
1916
 1917 /* verify ops for multi-parent clks */
1918 if ((core->num_parents > 1) && (!core->ops->set_parent)) {
1919 ret = -ENOSYS;
63f5c3b2 1920 goto out;
7452b219
MT
1921 }
1922
4dff95dc
SB
1923 /* check that we are allowed to re-parent if the clock is in use */
1924 if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
1925 ret = -EBUSY;
1926 goto out;
b2476490
MT
1927 }
1928
71472c0c 1929 /* try finding the new parent index */
4dff95dc 1930 if (parent) {
d6968fca 1931 p_index = clk_fetch_parent_index(core, parent);
f1c8b2ed 1932 if (p_index < 0) {
71472c0c 1933 pr_debug("%s: clk %s cannot be parent of clk %s\n",
4dff95dc
SB
1934 __func__, parent->name, core->name);
1935 ret = p_index;
1936 goto out;
71472c0c 1937 }
e8f0e68e 1938 p_rate = parent->rate;
b2476490
MT
1939 }
1940
9a34b453
MS
1941 ret = clk_pm_runtime_get(core);
1942 if (ret)
1943 goto out;
1944
4dff95dc
SB
1945 /* propagate PRE_RATE_CHANGE notifications */
1946 ret = __clk_speculate_rates(core, p_rate);
b2476490 1947
4dff95dc
SB
1948 /* abort if a driver objects */
1949 if (ret & NOTIFY_STOP_MASK)
9a34b453 1950 goto runtime_put;
b2476490 1951
4dff95dc
SB
1952 /* do the re-parent */
1953 ret = __clk_set_parent(core, parent, p_index);
b2476490 1954
4dff95dc
SB
 1955 /* propagate rate and accuracy recalculation accordingly */
1956 if (ret) {
1957 __clk_recalc_rates(core, ABORT_RATE_CHANGE);
1958 } else {
1959 __clk_recalc_rates(core, POST_RATE_CHANGE);
1960 __clk_recalc_accuracies(core);
b2476490
MT
1961 }
1962
9a34b453
MS
1963runtime_put:
1964 clk_pm_runtime_put(core);
4dff95dc
SB
1965out:
1966 clk_prepare_unlock();
71472c0c 1967
4dff95dc
SB
1968 return ret;
1969}
b2476490 1970
4dff95dc
SB
1971/**
1972 * clk_set_parent - switch the parent of a mux clk
1973 * @clk: the mux clk whose input we are switching
1974 * @parent: the new input to clk
1975 *
 1976 * Re-parent clk to use parent as its new input source. If clk is in the
 1977 * prepared state, the clk will get enabled for the duration of this call. If
 1978 * that's not acceptable for a specific clk (e.g. the consumer can't handle
 1979 * that, the reparenting is glitchy in hardware, etc.), use the
 1980 * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
1981 *
1982 * After successfully changing clk's parent clk_set_parent will update the
1983 * clk topology, sysfs topology and propagate rate recalculation via
1984 * __clk_recalc_rates.
1985 *
 1986 * Returns 0 on success, or a negative error code otherwise.
1987 */
1988int clk_set_parent(struct clk *clk, struct clk *parent)
1989{
1990 if (!clk)
1991 return 0;
1992
1993 return clk_core_set_parent(clk->core, parent ? parent->core : NULL);
b2476490 1994}
4dff95dc 1995EXPORT_SYMBOL_GPL(clk_set_parent);
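/*
 * Illustrative sketch, not part of clk.c: reparenting a mux that has
 * the CLK_SET_PARENT_GATE flag. Such clocks return -EBUSY while
 * prepared, so this hypothetical consumer gates the clock around the
 * switch.
 */
static int foo_switch_input(struct clk *mux, struct clk *new_parent)
{
	int ret, err;

	clk_disable_unprepare(mux);
	ret = clk_set_parent(mux, new_parent);
	err = clk_prepare_enable(mux);

	return ret ? ret : err;
}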
b2476490 1996
4dff95dc
SB
1997/**
1998 * clk_set_phase - adjust the phase shift of a clock signal
1999 * @clk: clock signal source
2000 * @degrees: number of degrees the signal is shifted
2001 *
2002 * Shifts the phase of a clock signal by the specified
 2003 * degrees. Returns 0 on success, or a negative error code otherwise.
2004 *
2005 * This function makes no distinction about the input or reference
 2006 * signal that we adjust the clock signal phase against. For example,
 2007 * for phase-locked-loop clock signal generators we may shift phase with
 2008 * respect to the feedback clock signal input, but in other cases the
2009 * clock phase may be shifted with respect to some other, unspecified
2010 * signal.
2011 *
2012 * Additionally the concept of phase shift does not propagate through
2013 * the clock tree hierarchy, which sets it apart from clock rates and
2014 * clock accuracy. A parent clock phase attribute does not have an
2015 * impact on the phase attribute of a child clock.
b2476490 2016 */
4dff95dc 2017int clk_set_phase(struct clk *clk, int degrees)
b2476490 2018{
4dff95dc 2019 int ret = -EINVAL;
b2476490 2020
4dff95dc
SB
2021 if (!clk)
2022 return 0;
b2476490 2023
4dff95dc
SB
2024 /* sanity check degrees */
2025 degrees %= 360;
2026 if (degrees < 0)
2027 degrees += 360;
bf47b4fd 2028
4dff95dc 2029 clk_prepare_lock();
3fa2252b 2030
4dff95dc 2031 trace_clk_set_phase(clk->core, degrees);
3fa2252b 2032
4dff95dc
SB
2033 if (clk->core->ops->set_phase)
2034 ret = clk->core->ops->set_phase(clk->core->hw, degrees);
3fa2252b 2035
4dff95dc 2036 trace_clk_set_phase_complete(clk->core, degrees);
dfc202ea 2037
4dff95dc
SB
2038 if (!ret)
2039 clk->core->phase = degrees;
b2476490 2040
4dff95dc 2041 clk_prepare_unlock();
dfc202ea 2042
4dff95dc
SB
2043 return ret;
2044}
2045EXPORT_SYMBOL_GPL(clk_set_phase);
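/*
 * Illustrative sketch, not part of clk.c: shifting a hypothetical
 * sampling clock by a quarter period. clk_set_phase() normalizes its
 * argument into [0, 360), so -270 requests the same phase as 90.
 */
static int foo_tune_sample_window(struct clk *sample_clk)
{
	return clk_set_phase(sample_clk, 90);
}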
b2476490 2046
4dff95dc
SB
2047static int clk_core_get_phase(struct clk_core *core)
2048{
2049 int ret;
b2476490 2050
4dff95dc 2051 clk_prepare_lock();
f8eb4e5a
SL
2052 /* Always try to update cached phase if possible */
2053 if (core->ops->get_phase)
2054 core->phase = core->ops->get_phase(core->hw);
4dff95dc
SB
2055 ret = core->phase;
2056 clk_prepare_unlock();
71472c0c 2057
4dff95dc 2058 return ret;
b2476490
MT
2059}
2060
4dff95dc
SB
2061/**
2062 * clk_get_phase - return the phase shift of a clock signal
2063 * @clk: clock signal source
2064 *
 2065 * Returns the phase shift of a clock node in degrees, otherwise returns
 2066 * a negative error code.
2067 */
2068int clk_get_phase(struct clk *clk)
1c8e6004 2069{
4dff95dc 2070 if (!clk)
1c8e6004
TV
2071 return 0;
2072
4dff95dc
SB
2073 return clk_core_get_phase(clk->core);
2074}
2075EXPORT_SYMBOL_GPL(clk_get_phase);
1c8e6004 2076
4dff95dc
SB
2077/**
2078 * clk_is_match - check if two clk's point to the same hardware clock
2079 * @p: clk compared against q
2080 * @q: clk compared against p
2081 *
2082 * Returns true if the two struct clk pointers both point to the same hardware
2083 * clock node. Put differently, returns true if struct clk *p and struct clk *q
2084 * share the same struct clk_core object.
2085 *
2086 * Returns false otherwise. Note that two NULL clks are treated as matching.
2087 */
2088bool clk_is_match(const struct clk *p, const struct clk *q)
2089{
2090 /* trivial case: identical struct clk's or both NULL */
2091 if (p == q)
2092 return true;
1c8e6004 2093
3fe003f9 2094 /* true if clk->core pointers match. Avoid dereferencing garbage */
4dff95dc
SB
2095 if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q))
2096 if (p->core == q->core)
2097 return true;
1c8e6004 2098
4dff95dc
SB
2099 return false;
2100}
2101EXPORT_SYMBOL_GPL(clk_is_match);
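/*
 * Illustrative sketch, not part of clk.c: clk_get() hands out per-user
 * struct clk instances, so pointer equality says nothing about the
 * hardware; clk_is_match() compares the underlying clk_core instead.
 */
static bool foo_same_hw_clock(struct clk *a, struct clk *b)
{
	return clk_is_match(a, b);	/* also true when both are NULL */
}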
1c8e6004 2102
4dff95dc 2103/*** debugfs support ***/
1c8e6004 2104
4dff95dc
SB
2105#ifdef CONFIG_DEBUG_FS
2106#include <linux/debugfs.h>
1c8e6004 2107
4dff95dc
SB
2108static struct dentry *rootdir;
2109static int inited = 0;
2110static DEFINE_MUTEX(clk_debug_lock);
2111static HLIST_HEAD(clk_debug_list);
1c8e6004 2112
4dff95dc
SB
2113static struct hlist_head *all_lists[] = {
2114 &clk_root_list,
2115 &clk_orphan_list,
2116 NULL,
2117};
2118
2119static struct hlist_head *orphan_list[] = {
2120 &clk_orphan_list,
2121 NULL,
2122};
2123
2124static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
2125 int level)
b2476490 2126{
4dff95dc
SB
2127 if (!c)
2128 return;
b2476490 2129
4dff95dc
SB
2130 seq_printf(s, "%*s%-*s %11d %12d %11lu %10lu %-3d\n",
2131 level * 3 + 1, "",
2132 30 - level * 3, c->name,
2133 c->enable_count, c->prepare_count, clk_core_get_rate(c),
2134 clk_core_get_accuracy(c), clk_core_get_phase(c));
2135}
89ac8d7a 2136
4dff95dc
SB
2137static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
2138 int level)
2139{
2140 struct clk_core *child;
b2476490 2141
4dff95dc
SB
2142 if (!c)
2143 return;
b2476490 2144
4dff95dc 2145 clk_summary_show_one(s, c, level);
0e1c0301 2146
4dff95dc
SB
2147 hlist_for_each_entry(child, &c->children, child_node)
2148 clk_summary_show_subtree(s, child, level + 1);
1c8e6004 2149}
b2476490 2150
4dff95dc 2151static int clk_summary_show(struct seq_file *s, void *data)
1c8e6004 2152{
4dff95dc
SB
2153 struct clk_core *c;
2154 struct hlist_head **lists = (struct hlist_head **)s->private;
1c8e6004 2155
4dff95dc
SB
2156 seq_puts(s, " clock enable_cnt prepare_cnt rate accuracy phase\n");
2157 seq_puts(s, "----------------------------------------------------------------------------------------\n");
b2476490 2158
1c8e6004
TV
2159 clk_prepare_lock();
2160
4dff95dc
SB
2161 for (; *lists; lists++)
2162 hlist_for_each_entry(c, *lists, child_node)
2163 clk_summary_show_subtree(s, c, 0);
b2476490 2164
eab89f69 2165 clk_prepare_unlock();
b2476490 2166
4dff95dc 2167 return 0;
b2476490 2168}
1c8e6004 2169
1c8e6004 2170
4dff95dc 2171static int clk_summary_open(struct inode *inode, struct file *file)
1c8e6004 2172{
4dff95dc 2173 return single_open(file, clk_summary_show, inode->i_private);
1c8e6004 2174}
b2476490 2175
4dff95dc
SB
2176static const struct file_operations clk_summary_fops = {
2177 .open = clk_summary_open,
2178 .read = seq_read,
2179 .llseek = seq_lseek,
2180 .release = single_release,
2181};
b2476490 2182
4dff95dc
SB
2183static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
2184{
2185 if (!c)
2186 return;
b2476490 2187
7cb81136 2188 /* This should be JSON format, i.e. elements separated with a comma */
4dff95dc
SB
2189 seq_printf(s, "\"%s\": { ", c->name);
2190 seq_printf(s, "\"enable_count\": %d,", c->enable_count);
2191 seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
7cb81136
SW
2192 seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c));
2193 seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c));
4dff95dc 2194 seq_printf(s, "\"phase\": %d", clk_core_get_phase(c));
b2476490 2195}
b2476490 2196
4dff95dc 2197static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level)
b2476490 2198{
4dff95dc 2199 struct clk_core *child;
b2476490 2200
4dff95dc
SB
2201 if (!c)
2202 return;
b2476490 2203
4dff95dc 2204 clk_dump_one(s, c, level);
b2476490 2205
4dff95dc 2206 hlist_for_each_entry(child, &c->children, child_node) {
4d327586 2207 seq_putc(s, ',');
4dff95dc 2208 clk_dump_subtree(s, child, level + 1);
b2476490
MT
2209 }
2210
4d327586 2211 seq_putc(s, '}');
b2476490
MT
2212}
2213
4dff95dc 2214static int clk_dump(struct seq_file *s, void *data)
4e88f3de 2215{
4dff95dc
SB
2216 struct clk_core *c;
2217 bool first_node = true;
2218 struct hlist_head **lists = (struct hlist_head **)s->private;
4e88f3de 2219
4d327586 2220 seq_putc(s, '{');
4dff95dc 2221 clk_prepare_lock();
035a61c3 2222
4dff95dc
SB
2223 for (; *lists; lists++) {
2224 hlist_for_each_entry(c, *lists, child_node) {
2225 if (!first_node)
4d327586 2226 seq_putc(s, ',');
4dff95dc
SB
2227 first_node = false;
2228 clk_dump_subtree(s, c, 0);
2229 }
2230 }
4e88f3de 2231
4dff95dc 2232 clk_prepare_unlock();
4e88f3de 2233
70e9f4dd 2234 seq_puts(s, "}\n");
4dff95dc 2235 return 0;
4e88f3de 2236}
4e88f3de 2237
4dff95dc
SB
2238
2239static int clk_dump_open(struct inode *inode, struct file *file)
b2476490 2240{
4dff95dc
SB
2241 return single_open(file, clk_dump, inode->i_private);
2242}
b2476490 2243
4dff95dc
SB
2244static const struct file_operations clk_dump_fops = {
2245 .open = clk_dump_open,
2246 .read = seq_read,
2247 .llseek = seq_lseek,
2248 .release = single_release,
2249};
89ac8d7a 2250
92031575
PDS
2251static int possible_parents_dump(struct seq_file *s, void *data)
2252{
2253 struct clk_core *core = s->private;
2254 int i;
2255
2256 for (i = 0; i < core->num_parents - 1; i++)
2257 seq_printf(s, "%s ", core->parent_names[i]);
2258
2259 seq_printf(s, "%s\n", core->parent_names[i]);
2260
2261 return 0;
2262}
2263
2264static int possible_parents_open(struct inode *inode, struct file *file)
2265{
2266 return single_open(file, possible_parents_dump, inode->i_private);
2267}
2268
2269static const struct file_operations possible_parents_fops = {
2270 .open = possible_parents_open,
2271 .read = seq_read,
2272 .llseek = seq_lseek,
2273 .release = single_release,
2274};
2275
4dff95dc
SB
2276static int clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
2277{
2278 struct dentry *d;
2279 int ret = -ENOMEM;
b2476490 2280
4dff95dc
SB
2281 if (!core || !pdentry) {
2282 ret = -EINVAL;
b2476490 2283 goto out;
4dff95dc 2284 }
b2476490 2285
4dff95dc
SB
2286 d = debugfs_create_dir(core->name, pdentry);
2287 if (!d)
b61c43c0 2288 goto out;
b61c43c0 2289
4dff95dc
SB
2290 core->dentry = d;
2291
2292 d = debugfs_create_u32("clk_rate", S_IRUGO, core->dentry,
2293 (u32 *)&core->rate);
2294 if (!d)
2295 goto err_out;
2296
2297 d = debugfs_create_u32("clk_accuracy", S_IRUGO, core->dentry,
2298 (u32 *)&core->accuracy);
2299 if (!d)
2300 goto err_out;
2301
2302 d = debugfs_create_u32("clk_phase", S_IRUGO, core->dentry,
2303 (u32 *)&core->phase);
2304 if (!d)
2305 goto err_out;
031dcc9b 2306
4dff95dc
SB
2307 d = debugfs_create_x32("clk_flags", S_IRUGO, core->dentry,
2308 (u32 *)&core->flags);
2309 if (!d)
2310 goto err_out;
031dcc9b 2311
4dff95dc
SB
2312 d = debugfs_create_u32("clk_prepare_count", S_IRUGO, core->dentry,
2313 (u32 *)&core->prepare_count);
2314 if (!d)
2315 goto err_out;
b2476490 2316
4dff95dc
SB
2317 d = debugfs_create_u32("clk_enable_count", S_IRUGO, core->dentry,
2318 (u32 *)&core->enable_count);
2319 if (!d)
2320 goto err_out;
b2476490 2321
4dff95dc
SB
2322 d = debugfs_create_u32("clk_notifier_count", S_IRUGO, core->dentry,
2323 (u32 *)&core->notifier_count);
2324 if (!d)
2325 goto err_out;
b2476490 2326
92031575
PDS
2327 if (core->num_parents > 1) {
2328 d = debugfs_create_file("clk_possible_parents", S_IRUGO,
2329 core->dentry, core, &possible_parents_fops);
2330 if (!d)
2331 goto err_out;
2332 }
2333
4dff95dc
SB
2334 if (core->ops->debug_init) {
2335 ret = core->ops->debug_init(core->hw, core->dentry);
2336 if (ret)
2337 goto err_out;
5279fc40 2338 }
b2476490 2339
4dff95dc
SB
2340 ret = 0;
2341 goto out;
b2476490 2342
4dff95dc
SB
2343err_out:
2344 debugfs_remove_recursive(core->dentry);
2345 core->dentry = NULL;
2346out:
b2476490
MT
2347 return ret;
2348}
035a61c3
TV
2349
2350/**
6e5ab41b
SB
2351 * clk_debug_register - add a clk node to the debugfs clk directory
2352 * @core: the clk being added to the debugfs clk directory
035a61c3 2353 *
6e5ab41b
SB
2354 * Dynamically adds a clk to the debugfs clk directory if debugfs has been
2355 * initialized. Otherwise it bails out early since the debugfs clk directory
4dff95dc 2356 * will be created lazily by clk_debug_init as part of a late_initcall.
035a61c3 2357 */
4dff95dc 2358static int clk_debug_register(struct clk_core *core)
035a61c3 2359{
4dff95dc 2360 int ret = 0;
035a61c3 2361
4dff95dc
SB
2362 mutex_lock(&clk_debug_lock);
2363 hlist_add_head(&core->debug_node, &clk_debug_list);
2364
2365 if (!inited)
2366 goto unlock;
2367
2368 ret = clk_debug_create_one(core, rootdir);
2369unlock:
2370 mutex_unlock(&clk_debug_lock);
2371
2372 return ret;
035a61c3 2373}
b2476490 2374
4dff95dc 2375 /**
6e5ab41b
SB
2376 * clk_debug_unregister - remove a clk node from the debugfs clk directory
2377 * @core: the clk being removed from the debugfs clk directory
e59c5371 2378 *
6e5ab41b
SB
2379 * Dynamically removes a clk and all its child nodes from the
2380 * debugfs clk directory if clk->dentry points to debugfs created by
706d5c73 2381 * clk_debug_register in __clk_core_init.
e59c5371 2382 */
4dff95dc 2383static void clk_debug_unregister(struct clk_core *core)
e59c5371 2384{
4dff95dc
SB
2385 mutex_lock(&clk_debug_lock);
2386 hlist_del_init(&core->debug_node);
2387 debugfs_remove_recursive(core->dentry);
2388 core->dentry = NULL;
2389 mutex_unlock(&clk_debug_lock);
2390}
e59c5371 2391
4dff95dc
SB
2392struct dentry *clk_debugfs_add_file(struct clk_hw *hw, char *name, umode_t mode,
2393 void *data, const struct file_operations *fops)
2394{
2395 struct dentry *d = NULL;
e59c5371 2396
4dff95dc
SB
2397 if (hw->core->dentry)
2398 d = debugfs_create_file(name, mode, hw->core->dentry, data,
2399 fops);
e59c5371 2400
4dff95dc
SB
2401 return d;
2402}
2403EXPORT_SYMBOL_GPL(clk_debugfs_add_file);
e59c5371 2404
4dff95dc 2405/**
6e5ab41b 2406 * clk_debug_init - lazily populate the debugfs clk directory
4dff95dc 2407 *
6e5ab41b
SB
2408 * clks are often initialized very early during boot before memory can be
 2409 * dynamically allocated and well before debugfs is set up. This function
 2410 * populates the debugfs clk directory once at boot-time when we know that
 2411 * debugfs is set up. It should only be called once at boot-time; all other
 2412 * clks added dynamically will be registered with clk_debug_register.
4dff95dc
SB
2413 */
2414static int __init clk_debug_init(void)
2415{
2416 struct clk_core *core;
2417 struct dentry *d;
dfc202ea 2418
4dff95dc 2419 rootdir = debugfs_create_dir("clk", NULL);
e59c5371 2420
4dff95dc
SB
2421 if (!rootdir)
2422 return -ENOMEM;
dfc202ea 2423
4dff95dc
SB
2424 d = debugfs_create_file("clk_summary", S_IRUGO, rootdir, &all_lists,
2425 &clk_summary_fops);
2426 if (!d)
2427 return -ENOMEM;
e59c5371 2428
4dff95dc
SB
2429 d = debugfs_create_file("clk_dump", S_IRUGO, rootdir, &all_lists,
2430 &clk_dump_fops);
2431 if (!d)
2432 return -ENOMEM;
e59c5371 2433
4dff95dc
SB
2434 d = debugfs_create_file("clk_orphan_summary", S_IRUGO, rootdir,
2435 &orphan_list, &clk_summary_fops);
2436 if (!d)
2437 return -ENOMEM;
e59c5371 2438
4dff95dc
SB
2439 d = debugfs_create_file("clk_orphan_dump", S_IRUGO, rootdir,
2440 &orphan_list, &clk_dump_fops);
2441 if (!d)
2442 return -ENOMEM;
e59c5371 2443
4dff95dc
SB
2444 mutex_lock(&clk_debug_lock);
2445 hlist_for_each_entry(core, &clk_debug_list, debug_node)
2446 clk_debug_create_one(core, rootdir);
e59c5371 2447
4dff95dc
SB
2448 inited = 1;
2449 mutex_unlock(&clk_debug_lock);
e59c5371 2450
4dff95dc
SB
2451 return 0;
2452}
2453late_initcall(clk_debug_init);
2454#else
2455static inline int clk_debug_register(struct clk_core *core) { return 0; }
2456static inline void clk_debug_reparent(struct clk_core *core,
2457 struct clk_core *new_parent)
035a61c3 2458{
035a61c3 2459}
4dff95dc 2460static inline void clk_debug_unregister(struct clk_core *core)
3d3801ef 2461{
3d3801ef 2462}
4dff95dc 2463#endif
3d3801ef 2464
b2476490 2465/**
be45ebf2 2466 * __clk_core_init - initialize the data structures in a struct clk_core
d35c80c2 2467 * @core: clk_core being initialized
b2476490 2468 *
035a61c3 2469 * Initializes the lists in struct clk_core, queries the hardware for the
b2476490 2470 * parent and rate and sets them both.
b2476490 2471 */
be45ebf2 2472static int __clk_core_init(struct clk_core *core)
b2476490 2473{
9a34b453 2474 int i, ret;
035a61c3 2475 struct clk_core *orphan;
b67bfe0d 2476 struct hlist_node *tmp2;
1c8e6004 2477 unsigned long rate;
b2476490 2478
d35c80c2 2479 if (!core)
d1302a36 2480 return -EINVAL;
b2476490 2481
eab89f69 2482 clk_prepare_lock();
b2476490 2483
9a34b453
MS
2484 ret = clk_pm_runtime_get(core);
2485 if (ret)
2486 goto unlock;
2487
b2476490 2488 /* check to see if a clock with this name is already registered */
d6968fca 2489 if (clk_core_lookup(core->name)) {
d1302a36 2490 pr_debug("%s: clk %s already initialized\n",
d6968fca 2491 __func__, core->name);
d1302a36 2492 ret = -EEXIST;
b2476490 2493 goto out;
d1302a36 2494 }
b2476490 2495
d4d7e3dd 2496 /* check that clk_ops are sane. See Documentation/clk.txt */
d6968fca
SB
2497 if (core->ops->set_rate &&
2498 !((core->ops->round_rate || core->ops->determine_rate) &&
2499 core->ops->recalc_rate)) {
c44fccb5
MY
2500 pr_err("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
2501 __func__, core->name);
d1302a36 2502 ret = -EINVAL;
d4d7e3dd
MT
2503 goto out;
2504 }
2505
d6968fca 2506 if (core->ops->set_parent && !core->ops->get_parent) {
c44fccb5
MY
2507 pr_err("%s: %s must implement .get_parent & .set_parent\n",
2508 __func__, core->name);
d1302a36 2509 ret = -EINVAL;
d4d7e3dd
MT
2510 goto out;
2511 }
2512
3c8e77dd
MY
2513 if (core->num_parents > 1 && !core->ops->get_parent) {
 2514 pr_err("%s: %s must implement .get_parent as it has multiple parents\n",
2515 __func__, core->name);
2516 ret = -EINVAL;
2517 goto out;
2518 }
2519
d6968fca
SB
2520 if (core->ops->set_rate_and_parent &&
2521 !(core->ops->set_parent && core->ops->set_rate)) {
c44fccb5 2522 pr_err("%s: %s must implement .set_parent & .set_rate\n",
d6968fca 2523 __func__, core->name);
3fa2252b
SB
2524 ret = -EINVAL;
2525 goto out;
2526 }
2527
b2476490 2528 /* throw a WARN if any entries in parent_names are NULL */
d6968fca
SB
2529 for (i = 0; i < core->num_parents; i++)
2530 WARN(!core->parent_names[i],
b2476490 2531 "%s: invalid NULL in %s's .parent_names\n",
d6968fca 2532 __func__, core->name);
b2476490 2533
d6968fca 2534 core->parent = __clk_init_parent(core);
b2476490
MT
2535
2536 /*
706d5c73
SB
2537 * Populate core->parent if parent has already been clk_core_init'd. If
2538 * parent has not yet been clk_core_init'd then place clk in the orphan
47b0eeb3 2539 * list. If clk doesn't have any parents then place it in the root
b2476490
MT
2540 * clk list.
2541 *
2542 * Every time a new clk is clk_init'd then we walk the list of orphan
2543 * clocks and re-parent any that are children of the clock currently
2544 * being clk_init'd.
2545 */
e6500344 2546 if (core->parent) {
d6968fca
SB
2547 hlist_add_head(&core->child_node,
2548 &core->parent->children);
e6500344 2549 core->orphan = core->parent->orphan;
47b0eeb3 2550 } else if (!core->num_parents) {
d6968fca 2551 hlist_add_head(&core->child_node, &clk_root_list);
e6500344
HS
2552 core->orphan = false;
2553 } else {
d6968fca 2554 hlist_add_head(&core->child_node, &clk_orphan_list);
e6500344
HS
2555 core->orphan = true;
2556 }
b2476490 2557
5279fc40
BB
2558 /*
2559 * Set clk's accuracy. The preferred method is to use
2560 * .recalc_accuracy. For simple clocks and lazy developers the default
2561 * fallback is to use the parent's accuracy. If a clock doesn't have a
2562 * parent (or is orphaned) then accuracy is set to zero (perfect
2563 * clock).
2564 */
d6968fca
SB
2565 if (core->ops->recalc_accuracy)
2566 core->accuracy = core->ops->recalc_accuracy(core->hw,
2567 __clk_get_accuracy(core->parent));
2568 else if (core->parent)
2569 core->accuracy = core->parent->accuracy;
5279fc40 2570 else
d6968fca 2571 core->accuracy = 0;
5279fc40 2572
9824cf73
MR
2573 /*
2574 * Set clk's phase.
2575 * Since a phase is by definition relative to its parent, just
2576 * query the current clock phase, or just assume it's in phase.
2577 */
d6968fca
SB
2578 if (core->ops->get_phase)
2579 core->phase = core->ops->get_phase(core->hw);
9824cf73 2580 else
d6968fca 2581 core->phase = 0;
9824cf73 2582
b2476490
MT
2583 /*
2584 * Set clk's rate. The preferred method is to use .recalc_rate. For
2585 * simple clocks and lazy developers the default fallback is to use the
2586 * parent's rate. If a clock doesn't have a parent (or is orphaned)
2587 * then rate is set to zero.
2588 */
d6968fca
SB
2589 if (core->ops->recalc_rate)
2590 rate = core->ops->recalc_rate(core->hw,
2591 clk_core_get_rate_nolock(core->parent));
2592 else if (core->parent)
2593 rate = core->parent->rate;
b2476490 2594 else
1c8e6004 2595 rate = 0;
d6968fca 2596 core->rate = core->req_rate = rate;
b2476490 2597
c76aacca
JB
2598 /*
2599 * Enable CLK_IS_CRITICAL clocks so newly added critical clocks
2600 * don't get accidentally disabled when walking the orphan tree and
2601 * reparenting clocks
2602 */
2603 if (core->flags & CLK_IS_CRITICAL) {
2604 unsigned long flags;
2605
1d9d2aae
GR
2606 ret = clk_core_prepare(core);
2607 if (ret)
2608 goto out;
c76aacca
JB
2609
2610 flags = clk_enable_lock();
1d9d2aae 2611 ret = clk_core_enable(core);
c76aacca 2612 clk_enable_unlock(flags);
1d9d2aae
GR
2613 if (ret) {
2614 clk_core_unprepare(core);
2615 goto out;
2616 }
c76aacca
JB
2617 }
2618
b2476490 2619 /*
0e8f6e49
MY
 2620 * walk the list of orphan clocks and reparent any that have newly
 2621 * found a parent.
b2476490 2622 */
b67bfe0d 2623 hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
0e8f6e49 2624 struct clk_core *parent = __clk_init_parent(orphan);
1f61e5f1 2625
904e6ead 2626 /*
c76aacca
JB
2627 * We need to use __clk_set_parent_before() and _after() to
 2628 * properly migrate any prepare/enable count of the orphan
2629 * clock. This is important for CLK_IS_CRITICAL clocks, which
2630 * are enabled during init but might not have a parent yet.
904e6ead
MT
2631 */
2632 if (parent) {
019b121c 2633 /* update the clk tree topology */
c76aacca
JB
2634 __clk_set_parent_before(orphan, parent);
2635 __clk_set_parent_after(orphan, parent, NULL);
904e6ead
MT
2636 __clk_recalc_accuracies(orphan);
2637 __clk_recalc_rates(orphan, 0);
2638 }
0e8f6e49 2639 }
b2476490
MT
2640
2641 /*
2642 * optional platform-specific magic
2643 *
2644 * The .init callback is not used by any of the basic clock types, but
2645 * exists for weird hardware that must perform initialization magic.
2646 * Please consider other ways of solving initialization problems before
24ee1a08 2647 * using this callback, as its use is discouraged.
b2476490 2648 */
d6968fca
SB
2649 if (core->ops->init)
2650 core->ops->init(core->hw);
b2476490 2651
d6968fca 2652 kref_init(&core->ref);
b2476490 2653out:
9a34b453
MS
2654 clk_pm_runtime_put(core);
2655unlock:
eab89f69 2656 clk_prepare_unlock();
b2476490 2657
89f7e9de 2658 if (!ret)
d6968fca 2659 clk_debug_register(core);
89f7e9de 2660
d1302a36 2661 return ret;
b2476490
MT
2662}
2663
035a61c3
TV
2664struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id,
2665 const char *con_id)
0197b3ea 2666{
0197b3ea
SK
2667 struct clk *clk;
2668
035a61c3 2669 /* This is to allow this function to be chained to others */
c1de1357 2670 if (IS_ERR_OR_NULL(hw))
8a23133c 2671 return ERR_CAST(hw);
0197b3ea 2672
035a61c3
TV
2673 clk = kzalloc(sizeof(*clk), GFP_KERNEL);
2674 if (!clk)
2675 return ERR_PTR(-ENOMEM);
2676
2677 clk->core = hw->core;
2678 clk->dev_id = dev_id;
253160a8 2679 clk->con_id = kstrdup_const(con_id, GFP_KERNEL);
1c8e6004
TV
2680 clk->max_rate = ULONG_MAX;
2681
2682 clk_prepare_lock();
50595f8b 2683 hlist_add_head(&clk->clks_node, &hw->core->clks);
1c8e6004 2684 clk_prepare_unlock();
0197b3ea
SK
2685
2686 return clk;
2687}
035a61c3 2688
c3beae99 2689/* keep in sync with __clk_put */
73e0e496 2690void __clk_free_clk(struct clk *clk)
1c8e6004
TV
2691{
2692 clk_prepare_lock();
50595f8b 2693 hlist_del(&clk->clks_node);
1c8e6004
TV
2694 clk_prepare_unlock();
2695
253160a8 2696 kfree_const(clk->con_id);
1c8e6004
TV
2697 kfree(clk);
2698}
0197b3ea 2699
293ba3b4
SB
2700/**
2701 * clk_register - allocate a new clock, register it and return an opaque cookie
2702 * @dev: device that is registering this clock
2703 * @hw: link to hardware-specific clock data
2704 *
2705 * clk_register is the primary interface for populating the clock tree with new
2706 * clock nodes. It returns a pointer to the newly allocated struct clk which
a59a5163 2707 * cannot be dereferenced by driver code but may be used in conjunction with the
293ba3b4
SB
2708 * rest of the clock API. In the event of an error clk_register will return an
2709 * error code; drivers must test for an error code after calling clk_register.
2710 */
2711struct clk *clk_register(struct device *dev, struct clk_hw *hw)
b2476490 2712{
d1302a36 2713 int i, ret;
d6968fca 2714 struct clk_core *core;
293ba3b4 2715
d6968fca
SB
2716 core = kzalloc(sizeof(*core), GFP_KERNEL);
2717 if (!core) {
293ba3b4
SB
2718 ret = -ENOMEM;
2719 goto fail_out;
2720 }
b2476490 2721
d6968fca
SB
2722 core->name = kstrdup_const(hw->init->name, GFP_KERNEL);
2723 if (!core->name) {
0197b3ea
SK
2724 ret = -ENOMEM;
2725 goto fail_name;
2726 }
0690e62b
JB
2727
2728 if (WARN_ON(!hw->init->ops)) {
2729 ret = -EINVAL;
2730 goto fail_ops;
2731 }
d6968fca 2732 core->ops = hw->init->ops;
0690e62b 2733
9a34b453
MS
2734 if (dev && pm_runtime_enabled(dev))
2735 core->dev = dev;
ac2df527 2736 if (dev && dev->driver)
d6968fca
SB
2737 core->owner = dev->driver->owner;
2738 core->hw = hw;
2739 core->flags = hw->init->flags;
2740 core->num_parents = hw->init->num_parents;
9783c0d9
SB
2741 core->min_rate = 0;
2742 core->max_rate = ULONG_MAX;
d6968fca 2743 hw->core = core;
b2476490 2744
d1302a36 2745 /* allocate local copy in case parent_names is __initdata */
d6968fca 2746 core->parent_names = kcalloc(core->num_parents, sizeof(char *),
96a7ed90 2747 GFP_KERNEL);
d1302a36 2748
d6968fca 2749 if (!core->parent_names) {
d1302a36
MT
2750 ret = -ENOMEM;
2751 goto fail_parent_names;
2752 }
2753
2754
2755 /* copy each string name in case parent_names is __initdata */
d6968fca
SB
2756 for (i = 0; i < core->num_parents; i++) {
2757 core->parent_names[i] = kstrdup_const(hw->init->parent_names[i],
0197b3ea 2758 GFP_KERNEL);
d6968fca 2759 if (!core->parent_names[i]) {
d1302a36
MT
2760 ret = -ENOMEM;
2761 goto fail_parent_names_copy;
2762 }
2763 }
2764
176d1169
MY
2765 /* avoid unnecessary string look-ups of clk_core's possible parents. */
2766 core->parents = kcalloc(core->num_parents, sizeof(*core->parents),
2767 GFP_KERNEL);
2768 if (!core->parents) {
2769 ret = -ENOMEM;
2770 goto fail_parents;
 2771 }
2772
d6968fca 2773 INIT_HLIST_HEAD(&core->clks);
1c8e6004 2774
035a61c3
TV
2775 hw->clk = __clk_create_clk(hw, NULL, NULL);
2776 if (IS_ERR(hw->clk)) {
035a61c3 2777 ret = PTR_ERR(hw->clk);
176d1169 2778 goto fail_parents;
035a61c3
TV
2779 }
2780
be45ebf2 2781 ret = __clk_core_init(core);
d1302a36 2782 if (!ret)
035a61c3 2783 return hw->clk;
b2476490 2784
1c8e6004 2785 __clk_free_clk(hw->clk);
035a61c3 2786 hw->clk = NULL;
b2476490 2787
176d1169
MY
2788fail_parents:
2789 kfree(core->parents);
d1302a36
MT
2790fail_parent_names_copy:
2791 while (--i >= 0)
d6968fca
SB
2792 kfree_const(core->parent_names[i]);
2793 kfree(core->parent_names);
d1302a36 2794fail_parent_names:
0690e62b 2795fail_ops:
d6968fca 2796 kfree_const(core->name);
0197b3ea 2797fail_name:
d6968fca 2798 kfree(core);
d1302a36
MT
2799fail_out:
2800 return ERR_PTR(ret);
b2476490
MT
2801}
2802EXPORT_SYMBOL_GPL(clk_register);
2803
4143804c
SB
2804/**
2805 * clk_hw_register - register a clk_hw and return an error code
2806 * @dev: device that is registering this clock
2807 * @hw: link to hardware-specific clock data
2808 *
2809 * clk_hw_register is the primary interface for populating the clock tree with
2810 * new clock nodes. It returns an integer equal to zero indicating success or
2811 * less than zero indicating failure. Drivers must test for an error code after
2812 * calling clk_hw_register().
2813 */
2814int clk_hw_register(struct device *dev, struct clk_hw *hw)
2815{
2816 return PTR_ERR_OR_ZERO(clk_register(dev, hw));
2817}
2818EXPORT_SYMBOL_GPL(clk_hw_register);
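/*
 * Illustrative provider-side sketch, not part of clk.c: a hypothetical
 * fixed-frequency oscillator. Implementing only .recalc_rate satisfies
 * the sanity checks in __clk_core_init() for a read-only clock.
 */
static unsigned long foo_osc_recalc_rate(struct clk_hw *hw,
					 unsigned long parent_rate)
{
	return 19200000;	/* assumed 19.2 MHz crystal */
}

static const struct clk_ops foo_osc_ops = {
	.recalc_rate = foo_osc_recalc_rate,
};

static const struct clk_init_data foo_osc_init = {
	.name = "foo_osc",
	.ops = &foo_osc_ops,
	.num_parents = 0,
};

static struct clk_hw foo_osc_hw = {
	.init = &foo_osc_init,
};

static int foo_register_osc(struct device *dev)
{
	return clk_hw_register(dev, &foo_osc_hw);
}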
2819
6e5ab41b 2820/* Free memory allocated for a clock. */
fcb0ee6a
SN
2821static void __clk_release(struct kref *ref)
2822{
d6968fca
SB
2823 struct clk_core *core = container_of(ref, struct clk_core, ref);
2824 int i = core->num_parents;
fcb0ee6a 2825
496eadf8
KK
2826 lockdep_assert_held(&prepare_lock);
2827
d6968fca 2828 kfree(core->parents);
fcb0ee6a 2829 while (--i >= 0)
d6968fca 2830 kfree_const(core->parent_names[i]);
fcb0ee6a 2831
d6968fca
SB
2832 kfree(core->parent_names);
2833 kfree_const(core->name);
2834 kfree(core);
fcb0ee6a
SN
2835}
2836
2837/*
2838 * Empty clk_ops for unregistered clocks. These are used temporarily
2839 * after clk_unregister() was called on a clock and until last clock
2840 * consumer calls clk_put() and the struct clk object is freed.
2841 */
2842static int clk_nodrv_prepare_enable(struct clk_hw *hw)
2843{
2844 return -ENXIO;
2845}
2846
2847static void clk_nodrv_disable_unprepare(struct clk_hw *hw)
2848{
2849 WARN_ON_ONCE(1);
2850}
2851
2852static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate,
2853 unsigned long parent_rate)
2854{
2855 return -ENXIO;
2856}
2857
2858static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index)
2859{
2860 return -ENXIO;
2861}
2862
2863static const struct clk_ops clk_nodrv_ops = {
2864 .enable = clk_nodrv_prepare_enable,
2865 .disable = clk_nodrv_disable_unprepare,
2866 .prepare = clk_nodrv_prepare_enable,
2867 .unprepare = clk_nodrv_disable_unprepare,
2868 .set_rate = clk_nodrv_set_rate,
2869 .set_parent = clk_nodrv_set_parent,
2870};
2871
1df5c939
MB
2872/**
2873 * clk_unregister - unregister a currently registered clock
2874 * @clk: clock to unregister
1df5c939 2875 */
fcb0ee6a
SN
2876void clk_unregister(struct clk *clk)
2877{
2878 unsigned long flags;
2879
6314b679
SB
2880 if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
2881 return;
2882
035a61c3 2883 clk_debug_unregister(clk->core);
fcb0ee6a
SN
2884
2885 clk_prepare_lock();
2886
035a61c3
TV
2887 if (clk->core->ops == &clk_nodrv_ops) {
2888 pr_err("%s: unregistered clock: %s\n", __func__,
2889 clk->core->name);
4106a3d9 2890 goto unlock;
fcb0ee6a
SN
2891 }
2892 /*
2893 * Assign empty clock ops for consumers that might still hold
2894 * a reference to this clock.
2895 */
2896 flags = clk_enable_lock();
035a61c3 2897 clk->core->ops = &clk_nodrv_ops;
fcb0ee6a
SN
2898 clk_enable_unlock(flags);
2899
035a61c3
TV
2900 if (!hlist_empty(&clk->core->children)) {
2901 struct clk_core *child;
874f224c 2902 struct hlist_node *t;
fcb0ee6a
SN
2903
2904 /* Reparent all children to the orphan list. */
035a61c3
TV
2905 hlist_for_each_entry_safe(child, t, &clk->core->children,
2906 child_node)
2907 clk_core_set_parent(child, NULL);
fcb0ee6a
SN
2908 }
2909
035a61c3 2910 hlist_del_init(&clk->core->child_node);
fcb0ee6a 2911
035a61c3 2912 if (clk->core->prepare_count)
fcb0ee6a 2913 pr_warn("%s: unregistering prepared clock: %s\n",
035a61c3
TV
2914 __func__, clk->core->name);
2915 kref_put(&clk->core->ref, __clk_release);
4106a3d9 2916unlock:
fcb0ee6a
SN
2917 clk_prepare_unlock();
2918}
1df5c939
MB
2919EXPORT_SYMBOL_GPL(clk_unregister);
2920
4143804c
SB
2921/**
2922 * clk_hw_unregister - unregister a currently registered clk_hw
2923 * @hw: hardware-specific clock data to unregister
2924 */
2925void clk_hw_unregister(struct clk_hw *hw)
2926{
2927 clk_unregister(hw->clk);
2928}
2929EXPORT_SYMBOL_GPL(clk_hw_unregister);
2930
46c8773a
SB
2931static void devm_clk_release(struct device *dev, void *res)
2932{
293ba3b4 2933 clk_unregister(*(struct clk **)res);
46c8773a
SB
2934}
2935
4143804c
SB
2936static void devm_clk_hw_release(struct device *dev, void *res)
2937{
2938 clk_hw_unregister(*(struct clk_hw **)res);
2939}
2940
46c8773a
SB
2941/**
2942 * devm_clk_register - resource managed clk_register()
2943 * @dev: device that is registering this clock
2944 * @hw: link to hardware-specific clock data
2945 *
2946 * Managed clk_register(). Clocks returned from this function are
2947 * automatically clk_unregister()ed on driver detach. See clk_register() for
2948 * more information.
2949 */
2950struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
2951{
2952 struct clk *clk;
293ba3b4 2953 struct clk **clkp;
46c8773a 2954
293ba3b4
SB
2955 clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
2956 if (!clkp)
46c8773a
SB
2957 return ERR_PTR(-ENOMEM);
2958
293ba3b4
SB
2959 clk = clk_register(dev, hw);
2960 if (!IS_ERR(clk)) {
2961 *clkp = clk;
2962 devres_add(dev, clkp);
46c8773a 2963 } else {
293ba3b4 2964 devres_free(clkp);
46c8773a
SB
2965 }
2966
2967 return clk;
2968}
2969EXPORT_SYMBOL_GPL(devm_clk_register);
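/*
 * Illustrative sketch, not part of clk.c: the managed variant drops the
 * explicit unregister in error and remove paths; the hypothetical
 * foo_osc_hw from the sketch above is released on driver detach.
 */
static int foo_probe_managed(struct device *dev)
{
	struct clk *clk = devm_clk_register(dev, &foo_osc_hw);

	return PTR_ERR_OR_ZERO(clk);
}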
2970
4143804c
SB
2971/**
2972 * devm_clk_hw_register - resource managed clk_hw_register()
2973 * @dev: device that is registering this clock
2974 * @hw: link to hardware-specific clock data
2975 *
c47265ad 2976 * Managed clk_hw_register(). Clocks registered by this function are
4143804c
SB
2977 * automatically clk_hw_unregister()ed on driver detach. See clk_hw_register()
2978 * for more information.
2979 */
2980int devm_clk_hw_register(struct device *dev, struct clk_hw *hw)
2981{
2982 struct clk_hw **hwp;
2983 int ret;
2984
2985 hwp = devres_alloc(devm_clk_hw_release, sizeof(*hwp), GFP_KERNEL);
2986 if (!hwp)
2987 return -ENOMEM;
2988
2989 ret = clk_hw_register(dev, hw);
2990 if (!ret) {
2991 *hwp = hw;
2992 devres_add(dev, hwp);
2993 } else {
2994 devres_free(hwp);
2995 }
2996
2997 return ret;
2998}
2999EXPORT_SYMBOL_GPL(devm_clk_hw_register);
3000
46c8773a
SB
3001static int devm_clk_match(struct device *dev, void *res, void *data)
3002{
3003 struct clk *c = res;
3004 if (WARN_ON(!c))
3005 return 0;
3006 return c == data;
3007}
3008
4143804c
SB
3009static int devm_clk_hw_match(struct device *dev, void *res, void *data)
3010{
3011 struct clk_hw *hw = res;
3012
3013 if (WARN_ON(!hw))
3014 return 0;
3015 return hw == data;
3016}
3017
46c8773a
SB
3018/**
3019 * devm_clk_unregister - resource managed clk_unregister()
3020 * @clk: clock to unregister
3021 *
3022 * Deallocate a clock allocated with devm_clk_register(). Normally
3023 * this function will not need to be called and the resource management
3024 * code will ensure that the resource is freed.
3025 */
3026void devm_clk_unregister(struct device *dev, struct clk *clk)
3027{
3028 WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk));
3029}
3030EXPORT_SYMBOL_GPL(devm_clk_unregister);
3031
4143804c
SB
3032/**
3033 * devm_clk_hw_unregister - resource managed clk_hw_unregister()
3034 * @dev: device that is unregistering the hardware-specific clock data
3035 * @hw: link to hardware-specific clock data
3036 *
3037 * Unregister a clk_hw registered with devm_clk_hw_register(). Normally
3038 * this function will not need to be called and the resource management
3039 * code will ensure that the resource is freed.
3040 */
3041void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw)
3042{
3043 WARN_ON(devres_release(dev, devm_clk_hw_release, devm_clk_hw_match,
3044 hw));
3045}
3046EXPORT_SYMBOL_GPL(devm_clk_hw_unregister);
3047
ac2df527
SN
3048/*
3049 * clkdev helpers
3050 */
3051int __clk_get(struct clk *clk)
3052{
035a61c3
TV
3053 struct clk_core *core = !clk ? NULL : clk->core;
3054
3055 if (core) {
3056 if (!try_module_get(core->owner))
00efcb1c 3057 return 0;
ac2df527 3058
035a61c3 3059 kref_get(&core->ref);
00efcb1c 3060 }
ac2df527
SN
3061 return 1;
3062}
3063
c3beae99 3064/* keep in sync with __clk_free_clk */
ac2df527
SN
3065void __clk_put(struct clk *clk)
3066{
10cdfe54
TV
3067 struct module *owner;
3068
00efcb1c 3069 if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
ac2df527
SN
3070 return;
3071
fcb0ee6a 3072 clk_prepare_lock();
1c8e6004 3073
50595f8b 3074 hlist_del(&clk->clks_node);
ec02ace8
TV
3075 if (clk->min_rate > clk->core->req_rate ||
3076 clk->max_rate < clk->core->req_rate)
3077 clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
3078
1c8e6004
TV
3079 owner = clk->core->owner;
3080 kref_put(&clk->core->ref, __clk_release);
3081
fcb0ee6a
SN
3082 clk_prepare_unlock();
3083
10cdfe54 3084 module_put(owner);
035a61c3 3085
c3beae99 3086 kfree_const(clk->con_id);
035a61c3 3087 kfree(clk);
ac2df527
SN
3088}
3089
b2476490
MT
3090/*** clk rate change notifiers ***/
3091
3092/**
3093 * clk_notifier_register - add a clk rate change notifier
3094 * @clk: struct clk * to watch
3095 * @nb: struct notifier_block * with callback info
3096 *
3097 * Request notification when clk's rate changes. This uses an SRCU
3098 * notifier because we want it to block and notifier unregistrations are
3099 * uncommon. The callbacks associated with the notifier must not
3100 * re-enter into the clk framework by calling any top-level clk APIs;
3101 * this will cause a nested prepare_lock mutex.
3102 *
198bb594
MY
3103 * In all notification cases (pre, post and abort rate change) the original
3104 * clock rate is passed to the callback via struct clk_notifier_data.old_rate
3105 * and the new frequency is passed via struct clk_notifier_data.new_rate.
b2476490 3106 *
b2476490
MT
3107 * clk_notifier_register() must be called from non-atomic context.
3108 * Returns -EINVAL if called with null arguments, -ENOMEM upon
3109 * allocation failure; otherwise, passes along the return value of
3110 * srcu_notifier_chain_register().
3111 */
3112int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
3113{
3114 struct clk_notifier *cn;
3115 int ret = -ENOMEM;
3116
3117 if (!clk || !nb)
3118 return -EINVAL;
3119
eab89f69 3120 clk_prepare_lock();
b2476490
MT
3121
3122 /* search the list of notifiers for this clk */
3123 list_for_each_entry(cn, &clk_notifier_list, node)
3124 if (cn->clk == clk)
3125 break;
3126
3127 /* if clk wasn't in the notifier list, allocate new clk_notifier */
3128 if (cn->clk != clk) {
1808a320 3129 cn = kzalloc(sizeof(*cn), GFP_KERNEL);
b2476490
MT
3130 if (!cn)
3131 goto out;
3132
3133 cn->clk = clk;
3134 srcu_init_notifier_head(&cn->notifier_head);
3135
3136 list_add(&cn->node, &clk_notifier_list);
3137 }
3138
3139 ret = srcu_notifier_chain_register(&cn->notifier_head, nb);
3140
035a61c3 3141 clk->core->notifier_count++;
b2476490
MT
3142
3143out:
eab89f69 3144 clk_prepare_unlock();
b2476490
MT
3145
3146 return ret;
3147}
3148EXPORT_SYMBOL_GPL(clk_notifier_register);
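/*
 * Illustrative sketch, not part of clk.c: a rate-change notifier. The
 * void *data argument carries a struct clk_notifier_data; returning
 * NOTIFY_BAD from the PRE_RATE_CHANGE event would abort the change.
 */
static int foo_clk_notifier_cb(struct notifier_block *nb,
			       unsigned long event, void *data)
{
	struct clk_notifier_data *ndata = data;

	if (event == PRE_RATE_CHANGE)
		pr_debug("rate changing: %lu -> %lu\n",
			 ndata->old_rate, ndata->new_rate);

	return NOTIFY_OK;
}

static struct notifier_block foo_clk_nb = {
	.notifier_call = foo_clk_notifier_cb,
};

/* a hypothetical consumer would then call:
 *	ret = clk_notifier_register(clk, &foo_clk_nb);
 */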
3149
3150/**
3151 * clk_notifier_unregister - remove a clk rate change notifier
3152 * @clk: struct clk *
3153 * @nb: struct notifier_block * with callback info
3154 *
 3155 * Requests no further notification for changes to 'clk' and frees the
 3156 * memory allocated in clk_notifier_register.
3157 *
3158 * Returns -EINVAL if called with null arguments; otherwise, passes
3159 * along the return value of srcu_notifier_chain_unregister().
3160 */
3161int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
3162{
3163 struct clk_notifier *cn = NULL;
3164 int ret = -EINVAL;
3165
3166 if (!clk || !nb)
3167 return -EINVAL;
3168
eab89f69 3169 clk_prepare_lock();
b2476490
MT
3170
3171 list_for_each_entry(cn, &clk_notifier_list, node)
3172 if (cn->clk == clk)
3173 break;
3174
3175 if (cn->clk == clk) {
3176 ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
3177
035a61c3 3178 clk->core->notifier_count--;
b2476490
MT
3179
3180 /* XXX the notifier code should handle this better */
3181 if (!cn->notifier_head.head) {
3182 srcu_cleanup_notifier_head(&cn->notifier_head);
72b5322f 3183 list_del(&cn->node);
b2476490
MT
3184 kfree(cn);
3185 }
3186
3187 } else {
3188 ret = -ENOENT;
3189 }
3190
eab89f69 3191 clk_prepare_unlock();
b2476490
MT
3192
3193 return ret;
3194}
3195EXPORT_SYMBOL_GPL(clk_notifier_unregister);
766e6a4e
GL
3196
3197#ifdef CONFIG_OF
3198/**
3199 * struct of_clk_provider - Clock provider registration structure
3200 * @link: Entry in global list of clock providers
3201 * @node: Pointer to device tree node of clock provider
3202 * @get: Get clock callback. Returns NULL or a struct clk for the
3203 * given clock specifier
3204 * @data: context pointer to be passed into @get callback
3205 */
3206struct of_clk_provider {
3207 struct list_head link;
3208
3209 struct device_node *node;
3210 struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
0861e5b8 3211 struct clk_hw *(*get_hw)(struct of_phandle_args *clkspec, void *data);
766e6a4e
GL
3212 void *data;
3213};
3214
f2f6c255
PG
3215static const struct of_device_id __clk_of_table_sentinel
3216 __used __section(__clk_of_table_end);
3217
766e6a4e 3218static LIST_HEAD(of_clk_providers);
d6782c26
SN
3219static DEFINE_MUTEX(of_clk_mutex);
3220
766e6a4e
GL
3221struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
3222 void *data)
3223{
3224 return data;
3225}
3226EXPORT_SYMBOL_GPL(of_clk_src_simple_get);
3227
0861e5b8
SB
3228struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec, void *data)
3229{
3230 return data;
3231}
3232EXPORT_SYMBOL_GPL(of_clk_hw_simple_get);
3233
494bfec9
SG
3234struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
3235{
3236 struct clk_onecell_data *clk_data = data;
3237 unsigned int idx = clkspec->args[0];
3238
3239 if (idx >= clk_data->clk_num) {
7e96353c 3240 pr_err("%s: invalid clock index %u\n", __func__, idx);
494bfec9
SG
3241 return ERR_PTR(-EINVAL);
3242 }
3243
3244 return clk_data->clks[idx];
3245}
3246EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);
3247
0861e5b8
SB
3248struct clk_hw *
3249of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data)
3250{
3251 struct clk_hw_onecell_data *hw_data = data;
3252 unsigned int idx = clkspec->args[0];
3253
3254 if (idx >= hw_data->num) {
3255 pr_err("%s: invalid index %u\n", __func__, idx);
3256 return ERR_PTR(-EINVAL);
3257 }
3258
3259 return hw_data->hws[idx];
3260}
3261EXPORT_SYMBOL_GPL(of_clk_hw_onecell_get);
3262
766e6a4e
GL
3263/**
3264 * of_clk_add_provider() - Register a clock provider for a node
3265 * @np: Device node pointer associated with clock provider
3266 * @clk_src_get: callback for decoding clock
3267 * @data: context pointer for @clk_src_get callback.
3268 */
3269int of_clk_add_provider(struct device_node *np,
3270 struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
3271 void *data),
3272 void *data)
3273{
3274 struct of_clk_provider *cp;
86be408b 3275 int ret;
766e6a4e 3276
1808a320 3277 cp = kzalloc(sizeof(*cp), GFP_KERNEL);
766e6a4e
GL
3278 if (!cp)
3279 return -ENOMEM;
3280
3281 cp->node = of_node_get(np);
3282 cp->data = data;
3283 cp->get = clk_src_get;
3284
d6782c26 3285 mutex_lock(&of_clk_mutex);
766e6a4e 3286 list_add(&cp->link, &of_clk_providers);
d6782c26 3287 mutex_unlock(&of_clk_mutex);
16673931 3288 pr_debug("Added clock from %pOF\n", np);
766e6a4e 3289
86be408b
SN
3290 ret = of_clk_set_defaults(np, true);
3291 if (ret < 0)
3292 of_clk_del_provider(np);
3293
3294 return ret;
766e6a4e
GL
3295}
3296EXPORT_SYMBOL_GPL(of_clk_add_provider);
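/*
 * Illustrative sketch, not part of clk.c: a one-cell DT provider that
 * maps "#clock-cells = <1>" specifiers onto an array of clocks via
 * of_clk_src_onecell_get(); the names and the count are hypothetical.
 */
static struct clk *foo_clks[2];

static struct clk_onecell_data foo_clk_data = {
	.clks = foo_clks,
	.clk_num = ARRAY_SIZE(foo_clks),
};

static void __init foo_clk_setup(struct device_node *np)
{
	/* foo_clks[] is assumed filled by earlier clk_register() calls */
	of_clk_add_provider(np, of_clk_src_onecell_get, &foo_clk_data);
}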
3297
0861e5b8
SB
3298/**
3299 * of_clk_add_hw_provider() - Register a clock provider for a node
3300 * @np: Device node pointer associated with clock provider
3301 * @get: callback for decoding clk_hw
3302 * @data: context pointer for @get callback.
3303 */
3304int of_clk_add_hw_provider(struct device_node *np,
3305 struct clk_hw *(*get)(struct of_phandle_args *clkspec,
3306 void *data),
3307 void *data)
3308{
3309 struct of_clk_provider *cp;
3310 int ret;
3311
3312 cp = kzalloc(sizeof(*cp), GFP_KERNEL);
3313 if (!cp)
3314 return -ENOMEM;
3315
3316 cp->node = of_node_get(np);
3317 cp->data = data;
3318 cp->get_hw = get;
3319
3320 mutex_lock(&of_clk_mutex);
3321 list_add(&cp->link, &of_clk_providers);
3322 mutex_unlock(&of_clk_mutex);
16673931 3323 pr_debug("Added clk_hw provider from %pOF\n", np);
0861e5b8
SB
3324
3325 ret = of_clk_set_defaults(np, true);
3326 if (ret < 0)
3327 of_clk_del_provider(np);
3328
3329 return ret;
3330}
3331EXPORT_SYMBOL_GPL(of_clk_add_hw_provider);
3332
aa795c41
SB
3333static void devm_of_clk_release_provider(struct device *dev, void *res)
3334{
3335 of_clk_del_provider(*(struct device_node **)res);
3336}
3337
3338int devm_of_clk_add_hw_provider(struct device *dev,
3339 struct clk_hw *(*get)(struct of_phandle_args *clkspec,
3340 void *data),
3341 void *data)
3342{
3343 struct device_node **ptr, *np;
3344 int ret;
3345
3346 ptr = devres_alloc(devm_of_clk_release_provider, sizeof(*ptr),
3347 GFP_KERNEL);
3348 if (!ptr)
3349 return -ENOMEM;
3350
3351 np = dev->of_node;
3352 ret = of_clk_add_hw_provider(np, get, data);
3353 if (!ret) {
3354 *ptr = np;
3355 devres_add(dev, ptr);
3356 } else {
3357 devres_free(ptr);
3358 }
3359
3360 return ret;
3361}
3362EXPORT_SYMBOL_GPL(devm_of_clk_add_hw_provider);
3363
766e6a4e
GL
3364/**
3365 * of_clk_del_provider() - Remove a previously registered clock provider
3366 * @np: Device node pointer associated with clock provider
3367 */
3368void of_clk_del_provider(struct device_node *np)
3369{
3370 struct of_clk_provider *cp;
3371
d6782c26 3372 mutex_lock(&of_clk_mutex);
766e6a4e
GL
3373 list_for_each_entry(cp, &of_clk_providers, link) {
3374 if (cp->node == np) {
3375 list_del(&cp->link);
3376 of_node_put(cp->node);
3377 kfree(cp);
3378 break;
3379 }
3380 }
d6782c26 3381 mutex_unlock(&of_clk_mutex);
766e6a4e
GL
3382}
3383EXPORT_SYMBOL_GPL(of_clk_del_provider);
3384
aa795c41
SB
3385static int devm_clk_provider_match(struct device *dev, void *res, void *data)
3386{
3387 struct device_node **np = res;
3388
3389 if (WARN_ON(!np || !*np))
3390 return 0;
3391
3392 return *np == data;
3393}
3394
3395void devm_of_clk_del_provider(struct device *dev)
3396{
3397 int ret;
3398
3399 ret = devres_release(dev, devm_of_clk_release_provider,
3400 devm_clk_provider_match, dev->of_node);
3401
3402 WARN_ON(ret);
3403}
3404EXPORT_SYMBOL(devm_of_clk_del_provider);
3405
0861e5b8
SB
3406static struct clk_hw *
3407__of_clk_get_hw_from_provider(struct of_clk_provider *provider,
3408 struct of_phandle_args *clkspec)
3409{
3410 struct clk *clk;
0861e5b8 3411
74002fcd
SB
3412 if (provider->get_hw)
3413 return provider->get_hw(clkspec, provider->data);
0861e5b8 3414
74002fcd
SB
3415 clk = provider->get(clkspec, provider->data);
3416 if (IS_ERR(clk))
3417 return ERR_CAST(clk);
3418 return __clk_get_hw(clk);
0861e5b8
SB
3419}
3420
73e0e496
SB
3421struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
3422 const char *dev_id, const char *con_id)
766e6a4e
GL
3423{
3424 struct of_clk_provider *provider;
a34cd466 3425 struct clk *clk = ERR_PTR(-EPROBE_DEFER);
f155d15b 3426 struct clk_hw *hw;
766e6a4e 3427
306c342f
SB
3428 if (!clkspec)
3429 return ERR_PTR(-EINVAL);
3430
766e6a4e 3431 /* Check if we have such a provider in our array */
306c342f 3432 mutex_lock(&of_clk_mutex);
766e6a4e 3433 list_for_each_entry(provider, &of_clk_providers, link) {
f155d15b 3434 if (provider->node == clkspec->np) {
0861e5b8 3435 hw = __of_clk_get_hw_from_provider(provider, clkspec);
0861e5b8 3436 clk = __clk_create_clk(hw, dev_id, con_id);
f155d15b 3437 }
73e0e496 3438
f155d15b
SB
3439 if (!IS_ERR(clk)) {
3440 if (!__clk_get(clk)) {
73e0e496
SB
3441 __clk_free_clk(clk);
3442 clk = ERR_PTR(-ENOENT);
3443 }
3444
766e6a4e 3445 break;
73e0e496 3446 }
766e6a4e 3447 }
306c342f 3448 mutex_unlock(&of_clk_mutex);
d6782c26
SN
3449
3450 return clk;
3451}
3452
306c342f
SB
3453/**
3454 * of_clk_get_from_provider() - Lookup a clock from a clock provider
3455 * @clkspec: pointer to a clock specifier data structure
3456 *
3457 * This function looks up a struct clk from the registered list of clock
 3458 * providers; the input is a clock specifier data structure as returned
3459 * from the of_parse_phandle_with_args() function call.
3460 */
d6782c26
SN
3461struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
3462{
306c342f 3463 return __of_clk_get_from_provider(clkspec, NULL, __func__);
766e6a4e 3464}
fb4dd222 3465EXPORT_SYMBOL_GPL(of_clk_get_from_provider);
766e6a4e 3466
929e7f3b
SB
3467/**
3468 * of_clk_get_parent_count() - Count the number of clocks a device node has
3469 * @np: device node to count
3470 *
3471 * Returns: The number of clocks that are possible parents of this node
3472 */
3473unsigned int of_clk_get_parent_count(struct device_node *np)
f6102742 3474{
929e7f3b
SB
3475 int count;
3476
3477 count = of_count_phandle_with_args(np, "clocks", "#clock-cells");
3478 if (count < 0)
3479 return 0;
3480
3481 return count;
f6102742
MT
3482}
3483EXPORT_SYMBOL_GPL(of_clk_get_parent_count);
3484
766e6a4e
GL
3485const char *of_clk_get_parent_name(struct device_node *np, int index)
3486{
3487 struct of_phandle_args clkspec;
7a0fc1a3 3488 struct property *prop;
766e6a4e 3489 const char *clk_name;
7a0fc1a3
BD
3490 const __be32 *vp;
3491 u32 pv;
766e6a4e 3492 int rc;
7a0fc1a3 3493 int count;
0a4807c2 3494 struct clk *clk;
766e6a4e 3495
766e6a4e
GL
3496 rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
3497 &clkspec);
3498 if (rc)
3499 return NULL;
3500
7a0fc1a3
BD
3501 index = clkspec.args_count ? clkspec.args[0] : 0;
3502 count = 0;
3503
3504 /* if there is an indices property, use it to transfer the index
3505 * specified into an array offset for the clock-output-names property.
3506 */
3507 of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) {
3508 if (index == pv) {
3509 index = count;
3510 break;
3511 }
3512 count++;
3513 }
8da411cc
MY
3514 /* We went off the end of 'clock-indices' without finding it */
3515 if (prop && !vp)
3516 return NULL;
7a0fc1a3 3517
766e6a4e 3518 if (of_property_read_string_index(clkspec.np, "clock-output-names",
7a0fc1a3 3519 index,
0a4807c2
SB
3520 &clk_name) < 0) {
3521 /*
3522 * Best effort to get the name if the clock has been
3523 * registered with the framework. If the clock isn't
3524 * registered, we return the node name as the name of
3525 * the clock as long as #clock-cells = 0.
3526 */
3527 clk = of_clk_get_from_provider(&clkspec);
3528 if (IS_ERR(clk)) {
3529 if (clkspec.args_count == 0)
3530 clk_name = clkspec.np->name;
3531 else
3532 clk_name = NULL;
3533 } else {
3534 clk_name = __clk_get_name(clk);
3535 clk_put(clk);
3536 }
3537 }
3538
766e6a4e
GL
3539
3540 of_node_put(clkspec.np);
3541 return clk_name;
3542}
3543EXPORT_SYMBOL_GPL(of_clk_get_parent_name);
3544
2e61dfb3
DN
3545/**
3546 * of_clk_parent_fill() - Fill @parents with names of @np's parents and return
3547 * number of parents
3548 * @np: Device node pointer associated with clock provider
 3549 * @parents: pointer to char array that holds the parents' names
3550 * @size: size of the @parents array
3551 *
3552 * Return: number of parents for the clock node.
3553 */
3554int of_clk_parent_fill(struct device_node *np, const char **parents,
3555 unsigned int size)
3556{
3557 unsigned int i = 0;
3558
3559 while (i < size && (parents[i] = of_clk_get_parent_name(np, i)) != NULL)
3560 i++;
3561
3562 return i;
3563}
3564EXPORT_SYMBOL_GPL(of_clk_parent_fill);
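/*
 * Illustrative sketch, not part of clk.c: a mux provider collecting its
 * DT parents with of_clk_parent_fill() before registration; the array
 * size is an assumed hardware limit.
 */
static void __init foo_mux_setup(struct device_node *np)
{
	const char *parents[4];
	int num_parents;

	num_parents = of_clk_parent_fill(np, parents, ARRAY_SIZE(parents));
	if (num_parents < 2)
		return;	/* a mux needs at least two inputs */

	/* parents[] can now feed clk_init_data.parent_names */
}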
3565
1771b10d
GC
3566struct clock_provider {
3567 of_clk_init_cb_t clk_init_cb;
3568 struct device_node *np;
3569 struct list_head node;
3570};
3571
1771b10d
GC
3572/*
3573 * This function looks for a parent clock. If there is one, then it
3574 * checks that the provider for this parent clock was initialized, in
3575 * this case the parent clock will be ready.
3576 */
3577static int parent_ready(struct device_node *np)
3578{
3579 int i = 0;
3580
3581 while (true) {
3582 struct clk *clk = of_clk_get(np, i);
3583
 3584 /* this parent is ready, we can check the next one */
3585 if (!IS_ERR(clk)) {
3586 clk_put(clk);
3587 i++;
3588 continue;
3589 }
3590
3591 /* at least one parent is not ready, we exit now */
3592 if (PTR_ERR(clk) == -EPROBE_DEFER)
3593 return 0;
3594
3595 /*
 3596 * Here we make the assumption that the device tree is
 3597 * written correctly. So an error means that there are
 3598 * no more parents. As we didn't exit yet, the previous
 3599 * parents are ready. If there are no clock parents at
 3600 * all, there is nothing to wait for, and we can consider
 3601 * their absence as being ready.
3602 */
3603 return 1;
3604 }
3605}
3606
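/*
 * A minimal sketch of what parent_ready() reports, assuming a
 * hypothetical node whose first parent's provider is already
 * registered while the second one's is not:
 *
 *	parent_ready(np);
 *
 * of_clk_get(np, 0) succeeds, of_clk_get(np, 1) returns
 * -EPROBE_DEFER, so parent_ready() returns 0. Once every provider
 * referenced by the node's "clocks" property has been initialized,
 * of_clk_get() stops returning -EPROBE_DEFER and parent_ready()
 * returns 1.
 */
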
/**
 * of_clk_detect_critical() - set CLK_IS_CRITICAL flag from Device Tree
 * @np: Device node pointer associated with clock provider
 * @index: clock index
 * @flags: pointer to clk_core->flags
 *
 * Detects if the clock-critical property exists and, if so, sets the
 * corresponding CLK_IS_CRITICAL flag.
 *
 * Do not use this function. It exists only for legacy Device Tree
 * bindings, such as the outdated one-clock-per-node style. Those
 * bindings typically put all clock data into .dts and the Linux
 * driver has no clock data, thus making it impossible to set this
 * flag correctly from the driver. Only those drivers may call
 * of_clk_detect_critical() from their setup functions.
 *
 * Return: error code or zero on success
 */
int of_clk_detect_critical(struct device_node *np,
			   int index, unsigned long *flags)
{
	struct property *prop;
	const __be32 *cur;
	uint32_t idx;

	if (!np || !flags)
		return -EINVAL;

	of_property_for_each_u32(np, "clock-critical", prop, cur, idx)
		if (index == idx)
			*flags |= CLK_IS_CRITICAL;

	return 0;
}

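/*
 * A minimal sketch of a legacy one-clock-per-node setup function
 * using of_clk_detect_critical(); the "bar" name and the fixed rate
 * are assumptions for illustration:
 *
 *	static void __init bar_clk_setup(struct device_node *np)
 *	{
 *		unsigned long flags = 0;
 *
 *		of_clk_detect_critical(np, 0, &flags);
 *		clk_register_fixed_rate(NULL, np->name, NULL, flags,
 *					100000000);
 *	}
 *
 * If the node's "clock-critical" property lists index 0, the clock
 * is registered with CLK_IS_CRITICAL set and the framework keeps it
 * prepared and enabled.
 */
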
/**
 * of_clk_init() - Scan and init clock providers from the DT
 * @matches: array of compatible values and init functions for providers.
 *
 * This function scans the device tree for matching clock providers
 * and calls their initialization functions, ordering the calls so
 * that, as far as possible, each provider is initialized only after
 * the providers of its parent clocks.
 */
void __init of_clk_init(const struct of_device_id *matches)
{
	const struct of_device_id *match;
	struct device_node *np;
	struct clock_provider *clk_provider, *next;
	bool is_init_done;
	bool force = false;
	LIST_HEAD(clk_provider_list);

	if (!matches)
		matches = &__clk_of_table;

	/* First prepare the list of the clocks providers */
	for_each_matching_node_and_match(np, matches, &match) {
		struct clock_provider *parent;

		if (!of_device_is_available(np))
			continue;

		parent = kzalloc(sizeof(*parent), GFP_KERNEL);
		if (!parent) {
			list_for_each_entry_safe(clk_provider, next,
						 &clk_provider_list, node) {
				list_del(&clk_provider->node);
				of_node_put(clk_provider->np);
				kfree(clk_provider);
			}
			of_node_put(np);
			return;
		}

		parent->clk_init_cb = match->data;
		parent->np = of_node_get(np);
		list_add_tail(&parent->node, &clk_provider_list);
	}

	while (!list_empty(&clk_provider_list)) {
		is_init_done = false;
		list_for_each_entry_safe(clk_provider, next,
					 &clk_provider_list, node) {
			if (force || parent_ready(clk_provider->np)) {

				/* Don't populate platform devices */
				of_node_set_flag(clk_provider->np,
						 OF_POPULATED);

				clk_provider->clk_init_cb(clk_provider->np);
				of_clk_set_defaults(clk_provider->np, true);

				list_del(&clk_provider->node);
				of_node_put(clk_provider->np);
				kfree(clk_provider);
				is_init_done = true;
			}
		}

		/*
		 * We didn't manage to initialize any of the remaining
		 * providers during the last iteration, so now we
		 * initialize all of them unconditionally, in case a
		 * missing clock parent was not actually mandatory.
		 */
		if (!is_init_done)
			force = true;
	}
}
#endif
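
/*
 * A minimal sketch of how of_clk_init() is typically reached; the
 * "foo" names, compatible string and rate are assumptions for
 * illustration. A provider registers itself with CLK_OF_DECLARE():
 *
 *	static void __init foo_clk_init(struct device_node *np)
 *	{
 *		struct clk *clk;
 *
 *		clk = clk_register_fixed_rate(NULL, "foo", NULL, 0,
 *					      24000000);
 *		of_clk_add_provider(np, of_clk_src_simple_get, clk);
 *	}
 *	CLK_OF_DECLARE(foo_clk, "vendor,foo-clk", foo_clk_init);
 *
 * Architecture code then calls of_clk_init(NULL) early in boot
 * (commonly from its time_init() path), which walks the built-in
 * __clk_of_table and invokes each matching init callback in
 * dependency order, as implemented above.
 */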