// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
 *
 * Standard functionality for the common clock API.  See Documentation/driver-api/clk.rst
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clk/clk-conf.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/clkdev.h>

#include "clk.h"

static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);

static struct task_struct *prepare_owner;
static struct task_struct *enable_owner;

static int prepare_refcnt;
static int enable_refcnt;

static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);

/***    private data structures    ***/

struct clk_parent_map {
	const struct clk_hw	*hw;
	struct clk_core		*core;
	const char		*fw_name;
	const char		*name;
	int			index;
};

struct clk_core {
	const char		*name;
	const struct clk_ops	*ops;
	struct clk_hw		*hw;
	struct module		*owner;
	struct device		*dev;
	struct device_node	*of_node;
	struct clk_core		*parent;
	struct clk_parent_map	*parents;
	u8			num_parents;
	u8			new_parent_index;
	unsigned long		rate;
	unsigned long		req_rate;
	unsigned long		new_rate;
	struct clk_core		*new_parent;
	struct clk_core		*new_child;
	unsigned long		flags;
	bool			orphan;
	bool			rpm_enabled;
	unsigned int		enable_count;
	unsigned int		prepare_count;
	unsigned int		protect_count;
	unsigned long		min_rate;
	unsigned long		max_rate;
	unsigned long		accuracy;
	int			phase;
	struct clk_duty		duty;
	struct hlist_head	children;
	struct hlist_node	child_node;
	struct hlist_head	clks;
	unsigned int		notifier_count;
#ifdef CONFIG_DEBUG_FS
	struct dentry		*dentry;
	struct hlist_node	debug_node;
#endif
	struct kref		ref;
};

#define CREATE_TRACE_POINTS
#include <trace/events/clk.h>
struct clk {
	struct clk_core	*core;
	struct device *dev;
	const char *dev_id;
	const char *con_id;
	unsigned long min_rate;
	unsigned long max_rate;
	unsigned int exclusive_count;
	struct hlist_node clks_node;
};

/***    runtime pm    ***/
static int clk_pm_runtime_get(struct clk_core *core)
{
	int ret;

	if (!core->rpm_enabled)
		return 0;

	ret = pm_runtime_get_sync(core->dev);
	return ret < 0 ? ret : 0;
}

static void clk_pm_runtime_put(struct clk_core *core)
{
	if (!core->rpm_enabled)
		return;

	pm_runtime_put_sync(core->dev);
}

/***    locking    ***/
static void clk_prepare_lock(void)
{
	if (!mutex_trylock(&prepare_lock)) {
		if (prepare_owner == current) {
			prepare_refcnt++;
			return;
		}
		mutex_lock(&prepare_lock);
	}
	WARN_ON_ONCE(prepare_owner != NULL);
	WARN_ON_ONCE(prepare_refcnt != 0);
	prepare_owner = current;
	prepare_refcnt = 1;
}

static void clk_prepare_unlock(void)
{
	WARN_ON_ONCE(prepare_owner != current);
	WARN_ON_ONCE(prepare_refcnt == 0);

	if (--prepare_refcnt)
		return;
	prepare_owner = NULL;
	mutex_unlock(&prepare_lock);
}

static unsigned long clk_enable_lock(void)
	__acquires(enable_lock)
{
	unsigned long flags;

	/*
	 * On UP systems, spin_trylock_irqsave() always returns true, even if
	 * we already hold the lock. So, in that case, we rely only on
	 * reference counting.
	 */
	if (!IS_ENABLED(CONFIG_SMP) ||
	    !spin_trylock_irqsave(&enable_lock, flags)) {
		if (enable_owner == current) {
			enable_refcnt++;
			__acquire(enable_lock);
			if (!IS_ENABLED(CONFIG_SMP))
				local_save_flags(flags);
			return flags;
		}
		spin_lock_irqsave(&enable_lock, flags);
	}
	WARN_ON_ONCE(enable_owner != NULL);
	WARN_ON_ONCE(enable_refcnt != 0);
	enable_owner = current;
	enable_refcnt = 1;
	return flags;
}

static void clk_enable_unlock(unsigned long flags)
	__releases(enable_lock)
{
	WARN_ON_ONCE(enable_owner != current);
	WARN_ON_ONCE(enable_refcnt == 0);

	if (--enable_refcnt) {
		__release(enable_lock);
		return;
	}
	enable_owner = NULL;
	spin_unlock_irqrestore(&enable_lock, flags);
}

static bool clk_core_rate_is_protected(struct clk_core *core)
{
	return core->protect_count;
}

static bool clk_core_is_prepared(struct clk_core *core)
{
	bool ret = false;

	/*
	 * .is_prepared is optional for clocks that can prepare;
	 * fall back to the software usage counter if it is missing.
	 */
	if (!core->ops->is_prepared)
		return core->prepare_count;

	if (!clk_pm_runtime_get(core)) {
		ret = core->ops->is_prepared(core->hw);
		clk_pm_runtime_put(core);
	}

	return ret;
}

static bool clk_core_is_enabled(struct clk_core *core)
{
	bool ret = false;

	/*
	 * .is_enabled is only mandatory for clocks that gate;
	 * fall back to the software usage counter if .is_enabled is missing.
	 */
	if (!core->ops->is_enabled)
		return core->enable_count;

	/*
	 * Check if clock controller's device is runtime active before
	 * calling .is_enabled callback. If not, assume that clock is
	 * disabled, because we might be called from atomic context, from
	 * which pm_runtime_get() is not allowed.
	 * This function is called mainly from clk_disable_unused_subtree,
	 * which ensures proper runtime pm activation of controller before
	 * taking enable spinlock, but the below check is needed if one tries
	 * to call it from other places.
	 */
	if (core->rpm_enabled) {
		pm_runtime_get_noresume(core->dev);
		if (!pm_runtime_active(core->dev)) {
			ret = false;
			goto done;
		}
	}

	ret = core->ops->is_enabled(core->hw);
done:
	if (core->rpm_enabled)
		pm_runtime_put(core->dev);

	return ret;
}

/***    helper functions   ***/

const char *__clk_get_name(const struct clk *clk)
{
	return !clk ? NULL : clk->core->name;
}
EXPORT_SYMBOL_GPL(__clk_get_name);

const char *clk_hw_get_name(const struct clk_hw *hw)
{
	return hw->core->name;
}
EXPORT_SYMBOL_GPL(clk_hw_get_name);

struct clk_hw *__clk_get_hw(struct clk *clk)
{
	return !clk ? NULL : clk->core->hw;
}
EXPORT_SYMBOL_GPL(__clk_get_hw);

unsigned int clk_hw_get_num_parents(const struct clk_hw *hw)
{
	return hw->core->num_parents;
}
EXPORT_SYMBOL_GPL(clk_hw_get_num_parents);

struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw)
{
	return hw->core->parent ? hw->core->parent->hw : NULL;
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent);

static struct clk_core *__clk_lookup_subtree(const char *name,
					     struct clk_core *core)
{
	struct clk_core *child;
	struct clk_core *ret;

	if (!strcmp(core->name, name))
		return core;

	hlist_for_each_entry(child, &core->children, child_node) {
		ret = __clk_lookup_subtree(name, child);
		if (ret)
			return ret;
	}

	return NULL;
}

static struct clk_core *clk_core_lookup(const char *name)
{
	struct clk_core *root_clk;
	struct clk_core *ret;

	if (!name)
		return NULL;

	/* search the 'proper' clk tree first */
	hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	/* if not found, then search the orphan tree */
	hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	return NULL;
}

#ifdef CONFIG_OF
static int of_parse_clkspec(const struct device_node *np, int index,
			    const char *name, struct of_phandle_args *out_args);
static struct clk_hw *
of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec);
#else
static inline int of_parse_clkspec(const struct device_node *np, int index,
				   const char *name,
				   struct of_phandle_args *out_args)
{
	return -ENOENT;
}
static inline struct clk_hw *
of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
{
	return ERR_PTR(-ENOENT);
}
#endif

/**
 * clk_core_get - Find the clk_core parent of a clk
 * @core: clk to find parent of
 * @p_index: parent index to search for
 *
 * This is the preferred method for clk providers to find the parent of a
 * clk when that parent is external to the clk controller. The parent_names
 * array is indexed and treated as a local name matching a string in the device
 * node's 'clock-names' property or as the 'con_id' matching the device's
 * dev_name() in a clk_lookup. This allows clk providers to use their own
 * namespace instead of looking for a globally unique parent string.
 *
 * For example the following DT snippet would allow a clock registered by the
 * clock-controller@c001 that has a clk_init_data::parent_data array
 * with 'xtal' in the 'name' member to find the clock provided by the
 * clock-controller@f00abcd without needing to get the globally unique name of
 * the xtal clk.
 *
 *	parent: clock-controller@f00abcd {
 *		reg = <0xf00abcd 0xabcd>;
 *		#clock-cells = <0>;
 *	};
 *
 *	clock-controller@c001 {
 *		reg = <0xc001 0xf00d>;
 *		clocks = <&parent>;
 *		clock-names = "xtal";
 *		#clock-cells = <1>;
 *	};
 *
 * Returns: -ENOENT when the provider can't be found or the clk doesn't
 * exist in the provider or the name can't be found in the DT node or
 * in a clkdev lookup. NULL when the provider knows about the clk but it
 * isn't provided on this system.
 * A valid clk_core pointer when the clk can be found in the provider.
 */
static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
{
	const char *name = core->parents[p_index].fw_name;
	int index = core->parents[p_index].index;
	struct clk_hw *hw = ERR_PTR(-ENOENT);
	struct device *dev = core->dev;
	const char *dev_id = dev ? dev_name(dev) : NULL;
	struct device_node *np = core->of_node;
	struct of_phandle_args clkspec;

	if (np && (name || index >= 0) &&
	    !of_parse_clkspec(np, index, name, &clkspec)) {
		hw = of_clk_get_hw_from_clkspec(&clkspec);
		of_node_put(clkspec.np);
	} else if (name) {
		/*
		 * If the DT search above couldn't find the provider fallback to
		 * looking up via clkdev based clk_lookups.
		 */
		hw = clk_find_hw(dev_id, name);
	}

	if (IS_ERR(hw))
		return ERR_CAST(hw);

	return hw->core;
}

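/*
 * Illustrative sketch (not part of this file): a provider matching the DT
 * snippet above could describe its parent with a clk_init_data::parent_data
 * entry, letting clk_core_get() resolve "xtal" within the device's own
 * namespace. All names below are hypothetical.
 */
#if 0
static const struct clk_parent_data example_parent_data[] = {
	/* matches the "xtal" entry of the node's clock-names property */
	{ .fw_name = "xtal", .index = 0 },
};

static const struct clk_init_data example_init = {
	.name = "example-gate",
	.ops = &clk_gate_ops,
	.parent_data = example_parent_data,
	.num_parents = ARRAY_SIZE(example_parent_data),
};
#endif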

static void clk_core_fill_parent_index(struct clk_core *core, u8 index)
{
	struct clk_parent_map *entry = &core->parents[index];
	struct clk_core *parent = ERR_PTR(-ENOENT);

	if (entry->hw) {
		parent = entry->hw->core;
		/*
		 * We have a direct reference but it isn't registered yet?
		 * Orphan it and let clk_reparent() update the orphan status
		 * when the parent is registered.
		 */
		if (!parent)
			parent = ERR_PTR(-EPROBE_DEFER);
	} else {
		parent = clk_core_get(core, index);
		if (IS_ERR(parent) && PTR_ERR(parent) == -ENOENT && entry->name)
			parent = clk_core_lookup(entry->name);
	}

	/* Only cache it if it's not an error */
	if (!IS_ERR(parent))
		entry->core = parent;
}

static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core,
						     u8 index)
{
	if (!core || index >= core->num_parents || !core->parents)
		return NULL;

	if (!core->parents[index].core)
		clk_core_fill_parent_index(core, index);

	return core->parents[index].core;
}

struct clk_hw *
clk_hw_get_parent_by_index(const struct clk_hw *hw, unsigned int index)
{
	struct clk_core *parent;

	parent = clk_core_get_parent_by_index(hw->core, index);

	return !parent ? NULL : parent->hw;
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent_by_index);

unsigned int __clk_get_enable_count(struct clk *clk)
{
	return !clk ? 0 : clk->core->enable_count;
}

static unsigned long clk_core_get_rate_nolock(struct clk_core *core)
{
	if (!core)
		return 0;

	if (!core->num_parents || core->parent)
		return core->rate;

	/*
	 * Clk must have a parent because num_parents > 0 but the parent isn't
	 * known yet. Best to return 0 as the rate of this clk until we can
	 * properly recalc the rate based on the parent's rate.
	 */
	return 0;
}

unsigned long clk_hw_get_rate(const struct clk_hw *hw)
{
	return clk_core_get_rate_nolock(hw->core);
}
EXPORT_SYMBOL_GPL(clk_hw_get_rate);

static unsigned long __clk_get_accuracy(struct clk_core *core)
{
	if (!core)
		return 0;

	return core->accuracy;
}

unsigned long __clk_get_flags(struct clk *clk)
{
	return !clk ? 0 : clk->core->flags;
}
EXPORT_SYMBOL_GPL(__clk_get_flags);

unsigned long clk_hw_get_flags(const struct clk_hw *hw)
{
	return hw->core->flags;
}
EXPORT_SYMBOL_GPL(clk_hw_get_flags);

bool clk_hw_is_prepared(const struct clk_hw *hw)
{
	return clk_core_is_prepared(hw->core);
}
EXPORT_SYMBOL_GPL(clk_hw_is_prepared);

bool clk_hw_rate_is_protected(const struct clk_hw *hw)
{
	return clk_core_rate_is_protected(hw->core);
}
EXPORT_SYMBOL_GPL(clk_hw_rate_is_protected);

bool clk_hw_is_enabled(const struct clk_hw *hw)
{
	return clk_core_is_enabled(hw->core);
}
EXPORT_SYMBOL_GPL(clk_hw_is_enabled);

bool __clk_is_enabled(struct clk *clk)
{
	if (!clk)
		return false;

	return clk_core_is_enabled(clk->core);
}
EXPORT_SYMBOL_GPL(__clk_is_enabled);

static bool mux_is_better_rate(unsigned long rate, unsigned long now,
			       unsigned long best, unsigned long flags)
{
	if (flags & CLK_MUX_ROUND_CLOSEST)
		return abs(now - rate) < abs(best - rate);

	return now <= rate && now > best;
}

int clk_mux_determine_rate_flags(struct clk_hw *hw,
				 struct clk_rate_request *req,
				 unsigned long flags)
{
	struct clk_core *core = hw->core, *parent, *best_parent = NULL;
	int i, num_parents, ret;
	unsigned long best = 0;
	struct clk_rate_request parent_req = *req;

	/* if NO_REPARENT flag set, pass through to current parent */
	if (core->flags & CLK_SET_RATE_NO_REPARENT) {
		parent = core->parent;
		if (core->flags & CLK_SET_RATE_PARENT) {
			ret = __clk_determine_rate(parent ? parent->hw : NULL,
						   &parent_req);
			if (ret)
				return ret;

			best = parent_req.rate;
		} else if (parent) {
			best = clk_core_get_rate_nolock(parent);
		} else {
			best = clk_core_get_rate_nolock(core);
		}

		goto out;
	}

	/* find the parent that can provide the fastest rate <= rate */
	num_parents = core->num_parents;
	for (i = 0; i < num_parents; i++) {
		parent = clk_core_get_parent_by_index(core, i);
		if (!parent)
			continue;

		if (core->flags & CLK_SET_RATE_PARENT) {
			parent_req = *req;
			ret = __clk_determine_rate(parent->hw, &parent_req);
			if (ret)
				continue;
		} else {
			parent_req.rate = clk_core_get_rate_nolock(parent);
		}

		if (mux_is_better_rate(req->rate, parent_req.rate,
				       best, flags)) {
			best_parent = parent;
			best = parent_req.rate;
		}
	}

	if (!best_parent)
		return -EINVAL;

out:
	if (best_parent)
		req->best_parent_hw = best_parent->hw;
	req->best_parent_rate = best;
	req->rate = best;

	return 0;
}
EXPORT_SYMBOL_GPL(clk_mux_determine_rate_flags);

struct clk *__clk_lookup(const char *name)
{
	struct clk_core *core = clk_core_lookup(name);

	return !core ? NULL : core->hw->clk;
}

static void clk_core_get_boundaries(struct clk_core *core,
				    unsigned long *min_rate,
				    unsigned long *max_rate)
{
	struct clk *clk_user;

	*min_rate = core->min_rate;
	*max_rate = core->max_rate;

	hlist_for_each_entry(clk_user, &core->clks, clks_node)
		*min_rate = max(*min_rate, clk_user->min_rate);

	hlist_for_each_entry(clk_user, &core->clks, clks_node)
		*max_rate = min(*max_rate, clk_user->max_rate);
}

void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
			   unsigned long max_rate)
{
	hw->core->min_rate = min_rate;
	hw->core->max_rate = max_rate;
}
EXPORT_SYMBOL_GPL(clk_hw_set_rate_range);

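/*
 * Illustrative sketch (not part of this file): a provider can constrain the
 * range its hardware supports at registration time; consumer requests are
 * then clamped via clk_core_get_boundaries(). The values are hypothetical.
 */
#if 0
static void example_register_bounds(struct clk_hw *hw)
{
	/* hardware PLL only locks between 600 MHz and 1.2 GHz */
	clk_hw_set_rate_range(hw, 600000000UL, 1200000000UL);
}
#endif
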
/*
 * __clk_mux_determine_rate - clk_ops::determine_rate implementation for a mux type clk
 * @hw: mux type clk to determine rate on
 * @req: rate request, also used to return preferred parent and frequencies
 *
 * Helper for finding best parent to provide a given frequency. This can be used
 * directly as a determine_rate callback (e.g. for a mux), or from a more
 * complex clock that may combine a mux with other operations.
 *
 * Returns: 0 on success, -EERROR value on error
 */
int __clk_mux_determine_rate(struct clk_hw *hw,
			     struct clk_rate_request *req)
{
	return clk_mux_determine_rate_flags(hw, req, 0);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);

int __clk_mux_determine_rate_closest(struct clk_hw *hw,
				     struct clk_rate_request *req)
{
	return clk_mux_determine_rate_flags(hw, req, CLK_MUX_ROUND_CLOSEST);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);
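
/*
 * Illustrative sketch (not part of this file): wiring the helper above into a
 * mux's clk_ops so the framework picks the best parent for a rate request.
 * The set_parent/get_parent callbacks named here are hypothetical driver code.
 */
#if 0
static const struct clk_ops example_mux_ops = {
	.determine_rate = __clk_mux_determine_rate,
	.set_parent = example_mux_set_parent,
	.get_parent = example_mux_get_parent,
};
#endif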

/***        clk api        ***/

static void clk_core_rate_unprotect(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (WARN(core->protect_count == 0,
	    "%s already unprotected\n", core->name))
		return;

	if (--core->protect_count > 0)
		return;

	clk_core_rate_unprotect(core->parent);
}

static int clk_core_rate_nuke_protect(struct clk_core *core)
{
	int ret;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return -EINVAL;

	if (core->protect_count == 0)
		return 0;

	ret = core->protect_count;
	core->protect_count = 1;
	clk_core_rate_unprotect(core);

	return ret;
}

/**
 * clk_rate_exclusive_put - release exclusivity over clock rate control
 * @clk: the clk over which the exclusivity is released
 *
 * clk_rate_exclusive_put() completes a critical section during which a clock
 * consumer cannot tolerate any other consumer making any operation on the
 * clock which could result in a rate change or rate glitch. Exclusive clocks
 * cannot have their rate changed, either directly or indirectly due to changes
 * further up the parent chain of clocks. As a result, clocks up parent chain
 * also get under exclusive control of the calling consumer.
 *
 * If exclusivity is claimed more than once on a clock, even by the same
 * consumer, the rate effectively gets locked as exclusivity can't be preempted.
 *
 * Calls to clk_rate_exclusive_put() must be balanced with calls to
 * clk_rate_exclusive_get(). Calls to this function may sleep, and do not return
 * error status.
 */
void clk_rate_exclusive_put(struct clk *clk)
{
	if (!clk)
		return;

	clk_prepare_lock();

	/*
	 * if there is something wrong with this consumer protect count, stop
	 * here before messing with the provider
	 */
	if (WARN_ON(clk->exclusive_count <= 0))
		goto out;

	clk_core_rate_unprotect(clk->core);
	clk->exclusive_count--;
out:
	clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_rate_exclusive_put);

static void clk_core_rate_protect(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (core->protect_count == 0)
		clk_core_rate_protect(core->parent);

	core->protect_count++;
}

static void clk_core_rate_restore_protect(struct clk_core *core, int count)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (count == 0)
		return;

	clk_core_rate_protect(core);
	core->protect_count = count;
}

/**
 * clk_rate_exclusive_get - get exclusivity over the clk rate control
 * @clk: the clk over which the exclusivity of rate control is requested
 *
 * clk_rate_exclusive_get() begins a critical section during which a clock
 * consumer cannot tolerate any other consumer making any operation on the
 * clock which could result in a rate change or rate glitch. Exclusive clocks
 * cannot have their rate changed, either directly or indirectly due to changes
 * further up the parent chain of clocks. As a result, clocks up parent chain
 * also get under exclusive control of the calling consumer.
 *
 * If exclusivity is claimed more than once on a clock, even by the same
 * consumer, the rate effectively gets locked as exclusivity can't be preempted.
 *
 * Calls to clk_rate_exclusive_get() should be balanced with calls to
 * clk_rate_exclusive_put(). Calls to this function may sleep.
 * Returns 0 on success, -EERROR otherwise
 */
int clk_rate_exclusive_get(struct clk *clk)
{
	if (!clk)
		return 0;

	clk_prepare_lock();
	clk_core_rate_protect(clk->core);
	clk->exclusive_count++;
	clk_prepare_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(clk_rate_exclusive_get);
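
/*
 * Illustrative sketch (not part of this file): a consumer that must not see
 * its rate glitch brackets the sensitive window with the get/put pair above.
 * The function and clock names are hypothetical.
 */
#if 0
static int example_start_stream(struct clk *audio_clk)
{
	int ret;

	ret = clk_rate_exclusive_get(audio_clk);
	if (ret)
		return ret;

	/* ... stream while no other consumer may change the rate ... */

	clk_rate_exclusive_put(audio_clk);
	return 0;
}
#endif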

static void clk_core_unprepare(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (WARN(core->prepare_count == 0,
	    "%s already unprepared\n", core->name))
		return;

	if (WARN(core->prepare_count == 1 && core->flags & CLK_IS_CRITICAL,
	    "Unpreparing critical %s\n", core->name))
		return;

	if (core->flags & CLK_SET_RATE_GATE)
		clk_core_rate_unprotect(core);

	if (--core->prepare_count > 0)
		return;

	WARN(core->enable_count > 0, "Unpreparing enabled %s\n", core->name);

	trace_clk_unprepare(core);

	if (core->ops->unprepare)
		core->ops->unprepare(core->hw);

	clk_pm_runtime_put(core);

	trace_clk_unprepare_complete(core);
	clk_core_unprepare(core->parent);
}

static void clk_core_unprepare_lock(struct clk_core *core)
{
	clk_prepare_lock();
	clk_core_unprepare(core);
	clk_prepare_unlock();
}

/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: the clk being unprepared
 *
 * clk_unprepare may sleep, which differentiates it from clk_disable. In a
 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
 * if the operation may sleep. One example is a clk which is accessed over
 * I2C. In the complex case a clk gate operation may require a fast and a slow
 * part. It is for this reason that clk_unprepare and clk_disable are not
 * mutually exclusive. In fact clk_disable must be called before clk_unprepare.
 */
void clk_unprepare(struct clk *clk)
{
	if (IS_ERR_OR_NULL(clk))
		return;

	clk_core_unprepare_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_unprepare);

static int clk_core_prepare(struct clk_core *core)
{
	int ret = 0;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	if (core->prepare_count == 0) {
		ret = clk_pm_runtime_get(core);
		if (ret)
			return ret;

		ret = clk_core_prepare(core->parent);
		if (ret)
			goto runtime_put;

		trace_clk_prepare(core);

		if (core->ops->prepare)
			ret = core->ops->prepare(core->hw);

		trace_clk_prepare_complete(core);

		if (ret)
			goto unprepare;
	}

	core->prepare_count++;

	/*
	 * CLK_SET_RATE_GATE is a special case of clock protection
	 * Instead of a consumer claiming exclusive rate control, it is
	 * actually the provider which prevents any consumer from making any
	 * operation which could result in a rate change or rate glitch while
	 * the clock is prepared.
	 */
	if (core->flags & CLK_SET_RATE_GATE)
		clk_core_rate_protect(core);

	return 0;
unprepare:
	clk_core_unprepare(core->parent);
runtime_put:
	clk_pm_runtime_put(core);
	return ret;
}

static int clk_core_prepare_lock(struct clk_core *core)
{
	int ret;

	clk_prepare_lock();
	ret = clk_core_prepare(core);
	clk_prepare_unlock();

	return ret;
}

/**
 * clk_prepare - prepare a clock source
 * @clk: the clk being prepared
 *
 * clk_prepare may sleep, which differentiates it from clk_enable. In a simple
 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
 * operation may sleep. One example is a clk which is accessed over I2C. In
 * the complex case a clk ungate operation may require a fast and a slow part.
 * It is for this reason that clk_prepare and clk_enable are not mutually
 * exclusive. In fact clk_prepare must be called before clk_enable.
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_prepare(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_prepare_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_prepare);

static void clk_core_disable(struct clk_core *core)
{
	lockdep_assert_held(&enable_lock);

	if (!core)
		return;

	if (WARN(core->enable_count == 0, "%s already disabled\n", core->name))
		return;

	if (WARN(core->enable_count == 1 && core->flags & CLK_IS_CRITICAL,
	    "Disabling critical %s\n", core->name))
		return;

	if (--core->enable_count > 0)
		return;

	trace_clk_disable_rcuidle(core);

	if (core->ops->disable)
		core->ops->disable(core->hw);

	trace_clk_disable_complete_rcuidle(core);

	clk_core_disable(core->parent);
}

static void clk_core_disable_lock(struct clk_core *core)
{
	unsigned long flags;

	flags = clk_enable_lock();
	clk_core_disable(core);
	clk_enable_unlock(flags);
}

/**
 * clk_disable - gate a clock
 * @clk: the clk being gated
 *
 * clk_disable must not sleep, which differentiates it from clk_unprepare. In
 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
 * clk if the operation is fast and will never sleep. One example is a
 * SoC-internal clk which is controlled via simple register writes. In the
 * complex case a clk gate operation may require a fast and a slow part. It is
 * for this reason that clk_unprepare and clk_disable are not mutually
 * exclusive. In fact clk_disable must be called before clk_unprepare.
 */
void clk_disable(struct clk *clk)
{
	if (IS_ERR_OR_NULL(clk))
		return;

	clk_core_disable_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_disable);

static int clk_core_enable(struct clk_core *core)
{
	int ret = 0;

	lockdep_assert_held(&enable_lock);

	if (!core)
		return 0;

	if (WARN(core->prepare_count == 0,
	    "Enabling unprepared %s\n", core->name))
		return -ESHUTDOWN;

	if (core->enable_count == 0) {
		ret = clk_core_enable(core->parent);

		if (ret)
			return ret;

		trace_clk_enable_rcuidle(core);

		if (core->ops->enable)
			ret = core->ops->enable(core->hw);

		trace_clk_enable_complete_rcuidle(core);

		if (ret) {
			clk_core_disable(core->parent);
			return ret;
		}
	}

	core->enable_count++;
	return 0;
}

static int clk_core_enable_lock(struct clk_core *core)
{
	unsigned long flags;
	int ret;

	flags = clk_enable_lock();
	ret = clk_core_enable(core);
	clk_enable_unlock(flags);

	return ret;
}

/**
 * clk_gate_restore_context - restore context for poweroff
 * @hw: the clk_hw pointer of clock whose state is to be restored
 *
 * The clock gate restore context function enables or disables
 * the gate clocks based on the enable_count. This is done in cases
 * where the clock context is lost and based on the enable_count
 * the clock either needs to be enabled/disabled. This
 * helps restore the state of gate clocks.
 */
void clk_gate_restore_context(struct clk_hw *hw)
{
	struct clk_core *core = hw->core;

	if (core->enable_count)
		core->ops->enable(hw);
	else
		core->ops->disable(hw);
}
EXPORT_SYMBOL_GPL(clk_gate_restore_context);
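
/*
 * Illustrative sketch (not part of this file): a gate driver can point its
 * .restore_context op at the generic helper above so its on/off state is
 * replayed after a context-losing power state. The enable/disable callbacks
 * named here are hypothetical driver code.
 */
#if 0
static const struct clk_ops example_gate_ops = {
	.enable = example_gate_enable,
	.disable = example_gate_disable,
	.restore_context = clk_gate_restore_context,
};
#endif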

static int clk_core_save_context(struct clk_core *core)
{
	struct clk_core *child;
	int ret = 0;

	hlist_for_each_entry(child, &core->children, child_node) {
		ret = clk_core_save_context(child);
		if (ret < 0)
			return ret;
	}

	if (core->ops && core->ops->save_context)
		ret = core->ops->save_context(core->hw);

	return ret;
}

static void clk_core_restore_context(struct clk_core *core)
{
	struct clk_core *child;

	if (core->ops && core->ops->restore_context)
		core->ops->restore_context(core->hw);

	hlist_for_each_entry(child, &core->children, child_node)
		clk_core_restore_context(child);
}

/**
 * clk_save_context - save clock context for poweroff
 *
 * Saves the context of the clock register for powerstates in which the
 * contents of the registers will be lost. Occurs deep within the suspend
 * code. Returns 0 on success.
 */
int clk_save_context(void)
{
	struct clk_core *clk;
	int ret;

	hlist_for_each_entry(clk, &clk_root_list, child_node) {
		ret = clk_core_save_context(clk);
		if (ret < 0)
			return ret;
	}

	hlist_for_each_entry(clk, &clk_orphan_list, child_node) {
		ret = clk_core_save_context(clk);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(clk_save_context);

/**
 * clk_restore_context - restore clock context after poweroff
 *
 * Restore the saved clock context upon resume.
 */
void clk_restore_context(void)
{
	struct clk_core *core;

	hlist_for_each_entry(core, &clk_root_list, child_node)
		clk_core_restore_context(core);

	hlist_for_each_entry(core, &clk_orphan_list, child_node)
		clk_core_restore_context(core);
}
EXPORT_SYMBOL_GPL(clk_restore_context);
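
/*
 * Illustrative sketch (not part of this file): platform suspend code pairs
 * the two tree walks above around a context-losing power state. The function
 * names are hypothetical.
 */
#if 0
static int example_platform_suspend_noirq(struct device *dev)
{
	/* snapshot clock state before register contents are lost */
	return clk_save_context();
}

static int example_platform_resume_noirq(struct device *dev)
{
	/* replay the saved state on the way back up */
	clk_restore_context();
	return 0;
}
#endif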

/**
 * clk_enable - ungate a clock
 * @clk: the clk being ungated
 *
 * clk_enable must not sleep, which differentiates it from clk_prepare. In a
 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
 * if the operation will never sleep. One example is a SoC-internal clk which
 * is controlled via simple register writes. In the complex case a clk ungate
 * operation may require a fast and a slow part. It is for this reason that
 * clk_enable and clk_prepare are not mutually exclusive. In fact clk_prepare
 * must be called before clk_enable. Returns 0 on success, -EERROR
 * otherwise.
 */
int clk_enable(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_enable_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_enable);
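
/*
 * Illustrative sketch (not part of this file): the required consumer call
 * order. prepare/unprepare may sleep; enable/disable must not, so only the
 * enable pair is safe in atomic context. Names are hypothetical.
 */
#if 0
static int example_consumer_on(struct clk *clk)
{
	int ret;

	ret = clk_prepare(clk);	/* may sleep, process context only */
	if (ret)
		return ret;

	ret = clk_enable(clk);	/* fast, usable from atomic context */
	if (ret)
		clk_unprepare(clk);

	return ret;
}

static void example_consumer_off(struct clk *clk)
{
	clk_disable(clk);	/* must come before clk_unprepare() */
	clk_unprepare(clk);
}
#endif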

static int clk_core_prepare_enable(struct clk_core *core)
{
	int ret;

	ret = clk_core_prepare_lock(core);
	if (ret)
		return ret;

	ret = clk_core_enable_lock(core);
	if (ret)
		clk_core_unprepare_lock(core);

	return ret;
}

static void clk_core_disable_unprepare(struct clk_core *core)
{
	clk_core_disable_lock(core);
	clk_core_unprepare_lock(core);
}

static void clk_unprepare_unused_subtree(struct clk_core *core)
{
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	hlist_for_each_entry(child, &core->children, child_node)
		clk_unprepare_unused_subtree(child);

	if (core->prepare_count)
		return;

	if (core->flags & CLK_IGNORE_UNUSED)
		return;

	if (clk_pm_runtime_get(core))
		return;

	if (clk_core_is_prepared(core)) {
		trace_clk_unprepare(core);
		if (core->ops->unprepare_unused)
			core->ops->unprepare_unused(core->hw);
		else if (core->ops->unprepare)
			core->ops->unprepare(core->hw);
		trace_clk_unprepare_complete(core);
	}

	clk_pm_runtime_put(core);
}

static void clk_disable_unused_subtree(struct clk_core *core)
{
	struct clk_core *child;
	unsigned long flags;

	lockdep_assert_held(&prepare_lock);

	hlist_for_each_entry(child, &core->children, child_node)
		clk_disable_unused_subtree(child);

	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_prepare_enable(core->parent);

	if (clk_pm_runtime_get(core))
		goto unprepare_out;

	flags = clk_enable_lock();

	if (core->enable_count)
		goto unlock_out;

	if (core->flags & CLK_IGNORE_UNUSED)
		goto unlock_out;

	/*
	 * some gate clocks have special needs during the disable-unused
	 * sequence. call .disable_unused if available, otherwise fall
	 * back to .disable
	 */
	if (clk_core_is_enabled(core)) {
		trace_clk_disable(core);
		if (core->ops->disable_unused)
			core->ops->disable_unused(core->hw);
		else if (core->ops->disable)
			core->ops->disable(core->hw);
		trace_clk_disable_complete(core);
	}

unlock_out:
	clk_enable_unlock(flags);
	clk_pm_runtime_put(core);
unprepare_out:
	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_disable_unprepare(core->parent);
}

static bool clk_ignore_unused;
static int __init clk_ignore_unused_setup(char *__unused)
{
	clk_ignore_unused = true;
	return 1;
}
__setup("clk_ignore_unused", clk_ignore_unused_setup);

static int clk_disable_unused(void)
{
	struct clk_core *core;

	if (clk_ignore_unused) {
		pr_warn("clk: Not disabling unused clocks\n");
		return 0;
	}

	clk_prepare_lock();

	hlist_for_each_entry(core, &clk_root_list, child_node)
		clk_disable_unused_subtree(core);

	hlist_for_each_entry(core, &clk_orphan_list, child_node)
		clk_disable_unused_subtree(core);

	hlist_for_each_entry(core, &clk_root_list, child_node)
		clk_unprepare_unused_subtree(core);

	hlist_for_each_entry(core, &clk_orphan_list, child_node)
		clk_unprepare_unused_subtree(core);

	clk_prepare_unlock();

	return 0;
}
late_initcall_sync(clk_disable_unused);

static int clk_core_determine_round_nolock(struct clk_core *core,
					   struct clk_rate_request *req)
{
	long rate;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	/*
	 * At this point, core protection will be disabled
	 * - if the provider is not protected at all
	 * - if the calling consumer is the only one which has exclusivity
	 *   over the provider
	 */
	if (clk_core_rate_is_protected(core)) {
		req->rate = core->rate;
	} else if (core->ops->determine_rate) {
		return core->ops->determine_rate(core->hw, req);
	} else if (core->ops->round_rate) {
		rate = core->ops->round_rate(core->hw, req->rate,
					     &req->best_parent_rate);
		if (rate < 0)
			return rate;

		req->rate = rate;
	} else {
		return -EINVAL;
	}

	return 0;
}

static void clk_core_init_rate_req(struct clk_core * const core,
				   struct clk_rate_request *req)
{
	struct clk_core *parent;

	if (WARN_ON(!core || !req))
		return;

	parent = core->parent;
	if (parent) {
		req->best_parent_hw = parent->hw;
		req->best_parent_rate = parent->rate;
	} else {
		req->best_parent_hw = NULL;
		req->best_parent_rate = 0;
	}
}

static bool clk_core_can_round(struct clk_core * const core)
{
	return core->ops->determine_rate || core->ops->round_rate;
}

static int clk_core_round_rate_nolock(struct clk_core *core,
				      struct clk_rate_request *req)
{
	lockdep_assert_held(&prepare_lock);

	if (!core) {
		req->rate = 0;
		return 0;
	}

	clk_core_init_rate_req(core, req);

	if (clk_core_can_round(core))
		return clk_core_determine_round_nolock(core, req);
	else if (core->flags & CLK_SET_RATE_PARENT)
		return clk_core_round_rate_nolock(core->parent, req);

	req->rate = core->rate;
	return 0;
}

/**
 * __clk_determine_rate - get the closest rate actually supported by a clock
 * @hw: determine the rate of this clock
 * @req: target rate request
 *
 * Useful for clk_ops such as .set_rate and .determine_rate.
 */
int __clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	if (!hw) {
		req->rate = 0;
		return 0;
	}

	return clk_core_round_rate_nolock(hw->core, req);
}
EXPORT_SYMBOL_GPL(__clk_determine_rate);

unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate)
{
	int ret;
	struct clk_rate_request req;

	clk_core_get_boundaries(hw->core, &req.min_rate, &req.max_rate);
	req.rate = rate;

	ret = clk_core_round_rate_nolock(hw->core, &req);
	if (ret)
		return 0;

	return req.rate;
}
EXPORT_SYMBOL_GPL(clk_hw_round_rate);

/**
 * clk_round_rate - round the given rate for a clk
 * @clk: the clk for which we are rounding a rate
 * @rate: the rate which is to be rounded
 *
 * Takes in a rate as input and rounds it to a rate that the clk can actually
 * use which is then returned. If clk doesn't support round_rate operation
 * then the parent rate is returned.
 */
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	struct clk_rate_request req;
	int ret;

	if (!clk)
		return 0;

	clk_prepare_lock();

	if (clk->exclusive_count)
		clk_core_rate_unprotect(clk->core);

	clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate);
	req.rate = rate;

	ret = clk_core_round_rate_nolock(clk->core, &req);

	if (clk->exclusive_count)
		clk_core_rate_protect(clk->core);

	clk_prepare_unlock();

	if (ret)
		return ret;

	return req.rate;
}
EXPORT_SYMBOL_GPL(clk_round_rate);

/**
 * __clk_notify - call clk notifier chain
 * @core: clk that is changing rate
 * @msg: clk notifier type (see include/linux/clk.h)
 * @old_rate: old clk rate
 * @new_rate: new clk rate
 *
 * Triggers a notifier call chain on the clk rate-change notification
 * for 'clk'. Passes a pointer to the struct clk and the previous
 * and current rates to the notifier callback. Intended to be called by
 * internal clock code only. Returns NOTIFY_DONE from the last driver
 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
 * a driver returns that.
 */
static int __clk_notify(struct clk_core *core, unsigned long msg,
			unsigned long old_rate, unsigned long new_rate)
{
	struct clk_notifier *cn;
	struct clk_notifier_data cnd;
	int ret = NOTIFY_DONE;

	cnd.old_rate = old_rate;
	cnd.new_rate = new_rate;

	list_for_each_entry(cn, &clk_notifier_list, node) {
		if (cn->clk->core == core) {
			cnd.clk = cn->clk;
			ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
						       &cnd);
			if (ret & NOTIFY_STOP_MASK)
				return ret;
		}
	}

	return ret;
}

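/*
 * Illustrative sketch (not part of this file): a consumer callback reached by
 * the chain walk above, registered elsewhere with clk_notifier_register().
 * The callback name and reactions to the events are hypothetical.
 */
#if 0
static int example_rate_notifier(struct notifier_block *nb,
				 unsigned long event, void *data)
{
	struct clk_notifier_data *cnd = data;

	switch (event) {
	case PRE_RATE_CHANGE:
		/* returning NOTIFY_BAD here would abort the rate change */
		return cnd->new_rate ? NOTIFY_OK : NOTIFY_BAD;
	case POST_RATE_CHANGE:
		/* reprogram local dividers against cnd->new_rate */
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}
#endif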

/**
 * __clk_recalc_accuracies
 * @core: first clk in the subtree
 *
 * Walks the subtree of clks starting with clk and recalculates accuracies as
 * it goes. Note that if a clk does not implement the .recalc_accuracy
 * callback then it is assumed that the clock will take on the accuracy of its
 * parent.
 */
static void __clk_recalc_accuracies(struct clk_core *core)
{
	unsigned long parent_accuracy = 0;
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	if (core->parent)
		parent_accuracy = core->parent->accuracy;

	if (core->ops->recalc_accuracy)
		core->accuracy = core->ops->recalc_accuracy(core->hw,
							    parent_accuracy);
	else
		core->accuracy = parent_accuracy;

	hlist_for_each_entry(child, &core->children, child_node)
		__clk_recalc_accuracies(child);
}

static long clk_core_get_accuracy(struct clk_core *core)
{
	unsigned long accuracy;

	clk_prepare_lock();
	if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE))
		__clk_recalc_accuracies(core);

	accuracy = __clk_get_accuracy(core);
	clk_prepare_unlock();

	return accuracy;
}

/**
 * clk_get_accuracy - return the accuracy of clk
 * @clk: the clk whose accuracy is being returned
 *
 * Simply returns the cached accuracy of the clk, unless
 * CLK_GET_ACCURACY_NOCACHE flag is set, which means a recalc_accuracy will be
 * issued.
 * If clk is NULL then returns 0.
 */
long clk_get_accuracy(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_get_accuracy(clk->core);
}
EXPORT_SYMBOL_GPL(clk_get_accuracy);

static unsigned long clk_recalc(struct clk_core *core,
				unsigned long parent_rate)
{
	unsigned long rate = parent_rate;

	if (core->ops->recalc_rate && !clk_pm_runtime_get(core)) {
		rate = core->ops->recalc_rate(core->hw, parent_rate);
		clk_pm_runtime_put(core);
	}
	return rate;
}

/**
 * __clk_recalc_rates
 * @core: first clk in the subtree
 * @msg: notification type (see include/linux/clk.h)
 *
 * Walks the subtree of clks starting with clk and recalculates rates as it
 * goes. Note that if a clk does not implement the .recalc_rate callback then
 * it is assumed that the clock will take on the rate of its parent.
 *
 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
 * if necessary.
 */
static void __clk_recalc_rates(struct clk_core *core, unsigned long msg)
{
	unsigned long old_rate;
	unsigned long parent_rate = 0;
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	old_rate = core->rate;

	if (core->parent)
		parent_rate = core->parent->rate;

	core->rate = clk_recalc(core, parent_rate);

	/*
	 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
	 * & ABORT_RATE_CHANGE notifiers
	 */
	if (core->notifier_count && msg)
		__clk_notify(core, msg, old_rate, core->rate);

	hlist_for_each_entry(child, &core->children, child_node)
		__clk_recalc_rates(child, msg);
}

static unsigned long clk_core_get_rate(struct clk_core *core)
{
	unsigned long rate;

	clk_prepare_lock();

	if (core && (core->flags & CLK_GET_RATE_NOCACHE))
		__clk_recalc_rates(core, 0);

	rate = clk_core_get_rate_nolock(core);
	clk_prepare_unlock();

	return rate;
}

/**
 * clk_get_rate - return the rate of clk
 * @clk: the clk whose rate is being returned
 *
 * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
 * is set, which means a recalc_rate will be issued.
 * If clk is NULL then returns 0.
 */
unsigned long clk_get_rate(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_get_rate(clk->core);
}
EXPORT_SYMBOL_GPL(clk_get_rate);
b2476490 1625
4dff95dc
SB
1626static int clk_fetch_parent_index(struct clk_core *core,
1627 struct clk_core *parent)
b2476490 1628{
4dff95dc 1629 int i;
b2476490 1630
508f884a
MY
1631 if (!parent)
1632 return -EINVAL;
1633
ede77858 1634 for (i = 0; i < core->num_parents; i++) {
1a079560 1635 /* Found it first try! */
fc0c209c 1636 if (core->parents[i].core == parent)
4dff95dc 1637 return i;
b2476490 1638
1a079560 1639 /* Something else is here, so keep looking */
fc0c209c 1640 if (core->parents[i].core)
ede77858
DB
1641 continue;
1642
1a079560
SB
1643 /* Maybe core hasn't been cached but the hw is all we know? */
1644 if (core->parents[i].hw) {
1645 if (core->parents[i].hw == parent->hw)
1646 break;
1647
1648 /* Didn't match, but we're expecting a clk_hw */
1649 continue;
ede77858 1650 }
1a079560
SB
1651
1652 /* Maybe it hasn't been cached (clk_set_parent() path) */
1653 if (parent == clk_core_get(core, i))
1654 break;
1655
1656 /* Fallback to comparing globally unique names */
24876f09
MB
1657 if (core->parents[i].name &&
1658 !strcmp(parent->name, core->parents[i].name))
1a079560 1659 break;
ede77858
DB
1660 }
1661
1a079560
SB
1662 if (i == core->num_parents)
1663 return -EINVAL;
1664
1665 core->parents[i].core = parent;
1666 return i;
b2476490
MT
1667}
1668
e6500344
HS
1669/*
1670 * Update the orphan status of @core and all its children.
1671 */
1672static void clk_core_update_orphan_status(struct clk_core *core, bool is_orphan)
1673{
1674 struct clk_core *child;
1675
1676 core->orphan = is_orphan;
1677
1678 hlist_for_each_entry(child, &core->children, child_node)
1679 clk_core_update_orphan_status(child, is_orphan);
1680}
1681
4dff95dc 1682static void clk_reparent(struct clk_core *core, struct clk_core *new_parent)
b2476490 1683{
e6500344
HS
1684 bool was_orphan = core->orphan;
1685
4dff95dc 1686 hlist_del(&core->child_node);
035a61c3 1687
4dff95dc 1688 if (new_parent) {
e6500344
HS
1689 bool becomes_orphan = new_parent->orphan;
1690
4dff95dc
SB
1691 /* avoid duplicate POST_RATE_CHANGE notifications */
1692 if (new_parent->new_child == core)
1693 new_parent->new_child = NULL;
b2476490 1694
4dff95dc 1695 hlist_add_head(&core->child_node, &new_parent->children);
e6500344
HS
1696
1697 if (was_orphan != becomes_orphan)
1698 clk_core_update_orphan_status(core, becomes_orphan);
4dff95dc
SB
1699 } else {
1700 hlist_add_head(&core->child_node, &clk_orphan_list);
e6500344
HS
1701 if (!was_orphan)
1702 clk_core_update_orphan_status(core, true);
4dff95dc 1703 }
dfc202ea 1704
4dff95dc 1705 core->parent = new_parent;
035a61c3
TV
1706}
1707
4dff95dc
SB
1708static struct clk_core *__clk_set_parent_before(struct clk_core *core,
1709 struct clk_core *parent)
b2476490
MT
1710{
1711 unsigned long flags;
4dff95dc 1712 struct clk_core *old_parent = core->parent;
b2476490 1713
4dff95dc 1714 /*
fc8726a2
DA
1715 * 1. enable parents for CLK_OPS_PARENT_ENABLE clock
1716 *
1717 * 2. Migrate prepare state between parents and prevent race with
4dff95dc
SB
1718 * clk_enable().
1719 *
1720 * If the clock is not prepared, then a race with
1721 * clk_enable/disable() is impossible since we already have the
1722 * prepare lock (future calls to clk_enable() need to be preceded by
1723 * a clk_prepare()).
1724 *
1725 * If the clock is prepared, migrate the prepared state to the new
1726 * parent and also protect against a race with clk_enable() by
1727 * forcing the clock and the new parent on. This ensures that all
1728 * future calls to clk_enable() are practically NOPs with respect to
1729 * hardware and software states.
1730 *
1731 * See also: Comment for clk_set_parent() below.
1732 */
fc8726a2
DA
1733
1734 /* enable old_parent & parent if CLK_OPS_PARENT_ENABLE is set */
1735 if (core->flags & CLK_OPS_PARENT_ENABLE) {
1736 clk_core_prepare_enable(old_parent);
1737 clk_core_prepare_enable(parent);
1738 }
1739
1740 /* migrate prepare count if > 0 */
4dff95dc 1741 if (core->prepare_count) {
fc8726a2
DA
1742 clk_core_prepare_enable(parent);
1743 clk_core_enable_lock(core);
4dff95dc 1744 }
63589e92 1745
4dff95dc 1746 /* update the clk tree topology */
eab89f69 1747 flags = clk_enable_lock();
4dff95dc 1748 clk_reparent(core, parent);
eab89f69 1749 clk_enable_unlock(flags);
4dff95dc
SB
1750
1751 return old_parent;
b2476490 1752}
b2476490 1753
4dff95dc
SB
1754static void __clk_set_parent_after(struct clk_core *core,
1755 struct clk_core *parent,
1756 struct clk_core *old_parent)
b2476490 1757{
4dff95dc
SB
1758 /*
1759 * Finish the migration of prepare state and undo the changes done
1760 * for preventing a race with clk_enable().
1761 */
1762 if (core->prepare_count) {
fc8726a2
DA
1763 clk_core_disable_lock(core);
1764 clk_core_disable_unprepare(old_parent);
1765 }
1766
1767 /* re-balance ref counting if CLK_OPS_PARENT_ENABLE is set */
1768 if (core->flags & CLK_OPS_PARENT_ENABLE) {
1769 clk_core_disable_unprepare(parent);
1770 clk_core_disable_unprepare(old_parent);
4dff95dc
SB
1771 }
1772}
b2476490 1773
4dff95dc
SB
1774static int __clk_set_parent(struct clk_core *core, struct clk_core *parent,
1775 u8 p_index)
1776{
1777 unsigned long flags;
1778 int ret = 0;
1779 struct clk_core *old_parent;
b2476490 1780
4dff95dc 1781 old_parent = __clk_set_parent_before(core, parent);
b2476490 1782
4dff95dc 1783 trace_clk_set_parent(core, parent);
b2476490 1784
4dff95dc
SB
1785 /* change clock input source */
1786 if (parent && core->ops->set_parent)
1787 ret = core->ops->set_parent(core->hw, p_index);
dfc202ea 1788
4dff95dc 1789 trace_clk_set_parent_complete(core, parent);
dfc202ea 1790
4dff95dc
SB
1791 if (ret) {
1792 flags = clk_enable_lock();
1793 clk_reparent(core, old_parent);
1794 clk_enable_unlock(flags);
c660b2eb 1795 __clk_set_parent_after(core, old_parent, parent);
dfc202ea 1796
4dff95dc 1797 return ret;
b2476490
MT
1798 }
1799
4dff95dc
SB
1800 __clk_set_parent_after(core, parent, old_parent);
1801
b2476490
MT
1802 return 0;
1803}
1804
1805/**
4dff95dc
SB
1806 * __clk_speculate_rates
1807 * @core: first clk in the subtree
1808 * @parent_rate: the "future" rate of clk's parent
b2476490 1809 *
4dff95dc
SB
1810 * Walks the subtree of clks starting with clk, speculating rates as it
1811 * goes and firing off PRE_RATE_CHANGE notifications as necessary.
1812 *
1813 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
1814 * pre-rate change notifications and returns early if no clks in the
1815 * subtree have subscribed to the notifications. Note that if a clk does not
1816 * implement the .recalc_rate callback then it is assumed that the clock will
1817 * take on the rate of its parent.
b2476490 1818 */
4dff95dc
SB
1819static int __clk_speculate_rates(struct clk_core *core,
1820 unsigned long parent_rate)
b2476490 1821{
4dff95dc
SB
1822 struct clk_core *child;
1823 unsigned long new_rate;
1824 int ret = NOTIFY_DONE;
b2476490 1825
4dff95dc 1826 lockdep_assert_held(&prepare_lock);
864e160a 1827
4dff95dc
SB
1828 new_rate = clk_recalc(core, parent_rate);
1829
1830 /* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
1831 if (core->notifier_count)
1832 ret = __clk_notify(core, PRE_RATE_CHANGE, core->rate, new_rate);
1833
1834 if (ret & NOTIFY_STOP_MASK) {
1835 pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n",
1836 __func__, core->name, ret);
1837 goto out;
1838 }
1839
1840 hlist_for_each_entry(child, &core->children, child_node) {
1841 ret = __clk_speculate_rates(child, new_rate);
1842 if (ret & NOTIFY_STOP_MASK)
1843 break;
1844 }
b2476490 1845
4dff95dc 1846out:
b2476490
MT
1847 return ret;
1848}
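
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * consumers subscribe to these PRE/POST/ABORT_RATE_CHANGE events with
 * clk_notifier_register(). The callback name and the 200 MHz limit below
 * are hypothetical; returning NOTIFY_BAD from a PRE_RATE_CHANGE handler
 * vetoes the rate change.
 *
 *	static int foo_clk_notifier_cb(struct notifier_block *nb,
 *				       unsigned long event, void *data)
 *	{
 *		struct clk_notifier_data *ndata = data;
 *
 *		if (event == PRE_RATE_CHANGE && ndata->new_rate > 200000000)
 *			return NOTIFY_BAD;
 *
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_clk_notifier_cb,
 *	};
 *
 *	ret = clk_notifier_register(clk, &foo_nb);
 */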
b2476490 1849
4dff95dc
SB
1850static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate,
1851 struct clk_core *new_parent, u8 p_index)
b2476490 1852{
4dff95dc 1853 struct clk_core *child;
b2476490 1854
4dff95dc
SB
1855 core->new_rate = new_rate;
1856 core->new_parent = new_parent;
1857 core->new_parent_index = p_index;
1858 /* include clk in new parent's PRE_RATE_CHANGE notifications */
1859 core->new_child = NULL;
1860 if (new_parent && new_parent != core->parent)
1861 new_parent->new_child = core;
496eadf8 1862
4dff95dc
SB
1863 hlist_for_each_entry(child, &core->children, child_node) {
1864 child->new_rate = clk_recalc(child, new_rate);
1865 clk_calc_subtree(child, child->new_rate, NULL, 0);
1866 }
1867}
b2476490 1868
4dff95dc
SB
1869/*
1870 * calculate the new rates returning the topmost clock that has to be
1871 * changed.
1872 */
1873static struct clk_core *clk_calc_new_rates(struct clk_core *core,
1874 unsigned long rate)
1875{
1876 struct clk_core *top = core;
1877 struct clk_core *old_parent, *parent;
4dff95dc
SB
1878 unsigned long best_parent_rate = 0;
1879 unsigned long new_rate;
1880 unsigned long min_rate;
1881 unsigned long max_rate;
1882 int p_index = 0;
1883 long ret;
1884
1885 /* sanity */
1886 if (IS_ERR_OR_NULL(core))
1887 return NULL;
1888
1889 /* save parent rate, if it exists */
1890 parent = old_parent = core->parent;
71472c0c 1891 if (parent)
4dff95dc 1892 best_parent_rate = parent->rate;
71472c0c 1893
4dff95dc
SB
1894 clk_core_get_boundaries(core, &min_rate, &max_rate);
1895
1896 /* find the closest rate and parent clk/rate */
0f6cc2b8 1897 if (clk_core_can_round(core)) {
0817b62c
BB
1898 struct clk_rate_request req;
1899
1900 req.rate = rate;
1901 req.min_rate = min_rate;
1902 req.max_rate = max_rate;
0817b62c 1903
0f6cc2b8
JB
1904 clk_core_init_rate_req(core, &req);
1905
1906 ret = clk_core_determine_round_nolock(core, &req);
4dff95dc
SB
1907 if (ret < 0)
1908 return NULL;
1c8e6004 1909
0817b62c
BB
1910 best_parent_rate = req.best_parent_rate;
1911 new_rate = req.rate;
1912 parent = req.best_parent_hw ? req.best_parent_hw->core : NULL;
035a61c3 1913
4dff95dc
SB
1914 if (new_rate < min_rate || new_rate > max_rate)
1915 return NULL;
1916 } else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) {
1917 /* pass-through clock without adjustable parent */
1918 core->new_rate = core->rate;
1919 return NULL;
1920 } else {
1921 /* pass-through clock with adjustable parent */
1922 top = clk_calc_new_rates(parent, rate);
1923 new_rate = parent->new_rate;
1924 goto out;
1925 }
1c8e6004 1926
4dff95dc
SB
1927 /* some clocks must be gated to change parent */
1928 if (parent != old_parent &&
1929 (core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
1930 pr_debug("%s: %s not gated but wants to reparent\n",
1931 __func__, core->name);
1932 return NULL;
1933 }
b2476490 1934
4dff95dc
SB
1935 /* try finding the new parent index */
1936 if (parent && core->num_parents > 1) {
1937 p_index = clk_fetch_parent_index(core, parent);
1938 if (p_index < 0) {
1939 pr_debug("%s: clk %s can not be parent of clk %s\n",
1940 __func__, parent->name, core->name);
1941 return NULL;
1942 }
1943 }
b2476490 1944
4dff95dc
SB
1945 if ((core->flags & CLK_SET_RATE_PARENT) && parent &&
1946 best_parent_rate != parent->rate)
1947 top = clk_calc_new_rates(parent, best_parent_rate);
035a61c3 1948
4dff95dc
SB
1949out:
1950 clk_calc_subtree(core, new_rate, parent, p_index);
b2476490 1951
4dff95dc 1952 return top;
b2476490 1953}
b2476490 1954
4dff95dc
SB
1955/*
1956 * Notify about rate changes in a subtree. Always walk down the whole tree
1957 * so that in case of an error we can walk down the whole tree again and
1958 * abort the change.
b2476490 1959 */
4dff95dc
SB
1960static struct clk_core *clk_propagate_rate_change(struct clk_core *core,
1961 unsigned long event)
b2476490 1962{
4dff95dc 1963 struct clk_core *child, *tmp_clk, *fail_clk = NULL;
b2476490
MT
1964 int ret = NOTIFY_DONE;
1965
4dff95dc
SB
1966 if (core->rate == core->new_rate)
1967 return NULL;
b2476490 1968
4dff95dc
SB
1969 if (core->notifier_count) {
1970 ret = __clk_notify(core, event, core->rate, core->new_rate);
1971 if (ret & NOTIFY_STOP_MASK)
1972 fail_clk = core;
b2476490
MT
1973 }
1974
4dff95dc
SB
1975 hlist_for_each_entry(child, &core->children, child_node) {
1976 /* Skip children that will be reparented to another clock */
1977 if (child->new_parent && child->new_parent != core)
1978 continue;
1979 tmp_clk = clk_propagate_rate_change(child, event);
1980 if (tmp_clk)
1981 fail_clk = tmp_clk;
1982 }
5279fc40 1983
4dff95dc
SB
1984 /* handle the new child that might not be in core->children yet */
1985 if (core->new_child) {
1986 tmp_clk = clk_propagate_rate_change(core->new_child, event);
1987 if (tmp_clk)
1988 fail_clk = tmp_clk;
1989 }
5279fc40 1990
4dff95dc 1991 return fail_clk;
5279fc40
BB
1992}
1993
4dff95dc
SB
1994/*
1995 * walk down a subtree and set the new rates notifying the rate
1996 * change on the way
1997 */
1998static void clk_change_rate(struct clk_core *core)
035a61c3 1999{
4dff95dc
SB
2000 struct clk_core *child;
2001 struct hlist_node *tmp;
2002 unsigned long old_rate;
2003 unsigned long best_parent_rate = 0;
2004 bool skip_set_rate = false;
2005 struct clk_core *old_parent;
fc8726a2 2006 struct clk_core *parent = NULL;
035a61c3 2007
4dff95dc 2008 old_rate = core->rate;
035a61c3 2009
fc8726a2
DA
2010 if (core->new_parent) {
2011 parent = core->new_parent;
4dff95dc 2012 best_parent_rate = core->new_parent->rate;
fc8726a2
DA
2013 } else if (core->parent) {
2014 parent = core->parent;
4dff95dc 2015 best_parent_rate = core->parent->rate;
fc8726a2 2016 }
035a61c3 2017
588fb54b
MS
2018 if (clk_pm_runtime_get(core))
2019 return;
2020
2eb8c710
HS
2021 if (core->flags & CLK_SET_RATE_UNGATE) {
2022 unsigned long flags;
2023
2024 clk_core_prepare(core);
2025 flags = clk_enable_lock();
2026 clk_core_enable(core);
2027 clk_enable_unlock(flags);
2028 }
2029
4dff95dc
SB
2030 if (core->new_parent && core->new_parent != core->parent) {
2031 old_parent = __clk_set_parent_before(core, core->new_parent);
2032 trace_clk_set_parent(core, core->new_parent);
5279fc40 2033
4dff95dc
SB
2034 if (core->ops->set_rate_and_parent) {
2035 skip_set_rate = true;
2036 core->ops->set_rate_and_parent(core->hw, core->new_rate,
2037 best_parent_rate,
2038 core->new_parent_index);
2039 } else if (core->ops->set_parent) {
2040 core->ops->set_parent(core->hw, core->new_parent_index);
2041 }
5279fc40 2042
4dff95dc
SB
2043 trace_clk_set_parent_complete(core, core->new_parent);
2044 __clk_set_parent_after(core, core->new_parent, old_parent);
2045 }
8f2c2db1 2046
fc8726a2
DA
2047 if (core->flags & CLK_OPS_PARENT_ENABLE)
2048 clk_core_prepare_enable(parent);
2049
4dff95dc 2050 trace_clk_set_rate(core, core->new_rate);
b2476490 2051
4dff95dc
SB
2052 if (!skip_set_rate && core->ops->set_rate)
2053 core->ops->set_rate(core->hw, core->new_rate, best_parent_rate);
496eadf8 2054
4dff95dc 2055 trace_clk_set_rate_complete(core, core->new_rate);
b2476490 2056
4dff95dc 2057 core->rate = clk_recalc(core, best_parent_rate);
b2476490 2058
2eb8c710
HS
2059 if (core->flags & CLK_SET_RATE_UNGATE) {
2060 unsigned long flags;
2061
2062 flags = clk_enable_lock();
2063 clk_core_disable(core);
2064 clk_enable_unlock(flags);
2065 clk_core_unprepare(core);
2066 }
2067
fc8726a2
DA
2068 if (core->flags & CLK_OPS_PARENT_ENABLE)
2069 clk_core_disable_unprepare(parent);
2070
4dff95dc
SB
2071 if (core->notifier_count && old_rate != core->rate)
2072 __clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate);
b2476490 2073
85e88fab
MT
2074 if (core->flags & CLK_RECALC_NEW_RATES)
2075 (void)clk_calc_new_rates(core, core->new_rate);
d8d91987 2076
b2476490 2077 /*
4dff95dc
SB
2078 * Use safe iteration, as change_rate can actually swap parents
2079 * for certain clock types.
b2476490 2080 */
4dff95dc
SB
2081 hlist_for_each_entry_safe(child, tmp, &core->children, child_node) {
2082 /* Skip children that will be reparented to another clock */
2083 if (child->new_parent && child->new_parent != core)
2084 continue;
2085 clk_change_rate(child);
2086 }
b2476490 2087
4dff95dc
SB
2088 /* handle the new child that might not be in core->children yet */
2089 if (core->new_child)
2090 clk_change_rate(core->new_child);
588fb54b
MS
2091
2092 clk_pm_runtime_put(core);
b2476490
MT
2093}
2094
ca5e089a
JB
2095static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core,
2096 unsigned long req_rate)
2097{
e55a839a 2098 int ret, cnt;
ca5e089a
JB
2099 struct clk_rate_request req;
2100
2101 lockdep_assert_held(&prepare_lock);
2102
2103 if (!core)
2104 return 0;
2105
e55a839a
JB
2106 /* simulate what the rate would be if it could be freely set */
2107 cnt = clk_core_rate_nuke_protect(core);
2108 if (cnt < 0)
2109 return cnt;
2110
ca5e089a
JB
2111 clk_core_get_boundaries(core, &req.min_rate, &req.max_rate);
2112 req.rate = req_rate;
2113
2114 ret = clk_core_round_rate_nolock(core, &req);
2115
e55a839a
JB
2116 /* restore the protection */
2117 clk_core_rate_restore_protect(core, cnt);
2118
ca5e089a 2119 return ret ? 0 : req.rate;
b2476490
MT
2120}
2121
4dff95dc
SB
2122static int clk_core_set_rate_nolock(struct clk_core *core,
2123 unsigned long req_rate)
a093bde2 2124{
4dff95dc 2125 struct clk_core *top, *fail_clk;
ca5e089a 2126 unsigned long rate;
9a34b453 2127 int ret = 0;
a093bde2 2128
4dff95dc
SB
2129 if (!core)
2130 return 0;
a093bde2 2131
ca5e089a
JB
2132 rate = clk_core_req_round_rate_nolock(core, req_rate);
2133
4dff95dc
SB
2134 /* bail early if nothing to do */
2135 if (rate == clk_core_get_rate_nolock(core))
2136 return 0;
a093bde2 2137
e55a839a
JB
2138 /* fail on a direct rate set of a protected provider */
2139 if (clk_core_rate_is_protected(core))
2140 return -EBUSY;
2141
4dff95dc 2142 /* calculate new rates and get the topmost changed clock */
ca5e089a 2143 top = clk_calc_new_rates(core, req_rate);
4dff95dc
SB
2144 if (!top)
2145 return -EINVAL;
2146
9a34b453
MS
2147 ret = clk_pm_runtime_get(core);
2148 if (ret)
2149 return ret;
2150
4dff95dc
SB
2151 /* notify that we are about to change rates */
2152 fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
2153 if (fail_clk) {
2154 pr_debug("%s: failed to set %s rate\n", __func__,
2155 fail_clk->name);
2156 clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
9a34b453
MS
2157 ret = -EBUSY;
2158 goto err;
4dff95dc
SB
2159 }
2160
2161 /* change the rates */
2162 clk_change_rate(top);
2163
2164 core->req_rate = req_rate;
9a34b453
MS
2165err:
2166 clk_pm_runtime_put(core);
4dff95dc 2167
9a34b453 2168 return ret;
a093bde2 2169}
035a61c3
TV
2170
2171/**
4dff95dc
SB
2172 * clk_set_rate - specify a new rate for clk
2173 * @clk: the clk whose rate is being changed
2174 * @rate: the new rate for clk
035a61c3 2175 *
4dff95dc
SB
2176 * In the simplest case clk_set_rate will only adjust the rate of clk.
2177 *
2178 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
2179 * propagate up to clk's parent; whether or not this happens depends on the
2180 * outcome of clk's .round_rate implementation. If *parent_rate is unchanged
2181 * after calling .round_rate then upstream parent propagation is ignored. If
2182 * *parent_rate comes back with a new rate for clk's parent then we propagate
2183 * up to clk's parent and set its rate. Upward propagation will continue
2184 * until either a clk does not support the CLK_SET_RATE_PARENT flag or
2185 * .round_rate stops requesting changes to clk's parent_rate.
2186 *
2187 * Rate changes are accomplished via tree traversal that also recalculates the
2188 * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
2189 *
2190 * Returns 0 on success, a negative errno otherwise.
035a61c3 2191 */
4dff95dc 2192int clk_set_rate(struct clk *clk, unsigned long rate)
035a61c3 2193{
4dff95dc
SB
2194 int ret;
2195
035a61c3
TV
2196 if (!clk)
2197 return 0;
2198
4dff95dc
SB
2199 /* prevent racing with updates to the clock topology */
2200 clk_prepare_lock();
da0f0b2c 2201
55e9b8b7
JB
2202 if (clk->exclusive_count)
2203 clk_core_rate_unprotect(clk->core);
2204
4dff95dc 2205 ret = clk_core_set_rate_nolock(clk->core, rate);
da0f0b2c 2206
55e9b8b7
JB
2207 if (clk->exclusive_count)
2208 clk_core_rate_protect(clk->core);
2209
4dff95dc 2210 clk_prepare_unlock();
4935b22c 2211
4dff95dc 2212 return ret;
4935b22c 2213}
4dff95dc 2214EXPORT_SYMBOL_GPL(clk_set_rate);
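
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * typical consumer usage of clk_set_rate(). The connection id "pix" and
 * the 148.5 MHz target are hypothetical.
 *
 *	struct clk *clk = devm_clk_get(dev, "pix");
 *
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *
 *	ret = clk_set_rate(clk, 148500000);
 *	if (ret)
 *		dev_err(dev, "failed to set pixel clock rate: %d\n", ret);
 */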
4935b22c 2215
55e9b8b7 2216/**
65e2218d 2217 * clk_set_rate_exclusive - specify a new rate and get exclusive control
55e9b8b7
JB
2218 * @clk: the clk whose rate is being changed
2219 * @rate: the new rate for clk
2220 *
2221 * This is a combination of clk_set_rate() and clk_rate_exclusive_get()
2222 * within a critical section.
2223 *
2224 * This can be used initially to ensure that at least one consumer is
65e2218d 2225 * satisfied when several consumers are competing for exclusivity over the
55e9b8b7
JB
2226 * same clock provider.
2227 *
2228 * The exclusivity is not applied if setting the rate fails.
2229 *
2230 * Calls to clk_rate_exclusive_get() should be balanced with calls to
2231 * clk_rate_exclusive_put().
2232 *
2233 * Returns 0 on success, a negative errno otherwise.
2234 */
2235int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
2236{
2237 int ret;
2238
2239 if (!clk)
2240 return 0;
2241
2242 /* prevent racing with updates to the clock topology */
2243 clk_prepare_lock();
2244
2245 /*
2246 * The temporary protection removal is deliberately absent here.
2247 * This function is meant to be used instead of clk_rate_protect(),
2248 * i.e. before the consumer code path has protected the clock provider.
2249 */
2250
2251 ret = clk_core_set_rate_nolock(clk->core, rate);
2252 if (!ret) {
2253 clk_core_rate_protect(clk->core);
2254 clk->exclusive_count++;
2255 }
2256
2257 clk_prepare_unlock();
2258
2259 return ret;
2260}
2261EXPORT_SYMBOL_GPL(clk_set_rate_exclusive);
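
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * a consumer that cannot tolerate rate changes from other consumers pairs
 * clk_set_rate_exclusive() with clk_rate_exclusive_put(). The 48 MHz
 * value is hypothetical.
 *
 *	ret = clk_set_rate_exclusive(clk, 48000000);
 *	if (ret)
 *		return ret;
 *
 *	(use the clock; the rate is guaranteed until the put below)
 *
 *	clk_rate_exclusive_put(clk);
 */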
2262
4dff95dc
SB
2263/**
2264 * clk_set_rate_range - set a rate range for a clock source
2265 * @clk: clock source
2266 * @min: desired minimum clock rate in Hz, inclusive
2267 * @max: desired maximum clock rate in Hz, inclusive
2268 *
2269 * Returns success (0) or negative errno.
2270 */
2271int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
4935b22c 2272{
4dff95dc 2273 int ret = 0;
6562fbcf 2274 unsigned long old_min, old_max, rate;
4935b22c 2275
4dff95dc
SB
2276 if (!clk)
2277 return 0;
903efc55 2278
4dff95dc
SB
2279 if (min > max) {
2280 pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n",
2281 __func__, clk->core->name, clk->dev_id, clk->con_id,
2282 min, max);
2283 return -EINVAL;
903efc55 2284 }
4935b22c 2285
4dff95dc 2286 clk_prepare_lock();
4935b22c 2287
55e9b8b7
JB
2288 if (clk->exclusive_count)
2289 clk_core_rate_unprotect(clk->core);
2290
6562fbcf
JB
2291 /* Save the current values in case we need to rollback the change */
2292 old_min = clk->min_rate;
2293 old_max = clk->max_rate;
2294 clk->min_rate = min;
2295 clk->max_rate = max;
2296
2297 rate = clk_core_get_rate_nolock(clk->core);
2298 if (rate < min || rate > max) {
2299 /*
2300 * FIXME:
2301 * We are in a bit of trouble here: the current rate is outside the
2302 * requested range. We are going to try to request an appropriate
2303 * range boundary but there is a catch. It may fail for the
2304 * usual reason (clock broken, clock protected, etc) but also
2305 * because:
2306 * - round_rate() was not favorable and fell on the wrong
2307 * side of the boundary
2308 * - the determine_rate() callback does not really check for
2309 * this corner case when determining the rate
2310 */
2311
2312 if (rate < min)
2313 rate = min;
2314 else
2315 rate = max;
2316
2317 ret = clk_core_set_rate_nolock(clk->core, rate);
2318 if (ret) {
2319 /* rollback the changes */
2320 clk->min_rate = old_min;
2321 clk->max_rate = old_max;
2322 }
4935b22c
JH
2323 }
2324
55e9b8b7
JB
2325 if (clk->exclusive_count)
2326 clk_core_rate_protect(clk->core);
2327
4dff95dc 2328 clk_prepare_unlock();
4935b22c 2329
4dff95dc 2330 return ret;
3fa2252b 2331}
4dff95dc 2332EXPORT_SYMBOL_GPL(clk_set_rate_range);
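
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * constrain a clock to a band instead of a single rate; the framework
 * keeps the rate within [min, max]. The 100-200 MHz band is hypothetical.
 *
 *	ret = clk_set_rate_range(clk, 100000000, 200000000);
 *
 * clk_set_min_rate() and clk_set_max_rate() below are thin wrappers that
 * adjust only one boundary of this same per-consumer range.
 */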
3fa2252b 2333
4dff95dc
SB
2334/**
2335 * clk_set_min_rate - set a minimum clock rate for a clock source
2336 * @clk: clock source
2337 * @rate: desired minimum clock rate in Hz, inclusive
2338 *
2339 * Returns success (0) or negative errno.
2340 */
2341int clk_set_min_rate(struct clk *clk, unsigned long rate)
3fa2252b 2342{
4dff95dc
SB
2343 if (!clk)
2344 return 0;
2345
2346 return clk_set_rate_range(clk, rate, clk->max_rate);
3fa2252b 2347}
4dff95dc 2348EXPORT_SYMBOL_GPL(clk_set_min_rate);
3fa2252b 2349
4dff95dc
SB
2350/**
2351 * clk_set_max_rate - set a maximum clock rate for a clock source
2352 * @clk: clock source
2353 * @rate: desired maximum clock rate in Hz, inclusive
2354 *
2355 * Returns success (0) or negative errno.
2356 */
2357int clk_set_max_rate(struct clk *clk, unsigned long rate)
3fa2252b 2358{
4dff95dc
SB
2359 if (!clk)
2360 return 0;
4935b22c 2361
4dff95dc 2362 return clk_set_rate_range(clk, clk->min_rate, rate);
4935b22c 2363}
4dff95dc 2364EXPORT_SYMBOL_GPL(clk_set_max_rate);
4935b22c 2365
b2476490 2366/**
4dff95dc
SB
2367 * clk_get_parent - return the parent of a clk
2368 * @clk: the clk whose parent gets returned
b2476490 2369 *
4dff95dc 2370 * Simply returns clk->parent. Returns NULL if clk is NULL.
b2476490 2371 */
4dff95dc 2372struct clk *clk_get_parent(struct clk *clk)
b2476490 2373{
4dff95dc 2374 struct clk *parent;
b2476490 2375
fc4a05d4
SB
2376 if (!clk)
2377 return NULL;
2378
4dff95dc 2379 clk_prepare_lock();
fc4a05d4
SB
2380 /* TODO: Create a per-user clk and change callers to call clk_put */
2381 parent = !clk->core->parent ? NULL : clk->core->parent->hw->clk;
4dff95dc 2382 clk_prepare_unlock();
496eadf8 2383
4dff95dc
SB
2384 return parent;
2385}
2386EXPORT_SYMBOL_GPL(clk_get_parent);
b2476490 2387
4dff95dc
SB
2388static struct clk_core *__clk_init_parent(struct clk_core *core)
2389{
5146e0b0 2390 u8 index = 0;
4dff95dc 2391
2430a94d 2392 if (core->num_parents > 1 && core->ops->get_parent)
5146e0b0 2393 index = core->ops->get_parent(core->hw);
b2476490 2394
5146e0b0 2395 return clk_core_get_parent_by_index(core, index);
b2476490
MT
2396}
2397
4dff95dc
SB
2398static void clk_core_reparent(struct clk_core *core,
2399 struct clk_core *new_parent)
b2476490 2400{
4dff95dc
SB
2401 clk_reparent(core, new_parent);
2402 __clk_recalc_accuracies(core);
2403 __clk_recalc_rates(core, POST_RATE_CHANGE);
b2476490
MT
2404}
2405
42c86547
TV
2406void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent)
2407{
2408 if (!hw)
2409 return;
2410
2411 clk_core_reparent(hw->core, !new_parent ? NULL : new_parent->core);
2412}
2413
4dff95dc
SB
2414/**
2415 * clk_has_parent - check if a clock is a possible parent for another
2416 * @clk: clock source
2417 * @parent: parent clock source
2418 *
2419 * This function can be used in drivers that need to check that a clock can be
2420 * the parent of another without actually changing the parent.
2421 *
2422 * Returns true if @parent is a possible parent for @clk, false otherwise.
b2476490 2423 */
4dff95dc 2424bool clk_has_parent(struct clk *clk, struct clk *parent)
b2476490 2425{
4dff95dc 2426 struct clk_core *core, *parent_core;
fc0c209c 2427 int i;
b2476490 2428
4dff95dc
SB
2429 /* NULL clocks should be nops, so return success if either is NULL. */
2430 if (!clk || !parent)
2431 return true;
7452b219 2432
4dff95dc
SB
2433 core = clk->core;
2434 parent_core = parent->core;
71472c0c 2435
4dff95dc
SB
2436 /* Optimize for the case where the parent is already the parent. */
2437 if (core->parent == parent_core)
2438 return true;
1c8e6004 2439
fc0c209c
SB
2440 for (i = 0; i < core->num_parents; i++)
2441 if (!strcmp(core->parents[i].name, parent_core->name))
2442 return true;
2443
2444 return false;
4dff95dc
SB
2445}
2446EXPORT_SYMBOL_GPL(clk_has_parent);
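
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * validate a candidate parent before attempting to switch to it. The
 * mux and crystal clock handles are hypothetical.
 *
 *	if (clk_has_parent(mux_clk, xtal_clk))
 *		ret = clk_set_parent(mux_clk, xtal_clk);
 */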
03bc10ab 2447
91baa9ff
JB
2448static int clk_core_set_parent_nolock(struct clk_core *core,
2449 struct clk_core *parent)
4dff95dc
SB
2450{
2451 int ret = 0;
2452 int p_index = 0;
2453 unsigned long p_rate = 0;
2454
91baa9ff
JB
2455 lockdep_assert_held(&prepare_lock);
2456
4dff95dc
SB
2457 if (!core)
2458 return 0;
2459
4dff95dc 2460 if (core->parent == parent)
91baa9ff 2461 return 0;
4dff95dc
SB
2462
2463 /* verify ops for multi-parent clks */
91baa9ff
JB
2464 if (core->num_parents > 1 && !core->ops->set_parent)
2465 return -EPERM;
7452b219 2466
4dff95dc 2467 /* check that we are allowed to re-parent if the clock is in use */
91baa9ff
JB
2468 if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count)
2469 return -EBUSY;
b2476490 2470
e55a839a
JB
2471 if (clk_core_rate_is_protected(core))
2472 return -EBUSY;
b2476490 2473
71472c0c 2474 /* try finding the new parent index */
4dff95dc 2475 if (parent) {
d6968fca 2476 p_index = clk_fetch_parent_index(core, parent);
f1c8b2ed 2477 if (p_index < 0) {
71472c0c 2478 pr_debug("%s: clk %s can not be parent of clk %s\n",
4dff95dc 2479 __func__, parent->name, core->name);
91baa9ff 2480 return p_index;
71472c0c 2481 }
e8f0e68e 2482 p_rate = parent->rate;
b2476490
MT
2483 }
2484
9a34b453
MS
2485 ret = clk_pm_runtime_get(core);
2486 if (ret)
91baa9ff 2487 return ret;
9a34b453 2488
4dff95dc
SB
2489 /* propagate PRE_RATE_CHANGE notifications */
2490 ret = __clk_speculate_rates(core, p_rate);
b2476490 2491
4dff95dc
SB
2492 /* abort if a driver objects */
2493 if (ret & NOTIFY_STOP_MASK)
9a34b453 2494 goto runtime_put;
b2476490 2495
4dff95dc
SB
2496 /* do the re-parent */
2497 ret = __clk_set_parent(core, parent, p_index);
b2476490 2498
4dff95dc
SB
2499 /* propagate rate and accuracy recalculation accordingly */
2500 if (ret) {
2501 __clk_recalc_rates(core, ABORT_RATE_CHANGE);
2502 } else {
2503 __clk_recalc_rates(core, POST_RATE_CHANGE);
2504 __clk_recalc_accuracies(core);
b2476490
MT
2505 }
2506
9a34b453
MS
2507runtime_put:
2508 clk_pm_runtime_put(core);
71472c0c 2509
4dff95dc
SB
2510 return ret;
2511}
b2476490 2512
4dff95dc
SB
2513/**
2514 * clk_set_parent - switch the parent of a mux clk
2515 * @clk: the mux clk whose input we are switching
2516 * @parent: the new input to clk
2517 *
2518 * Re-parent clk to use parent as its new input source. If clk is in
2519 * prepared state, the clk will get enabled for the duration of this call. If
2520 * that's not acceptable for a specific clk (e.g. the consumer can't handle
2521 * that, the reparenting is glitchy in hardware, etc.), use the
2522 * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
2523 *
2524 * After successfully changing clk's parent clk_set_parent will update the
2525 * clk topology, sysfs topology and propagate rate recalculation via
2526 * __clk_recalc_rates.
2527 *
2528 * Returns 0 on success, a negative errno otherwise.
2529 */
2530int clk_set_parent(struct clk *clk, struct clk *parent)
2531{
91baa9ff
JB
2532 int ret;
2533
4dff95dc
SB
2534 if (!clk)
2535 return 0;
2536
91baa9ff 2537 clk_prepare_lock();
55e9b8b7
JB
2538
2539 if (clk->exclusive_count)
2540 clk_core_rate_unprotect(clk->core);
2541
91baa9ff
JB
2542 ret = clk_core_set_parent_nolock(clk->core,
2543 parent ? parent->core : NULL);
55e9b8b7
JB
2544
2545 if (clk->exclusive_count)
2546 clk_core_rate_protect(clk->core);
2547
91baa9ff
JB
2548 clk_prepare_unlock();
2549
2550 return ret;
b2476490 2551}
4dff95dc 2552EXPORT_SYMBOL_GPL(clk_set_parent);
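
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * park a mux on a slow parent across some operation and restore the
 * original parent afterwards. All handles are hypothetical.
 *
 *	struct clk *old_parent = clk_get_parent(mux_clk);
 *
 *	ret = clk_set_parent(mux_clk, slow_clk);
 *	if (ret)
 *		return ret;
 *
 *	(reprogram the fast parent here)
 *
 *	ret = clk_set_parent(mux_clk, old_parent);
 */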
b2476490 2553
9e4d04ad
JB
2554static int clk_core_set_phase_nolock(struct clk_core *core, int degrees)
2555{
2556 int ret = -EINVAL;
2557
2558 lockdep_assert_held(&prepare_lock);
2559
2560 if (!core)
2561 return 0;
2562
e55a839a
JB
2563 if (clk_core_rate_is_protected(core))
2564 return -EBUSY;
2565
9e4d04ad
JB
2566 trace_clk_set_phase(core, degrees);
2567
7f95beea 2568 if (core->ops->set_phase) {
9e4d04ad 2569 ret = core->ops->set_phase(core->hw, degrees);
7f95beea
SL
2570 if (!ret)
2571 core->phase = degrees;
2572 }
9e4d04ad
JB
2573
2574 trace_clk_set_phase_complete(core, degrees);
2575
2576 return ret;
2577}
2578
4dff95dc
SB
2579/**
2580 * clk_set_phase - adjust the phase shift of a clock signal
2581 * @clk: clock signal source
2582 * @degrees: number of degrees the signal is shifted
2583 *
2584 * Shifts the phase of a clock signal by the specified
2585 * degrees. Returns 0 on success, a negative errno otherwise.
2586 *
2587 * This function makes no distinction about the input or reference
2588 * signal that we adjust the clock signal phase against. For example,
2589 * with phase-locked loop clock signal generators we may shift phase with
2590 * respect to the feedback clock signal input, but in other cases the
2591 * clock phase may be shifted with respect to some other, unspecified
2592 * signal.
2593 *
2594 * Additionally the concept of phase shift does not propagate through
2595 * the clock tree hierarchy, which sets it apart from clock rates and
2596 * clock accuracy. A parent clock phase attribute does not have an
2597 * impact on the phase attribute of a child clock.
b2476490 2598 */
4dff95dc 2599int clk_set_phase(struct clk *clk, int degrees)
b2476490 2600{
9e4d04ad 2601 int ret;
b2476490 2602
4dff95dc
SB
2603 if (!clk)
2604 return 0;
b2476490 2605
4dff95dc
SB
2606 /* sanity check degrees */
2607 degrees %= 360;
2608 if (degrees < 0)
2609 degrees += 360;
bf47b4fd 2610
4dff95dc 2611 clk_prepare_lock();
3fa2252b 2612
55e9b8b7
JB
2613 if (clk->exclusive_count)
2614 clk_core_rate_unprotect(clk->core);
3fa2252b 2615
9e4d04ad 2616 ret = clk_core_set_phase_nolock(clk->core, degrees);
3fa2252b 2617
55e9b8b7
JB
2618 if (clk->exclusive_count)
2619 clk_core_rate_protect(clk->core);
b2476490 2620
4dff95dc 2621 clk_prepare_unlock();
dfc202ea 2622
4dff95dc
SB
2623 return ret;
2624}
2625EXPORT_SYMBOL_GPL(clk_set_phase);
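
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * shift a sampling clock by a quarter period, as an MMC-style controller
 * might do during tuning, then read the cached value back. The handle and
 * the 90 degree value are hypothetical.
 *
 *	ret = clk_set_phase(sample_clk, 90);
 *	if (!ret)
 *		degrees = clk_get_phase(sample_clk);
 */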
b2476490 2626
4dff95dc
SB
2627static int clk_core_get_phase(struct clk_core *core)
2628{
2629 int ret;
b2476490 2630
4dff95dc 2631 clk_prepare_lock();
1f9c63e8
SL
2632 /* Always try to update cached phase if possible */
2633 if (core->ops->get_phase)
2634 core->phase = core->ops->get_phase(core->hw);
4dff95dc
SB
2635 ret = core->phase;
2636 clk_prepare_unlock();
71472c0c 2637
4dff95dc 2638 return ret;
b2476490
MT
2639}
2640
4dff95dc
SB
2641/**
2642 * clk_get_phase - return the phase shift of a clock signal
2643 * @clk: clock signal source
2644 *
2645 * Returns the phase shift of a clock node in degrees, or a negative
2646 * errno on error.
2647 */
2648int clk_get_phase(struct clk *clk)
1c8e6004 2649{
4dff95dc 2650 if (!clk)
1c8e6004
TV
2651 return 0;
2652
4dff95dc
SB
2653 return clk_core_get_phase(clk->core);
2654}
2655EXPORT_SYMBOL_GPL(clk_get_phase);
1c8e6004 2656
9fba738a
JB
2657static void clk_core_reset_duty_cycle_nolock(struct clk_core *core)
2658{
2659 /* Assume a default value of 50% */
2660 core->duty.num = 1;
2661 core->duty.den = 2;
2662}
2663
2664static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core);
2665
2666static int clk_core_update_duty_cycle_nolock(struct clk_core *core)
2667{
2668 struct clk_duty *duty = &core->duty;
2669 int ret = 0;
2670
2671 if (!core->ops->get_duty_cycle)
2672 return clk_core_update_duty_cycle_parent_nolock(core);
2673
2674 ret = core->ops->get_duty_cycle(core->hw, duty);
2675 if (ret)
2676 goto reset;
2677
2678 /* Don't trust the clock provider too much */
2679 if (duty->den == 0 || duty->num > duty->den) {
2680 ret = -EINVAL;
2681 goto reset;
2682 }
2683
2684 return 0;
2685
2686reset:
2687 clk_core_reset_duty_cycle_nolock(core);
2688 return ret;
2689}
2690
2691static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core)
2692{
2693 int ret = 0;
2694
2695 if (core->parent &&
2696 core->flags & CLK_DUTY_CYCLE_PARENT) {
2697 ret = clk_core_update_duty_cycle_nolock(core->parent);
2698 memcpy(&core->duty, &core->parent->duty, sizeof(core->duty));
2699 } else {
2700 clk_core_reset_duty_cycle_nolock(core);
2701 }
2702
2703 return ret;
2704}
2705
2706static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core,
2707 struct clk_duty *duty);
2708
2709static int clk_core_set_duty_cycle_nolock(struct clk_core *core,
2710 struct clk_duty *duty)
2711{
2712 int ret;
2713
2714 lockdep_assert_held(&prepare_lock);
2715
2716 if (clk_core_rate_is_protected(core))
2717 return -EBUSY;
2718
2719 trace_clk_set_duty_cycle(core, duty);
2720
2721 if (!core->ops->set_duty_cycle)
2722 return clk_core_set_duty_cycle_parent_nolock(core, duty);
2723
2724 ret = core->ops->set_duty_cycle(core->hw, duty);
2725 if (!ret)
2726 memcpy(&core->duty, duty, sizeof(*duty));
2727
2728 trace_clk_set_duty_cycle_complete(core, duty);
2729
2730 return ret;
2731}
2732
2733static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core,
2734 struct clk_duty *duty)
2735{
2736 int ret = 0;
2737
2738 if (core->parent &&
2739 core->flags & (CLK_DUTY_CYCLE_PARENT | CLK_SET_RATE_PARENT)) {
2740 ret = clk_core_set_duty_cycle_nolock(core->parent, duty);
2741 memcpy(&core->duty, &core->parent->duty, sizeof(core->duty));
2742 }
2743
2744 return ret;
2745}
2746
2747/**
2748 * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal
2749 * @clk: clock signal source
2750 * @num: numerator of the duty cycle ratio to be applied
2751 * @den: denominator of the duty cycle ratio to be applied
2752 *
2753 * Apply the duty cycle ratio if the ratio is valid and the clock can
2754 * perform this operation.
2755 *
2756 * Returns 0 on success, a negative errno otherwise.
2757 */
2758int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den)
2759{
2760 int ret;
2761 struct clk_duty duty;
2762
2763 if (!clk)
2764 return 0;
2765
2766 /* sanity check the ratio */
2767 if (den == 0 || num > den)
2768 return -EINVAL;
2769
2770 duty.num = num;
2771 duty.den = den;
2772
2773 clk_prepare_lock();
2774
2775 if (clk->exclusive_count)
2776 clk_core_rate_unprotect(clk->core);
2777
2778 ret = clk_core_set_duty_cycle_nolock(clk->core, &duty);
2779
2780 if (clk->exclusive_count)
2781 clk_core_rate_protect(clk->core);
2782
2783 clk_prepare_unlock();
2784
2785 return ret;
2786}
2787EXPORT_SYMBOL_GPL(clk_set_duty_cycle);
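
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * request a one-third duty cycle and read it back scaled to a percentage
 * via clk_get_scaled_duty_cycle(). The handle is hypothetical.
 *
 *	ret = clk_set_duty_cycle(pwm_clk, 1, 3);
 *	if (!ret)
 *		pct = clk_get_scaled_duty_cycle(pwm_clk, 100);
 */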
2788
2789static int clk_core_get_scaled_duty_cycle(struct clk_core *core,
2790 unsigned int scale)
2791{
2792 struct clk_duty *duty = &core->duty;
2793 int ret;
2794
2795 clk_prepare_lock();
2796
2797 ret = clk_core_update_duty_cycle_nolock(core);
2798 if (!ret)
2799 ret = mult_frac(scale, duty->num, duty->den);
2800
2801 clk_prepare_unlock();
2802
2803 return ret;
2804}
2805
2806/**
2807 * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal
2808 * @clk: clock signal source
2809 * @scale: scaling factor to be applied to represent the ratio as an integer
2810 *
2811 * Returns the duty cycle ratio of a clock node multiplied by the provided
2812 * scaling factor, or negative errno on error.
2813 */
2814int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale)
2815{
2816 if (!clk)
2817 return 0;
2818
2819 return clk_core_get_scaled_duty_cycle(clk->core, scale);
2820}
2821EXPORT_SYMBOL_GPL(clk_get_scaled_duty_cycle);
2822
4dff95dc
SB
2823/**
2824 * clk_is_match - check if two clk's point to the same hardware clock
2825 * @p: clk compared against q
2826 * @q: clk compared against p
2827 *
2828 * Returns true if the two struct clk pointers both point to the same hardware
2829 * clock node. Put differently, returns true if struct clk *p and struct clk *q
2830 * share the same struct clk_core object.
2831 *
2832 * Returns false otherwise. Note that two NULL clks are treated as matching.
2833 */
2834bool clk_is_match(const struct clk *p, const struct clk *q)
2835{
2836 /* trivial case: identical struct clk's or both NULL */
2837 if (p == q)
2838 return true;
1c8e6004 2839
3fe003f9 2840 /* true if clk->core pointers match. Avoid dereferencing garbage */
4dff95dc
SB
2841 if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q))
2842 if (p->core == q->core)
2843 return true;
1c8e6004 2844
4dff95dc
SB
2845 return false;
2846}
2847EXPORT_SYMBOL_GPL(clk_is_match);
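
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * two independently obtained consumer handles can be compared without
 * poking at their internals. The lookup names are hypothetical.
 *
 *	struct clk *a = clk_get(dev, "bus");
 *	struct clk *b = of_clk_get(dev->of_node, 0);
 *
 *	if (clk_is_match(a, b))
 *		(both handles refer to the same hardware clock)
 */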
1c8e6004 2848
4dff95dc 2849/*** debugfs support ***/
1c8e6004 2850
4dff95dc
SB
2851#ifdef CONFIG_DEBUG_FS
2852#include <linux/debugfs.h>
1c8e6004 2853
4dff95dc
SB
2854static struct dentry *rootdir;
2855static int inited = 0;
2856static DEFINE_MUTEX(clk_debug_lock);
2857static HLIST_HEAD(clk_debug_list);
1c8e6004 2858
4dff95dc
SB
2859static struct hlist_head *all_lists[] = {
2860 &clk_root_list,
2861 &clk_orphan_list,
2862 NULL,
2863};
2864
2865static struct hlist_head *orphan_list[] = {
2866 &clk_orphan_list,
2867 NULL,
2868};
2869
2870static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
2871 int level)
b2476490 2872{
4dff95dc
SB
2873 if (!c)
2874 return;
b2476490 2875
9fba738a 2876 seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu %5d %6d\n",
4dff95dc
SB
2877 level * 3 + 1, "",
2878 30 - level * 3, c->name,
e55a839a
JB
2879 c->enable_count, c->prepare_count, c->protect_count,
2880 clk_core_get_rate(c), clk_core_get_accuracy(c),
9fba738a
JB
2881 clk_core_get_phase(c),
2882 clk_core_get_scaled_duty_cycle(c, 100000));
4dff95dc 2883}
89ac8d7a 2884
4dff95dc
SB
2885static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
2886 int level)
2887{
2888 struct clk_core *child;
b2476490 2889
4dff95dc
SB
2890 if (!c)
2891 return;
b2476490 2892
4dff95dc 2893 clk_summary_show_one(s, c, level);
0e1c0301 2894
4dff95dc
SB
2895 hlist_for_each_entry(child, &c->children, child_node)
2896 clk_summary_show_subtree(s, child, level + 1);
1c8e6004 2897}
b2476490 2898
4dff95dc 2899static int clk_summary_show(struct seq_file *s, void *data)
1c8e6004 2900{
4dff95dc
SB
2901 struct clk_core *c;
2902 struct hlist_head **lists = (struct hlist_head **)s->private;
1c8e6004 2903
9fba738a
JB
2904 seq_puts(s, " enable prepare protect duty\n");
2905 seq_puts(s, " clock count count count rate accuracy phase cycle\n");
2906 seq_puts(s, "---------------------------------------------------------------------------------------------\n");
b2476490 2907
1c8e6004
TV
2908 clk_prepare_lock();
2909
4dff95dc
SB
2910 for (; *lists; lists++)
2911 hlist_for_each_entry(c, *lists, child_node)
2912 clk_summary_show_subtree(s, c, 0);
b2476490 2913
eab89f69 2914 clk_prepare_unlock();
b2476490 2915
4dff95dc 2916 return 0;
b2476490 2917}
fec0ef3f 2918DEFINE_SHOW_ATTRIBUTE(clk_summary);
b2476490 2919
4dff95dc
SB
2920static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
2921{
2922 if (!c)
2923 return;
b2476490 2924
7cb81136 2925 /* This should be JSON format, i.e. elements separated with a comma */
4dff95dc
SB
2926 seq_printf(s, "\"%s\": { ", c->name);
2927 seq_printf(s, "\"enable_count\": %d,", c->enable_count);
2928 seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
e55a839a 2929 seq_printf(s, "\"protect_count\": %d,", c->protect_count);
7cb81136
SW
2930 seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c));
2931 seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c));
c6e90997 2932 seq_printf(s, "\"phase\": %d,", clk_core_get_phase(c));
9fba738a
JB
2933 seq_printf(s, "\"duty_cycle\": %u",
2934 clk_core_get_scaled_duty_cycle(c, 100000));
b2476490 2935}
b2476490 2936
4dff95dc 2937static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level)
b2476490 2938{
4dff95dc 2939 struct clk_core *child;
b2476490 2940
4dff95dc
SB
2941 if (!c)
2942 return;
b2476490 2943
4dff95dc 2944 clk_dump_one(s, c, level);
b2476490 2945
4dff95dc 2946 hlist_for_each_entry(child, &c->children, child_node) {
4d327586 2947 seq_putc(s, ',');
4dff95dc 2948 clk_dump_subtree(s, child, level + 1);
b2476490
MT
2949 }
2950
4d327586 2951 seq_putc(s, '}');
b2476490
MT
2952}
2953
fec0ef3f 2954static int clk_dump_show(struct seq_file *s, void *data)
4e88f3de 2955{
4dff95dc
SB
2956 struct clk_core *c;
2957 bool first_node = true;
2958 struct hlist_head **lists = (struct hlist_head **)s->private;
4e88f3de 2959
4d327586 2960 seq_putc(s, '{');
4dff95dc 2961 clk_prepare_lock();
035a61c3 2962
4dff95dc
SB
2963 for (; *lists; lists++) {
2964 hlist_for_each_entry(c, *lists, child_node) {
2965 if (!first_node)
4d327586 2966 seq_putc(s, ',');
4dff95dc
SB
2967 first_node = false;
2968 clk_dump_subtree(s, c, 0);
2969 }
2970 }
4e88f3de 2971
4dff95dc 2972 clk_prepare_unlock();
4e88f3de 2973
70e9f4dd 2974 seq_puts(s, "}\n");
4dff95dc 2975 return 0;
4e88f3de 2976}
fec0ef3f 2977DEFINE_SHOW_ATTRIBUTE(clk_dump);
89ac8d7a 2978
a6059ab9
GU
2979static const struct {
2980 unsigned long flag;
2981 const char *name;
2982} clk_flags[] = {
40dd71c7 2983#define ENTRY(f) { f, #f }
a6059ab9
GU
2984 ENTRY(CLK_SET_RATE_GATE),
2985 ENTRY(CLK_SET_PARENT_GATE),
2986 ENTRY(CLK_SET_RATE_PARENT),
2987 ENTRY(CLK_IGNORE_UNUSED),
a6059ab9
GU
2988 ENTRY(CLK_GET_RATE_NOCACHE),
2989 ENTRY(CLK_SET_RATE_NO_REPARENT),
2990 ENTRY(CLK_GET_ACCURACY_NOCACHE),
2991 ENTRY(CLK_RECALC_NEW_RATES),
2992 ENTRY(CLK_SET_RATE_UNGATE),
2993 ENTRY(CLK_IS_CRITICAL),
2994 ENTRY(CLK_OPS_PARENT_ENABLE),
9fba738a 2995 ENTRY(CLK_DUTY_CYCLE_PARENT),
a6059ab9
GU
2996#undef ENTRY
2997};
2998
fec0ef3f 2999static int clk_flags_show(struct seq_file *s, void *data)
a6059ab9
GU
3000{
3001 struct clk_core *core = s->private;
3002 unsigned long flags = core->flags;
3003 unsigned int i;
3004
3005 for (i = 0; flags && i < ARRAY_SIZE(clk_flags); i++) {
3006 if (flags & clk_flags[i].flag) {
3007 seq_printf(s, "%s\n", clk_flags[i].name);
3008 flags &= ~clk_flags[i].flag;
3009 }
3010 }
3011 if (flags) {
3012 /* Unknown flags */
3013 seq_printf(s, "0x%lx\n", flags);
3014 }
3015
3016 return 0;
3017}
fec0ef3f 3018DEFINE_SHOW_ATTRIBUTE(clk_flags);
a6059ab9 3019
11f6c230
SB
3020static void possible_parent_show(struct seq_file *s, struct clk_core *core,
3021 unsigned int i, char terminator)
92031575 3022{
2d156b78 3023 struct clk_core *parent;
92031575 3024
2d156b78
CYT
3025 /*
3026 * Go through the following options to fetch a parent's name.
3027 *
3028 * 1. Fetch the registered parent clock and use its name
3029 * 2. Use the global (fallback) name if specified
3030 * 3. Use the local fw_name if provided
3031 * 4. Fetch parent clock's clock-output-name if DT index was set
3032 *
3033 * This may still fail in some cases, such as when the parent is
3034 * specified directly via a struct clk_hw pointer, but it isn't
3035 * registered (yet).
3036 */
2d156b78
CYT
3037 parent = clk_core_get_parent_by_index(core, i);
3038 if (parent)
3039 seq_printf(s, "%s", parent->name);
3040 else if (core->parents[i].name)
3041 seq_printf(s, "%s", core->parents[i].name);
3042 else if (core->parents[i].fw_name)
3043 seq_printf(s, "<%s>(fw)", core->parents[i].fw_name);
3044 else if (core->parents[i].index >= 0)
3045 seq_printf(s, "%s",
3046 of_clk_get_parent_name(core->of_node,
3047 core->parents[i].index));
3048 else
3049 seq_puts(s, "(missing)");
92031575 3050
11f6c230
SB
3051 seq_putc(s, terminator);
3052}
3053
fec0ef3f 3054static int possible_parents_show(struct seq_file *s, void *data)
92031575
PDS
3055{
3056 struct clk_core *core = s->private;
3057 int i;
3058
3059 for (i = 0; i < core->num_parents - 1; i++)
11f6c230 3060 possible_parent_show(s, core, i, ' ');
92031575 3061
11f6c230 3062 possible_parent_show(s, core, i, '\n');
92031575
PDS
3063
3064 return 0;
3065}
fec0ef3f 3066DEFINE_SHOW_ATTRIBUTE(possible_parents);
92031575 3067
e5e89247
LC
3068static int current_parent_show(struct seq_file *s, void *data)
3069{
3070 struct clk_core *core = s->private;
3071
3072 if (core->parent)
3073 seq_printf(s, "%s\n", core->parent->name);
3074
3075 return 0;
3076}
3077DEFINE_SHOW_ATTRIBUTE(current_parent);
3078
9fba738a
JB
3079static int clk_duty_cycle_show(struct seq_file *s, void *data)
3080{
3081 struct clk_core *core = s->private;
3082 struct clk_duty *duty = &core->duty;
3083
3084 seq_printf(s, "%u/%u\n", duty->num, duty->den);
3085
3086 return 0;
3087}
3088DEFINE_SHOW_ATTRIBUTE(clk_duty_cycle);
3089
8a26bbbb 3090static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
4dff95dc 3091{
8a26bbbb 3092 struct dentry *root;
b61c43c0 3093
8a26bbbb
GKH
3094 if (!core || !pdentry)
3095 return;
b2476490 3096
8a26bbbb
GKH
3097 root = debugfs_create_dir(core->name, pdentry);
3098 core->dentry = root;
92031575 3099
8a26bbbb
GKH
3100 debugfs_create_ulong("clk_rate", 0444, root, &core->rate);
3101 debugfs_create_ulong("clk_accuracy", 0444, root, &core->accuracy);
3102 debugfs_create_u32("clk_phase", 0444, root, &core->phase);
3103 debugfs_create_file("clk_flags", 0444, root, core, &clk_flags_fops);
3104 debugfs_create_u32("clk_prepare_count", 0444, root, &core->prepare_count);
3105 debugfs_create_u32("clk_enable_count", 0444, root, &core->enable_count);
3106 debugfs_create_u32("clk_protect_count", 0444, root, &core->protect_count);
3107 debugfs_create_u32("clk_notifier_count", 0444, root, &core->notifier_count);
9fba738a
JB
3108 debugfs_create_file("clk_duty_cycle", 0444, root, core,
3109 &clk_duty_cycle_fops);
b2476490 3110
e5e89247
LC
3111 if (core->num_parents > 0)
3112 debugfs_create_file("clk_parent", 0444, root, core,
3113 &current_parent_fops);
3114
8a26bbbb
GKH
3115 if (core->num_parents > 1)
3116 debugfs_create_file("clk_possible_parents", 0444, root, core,
3117 &possible_parents_fops);
b2476490 3118
8a26bbbb
GKH
3119 if (core->ops->debug_init)
3120 core->ops->debug_init(core->hw, core->dentry);
b2476490 3121}
035a61c3
TV
3122
3123/**
6e5ab41b
SB
3124 * clk_debug_register - add a clk node to the debugfs clk directory
3125 * @core: the clk being added to the debugfs clk directory
035a61c3 3126 *
6e5ab41b
SB
3127 * Dynamically adds a clk to the debugfs clk directory if debugfs has been
3128 * initialized. Otherwise it bails out early since the debugfs clk directory
4dff95dc 3129 * will be created lazily by clk_debug_init as part of a late_initcall.
035a61c3 3130 */
8a26bbbb 3131static void clk_debug_register(struct clk_core *core)
035a61c3 3132{
4dff95dc
SB
3133 mutex_lock(&clk_debug_lock);
3134 hlist_add_head(&core->debug_node, &clk_debug_list);
db3188fa 3135 if (inited)
8a26bbbb 3136 clk_debug_create_one(core, rootdir);
4dff95dc 3137 mutex_unlock(&clk_debug_lock);
035a61c3 3138}
b2476490 3139
4dff95dc 3140/**
6e5ab41b
SB
3141 * clk_debug_unregister - remove a clk node from the debugfs clk directory
3142 * @core: the clk being removed from the debugfs clk directory
e59c5371 3143 *
6e5ab41b
SB
3144 * Dynamically removes a clk and all its child nodes from the
3145 * debugfs clk directory if clk->dentry points to debugfs created by
706d5c73 3146 * clk_debug_register in __clk_core_init.
e59c5371 3147 */
4dff95dc 3148static void clk_debug_unregister(struct clk_core *core)
e59c5371 3149{
4dff95dc
SB
3150 mutex_lock(&clk_debug_lock);
3151 hlist_del_init(&core->debug_node);
3152 debugfs_remove_recursive(core->dentry);
3153 core->dentry = NULL;
3154 mutex_unlock(&clk_debug_lock);
3155}
e59c5371 3156
4dff95dc 3157/**
6e5ab41b 3158 * clk_debug_init - lazily populate the debugfs clk directory
4dff95dc 3159 *
6e5ab41b
SB
3160 * clks are often initialized very early during boot before memory can be
3161 * dynamically allocated and well before debugfs is set up. This function
3162 * populates the debugfs clk directory once at boot-time when we know that
3163 * debugfs is set up. It should only be called once at boot-time; all other
3164 * clks added dynamically will be registered with clk_debug_register.
4dff95dc
SB
3165 */
3166static int __init clk_debug_init(void)
3167{
3168 struct clk_core *core;
dfc202ea 3169
4dff95dc 3170 rootdir = debugfs_create_dir("clk", NULL);
e59c5371 3171
8a26bbbb
GKH
3172 debugfs_create_file("clk_summary", 0444, rootdir, &all_lists,
3173 &clk_summary_fops);
3174 debugfs_create_file("clk_dump", 0444, rootdir, &all_lists,
3175 &clk_dump_fops);
3176 debugfs_create_file("clk_orphan_summary", 0444, rootdir, &orphan_list,
3177 &clk_summary_fops);
3178 debugfs_create_file("clk_orphan_dump", 0444, rootdir, &orphan_list,
3179 &clk_dump_fops);
e59c5371 3180
4dff95dc
SB
3181 mutex_lock(&clk_debug_lock);
3182 hlist_for_each_entry(core, &clk_debug_list, debug_node)
3183 clk_debug_create_one(core, rootdir);
e59c5371 3184
4dff95dc
SB
3185 inited = 1;
3186 mutex_unlock(&clk_debug_lock);
e59c5371 3187
4dff95dc
SB
3188 return 0;
3189}
3190late_initcall(clk_debug_init);
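
/*
 * Example (editor's note, not part of the original file): with
 * CONFIG_DEBUG_FS enabled, the files created above can be inspected
 * from userspace, e.g.:
 *
 *	# mount -t debugfs none /sys/kernel/debug
 *	# cat /sys/kernel/debug/clk/clk_summary
 *	# cat /sys/kernel/debug/clk/clk_dump
 */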
3191#else
8a26bbbb 3192static inline void clk_debug_register(struct clk_core *core) { }
4dff95dc
SB
3193static inline void clk_debug_reparent(struct clk_core *core,
3194 struct clk_core *new_parent)
035a61c3 3195{
035a61c3 3196}
4dff95dc 3197static inline void clk_debug_unregister(struct clk_core *core)
3d3801ef 3198{
3d3801ef 3199}
4dff95dc 3200#endif
3d3801ef 3201
b2476490 3202/**
be45ebf2 3203 * __clk_core_init - initialize the data structures in a struct clk_core
d35c80c2 3204 * @core: clk_core being initialized
b2476490 3205 *
035a61c3 3206 * Initializes the lists in struct clk_core, queries the hardware for the
b2476490 3207 * parent and rate and sets them both.
b2476490 3208 */
be45ebf2 3209static int __clk_core_init(struct clk_core *core)
b2476490 3210{
fc0c209c 3211 int ret;
035a61c3 3212 struct clk_core *orphan;
b67bfe0d 3213 struct hlist_node *tmp2;
1c8e6004 3214 unsigned long rate;
b2476490 3215
d35c80c2 3216 if (!core)
d1302a36 3217 return -EINVAL;
b2476490 3218
eab89f69 3219 clk_prepare_lock();
b2476490 3220
9a34b453
MS
3221 ret = clk_pm_runtime_get(core);
3222 if (ret)
3223 goto unlock;
3224
b2476490 3225 /* check to see if a clock with this name is already registered */
d6968fca 3226 if (clk_core_lookup(core->name)) {
d1302a36 3227 pr_debug("%s: clk %s already initialized\n",
d6968fca 3228 __func__, core->name);
d1302a36 3229 ret = -EEXIST;
b2476490 3230 goto out;
d1302a36 3231 }
b2476490 3232
5fb94e9c 3233 /* check that clk_ops are sane. See Documentation/driver-api/clk.rst */
d6968fca
SB
3234 if (core->ops->set_rate &&
3235 !((core->ops->round_rate || core->ops->determine_rate) &&
3236 core->ops->recalc_rate)) {
c44fccb5
MY
3237 pr_err("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
3238 __func__, core->name);
d1302a36 3239 ret = -EINVAL;
d4d7e3dd
MT
3240 goto out;
3241 }
3242
d6968fca 3243 if (core->ops->set_parent && !core->ops->get_parent) {
c44fccb5
MY
3244 pr_err("%s: %s must implement .get_parent & .set_parent\n",
3245 __func__, core->name);
d1302a36 3246 ret = -EINVAL;
d4d7e3dd
MT
3247 goto out;
3248 }
3249
3c8e77dd
MY
3250 if (core->num_parents > 1 && !core->ops->get_parent) {
3251 pr_err("%s: %s must implement .get_parent as it has multi parents\n",
3252 __func__, core->name);
3253 ret = -EINVAL;
3254 goto out;
3255 }
3256
d6968fca
SB
3257 if (core->ops->set_rate_and_parent &&
3258 !(core->ops->set_parent && core->ops->set_rate)) {
c44fccb5 3259 pr_err("%s: %s must implement .set_parent & .set_rate\n",
d6968fca 3260 __func__, core->name);
3fa2252b
SB
3261 ret = -EINVAL;
3262 goto out;
3263 }
3264
d6968fca 3265 core->parent = __clk_init_parent(core);
b2476490
MT
3266
3267 /*
706d5c73
SB
3268 * Populate core->parent if parent has already been clk_core_init'd. If
3269 * parent has not yet been clk_core_init'd then place clk in the orphan
47b0eeb3 3270 * list. If clk doesn't have any parents then place it in the root
b2476490
MT
3271 * clk list.
3272 *
3273 * Every time a new clk is clk_init'd then we walk the list of orphan
3274 * clocks and re-parent any that are children of the clock currently
3275 * being clk_init'd.
3276 */
e6500344 3277 if (core->parent) {
d6968fca
SB
3278 hlist_add_head(&core->child_node,
3279 &core->parent->children);
e6500344 3280 core->orphan = core->parent->orphan;
47b0eeb3 3281 } else if (!core->num_parents) {
d6968fca 3282 hlist_add_head(&core->child_node, &clk_root_list);
e6500344
HS
3283 core->orphan = false;
3284 } else {
d6968fca 3285 hlist_add_head(&core->child_node, &clk_orphan_list);
e6500344
HS
3286 core->orphan = true;
3287 }
b2476490 3288
541debae
JB
3289 /*
3290 * optional platform-specific magic
3291 *
3292 * The .init callback is not used by any of the basic clock types, but
3293 * exists for weird hardware that must perform initialization magic.
3294 * Please consider other ways of solving initialization problems before
3295 * using this callback, as its use is discouraged.
3296 */
3297 if (core->ops->init)
3298 core->ops->init(core->hw);
3299
5279fc40
BB
3300 /*
3301 * Set clk's accuracy. The preferred method is to use
3302 * .recalc_accuracy. For simple clocks and lazy developers the default
3303 * fallback is to use the parent's accuracy. If a clock doesn't have a
3304 * parent (or is orphaned) then accuracy is set to zero (perfect
3305 * clock).
3306 */
d6968fca
SB
3307 if (core->ops->recalc_accuracy)
3308 core->accuracy = core->ops->recalc_accuracy(core->hw,
3309 __clk_get_accuracy(core->parent));
3310 else if (core->parent)
3311 core->accuracy = core->parent->accuracy;
5279fc40 3312 else
d6968fca 3313 core->accuracy = 0;
5279fc40 3314
9824cf73
MR
3315 /*
3316 * Set clk's phase.
3317 * Since a phase is by definition relative to its parent, just
3318 * query the current clock phase, or just assume it's in phase.
3319 */
d6968fca
SB
3320 if (core->ops->get_phase)
3321 core->phase = core->ops->get_phase(core->hw);
9824cf73 3322 else
d6968fca 3323 core->phase = 0;
9824cf73 3324
9fba738a
JB
3325 /*
3326 * Set clk's duty cycle.
3327 */
3328 clk_core_update_duty_cycle_nolock(core);
3329
b2476490
MT
3330 /*
3331 * Set clk's rate. The preferred method is to use .recalc_rate. For
3332 * simple clocks and lazy developers the default fallback is to use the
3333 * parent's rate. If a clock doesn't have a parent (or is orphaned)
3334 * then rate is set to zero.
3335 */
d6968fca
SB
3336 if (core->ops->recalc_rate)
3337 rate = core->ops->recalc_rate(core->hw,
3338 clk_core_get_rate_nolock(core->parent));
3339 else if (core->parent)
3340 rate = core->parent->rate;
b2476490 3341 else
1c8e6004 3342 rate = 0;
d6968fca 3343 core->rate = core->req_rate = rate;
b2476490 3344
99652a46
JB
3345 /*
3346 * Enable CLK_IS_CRITICAL clocks so newly added critical clocks
3347 * don't get accidentally disabled when walking the orphan tree and
3348 * reparenting clocks
3349 */
3350 if (core->flags & CLK_IS_CRITICAL) {
3351 unsigned long flags;
3352
3353 clk_core_prepare(core);
3354
3355 flags = clk_enable_lock();
3356 clk_core_enable(core);
3357 clk_enable_unlock(flags);
3358 }
3359
b2476490 3360 /*
0e8f6e49
MY
3361 * walk the list of orphan clocks and reparent any that have newly
3362 * found a parent.
b2476490 3363 */
b67bfe0d 3364 hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
0e8f6e49 3365 struct clk_core *parent = __clk_init_parent(orphan);
1f61e5f1 3366
904e6ead 3367 /*
99652a46
JB
3368 * We need to use __clk_set_parent_before() and _after() to
3369 * properly migrate any prepare/enable count of the orphan
3370 * clock. This is important for CLK_IS_CRITICAL clocks, which
3371 * are enabled during init but might not have a parent yet.
904e6ead
MT
3372 */
3373 if (parent) {
f8f8f1d0 3374 /* update the clk tree topology */
99652a46
JB
3375 __clk_set_parent_before(orphan, parent);
3376 __clk_set_parent_after(orphan, parent, NULL);
904e6ead
MT
3377 __clk_recalc_accuracies(orphan);
3378 __clk_recalc_rates(orphan, 0);
3379 }
0e8f6e49 3380 }
b2476490 3381
d6968fca 3382 kref_init(&core->ref);
b2476490 3383out:
9a34b453
MS
3384 clk_pm_runtime_put(core);
3385unlock:
eab89f69 3386 clk_prepare_unlock();
b2476490 3387
89f7e9de 3388 if (!ret)
d6968fca 3389 clk_debug_register(core);
89f7e9de 3390
d1302a36 3391 return ret;
b2476490
MT
3392}
3393
1df4046a
SB
3394/**
3395 * clk_core_link_consumer - Add a clk consumer to the list of consumers in a clk_core
3396 * @core: clk to add consumer to
3397 * @clk: consumer to link to a clk
3398 */
3399static void clk_core_link_consumer(struct clk_core *core, struct clk *clk)
3400{
3401 clk_prepare_lock();
3402 hlist_add_head(&clk->clks_node, &core->clks);
3403 clk_prepare_unlock();
3404}
3405
3406/**
3407 * clk_core_unlink_consumer - Remove a clk consumer from the list of consumers in a clk_core
3408 * @clk: consumer to unlink
3409 */
3410static void clk_core_unlink_consumer(struct clk *clk)
3411{
3412 lockdep_assert_held(&prepare_lock);
3413 hlist_del(&clk->clks_node);
3414}
3415
3416/**
3417 * alloc_clk - Allocate a clk consumer, but leave it unlinked to the clk_core
3418 * @core: clk to allocate a consumer for
3419 * @dev_id: string describing device name
3420 * @con_id: connection ID string on device
3421 *
3422 * Returns: clk consumer left unlinked from the consumer list
3423 */
3424static struct clk *alloc_clk(struct clk_core *core, const char *dev_id,
035a61c3 3425 const char *con_id)
0197b3ea 3426{
0197b3ea
SK
3427 struct clk *clk;
3428
035a61c3
TV
3429 clk = kzalloc(sizeof(*clk), GFP_KERNEL);
3430 if (!clk)
3431 return ERR_PTR(-ENOMEM);
3432
1df4046a 3433 clk->core = core;
035a61c3 3434 clk->dev_id = dev_id;
253160a8 3435 clk->con_id = kstrdup_const(con_id, GFP_KERNEL);
1c8e6004
TV
3436 clk->max_rate = ULONG_MAX;
3437
0197b3ea
SK
3438 return clk;
3439}
035a61c3 3440
1df4046a
SB
3441/**
3442 * free_clk - Free a clk consumer
3443 * @clk: clk consumer to free
3444 *
3445 * Note, this assumes the clk has been unlinked from the clk_core consumer
3446 * list.
3447 */
3448static void free_clk(struct clk *clk)
1c8e6004 3449{
253160a8 3450 kfree_const(clk->con_id);
1c8e6004
TV
3451 kfree(clk);
3452}
0197b3ea 3453
1df4046a
SB
3454/**
3455 * clk_hw_create_clk: Allocate and link a clk consumer to a clk_core given
3456 * a clk_hw
efa85048 3457 * @dev: clk consumer device
1df4046a
SB
3458 * @hw: clk_hw associated with the clk being consumed
3459 * @dev_id: string describing device name
3460 * @con_id: connection ID string on device
3461 *
3462 * This is the main function used to create a clk pointer for use by clk
3463 * consumers. It connects a consumer to the clk_core and clk_hw structures
3464 * used by the framework and clk provider respectively.
3465 */
efa85048 3466struct clk *clk_hw_create_clk(struct device *dev, struct clk_hw *hw,
1df4046a
SB
3467 const char *dev_id, const char *con_id)
3468{
3469 struct clk *clk;
3470 struct clk_core *core;
3471
3472 /* This is to allow this function to be chained to others */
3473 if (IS_ERR_OR_NULL(hw))
3474 return ERR_CAST(hw);
3475
3476 core = hw->core;
3477 clk = alloc_clk(core, dev_id, con_id);
3478 if (IS_ERR(clk))
3479 return clk;
efa85048 3480 clk->dev = dev;
1df4046a
SB
3481
3482 if (!try_module_get(core->owner)) {
3483 free_clk(clk);
3484 return ERR_PTR(-ENOENT);
3485 }
3486
3487 kref_get(&core->ref);
3488 clk_core_link_consumer(core, clk);
3489
3490 return clk;
3491}
3492
fc0c209c 3493static int clk_cpy_name(const char **dst_p, const char *src, bool must_exist)
b2476490 3494{
fc0c209c
SB
3495 const char *dst;
3496
3497 if (!src) {
3498 if (must_exist)
3499 return -EINVAL;
3500 return 0;
3501 }
3502
3503 *dst_p = dst = kstrdup_const(src, GFP_KERNEL);
3504 if (!dst)
3505 return -ENOMEM;
3506
3507 return 0;
3508}

static int clk_core_populate_parent_map(struct clk_core *core)
{
	const struct clk_init_data *init = core->hw->init;
	u8 num_parents = init->num_parents;
	const char * const *parent_names = init->parent_names;
	const struct clk_hw **parent_hws = init->parent_hws;
	const struct clk_parent_data *parent_data = init->parent_data;
	int i, ret = 0;
	struct clk_parent_map *parents, *parent;

	if (!num_parents)
		return 0;

	/*
	 * Avoid unnecessary string look-ups of clk_core's possible parents by
	 * having a cache of names/clk_hw pointers to clk_core pointers.
	 */
	parents = kcalloc(num_parents, sizeof(*parents), GFP_KERNEL);
	core->parents = parents;
	if (!parents)
		return -ENOMEM;

	/* Copy everything over because it might be __initdata */
	for (i = 0, parent = parents; i < num_parents; i++, parent++) {
		parent->index = -1;
		if (parent_names) {
			/* throw a WARN if any entries are NULL */
			WARN(!parent_names[i],
				"%s: invalid NULL in %s's .parent_names\n",
				__func__, core->name);
			ret = clk_cpy_name(&parent->name, parent_names[i],
					   true);
		} else if (parent_data) {
			parent->hw = parent_data[i].hw;
			parent->index = parent_data[i].index;
			ret = clk_cpy_name(&parent->fw_name,
					   parent_data[i].fw_name, false);
			if (!ret)
				ret = clk_cpy_name(&parent->name,
						   parent_data[i].name,
						   false);
		} else if (parent_hws) {
			parent->hw = parent_hws[i];
		} else {
			ret = -EINVAL;
			WARN(1, "Must specify parents if num_parents > 0\n");
		}

		if (ret) {
			do {
				kfree_const(parents[i].name);
				kfree_const(parents[i].fw_name);
			} while (--i >= 0);
			kfree(parents);

			return ret;
		}
	}

	return 0;
}

static void clk_core_free_parent_map(struct clk_core *core)
{
	int i = core->num_parents;

	if (!core->num_parents)
		return;

	while (--i >= 0) {
		kfree_const(core->parents[i].name);
		kfree_const(core->parents[i].fw_name);
	}

	kfree(core->parents);
}

static struct clk *
__clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
{
	int ret;
	struct clk_core *core;

	core = kzalloc(sizeof(*core), GFP_KERNEL);
	if (!core) {
		ret = -ENOMEM;
		goto fail_out;
	}

	core->name = kstrdup_const(hw->init->name, GFP_KERNEL);
	if (!core->name) {
		ret = -ENOMEM;
		goto fail_name;
	}

	if (WARN_ON(!hw->init->ops)) {
		ret = -EINVAL;
		goto fail_ops;
	}
	core->ops = hw->init->ops;

	if (dev && pm_runtime_enabled(dev))
		core->rpm_enabled = true;
	core->dev = dev;
	core->of_node = np;
	if (dev && dev->driver)
		core->owner = dev->driver->owner;
	core->hw = hw;
	core->flags = hw->init->flags;
	core->num_parents = hw->init->num_parents;
	core->min_rate = 0;
	core->max_rate = ULONG_MAX;
	hw->core = core;

	ret = clk_core_populate_parent_map(core);
	if (ret)
		goto fail_parents;

	INIT_HLIST_HEAD(&core->clks);

	/*
	 * Don't call clk_hw_create_clk() here because that would pin the
	 * provider module to itself and prevent it from ever being removed.
	 */
	hw->clk = alloc_clk(core, NULL, NULL);
	if (IS_ERR(hw->clk)) {
		ret = PTR_ERR(hw->clk);
		goto fail_create_clk;
	}

	clk_core_link_consumer(hw->core, hw->clk);

	ret = __clk_core_init(core);
	if (!ret)
		return hw->clk;

	clk_prepare_lock();
	clk_core_unlink_consumer(hw->clk);
	clk_prepare_unlock();

	free_clk(hw->clk);
	hw->clk = NULL;

fail_create_clk:
	clk_core_free_parent_map(core);
fail_parents:
fail_ops:
	kfree_const(core->name);
fail_name:
	kfree(core);
fail_out:
	return ERR_PTR(ret);
}

/**
 * clk_register - allocate a new clock, register it and return an opaque cookie
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * clk_register is the *deprecated* interface for populating the clock tree with
 * new clock nodes. Use clk_hw_register() instead.
 *
 * Returns: a pointer to the newly allocated struct clk which
 * cannot be dereferenced by driver code but may be used in conjunction with the
 * rest of the clock API. In the event of an error clk_register will return an
 * error pointer; drivers must test for an error with IS_ERR() after calling
 * clk_register.
 */
struct clk *clk_register(struct device *dev, struct clk_hw *hw)
{
	return __clk_register(dev, dev_of_node(dev), hw);
}
EXPORT_SYMBOL_GPL(clk_register);

/**
 * clk_hw_register - register a clk_hw and return an error code
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * clk_hw_register is the primary interface for populating the clock tree with
 * new clock nodes. It returns an integer equal to zero indicating success or
 * less than zero indicating failure. Drivers must test for an error code after
 * calling clk_hw_register().
 */
int clk_hw_register(struct device *dev, struct clk_hw *hw)
{
	return PTR_ERR_OR_ZERO(__clk_register(dev, dev_of_node(dev), hw));
}
EXPORT_SYMBOL_GPL(clk_hw_register);
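
/*
 * Example (hypothetical provider code, not part of this file): registering
 * a minimal clk_hw with clk_hw_register(). The foo_* names and the empty
 * foo_clk_ops are illustrative assumptions; a real provider fills in clk_ops
 * callbacks that match its hardware.
 */
static const struct clk_ops foo_clk_ops = {
	/* .enable, .disable, .recalc_rate, ... as required by the hardware */
};

static struct clk_hw foo_hw = {
	.init = &(struct clk_init_data){
		.name = "foo_clk",
		.ops = &foo_clk_ops,
		.num_parents = 0,
	},
};

static int foo_register_clk(struct device *dev)
{
	/* On success, foo_hw.clk is populated and the clock joins the tree */
	return clk_hw_register(dev, &foo_hw);
}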

/**
 * of_clk_hw_register - register a clk_hw and return an error code
 * @node: device_node of device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * of_clk_hw_register() is the primary interface for populating the clock tree
 * with new clock nodes when a struct device is not available, but a struct
 * device_node is. It returns an integer equal to zero indicating success or
 * less than zero indicating failure. Drivers must test for an error code after
 * calling of_clk_hw_register().
 */
int of_clk_hw_register(struct device_node *node, struct clk_hw *hw)
{
	return PTR_ERR_OR_ZERO(__clk_register(NULL, node, hw));
}
EXPORT_SYMBOL_GPL(of_clk_hw_register);

/* Free memory allocated for a clock. */
static void __clk_release(struct kref *ref)
{
	struct clk_core *core = container_of(ref, struct clk_core, ref);

	lockdep_assert_held(&prepare_lock);

	clk_core_free_parent_map(core);
	kfree_const(core->name);
	kfree(core);
}

/*
 * Empty clk_ops for unregistered clocks. These are used temporarily
 * after clk_unregister() was called on a clock and until the last clock
 * consumer calls clk_put() and the struct clk object is freed.
 */
static int clk_nodrv_prepare_enable(struct clk_hw *hw)
{
	return -ENXIO;
}

static void clk_nodrv_disable_unprepare(struct clk_hw *hw)
{
	WARN_ON_ONCE(1);
}

static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	return -ENXIO;
}

static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index)
{
	return -ENXIO;
}

static const struct clk_ops clk_nodrv_ops = {
	.enable		= clk_nodrv_prepare_enable,
	.disable	= clk_nodrv_disable_unprepare,
	.prepare	= clk_nodrv_prepare_enable,
	.unprepare	= clk_nodrv_disable_unprepare,
	.set_rate	= clk_nodrv_set_rate,
	.set_parent	= clk_nodrv_set_parent,
};

/**
 * clk_unregister - unregister a currently registered clock
 * @clk: clock to unregister
 */
void clk_unregister(struct clk *clk)
{
	unsigned long flags;

	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
		return;

	clk_debug_unregister(clk->core);

	clk_prepare_lock();

	if (clk->core->ops == &clk_nodrv_ops) {
		pr_err("%s: unregistered clock: %s\n", __func__,
		       clk->core->name);
		goto unlock;
	}
	/*
	 * Assign empty clock ops for consumers that might still hold
	 * a reference to this clock.
	 */
	flags = clk_enable_lock();
	clk->core->ops = &clk_nodrv_ops;
	clk_enable_unlock(flags);

	if (!hlist_empty(&clk->core->children)) {
		struct clk_core *child;
		struct hlist_node *t;

		/* Reparent all children to the orphan list. */
		hlist_for_each_entry_safe(child, t, &clk->core->children,
					  child_node)
			clk_core_set_parent_nolock(child, NULL);
	}

	hlist_del_init(&clk->core->child_node);

	if (clk->core->prepare_count)
		pr_warn("%s: unregistering prepared clock: %s\n",
			__func__, clk->core->name);

	if (clk->core->protect_count)
		pr_warn("%s: unregistering protected clock: %s\n",
			__func__, clk->core->name);

	kref_put(&clk->core->ref, __clk_release);
unlock:
	clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_unregister);

/**
 * clk_hw_unregister - unregister a currently registered clk_hw
 * @hw: hardware-specific clock data to unregister
 */
void clk_hw_unregister(struct clk_hw *hw)
{
	clk_unregister(hw->clk);
}
EXPORT_SYMBOL_GPL(clk_hw_unregister);

static void devm_clk_release(struct device *dev, void *res)
{
	clk_unregister(*(struct clk **)res);
}

static void devm_clk_hw_release(struct device *dev, void *res)
{
	clk_hw_unregister(*(struct clk_hw **)res);
}

/**
 * devm_clk_register - resource managed clk_register()
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * Managed clk_register(). This function is *deprecated*; use
 * devm_clk_hw_register() instead.
 *
 * Clocks returned from this function are automatically clk_unregister()ed on
 * driver detach. See clk_register() for more information.
 */
struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
{
	struct clk *clk;
	struct clk **clkp;

	clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
	if (!clkp)
		return ERR_PTR(-ENOMEM);

	clk = clk_register(dev, hw);
	if (!IS_ERR(clk)) {
		*clkp = clk;
		devres_add(dev, clkp);
	} else {
		devres_free(clkp);
	}

	return clk;
}
EXPORT_SYMBOL_GPL(devm_clk_register);

/**
 * devm_clk_hw_register - resource managed clk_hw_register()
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * Managed clk_hw_register(). Clocks registered by this function are
 * automatically clk_hw_unregister()ed on driver detach. See clk_hw_register()
 * for more information.
 */
int devm_clk_hw_register(struct device *dev, struct clk_hw *hw)
{
	struct clk_hw **hwp;
	int ret;

	hwp = devres_alloc(devm_clk_hw_release, sizeof(*hwp), GFP_KERNEL);
	if (!hwp)
		return -ENOMEM;

	ret = clk_hw_register(dev, hw);
	if (!ret) {
		*hwp = hw;
		devres_add(dev, hwp);
	} else {
		devres_free(hwp);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_clk_hw_register);
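
/*
 * Example (hypothetical, not part of this file): the devm variant from a
 * platform driver probe. Registration is undone automatically on driver
 * detach, so no remove() cleanup is needed. The foo_* names, the empty ops,
 * and the platform_device context are illustrative assumptions.
 */
static const struct clk_ops foo_devm_ops = {
	/* hardware callbacks would go here */
};

static struct clk_hw foo_devm_hw = {
	.init = &(struct clk_init_data){
		.name = "foo_devm_clk",
		.ops = &foo_devm_ops,
	},
};

static int foo_probe(struct platform_device *pdev)
{
	/* Unregistered automatically when the driver detaches */
	return devm_clk_hw_register(&pdev->dev, &foo_devm_hw);
}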

static int devm_clk_match(struct device *dev, void *res, void *data)
{
	struct clk *c = res;

	if (WARN_ON(!c))
		return 0;
	return c == data;
}

static int devm_clk_hw_match(struct device *dev, void *res, void *data)
{
	struct clk_hw *hw = res;

	if (WARN_ON(!hw))
		return 0;
	return hw == data;
}

/**
 * devm_clk_unregister - resource managed clk_unregister()
 * @dev: device that is unregistering the clock data
 * @clk: clock to unregister
 *
 * Deallocate a clock allocated with devm_clk_register(). Normally
 * this function will not need to be called and the resource management
 * code will ensure that the resource is freed.
 */
void devm_clk_unregister(struct device *dev, struct clk *clk)
{
	WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk));
}
EXPORT_SYMBOL_GPL(devm_clk_unregister);

/**
 * devm_clk_hw_unregister - resource managed clk_hw_unregister()
 * @dev: device that is unregistering the hardware-specific clock data
 * @hw: link to hardware-specific clock data
 *
 * Unregister a clk_hw registered with devm_clk_hw_register(). Normally
 * this function will not need to be called and the resource management
 * code will ensure that the resource is freed.
 */
void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw)
{
	WARN_ON(devres_release(dev, devm_clk_hw_release, devm_clk_hw_match,
			       hw));
}
EXPORT_SYMBOL_GPL(devm_clk_hw_unregister);

/*
 * clkdev helpers
 */

void __clk_put(struct clk *clk)
{
	struct module *owner;

	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
		return;

	clk_prepare_lock();

	/*
	 * Before calling clk_put, all calls to clk_rate_exclusive_get() from
	 * a given user must be balanced with calls to clk_rate_exclusive_put()
	 * by that same consumer.
	 */
	if (WARN_ON(clk->exclusive_count)) {
		/* We voiced our concern, let's sanitize the situation */
		clk->core->protect_count -= (clk->exclusive_count - 1);
		clk_core_rate_unprotect(clk->core);
		clk->exclusive_count = 0;
	}

	hlist_del(&clk->clks_node);
	if (clk->min_rate > clk->core->req_rate ||
	    clk->max_rate < clk->core->req_rate)
		clk_core_set_rate_nolock(clk->core, clk->core->req_rate);

	owner = clk->core->owner;
	kref_put(&clk->core->ref, __clk_release);

	clk_prepare_unlock();

	module_put(owner);

	free_clk(clk);
}

/*** clk rate change notifiers ***/

/**
 * clk_notifier_register - add a clk rate change notifier
 * @clk: struct clk * to watch
 * @nb: struct notifier_block * with callback info
 *
 * Request notification when clk's rate changes. This uses an SRCU
 * notifier because we want it to block and notifier unregistrations are
 * uncommon. The callbacks associated with the notifier must not
 * re-enter into the clk framework by calling any top-level clk APIs;
 * doing so would deadlock on the prepare_lock mutex, which is already
 * held while notifications are sent.
 *
 * In all notification cases (pre, post and abort rate change) the original
 * clock rate is passed to the callback via struct clk_notifier_data.old_rate
 * and the new frequency is passed via struct clk_notifier_data.new_rate.
 *
 * clk_notifier_register() must be called from non-atomic context.
 * Returns -EINVAL if called with null arguments, -ENOMEM upon
 * allocation failure; otherwise, passes along the return value of
 * srcu_notifier_chain_register().
 */
int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
{
	struct clk_notifier *cn;
	int ret = -ENOMEM;

	if (!clk || !nb)
		return -EINVAL;

	clk_prepare_lock();

	/* search the list of notifiers for this clk */
	list_for_each_entry(cn, &clk_notifier_list, node)
		if (cn->clk == clk)
			break;

	/* if clk wasn't in the notifier list, allocate new clk_notifier */
	if (cn->clk != clk) {
		cn = kzalloc(sizeof(*cn), GFP_KERNEL);
		if (!cn)
			goto out;

		cn->clk = clk;
		srcu_init_notifier_head(&cn->notifier_head);

		list_add(&cn->node, &clk_notifier_list);
	}

	ret = srcu_notifier_chain_register(&cn->notifier_head, nb);

	clk->core->notifier_count++;

out:
	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_register);
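
/*
 * Example (hypothetical consumer, not part of this file): a rate change
 * notifier callback. It runs with the prepare_lock held, so it must not
 * call back into top-level clk APIs. foo_clk_notify and foo_clk_nb are
 * illustrative names.
 */
static int foo_clk_notify(struct notifier_block *nb, unsigned long event,
			  void *data)
{
	struct clk_notifier_data *ndata = data;

	switch (event) {
	case PRE_RATE_CHANGE:
		/* e.g. reconfigure a consumer before the rate changes */
		return NOTIFY_OK;
	case POST_RATE_CHANGE:
		pr_debug("rate changed: %lu -> %lu\n",
			 ndata->old_rate, ndata->new_rate);
		return NOTIFY_OK;
	default:	/* ABORT_RATE_CHANGE and anything else */
		return NOTIFY_DONE;
	}
}

static struct notifier_block foo_clk_nb = {
	.notifier_call = foo_clk_notify,
};

/* A consumer would then call: clk_notifier_register(clk, &foo_clk_nb); */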

/**
 * clk_notifier_unregister - remove a clk rate change notifier
 * @clk: struct clk *
 * @nb: struct notifier_block * with callback info
 *
 * Request no further notification for changes to 'clk' and free the memory
 * allocated in clk_notifier_register.
 *
 * Returns -EINVAL if called with null arguments; otherwise, passes
 * along the return value of srcu_notifier_chain_unregister().
 */
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
{
	struct clk_notifier *cn = NULL;
	int ret = -EINVAL;

	if (!clk || !nb)
		return -EINVAL;

	clk_prepare_lock();

	list_for_each_entry(cn, &clk_notifier_list, node)
		if (cn->clk == clk)
			break;

	if (cn->clk == clk) {
		ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);

		clk->core->notifier_count--;

		/* XXX the notifier code should handle this better */
		if (!cn->notifier_head.head) {
			srcu_cleanup_notifier_head(&cn->notifier_head);
			list_del(&cn->node);
			kfree(cn);
		}

	} else {
		ret = -ENOENT;
	}

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_unregister);

#ifdef CONFIG_OF
/**
 * struct of_clk_provider - Clock provider registration structure
 * @link: Entry in global list of clock providers
 * @node: Pointer to device tree node of clock provider
 * @get: Get clock callback. Returns NULL or a struct clk for the
 * given clock specifier
 * @get_hw: Get clk_hw callback. Returns NULL, ERR_PTR or a
 * struct clk_hw for the given clock specifier
 * @data: context pointer to be passed into @get callback
 */
struct of_clk_provider {
	struct list_head link;

	struct device_node *node;
	struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
	struct clk_hw *(*get_hw)(struct of_phandle_args *clkspec, void *data);
	void *data;
};

extern struct of_device_id __clk_of_table;
static const struct of_device_id __clk_of_table_sentinel
	__used __section(__clk_of_table_end);

static LIST_HEAD(of_clk_providers);
static DEFINE_MUTEX(of_clk_mutex);

struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
				  void *data)
{
	return data;
}
EXPORT_SYMBOL_GPL(of_clk_src_simple_get);

struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec, void *data)
{
	return data;
}
EXPORT_SYMBOL_GPL(of_clk_hw_simple_get);

struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
{
	struct clk_onecell_data *clk_data = data;
	unsigned int idx = clkspec->args[0];

	if (idx >= clk_data->clk_num) {
		pr_err("%s: invalid clock index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	return clk_data->clks[idx];
}
EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);

struct clk_hw *
of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data)
{
	struct clk_hw_onecell_data *hw_data = data;
	unsigned int idx = clkspec->args[0];

	if (idx >= hw_data->num) {
		pr_err("%s: invalid index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	return hw_data->hws[idx];
}
EXPORT_SYMBOL_GPL(of_clk_hw_onecell_get);

/**
 * of_clk_add_provider() - Register a clock provider for a node
 * @np: Device node pointer associated with clock provider
 * @clk_src_get: callback for decoding clock
 * @data: context pointer for @clk_src_get callback.
 *
 * This function is *deprecated*. Use of_clk_add_hw_provider() instead.
 */
int of_clk_add_provider(struct device_node *np,
			struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
						   void *data),
			void *data)
{
	struct of_clk_provider *cp;
	int ret;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	cp->node = of_node_get(np);
	cp->data = data;
	cp->get = clk_src_get;

	mutex_lock(&of_clk_mutex);
	list_add(&cp->link, &of_clk_providers);
	mutex_unlock(&of_clk_mutex);
	pr_debug("Added clock from %pOF\n", np);

	ret = of_clk_set_defaults(np, true);
	if (ret < 0)
		of_clk_del_provider(np);

	return ret;
}
EXPORT_SYMBOL_GPL(of_clk_add_provider);

/**
 * of_clk_add_hw_provider() - Register a clock provider for a node
 * @np: Device node pointer associated with clock provider
 * @get: callback for decoding clk_hw
 * @data: context pointer for @get callback.
 */
int of_clk_add_hw_provider(struct device_node *np,
			   struct clk_hw *(*get)(struct of_phandle_args *clkspec,
						 void *data),
			   void *data)
{
	struct of_clk_provider *cp;
	int ret;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	cp->node = of_node_get(np);
	cp->data = data;
	cp->get_hw = get;

	mutex_lock(&of_clk_mutex);
	list_add(&cp->link, &of_clk_providers);
	mutex_unlock(&of_clk_mutex);
	pr_debug("Added clk_hw provider from %pOF\n", np);

	ret = of_clk_set_defaults(np, true);
	if (ret < 0)
		of_clk_del_provider(np);

	return ret;
}
EXPORT_SYMBOL_GPL(of_clk_add_hw_provider);
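
/*
 * Example (hypothetical provider, not part of this file): exposing an array
 * of already-registered clk_hws to DT consumers through the generic onecell
 * getter above. foo_register_provider() is an illustrative name; the caller
 * is assumed to have filled hw_data->num and hw_data->hws[].
 */
static int foo_register_provider(struct device_node *np,
				 struct clk_hw_onecell_data *hw_data)
{
	return of_clk_add_hw_provider(np, of_clk_hw_onecell_get, hw_data);
}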

static void devm_of_clk_release_provider(struct device *dev, void *res)
{
	of_clk_del_provider(*(struct device_node **)res);
}

/*
 * We allow a child device to use its parent device as the clock provider node
 * for cases like MFD sub-devices where the child device driver wants to use
 * devm_*() APIs but not list the device in DT as a sub-node.
 */
static struct device_node *get_clk_provider_node(struct device *dev)
{
	struct device_node *np, *parent_np;

	np = dev->of_node;
	parent_np = dev->parent ? dev->parent->of_node : NULL;

	if (!of_find_property(np, "#clock-cells", NULL))
		if (of_find_property(parent_np, "#clock-cells", NULL))
			np = parent_np;

	return np;
}

/**
 * devm_of_clk_add_hw_provider() - Managed clk provider node registration
 * @dev: Device acting as the clock provider (used for DT node and lifetime)
 * @get: callback for decoding clk_hw
 * @data: context pointer for @get callback
 *
 * Registers a clock provider for the given device's node. If the device has
 * no DT node, or if its node lacks clock provider information (#clock-cells),
 * then the parent device's node is scanned for this information. If the
 * parent node has #clock-cells, it is used for the registration. The provider
 * is automatically released at device exit.
 *
 * Return: 0 on success or an errno on failure.
 */
int devm_of_clk_add_hw_provider(struct device *dev,
				struct clk_hw *(*get)(struct of_phandle_args *clkspec,
						      void *data),
				void *data)
{
	struct device_node **ptr, *np;
	int ret;

	ptr = devres_alloc(devm_of_clk_release_provider, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	np = get_clk_provider_node(dev);
	ret = of_clk_add_hw_provider(np, get, data);
	if (!ret) {
		*ptr = np;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_of_clk_add_hw_provider);

/**
 * of_clk_del_provider() - Remove a previously registered clock provider
 * @np: Device node pointer associated with clock provider
 */
void of_clk_del_provider(struct device_node *np)
{
	struct of_clk_provider *cp;

	mutex_lock(&of_clk_mutex);
	list_for_each_entry(cp, &of_clk_providers, link) {
		if (cp->node == np) {
			list_del(&cp->link);
			of_node_put(cp->node);
			kfree(cp);
			break;
		}
	}
	mutex_unlock(&of_clk_mutex);
}
EXPORT_SYMBOL_GPL(of_clk_del_provider);

static int devm_clk_provider_match(struct device *dev, void *res, void *data)
{
	struct device_node **np = res;

	if (WARN_ON(!np || !*np))
		return 0;

	return *np == data;
}

/**
 * devm_of_clk_del_provider() - Remove clock provider registered using devm
 * @dev: Device to whose lifetime the clock provider was bound
 */
void devm_of_clk_del_provider(struct device *dev)
{
	int ret;
	struct device_node *np = get_clk_provider_node(dev);

	ret = devres_release(dev, devm_of_clk_release_provider,
			     devm_clk_provider_match, np);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_of_clk_del_provider);

/*
 * Beware the return values when np is valid, but no clock provider is found.
 * If name == NULL, the function returns -ENOENT.
 * If name != NULL, the function returns -EINVAL. This is because
 * of_parse_phandle_with_args() is called even if of_property_match_string()
 * returns an error.
 */
static int of_parse_clkspec(const struct device_node *np, int index,
			    const char *name, struct of_phandle_args *out_args)
{
	int ret = -ENOENT;

	/* Walk up the tree of devices looking for a clock property that matches */
	while (np) {
		/*
		 * For named clocks, first look up the name in the
		 * "clock-names" property. If it cannot be found, then index
		 * will be an error code and of_parse_phandle_with_args() will
		 * return -EINVAL.
		 */
		if (name)
			index = of_property_match_string(np, "clock-names", name);
		ret = of_parse_phandle_with_args(np, "clocks", "#clock-cells",
						 index, out_args);
		if (!ret)
			break;
		if (name && index >= 0)
			break;

		/*
		 * No matching clock found on this node. If the parent node
		 * has a "clock-ranges" property, then we can try one of its
		 * clocks.
		 */
		np = np->parent;
		if (np && !of_get_property(np, "clock-ranges", NULL))
			break;
		index = 0;
	}

	return ret;
}

static struct clk_hw *
__of_clk_get_hw_from_provider(struct of_clk_provider *provider,
			      struct of_phandle_args *clkspec)
{
	struct clk *clk;

	if (provider->get_hw)
		return provider->get_hw(clkspec, provider->data);

	clk = provider->get(clkspec, provider->data);
	if (IS_ERR(clk))
		return ERR_CAST(clk);
	return __clk_get_hw(clk);
}

static struct clk_hw *
of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
{
	struct of_clk_provider *provider;
	struct clk_hw *hw = ERR_PTR(-EPROBE_DEFER);

	if (!clkspec)
		return ERR_PTR(-EINVAL);

	mutex_lock(&of_clk_mutex);
	list_for_each_entry(provider, &of_clk_providers, link) {
		if (provider->node == clkspec->np) {
			hw = __of_clk_get_hw_from_provider(provider, clkspec);
			if (!IS_ERR(hw))
				break;
		}
	}
	mutex_unlock(&of_clk_mutex);

	return hw;
}

/**
 * of_clk_get_from_provider() - Lookup a clock from a clock provider
 * @clkspec: pointer to a clock specifier data structure
 *
 * This function looks up a struct clk from the registered list of clock
 * providers; the input is a clock specifier data structure as returned
 * from the of_parse_phandle_with_args() function call.
 */
struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
	struct clk_hw *hw = of_clk_get_hw_from_clkspec(clkspec);

	return clk_hw_create_clk(NULL, hw, NULL, __func__);
}
EXPORT_SYMBOL_GPL(of_clk_get_from_provider);
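
/*
 * Example (hypothetical, not part of this file): resolving a clock from a
 * hand-parsed specifier. Most consumers should use of_clk_get() or clk_get()
 * instead; this form is for code that walks the "clocks" property itself.
 * foo_get_nth_clock() is an illustrative name.
 */
static struct clk *foo_get_nth_clock(struct device_node *np, int index)
{
	struct of_phandle_args clkspec;
	struct clk *clk;

	if (of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
				       &clkspec))
		return ERR_PTR(-ENOENT);

	clk = of_clk_get_from_provider(&clkspec);
	of_node_put(clkspec.np);

	return clk;
}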

struct clk_hw *of_clk_get_hw(struct device_node *np, int index,
			     const char *con_id)
{
	int ret;
	struct clk_hw *hw;
	struct of_phandle_args clkspec;

	ret = of_parse_clkspec(np, index, con_id, &clkspec);
	if (ret)
		return ERR_PTR(ret);

	hw = of_clk_get_hw_from_clkspec(&clkspec);
	of_node_put(clkspec.np);

	return hw;
}

static struct clk *__of_clk_get(struct device_node *np,
				int index, const char *dev_id,
				const char *con_id)
{
	struct clk_hw *hw = of_clk_get_hw(np, index, con_id);

	return clk_hw_create_clk(NULL, hw, dev_id, con_id);
}

struct clk *of_clk_get(struct device_node *np, int index)
{
	return __of_clk_get(np, index, np->full_name, NULL);
}
EXPORT_SYMBOL(of_clk_get);

/**
 * of_clk_get_by_name() - Parse and lookup a clock referenced by a device node
 * @np: pointer to clock consumer node
 * @name: name of consumer's clock input, or NULL for the first clock reference
 *
 * This function parses the clocks and clock-names properties,
 * and uses them to look up the struct clk from the registered list of clock
 * providers.
 */
struct clk *of_clk_get_by_name(struct device_node *np, const char *name)
{
	if (!np)
		return ERR_PTR(-ENOENT);

	return __of_clk_get(np, 0, np->full_name, name);
}
EXPORT_SYMBOL(of_clk_get_by_name);
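
/*
 * Example (hypothetical consumer, not part of this file): looking up a clock
 * by its "clock-names" entry and enabling it. Drivers with a struct device
 * should normally prefer clk_get()/devm_clk_get(); this node-based form is
 * for early or deviceless code. The "bus" name is illustrative.
 */
static int foo_enable_bus_clk(struct device_node *np)
{
	struct clk *clk;
	int ret;

	clk = of_clk_get_by_name(np, "bus");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);
	if (ret)
		clk_put(clk);

	return ret;
}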

/**
 * of_clk_get_parent_count() - Count the number of clocks a device node has
 * @np: device node to count
 *
 * Returns: The number of clocks that are possible parents of this node
 */
unsigned int of_clk_get_parent_count(struct device_node *np)
{
	int count;

	count = of_count_phandle_with_args(np, "clocks", "#clock-cells");
	if (count < 0)
		return 0;

	return count;
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_count);

const char *of_clk_get_parent_name(struct device_node *np, int index)
{
	struct of_phandle_args clkspec;
	struct property *prop;
	const char *clk_name;
	const __be32 *vp;
	u32 pv;
	int rc;
	int count;
	struct clk *clk;

	rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
					&clkspec);
	if (rc)
		return NULL;

	index = clkspec.args_count ? clkspec.args[0] : 0;
	count = 0;

	/*
	 * If there is an indices property, use it to transfer the index
	 * specified into an array offset for the clock-output-names property.
	 */
	of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) {
		if (index == pv) {
			index = count;
			break;
		}
		count++;
	}
	/* We went off the end of 'clock-indices' without finding it */
	if (prop && !vp)
		return NULL;

	if (of_property_read_string_index(clkspec.np, "clock-output-names",
					  index,
					  &clk_name) < 0) {
		/*
		 * Best effort to get the name if the clock has been
		 * registered with the framework. If the clock isn't
		 * registered, we return the node name as the name of
		 * the clock as long as #clock-cells = 0.
		 */
		clk = of_clk_get_from_provider(&clkspec);
		if (IS_ERR(clk)) {
			if (clkspec.args_count == 0)
				clk_name = clkspec.np->name;
			else
				clk_name = NULL;
		} else {
			clk_name = __clk_get_name(clk);
			clk_put(clk);
		}
	}

	of_node_put(clkspec.np);
	return clk_name;
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_name);

/**
 * of_clk_parent_fill() - Fill @parents with names of @np's parents and return
 * number of parents
 * @np: Device node pointer associated with clock provider
 * @parents: pointer to char array that holds the parents' names
 * @size: size of the @parents array
 *
 * Return: number of parents for the clock node.
 */
int of_clk_parent_fill(struct device_node *np, const char **parents,
		       unsigned int size)
{
	unsigned int i = 0;

	while (i < size && (parents[i] = of_clk_get_parent_name(np, i)) != NULL)
		i++;

	return i;
}
EXPORT_SYMBOL_GPL(of_clk_parent_fill);
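
/*
 * Example (hypothetical provider init, not part of this file): collecting
 * parent names from DT before registering a mux clock. FOO_MAX_PARENTS and
 * the foo_* names are illustrative assumptions.
 */
#define FOO_MAX_PARENTS	4

static void foo_setup_mux(struct device_node *np)
{
	const char *parents[FOO_MAX_PARENTS];
	int num_parents;

	num_parents = of_clk_parent_fill(np, parents, FOO_MAX_PARENTS);

	/* parents[0..num_parents-1] can now feed clk_init_data.parent_names */
}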

struct clock_provider {
	void (*clk_init_cb)(struct device_node *);
	struct device_node *np;
	struct list_head node;
};

/*
 * This function looks for a parent clock. If there is one, then it
 * checks that the provider for this parent clock was initialized, in
 * which case the parent clock will be ready.
 */
static int parent_ready(struct device_node *np)
{
	int i = 0;

	while (true) {
		struct clk *clk = of_clk_get(np, i);

		/* this parent is ready, we can check the next one */
		if (!IS_ERR(clk)) {
			clk_put(clk);
			i++;
			continue;
		}

		/* at least one parent is not ready, we exit now */
		if (PTR_ERR(clk) == -EPROBE_DEFER)
			return 0;

		/*
		 * Here we assume the device tree is written correctly, so any
		 * other error means there are no more parents. As we didn't
		 * exit earlier, the previous parents are all ready. If there
		 * are no clock parents at all, there is nothing to wait for,
		 * so we can consider their absence as being ready.
		 */
		return 1;
	}
}

/**
 * of_clk_detect_critical() - set CLK_IS_CRITICAL flag from Device Tree
 * @np: Device node pointer associated with clock provider
 * @index: clock index
 * @flags: pointer to top-level framework flags
 *
 * Detects if the clock-critical property exists and, if so, sets the
 * corresponding CLK_IS_CRITICAL flag.
 *
 * Do not use this function. It exists only for legacy Device Tree
 * bindings, such as the outdated one-clock-per-node style. Those bindings
 * typically put all clock data into .dts and the Linux driver has no clock
 * data, thus making it impossible to set this flag correctly from the
 * driver. Only those drivers may call of_clk_detect_critical from their
 * setup functions.
 *
 * Return: error code or zero on success
 */
int of_clk_detect_critical(struct device_node *np,
			   int index, unsigned long *flags)
{
	struct property *prop;
	const __be32 *cur;
	uint32_t idx;

	if (!np || !flags)
		return -EINVAL;

	of_property_for_each_u32(np, "clock-critical", prop, cur, idx)
		if (index == idx)
			*flags |= CLK_IS_CRITICAL;

	return 0;
}
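
/*
 * Example (hypothetical legacy setup code, not part of this file): honouring
 * a deprecated "clock-critical" property when computing registration flags.
 * foo_legacy_setup() is an illustrative name.
 */
static void foo_legacy_setup(struct device_node *np)
{
	unsigned long flags = 0;

	if (of_clk_detect_critical(np, 0, &flags))
		return;

	/* flags now carries CLK_IS_CRITICAL if DT marked clock 0 critical */
}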

/**
 * of_clk_init() - Scan and init clock providers from the DT
 * @matches: array of compatible values and init functions for providers.
 *
 * This function scans the device tree for matching clock providers
 * and calls their initialization functions, following the dependencies
 * between providers where possible.
 */
void __init of_clk_init(const struct of_device_id *matches)
{
	const struct of_device_id *match;
	struct device_node *np;
	struct clock_provider *clk_provider, *next;
	bool is_init_done;
	bool force = false;
	LIST_HEAD(clk_provider_list);

	if (!matches)
		matches = &__clk_of_table;

	/* First prepare the list of the clock providers */
	for_each_matching_node_and_match(np, matches, &match) {
		struct clock_provider *parent;

		if (!of_device_is_available(np))
			continue;

		parent = kzalloc(sizeof(*parent), GFP_KERNEL);
		if (!parent) {
			list_for_each_entry_safe(clk_provider, next,
						 &clk_provider_list, node) {
				list_del(&clk_provider->node);
				of_node_put(clk_provider->np);
				kfree(clk_provider);
			}
			of_node_put(np);
			return;
		}

		parent->clk_init_cb = match->data;
		parent->np = of_node_get(np);
		list_add_tail(&parent->node, &clk_provider_list);
	}

	while (!list_empty(&clk_provider_list)) {
		is_init_done = false;
		list_for_each_entry_safe(clk_provider, next,
					 &clk_provider_list, node) {
			if (force || parent_ready(clk_provider->np)) {

				/* Don't populate platform devices */
				of_node_set_flag(clk_provider->np,
						 OF_POPULATED);

				clk_provider->clk_init_cb(clk_provider->np);
				of_clk_set_defaults(clk_provider->np, true);

				list_del(&clk_provider->node);
				of_node_put(clk_provider->np);
				kfree(clk_provider);
				is_init_done = true;
			}
		}

		/*
		 * We didn't manage to initialize any of the
		 * remaining providers during the last loop, so now we
		 * initialize all the remaining ones unconditionally
		 * in case the clock parent was not mandatory
		 */
		if (!is_init_done)
			force = true;
	}
}
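
/*
 * Example (hypothetical early provider, not part of this file): hooking into
 * of_clk_init() via CLK_OF_DECLARE(), which places an entry in
 * __clk_of_table. The compatible string and setup function are illustrative.
 */
static void __init foo_osc_setup(struct device_node *np)
{
	/* register fixed-rate root clocks, then of_clk_add_hw_provider() */
}
CLK_OF_DECLARE(foo_osc, "acme,foo-osc", foo_osc_setup);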
#endif