1/*
2 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
3 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * Standard functionality for the common clock API. See Documentation/clk.txt
10 */
11
12#include <linux/clk-private.h>
13#include <linux/module.h>
14#include <linux/mutex.h>
15#include <linux/spinlock.h>
16#include <linux/err.h>
17#include <linux/list.h>
18#include <linux/slab.h>
19#include <linux/of.h>
20#include <linux/device.h>
21#include <linux/init.h>
22
23static DEFINE_SPINLOCK(enable_lock);
24static DEFINE_MUTEX(prepare_lock);
25
26static HLIST_HEAD(clk_root_list);
27static HLIST_HEAD(clk_orphan_list);
28static LIST_HEAD(clk_notifier_list);
29
30/*** debugfs support ***/
31
32#ifdef CONFIG_COMMON_CLK_DEBUG
33#include <linux/debugfs.h>
34
35static struct dentry *rootdir;
36static struct dentry *orphandir;
37static int inited = 0;
38
39static void clk_summary_show_one(struct seq_file *s, struct clk *c, int level)
40{
41 if (!c)
42 return;
43
44 seq_printf(s, "%*s%-*s %-11d %-12d %-10lu",
45 level * 3 + 1, "",
46 30 - level * 3, c->name,
47 c->enable_count, c->prepare_count, c->rate);
48 seq_printf(s, "\n");
49}
50
51static void clk_summary_show_subtree(struct seq_file *s, struct clk *c,
52 int level)
53{
54 struct clk *child;
55
56 if (!c)
57 return;
58
59 clk_summary_show_one(s, c, level);
60
61 hlist_for_each_entry(child, &c->children, child_node)
62 clk_summary_show_subtree(s, child, level + 1);
63}
64
65static int clk_summary_show(struct seq_file *s, void *data)
66{
67 struct clk *c;
68
69 seq_printf(s, " clock enable_cnt prepare_cnt rate\n");
70 seq_printf(s, "---------------------------------------------------------------------\n");
71
72 mutex_lock(&prepare_lock);
73
74 hlist_for_each_entry(c, &clk_root_list, child_node)
75 clk_summary_show_subtree(s, c, 0);
76
77 hlist_for_each_entry(c, &clk_orphan_list, child_node)
78 clk_summary_show_subtree(s, c, 0);
79
80 mutex_unlock(&prepare_lock);
81
82 return 0;
83}
84
85
86static int clk_summary_open(struct inode *inode, struct file *file)
87{
88 return single_open(file, clk_summary_show, inode->i_private);
89}
90
91static const struct file_operations clk_summary_fops = {
92 .open = clk_summary_open,
93 .read = seq_read,
94 .llseek = seq_lseek,
95 .release = single_release,
96};
97
98static void clk_dump_one(struct seq_file *s, struct clk *c, int level)
99{
100 if (!c)
101 return;
102
103 seq_printf(s, "\"%s\": { ", c->name);
104 seq_printf(s, "\"enable_count\": %d,", c->enable_count);
105 seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
106 seq_printf(s, "\"rate\": %lu", c->rate);
107}
108
109static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level)
110{
111 struct clk *child;
112
113 if (!c)
114 return;
115
116 clk_dump_one(s, c, level);
117
118 hlist_for_each_entry(child, &c->children, child_node) {
119 seq_printf(s, ",");
120 clk_dump_subtree(s, child, level + 1);
121 }
122
123 seq_printf(s, "}");
124}
125
126static int clk_dump(struct seq_file *s, void *data)
127{
128 struct clk *c;
129 bool first_node = true;
130
131 seq_printf(s, "{");
132
133 mutex_lock(&prepare_lock);
134
135 hlist_for_each_entry(c, &clk_root_list, child_node) {
136 if (!first_node)
137 seq_printf(s, ",");
138 first_node = false;
139 clk_dump_subtree(s, c, 0);
140 }
141
142 hlist_for_each_entry(c, &clk_orphan_list, child_node) {
143 seq_printf(s, ",");
144 clk_dump_subtree(s, c, 0);
145 }
146
147 mutex_unlock(&prepare_lock);
148
149 seq_printf(s, "}");
150 return 0;
151}
152
153
154static int clk_dump_open(struct inode *inode, struct file *file)
155{
156 return single_open(file, clk_dump, inode->i_private);
157}
158
159static const struct file_operations clk_dump_fops = {
160 .open = clk_dump_open,
161 .read = seq_read,
162 .llseek = seq_lseek,
163 .release = single_release,
164};
165
166/* caller must hold prepare_lock */
167static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)
168{
169 struct dentry *d;
170 int ret = -ENOMEM;
171
172 if (!clk || !pdentry) {
173 ret = -EINVAL;
174 goto out;
175 }
176
177 d = debugfs_create_dir(clk->name, pdentry);
178 if (!d)
179 goto out;
180
181 clk->dentry = d;
182
183 d = debugfs_create_u32("clk_rate", S_IRUGO, clk->dentry,
184 (u32 *)&clk->rate);
185 if (!d)
186 goto err_out;
187
188 d = debugfs_create_x32("clk_flags", S_IRUGO, clk->dentry,
189 (u32 *)&clk->flags);
190 if (!d)
191 goto err_out;
192
193 d = debugfs_create_u32("clk_prepare_count", S_IRUGO, clk->dentry,
194 (u32 *)&clk->prepare_count);
195 if (!d)
196 goto err_out;
197
198 d = debugfs_create_u32("clk_enable_count", S_IRUGO, clk->dentry,
199 (u32 *)&clk->enable_count);
200 if (!d)
201 goto err_out;
202
203 d = debugfs_create_u32("clk_notifier_count", S_IRUGO, clk->dentry,
204 (u32 *)&clk->notifier_count);
205 if (!d)
206 goto err_out;
207
208 ret = 0;
209 goto out;
210
211err_out:
212 debugfs_remove(clk->dentry);
213out:
214 return ret;
215}
216
217/* caller must hold prepare_lock */
218static int clk_debug_create_subtree(struct clk *clk, struct dentry *pdentry)
219{
220 struct clk *child;
221 int ret = -EINVAL;
222
223 if (!clk || !pdentry)
224 goto out;
225
226 ret = clk_debug_create_one(clk, pdentry);
227
228 if (ret)
229 goto out;
230
231 hlist_for_each_entry(child, &clk->children, child_node)
232 clk_debug_create_subtree(child, clk->dentry);
233
234 ret = 0;
235out:
236 return ret;
237}
238
239/**
240 * clk_debug_register - add a clk node to the debugfs clk tree
241 * @clk: the clk being added to the debugfs clk tree
242 *
243 * Dynamically adds a clk to the debugfs clk tree if debugfs has been
244 * initialized. Otherwise it bails out early since the debugfs clk tree
245 * will be created lazily by clk_debug_init as part of a late_initcall.
246 *
247 * Caller must hold prepare_lock. Only clk_init calls this function (so
248 * far), so this is taken care of.
249 */
250static int clk_debug_register(struct clk *clk)
251{
252 struct clk *parent;
253 struct dentry *pdentry;
254 int ret = 0;
255
256 if (!inited)
257 goto out;
258
259 parent = clk->parent;
260
261 /*
262 * Check to see if a clk is a root clk. Also check that it is
263 * safe to add this clk to debugfs
264 */
265 if (!parent)
266 if (clk->flags & CLK_IS_ROOT)
267 pdentry = rootdir;
268 else
269 pdentry = orphandir;
270 else
271 if (parent->dentry)
272 pdentry = parent->dentry;
273 else
274 goto out;
275
276 ret = clk_debug_create_subtree(clk, pdentry);
277
278out:
279 return ret;
280}
281
282/**
283 * clk_debug_init - lazily create the debugfs clk tree visualization
284 *
285 * clks are often initialized very early during boot before memory can
286 * be dynamically allocated and well before debugfs is setup.
287 * clk_debug_init walks the clk tree hierarchy while holding
288 * prepare_lock and creates the topology as part of a late_initcall,
289 * thus ensuring that clks initialized very early will still be
290 * represented in the debugfs clk tree. This function should only be
291 * called once at boot-time, and all other clks added dynamically will
292 * be done so with clk_debug_register.
293 */
294static int __init clk_debug_init(void)
295{
296 struct clk *clk;
297 struct dentry *d;
298
299 rootdir = debugfs_create_dir("clk", NULL);
300
301 if (!rootdir)
302 return -ENOMEM;
303
304 d = debugfs_create_file("clk_summary", S_IRUGO, rootdir, NULL,
305 &clk_summary_fops);
306 if (!d)
307 return -ENOMEM;
308
309 d = debugfs_create_file("clk_dump", S_IRUGO, rootdir, NULL,
310 &clk_dump_fops);
311 if (!d)
312 return -ENOMEM;
313
314 orphandir = debugfs_create_dir("orphans", rootdir);
315
316 if (!orphandir)
317 return -ENOMEM;
318
319 mutex_lock(&prepare_lock);
320
321 hlist_for_each_entry(clk, &clk_root_list, child_node)
322 clk_debug_create_subtree(clk, rootdir);
323
324 hlist_for_each_entry(clk, &clk_orphan_list, child_node)
325 clk_debug_create_subtree(clk, orphandir);
326
327 inited = 1;
328
329 mutex_unlock(&prepare_lock);
330
331 return 0;
332}
333late_initcall(clk_debug_init);
334#else
335static inline int clk_debug_register(struct clk *clk) { return 0; }
336#endif
337
338/* caller must hold prepare_lock */
339static void clk_unprepare_unused_subtree(struct clk *clk)
340{
341 struct clk *child;
342
343 if (!clk)
344 return;
345
346 hlist_for_each_entry(child, &clk->children, child_node)
347 clk_unprepare_unused_subtree(child);
348
349 if (clk->prepare_count)
350 return;
351
352 if (clk->flags & CLK_IGNORE_UNUSED)
353 return;
354
355 if (__clk_is_prepared(clk)) {
356 if (clk->ops->unprepare_unused)
357 clk->ops->unprepare_unused(clk->hw);
358 else if (clk->ops->unprepare)
359 clk->ops->unprepare(clk->hw);
360 }
361}
362
363/* caller must hold prepare_lock */
364static void clk_disable_unused_subtree(struct clk *clk)
365{
366 struct clk *child;
367 unsigned long flags;
368
369 if (!clk)
370 goto out;
371
372 hlist_for_each_entry(child, &clk->children, child_node)
373 clk_disable_unused_subtree(child);
374
375 spin_lock_irqsave(&enable_lock, flags);
376
377 if (clk->enable_count)
378 goto unlock_out;
379
380 if (clk->flags & CLK_IGNORE_UNUSED)
381 goto unlock_out;
382
383 /*
384 * some gate clocks have special needs during the disable-unused
385 * sequence. call .disable_unused if available, otherwise fall
386 * back to .disable
387 */
388 if (__clk_is_enabled(clk)) {
389 if (clk->ops->disable_unused)
390 clk->ops->disable_unused(clk->hw);
391 else if (clk->ops->disable)
392 clk->ops->disable(clk->hw);
393 }
394
395unlock_out:
396 spin_unlock_irqrestore(&enable_lock, flags);
397
398out:
399 return;
400}
401
402static int clk_disable_unused(void)
403{
404 struct clk *clk;
405
406 mutex_lock(&prepare_lock);
407
408 hlist_for_each_entry(clk, &clk_root_list, child_node)
409 clk_disable_unused_subtree(clk);
410
411 hlist_for_each_entry(clk, &clk_orphan_list, child_node)
412 clk_disable_unused_subtree(clk);
413
414 hlist_for_each_entry(clk, &clk_root_list, child_node)
415 clk_unprepare_unused_subtree(clk);
416
417 hlist_for_each_entry(clk, &clk_orphan_list, child_node)
418 clk_unprepare_unused_subtree(clk);
419
420 mutex_unlock(&prepare_lock);
421
422 return 0;
423}
424late_initcall(clk_disable_unused);
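/*
 * Example (illustrative only, not part of this file): a clock that must keep
 * running even though no Linux consumer holds it -- e.g. one feeding debug
 * logic or a coprocessor -- can opt out of the late_initcall sweep above by
 * passing CLK_IGNORE_UNUSED in its init flags. All names here are made up.
 *
 *	struct clk_init_data init = {
 *		.name = "cpu_debug_clk",
 *		.ops = &my_gate_ops,
 *		.flags = CLK_IGNORE_UNUSED,
 *		.parent_names = (const char *[]){ "pll1" },
 *		.num_parents = 1,
 *	};
 */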
425
426/*** helper functions ***/
427
428const char *__clk_get_name(struct clk *clk)
429{
430 return !clk ? NULL : clk->name;
431}
432EXPORT_SYMBOL_GPL(__clk_get_name);
433
434struct clk_hw *__clk_get_hw(struct clk *clk)
435{
436 return !clk ? NULL : clk->hw;
437}
438
439u8 __clk_get_num_parents(struct clk *clk)
440{
441 return !clk ? 0 : clk->num_parents;
442}
443
444struct clk *__clk_get_parent(struct clk *clk)
445{
446 return !clk ? NULL : clk->parent;
447}
448
449unsigned int __clk_get_enable_count(struct clk *clk)
450{
451 return !clk ? 0 : clk->enable_count;
452}
453
454unsigned int __clk_get_prepare_count(struct clk *clk)
455{
456 return !clk ? 0 : clk->prepare_count;
457}
458
459unsigned long __clk_get_rate(struct clk *clk)
460{
461 unsigned long ret;
462
463 if (!clk) {
464 ret = 0;
465 goto out;
466 }
467
468 ret = clk->rate;
469
470 if (clk->flags & CLK_IS_ROOT)
471 goto out;
472
473 if (!clk->parent)
474 ret = 0;
475
476out:
477 return ret;
478}
479
480unsigned long __clk_get_flags(struct clk *clk)
481{
482 return !clk ? 0 : clk->flags;
483}
484
485bool __clk_is_prepared(struct clk *clk)
486{
487 int ret;
488
489 if (!clk)
490 return false;
491
492 /*
493 * .is_prepared is optional for clocks that can prepare
494 * fall back to software usage counter if it is missing
495 */
496 if (!clk->ops->is_prepared) {
497 ret = clk->prepare_count ? 1 : 0;
498 goto out;
499 }
500
501 ret = clk->ops->is_prepared(clk->hw);
502out:
503 return !!ret;
504}
505
506bool __clk_is_enabled(struct clk *clk)
507{
508 int ret;
509
510 if (!clk)
511 return false;
512
513 /*
514 * .is_enabled is only mandatory for clocks that gate
515 * fall back to software usage counter if .is_enabled is missing
516 */
517 if (!clk->ops->is_enabled) {
518 ret = clk->enable_count ? 1 : 0;
519 goto out;
520 }
521
522 ret = clk->ops->is_enabled(clk->hw);
523out:
524 return !!ret;
525}
526
527static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk)
528{
529 struct clk *child;
530 struct clk *ret;
531
532 if (!strcmp(clk->name, name))
533 return clk;
534
535 hlist_for_each_entry(child, &clk->children, child_node) {
536 ret = __clk_lookup_subtree(name, child);
537 if (ret)
538 return ret;
539 }
540
541 return NULL;
542}
543
544struct clk *__clk_lookup(const char *name)
545{
546 struct clk *root_clk;
547 struct clk *ret;
548
549 if (!name)
550 return NULL;
551
552 /* search the 'proper' clk tree first */
553 hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
554 ret = __clk_lookup_subtree(name, root_clk);
555 if (ret)
556 return ret;
557 }
558
559 /* if not found, then search the orphan tree */
560 hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
561 ret = __clk_lookup_subtree(name, root_clk);
562 if (ret)
563 return ret;
564 }
565
566 return NULL;
567}
568
569/*** clk api ***/
570
571void __clk_unprepare(struct clk *clk)
572{
573 if (!clk)
574 return;
575
576 if (WARN_ON(clk->prepare_count == 0))
577 return;
578
579 if (--clk->prepare_count > 0)
580 return;
581
582 WARN_ON(clk->enable_count > 0);
583
584 if (clk->ops->unprepare)
585 clk->ops->unprepare(clk->hw);
586
587 __clk_unprepare(clk->parent);
588}
589
590/**
591 * clk_unprepare - undo preparation of a clock source
592 * @clk: the clk being unprepared
593 *
594 * clk_unprepare may sleep, which differentiates it from clk_disable. In a
595 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
596 * if the operation may sleep. One example is a clk which is accessed over
597 * I2C. In the complex case a clk gate operation may require a fast and a slow
598 * part. It is for this reason that clk_unprepare and clk_disable are not
599 * mutually exclusive. In fact clk_disable must be called before clk_unprepare.
600 */
601void clk_unprepare(struct clk *clk)
602{
603 mutex_lock(&prepare_lock);
604 __clk_unprepare(clk);
605 mutex_unlock(&prepare_lock);
606}
607EXPORT_SYMBOL_GPL(clk_unprepare);
608
609int __clk_prepare(struct clk *clk)
610{
611 int ret = 0;
612
613 if (!clk)
614 return 0;
615
616 if (clk->prepare_count == 0) {
617 ret = __clk_prepare(clk->parent);
618 if (ret)
619 return ret;
620
621 if (clk->ops->prepare) {
622 ret = clk->ops->prepare(clk->hw);
623 if (ret) {
624 __clk_unprepare(clk->parent);
625 return ret;
626 }
627 }
628 }
629
630 clk->prepare_count++;
631
632 return 0;
633}
634
635/**
636 * clk_prepare - prepare a clock source
637 * @clk: the clk being prepared
638 *
639 * clk_prepare may sleep, which differentiates it from clk_enable. In a simple
640 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
641 * operation may sleep. One example is a clk which is accessed over I2C. In
642 * the complex case a clk ungate operation may require a fast and a slow part.
643 * It is for this reason that clk_prepare and clk_enable are not mutually
644 * exclusive. In fact clk_prepare must be called before clk_enable.
645 * Returns 0 on success, -EERROR otherwise.
646 */
647int clk_prepare(struct clk *clk)
648{
649 int ret;
650
651 mutex_lock(&prepare_lock);
652 ret = __clk_prepare(clk);
653 mutex_unlock(&prepare_lock);
654
655 return ret;
656}
657EXPORT_SYMBOL_GPL(clk_prepare);
658
659static void __clk_disable(struct clk *clk)
660{
661 if (!clk)
662 return;
663
664 if (WARN_ON(IS_ERR(clk)))
665 return;
666
667 if (WARN_ON(clk->enable_count == 0))
668 return;
669
670 if (--clk->enable_count > 0)
671 return;
672
673 if (clk->ops->disable)
674 clk->ops->disable(clk->hw);
675
676 __clk_disable(clk->parent);
677}
678
679/**
680 * clk_disable - gate a clock
681 * @clk: the clk being gated
682 *
683 * clk_disable must not sleep, which differentiates it from clk_unprepare. In
684 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
685 * clk if the operation is fast and will never sleep. One example is a
686 * SoC-internal clk which is controlled via simple register writes. In the
687 * complex case a clk gate operation may require a fast and a slow part. It is
688 * for this reason that clk_unprepare and clk_disable are not mutually exclusive.
689 * In fact clk_disable must be called before clk_unprepare.
690 */
691void clk_disable(struct clk *clk)
692{
693 unsigned long flags;
694
695 spin_lock_irqsave(&enable_lock, flags);
696 __clk_disable(clk);
697 spin_unlock_irqrestore(&enable_lock, flags);
698}
699EXPORT_SYMBOL_GPL(clk_disable);
700
701static int __clk_enable(struct clk *clk)
702{
703 int ret = 0;
704
705 if (!clk)
706 return 0;
707
708 if (WARN_ON(clk->prepare_count == 0))
709 return -ESHUTDOWN;
710
711 if (clk->enable_count == 0) {
712 ret = __clk_enable(clk->parent);
713
714 if (ret)
715 return ret;
716
717 if (clk->ops->enable) {
718 ret = clk->ops->enable(clk->hw);
719 if (ret) {
720 __clk_disable(clk->parent);
721 return ret;
722 }
723 }
724 }
725
726 clk->enable_count++;
727 return 0;
728}
729
730/**
731 * clk_enable - ungate a clock
732 * @clk: the clk being ungated
733 *
734 * clk_enable must not sleep, which differentiates it from clk_prepare. In a
735 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
736 * if the operation will never sleep. One example is a SoC-internal clk which
737 * is controlled via simple register writes. In the complex case a clk ungate
738 * operation may require a fast and a slow part. It is for this reason that
739 * clk_enable and clk_prepare are not mutually exclusive. In fact clk_prepare
740 * must be called before clk_enable. Returns 0 on success, -EERROR
741 * otherwise.
742 */
743int clk_enable(struct clk *clk)
744{
745 unsigned long flags;
746 int ret;
747
748 spin_lock_irqsave(&enable_lock, flags);
749 ret = __clk_enable(clk);
750 spin_unlock_irqrestore(&enable_lock, flags);
751
752 return ret;
753}
754EXPORT_SYMBOL_GPL(clk_enable);
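/*
 * Illustrative usage sketch (not part of this file): per the comments above,
 * a consumer pairs the sleepable and atomic halves in this order --
 * clk_prepare() before clk_enable(), and clk_disable() before clk_unprepare().
 * The variable names are made up for the example.
 *
 *	ret = clk_prepare(my_clk);	// may sleep, e.g. a clk behind I2C
 *	if (ret)
 *		return ret;
 *	ret = clk_enable(my_clk);	// atomic, never sleeps
 *	if (ret) {
 *		clk_unprepare(my_clk);
 *		return ret;
 *	}
 *	...
 *	clk_disable(my_clk);
 *	clk_unprepare(my_clk);
 */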
755
756/**
757 * __clk_round_rate - round the given rate for a clk
758 * @clk: round the rate of this clock
759 *
760 * Caller must hold prepare_lock. Useful for clk_ops such as .set_rate
761 */
762unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
763{
764 unsigned long parent_rate = 0;
765
766 if (!clk)
767 return 0;
768
769 if (!clk->ops->round_rate) {
770 if (clk->flags & CLK_SET_RATE_PARENT)
771 return __clk_round_rate(clk->parent, rate);
772 else
773 return clk->rate;
774 }
775
776 if (clk->parent)
777 parent_rate = clk->parent->rate;
778
779 return clk->ops->round_rate(clk->hw, rate, &parent_rate);
780}
781
782/**
783 * clk_round_rate - round the given rate for a clk
784 * @clk: the clk for which we are rounding a rate
785 * @rate: the rate which is to be rounded
786 *
787 * Takes in a rate as input and rounds it to a rate that the clk can actually
788 * use which is then returned. If clk doesn't support round_rate operation
789 * then the parent rate is returned.
790 */
791long clk_round_rate(struct clk *clk, unsigned long rate)
792{
793 unsigned long ret;
794
795 mutex_lock(&prepare_lock);
796 ret = __clk_round_rate(clk, rate);
797 mutex_unlock(&prepare_lock);
798
799 return ret;
800}
801EXPORT_SYMBOL_GPL(clk_round_rate);
802
803/**
804 * __clk_notify - call clk notifier chain
805 * @clk: struct clk * that is changing rate
806 * @msg: clk notifier type (see include/linux/clk.h)
807 * @old_rate: old clk rate
808 * @new_rate: new clk rate
809 *
810 * Triggers a notifier call chain on the clk rate-change notification
811 * for 'clk'. Passes a pointer to the struct clk and the previous
812 * and current rates to the notifier callback. Intended to be called by
813 * internal clock code only. Returns NOTIFY_DONE from the last driver
814 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
815 * a driver returns that.
816 */
817static int __clk_notify(struct clk *clk, unsigned long msg,
818 unsigned long old_rate, unsigned long new_rate)
819{
820 struct clk_notifier *cn;
821 struct clk_notifier_data cnd;
822 int ret = NOTIFY_DONE;
823
824 cnd.clk = clk;
825 cnd.old_rate = old_rate;
826 cnd.new_rate = new_rate;
827
828 list_for_each_entry(cn, &clk_notifier_list, node) {
829 if (cn->clk == clk) {
830 ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
831 &cnd);
832 break;
833 }
834 }
835
836 return ret;
837}
838
839/**
840 * __clk_recalc_rates
841 * @clk: first clk in the subtree
842 * @msg: notification type (see include/linux/clk.h)
843 *
844 * Walks the subtree of clks starting with clk and recalculates rates as it
845 * goes. Note that if a clk does not implement the .recalc_rate callback then
846 * it is assumed that the clock will take on the rate of its parent.
847 *
848 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
849 * if necessary.
850 *
851 * Caller must hold prepare_lock.
852 */
853static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
854{
855 unsigned long old_rate;
856 unsigned long parent_rate = 0;
857 struct clk *child;
858
859 old_rate = clk->rate;
860
861 if (clk->parent)
862 parent_rate = clk->parent->rate;
863
864 if (clk->ops->recalc_rate)
865 clk->rate = clk->ops->recalc_rate(clk->hw, parent_rate);
866 else
867 clk->rate = parent_rate;
868
869 /*
870 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
871 * & ABORT_RATE_CHANGE notifiers
872 */
873 if (clk->notifier_count && msg)
874 __clk_notify(clk, msg, old_rate, clk->rate);
875
876 hlist_for_each_entry(child, &clk->children, child_node)
877 __clk_recalc_rates(child, msg);
878}
879
880/**
881 * clk_get_rate - return the rate of clk
882 * @clk: the clk whose rate is being returned
883 *
884 * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
885 * is set, which means a recalc_rate will be issued.
886 * If clk is NULL then returns 0.
887 */
888unsigned long clk_get_rate(struct clk *clk)
889{
890 unsigned long rate;
891
892 mutex_lock(&prepare_lock);
893
894 if (clk && (clk->flags & CLK_GET_RATE_NOCACHE))
895 __clk_recalc_rates(clk, 0);
896
897 rate = __clk_get_rate(clk);
898 mutex_unlock(&prepare_lock);
899
900 return rate;
901}
902EXPORT_SYMBOL_GPL(clk_get_rate);
903
904/**
905 * __clk_speculate_rates
906 * @clk: first clk in the subtree
907 * @parent_rate: the "future" rate of clk's parent
908 *
909 * Walks the subtree of clks starting with clk, speculating rates as it
910 * goes and firing off PRE_RATE_CHANGE notifications as necessary.
911 *
912 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
913 * pre-rate change notifications and returns early if no clks in the
914 * subtree have subscribed to the notifications. Note that if a clk does not
915 * implement the .recalc_rate callback then it is assumed that the clock will
916 * take on the rate of its parent.
917 *
918 * Caller must hold prepare_lock.
919 */
920static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
921{
922 struct clk *child;
923 unsigned long new_rate;
924 int ret = NOTIFY_DONE;
925
926 if (clk->ops->recalc_rate)
927 new_rate = clk->ops->recalc_rate(clk->hw, parent_rate);
928 else
929 new_rate = parent_rate;
930
931 /* abort the rate change if a driver returns NOTIFY_BAD */
932 if (clk->notifier_count)
933 ret = __clk_notify(clk, PRE_RATE_CHANGE, clk->rate, new_rate);
934
935 if (ret == NOTIFY_BAD)
936 goto out;
937
938 hlist_for_each_entry(child, &clk->children, child_node) {
939 ret = __clk_speculate_rates(child, new_rate);
940 if (ret == NOTIFY_BAD)
941 break;
942 }
943
944out:
945 return ret;
946}
947
948static void clk_calc_subtree(struct clk *clk, unsigned long new_rate)
949{
950 struct clk *child;
951
952 clk->new_rate = new_rate;
953
954 hlist_for_each_entry(child, &clk->children, child_node) {
955 if (child->ops->recalc_rate)
956 child->new_rate = child->ops->recalc_rate(child->hw, new_rate);
957 else
958 child->new_rate = new_rate;
959 clk_calc_subtree(child, child->new_rate);
960 }
961}
962
963/*
964 * calculate the new rates returning the topmost clock that has to be
965 * changed.
966 */
967static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
968{
969 struct clk *top = clk;
970 unsigned long best_parent_rate = 0;
971 unsigned long new_rate;
972
973 /* sanity */
974 if (IS_ERR_OR_NULL(clk))
975 return NULL;
976
977 /* save parent rate, if it exists */
978 if (clk->parent)
979 best_parent_rate = clk->parent->rate;
980
981 /* never propagate up to the parent */
982 if (!(clk->flags & CLK_SET_RATE_PARENT)) {
983 if (!clk->ops->round_rate) {
984 clk->new_rate = clk->rate;
985 return NULL;
986 }
987 new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);
988 goto out;
989 }
990
991 /* need clk->parent from here on out */
992 if (!clk->parent) {
993 pr_debug("%s: %s has NULL parent\n", __func__, clk->name);
994 return NULL;
995 }
996
997 if (!clk->ops->round_rate) {
998 top = clk_calc_new_rates(clk->parent, rate);
999 new_rate = clk->parent->new_rate;
1000
1001 goto out;
1002 }
1003
1004 new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);
1005
1006 if (best_parent_rate != clk->parent->rate) {
1007 top = clk_calc_new_rates(clk->parent, best_parent_rate);
1008
1009 goto out;
1010 }
1011
1012out:
1013 clk_calc_subtree(clk, new_rate);
1014
1015 return top;
1016}
1017
1018/*
1019 * Notify about rate changes in a subtree. Always walk down the whole tree
1020 * so that in case of an error we can walk down the whole tree again and
1021 * abort the change.
1022 */
1023static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event)
1024{
1025 struct clk *child, *fail_clk = NULL;
1026 int ret = NOTIFY_DONE;
1027
1028 if (clk->rate == clk->new_rate)
1029 return NULL;
1030
1031 if (clk->notifier_count) {
1032 ret = __clk_notify(clk, event, clk->rate, clk->new_rate);
1033 if (ret == NOTIFY_BAD)
1034 fail_clk = clk;
1035 }
1036
1037 hlist_for_each_entry(child, &clk->children, child_node) {
1038 clk = clk_propagate_rate_change(child, event);
1039 if (clk)
1040 fail_clk = clk;
1041 }
1042
1043 return fail_clk;
1044}
1045
1046/*
1047 * walk down a subtree and set the new rates notifying the rate
1048 * change on the way
1049 */
1050static void clk_change_rate(struct clk *clk)
1051{
1052 struct clk *child;
1053 unsigned long old_rate;
1054 unsigned long best_parent_rate = 0;
1055
1056 old_rate = clk->rate;
1057
1058 if (clk->parent)
1059 best_parent_rate = clk->parent->rate;
1060
1061 if (clk->ops->set_rate)
1062 clk->ops->set_rate(clk->hw, clk->new_rate, best_parent_rate);
1063
1064 if (clk->ops->recalc_rate)
1065 clk->rate = clk->ops->recalc_rate(clk->hw, best_parent_rate);
1066 else
1067 clk->rate = best_parent_rate;
1068
1069 if (clk->notifier_count && old_rate != clk->rate)
1070 __clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);
1071
1072 hlist_for_each_entry(child, &clk->children, child_node)
1073 clk_change_rate(child);
1074}
1075
1076/**
1077 * clk_set_rate - specify a new rate for clk
1078 * @clk: the clk whose rate is being changed
1079 * @rate: the new rate for clk
1080 *
1081 * In the simplest case clk_set_rate will only adjust the rate of clk.
1082 *
1083 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
1084 * propagate up to clk's parent; whether or not this happens depends on the
1085 * outcome of clk's .round_rate implementation. If *parent_rate is unchanged
1086 * after calling .round_rate then upstream parent propagation is ignored. If
1087 * *parent_rate comes back with a new rate for clk's parent then we propagate
1088 * up to clk's parent and set its rate. Upward propagation will continue
1089 * until either a clk does not support the CLK_SET_RATE_PARENT flag or
1090 * .round_rate stops requesting changes to clk's parent_rate.
1091 *
1092 * Rate changes are accomplished via tree traversal that also recalculates the
1093 * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
1094 *
1095 * Returns 0 on success, -EERROR otherwise.
1096 */
1097int clk_set_rate(struct clk *clk, unsigned long rate)
1098{
1099 struct clk *top, *fail_clk;
1100 int ret = 0;
1101
1102 /* prevent racing with updates to the clock topology */
1103 mutex_lock(&prepare_lock);
1104
1105 /* bail early if nothing to do */
1106 if (rate == clk->rate)
1107 goto out;
1108
1109 if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count) {
1110 ret = -EBUSY;
1111 goto out;
1112 }
1113
1114 /* calculate new rates and get the topmost changed clock */
1115 top = clk_calc_new_rates(clk, rate);
1116 if (!top) {
1117 ret = -EINVAL;
1118 goto out;
1119 }
1120
1121 /* notify that we are about to change rates */
1122 fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
1123 if (fail_clk) {
1124 pr_warn("%s: failed to set %s rate\n", __func__,
1125 fail_clk->name);
1126 clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
1127 ret = -EBUSY;
1128 goto out;
1129 }
1130
1131 /* change the rates */
1132 clk_change_rate(top);
1133
1134out:
1135 mutex_unlock(&prepare_lock);
1136
1137 return ret;
1138}
1139EXPORT_SYMBOL_GPL(clk_set_rate);
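/*
 * Illustrative sketch of the CLK_SET_RATE_PARENT behaviour described above
 * (not part of this file): a clock's .round_rate may request a new parent
 * rate by writing to *parent_rate; clk_calc_new_rates() then walks up the
 * tree and changes the parent too. All names here are hypothetical.
 *
 *	static long my_div2_round_rate(struct clk_hw *hw, unsigned long rate,
 *				       unsigned long *parent_rate)
 *	{
 *		// ask the parent to run at twice the target so a fixed
 *		// divide-by-2 hits it exactly; only honoured when this clock
 *		// was registered with CLK_SET_RATE_PARENT
 *		*parent_rate = rate * 2;
 *		return rate;
 *	}
 */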
1140
1141/**
1142 * clk_get_parent - return the parent of a clk
1143 * @clk: the clk whose parent gets returned
1144 *
1145 * Simply returns clk->parent. Returns NULL if clk is NULL.
1146 */
1147struct clk *clk_get_parent(struct clk *clk)
1148{
1149 struct clk *parent;
1150
1151 mutex_lock(&prepare_lock);
1152 parent = __clk_get_parent(clk);
1153 mutex_unlock(&prepare_lock);
1154
1155 return parent;
1156}
1157EXPORT_SYMBOL_GPL(clk_get_parent);
1158
1159/*
1160 * .get_parent is mandatory for clocks with multiple possible parents. It is
1161 * optional for single-parent clocks. Always call .get_parent if it is
1162 * available and WARN if it is missing for multi-parent clocks.
1163 *
1164 * For single-parent clocks without .get_parent, first check to see if the
1165 * .parents array exists, and if so use it to avoid an expensive tree
1166 * traversal. If .parents does not exist then walk the tree with __clk_lookup.
1167 */
1168static struct clk *__clk_init_parent(struct clk *clk)
1169{
1170 struct clk *ret = NULL;
1171 u8 index;
1172
1173 /* handle the trivial cases */
1174
1175 if (!clk->num_parents)
1176 goto out;
1177
1178 if (clk->num_parents == 1) {
1179 if (IS_ERR_OR_NULL(clk->parent))
1180 ret = clk->parent = __clk_lookup(clk->parent_names[0]);
1181 ret = clk->parent;
1182 goto out;
1183 }
1184
1185 if (!clk->ops->get_parent) {
1186 WARN(!clk->ops->get_parent,
1187 "%s: multi-parent clocks must implement .get_parent\n",
1188 __func__);
1189 goto out;
1190 };
1191
1192 /*
1193 * Do our best to cache parent clocks in clk->parents. This prevents
1194 * unnecessary and expensive calls to __clk_lookup. We don't set
1195 * clk->parent here; that is done by the calling function
1196 */
1197
1198 index = clk->ops->get_parent(clk->hw);
1199
1200 if (!clk->parents)
1201 clk->parents =
1202 kzalloc((sizeof(struct clk*) * clk->num_parents),
1203 GFP_KERNEL);
1204
1205 if (!clk->parents)
1206 ret = __clk_lookup(clk->parent_names[index]);
1207 else if (!clk->parents[index])
1208 ret = clk->parents[index] =
1209 __clk_lookup(clk->parent_names[index]);
1210 else
1211 ret = clk->parents[index];
1212
1213out:
1214 return ret;
1215}
1216
1217void __clk_reparent(struct clk *clk, struct clk *new_parent)
1218{
1219#ifdef CONFIG_COMMON_CLK_DEBUG
1220 struct dentry *d;
1221 struct dentry *new_parent_d;
1222#endif
1223
1224 if (!clk || !new_parent)
1225 return;
1226
1227 hlist_del(&clk->child_node);
1228
1229 if (new_parent)
1230 hlist_add_head(&clk->child_node, &new_parent->children);
1231 else
1232 hlist_add_head(&clk->child_node, &clk_orphan_list);
1233
1234#ifdef CONFIG_COMMON_CLK_DEBUG
1235 if (!inited)
1236 goto out;
1237
1238 if (new_parent)
1239 new_parent_d = new_parent->dentry;
1240 else
1241 new_parent_d = orphandir;
1242
1243 d = debugfs_rename(clk->dentry->d_parent, clk->dentry,
1244 new_parent_d, clk->name);
1245 if (d)
1246 clk->dentry = d;
1247 else
1248 pr_debug("%s: failed to rename debugfs entry for %s\n",
1249 __func__, clk->name);
1250out:
1251#endif
1252
1253 clk->parent = new_parent;
1254
1255 __clk_recalc_rates(clk, POST_RATE_CHANGE);
1256}
1257
1258static int __clk_set_parent(struct clk *clk, struct clk *parent)
1259{
1260 struct clk *old_parent;
1261 unsigned long flags;
1262 int ret = -EINVAL;
1263 u8 i;
1264
1265 old_parent = clk->parent;
1266
1267 if (!clk->parents)
1268 clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
1269 GFP_KERNEL);
1270
1271 /*
1272 * find index of new parent clock using cached parent ptrs,
1273 * or if not yet cached, use string name comparison and cache
1274 * them now to avoid future calls to __clk_lookup.
1275 */
1276 for (i = 0; i < clk->num_parents; i++) {
1277 if (clk->parents && clk->parents[i] == parent)
1278 break;
1279 else if (!strcmp(clk->parent_names[i], parent->name)) {
1280 if (clk->parents)
1281 clk->parents[i] = __clk_lookup(parent->name);
1282 break;
1283 }
1284 }
1285
1286 if (i == clk->num_parents) {
1287 pr_debug("%s: clock %s is not a possible parent of clock %s\n",
1288 __func__, parent->name, clk->name);
1289 goto out;
1290 }
1291
1292 /* migrate prepare and enable */
1293 if (clk->prepare_count)
1294 __clk_prepare(parent);
1295
1296 /* FIXME replace with clk_is_enabled(clk) someday */
1297 spin_lock_irqsave(&enable_lock, flags);
1298 if (clk->enable_count)
1299 __clk_enable(parent);
1300 spin_unlock_irqrestore(&enable_lock, flags);
1301
1302 /* change clock input source */
1303 ret = clk->ops->set_parent(clk->hw, i);
1304
1305 /* clean up old prepare and enable */
1306 spin_lock_irqsave(&enable_lock, flags);
1307 if (clk->enable_count)
1308 __clk_disable(old_parent);
1309 spin_unlock_irqrestore(&enable_lock, flags);
1310
1311 if (clk->prepare_count)
1312 __clk_unprepare(old_parent);
1313
1314out:
1315 return ret;
1316}
1317
1318/**
1319 * clk_set_parent - switch the parent of a mux clk
1320 * @clk: the mux clk whose input we are switching
1321 * @parent: the new input to clk
1322 *
1323 * Re-parent clk to use parent as its new input source. If clk has the
1324 * CLK_SET_PARENT_GATE flag set then clk must be gated for this
1325 * operation to succeed. After successfully changing clk's parent
1326 * clk_set_parent will update the clk topology, sysfs topology and
1327 * propagate rate recalculation via __clk_recalc_rates. Returns 0 on
1328 * success, -EERROR otherwise.
1329 */
1330int clk_set_parent(struct clk *clk, struct clk *parent)
1331{
1332 int ret = 0;
1333
1334 if (!clk || !clk->ops)
1335 return -EINVAL;
1336
1337 if (!clk->ops->set_parent)
1338 return -ENOSYS;
1339
1340 /* prevent racing with updates to the clock topology */
1341 mutex_lock(&prepare_lock);
1342
1343 if (clk->parent == parent)
1344 goto out;
1345
1346 /* propagate PRE_RATE_CHANGE notifications */
1347 if (clk->notifier_count)
1348 ret = __clk_speculate_rates(clk, parent->rate);
1349
1350 /* abort if a driver objects */
1351 if (ret == NOTIFY_STOP)
1352 goto out;
1353
1354 /* only re-parent if the clock is not in use */
1355 if ((clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count)
1356 ret = -EBUSY;
1357 else
1358 ret = __clk_set_parent(clk, parent);
1359
1360 /* propagate ABORT_RATE_CHANGE if .set_parent failed */
1361 if (ret) {
1362 __clk_recalc_rates(clk, ABORT_RATE_CHANGE);
1363 goto out;
1364 }
1365
1366 /* propagate rate recalculation downstream */
1367 __clk_reparent(clk, parent);
1368
1369out:
1370 mutex_unlock(&prepare_lock);
1371
1372 return ret;
1373}
1374EXPORT_SYMBOL_GPL(clk_set_parent);
1375
1376/**
1377 * __clk_init - initialize the data structures in a struct clk
1378 * @dev: device initializing this clk, placeholder for now
1379 * @clk: clk being initialized
1380 *
1381 * Initializes the lists in struct clk, queries the hardware for the
1382 * parent and rate and sets them both.
1383 */
1384int __clk_init(struct device *dev, struct clk *clk)
1385{
1386 int i, ret = 0;
1387 struct clk *orphan;
1388 struct hlist_node *tmp2;
1389
1390 if (!clk)
1391 return -EINVAL;
1392
1393 mutex_lock(&prepare_lock);
1394
1395 /* check to see if a clock with this name is already registered */
1396 if (__clk_lookup(clk->name)) {
1397 pr_debug("%s: clk %s already initialized\n",
1398 __func__, clk->name);
1399 ret = -EEXIST;
1400 goto out;
1401 }
1402
1403 /* check that clk_ops are sane. See Documentation/clk.txt */
1404 if (clk->ops->set_rate &&
1405 !(clk->ops->round_rate && clk->ops->recalc_rate)) {
1406 pr_warning("%s: %s must implement .round_rate & .recalc_rate\n",
1407 __func__, clk->name);
1408 ret = -EINVAL;
1409 goto out;
1410 }
1411
1412 if (clk->ops->set_parent && !clk->ops->get_parent) {
1413 pr_warning("%s: %s must implement .get_parent & .set_parent\n",
1414 __func__, clk->name);
1415 ret = -EINVAL;
1416 goto out;
1417 }
1418
1419 /* throw a WARN if any entries in parent_names are NULL */
1420 for (i = 0; i < clk->num_parents; i++)
1421 WARN(!clk->parent_names[i],
1422 "%s: invalid NULL in %s's .parent_names\n",
1423 __func__, clk->name);
1424
1425 /*
1426 * Allocate an array of struct clk *'s to avoid unnecessary string
1427 * look-ups of clk's possible parents. This can fail for clocks passed
1428 * in to clk_init during early boot; thus any access to clk->parents[]
1429 * must always check for a NULL pointer and try to populate it if
1430 * necessary.
1431 *
1432 * If clk->parents is not NULL we skip this entire block. This allows
1433 * for clock drivers to statically initialize clk->parents.
1434 */
1435 if (clk->num_parents > 1 && !clk->parents) {
1436 clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
1437 GFP_KERNEL);
1438 /*
1439 * __clk_lookup returns NULL for parents that have not been
1440 * clk_init'd; thus any access to clk->parents[] must check
1441 * for a NULL pointer. We can always perform lazy lookups for
1442 * missing parents later on.
1443 */
1444 if (clk->parents)
1445 for (i = 0; i < clk->num_parents; i++)
1446 clk->parents[i] =
1447 __clk_lookup(clk->parent_names[i]);
1448 }
1449
1450 clk->parent = __clk_init_parent(clk);
1451
1452 /*
1453 * Populate clk->parent if parent has already been __clk_init'd. If
1454 * parent has not yet been __clk_init'd then place clk in the orphan
1455 * list. If clk has set the CLK_IS_ROOT flag then place it in the root
1456 * clk list.
1457 *
1458 * Every time a new clk is clk_init'd then we walk the list of orphan
1459 * clocks and re-parent any that are children of the clock currently
1460 * being clk_init'd.
1461 */
1462 if (clk->parent)
1463 hlist_add_head(&clk->child_node,
1464 &clk->parent->children);
1465 else if (clk->flags & CLK_IS_ROOT)
1466 hlist_add_head(&clk->child_node, &clk_root_list);
1467 else
1468 hlist_add_head(&clk->child_node, &clk_orphan_list);
1469
1470 /*
1471 * Set clk's rate. The preferred method is to use .recalc_rate. For
1472 * simple clocks and lazy developers the default fallback is to use the
1473 * parent's rate. If a clock doesn't have a parent (or is orphaned)
1474 * then rate is set to zero.
1475 */
1476 if (clk->ops->recalc_rate)
1477 clk->rate = clk->ops->recalc_rate(clk->hw,
1478 __clk_get_rate(clk->parent));
1479 else if (clk->parent)
1480 clk->rate = clk->parent->rate;
1481 else
1482 clk->rate = 0;
1483
1484 /*
1485 * walk the list of orphan clocks and reparent any that are children of
1486 * this clock
1487 */
1488 hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
1489 if (orphan->ops->get_parent) {
1490 i = orphan->ops->get_parent(orphan->hw);
1491 if (!strcmp(clk->name, orphan->parent_names[i]))
1492 __clk_reparent(orphan, clk);
1493 continue;
1494 }
1495
1496 for (i = 0; i < orphan->num_parents; i++)
1497 if (!strcmp(clk->name, orphan->parent_names[i])) {
1498 __clk_reparent(orphan, clk);
1499 break;
1500 }
1501 }
1502
1503 /*
1504 * optional platform-specific magic
1505 *
1506 * The .init callback is not used by any of the basic clock types, but
1507 * exists for weird hardware that must perform initialization magic.
1508 * Please consider other ways of solving initialization problems before
1509 * using this callback, as its use is discouraged.
1510 */
1511 if (clk->ops->init)
1512 clk->ops->init(clk->hw);
1513
1514 clk_debug_register(clk);
1515
1516out:
1517 mutex_unlock(&prepare_lock);
1518
1519 return ret;
1520}
1521
1522/**
1523 * __clk_register - register a clock and return a cookie.
1524 *
1525 * Same as clk_register, except that the .clk field inside hw shall point to a
1526 * preallocated (generally statically allocated) struct clk. None of the fields
1527 * of the struct clk need to be initialized.
1528 *
1529 * The data pointed to by .init and .clk field shall NOT be marked as init
1530 * data.
1531 *
1532 * __clk_register is only exposed via clk-private.h and is intended for use with
1533 * very large numbers of clocks that need to be statically initialized. It is
1534 * a layering violation to include clk-private.h from any code which implements
1535 * a clock's .ops; as such any statically initialized clock data MUST be in a
1536 * separate C file from the logic that implements its operations. Returns 0
1537 * on success, otherwise an error code.
1538 */
1539struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
1540{
1541 int ret;
1542 struct clk *clk;
1543
1544 clk = hw->clk;
1545 clk->name = hw->init->name;
1546 clk->ops = hw->init->ops;
1547 clk->hw = hw;
1548 clk->flags = hw->init->flags;
1549 clk->parent_names = hw->init->parent_names;
1550 clk->num_parents = hw->init->num_parents;
1551
1552 ret = __clk_init(dev, clk);
1553 if (ret)
1554 return ERR_PTR(ret);
1555
1556 return clk;
1557}
1558EXPORT_SYMBOL_GPL(__clk_register);
1559
1560static int _clk_register(struct device *dev, struct clk_hw *hw, struct clk *clk)
1561{
1562 int i, ret;
1563
1564 clk->name = kstrdup(hw->init->name, GFP_KERNEL);
1565 if (!clk->name) {
1566 pr_err("%s: could not allocate clk->name\n", __func__);
1567 ret = -ENOMEM;
1568 goto fail_name;
1569 }
1570 clk->ops = hw->init->ops;
1571 clk->hw = hw;
1572 clk->flags = hw->init->flags;
1573 clk->num_parents = hw->init->num_parents;
1574 hw->clk = clk;
1575
1576 /* allocate local copy in case parent_names is __initdata */
1577 clk->parent_names = kzalloc((sizeof(char*) * clk->num_parents),
1578 GFP_KERNEL);
1579
1580 if (!clk->parent_names) {
1581 pr_err("%s: could not allocate clk->parent_names\n", __func__);
1582 ret = -ENOMEM;
1583 goto fail_parent_names;
1584 }
1585
1586
1587 /* copy each string name in case parent_names is __initdata */
1588 for (i = 0; i < clk->num_parents; i++) {
1589 clk->parent_names[i] = kstrdup(hw->init->parent_names[i],
1590 GFP_KERNEL);
1591 if (!clk->parent_names[i]) {
1592 pr_err("%s: could not copy parent_names\n", __func__);
1593 ret = -ENOMEM;
1594 goto fail_parent_names_copy;
1595 }
1596 }
1597
1598 ret = __clk_init(dev, clk);
1599 if (!ret)
1600 return 0;
1601
1602fail_parent_names_copy:
1603 while (--i >= 0)
1604 kfree(clk->parent_names[i]);
1605 kfree(clk->parent_names);
1606fail_parent_names:
1607 kfree(clk->name);
1608fail_name:
1609 return ret;
1610}
1611
1612/**
1613 * clk_register - allocate a new clock, register it and return an opaque cookie
1614 * @dev: device that is registering this clock
1615 * @hw: link to hardware-specific clock data
1616 *
1617 * clk_register is the primary interface for populating the clock tree with new
1618 * clock nodes. It returns a pointer to the newly allocated struct clk which
1619 * cannot be dereferenced by driver code but may be used in conjunction with the
1620 * rest of the clock API. In the event of an error clk_register will return an
1621 * error code; drivers must test for an error code after calling clk_register.
1622 */
1623struct clk *clk_register(struct device *dev, struct clk_hw *hw)
1624{
1625 int ret;
1626 struct clk *clk;
1627
1628 clk = kzalloc(sizeof(*clk), GFP_KERNEL);
1629 if (!clk) {
1630 pr_err("%s: could not allocate clk\n", __func__);
1631 ret = -ENOMEM;
1632 goto fail_out;
1633 }
1634
1635 ret = _clk_register(dev, hw, clk);
1636 if (!ret)
1637 return clk;
1638
1639 kfree(clk);
1640fail_out:
1641 return ERR_PTR(ret);
1642}
1643EXPORT_SYMBOL_GPL(clk_register);
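/*
 * Illustrative registration sketch (not part of this file): a driver fills in
 * a struct clk_init_data, points its clk_hw.init at it and calls
 * clk_register(). "my_foo_ops", "my_hw" and the clock names are hypothetical.
 *
 *	struct clk_init_data init = {
 *		.name = "foo_gate",
 *		.ops = &my_foo_ops,
 *		.parent_names = (const char *[]){ "osc24m" },
 *		.num_parents = 1,
 *		.flags = 0,
 *	};
 *	struct clk *clk;
 *
 *	my_hw.hw.init = &init;
 *	clk = clk_register(dev, &my_hw.hw);
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 */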
1644
1645/**
1646 * clk_unregister - unregister a currently registered clock
1647 * @clk: clock to unregister
1648 *
1649 * Currently unimplemented.
1650 */
1651void clk_unregister(struct clk *clk) {}
1652EXPORT_SYMBOL_GPL(clk_unregister);
1653
1654static void devm_clk_release(struct device *dev, void *res)
1655{
1656 clk_unregister(res);
1657}
1658
1659/**
1660 * devm_clk_register - resource managed clk_register()
1661 * @dev: device that is registering this clock
1662 * @hw: link to hardware-specific clock data
1663 *
1664 * Managed clk_register(). Clocks returned from this function are
1665 * automatically clk_unregister()ed on driver detach. See clk_register() for
1666 * more information.
1667 */
1668struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
1669{
1670 struct clk *clk;
1671 int ret;
1672
1673 clk = devres_alloc(devm_clk_release, sizeof(*clk), GFP_KERNEL);
1674 if (!clk)
1675 return ERR_PTR(-ENOMEM);
1676
1677 ret = _clk_register(dev, hw, clk);
1678 if (!ret) {
1679 devres_add(dev, clk);
1680 } else {
1681 devres_free(clk);
1682 clk = ERR_PTR(ret);
1683 }
1684
1685 return clk;
1686}
1687EXPORT_SYMBOL_GPL(devm_clk_register);
1688
1689static int devm_clk_match(struct device *dev, void *res, void *data)
1690{
1691 struct clk *c = res;
1692 if (WARN_ON(!c))
1693 return 0;
1694 return c == data;
1695}
1696
1697/**
1698 * devm_clk_unregister - resource managed clk_unregister()
1699 * @clk: clock to unregister
1700 *
1701 * Deallocate a clock allocated with devm_clk_register(). Normally
1702 * this function will not need to be called and the resource management
1703 * code will ensure that the resource is freed.
1704 */
1705void devm_clk_unregister(struct device *dev, struct clk *clk)
1706{
1707 WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk));
1708}
1709EXPORT_SYMBOL_GPL(devm_clk_unregister);
1710
1711/*** clk rate change notifiers ***/
1712
1713/**
1714 * clk_notifier_register - add a clk rate change notifier
1715 * @clk: struct clk * to watch
1716 * @nb: struct notifier_block * with callback info
1717 *
1718 * Request notification when clk's rate changes. This uses an SRCU
1719 * notifier because we want it to block and notifier unregistrations are
1720 * uncommon. The callbacks associated with the notifier must not
1721 * re-enter into the clk framework by calling any top-level clk APIs;
1722 * this will cause a nested prepare_lock mutex.
1723 *
1724 * Pre-change notifier callbacks will be passed the current, pre-change
1725 * rate of the clk via struct clk_notifier_data.old_rate. The new,
1726 * post-change rate of the clk is passed via struct
1727 * clk_notifier_data.new_rate.
1728 *
1729 * Post-change notifiers will pass the now-current, post-change rate of
1730 * the clk in both struct clk_notifier_data.old_rate and struct
1731 * clk_notifier_data.new_rate.
1732 *
1733 * Abort-change notifiers are effectively the opposite of pre-change
1734 * notifiers: the original pre-change clk rate is passed in via struct
1735 * clk_notifier_data.new_rate and the failed post-change rate is passed
1736 * in via struct clk_notifier_data.old_rate.
1737 *
1738 * clk_notifier_register() must be called from non-atomic context.
1739 * Returns -EINVAL if called with null arguments, -ENOMEM upon
1740 * allocation failure; otherwise, passes along the return value of
1741 * srcu_notifier_chain_register().
1742 */
1743int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
1744{
1745 struct clk_notifier *cn;
1746 int ret = -ENOMEM;
1747
1748 if (!clk || !nb)
1749 return -EINVAL;
1750
1751 mutex_lock(&prepare_lock);
1752
1753 /* search the list of notifiers for this clk */
1754 list_for_each_entry(cn, &clk_notifier_list, node)
1755 if (cn->clk == clk)
1756 break;
1757
1758 /* if clk wasn't in the notifier list, allocate new clk_notifier */
1759 if (cn->clk != clk) {
1760 cn = kzalloc(sizeof(struct clk_notifier), GFP_KERNEL);
1761 if (!cn)
1762 goto out;
1763
1764 cn->clk = clk;
1765 srcu_init_notifier_head(&cn->notifier_head);
1766
1767 list_add(&cn->node, &clk_notifier_list);
1768 }
1769
1770 ret = srcu_notifier_chain_register(&cn->notifier_head, nb);
1771
1772 clk->notifier_count++;
1773
1774out:
1775 mutex_unlock(&prepare_lock);
1776
1777 return ret;
1778}
1779EXPORT_SYMBOL_GPL(clk_notifier_register);
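/*
 * Illustrative notifier sketch (not part of this file): the callback receives
 * a struct clk_notifier_data and one of the PRE/POST/ABORT_RATE_CHANGE
 * messages documented above; returning NOTIFY_BAD from a PRE_RATE_CHANGE
 * handler vetoes the rate change. Names below are hypothetical.
 *
 *	static int my_rate_notifier(struct notifier_block *nb,
 *				    unsigned long msg, void *data)
 *	{
 *		struct clk_notifier_data *cnd = data;
 *
 *		if (msg == PRE_RATE_CHANGE && cnd->new_rate > 100000000)
 *			return NOTIFY_BAD;	// refuse rates above 100 MHz
 *		return NOTIFY_OK;
 *	}
 *
 *	// my_nb.notifier_call = my_rate_notifier;
 *	clk_notifier_register(my_clk, &my_nb);
 */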
1780
1781/**
1782 * clk_notifier_unregister - remove a clk rate change notifier
1783 * @clk: struct clk *
1784 * @nb: struct notifier_block * with callback info
1785 *
1786 * Request no further notification for changes to 'clk' and frees memory
1787 * allocated in clk_notifier_register.
1788 *
1789 * Returns -EINVAL if called with null arguments; otherwise, passes
1790 * along the return value of srcu_notifier_chain_unregister().
1791 */
1792int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
1793{
1794 struct clk_notifier *cn = NULL;
1795 int ret = -EINVAL;
1796
1797 if (!clk || !nb)
1798 return -EINVAL;
1799
1800 mutex_lock(&prepare_lock);
1801
1802 list_for_each_entry(cn, &clk_notifier_list, node)
1803 if (cn->clk == clk)
1804 break;
1805
1806 if (cn->clk == clk) {
1807 ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
1808
1809 clk->notifier_count--;
1810
1811 /* XXX the notifier code should handle this better */
1812 if (!cn->notifier_head.head) {
1813 srcu_cleanup_notifier_head(&cn->notifier_head);
1814 kfree(cn);
1815 }
1816
1817 } else {
1818 ret = -ENOENT;
1819 }
1820
1821 mutex_unlock(&prepare_lock);
1822
1823 return ret;
1824}
1825EXPORT_SYMBOL_GPL(clk_notifier_unregister);
1826
1827#ifdef CONFIG_OF
1828/**
1829 * struct of_clk_provider - Clock provider registration structure
1830 * @link: Entry in global list of clock providers
1831 * @node: Pointer to device tree node of clock provider
1832 * @get: Get clock callback. Returns NULL or a struct clk for the
1833 * given clock specifier
1834 * @data: context pointer to be passed into @get callback
1835 */
1836struct of_clk_provider {
1837 struct list_head link;
1838
1839 struct device_node *node;
1840 struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
1841 void *data;
1842};
1843
1844extern struct of_device_id __clk_of_table[];
1845
1846static const struct of_device_id __clk_of_table_sentinel
1847 __used __section(__clk_of_table_end);
1848
1849static LIST_HEAD(of_clk_providers);
1850static DEFINE_MUTEX(of_clk_lock);
1851
1852struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
1853 void *data)
1854{
1855 return data;
1856}
1857EXPORT_SYMBOL_GPL(of_clk_src_simple_get);
1858
1859struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
1860{
1861 struct clk_onecell_data *clk_data = data;
1862 unsigned int idx = clkspec->args[0];
1863
1864 if (idx >= clk_data->clk_num) {
1865 pr_err("%s: invalid clock index %d\n", __func__, idx);
1866 return ERR_PTR(-EINVAL);
1867 }
1868
1869 return clk_data->clks[idx];
1870}
1871EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);
1872
1873/**
1874 * of_clk_add_provider() - Register a clock provider for a node
1875 * @np: Device node pointer associated with clock provider
1876 * @clk_src_get: callback for decoding clock
1877 * @data: context pointer for @clk_src_get callback.
1878 */
1879int of_clk_add_provider(struct device_node *np,
1880 struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
1881 void *data),
1882 void *data)
1883{
1884 struct of_clk_provider *cp;
1885
1886 cp = kzalloc(sizeof(struct of_clk_provider), GFP_KERNEL);
1887 if (!cp)
1888 return -ENOMEM;
1889
1890 cp->node = of_node_get(np);
1891 cp->data = data;
1892 cp->get = clk_src_get;
1893
1894 mutex_lock(&of_clk_lock);
1895 list_add(&cp->link, &of_clk_providers);
1896 mutex_unlock(&of_clk_lock);
1897 pr_debug("Added clock from %s\n", np->full_name);
1898
1899 return 0;
1900}
1901EXPORT_SYMBOL_GPL(of_clk_add_provider);
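/*
 * Illustrative DT provider sketch (not part of this file): a driver exposing
 * several clocks under one device tree node typically fills a
 * clk_onecell_data table and registers of_clk_src_onecell_get as the
 * translation callback. Names below are hypothetical.
 *
 *	static struct clk *my_clks[MY_NR_CLKS];
 *	static struct clk_onecell_data my_clk_data = {
 *		.clks = my_clks,
 *		.clk_num = MY_NR_CLKS,
 *	};
 *
 *	of_clk_add_provider(np, of_clk_src_onecell_get, &my_clk_data);
 */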
1902
1903/**
1904 * of_clk_del_provider() - Remove a previously registered clock provider
1905 * @np: Device node pointer associated with clock provider
1906 */
1907void of_clk_del_provider(struct device_node *np)
1908{
1909 struct of_clk_provider *cp;
1910
1911 mutex_lock(&of_clk_lock);
1912 list_for_each_entry(cp, &of_clk_providers, link) {
1913 if (cp->node == np) {
1914 list_del(&cp->link);
1915 of_node_put(cp->node);
1916 kfree(cp);
1917 break;
1918 }
1919 }
1920 mutex_unlock(&of_clk_lock);
1921}
1922EXPORT_SYMBOL_GPL(of_clk_del_provider);
1923
1924struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
1925{
1926 struct of_clk_provider *provider;
1927 struct clk *clk = ERR_PTR(-ENOENT);
1928
1929 /* Check if we have such a provider in our array */
1930 mutex_lock(&of_clk_lock);
1931 list_for_each_entry(provider, &of_clk_providers, link) {
1932 if (provider->node == clkspec->np)
1933 clk = provider->get(clkspec, provider->data);
1934 if (!IS_ERR(clk))
1935 break;
1936 }
1937 mutex_unlock(&of_clk_lock);
1938
1939 return clk;
1940}
1941
1942const char *of_clk_get_parent_name(struct device_node *np, int index)
1943{
1944 struct of_phandle_args clkspec;
1945 const char *clk_name;
1946 int rc;
1947
1948 if (index < 0)
1949 return NULL;
1950
1951 rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
1952 &clkspec);
1953 if (rc)
1954 return NULL;
1955
1956 if (of_property_read_string_index(clkspec.np, "clock-output-names",
1957 clkspec.args_count ? clkspec.args[0] : 0,
1958 &clk_name) < 0)
1959 clk_name = clkspec.np->name;
1960
1961 of_node_put(clkspec.np);
1962 return clk_name;
1963}
1964EXPORT_SYMBOL_GPL(of_clk_get_parent_name);
1965
1966/**
1967 * of_clk_init() - Scan and init clock providers from the DT
1968 * @matches: array of compatible values and init functions for providers.
1969 *
1970 * This function scans the device tree for matching clock providers and
1971 * calls their initialization functions
1972 */
1973void __init of_clk_init(const struct of_device_id *matches)
1974{
1975 struct device_node *np;
1976
1977 if (!matches)
1978 matches = __clk_of_table;
1979
1980 for_each_matching_node(np, matches) {
1981 const struct of_device_id *match = of_match_node(matches, np);
1982 of_clk_init_cb_t clk_init_cb = match->data;
1983 clk_init_cb(np);
1984 }
1985}
1986#endif