/*
 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Standard functionality for the common clock API.  See Documentation/clk.txt
 */
#include <linux/clk-private.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>
static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);

static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);
/***   debugfs support   ***/

#ifdef CONFIG_COMMON_CLK_DEBUG
#include <linux/debugfs.h>

static struct dentry *rootdir;
static struct dentry *orphandir;
static int inited = 0;
/* caller must hold prepare_lock */
static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)

        if (!clk || !pdentry) {

        d = debugfs_create_dir(clk->name, pdentry);

        d = debugfs_create_u32("clk_rate", S_IRUGO, clk->dentry,
                        (u32 *)&clk->rate);

        d = debugfs_create_x32("clk_flags", S_IRUGO, clk->dentry,
                        (u32 *)&clk->flags);

        d = debugfs_create_u32("clk_prepare_count", S_IRUGO, clk->dentry,
                        (u32 *)&clk->prepare_count);

        d = debugfs_create_u32("clk_enable_count", S_IRUGO, clk->dentry,
                        (u32 *)&clk->enable_count);

        d = debugfs_create_u32("clk_notifier_count", S_IRUGO, clk->dentry,
                        (u32 *)&clk->notifier_count);

        debugfs_remove(clk->dentry);
/* caller must hold prepare_lock */
static int clk_debug_create_subtree(struct clk *clk, struct dentry *pdentry)

        struct hlist_node *tmp;

        ret = clk_debug_create_one(clk, pdentry);

        hlist_for_each_entry(child, tmp, &clk->children, child_node)
                clk_debug_create_subtree(child, clk->dentry);
/**
 * clk_debug_register - add a clk node to the debugfs clk tree
 * @clk: the clk being added to the debugfs clk tree
 *
 * Dynamically adds a clk to the debugfs clk tree if debugfs has been
 * initialized.  Otherwise it bails out early since the debugfs clk tree
 * will be created lazily by clk_debug_init as part of a late_initcall.
 *
 * Caller must hold prepare_lock.  Only clk_init calls this function (so
 * far) so this is taken care of.
 */
static int clk_debug_register(struct clk *clk)

        struct dentry *pdentry;

        parent = clk->parent;

        /*
         * Check to see if a clk is a root clk.  Also check that it is
         * safe to add this clk to debugfs.
         */
        if (clk->flags & CLK_IS_ROOT)

        pdentry = parent->dentry;

        ret = clk_debug_create_subtree(clk, pdentry);
/**
 * clk_debug_init - lazily create the debugfs clk tree visualization
 *
 * clks are often initialized very early during boot before memory can
 * be dynamically allocated and well before debugfs is setup.
 * clk_debug_init walks the clk tree hierarchy while holding
 * prepare_lock and creates the topology as part of a late_initcall,
 * thus ensuring that clks initialized very early will still be
 * represented in the debugfs clk tree.  This function should only be
 * called once at boot-time, and all other clks added dynamically will
 * be done so with clk_debug_register.
 */
static int __init clk_debug_init(void)

        struct hlist_node *tmp;

        rootdir = debugfs_create_dir("clk", NULL);

        orphandir = debugfs_create_dir("orphans", rootdir);

        mutex_lock(&prepare_lock);

        hlist_for_each_entry(clk, tmp, &clk_root_list, child_node)
                clk_debug_create_subtree(clk, rootdir);

        hlist_for_each_entry(clk, tmp, &clk_orphan_list, child_node)
                clk_debug_create_subtree(clk, orphandir);

        mutex_unlock(&prepare_lock);

late_initcall(clk_debug_init);
#else
static inline int clk_debug_register(struct clk *clk) { return 0; }
#endif
/* caller must hold prepare_lock */
static void clk_disable_unused_subtree(struct clk *clk)

        struct hlist_node *tmp;

        hlist_for_each_entry(child, tmp, &clk->children, child_node)
                clk_disable_unused_subtree(child);

        spin_lock_irqsave(&enable_lock, flags);

        if (clk->enable_count)

        if (clk->flags & CLK_IGNORE_UNUSED)

        if (__clk_is_enabled(clk) && clk->ops->disable)
                clk->ops->disable(clk->hw);

        spin_unlock_irqrestore(&enable_lock, flags);
static int clk_disable_unused(void)

        struct hlist_node *tmp;

        mutex_lock(&prepare_lock);

        hlist_for_each_entry(clk, tmp, &clk_root_list, child_node)
                clk_disable_unused_subtree(clk);

        hlist_for_each_entry(clk, tmp, &clk_orphan_list, child_node)
                clk_disable_unused_subtree(clk);

        mutex_unlock(&prepare_lock);

late_initcall(clk_disable_unused);
/***   helper functions   ***/

inline const char *__clk_get_name(struct clk *clk)
        return !clk ? NULL : clk->name;

inline struct clk_hw *__clk_get_hw(struct clk *clk)
        return !clk ? NULL : clk->hw;

inline u8 __clk_get_num_parents(struct clk *clk)
        return !clk ? -EINVAL : clk->num_parents;

inline struct clk *__clk_get_parent(struct clk *clk)
        return !clk ? NULL : clk->parent;

inline int __clk_get_enable_count(struct clk *clk)
        return !clk ? -EINVAL : clk->enable_count;

inline int __clk_get_prepare_count(struct clk *clk)
        return !clk ? -EINVAL : clk->prepare_count;

unsigned long __clk_get_rate(struct clk *clk)

        if (clk->flags & CLK_IS_ROOT)

inline unsigned long __clk_get_flags(struct clk *clk)
        return !clk ? -EINVAL : clk->flags;

int __clk_is_enabled(struct clk *clk)

        /*
         * .is_enabled is only mandatory for clocks that gate;
         * fall back to the software usage counter if .is_enabled is missing
         */
        if (!clk->ops->is_enabled) {
                ret = clk->enable_count ? 1 : 0;

        ret = clk->ops->is_enabled(clk->hw);
static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk)

        struct hlist_node *tmp;

        if (!strcmp(clk->name, name))

        hlist_for_each_entry(child, tmp, &clk->children, child_node) {
                ret = __clk_lookup_subtree(name, child);

struct clk *__clk_lookup(const char *name)

        struct clk *root_clk;
        struct hlist_node *tmp;

        /* search the 'proper' clk tree first */
        hlist_for_each_entry(root_clk, tmp, &clk_root_list, child_node) {
                ret = __clk_lookup_subtree(name, root_clk);

        /* if not found, then search the orphan tree */
        hlist_for_each_entry(root_clk, tmp, &clk_orphan_list, child_node) {
                ret = __clk_lookup_subtree(name, root_clk);
void __clk_unprepare(struct clk *clk)

        if (WARN_ON(clk->prepare_count == 0))

        if (--clk->prepare_count > 0)

        WARN_ON(clk->enable_count > 0);

        if (clk->ops->unprepare)
                clk->ops->unprepare(clk->hw);

        __clk_unprepare(clk->parent);

/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: the clk being unprepared
 *
 * clk_unprepare may sleep, which differentiates it from clk_disable.  In a
 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
 * if the operation may sleep.  One example is a clk which is accessed over
 * I2C.  In the complex case a clk gate operation may require a fast and a slow
 * part.  It is for this reason that clk_unprepare and clk_disable are not
 * mutually exclusive.  In fact clk_disable must be called before clk_unprepare.
 */
void clk_unprepare(struct clk *clk)

        mutex_lock(&prepare_lock);
        __clk_unprepare(clk);
        mutex_unlock(&prepare_lock);

EXPORT_SYMBOL_GPL(clk_unprepare);
int __clk_prepare(struct clk *clk)

        if (clk->prepare_count == 0) {
                ret = __clk_prepare(clk->parent);

                if (clk->ops->prepare) {
                        ret = clk->ops->prepare(clk->hw);
                        __clk_unprepare(clk->parent);

        clk->prepare_count++;

/**
 * clk_prepare - prepare a clock source
 * @clk: the clk being prepared
 *
 * clk_prepare may sleep, which differentiates it from clk_enable.  In a simple
 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
 * operation may sleep.  One example is a clk which is accessed over I2C.  In
 * the complex case a clk ungate operation may require a fast and a slow part.
 * It is for this reason that clk_prepare and clk_enable are not mutually
 * exclusive.  In fact clk_prepare must be called before clk_enable.
 * Returns 0 on success, a negative error code otherwise.
 */
int clk_prepare(struct clk *clk)

        mutex_lock(&prepare_lock);
        ret = __clk_prepare(clk);
        mutex_unlock(&prepare_lock);

EXPORT_SYMBOL_GPL(clk_prepare);
static void __clk_disable(struct clk *clk)

        if (WARN_ON(IS_ERR(clk)))

        if (WARN_ON(clk->enable_count == 0))

        if (--clk->enable_count > 0)

        if (clk->ops->disable)
                clk->ops->disable(clk->hw);

        __clk_disable(clk->parent);

/**
 * clk_disable - gate a clock
 * @clk: the clk being gated
 *
 * clk_disable must not sleep, which differentiates it from clk_unprepare.  In
 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
 * clk if the operation is fast and will never sleep.  One example is a
 * SoC-internal clk which is controlled via simple register writes.  In the
 * complex case a clk gate operation may require a fast and a slow part.  It is
 * for this reason that clk_unprepare and clk_disable are not mutually
 * exclusive.  In fact clk_disable must be called before clk_unprepare.
 */
void clk_disable(struct clk *clk)

        spin_lock_irqsave(&enable_lock, flags);
        __clk_disable(clk);
        spin_unlock_irqrestore(&enable_lock, flags);

EXPORT_SYMBOL_GPL(clk_disable);
static int __clk_enable(struct clk *clk)

        if (WARN_ON(clk->prepare_count == 0))

        if (clk->enable_count == 0) {
                ret = __clk_enable(clk->parent);

                if (clk->ops->enable) {
                        ret = clk->ops->enable(clk->hw);
                        __clk_disable(clk->parent);

/**
 * clk_enable - ungate a clock
 * @clk: the clk being ungated
 *
 * clk_enable must not sleep, which differentiates it from clk_prepare.  In a
 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
 * if the operation will never sleep.  One example is a SoC-internal clk which
 * is controlled via simple register writes.  In the complex case a clk ungate
 * operation may require a fast and a slow part.  It is for this reason that
 * clk_enable and clk_prepare are not mutually exclusive.  In fact clk_prepare
 * must be called before clk_enable.  Returns 0 on success, a negative error
 * code otherwise.
 */
int clk_enable(struct clk *clk)

        spin_lock_irqsave(&enable_lock, flags);
        ret = __clk_enable(clk);
        spin_unlock_irqrestore(&enable_lock, flags);

EXPORT_SYMBOL_GPL(clk_enable);
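/*
 * Illustrative sketch (not part of this file): how a consumer driver is
 * expected to pair the sleeping and atomic halves of the gating API described
 * above.  The device pointer, the "uart" connection id and the error handling
 * shown here are hypothetical.
 *
 *	struct clk *clk;
 *	int ret;
 *
 *	clk = clk_get(dev, "uart");		// may sleep
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *
 *	ret = clk_prepare(clk);			// may sleep, process context only
 *	if (ret)
 *		return ret;
 *
 *	ret = clk_enable(clk);			// atomic, may run under a spinlock
 *	if (ret) {
 *		clk_unprepare(clk);
 *		return ret;
 *	}
 *
 *	// ... use the hardware ...
 *
 *	clk_disable(clk);			// atomic; must precede clk_unprepare
 *	clk_unprepare(clk);			// may sleep
 */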
/**
 * __clk_round_rate - round the given rate for a clk
 * @clk: round the rate of this clock
 *
 * Caller must hold prepare_lock.  Useful for clk_ops such as .set_rate
 */
unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)

        unsigned long parent_rate = 0;

        if (!clk->ops->round_rate) {
                if (clk->flags & CLK_SET_RATE_PARENT)
                        return __clk_round_rate(clk->parent, rate);

        parent_rate = clk->parent->rate;

        return clk->ops->round_rate(clk->hw, rate, &parent_rate);

/**
 * clk_round_rate - round the given rate for a clk
 * @clk: the clk for which we are rounding a rate
 * @rate: the rate which is to be rounded
 *
 * Takes in a rate as input and rounds it to a rate that the clk can actually
 * use, which is then returned.  If clk doesn't support the round_rate
 * operation then the parent rate is returned.
 */
long clk_round_rate(struct clk *clk, unsigned long rate)

        mutex_lock(&prepare_lock);
        ret = __clk_round_rate(clk, rate);
        mutex_unlock(&prepare_lock);

EXPORT_SYMBOL_GPL(clk_round_rate);
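/*
 * Illustrative sketch (not part of this file): clk_round_rate lets a consumer
 * discover what clk_set_rate would actually program without touching the
 * hardware.  The clk handle, the 48 MHz target and the acceptance threshold
 * below are hypothetical.
 *
 *	long rounded;
 *	int ret = -EINVAL;
 *
 *	rounded = clk_round_rate(clk, 48000000);
 *	if (rounded <= 0)
 *		return rounded ? rounded : -EINVAL;
 *
 *	// only commit the change if the achievable rate is acceptable
 *	if (rounded >= 47000000)
 *		ret = clk_set_rate(clk, rounded);
 */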
/**
 * __clk_notify - call clk notifier chain
 * @clk: struct clk * that is changing rate
 * @msg: clk notifier type (see include/linux/clk.h)
 * @old_rate: old clk rate
 * @new_rate: new clk rate
 *
 * Triggers a notifier call chain on the clk rate-change notification
 * for 'clk'.  Passes a pointer to the struct clk and the previous
 * and current rates to the notifier callback.  Intended to be called by
 * internal clock code only.  Returns NOTIFY_DONE from the last driver
 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
 * a driver returns that.
 */
static int __clk_notify(struct clk *clk, unsigned long msg,
                unsigned long old_rate, unsigned long new_rate)

        struct clk_notifier *cn;
        struct clk_notifier_data cnd;
        int ret = NOTIFY_DONE;

        cnd.old_rate = old_rate;
        cnd.new_rate = new_rate;

        list_for_each_entry(cn, &clk_notifier_list, node) {
                if (cn->clk == clk) {
                        ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
                                        &cnd);
/**
 * __clk_recalc_rates
 * @clk: first clk in the subtree
 * @msg: notification type (see include/linux/clk.h)
 *
 * Walks the subtree of clks starting with clk and recalculates rates as it
 * goes.  Note that if a clk does not implement the .recalc_rate callback then
 * it is assumed that the clock will take on the rate of its parent.
 *
 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
 * if necessary.
 *
 * Caller must hold prepare_lock.
 */
static void __clk_recalc_rates(struct clk *clk, unsigned long msg)

        unsigned long old_rate;
        unsigned long parent_rate = 0;
        struct hlist_node *tmp;

        old_rate = clk->rate;

        parent_rate = clk->parent->rate;

        if (clk->ops->recalc_rate)
                clk->rate = clk->ops->recalc_rate(clk->hw, parent_rate);
        else
                clk->rate = parent_rate;

        /*
         * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
         * & ABORT_RATE_CHANGE notifiers
         */
        if (clk->notifier_count && msg)
                __clk_notify(clk, msg, old_rate, clk->rate);

        hlist_for_each_entry(child, tmp, &clk->children, child_node)
                __clk_recalc_rates(child, msg);
/**
 * clk_get_rate - return the rate of clk
 * @clk: the clk whose rate is being returned
 *
 * Simply returns the cached rate of the clk, unless the CLK_GET_RATE_NOCACHE
 * flag is set, in which case a recalc_rate will be issued.
 * If clk is NULL then returns 0.
 */
unsigned long clk_get_rate(struct clk *clk)

        mutex_lock(&prepare_lock);

        if (clk && (clk->flags & CLK_GET_RATE_NOCACHE))
                __clk_recalc_rates(clk, 0);

        rate = __clk_get_rate(clk);
        mutex_unlock(&prepare_lock);

EXPORT_SYMBOL_GPL(clk_get_rate);
/**
 * __clk_speculate_rates
 * @clk: first clk in the subtree
 * @parent_rate: the "future" rate of clk's parent
 *
 * Walks the subtree of clks starting with clk, speculating rates as it
 * goes and firing off PRE_RATE_CHANGE notifications as necessary.
 *
 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
 * pre-rate change notifications and returns early if no clks in the
 * subtree have subscribed to the notifications.  Note that if a clk does not
 * implement the .recalc_rate callback then it is assumed that the clock will
 * take on the rate of its parent.
 *
 * Caller must hold prepare_lock.
 */
static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)

        struct hlist_node *tmp;
        unsigned long new_rate;
        int ret = NOTIFY_DONE;

        if (clk->ops->recalc_rate)
                new_rate = clk->ops->recalc_rate(clk->hw, parent_rate);
        else
                new_rate = parent_rate;

        /* abort the rate change if a driver returns NOTIFY_BAD */
        if (clk->notifier_count)
                ret = __clk_notify(clk, PRE_RATE_CHANGE, clk->rate, new_rate);

        if (ret == NOTIFY_BAD)

        hlist_for_each_entry(child, tmp, &clk->children, child_node) {
                ret = __clk_speculate_rates(child, new_rate);
                if (ret == NOTIFY_BAD)
static void clk_calc_subtree(struct clk *clk, unsigned long new_rate)

        struct hlist_node *tmp;

        clk->new_rate = new_rate;

        hlist_for_each_entry(child, tmp, &clk->children, child_node) {
                if (child->ops->recalc_rate)
                        child->new_rate = child->ops->recalc_rate(child->hw,
                                        new_rate);
                else
                        child->new_rate = new_rate;
                clk_calc_subtree(child, child->new_rate);
/*
 * calculate the new rates returning the topmost clock that has to be
 * changed.
 */
static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)

        struct clk *top = clk;
        unsigned long best_parent_rate = 0;
        unsigned long new_rate;

        if (IS_ERR_OR_NULL(clk))

        /* save parent rate, if it exists */
        best_parent_rate = clk->parent->rate;

        /* never propagate up to the parent */
        if (!(clk->flags & CLK_SET_RATE_PARENT)) {
                if (!clk->ops->round_rate) {
                        clk->new_rate = clk->rate;

                new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);

        /* need clk->parent from here on out */
                pr_debug("%s: %s has NULL parent\n", __func__, clk->name);

        if (!clk->ops->round_rate) {
                top = clk_calc_new_rates(clk->parent, rate);
                new_rate = clk->parent->new_rate;

        new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);

        if (best_parent_rate != clk->parent->rate) {
                top = clk_calc_new_rates(clk->parent, best_parent_rate);

        clk_calc_subtree(clk, new_rate);
/*
 * Notify about rate changes in a subtree.  Always walk down the whole tree
 * so that in case of an error we can walk down the whole tree again and
 * abort the change.
 */
static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event)

        struct hlist_node *tmp;
        struct clk *child, *fail_clk = NULL;
        int ret = NOTIFY_DONE;

        if (clk->rate == clk->new_rate)

        if (clk->notifier_count) {
                ret = __clk_notify(clk, event, clk->rate, clk->new_rate);
                if (ret == NOTIFY_BAD)

        hlist_for_each_entry(child, tmp, &clk->children, child_node) {
                clk = clk_propagate_rate_change(child, event);
/*
 * walk down a subtree and set the new rates notifying the rate
 * change on the way
 */
static void clk_change_rate(struct clk *clk)

        unsigned long old_rate;
        unsigned long best_parent_rate = 0;
        struct hlist_node *tmp;

        old_rate = clk->rate;

        best_parent_rate = clk->parent->rate;

        if (clk->ops->set_rate)
                clk->ops->set_rate(clk->hw, clk->new_rate, best_parent_rate);

        if (clk->ops->recalc_rate)
                clk->rate = clk->ops->recalc_rate(clk->hw, best_parent_rate);
        else
                clk->rate = best_parent_rate;

        if (clk->notifier_count && old_rate != clk->rate)
                __clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);

        hlist_for_each_entry(child, tmp, &clk->children, child_node)
                clk_change_rate(child);
/**
 * clk_set_rate - specify a new rate for clk
 * @clk: the clk whose rate is being changed
 * @rate: the new rate for clk
 *
 * In the simplest case clk_set_rate will only adjust the rate of clk.
 *
 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
 * propagate up to clk's parent; whether or not this happens depends on the
 * outcome of clk's .round_rate implementation.  If *parent_rate is unchanged
 * after calling .round_rate then upstream parent propagation is ignored.  If
 * *parent_rate comes back with a new rate for clk's parent then we propagate
 * up to clk's parent and set its rate.  Upward propagation will continue
 * until either a clk does not support the CLK_SET_RATE_PARENT flag or
 * .round_rate stops requesting changes to clk's parent_rate.
 *
 * Rate changes are accomplished via tree traversal that also recalculates the
 * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int clk_set_rate(struct clk *clk, unsigned long rate)

        struct clk *top, *fail_clk;

        /* prevent racing with updates to the clock topology */
        mutex_lock(&prepare_lock);

        /* bail early if nothing to do */
        if (rate == clk->rate)

        if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count) {

        /* calculate new rates and get the topmost changed clock */
        top = clk_calc_new_rates(clk, rate);

        /* notify that we are about to change rates */
        fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
                pr_warn("%s: failed to set %s rate\n", __func__,
                                fail_clk->name);
                clk_propagate_rate_change(top, ABORT_RATE_CHANGE);

        /* change the rates */
        clk_change_rate(top);

        mutex_unlock(&prepare_lock);

        mutex_unlock(&prepare_lock);

EXPORT_SYMBOL_GPL(clk_set_rate);
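/*
 * Illustrative sketch (not part of this file): how a clock's .round_rate
 * callback drives the upward propagation described above.  The foo_div
 * naming and the divide-by-two behaviour are hypothetical; only the
 * *parent_rate contract reflects this file.
 *
 *	static long foo_div_round_rate(struct clk_hw *hw, unsigned long rate,
 *				       unsigned long *parent_rate)
 *	{
 *		// The framework passes in the current parent rate.  Leaving
 *		// *parent_rate untouched stops propagation here; with
 *		// CLK_SET_RATE_PARENT set, writing a new value asks
 *		// clk_calc_new_rates to recurse into the parent with it.
 *		if (rate > *parent_rate / 2)
 *			*parent_rate = rate * 2;	// request a faster parent
 *
 *		return *parent_rate / 2;		// rate this divider will deliver
 *	}
 */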
/**
 * clk_get_parent - return the parent of a clk
 * @clk: the clk whose parent gets returned
 *
 * Simply returns clk->parent.  Returns NULL if clk is NULL.
 */
struct clk *clk_get_parent(struct clk *clk)

        mutex_lock(&prepare_lock);
        parent = __clk_get_parent(clk);
        mutex_unlock(&prepare_lock);

EXPORT_SYMBOL_GPL(clk_get_parent);
/*
 * .get_parent is mandatory for clocks with multiple possible parents.  It is
 * optional for single-parent clocks.  Always call .get_parent if it is
 * available and WARN if it is missing for multi-parent clocks.
 *
 * For single-parent clocks without .get_parent, first check to see if the
 * .parents array exists, and if so use it to avoid an expensive tree
 * traversal.  If .parents does not exist then walk the tree with __clk_lookup.
 */
static struct clk *__clk_init_parent(struct clk *clk)

        struct clk *ret = NULL;

        /* handle the trivial cases */
        if (!clk->num_parents)

        if (clk->num_parents == 1) {
                if (IS_ERR_OR_NULL(clk->parent))
                        ret = clk->parent = __clk_lookup(clk->parent_names[0]);

        if (!clk->ops->get_parent) {
                WARN(!clk->ops->get_parent,
                        "%s: multi-parent clocks must implement .get_parent\n",

        /*
         * Do our best to cache parent clocks in clk->parents.  This prevents
         * unnecessary and expensive calls to __clk_lookup.  We don't set
         * clk->parent here; that is done by the calling function.
         */
        index = clk->ops->get_parent(clk->hw);

                kzalloc((sizeof(struct clk *) * clk->num_parents),

        ret = __clk_lookup(clk->parent_names[index]);
        else if (!clk->parents[index])
                ret = clk->parents[index] =
                        __clk_lookup(clk->parent_names[index]);

        ret = clk->parents[index];
void __clk_reparent(struct clk *clk, struct clk *new_parent)

#ifdef CONFIG_COMMON_CLK_DEBUG
        struct dentry *new_parent_d;
#endif

        if (!clk || !new_parent)

        hlist_del(&clk->child_node);

        hlist_add_head(&clk->child_node, &new_parent->children);

        hlist_add_head(&clk->child_node, &clk_orphan_list);

#ifdef CONFIG_COMMON_CLK_DEBUG
        new_parent_d = new_parent->dentry;

        new_parent_d = orphandir;

        d = debugfs_rename(clk->dentry->d_parent, clk->dentry,
                        new_parent_d, clk->name);

        pr_debug("%s: failed to rename debugfs entry for %s\n",
                        __func__, clk->name);
#endif

        clk->parent = new_parent;

        __clk_recalc_rates(clk, POST_RATE_CHANGE);
static int __clk_set_parent(struct clk *clk, struct clk *parent)

        struct clk *old_parent;
        unsigned long flags;

        old_parent = clk->parent;

        clk->parents = kzalloc((sizeof(struct clk *) * clk->num_parents),
                        GFP_KERNEL);

        /*
         * find index of new parent clock using cached parent ptrs,
         * or if not yet cached, use string name comparison and cache
         * them now to avoid future calls to __clk_lookup.
         */
        for (i = 0; i < clk->num_parents; i++) {
                if (clk->parents && clk->parents[i] == parent)
                else if (!strcmp(clk->parent_names[i], parent->name)) {
                        clk->parents[i] = __clk_lookup(parent->name);

        if (i == clk->num_parents) {
                pr_debug("%s: clock %s is not a possible parent of clock %s\n",
                                __func__, parent->name, clk->name);

        /* migrate prepare and enable */
        if (clk->prepare_count)
                __clk_prepare(parent);

        /* FIXME replace with clk_is_enabled(clk) someday */
        spin_lock_irqsave(&enable_lock, flags);
        if (clk->enable_count)
                __clk_enable(parent);
        spin_unlock_irqrestore(&enable_lock, flags);

        /* change clock input source */
        ret = clk->ops->set_parent(clk->hw, i);

        /* clean up old prepare and enable */
        spin_lock_irqsave(&enable_lock, flags);
        if (clk->enable_count)
                __clk_disable(old_parent);
        spin_unlock_irqrestore(&enable_lock, flags);

        if (clk->prepare_count)
                __clk_unprepare(old_parent);
/**
 * clk_set_parent - switch the parent of a mux clk
 * @clk: the mux clk whose input we are switching
 * @parent: the new input to clk
 *
 * Re-parent clk to use parent as its new input source.  If clk has the
 * CLK_SET_PARENT_GATE flag set then clk must be gated for this
 * operation to succeed.  After successfully changing clk's parent
 * clk_set_parent will update the clk topology, sysfs topology and
 * propagate rate recalculation via __clk_recalc_rates.  Returns 0 on
 * success, a negative error code otherwise.
 */
int clk_set_parent(struct clk *clk, struct clk *parent)

        if (!clk || !clk->ops)

        if (!clk->ops->set_parent)

        /* prevent racing with updates to the clock topology */
        mutex_lock(&prepare_lock);

        if (clk->parent == parent)

        /* propagate PRE_RATE_CHANGE notifications */
        if (clk->notifier_count)
                ret = __clk_speculate_rates(clk, parent->rate);

        /* abort if a driver objects */
        if (ret == NOTIFY_STOP)

        /* only re-parent if the clock is not in use */
        if ((clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count)

        ret = __clk_set_parent(clk, parent);

        /* propagate ABORT_RATE_CHANGE if .set_parent failed */
                __clk_recalc_rates(clk, ABORT_RATE_CHANGE);

        /* propagate rate recalculation downstream */
        __clk_reparent(clk, parent);

        mutex_unlock(&prepare_lock);

EXPORT_SYMBOL_GPL(clk_set_parent);
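/*
 * Illustrative sketch (not part of this file): re-parenting a mux clock from
 * a consumer.  The "mux" and "osc" clock names are hypothetical; a mux with
 * CLK_SET_PARENT_GATE set must not be prepared while its parent is switched.
 *
 *	struct clk *mux = clk_get(dev, "mux");
 *	struct clk *osc = clk_get(dev, "osc");
 *	int ret;
 *
 *	clk_disable(mux);
 *	clk_unprepare(mux);
 *
 *	ret = clk_set_parent(mux, osc);	// rates below the mux are recalculated
 *	if (ret)
 *		dev_warn(dev, "could not reparent mux: %d\n", ret);
 *
 *	ret = clk_prepare(mux);
 *	if (!ret)
 *		ret = clk_enable(mux);
 */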
/**
 * __clk_init - initialize the data structures in a struct clk
 * @dev: device initializing this clk, placeholder for now
 * @clk: clk being initialized
 *
 * Initializes the lists in struct clk, queries the hardware for the
 * parent and rate and sets them both.
 */
int __clk_init(struct device *dev, struct clk *clk)

        struct hlist_node *tmp, *tmp2;

        mutex_lock(&prepare_lock);

        /* check to see if a clock with this name is already registered */
        if (__clk_lookup(clk->name)) {
                pr_debug("%s: clk %s already initialized\n",
                                __func__, clk->name);

        /* check that clk_ops are sane.  See Documentation/clk.txt */
        if (clk->ops->set_rate &&
                        !(clk->ops->round_rate && clk->ops->recalc_rate)) {
                pr_warning("%s: %s must implement .round_rate & .recalc_rate\n",
                                __func__, clk->name);

        if (clk->ops->set_parent && !clk->ops->get_parent) {
                pr_warning("%s: %s must implement .get_parent & .set_parent\n",
                                __func__, clk->name);

        /* throw a WARN if any entries in parent_names are NULL */
        for (i = 0; i < clk->num_parents; i++)
                WARN(!clk->parent_names[i],
                                "%s: invalid NULL in %s's .parent_names\n",
                                __func__, clk->name);

        /*
         * Allocate an array of struct clk *'s to avoid unnecessary string
         * look-ups of clk's possible parents.  This can fail for clocks passed
         * in to clk_init during early boot; thus any access to clk->parents[]
         * must always check for a NULL pointer and try to populate it if
         * necessary.
         *
         * If clk->parents is not NULL we skip this entire block.  This allows
         * for clock drivers to statically initialize clk->parents.
         */
        if (clk->num_parents > 1 && !clk->parents) {
                clk->parents = kzalloc((sizeof(struct clk *) * clk->num_parents),
                                GFP_KERNEL);
                /*
                 * __clk_lookup returns NULL for parents that have not been
                 * clk_init'd; thus any access to clk->parents[] must check
                 * for a NULL pointer.  We can always perform lazy lookups for
                 * missing parents later on.
                 */
                for (i = 0; i < clk->num_parents; i++)
                        __clk_lookup(clk->parent_names[i]);

        clk->parent = __clk_init_parent(clk);

        /*
         * Populate clk->parent if parent has already been __clk_init'd.  If
         * parent has not yet been __clk_init'd then place clk in the orphan
         * list.  If clk has set the CLK_IS_ROOT flag then place it in the root
         * clk list.
         *
         * Every time a new clk is clk_init'd then we walk the list of orphan
         * clocks and re-parent any that are children of the clock currently
         * being clk_init'd.
         */
        if (clk->parent)
                hlist_add_head(&clk->child_node,
                                &clk->parent->children);
        else if (clk->flags & CLK_IS_ROOT)
                hlist_add_head(&clk->child_node, &clk_root_list);
        else
                hlist_add_head(&clk->child_node, &clk_orphan_list);

        /*
         * Set clk's rate.  The preferred method is to use .recalc_rate.  For
         * simple clocks and lazy developers the default fallback is to use the
         * parent's rate.  If a clock doesn't have a parent (or is orphaned)
         * then rate is set to zero.
         */
        if (clk->ops->recalc_rate)
                clk->rate = clk->ops->recalc_rate(clk->hw,
                                __clk_get_rate(clk->parent));
        else if (clk->parent)
                clk->rate = clk->parent->rate;

        /*
         * walk the list of orphan clocks and reparent any that are children of
         * this clock
         */
        hlist_for_each_entry_safe(orphan, tmp, tmp2, &clk_orphan_list, child_node)
                for (i = 0; i < orphan->num_parents; i++)
                        if (!strcmp(clk->name, orphan->parent_names[i])) {
                                __clk_reparent(orphan, clk);

        /*
         * optional platform-specific magic
         *
         * The .init callback is not used by any of the basic clock types, but
         * exists for weird hardware that must perform initialization magic.
         * Please consider other ways of solving initialization problems before
         * using this callback, as its use is discouraged.
         */
        clk->ops->init(clk->hw);

        clk_debug_register(clk);

        mutex_unlock(&prepare_lock);
/**
 * __clk_register - register a clock and return a cookie.
 *
 * Same as clk_register, except that the .clk field inside hw shall point to a
 * preallocated (generally statically allocated) struct clk.  None of the fields
 * of the struct clk need to be initialized.
 *
 * The data pointed to by .init and .clk field shall NOT be marked as init
 * data.
 *
 * __clk_register is only exposed via clk-private.h and is intended for use with
 * very large numbers of clocks that need to be statically initialized.  It is
 * a layering violation to include clk-private.h from any code which implements
 * a clock's .ops; as such any statically initialized clock data MUST be in a
 * separate C file from the logic that implements its operations.  Returns 0
 * on success, otherwise an error code.
 */
struct clk *__clk_register(struct device *dev, struct clk_hw *hw)

        clk->name = hw->init->name;
        clk->ops = hw->init->ops;
        clk->flags = hw->init->flags;
        clk->parent_names = hw->init->parent_names;
        clk->num_parents = hw->init->num_parents;

        ret = __clk_init(dev, clk);
                return ERR_PTR(ret);

EXPORT_SYMBOL_GPL(__clk_register);
/**
 * clk_register - allocate a new clock, register it and return an opaque cookie
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * clk_register is the primary interface for populating the clock tree with new
 * clock nodes.  It returns a pointer to the newly allocated struct clk which
 * cannot be dereferenced by driver code but may be used in conjunction with the
 * rest of the clock API.  In the event of an error clk_register will return an
 * error code; drivers must test for an error code after calling clk_register.
 */
struct clk *clk_register(struct device *dev, struct clk_hw *hw)

        clk = kzalloc(sizeof(*clk), GFP_KERNEL);
                pr_err("%s: could not allocate clk\n", __func__);

        clk->name = kstrdup(hw->init->name, GFP_KERNEL);
                pr_err("%s: could not allocate clk->name\n", __func__);
        clk->ops = hw->init->ops;
        clk->flags = hw->init->flags;
        clk->num_parents = hw->init->num_parents;

        /* allocate local copy in case parent_names is __initdata */
        clk->parent_names = kzalloc((sizeof(char *) * clk->num_parents),
                        GFP_KERNEL);

        if (!clk->parent_names) {
                pr_err("%s: could not allocate clk->parent_names\n", __func__);
                goto fail_parent_names;

        /* copy each string name in case parent_names is __initdata */
        for (i = 0; i < clk->num_parents; i++) {
                clk->parent_names[i] = kstrdup(hw->init->parent_names[i],
                                GFP_KERNEL);
                if (!clk->parent_names[i]) {
                        pr_err("%s: could not copy parent_names\n", __func__);
                        goto fail_parent_names_copy;

        ret = __clk_init(dev, clk);

fail_parent_names_copy:
                kfree(clk->parent_names[i]);
        kfree(clk->parent_names);

        return ERR_PTR(ret);

EXPORT_SYMBOL_GPL(clk_register);
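/*
 * Illustrative sketch (not part of this file): registering a clock from
 * platform code.  The foo_gate naming, foo_gate_ops and the parent name are
 * hypothetical; only the clk_init_data/clk_hw/clk_register usage reflects
 * this file.
 *
 *	static const char *foo_parents[] = { "foo_pll" };
 *
 *	static struct clk_init_data foo_gate_init = {
 *		.name = "foo_gate",
 *		.ops = &foo_gate_ops,
 *		.parent_names = foo_parents,
 *		.num_parents = ARRAY_SIZE(foo_parents),
 *		.flags = 0,
 *	};
 *
 *	static struct clk_hw foo_gate_hw = {
 *		.init = &foo_gate_init,
 *	};
 *
 *	struct clk *clk = clk_register(dev, &foo_gate_hw);
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);	// always test for an error pointer
 */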
/**
 * clk_unregister - unregister a currently registered clock
 * @clk: clock to unregister
 *
 * Currently unimplemented.
 */
void clk_unregister(struct clk *clk) {}
EXPORT_SYMBOL_GPL(clk_unregister);
/***   clk rate change notifiers   ***/

/**
 * clk_notifier_register - add a clk rate change notifier
 * @clk: struct clk * to watch
 * @nb: struct notifier_block * with callback info
 *
 * Request notification when clk's rate changes.  This uses an SRCU
 * notifier because we want it to block and notifier unregistrations are
 * uncommon.  The callbacks associated with the notifier must not
 * re-enter into the clk framework by calling any top-level clk APIs;
 * this would recursively take the prepare_lock mutex and deadlock.
 *
 * Pre-change notifier callbacks will be passed the current, pre-change
 * rate of the clk via struct clk_notifier_data.old_rate.  The new,
 * post-change rate of the clk is passed via struct
 * clk_notifier_data.new_rate.
 *
 * Post-change notifiers will pass the now-current, post-change rate of
 * the clk in both struct clk_notifier_data.old_rate and struct
 * clk_notifier_data.new_rate.
 *
 * Abort-change notifiers are effectively the opposite of pre-change
 * notifiers: the original pre-change clk rate is passed in via struct
 * clk_notifier_data.new_rate and the failed post-change rate is passed
 * in via struct clk_notifier_data.old_rate.
 *
 * clk_notifier_register() must be called from non-atomic context.
 * Returns -EINVAL if called with null arguments, -ENOMEM upon
 * allocation failure; otherwise, passes along the return value of
 * srcu_notifier_chain_register().
 */
int clk_notifier_register(struct clk *clk, struct notifier_block *nb)

        struct clk_notifier *cn;

        mutex_lock(&prepare_lock);

        /* search the list of notifiers for this clk */
        list_for_each_entry(cn, &clk_notifier_list, node)

        /* if clk wasn't in the notifier list, allocate new clk_notifier */
        if (cn->clk != clk) {
                cn = kzalloc(sizeof(struct clk_notifier), GFP_KERNEL);

                srcu_init_notifier_head(&cn->notifier_head);

                list_add(&cn->node, &clk_notifier_list);

        ret = srcu_notifier_chain_register(&cn->notifier_head, nb);

        clk->notifier_count++;

        mutex_unlock(&prepare_lock);

EXPORT_SYMBOL_GPL(clk_notifier_register);
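/*
 * Illustrative sketch (not part of this file): a rate-change notifier
 * callback.  The foo_clk_notifier name and the 100 MHz limit are
 * hypothetical; the clk_notifier_data layout and the PRE/POST/ABORT
 * semantics are as documented above.
 *
 *	static int foo_clk_notifier(struct notifier_block *nb,
 *				    unsigned long event, void *data)
 *	{
 *		struct clk_notifier_data *cnd = data;
 *
 *		switch (event) {
 *		case PRE_RATE_CHANGE:
 *			// veto rates this consumer cannot handle
 *			if (cnd->new_rate > 100000000)
 *				return NOTIFY_BAD;
 *			return NOTIFY_OK;
 *		case POST_RATE_CHANGE:
 *		case ABORT_RATE_CHANGE:
 *		default:
 *			return NOTIFY_DONE;
 *		}
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_clk_notifier,
 *	};
 *
 *	// must be called from non-atomic context
 *	ret = clk_notifier_register(clk, &foo_nb);
 */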
/**
 * clk_notifier_unregister - remove a clk rate change notifier
 * @clk: struct clk *
 * @nb: struct notifier_block * with callback info
 *
 * Request no further notification for changes to 'clk' and frees memory
 * allocated in clk_notifier_register.
 *
 * Returns -EINVAL if called with null arguments; otherwise, passes
 * along the return value of srcu_notifier_chain_unregister().
 */
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)

        struct clk_notifier *cn = NULL;

        mutex_lock(&prepare_lock);

        list_for_each_entry(cn, &clk_notifier_list, node)

        if (cn->clk == clk) {
                ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);

                clk->notifier_count--;

                /* XXX the notifier code should handle this better */
                if (!cn->notifier_head.head) {
                        srcu_cleanup_notifier_head(&cn->notifier_head);

        mutex_unlock(&prepare_lock);

EXPORT_SYMBOL_GPL(clk_notifier_unregister);
/**
 * struct of_clk_provider - Clock provider registration structure
 * @link: Entry in global list of clock providers
 * @node: Pointer to device tree node of clock provider
 * @get: Get clock callback.  Returns NULL or a struct clk for the
 *       given clock specifier
 * @data: context pointer to be passed into @get callback
 */
struct of_clk_provider {
        struct list_head link;

        struct device_node *node;
        struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
        void *data;
};

static LIST_HEAD(of_clk_providers);
static DEFINE_MUTEX(of_clk_lock);
struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
                void *data)

EXPORT_SYMBOL_GPL(of_clk_src_simple_get);

struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)

        struct clk_onecell_data *clk_data = data;
        unsigned int idx = clkspec->args[0];

        if (idx >= clk_data->clk_num) {
                pr_err("%s: invalid clock index %d\n", __func__, idx);
                return ERR_PTR(-EINVAL);

        return clk_data->clks[idx];

EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);
/**
 * of_clk_add_provider() - Register a clock provider for a node
 * @np: Device node pointer associated with clock provider
 * @clk_src_get: callback for decoding clock
 * @data: context pointer for @clk_src_get callback.
 */
int of_clk_add_provider(struct device_node *np,
                        struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
                                                   void *data),
                        void *data)

        struct of_clk_provider *cp;

        cp = kzalloc(sizeof(struct of_clk_provider), GFP_KERNEL);

        cp->node = of_node_get(np);
        cp->get = clk_src_get;

        mutex_lock(&of_clk_lock);
        list_add(&cp->link, &of_clk_providers);
        mutex_unlock(&of_clk_lock);
        pr_debug("Added clock from %s\n", np->full_name);

EXPORT_SYMBOL_GPL(of_clk_add_provider);
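/*
 * Illustrative sketch (not part of this file): registering a DT clock
 * provider that hands out clocks by index via of_clk_src_onecell_get.
 * The foo_clks array, FOO_NR_CLKS and the device node binding details
 * belong to a hypothetical platform clock driver.
 *
 *	static struct clk *foo_clks[FOO_NR_CLKS];
 *	static struct clk_onecell_data foo_clk_data = {
 *		.clks = foo_clks,
 *		.clk_num = ARRAY_SIZE(foo_clks),
 *	};
 *
 *	// np is the provider's device_node; consumers would reference it as
 *	// "clocks = <&foo_clock_controller INDEX>" with #clock-cells = <1>
 *	ret = of_clk_add_provider(np, of_clk_src_onecell_get, &foo_clk_data);
 */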
/**
 * of_clk_del_provider() - Remove a previously registered clock provider
 * @np: Device node pointer associated with clock provider
 */
void of_clk_del_provider(struct device_node *np)

        struct of_clk_provider *cp;

        mutex_lock(&of_clk_lock);
        list_for_each_entry(cp, &of_clk_providers, link) {
                if (cp->node == np) {
                        list_del(&cp->link);
                        of_node_put(cp->node);

        mutex_unlock(&of_clk_lock);

EXPORT_SYMBOL_GPL(of_clk_del_provider);
struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)

        struct of_clk_provider *provider;
        struct clk *clk = ERR_PTR(-ENOENT);

        /* Check if we have such a provider in our array */
        mutex_lock(&of_clk_lock);
        list_for_each_entry(provider, &of_clk_providers, link) {
                if (provider->node == clkspec->np)
                        clk = provider->get(clkspec, provider->data);

        mutex_unlock(&of_clk_lock);
const char *of_clk_get_parent_name(struct device_node *np, int index)

        struct of_phandle_args clkspec;
        const char *clk_name;

        rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
                                        &clkspec);

        if (of_property_read_string_index(clkspec.np, "clock-output-names",
                                          clkspec.args_count ? clkspec.args[0] : 0,
                                          &clk_name) < 0)
                clk_name = clkspec.np->name;

        of_node_put(clkspec.np);

EXPORT_SYMBOL_GPL(of_clk_get_parent_name);
/**
 * of_clk_init() - Scan and init clock providers from the DT
 * @matches: array of compatible values and init functions for providers.
 *
 * This function scans the device tree for matching clock providers and
 * calls their initialization functions.
 */
void __init of_clk_init(const struct of_device_id *matches)

        struct device_node *np;

        for_each_matching_node(np, matches) {
                const struct of_device_id *match = of_match_node(matches, np);
                of_clk_init_cb_t clk_init_cb = match->data;