/*
 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Standard functionality for the common clock API.  See Documentation/clk.txt
 */
#include <linux/clk-provider.h>
#include <linux/clk/clk-conf.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/sched.h>

#include "clk.h"
static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);

static struct task_struct *prepare_owner;
static struct task_struct *enable_owner;

static int prepare_refcnt;
static int enable_refcnt;

static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);
static long clk_core_get_accuracy(struct clk_core *clk);
static unsigned long clk_core_get_rate(struct clk_core *clk);
static int clk_core_get_phase(struct clk_core *clk);
static bool clk_core_is_prepared(struct clk_core *clk);
static bool clk_core_is_enabled(struct clk_core *clk);
static struct clk_core *clk_core_lookup(const char *name);
/***    private data structures    ***/

struct clk_core {
	const char		*name;
	const struct clk_ops	*ops;
	struct clk_hw		*hw;
	struct module		*owner;
	struct clk_core		*parent;
	const char		**parent_names;
	struct clk_core		**parents;
	u8			num_parents;
	u8			new_parent_index;
	unsigned long		rate;
	unsigned long		req_rate;
	unsigned long		new_rate;
	struct clk_core		*new_parent;
	struct clk_core		*new_child;
	unsigned long		flags;
	unsigned int		enable_count;
	unsigned int		prepare_count;
	unsigned long		accuracy;
	int			phase;
	struct hlist_head	children;
	struct hlist_node	child_node;
	struct hlist_node	debug_node;
	struct hlist_head	clks;
	unsigned int		notifier_count;
#ifdef CONFIG_DEBUG_FS
	struct dentry		*dentry;
#endif
	struct kref		ref;
};
#define CREATE_TRACE_POINTS
#include <trace/events/clk.h>

struct clk {
	struct clk_core	*core;
	const char *dev_id;
	const char *con_id;
	unsigned long min_rate;
	unsigned long max_rate;
	struct hlist_node clks_node;
};
/***           locking             ***/
static void clk_prepare_lock(void)
{
	if (!mutex_trylock(&prepare_lock)) {
		if (prepare_owner == current) {
			prepare_refcnt++;
			return;
		}
		mutex_lock(&prepare_lock);
	}
	WARN_ON_ONCE(prepare_owner != NULL);
	WARN_ON_ONCE(prepare_refcnt != 0);
	prepare_owner = current;
	prepare_refcnt = 1;
}

static void clk_prepare_unlock(void)
{
	WARN_ON_ONCE(prepare_owner != current);
	WARN_ON_ONCE(prepare_refcnt == 0);

	if (--prepare_refcnt)
		return;
	prepare_owner = NULL;
	mutex_unlock(&prepare_lock);
}
static unsigned long clk_enable_lock(void)
{
	unsigned long flags;

	if (!spin_trylock_irqsave(&enable_lock, flags)) {
		if (enable_owner == current) {
			enable_refcnt++;
			return flags;
		}
		spin_lock_irqsave(&enable_lock, flags);
	}
	WARN_ON_ONCE(enable_owner != NULL);
	WARN_ON_ONCE(enable_refcnt != 0);
	enable_owner = current;
	enable_refcnt = 1;
	return flags;
}

static void clk_enable_unlock(unsigned long flags)
{
	WARN_ON_ONCE(enable_owner != current);
	WARN_ON_ONCE(enable_refcnt == 0);

	if (--enable_refcnt)
		return;
	enable_owner = NULL;
	spin_unlock_irqrestore(&enable_lock, flags);
}
/***        debugfs support        ***/

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static struct dentry *rootdir;
static int inited = 0;
static DEFINE_MUTEX(clk_debug_lock);
static HLIST_HEAD(clk_debug_list);

static struct hlist_head *all_lists[] = {
	&clk_root_list,
	&clk_orphan_list,
	NULL,
};

static struct hlist_head *orphan_list[] = {
	&clk_orphan_list,
	NULL,
};
static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
				 int level)
{
	if (!c)
		return;

	seq_printf(s, "%*s%-*s %11d %12d %11lu %10lu %-3d\n",
		   level * 3 + 1, "",
		   30 - level * 3, c->name,
		   c->enable_count, c->prepare_count, clk_core_get_rate(c),
		   clk_core_get_accuracy(c), clk_core_get_phase(c));
}
static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
				     int level)
{
	struct clk_core *child;

	if (!c)
		return;

	clk_summary_show_one(s, c, level);

	hlist_for_each_entry(child, &c->children, child_node)
		clk_summary_show_subtree(s, child, level + 1);
}
static int clk_summary_show(struct seq_file *s, void *data)
{
	struct clk_core *c;
	struct hlist_head **lists = (struct hlist_head **)s->private;

	seq_puts(s, "   clock                         enable_cnt  prepare_cnt        rate   accuracy   phase\n");
	seq_puts(s, "----------------------------------------------------------------------------------------\n");

	clk_prepare_lock();

	for (; *lists; lists++)
		hlist_for_each_entry(c, *lists, child_node)
			clk_summary_show_subtree(s, c, 0);

	clk_prepare_unlock();

	return 0;
}
static int clk_summary_open(struct inode *inode, struct file *file)
{
	return single_open(file, clk_summary_show, inode->i_private);
}

static const struct file_operations clk_summary_fops = {
	.open		= clk_summary_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
{
	if (!c)
		return;

	/* This should be JSON format, i.e. elements separated with a comma */
	seq_printf(s, "\"%s\": { ", c->name);
	seq_printf(s, "\"enable_count\": %d,", c->enable_count);
	seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
	seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c));
	seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c));
	seq_printf(s, "\"phase\": %d", clk_core_get_phase(c));
}
static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level)
{
	struct clk_core *child;

	if (!c)
		return;

	clk_dump_one(s, c, level);

	hlist_for_each_entry(child, &c->children, child_node) {
		seq_printf(s, ",");
		clk_dump_subtree(s, child, level + 1);
	}

	seq_printf(s, "}");
}
static int clk_dump(struct seq_file *s, void *data)
{
	struct clk_core *c;
	bool first_node = true;
	struct hlist_head **lists = (struct hlist_head **)s->private;

	seq_printf(s, "{");

	clk_prepare_lock();

	for (; *lists; lists++) {
		hlist_for_each_entry(c, *lists, child_node) {
			if (!first_node)
				seq_printf(s, ",");
			first_node = false;
			clk_dump_subtree(s, c, 0);
		}
	}

	clk_prepare_unlock();

	seq_puts(s, "}\n");
	return 0;
}
static int clk_dump_open(struct inode *inode, struct file *file)
{
	return single_open(file, clk_dump, inode->i_private);
}

static const struct file_operations clk_dump_fops = {
	.open		= clk_dump_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int clk_debug_create_one(struct clk_core *clk, struct dentry *pdentry)
{
	struct dentry *d;
	int ret = -ENOMEM;

	if (!clk || !pdentry) {
		ret = -EINVAL;
		goto out;
	}

	d = debugfs_create_dir(clk->name, pdentry);
	if (!d)
		goto out;

	clk->dentry = d;

	d = debugfs_create_u32("clk_rate", S_IRUGO, clk->dentry,
			(u32 *)&clk->rate);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_accuracy", S_IRUGO, clk->dentry,
			(u32 *)&clk->accuracy);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_phase", S_IRUGO, clk->dentry,
			(u32 *)&clk->phase);
	if (!d)
		goto err_out;

	d = debugfs_create_x32("clk_flags", S_IRUGO, clk->dentry,
			(u32 *)&clk->flags);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_prepare_count", S_IRUGO, clk->dentry,
			(u32 *)&clk->prepare_count);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_enable_count", S_IRUGO, clk->dentry,
			(u32 *)&clk->enable_count);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_notifier_count", S_IRUGO, clk->dentry,
			(u32 *)&clk->notifier_count);
	if (!d)
		goto err_out;

	if (clk->ops->debug_init) {
		ret = clk->ops->debug_init(clk->hw, clk->dentry);
		if (ret)
			goto err_out;
	}

	ret = 0;
	goto out;

err_out:
	debugfs_remove_recursive(clk->dentry);
	clk->dentry = NULL;
out:
	return ret;
}
/**
 * clk_debug_register - add a clk node to the debugfs clk tree
 * @clk: the clk being added to the debugfs clk tree
 *
 * Dynamically adds a clk to the debugfs clk tree if debugfs has been
 * initialized.  Otherwise it bails out early since the debugfs clk tree
 * will be created lazily by clk_debug_init as part of a late_initcall.
 */
static int clk_debug_register(struct clk_core *clk)
{
	int ret = 0;

	mutex_lock(&clk_debug_lock);
	hlist_add_head(&clk->debug_node, &clk_debug_list);

	if (!inited)
		goto unlock;

	ret = clk_debug_create_one(clk, rootdir);
unlock:
	mutex_unlock(&clk_debug_lock);

	return ret;
}
/**
 * clk_debug_unregister - remove a clk node from the debugfs clk tree
 * @clk: the clk being removed from the debugfs clk tree
 *
 * Dynamically removes a clk and all its child clk nodes from the
 * debugfs clk tree if clk->dentry points to debugfs created by
 * clk_debug_register in __clk_init.
 */
static void clk_debug_unregister(struct clk_core *clk)
{
	mutex_lock(&clk_debug_lock);
	hlist_del_init(&clk->debug_node);
	debugfs_remove_recursive(clk->dentry);
	clk->dentry = NULL;
	mutex_unlock(&clk_debug_lock);
}
struct dentry *clk_debugfs_add_file(struct clk_hw *hw, char *name, umode_t mode,
				void *data, const struct file_operations *fops)
{
	struct dentry *d = NULL;

	if (hw->core->dentry)
		d = debugfs_create_file(name, mode, hw->core->dentry, data,
					fops);

	return d;
}
EXPORT_SYMBOL_GPL(clk_debugfs_add_file);
/**
 * clk_debug_init - lazily create the debugfs clk tree visualization
 *
 * clks are often initialized very early during boot before memory can
 * be dynamically allocated and well before debugfs is setup.
 * clk_debug_init walks the clk tree hierarchy while holding
 * prepare_lock and creates the topology as part of a late_initcall,
 * thus ensuring that clks initialized very early will still be
 * represented in the debugfs clk tree.  This function should only be
 * called once at boot-time, and all other clks added dynamically will
 * be done so with clk_debug_register.
 */
static int __init clk_debug_init(void)
{
	struct clk_core *clk;
	struct dentry *d;

	rootdir = debugfs_create_dir("clk", NULL);

	if (!rootdir)
		return -ENOMEM;

	d = debugfs_create_file("clk_summary", S_IRUGO, rootdir, &all_lists,
				&clk_summary_fops);
	if (!d)
		return -ENOMEM;

	d = debugfs_create_file("clk_dump", S_IRUGO, rootdir, &all_lists,
				&clk_dump_fops);
	if (!d)
		return -ENOMEM;

	d = debugfs_create_file("clk_orphan_summary", S_IRUGO, rootdir,
				&orphan_list, &clk_summary_fops);
	if (!d)
		return -ENOMEM;

	d = debugfs_create_file("clk_orphan_dump", S_IRUGO, rootdir,
				&orphan_list, &clk_dump_fops);
	if (!d)
		return -ENOMEM;

	mutex_lock(&clk_debug_lock);
	hlist_for_each_entry(clk, &clk_debug_list, debug_node)
		clk_debug_create_one(clk, rootdir);

	inited = 1;
	mutex_unlock(&clk_debug_lock);

	return 0;
}
late_initcall(clk_debug_init);
#else
static inline int clk_debug_register(struct clk_core *clk) { return 0; }
static inline void clk_debug_reparent(struct clk_core *clk,
				      struct clk_core *new_parent)
{
}
static inline void clk_debug_unregister(struct clk_core *clk)
{
}
#endif
/* caller must hold prepare_lock */
static void clk_unprepare_unused_subtree(struct clk_core *clk)
{
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	hlist_for_each_entry(child, &clk->children, child_node)
		clk_unprepare_unused_subtree(child);

	if (clk->prepare_count)
		return;

	if (clk->flags & CLK_IGNORE_UNUSED)
		return;

	if (clk_core_is_prepared(clk)) {
		trace_clk_unprepare(clk);
		if (clk->ops->unprepare_unused)
			clk->ops->unprepare_unused(clk->hw);
		else if (clk->ops->unprepare)
			clk->ops->unprepare(clk->hw);
		trace_clk_unprepare_complete(clk);
	}
}
/* caller must hold prepare_lock */
static void clk_disable_unused_subtree(struct clk_core *clk)
{
	struct clk_core *child;
	unsigned long flags;

	lockdep_assert_held(&prepare_lock);

	hlist_for_each_entry(child, &clk->children, child_node)
		clk_disable_unused_subtree(child);

	flags = clk_enable_lock();

	if (clk->enable_count)
		goto unlock_out;

	if (clk->flags & CLK_IGNORE_UNUSED)
		goto unlock_out;

	/*
	 * some gate clocks have special needs during the disable-unused
	 * sequence.  call .disable_unused if available, otherwise fall
	 * back to .disable
	 */
	if (clk_core_is_enabled(clk)) {
		trace_clk_disable(clk);
		if (clk->ops->disable_unused)
			clk->ops->disable_unused(clk->hw);
		else if (clk->ops->disable)
			clk->ops->disable(clk->hw);
		trace_clk_disable_complete(clk);
	}

unlock_out:
	clk_enable_unlock(flags);
}
static bool clk_ignore_unused;
static int __init clk_ignore_unused_setup(char *__unused)
{
	clk_ignore_unused = true;
	return 1;
}
__setup("clk_ignore_unused", clk_ignore_unused_setup);
static int clk_disable_unused(void)
{
	struct clk_core *clk;

	if (clk_ignore_unused) {
		pr_warn("clk: Not disabling unused clocks\n");
		return 0;
	}

	clk_prepare_lock();

	hlist_for_each_entry(clk, &clk_root_list, child_node)
		clk_disable_unused_subtree(clk);

	hlist_for_each_entry(clk, &clk_orphan_list, child_node)
		clk_disable_unused_subtree(clk);

	hlist_for_each_entry(clk, &clk_root_list, child_node)
		clk_unprepare_unused_subtree(clk);

	hlist_for_each_entry(clk, &clk_orphan_list, child_node)
		clk_unprepare_unused_subtree(clk);

	clk_prepare_unlock();

	return 0;
}
late_initcall_sync(clk_disable_unused);
/***    helper functions   ***/

const char *__clk_get_name(struct clk *clk)
{
	return !clk ? NULL : clk->core->name;
}
EXPORT_SYMBOL_GPL(__clk_get_name);

struct clk_hw *__clk_get_hw(struct clk *clk)
{
	return !clk ? NULL : clk->core->hw;
}
EXPORT_SYMBOL_GPL(__clk_get_hw);

u8 __clk_get_num_parents(struct clk *clk)
{
	return !clk ? 0 : clk->core->num_parents;
}
EXPORT_SYMBOL_GPL(__clk_get_num_parents);
struct clk *__clk_get_parent(struct clk *clk)
{
	if (!clk)
		return NULL;

	/* TODO: Create a per-user clk and change callers to call clk_put */
	return !clk->core->parent ? NULL : clk->core->parent->hw->clk;
}
EXPORT_SYMBOL_GPL(__clk_get_parent);
static struct clk_core *clk_core_get_parent_by_index(struct clk_core *clk,
						     u8 index)
{
	if (!clk || index >= clk->num_parents)
		return NULL;
	else if (!clk->parents)
		return clk_core_lookup(clk->parent_names[index]);
	else if (!clk->parents[index])
		return clk->parents[index] =
			clk_core_lookup(clk->parent_names[index]);
	else
		return clk->parents[index];
}
struct clk *clk_get_parent_by_index(struct clk *clk, u8 index)
{
	struct clk_core *parent;

	if (!clk)
		return NULL;

	parent = clk_core_get_parent_by_index(clk->core, index);

	return !parent ? NULL : parent->hw->clk;
}
EXPORT_SYMBOL_GPL(clk_get_parent_by_index);
unsigned int __clk_get_enable_count(struct clk *clk)
{
	return !clk ? 0 : clk->core->enable_count;
}
static unsigned long clk_core_get_rate_nolock(struct clk_core *clk)
{
	unsigned long ret;

	if (!clk) {
		ret = 0;
		goto out;
	}

	ret = clk->rate;

	if (clk->flags & CLK_IS_ROOT)
		goto out;

	if (!clk->parent)
		ret = 0;

out:
	return ret;
}

unsigned long __clk_get_rate(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_get_rate_nolock(clk->core);
}
EXPORT_SYMBOL_GPL(__clk_get_rate);
static unsigned long __clk_get_accuracy(struct clk_core *clk)
{
	if (!clk)
		return 0;

	return clk->accuracy;
}

unsigned long __clk_get_flags(struct clk *clk)
{
	return !clk ? 0 : clk->core->flags;
}
EXPORT_SYMBOL_GPL(__clk_get_flags);
static bool clk_core_is_prepared(struct clk_core *clk)
{
	int ret;

	if (!clk)
		return false;

	/*
	 * .is_prepared is optional for clocks that can prepare;
	 * fall back to software usage counter if it is missing
	 */
	if (!clk->ops->is_prepared) {
		ret = clk->prepare_count ? 1 : 0;
		goto out;
	}

	ret = clk->ops->is_prepared(clk->hw);
out:
	return !!ret;
}

bool __clk_is_prepared(struct clk *clk)
{
	if (!clk)
		return false;

	return clk_core_is_prepared(clk->core);
}
static bool clk_core_is_enabled(struct clk_core *clk)
{
	int ret;

	if (!clk)
		return false;

	/*
	 * .is_enabled is only mandatory for clocks that gate;
	 * fall back to software usage counter if .is_enabled is missing
	 */
	if (!clk->ops->is_enabled) {
		ret = clk->enable_count ? 1 : 0;
		goto out;
	}

	ret = clk->ops->is_enabled(clk->hw);
out:
	return !!ret;
}

bool __clk_is_enabled(struct clk *clk)
{
	if (!clk)
		return false;

	return clk_core_is_enabled(clk->core);
}
EXPORT_SYMBOL_GPL(__clk_is_enabled);
static struct clk_core *__clk_lookup_subtree(const char *name,
					     struct clk_core *clk)
{
	struct clk_core *child;
	struct clk_core *ret;

	if (!strcmp(clk->name, name))
		return clk;

	hlist_for_each_entry(child, &clk->children, child_node) {
		ret = __clk_lookup_subtree(name, child);
		if (ret)
			return ret;
	}

	return NULL;
}
static struct clk_core *clk_core_lookup(const char *name)
{
	struct clk_core *root_clk;
	struct clk_core *ret;

	if (!name)
		return NULL;

	/* search the 'proper' clk tree first */
	hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	/* if not found, then search the orphan tree */
	hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	return NULL;
}
static bool mux_is_better_rate(unsigned long rate, unsigned long now,
			       unsigned long best, unsigned long flags)
{
	if (flags & CLK_MUX_ROUND_CLOSEST)
		return abs(now - rate) < abs(best - rate);

	return now <= rate && now > best;
}
static long
clk_mux_determine_rate_flags(struct clk_hw *hw, unsigned long rate,
			     unsigned long min_rate,
			     unsigned long max_rate,
			     unsigned long *best_parent_rate,
			     struct clk_hw **best_parent_p,
			     unsigned long flags)
{
	struct clk_core *core = hw->core, *parent, *best_parent = NULL;
	int i, num_parents;
	unsigned long parent_rate, best = 0;

	/* if NO_REPARENT flag set, pass through to current parent */
	if (core->flags & CLK_SET_RATE_NO_REPARENT) {
		parent = core->parent;
		if (core->flags & CLK_SET_RATE_PARENT)
			best = __clk_determine_rate(parent ? parent->hw : NULL,
						    rate, min_rate, max_rate);
		else if (parent)
			best = clk_core_get_rate_nolock(parent);
		else
			best = clk_core_get_rate_nolock(core);
		goto out;
	}

	/* find the parent that can provide the fastest rate <= rate */
	num_parents = core->num_parents;
	for (i = 0; i < num_parents; i++) {
		parent = clk_core_get_parent_by_index(core, i);
		if (!parent)
			continue;
		if (core->flags & CLK_SET_RATE_PARENT)
			parent_rate = __clk_determine_rate(parent->hw, rate,
							   min_rate,
							   max_rate);
		else
			parent_rate = clk_core_get_rate_nolock(parent);
		if (mux_is_better_rate(rate, parent_rate, best, flags)) {
			best_parent = parent;
			best = parent_rate;
		}
	}

out:
	if (best_parent)
		*best_parent_p = best_parent->hw;
	*best_parent_rate = best;

	return best;
}
struct clk *__clk_lookup(const char *name)
{
	struct clk_core *core = clk_core_lookup(name);

	return !core ? NULL : core->hw->clk;
}
static void clk_core_get_boundaries(struct clk_core *clk,
				    unsigned long *min_rate,
				    unsigned long *max_rate)
{
	struct clk *clk_user;

	*min_rate = 0;
	*max_rate = ULONG_MAX;

	hlist_for_each_entry(clk_user, &clk->clks, clks_node)
		*min_rate = max(*min_rate, clk_user->min_rate);

	hlist_for_each_entry(clk_user, &clk->clks, clks_node)
		*max_rate = min(*max_rate, clk_user->max_rate);
}
/*
 * Helper for finding best parent to provide a given frequency.  This can be
 * used directly as a determine_rate callback (e.g. for a mux), or from a more
 * complex clock that may combine a mux with other operations.
 */
long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long min_rate,
			      unsigned long max_rate,
			      unsigned long *best_parent_rate,
			      struct clk_hw **best_parent_p)
{
	return clk_mux_determine_rate_flags(hw, rate, min_rate, max_rate,
					    best_parent_rate,
					    best_parent_p, 0);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);
long __clk_mux_determine_rate_closest(struct clk_hw *hw, unsigned long rate,
				      unsigned long min_rate,
				      unsigned long max_rate,
				      unsigned long *best_parent_rate,
				      struct clk_hw **best_parent_p)
{
	return clk_mux_determine_rate_flags(hw, rate, min_rate, max_rate,
					    best_parent_rate, best_parent_p,
					    CLK_MUX_ROUND_CLOSEST);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);
/***        clk api        ***/

static void clk_core_unprepare(struct clk_core *clk)
{
	if (!clk)
		return;

	if (WARN_ON(clk->prepare_count == 0))
		return;

	if (--clk->prepare_count > 0)
		return;

	WARN_ON(clk->enable_count > 0);

	trace_clk_unprepare(clk);

	if (clk->ops->unprepare)
		clk->ops->unprepare(clk->hw);

	trace_clk_unprepare_complete(clk);
	clk_core_unprepare(clk->parent);
}
/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: the clk being unprepared
 *
 * clk_unprepare may sleep, which differentiates it from clk_disable.  In a
 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
 * if the operation may sleep.  One example is a clk which is accessed over
 * I2C.  In the complex case a clk gate operation may require a fast and a slow
 * part.  It is for this reason that clk_unprepare and clk_disable are not
 * mutually exclusive.  In fact clk_disable must be called before
 * clk_unprepare.
 */
void clk_unprepare(struct clk *clk)
{
	if (IS_ERR_OR_NULL(clk))
		return;

	clk_prepare_lock();
	clk_core_unprepare(clk->core);
	clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_unprepare);
static int clk_core_prepare(struct clk_core *clk)
{
	int ret = 0;

	if (!clk)
		return 0;

	if (clk->prepare_count == 0) {
		ret = clk_core_prepare(clk->parent);
		if (ret)
			return ret;

		trace_clk_prepare(clk);

		if (clk->ops->prepare)
			ret = clk->ops->prepare(clk->hw);

		trace_clk_prepare_complete(clk);

		if (ret) {
			clk_core_unprepare(clk->parent);
			return ret;
		}
	}

	clk->prepare_count++;

	return 0;
}
/**
 * clk_prepare - prepare a clock source
 * @clk: the clk being prepared
 *
 * clk_prepare may sleep, which differentiates it from clk_enable.  In a simple
 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
 * operation may sleep.  One example is a clk which is accessed over I2C.  In
 * the complex case a clk ungate operation may require a fast and a slow part.
 * It is for this reason that clk_prepare and clk_enable are not mutually
 * exclusive.  In fact clk_prepare must be called before clk_enable.
 * Returns 0 on success, a negative errno otherwise.
 */
int clk_prepare(struct clk *clk)
{
	int ret;

	if (!clk)
		return 0;

	clk_prepare_lock();
	ret = clk_core_prepare(clk->core);
	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_prepare);
static void clk_core_disable(struct clk_core *clk)
{
	if (!clk)
		return;

	if (WARN_ON(clk->enable_count == 0))
		return;

	if (--clk->enable_count > 0)
		return;

	trace_clk_disable(clk);

	if (clk->ops->disable)
		clk->ops->disable(clk->hw);

	trace_clk_disable_complete(clk);

	clk_core_disable(clk->parent);
}

static void __clk_disable(struct clk *clk)
{
	if (!clk)
		return;

	clk_core_disable(clk->core);
}
/**
 * clk_disable - gate a clock
 * @clk: the clk being gated
 *
 * clk_disable must not sleep, which differentiates it from clk_unprepare.  In
 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
 * clk if the operation is fast and will never sleep.  One example is a
 * SoC-internal clk which is controlled via simple register writes.  In the
 * complex case a clk gate operation may require a fast and a slow part.  It is
 * for this reason that clk_unprepare and clk_disable are not mutually
 * exclusive.  In fact clk_disable must be called before clk_unprepare.
 */
void clk_disable(struct clk *clk)
{
	unsigned long flags;

	if (IS_ERR_OR_NULL(clk))
		return;

	flags = clk_enable_lock();
	__clk_disable(clk);
	clk_enable_unlock(flags);
}
EXPORT_SYMBOL_GPL(clk_disable);
static int clk_core_enable(struct clk_core *clk)
{
	int ret = 0;

	if (!clk)
		return 0;

	if (WARN_ON(clk->prepare_count == 0))
		return -ESHUTDOWN;

	if (clk->enable_count == 0) {
		ret = clk_core_enable(clk->parent);
		if (ret)
			return ret;

		trace_clk_enable(clk);

		if (clk->ops->enable)
			ret = clk->ops->enable(clk->hw);

		trace_clk_enable_complete(clk);

		if (ret) {
			clk_core_disable(clk->parent);
			return ret;
		}
	}

	clk->enable_count++;
	return 0;
}

static int __clk_enable(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_enable(clk->core);
}
/**
 * clk_enable - ungate a clock
 * @clk: the clk being ungated
 *
 * clk_enable must not sleep, which differentiates it from clk_prepare.  In a
 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
 * if the operation will never sleep.  One example is a SoC-internal clk which
 * is controlled via simple register writes.  In the complex case a clk ungate
 * operation may require a fast and a slow part.  It is for this reason that
 * clk_enable and clk_prepare are not mutually exclusive.  In fact clk_prepare
 * must be called before clk_enable.  Returns 0 on success, a negative errno
 * otherwise.
 */
int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	flags = clk_enable_lock();
	ret = __clk_enable(clk);
	clk_enable_unlock(flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);
static unsigned long clk_core_round_rate_nolock(struct clk_core *clk,
						unsigned long rate,
						unsigned long min_rate,
						unsigned long max_rate)
{
	unsigned long parent_rate = 0;
	struct clk_core *parent;
	struct clk_hw *parent_hw;

	lockdep_assert_held(&prepare_lock);

	if (!clk)
		return 0;

	parent = clk->parent;
	if (parent)
		parent_rate = parent->rate;

	if (clk->ops->determine_rate) {
		parent_hw = parent ? parent->hw : NULL;
		return clk->ops->determine_rate(clk->hw, rate,
						min_rate, max_rate,
						&parent_rate, &parent_hw);
	} else if (clk->ops->round_rate)
		return clk->ops->round_rate(clk->hw, rate, &parent_rate);
	else if (clk->flags & CLK_SET_RATE_PARENT)
		return clk_core_round_rate_nolock(clk->parent, rate, min_rate,
						  max_rate);
	else
		return clk->rate;
}
/**
 * __clk_determine_rate - get the closest rate actually supported by a clock
 * @hw: determine the rate of this clock
 * @rate: target rate
 * @min_rate: returned rate must be greater than this rate
 * @max_rate: returned rate must be less than this rate
 *
 * Caller must hold prepare_lock.  Useful for clk_ops such as .set_rate and
 * .determine_rate.
 */
unsigned long __clk_determine_rate(struct clk_hw *hw,
				   unsigned long rate,
				   unsigned long min_rate,
				   unsigned long max_rate)
{
	if (!hw)
		return 0;

	return clk_core_round_rate_nolock(hw->core, rate, min_rate, max_rate);
}
EXPORT_SYMBOL_GPL(__clk_determine_rate);
/**
 * __clk_round_rate - round the given rate for a clk
 * @clk: round the rate of this clock
 * @rate: the rate which is to be rounded
 *
 * Caller must hold prepare_lock.  Useful for clk_ops such as .set_rate
 */
unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long min_rate;
	unsigned long max_rate;

	if (!clk)
		return 0;

	clk_core_get_boundaries(clk->core, &min_rate, &max_rate);

	return clk_core_round_rate_nolock(clk->core, rate, min_rate, max_rate);
}
EXPORT_SYMBOL_GPL(__clk_round_rate);
/**
 * clk_round_rate - round the given rate for a clk
 * @clk: the clk for which we are rounding a rate
 * @rate: the rate which is to be rounded
 *
 * Takes in a rate as input and rounds it to a rate that the clk can actually
 * use, which is then returned.  If clk doesn't support round_rate operation
 * then the parent rate is returned.
 */
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long ret;

	if (!clk)
		return 0;

	clk_prepare_lock();
	ret = __clk_round_rate(clk, rate);
	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_round_rate);
/**
 * __clk_notify - call clk notifier chain
 * @clk: struct clk * that is changing rate
 * @msg: clk notifier type (see include/linux/clk.h)
 * @old_rate: old clk rate
 * @new_rate: new clk rate
 *
 * Triggers a notifier call chain on the clk rate-change notification
 * for 'clk'.  Passes a pointer to the struct clk and the previous
 * and current rates to the notifier callback.  Intended to be called by
 * internal clock code only.  Returns NOTIFY_DONE from the last driver
 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
 * a driver returns that.
 */
static int __clk_notify(struct clk_core *clk, unsigned long msg,
		unsigned long old_rate, unsigned long new_rate)
{
	struct clk_notifier *cn;
	struct clk_notifier_data cnd;
	int ret = NOTIFY_DONE;

	cnd.old_rate = old_rate;
	cnd.new_rate = new_rate;

	list_for_each_entry(cn, &clk_notifier_list, node) {
		if (cn->clk->core == clk) {
			cnd.clk = cn->clk;
			ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
					&cnd);
		}
	}

	return ret;
}
/**
 * __clk_recalc_accuracies
 * @clk: first clk in the subtree
 *
 * Walks the subtree of clks starting with clk and recalculates accuracies as
 * it goes.  Note that if a clk does not implement the .recalc_accuracy
 * callback then it is assumed that the clock will take on the accuracy of its
 * parent.
 *
 * Caller must hold prepare_lock.
 */
static void __clk_recalc_accuracies(struct clk_core *clk)
{
	unsigned long parent_accuracy = 0;
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	if (clk->parent)
		parent_accuracy = clk->parent->accuracy;

	if (clk->ops->recalc_accuracy)
		clk->accuracy = clk->ops->recalc_accuracy(clk->hw,
							  parent_accuracy);
	else
		clk->accuracy = parent_accuracy;

	hlist_for_each_entry(child, &clk->children, child_node)
		__clk_recalc_accuracies(child);
}
static long clk_core_get_accuracy(struct clk_core *clk)
{
	unsigned long accuracy;

	clk_prepare_lock();
	if (clk && (clk->flags & CLK_GET_ACCURACY_NOCACHE))
		__clk_recalc_accuracies(clk);

	accuracy = __clk_get_accuracy(clk);
	clk_prepare_unlock();

	return accuracy;
}
/**
 * clk_get_accuracy - return the accuracy of clk
 * @clk: the clk whose accuracy is being returned
 *
 * Simply returns the cached accuracy of the clk, unless
 * CLK_GET_ACCURACY_NOCACHE flag is set, which means a recalc_rate will be
 * issued.
 * If clk is NULL then returns 0.
 */
long clk_get_accuracy(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_get_accuracy(clk->core);
}
EXPORT_SYMBOL_GPL(clk_get_accuracy);
static unsigned long clk_recalc(struct clk_core *clk,
				unsigned long parent_rate)
{
	if (clk->ops->recalc_rate)
		return clk->ops->recalc_rate(clk->hw, parent_rate);
	return parent_rate;
}
/**
 * __clk_recalc_rates
 * @clk: first clk in the subtree
 * @msg: notification type (see include/linux/clk.h)
 *
 * Walks the subtree of clks starting with clk and recalculates rates as it
 * goes.  Note that if a clk does not implement the .recalc_rate callback then
 * it is assumed that the clock will take on the rate of its parent.
 *
 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
 * if necessary.
 *
 * Caller must hold prepare_lock.
 */
static void __clk_recalc_rates(struct clk_core *clk, unsigned long msg)
{
	unsigned long old_rate;
	unsigned long parent_rate = 0;
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	old_rate = clk->rate;

	if (clk->parent)
		parent_rate = clk->parent->rate;

	clk->rate = clk_recalc(clk, parent_rate);

	/*
	 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
	 * & ABORT_RATE_CHANGE notifiers
	 */
	if (clk->notifier_count && msg)
		__clk_notify(clk, msg, old_rate, clk->rate);

	hlist_for_each_entry(child, &clk->children, child_node)
		__clk_recalc_rates(child, msg);
}
static unsigned long clk_core_get_rate(struct clk_core *clk)
{
	unsigned long rate;

	clk_prepare_lock();

	if (clk && (clk->flags & CLK_GET_RATE_NOCACHE))
		__clk_recalc_rates(clk, 0);

	rate = clk_core_get_rate_nolock(clk);
	clk_prepare_unlock();

	return rate;
}
/**
 * clk_get_rate - return the rate of clk
 * @clk: the clk whose rate is being returned
 *
 * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
 * is set, which means a recalc_rate will be issued.
 * If clk is NULL then returns 0.
 */
unsigned long clk_get_rate(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_get_rate(clk->core);
}
EXPORT_SYMBOL_GPL(clk_get_rate);
static int clk_fetch_parent_index(struct clk_core *clk,
				  struct clk_core *parent)
{
	int i;

	if (!clk->parents) {
		clk->parents = kcalloc(clk->num_parents,
				       sizeof(struct clk *), GFP_KERNEL);
		if (!clk->parents)
			return -ENOMEM;
	}

	/*
	 * find index of new parent clock using cached parent ptrs,
	 * or if not yet cached, use string name comparison and cache
	 * them now to avoid future calls to clk_core_lookup.
	 */
	for (i = 0; i < clk->num_parents; i++) {
		if (clk->parents[i] == parent)
			return i;

		if (clk->parents[i])
			continue;

		if (!strcmp(clk->parent_names[i], parent->name)) {
			clk->parents[i] = clk_core_lookup(parent->name);
			return i;
		}
	}

	return -EINVAL;
}
static void clk_reparent(struct clk_core *clk, struct clk_core *new_parent)
{
	hlist_del(&clk->child_node);

	if (new_parent) {
		/* avoid duplicate POST_RATE_CHANGE notifications */
		if (new_parent->new_child == clk)
			new_parent->new_child = NULL;

		hlist_add_head(&clk->child_node, &new_parent->children);
	} else {
		hlist_add_head(&clk->child_node, &clk_orphan_list);
	}

	clk->parent = new_parent;
}
static struct clk_core *__clk_set_parent_before(struct clk_core *clk,
						struct clk_core *parent)
{
	unsigned long flags;
	struct clk_core *old_parent = clk->parent;

	/*
	 * Migrate prepare state between parents and prevent race with
	 * clk_enable().
	 *
	 * If the clock is not prepared, then a race with
	 * clk_enable/disable() is impossible since we already have the
	 * prepare lock (future calls to clk_enable() need to be preceded by
	 * a clk_prepare()).
	 *
	 * If the clock is prepared, migrate the prepared state to the new
	 * parent and also protect against a race with clk_enable() by
	 * forcing the clock and the new parent on.  This ensures that all
	 * future calls to clk_enable() are practically NOPs with respect to
	 * hardware and software states.
	 *
	 * See also: Comment for clk_set_parent() below.
	 */
	if (clk->prepare_count) {
		clk_core_prepare(parent);
		flags = clk_enable_lock();
		clk_core_enable(parent);
		clk_core_enable(clk);
		clk_enable_unlock(flags);
	}

	/* update the clk tree topology */
	flags = clk_enable_lock();
	clk_reparent(clk, parent);
	clk_enable_unlock(flags);

	return old_parent;
}
static void __clk_set_parent_after(struct clk_core *core,
				   struct clk_core *parent,
				   struct clk_core *old_parent)
{
	unsigned long flags;

	/*
	 * Finish the migration of prepare state and undo the changes done
	 * for preventing a race with clk_enable().
	 */
	if (core->prepare_count) {
		flags = clk_enable_lock();
		clk_core_disable(core);
		clk_core_disable(old_parent);
		clk_enable_unlock(flags);
		clk_core_unprepare(old_parent);
	}
}
static int __clk_set_parent(struct clk_core *clk, struct clk_core *parent,
			    u8 p_index)
{
	unsigned long flags;
	int ret = 0;
	struct clk_core *old_parent;

	old_parent = __clk_set_parent_before(clk, parent);

	trace_clk_set_parent(clk, parent);

	/* change clock input source */
	if (parent && clk->ops->set_parent)
		ret = clk->ops->set_parent(clk->hw, p_index);

	trace_clk_set_parent_complete(clk, parent);

	if (ret) {
		flags = clk_enable_lock();
		clk_reparent(clk, old_parent);
		clk_enable_unlock(flags);

		if (clk->prepare_count) {
			flags = clk_enable_lock();
			clk_core_disable(clk);
			clk_core_disable(parent);
			clk_enable_unlock(flags);
			clk_core_unprepare(parent);
		}
		return ret;
	}

	__clk_set_parent_after(clk, parent, old_parent);

	return 0;
}
/**
 * __clk_speculate_rates
 * @clk: first clk in the subtree
 * @parent_rate: the "future" rate of clk's parent
 *
 * Walks the subtree of clks starting with clk, speculating rates as it
 * goes and firing off PRE_RATE_CHANGE notifications as necessary.
 *
 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
 * pre-rate change notifications and returns early if no clks in the
 * subtree have subscribed to the notifications.  Note that if a clk does not
 * implement the .recalc_rate callback then it is assumed that the clock will
 * take on the rate of its parent.
 *
 * Caller must hold prepare_lock.
 */
static int __clk_speculate_rates(struct clk_core *clk,
				 unsigned long parent_rate)
{
	struct clk_core *child;
	unsigned long new_rate;
	int ret = NOTIFY_DONE;

	lockdep_assert_held(&prepare_lock);

	new_rate = clk_recalc(clk, parent_rate);

	/* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
	if (clk->notifier_count)
		ret = __clk_notify(clk, PRE_RATE_CHANGE, clk->rate, new_rate);

	if (ret & NOTIFY_STOP_MASK) {
		pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n",
				__func__, clk->name, ret);
		goto out;
	}

	hlist_for_each_entry(child, &clk->children, child_node) {
		ret = __clk_speculate_rates(child, new_rate);
		if (ret & NOTIFY_STOP_MASK)
			break;
	}

out:
	return ret;
}
static void clk_calc_subtree(struct clk_core *clk, unsigned long new_rate,
			     struct clk_core *new_parent, u8 p_index)
{
	struct clk_core *child;

	clk->new_rate = new_rate;
	clk->new_parent = new_parent;
	clk->new_parent_index = p_index;
	/* include clk in new parent's PRE_RATE_CHANGE notifications */
	clk->new_child = NULL;
	if (new_parent && new_parent != clk->parent)
		new_parent->new_child = clk;

	hlist_for_each_entry(child, &clk->children, child_node) {
		child->new_rate = clk_recalc(child, new_rate);
		clk_calc_subtree(child, child->new_rate, NULL, 0);
	}
}
/*
 * calculate the new rates returning the topmost clock that has to be
 * changed.
 */
static struct clk_core *clk_calc_new_rates(struct clk_core *clk,
					   unsigned long rate)
{
	struct clk_core *top = clk;
	struct clk_core *old_parent, *parent;
	struct clk_hw *parent_hw;
	unsigned long best_parent_rate = 0;
	unsigned long new_rate;
	unsigned long min_rate;
	unsigned long max_rate;
	int p_index = 0;
	long ret;

	/* sanity */
	if (IS_ERR_OR_NULL(clk))
		return NULL;

	/* save parent rate, if it exists */
	parent = old_parent = clk->parent;
	if (parent)
		best_parent_rate = parent->rate;

	clk_core_get_boundaries(clk, &min_rate, &max_rate);

	/* find the closest rate and parent clk/rate */
	if (clk->ops->determine_rate) {
		parent_hw = parent ? parent->hw : NULL;
		ret = clk->ops->determine_rate(clk->hw, rate,
					       min_rate,
					       max_rate,
					       &best_parent_rate,
					       &parent_hw);
		if (ret < 0)
			return NULL;

		new_rate = ret;
		parent = parent_hw ? parent_hw->core : NULL;
	} else if (clk->ops->round_rate) {
		ret = clk->ops->round_rate(clk->hw, rate,
					   &best_parent_rate);
		if (ret < 0)
			return NULL;

		new_rate = ret;
		if (new_rate < min_rate || new_rate > max_rate)
			return NULL;
	} else if (!parent || !(clk->flags & CLK_SET_RATE_PARENT)) {
		/* pass-through clock without adjustable parent */
		clk->new_rate = clk->rate;
		return NULL;
	} else {
		/* pass-through clock with adjustable parent */
		top = clk_calc_new_rates(parent, rate);
		new_rate = parent->new_rate;
		goto out;
	}

	/* some clocks must be gated to change parent */
	if (parent != old_parent &&
	    (clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count) {
		pr_debug("%s: %s not gated but wants to reparent\n",
			 __func__, clk->name);
		return NULL;
	}

	/* try finding the new parent index */
	if (parent && clk->num_parents > 1) {
		p_index = clk_fetch_parent_index(clk, parent);
		if (p_index < 0) {
			pr_debug("%s: clk %s cannot be parent of clk %s\n",
				 __func__, parent->name, clk->name);
			return NULL;
		}
	}

	if ((clk->flags & CLK_SET_RATE_PARENT) && parent &&
	    best_parent_rate != parent->rate)
		top = clk_calc_new_rates(parent, best_parent_rate);

out:
	clk_calc_subtree(clk, new_rate, parent, p_index);

	return top;
}
/*
 * Notify about rate changes in a subtree. Always walk down the whole tree
 * so that in case of an error we can walk down the whole tree again and
 * abort the change.
 */
static struct clk_core *clk_propagate_rate_change(struct clk_core *clk,
						  unsigned long event)
{
	struct clk_core *child, *tmp_clk, *fail_clk = NULL;
	int ret = NOTIFY_DONE;

	if (clk->rate == clk->new_rate)
		return NULL;

	if (clk->notifier_count) {
		ret = __clk_notify(clk, event, clk->rate, clk->new_rate);
		if (ret & NOTIFY_STOP_MASK)
			fail_clk = clk;
	}

	hlist_for_each_entry(child, &clk->children, child_node) {
		/* Skip children who will be reparented to another clock */
		if (child->new_parent && child->new_parent != clk)
			continue;
		tmp_clk = clk_propagate_rate_change(child, event);
		if (tmp_clk)
			fail_clk = tmp_clk;
	}

	/* handle the new child who might not be in clk->children yet */
	if (clk->new_child) {
		tmp_clk = clk_propagate_rate_change(clk->new_child, event);
		if (tmp_clk)
			fail_clk = tmp_clk;
	}

	return fail_clk;
}
/*
 * walk down a subtree and set the new rates notifying the rate
 * change on the way
 */
static void clk_change_rate(struct clk_core *clk)
{
	struct clk_core *child;
	struct hlist_node *tmp;
	unsigned long old_rate;
	unsigned long best_parent_rate = 0;
	bool skip_set_rate = false;
	struct clk_core *old_parent;

	old_rate = clk->rate;

	if (clk->new_parent)
		best_parent_rate = clk->new_parent->rate;
	else if (clk->parent)
		best_parent_rate = clk->parent->rate;

	if (clk->new_parent && clk->new_parent != clk->parent) {
		old_parent = __clk_set_parent_before(clk, clk->new_parent);
		trace_clk_set_parent(clk, clk->new_parent);

		if (clk->ops->set_rate_and_parent) {
			skip_set_rate = true;
			clk->ops->set_rate_and_parent(clk->hw, clk->new_rate,
					best_parent_rate,
					clk->new_parent_index);
		} else if (clk->ops->set_parent) {
			clk->ops->set_parent(clk->hw, clk->new_parent_index);
		}

		trace_clk_set_parent_complete(clk, clk->new_parent);
		__clk_set_parent_after(clk, clk->new_parent, old_parent);
	}

	trace_clk_set_rate(clk, clk->new_rate);

	if (!skip_set_rate && clk->ops->set_rate)
		clk->ops->set_rate(clk->hw, clk->new_rate, best_parent_rate);

	trace_clk_set_rate_complete(clk, clk->new_rate);

	clk->rate = clk_recalc(clk, best_parent_rate);

	if (clk->notifier_count && old_rate != clk->rate)
		__clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);

	/*
	 * Use safe iteration, as change_rate can actually swap parents
	 * for certain clock types.
	 */
	hlist_for_each_entry_safe(child, tmp, &clk->children, child_node) {
		/* Skip children who will be reparented to another clock */
		if (child->new_parent && child->new_parent != clk)
			continue;
		clk_change_rate(child);
	}

	/* handle the new child who might not be in clk->children yet */
	if (clk->new_child)
		clk_change_rate(clk->new_child);
}
static int clk_core_set_rate_nolock(struct clk_core *clk,
				    unsigned long req_rate)
{
	struct clk_core *top, *fail_clk;
	unsigned long rate = req_rate;
	int ret = 0;

	if (!clk)
		return 0;

	/* bail early if nothing to do */
	if (rate == clk_core_get_rate_nolock(clk))
		return 0;

	if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count)
		return -EBUSY;

	/* calculate new rates and get the topmost changed clock */
	top = clk_calc_new_rates(clk, rate);
	if (!top)
		return -EINVAL;

	/* notify that we are about to change rates */
	fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
	if (fail_clk) {
		pr_debug("%s: failed to set %s rate\n", __func__,
				fail_clk->name);
		clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
		return -EBUSY;
	}

	/* change the rates */
	clk_change_rate(top);

	clk->req_rate = req_rate;

	return ret;
}
/**
 * clk_set_rate - specify a new rate for clk
 * @clk: the clk whose rate is being changed
 * @rate: the new rate for clk
 *
 * In the simplest case clk_set_rate will only adjust the rate of clk.
 *
 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
 * propagate up to clk's parent; whether or not this happens depends on the
 * outcome of clk's .round_rate implementation.  If *parent_rate is unchanged
 * after calling .round_rate then upstream parent propagation is ignored.  If
 * *parent_rate comes back with a new rate for clk's parent then we propagate
 * up to clk's parent and set its rate.  Upward propagation will continue
 * until either a clk does not support the CLK_SET_RATE_PARENT flag or
 * .round_rate stops requesting changes to clk's parent_rate.
 *
 * Rate changes are accomplished via tree traversal that also recalculates the
 * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
 *
 * Returns 0 on success, a negative errno otherwise.
 */
int clk_set_rate(struct clk *clk, unsigned long rate)
{
	int ret;

	if (!clk)
		return 0;

	/* prevent racing with updates to the clock topology */
	clk_prepare_lock();

	ret = clk_core_set_rate_nolock(clk->core, rate);

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate);
/**
 * clk_set_rate_range - set a rate range for a clock source
 * @clk: clock source
 * @min: desired minimum clock rate in Hz, inclusive
 * @max: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
{
	int ret = 0;

	if (!clk)
		return 0;

	if (min > max) {
		pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n",
		       __func__, clk->core->name, clk->dev_id, clk->con_id,
		       min, max);
		return -EINVAL;
	}

	clk_prepare_lock();

	if (min != clk->min_rate || max != clk->max_rate) {
		clk->min_rate = min;
		clk->max_rate = max;
		ret = clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
	}

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate_range);
/**
 * clk_set_min_rate - set a minimum clock rate for a clock source
 * @clk: clock source
 * @rate: desired minimum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_min_rate(struct clk *clk, unsigned long rate)
{
	if (!clk)
		return 0;

	return clk_set_rate_range(clk, rate, clk->max_rate);
}
EXPORT_SYMBOL_GPL(clk_set_min_rate);
/**
 * clk_set_max_rate - set a maximum clock rate for a clock source
 * @clk: clock source
 * @rate: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_max_rate(struct clk *clk, unsigned long rate)
{
	if (!clk)
		return 0;

	return clk_set_rate_range(clk, clk->min_rate, rate);
}
EXPORT_SYMBOL_GPL(clk_set_max_rate);
/**
 * clk_get_parent - return the parent of a clk
 * @clk: the clk whose parent gets returned
 *
 * Simply returns clk->parent.  Returns NULL if clk is NULL.
 */
struct clk *clk_get_parent(struct clk *clk)
{
	struct clk *parent;

	clk_prepare_lock();
	parent = __clk_get_parent(clk);
	clk_prepare_unlock();

	return parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);
/*
 * .get_parent is mandatory for clocks with multiple possible parents.  It is
 * optional for single-parent clocks.  Always call .get_parent if it is
 * available and WARN if it is missing for multi-parent clocks.
 *
 * For single-parent clocks without .get_parent, first check to see if the
 * .parents array exists, and if so use it to avoid an expensive tree
 * traversal.  If .parents does not exist then walk the tree.
 */
static struct clk_core *__clk_init_parent(struct clk_core *clk)
{
	struct clk_core *ret = NULL;
	u8 index;

	/* handle the trivial cases */

	if (!clk->num_parents)
		goto out;

	if (clk->num_parents == 1) {
		if (IS_ERR_OR_NULL(clk->parent))
			clk->parent = clk_core_lookup(clk->parent_names[0]);
		ret = clk->parent;
		goto out;
	}

	if (!clk->ops->get_parent) {
		WARN(!clk->ops->get_parent,
			"%s: multi-parent clocks must implement .get_parent\n",
			__func__);
		goto out;
	}

	/*
	 * Do our best to cache parent clocks in clk->parents.  This prevents
	 * unnecessary and expensive lookups.  We don't set clk->parent here;
	 * that is done by the calling function.
	 */

	index = clk->ops->get_parent(clk->hw);

	if (!clk->parents)
		clk->parents =
			kcalloc(clk->num_parents, sizeof(struct clk *),
					GFP_KERNEL);

	ret = clk_core_get_parent_by_index(clk, index);

out:
	return ret;
}
static void clk_core_reparent(struct clk_core *clk,
			      struct clk_core *new_parent)
{
	clk_reparent(clk, new_parent);
	__clk_recalc_accuracies(clk);
	__clk_recalc_rates(clk, POST_RATE_CHANGE);
}
/**
 * clk_has_parent - check if a clock is a possible parent for another
 * @clk: clock source
 * @parent: parent clock source
 *
 * This function can be used in drivers that need to check that a clock can be
 * the parent of another without actually changing the parent.
 *
 * Returns true if @parent is a possible parent for @clk, false otherwise.
 */
bool clk_has_parent(struct clk *clk, struct clk *parent)
{
	struct clk_core *core, *parent_core;
	unsigned int i;

	/* NULL clocks should be nops, so return success if either is NULL. */
	if (!clk || !parent)
		return true;

	core = clk->core;
	parent_core = parent->core;

	/* Optimize for the case where the parent is already the parent. */
	if (core->parent == parent_core)
		return true;

	for (i = 0; i < core->num_parents; i++)
		if (strcmp(core->parent_names[i], parent_core->name) == 0)
			return true;

	return false;
}
EXPORT_SYMBOL_GPL(clk_has_parent);
static int clk_core_set_parent(struct clk_core *clk, struct clk_core *parent)
{
	int ret = 0;
	int p_index = 0;
	unsigned long p_rate = 0;

	if (!clk)
		return 0;

	/* prevent racing with updates to the clock topology */
	clk_prepare_lock();

	if (clk->parent == parent)
		goto out;

	/* verify ops for multi-parent clks */
	if ((clk->num_parents > 1) && (!clk->ops->set_parent)) {
		ret = -ENOSYS;
		goto out;
	}

	/* check that we are allowed to re-parent if the clock is in use */
	if ((clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count) {
		ret = -EBUSY;
		goto out;
	}

	/* try finding the new parent index */
	if (parent) {
		p_index = clk_fetch_parent_index(clk, parent);
		p_rate = parent->rate;
		if (p_index < 0) {
			pr_debug("%s: clk %s cannot be parent of clk %s\n",
					__func__, parent->name, clk->name);
			ret = p_index;
			goto out;
		}
	}

	/* propagate PRE_RATE_CHANGE notifications */
	ret = __clk_speculate_rates(clk, p_rate);

	/* abort if a driver objects */
	if (ret & NOTIFY_STOP_MASK)
		goto out;

	/* do the re-parent */
	ret = __clk_set_parent(clk, parent, p_index);

	/* propagate rate and accuracy recalculation accordingly */
	if (ret) {
		__clk_recalc_rates(clk, ABORT_RATE_CHANGE);
	} else {
		__clk_recalc_rates(clk, POST_RATE_CHANGE);
		__clk_recalc_accuracies(clk);
	}

out:
	clk_prepare_unlock();

	return ret;
}
/**
 * clk_set_parent - switch the parent of a mux clk
 * @clk: the mux clk whose input we are switching
 * @parent: the new input to clk
 *
 * Re-parent clk to use parent as its new input source.  If clk is in
 * prepared state, the clk will get enabled for the duration of this call.  If
 * that's not acceptable for a specific clk (e.g. the consumer can't handle
 * that, the reparenting is glitchy in hardware, etc), use the
 * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
 *
 * After successfully changing clk's parent clk_set_parent will update the
 * clk topology, sysfs topology and propagate rate recalculation via
 * __clk_recalc_rates.
 *
 * Returns 0 on success, a negative errno otherwise.
 */
int clk_set_parent(struct clk *clk, struct clk *parent)
{
	if (!clk)
		return 0;

	return clk_core_set_parent(clk->core, parent ? parent->core : NULL);
}
EXPORT_SYMBOL_GPL(clk_set_parent);
/**
 * clk_set_phase - adjust the phase shift of a clock signal
 * @clk: clock signal source
 * @degrees: number of degrees the signal is shifted
 *
 * Shifts the phase of a clock signal by the specified
 * degrees.  Returns 0 on success, a negative errno otherwise.
 *
 * This function makes no distinction about the input or reference
 * signal that we adjust the clock signal phase against.  For example,
 * for phase-locked-loop clock signal generators we may shift phase with
 * respect to the feedback clock signal input, but for other cases the
 * clock phase may be shifted with respect to some other, unspecified
 * signal.
 *
 * Additionally the concept of phase shift does not propagate through
 * the clock tree hierarchy, which sets it apart from clock rates and
 * clock accuracy.  A parent clock phase attribute does not have an
 * impact on the phase attribute of a child clock.
 */
int clk_set_phase(struct clk *clk, int degrees)
{
	int ret = -EINVAL;

	if (!clk)
		return 0;

	/* sanity check degrees */
	degrees %= 360;
	if (degrees < 0)
		degrees += 360;

	clk_prepare_lock();

	trace_clk_set_phase(clk->core, degrees);

	if (clk->core->ops->set_phase)
		ret = clk->core->ops->set_phase(clk->core->hw, degrees);

	trace_clk_set_phase_complete(clk->core, degrees);

	if (!ret)
		clk->core->phase = degrees;

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_phase);
static int clk_core_get_phase(struct clk_core *clk)
{
	int ret = 0;

	if (!clk)
		return 0;

	clk_prepare_lock();
	ret = clk->phase;
	clk_prepare_unlock();

	return ret;
}

/**
 * clk_get_phase - return the phase shift of a clock signal
 * @clk: clock signal source
 *
 * Returns the phase shift of a clock node in degrees, otherwise returns
 * a negative errno.
 */
int clk_get_phase(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_get_phase(clk->core);
}
EXPORT_SYMBOL_GPL(clk_get_phase);
/**
 * clk_is_match - check if two clk's point to the same hardware clock
 * @p: clk compared against q
 * @q: clk compared against p
 *
 * Returns true if the two struct clk pointers both point to the same hardware
 * clock node. Put differently, returns true if struct clk *p and struct clk *q
 * share the same struct clk_core object.
 *
 * Returns false otherwise. Note that two NULL clks are treated as matching.
 */
bool clk_is_match(const struct clk *p, const struct clk *q)
{
	/* trivial case: identical struct clk's or both NULL */
	if (p == q)
		return true;

	/* true if clk->core pointers match. Avoid dereferencing garbage */
	if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q))
		if (p->core == q->core)
			return true;

	return false;
}
EXPORT_SYMBOL_GPL(clk_is_match);
/**
 * __clk_init - initialize the data structures in a struct clk
 * @dev: device initializing this clk, placeholder for now
 * @clk: clk being initialized
 *
 * Initializes the lists in struct clk_core, queries the hardware for the
 * parent and rate and sets them both.
 */
static int __clk_init(struct device *dev, struct clk *clk_user)
{
	int i, ret = 0;
	struct clk_core *orphan;
	struct hlist_node *tmp2;
	struct clk_core *clk;
	unsigned long rate;

	if (!clk_user)
		return -EINVAL;

	clk = clk_user->core;

	clk_prepare_lock();

	/* check to see if a clock with this name is already registered */
	if (clk_core_lookup(clk->name)) {
		pr_debug("%s: clk %s already initialized\n",
				__func__, clk->name);
		ret = -EEXIST;
		goto out;
	}

	/* check that clk_ops are sane.  See Documentation/clk.txt */
	if (clk->ops->set_rate &&
	    !((clk->ops->round_rate || clk->ops->determine_rate) &&
	      clk->ops->recalc_rate)) {
		pr_warning("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
				__func__, clk->name);
		ret = -EINVAL;
		goto out;
	}

	if (clk->ops->set_parent && !clk->ops->get_parent) {
		pr_warning("%s: %s must implement .get_parent & .set_parent\n",
				__func__, clk->name);
		ret = -EINVAL;
		goto out;
	}

	if (clk->ops->set_rate_and_parent &&
			!(clk->ops->set_parent && clk->ops->set_rate)) {
		pr_warn("%s: %s must implement .set_parent & .set_rate\n",
				__func__, clk->name);
		ret = -EINVAL;
		goto out;
	}

	/* throw a WARN if any entries in parent_names are NULL */
	for (i = 0; i < clk->num_parents; i++)
		WARN(!clk->parent_names[i],
				"%s: invalid NULL in %s's .parent_names\n",
				__func__, clk->name);

	/*
	 * Allocate an array of struct clk *'s to avoid unnecessary string
	 * look-ups of clk's possible parents.  This can fail for clocks passed
	 * in to clk_init during early boot; thus any access to clk->parents[]
	 * must always check for a NULL pointer and try to populate it if
	 * necessary.
	 *
	 * If clk->parents is not NULL we skip this entire block.  This allows
	 * for clock drivers to statically initialize clk->parents.
	 */
	if (clk->num_parents > 1 && !clk->parents) {
		clk->parents = kcalloc(clk->num_parents, sizeof(struct clk *),
					GFP_KERNEL);
		/*
		 * clk_core_lookup returns NULL for parents that have not been
		 * clk_init'd; thus any access to clk->parents[] must check
		 * for a NULL pointer.  We can always perform lazy lookups for
		 * missing parents later on.
		 */
		if (clk->parents)
			for (i = 0; i < clk->num_parents; i++)
				clk->parents[i] =
					clk_core_lookup(clk->parent_names[i]);
	}

	clk->parent = __clk_init_parent(clk);

	/*
	 * Populate clk->parent if parent has already been __clk_init'd.  If
	 * parent has not yet been __clk_init'd then place clk in the orphan
	 * list.  If clk has set the CLK_IS_ROOT flag then place it in the root
	 * clk list.
	 *
	 * Every time a new clk is clk_init'd then we walk the list of orphan
	 * clocks and re-parent any that are children of the clock currently
	 * being clk_init'd.
	 */
	if (clk->parent)
		hlist_add_head(&clk->child_node,
				&clk->parent->children);
	else if (clk->flags & CLK_IS_ROOT)
		hlist_add_head(&clk->child_node, &clk_root_list);
	else
		hlist_add_head(&clk->child_node, &clk_orphan_list);

	/*
	 * Set clk's accuracy.  The preferred method is to use
	 * .recalc_accuracy.  For simple clocks and lazy developers the default
	 * fallback is to use the parent's accuracy.  If a clock doesn't have a
	 * parent (or is orphaned) then accuracy is set to zero (perfect
	 * clock).
	 */
	if (clk->ops->recalc_accuracy)
		clk->accuracy = clk->ops->recalc_accuracy(clk->hw,
					__clk_get_accuracy(clk->parent));
	else if (clk->parent)
		clk->accuracy = clk->parent->accuracy;
	else
		clk->accuracy = 0;

	/*
	 * Set clk's phase.
	 * Since a phase is by definition relative to its parent, just
	 * query the current clock phase, or just assume it's in phase.
	 */
	if (clk->ops->get_phase)
		clk->phase = clk->ops->get_phase(clk->hw);
	else
		clk->phase = 0;

	/*
	 * Set clk's rate.  The preferred method is to use .recalc_rate.  For
	 * simple clocks and lazy developers the default fallback is to use the
	 * parent's rate.  If a clock doesn't have a parent (or is orphaned)
	 * then rate is set to zero.
	 */
	if (clk->ops->recalc_rate)
		rate = clk->ops->recalc_rate(clk->hw,
				clk_core_get_rate_nolock(clk->parent));
	else if (clk->parent)
		rate = clk->parent->rate;
	else
		rate = 0;
	clk->rate = clk->req_rate = rate;

	/*
	 * walk the list of orphan clocks and reparent any that are children of
	 * this clock
	 */
	hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
		if (orphan->num_parents && orphan->ops->get_parent) {
			i = orphan->ops->get_parent(orphan->hw);
			if (!strcmp(clk->name, orphan->parent_names[i]))
				clk_core_reparent(orphan, clk);
			continue;
		}

		for (i = 0; i < orphan->num_parents; i++)
			if (!strcmp(clk->name, orphan->parent_names[i])) {
				clk_core_reparent(orphan, clk);
				break;
			}
	}

	/*
	 * optional platform-specific magic
	 *
	 * The .init callback is not used by any of the basic clock types, but
	 * exists for weird hardware that must perform initialization magic.
	 * Please consider other ways of solving initialization problems before
	 * using this callback, as its use is discouraged.
	 */
	if (clk->ops->init)
		clk->ops->init(clk->hw);

	kref_init(&clk->ref);
out:
	clk_prepare_unlock();

	if (!ret)
		clk_debug_register(clk);

	return ret;
}
struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id,
			     const char *con_id)
{
	struct clk *clk;

	/* This is to allow this function to be chained to others */
	if (!hw || IS_ERR(hw))
		return (struct clk *) hw;

	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
	if (!clk)
		return ERR_PTR(-ENOMEM);

	clk->core = hw->core;
	clk->dev_id = dev_id;
	clk->con_id = con_id;
	clk->max_rate = ULONG_MAX;

	clk_prepare_lock();
	hlist_add_head(&clk->clks_node, &hw->core->clks);
	clk_prepare_unlock();

	return clk;
}

void __clk_free_clk(struct clk *clk)
{
	clk_prepare_lock();
	hlist_del(&clk->clks_node);
	clk_prepare_unlock();

	kfree(clk);
}
/**
 * clk_register - allocate a new clock, register it and return an opaque cookie
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * clk_register is the primary interface for populating the clock tree with new
 * clock nodes.  It returns a pointer to the newly allocated struct clk which
 * cannot be dereferenced by driver code but may be used in conjunction with
 * the rest of the clock API.  In the event of an error clk_register will
 * return an error code; drivers must test for an error code after calling
 * clk_register.
 */
struct clk *clk_register(struct device *dev, struct clk_hw *hw)
{
	int i, ret;
	struct clk_core *clk;

	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
	if (!clk) {
		pr_err("%s: could not allocate clk\n", __func__);
		ret = -ENOMEM;
		goto fail_out;
	}

	clk->name = kstrdup_const(hw->init->name, GFP_KERNEL);
	if (!clk->name) {
		pr_err("%s: could not allocate clk->name\n", __func__);
		ret = -ENOMEM;
		goto fail_name;
	}
	clk->ops = hw->init->ops;
	if (dev && dev->driver)
		clk->owner = dev->driver->owner;
	clk->hw = hw;
	clk->flags = hw->init->flags;
	clk->num_parents = hw->init->num_parents;
	hw->core = clk;

	/* allocate local copy in case parent_names is __initdata */
	clk->parent_names = kcalloc(clk->num_parents, sizeof(char *),
					GFP_KERNEL);

	if (!clk->parent_names) {
		pr_err("%s: could not allocate clk->parent_names\n", __func__);
		ret = -ENOMEM;
		goto fail_parent_names;
	}

	/* copy each string name in case parent_names is __initdata */
	for (i = 0; i < clk->num_parents; i++) {
		clk->parent_names[i] = kstrdup_const(hw->init->parent_names[i],
						GFP_KERNEL);
		if (!clk->parent_names[i]) {
			pr_err("%s: could not copy parent_names\n", __func__);
			ret = -ENOMEM;
			goto fail_parent_names_copy;
		}
	}

	INIT_HLIST_HEAD(&clk->clks);

	hw->clk = __clk_create_clk(hw, NULL, NULL);
	if (IS_ERR(hw->clk)) {
		pr_err("%s: could not allocate per-user clk\n", __func__);
		ret = PTR_ERR(hw->clk);
		goto fail_parent_names_copy;
	}

	ret = __clk_init(dev, hw->clk);
	if (!ret)
		return hw->clk;

	__clk_free_clk(hw->clk);
	hw->clk = NULL;

fail_parent_names_copy:
	while (--i >= 0)
		kfree_const(clk->parent_names[i]);
	kfree(clk->parent_names);
fail_parent_names:
	kfree_const(clk->name);
fail_name:
	kfree(clk);
fail_out:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(clk_register);
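/*
 * Illustrative sketch (not part of this file): a typical provider fills in a
 * struct clk_init_data, points hw->init at it and calls clk_register().
 * "my_ops" and "my_hw" are hypothetical driver-side names:
 *
 *	static const char *my_parents[] = { "osc24m" };
 *	static struct clk_init_data my_init = {
 *		.name = "my-gate",
 *		.ops = &my_ops,
 *		.parent_names = my_parents,
 *		.num_parents = ARRAY_SIZE(my_parents),
 *	};
 *
 *	my_hw.init = &my_init;
 *	clk = clk_register(dev, &my_hw);
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 */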
/*
 * Free memory allocated for a clock.
 * Caller must hold prepare_lock.
 */
static void __clk_release(struct kref *ref)
{
	struct clk_core *clk = container_of(ref, struct clk_core, ref);
	int i = clk->num_parents;

	lockdep_assert_held(&prepare_lock);

	kfree(clk->parents);
	while (--i >= 0)
		kfree_const(clk->parent_names[i]);

	kfree(clk->parent_names);
	kfree_const(clk->name);
	kfree(clk);
}
/*
 * Empty clk_ops for unregistered clocks. These are used temporarily
 * after clk_unregister() was called on a clock and until last clock
 * consumer calls clk_put() and the struct clk object is freed.
 */
static int clk_nodrv_prepare_enable(struct clk_hw *hw)
{
	return -ENXIO;
}

static void clk_nodrv_disable_unprepare(struct clk_hw *hw)
{
	WARN_ON_ONCE(1);
}

static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	return -ENXIO;
}

static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index)
{
	return -ENXIO;
}

static const struct clk_ops clk_nodrv_ops = {
	.enable		= clk_nodrv_prepare_enable,
	.disable	= clk_nodrv_disable_unprepare,
	.prepare	= clk_nodrv_prepare_enable,
	.unprepare	= clk_nodrv_disable_unprepare,
	.set_rate	= clk_nodrv_set_rate,
	.set_parent	= clk_nodrv_set_parent,
};
/**
 * clk_unregister - unregister a currently registered clock
 * @clk: clock to unregister
 */
void clk_unregister(struct clk *clk)
{
	unsigned long flags;

	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
		return;

	clk_debug_unregister(clk->core);

	clk_prepare_lock();

	if (clk->core->ops == &clk_nodrv_ops) {
		pr_err("%s: unregistered clock: %s\n", __func__,
		       clk->core->name);
		goto unlock;
	}
	/*
	 * Assign empty clock ops for consumers that might still hold
	 * a reference to this clock.
	 */
	flags = clk_enable_lock();
	clk->core->ops = &clk_nodrv_ops;
	clk_enable_unlock(flags);

	if (!hlist_empty(&clk->core->children)) {
		struct clk_core *child;
		struct hlist_node *t;

		/* Reparent all children to the orphan list. */
		hlist_for_each_entry_safe(child, t, &clk->core->children,
					  child_node)
			clk_core_set_parent(child, NULL);
	}

	hlist_del_init(&clk->core->child_node);

	if (clk->core->prepare_count)
		pr_warn("%s: unregistering prepared clock: %s\n",
			__func__, clk->core->name);
	kref_put(&clk->core->ref, __clk_release);
unlock:
	clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_unregister);
static void devm_clk_release(struct device *dev, void *res)
{
	clk_unregister(*(struct clk **)res);
}

/**
 * devm_clk_register - resource managed clk_register()
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * Managed clk_register(). Clocks returned from this function are
 * automatically clk_unregister()ed on driver detach. See clk_register() for
 * more information.
 */
struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
{
	struct clk *clk;
	struct clk **clkp;

	clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
	if (!clkp)
		return ERR_PTR(-ENOMEM);

	clk = clk_register(dev, hw);
	if (!IS_ERR(clk)) {
		*clkp = clk;
		devres_add(dev, clkp);
	} else {
		devres_free(clkp);
	}

	return clk;
}
EXPORT_SYMBOL_GPL(devm_clk_register);
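/*
 * Illustrative sketch (not part of this file): with the managed variant the
 * clock is unregistered by the devres core on driver detach, so a probe
 * routine ("my_probe" and "my_hw" are hypothetical) reduces to:
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		struct clk *clk = devm_clk_register(&pdev->dev, &my_hw);
 *
 *		return PTR_ERR_OR_ZERO(clk);
 *	}
 */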
static int devm_clk_match(struct device *dev, void *res, void *data)
{
	struct clk *c = res;
	if (WARN_ON(!c))
		return 0;
	return c == data;
}

/**
 * devm_clk_unregister - resource managed clk_unregister()
 * @dev: device that is unregistering the clock
 * @clk: clock to unregister
 *
 * Deallocate a clock allocated with devm_clk_register(). Normally
 * this function will not need to be called and the resource management
 * code will ensure that the resource is freed.
 */
void devm_clk_unregister(struct device *dev, struct clk *clk)
{
	WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk));
}
EXPORT_SYMBOL_GPL(devm_clk_unregister);
/*
 * clkdev helpers
 */
int __clk_get(struct clk *clk)
{
	struct clk_core *core = !clk ? NULL : clk->core;

	if (core) {
		if (!try_module_get(core->owner))
			return 0;

		kref_get(&core->ref);
	}
	return 1;
}

void __clk_put(struct clk *clk)
{
	struct module *owner;

	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
		return;

	clk_prepare_lock();

	hlist_del(&clk->clks_node);
	if (clk->min_rate > clk->core->req_rate ||
	    clk->max_rate < clk->core->req_rate)
		clk_core_set_rate_nolock(clk->core, clk->core->req_rate);

	owner = clk->core->owner;
	kref_put(&clk->core->ref, __clk_release);

	clk_prepare_unlock();

	module_put(owner);

	kfree(clk);
}
/***        clk rate change notifiers        ***/

/**
 * clk_notifier_register - add a clk rate change notifier
 * @clk: struct clk * to watch
 * @nb: struct notifier_block * with callback info
 *
 * Request notification when clk's rate changes.  This uses an SRCU
 * notifier because we want it to block and notifier unregistrations are
 * uncommon.  The callbacks associated with the notifier must not
 * re-enter into the clk framework by calling any top-level clk APIs;
 * this would result in a nested acquisition of the prepare_lock mutex.
 *
 * In all notification cases (pre, post and abort rate change) the
 * original clock rate is passed to the callback via struct
 * clk_notifier_data.old_rate and the new frequency is passed via struct
 * clk_notifier_data.new_rate.
 *
 * clk_notifier_register() must be called from non-atomic context.
 * Returns -EINVAL if called with null arguments, -ENOMEM upon
 * allocation failure; otherwise, passes along the return value of
 * srcu_notifier_chain_register().
 */
int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
{
	struct clk_notifier *cn;
	int ret = -ENOMEM;

	if (!clk || !nb)
		return -EINVAL;

	clk_prepare_lock();

	/* search the list of notifiers for this clk */
	list_for_each_entry(cn, &clk_notifier_list, node)
		if (cn->clk == clk)
			break;

	/* if clk wasn't in the notifier list, allocate new clk_notifier */
	if (cn->clk != clk) {
		cn = kzalloc(sizeof(struct clk_notifier), GFP_KERNEL);
		if (!cn)
			goto out;

		cn->clk = clk;
		srcu_init_notifier_head(&cn->notifier_head);

		list_add(&cn->node, &clk_notifier_list);
	}

	ret = srcu_notifier_chain_register(&cn->notifier_head, nb);

	clk->core->notifier_count++;

out:
	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_register);
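/*
 * Illustrative sketch (not part of this file): a consumer reacting to rate
 * changes; "my_clk_cb" and "my_nb" are hypothetical.  The msg argument is one
 * of PRE_RATE_CHANGE, POST_RATE_CHANGE or ABORT_RATE_CHANGE:
 *
 *	static int my_clk_cb(struct notifier_block *nb, unsigned long msg,
 *			     void *data)
 *	{
 *		struct clk_notifier_data *cnd = data;
 *
 *		pr_debug("rate change %lu -> %lu\n", cnd->old_rate,
 *			 cnd->new_rate);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = { .notifier_call = my_clk_cb };
 *
 *	ret = clk_notifier_register(clk, &my_nb);
 */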
/**
 * clk_notifier_unregister - remove a clk rate change notifier
 * @clk: struct clk *
 * @nb: struct notifier_block * with callback info
 *
 * Request no further notification for changes to 'clk' and free the memory
 * allocated in clk_notifier_register.
 *
 * Returns -EINVAL if called with null arguments; otherwise, passes
 * along the return value of srcu_notifier_chain_unregister().
 */
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
{
	struct clk_notifier *cn = NULL;
	int ret = -EINVAL;

	if (!clk || !nb)
		return -EINVAL;

	clk_prepare_lock();

	list_for_each_entry(cn, &clk_notifier_list, node)
		if (cn->clk == clk)
			break;

	if (cn->clk == clk) {
		ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);

		clk->core->notifier_count--;

		/* XXX the notifier code should handle this better */
		if (!cn->notifier_head.head) {
			srcu_cleanup_notifier_head(&cn->notifier_head);
			list_del(&cn->node);
			kfree(cn);
		}
	} else {
		ret = -ENOENT;
	}

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_unregister);
/**
 * struct of_clk_provider - Clock provider registration structure
 * @link: Entry in global list of clock providers
 * @node: Pointer to device tree node of clock provider
 * @get: Get clock callback.  Returns NULL or a struct clk for the
 *       given clock specifier
 * @data: context pointer to be passed into @get callback
 */
struct of_clk_provider {
	struct list_head link;

	struct device_node *node;
	struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
	void *data;
};

static const struct of_device_id __clk_of_table_sentinel
	__used __section(__clk_of_table_end);

static LIST_HEAD(of_clk_providers);
static DEFINE_MUTEX(of_clk_mutex);
struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
				  void *data)
{
	return data;
}
EXPORT_SYMBOL_GPL(of_clk_src_simple_get);

struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
{
	struct clk_onecell_data *clk_data = data;
	unsigned int idx = clkspec->args[0];

	if (idx >= clk_data->clk_num) {
		pr_err("%s: invalid clock index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	return clk_data->clks[idx];
}
EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);
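/*
 * Illustrative sketch (not part of this file): a provider exporting an array
 * of clocks through of_clk_src_onecell_get(); "my_clks", "MY_NR_CLKS" and
 * "my_np" are hypothetical:
 *
 *	static struct clk *my_clks[MY_NR_CLKS];
 *	static struct clk_onecell_data my_clk_data = {
 *		.clks = my_clks,
 *		.clk_num = ARRAY_SIZE(my_clks),
 *	};
 *
 *	of_clk_add_provider(my_np, of_clk_src_onecell_get, &my_clk_data);
 */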
/**
 * of_clk_add_provider() - Register a clock provider for a node
 * @np: Device node pointer associated with clock provider
 * @clk_src_get: callback for decoding clock
 * @data: context pointer for @clk_src_get callback.
 */
int of_clk_add_provider(struct device_node *np,
			struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
						   void *data),
			void *data)
{
	struct of_clk_provider *cp;
	int ret;

	cp = kzalloc(sizeof(struct of_clk_provider), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	cp->node = of_node_get(np);
	cp->data = data;
	cp->get = clk_src_get;

	mutex_lock(&of_clk_mutex);
	list_add(&cp->link, &of_clk_providers);
	mutex_unlock(&of_clk_mutex);
	pr_debug("Added clock from %s\n", np->full_name);

	ret = of_clk_set_defaults(np, true);
	if (ret < 0)
		of_clk_del_provider(np);

	return ret;
}
EXPORT_SYMBOL_GPL(of_clk_add_provider);
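/*
 * Illustrative device tree fragment (hypothetical, not part of this file):
 * consumers reference a node registered via of_clk_add_provider() with a
 * phandle plus #clock-cells arguments, which end up in the of_phandle_args
 * passed to the provider's @get callback:
 *
 *	clkc: clock-controller@1000 {
 *		compatible = "vendor,my-clocks";
 *		#clock-cells = <1>;
 *	};
 *
 *	uart@2000 {
 *		clocks = <&clkc 3>;
 *	};
 */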
/**
 * of_clk_del_provider() - Remove a previously registered clock provider
 * @np: Device node pointer associated with clock provider
 */
void of_clk_del_provider(struct device_node *np)
{
	struct of_clk_provider *cp;

	mutex_lock(&of_clk_mutex);
	list_for_each_entry(cp, &of_clk_providers, link) {
		if (cp->node == np) {
			list_del(&cp->link);
			of_node_put(cp->node);
			kfree(cp);
			break;
		}
	}
	mutex_unlock(&of_clk_mutex);
}
EXPORT_SYMBOL_GPL(of_clk_del_provider);
struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
				       const char *dev_id, const char *con_id)
{
	struct of_clk_provider *provider;
	struct clk *clk = ERR_PTR(-EPROBE_DEFER);

	if (!clkspec)
		return ERR_PTR(-EINVAL);

	/* Check if we have such a provider in our array */
	mutex_lock(&of_clk_mutex);
	list_for_each_entry(provider, &of_clk_providers, link) {
		if (provider->node == clkspec->np)
			clk = provider->get(clkspec, provider->data);
		if (!IS_ERR(clk)) {
			clk = __clk_create_clk(__clk_get_hw(clk), dev_id,
					       con_id);

			if (!IS_ERR(clk) && !__clk_get(clk)) {
				__clk_free_clk(clk);
				clk = ERR_PTR(-ENOENT);
			}

			break;
		}
	}
	mutex_unlock(&of_clk_mutex);

	return clk;
}

/**
 * of_clk_get_from_provider() - Lookup a clock from a clock provider
 * @clkspec: pointer to a clock specifier data structure
 *
 * This function looks up a struct clk from the registered list of clock
 * providers; the input is a clock specifier data structure as returned
 * from the of_parse_phandle_with_args() function call.
 */
struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
	return __of_clk_get_from_provider(clkspec, NULL, __func__);
}
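/*
 * Illustrative sketch (not part of this file): resolving the first clock
 * referenced by a consumer node "np":
 *
 *	struct of_phandle_args clkspec;
 *	struct clk *clk;
 *
 *	if (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", 0,
 *					&clkspec)) {
 *		clk = of_clk_get_from_provider(&clkspec);
 *		of_node_put(clkspec.np);
 *	}
 */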
int of_clk_get_parent_count(struct device_node *np)
{
	return of_count_phandle_with_args(np, "clocks", "#clock-cells");
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_count);
const char *of_clk_get_parent_name(struct device_node *np, int index)
{
	struct of_phandle_args clkspec;
	struct property *prop;
	const char *clk_name;
	const __be32 *vp;
	u32 pv;
	int rc;
	int count;

	if (index < 0)
		return NULL;

	rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
					&clkspec);
	if (rc)
		return NULL;

	index = clkspec.args_count ? clkspec.args[0] : 0;
	count = 0;

	/* if there is an indices property, use it to transfer the index
	 * specified into an array offset for the clock-output-names property.
	 */
	of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) {
		if (index == pv) {
			index = count;
			break;
		}
		count++;
	}

	if (of_property_read_string_index(clkspec.np, "clock-output-names",
					  index,
					  &clk_name) < 0)
		clk_name = clkspec.np->name;

	of_node_put(clkspec.np);
	return clk_name;
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_name);
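/*
 * Illustrative device tree fragment (hypothetical, not part of this file):
 * with the binding below, of_clk_get_parent_name(np, 0) maps the specifier
 * <&clkc 2> through "clock-indices" to array offset 1 and returns "bar":
 *
 *	clkc: clock-controller@1000 {
 *		#clock-cells = <1>;
 *		clock-indices = <0>, <2>;
 *		clock-output-names = "foo", "bar";
 *	};
 *
 *	np: consumer@2000 {
 *		clocks = <&clkc 2>;
 *	};
 */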
struct clock_provider {
	of_clk_init_cb_t clk_init_cb;
	struct device_node *np;
	struct list_head node;
};

static LIST_HEAD(clk_provider_list);
/*
 * This function looks for a parent clock. If there is one, then it
 * checks that the provider for this parent clock was initialized, in
 * which case the parent clock will be ready.
 */
static int parent_ready(struct device_node *np)
{
	int i = 0;

	while (true) {
		struct clk *clk = of_clk_get(np, i);

		/* this parent is ready, we can check the next one */
		if (!IS_ERR(clk)) {
			clk_put(clk);
			i++;
			continue;
		}

		/* at least one parent is not ready, we exit now */
		if (PTR_ERR(clk) == -EPROBE_DEFER)
			return 0;

		/*
		 * Here we make the assumption that the device tree is
		 * written correctly. So an error means that there are
		 * no more parents. As we didn't exit yet, the previous
		 * parents are all ready. If there is no clock parent at
		 * all, there is no need to wait for one, so we can
		 * consider its absence as being ready.
		 */
		return 1;
	}
}
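/*
 * Illustrative sketch (not part of this file): providers enter the
 * __clk_of_table scanned by of_clk_init() below via CLK_OF_DECLARE();
 * "my_clk_setup" and the compatible string are hypothetical:
 *
 *	static void __init my_clk_setup(struct device_node *np)
 *	{
 *		... register clocks, then of_clk_add_provider(np, ...) ...
 *	}
 *	CLK_OF_DECLARE(my_clk, "vendor,my-clocks", my_clk_setup);
 */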
/**
 * of_clk_init() - Scan and init clock providers from the DT
 * @matches: array of compatible values and init functions for providers.
 *
 * This function scans the device tree for matching clock providers and
 * calls their initialization functions, trying to follow the dependency
 * order between providers.
 */
void __init of_clk_init(const struct of_device_id *matches)
{
	const struct of_device_id *match;
	struct device_node *np;
	struct clock_provider *clk_provider, *next;
	bool is_init_done;
	bool force = false;

	if (!matches)
		matches = &__clk_of_table;

	/* First prepare the list of the clocks providers */
	for_each_matching_node_and_match(np, matches, &match) {
		struct clock_provider *parent =
			kzalloc(sizeof(struct clock_provider), GFP_KERNEL);

		parent->clk_init_cb = match->data;
		parent->np = np;
		list_add_tail(&parent->node, &clk_provider_list);
	}

	while (!list_empty(&clk_provider_list)) {
		is_init_done = false;
		list_for_each_entry_safe(clk_provider, next,
					 &clk_provider_list, node) {
			if (force || parent_ready(clk_provider->np)) {

				clk_provider->clk_init_cb(clk_provider->np);
				of_clk_set_defaults(clk_provider->np, true);

				list_del(&clk_provider->node);
				kfree(clk_provider);
				is_init_done = true;
			}
		}

		/*
		 * We didn't manage to initialize any of the
		 * remaining providers during the last loop, so now we
		 * initialize all the remaining ones unconditionally
		 * in case the clock parent was not mandatory