]> git.proxmox.com Git - mirror_ubuntu-eoan-kernel.git/blame - drivers/clk/clk-bulk.c
mlxsw: spectrum: Change stage of ACL initialization
[mirror_ubuntu-eoan-kernel.git] / drivers / clk / clk-bulk.c
CommitLineData
266e4e9d
DA
1/*
2 * Copyright 2017 NXP
3 *
4 * Dong Aisheng <aisheng.dong@nxp.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#include <linux/clk.h>
616e45df 20#include <linux/clk-provider.h>
266e4e9d
DA
21#include <linux/device.h>
22#include <linux/export.h>
cfdc0411 23#include <linux/of.h>
616e45df 24#include <linux/slab.h>
cfdc0411
DA
25
26static int __must_check of_clk_bulk_get(struct device_node *np, int num_clks,
27 struct clk_bulk_data *clks)
28{
29 int ret;
30 int i;
31
32 for (i = 0; i < num_clks; i++)
33 clks[i].clk = NULL;
34
35 for (i = 0; i < num_clks; i++) {
36 clks[i].clk = of_clk_get(np, i);
37 if (IS_ERR(clks[i].clk)) {
38 ret = PTR_ERR(clks[i].clk);
39 pr_err("%pOF: Failed to get clk index: %d ret: %d\n",
40 np, i, ret);
41 clks[i].clk = NULL;
42 goto err;
43 }
44 }
45
46 return 0;
47
48err:
49 clk_bulk_put(i, clks);
50
51 return ret;
52}
266e4e9d 53
616e45df
DA
54static int __must_check of_clk_bulk_get_all(struct device_node *np,
55 struct clk_bulk_data **clks)
56{
57 struct clk_bulk_data *clk_bulk;
58 int num_clks;
59 int ret;
60
61 num_clks = of_clk_get_parent_count(np);
62 if (!num_clks)
63 return 0;
64
65 clk_bulk = kmalloc_array(num_clks, sizeof(*clk_bulk), GFP_KERNEL);
66 if (!clk_bulk)
67 return -ENOMEM;
68
69 ret = of_clk_bulk_get(np, num_clks, clk_bulk);
70 if (ret) {
71 kfree(clk_bulk);
72 return ret;
73 }
74
75 *clks = clk_bulk;
76
77 return num_clks;
78}
79
266e4e9d
DA
80void clk_bulk_put(int num_clks, struct clk_bulk_data *clks)
81{
82 while (--num_clks >= 0) {
83 clk_put(clks[num_clks].clk);
84 clks[num_clks].clk = NULL;
85 }
86}
87EXPORT_SYMBOL_GPL(clk_bulk_put);
88
89int __must_check clk_bulk_get(struct device *dev, int num_clks,
90 struct clk_bulk_data *clks)
91{
92 int ret;
93 int i;
94
95 for (i = 0; i < num_clks; i++)
96 clks[i].clk = NULL;
97
98 for (i = 0; i < num_clks; i++) {
99 clks[i].clk = clk_get(dev, clks[i].id);
100 if (IS_ERR(clks[i].clk)) {
101 ret = PTR_ERR(clks[i].clk);
329470f2
JB
102 if (ret != -EPROBE_DEFER)
103 dev_err(dev, "Failed to get clk '%s': %d\n",
104 clks[i].id, ret);
266e4e9d
DA
105 clks[i].clk = NULL;
106 goto err;
107 }
108 }
109
110 return 0;
111
112err:
113 clk_bulk_put(i, clks);
114
115 return ret;
116}
117EXPORT_SYMBOL(clk_bulk_get);
118
616e45df
DA
void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks)
{
	/* Tolerate NULL/ERR tables from a failed clk_bulk_get_all(). */
	if (IS_ERR_OR_NULL(clks))
		return;

	clk_bulk_put(num_clks, clks);
	kfree(clks);
}
EXPORT_SYMBOL(clk_bulk_put_all);
129
130int __must_check clk_bulk_get_all(struct device *dev,
131 struct clk_bulk_data **clks)
132{
133 struct device_node *np = dev_of_node(dev);
134
135 if (!np)
136 return 0;
137
138 return of_clk_bulk_get_all(np, clks);
139}
140EXPORT_SYMBOL(clk_bulk_get_all);
141
266e4e9d
DA
142#ifdef CONFIG_HAVE_CLK_PREPARE
143
/**
 * clk_bulk_unprepare - undo preparation of a set of clock sources
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table being unprepared
 *
 * clk_bulk_unprepare may sleep, which differentiates it from clk_bulk_disable.
 */
void clk_bulk_unprepare(int num_clks, const struct clk_bulk_data *clks)
{
	/* Unprepare in reverse of the order clk_bulk_prepare() used. */
	while (--num_clks >= 0)
		clk_unprepare(clks[num_clks].clk);
}
EXPORT_SYMBOL_GPL(clk_bulk_unprepare);
158
159/**
160 * clk_bulk_prepare - prepare a set of clocks
161 * @num_clks: the number of clk_bulk_data
162 * @clks: the clk_bulk_data table being prepared
163 *
164 * clk_bulk_prepare may sleep, which differentiates it from clk_bulk_enable.
165 * Returns 0 on success, -EERROR otherwise.
166 */
167int __must_check clk_bulk_prepare(int num_clks,
168 const struct clk_bulk_data *clks)
169{
170 int ret;
171 int i;
172
173 for (i = 0; i < num_clks; i++) {
174 ret = clk_prepare(clks[i].clk);
175 if (ret) {
176 pr_err("Failed to prepare clk '%s': %d\n",
177 clks[i].id, ret);
178 goto err;
179 }
180 }
181
182 return 0;
183
184err:
185 clk_bulk_unprepare(i, clks);
186
187 return ret;
188}
9792bf5a 189EXPORT_SYMBOL_GPL(clk_bulk_prepare);
266e4e9d
DA
190
191#endif /* CONFIG_HAVE_CLK_PREPARE */
192
193/**
194 * clk_bulk_disable - gate a set of clocks
195 * @num_clks: the number of clk_bulk_data
196 * @clks: the clk_bulk_data table being gated
197 *
198 * clk_bulk_disable must not sleep, which differentiates it from
199 * clk_bulk_unprepare. clk_bulk_disable must be called before
200 * clk_bulk_unprepare.
201 */
202void clk_bulk_disable(int num_clks, const struct clk_bulk_data *clks)
203{
204
205 while (--num_clks >= 0)
206 clk_disable(clks[num_clks].clk);
207}
208EXPORT_SYMBOL_GPL(clk_bulk_disable);
209
210/**
211 * clk_bulk_enable - ungate a set of clocks
212 * @num_clks: the number of clk_bulk_data
213 * @clks: the clk_bulk_data table being ungated
214 *
215 * clk_bulk_enable must not sleep
216 * Returns 0 on success, -EERROR otherwise.
217 */
218int __must_check clk_bulk_enable(int num_clks, const struct clk_bulk_data *clks)
219{
220 int ret;
221 int i;
222
223 for (i = 0; i < num_clks; i++) {
224 ret = clk_enable(clks[i].clk);
225 if (ret) {
226 pr_err("Failed to enable clk '%s': %d\n",
227 clks[i].id, ret);
228 goto err;
229 }
230 }
231
232 return 0;
233
234err:
235 clk_bulk_disable(i, clks);
236
237 return ret;
238}
239EXPORT_SYMBOL_GPL(clk_bulk_enable);