// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Clock Protocol
 *
 * Copyright (C) 2018-2021 ARM Ltd.
 */

#include <linux/module.h>
#include <linux/sort.h>

#include "common.h"

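/*
 * Clock protocol command IDs, as defined by the SCMI specification.
 * IDs 0x0-0x2 are the generic PROTOCOL_VERSION/PROTOCOL_ATTRIBUTES/
 * PROTOCOL_MESSAGE_ATTRIBUTES commands common to every protocol and
 * handled by the SCMI core, so only clock-specific commands appear here.
 */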
enum scmi_clock_protocol_cmd {
	CLOCK_ATTRIBUTES = 0x3,
	CLOCK_DESCRIBE_RATES = 0x4,
	CLOCK_RATE_SET = 0x5,
	CLOCK_RATE_GET = 0x6,
	CLOCK_CONFIG_SET = 0x7,
};

struct scmi_msg_resp_clock_protocol_attributes {
	__le16 num_clocks;
	u8 max_async_req;
	u8 reserved;
};

struct scmi_msg_resp_clock_attributes {
	__le32 attributes;
#define	CLOCK_ENABLE	BIT(0)
	u8 name[SCMI_MAX_STR_SIZE];
};

struct scmi_clock_set_config {
	__le32 id;
	__le32 attributes;
};

struct scmi_msg_clock_describe_rates {
	__le32 id;
	__le32 rate_index;
};

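/*
 * Response payload for CLOCK_DESCRIBE_RATES. Each entry in rate[] is a
 * 64-bit value in Hz, split into little-endian low/high 32-bit words.
 * For a discrete clock the array carries NUM_RETURNED() rates; for a
 * rate range it carries the {min, max, step} triplet instead.
 */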
struct scmi_msg_resp_clock_describe_rates {
	__le32 num_rates_flags;
#define NUM_RETURNED(x)		((x) & 0xfff)
#define RATE_DISCRETE(x)	!((x) & BIT(12))
#define NUM_REMAINING(x)	((x) >> 16)
	struct {
		__le32 value_low;
		__le32 value_high;
	} rate[0];
#define RATE_TO_U64(X)		\
({				\
	typeof(X) x = (X);	\
	le32_to_cpu((x).value_low) | (u64)le32_to_cpu((x).value_high) << 32; \
})
};

struct scmi_clock_set_rate {
	__le32 flags;
#define CLOCK_SET_ASYNC		BIT(0)
#define CLOCK_SET_IGNORE_RESP	BIT(1)
#define CLOCK_SET_ROUND_UP	BIT(2)
#define CLOCK_SET_ROUND_AUTO	BIT(3)
	__le32 id;
	__le32 value_low;
	__le32 value_high;
};

struct clock_info {
	u32 version;
	int num_clocks;
	int max_async_req;
	atomic_t cur_async_req;
	struct scmi_clock_info *clk;
};

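/*
 * Query PROTOCOL_ATTRIBUTES to learn how many clocks the platform
 * exposes and how many asynchronous rate-set requests it can service
 * concurrently, caching both in the per-instance clock_info.
 */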
static int
scmi_clock_protocol_attributes_get(const struct scmi_protocol_handle *ph,
				   struct clock_info *ci)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_resp_clock_protocol_attributes *attr;

	ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES,
				      0, sizeof(*attr), &t);
	if (ret)
		return ret;

	attr = t->rx.buf;

	ret = ph->xops->do_xfer(ph, t);
	if (!ret) {
		ci->num_clocks = le16_to_cpu(attr->num_clocks);
		ci->max_async_req = attr->max_async_req;
	}

	ph->xops->xfer_put(ph, t);
	return ret;
}

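/*
 * Read a clock's CLOCK_ATTRIBUTES, currently only to obtain its name.
 * On failure the name is cleared, which scmi_clock_info_get() below
 * uses to flag the clock as invalid.
 */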
static int scmi_clock_attributes_get(const struct scmi_protocol_handle *ph,
				     u32 clk_id, struct scmi_clock_info *clk)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_resp_clock_attributes *attr;

	ret = ph->xops->xfer_get_init(ph, CLOCK_ATTRIBUTES,
				      sizeof(clk_id), sizeof(*attr), &t);
	if (ret)
		return ret;

	put_unaligned_le32(clk_id, t->tx.buf);
	attr = t->rx.buf;

	ret = ph->xops->do_xfer(ph, t);
	if (!ret)
		strlcpy(clk->name, attr->name, SCMI_MAX_STR_SIZE);
	else
		clk->name[0] = '\0';

	ph->xops->xfer_put(ph, t);
	return ret;
}

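/*
 * Comparator passed to sort() to order the retrieved rates ascending.
 * Explicit comparisons are used rather than returning a subtraction,
 * which could truncate a u64 difference into the int return value.
 */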
static int rate_cmp_func(const void *_r1, const void *_r2)
{
	const u64 *r1 = _r1, *r2 = _r2;

	if (*r1 < *r2)
		return -1;
	else if (*r1 == *r2)
		return 0;
	else
		return 1;
}

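/*
 * Enumerate a clock's supported rates with CLOCK_DESCRIBE_RATES. Since
 * the full list may not fit in a single response, the command is
 * reissued with rate_index advanced past the rates already read until
 * none remain. Discrete rates accumulate in clk->list and are sorted
 * ascending at the end; a rate range is stored in clk->range instead.
 */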
static int
scmi_clock_describe_rates_get(const struct scmi_protocol_handle *ph, u32 clk_id,
			      struct scmi_clock_info *clk)
{
	u64 *rate = NULL;
	int ret, cnt;
	bool rate_discrete = false;
	u32 tot_rate_cnt = 0, rates_flag;
	u16 num_returned, num_remaining;
	struct scmi_xfer *t;
	struct scmi_msg_clock_describe_rates *clk_desc;
	struct scmi_msg_resp_clock_describe_rates *rlist;

	ret = ph->xops->xfer_get_init(ph, CLOCK_DESCRIBE_RATES,
				      sizeof(*clk_desc), 0, &t);
	if (ret)
		return ret;

	clk_desc = t->tx.buf;
	rlist = t->rx.buf;

	do {
		clk_desc->id = cpu_to_le32(clk_id);
		/* Set the number of rates to be skipped/already read */
		clk_desc->rate_index = cpu_to_le32(tot_rate_cnt);

		ret = ph->xops->do_xfer(ph, t);
		if (ret)
			goto err;

		rates_flag = le32_to_cpu(rlist->num_rates_flags);
		num_remaining = NUM_REMAINING(rates_flag);
		rate_discrete = RATE_DISCRETE(rates_flag);
		num_returned = NUM_RETURNED(rates_flag);

		if (tot_rate_cnt + num_returned > SCMI_MAX_NUM_RATES) {
			dev_err(ph->dev, "No. of rates > MAX_NUM_RATES");
			break;
		}

		if (!rate_discrete) {
			clk->range.min_rate = RATE_TO_U64(rlist->rate[0]);
			clk->range.max_rate = RATE_TO_U64(rlist->rate[1]);
			clk->range.step_size = RATE_TO_U64(rlist->rate[2]);
			dev_dbg(ph->dev, "Min %llu Max %llu Step %llu Hz\n",
				clk->range.min_rate, clk->range.max_rate,
				clk->range.step_size);
			break;
		}

		rate = &clk->list.rates[tot_rate_cnt];
		for (cnt = 0; cnt < num_returned; cnt++, rate++) {
			*rate = RATE_TO_U64(rlist->rate[cnt]);
			dev_dbg(ph->dev, "Rate %llu Hz\n", *rate);
		}

		tot_rate_cnt += num_returned;

		ph->xops->reset_rx_to_maxsz(ph, t);
		/*
		 * check for both returned and remaining to avoid infinite
		 * loop due to buggy firmware
		 */
	} while (num_returned && num_remaining);

	if (rate_discrete && rate) {
		clk->list.num_rates = tot_rate_cnt;
		sort(clk->list.rates, tot_rate_cnt, sizeof(*rate),
		     rate_cmp_func, NULL);
	}

	clk->rate_discrete = rate_discrete;

err:
	ph->xops->xfer_put(ph, t);
	return ret;
}

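/*
 * Read the clock's current rate in Hz via CLOCK_RATE_GET; the platform
 * returns it as an unaligned little-endian 64-bit value.
 */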
static int
scmi_clock_rate_get(const struct scmi_protocol_handle *ph,
		    u32 clk_id, u64 *value)
{
	int ret;
	struct scmi_xfer *t;

	ret = ph->xops->xfer_get_init(ph, CLOCK_RATE_GET,
				      sizeof(__le32), sizeof(u64), &t);
	if (ret)
		return ret;

	put_unaligned_le32(clk_id, t->tx.buf);

	ret = ph->xops->do_xfer(ph, t);
	if (!ret)
		*value = get_unaligned_le64(t->rx.buf);

	ph->xops->xfer_put(ph, t);
	return ret;
}

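/*
 * Request a new clock rate via CLOCK_RATE_SET. If the platform
 * advertised asynchronous support and a slot is available, the request
 * is marked CLOCK_SET_ASYNC and the delayed response is awaited;
 * otherwise an ordinary synchronous transfer is performed.
 */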
static int scmi_clock_rate_set(const struct scmi_protocol_handle *ph,
			       u32 clk_id, u64 rate)
{
	int ret;
	u32 flags = 0;
	struct scmi_xfer *t;
	struct scmi_clock_set_rate *cfg;
	struct clock_info *ci = ph->get_priv(ph);

	ret = ph->xops->xfer_get_init(ph, CLOCK_RATE_SET, sizeof(*cfg), 0, &t);
	if (ret)
		return ret;

	if (ci->max_async_req &&
	    atomic_inc_return(&ci->cur_async_req) < ci->max_async_req)
		flags |= CLOCK_SET_ASYNC;

	cfg = t->tx.buf;
	cfg->flags = cpu_to_le32(flags);
	cfg->id = cpu_to_le32(clk_id);
	cfg->value_low = cpu_to_le32(rate & 0xffffffff);
	cfg->value_high = cpu_to_le32(rate >> 32);

	if (flags & CLOCK_SET_ASYNC)
		ret = ph->xops->do_xfer_with_response(ph, t);
	else
		ret = ph->xops->do_xfer(ph, t);

	if (ci->max_async_req)
		atomic_dec(&ci->cur_async_req);

	ph->xops->xfer_put(ph, t);
	return ret;
}

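/*
 * Write a clock's configuration word via CLOCK_CONFIG_SET; used by the
 * enable/disable helpers below to toggle the CLOCK_ENABLE bit.
 */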
static int
scmi_clock_config_set(const struct scmi_protocol_handle *ph, u32 clk_id,
		      u32 config)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_clock_set_config *cfg;

	ret = ph->xops->xfer_get_init(ph, CLOCK_CONFIG_SET,
				      sizeof(*cfg), 0, &t);
	if (ret)
		return ret;

	cfg = t->tx.buf;
	cfg->id = cpu_to_le32(clk_id);
	cfg->attributes = cpu_to_le32(config);

	ret = ph->xops->do_xfer(ph, t);

	ph->xops->xfer_put(ph, t);
	return ret;
}

static int scmi_clock_enable(const struct scmi_protocol_handle *ph, u32 clk_id)
{
	return scmi_clock_config_set(ph, clk_id, CLOCK_ENABLE);
}

static int scmi_clock_disable(const struct scmi_protocol_handle *ph, u32 clk_id)
{
	return scmi_clock_config_set(ph, clk_id, 0);
}

static int scmi_clock_count_get(const struct scmi_protocol_handle *ph)
{
	struct clock_info *ci = ph->get_priv(ph);

	return ci->num_clocks;
}

static const struct scmi_clock_info *
scmi_clock_info_get(const struct scmi_protocol_handle *ph, u32 clk_id)
{
	struct clock_info *ci = ph->get_priv(ph);
	struct scmi_clock_info *clk = ci->clk + clk_id;

	if (!clk->name[0])
		return NULL;

	return clk;
}

static const struct scmi_clk_proto_ops clk_proto_ops = {
	.count_get = scmi_clock_count_get,
	.info_get = scmi_clock_info_get,
	.rate_get = scmi_clock_rate_get,
	.rate_set = scmi_clock_rate_set,
	.enable = scmi_clock_enable,
	.disable = scmi_clock_disable,
};
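
/*
 * Consumers reach these operations through the SCMI core rather than by
 * calling them directly. A minimal sketch of how a user such as
 * drivers/clk/clk-scmi.c obtains them (variable names here are
 * illustrative only):
 *
 *	const struct scmi_clk_proto_ops *ops;
 *	struct scmi_protocol_handle *ph;
 *
 *	ops = sdev->handle->devm_protocol_get(sdev, SCMI_PROTOCOL_CLOCK,
 *					      &ph);
 *	if (IS_ERR(ops))
 *		return PTR_ERR(ops);
 *
 *	num_clocks = ops->count_get(ph);
 */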
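
/*
 * Instance initializer run by the SCMI core when the clock protocol is
 * first requested: it reads the protocol version, queries the clock
 * count, enumerates each clock's attributes and rates, and stores the
 * result as protocol-private data.
 */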
static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph)
{
	u32 version;
	int clkid, ret;
	struct clock_info *cinfo;

	ph->xops->version_get(ph, &version);

	dev_dbg(ph->dev, "Clock Version %d.%d\n",
		PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));

	cinfo = devm_kzalloc(ph->dev, sizeof(*cinfo), GFP_KERNEL);
	if (!cinfo)
		return -ENOMEM;

	scmi_clock_protocol_attributes_get(ph, cinfo);

	cinfo->clk = devm_kcalloc(ph->dev, cinfo->num_clocks,
				  sizeof(*cinfo->clk), GFP_KERNEL);
	if (!cinfo->clk)
		return -ENOMEM;

	for (clkid = 0; clkid < cinfo->num_clocks; clkid++) {
		struct scmi_clock_info *clk = cinfo->clk + clkid;

		ret = scmi_clock_attributes_get(ph, clkid, clk);
		if (!ret)
			scmi_clock_describe_rates_get(ph, clkid, clk);
	}

	cinfo->version = version;
	return ph->set_priv(ph, cinfo);
}

static const struct scmi_protocol scmi_clock = {
	.id = SCMI_PROTOCOL_CLOCK,
	.owner = THIS_MODULE,
	.instance_init = &scmi_clock_protocol_init,
	.ops = &clk_proto_ops,
};

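/*
 * Expands to scmi_clock_register()/scmi_clock_unregister(), which the
 * SCMI core driver invokes at its own init/exit to make this protocol
 * available.
 */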
DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(clock, scmi_clock)