// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Clock Protocol
 *
 * Copyright (C) 2018-2021 ARM Ltd.
 */
8 #include <linux/module.h>
9 #include <linux/sort.h>
/*
 * Clock protocol message IDs (SCMI spec, Clock Management Protocol).
 * CLOCK_RATE_SET/CLOCK_RATE_GET are referenced by the rate accessors
 * below and carry the spec-assigned IDs 0x5/0x6.
 */
enum scmi_clock_protocol_cmd {
	CLOCK_ATTRIBUTES = 0x3,
	CLOCK_DESCRIBE_RATES = 0x4,
	CLOCK_RATE_SET = 0x5,
	CLOCK_RATE_GET = 0x6,
	CLOCK_CONFIG_SET = 0x7,
};
21 struct scmi_msg_resp_clock_protocol_attributes
{
27 struct scmi_msg_resp_clock_attributes
{
29 #define CLOCK_ENABLE BIT(0)
30 u8 name
[SCMI_MAX_STR_SIZE
];
33 struct scmi_clock_set_config
{
38 struct scmi_msg_clock_describe_rates
{
43 struct scmi_msg_resp_clock_describe_rates
{
44 __le32 num_rates_flags
;
45 #define NUM_RETURNED(x) ((x) & 0xfff)
46 #define RATE_DISCRETE(x) !((x) & BIT(12))
47 #define NUM_REMAINING(x) ((x) >> 16)
52 #define RATE_TO_U64(X) \
55 le32_to_cpu((x).value_low) | (u64)le32_to_cpu((x).value_high) << 32; \
59 struct scmi_clock_set_rate
{
61 #define CLOCK_SET_ASYNC BIT(0)
62 #define CLOCK_SET_IGNORE_RESP BIT(1)
63 #define CLOCK_SET_ROUND_UP BIT(2)
64 #define CLOCK_SET_ROUND_AUTO BIT(3)
74 atomic_t cur_async_req
;
75 struct scmi_clock_info
*clk
;
79 scmi_clock_protocol_attributes_get(const struct scmi_protocol_handle
*ph
,
80 struct clock_info
*ci
)
84 struct scmi_msg_resp_clock_protocol_attributes
*attr
;
86 ret
= ph
->xops
->xfer_get_init(ph
, PROTOCOL_ATTRIBUTES
,
87 0, sizeof(*attr
), &t
);
93 ret
= ph
->xops
->do_xfer(ph
, t
);
95 ci
->num_clocks
= le16_to_cpu(attr
->num_clocks
);
96 ci
->max_async_req
= attr
->max_async_req
;
99 ph
->xops
->xfer_put(ph
, t
);
103 static int scmi_clock_attributes_get(const struct scmi_protocol_handle
*ph
,
104 u32 clk_id
, struct scmi_clock_info
*clk
)
108 struct scmi_msg_resp_clock_attributes
*attr
;
110 ret
= ph
->xops
->xfer_get_init(ph
, CLOCK_ATTRIBUTES
,
111 sizeof(clk_id
), sizeof(*attr
), &t
);
115 put_unaligned_le32(clk_id
, t
->tx
.buf
);
118 ret
= ph
->xops
->do_xfer(ph
, t
);
120 strlcpy(clk
->name
, attr
->name
, SCMI_MAX_STR_SIZE
);
124 ph
->xops
->xfer_put(ph
, t
);
128 static int rate_cmp_func(const void *_r1
, const void *_r2
)
130 const u64
*r1
= _r1
, *r2
= _r2
;
141 scmi_clock_describe_rates_get(const struct scmi_protocol_handle
*ph
, u32 clk_id
,
142 struct scmi_clock_info
*clk
)
146 bool rate_discrete
= false;
147 u32 tot_rate_cnt
= 0, rates_flag
;
148 u16 num_returned
, num_remaining
;
150 struct scmi_msg_clock_describe_rates
*clk_desc
;
151 struct scmi_msg_resp_clock_describe_rates
*rlist
;
153 ret
= ph
->xops
->xfer_get_init(ph
, CLOCK_DESCRIBE_RATES
,
154 sizeof(*clk_desc
), 0, &t
);
158 clk_desc
= t
->tx
.buf
;
162 clk_desc
->id
= cpu_to_le32(clk_id
);
163 /* Set the number of rates to be skipped/already read */
164 clk_desc
->rate_index
= cpu_to_le32(tot_rate_cnt
);
166 ret
= ph
->xops
->do_xfer(ph
, t
);
170 rates_flag
= le32_to_cpu(rlist
->num_rates_flags
);
171 num_remaining
= NUM_REMAINING(rates_flag
);
172 rate_discrete
= RATE_DISCRETE(rates_flag
);
173 num_returned
= NUM_RETURNED(rates_flag
);
175 if (tot_rate_cnt
+ num_returned
> SCMI_MAX_NUM_RATES
) {
176 dev_err(ph
->dev
, "No. of rates > MAX_NUM_RATES");
180 if (!rate_discrete
) {
181 clk
->range
.min_rate
= RATE_TO_U64(rlist
->rate
[0]);
182 clk
->range
.max_rate
= RATE_TO_U64(rlist
->rate
[1]);
183 clk
->range
.step_size
= RATE_TO_U64(rlist
->rate
[2]);
184 dev_dbg(ph
->dev
, "Min %llu Max %llu Step %llu Hz\n",
185 clk
->range
.min_rate
, clk
->range
.max_rate
,
186 clk
->range
.step_size
);
190 rate
= &clk
->list
.rates
[tot_rate_cnt
];
191 for (cnt
= 0; cnt
< num_returned
; cnt
++, rate
++) {
192 *rate
= RATE_TO_U64(rlist
->rate
[cnt
]);
193 dev_dbg(ph
->dev
, "Rate %llu Hz\n", *rate
);
196 tot_rate_cnt
+= num_returned
;
198 ph
->xops
->reset_rx_to_maxsz(ph
, t
);
200 * check for both returned and remaining to avoid infinite
201 * loop due to buggy firmware
203 } while (num_returned
&& num_remaining
);
205 if (rate_discrete
&& rate
) {
206 clk
->list
.num_rates
= tot_rate_cnt
;
207 sort(clk
->list
.rates
, tot_rate_cnt
, sizeof(*rate
),
208 rate_cmp_func
, NULL
);
211 clk
->rate_discrete
= rate_discrete
;
214 ph
->xops
->xfer_put(ph
, t
);
219 scmi_clock_rate_get(const struct scmi_protocol_handle
*ph
,
220 u32 clk_id
, u64
*value
)
225 ret
= ph
->xops
->xfer_get_init(ph
, CLOCK_RATE_GET
,
226 sizeof(__le32
), sizeof(u64
), &t
);
230 put_unaligned_le32(clk_id
, t
->tx
.buf
);
232 ret
= ph
->xops
->do_xfer(ph
, t
);
234 *value
= get_unaligned_le64(t
->rx
.buf
);
236 ph
->xops
->xfer_put(ph
, t
);
240 static int scmi_clock_rate_set(const struct scmi_protocol_handle
*ph
,
241 u32 clk_id
, u64 rate
)
246 struct scmi_clock_set_rate
*cfg
;
247 struct clock_info
*ci
= ph
->get_priv(ph
);
249 ret
= ph
->xops
->xfer_get_init(ph
, CLOCK_RATE_SET
, sizeof(*cfg
), 0, &t
);
253 if (ci
->max_async_req
&&
254 atomic_inc_return(&ci
->cur_async_req
) < ci
->max_async_req
)
255 flags
|= CLOCK_SET_ASYNC
;
258 cfg
->flags
= cpu_to_le32(flags
);
259 cfg
->id
= cpu_to_le32(clk_id
);
260 cfg
->value_low
= cpu_to_le32(rate
& 0xffffffff);
261 cfg
->value_high
= cpu_to_le32(rate
>> 32);
263 if (flags
& CLOCK_SET_ASYNC
)
264 ret
= ph
->xops
->do_xfer_with_response(ph
, t
);
266 ret
= ph
->xops
->do_xfer(ph
, t
);
268 if (ci
->max_async_req
)
269 atomic_dec(&ci
->cur_async_req
);
271 ph
->xops
->xfer_put(ph
, t
);
276 scmi_clock_config_set(const struct scmi_protocol_handle
*ph
, u32 clk_id
,
281 struct scmi_clock_set_config
*cfg
;
283 ret
= ph
->xops
->xfer_get_init(ph
, CLOCK_CONFIG_SET
,
284 sizeof(*cfg
), 0, &t
);
289 cfg
->id
= cpu_to_le32(clk_id
);
290 cfg
->attributes
= cpu_to_le32(config
);
292 ret
= ph
->xops
->do_xfer(ph
, t
);
294 ph
->xops
->xfer_put(ph
, t
);
298 static int scmi_clock_enable(const struct scmi_protocol_handle
*ph
, u32 clk_id
)
300 return scmi_clock_config_set(ph
, clk_id
, CLOCK_ENABLE
);
303 static int scmi_clock_disable(const struct scmi_protocol_handle
*ph
, u32 clk_id
)
305 return scmi_clock_config_set(ph
, clk_id
, 0);
308 static int scmi_clock_count_get(const struct scmi_protocol_handle
*ph
)
310 struct clock_info
*ci
= ph
->get_priv(ph
);
312 return ci
->num_clocks
;
315 static const struct scmi_clock_info
*
316 scmi_clock_info_get(const struct scmi_protocol_handle
*ph
, u32 clk_id
)
318 struct clock_info
*ci
= ph
->get_priv(ph
);
319 struct scmi_clock_info
*clk
= ci
->clk
+ clk_id
;
327 static const struct scmi_clk_proto_ops clk_proto_ops
= {
328 .count_get
= scmi_clock_count_get
,
329 .info_get
= scmi_clock_info_get
,
330 .rate_get
= scmi_clock_rate_get
,
331 .rate_set
= scmi_clock_rate_set
,
332 .enable
= scmi_clock_enable
,
333 .disable
= scmi_clock_disable
,
336 static int scmi_clock_protocol_init(const struct scmi_protocol_handle
*ph
)
340 struct clock_info
*cinfo
;
342 ph
->xops
->version_get(ph
, &version
);
344 dev_dbg(ph
->dev
, "Clock Version %d.%d\n",
345 PROTOCOL_REV_MAJOR(version
), PROTOCOL_REV_MINOR(version
));
347 cinfo
= devm_kzalloc(ph
->dev
, sizeof(*cinfo
), GFP_KERNEL
);
351 scmi_clock_protocol_attributes_get(ph
, cinfo
);
353 cinfo
->clk
= devm_kcalloc(ph
->dev
, cinfo
->num_clocks
,
354 sizeof(*cinfo
->clk
), GFP_KERNEL
);
358 for (clkid
= 0; clkid
< cinfo
->num_clocks
; clkid
++) {
359 struct scmi_clock_info
*clk
= cinfo
->clk
+ clkid
;
361 ret
= scmi_clock_attributes_get(ph
, clkid
, clk
);
363 scmi_clock_describe_rates_get(ph
, clkid
, clk
);
366 cinfo
->version
= version
;
367 return ph
->set_priv(ph
, cinfo
);
370 static const struct scmi_protocol scmi_clock
= {
371 .id
= SCMI_PROTOCOL_CLOCK
,
372 .owner
= THIS_MODULE
,
373 .instance_init
= &scmi_clock_protocol_init
,
374 .ops
= &clk_proto_ops
,
377 DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(clock
, scmi_clock
)