/*
 * Versatile Express Serial Power Controller (SPC) support
 *
 * Copyright (C) 2013 ARM Ltd.
 *
 * Authors: Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
 *          Achin Gupta           <achin.gupta@arm.com>
 *          Lorenzo Pieralisi     <lorenzo.pieralisi@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/semaphore.h>

#include <asm/cacheflush.h>

#define SPCLOG "vexpress-spc: "

#define PERF_LVL_A15		0x00
#define PERF_REQ_A15		0x04
#define PERF_LVL_A7		0x08
#define PERF_REQ_A7		0x0c
#define COMMS			0x10
#define COMMS_REQ		0x14
#define PWC_STATUS		0x18
#define PWC_FLAG		0x1c

/* SPC wake-up IRQs status and mask */
#define WAKE_INT_MASK		0x24
#define WAKE_INT_RAW		0x28
#define WAKE_INT_STAT		0x2c
/* SPC power down registers */
#define A15_PWRDN_EN		0x30
#define A7_PWRDN_EN		0x34
/* SPC per-CPU mailboxes */
#define A15_BX_ADDR0		0x68
#define A7_BX_ADDR0		0x78

/* SPC system config interface registers */
#define SYSCFG_WDATA		0x70
#define SYSCFG_RDATA		0x74

/* A15/A7 OPP virtual register base */
#define A15_PERFVAL_BASE	0xC10
#define A7_PERFVAL_BASE		0xC30

/* Config interface control bits */
#define SYSCFG_START		(1 << 31)
#define SYSCFG_SCC		(6 << 20)
#define SYSCFG_STAT		(14 << 20)

/* wake-up interrupt masks */
#define GBL_WAKEUP_INT_MSK	(0x3 << 10)

/* TC2 static dual-cluster configuration */
#define MAX_CLUSTERS		2

/*
 * Even though the SPC takes at most 3-5 ms to complete any OPP/COMMS
 * operation, the operation could start just before the jiffies counter
 * is incremented, so use a 20 ms timeout (two jiffies at HZ=100).
 */
#define TIMEOUT_US	20000

#define MAX_OPPS	8
#define CA15_DVFS	0
#define CA7_DVFS	1
#define SPC_SYS_CFG	2
#define STAT_COMPLETE(type)	((1 << 0) << (type << 2))
#define STAT_ERR(type)		((1 << 1) << (type << 2))
#define RESPONSE_MASK(type)	(STAT_COMPLETE(type) | STAT_ERR(type))

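/*
 * Worked example of the response macros above: for a sys config request
 * (SPC_SYS_CFG == 2) the status bits land in nibble 2 of PWC_STATUS, so
 * STAT_COMPLETE(SPC_SYS_CFG) == 1 << 8, STAT_ERR(SPC_SYS_CFG) == 1 << 9,
 * and RESPONSE_MASK(SPC_SYS_CFG) == 0x300. Each request type thus owns a
 * distinct complete/error bit pair in the status register.
 */
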
struct ve_spc_opp {
	unsigned long freq;
	unsigned long u_volt;
};

struct ve_spc_drvdata {
	void __iomem *baseaddr;
	/*
	 * A15 cluster identifier; it corresponds to the A15 processors'
	 * MPIDR[15:8] bitfield.
	 */
	u32 a15_clusid;
	uint32_t cur_rsp_mask;
	uint32_t cur_rsp_stat;
	struct semaphore sem;
	struct completion done;
	struct ve_spc_opp *opps[MAX_CLUSTERS];
	int num_opps[MAX_CLUSTERS];
};

static struct ve_spc_drvdata *info;

static inline bool cluster_is_a15(u32 cluster)
{
	return cluster == info->a15_clusid;
}

/**
 * ve_spc_global_wakeup_irq()
 *
 * Function to set/clear global wakeup IRQs. Not protected by locking since
 * it might be used in code paths where normal cacheable locks are not
 * working. Locking must be provided by the caller to ensure atomicity.
 *
 * @set: if true, global wake-up IRQs are set, if false they are cleared
 */
void ve_spc_global_wakeup_irq(bool set)
{
	u32 reg;

	reg = readl_relaxed(info->baseaddr + WAKE_INT_MASK);

	if (set)
		reg |= GBL_WAKEUP_INT_MSK;
	else
		reg &= ~GBL_WAKEUP_INT_MSK;

	writel_relaxed(reg, info->baseaddr + WAKE_INT_MASK);
}

/**
 * ve_spc_cpu_wakeup_irq()
 *
 * Function to set/clear per-CPU wake-up IRQs. Not protected by locking since
 * it might be used in code paths where normal cacheable locks are not
 * working. Locking must be provided by the caller to ensure atomicity.
 *
 * @cluster: mpidr[15:8] bitfield describing cluster affinity level
 * @cpu: mpidr[7:0] bitfield describing cpu affinity level
 * @set: if true, wake-up IRQs are set, if false they are cleared
 */
void ve_spc_cpu_wakeup_irq(u32 cluster, u32 cpu, bool set)
{
	u32 mask, reg;

	if (cluster >= MAX_CLUSTERS)
		return;

	mask = 1 << cpu;

	if (!cluster_is_a15(cluster))
		mask <<= 4;

	reg = readl_relaxed(info->baseaddr + WAKE_INT_MASK);

	if (set)
		reg |= mask;
	else
		reg &= ~mask;

	writel_relaxed(reg, info->baseaddr + WAKE_INT_MASK);
}

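/*
 * As the masks above imply, WAKE_INT_MASK packs the per-CPU enables with
 * A15 CPUs in bits [3:0] and A7 CPUs in bits [7:4], with the global
 * wake-up enables in bits [11:10]. For example, enabling the wake-up IRQ
 * for CPU2 of the A7 cluster sets bit 6. (Layout inferred from the code
 * here, not from the TC2 documentation.)
 */
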
/**
 * ve_spc_set_resume_addr() - set the jump address used for warm boot
 *
 * @cluster: mpidr[15:8] bitfield describing cluster affinity level
 * @cpu: mpidr[7:0] bitfield describing cpu affinity level
 * @addr: physical resume address
 */
void ve_spc_set_resume_addr(u32 cluster, u32 cpu, u32 addr)
{
	void __iomem *baseaddr;

	if (cluster >= MAX_CLUSTERS)
		return;

	if (cluster_is_a15(cluster))
		baseaddr = info->baseaddr + A15_BX_ADDR0 + (cpu << 2);
	else
		baseaddr = info->baseaddr + A7_BX_ADDR0 + (cpu << 2);

	writel_relaxed(addr, baseaddr);
}

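/*
 * Example: for CPU1 of the A7 cluster the resume address is written at
 * A7_BX_ADDR0 + (1 << 2), i.e. offset 0x7c, since the per-CPU mailboxes
 * are consecutive 32-bit slots starting at each cluster's BX_ADDR0.
 */
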
/**
 * ve_spc_powerdown()
 *
 * Function to enable/disable cluster powerdown. Not protected by locking
 * since it might be used in code paths where normal cacheable locks are not
 * working. Locking must be provided by the caller to ensure atomicity.
 *
 * @cluster: mpidr[15:8] bitfield describing cluster affinity level
 * @enable: if true enables powerdown, if false disables it
 */
void ve_spc_powerdown(u32 cluster, bool enable)
{
	u32 pwdrn_reg;

	if (cluster >= MAX_CLUSTERS)
		return;

	pwdrn_reg = cluster_is_a15(cluster) ? A15_PWRDN_EN : A7_PWRDN_EN;
	writel_relaxed(enable, info->baseaddr + pwdrn_reg);
}

static int ve_spc_get_performance(int cluster, u32 *freq)
{
	struct ve_spc_opp *opps = info->opps[cluster];
	u32 perf_cfg_reg;
	u32 perf;

	perf_cfg_reg = cluster_is_a15(cluster) ? PERF_LVL_A15 : PERF_LVL_A7;

	perf = readl_relaxed(info->baseaddr + perf_cfg_reg);
	if (perf >= info->num_opps[cluster])
		return -EINVAL;

	opps += perf;
	*freq = opps->freq;

	return 0;
}

/* find closest match to given frequency in OPP table */
static int ve_spc_round_performance(int cluster, u32 freq)
{
	int idx, max_opp = info->num_opps[cluster];
	struct ve_spc_opp *opps = info->opps[cluster];
	u32 fmin = 0, fmax = ~0, ftmp;

	freq /= 1000; /* OPP entries in kHz */
	for (idx = 0; idx < max_opp; idx++, opps++) {
		ftmp = opps->freq;
		if (ftmp >= freq) {
			if (ftmp <= fmax)
				fmax = ftmp;
		} else {
			if (ftmp >= fmin)
				fmin = ftmp;
		}
	}
	if (fmax != ~0)
		return fmax * 1000;
	else
		return fmin * 1000;
}

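/*
 * Worked example for ve_spc_round_performance(): with OPP entries at
 * 500000, 600000 and 700000 kHz (hypothetical values), a request for
 * 550000000 Hz is first scaled to 550000 kHz and then rounded up to the
 * closest entry at or above it, returning 600000000 Hz; only when no
 * entry is at or above the request does the largest entry below it win.
 */
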
static int ve_spc_find_performance_index(int cluster, u32 freq)
{
	int idx, max_opp = info->num_opps[cluster];
	struct ve_spc_opp *opps = info->opps[cluster];

	for (idx = 0; idx < max_opp; idx++, opps++)
		if (opps->freq == freq)
			break;
	return (idx == max_opp) ? -EINVAL : idx;
}

static int ve_spc_waitforcompletion(int req_type)
{
	int ret = wait_for_completion_interruptible_timeout(
			&info->done, usecs_to_jiffies(TIMEOUT_US));
	if (ret == 0)
		ret = -ETIMEDOUT;
	else if (ret > 0)
		ret = info->cur_rsp_stat & STAT_COMPLETE(req_type) ? 0 : -EIO;
	return ret;
}

static int ve_spc_set_performance(int cluster, u32 freq)
{
	u32 perf_cfg_reg, perf_stat_reg;
	int ret, perf, req_type;

	if (cluster_is_a15(cluster)) {
		req_type = CA15_DVFS;
		perf_cfg_reg = PERF_LVL_A15;
		perf_stat_reg = PERF_REQ_A15;
	} else {
		req_type = CA7_DVFS;
		perf_cfg_reg = PERF_LVL_A7;
		perf_stat_reg = PERF_REQ_A7;
	}

	perf = ve_spc_find_performance_index(cluster, freq);

	if (perf < 0)
		return perf;

	if (down_timeout(&info->sem, usecs_to_jiffies(TIMEOUT_US)))
		return -ETIME;

	init_completion(&info->done);
	info->cur_rsp_mask = RESPONSE_MASK(req_type);

	writel(perf, info->baseaddr + perf_cfg_reg);
	ret = ve_spc_waitforcompletion(req_type);

	info->cur_rsp_mask = 0;
	up(&info->sem);

	return ret;
}

static int ve_spc_read_sys_cfg(int func, int offset, uint32_t *data)
{
	int ret;

	if (down_timeout(&info->sem, usecs_to_jiffies(TIMEOUT_US)))
		return -ETIME;

	init_completion(&info->done);
	info->cur_rsp_mask = RESPONSE_MASK(SPC_SYS_CFG);

	/* Set the control value */
	writel(SYSCFG_START | func | offset >> 2, info->baseaddr + COMMS);
	ret = ve_spc_waitforcompletion(SPC_SYS_CFG);

	if (ret == 0)
		*data = readl(info->baseaddr + SYSCFG_RDATA);

	info->cur_rsp_mask = 0;
	up(&info->sem);

	return ret;
}

static irqreturn_t ve_spc_irq_handler(int irq, void *data)
{
	struct ve_spc_drvdata *drv_data = data;
	uint32_t status = readl_relaxed(drv_data->baseaddr + PWC_STATUS);

	if (info->cur_rsp_mask & status) {
		info->cur_rsp_stat = status;
		complete(&drv_data->done);
	}

	return IRQ_HANDLED;
}

/*
 *  +--------------------------+
 *  | 31      20 | 19        0 |
 *  +--------------------------+
 *  |   u_volt   |  freq(kHz)  |
 *  +--------------------------+
 */
#define MULT_FACTOR	20
#define VOLT_SHIFT	20
#define FREQ_MASK	(0xFFFFF)
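/*
 * Worked decode of a PERFVAL entry (illustrative register value, not taken
 * from the TC2 documentation): data == 0x3840EA60 yields
 * u_volt == data >> VOLT_SHIFT == 0x384 (900) and
 * freq == (data & FREQ_MASK) * MULT_FACTOR == 0xEA60 * 20 == 1200000 kHz,
 * i.e. 1.2 GHz. As the MULT_FACTOR of 20 implies, the raw frequency field
 * appears to be stored in 20 kHz units.
 */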
static int ve_spc_populate_opps(uint32_t cluster)
{
	uint32_t data = 0, off, ret, idx;
	struct ve_spc_opp *opps;

	opps = kzalloc(sizeof(*opps) * MAX_OPPS, GFP_KERNEL);
	if (!opps)
		return -ENOMEM;

	info->opps[cluster] = opps;

	off = cluster_is_a15(cluster) ? A15_PERFVAL_BASE : A7_PERFVAL_BASE;
	for (idx = 0; idx < MAX_OPPS; idx++, off += 4, opps++) {
		ret = ve_spc_read_sys_cfg(SYSCFG_SCC, off, &data);
		if (!ret) {
			opps->freq = (data & FREQ_MASK) * MULT_FACTOR;
			opps->u_volt = data >> VOLT_SHIFT;
		} else {
			break;
		}
	}
	info->num_opps[cluster] = idx;

	return ret;
}

static int ve_init_opp_table(struct device *cpu_dev)
{
	int cluster = topology_physical_package_id(cpu_dev->id);
	int idx, ret = 0, max_opp = info->num_opps[cluster];
	struct ve_spc_opp *opps = info->opps[cluster];

	for (idx = 0; idx < max_opp; idx++, opps++) {
		ret = dev_pm_opp_add(cpu_dev, opps->freq * 1000, opps->u_volt);
		if (ret) {
			dev_warn(cpu_dev, "failed to add opp %lu %lu\n",
				 opps->freq, opps->u_volt);
			return ret;
		}
	}
	return ret;
}

int __init ve_spc_init(void __iomem *baseaddr, u32 a15_clusid, int irq)
{
	int ret;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		pr_err(SPCLOG "unable to allocate mem\n");
		return -ENOMEM;
	}

	info->baseaddr = baseaddr;
	info->a15_clusid = a15_clusid;

	if (irq <= 0) {
		pr_err(SPCLOG "Invalid IRQ %d\n", irq);
		kfree(info);
		return -EINVAL;
	}

	init_completion(&info->done);

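	/*
	 * Plain read of PWC_STATUS with the result discarded; presumably
	 * this drains any stale status before the IRQ handler is installed
	 * (an assumption based on the code, not on the SPC documentation).
	 */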
	readl_relaxed(info->baseaddr + PWC_STATUS);

	ret = request_irq(irq, ve_spc_irq_handler, IRQF_TRIGGER_HIGH
				| IRQF_ONESHOT, "vexpress-spc", info);
	if (ret) {
		pr_err(SPCLOG "IRQ %d request failed\n", irq);
		kfree(info);
		return -ENODEV;
	}

	sema_init(&info->sem, 1);
	/*
	 * Multi-cluster systems may need this data when non-coherent, during
	 * cluster power-up/power-down. Make sure driver info reaches main
	 * memory.
	 */
	sync_cache_w(info);
	sync_cache_w(&info);

	return 0;
}

struct clk_spc {
	struct clk_hw hw;
	int cluster;
};

#define to_clk_spc(spc) container_of(spc, struct clk_spc, hw)
static unsigned long spc_recalc_rate(struct clk_hw *hw,
		unsigned long parent_rate)
{
	struct clk_spc *spc = to_clk_spc(hw);
	u32 freq;

	if (ve_spc_get_performance(spc->cluster, &freq))
		return -EIO;

	return freq * 1000;
}

static long spc_round_rate(struct clk_hw *hw, unsigned long drate,
		unsigned long *parent_rate)
{
	struct clk_spc *spc = to_clk_spc(hw);

	return ve_spc_round_performance(spc->cluster, drate);
}

static int spc_set_rate(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate)
{
	struct clk_spc *spc = to_clk_spc(hw);

	return ve_spc_set_performance(spc->cluster, rate / 1000);
}

static struct clk_ops clk_spc_ops = {
	.recalc_rate = spc_recalc_rate,
	.round_rate = spc_round_rate,
	.set_rate = spc_set_rate,
};

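/*
 * With these ops the generic clk API maps straight onto the SPC: a cpufreq
 * driver calling clk_set_rate(clk, rate) lands in spc_set_rate(), which
 * converts the rate to kHz and issues the DVFS request through
 * ve_spc_set_performance(); clk_get_rate() reads the current OPP back via
 * spc_recalc_rate() (the registration below sets CLK_GET_RATE_NOCACHE so
 * rates are always re-read from the hardware).
 */
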
static struct clk *ve_spc_clk_register(struct device *cpu_dev)
{
	struct clk_init_data init;
	struct clk_spc *spc;

	spc = kzalloc(sizeof(*spc), GFP_KERNEL);
	if (!spc) {
		pr_err("could not allocate spc clk\n");
		return ERR_PTR(-ENOMEM);
	}

	spc->hw.init = &init;
	spc->cluster = topology_physical_package_id(cpu_dev->id);

	init.name = dev_name(cpu_dev);
	init.ops = &clk_spc_ops;
	init.flags = CLK_IS_ROOT | CLK_GET_RATE_NOCACHE;
	init.num_parents = 0;

	return devm_clk_register(cpu_dev, &spc->hw);
}

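/*
 * Late setup of the per-CPU clocks and OPP tables. Note the ordering
 * assumption: platform code is expected to have called ve_spc_init()
 * earlier in boot; if it has not, info is still NULL and this initcall
 * bails out silently.
 */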
static int __init ve_spc_clk_init(void)
{
	int cpu;
	struct clk *clk;

	if (!info)
		return 0; /* Continue only if SPC is initialised */

	if (ve_spc_populate_opps(0) || ve_spc_populate_opps(1)) {
		pr_err("failed to build OPP table\n");
		return -ENODEV;
	}

	for_each_possible_cpu(cpu) {
		struct device *cpu_dev = get_cpu_device(cpu);
		if (!cpu_dev) {
			pr_warn("failed to get cpu%d device\n", cpu);
			continue;
		}
		clk = ve_spc_clk_register(cpu_dev);
		if (IS_ERR(clk)) {
			pr_warn("failed to register cpu%d clock\n", cpu);
			continue;
		}
		if (clk_register_clkdev(clk, NULL, dev_name(cpu_dev))) {
			pr_warn("failed to register cpu%d clock lookup\n", cpu);
			continue;
		}

		if (ve_init_opp_table(cpu_dev))
			pr_warn("failed to initialise cpu%d opp table\n", cpu);
	}

	return 0;
}
module_init(ve_spc_clk_init);