/*
 * Versatile Express Serial Power Controller (SPC) support
 *
 * Copyright (C) 2013 ARM Ltd.
 *
 * Authors: Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
 *          Achin Gupta           <achin.gupta@arm.com>
 *          Lorenzo Pieralisi     <lorenzo.pieralisi@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

f7cd2d83 | 20 | #include <linux/delay.h> |
63819cb1 | 21 | #include <linux/err.h> |
f7cd2d83 | 22 | #include <linux/interrupt.h> |
63819cb1 | 23 | #include <linux/io.h> |
f7cd2d83 | 24 | #include <linux/pm_opp.h> |
63819cb1 | 25 | #include <linux/slab.h> |
f7cd2d83 | 26 | #include <linux/semaphore.h> |
63819cb1 LP |
27 | |
28 | #include <asm/cacheflush.h> | |
29 | ||
30 | #define SPCLOG "vexpress-spc: " | |
31 | ||
f7cd2d83 SH |
32 | #define PERF_LVL_A15 0x00 |
33 | #define PERF_REQ_A15 0x04 | |
34 | #define PERF_LVL_A7 0x08 | |
35 | #define PERF_REQ_A7 0x0c | |
36 | #define COMMS 0x10 | |
37 | #define COMMS_REQ 0x14 | |
38 | #define PWC_STATUS 0x18 | |
39 | #define PWC_FLAG 0x1c | |
40 | ||
63819cb1 LP |
41 | /* SPC wake-up IRQs status and mask */ |
42 | #define WAKE_INT_MASK 0x24 | |
43 | #define WAKE_INT_RAW 0x28 | |
44 | #define WAKE_INT_STAT 0x2c | |
45 | /* SPC power down registers */ | |
46 | #define A15_PWRDN_EN 0x30 | |
47 | #define A7_PWRDN_EN 0x34 | |
48 | /* SPC per-CPU mailboxes */ | |
49 | #define A15_BX_ADDR0 0x68 | |
50 | #define A7_BX_ADDR0 0x78 | |
51 | ||
f7cd2d83 SH |
52 | /* SPC system config interface registers */ |
53 | #define SYSCFG_WDATA 0x70 | |
54 | #define SYSCFG_RDATA 0x74 | |
55 | ||
56 | /* A15/A7 OPP virtual register base */ | |
57 | #define A15_PERFVAL_BASE 0xC10 | |
58 | #define A7_PERFVAL_BASE 0xC30 | |
59 | ||
60 | /* Config interface control bits */ | |
61 | #define SYSCFG_START (1 << 31) | |
62 | #define SYSCFG_SCC (6 << 20) | |
63 | #define SYSCFG_STAT (14 << 20) | |
64 | ||
63819cb1 LP |
65 | /* wake-up interrupt masks */ |
66 | #define GBL_WAKEUP_INT_MSK (0x3 << 10) | |
67 | ||
68 | /* TC2 static dual-cluster configuration */ | |
69 | #define MAX_CLUSTERS 2 | |
70 | ||
f7cd2d83 SH |
71 | /* |
72 | * Even though the SPC takes max 3-5 ms to complete any OPP/COMMS | |
73 | * operation, the operation could start just before jiffie is about | |
74 | * to be incremented. So setting timeout value of 20ms = 2jiffies@100Hz | |
75 | */ | |
76 | #define TIMEOUT_US 20000 | |
77 | ||
78 | #define MAX_OPPS 8 | |
79 | #define CA15_DVFS 0 | |
80 | #define CA7_DVFS 1 | |
81 | #define SPC_SYS_CFG 2 | |
82 | #define STAT_COMPLETE(type) ((1 << 0) << (type << 2)) | |
83 | #define STAT_ERR(type) ((1 << 1) << (type << 2)) | |
84 | #define RESPONSE_MASK(type) (STAT_COMPLETE(type) | STAT_ERR(type)) | |
85 | ||
86 | struct ve_spc_opp { | |
87 | unsigned long freq; | |
88 | unsigned long u_volt; | |
89 | }; | |
90 | ||
63819cb1 LP |
91 | struct ve_spc_drvdata { |
92 | void __iomem *baseaddr; | |
93 | /* | |
94 | * A15s cluster identifier | |
95 | * It corresponds to A15 processors MPIDR[15:8] bitfield | |
96 | */ | |
97 | u32 a15_clusid; | |
f7cd2d83 SH |
98 | uint32_t cur_rsp_mask; |
99 | uint32_t cur_rsp_stat; | |
100 | struct semaphore sem; | |
101 | struct completion done; | |
102 | struct ve_spc_opp *opps[MAX_CLUSTERS]; | |
103 | int num_opps[MAX_CLUSTERS]; | |
63819cb1 LP |
104 | }; |
105 | ||
106 | static struct ve_spc_drvdata *info; | |
107 | ||
108 | static inline bool cluster_is_a15(u32 cluster) | |
109 | { | |
110 | return cluster == info->a15_clusid; | |
111 | } | |
112 | ||
113 | /** | |
114 | * ve_spc_global_wakeup_irq() | |
115 | * | |
116 | * Function to set/clear global wakeup IRQs. Not protected by locking since | |
117 | * it might be used in code paths where normal cacheable locks are not | |
118 | * working. Locking must be provided by the caller to ensure atomicity. | |
119 | * | |
120 | * @set: if true, global wake-up IRQs are set, if false they are cleared | |
121 | */ | |
122 | void ve_spc_global_wakeup_irq(bool set) | |
123 | { | |
124 | u32 reg; | |
125 | ||
126 | reg = readl_relaxed(info->baseaddr + WAKE_INT_MASK); | |
127 | ||
128 | if (set) | |
129 | reg |= GBL_WAKEUP_INT_MSK; | |
130 | else | |
131 | reg &= ~GBL_WAKEUP_INT_MSK; | |
132 | ||
133 | writel_relaxed(reg, info->baseaddr + WAKE_INT_MASK); | |
134 | } | |
135 | ||
136 | /** | |
137 | * ve_spc_cpu_wakeup_irq() | |
138 | * | |
139 | * Function to set/clear per-CPU wake-up IRQs. Not protected by locking since | |
140 | * it might be used in code paths where normal cacheable locks are not | |
141 | * working. Locking must be provided by the caller to ensure atomicity. | |
142 | * | |
143 | * @cluster: mpidr[15:8] bitfield describing cluster affinity level | |
144 | * @cpu: mpidr[7:0] bitfield describing cpu affinity level | |
145 | * @set: if true, wake-up IRQs are set, if false they are cleared | |
146 | */ | |
147 | void ve_spc_cpu_wakeup_irq(u32 cluster, u32 cpu, bool set) | |
148 | { | |
149 | u32 mask, reg; | |
150 | ||
151 | if (cluster >= MAX_CLUSTERS) | |
152 | return; | |
153 | ||
154 | mask = 1 << cpu; | |
155 | ||
156 | if (!cluster_is_a15(cluster)) | |
157 | mask <<= 4; | |
158 | ||
159 | reg = readl_relaxed(info->baseaddr + WAKE_INT_MASK); | |
160 | ||
161 | if (set) | |
162 | reg |= mask; | |
163 | else | |
164 | reg &= ~mask; | |
165 | ||
166 | writel_relaxed(reg, info->baseaddr + WAKE_INT_MASK); | |
167 | } | |
168 | ||
169 | /** | |
170 | * ve_spc_set_resume_addr() - set the jump address used for warm boot | |
171 | * | |
172 | * @cluster: mpidr[15:8] bitfield describing cluster affinity level | |
173 | * @cpu: mpidr[7:0] bitfield describing cpu affinity level | |
174 | * @addr: physical resume address | |
175 | */ | |
176 | void ve_spc_set_resume_addr(u32 cluster, u32 cpu, u32 addr) | |
177 | { | |
178 | void __iomem *baseaddr; | |
179 | ||
180 | if (cluster >= MAX_CLUSTERS) | |
181 | return; | |
182 | ||
183 | if (cluster_is_a15(cluster)) | |
184 | baseaddr = info->baseaddr + A15_BX_ADDR0 + (cpu << 2); | |
185 | else | |
186 | baseaddr = info->baseaddr + A7_BX_ADDR0 + (cpu << 2); | |
187 | ||
188 | writel_relaxed(addr, baseaddr); | |
189 | } | |
190 | ||
191 | /** | |
192 | * ve_spc_powerdown() | |
193 | * | |
194 | * Function to enable/disable cluster powerdown. Not protected by locking | |
195 | * since it might be used in code paths where normal cacheable locks are not | |
196 | * working. Locking must be provided by the caller to ensure atomicity. | |
197 | * | |
198 | * @cluster: mpidr[15:8] bitfield describing cluster affinity level | |
199 | * @enable: if true enables powerdown, if false disables it | |
200 | */ | |
201 | void ve_spc_powerdown(u32 cluster, bool enable) | |
202 | { | |
203 | u32 pwdrn_reg; | |
204 | ||
205 | if (cluster >= MAX_CLUSTERS) | |
206 | return; | |
207 | ||
208 | pwdrn_reg = cluster_is_a15(cluster) ? A15_PWRDN_EN : A7_PWRDN_EN; | |
209 | writel_relaxed(enable, info->baseaddr + pwdrn_reg); | |
210 | } | |
211 | ||
f7cd2d83 SH |
212 | static int ve_spc_get_performance(int cluster, u32 *freq) |
213 | { | |
214 | struct ve_spc_opp *opps = info->opps[cluster]; | |
215 | u32 perf_cfg_reg = 0; | |
216 | u32 perf; | |
217 | ||
218 | perf_cfg_reg = cluster_is_a15(cluster) ? PERF_LVL_A15 : PERF_LVL_A7; | |
219 | ||
220 | perf = readl_relaxed(info->baseaddr + perf_cfg_reg); | |
221 | if (perf >= info->num_opps[cluster]) | |
222 | return -EINVAL; | |
223 | ||
224 | opps += perf; | |
225 | *freq = opps->freq; | |
226 | ||
227 | return 0; | |
228 | } | |
229 | ||
230 | /* find closest match to given frequency in OPP table */ | |
231 | static int ve_spc_round_performance(int cluster, u32 freq) | |
232 | { | |
233 | int idx, max_opp = info->num_opps[cluster]; | |
234 | struct ve_spc_opp *opps = info->opps[cluster]; | |
235 | u32 fmin = 0, fmax = ~0, ftmp; | |
236 | ||
237 | freq /= 1000; /* OPP entries in kHz */ | |
238 | for (idx = 0; idx < max_opp; idx++, opps++) { | |
239 | ftmp = opps->freq; | |
240 | if (ftmp >= freq) { | |
241 | if (ftmp <= fmax) | |
242 | fmax = ftmp; | |
243 | } else { | |
244 | if (ftmp >= fmin) | |
245 | fmin = ftmp; | |
246 | } | |
247 | } | |
248 | if (fmax != ~0) | |
249 | return fmax * 1000; | |
250 | else | |
251 | return fmin * 1000; | |
252 | } | |
253 | ||
254 | static int ve_spc_find_performance_index(int cluster, u32 freq) | |
255 | { | |
256 | int idx, max_opp = info->num_opps[cluster]; | |
257 | struct ve_spc_opp *opps = info->opps[cluster]; | |
258 | ||
259 | for (idx = 0; idx < max_opp; idx++, opps++) | |
260 | if (opps->freq == freq) | |
261 | break; | |
262 | return (idx == max_opp) ? -EINVAL : idx; | |
263 | } | |
264 | ||
265 | static int ve_spc_waitforcompletion(int req_type) | |
266 | { | |
267 | int ret = wait_for_completion_interruptible_timeout( | |
268 | &info->done, usecs_to_jiffies(TIMEOUT_US)); | |
269 | if (ret == 0) | |
270 | ret = -ETIMEDOUT; | |
271 | else if (ret > 0) | |
272 | ret = info->cur_rsp_stat & STAT_COMPLETE(req_type) ? 0 : -EIO; | |
273 | return ret; | |
274 | } | |
275 | ||
276 | static int ve_spc_set_performance(int cluster, u32 freq) | |
277 | { | |
278 | u32 perf_cfg_reg, perf_stat_reg; | |
279 | int ret, perf, req_type; | |
280 | ||
281 | if (cluster_is_a15(cluster)) { | |
282 | req_type = CA15_DVFS; | |
283 | perf_cfg_reg = PERF_LVL_A15; | |
284 | perf_stat_reg = PERF_REQ_A15; | |
285 | } else { | |
286 | req_type = CA7_DVFS; | |
287 | perf_cfg_reg = PERF_LVL_A7; | |
288 | perf_stat_reg = PERF_REQ_A7; | |
289 | } | |
290 | ||
291 | perf = ve_spc_find_performance_index(cluster, freq); | |
292 | ||
293 | if (perf < 0) | |
294 | return perf; | |
295 | ||
296 | if (down_timeout(&info->sem, usecs_to_jiffies(TIMEOUT_US))) | |
297 | return -ETIME; | |
298 | ||
299 | init_completion(&info->done); | |
300 | info->cur_rsp_mask = RESPONSE_MASK(req_type); | |
301 | ||
302 | writel(perf, info->baseaddr + perf_cfg_reg); | |
303 | ret = ve_spc_waitforcompletion(req_type); | |
304 | ||
305 | info->cur_rsp_mask = 0; | |
306 | up(&info->sem); | |
307 | ||
308 | return ret; | |
309 | } | |
310 | ||
311 | static int ve_spc_read_sys_cfg(int func, int offset, uint32_t *data) | |
312 | { | |
313 | int ret; | |
314 | ||
315 | if (down_timeout(&info->sem, usecs_to_jiffies(TIMEOUT_US))) | |
316 | return -ETIME; | |
317 | ||
318 | init_completion(&info->done); | |
319 | info->cur_rsp_mask = RESPONSE_MASK(SPC_SYS_CFG); | |
320 | ||
321 | /* Set the control value */ | |
322 | writel(SYSCFG_START | func | offset >> 2, info->baseaddr + COMMS); | |
323 | ret = ve_spc_waitforcompletion(SPC_SYS_CFG); | |
324 | ||
325 | if (ret == 0) | |
326 | *data = readl(info->baseaddr + SYSCFG_RDATA); | |
327 | ||
328 | info->cur_rsp_mask = 0; | |
329 | up(&info->sem); | |
330 | ||
331 | return ret; | |
332 | } | |
333 | ||
334 | static irqreturn_t ve_spc_irq_handler(int irq, void *data) | |
335 | { | |
336 | struct ve_spc_drvdata *drv_data = data; | |
337 | uint32_t status = readl_relaxed(drv_data->baseaddr + PWC_STATUS); | |
338 | ||
339 | if (info->cur_rsp_mask & status) { | |
340 | info->cur_rsp_stat = status; | |
341 | complete(&drv_data->done); | |
342 | } | |
343 | ||
344 | return IRQ_HANDLED; | |
345 | } | |
346 | ||
347 | /* | |
348 | * +--------------------------+ | |
349 | * | 31 20 | 19 0 | | |
350 | * +--------------------------+ | |
351 | * | u_volt | freq(kHz) | | |
352 | * +--------------------------+ | |
353 | */ | |
354 | #define MULT_FACTOR 20 | |
355 | #define VOLT_SHIFT 20 | |
356 | #define FREQ_MASK (0xFFFFF) | |
357 | static int ve_spc_populate_opps(uint32_t cluster) | |
358 | { | |
359 | uint32_t data = 0, off, ret, idx; | |
360 | struct ve_spc_opp *opps; | |
361 | ||
362 | opps = kzalloc(sizeof(*opps) * MAX_OPPS, GFP_KERNEL); | |
363 | if (!opps) | |
364 | return -ENOMEM; | |
365 | ||
366 | info->opps[cluster] = opps; | |
367 | ||
368 | off = cluster_is_a15(cluster) ? A15_PERFVAL_BASE : A7_PERFVAL_BASE; | |
369 | for (idx = 0; idx < MAX_OPPS; idx++, off += 4, opps++) { | |
370 | ret = ve_spc_read_sys_cfg(SYSCFG_SCC, off, &data); | |
371 | if (!ret) { | |
372 | opps->freq = (data & FREQ_MASK) * MULT_FACTOR; | |
373 | opps->u_volt = data >> VOLT_SHIFT; | |
374 | } else { | |
375 | break; | |
376 | } | |
377 | } | |
378 | info->num_opps[cluster] = idx; | |
379 | ||
380 | return ret; | |
381 | } | |
382 | ||
383 | static int ve_init_opp_table(struct device *cpu_dev) | |
384 | { | |
385 | int cluster = topology_physical_package_id(cpu_dev->id); | |
386 | int idx, ret = 0, max_opp = info->num_opps[cluster]; | |
387 | struct ve_spc_opp *opps = info->opps[cluster]; | |
388 | ||
389 | for (idx = 0; idx < max_opp; idx++, opps++) { | |
390 | ret = dev_pm_opp_add(cpu_dev, opps->freq * 1000, opps->u_volt); | |
391 | if (ret) { | |
392 | dev_warn(cpu_dev, "failed to add opp %lu %lu\n", | |
393 | opps->freq, opps->u_volt); | |
394 | return ret; | |
395 | } | |
396 | } | |
397 | return ret; | |
398 | } | |
399 | ||
400 | int __init ve_spc_init(void __iomem *baseaddr, u32 a15_clusid, int irq) | |
63819cb1 | 401 | { |
f7cd2d83 | 402 | int ret; |
63819cb1 LP |
403 | info = kzalloc(sizeof(*info), GFP_KERNEL); |
404 | if (!info) { | |
405 | pr_err(SPCLOG "unable to allocate mem\n"); | |
406 | return -ENOMEM; | |
407 | } | |
408 | ||
409 | info->baseaddr = baseaddr; | |
410 | info->a15_clusid = a15_clusid; | |
411 | ||
f7cd2d83 SH |
412 | if (irq <= 0) { |
413 | pr_err(SPCLOG "Invalid IRQ %d\n", irq); | |
414 | kfree(info); | |
415 | return -EINVAL; | |
416 | } | |
417 | ||
418 | init_completion(&info->done); | |
419 | ||
420 | readl_relaxed(info->baseaddr + PWC_STATUS); | |
421 | ||
422 | ret = request_irq(irq, ve_spc_irq_handler, IRQF_TRIGGER_HIGH | |
423 | | IRQF_ONESHOT, "vexpress-spc", info); | |
424 | if (ret) { | |
425 | pr_err(SPCLOG "IRQ %d request failed\n", irq); | |
426 | kfree(info); | |
427 | return -ENODEV; | |
428 | } | |
429 | ||
430 | sema_init(&info->sem, 1); | |
63819cb1 LP |
431 | /* |
432 | * Multi-cluster systems may need this data when non-coherent, during | |
433 | * cluster power-up/power-down. Make sure driver info reaches main | |
434 | * memory. | |
435 | */ | |
436 | sync_cache_w(info); | |
437 | sync_cache_w(&info); | |
438 | ||
439 | return 0; | |
440 | } |