/*
 * arch/arm/common/mcpm_entry.c -- entry point for multi-cluster PM
 *
 * Created by:  Nicolas Pitre, March 2012
 * Copyright:   (C) 2012-2013  Linaro Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irqflags.h>
#include <linux/cpu_pm.h>

#include <asm/mcpm.h>
#include <asm/cacheflush.h>
#include <asm/idmap.h>
#include <asm/cputype.h>
#include <asm/suspend.h>

/*
 * The public API for this code is documented in arch/arm/include/asm/mcpm.h.
 * For a comprehensive description of the main algorithm used here, please
 * see Documentation/arm/cluster-pm-race-avoidance.txt.
 */

struct sync_struct mcpm_sync;

/*
 * __mcpm_cpu_going_down: Indicates that the cpu is being torn down.
 *    This must be called at the point of committing to teardown of a CPU.
 *    The CPU cache (SCTLR.C bit) is expected to still be active.
 */
static void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster)
{
        mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN;
        sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
}

/*
 * __mcpm_cpu_down: Indicates that cpu teardown is complete and that the
 *    cluster can be torn down without disrupting this CPU.
 *    To avoid deadlocks, this must be called before a CPU is powered down.
 *    The CPU cache (SCTLR.C bit) is expected to be off.
 *    However, the L2 cache might or might not be active.
 */
static void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster)
{
        dmb();
        mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN;
        sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
        sev();
}
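
/*
 * Note: the dmb() in __mcpm_cpu_down() orders the CPU state update after
 * all prior teardown writes, and the sev() wakes any CPU waiting in wfe()
 * inside __mcpm_outbound_enter_critical() below for exactly this kind of
 * state change to become visible.
 */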

/*
 * __mcpm_outbound_leave_critical: Leave the cluster teardown critical section.
 * @state: the final state of the cluster:
 *     CLUSTER_UP: no destructive teardown was done and the cluster has been
 *         restored to the previous state (CPU cache still active); or
 *     CLUSTER_DOWN: the cluster has been torn-down, ready for power-off
 *         (CPU cache disabled, L2 cache either enabled or disabled).
 */
static void __mcpm_outbound_leave_critical(unsigned int cluster, int state)
{
        dmb();
        mcpm_sync.clusters[cluster].cluster = state;
        sync_cache_w(&mcpm_sync.clusters[cluster].cluster);
        sev();
}

/*
 * __mcpm_outbound_enter_critical: Enter the cluster teardown critical section.
 * This function should be called by the last man, after local CPU teardown
 * is complete.  CPU cache expected to be active.
 *
 * Returns:
 *     false: the critical section was not entered because an inbound CPU was
 *         observed, or the cluster is already being set up;
 *     true: the critical section was entered: it is now safe to tear down the
 *         cluster.
 */
static bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster)
{
        unsigned int i;
        struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster];

        /* Warn inbound CPUs that the cluster is being torn down: */
        c->cluster = CLUSTER_GOING_DOWN;
        sync_cache_w(&c->cluster);

        /* Back out if the inbound cluster is already in the critical region: */
        sync_cache_r(&c->inbound);
        if (c->inbound == INBOUND_COMING_UP)
                goto abort;

        /*
         * Wait for all CPUs to get out of the GOING_DOWN state, so that local
         * teardown is complete on each CPU before tearing down the cluster.
         *
         * If any CPU has been woken up again from the DOWN state, then we
         * shouldn't be taking the cluster down at all: abort in that case.
         */
        sync_cache_r(&c->cpus);
        for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++) {
                int cpustate;

                if (i == cpu)
                        continue;

                while (1) {
                        cpustate = c->cpus[i].cpu;
                        if (cpustate != CPU_GOING_DOWN)
                                break;

                        wfe();
                        sync_cache_r(&c->cpus[i].cpu);
                }

                switch (cpustate) {
                case CPU_DOWN:
                        continue;

                default:
                        goto abort;
                }
        }

        return true;

abort:
        __mcpm_outbound_leave_critical(cluster, CLUSTER_UP);
        return false;
}

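/*
 * __mcpm_cluster_state: Fetch the current state of the given cluster,
 * reading it back from memory first since the observing CPU's cache
 * may not be enabled at this point.
 */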
static int __mcpm_cluster_state(unsigned int cluster)
{
        sync_cache_r(&mcpm_sync.clusters[cluster].cluster);
        return mcpm_sync.clusters[cluster].cluster;
}

extern unsigned long mcpm_entry_vectors[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];

void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr)
{
        unsigned long val = ptr ? virt_to_phys(ptr) : 0;
        mcpm_entry_vectors[cluster][cpu] = val;
        sync_cache_w(&mcpm_entry_vectors[cluster][cpu]);
}
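
/*
 * Typical usage (illustrative): the MCPM SMP bringup code points the
 * vector at the kernel's secondary entry point before powering a CPU up,
 * e.g. mcpm_set_entry_vector(cpu, cluster, secondary_startup), and the
 * loopback code below points it at cpu_resume.  A NULL ptr clears the
 * vector, so the CPU will keep waiting in the low-level entry gate.
 */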

extern unsigned long mcpm_entry_early_pokes[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER][2];

void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
                         unsigned long poke_phys_addr, unsigned long poke_val)
{
        unsigned long *poke = &mcpm_entry_early_pokes[cluster][cpu][0];
        poke[0] = poke_phys_addr;
        poke[1] = poke_val;
        __sync_cache_range_w(poke, 2 * sizeof(*poke));
}
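
/*
 * Note (informational): an early poke is a single physical address/value
 * pair that the low-level entry code writes very early on the way into
 * the kernel, before the MMU is enabled.  Platforms can use this to ack
 * or dismiss a hardware wake-up mechanism (a mailbox or wake-up register,
 * for instance) without needing any C code to run first.
 */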

static const struct mcpm_platform_ops *platform_ops;

int __init mcpm_platform_register(const struct mcpm_platform_ops *ops)
{
        if (platform_ops)
                return -EBUSY;
        platform_ops = ops;
        return 0;
}
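
/*
 * A minimal sketch of how a platform backend might hook in, assuming a
 * hypothetical "foo" platform.  The callback fields below are the ones
 * this file actually invokes; the foo_* functions are made up for
 * illustration:
 *
 *      static const struct mcpm_platform_ops foo_pm_ops = {
 *              .cpu_powerup               = foo_cpu_powerup,
 *              .cluster_powerup           = foo_cluster_powerup,
 *              .cpu_powerdown_prepare     = foo_cpu_powerdown_prepare,
 *              .cluster_powerdown_prepare = foo_cluster_powerdown_prepare,
 *              .cpu_cache_disable         = foo_cpu_cache_disable,
 *              .cluster_cache_disable     = foo_cluster_cache_disable,
 *              .wait_for_powerdown        = foo_wait_for_powerdown,
 *      };
 *
 *      static int __init foo_mcpm_init(void)
 *      {
 *              return mcpm_platform_register(&foo_pm_ops);
 *      }
 */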

bool mcpm_is_available(void)
{
        return (platform_ops) ? true : false;
}

/*
 * We can't use regular spinlocks. In the switcher case, it is possible
 * for an outbound CPU to call power_down() after its inbound counterpart
 * is already live using the same logical CPU number which trips lockdep
 * debugging.
 */
static arch_spinlock_t mcpm_lock = __ARCH_SPIN_LOCK_UNLOCKED;

static int mcpm_cpu_use_count[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];

static inline bool mcpm_cluster_unused(unsigned int cluster)
{
        int i, cnt;
        for (i = 0, cnt = 0; i < MAX_CPUS_PER_CLUSTER; i++)
                cnt |= mcpm_cpu_use_count[cluster][i];
        return !cnt;
}
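
/*
 * Note: ORing the per-CPU use counts together is sufficient here; the
 * counts are only ever 0, 1 or 2, so any non-zero accumulated value
 * means at least one CPU in the cluster is (or is about to be) in use.
 */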

int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
{
        bool cpu_is_down, cluster_is_down;
        int ret = 0;

        pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
        if (!platform_ops)
                return -EUNATCH; /* try not to shadow power_up errors */
        might_sleep();

        /*
         * Since this is called with IRQs enabled, and no arch_spin_lock_irq
         * variant exists, we need to disable IRQs manually here.
         */
        local_irq_disable();
        arch_spin_lock(&mcpm_lock);

        cpu_is_down = !mcpm_cpu_use_count[cluster][cpu];
        cluster_is_down = mcpm_cluster_unused(cluster);

        mcpm_cpu_use_count[cluster][cpu]++;
        /*
         * The only possible values are:
         * 0 = CPU down
         * 1 = CPU (still) up
         * 2 = CPU requested to be up before it had a chance
         *     to actually make itself down.
         * Any other value is a bug.
         */
        BUG_ON(mcpm_cpu_use_count[cluster][cpu] != 1 &&
               mcpm_cpu_use_count[cluster][cpu] != 2);

        if (cluster_is_down)
                ret = platform_ops->cluster_powerup(cluster);
        if (cpu_is_down && !ret)
                ret = platform_ops->cpu_powerup(cpu, cluster);

        arch_spin_unlock(&mcpm_lock);
        local_irq_enable();
        return ret;
}
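
/*
 * Note the ordering above: when the whole cluster is down, the cluster
 * is powered up before the individual CPU, and a cluster_powerup()
 * failure suppresses the cpu_powerup() call so that its error is
 * reported back to the caller.
 */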

typedef void (*phys_reset_t)(unsigned long);

void mcpm_cpu_power_down(void)
{
        unsigned int mpidr, cpu, cluster;
        bool cpu_going_down, last_man;
        phys_reset_t phys_reset;

        mpidr = read_cpuid_mpidr();
        cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
        pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
        if (WARN_ON_ONCE(!platform_ops))
                return;
        BUG_ON(!irqs_disabled());

        setup_mm_for_reboot();

        __mcpm_cpu_going_down(cpu, cluster);
        arch_spin_lock(&mcpm_lock);
        BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);

        mcpm_cpu_use_count[cluster][cpu]--;
        BUG_ON(mcpm_cpu_use_count[cluster][cpu] != 0 &&
               mcpm_cpu_use_count[cluster][cpu] != 1);
        cpu_going_down = !mcpm_cpu_use_count[cluster][cpu];
        last_man = mcpm_cluster_unused(cluster);

        if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
                platform_ops->cpu_powerdown_prepare(cpu, cluster);
                platform_ops->cluster_powerdown_prepare(cluster);
                arch_spin_unlock(&mcpm_lock);
                platform_ops->cluster_cache_disable();
                __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
        } else {
                if (cpu_going_down)
                        platform_ops->cpu_powerdown_prepare(cpu, cluster);
                arch_spin_unlock(&mcpm_lock);
                /*
                 * If cpu_going_down is false here, that means a power_up
                 * request raced ahead of us.  Even if we do not want to
                 * shut this CPU down, the caller still expects execution
                 * to return through the system resume entry path, like
                 * when the WFI is aborted due to a new IRQ or the like.
                 * So let's continue with cache cleaning in all cases.
                 */
                platform_ops->cpu_cache_disable();
        }

        __mcpm_cpu_down(cpu, cluster);

        /* Now we are prepared for power-down, do it: */
        if (cpu_going_down)
                wfi();

        /*
         * It is possible for a power_up request to happen concurrently
         * with a power_down request for the same CPU. In this case the
         * CPU might not be able to actually enter a powered down state
         * with the WFI instruction if the power_up request has removed
         * the required reset condition.  We must perform a re-entry in
         * the kernel as if the power_up method had just deasserted reset
         * on the CPU.
         */
        phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
        phys_reset(virt_to_phys(mcpm_entry_point));

        /* should never get here */
        BUG();
}
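
/*
 * Note: mcpm_cpu_power_down() never returns to its caller.  Either the
 * CPU really powers off in wfi() and later re-enters the kernel through
 * mcpm_entry_point, or the wfi() falls through (e.g. because a racing
 * power_up removed the reset condition) and the CPU resets itself into
 * mcpm_entry_point via cpu_reset above.
 */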

int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster)
{
        int ret;

        if (WARN_ON_ONCE(!platform_ops || !platform_ops->wait_for_powerdown))
                return -EUNATCH;

        ret = platform_ops->wait_for_powerdown(cpu, cluster);
        if (ret)
                pr_warn("%s: cpu %u, cluster %u failed to power down (%d)\n",
                        __func__, cpu, cluster, ret);

        return ret;
}

void mcpm_cpu_suspend(void)
{
        if (WARN_ON_ONCE(!platform_ops))
                return;

        /* Some platforms might have to enable special resume modes, etc. */
        if (platform_ops->cpu_suspend_prepare) {
                unsigned int mpidr = read_cpuid_mpidr();
                unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
                unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
                arch_spin_lock(&mcpm_lock);
                platform_ops->cpu_suspend_prepare(cpu, cluster);
                arch_spin_unlock(&mcpm_lock);
        }
        mcpm_cpu_power_down();
}
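
/*
 * Note: suspend is implemented as a plain power-down preceded by an
 * optional platform hook; callers are expected to have pointed the
 * entry vector at their resume handler (typically cpu_resume)
 * beforehand, since the wakeup re-enters through mcpm_entry_point.
 */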

int mcpm_cpu_powered_up(void)
{
        unsigned int mpidr, cpu, cluster;
        bool cpu_was_down, first_man;
        unsigned long flags;

        if (!platform_ops)
                return -EUNATCH;

        mpidr = read_cpuid_mpidr();
        cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
        local_irq_save(flags);
        arch_spin_lock(&mcpm_lock);

        cpu_was_down = !mcpm_cpu_use_count[cluster][cpu];
        first_man = mcpm_cluster_unused(cluster);

        if (first_man && platform_ops->cluster_is_up)
                platform_ops->cluster_is_up(cluster);
        if (cpu_was_down)
                mcpm_cpu_use_count[cluster][cpu] = 1;
        if (platform_ops->cpu_is_up)
                platform_ops->cpu_is_up(cpu, cluster);

        arch_spin_unlock(&mcpm_lock);
        local_irq_restore(flags);

        return 0;
}

#ifdef CONFIG_ARM_CPU_SUSPEND

static int __init nocache_trampoline(unsigned long _arg)
{
        void (*cache_disable)(void) = (void *)_arg;
        unsigned int mpidr = read_cpuid_mpidr();
        unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
        phys_reset_t phys_reset;

        mcpm_set_entry_vector(cpu, cluster, cpu_resume);
        setup_mm_for_reboot();

        __mcpm_cpu_going_down(cpu, cluster);
        BUG_ON(!__mcpm_outbound_enter_critical(cpu, cluster));
        cache_disable();
        __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
        __mcpm_cpu_down(cpu, cluster);

        phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
        phys_reset(virt_to_phys(mcpm_entry_point));
        BUG();
}
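
/*
 * Note: nocache_trampoline() never returns; its int return type only
 * exists to match the prototype that cpu_suspend() expects.  Control
 * comes back through mcpm_entry_point and then cpu_resume, which is
 * why the entry vector is set at the top of the function.
 */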

int __init mcpm_loopback(void (*cache_disable)(void))
{
        int ret;

        /*
         * We're going to soft-restart the current CPU through the
         * low-level MCPM code by leveraging the suspend/resume
         * infrastructure. Let's play it safe by using cpu_pm_enter()
         * in case the CPU init code path resets the VFP or similar.
         */
        local_irq_disable();
        local_fiq_disable();
        ret = cpu_pm_enter();
        if (!ret) {
                ret = cpu_suspend((unsigned long)cache_disable, nocache_trampoline);
                cpu_pm_exit();
        }
        local_fiq_enable();
        local_irq_enable();
        if (ret)
                pr_err("%s returned %d\n", __func__, ret);
        return ret;
}
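
/*
 * Informational: platforms typically call mcpm_loopback() once at boot,
 * from the CPU that did the early bring-up, so that this CPU goes
 * through the same low-level MCPM entry path as every other CPU and
 * ends up in a state consistent with them (caches cleaned and disabled
 * along the way by the supplied cache_disable callback).
 */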

#endif

extern unsigned long mcpm_power_up_setup_phys;

int __init mcpm_sync_init(
        void (*power_up_setup)(unsigned int affinity_level))
{
        unsigned int i, j, mpidr, this_cluster;

        BUILD_BUG_ON(MCPM_SYNC_CLUSTER_SIZE * MAX_NR_CLUSTERS != sizeof mcpm_sync);
        BUG_ON((unsigned long)&mcpm_sync & (__CACHE_WRITEBACK_GRANULE - 1));

        /*
         * Set initial CPU and cluster states.
         * Only one cluster is assumed to be active at this point.
         */
        for (i = 0; i < MAX_NR_CLUSTERS; i++) {
                mcpm_sync.clusters[i].cluster = CLUSTER_DOWN;
                mcpm_sync.clusters[i].inbound = INBOUND_NOT_COMING_UP;
                for (j = 0; j < MAX_CPUS_PER_CLUSTER; j++)
                        mcpm_sync.clusters[i].cpus[j].cpu = CPU_DOWN;
        }
        mpidr = read_cpuid_mpidr();
        this_cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
        for_each_online_cpu(i) {
                mcpm_cpu_use_count[this_cluster][i] = 1;
                mcpm_sync.clusters[this_cluster].cpus[i].cpu = CPU_UP;
        }
        mcpm_sync.clusters[this_cluster].cluster = CLUSTER_UP;
        sync_cache_w(&mcpm_sync);

        if (power_up_setup) {
                mcpm_power_up_setup_phys = virt_to_phys(power_up_setup);
                sync_cache_w(&mcpm_power_up_setup_phys);
        }

        return 0;
}
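
/*
 * Informational sketch: a platform init sequence would wire things up
 * roughly as follows (the foo_* names are hypothetical):
 *
 *      static int __init foo_mcpm_init(void)
 *      {
 *              int ret = mcpm_platform_register(&foo_pm_ops);
 *              if (!ret)
 *                      ret = mcpm_sync_init(foo_power_up_setup);
 *              return ret;
 *      }
 *
 * where foo_power_up_setup is the assembly-callable hook run at each
 * affinity level on the way up, before the MMU is enabled.
 */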