/*
 * arch/arm/common/bL_switcher.c -- big.LITTLE cluster switcher core driver
 *
 * Created by:  Nicolas Pitre, March 2012
 * Copyright:   (C) 2012-2013  Linaro Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/atomic.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <uapi/linux/sched/types.h>
#include <linux/interrupt.h>
#include <linux/cpu_pm.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/time.h>
#include <linux/clockchips.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/notifier.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/moduleparam.h>

#include <asm/smp_plat.h>
#include <asm/cputype.h>
#include <asm/suspend.h>
#include <asm/mcpm.h>
#include <asm/bL_switcher.h>

#define CREATE_TRACE_POINTS
#include <trace/events/power_cpu_migrate.h>

/*
 * Use our own MPIDR accessors as the generic ones in asm/cputype.h have
 * __attribute_const__ and we don't want the compiler to assume any
 * constness here as the value _does_ change along some code paths.
 */

static int read_mpidr(void)
{
        unsigned int id;
        asm volatile ("mrc p15, 0, %0, c0, c0, 5" : "=r" (id));
        return id & MPIDR_HWID_BITMASK;
}
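
/*
 * Reminder on the MPIDR layout used throughout this file: on ARMv7,
 * affinity level 0 (bits [7:0]) is the CPU number within a cluster and
 * affinity level 1 (bits [15:8]) is the cluster number.  For example,
 * MPIDR 0x100 decodes to CPU 0 of cluster 1 via
 * MPIDR_AFFINITY_LEVEL(mpidr, 0) and MPIDR_AFFINITY_LEVEL(mpidr, 1).
 */
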
/*
 * bL switcher core code.
 */

static void bL_do_switch(void *_arg)
{
        unsigned ib_mpidr, ib_cpu, ib_cluster;
        long volatile handshake, **handshake_ptr = _arg;

        pr_debug("%s\n", __func__);

        ib_mpidr = cpu_logical_map(smp_processor_id());
        ib_cpu = MPIDR_AFFINITY_LEVEL(ib_mpidr, 0);
        ib_cluster = MPIDR_AFFINITY_LEVEL(ib_mpidr, 1);

        /* Advertise our handshake location */
        if (handshake_ptr) {
                handshake = 0;
                *handshake_ptr = &handshake;
        } else
                handshake = -1;

        /*
         * Our state has been saved at this point.  Let's release our
         * inbound CPU.
         */
        mcpm_set_entry_vector(ib_cpu, ib_cluster, cpu_resume);
        sev();

        /*
         * From this point, we must assume that our counterpart CPU might
         * have taken over in its parallel world already, as if execution
         * just returned from cpu_suspend().  It is therefore important to
         * be very careful not to make any change the other guy is not
         * expecting.  This is why we need stack isolation.
         *
         * Fancy undercover tasks could be performed here.  For now
         * we have none.
         */

        /*
         * Let's wait until our inbound is alive.
         */
        while (!handshake) {
                wfe();
                smp_mb();
        }

        /* Let's put ourselves down. */
        mcpm_cpu_power_down();

        /* should never get here */
        BUG();
}
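
/*
 * Note on the handshake above: the handshake word lives in the outbound
 * CPU's isolated stack area and its address is published through
 * *handshake_ptr.  The outbound CPU spins in WFE until the inbound CPU,
 * back in bL_switch_to() after cpu_resume, writes 1 to it and issues
 * dsb_sev().  Only then does the outbound CPU power itself down, which
 * guarantees the inbound side is alive before power is cut.
 */
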
/*
 * Stack isolation.  To ensure 'current' remains valid, we just use another
 * piece of our thread's stack space which should be fairly lightly used.
 * The selected area starts just above the thread_info structure located
 * at the very bottom of the stack, aligned to a cache line, and indexed
 * with the cluster number.
 */
#define STACK_SIZE 512
extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
static int bL_switchpoint(unsigned long _arg)
{
        unsigned int mpidr = read_mpidr();
        unsigned int clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
        void *stack = current_thread_info() + 1;
        stack = PTR_ALIGN(stack, L1_CACHE_BYTES);
        stack += clusterid * STACK_SIZE + STACK_SIZE;
        call_with_stack(bL_do_switch, (void *)_arg, stack);
        BUG();
}
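
/*
 * The stack arithmetic above selects the top of a per-cluster slot:
 * with base = PTR_ALIGN(current_thread_info() + 1, L1_CACHE_BYTES),
 * cluster 0 gets sp = base + 512 and cluster 1 gets sp = base + 1024.
 * ARM stacks grow downward, so bL_do_switch() runs entirely within its
 * own 512-byte slot and the outbound and inbound instances of the same
 * thread cannot clobber each other's stack frames.
 */
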
/*
 * Generic switcher interface
 */

static unsigned int bL_gic_id[MAX_CPUS_PER_CLUSTER][MAX_NR_CLUSTERS];
static int bL_switcher_cpu_pairing[NR_CPUS];
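
/*
 * bL_gic_id[cpu][cluster] caches the GIC CPU interface ID of each
 * physical CPU (filled in by bL_switcher_halve_cpus() below).
 * bL_switcher_cpu_pairing[i] holds the logical CPU that logical CPU i
 * is paired with on the other cluster, or -1 when unpaired.
 */
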
/*
 * bL_switch_to - Switch to a specific cluster for the current CPU
 * @new_cluster_id: the ID of the cluster to switch to.
 *
 * This function must be called on the CPU to be switched.
 * Returns 0 on success, else a negative status code.
 */
static int bL_switch_to(unsigned int new_cluster_id)
{
        unsigned int mpidr, this_cpu, that_cpu;
        unsigned int ob_mpidr, ob_cpu, ob_cluster, ib_mpidr, ib_cpu, ib_cluster;
        struct completion inbound_alive;
        long volatile *handshake_ptr;
        int ipi_nr, ret;

        this_cpu = smp_processor_id();
        ob_mpidr = read_mpidr();
        ob_cpu = MPIDR_AFFINITY_LEVEL(ob_mpidr, 0);
        ob_cluster = MPIDR_AFFINITY_LEVEL(ob_mpidr, 1);
        BUG_ON(cpu_logical_map(this_cpu) != ob_mpidr);

        if (new_cluster_id == ob_cluster)
                return 0;

        that_cpu = bL_switcher_cpu_pairing[this_cpu];
        ib_mpidr = cpu_logical_map(that_cpu);
        ib_cpu = MPIDR_AFFINITY_LEVEL(ib_mpidr, 0);
        ib_cluster = MPIDR_AFFINITY_LEVEL(ib_mpidr, 1);

        pr_debug("before switch: CPU %d MPIDR %#x -> %#x\n",
                 this_cpu, ob_mpidr, ib_mpidr);

        this_cpu = smp_processor_id();

        /* Close the gate for our entry vectors */
        mcpm_set_entry_vector(ob_cpu, ob_cluster, NULL);
        mcpm_set_entry_vector(ib_cpu, ib_cluster, NULL);

        /* Install our "inbound alive" notifier. */
        init_completion(&inbound_alive);
        ipi_nr = register_ipi_completion(&inbound_alive, this_cpu);
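        /*
         * The poke value built here follows the GICv2 GICD_SGIR layout:
         * the low bits carry the completion IPI number and
         * (1 << 16) << gic_id sets our bit in the CPUTargetList field
         * (bits [23:16]).  The inbound CPU's early boot code writes this
         * value straight to the SGI register to signal us back.
         */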
        ipi_nr |= ((1 << 16) << bL_gic_id[ob_cpu][ob_cluster]);
        mcpm_set_early_poke(ib_cpu, ib_cluster, gic_get_sgir_physaddr(), ipi_nr);

        /*
         * Let's wake up the inbound CPU now in case it requires some delay
         * to come online, but leave it gated in our entry vector code.
         */
        ret = mcpm_cpu_power_up(ib_cpu, ib_cluster);
        if (ret) {
                pr_err("%s: mcpm_cpu_power_up() returned %d\n", __func__, ret);
                return ret;
        }

        /*
         * Raise an SGI on the inbound CPU to make sure it doesn't stall
         * in a possible WFI, such as in bL_power_down().
         */
        gic_send_sgi(bL_gic_id[ib_cpu][ib_cluster], 0);

        /*
         * Wait for the inbound to come up.  This allows for other
         * tasks to be scheduled in the meantime.
         */
        wait_for_completion(&inbound_alive);
        mcpm_set_early_poke(ib_cpu, ib_cluster, 0, 0);

        /*
         * From this point we are entering the switch critical zone
         * and can't take any interrupts anymore.
         */
        local_irq_disable();
        local_fiq_disable();
        trace_cpu_migrate_begin(ktime_get_real_ns(), ob_mpidr);

        /* redirect GIC's SGIs to our counterpart */
        gic_migrate_target(bL_gic_id[ib_cpu][ib_cluster]);

        tick_suspend_local();

        ret = cpu_pm_enter();

        /* we cannot tolerate errors at this point */
        if (ret)
                panic("%s: cpu_pm_enter() returned %d\n", __func__, ret);

        /* Swap the physical CPUs in the logical map for this logical CPU. */
        cpu_logical_map(this_cpu) = ib_mpidr;
        cpu_logical_map(that_cpu) = ob_mpidr;

        /* Let's do the actual CPU switch. */
        ret = cpu_suspend((unsigned long)&handshake_ptr, bL_switchpoint);
        if (ret > 0)
                panic("%s: cpu_suspend() returned %d\n", __func__, ret);

        /* We are executing on the inbound CPU at this point */
        mpidr = read_mpidr();
        pr_debug("after switch: CPU %d MPIDR %#x\n", this_cpu, mpidr);
        BUG_ON(mpidr != ib_mpidr);

        mcpm_cpu_powered_up();

        ret = cpu_pm_exit();

        tick_resume_local();

        trace_cpu_migrate_finish(ktime_get_real_ns(), ib_mpidr);
        local_fiq_enable();
        local_irq_enable();

        *handshake_ptr = 1;
        dsb_sev();

        if (ret)
                pr_err("%s exiting with error %d\n", __func__, ret);
        return ret;
}

struct bL_thread {
        spinlock_t lock;
        struct task_struct *task;
        wait_queue_head_t wq;
        int wanted_cluster;
        struct completion started;
        bL_switch_completion_handler completer;
        void *completer_cookie;
};
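
/*
 * The lock protects the wanted_cluster, completer and completer_cookie
 * fields, which form the handoff between bL_switch_request_cb() and the
 * per-CPU switcher thread: a new request is only accepted while no
 * completer is pending, and the thread consumes all three fields under
 * the same lock.
 */
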
static struct bL_thread bL_threads[NR_CPUS];
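
/*
 * One switcher thread is created per logical CPU (see
 * bL_switcher_thread_create() below).  Each thread is bound to its CPU
 * and runs at SCHED_FIFO priority 1 so that a pending switch request
 * gets serviced promptly even when ordinary tasks keep the CPU busy.
 */
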
static int bL_switcher_thread(void *arg)
{
        struct bL_thread *t = arg;
        struct sched_param param = { .sched_priority = 1 };
        int cluster;
        bL_switch_completion_handler completer;
        void *completer_cookie;

        sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
        complete(&t->started);

        do {
                if (signal_pending(current))
                        flush_signals(current);
                wait_event_interruptible(t->wq,
                                t->wanted_cluster != -1 ||
                                kthread_should_stop());

                spin_lock(&t->lock);
                cluster = t->wanted_cluster;
                completer = t->completer;
                completer_cookie = t->completer_cookie;
                t->wanted_cluster = -1;
                t->completer = NULL;
                spin_unlock(&t->lock);

                if (cluster != -1) {
                        bL_switch_to(cluster);

                        if (completer)
                                completer(completer_cookie);
                }
        } while (!kthread_should_stop());

        return 0;
}

static struct task_struct *bL_switcher_thread_create(int cpu, void *arg)
{
        struct task_struct *task;

        task = kthread_create_on_node(bL_switcher_thread, arg,
                                      cpu_to_node(cpu), "kswitcher_%d", cpu);
        if (!IS_ERR(task)) {
                kthread_bind(task, cpu);
                wake_up_process(task);
        } else
                pr_err("%s failed for CPU %d\n", __func__, cpu);
        return task;
}

/*
 * bL_switch_request_cb - Switch to a specific cluster for the given CPU,
 *      with completion notification via a callback
 *
 * @cpu: the CPU to switch
 * @new_cluster_id: the ID of the cluster to switch to.
 * @completer: switch completion callback.  If non-NULL,
 *      @completer(@completer_cookie) will be called on completion of
 *      the switch, in non-atomic context.
 * @completer_cookie: opaque context argument for @completer.
 *
 * This function causes a cluster switch on the given CPU by waking up
 * the appropriate switcher thread.  This function may or may not return
 * before the switch has occurred.
 *
 * If a @completer callback function is supplied, it will be called when
 * the switch is complete.  This can be used to determine asynchronously
 * when the switch is complete, regardless of when bL_switch_request()
 * returns.  When @completer is supplied, no new switch request is permitted
 * for the affected CPU until after the switch is complete and @completer
 * has returned.
 */
int bL_switch_request_cb(unsigned int cpu, unsigned int new_cluster_id,
                         bL_switch_completion_handler completer,
                         void *completer_cookie)
{
        struct bL_thread *t;

        if (cpu >= ARRAY_SIZE(bL_threads)) {
                pr_err("%s: cpu %d out of bounds\n", __func__, cpu);
                return -EINVAL;
        }

        t = &bL_threads[cpu];

        if (IS_ERR(t->task))
                return PTR_ERR(t->task);
        if (!t->task)
                return -ESRCH;

        spin_lock(&t->lock);
        if (t->completer) {
                spin_unlock(&t->lock);
                return -EBUSY;
        }
        t->completer = completer;
        t->completer_cookie = completer_cookie;
        t->wanted_cluster = new_cluster_id;
        spin_unlock(&t->lock);
        wake_up(&t->wq);
        return 0;
}
EXPORT_SYMBOL_GPL(bL_switch_request_cb);
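
/*
 * Illustrative usage sketch (not part of the original file): ask CPU 0
 * to switch to cluster 1 and wait for the switch to finish.  This
 * assumes a completion set up by the caller; bL_switch_request() in
 * <asm/bL_switcher.h> is the same call with a NULL completer.
 *
 *      static DECLARE_COMPLETION(switch_done);
 *
 *      static void switch_done_cb(void *cookie)
 *      {
 *              complete(cookie);
 *      }
 *
 *      ...
 *      ret = bL_switch_request_cb(0, 1, switch_done_cb, &switch_done);
 *      if (!ret)
 *              wait_for_completion(&switch_done);
 */
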
/*
 * Activation and configuration code.
 */

static DEFINE_MUTEX(bL_switcher_activation_lock);
static BLOCKING_NOTIFIER_HEAD(bL_activation_notifier);
static unsigned int bL_switcher_active;
static unsigned int bL_switcher_cpu_original_cluster[NR_CPUS];
static cpumask_t bL_switcher_removed_logical_cpus;

int bL_switcher_register_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_register(&bL_activation_notifier, nb);
}
EXPORT_SYMBOL_GPL(bL_switcher_register_notifier);

int bL_switcher_unregister_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_unregister(&bL_activation_notifier, nb);
}
EXPORT_SYMBOL_GPL(bL_switcher_unregister_notifier);

static int bL_activation_notify(unsigned long val)
{
        int ret;

        ret = blocking_notifier_call_chain(&bL_activation_notifier, val, NULL);
        if (ret & NOTIFY_STOP_MASK)
                pr_err("%s: notifier chain failed with status 0x%x\n",
                        __func__, ret);
        return notifier_to_errno(ret);
}
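
/*
 * Notifiers registered above are called with BL_NOTIFY_PRE_ENABLE /
 * BL_NOTIFY_POST_ENABLE and BL_NOTIFY_PRE_DISABLE /
 * BL_NOTIFY_POST_DISABLE around activation changes (see
 * bL_switcher_enable() and bL_switcher_disable() below).  An error
 * returned from a PRE notifier vetoes the transition.
 */
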
static void bL_switcher_restore_cpus(void)
{
        int i;

        for_each_cpu(i, &bL_switcher_removed_logical_cpus) {
                struct device *cpu_dev = get_cpu_device(i);
                int ret = device_online(cpu_dev);
                if (ret)
                        dev_err(cpu_dev, "switcher: unable to restore CPU\n");
        }
}

static int bL_switcher_halve_cpus(void)
{
        int i, j, cluster_0, gic_id, ret;
        unsigned int cpu, cluster, mask;
        cpumask_t available_cpus;

        /* First pass to validate what we have */
        mask = 0;
        for_each_online_cpu(i) {
                cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0);
                cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
                if (cluster >= 2) {
                        pr_err("%s: only dual cluster systems are supported\n", __func__);
                        return -EINVAL;
                }
                if (WARN_ON(cpu >= MAX_CPUS_PER_CLUSTER))
                        return -EINVAL;
                mask |= (1 << cluster);
        }
        if (mask != 3) {
                pr_err("%s: no CPU pairing possible\n", __func__);
                return -EINVAL;
        }

        /*
         * Now let's do the pairing.  We match each CPU with another CPU
         * from a different cluster.  To get a uniform scheduling behavior
         * without fiddling with CPU topology and compute capacity data,
         * we'll use logical CPUs initially belonging to the same cluster.
         */
        memset(bL_switcher_cpu_pairing, -1, sizeof(bL_switcher_cpu_pairing));
        cpumask_copy(&available_cpus, cpu_online_mask);
        cluster_0 = -1;
        for_each_cpu(i, &available_cpus) {
                int match = -1;
                cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
                if (cluster_0 == -1)
                        cluster_0 = cluster;
                if (cluster != cluster_0)
                        continue;
                cpumask_clear_cpu(i, &available_cpus);
                for_each_cpu(j, &available_cpus) {
                        cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(j), 1);
                        /*
                         * Let's remember the last match to create "odd"
                         * pairings on purpose in order for other code not
                         * to assume any relation between physical and
                         * logical CPU numbers.
                         */
                        if (cluster != cluster_0)
                                match = j;
                }
                if (match != -1) {
                        bL_switcher_cpu_pairing[i] = match;
                        cpumask_clear_cpu(match, &available_cpus);
                        pr_info("CPU%d paired with CPU%d\n", i, match);
                }
        }
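
        /*
         * Worked example: on a 2+2 system where logical CPUs 0-1 sit on
         * cluster 0 and logical CPUs 2-3 on cluster 1, the "last match"
         * rule above pairs CPU 0 with CPU 3 and CPU 1 with CPU 2.
         * CPUs 2 and 3 end up with no pairing information of their own
         * and are therefore offlined below.
         */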

        /*
         * Now we disable the unwanted CPUs, i.e. everything that has no
         * pairing information (that includes the pairing counterparts).
         */
        cpumask_clear(&bL_switcher_removed_logical_cpus);
        for_each_online_cpu(i) {
                cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0);
                cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);

                /* Let's take note of the GIC ID for this CPU */
                gic_id = gic_get_cpu_id(i);
                if (gic_id < 0) {
                        pr_err("%s: bad GIC ID for CPU %d\n", __func__, i);
                        bL_switcher_restore_cpus();
                        return -EINVAL;
                }
                bL_gic_id[cpu][cluster] = gic_id;
                pr_info("GIC ID for CPU %u cluster %u is %u\n",
                        cpu, cluster, gic_id);

                if (bL_switcher_cpu_pairing[i] != -1) {
                        bL_switcher_cpu_original_cluster[i] = cluster;
                        continue;
                }

                ret = device_offline(get_cpu_device(i));
                if (ret) {
                        bL_switcher_restore_cpus();
                        return ret;
                }
                cpumask_set_cpu(i, &bL_switcher_removed_logical_cpus);
        }

        return 0;
}

/* Determine the logical CPU a given physical CPU is grouped on. */
int bL_switcher_get_logical_index(u32 mpidr)
{
        int cpu;

        if (!bL_switcher_active)
                return -EUNATCH;

        mpidr &= MPIDR_HWID_BITMASK;
        for_each_online_cpu(cpu) {
                int pairing = bL_switcher_cpu_pairing[cpu];
                if (pairing == -1)
                        continue;
                if ((mpidr == cpu_logical_map(cpu)) ||
                    (mpidr == cpu_logical_map(pairing)))
                        return cpu;
        }
        return -EINVAL;
}

static void bL_switcher_trace_trigger_cpu(void *__always_unused info)
{
        trace_cpu_migrate_current(ktime_get_real_ns(), read_mpidr());
}

int bL_switcher_trace_trigger(void)
{
        int ret;

        preempt_disable();

        bL_switcher_trace_trigger_cpu(NULL);
        ret = smp_call_function(bL_switcher_trace_trigger_cpu, NULL, true);

        preempt_enable();

        return ret;
}
EXPORT_SYMBOL_GPL(bL_switcher_trace_trigger);

static int bL_switcher_enable(void)
{
        int cpu, ret;

        mutex_lock(&bL_switcher_activation_lock);
        lock_device_hotplug();
        if (bL_switcher_active) {
                unlock_device_hotplug();
                mutex_unlock(&bL_switcher_activation_lock);
                return 0;
        }

        pr_info("big.LITTLE switcher initializing\n");

        ret = bL_activation_notify(BL_NOTIFY_PRE_ENABLE);
        if (ret)
                goto error;

        ret = bL_switcher_halve_cpus();
        if (ret)
                goto error;

        bL_switcher_trace_trigger();

        for_each_online_cpu(cpu) {
                struct bL_thread *t = &bL_threads[cpu];
                spin_lock_init(&t->lock);
                init_waitqueue_head(&t->wq);
                init_completion(&t->started);
                t->wanted_cluster = -1;
                t->task = bL_switcher_thread_create(cpu, t);
        }

        bL_switcher_active = 1;
        bL_activation_notify(BL_NOTIFY_POST_ENABLE);
        pr_info("big.LITTLE switcher initialized\n");
        goto out;

error:
        pr_warn("big.LITTLE switcher initialization failed\n");
        bL_activation_notify(BL_NOTIFY_POST_DISABLE);

out:
        unlock_device_hotplug();
        mutex_unlock(&bL_switcher_activation_lock);
        return ret;
}

#ifdef CONFIG_SYSFS

static void bL_switcher_disable(void)
{
        unsigned int cpu, cluster;
        struct bL_thread *t;
        struct task_struct *task;

        mutex_lock(&bL_switcher_activation_lock);
        lock_device_hotplug();

        if (!bL_switcher_active)
                goto out;

        if (bL_activation_notify(BL_NOTIFY_PRE_DISABLE) != 0) {
                bL_activation_notify(BL_NOTIFY_POST_ENABLE);
                goto out;
        }

        bL_switcher_active = 0;

        /*
         * To deactivate the switcher, we must shut down the switcher
         * threads to prevent any other requests from being accepted.
         * Then, if the final cluster for a given logical CPU is not the
         * same as the original one, we'll recreate a switcher thread
         * just for the purpose of switching the CPU back without any
         * possibility of interference from external requests.
         */
        for_each_online_cpu(cpu) {
                t = &bL_threads[cpu];
                task = t->task;
                t->task = NULL;
                if (!task || IS_ERR(task))
                        continue;
                kthread_stop(task);
                /* no more switch may happen on this CPU at this point */
                cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
                if (cluster == bL_switcher_cpu_original_cluster[cpu])
                        continue;
                init_completion(&t->started);
                t->wanted_cluster = bL_switcher_cpu_original_cluster[cpu];
                task = bL_switcher_thread_create(cpu, t);
                if (!IS_ERR(task)) {
                        wait_for_completion(&t->started);
                        kthread_stop(task);
                        cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
                        if (cluster == bL_switcher_cpu_original_cluster[cpu])
                                continue;
                }
                /* If execution gets here, we're in trouble. */
                pr_crit("%s: unable to restore original cluster for CPU %d\n",
                        __func__, cpu);
                pr_crit("%s: CPU %d can't be restored\n",
                        __func__, bL_switcher_cpu_pairing[cpu]);
                cpumask_clear_cpu(bL_switcher_cpu_pairing[cpu],
                                  &bL_switcher_removed_logical_cpus);
        }

        bL_switcher_restore_cpus();
        bL_switcher_trace_trigger();

        bL_activation_notify(BL_NOTIFY_POST_DISABLE);

out:
        unlock_device_hotplug();
        mutex_unlock(&bL_switcher_activation_lock);
}

static ssize_t bL_switcher_active_show(struct kobject *kobj,
                struct kobj_attribute *attr, char *buf)
{
        return sprintf(buf, "%u\n", bL_switcher_active);
}

static ssize_t bL_switcher_active_store(struct kobject *kobj,
                struct kobj_attribute *attr, const char *buf, size_t count)
{
        int ret;

        switch (buf[0]) {
        case '0':
                bL_switcher_disable();
                ret = 0;
                break;
        case '1':
                ret = bL_switcher_enable();
                break;
        default:
                ret = -EINVAL;
        }

        return (ret >= 0) ? count : ret;
}

static ssize_t bL_switcher_trace_trigger_store(struct kobject *kobj,
                struct kobj_attribute *attr, const char *buf, size_t count)
{
        int ret = bL_switcher_trace_trigger();

        return ret ? ret : count;
}

static struct kobj_attribute bL_switcher_active_attr =
        __ATTR(active, 0644, bL_switcher_active_show, bL_switcher_active_store);

static struct kobj_attribute bL_switcher_trace_trigger_attr =
        __ATTR(trace_trigger, 0200, NULL, bL_switcher_trace_trigger_store);

static struct attribute *bL_switcher_attrs[] = {
        &bL_switcher_active_attr.attr,
        &bL_switcher_trace_trigger_attr.attr,
        NULL,
};

static struct attribute_group bL_switcher_attr_group = {
        .attrs = bL_switcher_attrs,
};

static struct kobject *bL_switcher_kobj;

static int __init bL_switcher_sysfs_init(void)
{
        int ret;

        bL_switcher_kobj = kobject_create_and_add("bL_switcher", kernel_kobj);
        if (!bL_switcher_kobj)
                return -ENOMEM;
        ret = sysfs_create_group(bL_switcher_kobj, &bL_switcher_attr_group);
        if (ret)
                kobject_put(bL_switcher_kobj);
        return ret;
}
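
/*
 * The resulting interface lives under /sys/kernel/bL_switcher/:
 *
 *      echo 0 > /sys/kernel/bL_switcher/active         # disable the switcher
 *      echo 1 > /sys/kernel/bL_switcher/active         # (re-)enable it
 *      echo 1 > /sys/kernel/bL_switcher/trace_trigger  # re-emit MPIDR trace events
 *
 * trace_trigger accepts any write; the written value is ignored.
 */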

#endif /* CONFIG_SYSFS */

bool bL_switcher_get_enabled(void)
{
        mutex_lock(&bL_switcher_activation_lock);

        return bL_switcher_active;
}
EXPORT_SYMBOL_GPL(bL_switcher_get_enabled);

void bL_switcher_put_enabled(void)
{
        mutex_unlock(&bL_switcher_activation_lock);
}
EXPORT_SYMBOL_GPL(bL_switcher_put_enabled);
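
/*
 * bL_switcher_get_enabled() returns with the activation lock held, so
 * the reported state cannot change under the caller's feet until it is
 * dropped again with bL_switcher_put_enabled().
 */
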
/*
 * Veto any CPU hotplug operation on those CPUs we've removed
 * while the switcher is active.
 * We're just not ready to deal with that given the trickery involved.
 */
static int bL_switcher_cpu_pre(unsigned int cpu)
{
        int pairing;

        if (!bL_switcher_active)
                return 0;

        pairing = bL_switcher_cpu_pairing[cpu];

        if (pairing == -1)
                return -EINVAL;
        return 0;
}

static bool no_bL_switcher;
core_param(no_bL_switcher, no_bL_switcher, bool, 0644);
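
/*
 * Booting with no_bL_switcher=1 on the kernel command line skips the
 * automatic bL_switcher_enable() call in bL_switcher_init() below; the
 * switcher can still be enabled later via the sysfs 'active' attribute.
 */
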
static int __init bL_switcher_init(void)
{
        int ret;

        if (!mcpm_is_available())
                return -ENODEV;

        cpuhp_setup_state_nocalls(CPUHP_ARM_BL_PREPARE, "arm/bl:prepare",
                                  bL_switcher_cpu_pre, NULL);
        ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "arm/bl:predown",
                                        NULL, bL_switcher_cpu_pre);
        if (ret < 0) {
                cpuhp_remove_state_nocalls(CPUHP_ARM_BL_PREPARE);
                pr_err("bL_switcher: Failed to allocate a hotplug state\n");
                return ret;
        }
        if (!no_bL_switcher) {
                ret = bL_switcher_enable();
                if (ret)
                        return ret;
        }

#ifdef CONFIG_SYSFS
        ret = bL_switcher_sysfs_init();
        if (ret)
                pr_err("%s: unable to create sysfs entry\n", __func__);
#endif

        return 0;
}

late_initcall(bL_switcher_init);