/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Copyright (C) 2016 ARM Limited
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/psci.h>
#include <linux/slab.h>
#include <linux/tick.h>
#include <linux/topology.h>

#include <asm/cpuidle.h>

#include <uapi/linux/psci.h>

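/*
 * Number of suspend cycles run on each CPU; every cycle tries each idle
 * state of the cpuidle driver except state 0 (see suspend_test_thread()).
 */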
#define NUM_SUSPEND_CYCLE (10)

static unsigned int nb_available_cpus;
static int tos_resident_cpu = -1;

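/*
 * Synchronization between the suspend test threads and the main thread: the
 * threads wait for suspend_threads_started before testing, and the last one
 * to finish completes suspend_threads_done.
 */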
static atomic_t nb_active_threads;
static struct completion suspend_threads_started =
        COMPLETION_INITIALIZER(suspend_threads_started);
static struct completion suspend_threads_done =
        COMPLETION_INITIALIZER(suspend_threads_done);

/*
 * We assume that PSCI operations are used if they are available. This is not
 * necessarily true on arm64, since the decision is based on the
 * "enable-method" property of each CPU in the DT, but given that there is no
 * arch-specific way to check this, we assume that the DT is sensible.
 */
static int psci_ops_check(void)
{
        int migrate_type = -1;
        int cpu;

        if (!(psci_ops.cpu_off && psci_ops.cpu_on && psci_ops.cpu_suspend)) {
                pr_warn("Missing PSCI operations, aborting tests\n");
                return -EOPNOTSUPP;
        }

        if (psci_ops.migrate_info_type)
                migrate_type = psci_ops.migrate_info_type();

        if (migrate_type == PSCI_0_2_TOS_UP_MIGRATE ||
            migrate_type == PSCI_0_2_TOS_UP_NO_MIGRATE) {
                /* There is a UP Trusted OS, find on which core it resides. */
                for_each_online_cpu(cpu)
                        if (psci_tos_resident_on(cpu)) {
                                tos_resident_cpu = cpu;
                                break;
                        }
                if (tos_resident_cpu == -1)
                        pr_warn("UP Trusted OS resides on no online CPU\n");
        }

        return 0;
}

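/*
 * Fill @clusters with the (unique) core sibling masks covering @cpus, one
 * entry per cluster. Returns the number of clusters found, or -ENOMEM if the
 * temporary cpumask cannot be allocated.
 */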
static int find_clusters(const struct cpumask *cpus,
                         const struct cpumask **clusters)
{
        unsigned int nb = 0;
        cpumask_var_t tmp;

        if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
                return -ENOMEM;
        cpumask_copy(tmp, cpus);

        while (!cpumask_empty(tmp)) {
                const struct cpumask *cluster =
                        topology_core_cpumask(cpumask_any(tmp));

                clusters[nb++] = cluster;
                cpumask_andnot(tmp, tmp, cluster);
        }

        free_cpumask_var(tmp);
        return nb;
}

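/*
 * Power down every CPU in @cpus, checking the error codes returned by
 * cpu_down(), then power the offlined CPUs back up. Returns the number of
 * errors encountered.
 */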
/*
 * offlined_cpus is a temporary cpumask; passing it in as an argument avoids
 * allocating it on every call.
 */
static unsigned int down_and_up_cpus(const struct cpumask *cpus,
                                     struct cpumask *offlined_cpus)
{
        int cpu;
        int err = 0;

        cpumask_clear(offlined_cpus);

        /* Try to power down all CPUs in the mask. */
        for_each_cpu(cpu, cpus) {
                int ret = cpu_down(cpu);

                /*
                 * cpu_down() checks the number of online CPUs before it
                 * checks for the TOS resident CPU, so the "last online CPU"
                 * case must be tested first.
                 */
                if (cpumask_weight(offlined_cpus) + 1 == nb_available_cpus) {
                        if (ret != -EBUSY) {
                                pr_err("Unexpected return code %d while trying "
                                       "to power down last online CPU %d\n",
                                       ret, cpu);
                                ++err;
                        }
                } else if (cpu == tos_resident_cpu) {
                        if (ret != -EPERM) {
                                pr_err("Unexpected return code %d while trying "
                                       "to power down TOS resident CPU %d\n",
                                       ret, cpu);
                                ++err;
                        }
                } else if (ret != 0) {
                        pr_err("Error occurred (%d) while trying "
                               "to power down CPU %d\n", ret, cpu);
                        ++err;
                }

                if (ret == 0)
                        cpumask_set_cpu(cpu, offlined_cpus);
        }

        /* Try to power up all the CPUs that have been offlined. */
        for_each_cpu(cpu, offlined_cpus) {
                int ret = cpu_up(cpu);

                if (ret != 0) {
                        pr_err("Error occurred (%d) while trying "
                               "to power up CPU %d\n", ret, cpu);
                        ++err;
                } else {
                        cpumask_clear_cpu(cpu, offlined_cpus);
                }
        }

        /*
         * Something went bad at some point and some CPUs could not be turned
         * back on.
         */
        WARN_ON(!cpumask_empty(offlined_cpus) ||
                num_online_cpus() != nb_available_cpus);

        return err;
}

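/*
 * Hotplug tests: offline and online every CPU, first all at once, then
 * cluster by cluster, checking the error codes returned by cpu_down() and
 * cpu_up(). Returns the number of errors, or a negative errno if memory
 * could not be allocated.
 */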
static int hotplug_tests(void)
{
        int err;
        cpumask_var_t offlined_cpus;
        int i, nb_cluster;
        const struct cpumask **clusters;
        char *page_buf;

        err = -ENOMEM;
        if (!alloc_cpumask_var(&offlined_cpus, GFP_KERNEL))
                return err;
        /* We may have up to nb_available_cpus clusters. */
        clusters = kmalloc_array(nb_available_cpus, sizeof(*clusters),
                                 GFP_KERNEL);
        if (!clusters)
                goto out_free_cpus;
        page_buf = (char *)__get_free_page(GFP_KERNEL);
        if (!page_buf)
                goto out_free_clusters;

        err = 0;
        nb_cluster = find_clusters(cpu_online_mask, clusters);

        /*
         * Of course the last CPU cannot be powered down, and cpu_down()
         * should refuse to do so.
         */
        pr_info("Trying to turn off and on again all CPUs\n");
        err += down_and_up_cpus(cpu_online_mask, offlined_cpus);

        /*
         * Take down CPUs by cluster this time. When the last CPU is turned
         * off, the cluster itself should shut down.
         */
        for (i = 0; i < nb_cluster; ++i) {
                int cluster_id =
                        topology_physical_package_id(cpumask_any(clusters[i]));
                ssize_t len = cpumap_print_to_pagebuf(true, page_buf,
                                                      clusters[i]);
                /* Remove trailing newline. */
                page_buf[len - 1] = '\0';
                pr_info("Trying to turn off and on again cluster %d "
                        "(CPUs %s)\n", cluster_id, page_buf);
                err += down_and_up_cpus(clusters[i], offlined_cpus);
        }

        free_page((unsigned long)page_buf);
out_free_clusters:
        kfree(clusters);
out_free_cpus:
        free_cpumask_var(offlined_cpus);
        return err;
}

static void dummy_callback(unsigned long ignored) {}

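/*
 * Enter the idle state @index on the current CPU through the cpuidle/PSCI
 * path. If @broadcast is set the local timer will be shut down, so switch to
 * the broadcast tick first; if that fails, fall back to plain WFI. Returns
 * the index of the state entered (0 for the WFI fallback) or a negative
 * value on error. Called with IRQs disabled (see suspend_test_thread()).
 */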
static int suspend_cpu(int index, bool broadcast)
{
        int ret;

        arch_cpu_idle_enter();

        if (broadcast) {
                /*
                 * The local timer will be shut down; we need to enter tick
                 * broadcast mode.
                 */
                ret = tick_broadcast_enter();
                if (ret) {
                        /*
                         * In the absence of a hardware broadcast mechanism,
                         * this CPU might be used to broadcast wakeups, which
                         * may be why entering tick broadcast has failed.
                         * There is little the kernel can do to work around
                         * that, so enter WFI instead (idle state 0).
                         */
                        cpu_do_idle();
                        ret = 0;
                        goto out_arch_exit;
                }
        }

        /*
         * Replicate the common ARM cpuidle enter function
         * (arm_enter_idle_state).
         */
        ret = CPU_PM_CPU_IDLE_ENTER(arm_cpuidle_suspend, index);

        if (broadcast)
                tick_broadcast_exit();

out_arch_exit:
        arch_cpu_idle_exit();

        return ret;
}

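/*
 * Per-CPU test thread: run NUM_SUSPEND_CYCLE suspend cycles, each one trying
 * every idle state except state 0, with a timer armed to wake the CPU back
 * up. Returns the number of errors encountered.
 */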
static int suspend_test_thread(void *arg)
{
        int cpu = (long)arg;
        int i, nb_suspend = 0, nb_shallow_sleep = 0, nb_err = 0;
        struct sched_param sched_priority = { .sched_priority = MAX_RT_PRIO-1 };
        struct cpuidle_device *dev;
        struct cpuidle_driver *drv;
        /* No need for an actual callback, we just want to wake up the CPU. */
        struct timer_list wakeup_timer =
                TIMER_INITIALIZER(dummy_callback, 0, 0);

        /* Wait for the main thread to give the start signal. */
        wait_for_completion(&suspend_threads_started);

        /* Set maximum priority to preempt all other threads on this CPU. */
        if (sched_setscheduler_nocheck(current, SCHED_FIFO, &sched_priority))
                pr_warn("Failed to set suspend thread scheduler on CPU %d\n",
                        cpu);

        dev = this_cpu_read(cpuidle_devices);
        drv = cpuidle_get_cpu_driver(dev);

        pr_info("CPU %d entering suspend cycles, states 1 through %d\n",
                cpu, drv->state_count - 1);

        for (i = 0; i < NUM_SUSPEND_CYCLE; ++i) {
                int index;
                /*
                 * Test all possible states, except 0 (which is usually WFI and
                 * doesn't use PSCI).
                 */
                for (index = 1; index < drv->state_count; ++index) {
                        struct cpuidle_state *state = &drv->states[index];
                        bool broadcast = state->flags & CPUIDLE_FLAG_TIMER_STOP;
                        int ret;

                        /*
                         * Set the timer to wake this CPU up after the state's
                         * target residency, which should leave ample time to
                         * enter suspend. If the local tick is disabled when
                         * entering suspend, suspend_cpu() takes care of
                         * switching to a broadcast tick, so the timer will
                         * still wake us up.
                         */
                        mod_timer(&wakeup_timer, jiffies +
                                  usecs_to_jiffies(state->target_residency));

                        /* IRQs must be disabled during suspend operations. */
                        local_irq_disable();

                        ret = suspend_cpu(index, broadcast);

                        /*
                         * We have woken up. Re-enable IRQs to handle any
                         * pending interrupt; do not wait until the end of the
                         * loop.
                         */
                        local_irq_enable();

                        if (ret == index) {
                                ++nb_suspend;
                        } else if (ret >= 0) {
                                /* We did not enter the expected state. */
                                ++nb_shallow_sleep;
                        } else {
                                pr_err("Failed to suspend CPU %d: error %d "
                                       "(requested state %d, cycle %d)\n",
                                       cpu, ret, index, i);
                                ++nb_err;
                        }
                }
        }

        /*
         * Delete the timer to make sure that it cannot fire after the test
         * loop has finished.
         */
        del_timer(&wakeup_timer);

        if (atomic_dec_return_relaxed(&nb_active_threads) == 0)
                complete(&suspend_threads_done);

        /* Give up on RT scheduling and wait for termination. */
        sched_priority.sched_priority = 0;
        if (sched_setscheduler_nocheck(current, SCHED_NORMAL, &sched_priority))
                pr_warn("Failed to set suspend thread scheduler on CPU %d\n",
                        cpu);
        for (;;) {
                /* Needs to be set first to avoid missing a wakeup. */
                set_current_state(TASK_INTERRUPTIBLE);
                if (kthread_should_stop()) {
                        __set_current_state(TASK_RUNNING);
                        break;
                }
                schedule();
        }

        pr_info("CPU %d suspend test results: success %d, shallow states %d, errors %d\n",
                cpu, nb_suspend, nb_shallow_sleep, nb_err);

        return nb_err;
}

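/*
 * Spawn a suspend test thread on every CPU that has a cpuidle driver, start
 * them all, and wait for them to finish. cpuidle is paused for the duration
 * of the test so that the idle tasks do not interfere. Returns the total
 * number of errors, or a negative errno if the tests could not be started.
 */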
static int suspend_tests(void)
{
        int i, cpu, err = 0;
        struct task_struct **threads;
        int nb_threads = 0;

        threads = kmalloc_array(nb_available_cpus, sizeof(*threads),
                                GFP_KERNEL);
        if (!threads)
                return -ENOMEM;

        /*
         * Stop cpuidle to prevent the idle tasks from entering a deep sleep
         * mode, as it might interfere with the suspend threads on other CPUs.
         * This does not prevent the suspend threads from using cpuidle (only
         * the idle tasks check this status). Take the idle lock so that
         * the cpuidle driver and device look-up can be carried out safely.
         */
        cpuidle_pause_and_lock();

        for_each_online_cpu(cpu) {
                struct task_struct *thread;
                /* Check that cpuidle is available on that CPU. */
                struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
                struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

                if (!dev || !drv) {
                        pr_warn("cpuidle not available on CPU %d, ignoring\n",
                                cpu);
                        continue;
                }

                thread = kthread_create_on_cpu(suspend_test_thread,
                                               (void *)(long)cpu, cpu,
                                               "psci_suspend_test");
                if (IS_ERR(thread))
                        pr_err("Failed to create kthread on CPU %d\n", cpu);
                else
                        threads[nb_threads++] = thread;
        }

        if (nb_threads < 1) {
                err = -ENODEV;
                goto out;
        }

        atomic_set(&nb_active_threads, nb_threads);

        /*
         * Wake up the suspend threads. To avoid the main thread being
         * preempted before all the threads have been woken up, the suspend
         * threads will wait for the completion of suspend_threads_started.
         */
        for (i = 0; i < nb_threads; ++i)
                wake_up_process(threads[i]);
        complete_all(&suspend_threads_started);

        wait_for_completion(&suspend_threads_done);

        /* Stop and destroy all threads, get return status. */
        for (i = 0; i < nb_threads; ++i)
                err += kthread_stop(threads[i]);
out:
        cpuidle_resume_and_unlock();
        kfree(threads);
        return err;
}

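/*
 * Entry point: check that PSCI is usable, run the hotplug tests, then the
 * suspend tests, and log a summary. Registered as a late initcall so that,
 * by the time it runs, secondary CPUs have been brought up and the cpuidle
 * driver is expected to be registered.
 */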
static int __init psci_checker(void)
{
        int ret;

        /*
         * Since we're in an initcall, we assume that all the CPUs that can
         * be onlined have been onlined.
         *
         * The tests assume that hotplug is enabled but nobody else is using
         * it, otherwise the results will be unpredictable. However, since
         * there is no userspace yet in initcalls, that should be fine, as
         * long as no torture test is running at the same time (see Kconfig).
         */
        nb_available_cpus = num_online_cpus();

        /* Check PSCI operations are set up and working. */
        ret = psci_ops_check();
        if (ret)
                return ret;

        pr_info("PSCI checker started using %u CPUs\n", nb_available_cpus);

        pr_info("Starting hotplug tests\n");
        ret = hotplug_tests();
        if (ret == 0)
                pr_info("Hotplug tests passed OK\n");
        else if (ret > 0)
                pr_err("%d error(s) encountered in hotplug tests\n", ret);
        else {
                pr_err("Out of memory\n");
                return ret;
        }

        pr_info("Starting suspend tests (%d cycles per state)\n",
                NUM_SUSPEND_CYCLE);
        ret = suspend_tests();
        if (ret == 0)
                pr_info("Suspend tests passed OK\n");
        else if (ret > 0)
                pr_err("%d error(s) encountered in suspend tests\n", ret);
        else {
                switch (ret) {
                case -ENOMEM:
                        pr_err("Out of memory\n");
                        break;
                case -ENODEV:
                        pr_warn("Could not start suspend tests on any CPU\n");
                        break;
                }
        }

        pr_info("PSCI checker completed\n");
        return ret < 0 ? ret : 0;
}
late_initcall(psci_checker);