// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/process.c
 *
 * Original Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 1996-2000 Russell King - Converted to ARM.
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/compat.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/elfcore.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/utsname.h>
#include <linux/uaccess.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/personality.h>
#include <linux/notifier.h>
#include <trace/events/power.h>
#include <linux/percpu.h>
#include <linux/thread_info.h>

#include <asm/alternative.h>
#include <asm/arch_gicv3.h>
#include <asm/compat.h>
#include <asm/cacheflush.h>
#include <asm/exec.h>
#include <asm/fpsimd.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>

#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
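
/*
 * Note: with CONFIG_STACKPROTECTOR_PER_TASK the stack canary lives in
 * task_struct instead, so the single global guard above is only needed
 * (and only defined) for the non-per-task configuration.
 */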

/*
 * Function pointers to optional machine specific functions
 */
void (*pm_power_off)(void);
EXPORT_SYMBOL_GPL(pm_power_off);

void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);

static void __cpu_do_idle(void)
{
        dsb(sy);
        wfi();
}
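
/*
 * The dsb() above completes any pending memory accesses (including cache
 * and TLB maintenance) before wfi() potentially drops the core into a
 * low-power state; wfi() then stalls until a wake-up event (e.g. an
 * interrupt) is signalled to the core.
 */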

static void __cpu_do_idle_irqprio(void)
{
        unsigned long pmr;
        unsigned long daif_bits;

        daif_bits = read_sysreg(daif);
        write_sysreg(daif_bits | PSR_I_BIT, daif);

        /*
         * Unmask PMR before going idle to make sure interrupts can
         * be raised.
         */
        pmr = gic_read_pmr();
        gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);

        __cpu_do_idle();

        gic_write_pmr(pmr);
        write_sysreg(daif_bits, daif);
}
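
/*
 * Summary of the sequence above when interrupt priority masking (pseudo-NMI)
 * is in use: interrupts are normally masked at the PMR, in the GIC CPU
 * interface, which would also block the wake-up signal for WFI. So the
 * masking is moved to PSTATE.I (which is local to the core and still lets
 * the GIC signal it), the PMR is opened up for the duration of the idle
 * period, and the original DAIF/PMR state is restored afterwards.
 */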

/*
 * Idle the processor (wait for interrupt).
 *
 * If the CPU supports priority masking we must do additional work to
 * ensure that interrupts are not masked at the PMR (because the core will
 * not wake up if we block the wake up signal in the interrupt controller).
 */
void cpu_do_idle(void)
{
        if (system_uses_irq_prio_masking())
                __cpu_do_idle_irqprio();
        else
                __cpu_do_idle();
}

/*
 * This is our default idle handler.
 */
void arch_cpu_idle(void)
{
        /*
         * This should do all the clock switching and wait for interrupt
         * tricks.
         */
        trace_cpu_idle_rcuidle(1, smp_processor_id());
        cpu_do_idle();
        local_irq_enable();
        trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}

#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
        cpu_die();
}
#endif

/*
 * Called by kexec, immediately prior to machine_kexec().
 *
 * This must completely disable all secondary CPUs; simply causing those CPUs
 * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
 * kexec'd kernel to use any and all RAM as it sees fit, without having to
 * avoid any code or data used by any SW CPU pin loop. The CPU hotplug
 * functionality embodied in disable_nonboot_cpus() is used to achieve this.
 */
void machine_shutdown(void)
{
        disable_nonboot_cpus();
}

/*
 * Halting simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this.
 */
void machine_halt(void)
{
        local_irq_disable();
        smp_send_stop();
        while (1);
}

/*
 * Power-off simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this. When the system power is turned off, it will take all CPUs
 * with it.
 */
void machine_power_off(void)
{
        local_irq_disable();
        smp_send_stop();
        if (pm_power_off)
                pm_power_off();
}

/*
 * Restart requires that the secondary CPUs stop performing any activity
 * while the primary CPU resets the system. Systems with multiple CPUs must
 * provide a HW restart implementation, to ensure that all CPUs reset at once.
 * This is required so that any code running after reset on the primary CPU
 * doesn't have to co-ordinate with other CPUs to ensure they aren't still
 * executing pre-reset code, and using RAM that the primary CPU's code wishes
 * to use. Implementing such co-ordination would be essentially impossible.
 */
void machine_restart(char *cmd)
{
        /* Disable interrupts first */
        local_irq_disable();
        smp_send_stop();

        /*
         * UpdateCapsule() depends on the system being reset via
         * ResetSystem().
         */
        if (efi_enabled(EFI_RUNTIME_SERVICES))
                efi_reboot(reboot_mode, NULL);

        /* Now call the architecture specific reboot code. */
        if (arm_pm_restart)
                arm_pm_restart(reboot_mode, cmd);
        else
                do_kernel_restart(cmd);

        /*
         * Whoops - the architecture was unable to reboot.
         */
        printk("Reboot failed -- System halted\n");
        while (1);
}

static void print_pstate(struct pt_regs *regs)
{
        u64 pstate = regs->pstate;

        if (compat_user_mode(regs)) {
                printk("pstate: %08llx (%c%c%c%c %c %s %s %c%c%c)\n",
                        pstate,
                        pstate & PSR_AA32_N_BIT ? 'N' : 'n',
                        pstate & PSR_AA32_Z_BIT ? 'Z' : 'z',
                        pstate & PSR_AA32_C_BIT ? 'C' : 'c',
                        pstate & PSR_AA32_V_BIT ? 'V' : 'v',
                        pstate & PSR_AA32_Q_BIT ? 'Q' : 'q',
                        pstate & PSR_AA32_T_BIT ? "T32" : "A32",
                        pstate & PSR_AA32_E_BIT ? "BE" : "LE",
                        pstate & PSR_AA32_A_BIT ? 'A' : 'a',
                        pstate & PSR_AA32_I_BIT ? 'I' : 'i',
                        pstate & PSR_AA32_F_BIT ? 'F' : 'f');
        } else {
                printk("pstate: %08llx (%c%c%c%c %c%c%c%c %cPAN %cUAO)\n",
                        pstate,
                        pstate & PSR_N_BIT ? 'N' : 'n',
                        pstate & PSR_Z_BIT ? 'Z' : 'z',
                        pstate & PSR_C_BIT ? 'C' : 'c',
                        pstate & PSR_V_BIT ? 'V' : 'v',
                        pstate & PSR_D_BIT ? 'D' : 'd',
                        pstate & PSR_A_BIT ? 'A' : 'a',
                        pstate & PSR_I_BIT ? 'I' : 'i',
                        pstate & PSR_F_BIT ? 'F' : 'f',
                        pstate & PSR_PAN_BIT ? '+' : '-',
                        pstate & PSR_UAO_BIT ? '+' : '-');
        }
}
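
/*
 * Example of the native (64-bit) format, assuming an illustrative pstate
 * of 0x60000005 (EL1h mode, DAIF clear, Z and C set):
 *
 *   pstate: 60000005 (nZCv daif -PAN -UAO)
 */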

void __show_regs(struct pt_regs *regs)
{
        int i, top_reg;
        u64 lr, sp;

        if (compat_user_mode(regs)) {
                lr = regs->compat_lr;
                sp = regs->compat_sp;
                top_reg = 12;
        } else {
                lr = regs->regs[30];
                sp = regs->sp;
                top_reg = 29;
        }

        show_regs_print_info(KERN_DEFAULT);
        print_pstate(regs);

        if (!user_mode(regs)) {
                printk("pc : %pS\n", (void *)regs->pc);
                printk("lr : %pS\n", (void *)lr);
        } else {
                printk("pc : %016llx\n", regs->pc);
                printk("lr : %016llx\n", lr);
        }

        printk("sp : %016llx\n", sp);

        if (system_uses_irq_prio_masking())
                printk("pmr_save: %08llx\n", regs->pmr_save);

        i = top_reg;

        while (i >= 0) {
                printk("x%-2d: %016llx ", i, regs->regs[i]);
                i--;

                if (i % 2 == 0) {
                        pr_cont("x%-2d: %016llx ", i, regs->regs[i]);
                        i--;
                }

                pr_cont("\n");
        }
}

void show_regs(struct pt_regs *regs)
{
        __show_regs(regs);
        dump_backtrace(regs, NULL);
}

static void tls_thread_flush(void)
{
        write_sysreg(0, tpidr_el0);

        if (is_compat_task()) {
                current->thread.uw.tp_value = 0;

                /*
                 * We need to ensure ordering between the shadow state and the
                 * hardware state, so that we don't corrupt the hardware state
                 * with a stale shadow state during context switch.
                 */
                barrier();
                write_sysreg(0, tpidrro_el0);
        }
}
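
/*
 * current->thread.uw.tp_value is the software shadow of the compat TLS
 * register (tpidrro_el0) that ptrace and context switch consult, so it
 * must be cleared together with the hardware register; the barrier()
 * keeps the compiler from reordering the two stores.
 */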

void flush_thread(void)
{
        fpsimd_flush_thread();
        tls_thread_flush();
        flush_ptrace_hw_breakpoint(current);
}

void release_thread(struct task_struct *dead_task)
{
}

void arch_release_task_struct(struct task_struct *tsk)
{
        fpsimd_release_task(tsk);
}

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
        if (current->mm)
                fpsimd_preserve_current_state();
        *dst = *src;

        /* We rely on the above assignment to initialize dst's thread_flags: */
        BUILD_BUG_ON(!IS_ENABLED(CONFIG_THREAD_INFO_IN_TASK));

        /*
         * Detach src's sve_state (if any) from dst so that it does not
         * get erroneously used or freed prematurely. dst's sve_state
         * will be allocated on demand later on if dst uses SVE.
         * For consistency, also clear TIF_SVE here: this could be done
         * later in copy_process(), but to avoid tripping up future
         * maintainers it is best not to leave TIF_SVE and sve_state in
         * an inconsistent state, even temporarily.
         */
        dst->thread.sve_state = NULL;
        clear_tsk_thread_flag(dst, TIF_SVE);

        return 0;
}
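
/*
 * The whole-struct assignment above copies thread_info (and therefore
 * thread_flags) only because thread_info is embedded in task_struct on
 * arm64; the BUILD_BUG_ON() enforces that assumption at compile time.
 */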

asmlinkage void ret_from_fork(void) asm("ret_from_fork");

int copy_thread(unsigned long clone_flags, unsigned long stack_start,
                unsigned long stk_sz, struct task_struct *p)
{
        struct pt_regs *childregs = task_pt_regs(p);

        memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));

        /*
         * In case p was allocated the same task_struct pointer as some
         * other recently-exited task, make sure p is disassociated from
         * any cpu that may have run that now-exited task recently.
         * Otherwise we could erroneously skip reloading the FPSIMD
         * registers for p.
         */
        fpsimd_flush_task_state(p);

        if (likely(!(p->flags & PF_KTHREAD))) {
                *childregs = *current_pt_regs();
                childregs->regs[0] = 0;

                /*
                 * Read the current TLS pointer from tpidr_el0 as it may be
                 * out-of-sync with the saved value.
                 */
                *task_user_tls(p) = read_sysreg(tpidr_el0);

                if (stack_start) {
                        if (is_compat_thread(task_thread_info(p)))
                                childregs->compat_sp = stack_start;
                        else
                                childregs->sp = stack_start;
                }

                /*
                 * If a TLS pointer was passed to clone (4th argument), use it
                 * for the new thread.
                 */
                if (clone_flags & CLONE_SETTLS)
                        p->thread.uw.tp_value = childregs->regs[3];
        } else {
                memset(childregs, 0, sizeof(struct pt_regs));
                childregs->pstate = PSR_MODE_EL1h;
                if (IS_ENABLED(CONFIG_ARM64_UAO) &&
                    cpus_have_const_cap(ARM64_HAS_UAO))
                        childregs->pstate |= PSR_UAO_BIT;

                if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
                        set_ssbs_bit(childregs);

                if (system_uses_irq_prio_masking())
                        childregs->pmr_save = GIC_PRIO_IRQON;

                p->thread.cpu_context.x19 = stack_start;
                p->thread.cpu_context.x20 = stk_sz;
        }
        p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
        p->thread.cpu_context.sp = (unsigned long)childregs;

        ptrace_hw_copy_thread(p);

        return 0;
}
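
/*
 * Rough picture of how the new task starts running: cpu_switch_to()
 * restores cpu_context, so the child first executes ret_from_fork with
 * its kernel stack pointer at childregs. For user threads childregs is
 * a copy of the parent's regs with x0 forced to 0 (hence fork()'s zero
 * return in the child); for kernel threads ret_from_fork instead
 * branches to the function stashed in x19 with the argument from x20.
 */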

void tls_preserve_current_state(void)
{
        *task_user_tls(current) = read_sysreg(tpidr_el0);
}

static void tls_thread_switch(struct task_struct *next)
{
        tls_preserve_current_state();

        if (is_compat_thread(task_thread_info(next)))
                write_sysreg(next->thread.uw.tp_value, tpidrro_el0);
        else if (!arm64_kernel_unmapped_at_el0())
                write_sysreg(0, tpidrro_el0);

        write_sysreg(*task_user_tls(next), tpidr_el0);
}

/* Restore the UAO state depending on next's addr_limit */
void uao_thread_switch(struct task_struct *next)
{
        if (IS_ENABLED(CONFIG_ARM64_UAO)) {
                if (task_thread_info(next)->addr_limit == KERNEL_DS)
                        asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
                else
                        asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO));
        }
}
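
/*
 * With set_fs(KERNEL_DS) in effect (addr_limit == KERNEL_DS), uaccess
 * routines are allowed to touch kernel addresses, so PSTATE.UAO is set
 * to make the unprivileged load/store instructions (LDTR/STTR) they use
 * behave as ordinary privileged accesses; otherwise UAO is cleared so
 * those instructions can only reach user memory.
 */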

/*
 * Force SSBS state on context-switch, since it may be lost after migrating
 * from a CPU which treats the bit as RES0 in a heterogeneous system.
 */
static void ssbs_thread_switch(struct task_struct *next)
{
        struct pt_regs *regs = task_pt_regs(next);

        /*
         * Nothing to do for kernel threads, but 'regs' may be junk
         * (e.g. idle task) so check the flags and bail early.
         */
        if (unlikely(next->flags & PF_KTHREAD))
                return;

        /* If the mitigation is enabled, then we leave SSBS clear. */
        if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) ||
            test_tsk_thread_flag(next, TIF_SSBD))
                return;

        if (compat_user_mode(regs))
                set_compat_ssbs_bit(regs);
        else if (user_mode(regs))
                set_ssbs_bit(regs);
}
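
/*
 * The SSBS bit is updated in next's saved pt_regs rather than in the
 * live PSTATE: it takes effect when the task returns to userspace via
 * ERET, which restores PSTATE from those saved regs.
 */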

/*
 * We store our current task in sp_el0, which is clobbered by userspace. Keep a
 * shadow copy so that we can restore this upon entry from userspace.
 *
 * This is *only* for exception entry from EL0, and is not valid until we
 * __switch_to() a user task.
 */
DEFINE_PER_CPU(struct task_struct *, __entry_task);

static void entry_task_switch(struct task_struct *next)
{
        __this_cpu_write(__entry_task, next);
}

/*
 * Thread switching.
 */
__notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
                                struct task_struct *next)
{
        struct task_struct *last;

        fpsimd_thread_switch(next);
        tls_thread_switch(next);
        hw_breakpoint_thread_switch(next);
        contextidr_thread_switch(next);
        entry_task_switch(next);
        uao_thread_switch(next);
        ptrauth_thread_switch(next);
        ssbs_thread_switch(next);

        /*
         * Complete any pending TLB or cache maintenance on this CPU in case
         * the thread migrates to a different CPU.
         * This full barrier is also required by the membarrier system
         * call.
         */
        dsb(ish);

        /* the actual thread switch */
        last = cpu_switch_to(prev, next);

        return last;
}

unsigned long get_wchan(struct task_struct *p)
{
        struct stackframe frame;
        unsigned long stack_page, ret = 0;
        int count = 0;

        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;

        stack_page = (unsigned long)try_get_task_stack(p);
        if (!stack_page)
                return 0;

        start_backtrace(&frame, thread_saved_fp(p), thread_saved_pc(p));

        do {
                if (unwind_frame(p, &frame))
                        goto out;
                if (!in_sched_functions(frame.pc)) {
                        ret = frame.pc;
                        goto out;
                }
        } while (count++ < 16);

out:
        put_task_stack(p);
        return ret;
}
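
/*
 * The 16-frame limit bounds the unwind in case the sleeping task's saved
 * stack is corrupt or self-referential; in_sched_functions() skips
 * scheduler internals so the reported wchan is the function that
 * actually blocked.
 */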

unsigned long arch_align_stack(unsigned long sp)
{
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                sp -= get_random_int() & ~PAGE_MASK;
        return sp & ~0xf;
}
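
/*
 * get_random_int() & ~PAGE_MASK keeps only the sub-page bits, so the
 * stack pointer is perturbed by at most PAGE_SIZE - 1 bytes; the final
 * '& ~0xf' re-establishes the 16-byte stack alignment that AAPCS64
 * requires.
 */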

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
        if (is_compat_task())
                return randomize_page(mm->brk, SZ_32M);
        else
                return randomize_page(mm->brk, SZ_1G);
}

/*
 * Called from setup_new_exec() after (COMPAT_)SET_PERSONALITY.
 */
void arch_setup_new_exec(void)
{
        current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0;

        ptrauth_thread_init_user(current);
}