/*
 * Based on arch/arm/kernel/process.c
 *
 * Original Copyright (C) 1995  Linus Torvalds
 * Copyright (C) 1996-2000 Russell King - Converted to ARM.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>

#include <linux/compat.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/elfcore.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/utsname.h>
#include <linux/uaccess.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/personality.h>
#include <linux/notifier.h>
#include <trace/events/power.h>
#include <linux/percpu.h>
#include <linux/thread_info.h>

#include <asm/alternative.h>
#include <asm/compat.h>
#include <asm/cacheflush.h>
#include <asm/exec.h>
#include <asm/fpsimd.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
#include <asm/stacktrace.h>
#ifdef CONFIG_CC_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
/*
 * Function pointers to optional machine specific functions
 */
void (*pm_power_off)(void);
EXPORT_SYMBOL_GPL(pm_power_off);

void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
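
/*
 * Note: these hooks are populated at runtime by platform code; on most
 * arm64 systems, for example, the PSCI firmware driver installs both
 * pm_power_off and arm_pm_restart.
 */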
/*
 * This is our default idle handler.
 */
void arch_cpu_idle(void)
{
	/*
	 * This should do all the clock switching and wait for interrupt
	 * tricks.
	 */
	trace_cpu_idle_rcuidle(1, smp_processor_id());
	cpu_do_idle();
	local_irq_enable();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
	cpu_die();
}
#endif
/*
 * Called by kexec, immediately prior to machine_kexec().
 *
 * This must completely disable all secondary CPUs; simply causing those CPUs
 * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
 * kexec'd kernel to use any and all RAM as it sees fit, without having to
 * avoid any code or data used by any SW CPU pin loop. The CPU hotplug
 * functionality embodied in disable_nonboot_cpus() is used to achieve this.
 */
void machine_shutdown(void)
{
	disable_nonboot_cpus();
}
/*
 * Halting simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this.
 */
void machine_halt(void)
{
	local_irq_disable();
	smp_send_stop();
	while (1);
}
/*
 * Power-off simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this. When the system power is turned off, it will take all CPUs
 * with it.
 */
void machine_power_off(void)
{
	local_irq_disable();
	smp_send_stop();
	if (pm_power_off)
		pm_power_off();
}
/*
 * Restart requires that the secondary CPUs stop performing any activity
 * while the primary CPU resets the system. Systems with multiple CPUs must
 * provide a HW restart implementation, to ensure that all CPUs reset at once.
 * This is required so that any code running after reset on the primary CPU
 * doesn't have to co-ordinate with other CPUs to ensure they aren't still
 * executing pre-reset code, and using RAM that the primary CPU's code wishes
 * to use. Implementing such co-ordination would be essentially impossible.
 */
void machine_restart(char *cmd)
{
	/* Disable interrupts first */
	local_irq_disable();
	smp_send_stop();

	/*
	 * UpdateCapsule() depends on the system being reset via
	 * ResetSystem().
	 */
	if (efi_enabled(EFI_RUNTIME_SERVICES))
		efi_reboot(reboot_mode, NULL);

	/* Now call the architecture specific reboot code. */
	if (arm_pm_restart)
		arm_pm_restart(reboot_mode, cmd);
	else
		do_kernel_restart(cmd);

	/*
	 * Whoops - the architecture was unable to reboot.
	 */
	printk("Reboot failed -- System halted\n");
	while (1);
}
static void print_pstate(struct pt_regs *regs)
{
	u64 pstate = regs->pstate;

	if (compat_user_mode(regs)) {
		printk("pstate: %08llx (%c%c%c%c %c %s %s %c%c%c)\n",
			pstate,
			pstate & COMPAT_PSR_N_BIT ? 'N' : 'n',
			pstate & COMPAT_PSR_Z_BIT ? 'Z' : 'z',
			pstate & COMPAT_PSR_C_BIT ? 'C' : 'c',
			pstate & COMPAT_PSR_V_BIT ? 'V' : 'v',
			pstate & COMPAT_PSR_Q_BIT ? 'Q' : 'q',
			pstate & COMPAT_PSR_T_BIT ? "T32" : "A32",
			pstate & COMPAT_PSR_E_BIT ? "BE" : "LE",
			pstate & COMPAT_PSR_A_BIT ? 'A' : 'a',
			pstate & COMPAT_PSR_I_BIT ? 'I' : 'i',
			pstate & COMPAT_PSR_F_BIT ? 'F' : 'f');
	} else {
		printk("pstate: %08llx (%c%c%c%c %c%c%c%c %cPAN %cUAO)\n",
			pstate,
			pstate & PSR_N_BIT ? 'N' : 'n',
			pstate & PSR_Z_BIT ? 'Z' : 'z',
			pstate & PSR_C_BIT ? 'C' : 'c',
			pstate & PSR_V_BIT ? 'V' : 'v',
			pstate & PSR_D_BIT ? 'D' : 'd',
			pstate & PSR_A_BIT ? 'A' : 'a',
			pstate & PSR_I_BIT ? 'I' : 'i',
			pstate & PSR_F_BIT ? 'F' : 'f',
			pstate & PSR_PAN_BIT ? '+' : '-',
			pstate & PSR_UAO_BIT ? '+' : '-');
	}
}
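
/*
 * For example, a native 64-bit task with interrupts enabled and only the
 * Z and C condition flags set (pstate == 0x60000000) is printed as:
 *
 *   pstate: 60000000 (nZCv daif -PAN -UAO)
 */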
void __show_regs(struct pt_regs *regs)
{
	int i, top_reg;
	u64 lr, sp;

	if (compat_user_mode(regs)) {
		lr = regs->compat_lr;
		sp = regs->compat_sp;
		top_reg = 12;
	} else {
		lr = regs->regs[30];
		sp = regs->sp;
		top_reg = 29;
	}

	show_regs_print_info(KERN_DEFAULT);
	print_pstate(regs);
	print_symbol("pc : %s\n", regs->pc);
	print_symbol("lr : %s\n", lr);
	printk("sp : %016llx\n", sp);

	i = top_reg;

	while (i >= 0) {
		printk("x%-2d: %016llx ", i, regs->regs[i]);
		i--;

		if (i % 2 == 0) {
			pr_cont("x%-2d: %016llx ", i, regs->regs[i]);
			i--;
		}

		pr_cont("\n");
	}
}
void show_regs(struct pt_regs * regs)
{
	__show_regs(regs);
	dump_backtrace(regs, NULL);
}
static void tls_thread_flush(void)
{
	write_sysreg(0, tpidr_el0);

	if (is_compat_task()) {
		current->thread.tp_value = 0;

		/*
		 * We need to ensure ordering between the shadow state and the
		 * hardware state, so that we don't corrupt the hardware state
		 * with a stale shadow state during context switch.
		 */
		barrier();
		write_sysreg(0, tpidrro_el0);
	}
}
void flush_thread(void)
{
	fpsimd_flush_thread();
	tls_thread_flush();
	flush_ptrace_hw_breakpoint(current);
}
void release_thread(struct task_struct *dead_task)
{
}
void arch_release_task_struct(struct task_struct *tsk)
{
	fpsimd_release_task(tsk);
}
/*
 * src and dst may temporarily have aliased sve_state after task_struct
 * is copied. We cannot fix this properly here, because src may have
 * live SVE state and dst's thread_info may not exist yet, so tweaking
 * either src's or dst's TIF_SVE is not safe.
 *
 * The unaliasing is done in copy_thread() instead. This works because
 * dst is not schedulable or traceable until both of these functions
 * have been called.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	if (current->mm)
		fpsimd_preserve_current_state();
	*dst = *src;

	return 0;
}
asmlinkage void ret_from_fork(void) asm("ret_from_fork");
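
/*
 * ret_from_fork lives in entry.S: after schedule_tail(), a kernel thread
 * branches to the function saved in x19 with x20 as its argument (both set
 * up in copy_thread() below), while a user task falls through to
 * ret_to_user. The cpu_context.pc of every new task points here.
 */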
int copy_thread(unsigned long clone_flags, unsigned long stack_start,
		unsigned long stk_sz, struct task_struct *p)
{
	struct pt_regs *childregs = task_pt_regs(p);

	memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));

	/*
	 * Unalias p->thread.sve_state (if any) from the parent task
	 * and disable SVE, discarding any SVE state, for p:
	 */
	clear_tsk_thread_flag(p, TIF_SVE);
	p->thread.sve_state = NULL;

	if (likely(!(p->flags & PF_KTHREAD))) {
		*childregs = *current_pt_regs();
		childregs->regs[0] = 0;

		/*
		 * Read the current TLS pointer from tpidr_el0 as it may be
		 * out-of-sync with the saved value.
		 */
		*task_user_tls(p) = read_sysreg(tpidr_el0);

		if (stack_start) {
			if (is_compat_thread(task_thread_info(p)))
				childregs->compat_sp = stack_start;
			else
				childregs->sp = stack_start;
		}

		/*
		 * If a TLS pointer was passed to clone (4th argument), use it
		 * for the new thread.
		 */
		if (clone_flags & CLONE_SETTLS)
			p->thread.tp_value = childregs->regs[3];
	} else {
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->pstate = PSR_MODE_EL1h;
		if (IS_ENABLED(CONFIG_ARM64_UAO) &&
		    cpus_have_const_cap(ARM64_HAS_UAO))
			childregs->pstate |= PSR_UAO_BIT;
		p->thread.cpu_context.x19 = stack_start;
		p->thread.cpu_context.x20 = stk_sz;
	}
	p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
	p->thread.cpu_context.sp = (unsigned long)childregs;

	ptrace_hw_copy_thread(p);

	return 0;
}
void tls_preserve_current_state(void)
{
	*task_user_tls(current) = read_sysreg(tpidr_el0);
}
static void tls_thread_switch(struct task_struct *next)
{
	unsigned long tpidr, tpidrro;

	tls_preserve_current_state();

	tpidr = *task_user_tls(next);
	tpidrro = is_compat_thread(task_thread_info(next)) ?
		  next->thread.tp_value : 0;

	write_sysreg(tpidr, tpidr_el0);
	write_sysreg(tpidrro, tpidrro_el0);
}
/* Restore the UAO state depending on next's addr_limit */
void uao_thread_switch(struct task_struct *next)
{
	if (IS_ENABLED(CONFIG_ARM64_UAO)) {
		if (task_thread_info(next)->addr_limit == KERNEL_DS)
			asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
		else
			asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO));
	}
}
/*
 * We store our current task in sp_el0, which is clobbered by userspace. Keep a
 * shadow copy so that we can restore this upon entry from userspace.
 *
 * This is *only* for exception entry from EL0, and is not valid until we
 * __switch_to() a user task.
 */
DEFINE_PER_CPU(struct task_struct *, __entry_task);

static void entry_task_switch(struct task_struct *next)
{
	__this_cpu_write(__entry_task, next);
}
/*
 * Thread switching.
 */
__notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
				struct task_struct *next)
{
	struct task_struct *last;

	fpsimd_thread_switch(next);
	tls_thread_switch(next);
	hw_breakpoint_thread_switch(next);
	contextidr_thread_switch(next);
	entry_task_switch(next);
	uao_thread_switch(next);

	/*
	 * Complete any pending TLB or cache maintenance on this CPU in case
	 * the thread migrates to a different CPU.
	 * This full barrier is also required by the membarrier system
	 * call.
	 */
	dsb(ish);

	/* the actual thread switch */
	last = cpu_switch_to(prev, next);

	return last;
}
unsigned long get_wchan(struct task_struct *p)
{
	struct stackframe frame;
	unsigned long stack_page, ret = 0;
	int count = 0;
	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	stack_page = (unsigned long)try_get_task_stack(p);
	if (!stack_page)
		return 0;

	frame.fp = thread_saved_fp(p);
	frame.pc = thread_saved_pc(p);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	frame.graph = p->curr_ret_stack;
#endif
	do {
		if (unwind_frame(p, &frame))
			goto out;
		if (!in_sched_functions(frame.pc)) {
			ret = frame.pc;
			goto out;
		}
	} while (count++ < 16);

out:
	put_task_stack(p);
	return ret;
}
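
/*
 * Example: with 4K pages, the subtraction below lowers sp by 0-4095 bytes
 * (get_random_int() & ~PAGE_MASK) before the result is rounded down to
 * the 16-byte alignment that the AAPCS64 requires of the stack pointer.
 */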
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;
	return sp & ~0xf;
}
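
/*
 * The heap base is randomized within a window above mm->brk: 32M for
 * compat (AArch32) tasks, whose address space is scarce, and 1G for
 * native 64-bit tasks.
 */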
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	if (is_compat_task())
		return randomize_page(mm->brk, SZ_32M);
	else
		return randomize_page(mm->brk, SZ_1G);
}
/*
 * Called from setup_new_exec() after (COMPAT_)SET_PERSONALITY.
 */
void arch_setup_new_exec(void)
{
	current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0;
}