SYM_FUNC_END(__create_page_tables)
/*
+ * Initialize CPU registers with task-specific and cpu-specific context.
+ *
* Create a final frame record at task_pt_regs(current)->stackframe, so
* that the unwinder can identify the final frame record of any task by
* its location in the task stack. We reserve the entire pt_regs space
* for consistency with user tasks and kthreads.
*/
- .macro setup_final_frame
+ .macro init_cpu_task tsk, tmp
+	msr	sp_el0, \tsk			// Save task pointer ('current')
+
+	ldr	\tmp, [\tsk, #TSK_STACK]	// Switch to the task's stack:
+	add	sp, \tmp, #THREAD_SIZE		// SP = stack top (grows down)
sub sp, sp, #PT_REGS_SIZE
+
stp xzr, xzr, [sp, #S_STACKFRAME]
add x29, sp, #S_STACKFRAME
+
+	scs_load \tsk, \tmp			// Set shadow call stack pointer
.endm
/*
* x0 = __PHYS_OFFSET
*/
SYM_FUNC_START_LOCAL(__primary_switched)
- adrp x4, init_thread_union
- add sp, x4, #THREAD_SIZE
- adr_l x5, init_task
- msr sp_el0, x5 // Save thread_info
+ adr_l x4, init_task
+ init_cpu_task x4, x5
adr_l x8, vectors // load VBAR_EL1 with virtual
msr vbar_el1, x8 // vector table address
isb
- stp xzr, x30, [sp, #-16]!
+	stp	x29, x30, [sp, #-16]!		// x29 now valid: init_cpu_task set the final frame record
mov x29, sp
-#ifdef CONFIG_SHADOW_CALL_STACK
- adr_l scs_sp, init_shadow_call_stack // Set shadow call stack
-#endif
-
str_l x21, __fdt_pointer, x5 // Save FDT pointer
ldr_l x4, kimage_vaddr // Save the offset between
0:
#endif
bl switch_to_vhe // Prefer VHE if possible
- add sp, sp, #16
- setup_final_frame
+ ldp x29, x30, [sp], #16
bl start_kernel
ASM_BUG()
SYM_FUNC_END(__primary_switched)
ldr x2, [x0, #CPU_BOOT_TASK]
cbz x2, __secondary_too_slow
- ldr x1, [x2, #TSK_STACK]
- add sp, x1, #THREAD_SIZE
-
- msr sp_el0, x2
- scs_load x2, x3
- setup_final_frame
+ init_cpu_task x2, x1
#ifdef CONFIG_ARM64_PTR_AUTH
ptrauth_keys_init_cpu x2, x3, x4, x5