/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/irq.h>
#include <asm/memory.h>
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-uaccess.h>
#include <asm/unistd.h>
/*
 * Context tracking subsystem. Used to instrument transitions
 * between user and kernel mode.
 */
	.macro ct_user_exit, syscall = 0
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_exit
	.if \syscall == 1
	/*
	 * Save/restore needed during syscalls.  Restore syscall arguments from
	 * the values already saved on stack during kernel_entry.
	 */
	ldp	x0, x1, [sp]
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	.endif
#endif
	.endm

	.macro ct_user_enter
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_enter
#endif
	.endm
	.macro kernel_ventry, el, label, regsize = 64
	.align 7
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
alternative_if ARM64_UNMAP_KERNEL_AT_EL0
	.if	\el == 0
	.if	\regsize == 64
	mrs	x30, tpidrro_el0
	msr	tpidrro_el0, xzr
	.else
	mov	x30, xzr
	.endif
	.endif
alternative_else_nop_endif
#endif

	sub	sp, sp, #S_FRAME_SIZE
#ifdef CONFIG_VMAP_STACK
	/*
	 * Test whether the SP has overflowed, without corrupting a GPR.
	 * Task and IRQ stacks are aligned to (1 << THREAD_SHIFT).
	 */
	add	sp, sp, x0			// sp' = sp + x0
	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
	tbnz	x0, #THREAD_SHIFT, 0f
	sub	x0, sp, x0			// x0'' = sp' - x0' = (sp + x0) - sp = x0
	sub	sp, sp, x0			// sp'' = sp' - x0'' = (sp + x0) - x0 = sp
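	/*
	 * Note: SP itself is the only scratch register available here. The
	 * test relies on VMAP'd stacks being aligned to twice their size
	 * (THREAD_ALIGN), so bit THREAD_SHIFT of a valid frame-adjusted SP
	 * is always clear, and becomes set once the SP underflows the
	 * stack base.
	 */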
	b	el\()\el\()_\label

0:
	/*
	 * Either we've just detected an overflow, or we've taken an exception
	 * while on the overflow stack. Either way, we won't return to
	 * userspace, and can clobber EL0 registers to free up GPRs.
	 */

	/* Stash the original SP (minus S_FRAME_SIZE) in tpidr_el0. */
	msr	tpidr_el0, x0

	/* Recover the original x0 value and stash it in tpidrro_el0 */
	sub	x0, sp, x0
	msr	tpidrro_el0, x0

	/* Switch to the overflow stack */
	adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0

	/*
	 * Check whether we were already on the overflow stack. This may happen
	 * after panic() re-enables interrupts.
	 */
	mrs	x0, tpidr_el0			// sp of interrupted context
	sub	x0, sp, x0			// delta with top of overflow stack
	tst	x0, #~(OVERFLOW_STACK_SIZE - 1)	// within range?
	b.ne	__bad_stack			// no? -> bad stack pointer

	/* We were already on the overflow stack. Restore sp/x0 and carry on. */
	sub	sp, sp, x0
	mrs	x0, tpidrro_el0
#endif
	b	el\()\el\()_\label
	.endm
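/*
 * Compute the trampoline alias of a symbol: \sym's offset within
 * .entry.tramp.text is rebased onto TRAMP_VALIAS, the fixmap alias of
 * the trampoline page that stays mapped while the rest of the kernel
 * is unmapped from the EL0 page tables.
 */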
	.macro tramp_alias, dst, sym
	mov_q	\dst, TRAMP_VALIAS
	add	\dst, \dst, #(\sym - .entry.tramp.text)
	.endm
	.macro	kernel_entry, el, regsize = 64
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	stp	x0, x1, [sp, #16 * 0]
	stp	x2, x3, [sp, #16 * 1]
	stp	x4, x5, [sp, #16 * 2]
	stp	x6, x7, [sp, #16 * 3]
	stp	x8, x9, [sp, #16 * 4]
	stp	x10, x11, [sp, #16 * 5]
	stp	x12, x13, [sp, #16 * 6]
	stp	x14, x15, [sp, #16 * 7]
	stp	x16, x17, [sp, #16 * 8]
	stp	x18, x19, [sp, #16 * 9]
	stp	x20, x21, [sp, #16 * 10]
	stp	x22, x23, [sp, #16 * 11]
	stp	x24, x25, [sp, #16 * 12]
	stp	x26, x27, [sp, #16 * 13]
	stp	x28, x29, [sp, #16 * 14]
	.if	\el == 0
	mrs	x21, sp_el0
	ldr_this_cpu	tsk, __entry_task, x20	// Ensure MDSCR_EL1.SS is clear,
	ldr	x19, [tsk, #TSK_TI_FLAGS]	// since we can unmask debug
	disable_step_tsk x19, x20		// exceptions when scheduling.

	mov	x29, xzr			// fp pointed to user-space
	.else
	add	x21, sp, #S_FRAME_SIZE
	get_thread_info tsk
	/* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
	ldr	x20, [tsk, #TSK_TI_ADDR_LIMIT]
	str	x20, [sp, #S_ORIG_ADDR_LIMIT]
	mov	x20, #TASK_SIZE_64
	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
	/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
	.endif /* \el == 0 */
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]

	/*
	 * In order to be able to dump the contents of struct pt_regs at the
	 * time the exception was taken (in case we attempt to walk the call
	 * stack later), chain it together with the stack frames.
	 */
	.if \el == 0
	stp	xzr, xzr, [sp, #S_STACKFRAME]
	.else
	stp	x29, x22, [sp, #S_STACKFRAME]
	.endif
	add	x29, sp, #S_STACKFRAME
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
	 * EL0, there is no need to check the state of TTBR0_EL1 since
	 * accesses are always enabled.
	 * Note that the meaning of this bit differs from the ARMv8.1 PAN
	 * feature as all TTBR0_EL1 accesses are disabled, not just those to
	 * user mappings.
	 */
alternative_if ARM64_HAS_PAN
	b	1f				// skip TTBR0 PAN
alternative_else_nop_endif

	.if	\el != 0
	mrs	x21, ttbr0_el1
	tst	x21, #0xffff << 48		// Check for the reserved ASID
	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
	b.eq	1f				// TTBR0 access already disabled
	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
	.endif
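	/*
	 * __uaccess_ttbr0_disable points TTBR0_EL1 at the reserved zero
	 * page (using the reserved ASID), so any EL0 address access will
	 * fault until the uaccess routines explicitly re-enable TTBR0.
	 */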
	__uaccess_ttbr0_disable x21
1:
#endif

	stp	x22, x23, [sp, #S_PC]

	/*
	 * Set syscallno to -1 by default (overridden later if real syscall).
	 */
	.if	\el == 0
	mov	w21, #NO_SYSCALL
	str	w21, [sp, #S_SYSCALLNO]
	.endif

	/*
	 * Set sp_el0 to current thread_info.
	 */
	.if	\el == 0
	msr	sp_el0, tsk
	.endif

	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	 */
	.endm
	.macro	kernel_exit, el
	.if	\el != 0
	/* Restore the task's original addr_limit. */
	ldr	x20, [sp, #S_ORIG_ADDR_LIMIT]
	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]

	/* No need to restore UAO, it will be restored from SPSR_EL1 */
	.endif

	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
	.if	\el == 0
	ct_user_enter
	.endif

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
	 * PAN bit checking.
	 */
alternative_if ARM64_HAS_PAN
	b	2f				// skip TTBR0 PAN
alternative_else_nop_endif

	.if	\el != 0
	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
	.endif

	__uaccess_ttbr0_enable x0, x1
	.if	\el == 0
	/*
	 * Enable errata workarounds only if returning to user. The only
	 * workaround currently required for TTBR0_EL1 changes are for the
	 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
	 * corruption).
	 */
	post_ttbr_update_workaround
	.endif
1:
	.if	\el != 0
	and	x22, x22, #~PSR_PAN_BIT		// ARMv8.0 CPUs do not understand this bit
	.endif
2:
#endif

	.if	\el == 0
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	msr	sp_el0, x23
	tst	x22, #PSR_MODE32_BIT		// native task?
	b.eq	3f
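	/*
	 * Cortex-A53 erratum 845719 workaround: returning to a 32-bit EL0
	 * task requires a dummy write to CONTEXTIDR_EL1. If the PID lives
	 * in CONTEXTIDR, rewrite the current value so it is preserved;
	 * otherwise write zero.
	 */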
#ifdef CONFIG_ARM64_ERRATUM_845719
alternative_if ARM64_WORKAROUND_845719
#ifdef CONFIG_PID_IN_CONTEXTIDR
	mrs	x29, contextidr_el1
	msr	contextidr_el1, x29
#else
	msr	contextidr_el1, xzr
#endif
alternative_else_nop_endif
#endif
3:
	.endif
	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	ldp	x0, x1, [sp, #16 * 0]
	ldp	x2, x3, [sp, #16 * 1]
	ldp	x4, x5, [sp, #16 * 2]
	ldp	x6, x7, [sp, #16 * 3]
	ldp	x8, x9, [sp, #16 * 4]
	ldp	x10, x11, [sp, #16 * 5]
	ldp	x12, x13, [sp, #16 * 6]
	ldp	x14, x15, [sp, #16 * 7]
	ldp	x16, x17, [sp, #16 * 8]
	ldp	x18, x19, [sp, #16 * 9]
	ldp	x20, x21, [sp, #16 * 10]
	ldp	x22, x23, [sp, #16 * 11]
	ldp	x24, x25, [sp, #16 * 12]
	ldp	x26, x27, [sp, #16 * 13]
	ldp	x28, x29, [sp, #16 * 14]
	ldr	lr, [sp, #S_LR]
	add	sp, sp, #S_FRAME_SIZE		// restore sp

	.if	\el == 0
alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	bne	4f
	msr	far_el1, x30
	tramp_alias	x30, tramp_exit_native
	br	x30
4:
	tramp_alias	x30, tramp_exit_compat
	br	x30
#endif
	.else
	eret
	.endif
	.endm
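/*
 * With ARM64_UNMAP_KERNEL_AT_EL0 the eret above is patched to a nop,
 * and EL0 returns leave via the trampoline instead: x30 is stashed in
 * FAR_EL1 and recovered by tramp_exit once the kernel has been
 * unmapped and VBAR_EL1 points back at the trampoline vectors.
 */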
	.macro	irq_stack_entry
	mov	x19, sp			// preserve the original sp

	/*
	 * Compare sp with the base of the task stack.
	 * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
	 * and should switch to the irq stack.
	 */
	ldr	x25, [tsk, TSK_STACK]
	eor	x25, x25, x19
	and	x25, x25, #~(THREAD_SIZE - 1)
	cbnz	x25, 9998f

	ldr_this_cpu x25, irq_stack_ptr, x26
	mov	x26, #IRQ_STACK_SIZE
	add	x26, x25, x26

	/* switch to the irq stack */
	mov	sp, x26
9998:
	.endm

	/*
	 * x19 should be preserved between irq_stack_entry and
	 * irq_stack_exit.
	 */
	.macro	irq_stack_exit
	mov	sp, x19
	.endm
/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - x0 to x6.
 *
 * x7 is reserved for the system call number in 32-bit mode.
 */
wsc_nr	.req	w25		// number of system calls
wscno	.req	w26		// syscall number
xscno	.req	x26		// syscall number (zero-extended)
stbl	.req	x27		// syscall table pointer
tsk	.req	x28		// current thread_info
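/*
 * With THREAD_INFO_IN_TASK, thread_info is embedded at the start of
 * task_struct and a copy of the task pointer is kept in sp_el0 while
 * running in the kernel, so tsk effectively names the current task.
 */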
/*
 * Interrupt handling.
 */
	.macro	irq_handler
	ldr_l	x1, handle_arch_irq
	mov	x0, sp
	irq_stack_entry
	blr	x1
	irq_stack_exit
	.endm
	.pushsection ".entry.text", "ax"
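/*
 * Exception vector table: four groups of four entries (Synchronous,
 * IRQ, FIQ, SError) for, in order, EL1t, EL1h, 64-bit EL0 and 32-bit
 * EL0. Each kernel_ventry expansion must fit in the 128 bytes implied
 * by ".align 7".
 */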
	.align	11
ENTRY(vectors)
	kernel_ventry	1, sync_invalid			// Synchronous EL1t
	kernel_ventry	1, irq_invalid			// IRQ EL1t
	kernel_ventry	1, fiq_invalid			// FIQ EL1t
	kernel_ventry	1, error_invalid		// Error EL1t

	kernel_ventry	1, sync				// Synchronous EL1h
	kernel_ventry	1, irq				// IRQ EL1h
	kernel_ventry	1, fiq_invalid			// FIQ EL1h
	kernel_ventry	1, error_invalid		// Error EL1h

	kernel_ventry	0, sync				// Synchronous 64-bit EL0
	kernel_ventry	0, irq				// IRQ 64-bit EL0
	kernel_ventry	0, fiq_invalid			// FIQ 64-bit EL0
	kernel_ventry	0, error_invalid		// Error 64-bit EL0

#ifdef CONFIG_COMPAT
	kernel_ventry	0, sync_compat, 32		// Synchronous 32-bit EL0
	kernel_ventry	0, irq_compat, 32		// IRQ 32-bit EL0
	kernel_ventry	0, fiq_invalid_compat, 32	// FIQ 32-bit EL0
	kernel_ventry	0, error_invalid_compat, 32	// Error 32-bit EL0
#else
	kernel_ventry	0, sync_invalid, 32		// Synchronous 32-bit EL0
	kernel_ventry	0, irq_invalid, 32		// IRQ 32-bit EL0
	kernel_ventry	0, fiq_invalid, 32		// FIQ 32-bit EL0
	kernel_ventry	0, error_invalid, 32		// Error 32-bit EL0
#endif
END(vectors)
#ifdef CONFIG_VMAP_STACK
	/*
	 * We detected an overflow in kernel_ventry, which switched to the
	 * overflow stack. Stash the exception regs, and head to our overflow
	 * handler.
	 */
__bad_stack:
	/* Restore the original x0 value */
	mrs	x0, tpidrro_el0

	/*
	 * Store the original GPRs to the new stack. The original SP (minus
	 * S_FRAME_SIZE) was stashed in tpidr_el0 by kernel_ventry.
	 */
	sub	sp, sp, #S_FRAME_SIZE
	kernel_entry 1
	mrs	x0, tpidr_el0
	add	x0, x0, #S_FRAME_SIZE
	str	x0, [sp, #S_SP]

	/* Stash the regs for handle_bad_stack */
	mov	x0, sp

	/* Time to die */
	bl	handle_bad_stack
	ASM_BUG()
#endif /* CONFIG_VMAP_STACK */
/*
 * Invalid mode handlers
 */
	.macro	inv_entry, el, reason, regsize = 64
	kernel_entry \el, \regsize
	mov	x0, sp
	mov	x1, #\reason
	mrs	x2, esr_el1
	bl	bad_mode
	ASM_BUG()
	.endm

el0_sync_invalid:
	inv_entry 0, BAD_SYNC
ENDPROC(el0_sync_invalid)

el0_irq_invalid:
	inv_entry 0, BAD_IRQ
ENDPROC(el0_irq_invalid)

el0_fiq_invalid:
	inv_entry 0, BAD_FIQ
ENDPROC(el0_fiq_invalid)

el0_error_invalid:
	inv_entry 0, BAD_ERROR
ENDPROC(el0_error_invalid)

#ifdef CONFIG_COMPAT
el0_fiq_invalid_compat:
	inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)

el0_error_invalid_compat:
	inv_entry 0, BAD_ERROR, 32
ENDPROC(el0_error_invalid_compat)
#endif

el1_sync_invalid:
	inv_entry 1, BAD_SYNC
ENDPROC(el1_sync_invalid)

el1_irq_invalid:
	inv_entry 1, BAD_IRQ
ENDPROC(el1_irq_invalid)

el1_fiq_invalid:
	inv_entry 1, BAD_FIQ
ENDPROC(el1_fiq_invalid)

el1_error_invalid:
	inv_entry 1, BAD_ERROR
ENDPROC(el1_error_invalid)
/*
 * EL1 mode handlers.
 */
	.align	6
el1_sync:
	kernel_entry 1
	mrs	x1, esr_el1			// read the syndrome register
	lsr	x24, x1, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_DABT_CUR	// data abort in EL1
	b.eq	el1_da
	cmp	x24, #ESR_ELx_EC_IABT_CUR	// instruction abort in EL1
	b.eq	el1_ia
	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
	b.eq	el1_undef
	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL1
	b.eq	el1_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_CUR	// debug exception in EL1
	b.ge	el1_dbg
	b	el1_inv

el1_ia:
	/*
	 * Fall through to the Data abort case
	 */
el1_da:
	/*
	 * Data abort handling
	 */
	mrs	x3, far_el1
	enable_dbg
	// re-enable interrupts if they were enabled in the aborted context
	tbnz	x23, #7, 1f			// PSR_I_BIT
	enable_irq
1:
	clear_address_tag x0, x3
	mov	x2, sp				// struct pt_regs
	bl	do_mem_abort

	// disable interrupts before pulling preserved data off the stack
	disable_irq
	kernel_exit 1
el1_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x0, far_el1
	enable_dbg
	mov	x2, sp
	bl	do_sp_pc_abort
	ASM_BUG()
el1_undef:
	/*
	 * Undefined instruction
	 */
	enable_dbg
	mov	x0, sp
	bl	do_undefinstr
	ASM_BUG()
el1_dbg:
	/*
	 * Debug exception handling
	 */
	cmp	x24, #ESR_ELx_EC_BRK64		// if BRK64
	cinc	x24, x24, eq			// set bit '0'
	tbz	x24, #0, el1_inv		// EL1 only
	mrs	x0, far_el1
	mov	x2, sp				// struct pt_regs
	bl	do_debug_exception
	kernel_exit 1
el1_inv:
	// TODO: add support for undefined instructions in kernel mode
	enable_dbg
	mov	x0, sp
	mov	x2, x1
	mov	x1, #BAD_SYNC
	bl	bad_mode
	ASM_BUG()
ENDPROC(el1_sync)
	.align	6
el1_irq:
	kernel_entry 1
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	irq_handler

#ifdef CONFIG_PREEMPT
	ldr	w24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
	cbnz	w24, 1f				// preempt count != 0
	ldr	x0, [tsk, #TSK_TI_FLAGS]	// get flags
	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
	bl	el1_preempt
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	kernel_exit 1
ENDPROC(el1_irq)

#ifdef CONFIG_PREEMPT
el1_preempt:
	mov	x24, lr
1:	bl	preempt_schedule_irq		// irq en/disable is done inside
	ldr	x0, [tsk, #TSK_TI_FLAGS]	// get new task's TI_FLAGS
	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
	ret	x24
#endif
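/*
 * el1_preempt loops rather than returning straight away because
 * preempt_schedule_irq() may come back with TIF_NEED_RESCHED set
 * again if a wakeup arrived while the task was being scheduled out.
 */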
/*
 * EL0 mode handlers.
 */
	.align	6
el0_sync:
	kernel_entry 0
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_SVC64		// SVC in 64-bit state
	b.eq	el0_svc
	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_ELx_EC_FP_EXC64	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
	b.eq	el0_sys
	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv
#ifdef CONFIG_COMPAT
	.align	6
el0_sync_compat:
	kernel_entry 0, 32
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_SVC32		// SVC in 32-bit state
	b.eq	el0_svc_compat
	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_ELx_EC_FP_EXC32	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP15_32	// CP15 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP15_64	// CP15 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_MR	// CP14 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_LS	// CP14 LDC/STC trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_64	// CP14 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv
el0_svc_compat:
	/*
	 * AArch32 syscall handling
	 */
	adrp	stbl, compat_sys_call_table	// load compat syscall table pointer
	mov	wscno, w7			// syscall number in w7 (r7)
	mov	wsc_nr, #__NR_compat_syscalls
	b	el0_svc_naked

	.align	6
el0_irq_compat:
	kernel_entry 0, 32
	b	el0_irq_naked
#endif
el0_da:
	/*
	 * Data abort handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	clear_address_tag x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_mem_abort
	b	ret_to_user
el0_ia:
	/*
	 * Instruction abort handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_mem_abort
	b	ret_to_user
el0_fpsimd_acc:
	/*
	 * Floating Point or Advanced SIMD access
	 */
	enable_dbg
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_fpsimd_acc
	b	ret_to_user
el0_fpsimd_exc:
	/*
	 * Floating Point or Advanced SIMD exception
	 */
	enable_dbg
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_fpsimd_exc
	b	ret_to_user
el0_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_sp_pc_abort
	b	ret_to_user
el0_undef:
	/*
	 * Undefined instruction
	 */
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, sp
	bl	do_undefinstr
	b	ret_to_user
el0_sys:
	/*
	 * System instructions, for trapped cache maintenance instructions
	 */
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_sysinstr
	b	ret_to_user
el0_dbg:
	/*
	 * Debug exception handling
	 */
	tbnz	x24, #0, el0_inv		// EL0 only
	mrs	x0, far_el1
	mov	x1, x25
	mov	x2, sp
	bl	do_debug_exception
	enable_dbg
	ct_user_exit
	b	ret_to_user
el0_inv:
	enable_dbg
	ct_user_exit
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mov	x2, x25
	bl	bad_mode
	b	ret_to_user
ENDPROC(el0_sync)
	.align	6
el0_irq:
	kernel_entry 0
el0_irq_naked:
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	ct_user_exit
	irq_handler

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	b	ret_to_user
ENDPROC(el0_irq)
/*
 * This is the fast syscall return path.  We do as little as possible here,
 * and this includes saving x0 back into the kernel stack.
 */
ret_fast_syscall:
	disable_irq				// disable interrupts
	str	x0, [sp, #S_X0]			// returned x0
	ldr	x1, [tsk, #TSK_TI_FLAGS]	// re-check for syscall tracing
	and	x2, x1, #_TIF_SYSCALL_WORK
	cbnz	x2, ret_fast_syscall_trace
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
	enable_step_tsk x1, x2
	kernel_exit 0
ret_fast_syscall_trace:
	enable_irq				// enable interrupts
	b	__sys_trace_return_skipped	// we already saved x0
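/*
 * _TIF_SYSCALL_WORK is re-checked on the way out because the flags may
 * have changed while the syscall ran (e.g. a tracer attached), in
 * which case the trace-exit path must run before returning to user.
 */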
/*
 * Ok, we need to do extra processing, enter the slow path.
 */
work_pending:
	mov	x0, sp				// 'regs'
	bl	do_notify_resume
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on		// enabled while in userspace
#endif
	ldr	x1, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
	b	finish_ret_to_user
/*
 * "slow" syscall return path.
 */
ret_to_user:
	disable_irq				// disable interrupts
	ldr	x1, [tsk, #TSK_TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
finish_ret_to_user:
	enable_step_tsk x1, x2
	kernel_exit 0
ENDPROC(ret_to_user)
/*
 * SVC handler.
 */
	.align	6
el0_svc:
	adrp	stbl, sys_call_table		// load syscall table pointer
	mov	wscno, w8			// syscall number in w8
	mov	wsc_nr, #__NR_syscalls
el0_svc_naked:					// compat entry point
	stp	x0, xscno, [sp, #S_ORIG_X0]	// save the original x0 and syscall number
	enable_dbg_and_irq
	ct_user_exit 1

	ldr	x16, [tsk, #TSK_TI_FLAGS]	// check for syscall hooks
	tst	x16, #_TIF_SYSCALL_WORK
	b.ne	__sys_trace
	cmp	wscno, wsc_nr			// check upper syscall limit
	b.hs	ni_sys
	ldr	x16, [stbl, xscno, lsl #3]	// address in the syscall table
	blr	x16				// call sys_* routine
	b	ret_fast_syscall
ni_sys:
	mov	x0, sp
	bl	do_ni_syscall
	b	ret_fast_syscall
ENDPROC(el0_svc)
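/*
 * Note: the syscall-limit check above is an unsigned compare, so
 * out-of-range and "negative" syscall numbers both take the ni_sys
 * path. Each table entry is a 64-bit function pointer, hence the
 * "lsl #3" scaled index.
 */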
	/*
	 * This is the really slow path.  We're going to be doing context
	 * switches, and waiting for our parent to respond.
	 */
__sys_trace:
	cmp	wscno, #-1			// user-issued syscall(-1)?
	b.ne	1f
	mov	x0, #-ENOSYS			// set default errno if so
	str	x0, [sp, #S_X0]
1:	mov	x0, sp
	bl	syscall_trace_enter
	cmp	w0, #-1				// skip the syscall?
	b.eq	__sys_trace_return_skipped
	mov	wscno, w0			// syscall number (possibly new)
	mov	x1, sp				// pointer to regs
	cmp	wscno, wsc_nr			// check upper syscall limit
	b.hs	__ni_sys_trace
	ldp	x0, x1, [sp]			// restore the syscall args
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	ldr	x16, [stbl, xscno, lsl #3]	// address in the syscall table
	blr	x16				// call sys_* routine

__sys_trace_return:
	str	x0, [sp, #S_X0]			// save returned x0
__sys_trace_return_skipped:
	mov	x0, sp
	bl	syscall_trace_exit
	b	ret_to_user

__ni_sys_trace:
	mov	x0, sp
	bl	do_ni_syscall
	b	__sys_trace_return

	.popsection				// .entry.text
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
/*
 * Exception vectors trampoline.
 */
	.pushsection ".entry.tramp.text", "ax"

	.macro tramp_map_kernel, tmp
	mrs	\tmp, ttbr1_el1
	sub	\tmp, \tmp, #(SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
	bic	\tmp, \tmp, #USER_ASID_FLAG
	msr	ttbr1_el1, \tmp
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
	/* ASID already in \tmp[63:48] */
	movk	\tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
	movk	\tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
	/* 2MB boundary containing the vectors, so we nobble the walk cache */
	movk	\tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
	isb
	tlbi	vae1, \tmp
	dsb	nsh
alternative_else_nop_endif
#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
	.endm
	.macro tramp_unmap_kernel, tmp
	mrs	\tmp, ttbr1_el1
	add	\tmp, \tmp, #(SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
	orr	\tmp, \tmp, #USER_ASID_FLAG
	msr	ttbr1_el1, \tmp
	/*
	 * We avoid running the post_ttbr_update_workaround here because the
	 * user and kernel ASIDs don't have conflicting mappings, so any
	 * "blessing" as described in:
	 *
	 *   http://lkml.kernel.org/r/56BB848A.6060603@caviumnetworks.com
	 *
	 * will not hurt correctness. Whilst this may partially defeat the
	 * point of using split ASIDs in the first place, it avoids
	 * the hit of invalidating the entire I-cache on every return to
	 * userspace.
	 */
	.endm
	.macro tramp_ventry, regsize = 64
	.align	7
1:
	.if	\regsize == 64
	msr	tpidrro_el0, x30	// Restored in kernel_ventry
	.endif
	tramp_map_kernel	x30
#ifdef CONFIG_RANDOMIZE_BASE
	adr	x30, tramp_vectors + PAGE_SIZE
alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
	ldr	x30, [x30]
#else
	ldr	x30, =vectors
#endif
	prfm	plil1strm, [x30, #(1b - tramp_vectors)]
	msr	vbar_el1, x30
	add	x30, x30, #(1b - tramp_vectors)
	isb
	ret
	.endm

	.macro tramp_exit, regsize = 64
	adr	x30, tramp_vectors
	msr	vbar_el1, x30
	tramp_unmap_kernel	x30
	.if	\regsize == 64
	mrs	x30, far_el1
	.endif
	eret
	.endm
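/*
 * tramp_exit pairs with the KPTI return path in kernel_exit: VBAR_EL1
 * is pointed back at the trampoline vectors, the kernel is unmapped,
 * and (for 64-bit tasks) x30 is recovered from FAR_EL1 where
 * kernel_exit stashed it.
 */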
	.align	11
ENTRY(tramp_vectors)
	.space	0x400

	tramp_ventry
	tramp_ventry
	tramp_ventry
	tramp_ventry

	tramp_ventry	32
	tramp_ventry	32
	tramp_ventry	32
	tramp_ventry	32
END(tramp_vectors)

ENTRY(tramp_exit_native)
	tramp_exit
END(tramp_exit_native)

ENTRY(tramp_exit_compat)
	tramp_exit	32
END(tramp_exit_compat)

	.ltorg
	.popsection				// .entry.tramp.text
#ifdef CONFIG_RANDOMIZE_BASE
	.pushsection ".rodata", "a"
	.align PAGE_SHIFT
	.globl	__entry_tramp_data_start
__entry_tramp_data_start:
	.quad	vectors
	.popsection				// .rodata
#endif /* CONFIG_RANDOMIZE_BASE */
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
/*
 * Special system call wrappers.
 */
ENTRY(sys_rt_sigreturn_wrapper)
	mov	x0, sp
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)
/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 *   x0 = previous task_struct (must be preserved across the switch)
 *   x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 */
ENTRY(cpu_switch_to)
	mov	x10, #THREAD_CPU_CONTEXT
	add	x8, x0, x10
	mov	x9, sp
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	str	lr, [x8]
	add	x8, x1, x10
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	ldr	lr, [x8]
	mov	sp, x9
	msr	sp_el0, x1
	ret
ENDPROC(cpu_switch_to)
NOKPROBE(cpu_switch_to)
/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	cbz	x19, 1f				// not a kernel thread
	mov	x0, x20
	blr	x19
1:	get_thread_info tsk
	b	ret_to_user
ENDPROC(ret_from_fork)
NOKPROBE(ret_from_fork)