/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/irq.h>
#include <asm/memory.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-uaccess.h>
#include <asm/unistd.h>
/*
 * Context tracking subsystem. Used to instrument transitions
 * between user and kernel mode.
 */
	.macro ct_user_exit, syscall = 0
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_exit
	.if \syscall == 1
	/*
	 * Save/restore needed during syscalls. Restore syscall arguments from
	 * the values already saved on stack during kernel_entry.
	 */
	ldp	x0, x1, [sp]
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	.endif
#endif
	.endm

	.macro ct_user_enter
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_enter
#endif
	.endm
	.macro kernel_ventry	label
	.align 7
	sub	sp, sp, #S_FRAME_SIZE
#ifdef CONFIG_VMAP_STACK
	/*
	 * Test whether the SP has overflowed, without corrupting a GPR.
	 * Task and IRQ stacks are aligned so that SP & (1 << THREAD_SHIFT)
	 * should always be zero.
	 */
	add	sp, sp, x0			// sp' = sp + x0
	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
	tbnz	x0, #THREAD_SHIFT, 0f
	sub	x0, sp, x0			// x0'' = sp' - x0' = (sp + x0) - sp = x0
	sub	sp, sp, x0			// sp'' = sp' - x0 = (sp + x0) - x0 = sp
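	/*
	 * Worked example of the dance above (assuming, e.g., 16KiB stacks:
	 * THREAD_SHIFT == 14, stacks aligned so valid SPs have bit 14
	 * clear): the add/sub pair recovers the decremented SP into x0
	 * without touching any other GPR. If the "sub sp, sp, #S_FRAME_SIZE"
	 * ran off the bottom of the stack, bit THREAD_SHIFT of that SP is
	 * now set and tbnz diverts to the overflow path at 0: below, where
	 * both the original SP and x0 are still reconstructible from
	 * sp' = sp + x0.
	 */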
	b	\label

0:
	/*
	 * Either we've just detected an overflow, or we've taken an exception
	 * while on the overflow stack. Either way, we won't return to
	 * userspace, and can clobber EL0 registers to free up GPRs.
	 */

	/* Stash the original SP (minus S_FRAME_SIZE) in tpidr_el0. */
	msr	tpidr_el0, x0

	/* Recover the original x0 value and stash it in tpidrro_el0 */
	sub	x0, sp, x0
	msr	tpidrro_el0, x0

	/* Switch to the overflow stack */
	adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0
	/*
	 * Check whether we were already on the overflow stack. This may happen
	 * after panic() re-enables interrupts.
	 */
	mrs	x0, tpidr_el0			// sp of interrupted context
	sub	x0, sp, x0			// delta with top of overflow stack
	tst	x0, #~(OVERFLOW_STACK_SIZE - 1)	// within range?
	b.ne	__bad_stack			// no? -> bad stack pointer
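	/*
	 * Here x0 = (top of overflow stack) - (interrupted SP). If the
	 * interrupted context was already running on this CPU's overflow
	 * stack, the delta lies in [0, OVERFLOW_STACK_SIZE), so masking off
	 * the low bits leaves zero and we fall through; anything else is a
	 * fresh overflow, handled via __bad_stack.
	 */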
	/* We were already on the overflow stack. Restore sp/x0 and carry on. */
	sub	sp, sp, x0
	mrs	x0, tpidrro_el0
#endif
	b	\label
	.endm
	.macro	kernel_entry, el, regsize = 64
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	stp	x0, x1, [sp, #16 * 0]
	stp	x2, x3, [sp, #16 * 1]
	stp	x4, x5, [sp, #16 * 2]
	stp	x6, x7, [sp, #16 * 3]
	stp	x8, x9, [sp, #16 * 4]
	stp	x10, x11, [sp, #16 * 5]
	stp	x12, x13, [sp, #16 * 6]
	stp	x14, x15, [sp, #16 * 7]
	stp	x16, x17, [sp, #16 * 8]
	stp	x18, x19, [sp, #16 * 9]
	stp	x20, x21, [sp, #16 * 10]
	stp	x22, x23, [sp, #16 * 11]
	stp	x24, x25, [sp, #16 * 12]
	stp	x26, x27, [sp, #16 * 13]
	stp	x28, x29, [sp, #16 * 14]

	.if	\el == 0
	mrs	x21, sp_el0
	ldr_this_cpu	tsk, __entry_task, x20	// Ensure MDSCR_EL1.SS is clear,
	ldr	x19, [tsk, #TSK_TI_FLAGS]	// since we can unmask debug
	disable_step_tsk x19, x20		// exceptions when scheduling.

	mov	x29, xzr			// fp pointed to user-space
	.else
	add	x21, sp, #S_FRAME_SIZE
	get_thread_info tsk
	/* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
	ldr	x20, [tsk, #TSK_TI_ADDR_LIMIT]
	str	x20, [sp, #S_ORIG_ADDR_LIMIT]
	mov	x20, #TASK_SIZE_64
	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
	/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
	.endif /* \el == 0 */
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]

	/*
	 * In order to be able to dump the contents of struct pt_regs at the
	 * time the exception was taken (in case we attempt to walk the call
	 * stack later), chain it together with the stack frames.
	 */
	.if \el == 0
	stp	xzr, xzr, [sp, #S_STACKFRAME]
	.else
	stp	x29, x22, [sp, #S_STACKFRAME]
	.endif
	add	x29, sp, #S_STACKFRAME
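	/*
	 * Note: the {NULL, NULL} record stored for EL0 entries cleanly
	 * terminates a stack walk at the exception boundary, while for EL1
	 * the record links the interrupted x29 with the exception PC (x22),
	 * letting an unwinder step from the handler back into the
	 * interrupted context.
	 */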

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
	 * EL0, there is no need to check the state of TTBR0_EL1 since
	 * accesses are always enabled.
	 * Note that the meaning of this bit differs from the ARMv8.1 PAN
	 * feature as all TTBR0_EL1 accesses are disabled, not just those to
	 * the user mappings.
	 */
alternative_if ARM64_HAS_PAN
	b	1f				// skip TTBR0 PAN
alternative_else_nop_endif

	.if	\el != 0
	mrs	x21, ttbr0_el1
	tst	x21, #0xffff << 48		// Check for the reserved ASID
	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
	b.eq	1f				// TTBR0 access already disabled
	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
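	/*
	 * Rationale: __uaccess_ttbr0_disable installs the kernel's reserved
	 * TTBR0 value (the zero page), whose ASID field (bits 63:48) is
	 * zero. An all-zero ASID here therefore means user access was
	 * already disabled when the exception hit, and the emulated PAN bit
	 * can stay set in the saved SPSR.
	 */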
	.endif

	__uaccess_ttbr0_disable x21
1:
#endif

	stp	x22, x23, [sp, #S_PC]

	/*
	 * Set syscallno to -1 by default (overridden later if real syscall).
	 */
	.if	\el == 0
	mvn	x21, xzr
	str	x21, [sp, #S_SYSCALLNO]
	.endif

	/*
	 * Set sp_el0 to current thread_info.
	 */
	.if	\el == 0
	msr	sp_el0, tsk
	.endif

	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	 */
	.endm
	.macro	kernel_exit, el
	.if	\el != 0
	/* Restore the task's original addr_limit. */
	ldr	x20, [sp, #S_ORIG_ADDR_LIMIT]
	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]

	/* No need to restore UAO, it will be restored from SPSR_EL1 */
	.endif

	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
	.if	\el == 0
	ct_user_enter
	.endif

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
	 * PAN bit checking.
	 */
alternative_if ARM64_HAS_PAN
	b	2f				// skip TTBR0 PAN
alternative_else_nop_endif

	.if	\el != 0
	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
	.endif

	__uaccess_ttbr0_enable x0

	.if	\el == 0
	/*
	 * Enable errata workarounds only if returning to user. The only
	 * workaround currently required for TTBR0_EL1 changes is for the
	 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
	 * corruption).
	 */
	post_ttbr0_update_workaround
	.endif
1:
	.if	\el != 0
	and	x22, x22, #~PSR_PAN_BIT		// ARMv8.0 CPUs do not understand this bit
	.endif
2:
#endif

	.if	\el == 0
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	msr	sp_el0, x23
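	/*
	 * Cortex-A53 erratum 845719 workaround below: a (possibly dummy)
	 * write to CONTEXTIDR_EL1 is required before returning to a 32-bit
	 * EL0 context; SPSR bit 4 (M[4]) identifies an AArch32 return, so
	 * AArch64 returns skip the write.
	 */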
#ifdef CONFIG_ARM64_ERRATUM_845719
alternative_if ARM64_WORKAROUND_845719
	tbz	x22, #4, 1f
#ifdef CONFIG_PID_IN_CONTEXTIDR
	mrs	x29, contextidr_el1
	msr	contextidr_el1, x29
#else
	msr	contextidr_el1, xzr
#endif
1:
alternative_else_nop_endif
#endif
	.endif

	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	ldp	x0, x1, [sp, #16 * 0]
	ldp	x2, x3, [sp, #16 * 1]
	ldp	x4, x5, [sp, #16 * 2]
	ldp	x6, x7, [sp, #16 * 3]
	ldp	x8, x9, [sp, #16 * 4]
	ldp	x10, x11, [sp, #16 * 5]
	ldp	x12, x13, [sp, #16 * 6]
	ldp	x14, x15, [sp, #16 * 7]
	ldp	x16, x17, [sp, #16 * 8]
	ldp	x18, x19, [sp, #16 * 9]
	ldp	x20, x21, [sp, #16 * 10]
	ldp	x22, x23, [sp, #16 * 11]
	ldp	x24, x25, [sp, #16 * 12]
	ldp	x26, x27, [sp, #16 * 13]
	ldp	x28, x29, [sp, #16 * 14]
	ldr	lr, [sp, #S_LR]
	add	sp, sp, #S_FRAME_SIZE		// restore sp
	eret					// return to kernel
	.endm

	.macro	irq_stack_entry
	mov	x19, sp			// preserve the original sp

	/*
	 * Compare sp with the base of the task stack.
	 * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
	 * and should switch to the irq stack.
	 */
	ldr	x25, [tsk, TSK_STACK]
	eor	x25, x25, x19
	and	x25, x25, #~(THREAD_SIZE - 1)
	cbnz	x25, 9998f
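	/*
	 * The eor/and pair implements the comparison described above: sp
	 * and the task stack base may differ only in the low THREAD_SHIFT
	 * bits when sp lies within the task's THREAD_SIZE-aligned stack;
	 * any difference in the upper bits means we are already off the
	 * task stack (e.g. already on the irq stack) and must not switch.
	 */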

	ldr_this_cpu x25, irq_stack_ptr, x26
	mov	x26, #IRQ_STACK_SIZE
	add	x26, x25, x26

	/* switch to the irq stack */
	mov	sp, x26
9998:
	.endm

	/*
	 * x19 should be preserved between irq_stack_entry and
	 * irq_stack_exit.
	 */
	.macro	irq_stack_exit
	mov	sp, x19
	.endm

/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - x0 to x6.
 *
 * x7 is reserved for the system call number in 32-bit mode.
 */
sc_nr	.req	x25		// number of system calls
scno	.req	x26		// syscall number
stbl	.req	x27		// syscall table pointer
tsk	.req	x28		// current thread_info

/*
 * Interrupt handling.
 */
	.macro	irq_handler
	ldr_l	x1, handle_arch_irq
	mov	x0, sp
	irq_stack_entry
	blr	x1
	irq_stack_exit
	.endm
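/*
 * Note: handle_arch_irq is a C function pointer installed by the irqchip
 * driver via set_handle_irq(), which is why irq_handler loads it and calls
 * it indirectly, bracketed by the irq stack switch.
 */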

	.pushsection ".entry.text", "ax"
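/*
 * Exception vector table: four groups of four entries, one group per
 * source (current EL with SP_EL0, current EL with SP_ELx, lower EL
 * AArch64, lower EL AArch32), each covering Synchronous/IRQ/FIQ/SError.
 * Every kernel_ventry expands to a 128-byte (.align 7 in the macro) slot,
 * and the table itself must be 2KiB-aligned as required by VBAR_EL1.
 */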

	.align	11
ENTRY(vectors)
	kernel_ventry	el1_sync_invalid	// Synchronous EL1t
	kernel_ventry	el1_irq_invalid		// IRQ EL1t
	kernel_ventry	el1_fiq_invalid		// FIQ EL1t
	kernel_ventry	el1_error_invalid	// Error EL1t

	kernel_ventry	el1_sync		// Synchronous EL1h
	kernel_ventry	el1_irq			// IRQ EL1h
	kernel_ventry	el1_fiq_invalid		// FIQ EL1h
	kernel_ventry	el1_error_invalid	// Error EL1h

	kernel_ventry	el0_sync		// Synchronous 64-bit EL0
	kernel_ventry	el0_irq			// IRQ 64-bit EL0
	kernel_ventry	el0_fiq_invalid		// FIQ 64-bit EL0
	kernel_ventry	el0_error_invalid	// Error 64-bit EL0

#ifdef CONFIG_COMPAT
	kernel_ventry	el0_sync_compat		// Synchronous 32-bit EL0
	kernel_ventry	el0_irq_compat		// IRQ 32-bit EL0
	kernel_ventry	el0_fiq_invalid_compat	// FIQ 32-bit EL0
	kernel_ventry	el0_error_invalid_compat // Error 32-bit EL0
#else
	kernel_ventry	el0_sync_invalid	// Synchronous 32-bit EL0
	kernel_ventry	el0_irq_invalid		// IRQ 32-bit EL0
	kernel_ventry	el0_fiq_invalid		// FIQ 32-bit EL0
	kernel_ventry	el0_error_invalid	// Error 32-bit EL0
#endif
END(vectors)

#ifdef CONFIG_VMAP_STACK
	/*
	 * We detected an overflow in kernel_ventry, which switched to the
	 * overflow stack. Stash the exception regs, and head to our overflow
	 * handler.
	 */
__bad_stack:
	/* Restore the original x0 value */
	mrs	x0, tpidrro_el0

	/*
	 * Store the original GPRs to the new stack. The original SP (minus
	 * S_FRAME_SIZE) was stashed in tpidr_el0 by kernel_ventry.
	 */
	sub	sp, sp, #S_FRAME_SIZE
	kernel_entry 1
	mrs	x0, tpidr_el0
	add	x0, x0, #S_FRAME_SIZE
	str	x0, [sp, #S_SP]

	/* Stash the regs for handle_bad_stack */
	mov	x0, sp

	/* Time to die */
	bl	handle_bad_stack
	ASM_BUG()
#endif /* CONFIG_VMAP_STACK */

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, el, reason, regsize = 64
	kernel_entry \el, \regsize
	mov	x0, sp
	mov	x1, #\reason
	mrs	x2, esr_el1
	bl	bad_mode
	ASM_BUG()
	.endm

el0_sync_invalid:
	inv_entry 0, BAD_SYNC
ENDPROC(el0_sync_invalid)

el0_irq_invalid:
	inv_entry 0, BAD_IRQ
ENDPROC(el0_irq_invalid)

el0_fiq_invalid:
	inv_entry 0, BAD_FIQ
ENDPROC(el0_fiq_invalid)

el0_error_invalid:
	inv_entry 0, BAD_ERROR
ENDPROC(el0_error_invalid)

#ifdef CONFIG_COMPAT
el0_fiq_invalid_compat:
	inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)

el0_error_invalid_compat:
	inv_entry 0, BAD_ERROR, 32
ENDPROC(el0_error_invalid_compat)
#endif

el1_sync_invalid:
	inv_entry 1, BAD_SYNC
ENDPROC(el1_sync_invalid)

el1_irq_invalid:
	inv_entry 1, BAD_IRQ
ENDPROC(el1_irq_invalid)

el1_fiq_invalid:
	inv_entry 1, BAD_FIQ
ENDPROC(el1_fiq_invalid)

el1_error_invalid:
	inv_entry 1, BAD_ERROR
ENDPROC(el1_error_invalid)

/*
 * EL1 mode handlers.
 */
	.align	6
el1_sync:
	kernel_entry 1
	mrs	x1, esr_el1			// read the syndrome register
	lsr	x24, x1, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_DABT_CUR	// data abort in EL1
	b.eq	el1_da
	cmp	x24, #ESR_ELx_EC_IABT_CUR	// instruction abort in EL1
	b.eq	el1_ia
	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
	b.eq	el1_undef
	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL1
	b.eq	el1_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_CUR	// debug exception in EL1
	b.ge	el1_dbg
	b	el1_inv

el1_ia:
	/*
	 * Fall through to the Data abort case
	 */
el1_da:
	/*
	 * Data abort handling
	 */
	mrs	x3, far_el1
	enable_dbg
	// re-enable interrupts if they were enabled in the aborted context
	tbnz	x23, #7, 1f			// PSR_I_BIT
	enable_irq
1:
	clear_address_tag x0, x3
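	// clear_address_tag strips the top-byte tag from a tagged user
	// address, so do_mem_abort sees the canonical fault address taken
	// from FAR_EL1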
	mov	x2, sp				// struct pt_regs
	bl	do_mem_abort

	// disable interrupts before pulling preserved data off the stack
	disable_irq
	kernel_exit 1

el1_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x0, far_el1
	enable_dbg
	mov	x2, sp
	bl	do_sp_pc_abort
	ASM_BUG()

el1_undef:
	/*
	 * Undefined instruction
	 */
	enable_dbg
	mov	x0, sp
	bl	do_undefinstr
	ASM_BUG()

el1_dbg:
	/*
	 * Debug exception handling
	 */
	cmp	x24, #ESR_ELx_EC_BRK64		// if BRK64
	cinc	x24, x24, eq			// set bit '0'
	tbz	x24, #0, el1_inv		// EL1 only
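	/*
	 * Debug exception classes taken from the current EL all have bit 0
	 * of the EC set (e.g. 0x31 breakpoint-current vs. 0x30 lower-EL);
	 * BRK64 (0x3c) is the odd one out, so cinc nudges it to pass the
	 * same bit-0 test above.
	 */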
	mrs	x0, far_el1
	mov	x2, sp				// struct pt_regs
	bl	do_debug_exception
	kernel_exit 1

el1_inv:
	// TODO: add support for undefined instructions in kernel mode
	enable_dbg
	mov	x0, sp
	mov	x2, x1
	mov	x1, #BAD_SYNC
	bl	bad_mode
	ASM_BUG()
ENDPROC(el1_sync)

	.align	6
el1_irq:
	kernel_entry 1
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	irq_handler

#ifdef CONFIG_PREEMPT
	ldr	w24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
	cbnz	w24, 1f				// preempt count != 0
	ldr	x0, [tsk, #TSK_TI_FLAGS]	// get flags
	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
	bl	el1_preempt
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	kernel_exit 1
ENDPROC(el1_irq)

#ifdef CONFIG_PREEMPT
el1_preempt:
	mov	x24, lr
1:	bl	preempt_schedule_irq		// irq en/disable is done inside
	ldr	x0, [tsk, #TSK_TI_FLAGS]	// get new task's TI_FLAGS
	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
	ret	x24
#endif
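
/*
 * The loop back to 1b above matters: preempt_schedule_irq() returns with
 * interrupts disabled, but another task may need the CPU again by then
 * (TIF_NEED_RESCHED re-set), so the flag is re-checked before returning
 * to the interrupted context.
 */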

/*
 * EL0 mode handlers.
 */
	.align	6
el0_sync:
	kernel_entry 0
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_SVC64		// SVC in 64-bit state
	b.eq	el0_svc
	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_ELx_EC_FP_EXC64	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
	b.eq	el0_sys
	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv

#ifdef CONFIG_COMPAT
	.align	6
el0_sync_compat:
	kernel_entry 0, 32
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_SVC32		// SVC in 32-bit state
	b.eq	el0_svc_compat
	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_ELx_EC_FP_EXC32	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP15_32	// CP15 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP15_64	// CP15 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_MR	// CP14 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_LS	// CP14 LDC/STC trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_64	// CP14 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv

	.align	6
el0_svc_compat:
	/*
	 * AArch32 syscall handling
	 */
	adrp	stbl, compat_sys_call_table	// load compat syscall table pointer
	uxtw	scno, w7			// syscall number in w7 (r7)
	mov	sc_nr, #__NR_compat_syscalls
	b	el0_svc_naked

el0_irq_compat:
	kernel_entry 0, 32
	b	el0_irq_naked
#endif

el0_da:
	/*
	 * Data abort handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	clear_address_tag x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_mem_abort
	b	ret_to_user

el0_ia:
	/*
	 * Instruction abort handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_mem_abort
	b	ret_to_user

el0_fpsimd_acc:
	/*
	 * Floating Point or Advanced SIMD access
	 */
	enable_dbg
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_fpsimd_acc
	b	ret_to_user

el0_fpsimd_exc:
	/*
	 * Floating Point or Advanced SIMD exception
	 */
	enable_dbg
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_fpsimd_exc
	b	ret_to_user

el0_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_sp_pc_abort
	b	ret_to_user

el0_undef:
	/*
	 * Undefined instruction
	 */
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, sp
	bl	do_undefinstr
	b	ret_to_user

el0_sys:
	/*
	 * System instructions, for trapped cache maintenance instructions
	 */
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_sysinstr
	b	ret_to_user

el0_dbg:
	/*
	 * Debug exception handling
	 */
	tbnz	x24, #0, el0_inv		// EL0 only
	mrs	x0, far_el1
	mov	x1, x25
	mov	x2, sp
	bl	do_debug_exception
	enable_dbg
	ct_user_exit
	b	ret_to_user

el0_inv:
	enable_dbg
	ct_user_exit
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mov	x2, x25
	bl	bad_el0_sync
	b	ret_to_user
ENDPROC(el0_sync)

	.align	6
el0_irq:
	kernel_entry 0
el0_irq_naked:
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	ct_user_exit
	irq_handler

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	b	ret_to_user
ENDPROC(el0_irq)

/*
 * This is the fast syscall return path. We do as little as possible here,
 * and this includes saving x0 back into the kernel stack.
 */
ret_fast_syscall:
	disable_irq				// disable interrupts
	str	x0, [sp, #S_X0]			// returned x0
	ldr	x1, [tsk, #TSK_TI_FLAGS]	// re-check for syscall tracing
	and	x2, x1, #_TIF_SYSCALL_WORK
	cbnz	x2, ret_fast_syscall_trace
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
	enable_step_tsk x1, x2
	kernel_exit 0
ret_fast_syscall_trace:
	enable_irq				// enable interrupts
	b	__sys_trace_return_skipped	// we already saved x0
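
/*
 * Note the two distinct slow-path triggers above: _TIF_SYSCALL_WORK
 * (tracing, seccomp, etc.) reroutes through the syscall-exit trace hooks,
 * while _TIF_WORK_MASK (pending signal, reschedule, ...) detours through
 * work_pending before the final return to EL0.
 */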

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
work_pending:
	mov	x0, sp				// 'regs'
	bl	do_notify_resume
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on		// enabled while in userspace
#endif
	ldr	x1, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
	b	finish_ret_to_user
822 * "slow" syscall return path.
825 disable_irq // disable interrupts
826 ldr x1, [tsk, #TSK_TI_FLAGS]
827 and x2, x1, #_TIF_WORK_MASK
828 cbnz x2, work_pending
830 enable_step_tsk x1, x2

/*
 * SVC handler.
 */
	.align	6
el0_svc:
	adrp	stbl, sys_call_table		// load syscall table pointer
	uxtw	scno, w8			// syscall number in w8
	mov	sc_nr, #__NR_syscalls
el0_svc_naked:					// compat entry point
	stp	x0, scno, [sp, #S_ORIG_X0]	// save the original x0 and syscall number
	enable_dbg_and_irq
	ct_user_exit 1

	ldr	x16, [tsk, #TSK_TI_FLAGS]	// check for syscall hooks
	tst	x16, #_TIF_SYSCALL_WORK
	b.ne	__sys_trace
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	ni_sys
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	blr	x16				// call sys_* routine
	b	ret_fast_syscall
ni_sys:
	mov	x0, sp
	bl	do_ni_syscall
	b	ret_fast_syscall
ENDPROC(el0_svc)
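
/*
 * Each sys_call_table entry is an 8-byte function pointer, hence the
 * "scno, lsl #3" scaled index in the dispatch above; out-of-range syscall
 * numbers are diverted to do_ni_syscall instead.
 */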

	/*
	 * This is the really slow path. We're going to be doing context
	 * switches, and waiting for our parent to respond.
	 */
__sys_trace:
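	/*
	 * A user-issued syscall(-1) lands here with scno == -1: the return
	 * slot is pre-loaded with -ENOSYS so that a tracer which leaves the
	 * syscall number untouched still produces the expected failure,
	 * while remaining free to rewrite the number if it wants.
	 */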
	mov	w0, #-1				// set default errno for
	cmp	scno, x0			// user-issued syscall(-1)
	b.ne	1f
	mov	x0, #-ENOSYS
	str	x0, [sp, #S_X0]
1:	mov	x0, sp
	bl	syscall_trace_enter
	cmp	w0, #-1				// skip the syscall?
	b.eq	__sys_trace_return_skipped
	uxtw	scno, w0			// syscall number (possibly new)
	mov	x1, sp				// pointer to regs
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	__ni_sys_trace
	ldp	x0, x1, [sp]			// restore the syscall args
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
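	// The argument registers are reloaded from pt_regs because the
	// tracer (e.g. ptrace/strace) may have modified them while the task
	// was stopped in syscall_trace_enter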
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	blr	x16				// call sys_* routine

__sys_trace_return:
	str	x0, [sp, #S_X0]			// save returned x0
__sys_trace_return_skipped:
	mov	x0, sp
	bl	syscall_trace_exit
	b	ret_to_user

__ni_sys_trace:
	mov	x0, sp
	bl	do_ni_syscall
	b	__sys_trace_return

	.popsection				// .entry.text

/*
 * Special system call wrappers.
 */
ENTRY(sys_rt_sigreturn_wrapper)
	mov	x0, sp
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)
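
/*
 * The wrapper exists because sys_rt_sigreturn needs the saved pt_regs,
 * which only this assembly path can hand over (in x0) before tail-calling
 * the C implementation.
 */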

/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 *   x0 = previous task_struct (must be preserved across the switch)
 *   x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 */
ENTRY(cpu_switch_to)
	mov	x10, #THREAD_CPU_CONTEXT
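	// x8 = &prev->thread.cpu_context: only the AAPCS64 callee-saved
	// registers (x19-x28) plus fp, sp and lr need to live there, since
	// caller-saved state is dead across the explicit call into
	// __switch_to()/cpu_switch_to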
	add	x8, x0, x10
	mov	x9, sp
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	str	lr, [x8]
	add	x8, x1, x10
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	ldr	lr, [x8]
	mov	sp, x9
	msr	sp_el0, x1
	ret
ENDPROC(cpu_switch_to)
NOKPROBE(cpu_switch_to)
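
/*
 * Note: while in the kernel, sp_el0 carries the current task (thread_info)
 * pointer, as set up by kernel_entry, which is why it is updated above
 * together with the stack switch.
 */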

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	cbz	x19, 1f				// not a kernel thread
	mov	x0, x20
	blr	x19
1:	get_thread_info tsk
	b	ret_to_user
ENDPROC(ret_from_fork)
NOKPROBE(ret_from_fork)
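
/*
 * For kernel threads, copy_thread() parks the thread function in x19 and
 * its argument in x20 (restored by cpu_switch_to from cpu_context), so a
 * non-zero x19 above identifies a kernel thread and calls into it; user
 * tasks skip straight to ret_to_user.
 */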