/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/errno.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>

/*
 * Bad Abort numbers
 * -----------------
 */
#define BAD_SYNC	0
#define BAD_IRQ		1
#define BAD_FIQ		2
#define BAD_ERROR	3

	.macro	kernel_entry, el, regsize = 64
	sub	sp, sp, #S_FRAME_SIZE - S_LR	// room for LR, SP, SPSR, ELR
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	push	x28, x29
	push	x26, x27
	push	x24, x25
	push	x22, x23
	push	x20, x21
	push	x18, x19
	push	x16, x17
	push	x14, x15
	push	x12, x13
	push	x10, x11
	push	x8, x9
	push	x6, x7
	push	x4, x5
	push	x2, x3
	push	x0, x1
	.if	\el == 0
	mrs	x21, sp_el0
	.else
	add	x21, sp, #S_FRAME_SIZE
	.endif
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]
	stp	x22, x23, [sp, #S_PC]

	/*
	 * Set syscallno to -1 by default (overridden later if real syscall).
	 */
	.if	\el == 0
	mvn	x21, xzr
	str	x21, [sp, #S_SYSCALLNO]
	.endif

	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	 */
	.endm
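
/*
 * Illustration only: a C sketch of the exception frame kernel_entry
 * builds on the stack. Field order is inferred from the S_* offsets
 * used above; see asm/ptrace.h for the authoritative definition.
 *
 *	struct pt_regs {
 *		u64 regs[31];	// x0..x30; regs[30] is the LR slot (S_LR)
 *		u64 sp;		// aborted SP (S_SP)
 *		u64 pc;		// aborted PC, from ELR_EL1 (S_PC)
 *		u64 pstate;	// aborted PSTATE, from SPSR_EL1
 *		u64 orig_x0;	// x0 as on entry (S_ORIG_X0)
 *		u64 syscallno;	// -1 unless entered via SVC (S_SYSCALLNO)
 *	};
 */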

	.macro	kernel_exit, el, ret = 0
	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
	.if	\el == 0
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	.endif
	.if	\ret
	ldr	x1, [sp, #S_X1]			// preserve x0 (syscall return)
	add	sp, sp, S_X2
	.else
	pop	x0, x1
	.endif
	pop	x2, x3				// load the rest of the registers
	pop	x4, x5
	pop	x6, x7
	pop	x8, x9
	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	.if	\el == 0
	msr	sp_el0, x23
	.endif
	pop	x10, x11
	pop	x12, x13
	pop	x14, x15
	pop	x16, x17
	pop	x18, x19
	pop	x20, x21
	pop	x22, x23
	pop	x24, x25
	pop	x26, x27
	pop	x28, x29
	ldr	lr, [sp], #S_FRAME_SIZE - S_LR	// load LR and restore SP
	eret					// return to the excepted context (EL0 or EL1)
	.endm

	.macro	get_thread_info, rd
	mov	\rd, sp
	and	\rd, \rd, #~((1 << 13) - 1)	// bottom of the 8K stack, where thread_info lives
	.endm
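
/*
 * This relies on thread_info sitting at the lowest address of the 8K
 * kernel stack, so rounding SP down to an 8K boundary finds it; roughly
 * (assuming THREAD_SIZE == 8192):
 *
 *	struct thread_info *ti =
 *		(struct thread_info *)(sp & ~(THREAD_SIZE - 1));
 */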

/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - x0 to x6.
 *
 * x7 is reserved for the system call number in 32-bit mode.
 */
sc_nr	.req	x25		// number of system calls
scno	.req	x26		// syscall number
stbl	.req	x27		// syscall table pointer
tsk	.req	x28		// current thread_info
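
/*
 * x25-x28 are callee-saved under the AAPCS64, so these values survive
 * the C calls made while a syscall is handled. A hypothetical typed
 * view of the same state ('syscall_fn_t' is illustrative, not an API
 * from this kernel):
 *
 *	typedef long (*syscall_fn_t)(long, long, long, long, long, long);
 *	unsigned int sc_nr;		// number of system calls
 *	unsigned long scno;		// syscall number
 *	syscall_fn_t *stbl;		// syscall table pointer
 *	struct thread_info *tsk;	// current thread_info
 */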

/*
 * Interrupt handling.
 */
	.macro	irq_handler
	ldr	x1, handle_arch_irq
	mov	x0, sp
	blr	x1
	.endm
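
/*
 * handle_arch_irq (the .quad at the end of this file) is a function
 * pointer filled in by the interrupt controller driver; the macro above
 * amounts to:
 *
 *	void (*handle_arch_irq)(struct pt_regs *);
 *	handle_arch_irq(regs);		// regs == current SP, i.e. the pt_regs frame
 */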

	.text

/*
 * Exception vectors.
 */
	.macro	ventry	label
	.align	7
	b	\label
	.endm

	.align	11
ENTRY(vectors)
	ventry	el1_sync_invalid		// Synchronous EL1t
	ventry	el1_irq_invalid			// IRQ EL1t
	ventry	el1_fiq_invalid			// FIQ EL1t
	ventry	el1_error_invalid		// Error EL1t

	ventry	el1_sync			// Synchronous EL1h
	ventry	el1_irq				// IRQ EL1h
	ventry	el1_fiq_invalid			// FIQ EL1h
	ventry	el1_error_invalid		// Error EL1h

	ventry	el0_sync			// Synchronous 64-bit EL0
	ventry	el0_irq				// IRQ 64-bit EL0
	ventry	el0_fiq_invalid			// FIQ 64-bit EL0
	ventry	el0_error_invalid		// Error 64-bit EL0

#ifdef CONFIG_COMPAT
	ventry	el0_sync_compat			// Synchronous 32-bit EL0
	ventry	el0_irq_compat			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid_compat		// FIQ 32-bit EL0
	ventry	el0_error_invalid_compat	// Error 32-bit EL0
#else
	ventry	el0_sync_invalid		// Synchronous 32-bit EL0
	ventry	el0_irq_invalid			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid			// FIQ 32-bit EL0
	ventry	el0_error_invalid		// Error 32-bit EL0
#endif
END(vectors)
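
/*
 * Each ventry slot is 128 bytes (.align 7) and the table base is 2KB
 * aligned (.align 11), as required for VBAR_EL1. The four groups cover,
 * in order: current EL with SP_EL0 (EL1t), current EL with SP_ELx
 * (EL1h), lower EL in AArch64, and lower EL in AArch32, so the address
 * of any one vector is:
 *
 *	// group: 0..3 as listed above; type: 0 sync, 1 irq, 2 fiq, 3 error
 *	entry = vbar + 0x200 * group + 0x80 * type;
 */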

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, el, reason, regsize = 64
	kernel_entry el, \regsize
	mov	x0, sp
	mov	x1, #\reason
	mrs	x2, esr_el1
	b	bad_mode
	.endm

el0_sync_invalid:
	inv_entry 0, BAD_SYNC
ENDPROC(el0_sync_invalid)

el0_irq_invalid:
	inv_entry 0, BAD_IRQ
ENDPROC(el0_irq_invalid)

el0_fiq_invalid:
	inv_entry 0, BAD_FIQ
ENDPROC(el0_fiq_invalid)

el0_error_invalid:
	inv_entry 0, BAD_ERROR
ENDPROC(el0_error_invalid)

#ifdef CONFIG_COMPAT
el0_fiq_invalid_compat:
	inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)

el0_error_invalid_compat:
	inv_entry 0, BAD_ERROR, 32
ENDPROC(el0_error_invalid_compat)
#endif

el1_sync_invalid:
	inv_entry 1, BAD_SYNC
ENDPROC(el1_sync_invalid)

el1_irq_invalid:
	inv_entry 1, BAD_IRQ
ENDPROC(el1_irq_invalid)

el1_fiq_invalid:
	inv_entry 1, BAD_FIQ
ENDPROC(el1_fiq_invalid)

el1_error_invalid:
	inv_entry 1, BAD_ERROR
ENDPROC(el1_error_invalid)

/*
 * EL1 mode handlers.
 */
	.align	6
el1_sync:
	kernel_entry 1
	mrs	x1, esr_el1			// read the syndrome register
	lsr	x24, x1, #26			// exception class
	cmp	x24, #0x25			// data abort in EL1
	b.eq	el1_da
	cmp	x24, #0x18			// configurable trap
	b.eq	el1_undef
	cmp	x24, #0x26			// stack alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #0x22			// pc alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #0x00			// unknown exception in EL1
	b.eq	el1_undef
	cmp	x24, #0x30			// debug exception in EL1
	b.ge	el1_dbg
	b	el1_inv
el1_da:
	/*
	 * Data abort handling
	 */
	mrs	x0, far_el1
	enable_dbg_if_not_stepping x2
	// re-enable interrupts if they were enabled in the aborted context
	tbnz	x23, #7, 1f			// PSR_I_BIT
	enable_irq
1:
	mov	x2, sp				// struct pt_regs
	bl	do_mem_abort

	// disable interrupts before pulling preserved data off the stack
	disable_irq
	kernel_exit 1
el1_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x0, far_el1
	// x1 still holds the syndrome (ESR_EL1) read at el1_sync; x25 is
	// not set up on the EL1 path, so it must not be used here
	mov	x2, sp
	b	do_sp_pc_abort
el1_undef:
	/*
	 * Undefined instruction
	 */
	mov	x0, sp
	b	do_undefinstr
el1_dbg:
	/*
	 * Debug exception handling
	 */
	tbz	x24, #0, el1_inv		// EL1 only
	mrs	x0, far_el1
	mov	x2, sp				// struct pt_regs
	bl	do_debug_exception

	kernel_exit 1
el1_inv:
	// TODO: add support for undefined instructions in kernel mode
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mrs	x2, esr_el1
	b	bad_mode
ENDPROC(el1_sync)
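
/*
 * The dispatch above keys off the exception class in ESR_EL1 bits
 * [31:26], hence the 'lsr #26'; a partial C rendering:
 *
 *	unsigned int ec = esr >> 26;	// exception class
 *	if (ec == 0x25)
 *		el1_da();		// data abort taken from EL1
 *	else if (ec >= 0x30)
 *		el1_dbg();		// hardware debug exception
 *	else
 *		el1_inv();		// anything unrecognised
 */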

	.align	6
el1_irq:
	kernel_entry 1
	enable_dbg_if_not_stepping x0
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
#ifdef CONFIG_PREEMPT
	get_thread_info tsk
	ldr	x24, [tsk, #TI_PREEMPT]		// get preempt count
	add	x0, x24, #1			// increment it
	str	x0, [tsk, #TI_PREEMPT]
#endif
	irq_handler
#ifdef CONFIG_PREEMPT
	str	x24, [tsk, #TI_PREEMPT]		// restore preempt count
	cbnz	x24, 1f				// preempt count != 0
	ldr	x0, [tsk, #TI_FLAGS]		// get flags
	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
	bl	el1_preempt
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	kernel_exit 1
ENDPROC(el1_irq)

#ifdef CONFIG_PREEMPT
el1_preempt:
	mov	x24, lr
1:	enable_dbg
	bl	preempt_schedule_irq		// irq en/disable is done inside
	ldr	x0, [tsk, #TI_FLAGS]		// get new task's TI_FLAGS
	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
	ret	x24
#endif

/*
 * EL0 mode handlers.
 */
	.align	6
el0_sync:
	kernel_entry 0
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #26			// exception class
	cmp	x24, #0x15			// SVC in 64-bit state
	b.eq	el0_svc
	adr	lr, ret_from_exception
	cmp	x24, #0x24			// data abort in EL0
	b.eq	el0_da
	cmp	x24, #0x20			// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #0x07			// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #0x2c			// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #0x18			// configurable trap
	b.eq	el0_undef
	cmp	x24, #0x26			// stack alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #0x22			// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #0x00			// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #0x30			// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv

#ifdef CONFIG_COMPAT
	.align	6
el0_sync_compat:
	kernel_entry 0, 32
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #26			// exception class
	cmp	x24, #0x11			// SVC in 32-bit state
	b.eq	el0_svc_compat
	adr	lr, ret_from_exception
	cmp	x24, #0x24			// data abort in EL0
	b.eq	el0_da
	cmp	x24, #0x20			// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #0x07			// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #0x28			// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #0x00			// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #0x30			// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv
el0_svc_compat:
	/*
	 * AArch32 syscall handling
	 */
	adr	stbl, compat_sys_call_table	// load compat syscall table pointer
	uxtw	scno, w7			// syscall number in w7 (r7)
	mov	sc_nr, #__NR_compat_syscalls
	b	el0_svc_naked

	.align	6
el0_irq_compat:
	kernel_entry 0, 32
	b	el0_irq_naked
#endif

el0_da:
	/*
	 * Data abort handling
	 */
	mrs	x0, far_el1
	disable_step x1
	isb
	enable_dbg
	// enable interrupts before calling the main handler
	enable_irq
	mov	x1, x25
	mov	x2, sp
	b	do_mem_abort
el0_ia:
	/*
	 * Instruction abort handling
	 */
	mrs	x0, far_el1
	disable_step x1
	isb
	enable_dbg
	// enable interrupts before calling the main handler
	enable_irq
	orr	x1, x25, #1 << 24		// use reserved ISS bit for instruction aborts
	mov	x2, sp
	b	do_mem_abort
el0_fpsimd_acc:
	/*
	 * Floating Point or Advanced SIMD access
	 */
	mov	x0, x25
	mov	x1, sp
	b	do_fpsimd_acc
el0_fpsimd_exc:
	/*
	 * Floating Point or Advanced SIMD exception
	 */
	mov	x0, x25
	mov	x1, sp
	b	do_fpsimd_exc
el0_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x0, far_el1
	disable_step x1
	isb
	enable_dbg
	// enable interrupts before calling the main handler
	enable_irq
	mov	x1, x25
	mov	x2, sp
	b	do_sp_pc_abort
el0_undef:
	/*
	 * Undefined instruction
	 */
	mov	x0, sp
	b	do_undefinstr
el0_dbg:
	/*
	 * Debug exception handling
	 */
	tbnz	x24, #0, el0_inv		// EL0 only
	mrs	x0, far_el1
	disable_step x1
	mov	x1, x25
	mov	x2, sp
	b	do_debug_exception
el0_inv:
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mrs	x2, esr_el1
	b	bad_mode
ENDPROC(el0_sync)

	.align	6
el0_irq:
	kernel_entry 0
el0_irq_naked:
	disable_step x1
	isb
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	get_thread_info tsk
#ifdef CONFIG_PREEMPT
	ldr	x24, [tsk, #TI_PREEMPT]		// get preempt count
	add	x23, x24, #1			// increment it
	str	x23, [tsk, #TI_PREEMPT]
#endif
	irq_handler
#ifdef CONFIG_PREEMPT
	ldr	x0, [tsk, #TI_PREEMPT]
	str	x24, [tsk, #TI_PREEMPT]
	cmp	x0, x23
	b.eq	1f
	mov	x1, #0
	str	x1, [x1]			// BUG: preempt count changed under us; fault deliberately
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	b	ret_to_user
ENDPROC(el0_irq)

/*
 * This is the return code to user mode for abort handlers
 */
ret_from_exception:
	get_thread_info tsk
	b	ret_to_user
ENDPROC(ret_from_exception)

/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 * x0 = previous task_struct (must be preserved across the switch)
 * x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 */
ENTRY(cpu_switch_to)
	add	x8, x0, #THREAD_CPU_CONTEXT
	mov	x9, sp
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	str	lr, [x8]
	add	x8, x1, #THREAD_CPU_CONTEXT
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	ldr	lr, [x8]
	mov	sp, x9
	ret
ENDPROC(cpu_switch_to)
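
/*
 * THREAD_CPU_CONTEXT is the offset of struct cpu_context within
 * task_struct; its layout, matching the stp/ldp sequence above:
 *
 *	struct cpu_context {
 *		unsigned long x19, x20, x21, x22, x23, x24;
 *		unsigned long x25, x26, x27, x28;
 *		unsigned long fp;	// x29
 *		unsigned long sp;	// staged through x9 above
 *		unsigned long pc;	// saved lr: where the next task resumes
 *	};
 */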

/*
 * This is the fast syscall return path. We do as little as possible
 * here; in particular, we avoid having to store x0 back to the kernel
 * stack, since kernel_exit with ret = 1 preserves it (the slow path
 * must save it first, see fast_work_pending).
 */
ret_fast_syscall:
	disable_irq				// disable interrupts
	ldr	x1, [tsk, #TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, fast_work_pending
	tbz	x1, #TIF_SINGLESTEP, fast_exit
	disable_dbg
	enable_step x2
fast_exit:
	kernel_exit 0, ret = 1

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
	str	x0, [sp, #S_X0]			// returned x0
work_pending:
	tbnz	x1, #TIF_NEED_RESCHED, work_resched
	/* TIF_SIGPENDING or TIF_NOTIFY_RESUME case */
	ldr	x2, [sp, #S_PSTATE]
	mov	x0, sp				// 'regs'
	tst	x2, #PSR_MODE_MASK		// user mode regs?
	b.ne	no_work_pending			// returning to kernel
	bl	do_notify_resume
	b	ret_to_user
work_resched:
	enable_dbg
	bl	schedule

/*
 * "slow" syscall return path.
 */
ENTRY(ret_to_user)
	disable_irq				// disable interrupts
	ldr	x1, [tsk, #TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
	tbz	x1, #TIF_SINGLESTEP, no_work_pending
	disable_dbg
	enable_step x2
no_work_pending:
	kernel_exit 0, ret = 0
ENDPROC(ret_to_user)
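
/*
 * A rough C equivalent of the return-to-user work loop formed by
 * ret_to_user, work_pending and work_resched (a sketch; details such
 * as the kernel-mode check in work_pending are omitted):
 *
 *	for (;;) {
 *		local_irq_disable();
 *		flags = current_thread_info()->flags;
 *		if (!(flags & _TIF_WORK_MASK))
 *			break;				// no_work_pending
 *		if (flags & _TIF_NEED_RESCHED)
 *			schedule();			// work_resched
 *		else
 *			do_notify_resume(regs, flags);	// signals etc.
 *	}
 *	kernel_exit();
 */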

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	get_thread_info tsk
	b	ret_to_user
ENDPROC(ret_from_fork)

/*
 * SVC handler.
 */
	.align	6
el0_svc:
	adrp	stbl, sys_call_table		// load syscall table pointer
	uxtw	scno, w8			// syscall number in w8
	mov	sc_nr, #__NR_syscalls
el0_svc_naked:					// compat entry point
	stp	x0, scno, [sp, #S_ORIG_X0]	// save the original x0 and syscall number
	disable_step x16
	isb
	enable_dbg
	enable_irq

	get_thread_info tsk
	ldr	x16, [tsk, #TI_FLAGS]		// check for syscall tracing
	tbnz	x16, #TIF_SYSCALL_TRACE, __sys_trace // are we tracing syscalls?
	adr	lr, ret_fast_syscall		// return address
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	ni_sys
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	br	x16				// call sys_* routine
ni_sys:
	mov	x0, sp
	b	do_ni_syscall
ENDPROC(el0_svc)
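
/*
 * The bounds check and indirect call above are, in C terms (six
 * syscall arguments in x0-x5; 'syscall_fn_t' as sketched earlier):
 *
 *	if (scno < sc_nr)
 *		return stbl[scno](x0, x1, x2, x3, x4, x5);
 *	return do_ni_syscall(regs);
 */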

	/*
	 * This is the really slow path. We're going to be doing context
	 * switches, and waiting for our parent (the tracer) to respond.
	 */
__sys_trace:
	mov	x1, sp
	mov	w0, #0				// trace entry
	bl	syscall_trace
	adr	lr, __sys_trace_return		// return address
	uxtw	scno, w0			// syscall number (possibly new)
	mov	x1, sp				// pointer to regs
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	ni_sys
	ldp	x0, x1, [sp]			// restore the syscall args
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	br	x16				// call sys_* routine

__sys_trace_return:
	str	x0, [sp]			// save returned x0
	mov	x1, sp
	mov	w0, #1				// trace exit
	bl	syscall_trace
	b	ret_to_user

/*
 * Special system call wrappers.
 */
ENTRY(sys_execve_wrapper)
	mov	x3, sp
	b	sys_execve
ENDPROC(sys_execve_wrapper)

ENTRY(sys_clone_wrapper)
	mov	x5, sp
	b	sys_clone
ENDPROC(sys_clone_wrapper)

ENTRY(sys_rt_sigreturn_wrapper)
	mov	x0, sp
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

ENTRY(sys_sigaltstack_wrapper)
	ldr	x2, [sp, #S_SP]
	b	sys_sigaltstack
ENDPROC(sys_sigaltstack_wrapper)

ENTRY(handle_arch_irq)
	.quad	0