/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/unistd32.h>

/*
 * Bad Abort numbers
 *-----------------
 */
#define BAD_SYNC	0
#define BAD_IRQ		1
#define BAD_FIQ		2
#define BAD_ERROR	3

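/*
 * kernel_entry/kernel_exit below build and unwind a struct pt_regs frame
 * on the kernel stack; the S_* offsets used here (S_FRAME_SIZE, S_LR,
 * S_PC, ...) are generated from that structure by asm-offsets.c.
 */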
	.macro	kernel_entry, el, regsize = 64
	sub	sp, sp, #S_FRAME_SIZE - S_LR	// room for LR, SP, SPSR, ELR
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	push	x28, x29
	push	x26, x27
	push	x24, x25
	push	x22, x23
	push	x20, x21
	push	x18, x19
	push	x16, x17
	push	x14, x15
	push	x12, x13
	push	x10, x11
	push	x8, x9
	push	x6, x7
	push	x4, x5
	push	x2, x3
	push	x0, x1
	.if	\el == 0
	mrs	x21, sp_el0
	get_thread_info tsk			// Ensure MDSCR_EL1.SS is clear,
	ldr	x19, [tsk, #TI_FLAGS]		// since we can unmask debug
	disable_step_tsk x19, x20		// exceptions when scheduling.
	.else
	add	x21, sp, #S_FRAME_SIZE
	.endif
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]
	stp	x22, x23, [sp, #S_PC]

	/*
	 * Set syscallno to -1 by default (overridden later if real syscall).
	 */
	.if	\el == 0
	mvn	x21, xzr
	str	x21, [sp, #S_SYSCALLNO]
	.endif

	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	 */
	.endm

	.macro	kernel_exit, el, ret = 0
	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
	.if	\el == 0
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	.endif
	.if	\ret
	ldr	x1, [sp, #S_X1]			// preserve x0 (syscall return)
	add	sp, sp, S_X2
	.else
	pop	x0, x1
	.endif
	pop	x2, x3				// load the rest of the registers
	pop	x4, x5
	pop	x6, x7
	pop	x8, x9
	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	.if	\el == 0
	msr	sp_el0, x23
	.endif
	pop	x10, x11
	pop	x12, x13
	pop	x14, x15
	pop	x16, x17
	pop	x18, x19
	pop	x20, x21
	pop	x22, x23
	pop	x24, x25
	pop	x26, x27
	pop	x28, x29
	ldr	lr, [sp], #S_FRAME_SIZE - S_LR	// load LR and restore SP
	eret					// return to kernel
	.endm

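/*
 * thread_info lives at the low end of the kernel stack in this kernel, so
 * the current one can be derived from SP by masking out the low
 * THREAD_SIZE-1 bits.
 */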
	.macro	get_thread_info, rd
	mov	\rd, sp
	and	\rd, \rd, #~(THREAD_SIZE - 1)	// top of stack
	.endm

/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - x0 to x6.
 *
 * x7 is reserved for the system call number in 32-bit mode.
 */
sc_nr	.req	x25		// number of system calls
scno	.req	x26		// syscall number
stbl	.req	x27		// syscall table pointer
tsk	.req	x28		// current thread_info

/*
 * Interrupt handling.
 */
	.macro	irq_handler
	ldr	x1, handle_arch_irq
	mov	x0, sp
	blr	x1
	.endm

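/*
 * Note: handle_arch_irq (defined at the bottom of this file) is a function
 * pointer installed at boot by the interrupt controller code via
 * set_handle_irq(); irq_handler simply calls it with x0 = struct pt_regs.
 */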
	.text

/*
 * Exception vectors.
 */

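/*
 * The architecture requires the vector base (VBAR_EL1) to be 2KB-aligned,
 * hence the .align 11. Each ventry slot is 0x80 bytes; the four groups
 * below handle exceptions taken from the current EL with SP_EL0, the
 * current EL with SP_ELx, a lower EL in AArch64 and a lower EL in AArch32,
 * in that architectural order.
 */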
	.align	11
ENTRY(vectors)
	ventry	el1_sync_invalid		// Synchronous EL1t
	ventry	el1_irq_invalid			// IRQ EL1t
	ventry	el1_fiq_invalid			// FIQ EL1t
	ventry	el1_error_invalid		// Error EL1t

	ventry	el1_sync			// Synchronous EL1h
	ventry	el1_irq				// IRQ EL1h
	ventry	el1_fiq_invalid			// FIQ EL1h
	ventry	el1_error_invalid		// Error EL1h

	ventry	el0_sync			// Synchronous 64-bit EL0
	ventry	el0_irq				// IRQ 64-bit EL0
	ventry	el0_fiq_invalid			// FIQ 64-bit EL0
	ventry	el0_error_invalid		// Error 64-bit EL0

#ifdef CONFIG_COMPAT
	ventry	el0_sync_compat			// Synchronous 32-bit EL0
	ventry	el0_irq_compat			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid_compat		// FIQ 32-bit EL0
	ventry	el0_error_invalid_compat	// Error 32-bit EL0
#else
	ventry	el0_sync_invalid		// Synchronous 32-bit EL0
	ventry	el0_irq_invalid			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid			// FIQ 32-bit EL0
	ventry	el0_error_invalid		// Error 32-bit EL0
#endif
END(vectors)

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, el, reason, regsize = 64
	kernel_entry el, \regsize
	mov	x0, sp
	mov	x1, #\reason
	mrs	x2, esr_el1
	b	bad_mode
	.endm

el0_sync_invalid:
	inv_entry 0, BAD_SYNC
ENDPROC(el0_sync_invalid)

el0_irq_invalid:
	inv_entry 0, BAD_IRQ
ENDPROC(el0_irq_invalid)

el0_fiq_invalid:
	inv_entry 0, BAD_FIQ
ENDPROC(el0_fiq_invalid)

el0_error_invalid:
	inv_entry 0, BAD_ERROR
ENDPROC(el0_error_invalid)

#ifdef CONFIG_COMPAT
el0_fiq_invalid_compat:
	inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)

el0_error_invalid_compat:
	inv_entry 0, BAD_ERROR, 32
ENDPROC(el0_error_invalid_compat)
#endif

el1_sync_invalid:
	inv_entry 1, BAD_SYNC
ENDPROC(el1_sync_invalid)

el1_irq_invalid:
	inv_entry 1, BAD_IRQ
ENDPROC(el1_irq_invalid)

el1_fiq_invalid:
	inv_entry 1, BAD_FIQ
ENDPROC(el1_fiq_invalid)

el1_error_invalid:
	inv_entry 1, BAD_ERROR
ENDPROC(el1_error_invalid)

/*
 * EL1 mode handlers.
 */
	.align	6
el1_sync:
	kernel_entry 1
	mrs	x1, esr_el1			// read the syndrome register
	lsr	x24, x1, #ESR_EL1_EC_SHIFT	// exception class
	cmp	x24, #ESR_EL1_EC_DABT_EL1	// data abort in EL1
	b.eq	el1_da
	cmp	x24, #ESR_EL1_EC_SYS64		// configurable trap
	b.eq	el1_undef
	cmp	x24, #ESR_EL1_EC_SP_ALIGN	// stack alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_EL1_EC_PC_ALIGN	// pc alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_EL1_EC_UNKNOWN	// unknown exception in EL1
	b.eq	el1_undef
	cmp	x24, #ESR_EL1_EC_BREAKPT_EL1	// debug exception in EL1
	b.ge	el1_dbg
	b	el1_inv
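	/*
	 * The exception class (EC) tested above is ESR_EL1[31:26]
	 * (ESR_EL1_EC_SHIFT is 26). The debug exception classes occupy the
	 * top of the EC space, which is why a single b.ge catches them all.
	 */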
el1_da:
	/*
	 * Data abort handling
	 */
	mrs	x0, far_el1
	enable_dbg
	// re-enable interrupts if they were enabled in the aborted context
	tbnz	x23, #7, 1f			// PSR_I_BIT
	enable_irq
1:
	mov	x2, sp				// struct pt_regs
	bl	do_mem_abort

	// disable interrupts before pulling preserved data off the stack
	disable_irq
	kernel_exit 1
el1_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x0, far_el1
	enable_dbg
	mov	x1, x25
	mov	x2, sp
	b	do_sp_pc_abort
el1_undef:
	/*
	 * Undefined instruction
	 */
	enable_dbg
	mov	x0, sp
	b	do_undefinstr
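/*
 * Debug exception classes taken from the current EL have bit 0 of the EC
 * set; BRK64 (EC 0x3c) is the odd one out, so it is bumped below to make
 * the "EL1 only" tbz test treat it like the other EL1 debug exceptions.
 */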
el1_dbg:
	/*
	 * Debug exception handling
	 */
	cmp	x24, #ESR_EL1_EC_BRK64		// if BRK64
	cinc	x24, x24, eq			// set bit '0'
	tbz	x24, #0, el1_inv		// EL1 only
	mrs	x0, far_el1
	mov	x2, sp				// struct pt_regs
	bl	do_debug_exception
	enable_dbg
	kernel_exit 1
el1_inv:
	// TODO: add support for undefined instructions in kernel mode
	enable_dbg
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mrs	x2, esr_el1
	b	bad_mode
ENDPROC(el1_sync)

	.align	6
el1_irq:
	kernel_entry 1
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	irq_handler

#ifdef CONFIG_PREEMPT
	get_thread_info tsk
	ldr	w24, [tsk, #TI_PREEMPT]		// get preempt count
	cbnz	w24, 1f				// preempt count != 0
	ldr	x0, [tsk, #TI_FLAGS]		// get flags
	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
	bl	el1_preempt
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	kernel_exit 1
ENDPROC(el1_irq)

#ifdef CONFIG_PREEMPT
el1_preempt:
	mov	x24, lr
1:	bl	preempt_schedule_irq		// irq en/disable is done inside
	ldr	x0, [tsk, #TI_FLAGS]		// get new task's TI_FLAGS
	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
	ret	x24
#endif

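/*
 * For the non-syscall exceptions below, el0_sync points lr at ret_to_user
 * before dispatching, so the handlers can tail-branch straight into their
 * C handlers and still return through the user work-pending path.
 */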
/*
 * EL0 mode handlers.
 */
	.align	6
el0_sync:
	kernel_entry 0
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_EL1_EC_SHIFT	// exception class
	cmp	x24, #ESR_EL1_EC_SVC64		// SVC in 64-bit state
	b.eq	el0_svc
	adr	lr, ret_to_user
	cmp	x24, #ESR_EL1_EC_DABT_EL0	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_EL1_EC_IABT_EL0	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_EL1_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_EL1_EC_FP_EXC64	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_EL1_EC_SYS64		// configurable trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_SP_ALIGN	// stack alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_EL1_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_EL1_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_BREAKPT_EL0	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv

#ifdef CONFIG_COMPAT
	.align	6
el0_sync_compat:
	kernel_entry 0, 32
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_EL1_EC_SHIFT	// exception class
	cmp	x24, #ESR_EL1_EC_SVC32		// SVC in 32-bit state
	b.eq	el0_svc_compat
	adr	lr, ret_to_user
	cmp	x24, #ESR_EL1_EC_DABT_EL0	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_EL1_EC_IABT_EL0	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_EL1_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_EL1_EC_FP_EXC32	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_EL1_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP15_32	// CP15 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP15_64	// CP15 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP14_MR	// CP14 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP14_LS	// CP14 LDC/STC trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP14_64	// CP14 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_BREAKPT_EL0	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv
el0_svc_compat:
	/*
	 * AArch32 syscall handling
	 */
	adr	stbl, compat_sys_call_table	// load compat syscall table pointer
	uxtw	scno, w7			// syscall number in w7 (r7)
	mov	sc_nr, #__NR_compat_syscalls
	b	el0_svc_naked

	.align	6
el0_irq_compat:
	kernel_entry 0, 32
	b	el0_irq_naked
#endif

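/*
 * Note: with tagged pointers permitted at EL0 (TCR_EL1.TBI0), FAR_EL1 may
 * carry a tag in its top byte on a data abort; el0_da clears it before
 * handing the address to do_mem_abort.
 */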
el0_da:
	/*
	 * Data abort handling
	 */
	mrs	x0, far_el1
	bic	x0, x0, #(0xff << 56)
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	mov	x1, x25
	mov	x2, sp
	b	do_mem_abort
el0_ia:
	/*
	 * Instruction abort handling
	 */
	mrs	x0, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	orr	x1, x25, #1 << 24		// use reserved ISS bit for instruction aborts
	mov	x2, sp
	b	do_mem_abort
el0_fpsimd_acc:
	/*
	 * Floating Point or Advanced SIMD access
	 */
	enable_dbg
	mov	x0, x25
	mov	x1, sp
	b	do_fpsimd_acc
el0_fpsimd_exc:
	/*
	 * Floating Point or Advanced SIMD exception
	 */
	enable_dbg
	mov	x0, x25
	mov	x1, sp
	b	do_fpsimd_exc
el0_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x0, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	mov	x1, x25
	mov	x2, sp
	b	do_sp_pc_abort
el0_undef:
	/*
	 * Undefined instruction
	 */
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	mov	x0, sp
	b	do_undefinstr
el0_dbg:
	/*
	 * Debug exception handling
	 */
	tbnz	x24, #0, el0_inv		// EL0 only
	mrs	x0, far_el1
	mov	x1, x25
	mov	x2, sp
	bl	do_debug_exception
	enable_dbg
	b	ret_to_user
el0_inv:
	enable_dbg
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mrs	x2, esr_el1
	b	bad_mode
ENDPROC(el0_sync)

	.align	6
el0_irq:
	kernel_entry 0
el0_irq_naked:
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	irq_handler

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	b	ret_to_user
ENDPROC(el0_irq)

/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 * x0 = previous task_struct (must be preserved across the switch)
 * x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 */
ENTRY(cpu_switch_to)
	add	x8, x0, #THREAD_CPU_CONTEXT
	mov	x9, sp
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	str	lr, [x8]
	add	x8, x1, #THREAD_CPU_CONTEXT
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	ldr	lr, [x8]
	mov	sp, x9
	ret
ENDPROC(cpu_switch_to)
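
/*
 * The registers saved above are those held in thread_struct.cpu_context
 * (THREAD_CPU_CONTEXT comes from asm-offsets.c). Since lr is part of that
 * set, the incoming task resumes at the point where it last called
 * cpu_switch_to().
 */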

/*
 * This is the fast syscall return path.  We do as little as possible here,
 * and this includes saving x0 back into the kernel stack.
 */
ret_fast_syscall:
	disable_irq				// disable interrupts
	ldr	x1, [tsk, #TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, fast_work_pending
	enable_step_tsk x1, x2
	kernel_exit 0, ret = 1

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
	str	x0, [sp, #S_X0]			// returned x0
work_pending:
	tbnz	x1, #TIF_NEED_RESCHED, work_resched
	/* TIF_SIGPENDING or TIF_NOTIFY_RESUME case */
	ldr	x2, [sp, #S_PSTATE]
	mov	x0, sp				// 'regs'
	tst	x2, #PSR_MODE_MASK		// user mode regs?
	b.ne	no_work_pending			// returning to kernel
	enable_irq				// enable interrupts for do_notify_resume()
	bl	do_notify_resume
	b	ret_to_user
work_resched:
	bl	schedule

/*
 * "slow" syscall return path.
 */
ret_to_user:
	disable_irq				// disable interrupts
	ldr	x1, [tsk, #TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
	enable_step_tsk x1, x2
no_work_pending:
	kernel_exit 0, ret = 0
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	cbz	x19, 1f				// not a kernel thread
	mov	x0, x20
	blr	x19
1:	get_thread_info tsk
	b	ret_to_user
ENDPROC(ret_from_fork)
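
/*
 * x19/x20 above come back from cpu_context via cpu_switch_to():
 * copy_thread() puts a kernel thread's function in x19 and its argument
 * in x20, and leaves x19 at zero for user tasks, which fall straight
 * through to ret_to_user.
 */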

/*
 * SVC handler.
 */
	.align	6
el0_svc:
	adrp	stbl, sys_call_table		// load syscall table pointer
	uxtw	scno, w8			// syscall number in w8
	mov	sc_nr, #__NR_syscalls
el0_svc_naked:					// compat entry point
	stp	x0, scno, [sp, #S_ORIG_X0]	// save the original x0 and syscall number
	enable_dbg_and_irq

	ldr	x16, [tsk, #TI_FLAGS]		// check for syscall tracing
	tbnz	x16, #TIF_SYSCALL_TRACE, __sys_trace // are we tracing syscalls?
	adr	lr, ret_fast_syscall		// return address
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	ni_sys
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	br	x16				// call sys_* routine
ni_sys:
	mov	x0, sp
	b	do_ni_syscall
ENDPROC(el0_svc)
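
/*
 * Note: the AArch64 syscall convention puts the number in w8 and the
 * arguments in x0-x5, with the result returned in x0; compat (AArch32)
 * tasks use r7 for the number, which is why el0_svc_compat above widens
 * w7 instead.
 */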

	/*
	 * This is the really slow path.  We're going to be doing context
	 * switches, and waiting for our parent to respond.
	 */
__sys_trace:
	mov	x1, sp
	mov	w0, #0				// trace entry
	bl	syscall_trace
	adr	lr, __sys_trace_return		// return address
	uxtw	scno, w0			// syscall number (possibly new)
	mov	x1, sp				// pointer to regs
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	ni_sys
	ldp	x0, x1, [sp]			// restore the syscall args
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	br	x16				// call sys_* routine

__sys_trace_return:
	str	x0, [sp]			// save returned x0
	mov	x1, sp
	mov	w0, #1				// trace exit
	bl	syscall_trace
	b	ret_to_user

/*
 * Special system call wrappers.
 */
ENTRY(sys_rt_sigreturn_wrapper)
	mov	x0, sp
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

ENTRY(handle_arch_irq)
	.quad	0