/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/unistd32.h>

/*
 * Bad Abort numbers
 *-----------------
 */
#define BAD_SYNC	0
#define BAD_IRQ		1
#define BAD_FIQ		2
#define BAD_ERROR	3

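/*
 * kernel_entry builds a struct pt_regs frame on the kernel stack: x0-x29
 * are pushed in descending pairs, then LR, the aborted SP, ELR_EL1 and
 * SPSR_EL1 are stored at the S_LR/S_PC offsets (the S_* constants are
 * generated by asm-offsets and mirror the pt_regs layout).
 */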
	.macro	kernel_entry, el, regsize = 64
	sub	sp, sp, #S_FRAME_SIZE - S_LR	// room for LR, SP, SPSR, ELR
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	push	x28, x29
	push	x26, x27
	push	x24, x25
	push	x22, x23
	push	x20, x21
	push	x18, x19
	push	x16, x17
	push	x14, x15
	push	x12, x13
	push	x10, x11
	push	x8, x9
	push	x6, x7
	push	x4, x5
	push	x2, x3
	push	x0, x1
	.if	\el == 0
	mrs	x21, sp_el0
	.else
	add	x21, sp, #S_FRAME_SIZE
	.endif
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]
	stp	x22, x23, [sp, #S_PC]

	/*
	 * Set syscallno to -1 by default (overridden later if real syscall).
	 */
	.if	\el == 0
	mvn	x21, xzr
	str	x21, [sp, #S_SYSCALLNO]
	.endif

	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	 */
	.endm

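/*
 * kernel_exit undoes kernel_entry. With ret = 1 (the fast syscall return
 * path) x0 already holds the syscall return value and is preserved, so
 * only x1 is reloaded before SP is advanced past the x0/x1 slots.
 */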
	.macro	kernel_exit, el, ret = 0
	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
	.if	\el == 0
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	.endif
	.if	\ret
	ldr	x1, [sp, #S_X1]			// preserve x0 (syscall return)
	add	sp, sp, S_X2
	.else
	pop	x0, x1
	.endif
	pop	x2, x3				// load the rest of the registers
	pop	x4, x5
	pop	x6, x7
	pop	x8, x9
	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	.if	\el == 0
	msr	sp_el0, x23
	.endif
	pop	x10, x11
	pop	x12, x13
	pop	x14, x15
	pop	x16, x17
	pop	x18, x19
	pop	x20, x21
	pop	x22, x23
	pop	x24, x25
	pop	x26, x27
	pop	x28, x29
	ldr	lr, [sp], #S_FRAME_SIZE - S_LR	// load LR and restore SP
	eret					// return to kernel
	.endm

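/*
 * The kernel stack is THREAD_SIZE aligned with struct thread_info at its
 * low end, so masking SP with ~(THREAD_SIZE - 1) yields the thread_info
 * pointer.
 */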
	.macro	get_thread_info, rd
	mov	\rd, sp
	and	\rd, \rd, #~(THREAD_SIZE - 1)	// top of stack
	.endm

/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - x0 to x6.
 *
 * x7 is reserved for the system call number in 32-bit mode.
 */
sc_nr	.req	x25		// number of system calls
scno	.req	x26		// syscall number
stbl	.req	x27		// syscall table pointer
tsk	.req	x28		// current thread_info

/*
 * Interrupt handling.
 */
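/*
 * handle_arch_irq (defined at the bottom of this file) is a code pointer
 * filled in at boot by the interrupt controller setup code; it is called
 * here with a pointer to the saved pt_regs in x0.
 */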
	.macro	irq_handler
	ldr	x1, handle_arch_irq
	mov	x0, sp
	blr	x1
	.endm

	.text

/*
 * Exception vectors.
 */

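/*
 * VBAR_EL1 requires 2KB alignment, hence ".align 11". Each ventry slot is
 * 0x80 bytes, giving the four groups of four vectors below (EL1t, EL1h,
 * 64-bit EL0 and 32-bit EL0).
 */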
	.align	11
ENTRY(vectors)
	ventry	el1_sync_invalid		// Synchronous EL1t
	ventry	el1_irq_invalid			// IRQ EL1t
	ventry	el1_fiq_invalid			// FIQ EL1t
	ventry	el1_error_invalid		// Error EL1t

	ventry	el1_sync			// Synchronous EL1h
	ventry	el1_irq				// IRQ EL1h
	ventry	el1_fiq_invalid			// FIQ EL1h
	ventry	el1_error_invalid		// Error EL1h

	ventry	el0_sync			// Synchronous 64-bit EL0
	ventry	el0_irq				// IRQ 64-bit EL0
	ventry	el0_fiq_invalid			// FIQ 64-bit EL0
	ventry	el0_error_invalid		// Error 64-bit EL0

#ifdef CONFIG_COMPAT
	ventry	el0_sync_compat			// Synchronous 32-bit EL0
	ventry	el0_irq_compat			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid_compat		// FIQ 32-bit EL0
	ventry	el0_error_invalid_compat	// Error 32-bit EL0
#else
	ventry	el0_sync_invalid		// Synchronous 32-bit EL0
	ventry	el0_irq_invalid			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid			// FIQ 32-bit EL0
	ventry	el0_error_invalid		// Error 32-bit EL0
#endif
END(vectors)

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, el, reason, regsize = 64
	kernel_entry el, \regsize
	mov	x0, sp
	mov	x1, #\reason
	mrs	x2, esr_el1
	b	bad_mode
	.endm

el0_sync_invalid:
	inv_entry 0, BAD_SYNC
ENDPROC(el0_sync_invalid)

el0_irq_invalid:
	inv_entry 0, BAD_IRQ
ENDPROC(el0_irq_invalid)

el0_fiq_invalid:
	inv_entry 0, BAD_FIQ
ENDPROC(el0_fiq_invalid)

el0_error_invalid:
	inv_entry 0, BAD_ERROR
ENDPROC(el0_error_invalid)

#ifdef CONFIG_COMPAT
el0_fiq_invalid_compat:
	inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)

el0_error_invalid_compat:
	inv_entry 0, BAD_ERROR, 32
ENDPROC(el0_error_invalid_compat)
#endif

el1_sync_invalid:
	inv_entry 1, BAD_SYNC
ENDPROC(el1_sync_invalid)

el1_irq_invalid:
	inv_entry 1, BAD_IRQ
ENDPROC(el1_irq_invalid)

el1_fiq_invalid:
	inv_entry 1, BAD_FIQ
ENDPROC(el1_fiq_invalid)

el1_error_invalid:
	inv_entry 1, BAD_ERROR
ENDPROC(el1_error_invalid)

/*
 * EL1 mode handlers.
 */
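/* Entry points below are aligned to 64 bytes, likely one cache line. */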
	.align	6
el1_sync:
	kernel_entry 1
	mrs	x1, esr_el1			// read the syndrome register
	lsr	x24, x1, #ESR_EL1_EC_SHIFT	// exception class
	cmp	x24, #ESR_EL1_EC_DABT_EL1	// data abort in EL1
	b.eq	el1_da
	cmp	x24, #ESR_EL1_EC_SYS64		// configurable trap
	b.eq	el1_undef
	cmp	x24, #ESR_EL1_EC_SP_ALIGN	// stack alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_EL1_EC_PC_ALIGN	// pc alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_EL1_EC_UNKNOWN	// unknown exception in EL1
	b.eq	el1_undef
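	// all EC values at or above BREAKPT_EL1 denote debug exceptions,
	// hence the b.ge below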
	cmp	x24, #ESR_EL1_EC_BREAKPT_EL1	// debug exception in EL1
	b.ge	el1_dbg
	b	el1_inv
el1_da:
	/*
	 * Data abort handling
	 */
	mrs	x0, far_el1
	enable_dbg_if_not_stepping x2
	// re-enable interrupts if they were enabled in the aborted context
	tbnz	x23, #7, 1f			// PSR_I_BIT
	enable_irq
1:
	mov	x2, sp				// struct pt_regs
	bl	do_mem_abort

	// disable interrupts before pulling preserved data off the stack
	disable_irq
	kernel_exit 1
el1_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x0, far_el1
	mov	x1, x25
	mov	x2, sp
	b	do_sp_pc_abort
el1_undef:
	/*
	 * Undefined instruction
	 */
	mov	x0, sp
	b	do_undefinstr
el1_dbg:
	/*
	 * Debug exception handling
	 */
	tbz	x24, #0, el1_inv		// EL1 only
	mrs	x0, far_el1
	mov	x2, sp				// struct pt_regs
	bl	do_debug_exception

	kernel_exit 1
el1_inv:
	// TODO: add support for undefined instructions in kernel mode
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mrs	x2, esr_el1
	b	bad_mode
ENDPROC(el1_sync)

	.align	6
el1_irq:
	kernel_entry 1
	enable_dbg_if_not_stepping x0
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	irq_handler

#ifdef CONFIG_PREEMPT
	get_thread_info tsk
	ldr	w24, [tsk, #TI_PREEMPT]		// get preempt count
	cbnz	w24, 1f				// preempt count != 0
	ldr	x0, [tsk, #TI_FLAGS]		// get flags
	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
	bl	el1_preempt
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	kernel_exit 1
ENDPROC(el1_irq)

#ifdef CONFIG_PREEMPT
el1_preempt:
	mov	x24, lr
1:	enable_dbg
	bl	preempt_schedule_irq		// irq en/disable is done inside
	ldr	x0, [tsk, #TI_FLAGS]		// get new task's TI_FLAGS
	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
	ret	x24
#endif

/*
 * EL0 mode handlers.
 */
	.align	6
el0_sync:
	kernel_entry 0
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_EL1_EC_SHIFT	// exception class
	cmp	x24, #ESR_EL1_EC_SVC64		// SVC in 64-bit state
	b.eq	el0_svc
	adr	lr, ret_from_exception
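	// the handlers below are reached with a plain "b" and return through
	// lr, which was just pointed at ret_from_exception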
	cmp	x24, #ESR_EL1_EC_DABT_EL0	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_EL1_EC_IABT_EL0	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_EL1_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_EL1_EC_FP_EXC64	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_EL1_EC_SYS64		// configurable trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_SP_ALIGN	// stack alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_EL1_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_EL1_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_BREAKPT_EL0	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv

#ifdef CONFIG_COMPAT
	.align	6
el0_sync_compat:
	kernel_entry 0, 32
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_EL1_EC_SHIFT	// exception class
	cmp	x24, #ESR_EL1_EC_SVC32		// SVC in 32-bit state
	b.eq	el0_svc_compat
	adr	lr, ret_from_exception
	cmp	x24, #ESR_EL1_EC_DABT_EL0	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_EL1_EC_IABT_EL0	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_EL1_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_EL1_EC_FP_EXC32	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_EL1_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP15_32	// CP15 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP15_64	// CP15 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP14_MR	// CP14 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP14_LS	// CP14 LDC/STC trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP14_64	// CP14 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_BREAKPT_EL0	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv
el0_svc_compat:
	/*
	 * AArch32 syscall handling
	 */
	adr	stbl, compat_sys_call_table	// load compat syscall table pointer
	uxtw	scno, w7			// syscall number in w7 (r7)
	mov	sc_nr, #__NR_compat_syscalls
	b	el0_svc_naked

	.align	6
el0_irq_compat:
	kernel_entry 0, 32
	b	el0_irq_naked
#endif

el0_da:
	/*
	 * Data abort handling
	 */
	mrs	x0, far_el1
	bic	x0, x0, #(0xff << 56)
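	// the bic above presumably clears the address tag (top byte) that
	// TBI may have left in FAR_EL1 before the address reaches C code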
	disable_step x1
	isb
	enable_dbg
	// enable interrupts before calling the main handler
	enable_irq
	mov	x1, x25
	mov	x2, sp
	b	do_mem_abort
el0_ia:
	/*
	 * Instruction abort handling
	 */
	mrs	x0, far_el1
	disable_step x1
	isb
	enable_dbg
	// enable interrupts before calling the main handler
	enable_irq
	orr	x1, x25, #1 << 24		// use reserved ISS bit for instruction aborts
	mov	x2, sp
	b	do_mem_abort
el0_fpsimd_acc:
	/*
	 * Floating Point or Advanced SIMD access
	 */
	mov	x0, x25
	mov	x1, sp
	b	do_fpsimd_acc
el0_fpsimd_exc:
	/*
	 * Floating Point or Advanced SIMD exception
	 */
	mov	x0, x25
	mov	x1, sp
	b	do_fpsimd_exc
el0_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x0, far_el1
	disable_step x1
	isb
	enable_dbg
	// enable interrupts before calling the main handler
	enable_irq
	mov	x1, x25
	mov	x2, sp
	b	do_sp_pc_abort
el0_undef:
	/*
	 * Undefined instruction
	 */
	mov	x0, sp
	// enable interrupts before calling the main handler
	enable_irq
	b	do_undefinstr
el0_dbg:
	/*
	 * Debug exception handling
	 */
	tbnz	x24, #0, el0_inv		// EL0 only
	mrs	x0, far_el1
	disable_step x1
	mov	x1, x25
	mov	x2, sp
	b	do_debug_exception
el0_inv:
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mrs	x2, esr_el1
	b	bad_mode
ENDPROC(el0_sync)

	.align	6
el0_irq:
	kernel_entry 0
el0_irq_naked:
	disable_step x1
	isb
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	irq_handler
	get_thread_info tsk

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	b	ret_to_user
ENDPROC(el0_irq)

/*
 * This is the return code to user mode for abort handlers
 */
ret_from_exception:
	get_thread_info tsk
	b	ret_to_user
ENDPROC(ret_from_exception)

/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 *   x0 = previous task_struct (must be preserved across the switch)
 *   x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 */
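/*
 * The registers are stored in struct cpu_context (thread.cpu_context) in
 * the order x19-x28, fp, sp, pc, matching the stp/ldp sequence below.
 */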
ENTRY(cpu_switch_to)
	add	x8, x0, #THREAD_CPU_CONTEXT
	mov	x9, sp
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	str	lr, [x8]
	add	x8, x1, #THREAD_CPU_CONTEXT
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	ldr	lr, [x8]
	mov	sp, x9
	ret
ENDPROC(cpu_switch_to)

/*
 * This is the fast syscall return path. We do as little as possible here,
 * and this includes saving x0 back into the kernel stack.
 */
ret_fast_syscall:
	disable_irq				// disable interrupts
	ldr	x1, [tsk, #TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, fast_work_pending
	tbz	x1, #TIF_SINGLESTEP, fast_exit
	disable_dbg
	enable_step x2
fast_exit:
	kernel_exit 0, ret = 1

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
	str	x0, [sp, #S_X0]			// returned x0
work_pending:
	tbnz	x1, #TIF_NEED_RESCHED, work_resched
	/* TIF_SIGPENDING or TIF_NOTIFY_RESUME case */
	ldr	x2, [sp, #S_PSTATE]
	mov	x0, sp				// 'regs'
	tst	x2, #PSR_MODE_MASK		// user mode regs?
	b.ne	no_work_pending			// returning to kernel
	enable_irq				// enable interrupts for do_notify_resume()
	bl	do_notify_resume
	b	ret_to_user
work_resched:
	enable_dbg
	bl	schedule

/*
 * "slow" syscall return path.
 */
ret_to_user:
	disable_irq				// disable interrupts
	ldr	x1, [tsk, #TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
	tbz	x1, #TIF_SINGLESTEP, no_work_pending
	disable_dbg
	enable_step x2
no_work_pending:
	kernel_exit 0, ret = 0
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
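/*
 * copy_thread() leaves the thread function in x19 and its argument in x20
 * for kernel threads; x19 is zero when returning to a forked user task.
 */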
ENTRY(ret_from_fork)
	bl	schedule_tail
	cbz	x19, 1f				// not a kernel thread
	mov	x0, x20
	blr	x19
1:	get_thread_info tsk
	b	ret_to_user
ENDPROC(ret_from_fork)

/*
 * SVC handler.
 */
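/*
 * Per the AArch64 Linux ABI, the syscall number arrives in w8 and the
 * arguments in x0-x5. Table entries are 8-byte pointers, hence the
 * "lsl #3" scaled load below.
 */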
	.align	6
el0_svc:
	adrp	stbl, sys_call_table		// load syscall table pointer
	uxtw	scno, w8			// syscall number in w8
	mov	sc_nr, #__NR_syscalls
el0_svc_naked:					// compat entry point
	stp	x0, scno, [sp, #S_ORIG_X0]	// save the original x0 and syscall number
	disable_step x16
	isb
	enable_dbg
	enable_irq

	get_thread_info tsk
	ldr	x16, [tsk, #TI_FLAGS]		// check for syscall tracing
	tbnz	x16, #TIF_SYSCALL_TRACE, __sys_trace // are we tracing syscalls?
	adr	lr, ret_fast_syscall		// return address
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	ni_sys
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	br	x16				// call sys_* routine
ni_sys:
	mov	x0, sp
	b	do_ni_syscall
ENDPROC(el0_svc)

	/*
	 * This is the really slow path.  We're going to be doing context
	 * switches, and waiting for our parent to respond.
	 */
__sys_trace:
	mov	x1, sp
	mov	w0, #0				// trace entry
	bl	syscall_trace
	adr	lr, __sys_trace_return		// return address
	uxtw	scno, w0			// syscall number (possibly new)
	mov	x1, sp				// pointer to regs
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	ni_sys
	ldp	x0, x1, [sp]			// restore the syscall args
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	br	x16				// call sys_* routine

__sys_trace_return:
	str	x0, [sp]			// save returned x0
	mov	x1, sp
	mov	w0, #1				// trace exit
	bl	syscall_trace
	b	ret_to_user

/*
 * Special system call wrappers.
 */
ENTRY(sys_rt_sigreturn_wrapper)
	mov	x0, sp
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

ENTRY(handle_arch_irq)
	.quad	0
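	// storage for the interrupt handler pointer loaded by irq_handler;
	// presumably written once at boot by the irqchip initialisation code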