/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>

/*
 * Context tracking subsystem. Used to instrument transitions
 * between user and kernel mode.
 */
	.macro ct_user_exit, syscall = 0
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_exit
	.if \syscall == 1
	/*
	 * Save/restore needed during syscalls. Restore syscall arguments from
	 * the values already saved on stack during kernel_entry.
	 */
	ldp	x0, x1, [sp]
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	.endif
#endif
	.endm

	.macro ct_user_enter
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_enter
#endif
	.endm
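
/*
 * ct_user_exit is invoked on every entry from EL0 and ct_user_enter just
 * before returning to EL0, so that the context tracking code (used by
 * CONFIG_NO_HZ_FULL and RCU) knows whether this CPU is running user or
 * kernel code. The called C functions may clobber the caller-saved
 * registers, which is why the syscall variant reloads x0-x7 from pt_regs.
 */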

/*
 * Bad Abort numbers
 *-----------------
 */
#define BAD_SYNC	0
#define BAD_IRQ		1
#define BAD_FIQ		2
#define BAD_ERROR	3

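/*
 * kernel_entry builds a struct pt_regs frame on the kernel stack: the
 * general purpose registers x0-x29 are pushed in pairs, followed by the
 * exception return state (LR, the interrupted SP, ELR_EL1 and SPSR_EL1)
 * stored at the S_LR/S_PC offsets generated by asm-offsets. For EL0
 * entries the interrupted SP is read from SP_EL0; for EL1 entries it is
 * recomputed from the current SP plus the frame size.
 */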
	.macro	kernel_entry, el, regsize = 64
	sub	sp, sp, #S_FRAME_SIZE - S_LR	// room for LR, SP, SPSR, ELR
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	push	x28, x29
	push	x26, x27
	push	x24, x25
	push	x22, x23
	push	x20, x21
	push	x18, x19
	push	x16, x17
	push	x14, x15
	push	x12, x13
	push	x10, x11
	push	x8, x9
	push	x6, x7
	push	x4, x5
	push	x2, x3
	push	x0, x1
	.if	\el == 0
	mrs	x21, sp_el0
	get_thread_info tsk			// Ensure MDSCR_EL1.SS is clear,
	ldr	x19, [tsk, #TI_FLAGS]		// since we can unmask debug
	disable_step_tsk x19, x20		// exceptions when scheduling.
	.else
	add	x21, sp, #S_FRAME_SIZE
	.endif
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]
	stp	x22, x23, [sp, #S_PC]

	/*
	 * Set syscallno to -1 by default (overridden later if real syscall).
	 */
	.if	\el == 0
	mvn	x21, xzr
	str	x21, [sp, #S_SYSCALLNO]
	.endif

	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	 */
	.endm

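/*
 * kernel_exit unwinds the pt_regs frame built by kernel_entry: ELR_EL1 and
 * SPSR_EL1 are restored so that eret returns to the interrupted context,
 * and for EL0 returns SP_EL0 is restored as well. With ret = 1 (the fast
 * syscall return path) x0 is left untouched so that it carries the syscall
 * return value back to userspace.
 */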
	.macro	kernel_exit, el, ret = 0
	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
	.if	\el == 0
	ct_user_enter
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	.endif
	.if	\ret
	ldr	x1, [sp, #S_X1]			// preserve x0 (syscall return)
	add	sp, sp, S_X2
	.else
	pop	x0, x1
	.endif
	pop	x2, x3				// load the rest of the registers
	pop	x4, x5
	pop	x6, x7
	pop	x8, x9
	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	.if	\el == 0
	msr	sp_el0, x23
	.endif
	pop	x10, x11
	pop	x12, x13
	pop	x14, x15
	pop	x16, x17
	pop	x18, x19
	pop	x20, x21
	pop	x22, x23
	pop	x24, x25
	pop	x26, x27
	pop	x28, x29
	ldr	lr, [sp], #S_FRAME_SIZE - S_LR	// load LR and restore SP
	eret					// return to kernel
	.endm

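/*
 * Kernel stacks are THREAD_SIZE sized and THREAD_SIZE aligned, with
 * struct thread_info placed at the lowest address, so masking off the low
 * bits of the current SP yields the thread_info pointer.
 */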
	.macro	get_thread_info, rd
	mov	\rd, sp
	and	\rd, \rd, #~(THREAD_SIZE - 1)	// top of stack
	.endm

/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - x0 to x6.
 *
 * x7 is reserved for the system call number in 32-bit mode.
 */
sc_nr	.req	x25		// number of system calls
scno	.req	x26		// syscall number
stbl	.req	x27		// syscall table pointer
tsk	.req	x28		// current thread_info

/*
 * Interrupt handling.
 */
	.macro	irq_handler
	ldr	x1, handle_arch_irq
	mov	x0, sp
	blr	x1
	.endm
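
/*
 * handle_arch_irq (the .quad at the end of this file) holds a function
 * pointer installed at boot by the interrupt controller driver via
 * set_handle_irq(); irq_handler simply calls it with a pointer to the
 * saved pt_regs in x0.
 */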

	.text

/*
 * Exception vectors.
 */

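/*
 * The vector table has four entries (Synchronous, IRQ, FIQ, Error) for
 * each of the four exception origins: current EL with SP_EL0, current EL
 * with SP_ELx, lower EL using AArch64 and lower EL using AArch32. VBAR_EL1
 * requires 2KB alignment, hence the .align 11; each ventry is padded so
 * that the entries sit 0x80 bytes apart and the hardware can index the
 * table directly.
 */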
	.align	11
ENTRY(vectors)
	ventry	el1_sync_invalid		// Synchronous EL1t
	ventry	el1_irq_invalid			// IRQ EL1t
	ventry	el1_fiq_invalid			// FIQ EL1t
	ventry	el1_error_invalid		// Error EL1t

	ventry	el1_sync			// Synchronous EL1h
	ventry	el1_irq				// IRQ EL1h
	ventry	el1_fiq_invalid			// FIQ EL1h
	ventry	el1_error_invalid		// Error EL1h

	ventry	el0_sync			// Synchronous 64-bit EL0
	ventry	el0_irq				// IRQ 64-bit EL0
	ventry	el0_fiq_invalid			// FIQ 64-bit EL0
	ventry	el0_error_invalid		// Error 64-bit EL0

#ifdef CONFIG_COMPAT
	ventry	el0_sync_compat			// Synchronous 32-bit EL0
	ventry	el0_irq_compat			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid_compat		// FIQ 32-bit EL0
	ventry	el0_error_invalid_compat	// Error 32-bit EL0
#else
	ventry	el0_sync_invalid		// Synchronous 32-bit EL0
	ventry	el0_irq_invalid			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid			// FIQ 32-bit EL0
	ventry	el0_error_invalid		// Error 32-bit EL0
#endif
END(vectors)

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, el, reason, regsize = 64
	kernel_entry el, \regsize
	mov	x0, sp
	mov	x1, #\reason
	mrs	x2, esr_el1
	b	bad_mode
	.endm

el0_sync_invalid:
	inv_entry 0, BAD_SYNC
ENDPROC(el0_sync_invalid)

el0_irq_invalid:
	inv_entry 0, BAD_IRQ
ENDPROC(el0_irq_invalid)

el0_fiq_invalid:
	inv_entry 0, BAD_FIQ
ENDPROC(el0_fiq_invalid)

el0_error_invalid:
	inv_entry 0, BAD_ERROR
ENDPROC(el0_error_invalid)

#ifdef CONFIG_COMPAT
el0_fiq_invalid_compat:
	inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)

el0_error_invalid_compat:
	inv_entry 0, BAD_ERROR, 32
ENDPROC(el0_error_invalid_compat)
#endif

el1_sync_invalid:
	inv_entry 1, BAD_SYNC
ENDPROC(el1_sync_invalid)

el1_irq_invalid:
	inv_entry 1, BAD_IRQ
ENDPROC(el1_irq_invalid)

el1_fiq_invalid:
	inv_entry 1, BAD_FIQ
ENDPROC(el1_fiq_invalid)

el1_error_invalid:
	inv_entry 1, BAD_ERROR
ENDPROC(el1_error_invalid)

/*
 * EL1 mode handlers.
 */
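/*
 * The sync handlers below decode the Exception Class field of ESR_EL1
 * (bits [31:26], extracted with ESR_EL1_EC_SHIFT) and branch to the
 * handler for that class; anything unrecognised falls through to the
 * bad_mode/invalid paths.
 */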
	.align	6
el1_sync:
	kernel_entry 1
	mrs	x1, esr_el1			// read the syndrome register
	lsr	x24, x1, #ESR_EL1_EC_SHIFT	// exception class
	cmp	x24, #ESR_EL1_EC_DABT_EL1	// data abort in EL1
	b.eq	el1_da
	cmp	x24, #ESR_EL1_EC_SYS64		// configurable trap
	b.eq	el1_undef
	cmp	x24, #ESR_EL1_EC_SP_ALIGN	// stack alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_EL1_EC_PC_ALIGN	// pc alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_EL1_EC_UNKNOWN	// unknown exception in EL1
	b.eq	el1_undef
	cmp	x24, #ESR_EL1_EC_BREAKPT_EL1	// debug exception in EL1
	b.ge	el1_dbg
	b	el1_inv
el1_da:
	/*
	 * Data abort handling
	 */
	mrs	x0, far_el1
	enable_dbg
	// re-enable interrupts if they were enabled in the aborted context
	tbnz	x23, #7, 1f			// PSR_I_BIT
	enable_irq
1:
	mov	x2, sp				// struct pt_regs
	bl	do_mem_abort

	// disable interrupts before pulling preserved data off the stack
	disable_irq
	kernel_exit 1
el1_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x0, far_el1
	enable_dbg
	mov	x2, sp
	b	do_sp_pc_abort
el1_undef:
	/*
	 * Undefined instruction
	 */
	enable_dbg
	mov	x0, sp
	b	do_undefinstr
el1_dbg:
	/*
	 * Debug exception handling
	 */
	cmp	x24, #ESR_EL1_EC_BRK64		// if BRK64
	cinc	x24, x24, eq			// set bit '0'
	tbz	x24, #0, el1_inv		// EL1 only
	mrs	x0, far_el1
	mov	x2, sp				// struct pt_regs
	bl	do_debug_exception
	kernel_exit 1
el1_inv:
	// TODO: add support for undefined instructions in kernel mode
	enable_dbg
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mrs	x2, esr_el1
	b	bad_mode
ENDPROC(el1_sync)

	.align	6
el1_irq:
	kernel_entry 1
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	irq_handler

#ifdef CONFIG_PREEMPT
	get_thread_info tsk
	ldr	w24, [tsk, #TI_PREEMPT]		// get preempt count
	cbnz	w24, 1f				// preempt count != 0
	ldr	x0, [tsk, #TI_FLAGS]		// get flags
	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
	bl	el1_preempt
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	kernel_exit 1
ENDPROC(el1_irq)

#ifdef CONFIG_PREEMPT
el1_preempt:
	mov	x24, lr
1:	bl	preempt_schedule_irq		// irq en/disable is done inside
	ldr	x0, [tsk, #TI_FLAGS]		// get new task's TI_FLAGS
	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
	ret	x24
#endif

/*
 * EL0 mode handlers.
 */
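/*
 * A common pattern in the EL0 handlers below: debug exceptions (and, on
 * the fault paths, interrupts) are re-enabled, ct_user_exit tells the
 * context tracking code that we have left userspace, and lr is pointed at
 * ret_to_user before tail-calling the C handler, so that the handler's
 * return goes straight down the user return path.
 */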
	.align	6
el0_sync:
	kernel_entry 0
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_EL1_EC_SHIFT	// exception class
	cmp	x24, #ESR_EL1_EC_SVC64		// SVC in 64-bit state
	b.eq	el0_svc
	cmp	x24, #ESR_EL1_EC_DABT_EL0	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_EL1_EC_IABT_EL0	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_EL1_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_EL1_EC_FP_EXC64	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_EL1_EC_SYS64		// configurable trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_SP_ALIGN	// stack alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_EL1_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_EL1_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_BREAKPT_EL0	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv

#ifdef CONFIG_COMPAT
	.align	6
el0_sync_compat:
	kernel_entry 0, 32
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_EL1_EC_SHIFT	// exception class
	cmp	x24, #ESR_EL1_EC_SVC32		// SVC in 32-bit state
	b.eq	el0_svc_compat
	cmp	x24, #ESR_EL1_EC_DABT_EL0	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_EL1_EC_IABT_EL0	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_EL1_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_EL1_EC_FP_EXC32	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_EL1_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP15_32	// CP15 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP15_64	// CP15 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP14_MR	// CP14 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP14_LS	// CP14 LDC/STC trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP14_64	// CP14 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_BREAKPT_EL0	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv
el0_svc_compat:
	/*
	 * AArch32 syscall handling
	 */
	adr	stbl, compat_sys_call_table	// load compat syscall table pointer
	uxtw	scno, w7			// syscall number in w7 (r7)
	mov	sc_nr, #__NR_compat_syscalls
	b	el0_svc_naked

	.align	6
el0_irq_compat:
	kernel_entry 0, 32
	b	el0_irq_naked
#endif

el0_da:
	/*
	 * Data abort handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	bic	x0, x26, #(0xff << 56)
	mov	x1, x25
	mov	x2, sp
	adr	lr, ret_to_user
	b	do_mem_abort
el0_ia:
	/*
	 * Instruction abort handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, x26
	orr	x1, x25, #1 << 24		// use reserved ISS bit for instruction aborts
	mov	x2, sp
	adr	lr, ret_to_user
	b	do_mem_abort
el0_fpsimd_acc:
	/*
	 * Floating Point or Advanced SIMD access
	 */
	enable_dbg
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	adr	lr, ret_to_user
	b	do_fpsimd_acc
el0_fpsimd_exc:
	/*
	 * Floating Point or Advanced SIMD exception
	 */
	enable_dbg
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	adr	lr, ret_to_user
	b	do_fpsimd_exc
el0_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	mov	x0, x26
	mov	x1, x25
	mov	x2, sp
	adr	lr, ret_to_user
	b	do_sp_pc_abort
el0_undef:
	/*
	 * Undefined instruction
	 */
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, sp
	adr	lr, ret_to_user
	b	do_undefinstr
el0_dbg:
	/*
	 * Debug exception handling
	 */
	tbnz	x24, #0, el0_inv		// EL0 only
	mrs	x0, far_el1
	mov	x1, x25
	mov	x2, sp
	bl	do_debug_exception
	enable_dbg
	ct_user_exit
	b	ret_to_user
el0_inv:
	enable_dbg
	ct_user_exit
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mrs	x2, esr_el1
	adr	lr, ret_to_user
	b	bad_mode
ENDPROC(el0_sync)

	.align	6
el0_irq:
	kernel_entry 0
el0_irq_naked:
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	ct_user_exit
	irq_handler

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	b	ret_to_user
ENDPROC(el0_irq)

/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 * x0 = previous task_struct (must be preserved across the switch)
 * x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 *
 */
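/*
 * cpu_switch_to saves the callee-saved registers x19-x28, the frame
 * pointer, sp and lr into the outgoing task's thread_struct.cpu_context
 * (at offset THREAD_CPU_CONTEXT) and reloads them from the incoming
 * task's. Returning through the reloaded lr resumes the new task where it
 * last called cpu_switch_to, or at ret_from_fork for a freshly created
 * task.
 */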
ENTRY(cpu_switch_to)
	add	x8, x0, #THREAD_CPU_CONTEXT
	mov	x9, sp
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	str	lr, [x8]
	add	x8, x1, #THREAD_CPU_CONTEXT
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	ldr	lr, [x8]
	mov	sp, x9
	ret
ENDPROC(cpu_switch_to)

/*
 * This is the fast syscall return path. We do as little as possible here,
 * and this includes saving x0 back into the kernel stack.
 */
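/*
 * With interrupts disabled, the thread flags are checked against
 * _TIF_WORK_MASK (pending signal, reschedule, notify-resume, ...). If any
 * work is pending, x0 (the syscall return value) is spilled back into
 * pt_regs and the slow work_pending path runs before returning to EL0.
 */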
ret_fast_syscall:
	disable_irq				// disable interrupts
	ldr	x1, [tsk, #TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, fast_work_pending
	enable_step_tsk x1, x2
	kernel_exit 0, ret = 1

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
	str	x0, [sp, #S_X0]			// returned x0
work_pending:
	tbnz	x1, #TIF_NEED_RESCHED, work_resched
	/* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */
	ldr	x2, [sp, #S_PSTATE]
	mov	x0, sp				// 'regs'
	tst	x2, #PSR_MODE_MASK		// user mode regs?
	b.ne	no_work_pending			// returning to kernel
	enable_irq				// enable interrupts for do_notify_resume()
	bl	do_notify_resume
	b	ret_to_user
work_resched:
	bl	schedule

/*
 * "slow" syscall return path.
 */
ret_to_user:
	disable_irq				// disable interrupts
	ldr	x1, [tsk, #TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
	enable_step_tsk x1, x2
no_work_pending:
	kernel_exit 0, ret = 0
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
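/*
 * copy_thread() sets up a new task so that it first runs here: x19 and
 * x20 come from the saved cpu_context, where x19 is the thread function
 * and x20 its argument for a kernel thread, while for a user task x19 is
 * zero and we fall through to ret_to_user.
 */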
ENTRY(ret_from_fork)
	bl	schedule_tail
	cbz	x19, 1f				// not a kernel thread
	mov	x0, x20
	blr	x19
1:	get_thread_info tsk
	b	ret_to_user
ENDPROC(ret_from_fork)

/*
 * SVC handler.
 */
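/*
 * For a 64-bit SVC the syscall number arrives in w8 (w7 for the compat
 * path above). The number is bounds-checked against sc_nr and used to
 * index the syscall table (each entry is 8 bytes, hence lsl #3); lr is
 * preset to ret_fast_syscall so the sys_* routine returns directly into
 * the fast exit path. Out-of-range numbers are routed to do_ni_syscall.
 */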
	.align	6
el0_svc:
	adrp	stbl, sys_call_table		// load syscall table pointer
	uxtw	scno, w8			// syscall number in w8
	mov	sc_nr, #__NR_syscalls
el0_svc_naked:					// compat entry point
	stp	x0, scno, [sp, #S_ORIG_X0]	// save the original x0 and syscall number
	enable_dbg_and_irq
	ct_user_exit 1

	ldr	x16, [tsk, #TI_FLAGS]		// check for syscall hooks
	tst	x16, #_TIF_SYSCALL_WORK
	b.ne	__sys_trace
	adr	lr, ret_fast_syscall		// return address
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	ni_sys
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	br	x16				// call sys_* routine
ni_sys:
	mov	x0, sp
	b	do_ni_syscall
ENDPROC(el0_svc)

	/*
	 * This is the really slow path.  We're going to be doing context
	 * switches, and waiting for our parent to respond.
	 */
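	/*
	 * syscall_trace_enter() may rewrite the syscall number (e.g. via
	 * ptrace), so it is re-read from the return value in w0 and the
	 * argument registers are reloaded from pt_regs before the table
	 * lookup is repeated.
	 */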
__sys_trace:
	mov	x0, sp
	bl	syscall_trace_enter
	adr	lr, __sys_trace_return		// return address
	uxtw	scno, w0			// syscall number (possibly new)
	mov	x1, sp				// pointer to regs
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	ni_sys
	ldp	x0, x1, [sp]			// restore the syscall args
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	br	x16				// call sys_* routine

__sys_trace_return:
	str	x0, [sp]			// save returned x0
	mov	x0, sp
	bl	syscall_trace_exit
	b	ret_to_user

/*
 * Special system call wrappers.
 */
ENTRY(sys_rt_sigreturn_wrapper)
	mov	x0, sp
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

ENTRY(handle_arch_irq)
	.quad	0