/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:     Catalin Marinas <catalin.marinas@arm.com>
 *              Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/irq.h>
#include <asm/memory.h>
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-uaccess.h>
#include <asm/unistd.h>

/*
 * Context tracking subsystem. Used to instrument transitions
 * between user and kernel mode.
 */
        .macro ct_user_exit, syscall = 0
#ifdef CONFIG_CONTEXT_TRACKING
        bl      context_tracking_user_exit
        .if     \syscall == 1
        /*
         * Save/restore needed during syscalls. Restore syscall arguments from
         * the values already saved on stack during kernel_entry.
         */
        ldp     x0, x1, [sp]
        ldp     x2, x3, [sp, #S_X2]
        ldp     x4, x5, [sp, #S_X4]
        ldp     x6, x7, [sp, #S_X6]
        .endif
#endif
        .endm

        .macro ct_user_enter
#ifdef CONFIG_CONTEXT_TRACKING
        bl      context_tracking_user_enter
#endif
        .endm

/*
 * Bad Abort numbers
 *-----------------
 */
#define BAD_SYNC        0
#define BAD_IRQ         1
#define BAD_FIQ         2
#define BAD_ERROR       3

        .macro kernel_ventry, el, label, regsize = 64
        .align 7
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
alternative_if ARM64_UNMAP_KERNEL_AT_EL0
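        /*
         * When entered from 64-bit EL0 via the KPTI trampoline, the original
         * x30 was stashed in tpidrro_el0 by tramp_ventry ("Restored in
         * kernel_ventry" below): recover it and wipe the register so no
         * kernel value can leak to EL0. AArch32 tasks do not use x30, so it
         * is simply zeroed.
         */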
        .if     \el == 0
        .if     \regsize == 64
        mrs     x30, tpidrro_el0
        msr     tpidrro_el0, xzr
        .else
        mov     x30, xzr
        .endif
        .endif
alternative_else_nop_endif
#endif

        sub     sp, sp, #S_FRAME_SIZE
#ifdef CONFIG_VMAP_STACK
        /*
         * Test whether the SP has overflowed, without corrupting a GPR.
         * Task and IRQ stacks are aligned to (1 << THREAD_SHIFT).
         */
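        /*
         * The add/sub sequence below swaps sp and x0 without needing a
         * scratch register: sp temporarily holds sp + x0, and each original
         * value is recovered by subtracting the other back out. Since the
         * stacks are THREAD_SIZE-sized and suitably aligned, a valid SP
         * should always have bit THREAD_SHIFT clear, so tbnz on that bit of
         * the recovered SP detects an overflow into the guard region.
         */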
        add     sp, sp, x0                      // sp' = sp + x0
        sub     x0, sp, x0                      // x0' = sp' - x0 = (sp + x0) - x0 = sp
        tbnz    x0, #THREAD_SHIFT, 0f
        sub     x0, sp, x0                      // x0'' = sp' - x0' = (sp + x0) - sp = x0
        sub     sp, sp, x0                      // sp'' = sp' - x0 = (sp + x0) - x0 = sp
        b       el\()\el\()_\label

0:
        /*
         * Either we've just detected an overflow, or we've taken an exception
         * while on the overflow stack. Either way, we won't return to
         * userspace, and can clobber EL0 registers to free up GPRs.
         */

        /* Stash the original SP (minus S_FRAME_SIZE) in tpidr_el0. */
        msr     tpidr_el0, x0

        /* Recover the original x0 value and stash it in tpidrro_el0 */
        sub     x0, sp, x0
        msr     tpidrro_el0, x0

        /* Switch to the overflow stack */
        adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0

        /*
         * Check whether we were already on the overflow stack. This may happen
         * after panic() re-enables interrupts.
         */
        mrs     x0, tpidr_el0                   // sp of interrupted context
        sub     x0, sp, x0                      // delta with top of overflow stack
        tst     x0, #~(OVERFLOW_STACK_SIZE - 1) // within range?
        b.ne    __bad_stack                     // no? -> bad stack pointer

        /* We were already on the overflow stack. Restore sp/x0 and carry on. */
        sub     sp, sp, x0
        mrs     x0, tpidrro_el0
#endif
        b       el\()\el\()_\label
        .endm

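/*
 * Compute the address of \sym inside the trampoline's fixed virtual alias:
 * TRAMP_VALIAS is where .entry.tramp.text is also mapped in the user page
 * tables, so \dst remains usable after tramp_unmap_kernel has hidden the
 * rest of the kernel.
 */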
        .macro tramp_alias, dst, sym
        mov_q   \dst, TRAMP_VALIAS
        add     \dst, \dst, #(\sym - .entry.tramp.text)
        .endm

        .macro kernel_entry, el, regsize = 64
        .if     \regsize == 32
        mov     w0, w0                          // zero upper 32 bits of x0
        .endif
        stp     x0, x1, [sp, #16 * 0]
        stp     x2, x3, [sp, #16 * 1]
        stp     x4, x5, [sp, #16 * 2]
        stp     x6, x7, [sp, #16 * 3]
        stp     x8, x9, [sp, #16 * 4]
        stp     x10, x11, [sp, #16 * 5]
        stp     x12, x13, [sp, #16 * 6]
        stp     x14, x15, [sp, #16 * 7]
        stp     x16, x17, [sp, #16 * 8]
        stp     x18, x19, [sp, #16 * 9]
        stp     x20, x21, [sp, #16 * 10]
        stp     x22, x23, [sp, #16 * 11]
        stp     x24, x25, [sp, #16 * 12]
        stp     x26, x27, [sp, #16 * 13]
        stp     x28, x29, [sp, #16 * 14]

        .if     \el == 0
        mrs     x21, sp_el0
        ldr_this_cpu    tsk, __entry_task, x20  // Ensure MDSCR_EL1.SS is clear,
        ldr     x19, [tsk, #TSK_TI_FLAGS]       // since we can unmask debug
        disable_step_tsk x19, x20               // exceptions when scheduling.

        mov     x29, xzr                        // fp pointed to user-space
        .else
        add     x21, sp, #S_FRAME_SIZE
        get_thread_info tsk
        /* Save the task's original addr_limit and set USER_DS */
        ldr     x20, [tsk, #TSK_TI_ADDR_LIMIT]
        str     x20, [sp, #S_ORIG_ADDR_LIMIT]
        mov     x20, #USER_DS
        str     x20, [tsk, #TSK_TI_ADDR_LIMIT]
        /* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
        .endif /* \el == 0 */
        mrs     x22, elr_el1
        mrs     x23, spsr_el1
        stp     lr, x21, [sp, #S_LR]

        /*
         * In order to be able to dump the contents of struct pt_regs at the
         * time the exception was taken (in case we attempt to walk the call
         * stack later), chain it together with the stack frames.
         */
        .if     \el == 0
        stp     xzr, xzr, [sp, #S_STACKFRAME]
        .else
        stp     x29, x22, [sp, #S_STACKFRAME]
        .endif
        add     x29, sp, #S_STACKFRAME

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
        /*
         * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
         * EL0, there is no need to check the state of TTBR0_EL1 since
         * accesses are always enabled.
         * Note that the meaning of this bit differs from the ARMv8.1 PAN
         * feature as all TTBR0_EL1 accesses are disabled, not just those to
         * user mappings.
         */
alternative_if ARM64_HAS_PAN
        b       1f                              // skip TTBR0 PAN
alternative_else_nop_endif

        .if     \el != 0
        mrs     x21, ttbr0_el1
        tst     x21, #TTBR_ASID_MASK            // Check for the reserved ASID
        orr     x23, x23, #PSR_PAN_BIT          // Set the emulated PAN in the saved SPSR
        b.eq    1f                              // TTBR0 access already disabled
        and     x23, x23, #~PSR_PAN_BIT         // Clear the emulated PAN in the saved SPSR
        .endif

        __uaccess_ttbr0_disable x21
1:
#endif

        stp     x22, x23, [sp, #S_PC]

        /* Not in a syscall by default (el0_svc overwrites for real syscall) */
        .if     \el == 0
        mov     w21, #NO_SYSCALL
        str     w21, [sp, #S_SYSCALLNO]
        .endif

        /*
         * Set sp_el0 to current thread_info.
         */
        .if     \el == 0
        msr     sp_el0, tsk
        .endif

        /*
         * Registers that may be useful after this macro is invoked:
         *
         * x21 - aborted SP
         * x22 - aborted PC
         * x23 - aborted PSTATE
         */
        .endm

        .macro kernel_exit, el
        .if     \el != 0
        disable_daif

        /* Restore the task's original addr_limit. */
        ldr     x20, [sp, #S_ORIG_ADDR_LIMIT]
        str     x20, [tsk, #TSK_TI_ADDR_LIMIT]

        /* No need to restore UAO, it will be restored from SPSR_EL1 */
        .endif

        ldp     x21, x22, [sp, #S_PC]           // load ELR, SPSR
        .if     \el == 0
        ct_user_enter
        .endif

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
        /*
         * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
         * PAN bit checking.
         */
alternative_if ARM64_HAS_PAN
        b       2f                              // skip TTBR0 PAN
alternative_else_nop_endif

        .if     \el != 0
        tbnz    x22, #22, 1f                    // Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
        .endif

        __uaccess_ttbr0_enable x0, x1

        .if     \el == 0
        /*
         * Enable errata workarounds only if returning to user. The only
         * workaround currently required for TTBR0_EL1 changes is for the
         * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
         * corruption).
         */
        post_ttbr_update_workaround
        .endif
1:
        .if     \el != 0
        and     x22, x22, #~PSR_PAN_BIT         // ARMv8.0 CPUs do not understand this bit
        .endif
2:
#endif

        .if     \el == 0
        ldr     x23, [sp, #S_SP]                // load return stack pointer
        msr     sp_el0, x23
        tst     x22, #PSR_MODE32_BIT            // native task?
        b.eq    3f

#ifdef CONFIG_ARM64_ERRATUM_845719
alternative_if ARM64_WORKAROUND_845719
#ifdef CONFIG_PID_IN_CONTEXTIDR
        mrs     x29, contextidr_el1
        msr     contextidr_el1, x29
#else
        msr     contextidr_el1, xzr
#endif
alternative_else_nop_endif
#endif
3:
        .endif

        msr     elr_el1, x21                    // set up the return data
        msr     spsr_el1, x22
        ldp     x0, x1, [sp, #16 * 0]
        ldp     x2, x3, [sp, #16 * 1]
        ldp     x4, x5, [sp, #16 * 2]
        ldp     x6, x7, [sp, #16 * 3]
        ldp     x8, x9, [sp, #16 * 4]
        ldp     x10, x11, [sp, #16 * 5]
        ldp     x12, x13, [sp, #16 * 6]
        ldp     x14, x15, [sp, #16 * 7]
        ldp     x16, x17, [sp, #16 * 8]
        ldp     x18, x19, [sp, #16 * 9]
        ldp     x20, x21, [sp, #16 * 10]
        ldp     x22, x23, [sp, #16 * 11]
        ldp     x24, x25, [sp, #16 * 12]
        ldp     x26, x27, [sp, #16 * 13]
        ldp     x28, x29, [sp, #16 * 14]
        ldr     lr, [sp, #S_LR]
        add     sp, sp, #S_FRAME_SIZE           // restore sp

        .if     \el == 0
alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
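        /*
         * The flags still hold the PSR_MODE32_BIT test result from above
         * (nothing in between touches NZCV), so bne selects the compat exit.
         * FAR_EL1 is pressed into service as a scratch register to carry x30
         * across the trampoline; tramp_exit restores it with
         * "mrs x30, far_el1".
         */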
        bne     4f
        msr     far_el1, x30
        tramp_alias x30, tramp_exit_native
        br      x30
4:
        tramp_alias x30, tramp_exit_compat
        br      x30
#endif
        .else
        eret
        .endif
        .endm

        .macro irq_stack_entry
        mov     x19, sp                         // preserve the original sp

        /*
         * Compare sp with the base of the task stack.
         * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
         * and should switch to the irq stack.
         */
        ldr     x25, [tsk, TSK_STACK]
        eor     x25, x25, x19
        and     x25, x25, #~(THREAD_SIZE - 1)
        cbnz    x25, 9998f

        ldr_this_cpu x25, irq_stack_ptr, x26
        mov     x26, #IRQ_STACK_SIZE
        add     x26, x25, x26

        /* switch to the irq stack */
        mov     sp, x26
9998:
        .endm

/*
 * x19 should be preserved between irq_stack_entry and
 * irq_stack_exit.
 */
        .macro irq_stack_exit
        mov     sp, x19
        .endm

/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - x0 to x6.
 *
 * x7 is reserved for the system call number in 32-bit mode.
 */
wsc_nr  .req    w25                     // number of system calls
wscno   .req    w26                     // syscall number
xscno   .req    x26                     // syscall number (zero-extended)
stbl    .req    x27                     // syscall table pointer
tsk     .req    x28                     // current thread_info
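/*
 * These aliases deliberately live in x25-x28: AAPCS64 callee-saved
 * registers survive the C functions called (bl) along the syscall path,
 * so wsc_nr, wscno, stbl and tsk stay valid across them.
 */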

/*
 * Interrupt handling.
 */
        .macro irq_handler
        ldr_l   x1, handle_arch_irq
        mov     x0, sp
        irq_stack_entry
        blr     x1
        irq_stack_exit
        .endm

        .text

/*
 * Exception vectors.
 */
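/*
 * The table must be 2KB-aligned (.align 11), and each kernel_ventry
 * expands to one 128-byte (.align 7) slot. The architectural layout is
 * four entries (Synchronous, IRQ, FIQ, SError) for each of: current EL
 * with SP_EL0 (EL1t), current EL with SP_ELx (EL1h), lower EL using
 * AArch64, and lower EL using AArch32.
 */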
        .pushsection ".entry.text", "ax"

        .align  11
ENTRY(vectors)
        kernel_ventry   1, sync_invalid                 // Synchronous EL1t
        kernel_ventry   1, irq_invalid                  // IRQ EL1t
        kernel_ventry   1, fiq_invalid                  // FIQ EL1t
        kernel_ventry   1, error_invalid                // Error EL1t

        kernel_ventry   1, sync                         // Synchronous EL1h
        kernel_ventry   1, irq                          // IRQ EL1h
        kernel_ventry   1, fiq_invalid                  // FIQ EL1h
        kernel_ventry   1, error                        // Error EL1h

        kernel_ventry   0, sync                         // Synchronous 64-bit EL0
        kernel_ventry   0, irq                          // IRQ 64-bit EL0
        kernel_ventry   0, fiq_invalid                  // FIQ 64-bit EL0
        kernel_ventry   0, error                        // Error 64-bit EL0

#ifdef CONFIG_COMPAT
        kernel_ventry   0, sync_compat, 32              // Synchronous 32-bit EL0
        kernel_ventry   0, irq_compat, 32               // IRQ 32-bit EL0
        kernel_ventry   0, fiq_invalid_compat, 32       // FIQ 32-bit EL0
        kernel_ventry   0, error_compat, 32             // Error 32-bit EL0
#else
        kernel_ventry   0, sync_invalid, 32             // Synchronous 32-bit EL0
        kernel_ventry   0, irq_invalid, 32              // IRQ 32-bit EL0
        kernel_ventry   0, fiq_invalid, 32              // FIQ 32-bit EL0
        kernel_ventry   0, error_invalid, 32            // Error 32-bit EL0
#endif
END(vectors)

#ifdef CONFIG_VMAP_STACK
/*
 * We detected an overflow in kernel_ventry, which switched to the
 * overflow stack. Stash the exception regs, and head to our overflow
 * handler.
 */
__bad_stack:
        /* Restore the original x0 value */
        mrs     x0, tpidrro_el0

        /*
         * Store the original GPRs to the new stack. The original SP (minus
         * S_FRAME_SIZE) was stashed in tpidr_el0 by kernel_ventry.
         */
        sub     sp, sp, #S_FRAME_SIZE
        kernel_entry 1
        mrs     x0, tpidr_el0
        add     x0, x0, #S_FRAME_SIZE
        str     x0, [sp, #S_SP]

        /* Stash the regs for handle_bad_stack */
        mov     x0, sp

        /* Time to die */
        bl      handle_bad_stack
        ASM_BUG()
#endif /* CONFIG_VMAP_STACK */

/*
 * Invalid mode handlers
 */
        .macro  inv_entry, el, reason, regsize = 64
        kernel_entry \el, \regsize
        mov     x0, sp
        mov     x1, #\reason
        mrs     x2, esr_el1
        bl      bad_mode
        ASM_BUG()
        .endm

el0_sync_invalid:
        inv_entry 0, BAD_SYNC
ENDPROC(el0_sync_invalid)

el0_irq_invalid:
        inv_entry 0, BAD_IRQ
ENDPROC(el0_irq_invalid)

el0_fiq_invalid:
        inv_entry 0, BAD_FIQ
ENDPROC(el0_fiq_invalid)

el0_error_invalid:
        inv_entry 0, BAD_ERROR
ENDPROC(el0_error_invalid)

#ifdef CONFIG_COMPAT
el0_fiq_invalid_compat:
        inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)
#endif

el1_sync_invalid:
        inv_entry 1, BAD_SYNC
ENDPROC(el1_sync_invalid)

el1_irq_invalid:
        inv_entry 1, BAD_IRQ
ENDPROC(el1_irq_invalid)

el1_fiq_invalid:
        inv_entry 1, BAD_FIQ
ENDPROC(el1_fiq_invalid)

el1_error_invalid:
        inv_entry 1, BAD_ERROR
ENDPROC(el1_error_invalid)

/*
 * EL1 mode handlers.
 */
        .align  6
el1_sync:
        kernel_entry 1
        mrs     x1, esr_el1                     // read the syndrome register
        lsr     x24, x1, #ESR_ELx_EC_SHIFT      // exception class
        cmp     x24, #ESR_ELx_EC_DABT_CUR       // data abort in EL1
        b.eq    el1_da
        cmp     x24, #ESR_ELx_EC_IABT_CUR       // instruction abort in EL1
        b.eq    el1_ia
        cmp     x24, #ESR_ELx_EC_SYS64          // configurable trap
        b.eq    el1_undef
        cmp     x24, #ESR_ELx_EC_SP_ALIGN       // stack alignment exception
        b.eq    el1_sp_pc
        cmp     x24, #ESR_ELx_EC_PC_ALIGN       // pc alignment exception
        b.eq    el1_sp_pc
        cmp     x24, #ESR_ELx_EC_UNKNOWN        // unknown exception in EL1
        b.eq    el1_undef
        cmp     x24, #ESR_ELx_EC_BREAKPT_CUR    // debug exception in EL1
        b.ge    el1_dbg
        b       el1_inv

el1_ia:
        /*
         * Fall through to the Data abort case
         */
el1_da:
        /*
         * Data abort handling
         */
        mrs     x3, far_el1
        inherit_daif pstate=x23, tmp=x2
        clear_address_tag x0, x3
        mov     x2, sp                          // struct pt_regs
        bl      do_mem_abort

        kernel_exit 1
el1_sp_pc:
        /*
         * Stack or PC alignment exception handling
         */
        mrs     x0, far_el1
        inherit_daif pstate=x23, tmp=x2
        mov     x2, sp
        bl      do_sp_pc_abort
        ASM_BUG()
el1_undef:
        /*
         * Undefined instruction
         */
        inherit_daif pstate=x23, tmp=x2
        mov     x0, sp
        bl      do_undefinstr
        ASM_BUG()
el1_dbg:
        /*
         * Debug exception handling
         */
        cmp     x24, #ESR_ELx_EC_BRK64          // if BRK64
        cinc    x24, x24, eq                    // set bit '0'
        tbz     x24, #0, el1_inv                // EL1 only
        mrs     x0, far_el1
        mov     x2, sp                          // struct pt_regs
        bl      do_debug_exception
        kernel_exit 1
el1_inv:
        // TODO: add support for undefined instructions in kernel mode
        inherit_daif pstate=x23, tmp=x2
        mov     x0, sp
        mov     x2, x1
        mov     x1, #BAD_SYNC
        bl      bad_mode
        ASM_BUG()
ENDPROC(el1_sync)

        .align  6
el1_irq:
        kernel_entry 1
        enable_da_f
#ifdef CONFIG_TRACE_IRQFLAGS
        bl      trace_hardirqs_off
#endif

        irq_handler

#ifdef CONFIG_PREEMPT
        ldr     w24, [tsk, #TSK_TI_PREEMPT]     // get preempt count
        cbnz    w24, 1f                         // preempt count != 0
        ldr     x0, [tsk, #TSK_TI_FLAGS]        // get flags
        tbz     x0, #TIF_NEED_RESCHED, 1f       // needs rescheduling?
        bl      el1_preempt
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
        bl      trace_hardirqs_on
#endif
        kernel_exit 1
ENDPROC(el1_irq)

#ifdef CONFIG_PREEMPT
el1_preempt:
        mov     x24, lr
1:      bl      preempt_schedule_irq            // irq en/disable is done inside
        ldr     x0, [tsk, #TSK_TI_FLAGS]        // get new task's TI_FLAGS
        tbnz    x0, #TIF_NEED_RESCHED, 1b       // needs rescheduling?
        ret     x24
#endif

/*
 * EL0 mode handlers.
 */
        .align  6
el0_sync:
        kernel_entry 0
        mrs     x25, esr_el1                    // read the syndrome register
        lsr     x24, x25, #ESR_ELx_EC_SHIFT     // exception class
        cmp     x24, #ESR_ELx_EC_SVC64          // SVC in 64-bit state
        b.eq    el0_svc
        cmp     x24, #ESR_ELx_EC_DABT_LOW       // data abort in EL0
        b.eq    el0_da
        cmp     x24, #ESR_ELx_EC_IABT_LOW       // instruction abort in EL0
        b.eq    el0_ia
        cmp     x24, #ESR_ELx_EC_FP_ASIMD       // FP/ASIMD access
        b.eq    el0_fpsimd_acc
        cmp     x24, #ESR_ELx_EC_SVE            // SVE access
        b.eq    el0_sve_acc
        cmp     x24, #ESR_ELx_EC_FP_EXC64       // FP/ASIMD exception
        b.eq    el0_fpsimd_exc
        cmp     x24, #ESR_ELx_EC_SYS64          // configurable trap
        b.eq    el0_sys
        cmp     x24, #ESR_ELx_EC_SP_ALIGN       // stack alignment exception
        b.eq    el0_sp_pc
        cmp     x24, #ESR_ELx_EC_PC_ALIGN       // pc alignment exception
        b.eq    el0_sp_pc
        cmp     x24, #ESR_ELx_EC_UNKNOWN        // unknown exception in EL0
        b.eq    el0_undef
        cmp     x24, #ESR_ELx_EC_BREAKPT_LOW    // debug exception in EL0
        b.ge    el0_dbg
        b       el0_inv

#ifdef CONFIG_COMPAT
        .align  6
el0_sync_compat:
        kernel_entry 0, 32
        mrs     x25, esr_el1                    // read the syndrome register
        lsr     x24, x25, #ESR_ELx_EC_SHIFT     // exception class
        cmp     x24, #ESR_ELx_EC_SVC32          // SVC in 32-bit state
        b.eq    el0_svc_compat
        cmp     x24, #ESR_ELx_EC_DABT_LOW       // data abort in EL0
        b.eq    el0_da
        cmp     x24, #ESR_ELx_EC_IABT_LOW       // instruction abort in EL0
        b.eq    el0_ia
        cmp     x24, #ESR_ELx_EC_FP_ASIMD       // FP/ASIMD access
        b.eq    el0_fpsimd_acc
        cmp     x24, #ESR_ELx_EC_FP_EXC32       // FP/ASIMD exception
        b.eq    el0_fpsimd_exc
        cmp     x24, #ESR_ELx_EC_PC_ALIGN       // pc alignment exception
        b.eq    el0_sp_pc
        cmp     x24, #ESR_ELx_EC_UNKNOWN        // unknown exception in EL0
        b.eq    el0_undef
        cmp     x24, #ESR_ELx_EC_CP15_32        // CP15 MRC/MCR trap
        b.eq    el0_undef
        cmp     x24, #ESR_ELx_EC_CP15_64        // CP15 MRRC/MCRR trap
        b.eq    el0_undef
        cmp     x24, #ESR_ELx_EC_CP14_MR        // CP14 MRC/MCR trap
        b.eq    el0_undef
        cmp     x24, #ESR_ELx_EC_CP14_LS        // CP14 LDC/STC trap
        b.eq    el0_undef
        cmp     x24, #ESR_ELx_EC_CP14_64        // CP14 MRRC/MCRR trap
        b.eq    el0_undef
        cmp     x24, #ESR_ELx_EC_BREAKPT_LOW    // debug exception in EL0
        b.ge    el0_dbg
        b       el0_inv
el0_svc_compat:
        /*
         * AArch32 syscall handling
         */
        ldr     x16, [tsk, #TSK_TI_FLAGS]       // load thread flags
        adrp    stbl, compat_sys_call_table     // load compat syscall table pointer
        mov     wscno, w7                       // syscall number in w7 (r7)
        mov     wsc_nr, #__NR_compat_syscalls
        b       el0_svc_naked

        .align  6
el0_irq_compat:
        kernel_entry 0, 32
        b       el0_irq_naked

el0_error_compat:
        kernel_entry 0, 32
        b       el0_error_naked
#endif

el0_da:
        /*
         * Data abort handling
         */
        mrs     x26, far_el1
        enable_daif
        ct_user_exit
        clear_address_tag x0, x26
        mov     x1, x25
        mov     x2, sp
        bl      do_mem_abort
        b       ret_to_user
el0_ia:
        /*
         * Instruction abort handling
         */
        mrs     x26, far_el1
        enable_daif
        ct_user_exit
        mov     x0, x26
        mov     x1, x25
        mov     x2, sp
        bl      do_mem_abort
        b       ret_to_user
el0_fpsimd_acc:
        /*
         * Floating Point or Advanced SIMD access
         */
        enable_daif
        ct_user_exit
        mov     x0, x25
        mov     x1, sp
        bl      do_fpsimd_acc
        b       ret_to_user
el0_sve_acc:
        /*
         * Scalable Vector Extension access
         */
        enable_daif
        ct_user_exit
        mov     x0, x25
        mov     x1, sp
        bl      do_sve_acc
        b       ret_to_user
el0_fpsimd_exc:
        /*
         * Floating Point, Advanced SIMD or SVE exception
         */
        enable_daif
        ct_user_exit
        mov     x0, x25
        mov     x1, sp
        bl      do_fpsimd_exc
        b       ret_to_user
el0_sp_pc:
        /*
         * Stack or PC alignment exception handling
         */
        mrs     x26, far_el1
        enable_daif
        ct_user_exit
        mov     x0, x26
        mov     x1, x25
        mov     x2, sp
        bl      do_sp_pc_abort
        b       ret_to_user
el0_undef:
        /*
         * Undefined instruction
         */
        enable_daif
        ct_user_exit
        mov     x0, sp
        bl      do_undefinstr
        b       ret_to_user
el0_sys:
        /*
         * System instructions, for trapped cache maintenance instructions
         */
        enable_daif
        ct_user_exit
        mov     x0, x25
        mov     x1, sp
        bl      do_sysinstr
        b       ret_to_user
el0_dbg:
        /*
         * Debug exception handling
         */
        tbnz    x24, #0, el0_inv                // EL0 only
        mrs     x0, far_el1
        mov     x1, x25
        mov     x2, sp
        bl      do_debug_exception
        enable_daif
        ct_user_exit
        b       ret_to_user
el0_inv:
        enable_daif
        ct_user_exit
        mov     x0, sp
        mov     x1, #BAD_SYNC
        mov     x2, x25
        bl      bad_el0_sync
        b       ret_to_user
ENDPROC(el0_sync)

        .align  6
el0_irq:
        kernel_entry 0
el0_irq_naked:
        enable_da_f
#ifdef CONFIG_TRACE_IRQFLAGS
        bl      trace_hardirqs_off
#endif

        ct_user_exit
        irq_handler

#ifdef CONFIG_TRACE_IRQFLAGS
        bl      trace_hardirqs_on
#endif
        b       ret_to_user
ENDPROC(el0_irq)

el1_error:
        kernel_entry 1
        mrs     x1, esr_el1
        enable_dbg
        mov     x0, sp
        bl      do_serror
        kernel_exit 1
ENDPROC(el1_error)

el0_error:
        kernel_entry 0
el0_error_naked:
        mrs     x1, esr_el1
        enable_dbg
        mov     x0, sp
        bl      do_serror
        enable_daif
        ct_user_exit
        b       ret_to_user
ENDPROC(el0_error)


/*
 * This is the fast syscall return path. We do as little as possible here,
 * and this includes saving x0 back into the kernel stack.
 */
ret_fast_syscall:
        disable_daif
        str     x0, [sp, #S_X0]                 // returned x0
        ldr     x1, [tsk, #TSK_TI_FLAGS]        // re-check for syscall tracing
        and     x2, x1, #_TIF_SYSCALL_WORK
        cbnz    x2, ret_fast_syscall_trace
        and     x2, x1, #_TIF_WORK_MASK
        cbnz    x2, work_pending
        enable_step_tsk x1, x2
        kernel_exit 0
ret_fast_syscall_trace:
        enable_daif
        b       __sys_trace_return_skipped      // we already saved x0

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
work_pending:
        mov     x0, sp                          // 'regs'
        bl      do_notify_resume
#ifdef CONFIG_TRACE_IRQFLAGS
        bl      trace_hardirqs_on               // enabled while in userspace
#endif
        ldr     x1, [tsk, #TSK_TI_FLAGS]        // re-check for single-step
        b       finish_ret_to_user
/*
 * "slow" syscall return path.
 */
ret_to_user:
        disable_daif
        ldr     x1, [tsk, #TSK_TI_FLAGS]
        and     x2, x1, #_TIF_WORK_MASK
        cbnz    x2, work_pending
finish_ret_to_user:
        enable_step_tsk x1, x2
        kernel_exit 0
ENDPROC(ret_to_user)

/*
 * SVC handler.
 */
        .align  6
el0_svc:
        ldr     x16, [tsk, #TSK_TI_FLAGS]       // load thread flags
        adrp    stbl, sys_call_table            // load syscall table pointer
        mov     wscno, w8                       // syscall number in w8
        mov     wsc_nr, #__NR_syscalls

#ifdef CONFIG_ARM64_SVE
alternative_if_not ARM64_SVE
        b       el0_svc_naked
alternative_else_nop_endif
        tbz     x16, #TIF_SVE, el0_svc_naked    // Skip unless TIF_SVE set:
        bic     x16, x16, #_TIF_SVE             // discard SVE state
        str     x16, [tsk, #TSK_TI_FLAGS]

        /*
         * task_fpsimd_load() won't be called to update CPACR_EL1 in
         * ret_to_user unless TIF_FOREIGN_FPSTATE is still set, which only
         * happens if a context switch or kernel_neon_begin() or context
         * modification (sigreturn, ptrace) intervenes.
         * So, ensure that CPACR_EL1 is already correct for the fast-path case:
         */
        mrs     x9, cpacr_el1
        bic     x9, x9, #CPACR_EL1_ZEN_EL0EN    // disable SVE for el0
        msr     cpacr_el1, x9                   // synchronised by eret to el0
#endif

el0_svc_naked:                                  // compat entry point
        stp     x0, xscno, [sp, #S_ORIG_X0]     // save the original x0 and syscall number
        enable_daif
        ct_user_exit 1

        tst     x16, #_TIF_SYSCALL_WORK         // check for syscall hooks
        b.ne    __sys_trace
        cmp     wscno, wsc_nr                   // check upper syscall limit
        b.hs    ni_sys
        ldr     x16, [stbl, xscno, lsl #3]      // address in the syscall table
        blr     x16                             // call sys_* routine
        b       ret_fast_syscall
ni_sys:
        mov     x0, sp
        bl      do_ni_syscall
        b       ret_fast_syscall
ENDPROC(el0_svc)

/*
 * This is the really slow path. We're going to be doing context
 * switches, and waiting for our parent to respond.
 */
__sys_trace:
        cmp     wscno, #NO_SYSCALL              // user-issued syscall(-1)?
        b.ne    1f
        mov     x0, #-ENOSYS                    // set default errno if so
        str     x0, [sp, #S_X0]
1:      mov     x0, sp
        bl      syscall_trace_enter
        cmp     w0, #NO_SYSCALL                 // skip the syscall?
        b.eq    __sys_trace_return_skipped
        mov     wscno, w0                       // syscall number (possibly new)
        mov     x1, sp                          // pointer to regs
        cmp     wscno, wsc_nr                   // check upper syscall limit
        b.hs    __ni_sys_trace
        ldp     x0, x1, [sp]                    // restore the syscall args
        ldp     x2, x3, [sp, #S_X2]
        ldp     x4, x5, [sp, #S_X4]
        ldp     x6, x7, [sp, #S_X6]
        ldr     x16, [stbl, xscno, lsl #3]      // address in the syscall table
        blr     x16                             // call sys_* routine

__sys_trace_return:
        str     x0, [sp, #S_X0]                 // save returned x0
__sys_trace_return_skipped:
        mov     x0, sp
        bl      syscall_trace_exit
        b       ret_to_user

__ni_sys_trace:
        mov     x0, sp
        bl      do_ni_syscall
        b       __sys_trace_return

        .popsection                             // .entry.text

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
/*
 * Exception vectors trampoline.
 */
        .pushsection ".entry.tramp.text", "ax"

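/*
 * Switch TTBR1_EL1 from the trampoline page tables, which map little more
 * than this section, to the full swapper page tables; the two are expected
 * to be laid out SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE apart in the
 * kernel image. Clearing USER_ASID_FLAG selects the kernel ASID;
 * tramp_unmap_kernel below performs the inverse.
 */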
        .macro tramp_map_kernel, tmp
        mrs     \tmp, ttbr1_el1
        sub     \tmp, \tmp, #(SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
        bic     \tmp, \tmp, #USER_ASID_FLAG
        msr     ttbr1_el1, \tmp
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
        /* ASID already in \tmp[63:48] */
        movk    \tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
        movk    \tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
        /* 2MB boundary containing the vectors, so we nobble the walk cache */
        movk    \tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
        isb
        tlbi    vae1, \tmp
        dsb     nsh
alternative_else_nop_endif
#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
        .endm

        .macro tramp_unmap_kernel, tmp
        mrs     \tmp, ttbr1_el1
        add     \tmp, \tmp, #(SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
        orr     \tmp, \tmp, #USER_ASID_FLAG
        msr     ttbr1_el1, \tmp
        /*
         * We avoid running the post_ttbr_update_workaround here because
         * it's only needed by Cavium ThunderX, which requires KPTI to be
         * disabled.
         */
        .endm

        .macro tramp_ventry, regsize = 64
        .align  7
1:
        .if     \regsize == 64
        msr     tpidrro_el0, x30                // Restored in kernel_ventry
        .endif
        /*
         * Defend against branch aliasing attacks by pushing a dummy
         * entry onto the return stack and using a RET instruction to
         * enter the full-fat kernel vectors.
         */
        bl      2f
        b       .
2:
        tramp_map_kernel x30
#ifdef CONFIG_RANDOMIZE_BASE
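        /*
         * With KASLR the runtime address of "vectors" cannot be baked in as
         * a literal here; it is instead loaded from __entry_tramp_data_start
         * (defined in .rodata below), which is assumed to be mapped in the
         * page immediately following the trampoline text.
         */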
        adr     x30, tramp_vectors + PAGE_SIZE
alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
        ldr     x30, [x30]
#else
        ldr     x30, =vectors
#endif
        prfm    plil1strm, [x30, #(1b - tramp_vectors)]
        msr     vbar_el1, x30
        add     x30, x30, #(1b - tramp_vectors)
        isb
        ret
        .endm

        .macro tramp_exit, regsize = 64
        adr     x30, tramp_vectors
        msr     vbar_el1, x30
        tramp_unmap_kernel x30
        .if     \regsize == 64
        mrs     x30, far_el1
        .endif
        eret
        .endm

        .align  11
ENTRY(tramp_vectors)
        .space  0x400

        tramp_ventry
        tramp_ventry
        tramp_ventry
        tramp_ventry

        tramp_ventry    32
        tramp_ventry    32
        tramp_ventry    32
        tramp_ventry    32
END(tramp_vectors)

ENTRY(tramp_exit_native)
        tramp_exit
END(tramp_exit_native)

ENTRY(tramp_exit_compat)
        tramp_exit      32
END(tramp_exit_compat)

        .ltorg
        .popsection                             // .entry.tramp.text
#ifdef CONFIG_RANDOMIZE_BASE
        .pushsection ".rodata", "a"
        .align PAGE_SHIFT
        .globl  __entry_tramp_data_start
__entry_tramp_data_start:
        .quad   vectors
        .popsection                             // .rodata
#endif /* CONFIG_RANDOMIZE_BASE */
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */

/*
 * Special system call wrappers.
 */
ENTRY(sys_rt_sigreturn_wrapper)
        mov     x0, sp
        b       sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 * x0 = previous task_struct (must be preserved across the switch)
 * x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 *
 */
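/*
 * Note that sp_el0 holds the 'current' task pointer while in the kernel
 * (kernel_entry re-establishes it on EL0 exception entry), so the
 * "msr sp_el0, x1" below is what actually makes the next task current.
 */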
ENTRY(cpu_switch_to)
        mov     x10, #THREAD_CPU_CONTEXT
        add     x8, x0, x10
        mov     x9, sp
        stp     x19, x20, [x8], #16             // store callee-saved registers
        stp     x21, x22, [x8], #16
        stp     x23, x24, [x8], #16
        stp     x25, x26, [x8], #16
        stp     x27, x28, [x8], #16
        stp     x29, x9, [x8], #16
        str     lr, [x8]
        add     x8, x1, x10
        ldp     x19, x20, [x8], #16             // restore callee-saved registers
        ldp     x21, x22, [x8], #16
        ldp     x23, x24, [x8], #16
        ldp     x25, x26, [x8], #16
        ldp     x27, x28, [x8], #16
        ldp     x29, x9, [x8], #16
        ldr     lr, [x8]
        mov     sp, x9
        msr     sp_el0, x1
        ret
ENDPROC(cpu_switch_to)
NOKPROBE(cpu_switch_to)

/*
 * This is how we return from a fork.
 */
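/*
 * x19/x20 are the values that copy_thread() planted in the new task's
 * cpu_context and that cpu_switch_to just restored: for a kernel thread,
 * x19 is the thread function and x20 its argument; for a user task, x19
 * is zero and we fall through to ret_to_user.
 */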
ENTRY(ret_from_fork)
        bl      schedule_tail
        cbz     x19, 1f                         // not a kernel thread
        mov     x0, x20
        blr     x19
1:      get_thread_info tsk
        b       ret_to_user
ENDPROC(ret_from_fork)
NOKPROBE(ret_from_fork)