/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/irq.h>
#include <asm/memory.h>
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-uaccess.h>
#include <asm/unistd.h>

/*
 * Context tracking subsystem. Used to instrument transitions
 * between user and kernel mode.
 */
	.macro ct_user_exit, syscall = 0
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_exit
	.if	\syscall == 1
	/*
	 * Save/restore needed during syscalls. Restore syscall arguments from
	 * the values already saved on stack during kernel_entry.
	 */
	ldp	x0, x1, [sp]
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	.endif
#endif
	.endm

	.macro ct_user_enter
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_enter
#endif
	.endm

/*
 * Bad Abort numbers
 *-----------------
 */
#define BAD_SYNC	0
#define BAD_IRQ		1
#define BAD_FIQ		2
#define BAD_ERROR	3

	.macro kernel_ventry, el, label, regsize = 64
	.align 7
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	.if	\el == 0
	.if	\regsize == 64
	mrs	x30, tpidrro_el0
	msr	tpidrro_el0, xzr
	.else
	mov	x30, xzr
	.endif
	.endif
#endif

	sub	sp, sp, #S_FRAME_SIZE
#ifdef CONFIG_VMAP_STACK
	/*
	 * Test whether the SP has overflowed, without corrupting a GPR.
	 * Task and IRQ stacks are aligned to (1 << THREAD_SHIFT).
	 */
	add	sp, sp, x0			// sp' = sp + x0
	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
	tbnz	x0, #THREAD_SHIFT, 0f
	sub	x0, sp, x0			// x0'' = sp' - x0' = (sp + x0) - sp = x0
	sub	sp, sp, x0			// sp'' = sp' - x0 = (sp + x0) - x0 = sp
	b	el\()\el\()_\label

0:
	/*
	 * Either we've just detected an overflow, or we've taken an exception
	 * while on the overflow stack. Either way, we won't return to
	 * userspace, and can clobber EL0 registers to free up GPRs.
	 */

	/* Stash the original SP (minus S_FRAME_SIZE) in tpidr_el0. */
	msr	tpidr_el0, x0

	/* Recover the original x0 value and stash it in tpidrro_el0 */
	sub	x0, sp, x0
	msr	tpidrro_el0, x0

	/* Switch to the overflow stack */
	adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0

	/*
	 * Check whether we were already on the overflow stack. This may happen
	 * after panic() re-enables interrupts.
	 */
	mrs	x0, tpidr_el0			// sp of interrupted context
	sub	x0, sp, x0			// delta with top of overflow stack
	tst	x0, #~(OVERFLOW_STACK_SIZE - 1)	// within range?
	b.ne	__bad_stack			// no? -> bad stack pointer

	/* We were already on the overflow stack. Restore sp/x0 and carry on. */
	sub	sp, sp, x0
	mrs	x0, tpidrro_el0
#endif
	b	el\()\el\()_\label
	.endm

	.macro tramp_alias, dst, sym
	mov_q	\dst, TRAMP_VALIAS
	add	\dst, \dst, #(\sym - .entry.tramp.text)
	.endm

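/*
 * Note: tramp_alias computes the address of \sym within the trampoline's
 * alias mapping. TRAMP_VALIAS is the virtual address at which
 * .entry.tramp.text is also mapped in the user page tables, so adding the
 * symbol's offset from the section start yields an address that remains
 * valid after the rest of the kernel has been unmapped at EL0.
 */
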
	.macro kernel_entry, el, regsize = 64
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	stp	x0, x1, [sp, #16 * 0]
	stp	x2, x3, [sp, #16 * 1]
	stp	x4, x5, [sp, #16 * 2]
	stp	x6, x7, [sp, #16 * 3]
	stp	x8, x9, [sp, #16 * 4]
	stp	x10, x11, [sp, #16 * 5]
	stp	x12, x13, [sp, #16 * 6]
	stp	x14, x15, [sp, #16 * 7]
	stp	x16, x17, [sp, #16 * 8]
	stp	x18, x19, [sp, #16 * 9]
	stp	x20, x21, [sp, #16 * 10]
	stp	x22, x23, [sp, #16 * 11]
	stp	x24, x25, [sp, #16 * 12]
	stp	x26, x27, [sp, #16 * 13]
	stp	x28, x29, [sp, #16 * 14]

	.if	\el == 0
	mrs	x21, sp_el0
	ldr_this_cpu	tsk, __entry_task, x20	// Ensure MDSCR_EL1.SS is clear,
	ldr	x19, [tsk, #TSK_TI_FLAGS]	// since we can unmask debug
	disable_step_tsk x19, x20		// exceptions when scheduling.

	mov	x29, xzr			// fp pointed to user-space
	.else
	add	x21, sp, #S_FRAME_SIZE
	get_thread_info tsk
	/* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
	ldr	x20, [tsk, #TSK_TI_ADDR_LIMIT]
	str	x20, [sp, #S_ORIG_ADDR_LIMIT]
	mov	x20, #TASK_SIZE_64
	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
	/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
	.endif /* \el == 0 */
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]

	/*
	 * In order to be able to dump the contents of struct pt_regs at the
	 * time the exception was taken (in case we attempt to walk the call
	 * stack later), chain it together with the stack frames.
	 */
	.if	\el == 0
	stp	xzr, xzr, [sp, #S_STACKFRAME]
	.else
	stp	x29, x22, [sp, #S_STACKFRAME]
	.endif
	add	x29, sp, #S_STACKFRAME

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
	 * EL0, there is no need to check the state of TTBR0_EL1 since
	 * accesses are always enabled.
	 * Note that the meaning of this bit differs from the ARMv8.1 PAN
	 * feature as all TTBR0_EL1 accesses are disabled, not just those to
	 * user mappings.
	 */
alternative_if ARM64_HAS_PAN
	b	1f				// skip TTBR0 PAN
alternative_else_nop_endif

	.if	\el != 0
	mrs	x21, ttbr1_el1
	tst	x21, #0xffff << 48		// Check for the reserved ASID
	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
	b.eq	1f				// TTBR0 access already disabled
	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
	.endif

	__uaccess_ttbr0_disable x21
1:
#endif

	stp	x22, x23, [sp, #S_PC]

	/* Not in a syscall by default (el0_svc overwrites for real syscall) */
	.if	\el == 0
	mov	w21, #NO_SYSCALL
	str	w21, [sp, #S_SYSCALLNO]
	.endif

	/*
	 * Set sp_el0 to current thread_info.
	 */
	.if	\el == 0
	msr	sp_el0, tsk
	.endif

	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	 */
	.endm

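/*
 * Note: the S_* offsets used above (S_X2, S_LR, S_PC, ...) are generated
 * by asm-offsets.c from struct pt_regs, so the frame that kernel_entry
 * builds on the stack is exactly a struct pt_regs; the C handlers below
 * receive sp as a struct pt_regs pointer.
 */
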
	.macro kernel_exit, el
	.if	\el != 0
	disable_daif

	/* Restore the task's original addr_limit. */
	ldr	x20, [sp, #S_ORIG_ADDR_LIMIT]
	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]

	/* No need to restore UAO, it will be restored from SPSR_EL1 */
	.endif

	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
	.if	\el == 0
	ct_user_enter
	.endif

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
	 * PAN bit checking.
	 */
alternative_if ARM64_HAS_PAN
	b	2f				// skip TTBR0 PAN
alternative_else_nop_endif

	.if	\el != 0
	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
	.endif

	__uaccess_ttbr0_enable x0, x1

	.if	\el == 0
	/*
	 * Enable errata workarounds only if returning to user. The only
	 * workaround currently required for TTBR0_EL1 changes are for the
	 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
	 * corruption).
	 */
	post_ttbr_update_workaround
	.endif
1:
	.if	\el != 0
	and	x22, x22, #~PSR_PAN_BIT		// ARMv8.0 CPUs do not understand this bit
	.endif
2:
#endif

	.if	\el == 0
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	msr	sp_el0, x23
	tst	x22, #PSR_MODE32_BIT		// native task?
	b.eq	3f

#ifdef CONFIG_ARM64_ERRATUM_845719
alternative_if ARM64_WORKAROUND_845719
#ifdef CONFIG_PID_IN_CONTEXTIDR
	mrs	x29, contextidr_el1
	msr	contextidr_el1, x29
#else
	msr	contextidr_el1, xzr
#endif
alternative_else_nop_endif
#endif
3:
	.endif

	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	ldp	x0, x1, [sp, #16 * 0]
	ldp	x2, x3, [sp, #16 * 1]
	ldp	x4, x5, [sp, #16 * 2]
	ldp	x6, x7, [sp, #16 * 3]
	ldp	x8, x9, [sp, #16 * 4]
	ldp	x10, x11, [sp, #16 * 5]
	ldp	x12, x13, [sp, #16 * 6]
	ldp	x14, x15, [sp, #16 * 7]
	ldp	x16, x17, [sp, #16 * 8]
	ldp	x18, x19, [sp, #16 * 9]
	ldp	x20, x21, [sp, #16 * 10]
	ldp	x22, x23, [sp, #16 * 11]
	ldp	x24, x25, [sp, #16 * 12]
	ldp	x26, x27, [sp, #16 * 13]
	ldp	x28, x29, [sp, #16 * 14]
	ldr	lr, [sp, #S_LR]
	add	sp, sp, #S_FRAME_SIZE		// restore sp

#ifndef CONFIG_UNMAP_KERNEL_AT_EL0
	eret
#else
	.if	\el == 0
	bne	4f
	msr	far_el1, x30
	tramp_alias	x30, tramp_exit_native
	br	x30
4:
	tramp_alias	x30, tramp_exit_compat
	br	x30
	.else
	eret
	.endif
#endif
	.endm

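/*
 * Note on the KPTI exit path above: x30 already holds the user lr
 * (reloaded from S_LR), but tramp_exit_native needs a scratch register.
 * kernel_exit therefore parks x30 in FAR_EL1, which is not otherwise live
 * on this path, and tramp_exit restores it just before the final eret.
 */
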
	.macro irq_stack_entry
	mov	x19, sp				// preserve the original sp

	/*
	 * Compare sp with the base of the task stack.
	 * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
	 * and should switch to the irq stack.
	 */
	ldr	x25, [tsk, TSK_STACK]
	eor	x25, x25, x19
	and	x25, x25, #~(THREAD_SIZE - 1)
	cbnz	x25, 9998f

	ldr_this_cpu x25, irq_stack_ptr, x26
	mov	x26, #IRQ_STACK_SIZE
	add	x26, x25, x26

	/* switch to the irq stack */
	mov	sp, x26
9998:
	.endm

	/*
	 * x19 should be preserved between irq_stack_entry and
	 * irq_stack_exit.
	 */
	.macro irq_stack_exit
	mov	sp, x19
	.endm

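/*
 * x19 can carry the original sp across the C interrupt handler because it
 * is callee-saved in the AArch64 procedure call standard, so any handler
 * invoked between irq_stack_entry and irq_stack_exit must preserve it.
 */
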
/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - x0 to x6.
 *
 * x7 is reserved for the system call number in 32-bit mode.
 */
wsc_nr	.req	w25		// number of system calls
wscno	.req	w26		// syscall number
xscno	.req	x26		// syscall number (zero-extended)
stbl	.req	x27		// syscall table pointer
tsk	.req	x28		// current thread_info

/*
 * Interrupt handling.
 */
	.macro irq_handler
	ldr_l	x1, handle_arch_irq
	mov	x0, sp
	irq_stack_entry
	blr	x1
	irq_stack_exit
	.endm

	.text

/*
 * Exception vectors.
 */
	.pushsection ".entry.text", "ax"

	.align	11
ENTRY(vectors)
	kernel_ventry	1, sync_invalid			// Synchronous EL1t
	kernel_ventry	1, irq_invalid			// IRQ EL1t
	kernel_ventry	1, fiq_invalid			// FIQ EL1t
	kernel_ventry	1, error_invalid		// Error EL1t

	kernel_ventry	1, sync				// Synchronous EL1h
	kernel_ventry	1, irq				// IRQ EL1h
	kernel_ventry	1, fiq_invalid			// FIQ EL1h
	kernel_ventry	1, error			// Error EL1h

	kernel_ventry	0, sync				// Synchronous 64-bit EL0
	kernel_ventry	0, irq				// IRQ 64-bit EL0
	kernel_ventry	0, fiq_invalid			// FIQ 64-bit EL0
	kernel_ventry	0, error			// Error 64-bit EL0

#ifdef CONFIG_COMPAT
	kernel_ventry	0, sync_compat, 32		// Synchronous 32-bit EL0
	kernel_ventry	0, irq_compat, 32		// IRQ 32-bit EL0
	kernel_ventry	0, fiq_invalid_compat, 32	// FIQ 32-bit EL0
	kernel_ventry	0, error_compat, 32		// Error 32-bit EL0
#else
	kernel_ventry	0, sync_invalid, 32		// Synchronous 32-bit EL0
	kernel_ventry	0, irq_invalid, 32		// IRQ 32-bit EL0
	kernel_ventry	0, fiq_invalid, 32		// FIQ 32-bit EL0
	kernel_ventry	0, error_invalid, 32		// Error 32-bit EL0
#endif
END(vectors)

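/*
 * Layout note: ".align 11" places the table on the 2KB boundary that
 * VBAR_EL1 requires, and the ".align 7" inside kernel_ventry gives each
 * of the 16 entries its architectural 128-byte slot (4 exception types
 * for each of 4 source states: EL1t, EL1h, 64-bit EL0, 32-bit EL0).
 */
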
#ifdef CONFIG_VMAP_STACK
	/*
	 * We detected an overflow in kernel_ventry, which switched to the
	 * overflow stack. Stash the exception regs, and head to our overflow
	 * handler.
	 */
__bad_stack:
	/* Restore the original x0 value */
	mrs	x0, tpidrro_el0

	/*
	 * Store the original GPRs to the new stack. The original SP (minus
	 * S_FRAME_SIZE) was stashed in tpidr_el0 by kernel_ventry.
	 */
	sub	sp, sp, #S_FRAME_SIZE
	kernel_entry 1
	mrs	x0, tpidr_el0
	add	x0, x0, #S_FRAME_SIZE
	str	x0, [sp, #S_SP]

	/* Stash the regs for handle_bad_stack */
	mov	x0, sp

	/* Time to die */
	bl	handle_bad_stack
	ASM_BUG()
#endif /* CONFIG_VMAP_STACK */

/*
 * Invalid mode handlers
 */
	.macro inv_entry, el, reason, regsize = 64
	kernel_entry \el, \regsize
	mov	x0, sp
	mov	x1, #\reason
	mrs	x2, esr_el1
	bl	bad_mode
	ASM_BUG()
	.endm

el0_sync_invalid:
	inv_entry 0, BAD_SYNC
ENDPROC(el0_sync_invalid)

el0_irq_invalid:
	inv_entry 0, BAD_IRQ
ENDPROC(el0_irq_invalid)

el0_fiq_invalid:
	inv_entry 0, BAD_FIQ
ENDPROC(el0_fiq_invalid)

el0_error_invalid:
	inv_entry 0, BAD_ERROR
ENDPROC(el0_error_invalid)

#ifdef CONFIG_COMPAT
el0_fiq_invalid_compat:
	inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)
#endif

el1_sync_invalid:
	inv_entry 1, BAD_SYNC
ENDPROC(el1_sync_invalid)

el1_irq_invalid:
	inv_entry 1, BAD_IRQ
ENDPROC(el1_irq_invalid)

el1_fiq_invalid:
	inv_entry 1, BAD_FIQ
ENDPROC(el1_fiq_invalid)

el1_error_invalid:
	inv_entry 1, BAD_ERROR
ENDPROC(el1_error_invalid)

/*
 * EL1 mode handlers.
 */
	.align	6
el1_sync:
	kernel_entry 1
	mrs	x1, esr_el1			// read the syndrome register
	lsr	x24, x1, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_DABT_CUR	// data abort in EL1
	b.eq	el1_da
	cmp	x24, #ESR_ELx_EC_IABT_CUR	// instruction abort in EL1
	b.eq	el1_ia
	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
	b.eq	el1_undef
	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL1
	b.eq	el1_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_CUR	// debug exception in EL1
	b.ge	el1_dbg
	b	el1_inv
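	/*
	 * The b.ge above relies on the debug-related exception classes
	 * occupying the top of the EC encoding space: any class greater
	 * than or equal to ESR_ELx_EC_BREAKPT_CUR is a debug exception,
	 * and anything else falls through to el1_inv.
	 */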
el1_ia:
	/*
	 * Fall through to the Data abort case
	 */
el1_da:
	/*
	 * Data abort handling
	 */
	mrs	x3, far_el1
	inherit_daif	pstate=x23, tmp=x2
	clear_address_tag x0, x3
	mov	x2, sp				// struct pt_regs
	bl	do_mem_abort

	kernel_exit 1
el1_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x0, far_el1
	inherit_daif	pstate=x23, tmp=x2
	mov	x2, sp
	bl	do_sp_pc_abort
	ASM_BUG()
el1_undef:
	/*
	 * Undefined instruction
	 */
	inherit_daif	pstate=x23, tmp=x2
	mov	x0, sp
	bl	do_undefinstr
	ASM_BUG()
el1_dbg:
	/*
	 * Debug exception handling
	 */
	cmp	x24, #ESR_ELx_EC_BRK64		// if BRK64
	cinc	x24, x24, eq			// set bit '0'
	tbz	x24, #0, el1_inv		// EL1 only
	mrs	x0, far_el1
	mov	x2, sp				// struct pt_regs
	bl	do_debug_exception
	kernel_exit 1
el1_inv:
	// TODO: add support for undefined instructions in kernel mode
	inherit_daif	pstate=x23, tmp=x2
	mov	x0, sp
	mov	x2, x1
	mov	x1, #BAD_SYNC
	bl	bad_mode
	ASM_BUG()
ENDPROC(el1_sync)

	.align	6
el1_irq:
	kernel_entry 1
	enable_da_f
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	irq_handler

#ifdef CONFIG_PREEMPT
	ldr	w24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
	cbnz	w24, 1f				// preempt count != 0
	ldr	x0, [tsk, #TSK_TI_FLAGS]	// get flags
	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
	bl	el1_preempt
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	kernel_exit 1
ENDPROC(el1_irq)

#ifdef CONFIG_PREEMPT
el1_preempt:
	mov	x24, lr
1:	bl	preempt_schedule_irq		// irq en/disable is done inside
	ldr	x0, [tsk, #TSK_TI_FLAGS]	// get new task's TI_FLAGS
	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
	ret	x24
#endif

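/*
 * el1_preempt loops because preempt_schedule_irq() returns with IRQs
 * masked but may have let other tasks run in the meantime; if
 * TIF_NEED_RESCHED has been set again, we schedule once more rather than
 * returning to the interrupted context with a reschedule still pending.
 */
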
/*
 * EL0 mode handlers.
 */
	.align	6
el0_sync:
	kernel_entry 0
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_SVC64		// SVC in 64-bit state
	b.eq	el0_svc
	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_ELx_EC_SVE		// SVE access
	b.eq	el0_sve_acc
	cmp	x24, #ESR_ELx_EC_FP_EXC64	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
	b.eq	el0_sys
	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv

#ifdef CONFIG_COMPAT
	.align	6
el0_sync_compat:
	kernel_entry 0, 32
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_SVC32		// SVC in 32-bit state
	b.eq	el0_svc_compat
	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_ELx_EC_FP_EXC32	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP15_32	// CP15 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP15_64	// CP15 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_MR	// CP14 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_LS	// CP14 LDC/STC trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_64	// CP14 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv
el0_svc_compat:
	/*
	 * AArch32 syscall handling
	 */
	ldr	x16, [tsk, #TSK_TI_FLAGS]	// load thread flags
	adrp	stbl, compat_sys_call_table	// load compat syscall table pointer
	mov	wscno, w7			// syscall number in w7 (r7)
	mov	wsc_nr, #__NR_compat_syscalls
	b	el0_svc_naked

	.align	6
el0_irq_compat:
	kernel_entry 0, 32
	b	el0_irq_naked

el0_error_compat:
	kernel_entry 0, 32
	b	el0_error_naked
#endif

el0_da:
	/*
	 * Data abort handling
	 */
	mrs	x26, far_el1
	enable_daif
	ct_user_exit
	clear_address_tag x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_mem_abort
	b	ret_to_user
el0_ia:
	/*
	 * Instruction abort handling
	 */
	mrs	x26, far_el1
	enable_daif
	ct_user_exit
	mov	x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_mem_abort
	b	ret_to_user
el0_fpsimd_acc:
	/*
	 * Floating Point or Advanced SIMD access
	 */
	enable_daif
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_fpsimd_acc
	b	ret_to_user
el0_sve_acc:
	/*
	 * Scalable Vector Extension access
	 */
	enable_daif
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_sve_acc
	b	ret_to_user
el0_fpsimd_exc:
	/*
	 * Floating Point, Advanced SIMD or SVE exception
	 */
	enable_daif
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_fpsimd_exc
	b	ret_to_user
el0_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x26, far_el1
	enable_daif
	ct_user_exit
	mov	x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_sp_pc_abort
	b	ret_to_user
el0_undef:
	/*
	 * Undefined instruction
	 */
	enable_daif
	ct_user_exit
	mov	x0, sp
	bl	do_undefinstr
	b	ret_to_user
el0_sys:
	/*
	 * System instructions, for trapped cache maintenance instructions
	 */
	enable_daif
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_sysinstr
	b	ret_to_user
el0_dbg:
	/*
	 * Debug exception handling
	 */
	tbnz	x24, #0, el0_inv		// EL0 only
	mrs	x0, far_el1
	mov	x1, x25
	mov	x2, sp
	bl	do_debug_exception
	enable_daif
	ct_user_exit
	b	ret_to_user
el0_inv:
	enable_daif
	ct_user_exit
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mov	x2, x25
	bl	bad_el0_sync
	b	ret_to_user
ENDPROC(el0_sync)

	.align	6
el0_irq:
	kernel_entry 0
el0_irq_naked:
	enable_da_f
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	ct_user_exit
	irq_handler

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	b	ret_to_user
ENDPROC(el0_irq)

el1_error:
	kernel_entry 1
	mrs	x1, esr_el1
	enable_dbg
	mov	x0, sp
	bl	do_serror
	kernel_exit 1
ENDPROC(el1_error)

el0_error:
	kernel_entry 0
el0_error_naked:
	mrs	x1, esr_el1
	enable_dbg
	mov	x0, sp
	bl	do_serror
	enable_daif
	ct_user_exit
	b	ret_to_user
ENDPROC(el0_error)

/*
 * This is the fast syscall return path. We do as little as possible here,
 * and this includes saving x0 back into the kernel stack.
 */
ret_fast_syscall:
	disable_daif
	str	x0, [sp, #S_X0]			// returned x0
	ldr	x1, [tsk, #TSK_TI_FLAGS]	// re-check for syscall tracing
	and	x2, x1, #_TIF_SYSCALL_WORK
	cbnz	x2, ret_fast_syscall_trace
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
	enable_step_tsk x1, x2
	kernel_exit 0
ret_fast_syscall_trace:
	enable_daif
	b	__sys_trace_return_skipped	// we already saved x0

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
work_pending:
	mov	x0, sp				// 'regs'
	bl	do_notify_resume
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on		// enabled while in userspace
#endif
	ldr	x1, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
	b	finish_ret_to_user
/*
 * "slow" syscall return path.
 */
ret_to_user:
	disable_daif
	ldr	x1, [tsk, #TSK_TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
finish_ret_to_user:
	enable_step_tsk x1, x2
	kernel_exit 0
ENDPROC(ret_to_user)

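/*
 * Both return paths above sample the work flags only after disable_daif
 * has masked all exceptions, so a flag set by an interrupt cannot slip in
 * between the TIF check and kernel_exit.
 */
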
/*
 * SVC handler.
 */
	.align	6
el0_svc:
	ldr	x16, [tsk, #TSK_TI_FLAGS]	// load thread flags
	adrp	stbl, sys_call_table		// load syscall table pointer
	mov	wscno, w8			// syscall number in w8
	mov	wsc_nr, #__NR_syscalls

#ifdef CONFIG_ARM64_SVE
alternative_if_not ARM64_SVE
	b	el0_svc_naked
alternative_else_nop_endif
	tbz	x16, #TIF_SVE, el0_svc_naked	// Skip unless TIF_SVE set:
	bic	x16, x16, #_TIF_SVE		// discard SVE state
	str	x16, [tsk, #TSK_TI_FLAGS]

	/*
	 * task_fpsimd_load() won't be called to update CPACR_EL1 in
	 * ret_to_user unless TIF_FOREIGN_FPSTATE is still set, which only
	 * happens if a context switch or kernel_neon_begin() or context
	 * modification (sigreturn, ptrace) intervenes.
	 * So, ensure that CPACR_EL1 is already correct for the fast-path case:
	 */
	mrs	x9, cpacr_el1
	bic	x9, x9, #CPACR_EL1_ZEN_EL0EN	// disable SVE for el0
	msr	cpacr_el1, x9			// synchronised by eret to el0
#endif

el0_svc_naked:					// compat entry point
	stp	x0, xscno, [sp, #S_ORIG_X0]	// save the original x0 and syscall number
	enable_daif
	ct_user_exit 1

	tst	x16, #_TIF_SYSCALL_WORK		// check for syscall hooks
	b.ne	__sys_trace
	cmp	wscno, wsc_nr			// check upper syscall limit
	b.hs	ni_sys
	ldr	x16, [stbl, xscno, lsl #3]	// address in the syscall table
	blr	x16				// call sys_* routine
	b	ret_fast_syscall
ni_sys:
	mov	x0, sp
	bl	do_ni_syscall
	b	ret_fast_syscall
ENDPROC(el0_svc)

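/*
 * Note on the fast path above: the syscall arguments are still live in
 * x0-x7 from userspace when blr x16 runs. ct_user_exit 1 reloads x0-x7
 * from the saved frame precisely because context tracking may clobber
 * them; with no syscall tracing pending, nothing else needs reloading.
 */
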
/*
 * This is the really slow path. We're going to be doing context
 * switches, and waiting for our parent to respond.
 */
__sys_trace:
	cmp	wscno, #NO_SYSCALL		// user-issued syscall(-1)?
	b.ne	1f
	mov	x0, #-ENOSYS			// set default errno if so
	str	x0, [sp, #S_X0]
1:	mov	x0, sp
	bl	syscall_trace_enter
	cmp	w0, #NO_SYSCALL			// skip the syscall?
	b.eq	__sys_trace_return_skipped
	mov	wscno, w0			// syscall number (possibly new)
	mov	x1, sp				// pointer to regs
	cmp	wscno, wsc_nr			// check upper syscall limit
	b.hs	__ni_sys_trace
	ldp	x0, x1, [sp]			// restore the syscall args
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	ldr	x16, [stbl, xscno, lsl #3]	// address in the syscall table
	blr	x16				// call sys_* routine

__sys_trace_return:
	str	x0, [sp, #S_X0]			// save returned x0
__sys_trace_return_skipped:
	mov	x0, sp
	bl	syscall_trace_exit
	b	ret_to_user

__ni_sys_trace:
	mov	x0, sp
	bl	do_ni_syscall
	b	__sys_trace_return

	.popsection				// .entry.text

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
/*
 * Exception vectors trampoline.
 */
	.pushsection ".entry.tramp.text", "ax"

	.macro tramp_map_kernel, tmp
	mrs	\tmp, ttbr1_el1
	sub	\tmp, \tmp, #(SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
	bic	\tmp, \tmp, #USER_ASID_FLAG
	msr	ttbr1_el1, \tmp
	.endm

	.macro tramp_unmap_kernel, tmp
	mrs	\tmp, ttbr1_el1
	add	\tmp, \tmp, #(SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
	orr	\tmp, \tmp, #USER_ASID_FLAG
	msr	ttbr1_el1, \tmp
	/*
	 * We avoid running the post_ttbr_update_workaround here because the
	 * user and kernel ASIDs don't have conflicting mappings, so any
	 * "blessing" as described in:
	 *
	 *   http://lkml.kernel.org/r/56BB848A.6060603@caviumnetworks.com
	 *
	 * will not hurt correctness. Whilst this may partially defeat the
	 * point of using split ASIDs in the first place, it avoids
	 * the hit of invalidating the entire I-cache on every return to
	 * userspace.
	 */
	.endm

	.macro tramp_ventry, regsize = 64
	.align	7
1:
	.if	\regsize == 64
	msr	tpidrro_el0, x30	// Restored in kernel_ventry
	.endif
	tramp_map_kernel	x30
	ldr	x30, =vectors
	prfm	plil1strm, [x30, #(1b - tramp_vectors)]
	msr	vbar_el1, x30
	add	x30, x30, #(1b - tramp_vectors)
	isb
	br	x30
	.endm

	.macro tramp_exit, regsize = 64
	adr	x30, tramp_vectors
	msr	vbar_el1, x30
	tramp_unmap_kernel	x30
	.if	\regsize == 64
	mrs	x30, far_el1
	.endif
	eret
	.endm

	.align	11
ENTRY(tramp_vectors)
	.space	0x400

	tramp_ventry
	tramp_ventry
	tramp_ventry
	tramp_ventry

	tramp_ventry	32
	tramp_ventry	32
	tramp_ventry	32
	tramp_ventry	32
END(tramp_vectors)

ENTRY(tramp_exit_native)
	tramp_exit
END(tramp_exit_native)

ENTRY(tramp_exit_compat)
	tramp_exit	32
END(tramp_exit_compat)

	.ltorg
	.popsection				// .entry.tramp.text
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */

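/*
 * Trampoline flow, in short: while running userspace, only tramp_vectors
 * (via its TRAMP_VALIAS alias) is mapped. tramp_ventry stashes x30, maps
 * the kernel by stripping the reserved offset and ASID flag from
 * TTBR1_EL1, points VBAR_EL1 at the full kernel vectors and branches to
 * the matching entry; tramp_exit reverses each step before the eret.
 */
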
/*
 * Special system call wrappers.
 */
ENTRY(sys_rt_sigreturn_wrapper)
	mov	x0, sp
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 *   x0 = previous task_struct (must be preserved across the switch)
 *   x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 */
ENTRY(cpu_switch_to)
	mov	x10, #THREAD_CPU_CONTEXT
	add	x8, x0, x10
	mov	x9, sp
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	str	lr, [x8]
	add	x8, x1, x10
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	ldr	lr, [x8]
	mov	sp, x9
	msr	sp_el0, x1
	ret
ENDPROC(cpu_switch_to)
NOKPROBE(cpu_switch_to)

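/*
 * cpu_switch_to saves only x19-x28, fp, sp and lr: everything else is
 * caller-saved at this call boundary and is restored by the C callers.
 * The final msr makes sp_el0 carry the incoming task_struct pointer,
 * which is how get_thread_info and the tsk alias find current.
 */
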
/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	cbz	x19, 1f				// not a kernel thread
	mov	x0, x20
	blr	x19
1:	get_thread_info tsk
	b	ret_to_user
ENDPROC(ret_from_fork)
NOKPROBE(ret_from_fork)