/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/irq.h>
#include <asm/memory.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-uaccess.h>
#include <asm/unistd.h>

/*
 * Context tracking subsystem. Used to instrument transitions
 * between user and kernel mode.
 */
	.macro ct_user_exit, syscall = 0
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_exit
	.if \syscall == 1
	/*
	 * Save/restore needed during syscalls. Restore syscall arguments from
	 * the values already saved on stack during kernel_entry.
	 */
	ldp	x0, x1, [sp]
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	.endif
#endif
	.endm

	.macro ct_user_enter
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_enter
#endif
	.endm

/*
 * Bad Abort numbers
 *-----------------
 */
#define BAD_SYNC	0
#define BAD_IRQ		1
#define BAD_FIQ		2
#define BAD_ERROR	3

	.macro	kernel_entry, el, regsize = 64
	sub	sp, sp, #S_FRAME_SIZE
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	stp	x0, x1, [sp, #16 * 0]
	stp	x2, x3, [sp, #16 * 1]
	stp	x4, x5, [sp, #16 * 2]
	stp	x6, x7, [sp, #16 * 3]
	stp	x8, x9, [sp, #16 * 4]
	stp	x10, x11, [sp, #16 * 5]
	stp	x12, x13, [sp, #16 * 6]
	stp	x14, x15, [sp, #16 * 7]
	stp	x16, x17, [sp, #16 * 8]
	stp	x18, x19, [sp, #16 * 9]
	stp	x20, x21, [sp, #16 * 10]
	stp	x22, x23, [sp, #16 * 11]
	stp	x24, x25, [sp, #16 * 12]
	stp	x26, x27, [sp, #16 * 13]
	stp	x28, x29, [sp, #16 * 14]

	.if	\el == 0
	mrs	x21, sp_el0
	ldr_this_cpu	tsk, __entry_task, x20	// Ensure MDSCR_EL1.SS is clear,
	ldr	x19, [tsk, #TSK_TI_FLAGS]	// since we can unmask debug
	disable_step_tsk x19, x20		// exceptions when scheduling.

	mov	x29, xzr			// fp pointed to user-space
	.else
	add	x21, sp, #S_FRAME_SIZE
	get_thread_info tsk
	/* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
	ldr	x20, [tsk, #TSK_TI_ADDR_LIMIT]
	str	x20, [sp, #S_ORIG_ADDR_LIMIT]
	mov	x20, #TASK_SIZE_64
	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
	/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
	.endif /* \el == 0 */
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]

	/*
	 * In order to be able to dump the contents of struct pt_regs at the
	 * time the exception was taken (in case we attempt to walk the call
	 * stack later), chain it together with the stack frames.
	 */
	.if \el == 0
	stp	xzr, xzr, [sp, #S_STACKFRAME]
	.else
	stp	x29, x22, [sp, #S_STACKFRAME]
	.endif
	add	x29, sp, #S_STACKFRAME

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
	 * EL0, there is no need to check the state of TTBR0_EL1 since
	 * accesses are always enabled.
	 * Note that the meaning of this bit differs from the ARMv8.1 PAN
	 * feature as all TTBR0_EL1 accesses are disabled, not just those to
	 * user mappings.
	 */
alternative_if ARM64_HAS_PAN
	b	1f				// skip TTBR0 PAN
alternative_else_nop_endif

	.if	\el != 0
	mrs	x21, ttbr0_el1
	tst	x21, #0xffff << 48		// Check for the reserved ASID
	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
	b.eq	1f				// TTBR0 access already disabled
	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
	.endif

	__uaccess_ttbr0_disable x21
1:
#endif
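	/*
	 * Note: the "tst x21, #0xffff << 48" above inspects the ASID field
	 * of TTBR0_EL1. The reserved table installed by
	 * __uaccess_ttbr0_disable appears to use ASID 0, so an all-zero
	 * ASID is taken to mean TTBR0 user access is already disabled.
	 */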

	stp	x22, x23, [sp, #S_PC]

	/*
	 * Set syscallno to -1 by default (overridden later if real syscall).
	 */
	.if	\el == 0
	mvn	x21, xzr
	str	x21, [sp, #S_SYSCALLNO]
	.endif

	/*
	 * Set sp_el0 to current thread_info.
	 */
	.if	\el == 0
	msr	sp_el0, tsk
	.endif

	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	 */
	.endm

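/*
 * The S_* offsets and S_FRAME_SIZE used by kernel_entry/kernel_exit are
 * offsets into struct pt_regs, generated at build time by asm-offsets.c
 * and pulled in via <asm/asm-offsets.h> above.
 */
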
	.macro	kernel_exit, el
	.if	\el != 0
	/* Restore the task's original addr_limit. */
	ldr	x20, [sp, #S_ORIG_ADDR_LIMIT]
	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]

	/* No need to restore UAO, it will be restored from SPSR_EL1 */
	.endif

	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
	.if	\el == 0
	ct_user_enter
	.endif

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
	 * PAN bit checking.
	 */
alternative_if ARM64_HAS_PAN
	b	2f				// skip TTBR0 PAN
alternative_else_nop_endif

	.if	\el != 0
	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
	.endif

	__uaccess_ttbr0_enable x0

	.if	\el == 0
	/*
	 * Enable errata workarounds only if returning to user. The only
	 * workaround currently required for TTBR0_EL1 changes is for the
	 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
	 * corruption).
	 */
	post_ttbr0_update_workaround
	.endif
1:
	.if	\el != 0
	and	x22, x22, #~PSR_PAN_BIT		// ARMv8.0 CPUs do not understand this bit
	.endif
2:
#endif

	.if	\el == 0
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	msr	sp_el0, x23
#ifdef CONFIG_ARM64_ERRATUM_845719
alternative_if ARM64_WORKAROUND_845719
	tbz	x22, #4, 1f
#ifdef CONFIG_PID_IN_CONTEXTIDR
	mrs	x29, contextidr_el1
	msr	contextidr_el1, x29
#else
	msr	contextidr_el1, xzr
#endif
1:
alternative_else_nop_endif
#endif
	.endif

	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	ldp	x0, x1, [sp, #16 * 0]
	ldp	x2, x3, [sp, #16 * 1]
	ldp	x4, x5, [sp, #16 * 2]
	ldp	x6, x7, [sp, #16 * 3]
	ldp	x8, x9, [sp, #16 * 4]
	ldp	x10, x11, [sp, #16 * 5]
	ldp	x12, x13, [sp, #16 * 6]
	ldp	x14, x15, [sp, #16 * 7]
	ldp	x16, x17, [sp, #16 * 8]
	ldp	x18, x19, [sp, #16 * 9]
	ldp	x20, x21, [sp, #16 * 10]
	ldp	x22, x23, [sp, #16 * 11]
	ldp	x24, x25, [sp, #16 * 12]
	ldp	x26, x27, [sp, #16 * 13]
	ldp	x28, x29, [sp, #16 * 14]
	ldr	lr, [sp, #S_LR]
	add	sp, sp, #S_FRAME_SIZE		// restore sp
	eret					// return to kernel
	.endm

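/*
 * The eret at the end of kernel_exit resumes at the address in ELR_EL1
 * with the PSTATE saved in SPSR_EL1, both written just beforehand; for
 * returns to EL0 the user stack pointer is taken from SP_EL0, restored
 * from pt_regs above.
 */
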
	.macro	irq_stack_entry
	mov	x19, sp				// preserve the original sp

	/*
	 * Compare sp with the base of the task stack.
	 * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
	 * and should switch to the irq stack.
	 */
	ldr	x25, [tsk, TSK_STACK]
	eor	x25, x25, x19
	and	x25, x25, #~(THREAD_SIZE - 1)
	cbnz	x25, 9998f

	adr_this_cpu x25, irq_stack, x26
	mov	x26, #IRQ_STACK_START_SP
	add	x26, x25, x26

	/* switch to the irq stack */
	mov	sp, x26
9998:
	.endm

	/*
	 * x19 should be preserved between irq_stack_entry and
	 * irq_stack_exit.
	 */
	.macro	irq_stack_exit
	mov	sp, x19
	.endm

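/*
 * irq_stack_entry clobbers x25/x26 as scratch. This looks safe because
 * irq_handler only runs after kernel_entry has saved them to pt_regs,
 * and kernel_exit restores them from there.
 */
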
/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - x0 to x6.
 *
 * x7 is reserved for the system call number in 32-bit mode.
 */
sc_nr	.req	x25		// number of system calls
scno	.req	x26		// syscall number
stbl	.req	x27		// syscall table pointer
tsk	.req	x28		// current thread_info

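/*
 * These aliases sit in x25-x28, which are callee-saved under AAPCS64,
 * so their values survive the C functions called from this file.
 */
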
/*
 * Interrupt handling.
 */
	.macro	irq_handler
	ldr_l	x1, handle_arch_irq
	mov	x0, sp
	irq_stack_entry
	blr	x1
	irq_stack_exit
	.endm

	.text

/*
 * Exception vectors.
 */
	.pushsection ".entry.text", "ax"

	.align	11
ENTRY(vectors)
	ventry	el1_sync_invalid		// Synchronous EL1t
	ventry	el1_irq_invalid			// IRQ EL1t
	ventry	el1_fiq_invalid			// FIQ EL1t
	ventry	el1_error_invalid		// Error EL1t

	ventry	el1_sync			// Synchronous EL1h
	ventry	el1_irq				// IRQ EL1h
	ventry	el1_fiq_invalid			// FIQ EL1h
	ventry	el1_error_invalid		// Error EL1h

	ventry	el0_sync			// Synchronous 64-bit EL0
	ventry	el0_irq				// IRQ 64-bit EL0
	ventry	el0_fiq_invalid			// FIQ 64-bit EL0
	ventry	el0_error_invalid		// Error 64-bit EL0

#ifdef CONFIG_COMPAT
	ventry	el0_sync_compat			// Synchronous 32-bit EL0
	ventry	el0_irq_compat			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid_compat		// FIQ 32-bit EL0
	ventry	el0_error_invalid_compat	// Error 32-bit EL0
#else
	ventry	el0_sync_invalid		// Synchronous 32-bit EL0
	ventry	el0_irq_invalid			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid			// FIQ 32-bit EL0
	ventry	el0_error_invalid		// Error 32-bit EL0
#endif
END(vectors)

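/*
 * The .align 11 above gives the 2KB alignment required by VBAR_EL1;
 * each ventry slot is 128 bytes wide.
 */
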
/*
 * Invalid mode handlers
 */
	.macro	inv_entry, el, reason, regsize = 64
	kernel_entry \el, \regsize
	mov	x0, sp
	mov	x1, #\reason
	mrs	x2, esr_el1
	bl	bad_mode
	ASM_BUG()
	.endm

el0_sync_invalid:
	inv_entry 0, BAD_SYNC
ENDPROC(el0_sync_invalid)

el0_irq_invalid:
	inv_entry 0, BAD_IRQ
ENDPROC(el0_irq_invalid)

el0_fiq_invalid:
	inv_entry 0, BAD_FIQ
ENDPROC(el0_fiq_invalid)

el0_error_invalid:
	inv_entry 0, BAD_ERROR
ENDPROC(el0_error_invalid)

#ifdef CONFIG_COMPAT
el0_fiq_invalid_compat:
	inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)

el0_error_invalid_compat:
	inv_entry 0, BAD_ERROR, 32
ENDPROC(el0_error_invalid_compat)
#endif

el1_sync_invalid:
	inv_entry 1, BAD_SYNC
ENDPROC(el1_sync_invalid)

el1_irq_invalid:
	inv_entry 1, BAD_IRQ
ENDPROC(el1_irq_invalid)

el1_fiq_invalid:
	inv_entry 1, BAD_FIQ
ENDPROC(el1_fiq_invalid)

el1_error_invalid:
	inv_entry 1, BAD_ERROR
ENDPROC(el1_error_invalid)

/*
 * EL1 mode handlers.
 */
	.align	6
el1_sync:
	kernel_entry 1
	mrs	x1, esr_el1			// read the syndrome register
	lsr	x24, x1, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_DABT_CUR	// data abort in EL1
	b.eq	el1_da
	cmp	x24, #ESR_ELx_EC_IABT_CUR	// instruction abort in EL1
	b.eq	el1_ia
	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
	b.eq	el1_undef
	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL1
	b.eq	el1_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_CUR	// debug exception in EL1
	b.ge	el1_dbg
	b	el1_inv
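	/*
	 * The single b.ge above catches all debug exceptions: the debug
	 * exception classes (ESR_ELx_EC_BREAKPT_CUR and up) sit at the
	 * top of the EC encoding space.
	 */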

el1_ia:
	/*
	 * Fall through to the Data abort case
	 */
el1_da:
	/*
	 * Data abort handling
	 */
	mrs	x3, far_el1
	enable_dbg
	// re-enable interrupts if they were enabled in the aborted context
	tbnz	x23, #7, 1f			// PSR_I_BIT
	enable_irq
1:
	clear_address_tag x0, x3
	mov	x2, sp				// struct pt_regs
	bl	do_mem_abort

	// disable interrupts before pulling preserved data off the stack
	disable_irq
	kernel_exit 1
el1_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x0, far_el1
	enable_dbg
	mov	x2, sp
	bl	do_sp_pc_abort
	ASM_BUG()
el1_undef:
	/*
	 * Undefined instruction
	 */
	enable_dbg
	mov	x0, sp
	bl	do_undefinstr
	ASM_BUG()
el1_dbg:
	/*
	 * Debug exception handling
	 */
	cmp	x24, #ESR_ELx_EC_BRK64		// if BRK64
	cinc	x24, x24, eq			// set bit '0'
	tbz	x24, #0, el1_inv		// EL1 only
	mrs	x0, far_el1
	mov	x2, sp				// struct pt_regs
	bl	do_debug_exception
	kernel_exit 1
el1_inv:
	// TODO: add support for undefined instructions in kernel mode
	enable_dbg
	mov	x0, sp
	mov	x2, x1
	mov	x1, #BAD_SYNC
	bl	bad_mode
	ASM_BUG()
ENDPROC(el1_sync)

	.align	6
el1_irq:
	kernel_entry 1
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	irq_handler

#ifdef CONFIG_PREEMPT
	ldr	w24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
	cbnz	w24, 1f				// preempt count != 0
	ldr	x0, [tsk, #TSK_TI_FLAGS]	// get flags
	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
	bl	el1_preempt
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	kernel_exit 1
ENDPROC(el1_irq)

#ifdef CONFIG_PREEMPT
el1_preempt:
	mov	x24, lr
1:	bl	preempt_schedule_irq		// irq en/disable is done inside
	ldr	x0, [tsk, #TSK_TI_FLAGS]	// get new task's TI_FLAGS
	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
	ret	x24
#endif

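/*
 * el1_preempt loops because preempt_schedule_irq() re-enables interrupts
 * internally, so TIF_NEED_RESCHED may already be set again by the time
 * it returns.
 */
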
/*
 * EL0 mode handlers.
 */
	.align	6
el0_sync:
	kernel_entry 0
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_SVC64		// SVC in 64-bit state
	b.eq	el0_svc
	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_ELx_EC_FP_EXC64	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
	b.eq	el0_sys
	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv

#ifdef CONFIG_COMPAT
	.align	6
el0_sync_compat:
	kernel_entry 0, 32
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_SVC32		// SVC in 32-bit state
	b.eq	el0_svc_compat
	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_ELx_EC_FP_EXC32	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP15_32	// CP15 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP15_64	// CP15 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_MR	// CP14 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_LS	// CP14 LDC/STC trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_64	// CP14 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv
el0_svc_compat:
	/*
	 * AArch32 syscall handling
	 */
	adrp	stbl, compat_sys_call_table	// load compat syscall table pointer
	uxtw	scno, w7			// syscall number in w7 (r7)
	mov	sc_nr, #__NR_compat_syscalls
	b	el0_svc_naked

	.align	6
el0_irq_compat:
	kernel_entry 0, 32
	b	el0_irq_naked
#endif

el0_da:
	/*
	 * Data abort handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	clear_address_tag x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_mem_abort
	b	ret_to_user
el0_ia:
	/*
	 * Instruction abort handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_mem_abort
	b	ret_to_user
el0_fpsimd_acc:
	/*
	 * Floating Point or Advanced SIMD access
	 */
	enable_dbg
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_fpsimd_acc
	b	ret_to_user
el0_fpsimd_exc:
	/*
	 * Floating Point or Advanced SIMD exception
	 */
	enable_dbg
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_fpsimd_exc
	b	ret_to_user
el0_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_sp_pc_abort
	b	ret_to_user
el0_undef:
	/*
	 * Undefined instruction
	 */
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, sp
	bl	do_undefinstr
	b	ret_to_user
el0_sys:
	/*
	 * System instructions, for trapped cache maintenance instructions
	 */
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_sysinstr
	b	ret_to_user
el0_dbg:
	/*
	 * Debug exception handling
	 */
	tbnz	x24, #0, el0_inv		// EL0 only
	mrs	x0, far_el1
	mov	x1, x25
	mov	x2, sp
	bl	do_debug_exception
	enable_dbg
	ct_user_exit
	b	ret_to_user
el0_inv:
	enable_dbg
	ct_user_exit
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mov	x2, x25
	bl	bad_el0_sync
	b	ret_to_user
ENDPROC(el0_sync)

	.align	6
el0_irq:
	kernel_entry 0
el0_irq_naked:
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	ct_user_exit
	irq_handler

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	b	ret_to_user
ENDPROC(el0_irq)

/*
 * This is the fast syscall return path. We do as little as possible here,
 * and this includes saving x0 back into the kernel stack.
 */
ret_fast_syscall:
	disable_irq				// disable interrupts
	str	x0, [sp, #S_X0]			// returned x0
	ldr	x1, [tsk, #TSK_TI_FLAGS]	// re-check for syscall tracing
	and	x2, x1, #_TIF_SYSCALL_WORK
	cbnz	x2, ret_fast_syscall_trace
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
	enable_step_tsk x1, x2
	kernel_exit 0
ret_fast_syscall_trace:
	enable_irq				// enable interrupts
	b	__sys_trace_return_skipped	// we already saved x0

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
work_pending:
	mov	x0, sp				// 'regs'
	bl	do_notify_resume
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on		// enabled while in userspace
#endif
	ldr	x1, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
	b	finish_ret_to_user
/*
 * "slow" syscall return path.
 */
ret_to_user:
	disable_irq				// disable interrupts
	ldr	x1, [tsk, #TSK_TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
finish_ret_to_user:
	enable_step_tsk x1, x2
	kernel_exit 0
ENDPROC(ret_to_user)

/*
 * SVC handler.
 */
	.align	6
el0_svc:
	adrp	stbl, sys_call_table		// load syscall table pointer
	uxtw	scno, w8			// syscall number in w8
	mov	sc_nr, #__NR_syscalls
el0_svc_naked:					// compat entry point
	stp	x0, scno, [sp, #S_ORIG_X0]	// save the original x0 and syscall number
	enable_dbg_and_irq
	ct_user_exit 1

	ldr	x16, [tsk, #TSK_TI_FLAGS]	// check for syscall hooks
	tst	x16, #_TIF_SYSCALL_WORK
	b.ne	__sys_trace
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	ni_sys
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	blr	x16				// call sys_* routine
	b	ret_fast_syscall
ni_sys:
	mov	x0, sp
	bl	do_ni_syscall
	b	ret_fast_syscall
ENDPROC(el0_svc)

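/*
 * Syscall table entries are 8-byte pointers, hence the scaled
 * "ldr x16, [stbl, scno, lsl #3]" indexing above and in __sys_trace
 * below.
 */
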
/*
 * This is the really slow path. We're going to be doing context
 * switches, and waiting for our parent to respond.
 */
__sys_trace:
	mov	w0, #-1				// set default errno for
	cmp	scno, x0			// user-issued syscall(-1)
	b.ne	1f
	mov	x0, #-ENOSYS
	str	x0, [sp, #S_X0]
1:	mov	x0, sp
	bl	syscall_trace_enter
	cmp	w0, #-1				// skip the syscall?
	b.eq	__sys_trace_return_skipped
	uxtw	scno, w0			// syscall number (possibly new)
	mov	x1, sp				// pointer to regs
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	__ni_sys_trace
	ldp	x0, x1, [sp]			// restore the syscall args
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	blr	x16				// call sys_* routine

__sys_trace_return:
	str	x0, [sp, #S_X0]			// save returned x0
__sys_trace_return_skipped:
	mov	x0, sp
	bl	syscall_trace_exit
	b	ret_to_user

__ni_sys_trace:
	mov	x0, sp
	bl	do_ni_syscall
	b	__sys_trace_return

	.popsection				// .entry.text

/*
 * Special system call wrappers.
 */
ENTRY(sys_rt_sigreturn_wrapper)
	mov	x0, sp
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 * x0 = previous task_struct (must be preserved across the switch)
 * x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 *
 */
ENTRY(cpu_switch_to)
	mov	x10, #THREAD_CPU_CONTEXT
	add	x8, x0, x10
	mov	x9, sp
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	str	lr, [x8]
	add	x8, x1, x10
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	ldr	lr, [x8]
	mov	sp, x9
	msr	sp_el0, x1
	ret
ENDPROC(cpu_switch_to)
NOKPROBE(cpu_switch_to)

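/*
 * The final "msr sp_el0, x1" publishes the incoming task_struct pointer
 * via SP_EL0, which is where get_thread_info (and hence the "tsk" alias
 * in this file) finds current while running in the kernel.
 */
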
/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	cbz	x19, 1f				// not a kernel thread
	mov	x0, x20
	blr	x19
1:	get_thread_info tsk
	b	ret_to_user
ENDPROC(ret_from_fork)
NOKPROBE(ret_from_fork)
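
/*
 * For kernel threads, copy_thread() seeds the new task's cpu_context so
 * that x19 holds the thread function and x20 its argument, which is
 * what the cbz/blr pair above consumes.
 */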