/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
20 | ||
21 | #include <linux/init.h> | |
22 | #include <linux/linkage.h> | |
23 | ||
8d883b23 | 24 | #include <asm/alternative.h> |
60ffc30d CM |
25 | #include <asm/assembler.h> |
26 | #include <asm/asm-offsets.h> | |
905e8c5d | 27 | #include <asm/cpufeature.h> |
60ffc30d | 28 | #include <asm/errno.h> |
5c1ce6f7 | 29 | #include <asm/esr.h> |
8e23dacd | 30 | #include <asm/irq.h> |
e19a6ee2 | 31 | #include <asm/memory.h> |
60ffc30d CM |
32 | #include <asm/thread_info.h> |
33 | #include <asm/unistd.h> | |
34 | ||
/*
 * Context tracking subsystem. Used to instrument transitions
 * between user and kernel mode.
 */
	.macro ct_user_exit, syscall = 0
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_exit
	.if \syscall == 1
	/*
	 * Save/restore needed during syscalls.  Restore syscall arguments from
	 * the values already saved on stack during kernel_entry.
	 */
	ldp	x0, x1, [sp]
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	.endif
#endif
	.endm

	.macro ct_user_enter
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_enter
#endif
	.endm
60 | ||
60ffc30d CM |
61 | /* |
62 | * Bad Abort numbers | |
63 | *----------------- | |
64 | */ | |
65 | #define BAD_SYNC 0 | |
66 | #define BAD_IRQ 1 | |
67 | #define BAD_FIQ 2 | |
68 | #define BAD_ERROR 3 | |
69 | ||
	.macro	kernel_entry, el, regsize = 64
	sub	sp, sp, #S_FRAME_SIZE
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	stp	x0, x1, [sp, #16 * 0]
	stp	x2, x3, [sp, #16 * 1]
	stp	x4, x5, [sp, #16 * 2]
	stp	x6, x7, [sp, #16 * 3]
	stp	x8, x9, [sp, #16 * 4]
	stp	x10, x11, [sp, #16 * 5]
	stp	x12, x13, [sp, #16 * 6]
	stp	x14, x15, [sp, #16 * 7]
	stp	x16, x17, [sp, #16 * 8]
	stp	x18, x19, [sp, #16 * 9]
	stp	x20, x21, [sp, #16 * 10]
	stp	x22, x23, [sp, #16 * 11]
	stp	x24, x25, [sp, #16 * 12]
	stp	x26, x27, [sp, #16 * 13]
	stp	x28, x29, [sp, #16 * 14]

	.if	\el == 0
	mrs	x21, sp_el0
	mov	tsk, sp
	and	tsk, tsk, #~(THREAD_SIZE - 1)	// Ensure MDSCR_EL1.SS is clear,
	ldr	x19, [tsk, #TI_FLAGS]		// since we can unmask debug
	disable_step_tsk x19, x20		// exceptions when scheduling.

	mov	x29, xzr			// fp pointed to user-space
	.else
	add	x21, sp, #S_FRAME_SIZE
	get_thread_info tsk
	/* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
	ldr	x20, [tsk, #TI_ADDR_LIMIT]
	str	x20, [sp, #S_ORIG_ADDR_LIMIT]
	mov	x20, #TASK_SIZE_64
	str	x20, [tsk, #TI_ADDR_LIMIT]
	ALTERNATIVE(nop, SET_PSTATE_UAO(0), ARM64_HAS_UAO, CONFIG_ARM64_UAO)
	.endif /* \el == 0 */
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]
	stp	x22, x23, [sp, #S_PC]

	/*
	 * Set syscallno to -1 by default (overridden later if real syscall).
	 */
	.if	\el == 0
	mvn	x21, xzr
	str	x21, [sp, #S_SYSCALLNO]
	.endif

	/*
	 * Set sp_el0 to current thread_info.
	 */
	.if	\el == 0
	msr	sp_el0, tsk
	.endif

	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	 */
	.endm

	.macro	kernel_exit, el
	.if	\el != 0
	/* Restore the task's original addr_limit. */
	ldr	x20, [sp, #S_ORIG_ADDR_LIMIT]
	str	x20, [tsk, #TI_ADDR_LIMIT]

	/* No need to restore UAO, it will be restored from SPSR_EL1 */
	.endif

	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
	.if	\el == 0
	ct_user_enter
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	msr	sp_el0, x23
#ifdef CONFIG_ARM64_ERRATUM_845719
alternative_if_not ARM64_WORKAROUND_845719
	nop
	nop
#ifdef CONFIG_PID_IN_CONTEXTIDR
	nop
#endif
alternative_else
	tbz	x22, #4, 1f
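	// SPSR_EL1.M[4] is set when returning to AArch32; the erratum
	// workaround below is only needed for 32-bit EL0 returns.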
#ifdef CONFIG_PID_IN_CONTEXTIDR
	mrs	x29, contextidr_el1
	msr	contextidr_el1, x29
#else
	msr	contextidr_el1, xzr
#endif
1:
alternative_endif
#endif
	.endif
	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	ldp	x0, x1, [sp, #16 * 0]
	ldp	x2, x3, [sp, #16 * 1]
	ldp	x4, x5, [sp, #16 * 2]
	ldp	x6, x7, [sp, #16 * 3]
	ldp	x8, x9, [sp, #16 * 4]
	ldp	x10, x11, [sp, #16 * 5]
	ldp	x12, x13, [sp, #16 * 6]
	ldp	x14, x15, [sp, #16 * 7]
	ldp	x16, x17, [sp, #16 * 8]
	ldp	x18, x19, [sp, #16 * 9]
	ldp	x20, x21, [sp, #16 * 10]
	ldp	x22, x23, [sp, #16 * 11]
	ldp	x24, x25, [sp, #16 * 12]
	ldp	x26, x27, [sp, #16 * 13]
	ldp	x28, x29, [sp, #16 * 14]
	ldr	lr, [sp, #S_LR]
	add	sp, sp, #S_FRAME_SIZE		// restore sp
	eret					// return to kernel or userspace
	.endm

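/*
 * While running in the kernel, sp_el0 holds the current thread_info
 * pointer (set up in kernel_entry above and cpu_switch_to below).
 */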
	.macro	get_thread_info, rd
	mrs	\rd, sp_el0
	.endm

	.macro	irq_stack_entry
	mov	x19, sp			// preserve the original sp

	/*
	 * Compare sp with the current thread_info, if the top
	 * ~(THREAD_SIZE - 1) bits match, we are on a task stack, and
	 * should switch to the irq stack.
	 */
	and	x25, x19, #~(THREAD_SIZE - 1)
	cmp	x25, tsk
	b.ne	9998f
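	// No match: sp is not on the task stack, i.e. this is a nested
	// interrupt already running on the irq stack, so stay where we are.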

	this_cpu_ptr irq_stack, x25, x26
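	// x25 = this CPU's irq_stack base (x26 was used as scratch)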
	mov	x26, #IRQ_STACK_START_SP
	add	x26, x25, x26

	/* switch to the irq stack */
	mov	sp, x26

	/*
	 * Add a dummy stack frame, this non-standard format is fixed up
	 * by unwind_frame()
	 */
	stp	x29, x19, [sp, #-16]!
	mov	x29, sp

9998:
	.endm

/*
 * x19 should be preserved between irq_stack_entry and
 * irq_stack_exit.
 */
	.macro	irq_stack_exit
	mov	sp, x19
	.endm

/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - x0 to x6.
 *
 * x7 is reserved for the system call number in 32-bit mode.
 */
sc_nr	.req	x25		// number of system calls
scno	.req	x26		// syscall number
stbl	.req	x27		// syscall table pointer
tsk	.req	x28		// current thread_info

/*
 * Interrupt handling.
 */
	.macro	irq_handler
	ldr_l	x1, handle_arch_irq
	mov	x0, sp
	irq_stack_entry
	blr	x1
	irq_stack_exit
	.endm

	.text

/*
 * Exception vectors.
 */
	.pushsection ".entry.text", "ax"

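/*
 * VBAR_EL1 bits [10:0] are RES0, so the vector table must be 2KB-aligned;
 * hence the .align 11 below.
 */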
	.align	11
ENTRY(vectors)
	ventry	el1_sync_invalid		// Synchronous EL1t
	ventry	el1_irq_invalid			// IRQ EL1t
	ventry	el1_fiq_invalid			// FIQ EL1t
	ventry	el1_error_invalid		// Error EL1t

	ventry	el1_sync			// Synchronous EL1h
	ventry	el1_irq				// IRQ EL1h
	ventry	el1_fiq_invalid			// FIQ EL1h
	ventry	el1_error_invalid		// Error EL1h

	ventry	el0_sync			// Synchronous 64-bit EL0
	ventry	el0_irq				// IRQ 64-bit EL0
	ventry	el0_fiq_invalid			// FIQ 64-bit EL0
	ventry	el0_error_invalid		// Error 64-bit EL0

#ifdef CONFIG_COMPAT
	ventry	el0_sync_compat			// Synchronous 32-bit EL0
	ventry	el0_irq_compat			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid_compat		// FIQ 32-bit EL0
	ventry	el0_error_invalid_compat	// Error 32-bit EL0
#else
	ventry	el0_sync_invalid		// Synchronous 32-bit EL0
	ventry	el0_irq_invalid			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid			// FIQ 32-bit EL0
	ventry	el0_error_invalid		// Error 32-bit EL0
#endif
END(vectors)
292 | ||
293 | /* | |
294 | * Invalid mode handlers | |
295 | */ | |
296 | .macro inv_entry, el, reason, regsize = 64 | |
b660950c | 297 | kernel_entry \el, \regsize |
60ffc30d CM |
298 | mov x0, sp |
299 | mov x1, #\reason | |
300 | mrs x2, esr_el1 | |
301 | b bad_mode | |
302 | .endm | |
303 | ||
304 | el0_sync_invalid: | |
305 | inv_entry 0, BAD_SYNC | |
306 | ENDPROC(el0_sync_invalid) | |
307 | ||
308 | el0_irq_invalid: | |
309 | inv_entry 0, BAD_IRQ | |
310 | ENDPROC(el0_irq_invalid) | |
311 | ||
312 | el0_fiq_invalid: | |
313 | inv_entry 0, BAD_FIQ | |
314 | ENDPROC(el0_fiq_invalid) | |
315 | ||
316 | el0_error_invalid: | |
317 | inv_entry 0, BAD_ERROR | |
318 | ENDPROC(el0_error_invalid) | |
319 | ||
320 | #ifdef CONFIG_COMPAT | |
321 | el0_fiq_invalid_compat: | |
322 | inv_entry 0, BAD_FIQ, 32 | |
323 | ENDPROC(el0_fiq_invalid_compat) | |
324 | ||
325 | el0_error_invalid_compat: | |
326 | inv_entry 0, BAD_ERROR, 32 | |
327 | ENDPROC(el0_error_invalid_compat) | |
328 | #endif | |
329 | ||
330 | el1_sync_invalid: | |
331 | inv_entry 1, BAD_SYNC | |
332 | ENDPROC(el1_sync_invalid) | |
333 | ||
334 | el1_irq_invalid: | |
335 | inv_entry 1, BAD_IRQ | |
336 | ENDPROC(el1_irq_invalid) | |
337 | ||
338 | el1_fiq_invalid: | |
339 | inv_entry 1, BAD_FIQ | |
340 | ENDPROC(el1_fiq_invalid) | |
341 | ||
342 | el1_error_invalid: | |
343 | inv_entry 1, BAD_ERROR | |
344 | ENDPROC(el1_error_invalid) | |
345 | ||
346 | /* | |
347 | * EL1 mode handlers. | |
348 | */ | |
349 | .align 6 | |
350 | el1_sync: | |
351 | kernel_entry 1 | |
352 | mrs x1, esr_el1 // read the syndrome register | |
aed40e01 MR |
353 | lsr x24, x1, #ESR_ELx_EC_SHIFT // exception class |
354 | cmp x24, #ESR_ELx_EC_DABT_CUR // data abort in EL1 | |
60ffc30d | 355 | b.eq el1_da |
aed40e01 | 356 | cmp x24, #ESR_ELx_EC_SYS64 // configurable trap |
60ffc30d | 357 | b.eq el1_undef |
aed40e01 | 358 | cmp x24, #ESR_ELx_EC_SP_ALIGN // stack alignment exception |
60ffc30d | 359 | b.eq el1_sp_pc |
aed40e01 | 360 | cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception |
60ffc30d | 361 | b.eq el1_sp_pc |
aed40e01 | 362 | cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL1 |
60ffc30d | 363 | b.eq el1_undef |
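	// The debug exception classes occupy the top of the EC space, so the
	// single unsigned '>=' test below matches breakpoint, single-step,
	// watchpoint and BRK syndromes.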
	cmp	x24, #ESR_ELx_EC_BREAKPT_CUR	// debug exception in EL1
	b.ge	el1_dbg
	b	el1_inv
el1_da:
	/*
	 * Data abort handling
	 */
	mrs	x0, far_el1
	enable_dbg
	// re-enable interrupts if they were enabled in the aborted context
	tbnz	x23, #7, 1f			// PSR_I_BIT
	enable_irq
1:
	mov	x2, sp				// struct pt_regs
	bl	do_mem_abort

	// disable interrupts before pulling preserved data off the stack
	disable_irq
	kernel_exit 1
el1_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x0, far_el1
	enable_dbg
	mov	x2, sp
	b	do_sp_pc_abort
el1_undef:
	/*
	 * Undefined instruction
	 */
	enable_dbg
	mov	x0, sp
	b	do_undefinstr
el1_dbg:
	/*
	 * Debug exception handling
	 */
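	// ECs for debug exceptions taken from the current EL have bit 0 set;
	// BRK64 (0x3c) does not, so give it the same encoding before the
	// 'EL1 only' test below.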
	cmp	x24, #ESR_ELx_EC_BRK64		// if BRK64
	cinc	x24, x24, eq			// set bit '0'
	tbz	x24, #0, el1_inv		// EL1 only
	mrs	x0, far_el1
	mov	x2, sp				// struct pt_regs
	bl	do_debug_exception
	kernel_exit 1
el1_inv:
	// TODO: add support for undefined instructions in kernel mode
	enable_dbg
	mov	x0, sp
	mov	x2, x1
	mov	x1, #BAD_SYNC
	b	bad_mode
ENDPROC(el1_sync)

	.align	6
el1_irq:
	kernel_entry 1
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	irq_handler

#ifdef CONFIG_PREEMPT
	ldr	w24, [tsk, #TI_PREEMPT]		// get preempt count
	cbnz	w24, 1f				// preempt count != 0
	ldr	x0, [tsk, #TI_FLAGS]		// get flags
	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
	bl	el1_preempt
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	kernel_exit 1
ENDPROC(el1_irq)

#ifdef CONFIG_PREEMPT
el1_preempt:
	mov	x24, lr
1:	bl	preempt_schedule_irq		// irq en/disable is done inside
	ldr	x0, [tsk, #TI_FLAGS]		// get new task's TI_FLAGS
	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
	ret	x24
#endif

/*
 * EL0 mode handlers.
 */
	.align	6
el0_sync:
	kernel_entry 0
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_SVC64		// SVC in 64-bit state
	b.eq	el0_svc
	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_ELx_EC_FP_EXC64	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
	b.eq	el0_sys
	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
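	// as in el1_sync: the debug ECs sort last, so '>=' matches them all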
	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv

#ifdef CONFIG_COMPAT
	.align	6
el0_sync_compat:
	kernel_entry 0, 32
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_SVC32		// SVC in 32-bit state
	b.eq	el0_svc_compat
	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_ELx_EC_FP_EXC32	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP15_32	// CP15 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP15_64	// CP15 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_MR	// CP14 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_LS	// CP14 LDC/STC trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_64	// CP14 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv
el0_svc_compat:
	/*
	 * AArch32 syscall handling
	 */
	adrp	stbl, compat_sys_call_table	// load compat syscall table pointer
	uxtw	scno, w7			// syscall number in w7 (r7)
	mov	sc_nr, #__NR_compat_syscalls
	b	el0_svc_naked

	.align	6
el0_irq_compat:
	kernel_entry 0, 32
	b	el0_irq_naked
#endif

el0_da:
	/*
	 * Data abort handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
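	// The top byte of a user fault address is ignored by the MMU (TBI),
	// so clear the tag before passing the address to do_mem_abort().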
	bic	x0, x26, #(0xff << 56)
	mov	x1, x25
	mov	x2, sp
	bl	do_mem_abort
	b	ret_to_user
el0_ia:
	/*
	 * Instruction abort handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_mem_abort
	b	ret_to_user
el0_fpsimd_acc:
	/*
	 * Floating Point or Advanced SIMD access
	 */
	enable_dbg
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_fpsimd_acc
	b	ret_to_user
el0_fpsimd_exc:
	/*
	 * Floating Point or Advanced SIMD exception
	 */
	enable_dbg
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_fpsimd_exc
	b	ret_to_user
el0_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_sp_pc_abort
	b	ret_to_user
el0_undef:
	/*
	 * Undefined instruction
	 */
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, sp
	bl	do_undefinstr
	b	ret_to_user
el0_sys:
	/*
	 * System instructions, for trapped cache maintenance instructions
	 */
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_sysinstr
	b	ret_to_user
el0_dbg:
	/*
	 * Debug exception handling
	 */
	tbnz	x24, #0, el0_inv		// EL0 only
	mrs	x0, far_el1
	mov	x1, x25
	mov	x2, sp
	bl	do_debug_exception
	enable_dbg
	ct_user_exit
	b	ret_to_user
el0_inv:
	enable_dbg
	ct_user_exit
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mov	x2, x25
	bl	bad_mode
	b	ret_to_user
ENDPROC(el0_sync)

	.align	6
el0_irq:
	kernel_entry 0
el0_irq_naked:
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	ct_user_exit
	irq_handler

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	b	ret_to_user
ENDPROC(el0_irq)

/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 *   x0 = previous task_struct (must be preserved across the switch)
 *   x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 *
 */
ENTRY(cpu_switch_to)
	mov	x10, #THREAD_CPU_CONTEXT
	add	x8, x0, x10
	mov	x9, sp
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	str	lr, [x8]
	add	x8, x1, x10
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	ldr	lr, [x8]
	mov	sp, x9
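	// keep sp_el0 pointing at the new task's thread_info, matching the
	// convention relied upon by get_thread_info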
	and	x9, x9, #~(THREAD_SIZE - 1)
	msr	sp_el0, x9
	ret
ENDPROC(cpu_switch_to)

/*
 * This is the fast syscall return path.  We do as little as possible here,
 * and this includes saving x0 back into the kernel stack.
 */
ret_fast_syscall:
	disable_irq				// disable interrupts
	str	x0, [sp, #S_X0]			// returned x0
	ldr	x1, [tsk, #TI_FLAGS]		// re-check for syscall tracing
	and	x2, x1, #_TIF_SYSCALL_WORK
	cbnz	x2, ret_fast_syscall_trace
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
	enable_step_tsk x1, x2
	kernel_exit 0
ret_fast_syscall_trace:
	enable_irq				// enable interrupts
	b	__sys_trace_return_skipped	// we already saved x0

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
work_pending:
	tbnz	x1, #TIF_NEED_RESCHED, work_resched
	/* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */
	mov	x0, sp				// 'regs'
	enable_irq				// enable interrupts for do_notify_resume()
	bl	do_notify_resume
	b	ret_to_user
work_resched:
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off		// the IRQs are off here, inform the tracing code
#endif
	bl	schedule

/*
 * "slow" syscall return path.
 */
ret_to_user:
	disable_irq				// disable interrupts
	ldr	x1, [tsk, #TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
	enable_step_tsk x1, x2
	kernel_exit 0
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
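	// copy_thread() leaves the thread function in x19 and its argument
	// in x20 for kernel threads; x19 is zero for user tasks.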
	cbz	x19, 1f				// not a kernel thread
	mov	x0, x20
	blr	x19
1:	get_thread_info tsk
	b	ret_to_user
ENDPROC(ret_from_fork)
738 | ||
739 | /* | |
740 | * SVC handler. | |
741 | */ | |
742 | .align 6 | |
743 | el0_svc: | |
744 | adrp stbl, sys_call_table // load syscall table pointer | |
745 | uxtw scno, w8 // syscall number in w8 | |
746 | mov sc_nr, #__NR_syscalls | |
747 | el0_svc_naked: // compat entry point | |
748 | stp x0, scno, [sp, #S_ORIG_X0] // save the original x0 and syscall number | |
2a283070 | 749 | enable_dbg_and_irq |
6c81fe79 | 750 | ct_user_exit 1 |
60ffc30d | 751 | |
449f81a4 AT |
752 | ldr x16, [tsk, #TI_FLAGS] // check for syscall hooks |
753 | tst x16, #_TIF_SYSCALL_WORK | |
754 | b.ne __sys_trace | |
60ffc30d CM |
755 | cmp scno, sc_nr // check upper syscall limit |
756 | b.hs ni_sys | |
757 | ldr x16, [stbl, scno, lsl #3] // address in the syscall table | |
d54e81f9 WD |
758 | blr x16 // call sys_* routine |
759 | b ret_fast_syscall | |
60ffc30d CM |
760 | ni_sys: |
761 | mov x0, sp | |
d54e81f9 WD |
762 | bl do_ni_syscall |
763 | b ret_fast_syscall | |
60ffc30d CM |
764 | ENDPROC(el0_svc) |

/*
 * This is the really slow path.  We're going to be doing context
 * switches, and waiting for our parent to respond.
 */
__sys_trace:
	mov	w0, #-1				// set default errno for
	cmp	scno, x0			// user-issued syscall(-1)
	b.ne	1f
	mov	x0, #-ENOSYS
	str	x0, [sp, #S_X0]
1:	mov	x0, sp
	bl	syscall_trace_enter
	cmp	w0, #-1				// skip the syscall?
	b.eq	__sys_trace_return_skipped
	uxtw	scno, w0			// syscall number (possibly new)
	mov	x1, sp				// pointer to regs
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	__ni_sys_trace
	ldp	x0, x1, [sp]			// restore the syscall args
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	blr	x16				// call sys_* routine

__sys_trace_return:
	str	x0, [sp, #S_X0]			// save returned x0
__sys_trace_return_skipped:
	mov	x0, sp
	bl	syscall_trace_exit
	b	ret_to_user

__ni_sys_trace:
	mov	x0, sp
	bl	do_ni_syscall
	b	__sys_trace_return

	.popsection				// .entry.text

/*
 * Special system call wrappers.
 */
ENTRY(sys_rt_sigreturn_wrapper)
	mov	x0, sp
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)