/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/irq.h>
#include <asm/memory.h>
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-uaccess.h>
#include <asm/unistd.h>

/*
 * Context tracking subsystem. Used to instrument transitions
 * between user and kernel mode.
 */
	.macro ct_user_exit, syscall = 0
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_exit
	.if \syscall == 1
	/*
	 * Save/restore needed during syscalls. Restore syscall arguments from
	 * the values already saved on stack during kernel_entry.
	 */
	ldp	x0, x1, [sp]
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	.endif
#endif
	.endm
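
/*
 * context_tracking_user_exit() is an external C call, so it may clobber
 * the caller-saved argument registers; hence the reload of x0-x7 above
 * on the syscall path, before the syscall arguments are consumed.
 */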

	.macro ct_user_enter
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_enter
#endif
	.endm

/*
 * Bad Abort numbers
 *-----------------
 */
#define BAD_SYNC	0
#define BAD_IRQ		1
#define BAD_FIQ		2
#define BAD_ERROR	3

	.macro kernel_ventry, el, label, regsize = 64
	.align 7
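	/*
	 * Under KPTI, tramp_ventry (below) parks the user x30 in
	 * tpidrro_el0 before vectoring here; recover it for 64-bit tasks
	 * and clear the scratch value. Compat entries never stash x30
	 * this way, so for them x30 is simply zeroed.
	 */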
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
alternative_if ARM64_UNMAP_KERNEL_AT_EL0
	.if	\el == 0
	.if	\regsize == 64
	mrs	x30, tpidrro_el0
	msr	tpidrro_el0, xzr
	.else
	mov	x30, xzr
	.endif
	.endif
alternative_else_nop_endif
#endif

	sub	sp, sp, #S_FRAME_SIZE
#ifdef CONFIG_VMAP_STACK
	/*
	 * Test whether the SP has overflowed, without corrupting a GPR.
	 * Task and IRQ stacks are aligned to (1 << THREAD_SHIFT).
	 */
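	/*
	 * Sketch of why one bit test suffices (assuming the alignment
	 * noted above keeps bit THREAD_SHIFT of the stack base clear):
	 * any SP still inside the stack leaves that bit clear even after
	 * the S_FRAME_SIZE subtraction, while underflowing the base
	 * borrows into the bit and sets it.
	 */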
	add	sp, sp, x0			// sp' = sp + x0
	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
	tbnz	x0, #THREAD_SHIFT, 0f
	sub	x0, sp, x0			// x0'' = sp' - x0' = (sp + x0) - sp = x0
	sub	sp, sp, x0			// sp'' = sp' - x0 = (sp + x0) - x0 = sp
	b	el\()\el\()_\label

0:
	/*
	 * Either we've just detected an overflow, or we've taken an exception
	 * while on the overflow stack. Either way, we won't return to
	 * userspace, and can clobber EL0 registers to free up GPRs.
	 */

	/* Stash the original SP (minus S_FRAME_SIZE) in tpidr_el0. */
	msr	tpidr_el0, x0

	/* Recover the original x0 value and stash it in tpidrro_el0 */
	sub	x0, sp, x0
	msr	tpidrro_el0, x0

	/* Switch to the overflow stack */
	adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0

	/*
	 * Check whether we were already on the overflow stack. This may happen
	 * after panic() re-enables interrupts.
	 */
	mrs	x0, tpidr_el0			// sp of interrupted context
	sub	x0, sp, x0			// delta with top of overflow stack
	tst	x0, #~(OVERFLOW_STACK_SIZE - 1)	// within range?
	b.ne	__bad_stack			// no? -> bad stack pointer

	/* We were already on the overflow stack. Restore sp/x0 and carry on. */
	sub	sp, sp, x0
	mrs	x0, tpidrro_el0
#endif
	b	el\()\el\()_\label
	.endm

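/*
 * tramp_alias computes the address of \sym as seen through the fixmap
 * alias of the trampoline text: TRAMP_VALIAS is the base of that alias
 * mapping, and (\sym - .entry.tramp.text) is the symbol's offset within
 * the section.
 */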
	.macro tramp_alias, dst, sym
	mov_q	\dst, TRAMP_VALIAS
	add	\dst, \dst, #(\sym - .entry.tramp.text)
	.endm

	.macro	kernel_entry, el, regsize = 64
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	stp	x0, x1, [sp, #16 * 0]
	stp	x2, x3, [sp, #16 * 1]
	stp	x4, x5, [sp, #16 * 2]
	stp	x6, x7, [sp, #16 * 3]
	stp	x8, x9, [sp, #16 * 4]
	stp	x10, x11, [sp, #16 * 5]
	stp	x12, x13, [sp, #16 * 6]
	stp	x14, x15, [sp, #16 * 7]
	stp	x16, x17, [sp, #16 * 8]
	stp	x18, x19, [sp, #16 * 9]
	stp	x20, x21, [sp, #16 * 10]
	stp	x22, x23, [sp, #16 * 11]
	stp	x24, x25, [sp, #16 * 12]
	stp	x26, x27, [sp, #16 * 13]
	stp	x28, x29, [sp, #16 * 14]

	.if	\el == 0
	mrs	x21, sp_el0
	ldr_this_cpu	tsk, __entry_task, x20	// Ensure MDSCR_EL1.SS is clear,
	ldr	x19, [tsk, #TSK_TI_FLAGS]	// since we can unmask debug
	disable_step_tsk x19, x20		// exceptions when scheduling.

	mov	x29, xzr			// fp pointed to user-space
	.else
	add	x21, sp, #S_FRAME_SIZE
	get_thread_info tsk
	/* Save the task's original addr_limit and set USER_DS */
	ldr	x20, [tsk, #TSK_TI_ADDR_LIMIT]
	str	x20, [sp, #S_ORIG_ADDR_LIMIT]
	mov	x20, #USER_DS
	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
	/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
	.endif /* \el == 0 */
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]

	/*
	 * In order to be able to dump the contents of struct pt_regs at the
	 * time the exception was taken (in case we attempt to walk the call
	 * stack later), chain it together with the stack frames.
	 */
	.if \el == 0
	stp	xzr, xzr, [sp, #S_STACKFRAME]
	.else
	stp	x29, x22, [sp, #S_STACKFRAME]
	.endif
	add	x29, sp, #S_STACKFRAME

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
	 * EL0, there is no need to check the state of TTBR0_EL1 since
	 * accesses are always enabled.
	 * Note that the meaning of this bit differs from the ARMv8.1 PAN
	 * feature as all TTBR0_EL1 accesses are disabled, not just those to
	 * user mappings.
	 */
alternative_if ARM64_HAS_PAN
	b	1f				// skip TTBR0 PAN
alternative_else_nop_endif

	.if	\el != 0
	mrs	x21, ttbr0_el1
	tst	x21, #TTBR_ASID_MASK		// Check for the reserved ASID
	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
	b.eq	1f				// TTBR0 access already disabled
	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
	.endif

	__uaccess_ttbr0_disable x21
1:
#endif

	stp	x22, x23, [sp, #S_PC]

	/* Not in a syscall by default (el0_svc overwrites for real syscall) */
	.if	\el == 0
	mov	w21, #NO_SYSCALL
	str	w21, [sp, #S_SYSCALLNO]
	.endif

	/*
	 * Set sp_el0 to current thread_info.
	 */
	.if	\el == 0
	msr	sp_el0, tsk
	.endif

	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	 */
	.endm
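
/*
 * While we remain in the kernel, sp_el0 (set above on entry from EL0,
 * and left untouched for EL1 entries) carries the current task pointer,
 * which is what get_thread_info relies on.
 */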

	.macro	kernel_exit, el
	.if	\el != 0
	disable_daif

	/* Restore the task's original addr_limit. */
	ldr	x20, [sp, #S_ORIG_ADDR_LIMIT]
	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]

	/* No need to restore UAO, it will be restored from SPSR_EL1 */
	.endif

	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
	.if	\el == 0
	ct_user_enter
	.endif

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
	 * PAN bit checking.
	 */
alternative_if ARM64_HAS_PAN
	b	2f				// skip TTBR0 PAN
alternative_else_nop_endif

	.if	\el != 0
	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
	.endif

	__uaccess_ttbr0_enable x0, x1

	.if	\el == 0
	/*
	 * Enable errata workarounds only if returning to user. The only
	 * workaround currently required for TTBR0_EL1 changes are for the
	 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
	 * corruption).
	 */
	bl	post_ttbr_update_workaround
	.endif
1:
	.if	\el != 0
	and	x22, x22, #~PSR_PAN_BIT		// ARMv8.0 CPUs do not understand this bit
	.endif
2:
#endif

	.if	\el == 0
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	msr	sp_el0, x23
	tst	x22, #PSR_MODE32_BIT		// native task?
	b.eq	3f

#ifdef CONFIG_ARM64_ERRATUM_845719
alternative_if ARM64_WORKAROUND_845719
#ifdef CONFIG_PID_IN_CONTEXTIDR
	mrs	x29, contextidr_el1
	msr	contextidr_el1, x29
#else
	msr	contextidr_el1, xzr
#endif
alternative_else_nop_endif
#endif
3:
	.endif

	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	ldp	x0, x1, [sp, #16 * 0]
	ldp	x2, x3, [sp, #16 * 1]
	ldp	x4, x5, [sp, #16 * 2]
	ldp	x6, x7, [sp, #16 * 3]
	ldp	x8, x9, [sp, #16 * 4]
	ldp	x10, x11, [sp, #16 * 5]
	ldp	x12, x13, [sp, #16 * 6]
	ldp	x14, x15, [sp, #16 * 7]
	ldp	x16, x17, [sp, #16 * 8]
	ldp	x18, x19, [sp, #16 * 9]
	ldp	x20, x21, [sp, #16 * 10]
	ldp	x22, x23, [sp, #16 * 11]
	ldp	x24, x25, [sp, #16 * 12]
	ldp	x26, x27, [sp, #16 * 13]
	ldp	x28, x29, [sp, #16 * 14]
	ldr	lr, [sp, #S_LR]
	add	sp, sp, #S_FRAME_SIZE		// restore sp

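	/*
	 * Under KPTI the eret below is patched to a nop, so we fall
	 * through to the trampoline exit. The bne reuses the NZCV flags
	 * from the PSR_MODE32_BIT test above (nothing in between touches
	 * them) to pick the native or compat exit, and x30 is parked in
	 * far_el1 for tramp_exit_native to restore.
	 */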
	.if	\el == 0
alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	bne	4f
	msr	far_el1, x30
	tramp_alias	x30, tramp_exit_native
	br	x30
4:
	tramp_alias	x30, tramp_exit_compat
	br	x30
#endif
	.else
	eret
	.endif
	.endm

	.macro	irq_stack_entry
	mov	x19, sp			// preserve the original sp

	/*
	 * Compare sp with the base of the task stack.
	 * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
	 * and should switch to the irq stack.
	 */
	ldr	x25, [tsk, TSK_STACK]
	eor	x25, x25, x19
	and	x25, x25, #~(THREAD_SIZE - 1)
	cbnz	x25, 9998f

	ldr_this_cpu x25, irq_stack_ptr, x26
	mov	x26, #IRQ_STACK_SIZE
	add	x26, x25, x26

	/* switch to the irq stack */
	mov	sp, x26
9998:
	.endm

/*
 * x19 should be preserved between irq_stack_entry and
 * irq_stack_exit.
 */
	.macro	irq_stack_exit
	mov	sp, x19
	.endm

/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - x0 to x6.
 *
 * x7 is reserved for the system call number in 32-bit mode.
 */
wsc_nr	.req	w25		// number of system calls
xsc_nr	.req	x25		// number of system calls (zero-extended)
wscno	.req	w26		// syscall number
xscno	.req	x26		// syscall number (zero-extended)
stbl	.req	x27		// syscall table pointer
tsk	.req	x28		// current thread_info
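
/*
 * All of the above aliases live in x25-x28, i.e. in AAPCS64
 * callee-saved registers, so their values survive the C functions
 * called from this file.
 */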

/*
 * Interrupt handling.
 */
	.macro	irq_handler
	ldr_l	x1, handle_arch_irq
	mov	x0, sp
	irq_stack_entry
	blr	x1
	irq_stack_exit
	.endm
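
/*
 * handle_arch_irq is a C function pointer installed by the interrupt
 * controller driver (via set_handle_irq()); irq_handler brackets the
 * indirect call with the switch to, and back from, the per-CPU IRQ
 * stack.
 */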

	.text

/*
 * Exception vectors.
 */
	.pushsection ".entry.text", "ax"

	.align	11
ENTRY(vectors)
	kernel_ventry	1, sync_invalid			// Synchronous EL1t
	kernel_ventry	1, irq_invalid			// IRQ EL1t
	kernel_ventry	1, fiq_invalid			// FIQ EL1t
	kernel_ventry	1, error_invalid		// Error EL1t

	kernel_ventry	1, sync				// Synchronous EL1h
	kernel_ventry	1, irq				// IRQ EL1h
	kernel_ventry	1, fiq_invalid			// FIQ EL1h
	kernel_ventry	1, error			// Error EL1h

	kernel_ventry	0, sync				// Synchronous 64-bit EL0
	kernel_ventry	0, irq				// IRQ 64-bit EL0
	kernel_ventry	0, fiq_invalid			// FIQ 64-bit EL0
	kernel_ventry	0, error			// Error 64-bit EL0

#ifdef CONFIG_COMPAT
	kernel_ventry	0, sync_compat, 32		// Synchronous 32-bit EL0
	kernel_ventry	0, irq_compat, 32		// IRQ 32-bit EL0
	kernel_ventry	0, fiq_invalid_compat, 32	// FIQ 32-bit EL0
	kernel_ventry	0, error_compat, 32		// Error 32-bit EL0
#else
	kernel_ventry	0, sync_invalid, 32		// Synchronous 32-bit EL0
	kernel_ventry	0, irq_invalid, 32		// IRQ 32-bit EL0
	kernel_ventry	0, fiq_invalid, 32		// FIQ 32-bit EL0
	kernel_ventry	0, error_invalid, 32		// Error 32-bit EL0
#endif
END(vectors)

#ifdef CONFIG_VMAP_STACK
/*
 * We detected an overflow in kernel_ventry, which switched to the
 * overflow stack. Stash the exception regs, and head to our overflow
 * handler.
 */
__bad_stack:
	/* Restore the original x0 value */
	mrs	x0, tpidrro_el0

	/*
	 * Store the original GPRs to the new stack. The original SP (minus
	 * S_FRAME_SIZE) was stashed in tpidr_el0 by kernel_ventry.
	 */
	sub	sp, sp, #S_FRAME_SIZE
	kernel_entry 1
	mrs	x0, tpidr_el0
	add	x0, x0, #S_FRAME_SIZE
	str	x0, [sp, #S_SP]

	/* Stash the regs for handle_bad_stack */
	mov	x0, sp

	/* Time to die */
	bl	handle_bad_stack
	ASM_BUG()
#endif /* CONFIG_VMAP_STACK */

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, el, reason, regsize = 64
	kernel_entry \el, \regsize
	mov	x0, sp
	mov	x1, #\reason
	mrs	x2, esr_el1
	bl	bad_mode
	ASM_BUG()
	.endm

el0_sync_invalid:
	inv_entry 0, BAD_SYNC
ENDPROC(el0_sync_invalid)

el0_irq_invalid:
	inv_entry 0, BAD_IRQ
ENDPROC(el0_irq_invalid)

el0_fiq_invalid:
	inv_entry 0, BAD_FIQ
ENDPROC(el0_fiq_invalid)

el0_error_invalid:
	inv_entry 0, BAD_ERROR
ENDPROC(el0_error_invalid)

#ifdef CONFIG_COMPAT
el0_fiq_invalid_compat:
	inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)
#endif

el1_sync_invalid:
	inv_entry 1, BAD_SYNC
ENDPROC(el1_sync_invalid)

el1_irq_invalid:
	inv_entry 1, BAD_IRQ
ENDPROC(el1_irq_invalid)

el1_fiq_invalid:
	inv_entry 1, BAD_FIQ
ENDPROC(el1_fiq_invalid)

el1_error_invalid:
	inv_entry 1, BAD_ERROR
ENDPROC(el1_error_invalid)

/*
 * EL1 mode handlers.
 */
	.align	6
el1_sync:
	kernel_entry 1
	mrs	x1, esr_el1			// read the syndrome register
	lsr	x24, x1, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_DABT_CUR	// data abort in EL1
	b.eq	el1_da
	cmp	x24, #ESR_ELx_EC_IABT_CUR	// instruction abort in EL1
	b.eq	el1_ia
	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
	b.eq	el1_undef
	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL1
	b.eq	el1_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_CUR	// debug exception in EL1
	b.ge	el1_dbg
	b	el1_inv
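
/*
 * The b.ge above relies on the debug exception classes (breakpoint,
 * single-step, watchpoint, BRK) occupying the highest EC values we can
 * encounter here, so a single comparison routes them all to el1_dbg;
 * el1_dbg then rejects anything that is not an EL1 debug exception.
 */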

el1_ia:
	/*
	 * Fall through to the Data abort case
	 */
el1_da:
	/*
	 * Data abort handling
	 */
	mrs	x3, far_el1
	inherit_daif	pstate=x23, tmp=x2
	clear_address_tag x0, x3
	mov	x2, sp				// struct pt_regs
	bl	do_mem_abort

	kernel_exit 1
el1_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x0, far_el1
	inherit_daif	pstate=x23, tmp=x2
	mov	x2, sp
	bl	do_sp_pc_abort
	ASM_BUG()
el1_undef:
	/*
	 * Undefined instruction
	 */
	inherit_daif	pstate=x23, tmp=x2
	mov	x0, sp
	bl	do_undefinstr
	ASM_BUG()
el1_dbg:
	/*
	 * Debug exception handling
	 */
	cmp	x24, #ESR_ELx_EC_BRK64		// if BRK64
	cinc	x24, x24, eq			// set bit '0'
	tbz	x24, #0, el1_inv		// EL1 only
	mrs	x0, far_el1
	mov	x2, sp				// struct pt_regs
	bl	do_debug_exception
	kernel_exit 1
el1_inv:
	// TODO: add support for undefined instructions in kernel mode
	inherit_daif	pstate=x23, tmp=x2
	mov	x0, sp
	mov	x2, x1
	mov	x1, #BAD_SYNC
	bl	bad_mode
	ASM_BUG()
ENDPROC(el1_sync)

	.align	6
el1_irq:
	kernel_entry 1
	enable_da_f
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	irq_handler

#ifdef CONFIG_PREEMPT
	ldr	w24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
	cbnz	w24, 1f				// preempt count != 0
	ldr	x0, [tsk, #TSK_TI_FLAGS]	// get flags
	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
	bl	el1_preempt
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	kernel_exit 1
ENDPROC(el1_irq)

#ifdef CONFIG_PREEMPT
el1_preempt:
	mov	x24, lr
1:	bl	preempt_schedule_irq		// irq en/disable is done inside
	ldr	x0, [tsk, #TSK_TI_FLAGS]	// get new task's TI_FLAGS
	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
	ret	x24
#endif

/*
 * EL0 mode handlers.
 */
	.align	6
el0_sync:
	kernel_entry 0
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_SVC64		// SVC in 64-bit state
	b.eq	el0_svc
	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_ELx_EC_SVE		// SVE access
	b.eq	el0_sve_acc
	cmp	x24, #ESR_ELx_EC_FP_EXC64	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
	b.eq	el0_sys
	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv

#ifdef CONFIG_COMPAT
	.align	6
el0_sync_compat:
	kernel_entry 0, 32
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_SVC32		// SVC in 32-bit state
	b.eq	el0_svc_compat
	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_ELx_EC_FP_EXC32	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP15_32	// CP15 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP15_64	// CP15 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_MR	// CP14 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_LS	// CP14 LDC/STC trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_64	// CP14 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv
el0_svc_compat:
	/*
	 * AArch32 syscall handling
	 */
	ldr	x16, [tsk, #TSK_TI_FLAGS]	// load thread flags
	adrp	stbl, compat_sys_call_table	// load compat syscall table pointer
	mov	wscno, w7			// syscall number in w7 (r7)
	mov	wsc_nr, #__NR_compat_syscalls
	b	el0_svc_naked

	.align	6
el0_irq_compat:
	kernel_entry 0, 32
	b	el0_irq_naked

el0_error_compat:
	kernel_entry 0, 32
	b	el0_error_naked
#endif

el0_da:
	/*
	 * Data abort handling
	 */
	mrs	x26, far_el1
	enable_daif
	ct_user_exit
	clear_address_tag x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_mem_abort
	b	ret_to_user
el0_ia:
	/*
	 * Instruction abort handling
	 */
	mrs	x26, far_el1
	enable_da_f
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	ct_user_exit
	mov	x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_el0_ia_bp_hardening
	b	ret_to_user
el0_fpsimd_acc:
	/*
	 * Floating Point or Advanced SIMD access
	 */
	enable_daif
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_fpsimd_acc
	b	ret_to_user
el0_sve_acc:
	/*
	 * Scalable Vector Extension access
	 */
	enable_daif
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_sve_acc
	b	ret_to_user
el0_fpsimd_exc:
	/*
	 * Floating Point, Advanced SIMD or SVE exception
	 */
	enable_daif
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_fpsimd_exc
	b	ret_to_user
el0_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x26, far_el1
	enable_da_f
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	ct_user_exit
	mov	x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_sp_pc_abort
	b	ret_to_user
el0_undef:
	/*
	 * Undefined instruction
	 */
	enable_daif
	ct_user_exit
	mov	x0, sp
	bl	do_undefinstr
	b	ret_to_user
el0_sys:
	/*
	 * System instructions, for trapped cache maintenance instructions
	 */
	enable_daif
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_sysinstr
	b	ret_to_user
el0_dbg:
	/*
	 * Debug exception handling
	 */
	tbnz	x24, #0, el0_inv		// EL0 only
	mrs	x0, far_el1
	mov	x1, x25
	mov	x2, sp
	bl	do_debug_exception
	enable_daif
	ct_user_exit
	b	ret_to_user
el0_inv:
	enable_daif
	ct_user_exit
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mov	x2, x25
	bl	bad_el0_sync
	b	ret_to_user
ENDPROC(el0_sync)

	.align	6
el0_irq:
	kernel_entry 0
el0_irq_naked:
	enable_da_f
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	ct_user_exit
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
	tbz	x22, #55, 1f
	bl	do_el0_irq_bp_hardening
1:
#endif
	irq_handler

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	b	ret_to_user
ENDPROC(el0_irq)

el1_error:
	kernel_entry 1
	mrs	x1, esr_el1
	enable_dbg
	mov	x0, sp
	bl	do_serror
	kernel_exit 1
ENDPROC(el1_error)

el0_error:
	kernel_entry 0
el0_error_naked:
	mrs	x1, esr_el1
	enable_dbg
	mov	x0, sp
	bl	do_serror
	enable_daif
	ct_user_exit
	b	ret_to_user
ENDPROC(el0_error)


/*
 * This is the fast syscall return path.  We do as little as possible here,
 * and this includes saving x0 back into the kernel stack.
 */
ret_fast_syscall:
	disable_daif
	str	x0, [sp, #S_X0]			// returned x0
	ldr	x1, [tsk, #TSK_TI_FLAGS]	// re-check for syscall tracing
	and	x2, x1, #_TIF_SYSCALL_WORK
	cbnz	x2, ret_fast_syscall_trace
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
	enable_step_tsk x1, x2
	kernel_exit 0
ret_fast_syscall_trace:
	enable_daif
	b	__sys_trace_return_skipped	// we already saved x0

/*
 * Ok, we need to do extra processing; enter the slow path.
 */
work_pending:
	mov	x0, sp				// 'regs'
	bl	do_notify_resume
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on		// enabled while in userspace
#endif
	ldr	x1, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
	b	finish_ret_to_user
/*
 * "slow" syscall return path.
 */
ret_to_user:
	disable_daif
	ldr	x1, [tsk, #TSK_TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
finish_ret_to_user:
	enable_step_tsk x1, x2
	kernel_exit 0
ENDPROC(ret_to_user)

/*
 * SVC handler.
 */
	.align	6
el0_svc:
	ldr	x16, [tsk, #TSK_TI_FLAGS]	// load thread flags
	adrp	stbl, sys_call_table		// load syscall table pointer
	mov	wscno, w8			// syscall number in w8
	mov	wsc_nr, #__NR_syscalls

#ifdef CONFIG_ARM64_SVE
alternative_if_not ARM64_SVE
	b	el0_svc_naked
alternative_else_nop_endif
	tbz	x16, #TIF_SVE, el0_svc_naked	// Skip unless TIF_SVE set:
	bic	x16, x16, #_TIF_SVE		// discard SVE state
	str	x16, [tsk, #TSK_TI_FLAGS]

	/*
	 * task_fpsimd_load() won't be called to update CPACR_EL1 in
	 * ret_to_user unless TIF_FOREIGN_FPSTATE is still set, which only
	 * happens if a context switch or kernel_neon_begin() or context
	 * modification (sigreturn, ptrace) intervenes.
	 * So, ensure that CPACR_EL1 is already correct for the fast-path case:
	 */
	mrs	x9, cpacr_el1
	bic	x9, x9, #CPACR_EL1_ZEN_EL0EN	// disable SVE for el0
	msr	cpacr_el1, x9			// synchronised by eret to el0
#endif

el0_svc_naked:					// compat entry point
	stp	x0, xscno, [sp, #S_ORIG_X0]	// save the original x0 and syscall number
	enable_daif
	ct_user_exit 1

	tst	x16, #_TIF_SYSCALL_WORK		// check for syscall hooks
	b.ne	__sys_trace
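	/*
	 * Dispatch: each sys_call_table entry is a 64-bit function
	 * pointer, so the handler lives at stbl + scno * 8 (the lsl #3
	 * below). mask_nospec64 clamps the index under speculative
	 * execution, so the bounds check cannot be bypassed
	 * Spectre-v1 style.
	 */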
	cmp	wscno, wsc_nr			// check upper syscall limit
	b.hs	ni_sys
	mask_nospec64 xscno, xsc_nr, x19	// enforce bounds for syscall number
	ldr	x16, [stbl, xscno, lsl #3]	// address in the syscall table
	blr	x16				// call sys_* routine
	b	ret_fast_syscall
ni_sys:
	mov	x0, sp
	bl	do_ni_syscall
	b	ret_fast_syscall
ENDPROC(el0_svc)

/*
 * This is the really slow path.  We're going to be doing context
 * switches, and waiting for our parent to respond.
 */
__sys_trace:
	cmp	wscno, #NO_SYSCALL		// user-issued syscall(-1)?
	b.ne	1f
	mov	x0, #-ENOSYS			// set default errno if so
	str	x0, [sp, #S_X0]
1:	mov	x0, sp
	bl	syscall_trace_enter
	cmp	w0, #NO_SYSCALL			// skip the syscall?
	b.eq	__sys_trace_return_skipped
	mov	wscno, w0			// syscall number (possibly new)
	mov	x1, sp				// pointer to regs
	cmp	wscno, wsc_nr			// check upper syscall limit
	b.hs	__ni_sys_trace
	ldp	x0, x1, [sp]			// restore the syscall args
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	ldr	x16, [stbl, xscno, lsl #3]	// address in the syscall table
	blr	x16				// call sys_* routine

__sys_trace_return:
	str	x0, [sp, #S_X0]			// save returned x0
__sys_trace_return_skipped:
	mov	x0, sp
	bl	syscall_trace_exit
	b	ret_to_user

__ni_sys_trace:
	mov	x0, sp
	bl	do_ni_syscall
	b	__sys_trace_return

	.popsection				// .entry.text

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
/*
 * Exception vectors trampoline.
 */
	.pushsection ".entry.tramp.text", "ax"

	.macro tramp_map_kernel, tmp
	mrs	\tmp, ttbr1_el1
	sub	\tmp, \tmp, #(SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
	bic	\tmp, \tmp, #USER_ASID_FLAG
	msr	ttbr1_el1, \tmp
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
	/* ASID already in \tmp[63:48] */
	movk	\tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
	movk	\tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
	/* 2MB boundary containing the vectors, so we nobble the walk cache */
	movk	\tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
	isb
	tlbi	vae1, \tmp
	dsb	nsh
alternative_else_nop_endif
#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
	.endm

	.macro tramp_unmap_kernel, tmp
	mrs	\tmp, ttbr1_el1
	add	\tmp, \tmp, #(SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
	orr	\tmp, \tmp, #USER_ASID_FLAG
	msr	ttbr1_el1, \tmp
	/*
	 * We avoid running the post_ttbr_update_workaround here because
	 * it's only needed by Cavium ThunderX, which requires KPTI to be
	 * disabled.
	 */
	.endm
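
/*
 * The map/unmap pair above retargets TTBR1_EL1 between the trampoline
 * page tables and swapper_pg_dir by a fixed offset (the two sets of
 * tables sit at a fixed distance in the kernel image), while
 * USER_ASID_FLAG flips between the kernel and user ASIDs so no TLB
 * maintenance is needed on the fast path.
 */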

	.macro tramp_ventry, regsize = 64
	.align	7
1:
	.if	\regsize == 64
	msr	tpidrro_el0, x30	// Restored in kernel_ventry
	.endif
	/*
	 * Defend against branch aliasing attacks by pushing a dummy
	 * entry onto the return stack and using a RET instruction to
	 * enter the full-fat kernel vectors.
	 */
	bl	2f
	b	.
2:
	tramp_map_kernel	x30
#ifdef CONFIG_RANDOMIZE_BASE
	adr	x30, tramp_vectors + PAGE_SIZE
alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
	ldr	x30, [x30]
#else
	ldr	x30, =vectors
#endif
	prfm	plil1strm, [x30, #(1b - tramp_vectors)]
	msr	vbar_el1, x30
	add	x30, x30, #(1b - tramp_vectors)
	isb
	ret
	.endm

	.macro tramp_exit, regsize = 64
	adr	x30, tramp_vectors
	msr	vbar_el1, x30
	tramp_unmap_kernel	x30
	.if	\regsize == 64
	mrs	x30, far_el1
	.endif
	eret
	.endm

	.align	11
ENTRY(tramp_vectors)
	.space	0x400

	tramp_ventry
	tramp_ventry
	tramp_ventry
	tramp_ventry

	tramp_ventry	32
	tramp_ventry	32
	tramp_ventry	32
	tramp_ventry	32
END(tramp_vectors)

ENTRY(tramp_exit_native)
	tramp_exit
END(tramp_exit_native)

ENTRY(tramp_exit_compat)
	tramp_exit	32
END(tramp_exit_compat)

	.ltorg
	.popsection				// .entry.tramp.text
#ifdef CONFIG_RANDOMIZE_BASE
	.pushsection ".rodata", "a"
	.align PAGE_SHIFT
	.globl	__entry_tramp_data_start
__entry_tramp_data_start:
	.quad	vectors
	.popsection				// .rodata
#endif /* CONFIG_RANDOMIZE_BASE */
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */

/*
 * Special system call wrappers.
 */
ENTRY(sys_rt_sigreturn_wrapper)
	mov	x0, sp
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 * x0 = previous task_struct (must be preserved across the switch)
 * x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 *
 */
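/*
 * THREAD_CPU_CONTEXT is the offset of thread.cpu_context within
 * task_struct; only the callee-saved state lives there, since
 * everything else was already saved on the kernel stack at exception
 * entry. The final msr keeps sp_el0 (i.e. 'current') tracking the new
 * task.
 */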
ENTRY(cpu_switch_to)
	mov	x10, #THREAD_CPU_CONTEXT
	add	x8, x0, x10
	mov	x9, sp
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	str	lr, [x8]
	add	x8, x1, x10
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	ldr	lr, [x8]
	mov	sp, x9
	msr	sp_el0, x1
	ret
ENDPROC(cpu_switch_to)
NOKPROBE(cpu_switch_to)

/*
 * This is how we return from a fork.
 */
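/*
 * For kernel threads, copy_thread() parks the thread function in x19
 * and its argument in x20 (restored by cpu_switch_to above); user tasks
 * reach here with x19 == 0 and head straight back to userspace.
 */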
ENTRY(ret_from_fork)
	bl	schedule_tail
	cbz	x19, 1f				// not a kernel thread
	mov	x0, x20
	blr	x19
1:	get_thread_info tsk
	b	ret_to_user
ENDPROC(ret_from_fork)
NOKPROBE(ret_from_fork)

#ifdef CONFIG_ARM_SDE_INTERFACE

#include <asm/sdei.h>
#include <uapi/linux/arm_sdei.h>

/*
 * Software Delegated Exception entry point.
 *
 * x0: Event number
 * x1: struct sdei_registered_event argument from registration time.
 * x2: interrupted PC
 * x3: interrupted PSTATE
 *
 * Firmware has preserved x0->x17 for us; we must save/restore the rest
 * to follow SMC-CC. We save (or retrieve) all the registers as the
 * handler may want them.
 */
ENTRY(__sdei_asm_handler)
	stp	x2, x3, [x1, #SDEI_EVENT_INTREGS + S_PC]
	stp	x4, x5, [x1, #SDEI_EVENT_INTREGS + 16 * 2]
	stp	x6, x7, [x1, #SDEI_EVENT_INTREGS + 16 * 3]
	stp	x8, x9, [x1, #SDEI_EVENT_INTREGS + 16 * 4]
	stp	x10, x11, [x1, #SDEI_EVENT_INTREGS + 16 * 5]
	stp	x12, x13, [x1, #SDEI_EVENT_INTREGS + 16 * 6]
	stp	x14, x15, [x1, #SDEI_EVENT_INTREGS + 16 * 7]
	stp	x16, x17, [x1, #SDEI_EVENT_INTREGS + 16 * 8]
	stp	x18, x19, [x1, #SDEI_EVENT_INTREGS + 16 * 9]
	stp	x20, x21, [x1, #SDEI_EVENT_INTREGS + 16 * 10]
	stp	x22, x23, [x1, #SDEI_EVENT_INTREGS + 16 * 11]
	stp	x24, x25, [x1, #SDEI_EVENT_INTREGS + 16 * 12]
	stp	x26, x27, [x1, #SDEI_EVENT_INTREGS + 16 * 13]
	stp	x28, x29, [x1, #SDEI_EVENT_INTREGS + 16 * 14]
	mov	x4, sp
	stp	lr, x4, [x1, #SDEI_EVENT_INTREGS + S_LR]

	mov	x19, x1

#ifdef CONFIG_VMAP_STACK
	/*
	 * entry.S may have been using sp as a scratch register; find
	 * whether this is a normal or critical event and switch to the
	 * appropriate stack for this CPU.
	 */
	ldrb	w4, [x19, #SDEI_EVENT_PRIORITY]
	cbnz	w4, 1f
	ldr_this_cpu dst=x5, sym=sdei_stack_normal_ptr, tmp=x6
	b	2f
1:	ldr_this_cpu dst=x5, sym=sdei_stack_critical_ptr, tmp=x6
2:	mov	x6, #SDEI_STACK_SIZE
	add	x5, x5, x6
	mov	sp, x5
#endif

	/*
	 * We may have interrupted userspace, or a guest, or exit-from or
	 * return-to either of these. We can't trust sp_el0; restore it.
	 */
	mrs	x28, sp_el0
	ldr_this_cpu	dst=x0, sym=__entry_task, tmp=x1
	msr	sp_el0, x0

	/* If we interrupted the kernel, point to the previous stack/frame. */
	and	x0, x3, #0xc
	mrs	x1, CurrentEL
	cmp	x0, x1
	csel	x29, x29, xzr, eq	// fp, or zero
	csel	x4, x2, xzr, eq		// elr, or zero

	stp	x29, x4, [sp, #-16]!
	mov	x29, sp

	add	x0, x19, #SDEI_EVENT_INTREGS
	mov	x1, x19
	bl	__sdei_handler

	msr	sp_el0, x28
	/* restore regs >x17 that we clobbered */
	ldp	x28, x29, [x19, #SDEI_EVENT_INTREGS + 16 * 14]
	ldp	lr, x4, [x19, #SDEI_EVENT_INTREGS + S_LR]
	mov	sp, x4
	ldp	x18, x19, [x19, #SDEI_EVENT_INTREGS + 16 * 9]

	mov	x1, x0			// address to complete_and_resume
	/* x0 = (x0 <= 1) ? EVENT_COMPLETE:EVENT_COMPLETE_AND_RESUME */
	cmp	x0, #1
	mov_q	x2, SDEI_1_0_FN_SDEI_EVENT_COMPLETE
	mov_q	x3, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
	csel	x0, x2, x3, ls

	/* On success, this call never returns... */
	ldr_l	x2, sdei_exit_mode
	cmp	x2, #SDEI_EXIT_SMC
	b.ne	1f
	smc	#0
	b	.
1:	hvc	#0
	b	.
ENDPROC(__sdei_asm_handler)
NOKPROBE(__sdei_asm_handler)
#endif /* CONFIG_ARM_SDE_INTERFACE */