]> git.proxmox.com Git - mirror_edk2.git/blame - ArmPkg/Library/ArmExceptionLib/AArch64/ExceptionSupport.S
ArmPkg/ArmExceptionLib: use EL0 stack for synchronous exceptions
[mirror_edk2.git] / ArmPkg / Library / ArmExceptionLib / AArch64 / ExceptionSupport.S
CommitLineData
2939c778
EC
//
// Copyright (c) 2011 - 2014 ARM LTD. All rights reserved.<BR>
// Portion of Copyright (c) 2014 NVIDIA Corporation. All rights reserved.<BR>
// Copyright (c) 2016 HP Development Company, L.P.
//
// This program and the accompanying materials
// are licensed and made available under the terms and conditions of the BSD License
// which accompanies this distribution. The full text of the license may be found at
// http://opensource.org/licenses/bsd-license.php
//
// THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
// WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
//
//------------------------------------------------------------------------------
15\r
16#include <Chipset/AArch64.h>\r
17#include <Library/PcdLib.h>\r
18#include <AsmMacroIoLibV8.h>\r
19#include <Protocol/DebugSupport.h> // for exception type definitions\r
20\r
/*
  This is the stack constructed by the exception handler (low address to high address).
  X0 to FAR makes up the EFI_SYSTEM_CONTEXT for AArch64.

  UINT64  X0;     0x000
  UINT64  X1;     0x008
  UINT64  X2;     0x010
  UINT64  X3;     0x018
  UINT64  X4;     0x020
  UINT64  X5;     0x028
  UINT64  X6;     0x030
  UINT64  X7;     0x038
  UINT64  X8;     0x040
  UINT64  X9;     0x048
  UINT64  X10;    0x050
  UINT64  X11;    0x058
  UINT64  X12;    0x060
  UINT64  X13;    0x068
  UINT64  X14;    0x070
  UINT64  X15;    0x078
  UINT64  X16;    0x080
  UINT64  X17;    0x088
  UINT64  X18;    0x090
  UINT64  X19;    0x098
  UINT64  X20;    0x0a0
  UINT64  X21;    0x0a8
  UINT64  X22;    0x0b0
  UINT64  X23;    0x0b8
  UINT64  X24;    0x0c0
  UINT64  X25;    0x0c8
  UINT64  X26;    0x0d0
  UINT64  X27;    0x0d8
  UINT64  X28;    0x0e0
  UINT64  FP;     0x0e8   // x29 - Frame Pointer
  UINT64  LR;     0x0f0   // x30 - Link Register
  UINT64  SP;     0x0f8   // x31 - Stack Pointer

  // FP/SIMD Registers. 128bit if used as Q-regs.
  UINT64  V0[2];  0x100
  UINT64  V1[2];  0x110
  UINT64  V2[2];  0x120
  UINT64  V3[2];  0x130
  UINT64  V4[2];  0x140
  UINT64  V5[2];  0x150
  UINT64  V6[2];  0x160
  UINT64  V7[2];  0x170
  UINT64  V8[2];  0x180
  UINT64  V9[2];  0x190
  UINT64  V10[2]; 0x1a0
  UINT64  V11[2]; 0x1b0
  UINT64  V12[2]; 0x1c0
  UINT64  V13[2]; 0x1d0
  UINT64  V14[2]; 0x1e0
  UINT64  V15[2]; 0x1f0
  UINT64  V16[2]; 0x200
  UINT64  V17[2]; 0x210
  UINT64  V18[2]; 0x220
  UINT64  V19[2]; 0x230
  UINT64  V20[2]; 0x240
  UINT64  V21[2]; 0x250
  UINT64  V22[2]; 0x260
  UINT64  V23[2]; 0x270
  UINT64  V24[2]; 0x280
  UINT64  V25[2]; 0x290
  UINT64  V26[2]; 0x2a0
  UINT64  V27[2]; 0x2b0
  UINT64  V28[2]; 0x2c0
  UINT64  V29[2]; 0x2d0
  UINT64  V30[2]; 0x2e0
  UINT64  V31[2]; 0x2f0

  // System Context
  UINT64  ELR;    0x300   // Exception Link Register
  UINT64  SPSR;   0x308   // Saved Processor Status Register
  UINT64  FPSR;   0x310   // Floating Point Status Register
  UINT64  ESR;    0x318   // Exception syndrome register
  UINT64  FAR;    0x320   // Fault Address Register
  UINT64  Padding;0x328   // Required for stack alignment
*/
100\r
GCC_ASM_EXPORT(ExceptionHandlersEnd)
GCC_ASM_EXPORT(CommonCExceptionHandler)
GCC_ASM_EXPORT(RegisterEl0Stack)

.text

// Sizes of the three sections of the exception context structure described
// above. Together they add up to 0x330 bytes (0x328 of state + padding),
// which keeps the stack 16-byte aligned as AArch64 requires.
#define GP_CONTEXT_SIZE    (32 *  8)
#define FP_CONTEXT_SIZE    (32 * 16)
#define SYS_CONTEXT_SIZE   ( 6 *  8) // 5 SYS regs + Alignment requirement (ie: the stack must be aligned on 0x10)
110\r
//
// There are two methods for installing AArch64 exception vectors:
//  1. Install a copy of the vectors to a location specified by a PCD
//  2. Write VBAR directly, requiring that vectors have proper alignment (2K)
// The conditional below adjusts the alignment requirement based on which
// exception vector initialization method is used.
//

#if defined(ARM_RELOCATE_VECTORS)
// Relocated vectors: no alignment directive needed here, because the copy
// destination (from the PCD) provides the required 2K alignment.
GCC_ASM_EXPORT(ExceptionHandlersStart)
ASM_PFX(ExceptionHandlersStart):
#else
// In-place vectors: VECTOR_BASE emits the 2K alignment so VBAR can point here.
VECTOR_BASE(ExceptionHandlersStart)
#endif
125\r
2d120489
AB
126 .macro ExceptionEntry, val, sp=SPx\r
127 //\r
128 // Our backtrace and register dump code is written in C and so it requires\r
129 // a stack. This makes it difficult to produce meaningful diagnostics when\r
130 // the stack pointer has been corrupted. So in such cases (i.e., when taking\r
131 // synchronous exceptions), this macro is expanded with \sp set to SP0, in\r
132 // which case we switch to the SP_EL0 stack pointer, which has been\r
133 // initialized to point to a buffer that has been set aside for this purpose.\r
134 //\r
135 // Since 'sp' may no longer refer to the stack frame that was active when\r
136 // the exception was taken, we may have to switch back and forth between\r
137 // SP_EL0 and SP_ELx to record the correct value for SP in the context struct.\r
138 //\r
139 .ifnc \sp, SPx\r
140 msr SPsel, xzr\r
141 .endif\r
142\r
0dbbaa55
AB
143 // Move the stackpointer so we can reach our structure with the str instruction.\r
144 sub sp, sp, #(FP_CONTEXT_SIZE + SYS_CONTEXT_SIZE)\r
145\r
2d120489 146 // Push the GP registers so we can record the exception context\r
1b02a383
AB
147 stp x0, x1, [sp, #-GP_CONTEXT_SIZE]!\r
148 stp x2, x3, [sp, #0x10]\r
149 stp x4, x5, [sp, #0x20]\r
150 stp x6, x7, [sp, #0x30]\r
2d120489
AB
151 stp x8, x9, [sp, #0x40]\r
152 stp x10, x11, [sp, #0x50]\r
153 stp x12, x13, [sp, #0x60]\r
154 stp x14, x15, [sp, #0x70]\r
155 stp x16, x17, [sp, #0x80]\r
156 stp x18, x19, [sp, #0x90]\r
157 stp x20, x21, [sp, #0xa0]\r
158 stp x22, x23, [sp, #0xb0]\r
159 stp x24, x25, [sp, #0xc0]\r
160 stp x26, x27, [sp, #0xd0]\r
161 stp x28, x29, [sp, #0xe0]\r
162 add x28, sp, #(GP_CONTEXT_SIZE + FP_CONTEXT_SIZE + SYS_CONTEXT_SIZE)\r
1b02a383 163\r
2d120489
AB
164 .ifnc \sp, SPx\r
165 msr SPsel, #1\r
166 mov x7, sp\r
167 msr SPsel, xzr\r
168 .else\r
169 mov x7, x28\r
170 .endif\r
1b02a383 171\r
2d120489 172 stp x30, x7, [sp, #0xf0]\r
0dbbaa55
AB
173\r
174 // Record the type of exception that occurred.\r
175 mov x0, #\val\r
176\r
177 // Jump to our general handler to deal with all the common parts and process the exception.\r
5d7238ca 178#if defined(ARM_RELOCATE_VECTORS)\r
0dbbaa55
AB
179 ldr x1, =ASM_PFX(CommonExceptionEntry)\r
180 br x1\r
181 .ltorg\r
5d7238ca
AB
182#else\r
183 b ASM_PFX(CommonExceptionEntry)\r
184#endif\r
0dbbaa55
AB
185 .endm\r
186\r
//
// Current EL with SP0 : 0x0 - 0x180
//
VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_CUR_SP0_SYNC)
ASM_PFX(SynchronousExceptionSP0):
  ExceptionEntry  EXCEPT_AARCH64_SYNCHRONOUS_EXCEPTIONS

VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_CUR_SP0_IRQ)
ASM_PFX(IrqSP0):
  ExceptionEntry  EXCEPT_AARCH64_IRQ

VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_CUR_SP0_FIQ)
ASM_PFX(FiqSP0):
  ExceptionEntry  EXCEPT_AARCH64_FIQ

VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_CUR_SP0_SERR)
ASM_PFX(SErrorSP0):
  ExceptionEntry  EXCEPT_AARCH64_SERROR

//
// Current EL with SPx: 0x200 - 0x380
//
VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_CUR_SPx_SYNC)
ASM_PFX(SynchronousExceptionSPx):
  // Synchronous exceptions at the current EL may be caused by a corrupted
  // stack: handle them on the dedicated SP_EL0 stack (the SP0 macro arg).
  ExceptionEntry  EXCEPT_AARCH64_SYNCHRONOUS_EXCEPTIONS, SP0

VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_CUR_SPx_IRQ)
ASM_PFX(IrqSPx):
  ExceptionEntry  EXCEPT_AARCH64_IRQ

VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_CUR_SPx_FIQ)
ASM_PFX(FiqSPx):
  ExceptionEntry  EXCEPT_AARCH64_FIQ

VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_CUR_SPx_SERR)
ASM_PFX(SErrorSPx):
  ExceptionEntry  EXCEPT_AARCH64_SERROR

//
// Lower EL using AArch64 : 0x400 - 0x580
//
VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_LOW_A64_SYNC)
ASM_PFX(SynchronousExceptionA64):
  ExceptionEntry  EXCEPT_AARCH64_SYNCHRONOUS_EXCEPTIONS

VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_LOW_A64_IRQ)
ASM_PFX(IrqA64):
  ExceptionEntry  EXCEPT_AARCH64_IRQ

VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_LOW_A64_FIQ)
ASM_PFX(FiqA64):
  ExceptionEntry  EXCEPT_AARCH64_FIQ

VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_LOW_A64_SERR)
ASM_PFX(SErrorA64):
  ExceptionEntry  EXCEPT_AARCH64_SERROR

//
// Lower EL using AArch32 : 0x600 - 0x780
//
VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_LOW_A32_SYNC)
ASM_PFX(SynchronousExceptionA32):
  ExceptionEntry  EXCEPT_AARCH64_SYNCHRONOUS_EXCEPTIONS

VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_LOW_A32_IRQ)
ASM_PFX(IrqA32):
  ExceptionEntry  EXCEPT_AARCH64_IRQ

VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_LOW_A32_FIQ)
ASM_PFX(FiqA32):
  ExceptionEntry  EXCEPT_AARCH64_FIQ

VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_LOW_A32_SERR)
ASM_PFX(SErrorA32):
  ExceptionEntry  EXCEPT_AARCH64_SERROR

VECTOR_END(ExceptionHandlersStart)

ASM_PFX(ExceptionHandlersEnd):

//
// CommonExceptionEntry: shared tail of every vector stub.
// On entry (set up by ExceptionEntry):
//   x0  = EFI exception type
//   sp  = base of the context structure (GP regs already stored)
//   x28 = pointer just past the end of the context structure
// Captures the system registers and FP/SIMD state, calls the C handler,
// then restores the full context and returns with eret.
//
ASM_PFX(CommonExceptionEntry):

  // Read the exception state from the registers banked for our current EL.
  EL1_OR_EL2_OR_EL3(x1)
1:mrs      x2, elr_el1   // Exception Link Register
  mrs      x3, spsr_el1  // Saved Processor Status Register 32bit
  mrs      x5, esr_el1   // EL1 Exception syndrome register 32bit
  mrs      x6, far_el1   // EL1 Fault Address Register
  b        4f

2:mrs      x2, elr_el2   // Exception Link Register
  mrs      x3, spsr_el2  // Saved Processor Status Register 32bit
  mrs      x5, esr_el2   // EL2 Exception syndrome register 32bit
  mrs      x6, far_el2   // EL2 Fault Address Register
  b        4f

3:mrs      x2, elr_el3   // Exception Link Register
  mrs      x3, spsr_el3  // Saved Processor Status Register 32bit
  mrs      x5, esr_el3   // EL3 Exception syndrome register 32bit
  mrs      x6, far_el3   // EL3 Fault Address Register

4:mrs      x4, fpsr      // Floating point Status Register 32bit

  // Save the SYS regs (x28 walks backwards from the end of the struct)
  stp      x2, x3, [x28, #-SYS_CONTEXT_SIZE]!
  stp      x4, x5, [x28, #0x10]
  str      x6, [x28, #0x20]

  // Push FP regs to Stack.
  stp      q0, q1, [x28, #-FP_CONTEXT_SIZE]!
  stp      q2, q3, [x28, #0x20]
  stp      q4, q5, [x28, #0x40]
  stp      q6, q7, [x28, #0x60]
  stp      q8, q9, [x28, #0x80]
  stp      q10, q11, [x28, #0xa0]
  stp      q12, q13, [x28, #0xc0]
  stp      q14, q15, [x28, #0xe0]
  stp      q16, q17, [x28, #0x100]
  stp      q18, q19, [x28, #0x120]
  stp      q20, q21, [x28, #0x140]
  stp      q22, q23, [x28, #0x160]
  stp      q24, q25, [x28, #0x180]
  stp      q26, q27, [x28, #0x1a0]
  stp      q28, q29, [x28, #0x1c0]
  stp      q30, q31, [x28, #0x1e0]

  // x0 still holds the exception type.
  // Set x1 to point to the top of our struct on the Stack
  mov      x1, sp

// CommonCExceptionHandler (
//   IN     EFI_EXCEPTION_TYPE   ExceptionType,   R0
//   IN OUT EFI_SYSTEM_CONTEXT   SystemContext    R1
//   )

  // Call the handler as defined above

  // For now we spin in the handler if we received an abort of some kind.
  // We do not try to recover.
  bl       ASM_PFX(CommonCExceptionHandler) // Call exception handler

  // Pop as many GP regs as we can before entering the critical section below
  ldp      x2, x3, [sp, #0x10]
  ldp      x4, x5, [sp, #0x20]
  ldp      x6, x7, [sp, #0x30]
  ldp      x8, x9, [sp, #0x40]
  ldp      x10, x11, [sp, #0x50]
  ldp      x12, x13, [sp, #0x60]
  ldp      x14, x15, [sp, #0x70]
  ldp      x16, x17, [sp, #0x80]
  ldp      x18, x19, [sp, #0x90]
  ldp      x20, x21, [sp, #0xa0]
  ldp      x22, x23, [sp, #0xb0]
  ldp      x24, x25, [sp, #0xc0]
  ldp      x26, x27, [sp, #0xd0]
  ldp      x0, x1, [sp], #0xe0   // x28-x30 stay on the stack for now

  // Pop FP regs from Stack.
  ldp      q2, q3, [x28, #0x20]
  ldp      q4, q5, [x28, #0x40]
  ldp      q6, q7, [x28, #0x60]
  ldp      q8, q9, [x28, #0x80]
  ldp      q10, q11, [x28, #0xa0]
  ldp      q12, q13, [x28, #0xc0]
  ldp      q14, q15, [x28, #0xe0]
  ldp      q16, q17, [x28, #0x100]
  ldp      q18, q19, [x28, #0x120]
  ldp      q20, q21, [x28, #0x140]
  ldp      q22, q23, [x28, #0x160]
  ldp      q24, q25, [x28, #0x180]
  ldp      q26, q27, [x28, #0x1a0]
  ldp      q28, q29, [x28, #0x1c0]
  ldp      q30, q31, [x28, #0x1e0]
  ldp      q0, q1, [x28], #FP_CONTEXT_SIZE

  // Pop the SYS regs we need (ELR/SPSR into x29/x30, FPSR via x28)
  ldp      x29, x30, [x28]
  ldr      x28, [x28, #0x10]
  msr      fpsr, x28

  //
  // Disable interrupt(IRQ and FIQ) before restoring context,
  // or else the context will be corrupted by interrupt reentrance.
  // Interrupt mask will be restored from spsr by hardware when we call eret
  //
  msr      daifset, #3
  isb

  // Write ELR/SPSR back to the registers banked for our current EL.
  EL1_OR_EL2_OR_EL3(x28)
1:msr      elr_el1, x29  // Exception Link Register
  msr      spsr_el1, x30 // Saved Processor Status Register 32bit
  b        4f
2:msr      elr_el2, x29  // Exception Link Register
  msr      spsr_el2, x30 // Saved Processor Status Register 32bit
  b        4f
3:msr      elr_el3, x29  // Exception Link Register
  msr      spsr_el3, x30 // Saved Processor Status Register 32bit
4:

  // pop remaining GP regs and return from exception.
  ldr      x30, [sp, #0xf0 - 0xe0]
  ldp      x28, x29, [sp], #GP_CONTEXT_SIZE - 0xe0

  // Adjust SP to be where we started from when we came into the handler.
  // The handler can not change the SP.
  add      sp, sp, #FP_CONTEXT_SIZE + SYS_CONTEXT_SIZE

  eret

//
// VOID RegisterEl0Stack (IN VOID *Stack);
//   x0 = stack pointer value to install as SP_EL0.
// Records the dedicated emergency stack used by the SP0 variant of
// ExceptionEntry when handling synchronous exceptions at the current EL.
//
ASM_PFX(RegisterEl0Stack):
  msr      sp_el0, x0
  ret