//
// Copyright (c) 2011 - 2014 ARM LTD. All rights reserved.<BR>
// Portion of Copyright (c) 2014 NVIDIA Corporation. All rights reserved.<BR>
// Copyright (c) 2016 HP Development Company, L.P.
//
// This program and the accompanying materials
// are licensed and made available under the terms and conditions of the BSD License
// which accompanies this distribution. The full text of the license may be found at
// http://opensource.org/licenses/bsd-license.php
//
// THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
// WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
//
//------------------------------------------------------------------------------

#include <Chipset/AArch64.h>
#include <Library/PcdLib.h>
#include <AsmMacroIoLibV8.h>
#include <Protocol/DebugSupport.h> // for exception type definitions

/*
  This is the stack constructed by the exception handler (low address to high address).
  X0 to FAR makes up the EFI_SYSTEM_CONTEXT for AArch64.

  UINT64  X0;     0x000
  UINT64  X1;     0x008
  UINT64  X2;     0x010
  UINT64  X3;     0x018
  UINT64  X4;     0x020
  UINT64  X5;     0x028
  UINT64  X6;     0x030
  UINT64  X7;     0x038
  UINT64  X8;     0x040
  UINT64  X9;     0x048
  UINT64  X10;    0x050
  UINT64  X11;    0x058
  UINT64  X12;    0x060
  UINT64  X13;    0x068
  UINT64  X14;    0x070
  UINT64  X15;    0x078
  UINT64  X16;    0x080
  UINT64  X17;    0x088
  UINT64  X18;    0x090
  UINT64  X19;    0x098
  UINT64  X20;    0x0a0
  UINT64  X21;    0x0a8
  UINT64  X22;    0x0b0
  UINT64  X23;    0x0b8
  UINT64  X24;    0x0c0
  UINT64  X25;    0x0c8
  UINT64  X26;    0x0d0
  UINT64  X27;    0x0d8
  UINT64  X28;    0x0e0
  UINT64  FP;     0x0e8   // x29 - Frame Pointer
  UINT64  LR;     0x0f0   // x30 - Link Register
  UINT64  SP;     0x0f8   // x31 - Stack Pointer

  // FP/SIMD Registers. 128bit if used as Q-regs.
  UINT64  V0[2];  0x100
  UINT64  V1[2];  0x110
  UINT64  V2[2];  0x120
  UINT64  V3[2];  0x130
  UINT64  V4[2];  0x140
  UINT64  V5[2];  0x150
  UINT64  V6[2];  0x160
  UINT64  V7[2];  0x170
  UINT64  V8[2];  0x180
  UINT64  V9[2];  0x190
  UINT64  V10[2]; 0x1a0
  UINT64  V11[2]; 0x1b0
  UINT64  V12[2]; 0x1c0
  UINT64  V13[2]; 0x1d0
  UINT64  V14[2]; 0x1e0
  UINT64  V15[2]; 0x1f0
  UINT64  V16[2]; 0x200
  UINT64  V17[2]; 0x210
  UINT64  V18[2]; 0x220
  UINT64  V19[2]; 0x230
  UINT64  V20[2]; 0x240
  UINT64  V21[2]; 0x250
  UINT64  V22[2]; 0x260
  UINT64  V23[2]; 0x270
  UINT64  V24[2]; 0x280
  UINT64  V25[2]; 0x290
  UINT64  V26[2]; 0x2a0
  UINT64  V27[2]; 0x2b0
  UINT64  V28[2]; 0x2c0
  UINT64  V29[2]; 0x2d0
  UINT64  V30[2]; 0x2e0
  UINT64  V31[2]; 0x2f0

  // System Context
  UINT64  ELR;    0x300   // Exception Link Register
  UINT64  SPSR;   0x308   // Saved Processor Status Register
  UINT64  FPSR;   0x310   // Floating Point Status Register
  UINT64  ESR;    0x318   // Exception Syndrome Register
  UINT64  FAR;    0x320   // Fault Address Register
  UINT64  Padding; 0x328  // Required for stack alignment
*/
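
// NOTE: this layout is what registered handlers see through the
// EFI_SYSTEM_CONTEXT passed in x1 below (the AArch64 member of the union
// declared in Protocol/DebugSupport.h); the field names above mirror that
// structure.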

GCC_ASM_EXPORT(ExceptionHandlersEnd)
GCC_ASM_EXPORT(CommonCExceptionHandler)
GCC_ASM_EXPORT(RegisterEl0Stack)

.text

#define GP_CONTEXT_SIZE   (32 *  8)
#define FP_CONTEXT_SIZE   (32 * 16)
#define SYS_CONTEXT_SIZE  ( 6 *  8) // 5 SYS regs (5 x 8 = 40 bytes) plus 8 bytes of padding, so the stack stays aligned on 0x10

//
// There are two methods for installing AArch64 exception vectors:
//  1. Install a copy of the vectors to a location specified by a PCD
//  2. Write VBAR directly, requiring that vectors have proper alignment (2K)
// The conditional below adjusts the alignment requirement based on which
// exception vector initialization method is used.
//

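//
// When ARM_RELOCATE_VECTORS is defined (method 1), ExceptionHandlersStart is a
// plain label and the table is copied to a suitably aligned location at
// runtime; otherwise (method 2), VECTOR_BASE provides the 2K alignment so that
// VBAR can be pointed at ExceptionHandlersStart directly.
//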
#if defined(ARM_RELOCATE_VECTORS)
GCC_ASM_EXPORT(ExceptionHandlersStart)
ASM_PFX(ExceptionHandlersStart):
#else
VECTOR_BASE(ExceptionHandlersStart)
#endif

.macro ExceptionEntry, val, sp=SPx
  //
  // Our backtrace and register dump code is written in C and so it requires
  // a stack. This makes it difficult to produce meaningful diagnostics when
  // the stack pointer has been corrupted. So in such cases (i.e., when taking
  // synchronous exceptions), this macro is expanded with \sp set to SP0, in
  // which case we switch to the SP_EL0 stack pointer, which has been
  // initialized to point to a buffer that has been set aside for this purpose.
  //
  // Since 'sp' may no longer refer to the stack frame that was active when
  // the exception was taken, we may have to switch back and forth between
  // SP_EL0 and SP_ELx to record the correct value for SP in the context struct.
  //
  .ifnc    \sp, SPx
  msr      SPsel, xzr
  .endif

  // Move the stack pointer so we can reach our structure with the str instruction.
  sub      sp, sp, #(FP_CONTEXT_SIZE + SYS_CONTEXT_SIZE)

  // Push the GP registers so we can record the exception context
  stp      x0, x1, [sp, #-GP_CONTEXT_SIZE]!
  stp      x2, x3, [sp, #0x10]
  stp      x4, x5, [sp, #0x20]
  stp      x6, x7, [sp, #0x30]
  stp      x8, x9, [sp, #0x40]
  stp      x10, x11, [sp, #0x50]
  stp      x12, x13, [sp, #0x60]
  stp      x14, x15, [sp, #0x70]
  stp      x16, x17, [sp, #0x80]
  stp      x18, x19, [sp, #0x90]
  stp      x20, x21, [sp, #0xa0]
  stp      x22, x23, [sp, #0xb0]
  stp      x24, x25, [sp, #0xc0]
  stp      x26, x27, [sp, #0xd0]
  stp      x28, x29, [sp, #0xe0]
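  // Load x28 with the value sp had before the adjustments above, i.e. the
  // address just past the top of the context structure. CommonExceptionEntry
  // uses it below as the (descending) store pointer for the system and
  // FP/SIMD registers.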
  add      x28, sp, #(GP_CONTEXT_SIZE + FP_CONTEXT_SIZE + SYS_CONTEXT_SIZE)

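  // Record the stack pointer that was live when the exception was taken: when
  // running on the SP_EL0 stack, briefly switch SPsel back to SP_ELx to read
  // it into x7; otherwise it is simply the value just computed in x28.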
  .ifnc    \sp, SPx
  msr      SPsel, #1
  mov      x7, sp
  msr      SPsel, xzr
  .else
  mov      x7, x28
  .endif

  stp      x30, x7, [sp, #0xf0]

  // Record the type of exception that occurred.
  mov      x0, #\val

  // Jump to our general handler to deal with all the common parts and process the exception.
#if defined(ARM_RELOCATE_VECTORS)
  ldr      x1, =ASM_PFX(CommonExceptionEntry)
  br       x1
  .ltorg
#else
  b        ASM_PFX(CommonExceptionEntry)
#endif
.endm

//
// Current EL with SP0 : 0x0 - 0x180
//
VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_CUR_SP0_SYNC)
ASM_PFX(SynchronousExceptionSP0):
  ExceptionEntry  EXCEPT_AARCH64_SYNCHRONOUS_EXCEPTIONS

VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_CUR_SP0_IRQ)
ASM_PFX(IrqSP0):
  ExceptionEntry  EXCEPT_AARCH64_IRQ

VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_CUR_SP0_FIQ)
ASM_PFX(FiqSP0):
  ExceptionEntry  EXCEPT_AARCH64_FIQ

VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_CUR_SP0_SERR)
ASM_PFX(SErrorSP0):
  ExceptionEntry  EXCEPT_AARCH64_SERROR

//
// Current EL with SPx: 0x200 - 0x380
//
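// The synchronous entry below passes SP0 so that ExceptionEntry switches to
// the dedicated SP_EL0 stack (see RegisterEl0Stack at the end of this file);
// this keeps the register dump and backtrace usable even when the current
// stack pointer is corrupt.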
VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_CUR_SPx_SYNC)
ASM_PFX(SynchronousExceptionSPx):
  ExceptionEntry  EXCEPT_AARCH64_SYNCHRONOUS_EXCEPTIONS, SP0

VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_CUR_SPx_IRQ)
ASM_PFX(IrqSPx):
  ExceptionEntry  EXCEPT_AARCH64_IRQ

VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_CUR_SPx_FIQ)
ASM_PFX(FiqSPx):
  ExceptionEntry  EXCEPT_AARCH64_FIQ

VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_CUR_SPx_SERR)
ASM_PFX(SErrorSPx):
  ExceptionEntry  EXCEPT_AARCH64_SERROR

//
// Lower EL using AArch64 : 0x400 - 0x580
//
VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_LOW_A64_SYNC)
ASM_PFX(SynchronousExceptionA64):
  ExceptionEntry  EXCEPT_AARCH64_SYNCHRONOUS_EXCEPTIONS

VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_LOW_A64_IRQ)
ASM_PFX(IrqA64):
  ExceptionEntry  EXCEPT_AARCH64_IRQ

VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_LOW_A64_FIQ)
ASM_PFX(FiqA64):
  ExceptionEntry  EXCEPT_AARCH64_FIQ

VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_LOW_A64_SERR)
ASM_PFX(SErrorA64):
  ExceptionEntry  EXCEPT_AARCH64_SERROR

//
// Lower EL using AArch32 : 0x600 - 0x780
//
VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_LOW_A32_SYNC)
ASM_PFX(SynchronousExceptionA32):
  ExceptionEntry  EXCEPT_AARCH64_SYNCHRONOUS_EXCEPTIONS

VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_LOW_A32_IRQ)
ASM_PFX(IrqA32):
  ExceptionEntry  EXCEPT_AARCH64_IRQ

VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_LOW_A32_FIQ)
ASM_PFX(FiqA32):
  ExceptionEntry  EXCEPT_AARCH64_FIQ

VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_LOW_A32_SERR)
ASM_PFX(SErrorA32):
  ExceptionEntry  EXCEPT_AARCH64_SERROR

VECTOR_END(ExceptionHandlersStart)

ASM_PFX(ExceptionHandlersEnd):


ASM_PFX(CommonExceptionEntry):

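  // Read ELR, SPSR, ESR and FAR from the system registers that belong to the
  // exception level we are executing at; EL1_OR_EL2_OR_EL3 dispatches to the
  // local label 1, 2 or 3 below accordingly (x1 is used as a scratch register).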
  EL1_OR_EL2_OR_EL3(x1)
1:mrs      x2, elr_el1   // Exception Link Register
  mrs      x3, spsr_el1  // Saved Processor Status Register 32bit
  mrs      x5, esr_el1   // EL1 Exception syndrome register 32bit
  mrs      x6, far_el1   // EL1 Fault Address Register
  b        4f

2:mrs      x2, elr_el2   // Exception Link Register
  mrs      x3, spsr_el2  // Saved Processor Status Register 32bit
  mrs      x5, esr_el2   // EL2 Exception syndrome register 32bit
  mrs      x6, far_el2   // EL2 Fault Address Register
  b        4f

3:mrs      x2, elr_el3   // Exception Link Register
  mrs      x3, spsr_el3  // Saved Processor Status Register 32bit
  mrs      x5, esr_el3   // EL3 Exception syndrome register 32bit
  mrs      x6, far_el3   // EL3 Fault Address Register

4:mrs      x4, fpsr      // Floating point Status Register 32bit

  // Save the SYS regs
  stp      x2, x3, [x28, #-SYS_CONTEXT_SIZE]!
  stp      x4, x5, [x28, #0x10]
  str      x6, [x28, #0x20]

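  // The pre-indexed store above moved x28 down to the start of the system
  // register area; the pre-indexed store below moves it down once more, to the
  // start of the FP/SIMD save area.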
  // Push FP regs to Stack.
  stp      q0, q1, [x28, #-FP_CONTEXT_SIZE]!
  stp      q2, q3, [x28, #0x20]
  stp      q4, q5, [x28, #0x40]
  stp      q6, q7, [x28, #0x60]
  stp      q8, q9, [x28, #0x80]
  stp      q10, q11, [x28, #0xa0]
  stp      q12, q13, [x28, #0xc0]
  stp      q14, q15, [x28, #0xe0]
  stp      q16, q17, [x28, #0x100]
  stp      q18, q19, [x28, #0x120]
  stp      q20, q21, [x28, #0x140]
  stp      q22, q23, [x28, #0x160]
  stp      q24, q25, [x28, #0x180]
  stp      q26, q27, [x28, #0x1a0]
  stp      q28, q29, [x28, #0x1c0]
  stp      q30, q31, [x28, #0x1e0]

  // x0 still holds the exception type.
  // Set x1 to point to the start of our context structure on the stack.
  mov      x1, sp

  // CommonCExceptionHandler (
  //   IN     EFI_EXCEPTION_TYPE   ExceptionType,   x0
  //   IN OUT EFI_SYSTEM_CONTEXT   SystemContext    x1
  //   )

  // Call the handler as defined above

  // For now we spin in the handler if we received an abort of some kind.
  // We do not try to recover.
  bl       ASM_PFX(CommonCExceptionHandler) // Call exception handler

  // Pop as many GP regs as we can before entering the critical section below
  ldp      x2, x3, [sp, #0x10]
  ldp      x4, x5, [sp, #0x20]
  ldp      x6, x7, [sp, #0x30]
  ldp      x8, x9, [sp, #0x40]
  ldp      x10, x11, [sp, #0x50]
  ldp      x12, x13, [sp, #0x60]
  ldp      x14, x15, [sp, #0x70]
  ldp      x16, x17, [sp, #0x80]
  ldp      x18, x19, [sp, #0x90]
  ldp      x20, x21, [sp, #0xa0]
  ldp      x22, x23, [sp, #0xb0]
  ldp      x24, x25, [sp, #0xc0]
  ldp      x26, x27, [sp, #0xd0]
  ldp      x0, x1, [sp], #0xe0
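  // NOTE: the post-index above advances sp past the first 0xe0 bytes of the GP
  // area, leaving it at the x28/x29 slot; x28-x30 are restored last, after the
  // system registers have been written back below.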

  // Pop FP regs from Stack.
  ldp      q2, q3, [x28, #0x20]
  ldp      q4, q5, [x28, #0x40]
  ldp      q6, q7, [x28, #0x60]
  ldp      q8, q9, [x28, #0x80]
  ldp      q10, q11, [x28, #0xa0]
  ldp      q12, q13, [x28, #0xc0]
  ldp      q14, q15, [x28, #0xe0]
  ldp      q16, q17, [x28, #0x100]
  ldp      q18, q19, [x28, #0x120]
  ldp      q20, q21, [x28, #0x140]
  ldp      q22, q23, [x28, #0x160]
  ldp      q24, q25, [x28, #0x180]
  ldp      q26, q27, [x28, #0x1a0]
  ldp      q28, q29, [x28, #0x1c0]
  ldp      q30, q31, [x28, #0x1e0]
  ldp      q0, q1, [x28], #FP_CONTEXT_SIZE

  // Pop the SYS regs we need
  ldp      x29, x30, [x28]
  ldr      x28, [x28, #0x10]
  msr      fpsr, x28
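  // x29/x30 now hold the saved ELR and SPSR values; they are written back to
  // the corresponding system registers only after interrupts have been masked
  // below.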

  //
  // Disable interrupts (IRQ and FIQ) before restoring the context, or else the
  // context will be corrupted by interrupt reentrancy. The interrupt mask will
  // be restored from SPSR by hardware when we call eret.
  //
  msr      daifset, #3
  isb

  EL1_OR_EL2_OR_EL3(x28)
1:msr      elr_el1, x29   // Exception Link Register
  msr      spsr_el1, x30  // Saved Processor Status Register 32bit
  b        4f
2:msr      elr_el2, x29   // Exception Link Register
  msr      spsr_el2, x30  // Saved Processor Status Register 32bit
  b        4f
3:msr      elr_el3, x29   // Exception Link Register
  msr      spsr_el3, x30  // Saved Processor Status Register 32bit
4:

  // Pop the remaining GP regs and return from the exception.
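  // sp still points at the x28/x29 slot (offset 0xe0 into the GP area), which
  // is why the offsets below are taken relative to that point.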
  ldr      x30, [sp, #0xf0 - 0xe0]
  ldp      x28, x29, [sp], #GP_CONTEXT_SIZE - 0xe0

  // Adjust SP to be where we started from when we came into the handler.
  // The handler cannot change the SP.
  add      sp, sp, #FP_CONTEXT_SIZE + SYS_CONTEXT_SIZE

  eret

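// Install the buffer passed in x0 as the dedicated SP_EL0 stack that the
// synchronous exception entries above switch to (see the ExceptionEntry macro).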
ASM_PFX(RegisterEl0Stack):
  msr      sp_el0, x0
  ret