// Copyright (c) 2011 - 2014 ARM LTD. All rights reserved.
// Portion of Copyright (c) 2014 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2016 HP Development Company, L.P.
//
// This program and the accompanying materials
// are licensed and made available under the terms and conditions of the BSD License
// which accompanies this distribution.  The full text of the license may be found at
// http://opensource.org/licenses/bsd-license.php
//
// THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
// WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
14 //------------------------------------------------------------------------------
16 #include <Chipset/AArch64.h>
17 #include <Library/PcdLib.h>
18 #include <AsmMacroIoLibV8.h>
19 #include <Protocol/DebugSupport.h> // for exception type definitions
/*
  This is the stack constructed by the exception handler (low address to high address).
  X0 to FAR makes up the EFI_SYSTEM_CONTEXT for AArch64.

  NOTE(review): only a fragment of the layout table survives in this excerpt;
  the entries for X0-X28 (0x000-0x0e0) and the V0-V31 SIMD registers
  (0x100-0x2f0) were elided by extraction — confirm against the upstream file.

  UINT64  FP;      0x0e8   // x29 - Frame Pointer
  UINT64  LR;      0x0f0   // x30 - Link Register
  UINT64  SP;      0x0f8   // x31 - Stack Pointer

  // FP/SIMD Registers. 128bit if used as Q-regs.

  UINT64  ELR;     0x300   // Exception Link Register
  UINT64  SPSR;    0x308   // Saved Processor Status Register
  UINT64  FPSR;    0x310   // Floating Point Status Register
  UINT64  ESR;     0x318   // Exception syndrome register
  UINT64  FAR;     0x320   // Fault Address Register
  UINT64  Padding; 0x328   // Required for stack alignment
*/
// NOTE(review): every line below carries a stray leading integer (e.g. "101"),
// apparently line-number residue from the tool that extracted this excerpt;
// the assembler/preprocessor will reject those tokens — confirm against the
// upstream source and strip them before building.
// Symbols made visible to the C side of the exception-support code.
101 GCC_ASM_EXPORT(ExceptionHandlersEnd)
102 GCC_ASM_EXPORT(CommonCExceptionHandler)
103 GCC_ASM_EXPORT(RegisterEl0Stack)
// Byte sizes of the three regions of the saved-context frame described in the
// layout comment above: 32 x 8-byte general-purpose registers, 32 x 16-byte
// Q (SIMD/FP) registers, and 6 x 8 bytes for ELR/SPSR/FPSR/ESR/FAR plus one
// padding slot so the whole frame keeps the stack 16-byte aligned.
107 #define GP_CONTEXT_SIZE (32 * 8)
108 #define FP_CONTEXT_SIZE (32 * 16)
109 #define SYS_CONTEXT_SIZE ( 6 * 8) // 5 SYS regs + Alignment requirement (ie: the stack must be aligned on 0x10)
// When the vectors are copied to a PCD-specified location (method 1 below,
// selected by ARM_RELOCATE_VECTORS), the start label must be exported so the
// copy code can find it; otherwise VBAR is programmed directly and only the
// architectural 2K alignment matters.
112 // There are two methods for installing AArch64 exception vectors:
113 // 1. Install a copy of the vectors to a location specified by a PCD
114 // 2. Write VBAR directly, requiring that vectors have proper alignment (2K)
115 // The conditional below adjusts the alignment requirement based on which
116 // exception vector initialization method is used.
119 #if defined(ARM_RELOCATE_VECTORS)
120 GCC_ASM_EXPORT(ExceptionHandlersStart)
121 ASM_PFX(ExceptionHandlersStart):
// NOTE(review): the matching #endif (and any #else branch) sits on a line
// elided from this excerpt (orig 122) — confirm upstream.
// VECTOR_BASE emits the (aligned) base of the 16-slot vector table.
123 VECTOR_BASE(ExceptionHandlersStart)
// ExceptionEntry: per-vector entry stub, expanded once into each vector slot.
//   \val - EFI exception type constant passed (in x0) to the C handler
//   \sp  - which stack the handler runs on: SPx (default) keeps the current
//          SP_ELx; SP0 switches to the dedicated SP_EL0 scratch buffer (used
//          for current-EL synchronous exceptions, where SP may be corrupt).
// NOTE(review): several ranges of the macro body are elided from this excerpt
// (orig lines 138-142, 163-171, 175-176, 180-182, and the closing .endm);
// comments referring to those parts are inferences — confirm upstream.
126 .macro ExceptionEntry, val, sp=SPx
128 // Our backtrace and register dump code is written in C and so it requires
129 // a stack. This makes it difficult to produce meaningful diagnostics when
130 // the stack pointer has been corrupted. So in such cases (i.e., when taking
131 // synchronous exceptions), this macro is expanded with \sp set to SP0, in
132 // which case we switch to the SP_EL0 stack pointer, which has been
133 // initialized to point to a buffer that has been set aside for this purpose.
135 // Since 'sp' may no longer refer to the stack frame that was active when
136 // the exception was taken, we may have to switch back and forth between
137 // SP_EL0 and SP_ELx to record the correct value for SP in the context struct.
// Reserve the FP/SIMD and system-register parts of the context frame; the GP
// part is reserved by the pre-indexed store just below.
143 // Move the stackpointer so we can reach our structure with the str instruction.
144 sub sp, sp, #(FP_CONTEXT_SIZE + SYS_CONTEXT_SIZE)
146 // Push the GP registers so we can record the exception context
// Pre-indexed store: decrements sp by GP_CONTEXT_SIZE, then x0..x29 are laid
// out at ascending offsets matching the EFI_SYSTEM_CONTEXT_AARCH64 layout.
147 stp x0, x1, [sp, #-GP_CONTEXT_SIZE]!
148 stp x2, x3, [sp, #0x10]
149 stp x4, x5, [sp, #0x20]
150 stp x6, x7, [sp, #0x30]
151 stp x8, x9, [sp, #0x40]
152 stp x10, x11, [sp, #0x50]
153 stp x12, x13, [sp, #0x60]
154 stp x14, x15, [sp, #0x70]
155 stp x16, x17, [sp, #0x80]
156 stp x18, x19, [sp, #0x90]
157 stp x20, x21, [sp, #0xa0]
158 stp x22, x23, [sp, #0xb0]
159 stp x24, x25, [sp, #0xc0]
160 stp x26, x27, [sp, #0xd0]
161 stp x28, x29, [sp, #0xe0]
// x28 = address one past the whole context frame, i.e. the SP value at the
// instant the exception was taken (for the SPx case). CommonExceptionEntry
// uses it to reach the SYS and FP areas via pre-indexed negative stores.
162 add x28, sp, #(GP_CONTEXT_SIZE + FP_CONTEXT_SIZE + SYS_CONTEXT_SIZE)
// Stores LR (x30) and the recorded SP into their frame slots. x7 presumably
// holds the exception-time SP, set up in the elided lines (orig 163-171,
// including any SP_EL0/SP_ELx switching) — TODO confirm upstream.
172 stp x30, x7, [sp, #0xf0]
174 // Record the type of exception that occurred.
// (x0 is presumably loaded with #\val in the elided lines 175-176 — confirm.)
177 // Jump to our general handler to deal with all the common parts and process the exception.
178 #if defined(ARM_RELOCATE_VECTORS)
// Relocated vectors cannot rely on PC-relative reach to the handler, so load
// its absolute address from a literal pool (the indirect branch and the
// #else/#endif glue are in elided lines 180-182/184).
179 ldr x1, =ASM_PFX(CommonExceptionEntry)
183 b ASM_PFX(CommonExceptionEntry)
// The 16-slot AArch64 vector table: four exception classes (Synchronous, IRQ,
// FIQ, SError) for each of the four source states. VECTOR_ENTRY positions
// each stub at its fixed architectural 0x80-byte slot offset.
// NOTE(review): the per-slot labels for the non-synchronous entries (e.g. an
// IrqSP0-style label at orig 195) appear to be in lines elided from this
// excerpt — confirm upstream.
188 // Current EL with SP0 : 0x0 - 0x180
190 VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_CUR_SP0_SYNC)
191 ASM_PFX(SynchronousExceptionSP0):
192 ExceptionEntry EXCEPT_AARCH64_SYNCHRONOUS_EXCEPTIONS
194 VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_CUR_SP0_IRQ)
196 ExceptionEntry EXCEPT_AARCH64_IRQ
198 VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_CUR_SP0_FIQ)
200 ExceptionEntry EXCEPT_AARCH64_FIQ
202 VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_CUR_SP0_SERR)
204 ExceptionEntry EXCEPT_AARCH64_SERROR
207 // Current EL with SPx: 0x200 - 0x380
209 VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_CUR_SPx_SYNC)
210 ASM_PFX(SynchronousExceptionSPx):
// Only this slot passes SP0: a synchronous exception taken while already on
// SP_ELx may mean SP itself is corrupt, so the handler switches to the
// dedicated SP_EL0 buffer (see the ExceptionEntry macro comment).
211 ExceptionEntry EXCEPT_AARCH64_SYNCHRONOUS_EXCEPTIONS, SP0
213 VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_CUR_SPx_IRQ)
215 ExceptionEntry EXCEPT_AARCH64_IRQ
217 VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_CUR_SPx_FIQ)
219 ExceptionEntry EXCEPT_AARCH64_FIQ
221 VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_CUR_SPx_SERR)
223 ExceptionEntry EXCEPT_AARCH64_SERROR
226 // Lower EL using AArch64 : 0x400 - 0x580
228 VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_LOW_A64_SYNC)
229 ASM_PFX(SynchronousExceptionA64):
230 ExceptionEntry EXCEPT_AARCH64_SYNCHRONOUS_EXCEPTIONS
232 VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_LOW_A64_IRQ)
234 ExceptionEntry EXCEPT_AARCH64_IRQ
236 VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_LOW_A64_FIQ)
238 ExceptionEntry EXCEPT_AARCH64_FIQ
240 VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_LOW_A64_SERR)
242 ExceptionEntry EXCEPT_AARCH64_SERROR
245 // Lower EL using AArch32 : 0x600 - 0x780
247 VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_LOW_A32_SYNC)
248 ASM_PFX(SynchronousExceptionA32):
249 ExceptionEntry EXCEPT_AARCH64_SYNCHRONOUS_EXCEPTIONS
251 VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_LOW_A32_IRQ)
253 ExceptionEntry EXCEPT_AARCH64_IRQ
255 VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_LOW_A32_FIQ)
257 ExceptionEntry EXCEPT_AARCH64_FIQ
259 VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_LOW_A32_SERR)
261 ExceptionEntry EXCEPT_AARCH64_SERROR
263 VECTOR_END(ExceptionHandlersStart)
// Marks the end of the vector code so the relocating installer knows how many
// bytes to copy (exported above).
265 ASM_PFX(ExceptionHandlersEnd):
// CommonExceptionEntry: shared tail of every vector stub. On entry:
//   x0  = exception type (set by ExceptionEntry)
//   x28 = top of the context frame (exception-time SP)
//   sp  = base of the GP area of the context frame
// It captures the EL-specific system registers and the FP/SIMD state into the
// frame, calls CommonCExceptionHandler(ExceptionType, &SystemContext), then
// restores everything and (in an elided line) executes eret.
// NOTE(review): numerous interior lines are elided from this excerpt (e.g.
// the 'b 4f' glue between the EL branches, 'mov x1, sp', 'str x6' for FAR,
// the fpsr/daifset/isb writes, and the final eret); comments touching those
// are inferences — confirm against the upstream file.
268 ASM_PFX(CommonExceptionEntry):
// Macro dispatch: branches to local label 1/2/3 according to the current EL,
// using x1 as scratch.
270 EL1_OR_EL2_OR_EL3(x1)
271 1:mrs x2, elr_el1 // Exception Link Register
272 mrs x3, spsr_el1 // Saved Processor Status Register 32bit
273 mrs x5, esr_el1 // EL1 Exception syndrome register 32bit
274 mrs x6, far_el1 // EL1 Fault Address Register
277 2:mrs x2, elr_el2 // Exception Link Register
278 mrs x3, spsr_el2 // Saved Processor Status Register 32bit
279 mrs x5, esr_el2 // EL2 Exception syndrome register 32bit
280 mrs x6, far_el2 // EL2 Fault Address Register
283 3:mrs x2, elr_el3 // Exception Link Register
284 mrs x3, spsr_el3 // Saved Processor Status Register 32bit
285 mrs x5, esr_el3 // EL3 Exception syndrome register 32bit
286 mrs x6, far_el3 // EL3 Fault Address Register
// Common join point: FPSR is EL-independent.
288 4:mrs x4, fpsr // Floating point Status Register 32bit
// Save the SYS regs into the top of the frame. Pre-indexed store drops x28 to
// the base of the SYS area: ELR/SPSR at +0x00/+0x08, FPSR/ESR at +0x10/+0x18.
// (FAR at +0x20 is presumably stored in elided line 293 — confirm.)
291 stp x2, x3, [x28, #-SYS_CONTEXT_SIZE]!
292 stp x4, x5, [x28, #0x10]
295 // Push FP regs to Stack.
// Pre-indexed store drops x28 to the base of the FP area; q0..q31 at 0x20
// strides, matching the V0-V31 slots of the context layout.
296 stp q0, q1, [x28, #-FP_CONTEXT_SIZE]!
297 stp q2, q3, [x28, #0x20]
298 stp q4, q5, [x28, #0x40]
299 stp q6, q7, [x28, #0x60]
300 stp q8, q9, [x28, #0x80]
301 stp q10, q11, [x28, #0xa0]
302 stp q12, q13, [x28, #0xc0]
303 stp q14, q15, [x28, #0xe0]
304 stp q16, q17, [x28, #0x100]
305 stp q18, q19, [x28, #0x120]
306 stp q20, q21, [x28, #0x140]
307 stp q22, q23, [x28, #0x160]
308 stp q24, q25, [x28, #0x180]
309 stp q26, q27, [x28, #0x1a0]
310 stp q28, q29, [x28, #0x1c0]
311 stp q30, q31, [x28, #0x1e0]
313 // x0 still holds the exception type.
314 // Set x1 to point to the top of our struct on the Stack
// (Presumably 'mov x1, sp' in elided line 315 — sp is the frame base, which
// is where EFI_SYSTEM_CONTEXT begins — confirm.)
317 // CommonCExceptionHandler (
318 // IN EFI_EXCEPTION_TYPE ExceptionType, R0
319 // IN OUT EFI_SYSTEM_CONTEXT SystemContext R1
322 // Call the handler as defined above
324 // For now we spin in the handler if we received an abort of some kind.
325 // We do not try to recover.
326 bl ASM_PFX(CommonCExceptionHandler) // Call exception handler
// The handler may have modified the context (IN OUT), so everything below
// restores machine state FROM the frame, not from live registers.
328 // Pop as many GP regs as we can before entering the critical section below
329 ldp x2, x3, [sp, #0x10]
330 ldp x4, x5, [sp, #0x20]
331 ldp x6, x7, [sp, #0x30]
332 ldp x8, x9, [sp, #0x40]
333 ldp x10, x11, [sp, #0x50]
334 ldp x12, x13, [sp, #0x60]
335 ldp x14, x15, [sp, #0x70]
336 ldp x16, x17, [sp, #0x80]
337 ldp x18, x19, [sp, #0x90]
338 ldp x20, x21, [sp, #0xa0]
339 ldp x22, x23, [sp, #0xb0]
340 ldp x24, x25, [sp, #0xc0]
341 ldp x26, x27, [sp, #0xd0]
// Post-indexed: restores x0/x1 and advances sp by 0xe0, leaving only the
// x28/x29/x30/SP slots of the GP area still below sp.
342 ldp x0, x1, [sp], #0xe0
344 // Pop FP regs from Stack.
345 ldp q2, q3, [x28, #0x20]
346 ldp q4, q5, [x28, #0x40]
347 ldp q6, q7, [x28, #0x60]
348 ldp q8, q9, [x28, #0x80]
349 ldp q10, q11, [x28, #0xa0]
350 ldp q12, q13, [x28, #0xc0]
351 ldp q14, q15, [x28, #0xe0]
352 ldp q16, q17, [x28, #0x100]
353 ldp q18, q19, [x28, #0x120]
354 ldp q20, q21, [x28, #0x140]
355 ldp q22, q23, [x28, #0x160]
356 ldp q24, q25, [x28, #0x180]
357 ldp q26, q27, [x28, #0x1a0]
358 ldp q28, q29, [x28, #0x1c0]
359 ldp q30, q31, [x28, #0x1e0]
// Post-indexed: after this, x28 points at the SYS area of the frame.
360 ldp q0, q1, [x28], #FP_CONTEXT_SIZE
362 // Pop the SYS regs we need
// (x29/x30 are presumably loaded with the saved ELR/SPSR from [x28] in elided
// line 363 — the msr writes below depend on that — confirm.)
// x28 = saved FPSR (+0x10); presumably written back via 'msr fpsr' in an
// elided line before x28 is reused as scratch below — confirm.
364 ldr x28, [x28, #0x10]
368 // Disable interrupt(IRQ and FIQ) before restoring context,
369 // or else the context will be corrupted by interrupt reentrance.
370 // Interrupt mask will be restored from spsr by hardware when we call eret
// ('msr daifset, #3' / 'isb' presumably sit in elided lines 371-374.)
// EL dispatch again; x28 is only scratch here, its FPSR value is dead.
375 EL1_OR_EL2_OR_EL3(x28)
376 1:msr elr_el1, x29 // Exception Link Register
377 msr spsr_el1, x30 // Saved Processor Status Register 32bit
379 2:msr elr_el2, x29 // Exception Link Register
380 msr spsr_el2, x30 // Saved Processor Status Register 32bit
382 3:msr elr_el3, x29 // Exception Link Register
383 msr spsr_el3, x30 // Saved Processor Status Register 32bit
386 // pop remaining GP regs and return from exception.
// sp was advanced by 0xe0 above, so the LR slot (frame +0xf0) is at +0x10.
387 ldr x30, [sp, #0xf0 - 0xe0]
// Restore x28/x29 and release the rest of the GP area.
388 ldp x28, x29, [sp], #GP_CONTEXT_SIZE - 0xe0
390 // Adjust SP to be where we started from when we came into the handler.
391 // The handler can not change the SP.
392 add sp, sp, #FP_CONTEXT_SIZE + SYS_CONTEXT_SIZE
396 ASM_PFX(RegisterEl0Stack):