ArmPkg/Library/ArmExceptionLib/AArch64/ExceptionSupport.S
//
// Copyright (c) 2011 - 2014 ARM LTD. All rights reserved.<BR>
// Portion of Copyright (c) 2014 NVIDIA Corporation. All rights reserved.<BR>
// Copyright (c) 2016 HP Development Company, L.P.
//
// SPDX-License-Identifier: BSD-2-Clause-Patent
//
//------------------------------------------------------------------------------

#include <Chipset/AArch64.h>
#include <Library/PcdLib.h>
#include <AsmMacroIoLibV8.h>
#include <Protocol/DebugSupport.h> // for exception type definitions

/*
  This is the stack constructed by the exception handler (low address to high address).
  X0 to FAR makes up the EFI_SYSTEM_CONTEXT for AArch64.

  UINT64 X0;      0x000
  UINT64 X1;      0x008
  UINT64 X2;      0x010
  UINT64 X3;      0x018
  UINT64 X4;      0x020
  UINT64 X5;      0x028
  UINT64 X6;      0x030
  UINT64 X7;      0x038
  UINT64 X8;      0x040
  UINT64 X9;      0x048
  UINT64 X10;     0x050
  UINT64 X11;     0x058
  UINT64 X12;     0x060
  UINT64 X13;     0x068
  UINT64 X14;     0x070
  UINT64 X15;     0x078
  UINT64 X16;     0x080
  UINT64 X17;     0x088
  UINT64 X18;     0x090
  UINT64 X19;     0x098
  UINT64 X20;     0x0a0
  UINT64 X21;     0x0a8
  UINT64 X22;     0x0b0
  UINT64 X23;     0x0b8
  UINT64 X24;     0x0c0
  UINT64 X25;     0x0c8
  UINT64 X26;     0x0d0
  UINT64 X27;     0x0d8
  UINT64 X28;     0x0e0
  UINT64 FP;      0x0e8   // x29 - Frame Pointer
  UINT64 LR;      0x0f0   // x30 - Link Register
  UINT64 SP;      0x0f8   // x31 - Stack Pointer

  // FP/SIMD Registers. 128bit if used as Q-regs.
  UINT64 V0[2];   0x100
  UINT64 V1[2];   0x110
  UINT64 V2[2];   0x120
  UINT64 V3[2];   0x130
  UINT64 V4[2];   0x140
  UINT64 V5[2];   0x150
  UINT64 V6[2];   0x160
  UINT64 V7[2];   0x170
  UINT64 V8[2];   0x180
  UINT64 V9[2];   0x190
  UINT64 V10[2];  0x1a0
  UINT64 V11[2];  0x1b0
  UINT64 V12[2];  0x1c0
  UINT64 V13[2];  0x1d0
  UINT64 V14[2];  0x1e0
  UINT64 V15[2];  0x1f0
  UINT64 V16[2];  0x200
  UINT64 V17[2];  0x210
  UINT64 V18[2];  0x220
  UINT64 V19[2];  0x230
  UINT64 V20[2];  0x240
  UINT64 V21[2];  0x250
  UINT64 V22[2];  0x260
  UINT64 V23[2];  0x270
  UINT64 V24[2];  0x280
  UINT64 V25[2];  0x290
  UINT64 V26[2];  0x2a0
  UINT64 V27[2];  0x2b0
  UINT64 V28[2];  0x2c0
  UINT64 V29[2];  0x2d0
  UINT64 V30[2];  0x2e0
  UINT64 V31[2];  0x2f0

  // System Context
  UINT64 ELR;     0x300   // Exception Link Register
  UINT64 SPSR;    0x308   // Saved Processor Status Register
  UINT64 FPSR;    0x310   // Floating Point Status Register
  UINT64 ESR;     0x318   // Exception syndrome register
  UINT64 FAR;     0x320   // Fault Address Register
  UINT64 Padding; 0x328   // Required for stack alignment
*/

GCC_ASM_EXPORT(ExceptionHandlersEnd)
GCC_ASM_EXPORT(CommonCExceptionHandler)
GCC_ASM_EXPORT(RegisterEl0Stack)

.text

#define GP_CONTEXT_SIZE   (32 *  8)
#define FP_CONTEXT_SIZE   (32 * 16)
#define SYS_CONTEXT_SIZE  ( 6 *  8) // 5 SYS regs + alignment requirement (i.e. the stack must be aligned on 0x10)
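
//
// For reference, the context frame carved out below works out to:
//   GP_CONTEXT_SIZE  = 32 * 8  = 0x100
//   FP_CONTEXT_SIZE  = 32 * 16 = 0x200
//   SYS_CONTEXT_SIZE =  6 * 8  = 0x30
//   Total                      = 0x330
// which matches the layout above (the last field, Padding, ends at 0x328 + 8)
// and keeps the stack pointer 16-byte aligned.
//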

//
// There are two methods for installing AArch64 exception vectors:
//  1. Install a copy of the vectors to a location specified by a PCD
//  2. Write VBAR directly, requiring that vectors have proper alignment (2K)
// The conditional below adjusts the alignment requirement based on which
// exception vector initialization method is used.
//

#if defined(ARM_RELOCATE_VECTORS)
GCC_ASM_EXPORT(ExceptionHandlersStart)
ASM_PFX(ExceptionHandlersStart):
#else
VECTOR_BASE(ExceptionHandlersStart)
#endif
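
//
// As a rough illustration of method 2 above (writing VBAR directly), installing
// these vectors amounts to something along the lines of:
//
//   ldr   x0, =ExceptionHandlersStart   // 2 KB aligned when VECTOR_BASE is used
//   msr   vbar_el2, x0                  // or vbar_el1/vbar_el3, matching the current EL
//   isb
//
// In ArmPkg this is typically done from C via ArmLib's ArmWriteVBar().
//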

.macro ExceptionEntry, val, sp=SPx
  //
  // Our backtrace and register dump code is written in C and so it requires
  // a stack. This makes it difficult to produce meaningful diagnostics when
  // the stack pointer has been corrupted. So in such cases (i.e., when taking
  // synchronous exceptions), this macro is expanded with \sp set to SP0, in
  // which case we switch to the SP_EL0 stack pointer, which has been
  // initialized to point to a buffer that has been set aside for this purpose.
  //
  // Since 'sp' may no longer refer to the stack frame that was active when
  // the exception was taken, we may have to switch back and forth between
  // SP_EL0 and SP_ELx to record the correct value for SP in the context struct.
  //
  .ifnc   \sp, SPx
  msr     SPsel, xzr
  .endif
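
  // For example, the 'Current EL with SPx' synchronous vector below expands this
  // macro as 'ExceptionEntry EXCEPT_AARCH64_SYNCHRONOUS_EXCEPTIONS, SP0', so the
  // .ifnc above is true and the switch to SP_EL0 is emitted; every other vector
  // uses the default \sp=SPx and skips it.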

  // Move the stack pointer so we can reach our structure with the str instruction.
  sub     sp, sp, #(FP_CONTEXT_SIZE + SYS_CONTEXT_SIZE)

  // Push the GP registers so we can record the exception context
  stp     x0, x1, [sp, #-GP_CONTEXT_SIZE]!
  stp     x2, x3, [sp, #0x10]
  stp     x4, x5, [sp, #0x20]
  stp     x6, x7, [sp, #0x30]
  stp     x8, x9, [sp, #0x40]
  stp     x10, x11, [sp, #0x50]
  stp     x12, x13, [sp, #0x60]
  stp     x14, x15, [sp, #0x70]
  stp     x16, x17, [sp, #0x80]
  stp     x18, x19, [sp, #0x90]
  stp     x20, x21, [sp, #0xa0]
  stp     x22, x23, [sp, #0xb0]
  stp     x24, x25, [sp, #0xc0]
  stp     x26, x27, [sp, #0xd0]
  stp     x28, x29, [sp, #0xe0]
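  // With x28 already saved above, reuse it to recompute the stack pointer value
  // at exception entry (before the sub and the pushes above). It is recorded as
  // the SP value for the default SPx case and reused in CommonExceptionEntry as
  // the descending write pointer for the SYS and FP areas.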
  add     x28, sp, #(GP_CONTEXT_SIZE + FP_CONTEXT_SIZE + SYS_CONTEXT_SIZE)

  .ifnc   \sp, SPx
  msr     SPsel, #1
  mov     x7, sp
  msr     SPsel, xzr
  .else
  mov     x7, x28
  .endif

  stp     x30, x7, [sp, #0xf0]
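  // x30 (LR) and x7 (the interrupted SP captured above) land in the LR and SP
  // slots of the context, at offsets 0xf0 and 0xf8.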

  // Record the type of exception that occurred.
  mov     x0, #\val

  // Jump to our general handler to deal with all the common parts and process the exception.
#if defined(ARM_RELOCATE_VECTORS)
  ldr     x1, =ASM_PFX(CommonExceptionEntry)
  br      x1
  .ltorg
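  // When the vectors are copied to the location given by the PCD, a plain
  // PC-relative branch from the copy would not reach CommonExceptionEntry, so
  // the absolute address is loaded from a literal pool (emitted here by .ltorg)
  // and branched to through a register instead.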
#else
  b       ASM_PFX(CommonExceptionEntry)
#endif
.endm

//
// Current EL with SP0 : 0x0 - 0x180
//
VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_CUR_SP0_SYNC)
ASM_PFX(SynchronousExceptionSP0):
  ExceptionEntry  EXCEPT_AARCH64_SYNCHRONOUS_EXCEPTIONS

VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_CUR_SP0_IRQ)
ASM_PFX(IrqSP0):
  ExceptionEntry  EXCEPT_AARCH64_IRQ

VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_CUR_SP0_FIQ)
ASM_PFX(FiqSP0):
  ExceptionEntry  EXCEPT_AARCH64_FIQ

VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_CUR_SP0_SERR)
ASM_PFX(SErrorSP0):
  ExceptionEntry  EXCEPT_AARCH64_SERROR

//
// Current EL with SPx: 0x200 - 0x380
//
VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_CUR_SPx_SYNC)
ASM_PFX(SynchronousExceptionSPx):
  ExceptionEntry  EXCEPT_AARCH64_SYNCHRONOUS_EXCEPTIONS, SP0

VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_CUR_SPx_IRQ)
ASM_PFX(IrqSPx):
  ExceptionEntry  EXCEPT_AARCH64_IRQ

VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_CUR_SPx_FIQ)
ASM_PFX(FiqSPx):
  ExceptionEntry  EXCEPT_AARCH64_FIQ

VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_CUR_SPx_SERR)
ASM_PFX(SErrorSPx):
  ExceptionEntry  EXCEPT_AARCH64_SERROR

//
// Lower EL using AArch64 : 0x400 - 0x580
//
VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_LOW_A64_SYNC)
ASM_PFX(SynchronousExceptionA64):
  ExceptionEntry  EXCEPT_AARCH64_SYNCHRONOUS_EXCEPTIONS

VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_LOW_A64_IRQ)
ASM_PFX(IrqA64):
  ExceptionEntry  EXCEPT_AARCH64_IRQ

VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_LOW_A64_FIQ)
ASM_PFX(FiqA64):
  ExceptionEntry  EXCEPT_AARCH64_FIQ

VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_LOW_A64_SERR)
ASM_PFX(SErrorA64):
  ExceptionEntry  EXCEPT_AARCH64_SERROR

//
// Lower EL using AArch32 : 0x600 - 0x780
//
VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_LOW_A32_SYNC)
ASM_PFX(SynchronousExceptionA32):
  ExceptionEntry  EXCEPT_AARCH64_SYNCHRONOUS_EXCEPTIONS

VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_LOW_A32_IRQ)
ASM_PFX(IrqA32):
  ExceptionEntry  EXCEPT_AARCH64_IRQ

VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_LOW_A32_FIQ)
ASM_PFX(FiqA32):
  ExceptionEntry  EXCEPT_AARCH64_FIQ

VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_LOW_A32_SERR)
ASM_PFX(SErrorA32):
  ExceptionEntry  EXCEPT_AARCH64_SERROR

VECTOR_END(ExceptionHandlersStart)

ASM_PFX(ExceptionHandlersEnd):


ASM_PFX(CommonExceptionEntry):

  EL1_OR_EL2_OR_EL3(x1)
1:mrs     x2, elr_el1   // Exception Link Register
  mrs     x3, spsr_el1  // Saved Processor Status Register 32bit
  mrs     x5, esr_el1   // EL1 Exception syndrome register 32bit
  mrs     x6, far_el1   // EL1 Fault Address Register
  b       4f

2:mrs     x2, elr_el2   // Exception Link Register
  mrs     x3, spsr_el2  // Saved Processor Status Register 32bit
  mrs     x5, esr_el2   // EL2 Exception syndrome register 32bit
  mrs     x6, far_el2   // EL2 Fault Address Register
  b       4f

3:mrs     x2, elr_el3   // Exception Link Register
  mrs     x3, spsr_el3  // Saved Processor Status Register 32bit
  mrs     x5, esr_el3   // EL3 Exception syndrome register 32bit
  mrs     x6, far_el3   // EL3 Fault Address Register

4:mrs     x4, fpsr      // Floating point Status Register 32bit

  // Save the SYS regs
  stp     x2, x3, [x28, #-SYS_CONTEXT_SIZE]!
  stp     x4, x5, [x28, #0x10]
  str     x6, [x28, #0x20]
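  // The pre-indexed store above moved x28 down by SYS_CONTEXT_SIZE, so these
  // three stores land at context offsets 0x300-0x320 (ELR, SPSR, FPSR, ESR, FAR).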

  // Push FP regs to Stack.
  stp     q0, q1, [x28, #-FP_CONTEXT_SIZE]!
  stp     q2, q3, [x28, #0x20]
  stp     q4, q5, [x28, #0x40]
  stp     q6, q7, [x28, #0x60]
  stp     q8, q9, [x28, #0x80]
  stp     q10, q11, [x28, #0xa0]
  stp     q12, q13, [x28, #0xc0]
  stp     q14, q15, [x28, #0xe0]
  stp     q16, q17, [x28, #0x100]
  stp     q18, q19, [x28, #0x120]
  stp     q20, q21, [x28, #0x140]
  stp     q22, q23, [x28, #0x160]
  stp     q24, q25, [x28, #0x180]
  stp     q26, q27, [x28, #0x1a0]
  stp     q28, q29, [x28, #0x1c0]
  stp     q30, q31, [x28, #0x1e0]
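  // After the pre-indexed store of q0/q1, x28 points at the base of the
  // V-register area (context offset 0x100); q0-q31 occupy offsets 0x100-0x2f0
  // as in the layout at the top of this file.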

  // x0 still holds the exception type.
  // Set x1 to point to the top of our struct on the Stack
  mov     x1, sp

  // CommonCExceptionHandler (
  //   IN     EFI_EXCEPTION_TYPE   ExceptionType,   R0
  //   IN OUT EFI_SYSTEM_CONTEXT   SystemContext    R1
  //   )
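  //
  // As a rough sketch (field names per Protocol/DebugSupport.h; the handler name
  // below is illustrative, registered e.g. via CpuExceptionHandlerLib's
  // RegisterCpuInterruptHandler), a C handler can inspect the state saved above:
  //
  //   VOID
  //   EFIAPI
  //   MyExceptionHandler (
  //     IN     EFI_EXCEPTION_TYPE  ExceptionType,
  //     IN OUT EFI_SYSTEM_CONTEXT  SystemContext
  //     )
  //   {
  //     EFI_SYSTEM_CONTEXT_AARCH64  *Ctx = SystemContext.SystemContextAArch64;
  //     DEBUG ((DEBUG_ERROR, "ELR=0x%lx FAR=0x%lx ESR=0x%lx\n",
  //       Ctx->ELR, Ctx->FAR, Ctx->ESR));
  //   }
  //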

  // Call the handler as defined above

  // For now we spin in the handler if we received an abort of some kind.
  // We do not try to recover.
  bl      ASM_PFX(CommonCExceptionHandler) // Call exception handler

  // Pop as many GP regs as we can before entering the critical section below
  ldp     x2, x3, [sp, #0x10]
  ldp     x4, x5, [sp, #0x20]
  ldp     x6, x7, [sp, #0x30]
  ldp     x8, x9, [sp, #0x40]
  ldp     x10, x11, [sp, #0x50]
  ldp     x12, x13, [sp, #0x60]
  ldp     x14, x15, [sp, #0x70]
  ldp     x16, x17, [sp, #0x80]
  ldp     x18, x19, [sp, #0x90]
  ldp     x20, x21, [sp, #0xa0]
  ldp     x22, x23, [sp, #0xb0]
  ldp     x24, x25, [sp, #0xc0]
  ldp     x26, x27, [sp, #0xd0]
  ldp     x0, x1, [sp], #0xe0
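  // Note that sp only advances by 0xe0 here: the x28/x29 (0xe0) and LR/SP (0xf0)
  // slots are left in place because x28 is still needed below as the pointer into
  // the FP and SYS areas, and x29/x30 are restored last.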

  // Pop FP regs from Stack.
  ldp     q2, q3, [x28, #0x20]
  ldp     q4, q5, [x28, #0x40]
  ldp     q6, q7, [x28, #0x60]
  ldp     q8, q9, [x28, #0x80]
  ldp     q10, q11, [x28, #0xa0]
  ldp     q12, q13, [x28, #0xc0]
  ldp     q14, q15, [x28, #0xe0]
  ldp     q16, q17, [x28, #0x100]
  ldp     q18, q19, [x28, #0x120]
  ldp     q20, q21, [x28, #0x140]
  ldp     q22, q23, [x28, #0x160]
  ldp     q24, q25, [x28, #0x180]
  ldp     q26, q27, [x28, #0x1a0]
  ldp     q28, q29, [x28, #0x1c0]
  ldp     q30, q31, [x28, #0x1e0]
  ldp     q0, q1, [x28], #FP_CONTEXT_SIZE

  // Pop the SYS regs we need
  ldp     x29, x30, [x28]
  ldr     x28, [x28, #0x10]
  msr     fpsr, x28
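  // At this point x29 temporarily holds the saved ELR and x30 the saved SPSR
  // (loaded from the SYS area above); they are written to the ELR/SPSR system
  // registers below and only then reloaded with their real values from the GP area.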

  //
  // Disable interrupts (IRQ and FIQ) before restoring context,
  // or else the context will be corrupted by interrupt reentrance.
  // Interrupt mask will be restored from spsr by hardware when we call eret
  //
  msr     daifset, #3
  isb

  EL1_OR_EL2_OR_EL3(x28)
1:msr     elr_el1, x29  // Exception Link Register
  msr     spsr_el1, x30 // Saved Processor Status Register 32bit
  b       4f
2:msr     elr_el2, x29  // Exception Link Register
  msr     spsr_el2, x30 // Saved Processor Status Register 32bit
  b       4f
3:msr     elr_el3, x29  // Exception Link Register
  msr     spsr_el3, x30 // Saved Processor Status Register 32bit
4:

  // pop remaining GP regs and return from exception.
  ldr     x30, [sp, #0xf0 - 0xe0]
  ldp     x28, x29, [sp], #GP_CONTEXT_SIZE - 0xe0
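  // The offsets account for sp already having advanced 0xe0 into the GP area:
  // the saved LR sits at 0xf0 - 0xe0 = 0x10, and this pop advances sp by the
  // remaining GP_CONTEXT_SIZE - 0xe0 = 0x20 bytes, to the base of the FP area.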

  // Adjust SP to be where we started from when we came into the handler.
  // The handler cannot change the SP.
  add     sp, sp, #FP_CONTEXT_SIZE + SYS_CONTEXT_SIZE

  eret

ASM_PFX(RegisterEl0Stack):
  msr     sp_el0, x0
  ret