# Source: mirror_edk2.git — UefiCpuPkg/CpuDxe/X64/CpuAsm.S
# Commit: "Add CPU DXE driver for IA32 & X64 processor architectures."
1 # TITLE CpuAsm.asm:
2
3 #------------------------------------------------------------------------------
4 #*
5 #* Copyright 2008 - 2009, Intel Corporation
6 #* All rights reserved. This program and the accompanying materials
7 #* are licensed and made available under the terms and conditions of the BSD License
8 #* which accompanies this distribution. The full text of the license may be found at
9 #* http://opensource.org/licenses/bsd-license.php
10 #*
11 #* THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
12 #* WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
13 #*
14 #* CpuAsm.S
15 #*
16 #* Abstract:
17 #*
18 #------------------------------------------------------------------------------
19
20
21 #text SEGMENT
22
23
24 #EXTRN ASM_PFX(mErrorCodeFlag):DWORD # Error code flags for exceptions
25
26
#
# ExternalVectorTablePtr: 8-byte storage for the address of the external
# interrupt vector table (an array of 256 handler pointers allocated by the
# C code).  Read by CommonInterruptEntry to dispatch registered handlers.
#
ExternalVectorTablePtr:
        .byte   0, 0, 0, 0, 0, 0, 0, 0          # 64-bit pointer, zero until initialized

.intel_syntax
ASM_GLOBAL ASM_PFX(InitializeExternalVectorTablePtr)
#------------------------------------------------------------------------------
# VOID
# InitializeExternalVectorTablePtr (
#   VOID  *VectorTable     // RCX per the Microsoft x64 calling convention
#   );
#
# Records the vector-table address for later use by CommonInterruptEntry.
# Clobbers: RAX.
#------------------------------------------------------------------------------
ASM_PFX(InitializeExternalVectorTablePtr):
        lea     %rax, [%rip+ExternalVectorTablePtr]     # RIP-relative address of the storage slot
        mov     [%rax], %rcx                            # save the table pointer (1st argument)
        ret
39
40
#------------------------------------------------------------------------------
# VOID
# SetCodeSelector (
#   UINT16 Selector        // CX per the Microsoft x64 calling convention
#   );
#
# Reloads CS with the given selector by building a far pointer in stack
# scratch space and taking a far jump through it; execution resumes at
# setCodeSelectorLongJump with the new CS.  Clobbers: RAX.
#------------------------------------------------------------------------------
.intel_syntax
ASM_GLOBAL ASM_PFX(SetCodeSelector)
ASM_PFX(SetCodeSelector):
        sub     %rsp, 0x10                      # scratch space for the far pointer
        lea     %rax, [%rip+setCodeSelectorLongJump]
        mov     [%rsp], %rax                    # offset: low 32 bits land at [rsp..rsp+3]
        mov     [%rsp+4], %cx                   # selector at [rsp+4] (overwrites high half of RAX)
        # NOTE(review): this is a 32-bit (fword) far pointer — 4-byte offset +
        # 2-byte selector — so it assumes the jump target lies below 4 GB;
        # TODO confirm firmware code placement guarantees this.
        jmp     fword ptr [%rsp]                # far jump: reloads CS
setCodeSelectorLongJump:
        add     %rsp, 0x10                      # release scratch space
        ret
58
#------------------------------------------------------------------------------
# VOID
# SetDataSelectors (
#   UINT16 Selector        // CX per the Microsoft x64 calling convention
#   );
#
# Loads every data segment register (SS, DS, ES, FS, GS) with Selector.
# No clobbers besides the segment registers themselves.
#------------------------------------------------------------------------------
.intel_syntax
ASM_GLOBAL ASM_PFX(SetDataSelectors)
ASM_PFX(SetDataSelectors):
        mov     %ss, %cx
        mov     %ds, %cx
        mov     %es, %cx
        mov     %fs, %cx
        mov     %gs, %cx
        ret
74
#---------------------------------------;
# CommonInterruptEntry                  ;
#---------------------------------------;
# Common entry point reached from every per-vector stub.  The stub executes a
# `call` to this routine, so the "return address" on the stack is really the
# address of the 16-bit vector number stored after the call instruction.
# Builds an EFI_SYSTEM_CONTEXT_X64 on the stack, dispatches to the handler
# registered in ExternalVectorTablePtr (if any), restores context and iretq's.

.intel_syntax
ASM_GLOBAL ASM_PFX(CommonInterruptEntry)
ASM_PFX(CommonInterruptEntry):
        cli
        #
        # All interrupt handlers are invoked through interrupt gates, so
        # the IF flag is automatically cleared at the entry point.
        #
        #
        # Calculate vector number: the stub's `call` pushed the address of the
        # vector-number word; swap it with RCX and load the number.
        #
        xchg    %rcx, [%rsp]                    # rcx = &vector number; old rcx saved on stack
        movzx   %ecx, word ptr [%rcx]
        cmp     %ecx, 32                        # Intel-reserved exception vector?
        jae     NoErrorCode
        push    %rax
        lea     %rax, [%rip+ASM_PFX(mErrorCodeFlag)]
        bt      dword ptr [%rax], %ecx          # CF = 1 if CPU already pushed an error code
        pop     %rax
        jc      CommonInterruptEntry_al_0000

NoErrorCode:

        #
        # Push a dummy error code on the stack
        # to maintain a coherent stack map
        #
        push    [%rsp]                          # duplicate saved RCX downward...
        mov     qword ptr [%rsp + 8], 0         # ...and zero the vacated error-code slot
CommonInterruptEntry_al_0000:
        push    %rbp
        mov     %rbp, %rsp

        #
        # Stack:
        # +---------------------+ <-- 16-byte aligned ensured by processor
        # + Old SS +
        # +---------------------+
        # + Old RSP +
        # +---------------------+
        # + RFlags +
        # +---------------------+
        # + CS +
        # +---------------------+
        # + RIP +
        # +---------------------+
        # + Error Code +
        # +---------------------+
        # + RCX / Vector Number +
        # +---------------------+
        # + RBP +
        # +---------------------+ <-- RBP, 16-byte aligned
        #

        #
        # Since here the stack pointer is 16-byte aligned,
        # EFI_FX_SAVE_STATE_X64 of EFI_SYSTEM_CONTEXT_X64
        # is 16-byte aligned
        #

        #; UINT64 Rdi, Rsi, Rbp, Rsp, Rbx, Rdx, Rcx, Rax;
        #; UINT64 R8, R9, R10, R11, R12, R13, R14, R15;
        push    %r15
        push    %r14
        push    %r13
        push    %r12
        push    %r11
        push    %r10
        push    %r9
        push    %r8
        push    %rax
        push    qword ptr [%rbp + 8]            # RCX (original value, saved by the xchg)
        push    %rdx
        push    %rbx
        push    qword ptr [%rbp + 48]           # RSP (from the interrupt frame)
        push    qword ptr [%rbp]                # RBP
        push    %rsi
        push    %rdi

        #; UINT64 Gs, Fs, Es, Ds, Cs, Ss; ensure high 16 bits of each are zero
        movzx   %rax, word ptr [%rbp + 56]
        push    %rax                            # for ss
        movzx   %rax, word ptr [%rbp + 32]
        push    %rax                            # for cs
        mov     %rax, %ds
        push    %rax
        mov     %rax, %es
        push    %rax
        mov     %rax, %fs
        push    %rax
        mov     %rax, %gs
        push    %rax

        mov     [%rbp + 8], %rcx                # save vector number over the stashed RCX slot

        #; UINT64 Rip;
        push    qword ptr [%rbp + 24]

        #; UINT64 Gdtr[2], Idtr[2];
        xor     %rax, %rax
        push    %rax
        push    %rax
        sidt    [%rsp]
        xchg    %rax, [%rsp + 2]                # repack 10-byte descriptor into two qwords
        xchg    %rax, [%rsp]
        xchg    %rax, [%rsp + 8]

        xor     %rax, %rax
        push    %rax
        push    %rax
        sgdt    [%rsp]
        xchg    %rax, [%rsp + 2]
        xchg    %rax, [%rsp]
        xchg    %rax, [%rsp + 8]

        #; UINT64 Ldtr, Tr;
        xor     %rax, %rax
        str     %ax
        push    %rax
        sldt    %ax
        push    %rax

        #; UINT64 RFlags;
        push    qword ptr [%rbp + 40]

        #; UINT64 Cr0, Cr1, Cr2, Cr3, Cr4, Cr8;
        mov     %rax, %cr8
        push    %rax
        mov     %rax, %cr4
        or      %rax, 0x208                     # set OSFXSR | OSXMMEXCPT for fxsave/fxrstor
        mov     %cr4, %rax
        push    %rax
        mov     %rax, %cr3
        push    %rax
        mov     %rax, %cr2
        push    %rax
        xor     %rax, %rax                      # CR1 does not exist; store 0
        push    %rax
        mov     %rax, %cr0
        push    %rax

        #; UINT64 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
        mov     %rax, %dr7
        push    %rax
        #; clear Dr7 while executing the debugger itself
        xor     %rax, %rax
        mov     %dr7, %rax

        mov     %rax, %dr6
        push    %rax
        #; ensure all status bits in dr6 are clear...
        xor     %rax, %rax
        mov     %dr6, %rax

        mov     %rax, %dr3
        push    %rax
        mov     %rax, %dr2
        push    %rax
        mov     %rax, %dr1
        push    %rax
        mov     %rax, %dr0
        push    %rax

        #; FX_SAVE_STATE_X64 FxSaveState;
        sub     %rsp, 512
        mov     %rdi, %rsp
        .byte   0x0f, 0x0ae, 0x07               # fxsave [rdi]

        #; UINT32 ExceptionData;
        push    qword ptr [%rbp + 16]

        #; call into exception handler
        mov     %rcx, [%rbp + 8]                # rcx = vector number
        lea     %rax, [%rip+ExternalVectorTablePtr]
        mov     %rax, [%rax]                    # full 64-bit load of the table pointer
                                                # (was `mov %eax, [%eax]`, which truncated
                                                # the pointer and address to 32 bits)
        mov     %rax, [%rax + %rcx * 8]         # rax = registered handler for this vector
        or      %rax, %rax                      # NULL?

        je      nonNullValue

        #; Prepare parameters and call
        mov     %rdx, %rsp                      # rdx = &EFI_SYSTEM_CONTEXT_X64
        #
        # Per the x64 calling convention, allocate maximum parameter stack space
        # (32-byte shadow space) and make sure RSP is 16-byte aligned
        #
        sub     %rsp, 4 * 8 + 8
        call    %rax
        add     %rsp, 4 * 8 + 8

nonNullValue:
        cli                                     # handler may have re-enabled interrupts
        #; UINT64 ExceptionData;
        add     %rsp, 8

        #; FX_SAVE_STATE_X64 FxSaveState;
        mov     %rsi, %rsp
        .byte   0x0f, 0x0ae, 0x0E               # fxrstor [rsi]
        add     %rsp, 512

        #; UINT64 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
        pop     %rax
        mov     %dr0, %rax
        pop     %rax
        mov     %dr1, %rax
        pop     %rax
        mov     %dr2, %rax
        pop     %rax
        mov     %dr3, %rax
        #; skip restore of dr6.  We cleared dr6 during the context save.
        add     %rsp, 8
        pop     %rax
        mov     %dr7, %rax

        #; UINT64 Cr0, Cr1, Cr2, Cr3, Cr4, Cr8;
        pop     %rax
        mov     %cr0, %rax
        add     %rsp, 8                         # not for Cr1
        pop     %rax
        mov     %cr2, %rax
        pop     %rax
        mov     %cr3, %rax
        pop     %rax
        mov     %cr4, %rax
        pop     %rax
        mov     %cr8, %rax

        #; UINT64 RFlags;
        pop     qword ptr [%rbp + 40]           # write back into the iret frame

        #; UINT64 Ldtr, Tr;
        #; UINT64 Gdtr[2], Idtr[2];
        #; Best not let anyone mess with these particular registers...
        add     %rsp, 48

        #; UINT64 Rip;
        pop     qword ptr [%rbp + 24]

        #; UINT64 Gs, Fs, Es, Ds, Cs, Ss;
        pop     %rax
        # mov gs, rax ; not for gs
        pop     %rax
        # mov fs, rax ; not for fs
        # (X64 will not use fs and gs, so we do not restore them)
        pop     %rax
        mov     %es, %rax
        pop     %rax
        mov     %ds, %rax
        pop     qword ptr [%rbp + 32]           # for cs
        pop     qword ptr [%rbp + 56]           # for ss

        #; UINT64 Rdi, Rsi, Rbp, Rsp, Rbx, Rdx, Rcx, Rax;
        #; UINT64 R8, R9, R10, R11, R12, R13, R14, R15;
        pop     %rdi
        pop     %rsi
        add     %rsp, 8                         # not for rbp
        pop     qword ptr [%rbp + 48]           # for rsp
        pop     %rbx
        pop     %rdx
        pop     %rcx
        pop     %rax
        pop     %r8
        pop     %r9
        pop     %r10
        pop     %r11
        pop     %r12
        pop     %r13
        pop     %r14
        pop     %r15

        mov     %rsp, %rbp
        pop     %rbp
        add     %rsp, 16                        # drop vector number and error code
        iretq
357
358
359 #text ENDS
360
361 #END
362
363