;------------------------------------------------------------------------------ ;
; Copyright (c) 2015 - 2022, Intel Corporation. All rights reserved.<BR>
; SPDX-License-Identifier: BSD-2-Clause-Patent
;
; Module Name:
;
;   MpFuncs.nasm
;
; Abstract:
;
;   This is the assembly code for MP support
;
;-------------------------------------------------------------------------------

%include "MpEqu.inc"
extern ASM_PFX(InitializeFloatingPointUnits)

%macro  OneTimeCall 1
    jmp     %1
%1 %+ OneTimerCallReturn:
%endmacro

%macro  OneTimeCallRet 1
    jmp     %1 %+ OneTimerCallReturn
%endmacro
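
; Note: OneTimeCall/OneTimeCallRet emulate a call/return pair with plain jumps so a
; helper routine can be invoked before a usable stack exists; because the return
; label is derived from the target name, each helper can be "called" from only one
; site.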

DEFAULT REL

SECTION .text

;-------------------------------------------------------------------------------------
;RendezvousFunnelProc procedure follows. All APs execute their procedure. This
;procedure serializes all the AP processors through an Init sequence. Note that APs
;arrive here in a very raw state, i.e. real mode with no stack.
;This procedure starts executing while the AP is still in 16-bit real mode, so the
;first part is assembled as 16-bit code (BITS 16).
;-------------------------------------------------------------------------------------
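;
; Execution flow: 16-bit real mode (RendezvousFunnelProcStart) -> 32-bit protected
; mode (Flat32Start) -> x64 long mode (LongModeStart) -> C code (CProcedureInvoke).
;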
RendezvousFunnelProcStart:
; At this point CS = 0x(vv00) and IP = 0x0.
; Save BIST information to ebp first.

BITS 16
    mov        ebp, eax                        ; Save BIST information

    mov        ax, cs
    mov        ds, ax
    mov        es, ax
    mov        ss, ax
    xor        ax, ax
    mov        fs, ax
    mov        gs, ax

    mov        si, MP_CPU_EXCHANGE_INFO_FIELD (BufferStart)
    mov        ebx, [si]

    mov        si, MP_CPU_EXCHANGE_INFO_FIELD (DataSegment)
    mov        edx, [si]

    ;
    ; Get start address of 32-bit code in low memory (<1MB)
    ;
    mov        edi, MP_CPU_EXCHANGE_INFO_FIELD (ModeTransitionMemory)

    mov        si, MP_CPU_EXCHANGE_INFO_FIELD (GdtrProfile)
o32 lgdt       [cs:si]

    mov        si, MP_CPU_EXCHANGE_INFO_FIELD (IdtrProfile)
o32 lidt       [cs:si]

    ;
    ; Switch to protected mode
    ;
    mov        eax, cr0                        ; Get control register 0
    or         eax, 000000003h                 ; Set PE bit (bit #0) & MP
    mov        cr0, eax

    ; Switch to 32-bit code (>1MB)
o32 jmp far    [cs:di]
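
; Note: [cs:di] holds a 16:32 far pointer filled in by the BSP: the ModeTransitionMemory
; field holds the flat address of the relocated Flat32Start code, and the adjacent
; ModeTransitionSegment field is assumed (per the MpEqu.inc layout) to supply the
; protected-mode code selector.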

;
; Following code must be copied to memory with type of EfiBootServicesCode.
; This is required when NX is enabled for EfiBootServicesCode memory.
;
BITS 32
Flat32Start:                                   ; protected mode entry point
    mov        ds, dx
    mov        es, dx
    mov        fs, dx
    mov        gs, dx
    mov        ss, dx

    ;
    ; Enable execute disable bit
    ;
    mov        esi, MP_CPU_EXCHANGE_INFO_FIELD (EnableExecuteDisable)
    cmp        byte [ebx + esi], 0
    jz         SkipEnableExecuteDisableBit

    mov        ecx, 0c0000080h                 ; EFER MSR number
    rdmsr                                      ; Read EFER
    bts        eax, 11                         ; Enable Execute Disable Bit
    wrmsr                                      ; Write EFER

SkipEnableExecuteDisableBit:
    ;
    ; Enable PAE
    ;
    mov        eax, cr4
    bts        eax, 5

    mov        esi, MP_CPU_EXCHANGE_INFO_FIELD (Enable5LevelPaging)
    cmp        byte [ebx + esi], 0
    jz         SkipEnable5LevelPaging

    ;
    ; Enable 5 Level Paging
    ;
    bts        eax, 12                         ; Set LA57=1.

SkipEnable5LevelPaging:

    mov        cr4, eax

    ;
    ; Load page table
    ;
    mov        esi, MP_CPU_EXCHANGE_INFO_FIELD (Cr3)           ; Save CR3 in ecx
    mov        ecx, [ebx + esi]
    mov        cr3, ecx                        ; Load CR3

    ;
    ; Enable long mode
    ;
    mov        ecx, 0c0000080h                 ; EFER MSR number
    rdmsr                                      ; Read EFER
    bts        eax, 8                          ; Set LME=1
    wrmsr                                      ; Write EFER

    ;
    ; Enable paging
    ;
    mov        eax, cr0                        ; Read CR0
    bts        eax, 31                         ; Set PG=1
    mov        cr0, eax                        ; Write CR0

    ;
    ; Far jump to 64-bit code
    ;
    mov        edi, MP_CPU_EXCHANGE_INFO_FIELD (ModeHighMemory)
    add        edi, ebx
    jmp far    [edi]
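
; Note: with EFER.LME already set, enabling CR0.PG above activated long mode; [edi]
; holds the ModeHighMemory/ModeHighSegment far pointer, and this far jump loads a
; 64-bit code selector so execution continues at LongModeStart in 64-bit mode.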

BITS 64

LongModeStart:
    mov        esi, ebx
    lea        edi, [esi + MP_CPU_EXCHANGE_INFO_FIELD (InitFlag)]
    cmp        qword [edi], 1                  ; ApInitConfig
    jnz        GetApicId

    ; Increment the number of APs executing here as early as possible
    ; This is decremented in C code when AP is finished executing
    mov        edi, esi
    add        edi, MP_CPU_EXCHANGE_INFO_FIELD (NumApsExecuting)
    lock inc   dword [edi]

    ; AP init
    mov        edi, esi
    add        edi, MP_CPU_EXCHANGE_INFO_FIELD (ApIndex)
    mov        ebx, 1
    lock xadd  dword [edi], ebx                 ; EBX = ApIndex++
    inc        ebx                              ; EBX is CpuNumber

    ; program stack
    mov        edi, esi
    add        edi, MP_CPU_EXCHANGE_INFO_FIELD (StackSize)
    mov        eax, dword [edi]
    mov        ecx, ebx
    inc        ecx
    mul        ecx                              ; EAX = StackSize * (CpuNumber + 1)
    mov        edi, esi
    add        edi, MP_CPU_EXCHANGE_INFO_FIELD (StackStart)
    add        rax, qword [edi]
    mov        rsp, rax
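
    ; Note: AP stacks are carved consecutively out of one region, so this AP's top of
    ; stack is StackStart + StackSize * (CpuNumber + 1).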

    ;
    ; Setup the GHCB when AMD SEV-ES active.
    ;
    OneTimeCall SevEsSetupGhcb
    jmp        CProcedureInvoke

GetApicId:
    ;
    ; Use the GHCB protocol to get the ApicId when SEV-ES is active.
    ;
    OneTimeCall SevEsGetApicId

DoCpuid:
    mov        eax, 0
    cpuid
    cmp        eax, 0bh
    jb         NoX2Apic                         ; CPUID level below CPUID_EXTENDED_TOPOLOGY

    mov        eax, 0bh
    xor        ecx, ecx
    cpuid
    test       ebx, 0ffffh
    jz         NoX2Apic                         ; CPUID.0BH:EBX[15:0] is zero

    ; Processor is x2APIC capable; 32-bit x2APIC ID is already in EDX
    jmp        GetProcessorNumber

NoX2Apic:
    ; Processor is not x2APIC capable, so get 8-bit APIC ID
    mov        eax, 1
    cpuid
    shr        ebx, 24
    mov        edx, ebx
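
    ; Note: both paths leave the AP's initial APIC ID in EDX (the 32-bit x2APIC ID
    ; from CPUID leaf 0Bh, or the 8-bit ID from CPUID.01H:EBX[31:24]).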

GetProcessorNumber:
    ;
    ; Get processor number for this AP
    ; Note that BSP may become an AP due to SwitchBsp()
    ;
    xor         ebx, ebx
    lea         eax, [esi + MP_CPU_EXCHANGE_INFO_FIELD (CpuInfo)]
    mov         rdi, [eax]

GetNextProcNumber:
    cmp         dword [rdi + CPU_INFO_IN_HOB.InitialApicId], edx ; APIC ID match?
    jz          ProgramStack
    add         rdi, CPU_INFO_IN_HOB_size
    inc         ebx
    jmp         GetNextProcNumber
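
    ; Note: the loop above walks the CPU_INFO_IN_HOB array comparing InitialApicId
    ; with the APIC ID in EDX while counting entries in EBX; it is unbounded, so a
    ; matching entry is assumed to exist.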

ProgramStack:
    mov         rsp, qword [rdi + CPU_INFO_IN_HOB.ApTopOfStack]

CProcedureInvoke:
    push       rbp               ; Push BIST data at top of AP stack
    xor        rbp, rbp          ; Clear ebp for call stack trace
    push       rbp
    mov        rbp, rsp

    mov        rax, qword [esi + MP_CPU_EXCHANGE_INFO_FIELD (InitializeFloatingPointUnits)]
    sub        rsp, 20h
    call       rax               ; Call assembly function to initialize FPU per UEFI spec
    add        rsp, 20h
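
    ; Note: the 20h adjustments allocate the 32-byte shadow space required by the
    ; Microsoft x64 calling convention before each CALL (also done for the C call
    ; below).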

    mov        edx, ebx          ; edx is ApIndex
    mov        ecx, esi
    add        ecx, MP_CPU_EXCHANGE_INFO_OFFSET        ; rcx is address of exchange info data buffer

    mov        edi, esi
    add        edi, MP_CPU_EXCHANGE_INFO_FIELD (CFunction)
    mov        rax, qword [edi]

    sub        rsp, 20h
    call       rax               ; Invoke C function
    add        rsp, 20h
    jmp        $                 ; Should never reach here

;
; Required for the AMD SEV helper functions
;
%include "AmdSev.nasm"

RendezvousFunnelProcEnd:

;-------------------------------------------------------------------------------------
;  AsmRelocateApLoop (MwaitSupport, ApTargetCState, PmCodeSegment, TopOfApStack, CountTofinish, Pm16CodeSegment, SevEsAPJumpTable, WakeupBuffer);
;-------------------------------------------------------------------------------------
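; (This loop's address and size are exposed via AsmGetAddressMap below so the C code
; can relocate it; APs are parked here in either an MWAIT or HLT loop.)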
AsmRelocateApLoopStart:
BITS 64
    cmp        qword [rsp + 56], 0          ; SevEsAPJumpTable
    je         NoSevEs

    ;
    ; Perform some SEV-ES related setup before leaving 64-bit mode
    ;
    push       rcx
    push       rdx

    ;
    ; Get the RDX reset value using CPUID
    ;
    mov        rax, 1
    cpuid
    mov        rsi, rax          ; Save off the reset value for RDX

    ;
    ; Prepare the GHCB for the AP_HLT_LOOP VMGEXIT call
    ;   - Must be done while in 64-bit long mode so that writes to
    ;     the GHCB memory will be unencrypted.
    ;   - No NAE events can be generated once this is set otherwise
    ;     the AP_RESET_HOLD SW_EXITCODE will be overwritten.
    ;
    mov        rcx, 0xc0010130
    rdmsr                        ; Retrieve current GHCB address
    shl        rdx, 32
    or         rdx, rax

    mov        rdi, rdx
    xor        rax, rax
    mov        rcx, 0x800
    shr        rcx, 3
    rep stosq                    ; Clear the GHCB

    mov        rax, 0x80000004   ; VMGEXIT AP_RESET_HOLD
    mov        [rdx + 0x390], rax
    mov        rax, 114          ; Set SwExitCode valid bit
    bts        [rdx + 0x3f0], rax
    inc        rax               ; Set SwExitInfo1 valid bit
    bts        [rdx + 0x3f0], rax
    inc        rax               ; Set SwExitInfo2 valid bit
    bts        [rdx + 0x3f0], rax
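
    ; Note: in the GHCB valid bitmap at offset 0x3f0, bit N covers the 8-byte field at
    ; offset N * 8, so bits 114/115/116 mark SwExitCode (0x390), SwExitInfo1 (0x398)
    ; and SwExitInfo2 (0x3a0) as valid.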

    pop        rdx
    pop        rcx

NoSevEs:
    cli                          ; Disable interrupt before switching to 32-bit mode
    mov        rax, [rsp + 40]   ; CountTofinish
    lock dec   dword [rax]       ; (*CountTofinish)--

    mov        r10, [rsp + 48]   ; Pm16CodeSegment
    mov        rax, [rsp + 56]   ; SevEsAPJumpTable
    mov        rbx, [rsp + 64]   ; WakeupBuffer
    mov        rsp, r9           ; TopOfApStack

    push       rax               ; Save SevEsAPJumpTable
    push       rbx               ; Save WakeupBuffer
    push       r10               ; Save Pm16CodeSegment
    push       rcx               ; Save MwaitSupport
    push       rdx               ; Save ApTargetCState

    lea        rax, [PmEntry]    ; rax <- The start address of transition code

    push       r8
    push       rax

    ;
    ; Clear R8 - R15, for reset, before going into 32-bit mode
    ;
    xor        r8, r8
    xor        r9, r9
    xor        r10, r10
    xor        r11, r11
    xor        r12, r12
    xor        r13, r13
    xor        r14, r14
    xor        r15, r15

    ;
    ; Far return into 32-bit mode
    ;
    retfq
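
; Note: RETFQ pops the 64-bit return RIP (PmEntry, pushed via RAX) and then the code
; selector (PmCodeSegment, passed in R8), so execution resumes at PmEntry in a 32-bit
; code segment.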

BITS 32
PmEntry:
    mov        eax, cr0
    btr        eax, 31           ; Clear CR0.PG
    mov        cr0, eax          ; Disable paging and caches

    mov        ecx, 0xc0000080
    rdmsr
    and        ah, ~ 1           ; Clear LME
    wrmsr
    mov        eax, cr4
    and        al, ~ (1 << 5)    ; Clear PAE
    mov        cr4, eax

    pop        edx
    add        esp, 4
    pop        ecx
    add        esp, 4
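
    ; Note: the values were pushed as 8-byte quantities in 64-bit mode, so each 32-bit
    ; pop is followed by "add esp, 4" to skip the upper half; EDX now holds
    ; ApTargetCState and ECX holds MwaitSupport.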

MwaitCheck:
    cmp        cl, 1              ; Check mwait-monitor support
    jnz        HltLoop
    mov        ebx, edx           ; Save C-State to ebx
MwaitLoop:
    cli
    mov        eax, esp           ; Set Monitor Address
    xor        ecx, ecx           ; ecx = 0
    xor        edx, edx           ; edx = 0
    monitor
    mov        eax, ebx           ; Mwait Cx, Target C-State per eax[7:4]
    shl        eax, 4
    mwait
    jmp        MwaitLoop
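
    ; Note: MONITOR arms the address range at [ESP] (the AP's top of stack) and MWAIT
    ; idles in the C-state encoded in EAX[7:4]; a write to the monitored line (or
    ; another wake event) ends MWAIT, and the loop re-arms and waits again.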

HltLoop:
    pop        edx                ; PM16CodeSegment
    add        esp, 4
    pop        ebx                ; WakeupBuffer
    add        esp, 4
    pop        eax                ; SevEsAPJumpTable
    add        esp, 4
    cmp        eax, 0             ; Check for SEV-ES
    je         DoHlt

    cli
    ;
    ; SEV-ES is enabled, use VMGEXIT (GHCB information already
    ; set by caller)
    ;
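    ; "rep vmmcall" below is the VMGEXIT instruction encoding; it requests the
    ; AP_RESET_HOLD event prepared in the GHCB earlier.
    ;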
BITS 64
    rep        vmmcall
BITS 32

    ;
    ; Back from VMGEXIT AP_HLT_LOOP
    ; Push the FLAGS/CS/IP values to use
    ;
    push       word 0x0002        ; EFLAGS
    xor        ecx, ecx
    mov        cx, [eax + 2]      ; CS
    push       cx
    mov        cx, [eax]          ; IP
    push       cx
    push       word 0x0000        ; For alignment, will be discarded

    push       edx
    push       ebx

    mov        edx, esi           ; Restore RDX reset value

    retf
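
    ; Note: RETF consumes the two dwords just pushed: EIP = WakeupBuffer (EBX) and
    ; CS = Pm16CodeSegment (low word of EDX); the word-sized FLAGS/CS/IP values pushed
    ; above (CS/IP taken from the SevEsAPJumpTable) remain on the stack for the
    ; wakeup-buffer code to use.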

DoHlt:
    cli
    hlt
    jmp        DoHlt

BITS 64
AsmRelocateApLoopEnd:

;-------------------------------------------------------------------------------------
; AsmGetAddressMap (&AddressMap);
;-------------------------------------------------------------------------------------
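; Fills the caller-provided MP_ASSEMBLY_ADDRESS_MAP (pointer in RCX, Microsoft x64
; calling convention) with the absolute addresses, sizes and relative offsets of the
; code above, so the caller can relocate it and still locate each entry point.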
global ASM_PFX(AsmGetAddressMap)
ASM_PFX(AsmGetAddressMap):
    lea        rax, [RendezvousFunnelProcStart]
    mov        qword [rcx + MP_ASSEMBLY_ADDRESS_MAP.RendezvousFunnelAddress], rax
    mov        qword [rcx + MP_ASSEMBLY_ADDRESS_MAP.ModeEntryOffset], LongModeStart - RendezvousFunnelProcStart
    mov        qword [rcx + MP_ASSEMBLY_ADDRESS_MAP.RendezvousFunnelSize], RendezvousFunnelProcEnd - RendezvousFunnelProcStart
    lea        rax, [AsmRelocateApLoopStart]
    mov        qword [rcx + MP_ASSEMBLY_ADDRESS_MAP.RelocateApLoopFuncAddress], rax
    mov        qword [rcx + MP_ASSEMBLY_ADDRESS_MAP.RelocateApLoopFuncSize], AsmRelocateApLoopEnd - AsmRelocateApLoopStart
    mov        qword [rcx + MP_ASSEMBLY_ADDRESS_MAP.ModeTransitionOffset], Flat32Start - RendezvousFunnelProcStart
    mov        qword [rcx + MP_ASSEMBLY_ADDRESS_MAP.SwitchToRealNoNxOffset], SwitchToRealProcStart - Flat32Start
    mov        qword [rcx + MP_ASSEMBLY_ADDRESS_MAP.SwitchToRealPM16ModeOffset], PM16Mode - RendezvousFunnelProcStart
    mov        qword [rcx + MP_ASSEMBLY_ADDRESS_MAP.SwitchToRealPM16ModeSize], SwitchToRealProcEnd - PM16Mode
    ret

;-------------------------------------------------------------------------------------
;AsmExchangeRole procedure follows. This procedure is executed by the current BSP,
;which is about to become an AP. It switches its stack with the current AP.
;AsmExchangeRole (IN CPU_EXCHANGE_INFO *MyInfo, IN CPU_EXCHANGE_INFO *OthersInfo);
;-------------------------------------------------------------------------------------
global ASM_PFX(AsmExchangeRole)
ASM_PFX(AsmExchangeRole):
    ; DO NOT call other functions in this function, since two CPUs may use one stack
    ; at the same time. If one CPU tries to call a function, the stack will be corrupted.
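
    ; Handshake overview: each CPU saves GDTR, IDTR and RSP into its own
    ; CPU_EXCHANGE_ROLE_INFO and marks its State as STORED, waits for the other CPU
    ; to reach STORED, loads the other CPU's context, marks the other CPU's State as
    ; LOADED, and finally waits for its own State to become LOADED before unwinding
    ; on the swapped stack.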

    push       rax
    push       rbx
    push       rcx
    push       rdx
    push       rsi
    push       rdi
    push       rbp
    push       r8
    push       r9
    push       r10
    push       r11
    push       r12
    push       r13
    push       r14
    push       r15

    mov        rax, cr0
    push       rax

    mov        rax, cr4
    push       rax

    ; rsi contains MyInfo pointer
    mov        rsi, rcx

    ; rdi contains OthersInfo pointer
    mov        rdi, rdx

    ; Store EFLAGS on the stack, and the GDTR and IDTR registers into MyInfo
    pushfq
    sgdt       [rsi + CPU_EXCHANGE_ROLE_INFO.Gdtr]
    sidt       [rsi + CPU_EXCHANGE_ROLE_INFO.Idtr]

    ; Store its StackPointer
    mov        [rsi + CPU_EXCHANGE_ROLE_INFO.StackPointer], rsp

    ; update its switch state to STORED
    mov        byte [rsi + CPU_EXCHANGE_ROLE_INFO.State], CPU_SWITCH_STATE_STORED

WaitForOtherStored:
    ; wait until the other CPU finishes storing its state
    cmp        byte [rdi + CPU_EXCHANGE_ROLE_INFO.State], CPU_SWITCH_STATE_STORED
    jz         OtherStored
    pause
    jmp        WaitForOtherStored

OtherStored:
    ; Since the other CPU has already stored its state, load it
    ; load GDTR value
    lgdt       [rdi + CPU_EXCHANGE_ROLE_INFO.Gdtr]

    ; load IDTR value
    lidt       [rdi + CPU_EXCHANGE_ROLE_INFO.Idtr]

    ; load its future StackPointer
    mov        rsp, [rdi + CPU_EXCHANGE_ROLE_INFO.StackPointer]

    ; update the other CPU's switch state to LOADED
    mov        byte [rdi + CPU_EXCHANGE_ROLE_INFO.State], CPU_SWITCH_STATE_LOADED

WaitForOtherLoaded:
    ; wait until the other CPU finishes loading its new state,
    ; otherwise the data on the stack may be corrupted
    cmp        byte [rsi + CPU_EXCHANGE_ROLE_INFO.State], CPU_SWITCH_STATE_LOADED
    jz         OtherLoaded
    pause
    jmp        WaitForOtherLoaded

OtherLoaded:
    ; since the other CPU has already gotten the data it wants, leave this procedure
    popfq

    pop        rax
    mov        cr4, rax

    pop        rax
    mov        cr0, rax

    pop        r15
    pop        r14
    pop        r13
    pop        r12
    pop        r11
    pop        r10
    pop        r9
    pop        r8
    pop        rbp
    pop        rdi
    pop        rsi
    pop        rdx
    pop        rcx
    pop        rbx
    pop        rax

    ret