;------------------------------------------------------------------------------ ;
; Copyright (c) 2015 - 2022, Intel Corporation. All rights reserved.<BR>
; SPDX-License-Identifier: BSD-2-Clause-Patent
;
; Module Name:
;
;   MpFuncs.nasm
;
; Abstract:
;
;   This is the assembly code for MP support
;
;-------------------------------------------------------------------------------

%include "MpEqu.inc"
extern ASM_PFX(InitializeFloatingPointUnits)

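;
; OneTimeCall/OneTimeCallRet implement a minimal call/return pair without using
; the stack: the "call" is a plain jmp to the target, and the callee returns by
; jumping back to the generated %1 %+ OneTimerCallReturn label. This is
; presumably done because the SEV-ES helpers may be invoked before the AP has
; programmed a usable stack; each target can only be "called" from one site.
;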
%macro  OneTimeCall 1
    jmp     %1
%1 %+ OneTimerCallReturn:
%endmacro

%macro  OneTimeCallRet 1
    jmp     %1 %+ OneTimerCallReturn
%endmacro

DEFAULT REL

SECTION .text

;-------------------------------------------------------------------------------------
;RendezvousFunnelProc procedure follows. All APs execute this procedure. It
;serializes the AP processors through an Init sequence. Note that the APs
;arrive here very raw, i.e., in real mode with no stack.
;ALSO THIS PROCEDURE IS EXECUTED BY APs ONLY IN 16-BIT MODE. HENCE THIS PROC
;IS IN MACHINE CODE.
;-------------------------------------------------------------------------------------
RendezvousFunnelProcStart:
; At this point CS = 0x(vv00) and IP = 0x0.
; Save BIST information to ebp first.

BITS 16
    mov        ebp, eax                    ; Save BIST information

    mov        ax, cs
    mov        ds, ax
    mov        es, ax
    mov        ss, ax
    xor        ax, ax
    mov        fs, ax
    mov        gs, ax

    mov        si, MP_CPU_EXCHANGE_INFO_FIELD (BufferStart)
    mov        ebx, [si]

    mov        si, MP_CPU_EXCHANGE_INFO_FIELD (DataSegment)
    mov        edx, [si]

    ;
    ; Get start address of 32-bit code in low memory (<1MB)
    ;
    mov        edi, MP_CPU_EXCHANGE_INFO_FIELD (ModeTransitionMemory)

    mov        si, MP_CPU_EXCHANGE_INFO_FIELD (GdtrProfile)
o32 lgdt       [cs:si]

    ;
    ; Switch to protected mode
    ;
    mov        eax, cr0                    ; Get control register 0
    or         eax, 000000003h             ; Set PE bit (bit #0) & MP
    mov        cr0, eax

    ; Switch to 32-bit code (>1MB)
o32 jmp        far [cs:di]

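; Note: DI points at the ModeTransitionMemory field of the exchange info, so the
; far jump above presumably consumes that 32-bit offset together with the
; adjacent 16-bit segment field as an offset:selector pair into the copy of the
; 32-bit transition code placed below 1MB.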
;
; The following code must be copied to memory of type EfiBootServicesCode.
; This is required when NX protection is enabled for EfiBootServicesCode memory.
;
BITS 32
Flat32Start:                               ; protected mode entry point
    mov        ds, dx
    mov        es, dx
    mov        fs, dx
    mov        gs, dx
    mov        ss, dx

    ;
    ; Enable execute disable bit
    ;
    mov        esi, MP_CPU_EXCHANGE_INFO_FIELD (EnableExecuteDisable)
    cmp        byte [ebx + esi], 0
    jz         SkipEnableExecuteDisableBit

    mov        ecx, 0c0000080h             ; EFER MSR number
    rdmsr                                  ; Read EFER
    bts        eax, 11                     ; Enable Execute Disable Bit
    wrmsr                                  ; Write EFER

SkipEnableExecuteDisableBit:
    ;
    ; Enable PAE
    ;
    mov        eax, cr4
    bts        eax, 5

    mov        esi, MP_CPU_EXCHANGE_INFO_FIELD (Enable5LevelPaging)
    cmp        byte [ebx + esi], 0
    jz         SkipEnable5LevelPaging

    ;
    ; Enable 5 Level Paging
    ;
    bts        eax, 12                     ; Set LA57=1.

SkipEnable5LevelPaging:

    mov        cr4, eax

    ;
    ; Load page table
    ;
    mov        esi, MP_CPU_EXCHANGE_INFO_FIELD (Cr3)
    mov        ecx, [ebx + esi]            ; Save CR3 in ecx
    mov        cr3, ecx                    ; Load CR3

    ;
    ; Enable long mode
    ;
    mov        ecx, 0c0000080h             ; EFER MSR number
    rdmsr                                  ; Read EFER
    bts        eax, 8                      ; Set LME=1
    wrmsr                                  ; Write EFER

    ;
    ; Enable paging
    ;
    mov        eax, cr0                    ; Read CR0
    bts        eax, 31                     ; Set PG=1
    mov        cr0, eax                    ; Write CR0

    ;
    ; Far jump to 64-bit code
    ;
    mov        edi, MP_CPU_EXCHANGE_INFO_FIELD (ModeHighMemory)
    add        edi, ebx
    jmp        far [edi]

BITS 64

LongModeStart:
    mov        esi, ebx

    ; Set up the IDT at the start of the 64-bit code
    lea        edi, [esi + MP_CPU_EXCHANGE_INFO_FIELD (IdtrProfile)]
    lidt       [edi]

    lea        edi, [esi + MP_CPU_EXCHANGE_INFO_FIELD (InitFlag)]
    cmp        qword [edi], 1              ; ApInitConfig
    jnz        GetApicId

    ; Increment the number of APs executing here as early as possible
    ; This is decremented in C code when the AP has finished executing
    mov        edi, esi
    add        edi, MP_CPU_EXCHANGE_INFO_FIELD (NumApsExecuting)
    lock inc   dword [edi]

    ; AP init
    mov        edi, esi
    add        edi, MP_CPU_EXCHANGE_INFO_FIELD (ApIndex)
    mov        ebx, 1
    lock xadd  dword [edi], ebx             ; EBX = ApIndex++
    inc        ebx                          ; EBX is CpuNumber

    ; program stack
    mov        edi, esi
    add        edi, MP_CPU_EXCHANGE_INFO_FIELD (StackSize)
    mov        eax, dword [edi]
    mov        ecx, ebx
    inc        ecx
    mul        ecx                          ; EAX = StackSize * (CpuNumber + 1)
    mov        edi, esi
    add        edi, MP_CPU_EXCHANGE_INFO_FIELD (StackStart)
    add        rax, qword [edi]
    mov        rsp, rax
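    ; RSP now points to the top of this AP's private stack:
    ; StackStart + StackSize * (CpuNumber + 1).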

    ;
    ; Setup the GHCB when AMD SEV-ES is active.
    ;
    OneTimeCall SevEsSetupGhcb

    jmp        CProcedureInvoke

GetApicId:
    ;
    ; Use the GHCB protocol to get the ApicId when SEV-ES is active.
    ;
    OneTimeCall SevEsGetApicId

DoCpuid:
    mov        eax, 0
    cpuid
    cmp        eax, 0bh
    jb         NoX2Apic                    ; CPUID level below CPUID_EXTENDED_TOPOLOGY

    mov        eax, 0bh
    xor        ecx, ecx
    cpuid
    test       ebx, 0ffffh
    jz         NoX2Apic                    ; CPUID.0BH:EBX[15:0] is zero

    ; Processor is x2APIC capable; 32-bit x2APIC ID is already in EDX
    jmp        GetProcessorNumber

NoX2Apic:
    ; Processor is not x2APIC capable, so get 8-bit APIC ID
    mov        eax, 1
    cpuid
    shr        ebx, 24
    mov        edx, ebx

GetProcessorNumber:
    ;
    ; Get processor number for this AP
    ; Note that BSP may become an AP due to SwitchBsp()
    ;
    xor        ebx, ebx
    lea        eax, [esi + MP_CPU_EXCHANGE_INFO_FIELD (CpuInfo)]
    mov        rdi, [eax]

GetNextProcNumber:
    cmp        dword [rdi + CPU_INFO_IN_HOB.InitialApicId], edx ; APIC ID match?
    jz         ProgramStack
    add        rdi, CPU_INFO_IN_HOB_size
    inc        ebx
    jmp        GetNextProcNumber

ProgramStack:
    mov        rsp, qword [rdi + CPU_INFO_IN_HOB.ApTopOfStack]

CProcedureInvoke:
    push       rbp                          ; Push BIST data at top of AP stack
    xor        rbp, rbp                     ; Clear ebp for call stack trace
    push       rbp
    mov        rbp, rsp

    mov        rax, qword [esi + MP_CPU_EXCHANGE_INFO_FIELD (InitializeFloatingPointUnits)]
    sub        rsp, 20h
    call       rax                          ; Call assembly function to initialize FPU per UEFI spec
    add        rsp, 20h

    mov        edx, ebx                     ; edx is ApIndex
    mov        ecx, esi
    add        ecx, MP_CPU_EXCHANGE_INFO_OFFSET ; rcx is address of exchange info data buffer

    mov        edi, esi
    add        edi, MP_CPU_EXCHANGE_INFO_FIELD (CFunction)
    mov        rax, qword [edi]

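    ; The C entry point is invoked per the Microsoft x64 calling convention
    ; used by the C code: RCX = exchange info pointer, RDX = ApIndex, with
    ; 20h bytes of shadow space reserved around the call (as for the FPU
    ; initialization call above).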
    sub        rsp, 20h
    call       rax                          ; Invoke C function
    add        rsp, 20h
    jmp        $                            ; Should never reach here

;
; Required for the AMD SEV helper functions
;
%include "AmdSev.nasm"

RendezvousFunnelProcEnd:

;-------------------------------------------------------------------------------------
; AsmRelocateApLoop (MwaitSupport, ApTargetCState, PmCodeSegment, TopOfApStack, CountTofinish, Pm16CodeSegment, SevEsAPJumpTable, WakeupBuffer);
;-------------------------------------------------------------------------------------
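; With the Microsoft x64 calling convention assumed here, the first four
; arguments arrive in RCX (MwaitSupport), RDX (ApTargetCState),
; R8 (PmCodeSegment) and R9 (TopOfApStack); CountTofinish, Pm16CodeSegment,
; SevEsAPJumpTable and WakeupBuffer are read from the caller's stack at
; [rsp + 40] through [rsp + 64].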
AsmRelocateApLoopStart:
BITS 64
    cmp        qword [rsp + 56], 0         ; SevEsAPJumpTable
    je         NoSevEs

    ;
    ; Perform some SEV-ES related setup before leaving 64-bit mode
    ;
    push       rcx
    push       rdx

    ;
    ; Get the RDX reset value using CPUID
    ;
    mov        rax, 1
    cpuid
    mov        rsi, rax                    ; Save off the reset value for RDX

    ;
    ; Prepare the GHCB for the AP_HLT_LOOP VMGEXIT call
    ;   - Must be done while in 64-bit long mode so that writes to
    ;     the GHCB memory will be unencrypted.
    ;   - No NAE events can be generated once this is set, otherwise
    ;     the AP_RESET_HOLD SW_EXITCODE will be overwritten.
    ;
    mov        rcx, 0xc0010130
    rdmsr                                  ; Retrieve current GHCB address
    shl        rdx, 32
    or         rdx, rax

    mov        rdi, rdx
    xor        rax, rax
    mov        rcx, 0x800
    shr        rcx, 3
    rep stosq                              ; Clear the GHCB

    mov        rax, 0x80000004             ; VMGEXIT AP_RESET_HOLD
    mov        [rdx + 0x390], rax
    mov        rax, 114                    ; Set SwExitCode valid bit
    bts        [rdx + 0x3f0], rax
    inc        rax                         ; Set SwExitInfo1 valid bit
    bts        [rdx + 0x3f0], rax
    inc        rax                         ; Set SwExitInfo2 valid bit
    bts        [rdx + 0x3f0], rax
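    ; Bit 114 corresponds to the SwExitCode field (GHCB offset 0x390 / 8); the
    ; qword at offset 0x3f0 is the GHCB valid bitmap, with one bit per 8-byte
    ; GHCB field, so the three bts instructions above mark SwExitCode,
    ; SwExitInfo1 and SwExitInfo2 as valid.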

    pop        rdx
    pop        rcx

NoSevEs:
    cli                                    ; Disable interrupts before switching to 32-bit mode
    mov        rax, [rsp + 40]             ; CountTofinish
    lock dec   dword [rax]                 ; (*CountTofinish)--

    mov        r10, [rsp + 48]             ; Pm16CodeSegment
    mov        rax, [rsp + 56]             ; SevEsAPJumpTable
    mov        rbx, [rsp + 64]             ; WakeupBuffer
    mov        rsp, r9                     ; TopOfApStack

    push       rax                         ; Save SevEsAPJumpTable
    push       rbx                         ; Save WakeupBuffer
    push       r10                         ; Save Pm16CodeSegment
    push       rcx                         ; Save MwaitSupport
    push       rdx                         ; Save ApTargetCState

    lea        rax, [PmEntry]              ; rax <- The start address of transition code

    push       r8
    push       rax

    ;
    ; Clear R8 - R15, for reset, before going into 32-bit mode
    ;
    xor        r8, r8
    xor        r9, r9
    xor        r10, r10
    xor        r11, r11
    xor        r12, r12
    xor        r13, r13
    xor        r14, r14
    xor        r15, r15

    ;
    ; Far return into 32-bit mode
    ;
    retfq

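; PmEntry executes with a 32-bit code segment (PmCodeSegment was pushed as the
; CS for the retfq above). Leaving long mode follows the architectural
; sequence: paging is disabled (CR0.PG) first, then EFER.LME is cleared,
; then CR4.PAE.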
BITS 32
PmEntry:
    mov        eax, cr0
    btr        eax, 31                     ; Clear CR0.PG
    mov        cr0, eax                    ; Disable paging

    mov        ecx, 0xc0000080
    rdmsr
    and        ah, ~ 1                     ; Clear LME
    wrmsr
    mov        eax, cr4
    and        al, ~ (1 << 5)              ; Clear PAE
    mov        cr4, eax

    pop        edx                         ; ApTargetCState (pushed as a qword)
    add        esp, 4
    pop        ecx                         ; MwaitSupport (pushed as a qword)
    add        esp, 4

MwaitCheck:
    cmp        cl, 1                       ; Check mwait-monitor support
    jnz        HltLoop
    mov        ebx, edx                    ; Save C-State to ebx
MwaitLoop:
    cli
    mov        eax, esp                    ; Set Monitor Address
    xor        ecx, ecx                    ; ecx = 0
    xor        edx, edx                    ; edx = 0
    monitor
    mov        eax, ebx                    ; Mwait Cx, Target C-State per eax[7:4]
    shl        eax, 4
    mwait
    jmp        MwaitLoop

HltLoop:
    pop        edx                         ; Pm16CodeSegment
    add        esp, 4
    pop        ebx                         ; WakeupBuffer
    add        esp, 4
    pop        eax                         ; SevEsAPJumpTable
    add        esp, 4
    cmp        eax, 0                      ; Check for SEV-ES
    je         DoHlt

    cli
    ;
    ; SEV-ES is enabled, use VMGEXIT (GHCB information already
    ; set by caller)
    ;
BITS 64
    rep vmmcall
BITS 32
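    ; VMGEXIT is encoded as "rep vmmcall" (F3 0F 01 D9); the temporary BITS 64
    ; directive is presumably only needed so NASM accepts the mnemonic here,
    ; since the byte sequence is the same when executed in 32-bit mode.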

    ;
    ; Back from VMGEXIT AP_HLT_LOOP
    ; Push the FLAGS/CS/IP values to use
    ;
    push       word 0x0002                 ; EFLAGS
    xor        ecx, ecx
    mov        cx, [eax + 2]               ; CS
    push       cx
    mov        cx, [eax]                   ; IP
    push       cx
    push       word 0x0000                 ; For alignment, will be discarded

    push       edx
    push       ebx

    mov        edx, esi                    ; Restore RDX reset value

    retf

DoHlt:
    cli
    hlt
    jmp        DoHlt

BITS 64
AsmRelocateApLoopEnd:

;-------------------------------------------------------------------------------------
; AsmGetAddressMap (&AddressMap);
;-------------------------------------------------------------------------------------
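; Fills the caller-provided MP_ASSEMBLY_ADDRESS_MAP (pointer passed in RCX per
; the Microsoft x64 calling convention) with the addresses, sizes and offsets
; the C code presumably needs in order to relocate the stubs above.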
global ASM_PFX(AsmGetAddressMap)
ASM_PFX(AsmGetAddressMap):
    lea        rax, [RendezvousFunnelProcStart]
    mov        qword [rcx + MP_ASSEMBLY_ADDRESS_MAP.RendezvousFunnelAddress], rax
    mov        qword [rcx + MP_ASSEMBLY_ADDRESS_MAP.ModeEntryOffset], LongModeStart - RendezvousFunnelProcStart
    mov        qword [rcx + MP_ASSEMBLY_ADDRESS_MAP.RendezvousFunnelSize], RendezvousFunnelProcEnd - RendezvousFunnelProcStart
    lea        rax, [AsmRelocateApLoopStart]
    mov        qword [rcx + MP_ASSEMBLY_ADDRESS_MAP.RelocateApLoopFuncAddress], rax
    mov        qword [rcx + MP_ASSEMBLY_ADDRESS_MAP.RelocateApLoopFuncSize], AsmRelocateApLoopEnd - AsmRelocateApLoopStart
    mov        qword [rcx + MP_ASSEMBLY_ADDRESS_MAP.ModeTransitionOffset], Flat32Start - RendezvousFunnelProcStart
    mov        qword [rcx + MP_ASSEMBLY_ADDRESS_MAP.SwitchToRealNoNxOffset], SwitchToRealProcStart - Flat32Start
    mov        qword [rcx + MP_ASSEMBLY_ADDRESS_MAP.SwitchToRealPM16ModeOffset], PM16Mode - RendezvousFunnelProcStart
    mov        qword [rcx + MP_ASSEMBLY_ADDRESS_MAP.SwitchToRealPM16ModeSize], SwitchToRealProcEnd - PM16Mode
    ret

;-------------------------------------------------------------------------------------
;AsmExchangeRole procedure follows. This procedure is executed by the current BSP,
;which is about to become an AP. It switches its stack with the current AP.
;AsmExchangeRole (IN CPU_EXCHANGE_INFO *MyInfo, IN CPU_EXCHANGE_INFO *OthersInfo);
;-------------------------------------------------------------------------------------
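;
; The two processors synchronize through the State field of their respective
; CPU_EXCHANGE_ROLE_INFO structures: each CPU saves its registers and stack
; pointer, marks its own state STORED, waits for the peer to reach STORED,
; switches to the peer's saved stack, marks the peer's state LOADED, and then
; waits until its own state is marked LOADED before restoring registers.
;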
global ASM_PFX(AsmExchangeRole)
ASM_PFX(AsmExchangeRole):
    ; DO NOT call other functions in this function, since two CPUs may use one
    ; stack at the same time. If one CPU tries to call a function, the stack
    ; will be corrupted.

    push       rax
    push       rbx
    push       rcx
    push       rdx
    push       rsi
    push       rdi
    push       rbp
    push       r8
    push       r9
    push       r10
    push       r11
    push       r12
    push       r13
    push       r14
    push       r15

    ; rsi contains MyInfo pointer
    mov        rsi, rcx

    ; rdi contains OthersInfo pointer
    mov        rdi, rdx

    pushfq

    ; Store its StackPointer
    mov        [rsi + CPU_EXCHANGE_ROLE_INFO.StackPointer], rsp

    ; Update its switch state to STORED
    mov        byte [rsi + CPU_EXCHANGE_ROLE_INFO.State], CPU_SWITCH_STATE_STORED

WaitForOtherStored:
    ; Wait until the other CPU finishes storing its state
    cmp        byte [rdi + CPU_EXCHANGE_ROLE_INFO.State], CPU_SWITCH_STATE_STORED
    jz         OtherStored
    pause
    jmp        WaitForOtherStored

OtherStored:

    ; Load its future StackPointer
    mov        rsp, [rdi + CPU_EXCHANGE_ROLE_INFO.StackPointer]

    ; Update the other CPU's switch state to LOADED
    mov        byte [rdi + CPU_EXCHANGE_ROLE_INFO.State], CPU_SWITCH_STATE_LOADED

WaitForOtherLoaded:
    ; Wait until the other CPU finishes loading its new state,
    ; otherwise the data on the stack may be corrupted
    cmp        byte [rsi + CPU_EXCHANGE_ROLE_INFO.State], CPU_SWITCH_STATE_LOADED
    jz         OtherLoaded
    pause
    jmp        WaitForOtherLoaded

OtherLoaded:
    ; Since the other CPU has already got the data it wants, leave this procedure
    popfq

    pop        r15
    pop        r14
    pop        r13
    pop        r12
    pop        r11
    pop        r10
    pop        r9
    pop        r8
    pop        rbp
    pop        rdi
    pop        rsi
    pop        rdx
    pop        rcx
    pop        rbx
    pop        rax

    ret