1 ;------------------------------------------------------------------------------ ;
2 ; Copyright (c) 2016 - 2018, Intel Corporation. All rights reserved.<BR>
3 ; This program and the accompanying materials
4 ; are licensed and made available under the terms and conditions of the BSD License
5 ; which accompanies this distribution. The full text of the license may be found at
6 ; http://opensource.org/licenses/bsd-license.php.
8 ; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
9 ; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
17 ; Functions for relocating SMBASE's for all processors
19 ;-------------------------------------------------------------------------------
21 %include "StuffRsbNasm.inc"
23 extern ASM_PFX(SmmInitHandler)
24 extern ASM_PFX(mRebasedFlag)
25 extern ASM_PFX(mSmmRelocationOriginalAddress)
27 global ASM_PFX(gPatchSmmCr3)
28 global ASM_PFX(gPatchSmmCr4)
29 global ASM_PFX(gPatchSmmCr0)
30 global ASM_PFX(gPatchSmmInitStack)
31 global ASM_PFX(gcSmiInitGdtr)
32 global ASM_PFX(gcSmmInitSize)
33 global ASM_PFX(gcSmmInitTemplate)
34 global ASM_PFX(gPatchRebasedFlagAddr32)
35 global ASM_PFX(gPatchSmmRelocationOriginalAddressPtr32)
%define LONG_MODE_CS 0x38       ; GDT selector of the 64-bit code segment
                                ; targeted by the far jump in SmmStartup below

;------------------------------------------------------------------------------
; gcSmiInitGdtr - GDTR image (limit + base) loaded by SmmStartup's "o32 lgdt".
; NOTE(review): the DW limit / DQ base payload lines are not visible in this
; chunk - confirm against the full file.
;------------------------------------------------------------------------------
ASM_PFX(gcSmiInitGdtr):
;------------------------------------------------------------------------------
; SmmStartup - per-CPU SMM initialization entry code.  Switches the processor
; from its SMM entry state into long mode and calls the C routine
; SmmInitHandler(), preserving the volatile XMM registers around the call.
;
; NOTE(review): this chunk is incomplete.  The ASM_PFX(SmmStartup) label line,
; the CPUID consuming EAX=0x80000001, the CR3/CR4 writes, the RDMSR/WRMSR of
; IA32_EFER, the stack reservation (sub rsp), the XMM0 save/restore slot at
; [rsp + 0x00], and the function epilogue (rsm) are not visible here.  The
; comments below describe only the visible instructions.
;------------------------------------------------------------------------------
global ASM_PFX(SmmStartup)
    mov eax, 0x80000001         ; read capability (extended feature leaf)
    ; NOTE(review): the CPUID instruction for leaf 0x80000001 is not visible;
    ; EDX presumably holds the extended feature flags at this point.
    mov ebx, edx                ; rdmsr will change edx. keep it in ebx.
    mov eax, strict dword 0     ; source operand will be patched
ASM_PFX(gPatchSmmCr3):          ; patch anchor: the dword above receives the CR3 value
    ; Load the GDT for protected/long mode.  The address is CS:EBP-relative
    ; because this code executes at its SMBASE-relocated location.
    o32 lgdt [cs:ebp + (ASM_PFX(gcSmiInitGdtr) - ASM_PFX(SmmStartup))]
    mov eax, strict dword 0     ; source operand will be patched
ASM_PFX(gPatchSmmCr4):          ; patch anchor: the dword above receives the CR4 value
    or ah, 2                    ; enable XMM registers access (CR4 bit 9, OSFXSR)
    ; NOTE(review): the "mov cr4, eax" consuming this value is not visible.
    mov ecx, 0xc0000080         ; IA32_EFER MSR
    ; NOTE(review): the rdmsr reading IA32_EFER is not visible in this chunk.
    or ah, BIT0                 ; set LME bit (EFER bit 8 - long mode enable)
    test ebx, BIT20             ; check NXE capability (CPUID.80000001h:EDX[20])
    or ah, BIT3                 ; set NXE bit (EFER bit 11 - no-execute enable)
    ; NOTE(review): the conditional branch around the NXE set and the wrmsr
    ; writing IA32_EFER back are not visible here.
    mov eax, strict dword 0     ; source operand will be patched
ASM_PFX(gPatchSmmCr0):          ; patch anchor: the dword above receives the CR0 value
    mov cr0, eax                ; enable protected mode & paging
    jmp LONG_MODE_CS : dword 0  ; offset will be patched to @LongMode
@LongMode:                      ; long-mode starts here
    mov rsp, strict qword 0     ; source operand will be patched
ASM_PFX(gPatchSmmInitStack):    ; patch anchor: the qword above receives the stack top
    and sp, 0xfff0              ; make sure RSP is 16-byte aligned
    ; According to the X64 calling convention, XMM0~5 are volatile; we need to
    ; save them before calling the C function.
    ; NOTE(review): the stack reservation and the xmm0 save at [rsp + 0x00]
    ; are not visible in this chunk.
    movdqa [rsp + 0x10], xmm1
    movdqa [rsp + 0x20], xmm2
    movdqa [rsp + 0x30], xmm3
    movdqa [rsp + 0x40], xmm4
    movdqa [rsp + 0x50], xmm5
    call ASM_PFX(SmmInitHandler)
    ; Restore XMM0~5 after calling the C function.
    movdqa xmm1, [rsp + 0x10]
    movdqa xmm2, [rsp + 0x20]
    movdqa xmm3, [rsp + 0x30]
    movdqa xmm4, [rsp + 0x40]
    movdqa xmm5, [rsp + 0x50]
;------------------------------------------------------------------------------
; gcSmmInitTemplate - code template copied to each CPU's SMM entry point
; (SMBASE + 0x8000) for SMBASE relocation; gcSmmInitSize gives the number of
; bytes to copy.  The DQ slot holds the flat address of SmmStartup, patched
; in by PiSmmCpuSmmInitFixupAddress() (hence the "; ASM_PFX(SmmStartup)" hint).
;
; NOTE(review): the instructions between the mov and the DQ - presumably the
; @L1 label marking the DQ slot and the transfer into SmmStartup via EBP -
; are not visible in this chunk; confirm against the full file.
;------------------------------------------------------------------------------
ASM_PFX(gcSmmInitTemplate):
    ; EBP := value of the DQ slot below, read CS-relative from the template's
    ; runtime location at SMBASE + 0x8000
    mov ebp, [cs:@L1 - ASM_PFX(gcSmmInitTemplate) + 0x8000]
    DQ 0; ASM_PFX(SmmStartup)
ASM_PFX(gcSmmInitSize): DW $ - ASM_PFX(gcSmmInitTemplate)
;------------------------------------------------------------------------------
; SmmRelocationSemaphoreComplete - 64-bit completion stub: loads the address
; of the "rebased" flag, then tail-jumps to the original SMI handler address
; so the relocated CPU resumes normal SMI processing.
;
; NOTE(review): the store that actually sets the flag (and any surrounding
; push/pop of rax) is not visible in this chunk; only the flag-address load
; and the final indirect jump are shown.
;------------------------------------------------------------------------------
global ASM_PFX(SmmRelocationSemaphoreComplete)
ASM_PFX(SmmRelocationSemaphoreComplete):
    mov rax, [ASM_PFX(mRebasedFlag)]                 ; rax = address of the rebased flag
    jmp [ASM_PFX(mSmmRelocationOriginalAddress)]     ; resume original SMI handler
;------------------------------------------------------------------------------
; Semaphore code running in 32-bit mode
;
; SmmRelocationSemaphoreComplete32 - same purpose as the 64-bit variant above,
; but executed while the CPU is still in 32-bit mode, so both the flag address
; and the continuation pointer are patched into the code as immediates at the
; gPatch* anchors rather than referenced symbolically.
;
; NOTE(review): the store that sets the rebased flag, and the pointer data
; following the last label, are not visible in this chunk.
;------------------------------------------------------------------------------
global ASM_PFX(SmmRelocationSemaphoreComplete32)
ASM_PFX(SmmRelocationSemaphoreComplete32):
    mov eax, strict dword 0     ; source operand will be patched
ASM_PFX(gPatchRebasedFlagAddr32):       ; patch anchor: dword above = flag address
    jmp dword [dword 0]         ; destination will be patched
ASM_PFX(gPatchSmmRelocationOriginalAddressPtr32):   ; patch anchor: pointer read by the jmp above
;------------------------------------------------------------------------------
; PiSmmCpuSmmInitFixupAddress - boot-time fixup that patches the runtime
; address of SmmStartup into the SMM init code (the far-jump offset slot near
; @PatchLongModeOffset, and - per the template comment - the DQ slot in
; gcSmmInitTemplate).
;
; NOTE(review): this routine is truncated at the end of this chunk - the
; stores through RCX and the ret are not visible.  @PatchLongModeOffset is
; defined outside this view; "- 6" presumably backs RCX up onto the patched
; far-jump operand - confirm against the full file.
;------------------------------------------------------------------------------
global ASM_PFX(PiSmmCpuSmmInitFixupAddress)
ASM_PFX(PiSmmCpuSmmInitFixupAddress):
    lea rcx, [@PatchLongModeOffset - 6]     ; rcx -> patch slot (6 bytes before the label)
    lea rax, [ASM_PFX(SmmStartup)]          ; rax = runtime flat address of SmmStartup