/** @file
  SMM CPU misc functions specific to the x64 arch.

Copyright (c) 2015 - 2016, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "PiSmmCpuDxeSmm.h"
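//
// Buffer that holds the per-CPU GDT/TSS copies allocated by InitGdt();
// recorded here so PatchGdtIdtMap() can mark it read-only and non-executable.
//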
EFI_PHYSICAL_ADDRESS  mGdtBuffer;
UINTN                 mGdtBufferSize;

/**
  Initialize IDT for SMM Stack Guard.

**/
VOID
EFIAPI
InitializeIDTSmmStackGuard (
  VOID
  )
{
  IA32_IDT_GATE_DESCRIPTOR  *IdtGate;

  //
  // If SMM Stack Guard feature is enabled, set the IST field of
  // the interrupt gate for Page Fault Exception to be 1.
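  // (Reserved_0 maps to bits 32..39 of the x64 IDT gate descriptor; its low
  // three bits are the IST index, so writing 1 selects IST1.)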
  //
  IdtGate = (IA32_IDT_GATE_DESCRIPTOR *)gcSmiIdtr.Base;
  IdtGate += EXCEPT_IA32_PAGE_FAULT;
  IdtGate->Bits.Reserved_0 = 1;
}

/**
  Initialize GDT for all processors.

  @param[in]  Cr3          CR3 value.
  @param[out] GdtStepSize  The step size for the GDT table.

  @return GdtBase for processor 0.
          GdtBase for processor X is: GdtBase + (GdtStepSize * X)
**/
VOID *
InitGdt (
  IN  UINTN  Cr3,
  OUT UINTN  *GdtStepSize
  )
{
  UINTN                    Index;
  IA32_SEGMENT_DESCRIPTOR  *GdtDescriptor;
  UINTN                    TssBase;
  UINTN                    GdtTssTableSize;
  UINT8                    *GdtTssTables;
  UINTN                    GdtTableStepSize;

  //
  // For X64 SMM, we allocate a separate GDT/TSS for each CPU to avoid TSS load contention
  // on each SMI entry.
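  // Each per-CPU block is a copy of the SMI GDT followed by that CPU's TSS,
  // rounded up to an 8-byte boundary (GdtTssTableSize below).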
  //
  GdtTssTableSize = (gcSmiGdtr.Limit + 1 + TSS_SIZE + 7) & ~7; // 8 bytes aligned
  mGdtBufferSize = GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  GdtTssTables = (UINT8*)AllocateCodePages (EFI_SIZE_TO_PAGES (mGdtBufferSize));
  ASSERT (GdtTssTables != NULL);
  mGdtBuffer = (UINTN)GdtTssTables;
  GdtTableStepSize = GdtTssTableSize;

  for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {
    CopyMem (GdtTssTables + GdtTableStepSize * Index, (VOID*)(UINTN)gcSmiGdtr.Base, gcSmiGdtr.Limit + 1 + TSS_SIZE);

    //
    // Fixup TSS descriptors
    //
    TssBase = (UINTN)(GdtTssTables + GdtTableStepSize * Index + gcSmiGdtr.Limit + 1);
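    //
    // The 16-byte x64 TSS descriptor occupies the last two 8-byte GDT slots,
    // i.e. two IA32_SEGMENT_DESCRIPTOR entries below TssBase; patch its base
    // address to point at this CPU's copy of the TSS.
    //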
    GdtDescriptor = (IA32_SEGMENT_DESCRIPTOR *)(TssBase) - 2;
    GdtDescriptor->Bits.BaseLow = (UINT16)(UINTN)TssBase;
    GdtDescriptor->Bits.BaseMid = (UINT8)((UINTN)TssBase >> 16);
    GdtDescriptor->Bits.BaseHigh = (UINT8)((UINTN)TssBase >> 24);

    if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
      //
      // Set up the top of the known good stack as IST1 for each processor.
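      // The first EFI_PAGE_SIZE bytes of each processor's stack region serve
      // as the known good stack, which is why IST1 points one page above the
      // per-CPU stack base.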
      //
      *(UINTN *)(TssBase + TSS_X64_IST1_OFFSET) = (mSmmStackArrayBase + EFI_PAGE_SIZE + Index * mSmmStackSize);
    }
  }

  *GdtStepSize = GdtTableStepSize;
  return GdtTssTables;
}

/**
  This function sets the GDT/IDT buffers to be read-only (RO) and non-executable (XP).
**/
VOID
PatchGdtIdtMap (
  VOID
  )
{
  EFI_PHYSICAL_ADDRESS  BaseAddress;
  UINTN                 Size;

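  //
  // Page attributes are applied at 4KB granularity, so both the GDT and IDT
  // ranges are rounded up to a page boundary (ALIGN_VALUE) below.
  //
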
  //
  // GDT
  //
  DEBUG ((DEBUG_INFO, "PatchGdtIdtMap - GDT:\n"));

  BaseAddress = mGdtBuffer;
  Size = ALIGN_VALUE(mGdtBufferSize, SIZE_4KB);
  SmmSetMemoryAttributes (
    BaseAddress,
    Size,
    EFI_MEMORY_RO
    );
  SmmSetMemoryAttributes (
    BaseAddress,
    Size,
    EFI_MEMORY_XP
    );

  //
  // IDT
  //
  DEBUG ((DEBUG_INFO, "PatchGdtIdtMap - IDT:\n"));

  BaseAddress = gcSmiIdtr.Base;
  Size = ALIGN_VALUE(gcSmiIdtr.Limit + 1, SIZE_4KB);
  SmmSetMemoryAttributes (
    BaseAddress,
    Size,
    EFI_MEMORY_RO
    );
  SmmSetMemoryAttributes (
    BaseAddress,
    Size,
    EFI_MEMORY_XP
    );
}

/**
  Get the protected mode code segment from the current GDT table.

  @return Protected mode code segment value.
**/
UINT16
GetProtectedModeCS (
  VOID
  )
{
  IA32_DESCRIPTOR          GdtrDesc;
  IA32_SEGMENT_DESCRIPTOR  *GdtEntry;
  UINTN                    GdtEntryCount;
  UINT16                   Index;

  AsmReadGdtr (&GdtrDesc);
  GdtEntryCount = (GdtrDesc.Limit + 1) / sizeof (IA32_SEGMENT_DESCRIPTOR);
  GdtEntry = (IA32_SEGMENT_DESCRIPTOR *) GdtrDesc.Base;
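  //
  // Scan for a protected-mode code segment: a type value above 8 means the
  // code bit of the type field is set, and L == 0 excludes 64-bit (long-mode)
  // code segments. Each GDT entry is 8 bytes, so the selector is Index * 8.
  //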
  for (Index = 0; Index < GdtEntryCount; Index++) {
    if ((GdtEntry->Bits.L == 0) && (GdtEntry->Bits.Type > 8)) {
      break;
    }
    GdtEntry++;
  }
  ASSERT (Index != GdtEntryCount);
  return Index * 8;
}

/**
  Transfer the AP to a safe hlt-loop after it has finished restoring CPU
  features on the S3 path.

  @param[in] ApHltLoopCode          The address of the safe hlt-loop function.
  @param[in] TopOfStack             A pointer to the new stack to use for the ApHltLoopCode.
  @param[in] NumberToFinishAddress  Address of the semaphore counting the APs that have finished.

**/
VOID
TransferApToSafeState (
  IN UINTN  ApHltLoopCode,
  IN UINTN  TopOfStack,
  IN UINTN  NumberToFinishAddress
  )
{
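  //
  // AsmDisablePaging64() takes the AP out of long mode into 32-bit protected
  // mode with paging disabled, then jumps to ApHltLoopCode on the new stack,
  // passing NumberToFinishAddress as its first parameter; it does not return.
  //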
  AsmDisablePaging64 (
    GetProtectedModeCS (),
    (UINT32)ApHltLoopCode,
    (UINT32)NumberToFinishAddress,
    0,
    (UINT32)TopOfStack
    );
  //
  // It should never reach here
  //
  ASSERT (FALSE);
}