]>
Commit | Line | Data |
---|---|---|
529a5a86 MK |
1 | /** @file\r |
2 | Agent Module to load other modules to deploy SMM Entry Vector for X86 CPU.\r | |
3 | \r | |
cb4820b6 | 4 | Copyright (c) 2009 - 2023, Intel Corporation. All rights reserved.<BR>\r |
241f9149 LD |
5 | Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>\r |
6 | \r | |
0acd8697 | 7 | SPDX-License-Identifier: BSD-2-Clause-Patent\r |
529a5a86 MK |
8 | \r |
9 | **/\r | |
10 | \r | |
11 | #include "PiSmmCpuDxeSmm.h"\r | |
12 | \r | |
//
// SMM CPU Private Data structure that contains SMM Configuration Protocol
// along its supporting fields. Published to the SMM Core via gSmmCpuPrivate;
// the array pointers are populated later, during driver initialization.
//
SMM_CPU_PRIVATE_DATA  mSmmCpuPrivateData = {
  SMM_CPU_PRIVATE_DATA_SIGNATURE,               // Signature
  NULL,                                         // SmmCpuHandle
  NULL,                                         // Pointer to ProcessorInfo array
  NULL,                                         // Pointer to Operation array
  NULL,                                         // Pointer to CpuSaveStateSize array
  NULL,                                         // Pointer to CpuSaveState array
  {
    { 0 }
  },                                            // SmmReservedSmramRegion
  {
    SmmStartupThisAp,                           // SmmCoreEntryContext.SmmStartupThisAp
    0,                                          // SmmCoreEntryContext.CurrentlyExecutingCpu
    0,                                          // SmmCoreEntryContext.NumberOfCpus
    NULL,                                       // SmmCoreEntryContext.CpuSaveStateSize
    NULL                                        // SmmCoreEntryContext.CpuSaveState
  },
  NULL,                                         // SmmCoreEntry
  {
    mSmmCpuPrivateData.SmmReservedSmramRegion,  // SmmConfiguration.SmramReservedRegions
    RegisterSmmEntry                            // SmmConfiguration.RegisterSmmEntry
  },
  NULL,                                         // pointer to Ap Wrapper Func array
  { NULL, NULL },                               // List_Entry for Tokens.
};
42 | \r | |
//
// CPU hot-plug data shared with SmmCpuFeaturesInitializeProcessor();
// SmrrBase/SmrrSize are filled in from FindSmramInfo() at entry.
//
CPU_HOT_PLUG_DATA  mCpuHotPlugData = {
  CPU_HOT_PLUG_DATA_REVISION_1, // Revision
  0,                            // Array Length of SmBase and APIC ID
  NULL,                         // Pointer to APIC ID array
  NULL,                         // Pointer to SMBASE array
  0,                            // Reserved
  0,                            // SmrrBase
  0                             // SmrrSize
};
52 | \r | |
//
// Global pointer used to access mSmmCpuPrivateData from outside and inside SMM
//
SMM_CPU_PRIVATE_DATA  *gSmmCpuPrivate = &mSmmCpuPrivateData;

//
// SMM Relocation variables: per-CPU flags set (via SemaphoreHook) when a
// processor has finished rebasing its SMBASE during SmmRelocateBases().
//
volatile BOOLEAN  *mRebased;

///
/// Handle for the SMM CPU Protocol
///
EFI_HANDLE  mSmmCpuHandle = NULL;

///
/// SMM CPU Protocol instance
///
EFI_SMM_CPU_PROTOCOL  mSmmCpu = {
  SmmReadSaveState,
  SmmWriteSaveState
};

///
/// SMM Memory Attribute Protocol instance
///
EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL  mSmmMemoryAttribute = {
  EdkiiSmmGetMemoryAttributes,
  EdkiiSmmSetMemoryAttributes,
  EdkiiSmmClearMemoryAttributes
};

//
// Table of external interrupt/exception handlers registered for SMM.
//
EFI_CPU_INTERRUPT_HANDLER  mExternalVectorTable[EXCEPTION_VECTOR_NUMBER];

//
// TRUE when SMBASE relocation was already performed elsewhere; when TRUE,
// SmmInitHandler() skips hooking the RSM return for the rebase semaphore.
//
BOOLEAN  mSmmRelocated = FALSE;
//
// Per-CPU flags set when each processor completes its first SMI
// (polled by ExecuteFirstSmiInit()); allocated lazily.
//
volatile BOOLEAN  *mSmmInitialized = NULL;
//
// Local APIC ID of the BSP, captured before SMM init/relocation IPIs are sent.
//
UINT32  mBspApicId = 0;

//
// SMM stack information
//
UINTN  mSmmStackArrayBase;
UINTN  mSmmStackArrayEnd;
UINTN  mSmmStackSize;

//
// Shadow stack size and CET (Control-flow Enforcement Technology) support flag;
// mCetSupported is cleared at entry if CPUID reports no CET_SS support.
//
UINTN    mSmmShadowStackSize;
BOOLEAN  mCetSupported = TRUE;

UINTN  mMaxNumberOfCpus = 1;
UINTN  mNumberOfCpus    = 1;

//
// SMM ready to lock flag
//
BOOLEAN  mSmmReadyToLock = FALSE;

//
// Global used to cache PCD for SMM Code Access Check enable
//
BOOLEAN  mSmmCodeAccessCheckEnable = FALSE;

//
// Global copy of the PcdPteMemoryEncryptionAddressOrMask
//
UINT64  mAddressEncMask = 0;

//
// Spin lock used to serialize setting of SMM Code Access Check feature
//
SPIN_LOCK  *mConfigSmmCodeAccessCheckLock = NULL;

//
// Saved SMM ranges information
//
EFI_SMRAM_DESCRIPTOR  *mSmmCpuSmramRanges;
UINTN                 mSmmCpuSmramRangeCount;

UINT8  mPhysicalAddressBits;

//
// Control register contents saved for SMM S3 resume state initialization.
//
UINT32  mSmmCr0;
UINT32  mSmmCr4;
529a5a86 MK |
/**
  Initialize IDT to setup exception handlers for SMM.

  Allocates a page-aligned, 32-entry IDT in gcSmiIdtr and populates it with
  the default CPU exception handlers. The DXE IDT is saved, temporarily
  replaced while InitializeCpuExceptionHandlers() fills gcSmiIdtr, then
  restored, with interrupts disabled across the swap.

**/
VOID
InitializeSmmIdt (
  VOID
  )
{
  EFI_STATUS       Status;
  BOOLEAN          InterruptState;
  IA32_DESCRIPTOR  DxeIdtr;

  //
  // There are 32 (not 255) entries in it since only processor
  // generated exceptions will be handled.
  //
  gcSmiIdtr.Limit = (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32) - 1;
  //
  // Allocate page aligned IDT, because it might be set as read only.
  //
  gcSmiIdtr.Base = (UINTN)AllocateCodePages (EFI_SIZE_TO_PAGES (gcSmiIdtr.Limit + 1));
  ASSERT (gcSmiIdtr.Base != 0);
  ZeroMem ((VOID *)gcSmiIdtr.Base, gcSmiIdtr.Limit + 1);

  //
  // Disable Interrupt and save DXE IDT table
  //
  InterruptState = SaveAndDisableInterrupts ();
  AsmReadIdtr (&DxeIdtr);
  //
  // Load SMM temporary IDT table
  //
  AsmWriteIdtr (&gcSmiIdtr);
  //
  // Setup SMM default exception handlers, SMM IDT table
  // will be updated and saved in gcSmiIdtr
  //
  Status = InitializeCpuExceptionHandlers (NULL);
  ASSERT_EFI_ERROR (Status);
  //
  // Restore DXE IDT table and CPU interrupt
  //
  AsmWriteIdtr ((IA32_DESCRIPTOR *)&DxeIdtr);
  SetInterruptState (InterruptState);
}
184 | \r | |
185 | /**\r | |
186 | Search module name by input IP address and output it.\r | |
187 | \r | |
188 | @param CallerIpAddress Caller instruction pointer.\r | |
189 | \r | |
190 | **/\r | |
191 | VOID\r | |
192 | DumpModuleInfoByIp (\r | |
053e878b | 193 | IN UINTN CallerIpAddress\r |
529a5a86 MK |
194 | )\r |
195 | {\r | |
053e878b MK |
196 | UINTN Pe32Data;\r |
197 | VOID *PdbPointer;\r | |
529a5a86 MK |
198 | \r |
199 | //\r | |
200 | // Find Image Base\r | |
201 | //\r | |
9e981317 | 202 | Pe32Data = PeCoffSearchImageBase (CallerIpAddress);\r |
529a5a86 | 203 | if (Pe32Data != 0) {\r |
053e878b MK |
204 | DEBUG ((DEBUG_ERROR, "It is invoked from the instruction before IP(0x%p)", (VOID *)CallerIpAddress));\r |
205 | PdbPointer = PeCoffLoaderGetPdbPointer ((VOID *)Pe32Data);\r | |
529a5a86 | 206 | if (PdbPointer != NULL) {\r |
b8caae19 | 207 | DEBUG ((DEBUG_ERROR, " in module (%a)\n", PdbPointer));\r |
529a5a86 MK |
208 | }\r |
209 | }\r | |
210 | }\r | |
211 | \r | |
/**
  Read information from the CPU save state.

  @param This      EFI_SMM_CPU_PROTOCOL instance
  @param Width     The number of bytes to read from the CPU save state.
  @param Register  Specifies the CPU register to read form the save state.
  @param CpuIndex  Specifies the zero-based index of the CPU save state.
  @param Buffer    Upon return, this holds the CPU register value read from the save state.

  @retval EFI_SUCCESS           The register was read from Save State
  @retval EFI_NOT_FOUND         The register is not defined for the Save State of Processor
  @retval EFI_INVALID_PARAMETER This or Buffer is NULL.

**/
EFI_STATUS
EFIAPI
SmmReadSaveState (
  IN CONST EFI_SMM_CPU_PROTOCOL   *This,
  IN UINTN                        Width,
  IN EFI_SMM_SAVE_STATE_REGISTER  Register,
  IN UINTN                        CpuIndex,
  OUT VOID                        *Buffer
  )
{
  EFI_STATUS  Status;

  //
  // Retrieve pointer to the specified CPU's SMM Save State buffer
  //
  if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {
    return EFI_INVALID_PARAMETER;
  }

  //
  // The SpeculationBarrier() call here is to ensure the above check for the
  // CpuIndex has been completed before the execution of subsequent codes.
  // (Mitigates speculative out-of-bounds use of CpuIndex below.)
  //
  SpeculationBarrier ();

  //
  // Check for special EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID
  //
  if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {
    //
    // The pseudo-register only supports the 64-bit size specified by Width.
    //
    if (Width != sizeof (UINT64)) {
      return EFI_INVALID_PARAMETER;
    }

    //
    // If the processor is in SMM at the time the SMI occurred,
    // the pseudo register value for EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID is returned in Buffer.
    // Otherwise, EFI_NOT_FOUND is returned.
    //
    if (*(mSmmMpSyncData->CpuData[CpuIndex].Present)) {
      *(UINT64 *)Buffer = gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId;
      return EFI_SUCCESS;
    } else {
      return EFI_NOT_FOUND;
    }
  }

  //
  // Only processors present in SMM have a valid save state to read.
  //
  if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
    return EFI_INVALID_PARAMETER;
  }

  //
  // Give the CPU-feature library first chance; fall back to the generic
  // save-state reader when it does not handle the register.
  //
  Status = SmmCpuFeaturesReadSaveStateRegister (CpuIndex, Register, Width, Buffer);
  if (Status == EFI_UNSUPPORTED) {
    Status = ReadSaveStateRegister (CpuIndex, Register, Width, Buffer);
  }

  return Status;
}
286 | \r | |
287 | /**\r | |
288 | Write data to the CPU save state.\r | |
289 | \r | |
290 | @param This EFI_SMM_CPU_PROTOCOL instance\r | |
291 | @param Width The number of bytes to read from the CPU save state.\r | |
292 | @param Register Specifies the CPU register to write to the save state.\r | |
293 | @param CpuIndex Specifies the zero-based index of the CPU save state\r | |
294 | @param Buffer Upon entry, this holds the new CPU register value.\r | |
295 | \r | |
296 | @retval EFI_SUCCESS The register was written from Save State\r | |
297 | @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor\r | |
ef62da4f | 298 | @retval EFI_INVALID_PARAMETER ProcessorIndex or Width is not correct\r |
529a5a86 MK |
299 | \r |
300 | **/\r | |
301 | EFI_STATUS\r | |
302 | EFIAPI\r | |
303 | SmmWriteSaveState (\r | |
053e878b MK |
304 | IN CONST EFI_SMM_CPU_PROTOCOL *This,\r |
305 | IN UINTN Width,\r | |
306 | IN EFI_SMM_SAVE_STATE_REGISTER Register,\r | |
307 | IN UINTN CpuIndex,\r | |
308 | IN CONST VOID *Buffer\r | |
529a5a86 MK |
309 | )\r |
310 | {\r | |
311 | EFI_STATUS Status;\r | |
312 | \r | |
313 | //\r | |
314 | // Retrieve pointer to the specified CPU's SMM Save State buffer\r | |
315 | //\r | |
316 | if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {\r | |
317 | return EFI_INVALID_PARAMETER;\r | |
318 | }\r | |
319 | \r | |
320 | //\r | |
321 | // Writes to EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID are ignored\r | |
322 | //\r | |
323 | if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {\r | |
324 | return EFI_SUCCESS;\r | |
325 | }\r | |
326 | \r | |
327 | if (!mSmmMpSyncData->CpuData[CpuIndex].Present) {\r | |
328 | return EFI_INVALID_PARAMETER;\r | |
329 | }\r | |
330 | \r | |
331 | Status = SmmCpuFeaturesWriteSaveStateRegister (CpuIndex, Register, Width, Buffer);\r | |
332 | if (Status == EFI_UNSUPPORTED) {\r | |
333 | Status = WriteSaveStateRegister (CpuIndex, Register, Width, Buffer);\r | |
334 | }\r | |
053e878b | 335 | \r |
529a5a86 MK |
336 | return Status;\r |
337 | }\r | |
338 | \r | |
529a5a86 MK |
/**
  C function for SMI handler. To change all processor's SMMBase Register.

  Runs on each processor during its first SMI: loads the SMM IDT, finds the
  current CPU's index by APIC ID, initializes CPU-specific SMM features, and
  (when this driver performs SMBASE relocation) hooks the RSM return so the
  waiting BSP can observe rebase completion via mRebased[Index].

**/
VOID
EFIAPI
SmmInitHandler (
  VOID
  )
{
  UINT32   ApicId;
  UINTN    Index;
  BOOLEAN  IsBsp;

  //
  // Update SMM IDT entries' code segment and load IDT
  //
  AsmWriteIdtr (&gcSmiIdtr);
  ApicId = GetApicId ();

  //
  // The BSP is identified by comparing against the APIC ID captured
  // before the SMM init IPIs were issued (mBspApicId).
  //
  IsBsp = (BOOLEAN)(mBspApicId == ApicId);

  ASSERT (mNumberOfCpus <= mMaxNumberOfCpus);

  //
  // Linear search of the processor table for the entry matching this
  // CPU's APIC ID; exactly one entry is expected to match.
  //
  for (Index = 0; Index < mNumberOfCpus; Index++) {
    if (ApicId == (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
      //
      // Initialize SMM specific features on the currently executing CPU
      //
      SmmCpuFeaturesInitializeProcessor (
        Index,
        IsBsp,
        gSmmCpuPrivate->ProcessorInfo,
        &mCpuHotPlugData
        );

      if (!mSmmS3Flag) {
        //
        // Check XD and BTS features on each processor on normal boot
        //
        CheckFeatureSupported ();
      } else if (IsBsp) {
        //
        // BSP rebase is already done above.
        // Initialize private data during S3 resume
        //
        InitializeMpSyncData ();
      }

      if (!mSmmRelocated) {
        //
        // Hook return after RSM to set SMM re-based flag
        //
        SemaphoreHook (Index, &mRebased[Index]);
      }

      return;
    }
  }

  //
  // Reaching here means the executing CPU's APIC ID is not in the
  // processor table, which should be impossible.
  //
  ASSERT (FALSE);
}
401 | \r | |
ec07fd0e WJ |
/**
  Issue SMI IPI (All Excluding Self SMM IPI + BSP SMM IPI) to execute first SMI init.

  Allocates (on first use) and clears the per-CPU mSmmInitialized flags,
  records the BSP APIC ID, triggers the first SMI on the BSP and then on all
  other processors, and spin-waits until every CPU has flagged completion.

**/
VOID
ExecuteFirstSmiInit (
  VOID
  )
{
  UINTN  Index;

  //
  // Lazily allocate one completion flag per possible CPU.
  //
  if (mSmmInitialized == NULL) {
    mSmmInitialized = (BOOLEAN *)AllocatePool (sizeof (BOOLEAN) * mMaxNumberOfCpus);
  }

  ASSERT (mSmmInitialized != NULL);
  if (mSmmInitialized == NULL) {
    return;
  }

  //
  // Reset the mSmmInitialized to false.
  //
  ZeroMem ((VOID *)mSmmInitialized, sizeof (BOOLEAN) * mMaxNumberOfCpus);

  //
  // Get the BSP ApicId.
  //
  mBspApicId = GetApicId ();

  //
  // Issue SMI IPI (All Excluding Self SMM IPI + BSP SMM IPI) for SMM init
  //
  SendSmiIpi (mBspApicId);
  SendSmiIpiAllExcludingSelf ();

  //
  // Wait for all processors to finish its 1st SMI
  // (busy-wait on the per-CPU flags set inside the SMI handler).
  //
  for (Index = 0; Index < mNumberOfCpus; Index++) {
    while (!(BOOLEAN)mSmmInitialized[Index]) {
    }
  }
}
446 | \r | |
529a5a86 MK |
/**
  Relocate SmmBases for each processor.

  Execute on first boot and all S3 resumes

  Patches the SmmInit template with the current CR0/CR3/CR4, copies it over
  the default SMI entry at 0x38000 (after backing up the original contents),
  then sends an SMI to every AP and finally the BSP so each CPU rebases its
  own SMBASE, spin-waiting on mRebased[] for each. The original memory at
  0x38000 is restored afterwards.

**/
VOID
EFIAPI
SmmRelocateBases (
  VOID
  )
{
  UINT8                 BakBuf[BACK_BUF_SIZE];
  SMRAM_SAVE_STATE_MAP  BakBuf2;
  SMRAM_SAVE_STATE_MAP  *CpuStatePtr;
  UINT8                 *U8Ptr;
  UINTN                 Index;
  UINTN                 BspIndex;

  //
  // Make sure the reserved size is large enough for procedure SmmInitTemplate.
  //
  ASSERT (sizeof (BakBuf) >= gcSmmInitSize);

  //
  // Patch ASM code template with current CR0, CR3, and CR4 values
  // (CR4 is patched with CET disabled for the relocation flow).
  //
  mSmmCr0 = (UINT32)AsmReadCr0 ();
  PatchInstructionX86 (gPatchSmmCr0, mSmmCr0, 4);
  PatchInstructionX86 (gPatchSmmCr3, AsmReadCr3 (), 4);
  mSmmCr4 = (UINT32)AsmReadCr4 ();
  PatchInstructionX86 (gPatchSmmCr4, mSmmCr4 & (~CR4_CET_ENABLE), 4);

  //
  // Patch GDTR for SMM base relocation
  //
  gcSmiInitGdtr.Base  = gcSmiGdtr.Base;
  gcSmiInitGdtr.Limit = gcSmiGdtr.Limit;

  U8Ptr       = (UINT8 *)(UINTN)(SMM_DEFAULT_SMBASE + SMM_HANDLER_OFFSET);
  CpuStatePtr = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);

  //
  // Backup original contents at address 0x38000
  //
  CopyMem (BakBuf, U8Ptr, sizeof (BakBuf));
  CopyMem (&BakBuf2, CpuStatePtr, sizeof (BakBuf2));

  //
  // Load image for relocation
  //
  CopyMem (U8Ptr, gcSmmInitTemplate, gcSmmInitSize);

  //
  // Retrieve the local APIC ID of current processor
  //
  mBspApicId = GetApicId ();

  //
  // Relocate SM bases for all APs
  // This is APs' 1st SMI - rebase will be done here, and APs' default SMI handler will be overridden by gcSmmInitTemplate
  //
  BspIndex = (UINTN)-1;
  for (Index = 0; Index < mNumberOfCpus; Index++) {
    mRebased[Index] = FALSE;
    if (mBspApicId != (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
      SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
      //
      // Wait for this AP to finish its 1st SMI
      //
      while (!mRebased[Index]) {
      }
    } else {
      //
      // BSP will be Relocated later
      //
      BspIndex = Index;
    }
  }

  //
  // Relocate BSP's SMM base
  //
  ASSERT (BspIndex != (UINTN)-1);
  SendSmiIpi (mBspApicId);
  //
  // Wait for the BSP to finish its 1st SMI
  //
  while (!mRebased[BspIndex]) {
  }

  //
  // Restore contents at address 0x38000
  //
  CopyMem (CpuStatePtr, &BakBuf2, sizeof (BakBuf2));
  CopyMem (U8Ptr, BakBuf, sizeof (BakBuf));
}
544 | \r | |
529a5a86 MK |
/**
  SMM Ready To Lock event notification handler.

  The CPU S3 data is copied to SMRAM for security and mSmmReadyToLock is set to
  perform additional lock actions that must be performed from SMM on the next SMI.

  Note: the S3 data and UEFI memory map must be captured BEFORE the flag is
  set, since setting mSmmReadyToLock arms the lock-down path.

  @param[in] Protocol   Points to the protocol's unique identifier.
  @param[in] Interface  Points to the interface instance.
  @param[in] Handle     The handle on which the interface was installed.

  @retval EFI_SUCCESS   Notification handler runs successfully.
**/
EFI_STATUS
EFIAPI
SmmReadyToLockEventNotify (
  IN CONST EFI_GUID  *Protocol,
  IN VOID            *Interface,
  IN EFI_HANDLE      Handle
  )
{
  //
  // Copy the CPU S3 resume data into SMRAM before lock-down.
  //
  GetAcpiCpuData ();

  //
  // Cache a copy of UEFI memory map before we start profiling feature.
  //
  GetUefiMemoryMap ();

  //
  // Set SMM ready to lock flag and return
  //
  mSmmReadyToLock = TRUE;
  return EFI_SUCCESS;
}
578 | \r | |
579 | /**\r | |
580 | The module Entry Point of the CPU SMM driver.\r | |
581 | \r | |
582 | @param ImageHandle The firmware allocated handle for the EFI image.\r | |
583 | @param SystemTable A pointer to the EFI System Table.\r | |
584 | \r | |
585 | @retval EFI_SUCCESS The entry point is executed successfully.\r | |
586 | @retval Other Some error occurs when executing this entry point.\r | |
587 | \r | |
588 | **/\r | |
589 | EFI_STATUS\r | |
590 | EFIAPI\r | |
591 | PiCpuSmmEntry (\r | |
592 | IN EFI_HANDLE ImageHandle,\r | |
593 | IN EFI_SYSTEM_TABLE *SystemTable\r | |
594 | )\r | |
595 | {\r | |
053e878b MK |
596 | EFI_STATUS Status;\r |
597 | EFI_MP_SERVICES_PROTOCOL *MpServices;\r | |
598 | UINTN NumberOfEnabledProcessors;\r | |
599 | UINTN Index;\r | |
600 | VOID *Buffer;\r | |
601 | UINTN BufferPages;\r | |
602 | UINTN TileCodeSize;\r | |
603 | UINTN TileDataSize;\r | |
604 | UINTN TileSize;\r | |
605 | UINT8 *Stacks;\r | |
606 | VOID *Registration;\r | |
607 | UINT32 RegEax;\r | |
608 | UINT32 RegEbx;\r | |
609 | UINT32 RegEcx;\r | |
610 | UINT32 RegEdx;\r | |
611 | UINTN FamilyId;\r | |
612 | UINTN ModelId;\r | |
613 | UINT32 Cr3;\r | |
ec07fd0e WJ |
614 | EFI_HOB_GUID_TYPE *GuidHob;\r |
615 | SMM_BASE_HOB_DATA *SmmBaseHobData;\r | |
616 | \r | |
617 | GuidHob = NULL;\r | |
618 | SmmBaseHobData = NULL;\r | |
529a5a86 | 619 | \r |
e21e355e LG |
620 | //\r |
621 | // Initialize address fixup\r | |
622 | //\r | |
623 | PiSmmCpuSmmInitFixupAddress ();\r | |
624 | PiSmmCpuSmiEntryFixupAddress ();\r | |
625 | \r | |
529a5a86 MK |
626 | //\r |
627 | // Initialize Debug Agent to support source level debug in SMM code\r | |
628 | //\r | |
629 | InitializeDebugAgent (DEBUG_AGENT_INIT_SMM, NULL, NULL);\r | |
630 | \r | |
631 | //\r | |
632 | // Report the start of CPU SMM initialization.\r | |
633 | //\r | |
634 | REPORT_STATUS_CODE (\r | |
635 | EFI_PROGRESS_CODE,\r | |
636 | EFI_COMPUTING_UNIT_HOST_PROCESSOR | EFI_CU_HP_PC_SMM_INIT\r | |
637 | );\r | |
638 | \r | |
529a5a86 MK |
639 | //\r |
640 | // Find out SMRR Base and SMRR Size\r | |
641 | //\r | |
642 | FindSmramInfo (&mCpuHotPlugData.SmrrBase, &mCpuHotPlugData.SmrrSize);\r | |
643 | \r | |
644 | //\r | |
645 | // Get MP Services Protocol\r | |
646 | //\r | |
647 | Status = SystemTable->BootServices->LocateProtocol (&gEfiMpServiceProtocolGuid, NULL, (VOID **)&MpServices);\r | |
648 | ASSERT_EFI_ERROR (Status);\r | |
649 | \r | |
650 | //\r | |
651 | // Use MP Services Protocol to retrieve the number of processors and number of enabled processors\r | |
652 | //\r | |
653 | Status = MpServices->GetNumberOfProcessors (MpServices, &mNumberOfCpus, &NumberOfEnabledProcessors);\r | |
654 | ASSERT_EFI_ERROR (Status);\r | |
655 | ASSERT (mNumberOfCpus <= PcdGet32 (PcdCpuMaxLogicalProcessorNumber));\r | |
656 | \r | |
657 | //\r | |
658 | // If support CPU hot plug, PcdCpuSmmEnableBspElection should be set to TRUE.\r | |
659 | // A constant BSP index makes no sense because it may be hot removed.\r | |
660 | //\r | |
7c2a6033 | 661 | DEBUG_CODE_BEGIN ();\r |
053e878b MK |
662 | if (FeaturePcdGet (PcdCpuHotPlugSupport)) {\r |
663 | ASSERT (FeaturePcdGet (PcdCpuSmmEnableBspElection));\r | |
664 | }\r | |
529a5a86 | 665 | \r |
7c2a6033 | 666 | DEBUG_CODE_END ();\r |
529a5a86 MK |
667 | \r |
668 | //\r | |
669 | // Save the PcdCpuSmmCodeAccessCheckEnable value into a global variable.\r | |
670 | //\r | |
671 | mSmmCodeAccessCheckEnable = PcdGetBool (PcdCpuSmmCodeAccessCheckEnable);\r | |
96e1cba5 | 672 | DEBUG ((DEBUG_INFO, "PcdCpuSmmCodeAccessCheckEnable = %d\n", mSmmCodeAccessCheckEnable));\r |
529a5a86 | 673 | \r |
241f9149 LD |
674 | //\r |
675 | // Save the PcdPteMemoryEncryptionAddressOrMask value into a global variable.\r | |
676 | // Make sure AddressEncMask is contained to smallest supported address field.\r | |
677 | //\r | |
678 | mAddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;\r | |
96e1cba5 | 679 | DEBUG ((DEBUG_INFO, "mAddressEncMask = 0x%lx\n", mAddressEncMask));\r |
241f9149 | 680 | \r |
529a5a86 MK |
681 | //\r |
682 | // If support CPU hot plug, we need to allocate resources for possibly hot-added processors\r | |
683 | //\r | |
684 | if (FeaturePcdGet (PcdCpuHotPlugSupport)) {\r | |
685 | mMaxNumberOfCpus = PcdGet32 (PcdCpuMaxLogicalProcessorNumber);\r | |
686 | } else {\r | |
687 | mMaxNumberOfCpus = mNumberOfCpus;\r | |
688 | }\r | |
053e878b | 689 | \r |
529a5a86 MK |
690 | gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus = mMaxNumberOfCpus;\r |
691 | \r | |
692 | //\r | |
693 | // The CPU save state and code for the SMI entry point are tiled within an SMRAM\r | |
694 | // allocated buffer. The minimum size of this buffer for a uniprocessor system\r | |
695 | // is 32 KB, because the entry point is SMBASE + 32KB, and CPU save state area\r | |
696 | // just below SMBASE + 64KB. If more than one CPU is present in the platform,\r | |
697 | // then the SMI entry point and the CPU save state areas can be tiles to minimize\r | |
698 | // the total amount SMRAM required for all the CPUs. The tile size can be computed\r | |
699 | // by adding the // CPU save state size, any extra CPU specific context, and\r | |
700 | // the size of code that must be placed at the SMI entry point to transfer\r | |
701 | // control to a C function in the native SMM execution mode. This size is\r | |
702 | // rounded up to the nearest power of 2 to give the tile size for a each CPU.\r | |
703 | // The total amount of memory required is the maximum number of CPUs that\r | |
704 | // platform supports times the tile size. The picture below shows the tiling,\r | |
705 | // where m is the number of tiles that fit in 32KB.\r | |
706 | //\r | |
707 | // +-----------------------------+ <-- 2^n offset from Base of allocated buffer\r | |
708 | // | CPU m+1 Save State |\r | |
709 | // +-----------------------------+\r | |
710 | // | CPU m+1 Extra Data |\r | |
711 | // +-----------------------------+\r | |
712 | // | Padding |\r | |
713 | // +-----------------------------+\r | |
714 | // | CPU 2m SMI Entry |\r | |
715 | // +#############################+ <-- Base of allocated buffer + 64 KB\r | |
716 | // | CPU m-1 Save State |\r | |
717 | // +-----------------------------+\r | |
718 | // | CPU m-1 Extra Data |\r | |
719 | // +-----------------------------+\r | |
720 | // | Padding |\r | |
721 | // +-----------------------------+\r | |
722 | // | CPU 2m-1 SMI Entry |\r | |
723 | // +=============================+ <-- 2^n offset from Base of allocated buffer\r | |
724 | // | . . . . . . . . . . . . |\r | |
725 | // +=============================+ <-- 2^n offset from Base of allocated buffer\r | |
726 | // | CPU 2 Save State |\r | |
727 | // +-----------------------------+\r | |
728 | // | CPU 2 Extra Data |\r | |
729 | // +-----------------------------+\r | |
730 | // | Padding |\r | |
731 | // +-----------------------------+\r | |
732 | // | CPU m+1 SMI Entry |\r | |
733 | // +=============================+ <-- Base of allocated buffer + 32 KB\r | |
734 | // | CPU 1 Save State |\r | |
735 | // +-----------------------------+\r | |
736 | // | CPU 1 Extra Data |\r | |
737 | // +-----------------------------+\r | |
738 | // | Padding |\r | |
739 | // +-----------------------------+\r | |
740 | // | CPU m SMI Entry |\r | |
741 | // +#############################+ <-- Base of allocated buffer + 32 KB == CPU 0 SMBASE + 64 KB\r | |
742 | // | CPU 0 Save State |\r | |
743 | // +-----------------------------+\r | |
744 | // | CPU 0 Extra Data |\r | |
745 | // +-----------------------------+\r | |
746 | // | Padding |\r | |
747 | // +-----------------------------+\r | |
748 | // | CPU m-1 SMI Entry |\r | |
749 | // +=============================+ <-- 2^n offset from Base of allocated buffer\r | |
750 | // | . . . . . . . . . . . . |\r | |
751 | // +=============================+ <-- 2^n offset from Base of allocated buffer\r | |
752 | // | Padding |\r | |
753 | // +-----------------------------+\r | |
754 | // | CPU 1 SMI Entry |\r | |
755 | // +=============================+ <-- 2^n offset from Base of allocated buffer\r | |
756 | // | Padding |\r | |
757 | // +-----------------------------+\r | |
758 | // | CPU 0 SMI Entry |\r | |
759 | // +#############################+ <-- Base of allocated buffer == CPU 0 SMBASE + 32 KB\r | |
760 | //\r | |
761 | \r | |
762 | //\r | |
763 | // Retrieve CPU Family\r | |
764 | //\r | |
e9b3a6c9 | 765 | AsmCpuid (CPUID_VERSION_INFO, &RegEax, NULL, NULL, NULL);\r |
529a5a86 | 766 | FamilyId = (RegEax >> 8) & 0xf;\r |
053e878b MK |
767 | ModelId = (RegEax >> 4) & 0xf;\r |
768 | if ((FamilyId == 0x06) || (FamilyId == 0x0f)) {\r | |
529a5a86 MK |
769 | ModelId = ModelId | ((RegEax >> 12) & 0xf0);\r |
770 | }\r | |
771 | \r | |
e9b3a6c9 MK |
772 | RegEdx = 0;\r |
773 | AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);\r | |
774 | if (RegEax >= CPUID_EXTENDED_CPU_SIG) {\r | |
775 | AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);\r | |
776 | }\r | |
053e878b | 777 | \r |
529a5a86 MK |
778 | //\r |
779 | // Determine the mode of the CPU at the time an SMI occurs\r | |
780 | // Intel(R) 64 and IA-32 Architectures Software Developer's Manual\r | |
781 | // Volume 3C, Section 34.4.1.1\r | |
782 | //\r | |
783 | mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT;\r | |
784 | if ((RegEdx & BIT29) != 0) {\r | |
785 | mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;\r | |
786 | }\r | |
053e878b | 787 | \r |
529a5a86 | 788 | if (FamilyId == 0x06) {\r |
053e878b | 789 | if ((ModelId == 0x17) || (ModelId == 0x0f) || (ModelId == 0x1c)) {\r |
529a5a86 MK |
790 | mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;\r |
791 | }\r | |
792 | }\r | |
793 | \r | |
3eb69b08 JY |
794 | DEBUG ((DEBUG_INFO, "PcdControlFlowEnforcementPropertyMask = %d\n", PcdGet32 (PcdControlFlowEnforcementPropertyMask)));\r |
795 | if (PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) {\r | |
5d34cc49 WH |
796 | AsmCpuid (CPUID_SIGNATURE, &RegEax, NULL, NULL, NULL);\r |
797 | if (RegEax >= CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS) {\r | |
3eb69b08 JY |
798 | AsmCpuidEx (CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS, CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO, NULL, NULL, &RegEcx, &RegEdx);\r |
799 | DEBUG ((DEBUG_INFO, "CPUID[7/0] ECX - 0x%08x\n", RegEcx));\r | |
800 | DEBUG ((DEBUG_INFO, " CET_SS - 0x%08x\n", RegEcx & CPUID_CET_SS));\r | |
801 | DEBUG ((DEBUG_INFO, " CET_IBT - 0x%08x\n", RegEdx & CPUID_CET_IBT));\r | |
802 | if ((RegEcx & CPUID_CET_SS) == 0) {\r | |
803 | mCetSupported = FALSE;\r | |
804 | PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);\r | |
805 | }\r | |
053e878b | 806 | \r |
3eb69b08 JY |
807 | if (mCetSupported) {\r |
808 | AsmCpuidEx (CPUID_EXTENDED_STATE, CPUID_EXTENDED_STATE_SUB_LEAF, NULL, &RegEbx, &RegEcx, NULL);\r | |
809 | DEBUG ((DEBUG_INFO, "CPUID[D/1] EBX - 0x%08x, ECX - 0x%08x\n", RegEbx, RegEcx));\r | |
810 | AsmCpuidEx (CPUID_EXTENDED_STATE, 11, &RegEax, NULL, &RegEcx, NULL);\r | |
811 | DEBUG ((DEBUG_INFO, "CPUID[D/11] EAX - 0x%08x, ECX - 0x%08x\n", RegEax, RegEcx));\r | |
053e878b | 812 | AsmCpuidEx (CPUID_EXTENDED_STATE, 12, &RegEax, NULL, &RegEcx, NULL);\r |
3eb69b08 JY |
813 | DEBUG ((DEBUG_INFO, "CPUID[D/12] EAX - 0x%08x, ECX - 0x%08x\n", RegEax, RegEcx));\r |
814 | }\r | |
5d34cc49 WH |
815 | } else {\r |
816 | mCetSupported = FALSE;\r | |
053e878b | 817 | PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);\r |
3eb69b08 JY |
818 | }\r |
819 | } else {\r | |
820 | mCetSupported = FALSE;\r | |
821 | PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);\r | |
822 | }\r | |
823 | \r | |
529a5a86 MK |
824 | //\r |
825 | // Compute tile size of buffer required to hold the CPU SMRAM Save State Map, extra CPU\r | |
f12367a0 MK |
826 | // specific context start starts at SMBASE + SMM_PSD_OFFSET, and the SMI entry point.\r |
827 | // This size is rounded up to nearest power of 2.\r | |
529a5a86 | 828 | //\r |
ae82a30b | 829 | TileCodeSize = GetSmiHandlerSize ();\r |
053e878b | 830 | TileCodeSize = ALIGN_VALUE (TileCodeSize, SIZE_4KB);\r |
f12367a0 | 831 | TileDataSize = (SMRAM_SAVE_STATE_MAP_OFFSET - SMM_PSD_OFFSET) + sizeof (SMRAM_SAVE_STATE_MAP);\r |
053e878b MK |
832 | TileDataSize = ALIGN_VALUE (TileDataSize, SIZE_4KB);\r |
833 | TileSize = TileDataSize + TileCodeSize - 1;\r | |
834 | TileSize = 2 * GetPowerOfTwo32 ((UINT32)TileSize);\r | |
96e1cba5 | 835 | DEBUG ((DEBUG_INFO, "SMRAM TileSize = 0x%08x (0x%08x, 0x%08x)\n", TileSize, TileCodeSize, TileDataSize));\r |
529a5a86 MK |
836 | \r |
837 | //\r | |
f12367a0 MK |
838 | // If the TileSize is larger than space available for the SMI Handler of\r |
839 | // CPU[i], the extra CPU specific context of CPU[i+1], and the SMRAM Save\r | |
840 | // State Map of CPU[i+1], then ASSERT(). If this ASSERT() is triggered, then\r | |
841 | // the SMI Handler size must be reduced or the size of the extra CPU specific\r | |
842 | // context must be reduced.\r | |
529a5a86 MK |
843 | //\r |
844 | ASSERT (TileSize <= (SMRAM_SAVE_STATE_MAP_OFFSET + sizeof (SMRAM_SAVE_STATE_MAP) - SMM_HANDLER_OFFSET));\r | |
845 | \r | |
846 | //\r | |
ec07fd0e WJ |
847 | // Retrive the allocated SmmBase from gSmmBaseHobGuid. If found,\r |
848 | // means the SmBase relocation has been done.\r | |
529a5a86 | 849 | //\r |
ec07fd0e WJ |
850 | GuidHob = GetFirstGuidHob (&gSmmBaseHobGuid);\r |
851 | if (GuidHob != NULL) {\r | |
852 | //\r | |
853 | // Check whether the Required TileSize is enough.\r | |
854 | //\r | |
855 | if (TileSize > SIZE_8KB) {\r | |
856 | DEBUG ((DEBUG_ERROR, "The Range of Smbase in SMRAM is not enough -- Required TileSize = 0x%08x, Actual TileSize = 0x%08x\n", TileSize, SIZE_8KB));\r | |
857 | CpuDeadLoop ();\r | |
858 | return RETURN_BUFFER_TOO_SMALL;\r | |
859 | }\r | |
860 | \r | |
861 | SmmBaseHobData = GET_GUID_HOB_DATA (GuidHob);\r | |
862 | \r | |
863 | //\r | |
864 | // Assume single instance of HOB produced, expect the HOB.NumberOfProcessors equals to the mMaxNumberOfCpus.\r | |
865 | //\r | |
866 | ASSERT (SmmBaseHobData->NumberOfProcessors == (UINT32)mMaxNumberOfCpus && SmmBaseHobData->ProcessorIndex == 0);\r | |
867 | mSmmRelocated = TRUE;\r | |
529a5a86 | 868 | } else {\r |
ec07fd0e WJ |
869 | //\r |
870 | // When the HOB doesn't exist, allocate new SMBASE itself.\r | |
871 | //\r | |
872 | DEBUG ((DEBUG_INFO, "PiCpuSmmEntry: gSmmBaseHobGuid not found!\n"));\r | |
873 | //\r | |
874 | // Allocate buffer for all of the tiles.\r | |
875 | //\r | |
876 | // Intel(R) 64 and IA-32 Architectures Software Developer's Manual\r | |
877 | // Volume 3C, Section 34.11 SMBASE Relocation\r | |
878 | // For Pentium and Intel486 processors, the SMBASE values must be\r | |
879 | // aligned on a 32-KByte boundary or the processor will enter shutdown\r | |
880 | // state during the execution of a RSM instruction.\r | |
881 | //\r | |
882 | // Intel486 processors: FamilyId is 4\r | |
883 | // Pentium processors : FamilyId is 5\r | |
884 | //\r | |
885 | BufferPages = EFI_SIZE_TO_PAGES (SIZE_32KB + TileSize * (mMaxNumberOfCpus - 1));\r | |
886 | if ((FamilyId == 4) || (FamilyId == 5)) {\r | |
887 | Buffer = AllocateAlignedCodePages (BufferPages, SIZE_32KB);\r | |
888 | } else {\r | |
889 | Buffer = AllocateAlignedCodePages (BufferPages, SIZE_4KB);\r | |
890 | }\r | |
053e878b | 891 | \r |
ec07fd0e WJ |
892 | ASSERT (Buffer != NULL);\r |
893 | DEBUG ((DEBUG_INFO, "New Allcoated SMRAM SaveState Buffer (0x%08x, 0x%08x)\n", Buffer, EFI_PAGES_TO_SIZE (BufferPages)));\r | |
894 | }\r | |
529a5a86 MK |
895 | \r |
896 | //\r | |
897 | // Allocate buffer for pointers to array in SMM_CPU_PRIVATE_DATA.\r | |
898 | //\r | |
899 | gSmmCpuPrivate->ProcessorInfo = (EFI_PROCESSOR_INFORMATION *)AllocatePool (sizeof (EFI_PROCESSOR_INFORMATION) * mMaxNumberOfCpus);\r | |
900 | ASSERT (gSmmCpuPrivate->ProcessorInfo != NULL);\r | |
901 | \r | |
902 | gSmmCpuPrivate->Operation = (SMM_CPU_OPERATION *)AllocatePool (sizeof (SMM_CPU_OPERATION) * mMaxNumberOfCpus);\r | |
903 | ASSERT (gSmmCpuPrivate->Operation != NULL);\r | |
904 | \r | |
905 | gSmmCpuPrivate->CpuSaveStateSize = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);\r | |
906 | ASSERT (gSmmCpuPrivate->CpuSaveStateSize != NULL);\r | |
907 | \r | |
908 | gSmmCpuPrivate->CpuSaveState = (VOID **)AllocatePool (sizeof (VOID *) * mMaxNumberOfCpus);\r | |
909 | ASSERT (gSmmCpuPrivate->CpuSaveState != NULL);\r | |
910 | \r | |
911 | mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveStateSize = gSmmCpuPrivate->CpuSaveStateSize;\r | |
912 | mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveState = gSmmCpuPrivate->CpuSaveState;\r | |
529a5a86 MK |
913 | \r |
914 | //\r | |
915 | // Allocate buffer for pointers to array in CPU_HOT_PLUG_DATA.\r | |
916 | //\r | |
917 | mCpuHotPlugData.ApicId = (UINT64 *)AllocatePool (sizeof (UINT64) * mMaxNumberOfCpus);\r | |
918 | ASSERT (mCpuHotPlugData.ApicId != NULL);\r | |
919 | mCpuHotPlugData.SmBase = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);\r | |
920 | ASSERT (mCpuHotPlugData.SmBase != NULL);\r | |
921 | mCpuHotPlugData.ArrayLength = (UINT32)mMaxNumberOfCpus;\r | |
922 | \r | |
923 | //\r | |
924 | // Retrieve APIC ID of each enabled processor from the MP Services protocol.\r | |
925 | // Also compute the SMBASE address, CPU Save State address, and CPU Save state\r | |
926 | // size for each CPU in the platform\r | |
927 | //\r | |
928 | for (Index = 0; Index < mMaxNumberOfCpus; Index++) {\r | |
ec07fd0e WJ |
929 | mCpuHotPlugData.SmBase[Index] = mSmmRelocated ? (UINTN)SmmBaseHobData->SmBase[Index] : (UINTN)Buffer + Index * TileSize - SMM_HANDLER_OFFSET;\r |
930 | \r | |
053e878b | 931 | gSmmCpuPrivate->CpuSaveStateSize[Index] = sizeof (SMRAM_SAVE_STATE_MAP);\r |
529a5a86 | 932 | gSmmCpuPrivate->CpuSaveState[Index] = (VOID *)(mCpuHotPlugData.SmBase[Index] + SMRAM_SAVE_STATE_MAP_OFFSET);\r |
053e878b | 933 | gSmmCpuPrivate->Operation[Index] = SmmCpuNone;\r |
529a5a86 MK |
934 | \r |
935 | if (Index < mNumberOfCpus) {\r | |
936 | Status = MpServices->GetProcessorInfo (MpServices, Index, &gSmmCpuPrivate->ProcessorInfo[Index]);\r | |
937 | ASSERT_EFI_ERROR (Status);\r | |
938 | mCpuHotPlugData.ApicId[Index] = gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId;\r | |
939 | \r | |
053e878b MK |
940 | DEBUG ((\r |
941 | DEBUG_INFO,\r | |
942 | "CPU[%03x] APIC ID=%04x SMBASE=%08x SaveState=%08x Size=%08x\n",\r | |
529a5a86 MK |
943 | Index,\r |
944 | (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId,\r | |
945 | mCpuHotPlugData.SmBase[Index],\r | |
946 | gSmmCpuPrivate->CpuSaveState[Index],\r | |
947 | gSmmCpuPrivate->CpuSaveStateSize[Index]\r | |
948 | ));\r | |
949 | } else {\r | |
950 | gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId = INVALID_APIC_ID;\r | |
053e878b | 951 | mCpuHotPlugData.ApicId[Index] = INVALID_APIC_ID;\r |
529a5a86 MK |
952 | }\r |
953 | }\r | |
954 | \r | |
955 | //\r | |
956 | // Allocate SMI stacks for all processors.\r | |
957 | //\r | |
3eb69b08 | 958 | mSmmStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)));\r |
529a5a86 MK |
959 | if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r |
960 | //\r | |
455b0347 S |
961 | // SMM Stack Guard Enabled\r |
962 | // 2 more pages is allocated for each processor, one is guard page and the other is known good stack.\r | |
529a5a86 | 963 | //\r |
455b0347 S |
964 | // +--------------------------------------------------+-----+--------------------------------------------------+\r |
965 | // | Known Good Stack | Guard Page | SMM Stack | ... | Known Good Stack | Guard Page | SMM Stack |\r | |
966 | // +--------------------------------------------------+-----+--------------------------------------------------+\r | |
967 | // | 4K | 4K PcdCpuSmmStackSize| | 4K | 4K PcdCpuSmmStackSize|\r | |
968 | // |<---------------- mSmmStackSize ----------------->| |<---------------- mSmmStackSize ----------------->|\r | |
969 | // | | | |\r | |
970 | // |<------------------ Processor 0 ----------------->| |<------------------ Processor n ----------------->|\r | |
529a5a86 | 971 | //\r |
3eb69b08 JY |
972 | mSmmStackSize += EFI_PAGES_TO_SIZE (2);\r |
973 | }\r | |
974 | \r | |
975 | mSmmShadowStackSize = 0;\r | |
976 | if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {\r | |
3eb69b08 | 977 | mSmmShadowStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmShadowStackSize)));\r |
455b0347 | 978 | \r |
3eb69b08 | 979 | if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r |
455b0347 S |
980 | //\r |
981 | // SMM Stack Guard Enabled\r | |
982 | // Append Shadow Stack after normal stack\r | |
983 | // 2 more pages is allocated for each processor, one is guard page and the other is known good shadow stack.\r | |
984 | //\r | |
985 | // |= Stacks\r | |
986 | // +--------------------------------------------------+---------------------------------------------------------------+\r | |
987 | // | Known Good Stack | Guard Page | SMM Stack | Known Good Shadow Stack | Guard Page | SMM Shadow Stack |\r | |
988 | // +--------------------------------------------------+---------------------------------------------------------------+\r | |
989 | // | 4K | 4K |PcdCpuSmmStackSize| 4K | 4K |PcdCpuSmmShadowStackSize|\r | |
990 | // |<---------------- mSmmStackSize ----------------->|<--------------------- mSmmShadowStackSize ------------------->|\r | |
991 | // | |\r | |
992 | // |<-------------------------------------------- Processor N ------------------------------------------------------->|\r | |
993 | //\r | |
3eb69b08 | 994 | mSmmShadowStackSize += EFI_PAGES_TO_SIZE (2);\r |
455b0347 S |
995 | } else {\r |
996 | //\r | |
997 | // SMM Stack Guard Disabled (Known Good Stack is still required for potential stack switch.)\r | |
998 | // Append Shadow Stack after normal stack with 1 more page as known good shadow stack.\r | |
999 | // 1 more pages is allocated for each processor, it is known good stack.\r | |
1000 | //\r | |
1001 | //\r | |
1002 | // |= Stacks\r | |
1003 | // +-------------------------------------+--------------------------------------------------+\r | |
1004 | // | Known Good Stack | SMM Stack | Known Good Shadow Stack | SMM Shadow Stack |\r | |
1005 | // +-------------------------------------+--------------------------------------------------+\r | |
1006 | // | 4K |PcdCpuSmmStackSize| 4K |PcdCpuSmmShadowStackSize|\r | |
1007 | // |<---------- mSmmStackSize ---------->|<--------------- mSmmShadowStackSize ------------>|\r | |
1008 | // | |\r | |
1009 | // |<-------------------------------- Processor N ----------------------------------------->|\r | |
1010 | //\r | |
1011 | mSmmShadowStackSize += EFI_PAGES_TO_SIZE (1);\r | |
1012 | mSmmStackSize += EFI_PAGES_TO_SIZE (1);\r | |
3eb69b08 JY |
1013 | }\r |
1014 | }\r | |
1015 | \r | |
053e878b | 1016 | Stacks = (UINT8 *)AllocatePages (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (EFI_SIZE_TO_PAGES (mSmmStackSize + mSmmShadowStackSize)));\r |
3eb69b08 JY |
1017 | ASSERT (Stacks != NULL);\r |
1018 | mSmmStackArrayBase = (UINTN)Stacks;\r | |
053e878b | 1019 | mSmmStackArrayEnd = mSmmStackArrayBase + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (mSmmStackSize + mSmmShadowStackSize) - 1;\r |
3eb69b08 JY |
1020 | \r |
1021 | DEBUG ((DEBUG_INFO, "Stacks - 0x%x\n", Stacks));\r | |
1022 | DEBUG ((DEBUG_INFO, "mSmmStackSize - 0x%x\n", mSmmStackSize));\r | |
1023 | DEBUG ((DEBUG_INFO, "PcdCpuSmmStackGuard - 0x%x\n", FeaturePcdGet (PcdCpuSmmStackGuard)));\r | |
1024 | if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {\r | |
1025 | DEBUG ((DEBUG_INFO, "mSmmShadowStackSize - 0x%x\n", mSmmShadowStackSize));\r | |
529a5a86 MK |
1026 | }\r |
1027 | \r | |
1028 | //\r | |
1029 | // Set SMI stack for SMM base relocation\r | |
1030 | //\r | |
5830d2c3 LE |
1031 | PatchInstructionX86 (\r |
1032 | gPatchSmmInitStack,\r | |
053e878b | 1033 | (UINTN)(Stacks + mSmmStackSize - sizeof (UINTN)),\r |
5830d2c3 LE |
1034 | sizeof (UINTN)\r |
1035 | );\r | |
529a5a86 MK |
1036 | \r |
1037 | //\r | |
1038 | // Initialize IDT\r | |
1039 | //\r | |
1040 | InitializeSmmIdt ();\r | |
1041 | \r | |
1042 | //\r | |
ec07fd0e WJ |
1043 | // Check whether Smm Relocation is done or not.\r |
1044 | // If not, will do the SmmBases Relocation here!!!\r | |
529a5a86 | 1045 | //\r |
ec07fd0e WJ |
1046 | if (!mSmmRelocated) {\r |
1047 | //\r | |
1048 | // Relocate SMM Base addresses to the ones allocated from SMRAM\r | |
1049 | //\r | |
1050 | mRebased = (BOOLEAN *)AllocateZeroPool (sizeof (BOOLEAN) * mMaxNumberOfCpus);\r | |
1051 | ASSERT (mRebased != NULL);\r | |
1052 | SmmRelocateBases ();\r | |
529a5a86 | 1053 | \r |
ec07fd0e WJ |
1054 | //\r |
1055 | // Call hook for BSP to perform extra actions in normal mode after all\r | |
1056 | // SMM base addresses have been relocated on all CPUs\r | |
1057 | //\r | |
1058 | SmmCpuFeaturesSmmRelocationComplete ();\r | |
1059 | }\r | |
529a5a86 | 1060 | \r |
717fb604 JY |
1061 | DEBUG ((DEBUG_INFO, "mXdSupported - 0x%x\n", mXdSupported));\r |
1062 | \r | |
529a5a86 MK |
1063 | //\r |
1064 | // SMM Time initialization\r | |
1065 | //\r | |
1066 | InitializeSmmTimer ();\r | |
1067 | \r | |
1068 | //\r | |
1069 | // Initialize MP globals\r | |
1070 | //\r | |
3eb69b08 JY |
1071 | Cr3 = InitializeMpServiceData (Stacks, mSmmStackSize, mSmmShadowStackSize);\r |
1072 | \r | |
1073 | if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {\r | |
1074 | for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {\r | |
1075 | SetShadowStack (\r | |
1076 | Cr3,\r | |
1077 | (EFI_PHYSICAL_ADDRESS)(UINTN)Stacks + mSmmStackSize + (mSmmStackSize + mSmmShadowStackSize) * Index,\r | |
1078 | mSmmShadowStackSize\r | |
1079 | );\r | |
1080 | if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r | |
1081 | SetNotPresentPage (\r | |
1082 | Cr3,\r | |
053e878b MK |
1083 | (EFI_PHYSICAL_ADDRESS)(UINTN)Stacks + mSmmStackSize + EFI_PAGES_TO_SIZE (1) + (mSmmStackSize + mSmmShadowStackSize) * Index,\r |
1084 | EFI_PAGES_TO_SIZE (1)\r | |
3eb69b08 JY |
1085 | );\r |
1086 | }\r | |
1087 | }\r | |
1088 | }\r | |
529a5a86 | 1089 | \r |
ec07fd0e WJ |
1090 | //\r |
1091 | // For relocated SMBASE, some MSRs & CSRs are still required to be configured in SMM Mode for SMM Initialization.\r | |
1092 | // Those MSRs & CSRs must be configured before normal SMI sources happen.\r | |
1093 | // So, here is to issue SMI IPI (All Excluding Self SMM IPI + BSP SMM IPI) to execute first SMI init.\r | |
1094 | //\r | |
1095 | if (mSmmRelocated) {\r | |
1096 | ExecuteFirstSmiInit ();\r | |
1097 | \r | |
1098 | //\r | |
1099 | // Call hook for BSP to perform extra actions in normal mode after all\r | |
1100 | // SMM base addresses have been relocated on all CPUs\r | |
1101 | //\r | |
1102 | SmmCpuFeaturesSmmRelocationComplete ();\r | |
1103 | }\r | |
1104 | \r | |
529a5a86 MK |
1105 | //\r |
1106 | // Fill in SMM Reserved Regions\r | |
1107 | //\r | |
1108 | gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedStart = 0;\r | |
1109 | gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedSize = 0;\r | |
1110 | \r | |
1111 | //\r | |
1112 | // Install the SMM Configuration Protocol onto a new handle on the handle database.\r | |
1113 | // The entire SMM Configuration Protocol is allocated from SMRAM, so only a pointer\r | |
1114 | // to an SMRAM address will be present in the handle database\r | |
1115 | //\r | |
1116 | Status = SystemTable->BootServices->InstallMultipleProtocolInterfaces (\r | |
1117 | &gSmmCpuPrivate->SmmCpuHandle,\r | |
053e878b MK |
1118 | &gEfiSmmConfigurationProtocolGuid,\r |
1119 | &gSmmCpuPrivate->SmmConfiguration,\r | |
529a5a86 MK |
1120 | NULL\r |
1121 | );\r | |
1122 | ASSERT_EFI_ERROR (Status);\r | |
1123 | \r | |
1124 | //\r | |
1125 | // Install the SMM CPU Protocol into SMM protocol database\r | |
1126 | //\r | |
1127 | Status = gSmst->SmmInstallProtocolInterface (\r | |
1128 | &mSmmCpuHandle,\r | |
1129 | &gEfiSmmCpuProtocolGuid,\r | |
1130 | EFI_NATIVE_INTERFACE,\r | |
1131 | &mSmmCpu\r | |
1132 | );\r | |
1133 | ASSERT_EFI_ERROR (Status);\r | |
1134 | \r | |
827330cc JW |
1135 | //\r |
1136 | // Install the SMM Memory Attribute Protocol into SMM protocol database\r | |
1137 | //\r | |
1138 | Status = gSmst->SmmInstallProtocolInterface (\r | |
1139 | &mSmmCpuHandle,\r | |
1140 | &gEdkiiSmmMemoryAttributeProtocolGuid,\r | |
1141 | EFI_NATIVE_INTERFACE,\r | |
1142 | &mSmmMemoryAttribute\r | |
1143 | );\r | |
1144 | ASSERT_EFI_ERROR (Status);\r | |
1145 | \r | |
51dd408a ED |
1146 | //\r |
1147 | // Initialize global buffer for MM MP.\r | |
1148 | //\r | |
1149 | InitializeDataForMmMp ();\r | |
1150 | \r | |
c14c4719 WJ |
1151 | //\r |
1152 | // Initialize Package First Thread Index Info.\r | |
1153 | //\r | |
1154 | InitPackageFirstThreadIndexInfo ();\r | |
1155 | \r | |
51dd408a ED |
1156 | //\r |
1157 | // Install the SMM Mp Protocol into SMM protocol database\r | |
1158 | //\r | |
1159 | Status = gSmst->SmmInstallProtocolInterface (\r | |
1160 | &mSmmCpuHandle,\r | |
1161 | &gEfiMmMpProtocolGuid,\r | |
1162 | EFI_NATIVE_INTERFACE,\r | |
1163 | &mSmmMp\r | |
1164 | );\r | |
1165 | ASSERT_EFI_ERROR (Status);\r | |
1166 | \r | |
529a5a86 MK |
1167 | //\r |
1168 | // Expose address of CPU Hot Plug Data structure if CPU hot plug is supported.\r | |
1169 | //\r | |
1170 | if (FeaturePcdGet (PcdCpuHotPlugSupport)) {\r | |
9838b016 MK |
1171 | Status = PcdSet64S (PcdCpuHotPlugDataAddress, (UINT64)(UINTN)&mCpuHotPlugData);\r |
1172 | ASSERT_EFI_ERROR (Status);\r | |
529a5a86 MK |
1173 | }\r |
1174 | \r | |
1175 | //\r | |
1176 | // Initialize SMM CPU Services Support\r | |
1177 | //\r | |
1178 | Status = InitializeSmmCpuServices (mSmmCpuHandle);\r | |
1179 | ASSERT_EFI_ERROR (Status);\r | |
1180 | \r | |
529a5a86 MK |
1181 | //\r |
1182 | // register SMM Ready To Lock Protocol notification\r | |
1183 | //\r | |
1184 | Status = gSmst->SmmRegisterProtocolNotify (\r | |
1185 | &gEfiSmmReadyToLockProtocolGuid,\r | |
1186 | SmmReadyToLockEventNotify,\r | |
1187 | &Registration\r | |
1188 | );\r | |
1189 | ASSERT_EFI_ERROR (Status);\r | |
1190 | \r | |
529a5a86 MK |
1191 | //\r |
1192 | // Initialize SMM Profile feature\r | |
1193 | //\r | |
1194 | InitSmmProfile (Cr3);\r | |
1195 | \r | |
b10d5ddc | 1196 | GetAcpiS3EnableFlag ();\r |
0bdc9e75 | 1197 | InitSmmS3ResumeState (Cr3);\r |
529a5a86 | 1198 | \r |
96e1cba5 | 1199 | DEBUG ((DEBUG_INFO, "SMM CPU Module exit from SMRAM with EFI_SUCCESS\n"));\r |
529a5a86 MK |
1200 | \r |
1201 | return EFI_SUCCESS;\r | |
1202 | }\r | |
1203 | \r | |
/**

  Find out SMRAM information including SMRR base and SMRR size.

  Queries the EFI_SMM_ACCESS2_PROTOCOL for all SMRAM descriptors, caches them
  in mSmmCpuSmramRanges/mSmmCpuSmramRangeCount, selects the largest usable
  range between 1MB and 4GB, and then grows that window by merging any
  physically adjacent SMRAM ranges.

  @param  SmrrBase              SMRR base
  @param  SmrrSize              SMRR size

**/
VOID
FindSmramInfo (
  OUT UINT32  *SmrrBase,
  OUT UINT32  *SmrrSize
  )
{
  EFI_STATUS                Status;
  UINTN                     Size;
  EFI_SMM_ACCESS2_PROTOCOL  *SmmAccess;
  EFI_SMRAM_DESCRIPTOR      *CurrentSmramRange;
  UINTN                     Index;
  UINT64                    MaxSize;
  BOOLEAN                   Found;

  //
  // Get SMM Access Protocol
  //
  Status = gBS->LocateProtocol (&gEfiSmmAccess2ProtocolGuid, NULL, (VOID **)&SmmAccess);
  ASSERT_EFI_ERROR (Status);

  //
  // Get SMRAM information: first call with Size==0 is expected to fail with
  // EFI_BUFFER_TOO_SMALL and report the required buffer size.
  //
  Size   = 0;
  Status = SmmAccess->GetCapabilities (SmmAccess, &Size, NULL);
  ASSERT (Status == EFI_BUFFER_TOO_SMALL);

  mSmmCpuSmramRanges = (EFI_SMRAM_DESCRIPTOR *)AllocatePool (Size);
  ASSERT (mSmmCpuSmramRanges != NULL);

  Status = SmmAccess->GetCapabilities (SmmAccess, &Size, mSmmCpuSmramRanges);
  ASSERT_EFI_ERROR (Status);

  mSmmCpuSmramRangeCount = Size / sizeof (EFI_SMRAM_DESCRIPTOR);

  //
  // Find the largest SMRAM range between 1MB and 4GB that is at least 256K - 4K in size
  //
  CurrentSmramRange = NULL;
  for (Index = 0, MaxSize = SIZE_256KB - EFI_PAGE_SIZE; Index < mSmmCpuSmramRangeCount; Index++) {
    //
    // Skip any SMRAM region that is already allocated, needs testing, or needs ECC initialization
    //
    if ((mSmmCpuSmramRanges[Index].RegionState & (EFI_ALLOCATED | EFI_NEEDS_TESTING | EFI_NEEDS_ECC_INITIALIZATION)) != 0) {
      continue;
    }

    if (mSmmCpuSmramRanges[Index].CpuStart >= BASE_1MB) {
      if ((mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize) <= SMRR_MAX_ADDRESS) {
        if (mSmmCpuSmramRanges[Index].PhysicalSize >= MaxSize) {
          MaxSize           = mSmmCpuSmramRanges[Index].PhysicalSize;
          CurrentSmramRange = &mSmmCpuSmramRanges[Index];
        }
      }
    }
  }

  ASSERT (CurrentSmramRange != NULL);

  *SmrrBase = (UINT32)CurrentSmramRange->CpuStart;
  *SmrrSize = (UINT32)CurrentSmramRange->PhysicalSize;

  //
  // Repeatedly expand the [SmrrBase, SmrrBase + SmrrSize) window by absorbing
  // any SMRAM range that ends exactly at SmrrBase (grow downward) or starts
  // exactly at SmrrBase + SmrrSize (grow upward), until a full pass over the
  // descriptor list merges nothing.
  //
  do {
    Found = FALSE;
    for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {
      if ((mSmmCpuSmramRanges[Index].CpuStart < *SmrrBase) &&
          (*SmrrBase == (mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize)))
      {
        *SmrrBase = (UINT32)mSmmCpuSmramRanges[Index].CpuStart;
        *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);
        Found     = TRUE;
      } else if (((*SmrrBase + *SmrrSize) == mSmmCpuSmramRanges[Index].CpuStart) && (mSmmCpuSmramRanges[Index].PhysicalSize > 0)) {
        *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);
        Found     = TRUE;
      }
    }
  } while (Found);

  DEBUG ((DEBUG_INFO, "SMRR Base: 0x%x, SMRR Size: 0x%x\n", *SmrrBase, *SmrrSize));
}
1292 | \r | |
1293 | /**\r | |
1294 | Configure SMM Code Access Check feature on an AP.\r | |
1295 | SMM Feature Control MSR will be locked after configuration.\r | |
1296 | \r | |
1297 | @param[in,out] Buffer Pointer to private data buffer.\r | |
1298 | **/\r | |
1299 | VOID\r | |
1300 | EFIAPI\r | |
1301 | ConfigSmmCodeAccessCheckOnCurrentProcessor (\r | |
1302 | IN OUT VOID *Buffer\r | |
1303 | )\r | |
1304 | {\r | |
1305 | UINTN CpuIndex;\r | |
1306 | UINT64 SmmFeatureControlMsr;\r | |
1307 | UINT64 NewSmmFeatureControlMsr;\r | |
1308 | \r | |
1309 | //\r | |
1310 | // Retrieve the CPU Index from the context passed in\r | |
1311 | //\r | |
1312 | CpuIndex = *(UINTN *)Buffer;\r | |
1313 | \r | |
1314 | //\r | |
1315 | // Get the current SMM Feature Control MSR value\r | |
1316 | //\r | |
1317 | SmmFeatureControlMsr = SmmCpuFeaturesGetSmmRegister (CpuIndex, SmmRegFeatureControl);\r | |
1318 | \r | |
1319 | //\r | |
1320 | // Compute the new SMM Feature Control MSR value\r | |
1321 | //\r | |
1322 | NewSmmFeatureControlMsr = SmmFeatureControlMsr;\r | |
1323 | if (mSmmCodeAccessCheckEnable) {\r | |
1324 | NewSmmFeatureControlMsr |= SMM_CODE_CHK_EN_BIT;\r | |
f6bc3a6d JF |
1325 | if (FeaturePcdGet (PcdCpuSmmFeatureControlMsrLock)) {\r |
1326 | NewSmmFeatureControlMsr |= SMM_FEATURE_CONTROL_LOCK_BIT;\r | |
1327 | }\r | |
529a5a86 MK |
1328 | }\r |
1329 | \r | |
1330 | //\r | |
1331 | // Only set the SMM Feature Control MSR value if the new value is different than the current value\r | |
1332 | //\r | |
1333 | if (NewSmmFeatureControlMsr != SmmFeatureControlMsr) {\r | |
1334 | SmmCpuFeaturesSetSmmRegister (CpuIndex, SmmRegFeatureControl, NewSmmFeatureControlMsr);\r | |
1335 | }\r | |
1336 | \r | |
1337 | //\r | |
1338 | // Release the spin lock user to serialize the updates to the SMM Feature Control MSR\r | |
1339 | //\r | |
fe3a75bc | 1340 | ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);\r |
529a5a86 MK |
1341 | }\r |
1342 | \r | |
/**
  Configure SMM Code Access Check feature for all processors.
  SMM Feature Control MSR will be locked after configuration.

  Runs ConfigSmmCodeAccessCheckOnCurrentProcessor() first on the BSP and then
  on each existing AP, serialized by mConfigSmmCodeAccessCheckLock: the BSP
  acquires the lock before dispatching each worker and the worker releases it
  when finished.
**/
VOID
ConfigSmmCodeAccessCheck (
  VOID
  )
{
  UINTN       Index;
  EFI_STATUS  Status;

  //
  // Check to see if the Feature Control MSR is supported on this CPU
  //
  Index = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
  if (!SmmCpuFeaturesIsSmmRegisterSupported (Index, SmmRegFeatureControl)) {
    mSmmCodeAccessCheckEnable = FALSE;
    return;
  }

  //
  // Check to see if the CPU supports the SMM Code Access Check feature
  // Do not access this MSR unless the CPU supports the SmmRegFeatureControl
  //
  if ((AsmReadMsr64 (EFI_MSR_SMM_MCA_CAP) & SMM_CODE_ACCESS_CHK_BIT) == 0) {
    mSmmCodeAccessCheckEnable = FALSE;
    return;
  }

  //
  // Initialize the lock used to serialize the MSR programming in BSP and all APs
  //
  InitializeSpinLock (mConfigSmmCodeAccessCheckLock);

  //
  // Acquire Config SMM Code Access Check spin lock.  The BSP will release the
  // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
  //
  AcquireSpinLock (mConfigSmmCodeAccessCheckLock);

  //
  // Enable SMM Code Access Check feature on the BSP.
  //
  ConfigSmmCodeAccessCheckOnCurrentProcessor (&Index);

  //
  // Enable SMM Code Access Check feature for the APs.
  //
  for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {
    if (Index != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
      if (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId == INVALID_APIC_ID) {
        //
        // If this processor does not exist
        //
        continue;
      }

      //
      // Acquire Config SMM Code Access Check spin lock.  The AP will release the
      // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
      //
      AcquireSpinLock (mConfigSmmCodeAccessCheckLock);

      //
      // Call SmmStartupThisAp() to enable SMM Code Access Check on an AP.
      // NOTE: &Index is also the loop counter; the wait loop below keeps the
      // BSP from advancing Index until the AP has finished reading it and
      // released the lock.
      //
      Status = gSmst->SmmStartupThisAp (ConfigSmmCodeAccessCheckOnCurrentProcessor, Index, &Index);
      ASSERT_EFI_ERROR (Status);

      //
      // Wait for the AP to release the Config SMM Code Access Check spin lock.
      //
      while (!AcquireSpinLockOrFail (mConfigSmmCodeAccessCheckLock)) {
        CpuPause ();
      }

      //
      // Release the Config SMM Code Access Check spin lock.
      //
      ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);
    }
  }
}
1427 | \r | |
717fb604 JY |
1428 | /**\r |
1429 | Allocate pages for code.\r | |
1430 | \r | |
1431 | @param[in] Pages Number of pages to be allocated.\r | |
1432 | \r | |
1433 | @return Allocated memory.\r | |
1434 | **/\r | |
1435 | VOID *\r | |
1436 | AllocateCodePages (\r | |
053e878b | 1437 | IN UINTN Pages\r |
717fb604 JY |
1438 | )\r |
1439 | {\r | |
1440 | EFI_STATUS Status;\r | |
1441 | EFI_PHYSICAL_ADDRESS Memory;\r | |
1442 | \r | |
1443 | if (Pages == 0) {\r | |
1444 | return NULL;\r | |
1445 | }\r | |
1446 | \r | |
1447 | Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);\r | |
1448 | if (EFI_ERROR (Status)) {\r | |
1449 | return NULL;\r | |
1450 | }\r | |
053e878b MK |
1451 | \r |
1452 | return (VOID *)(UINTN)Memory;\r | |
717fb604 JY |
1453 | }\r |
1454 | \r | |
1455 | /**\r | |
1456 | Allocate aligned pages for code.\r | |
1457 | \r | |
1458 | @param[in] Pages Number of pages to be allocated.\r | |
1459 | @param[in] Alignment The requested alignment of the allocation.\r | |
1460 | Must be a power of two.\r | |
1461 | If Alignment is zero, then byte alignment is used.\r | |
1462 | \r | |
1463 | @return Allocated memory.\r | |
1464 | **/\r | |
1465 | VOID *\r | |
1466 | AllocateAlignedCodePages (\r | |
053e878b MK |
1467 | IN UINTN Pages,\r |
1468 | IN UINTN Alignment\r | |
717fb604 JY |
1469 | )\r |
1470 | {\r | |
1471 | EFI_STATUS Status;\r | |
1472 | EFI_PHYSICAL_ADDRESS Memory;\r | |
1473 | UINTN AlignedMemory;\r | |
1474 | UINTN AlignmentMask;\r | |
1475 | UINTN UnalignedPages;\r | |
1476 | UINTN RealPages;\r | |
1477 | \r | |
1478 | //\r | |
1479 | // Alignment must be a power of two or zero.\r | |
1480 | //\r | |
1481 | ASSERT ((Alignment & (Alignment - 1)) == 0);\r | |
1482 | \r | |
1483 | if (Pages == 0) {\r | |
1484 | return NULL;\r | |
1485 | }\r | |
053e878b | 1486 | \r |
717fb604 JY |
1487 | if (Alignment > EFI_PAGE_SIZE) {\r |
1488 | //\r | |
1489 | // Calculate the total number of pages since alignment is larger than page size.\r | |
1490 | //\r | |
053e878b MK |
1491 | AlignmentMask = Alignment - 1;\r |
1492 | RealPages = Pages + EFI_SIZE_TO_PAGES (Alignment);\r | |
717fb604 JY |
1493 | //\r |
1494 | // Make sure that Pages plus EFI_SIZE_TO_PAGES (Alignment) does not overflow.\r | |
1495 | //\r | |
1496 | ASSERT (RealPages > Pages);\r | |
1497 | \r | |
053e878b | 1498 | Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, RealPages, &Memory);\r |
717fb604 JY |
1499 | if (EFI_ERROR (Status)) {\r |
1500 | return NULL;\r | |
1501 | }\r | |
053e878b MK |
1502 | \r |
1503 | AlignedMemory = ((UINTN)Memory + AlignmentMask) & ~AlignmentMask;\r | |
1504 | UnalignedPages = EFI_SIZE_TO_PAGES (AlignedMemory - (UINTN)Memory);\r | |
717fb604 JY |
1505 | if (UnalignedPages > 0) {\r |
1506 | //\r | |
1507 | // Free first unaligned page(s).\r | |
1508 | //\r | |
1509 | Status = gSmst->SmmFreePages (Memory, UnalignedPages);\r | |
1510 | ASSERT_EFI_ERROR (Status);\r | |
1511 | }\r | |
053e878b | 1512 | \r |
8491e302 | 1513 | Memory = AlignedMemory + EFI_PAGES_TO_SIZE (Pages);\r |
717fb604 JY |
1514 | UnalignedPages = RealPages - Pages - UnalignedPages;\r |
1515 | if (UnalignedPages > 0) {\r | |
1516 | //\r | |
1517 | // Free last unaligned page(s).\r | |
1518 | //\r | |
1519 | Status = gSmst->SmmFreePages (Memory, UnalignedPages);\r | |
1520 | ASSERT_EFI_ERROR (Status);\r | |
1521 | }\r | |
1522 | } else {\r | |
1523 | //\r | |
1524 | // Do not over-allocate pages in this case.\r | |
1525 | //\r | |
1526 | Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);\r | |
1527 | if (EFI_ERROR (Status)) {\r | |
1528 | return NULL;\r | |
1529 | }\r | |
053e878b MK |
1530 | \r |
1531 | AlignedMemory = (UINTN)Memory;\r | |
717fb604 | 1532 | }\r |
053e878b MK |
1533 | \r |
1534 | return (VOID *)AlignedMemory;\r | |
717fb604 JY |
1535 | }\r |
1536 | \r | |
529a5a86 MK |
1537 | /**\r |
1538 | Perform the remaining tasks.\r | |
1539 | \r | |
1540 | **/\r | |
1541 | VOID\r | |
1542 | PerformRemainingTasks (\r | |
1543 | VOID\r | |
1544 | )\r | |
1545 | {\r | |
1546 | if (mSmmReadyToLock) {\r | |
1547 | //\r | |
1548 | // Start SMM Profile feature\r | |
1549 | //\r | |
1550 | if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {\r | |
1551 | SmmProfileStart ();\r | |
1552 | }\r | |
053e878b | 1553 | \r |
529a5a86 MK |
1554 | //\r |
1555 | // Create a mix of 2MB and 4KB page table. Update some memory ranges absent and execute-disable.\r | |
1556 | //\r | |
1557 | InitPaging ();\r | |
717fb604 JY |
1558 | \r |
1559 | //\r | |
1560 | // Mark critical region to be read-only in page table\r | |
1561 | //\r | |
d2fc7711 JY |
1562 | SetMemMapAttributes ();\r |
1563 | \r | |
79186ddc RN |
1564 | if (IsRestrictedMemoryAccess ()) {\r |
1565 | //\r | |
1566 | // For outside SMRAM, we only map SMM communication buffer or MMIO.\r | |
1567 | //\r | |
1568 | SetUefiMemMapAttributes ();\r | |
30f61485 | 1569 | \r |
79186ddc RN |
1570 | //\r |
1571 | // Set page table itself to be read-only\r | |
1572 | //\r | |
1573 | SetPageTableAttributes ();\r | |
1574 | }\r | |
717fb604 | 1575 | \r |
529a5a86 MK |
1576 | //\r |
1577 | // Configure SMM Code Access Check feature if available.\r | |
1578 | //\r | |
1579 | ConfigSmmCodeAccessCheck ();\r | |
1580 | \r | |
21c17193 JY |
1581 | SmmCpuFeaturesCompleteSmmReadyToLock ();\r |
1582 | \r | |
529a5a86 MK |
1583 | //\r |
1584 | // Clean SMM ready to lock flag\r | |
1585 | //\r | |
1586 | mSmmReadyToLock = FALSE;\r | |
1587 | }\r | |
1588 | }\r | |
9f419739 JY |
1589 | \r |
/**
  Perform the pre tasks.

  Thin wrapper that currently only invokes RestoreSmmConfigurationInS3.
  NOTE(review): the callee is defined elsewhere in this file; presumably it
  re-applies the SMM configuration on the S3 resume path — confirm against
  its definition.

**/
VOID
PerformPreTasks (
  VOID
  )
{
  RestoreSmmConfigurationInS3 ();
}