]> git.proxmox.com Git - mirror_edk2.git/blame - UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.c
UefiCpuPkg/PiSmmCpuDxeSmm: Replace mIsBsp by mBspApicId check
[mirror_edk2.git] / UefiCpuPkg / PiSmmCpuDxeSmm / PiSmmCpuDxeSmm.c
CommitLineData
529a5a86
MK
1/** @file\r
2Agent Module to load other modules to deploy SMM Entry Vector for X86 CPU.\r
3\r
cb4820b6 4Copyright (c) 2009 - 2023, Intel Corporation. All rights reserved.<BR>\r
241f9149
LD
5Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>\r
6\r
0acd8697 7SPDX-License-Identifier: BSD-2-Clause-Patent\r
529a5a86
MK
8\r
9**/\r
10\r
11#include "PiSmmCpuDxeSmm.h"\r
12\r
13//\r
14// SMM CPU Private Data structure that contains SMM Configuration Protocol\r
15// along its supporting fields.\r
16//\r
17SMM_CPU_PRIVATE_DATA mSmmCpuPrivateData = {\r
18 SMM_CPU_PRIVATE_DATA_SIGNATURE, // Signature\r
19 NULL, // SmmCpuHandle\r
20 NULL, // Pointer to ProcessorInfo array\r
21 NULL, // Pointer to Operation array\r
22 NULL, // Pointer to CpuSaveStateSize array\r
23 NULL, // Pointer to CpuSaveState array\r
053e878b
MK
24 {\r
25 { 0 }\r
26 }, // SmmReservedSmramRegion\r
529a5a86
MK
27 {\r
28 SmmStartupThisAp, // SmmCoreEntryContext.SmmStartupThisAp\r
29 0, // SmmCoreEntryContext.CurrentlyExecutingCpu\r
30 0, // SmmCoreEntryContext.NumberOfCpus\r
31 NULL, // SmmCoreEntryContext.CpuSaveStateSize\r
32 NULL // SmmCoreEntryContext.CpuSaveState\r
33 },\r
34 NULL, // SmmCoreEntry\r
35 {\r
36 mSmmCpuPrivateData.SmmReservedSmramRegion, // SmmConfiguration.SmramReservedRegions\r
37 RegisterSmmEntry // SmmConfiguration.RegisterSmmEntry\r
38 },\r
51dd408a 39 NULL, // pointer to Ap Wrapper Func array\r
053e878b 40 { NULL, NULL }, // List_Entry for Tokens.\r
529a5a86
MK
41};\r
42\r
053e878b 43CPU_HOT_PLUG_DATA mCpuHotPlugData = {\r
529a5a86
MK
44 CPU_HOT_PLUG_DATA_REVISION_1, // Revision\r
45 0, // Array Length of SmBase and APIC ID\r
46 NULL, // Pointer to APIC ID array\r
47 NULL, // Pointer to SMBASE array\r
48 0, // Reserved\r
49 0, // SmrrBase\r
50 0 // SmrrSize\r
51};\r
52\r
53//\r
54// Global pointer used to access mSmmCpuPrivateData from outside and inside SMM\r
55//\r
56SMM_CPU_PRIVATE_DATA *gSmmCpuPrivate = &mSmmCpuPrivateData;\r
57\r
58//\r
59// SMM Relocation variables\r
60//\r
61volatile BOOLEAN *mRebased;\r
529a5a86
MK
62\r
63///\r
64/// Handle for the SMM CPU Protocol\r
65///\r
66EFI_HANDLE mSmmCpuHandle = NULL;\r
67\r
68///\r
69/// SMM CPU Protocol instance\r
70///\r
053e878b 71EFI_SMM_CPU_PROTOCOL mSmmCpu = {\r
529a5a86
MK
72 SmmReadSaveState,\r
73 SmmWriteSaveState\r
74};\r
75\r
827330cc
JW
76///\r
77/// SMM Memory Attribute Protocol instance\r
78///\r
053e878b 79EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL mSmmMemoryAttribute = {\r
827330cc
JW
80 EdkiiSmmGetMemoryAttributes,\r
81 EdkiiSmmSetMemoryAttributes,\r
82 EdkiiSmmClearMemoryAttributes\r
83};\r
84\r
053e878b 85EFI_CPU_INTERRUPT_HANDLER mExternalVectorTable[EXCEPTION_VECTOR_NUMBER];\r
529a5a86 86\r
85c6c14c
WJ
87UINT32 mBspApicId = 0;\r
88\r
529a5a86
MK
89//\r
90// SMM stack information\r
91//\r
053e878b
MK
92UINTN mSmmStackArrayBase;\r
93UINTN mSmmStackArrayEnd;\r
94UINTN mSmmStackSize;\r
529a5a86 95\r
053e878b
MK
96UINTN mSmmShadowStackSize;\r
97BOOLEAN mCetSupported = TRUE;\r
3eb69b08 98\r
053e878b
MK
99UINTN mMaxNumberOfCpus = 1;\r
100UINTN mNumberOfCpus = 1;\r
529a5a86
MK
101\r
102//\r
103// SMM ready to lock flag\r
104//\r
053e878b 105BOOLEAN mSmmReadyToLock = FALSE;\r
529a5a86
MK
106\r
107//\r
108// Global used to cache PCD for SMM Code Access Check enable\r
109//\r
053e878b 110BOOLEAN mSmmCodeAccessCheckEnable = FALSE;\r
529a5a86 111\r
241f9149
LD
112//\r
113// Global copy of the PcdPteMemoryEncryptionAddressOrMask\r
114//\r
053e878b 115UINT64 mAddressEncMask = 0;\r
241f9149 116\r
529a5a86
MK
117//\r
118// Spin lock used to serialize setting of SMM Code Access Check feature\r
119//\r
053e878b 120SPIN_LOCK *mConfigSmmCodeAccessCheckLock = NULL;\r
529a5a86 121\r
7ed6f781
JF
122//\r
123// Saved SMM ranges information\r
124//\r
053e878b
MK
125EFI_SMRAM_DESCRIPTOR *mSmmCpuSmramRanges;\r
126UINTN mSmmCpuSmramRangeCount;\r
7ed6f781 127\r
053e878b 128UINT8 mPhysicalAddressBits;\r
51ce27fd 129\r
351b49c1
LE
130//\r
131// Control register contents saved for SMM S3 resume state initialization.\r
132//\r
053e878b
MK
133UINT32 mSmmCr0;\r
134UINT32 mSmmCr4;\r
351b49c1 135\r
529a5a86
MK
136/**\r
137 Initialize IDT to setup exception handlers for SMM.\r
138\r
139**/\r
140VOID\r
141InitializeSmmIdt (\r
142 VOID\r
143 )\r
144{\r
053e878b
MK
145 EFI_STATUS Status;\r
146 BOOLEAN InterruptState;\r
147 IA32_DESCRIPTOR DxeIdtr;\r
717fb604
JY
148\r
149 //\r
150 // There are 32 (not 255) entries in it since only processor\r
151 // generated exceptions will be handled.\r
152 //\r
053e878b 153 gcSmiIdtr.Limit = (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32) - 1;\r
717fb604
JY
154 //\r
155 // Allocate page aligned IDT, because it might be set as read only.\r
156 //\r
053e878b 157 gcSmiIdtr.Base = (UINTN)AllocateCodePages (EFI_SIZE_TO_PAGES (gcSmiIdtr.Limit + 1));\r
717fb604
JY
158 ASSERT (gcSmiIdtr.Base != 0);\r
159 ZeroMem ((VOID *)gcSmiIdtr.Base, gcSmiIdtr.Limit + 1);\r
160\r
529a5a86
MK
161 //\r
162 // Disable Interrupt and save DXE IDT table\r
163 //\r
164 InterruptState = SaveAndDisableInterrupts ();\r
165 AsmReadIdtr (&DxeIdtr);\r
166 //\r
167 // Load SMM temporary IDT table\r
168 //\r
169 AsmWriteIdtr (&gcSmiIdtr);\r
170 //\r
171 // Setup SMM default exception handlers, SMM IDT table\r
172 // will be updated and saved in gcSmiIdtr\r
173 //\r
174 Status = InitializeCpuExceptionHandlers (NULL);\r
175 ASSERT_EFI_ERROR (Status);\r
176 //\r
177 // Restore DXE IDT table and CPU interrupt\r
178 //\r
053e878b 179 AsmWriteIdtr ((IA32_DESCRIPTOR *)&DxeIdtr);\r
529a5a86
MK
180 SetInterruptState (InterruptState);\r
181}\r
182\r
183/**\r
184 Search module name by input IP address and output it.\r
185\r
186 @param CallerIpAddress Caller instruction pointer.\r
187\r
188**/\r
189VOID\r
190DumpModuleInfoByIp (\r
053e878b 191 IN UINTN CallerIpAddress\r
529a5a86
MK
192 )\r
193{\r
053e878b
MK
194 UINTN Pe32Data;\r
195 VOID *PdbPointer;\r
529a5a86
MK
196\r
197 //\r
198 // Find Image Base\r
199 //\r
9e981317 200 Pe32Data = PeCoffSearchImageBase (CallerIpAddress);\r
529a5a86 201 if (Pe32Data != 0) {\r
053e878b
MK
202 DEBUG ((DEBUG_ERROR, "It is invoked from the instruction before IP(0x%p)", (VOID *)CallerIpAddress));\r
203 PdbPointer = PeCoffLoaderGetPdbPointer ((VOID *)Pe32Data);\r
529a5a86 204 if (PdbPointer != NULL) {\r
b8caae19 205 DEBUG ((DEBUG_ERROR, " in module (%a)\n", PdbPointer));\r
529a5a86
MK
206 }\r
207 }\r
208}\r
209\r
210/**\r
211 Read information from the CPU save state.\r
212\r
213 @param This EFI_SMM_CPU_PROTOCOL instance\r
214 @param Width The number of bytes to read from the CPU save state.\r
215 @param Register Specifies the CPU register to read form the save state.\r
216 @param CpuIndex Specifies the zero-based index of the CPU save state.\r
217 @param Buffer Upon return, this holds the CPU register value read from the save state.\r
218\r
219 @retval EFI_SUCCESS The register was read from Save State\r
220 @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor\r
ef62da4f 221 @retval EFI_INVALID_PARAMETER This or Buffer is NULL.\r
529a5a86
MK
222\r
223**/\r
224EFI_STATUS\r
225EFIAPI\r
226SmmReadSaveState (\r
053e878b
MK
227 IN CONST EFI_SMM_CPU_PROTOCOL *This,\r
228 IN UINTN Width,\r
229 IN EFI_SMM_SAVE_STATE_REGISTER Register,\r
230 IN UINTN CpuIndex,\r
231 OUT VOID *Buffer\r
529a5a86
MK
232 )\r
233{\r
234 EFI_STATUS Status;\r
235\r
236 //\r
237 // Retrieve pointer to the specified CPU's SMM Save State buffer\r
238 //\r
239 if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {\r
240 return EFI_INVALID_PARAMETER;\r
241 }\r
053e878b 242\r
5b02be4d 243 //\r
b70ec0de
HW
244 // The SpeculationBarrier() call here is to ensure the above check for the\r
245 // CpuIndex has been completed before the execution of subsequent codes.\r
5b02be4d 246 //\r
b70ec0de 247 SpeculationBarrier ();\r
529a5a86
MK
248\r
249 //\r
250 // Check for special EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID\r
251 //\r
252 if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {\r
253 //\r
254 // The pseudo-register only supports the 64-bit size specified by Width.\r
255 //\r
256 if (Width != sizeof (UINT64)) {\r
257 return EFI_INVALID_PARAMETER;\r
258 }\r
053e878b 259\r
529a5a86
MK
260 //\r
261 // If the processor is in SMM at the time the SMI occurred,\r
262 // the pseudo register value for EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID is returned in Buffer.\r
263 // Otherwise, EFI_NOT_FOUND is returned.\r
264 //\r
ed3d5ecb 265 if (*(mSmmMpSyncData->CpuData[CpuIndex].Present)) {\r
529a5a86
MK
266 *(UINT64 *)Buffer = gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId;\r
267 return EFI_SUCCESS;\r
268 } else {\r
269 return EFI_NOT_FOUND;\r
270 }\r
271 }\r
272\r
ed3d5ecb 273 if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {\r
529a5a86
MK
274 return EFI_INVALID_PARAMETER;\r
275 }\r
276\r
277 Status = SmmCpuFeaturesReadSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
278 if (Status == EFI_UNSUPPORTED) {\r
279 Status = ReadSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
280 }\r
053e878b 281\r
529a5a86
MK
282 return Status;\r
283}\r
284\r
285/**\r
286 Write data to the CPU save state.\r
287\r
288 @param This EFI_SMM_CPU_PROTOCOL instance\r
289 @param Width The number of bytes to read from the CPU save state.\r
290 @param Register Specifies the CPU register to write to the save state.\r
291 @param CpuIndex Specifies the zero-based index of the CPU save state\r
292 @param Buffer Upon entry, this holds the new CPU register value.\r
293\r
294 @retval EFI_SUCCESS The register was written from Save State\r
295 @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor\r
ef62da4f 296 @retval EFI_INVALID_PARAMETER ProcessorIndex or Width is not correct\r
529a5a86
MK
297\r
298**/\r
299EFI_STATUS\r
300EFIAPI\r
301SmmWriteSaveState (\r
053e878b
MK
302 IN CONST EFI_SMM_CPU_PROTOCOL *This,\r
303 IN UINTN Width,\r
304 IN EFI_SMM_SAVE_STATE_REGISTER Register,\r
305 IN UINTN CpuIndex,\r
306 IN CONST VOID *Buffer\r
529a5a86
MK
307 )\r
308{\r
309 EFI_STATUS Status;\r
310\r
311 //\r
312 // Retrieve pointer to the specified CPU's SMM Save State buffer\r
313 //\r
314 if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {\r
315 return EFI_INVALID_PARAMETER;\r
316 }\r
317\r
318 //\r
319 // Writes to EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID are ignored\r
320 //\r
321 if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {\r
322 return EFI_SUCCESS;\r
323 }\r
324\r
325 if (!mSmmMpSyncData->CpuData[CpuIndex].Present) {\r
326 return EFI_INVALID_PARAMETER;\r
327 }\r
328\r
329 Status = SmmCpuFeaturesWriteSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
330 if (Status == EFI_UNSUPPORTED) {\r
331 Status = WriteSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
332 }\r
053e878b 333\r
529a5a86
MK
334 return Status;\r
335}\r
336\r
529a5a86
MK
337/**\r
338 C function for SMI handler. To change all processor's SMMBase Register.\r
339\r
340**/\r
341VOID\r
342EFIAPI\r
343SmmInitHandler (\r
344 VOID\r
345 )\r
346{\r
85c6c14c
WJ
347 UINT32 ApicId;\r
348 UINTN Index;\r
349 BOOLEAN IsBsp;\r
529a5a86
MK
350\r
351 //\r
352 // Update SMM IDT entries' code segment and load IDT\r
353 //\r
354 AsmWriteIdtr (&gcSmiIdtr);\r
355 ApicId = GetApicId ();\r
356\r
85c6c14c
WJ
357 IsBsp = (BOOLEAN)(mBspApicId == ApicId);\r
358\r
bb767506 359 ASSERT (mNumberOfCpus <= mMaxNumberOfCpus);\r
529a5a86
MK
360\r
361 for (Index = 0; Index < mNumberOfCpus; Index++) {\r
362 if (ApicId == (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {\r
363 //\r
364 // Initialize SMM specific features on the currently executing CPU\r
365 //\r
366 SmmCpuFeaturesInitializeProcessor (\r
367 Index,\r
85c6c14c 368 IsBsp,\r
529a5a86
MK
369 gSmmCpuPrivate->ProcessorInfo,\r
370 &mCpuHotPlugData\r
371 );\r
372\r
a46a4c90
JF
373 if (!mSmmS3Flag) {\r
374 //\r
375 // Check XD and BTS features on each processor on normal boot\r
376 //\r
51773d49 377 CheckFeatureSupported ();\r
85c6c14c 378 } else if (IsBsp) {\r
529a5a86
MK
379 //\r
380 // BSP rebase is already done above.\r
381 // Initialize private data during S3 resume\r
382 //\r
383 InitializeMpSyncData ();\r
384 }\r
385\r
386 //\r
387 // Hook return after RSM to set SMM re-based flag\r
388 //\r
389 SemaphoreHook (Index, &mRebased[Index]);\r
390\r
391 return;\r
392 }\r
393 }\r
053e878b 394\r
529a5a86
MK
395 ASSERT (FALSE);\r
396}\r
397\r
398/**\r
399 Relocate SmmBases for each processor.\r
400\r
401 Execute on first boot and all S3 resumes\r
402\r
403**/\r
404VOID\r
405EFIAPI\r
406SmmRelocateBases (\r
407 VOID\r
408 )\r
409{\r
410 UINT8 BakBuf[BACK_BUF_SIZE];\r
411 SMRAM_SAVE_STATE_MAP BakBuf2;\r
412 SMRAM_SAVE_STATE_MAP *CpuStatePtr;\r
413 UINT8 *U8Ptr;\r
529a5a86
MK
414 UINTN Index;\r
415 UINTN BspIndex;\r
416\r
417 //\r
418 // Make sure the reserved size is large enough for procedure SmmInitTemplate.\r
419 //\r
420 ASSERT (sizeof (BakBuf) >= gcSmmInitSize);\r
421\r
422 //\r
423 // Patch ASM code template with current CR0, CR3, and CR4 values\r
424 //\r
f0053e83
LE
425 mSmmCr0 = (UINT32)AsmReadCr0 ();\r
426 PatchInstructionX86 (gPatchSmmCr0, mSmmCr0, 4);\r
6b0841c1 427 PatchInstructionX86 (gPatchSmmCr3, AsmReadCr3 (), 4);\r
351b49c1 428 mSmmCr4 = (UINT32)AsmReadCr4 ();\r
3eb69b08 429 PatchInstructionX86 (gPatchSmmCr4, mSmmCr4 & (~CR4_CET_ENABLE), 4);\r
529a5a86
MK
430\r
431 //\r
432 // Patch GDTR for SMM base relocation\r
433 //\r
434 gcSmiInitGdtr.Base = gcSmiGdtr.Base;\r
435 gcSmiInitGdtr.Limit = gcSmiGdtr.Limit;\r
436\r
053e878b 437 U8Ptr = (UINT8 *)(UINTN)(SMM_DEFAULT_SMBASE + SMM_HANDLER_OFFSET);\r
529a5a86
MK
438 CpuStatePtr = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);\r
439\r
440 //\r
441 // Backup original contents at address 0x38000\r
442 //\r
443 CopyMem (BakBuf, U8Ptr, sizeof (BakBuf));\r
444 CopyMem (&BakBuf2, CpuStatePtr, sizeof (BakBuf2));\r
445\r
446 //\r
447 // Load image for relocation\r
448 //\r
449 CopyMem (U8Ptr, gcSmmInitTemplate, gcSmmInitSize);\r
450\r
451 //\r
452 // Retrieve the local APIC ID of current processor\r
453 //\r
85c6c14c 454 mBspApicId = GetApicId ();\r
529a5a86
MK
455\r
456 //\r
457 // Relocate SM bases for all APs\r
458 // This is APs' 1st SMI - rebase will be done here, and APs' default SMI handler will be overridden by gcSmmInitTemplate\r
459 //\r
529a5a86
MK
460 BspIndex = (UINTN)-1;\r
461 for (Index = 0; Index < mNumberOfCpus; Index++) {\r
462 mRebased[Index] = FALSE;\r
85c6c14c 463 if (mBspApicId != (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {\r
529a5a86
MK
464 SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);\r
465 //\r
466 // Wait for this AP to finish its 1st SMI\r
467 //\r
053e878b
MK
468 while (!mRebased[Index]) {\r
469 }\r
529a5a86
MK
470 } else {\r
471 //\r
472 // BSP will be Relocated later\r
473 //\r
474 BspIndex = Index;\r
475 }\r
476 }\r
477\r
478 //\r
479 // Relocate BSP's SMM base\r
480 //\r
481 ASSERT (BspIndex != (UINTN)-1);\r
85c6c14c 482 SendSmiIpi (mBspApicId);\r
529a5a86
MK
483 //\r
484 // Wait for the BSP to finish its 1st SMI\r
485 //\r
053e878b
MK
486 while (!mRebased[BspIndex]) {\r
487 }\r
529a5a86
MK
488\r
489 //\r
490 // Restore contents at address 0x38000\r
491 //\r
492 CopyMem (CpuStatePtr, &BakBuf2, sizeof (BakBuf2));\r
493 CopyMem (U8Ptr, BakBuf, sizeof (BakBuf));\r
494}\r
495\r
529a5a86
MK
496/**\r
497 SMM Ready To Lock event notification handler.\r
498\r
499 The CPU S3 data is copied to SMRAM for security and mSmmReadyToLock is set to\r
500 perform additional lock actions that must be performed from SMM on the next SMI.\r
501\r
502 @param[in] Protocol Points to the protocol's unique identifier.\r
503 @param[in] Interface Points to the interface instance.\r
504 @param[in] Handle The handle on which the interface was installed.\r
505\r
506 @retval EFI_SUCCESS Notification handler runs successfully.\r
507 **/\r
508EFI_STATUS\r
509EFIAPI\r
510SmmReadyToLockEventNotify (\r
511 IN CONST EFI_GUID *Protocol,\r
512 IN VOID *Interface,\r
513 IN EFI_HANDLE Handle\r
514 )\r
515{\r
0bdc9e75 516 GetAcpiCpuData ();\r
529a5a86 517\r
d2fc7711
JY
518 //\r
519 // Cache a copy of UEFI memory map before we start profiling feature.\r
520 //\r
521 GetUefiMemoryMap ();\r
522\r
529a5a86
MK
523 //\r
524 // Set SMM ready to lock flag and return\r
525 //\r
526 mSmmReadyToLock = TRUE;\r
527 return EFI_SUCCESS;\r
528}\r
529\r
530/**\r
531 The module Entry Point of the CPU SMM driver.\r
532\r
533 @param ImageHandle The firmware allocated handle for the EFI image.\r
534 @param SystemTable A pointer to the EFI System Table.\r
535\r
536 @retval EFI_SUCCESS The entry point is executed successfully.\r
537 @retval Other Some error occurs when executing this entry point.\r
538\r
539**/\r
540EFI_STATUS\r
541EFIAPI\r
542PiCpuSmmEntry (\r
543 IN EFI_HANDLE ImageHandle,\r
544 IN EFI_SYSTEM_TABLE *SystemTable\r
545 )\r
546{\r
053e878b
MK
547 EFI_STATUS Status;\r
548 EFI_MP_SERVICES_PROTOCOL *MpServices;\r
549 UINTN NumberOfEnabledProcessors;\r
550 UINTN Index;\r
551 VOID *Buffer;\r
552 UINTN BufferPages;\r
553 UINTN TileCodeSize;\r
554 UINTN TileDataSize;\r
555 UINTN TileSize;\r
556 UINT8 *Stacks;\r
557 VOID *Registration;\r
558 UINT32 RegEax;\r
559 UINT32 RegEbx;\r
560 UINT32 RegEcx;\r
561 UINT32 RegEdx;\r
562 UINTN FamilyId;\r
563 UINTN ModelId;\r
564 UINT32 Cr3;\r
529a5a86 565\r
e21e355e
LG
566 //\r
567 // Initialize address fixup\r
568 //\r
569 PiSmmCpuSmmInitFixupAddress ();\r
570 PiSmmCpuSmiEntryFixupAddress ();\r
571\r
529a5a86
MK
572 //\r
573 // Initialize Debug Agent to support source level debug in SMM code\r
574 //\r
575 InitializeDebugAgent (DEBUG_AGENT_INIT_SMM, NULL, NULL);\r
576\r
577 //\r
578 // Report the start of CPU SMM initialization.\r
579 //\r
580 REPORT_STATUS_CODE (\r
581 EFI_PROGRESS_CODE,\r
582 EFI_COMPUTING_UNIT_HOST_PROCESSOR | EFI_CU_HP_PC_SMM_INIT\r
583 );\r
584\r
529a5a86
MK
585 //\r
586 // Find out SMRR Base and SMRR Size\r
587 //\r
588 FindSmramInfo (&mCpuHotPlugData.SmrrBase, &mCpuHotPlugData.SmrrSize);\r
589\r
590 //\r
591 // Get MP Services Protocol\r
592 //\r
593 Status = SystemTable->BootServices->LocateProtocol (&gEfiMpServiceProtocolGuid, NULL, (VOID **)&MpServices);\r
594 ASSERT_EFI_ERROR (Status);\r
595\r
596 //\r
597 // Use MP Services Protocol to retrieve the number of processors and number of enabled processors\r
598 //\r
599 Status = MpServices->GetNumberOfProcessors (MpServices, &mNumberOfCpus, &NumberOfEnabledProcessors);\r
600 ASSERT_EFI_ERROR (Status);\r
601 ASSERT (mNumberOfCpus <= PcdGet32 (PcdCpuMaxLogicalProcessorNumber));\r
602\r
603 //\r
604 // If support CPU hot plug, PcdCpuSmmEnableBspElection should be set to TRUE.\r
605 // A constant BSP index makes no sense because it may be hot removed.\r
606 //\r
7c2a6033 607 DEBUG_CODE_BEGIN ();\r
053e878b
MK
608 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {\r
609 ASSERT (FeaturePcdGet (PcdCpuSmmEnableBspElection));\r
610 }\r
529a5a86 611\r
7c2a6033 612 DEBUG_CODE_END ();\r
529a5a86
MK
613\r
614 //\r
615 // Save the PcdCpuSmmCodeAccessCheckEnable value into a global variable.\r
616 //\r
617 mSmmCodeAccessCheckEnable = PcdGetBool (PcdCpuSmmCodeAccessCheckEnable);\r
96e1cba5 618 DEBUG ((DEBUG_INFO, "PcdCpuSmmCodeAccessCheckEnable = %d\n", mSmmCodeAccessCheckEnable));\r
529a5a86 619\r
241f9149
LD
620 //\r
621 // Save the PcdPteMemoryEncryptionAddressOrMask value into a global variable.\r
622 // Make sure AddressEncMask is contained to smallest supported address field.\r
623 //\r
624 mAddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;\r
96e1cba5 625 DEBUG ((DEBUG_INFO, "mAddressEncMask = 0x%lx\n", mAddressEncMask));\r
241f9149 626\r
529a5a86
MK
627 //\r
628 // If support CPU hot plug, we need to allocate resources for possibly hot-added processors\r
629 //\r
630 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {\r
631 mMaxNumberOfCpus = PcdGet32 (PcdCpuMaxLogicalProcessorNumber);\r
632 } else {\r
633 mMaxNumberOfCpus = mNumberOfCpus;\r
634 }\r
053e878b 635\r
529a5a86
MK
636 gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus = mMaxNumberOfCpus;\r
637\r
638 //\r
639 // The CPU save state and code for the SMI entry point are tiled within an SMRAM\r
640 // allocated buffer. The minimum size of this buffer for a uniprocessor system\r
641 // is 32 KB, because the entry point is SMBASE + 32KB, and CPU save state area\r
642 // just below SMBASE + 64KB. If more than one CPU is present in the platform,\r
643 // then the SMI entry point and the CPU save state areas can be tiles to minimize\r
644 // the total amount SMRAM required for all the CPUs. The tile size can be computed\r
645 // by adding the // CPU save state size, any extra CPU specific context, and\r
646 // the size of code that must be placed at the SMI entry point to transfer\r
647 // control to a C function in the native SMM execution mode. This size is\r
648 // rounded up to the nearest power of 2 to give the tile size for a each CPU.\r
649 // The total amount of memory required is the maximum number of CPUs that\r
650 // platform supports times the tile size. The picture below shows the tiling,\r
651 // where m is the number of tiles that fit in 32KB.\r
652 //\r
653 // +-----------------------------+ <-- 2^n offset from Base of allocated buffer\r
654 // | CPU m+1 Save State |\r
655 // +-----------------------------+\r
656 // | CPU m+1 Extra Data |\r
657 // +-----------------------------+\r
658 // | Padding |\r
659 // +-----------------------------+\r
660 // | CPU 2m SMI Entry |\r
661 // +#############################+ <-- Base of allocated buffer + 64 KB\r
662 // | CPU m-1 Save State |\r
663 // +-----------------------------+\r
664 // | CPU m-1 Extra Data |\r
665 // +-----------------------------+\r
666 // | Padding |\r
667 // +-----------------------------+\r
668 // | CPU 2m-1 SMI Entry |\r
669 // +=============================+ <-- 2^n offset from Base of allocated buffer\r
670 // | . . . . . . . . . . . . |\r
671 // +=============================+ <-- 2^n offset from Base of allocated buffer\r
672 // | CPU 2 Save State |\r
673 // +-----------------------------+\r
674 // | CPU 2 Extra Data |\r
675 // +-----------------------------+\r
676 // | Padding |\r
677 // +-----------------------------+\r
678 // | CPU m+1 SMI Entry |\r
679 // +=============================+ <-- Base of allocated buffer + 32 KB\r
680 // | CPU 1 Save State |\r
681 // +-----------------------------+\r
682 // | CPU 1 Extra Data |\r
683 // +-----------------------------+\r
684 // | Padding |\r
685 // +-----------------------------+\r
686 // | CPU m SMI Entry |\r
687 // +#############################+ <-- Base of allocated buffer + 32 KB == CPU 0 SMBASE + 64 KB\r
688 // | CPU 0 Save State |\r
689 // +-----------------------------+\r
690 // | CPU 0 Extra Data |\r
691 // +-----------------------------+\r
692 // | Padding |\r
693 // +-----------------------------+\r
694 // | CPU m-1 SMI Entry |\r
695 // +=============================+ <-- 2^n offset from Base of allocated buffer\r
696 // | . . . . . . . . . . . . |\r
697 // +=============================+ <-- 2^n offset from Base of allocated buffer\r
698 // | Padding |\r
699 // +-----------------------------+\r
700 // | CPU 1 SMI Entry |\r
701 // +=============================+ <-- 2^n offset from Base of allocated buffer\r
702 // | Padding |\r
703 // +-----------------------------+\r
704 // | CPU 0 SMI Entry |\r
705 // +#############################+ <-- Base of allocated buffer == CPU 0 SMBASE + 32 KB\r
706 //\r
707\r
708 //\r
709 // Retrieve CPU Family\r
710 //\r
e9b3a6c9 711 AsmCpuid (CPUID_VERSION_INFO, &RegEax, NULL, NULL, NULL);\r
529a5a86 712 FamilyId = (RegEax >> 8) & 0xf;\r
053e878b
MK
713 ModelId = (RegEax >> 4) & 0xf;\r
714 if ((FamilyId == 0x06) || (FamilyId == 0x0f)) {\r
529a5a86
MK
715 ModelId = ModelId | ((RegEax >> 12) & 0xf0);\r
716 }\r
717\r
e9b3a6c9
MK
718 RegEdx = 0;\r
719 AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);\r
720 if (RegEax >= CPUID_EXTENDED_CPU_SIG) {\r
721 AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);\r
722 }\r
053e878b 723\r
529a5a86
MK
724 //\r
725 // Determine the mode of the CPU at the time an SMI occurs\r
726 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual\r
727 // Volume 3C, Section 34.4.1.1\r
728 //\r
729 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT;\r
730 if ((RegEdx & BIT29) != 0) {\r
731 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;\r
732 }\r
053e878b 733\r
529a5a86 734 if (FamilyId == 0x06) {\r
053e878b 735 if ((ModelId == 0x17) || (ModelId == 0x0f) || (ModelId == 0x1c)) {\r
529a5a86
MK
736 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;\r
737 }\r
738 }\r
739\r
3eb69b08
JY
740 DEBUG ((DEBUG_INFO, "PcdControlFlowEnforcementPropertyMask = %d\n", PcdGet32 (PcdControlFlowEnforcementPropertyMask)));\r
741 if (PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) {\r
5d34cc49
WH
742 AsmCpuid (CPUID_SIGNATURE, &RegEax, NULL, NULL, NULL);\r
743 if (RegEax >= CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS) {\r
3eb69b08
JY
744 AsmCpuidEx (CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS, CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO, NULL, NULL, &RegEcx, &RegEdx);\r
745 DEBUG ((DEBUG_INFO, "CPUID[7/0] ECX - 0x%08x\n", RegEcx));\r
746 DEBUG ((DEBUG_INFO, " CET_SS - 0x%08x\n", RegEcx & CPUID_CET_SS));\r
747 DEBUG ((DEBUG_INFO, " CET_IBT - 0x%08x\n", RegEdx & CPUID_CET_IBT));\r
748 if ((RegEcx & CPUID_CET_SS) == 0) {\r
749 mCetSupported = FALSE;\r
750 PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);\r
751 }\r
053e878b 752\r
3eb69b08
JY
753 if (mCetSupported) {\r
754 AsmCpuidEx (CPUID_EXTENDED_STATE, CPUID_EXTENDED_STATE_SUB_LEAF, NULL, &RegEbx, &RegEcx, NULL);\r
755 DEBUG ((DEBUG_INFO, "CPUID[D/1] EBX - 0x%08x, ECX - 0x%08x\n", RegEbx, RegEcx));\r
756 AsmCpuidEx (CPUID_EXTENDED_STATE, 11, &RegEax, NULL, &RegEcx, NULL);\r
757 DEBUG ((DEBUG_INFO, "CPUID[D/11] EAX - 0x%08x, ECX - 0x%08x\n", RegEax, RegEcx));\r
053e878b 758 AsmCpuidEx (CPUID_EXTENDED_STATE, 12, &RegEax, NULL, &RegEcx, NULL);\r
3eb69b08
JY
759 DEBUG ((DEBUG_INFO, "CPUID[D/12] EAX - 0x%08x, ECX - 0x%08x\n", RegEax, RegEcx));\r
760 }\r
5d34cc49
WH
761 } else {\r
762 mCetSupported = FALSE;\r
053e878b 763 PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);\r
3eb69b08
JY
764 }\r
765 } else {\r
766 mCetSupported = FALSE;\r
767 PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);\r
768 }\r
769\r
529a5a86
MK
770 //\r
771 // Compute tile size of buffer required to hold the CPU SMRAM Save State Map, extra CPU\r
f12367a0
MK
772 // specific context start starts at SMBASE + SMM_PSD_OFFSET, and the SMI entry point.\r
773 // This size is rounded up to nearest power of 2.\r
529a5a86 774 //\r
ae82a30b 775 TileCodeSize = GetSmiHandlerSize ();\r
053e878b 776 TileCodeSize = ALIGN_VALUE (TileCodeSize, SIZE_4KB);\r
f12367a0 777 TileDataSize = (SMRAM_SAVE_STATE_MAP_OFFSET - SMM_PSD_OFFSET) + sizeof (SMRAM_SAVE_STATE_MAP);\r
053e878b
MK
778 TileDataSize = ALIGN_VALUE (TileDataSize, SIZE_4KB);\r
779 TileSize = TileDataSize + TileCodeSize - 1;\r
780 TileSize = 2 * GetPowerOfTwo32 ((UINT32)TileSize);\r
96e1cba5 781 DEBUG ((DEBUG_INFO, "SMRAM TileSize = 0x%08x (0x%08x, 0x%08x)\n", TileSize, TileCodeSize, TileDataSize));\r
529a5a86
MK
782\r
783 //\r
f12367a0
MK
784 // If the TileSize is larger than space available for the SMI Handler of\r
785 // CPU[i], the extra CPU specific context of CPU[i+1], and the SMRAM Save\r
786 // State Map of CPU[i+1], then ASSERT(). If this ASSERT() is triggered, then\r
787 // the SMI Handler size must be reduced or the size of the extra CPU specific\r
788 // context must be reduced.\r
529a5a86
MK
789 //\r
790 ASSERT (TileSize <= (SMRAM_SAVE_STATE_MAP_OFFSET + sizeof (SMRAM_SAVE_STATE_MAP) - SMM_HANDLER_OFFSET));\r
791\r
792 //\r
793 // Allocate buffer for all of the tiles.\r
794 //\r
795 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual\r
796 // Volume 3C, Section 34.11 SMBASE Relocation\r
797 // For Pentium and Intel486 processors, the SMBASE values must be\r
798 // aligned on a 32-KByte boundary or the processor will enter shutdown\r
799 // state during the execution of a RSM instruction.\r
800 //\r
801 // Intel486 processors: FamilyId is 4\r
802 // Pentium processors : FamilyId is 5\r
803 //\r
ae82a30b 804 BufferPages = EFI_SIZE_TO_PAGES (SIZE_32KB + TileSize * (mMaxNumberOfCpus - 1));\r
529a5a86 805 if ((FamilyId == 4) || (FamilyId == 5)) {\r
717fb604 806 Buffer = AllocateAlignedCodePages (BufferPages, SIZE_32KB);\r
529a5a86 807 } else {\r
717fb604 808 Buffer = AllocateAlignedCodePages (BufferPages, SIZE_4KB);\r
529a5a86 809 }\r
053e878b 810\r
529a5a86 811 ASSERT (Buffer != NULL);\r
053e878b 812 DEBUG ((DEBUG_INFO, "SMRAM SaveState Buffer (0x%08x, 0x%08x)\n", Buffer, EFI_PAGES_TO_SIZE (BufferPages)));\r
529a5a86
MK
813\r
814 //\r
815 // Allocate buffer for pointers to array in SMM_CPU_PRIVATE_DATA.\r
816 //\r
817 gSmmCpuPrivate->ProcessorInfo = (EFI_PROCESSOR_INFORMATION *)AllocatePool (sizeof (EFI_PROCESSOR_INFORMATION) * mMaxNumberOfCpus);\r
818 ASSERT (gSmmCpuPrivate->ProcessorInfo != NULL);\r
819\r
820 gSmmCpuPrivate->Operation = (SMM_CPU_OPERATION *)AllocatePool (sizeof (SMM_CPU_OPERATION) * mMaxNumberOfCpus);\r
821 ASSERT (gSmmCpuPrivate->Operation != NULL);\r
822\r
823 gSmmCpuPrivate->CpuSaveStateSize = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);\r
824 ASSERT (gSmmCpuPrivate->CpuSaveStateSize != NULL);\r
825\r
826 gSmmCpuPrivate->CpuSaveState = (VOID **)AllocatePool (sizeof (VOID *) * mMaxNumberOfCpus);\r
827 ASSERT (gSmmCpuPrivate->CpuSaveState != NULL);\r
828\r
829 mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveStateSize = gSmmCpuPrivate->CpuSaveStateSize;\r
830 mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveState = gSmmCpuPrivate->CpuSaveState;\r
529a5a86
MK
831\r
832 //\r
833 // Allocate buffer for pointers to array in CPU_HOT_PLUG_DATA.\r
834 //\r
835 mCpuHotPlugData.ApicId = (UINT64 *)AllocatePool (sizeof (UINT64) * mMaxNumberOfCpus);\r
836 ASSERT (mCpuHotPlugData.ApicId != NULL);\r
837 mCpuHotPlugData.SmBase = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);\r
838 ASSERT (mCpuHotPlugData.SmBase != NULL);\r
839 mCpuHotPlugData.ArrayLength = (UINT32)mMaxNumberOfCpus;\r
840\r
841 //\r
842 // Retrieve APIC ID of each enabled processor from the MP Services protocol.\r
843 // Also compute the SMBASE address, CPU Save State address, and CPU Save state\r
844 // size for each CPU in the platform\r
845 //\r
846 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {\r
053e878b
MK
847 mCpuHotPlugData.SmBase[Index] = (UINTN)Buffer + Index * TileSize - SMM_HANDLER_OFFSET;\r
848 gSmmCpuPrivate->CpuSaveStateSize[Index] = sizeof (SMRAM_SAVE_STATE_MAP);\r
529a5a86 849 gSmmCpuPrivate->CpuSaveState[Index] = (VOID *)(mCpuHotPlugData.SmBase[Index] + SMRAM_SAVE_STATE_MAP_OFFSET);\r
053e878b 850 gSmmCpuPrivate->Operation[Index] = SmmCpuNone;\r
529a5a86
MK
851\r
852 if (Index < mNumberOfCpus) {\r
853 Status = MpServices->GetProcessorInfo (MpServices, Index, &gSmmCpuPrivate->ProcessorInfo[Index]);\r
854 ASSERT_EFI_ERROR (Status);\r
855 mCpuHotPlugData.ApicId[Index] = gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId;\r
856\r
053e878b
MK
857 DEBUG ((\r
858 DEBUG_INFO,\r
859 "CPU[%03x] APIC ID=%04x SMBASE=%08x SaveState=%08x Size=%08x\n",\r
529a5a86
MK
860 Index,\r
861 (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId,\r
862 mCpuHotPlugData.SmBase[Index],\r
863 gSmmCpuPrivate->CpuSaveState[Index],\r
864 gSmmCpuPrivate->CpuSaveStateSize[Index]\r
865 ));\r
866 } else {\r
867 gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId = INVALID_APIC_ID;\r
053e878b 868 mCpuHotPlugData.ApicId[Index] = INVALID_APIC_ID;\r
529a5a86
MK
869 }\r
870 }\r
871\r
872 //\r
873 // Allocate SMI stacks for all processors.\r
874 //\r
3eb69b08 875 mSmmStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)));\r
529a5a86
MK
876 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
877 //\r
455b0347
S
878 // SMM Stack Guard Enabled\r
879 // 2 more pages is allocated for each processor, one is guard page and the other is known good stack.\r
529a5a86 880 //\r
455b0347
S
881 // +--------------------------------------------------+-----+--------------------------------------------------+\r
882 // | Known Good Stack | Guard Page | SMM Stack | ... | Known Good Stack | Guard Page | SMM Stack |\r
883 // +--------------------------------------------------+-----+--------------------------------------------------+\r
884 // | 4K | 4K PcdCpuSmmStackSize| | 4K | 4K PcdCpuSmmStackSize|\r
885 // |<---------------- mSmmStackSize ----------------->| |<---------------- mSmmStackSize ----------------->|\r
886 // | | | |\r
887 // |<------------------ Processor 0 ----------------->| |<------------------ Processor n ----------------->|\r
529a5a86 888 //\r
3eb69b08
JY
889 mSmmStackSize += EFI_PAGES_TO_SIZE (2);\r
890 }\r
891\r
892 mSmmShadowStackSize = 0;\r
893 if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {\r
3eb69b08 894 mSmmShadowStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmShadowStackSize)));\r
455b0347 895\r
3eb69b08 896 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
455b0347
S
897 //\r
898 // SMM Stack Guard Enabled\r
899 // Append Shadow Stack after normal stack\r
900 // 2 more pages is allocated for each processor, one is guard page and the other is known good shadow stack.\r
901 //\r
902 // |= Stacks\r
903 // +--------------------------------------------------+---------------------------------------------------------------+\r
904 // | Known Good Stack | Guard Page | SMM Stack | Known Good Shadow Stack | Guard Page | SMM Shadow Stack |\r
905 // +--------------------------------------------------+---------------------------------------------------------------+\r
906 // | 4K | 4K |PcdCpuSmmStackSize| 4K | 4K |PcdCpuSmmShadowStackSize|\r
907 // |<---------------- mSmmStackSize ----------------->|<--------------------- mSmmShadowStackSize ------------------->|\r
908 // | |\r
909 // |<-------------------------------------------- Processor N ------------------------------------------------------->|\r
910 //\r
3eb69b08 911 mSmmShadowStackSize += EFI_PAGES_TO_SIZE (2);\r
455b0347
S
912 } else {\r
913 //\r
914 // SMM Stack Guard Disabled (Known Good Stack is still required for potential stack switch.)\r
915 // Append Shadow Stack after normal stack with 1 more page as known good shadow stack.\r
916 // 1 more pages is allocated for each processor, it is known good stack.\r
917 //\r
918 //\r
919 // |= Stacks\r
920 // +-------------------------------------+--------------------------------------------------+\r
921 // | Known Good Stack | SMM Stack | Known Good Shadow Stack | SMM Shadow Stack |\r
922 // +-------------------------------------+--------------------------------------------------+\r
923 // | 4K |PcdCpuSmmStackSize| 4K |PcdCpuSmmShadowStackSize|\r
924 // |<---------- mSmmStackSize ---------->|<--------------- mSmmShadowStackSize ------------>|\r
925 // | |\r
926 // |<-------------------------------- Processor N ----------------------------------------->|\r
927 //\r
928 mSmmShadowStackSize += EFI_PAGES_TO_SIZE (1);\r
929 mSmmStackSize += EFI_PAGES_TO_SIZE (1);\r
3eb69b08
JY
930 }\r
931 }\r
932\r
053e878b 933 Stacks = (UINT8 *)AllocatePages (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (EFI_SIZE_TO_PAGES (mSmmStackSize + mSmmShadowStackSize)));\r
3eb69b08
JY
934 ASSERT (Stacks != NULL);\r
935 mSmmStackArrayBase = (UINTN)Stacks;\r
053e878b 936 mSmmStackArrayEnd = mSmmStackArrayBase + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (mSmmStackSize + mSmmShadowStackSize) - 1;\r
3eb69b08
JY
937\r
938 DEBUG ((DEBUG_INFO, "Stacks - 0x%x\n", Stacks));\r
939 DEBUG ((DEBUG_INFO, "mSmmStackSize - 0x%x\n", mSmmStackSize));\r
940 DEBUG ((DEBUG_INFO, "PcdCpuSmmStackGuard - 0x%x\n", FeaturePcdGet (PcdCpuSmmStackGuard)));\r
941 if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {\r
942 DEBUG ((DEBUG_INFO, "mSmmShadowStackSize - 0x%x\n", mSmmShadowStackSize));\r
529a5a86
MK
943 }\r
944\r
945 //\r
946 // Set SMI stack for SMM base relocation\r
947 //\r
5830d2c3
LE
948 PatchInstructionX86 (\r
949 gPatchSmmInitStack,\r
053e878b 950 (UINTN)(Stacks + mSmmStackSize - sizeof (UINTN)),\r
5830d2c3
LE
951 sizeof (UINTN)\r
952 );\r
529a5a86
MK
953\r
954 //\r
955 // Initialize IDT\r
956 //\r
957 InitializeSmmIdt ();\r
958\r
959 //\r
960 // Relocate SMM Base addresses to the ones allocated from SMRAM\r
961 //\r
962 mRebased = (BOOLEAN *)AllocateZeroPool (sizeof (BOOLEAN) * mMaxNumberOfCpus);\r
963 ASSERT (mRebased != NULL);\r
964 SmmRelocateBases ();\r
965\r
966 //\r
967 // Call hook for BSP to perform extra actions in normal mode after all\r
968 // SMM base addresses have been relocated on all CPUs\r
969 //\r
970 SmmCpuFeaturesSmmRelocationComplete ();\r
971\r
717fb604
JY
972 DEBUG ((DEBUG_INFO, "mXdSupported - 0x%x\n", mXdSupported));\r
973\r
529a5a86
MK
974 //\r
975 // SMM Time initialization\r
976 //\r
977 InitializeSmmTimer ();\r
978\r
979 //\r
980 // Initialize MP globals\r
981 //\r
3eb69b08
JY
982 Cr3 = InitializeMpServiceData (Stacks, mSmmStackSize, mSmmShadowStackSize);\r
983\r
984 if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {\r
985 for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {\r
986 SetShadowStack (\r
987 Cr3,\r
988 (EFI_PHYSICAL_ADDRESS)(UINTN)Stacks + mSmmStackSize + (mSmmStackSize + mSmmShadowStackSize) * Index,\r
989 mSmmShadowStackSize\r
990 );\r
991 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
992 SetNotPresentPage (\r
993 Cr3,\r
053e878b
MK
994 (EFI_PHYSICAL_ADDRESS)(UINTN)Stacks + mSmmStackSize + EFI_PAGES_TO_SIZE (1) + (mSmmStackSize + mSmmShadowStackSize) * Index,\r
995 EFI_PAGES_TO_SIZE (1)\r
3eb69b08
JY
996 );\r
997 }\r
998 }\r
999 }\r
529a5a86
MK
1000\r
1001 //\r
1002 // Fill in SMM Reserved Regions\r
1003 //\r
1004 gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedStart = 0;\r
1005 gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedSize = 0;\r
1006\r
1007 //\r
1008 // Install the SMM Configuration Protocol onto a new handle on the handle database.\r
1009 // The entire SMM Configuration Protocol is allocated from SMRAM, so only a pointer\r
1010 // to an SMRAM address will be present in the handle database\r
1011 //\r
1012 Status = SystemTable->BootServices->InstallMultipleProtocolInterfaces (\r
1013 &gSmmCpuPrivate->SmmCpuHandle,\r
053e878b
MK
1014 &gEfiSmmConfigurationProtocolGuid,\r
1015 &gSmmCpuPrivate->SmmConfiguration,\r
529a5a86
MK
1016 NULL\r
1017 );\r
1018 ASSERT_EFI_ERROR (Status);\r
1019\r
1020 //\r
1021 // Install the SMM CPU Protocol into SMM protocol database\r
1022 //\r
1023 Status = gSmst->SmmInstallProtocolInterface (\r
1024 &mSmmCpuHandle,\r
1025 &gEfiSmmCpuProtocolGuid,\r
1026 EFI_NATIVE_INTERFACE,\r
1027 &mSmmCpu\r
1028 );\r
1029 ASSERT_EFI_ERROR (Status);\r
1030\r
827330cc
JW
1031 //\r
1032 // Install the SMM Memory Attribute Protocol into SMM protocol database\r
1033 //\r
1034 Status = gSmst->SmmInstallProtocolInterface (\r
1035 &mSmmCpuHandle,\r
1036 &gEdkiiSmmMemoryAttributeProtocolGuid,\r
1037 EFI_NATIVE_INTERFACE,\r
1038 &mSmmMemoryAttribute\r
1039 );\r
1040 ASSERT_EFI_ERROR (Status);\r
1041\r
51dd408a
ED
1042 //\r
1043 // Initialize global buffer for MM MP.\r
1044 //\r
1045 InitializeDataForMmMp ();\r
1046\r
c14c4719
WJ
1047 //\r
1048 // Initialize Package First Thread Index Info.\r
1049 //\r
1050 InitPackageFirstThreadIndexInfo ();\r
1051\r
51dd408a
ED
1052 //\r
1053 // Install the SMM Mp Protocol into SMM protocol database\r
1054 //\r
1055 Status = gSmst->SmmInstallProtocolInterface (\r
1056 &mSmmCpuHandle,\r
1057 &gEfiMmMpProtocolGuid,\r
1058 EFI_NATIVE_INTERFACE,\r
1059 &mSmmMp\r
1060 );\r
1061 ASSERT_EFI_ERROR (Status);\r
1062\r
529a5a86
MK
1063 //\r
1064 // Expose address of CPU Hot Plug Data structure if CPU hot plug is supported.\r
1065 //\r
1066 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {\r
9838b016
MK
1067 Status = PcdSet64S (PcdCpuHotPlugDataAddress, (UINT64)(UINTN)&mCpuHotPlugData);\r
1068 ASSERT_EFI_ERROR (Status);\r
529a5a86
MK
1069 }\r
1070\r
1071 //\r
1072 // Initialize SMM CPU Services Support\r
1073 //\r
1074 Status = InitializeSmmCpuServices (mSmmCpuHandle);\r
1075 ASSERT_EFI_ERROR (Status);\r
1076\r
529a5a86
MK
1077 //\r
1078 // register SMM Ready To Lock Protocol notification\r
1079 //\r
1080 Status = gSmst->SmmRegisterProtocolNotify (\r
1081 &gEfiSmmReadyToLockProtocolGuid,\r
1082 SmmReadyToLockEventNotify,\r
1083 &Registration\r
1084 );\r
1085 ASSERT_EFI_ERROR (Status);\r
1086\r
529a5a86
MK
1087 //\r
1088 // Initialize SMM Profile feature\r
1089 //\r
1090 InitSmmProfile (Cr3);\r
1091\r
b10d5ddc 1092 GetAcpiS3EnableFlag ();\r
0bdc9e75 1093 InitSmmS3ResumeState (Cr3);\r
529a5a86 1094\r
96e1cba5 1095 DEBUG ((DEBUG_INFO, "SMM CPU Module exit from SMRAM with EFI_SUCCESS\n"));\r
529a5a86
MK
1096\r
1097 return EFI_SUCCESS;\r
1098}\r
1099\r
1100/**\r
1101\r
1102 Find out SMRAM information including SMRR base and SMRR size.\r
1103\r
1104 @param SmrrBase SMRR base\r
1105 @param SmrrSize SMRR size\r
1106\r
1107**/\r
1108VOID\r
1109FindSmramInfo (\r
053e878b
MK
1110 OUT UINT32 *SmrrBase,\r
1111 OUT UINT32 *SmrrSize\r
529a5a86
MK
1112 )\r
1113{\r
053e878b
MK
1114 EFI_STATUS Status;\r
1115 UINTN Size;\r
1116 EFI_SMM_ACCESS2_PROTOCOL *SmmAccess;\r
1117 EFI_SMRAM_DESCRIPTOR *CurrentSmramRange;\r
1118 UINTN Index;\r
1119 UINT64 MaxSize;\r
1120 BOOLEAN Found;\r
529a5a86
MK
1121\r
1122 //\r
1123 // Get SMM Access Protocol\r
1124 //\r
1125 Status = gBS->LocateProtocol (&gEfiSmmAccess2ProtocolGuid, NULL, (VOID **)&SmmAccess);\r
1126 ASSERT_EFI_ERROR (Status);\r
1127\r
1128 //\r
1129 // Get SMRAM information\r
1130 //\r
053e878b 1131 Size = 0;\r
529a5a86
MK
1132 Status = SmmAccess->GetCapabilities (SmmAccess, &Size, NULL);\r
1133 ASSERT (Status == EFI_BUFFER_TOO_SMALL);\r
1134\r
7ed6f781
JF
1135 mSmmCpuSmramRanges = (EFI_SMRAM_DESCRIPTOR *)AllocatePool (Size);\r
1136 ASSERT (mSmmCpuSmramRanges != NULL);\r
529a5a86 1137\r
7ed6f781 1138 Status = SmmAccess->GetCapabilities (SmmAccess, &Size, mSmmCpuSmramRanges);\r
529a5a86
MK
1139 ASSERT_EFI_ERROR (Status);\r
1140\r
7ed6f781 1141 mSmmCpuSmramRangeCount = Size / sizeof (EFI_SMRAM_DESCRIPTOR);\r
529a5a86
MK
1142\r
1143 //\r
1144 // Find the largest SMRAM range between 1MB and 4GB that is at least 256K - 4K in size\r
1145 //\r
1146 CurrentSmramRange = NULL;\r
7ed6f781 1147 for (Index = 0, MaxSize = SIZE_256KB - EFI_PAGE_SIZE; Index < mSmmCpuSmramRangeCount; Index++) {\r
529a5a86
MK
1148 //\r
1149 // Skip any SMRAM region that is already allocated, needs testing, or needs ECC initialization\r
1150 //\r
7ed6f781 1151 if ((mSmmCpuSmramRanges[Index].RegionState & (EFI_ALLOCATED | EFI_NEEDS_TESTING | EFI_NEEDS_ECC_INITIALIZATION)) != 0) {\r
529a5a86
MK
1152 continue;\r
1153 }\r
1154\r
7ed6f781
JF
1155 if (mSmmCpuSmramRanges[Index].CpuStart >= BASE_1MB) {\r
1156 if ((mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize) <= SMRR_MAX_ADDRESS) {\r
1157 if (mSmmCpuSmramRanges[Index].PhysicalSize >= MaxSize) {\r
053e878b 1158 MaxSize = mSmmCpuSmramRanges[Index].PhysicalSize;\r
7ed6f781 1159 CurrentSmramRange = &mSmmCpuSmramRanges[Index];\r
529a5a86
MK
1160 }\r
1161 }\r
1162 }\r
1163 }\r
1164\r
1165 ASSERT (CurrentSmramRange != NULL);\r
1166\r
1167 *SmrrBase = (UINT32)CurrentSmramRange->CpuStart;\r
1168 *SmrrSize = (UINT32)CurrentSmramRange->PhysicalSize;\r
1169\r
1170 do {\r
1171 Found = FALSE;\r
7ed6f781 1172 for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {\r
053e878b
MK
1173 if ((mSmmCpuSmramRanges[Index].CpuStart < *SmrrBase) &&\r
1174 (*SmrrBase == (mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize)))\r
1175 {\r
7ed6f781
JF
1176 *SmrrBase = (UINT32)mSmmCpuSmramRanges[Index].CpuStart;\r
1177 *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);\r
053e878b
MK
1178 Found = TRUE;\r
1179 } else if (((*SmrrBase + *SmrrSize) == mSmmCpuSmramRanges[Index].CpuStart) && (mSmmCpuSmramRanges[Index].PhysicalSize > 0)) {\r
7ed6f781 1180 *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);\r
053e878b 1181 Found = TRUE;\r
529a5a86
MK
1182 }\r
1183 }\r
1184 } while (Found);\r
1185\r
96e1cba5 1186 DEBUG ((DEBUG_INFO, "SMRR Base: 0x%x, SMRR Size: 0x%x\n", *SmrrBase, *SmrrSize));\r
529a5a86
MK
1187}\r
1188\r
1189/**\r
1190Configure SMM Code Access Check feature on an AP.\r
1191SMM Feature Control MSR will be locked after configuration.\r
1192\r
1193@param[in,out] Buffer Pointer to private data buffer.\r
1194**/\r
1195VOID\r
1196EFIAPI\r
1197ConfigSmmCodeAccessCheckOnCurrentProcessor (\r
1198 IN OUT VOID *Buffer\r
1199 )\r
1200{\r
1201 UINTN CpuIndex;\r
1202 UINT64 SmmFeatureControlMsr;\r
1203 UINT64 NewSmmFeatureControlMsr;\r
1204\r
1205 //\r
1206 // Retrieve the CPU Index from the context passed in\r
1207 //\r
1208 CpuIndex = *(UINTN *)Buffer;\r
1209\r
1210 //\r
1211 // Get the current SMM Feature Control MSR value\r
1212 //\r
1213 SmmFeatureControlMsr = SmmCpuFeaturesGetSmmRegister (CpuIndex, SmmRegFeatureControl);\r
1214\r
1215 //\r
1216 // Compute the new SMM Feature Control MSR value\r
1217 //\r
1218 NewSmmFeatureControlMsr = SmmFeatureControlMsr;\r
1219 if (mSmmCodeAccessCheckEnable) {\r
1220 NewSmmFeatureControlMsr |= SMM_CODE_CHK_EN_BIT;\r
f6bc3a6d
JF
1221 if (FeaturePcdGet (PcdCpuSmmFeatureControlMsrLock)) {\r
1222 NewSmmFeatureControlMsr |= SMM_FEATURE_CONTROL_LOCK_BIT;\r
1223 }\r
529a5a86
MK
1224 }\r
1225\r
1226 //\r
1227 // Only set the SMM Feature Control MSR value if the new value is different than the current value\r
1228 //\r
1229 if (NewSmmFeatureControlMsr != SmmFeatureControlMsr) {\r
1230 SmmCpuFeaturesSetSmmRegister (CpuIndex, SmmRegFeatureControl, NewSmmFeatureControlMsr);\r
1231 }\r
1232\r
1233 //\r
1234 // Release the spin lock user to serialize the updates to the SMM Feature Control MSR\r
1235 //\r
fe3a75bc 1236 ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);\r
529a5a86
MK
1237}\r
1238\r
/**
Configure SMM Code Access Check feature for all processors.
SMM Feature Control MSR will be locked after configuration.

The BSP configures itself first, then serially dispatches each AP via
SmmStartupThisAp(). mConfigSmmCodeAccessCheckLock serializes the MSR
programming: the BSP acquires it before each dispatch and the target CPU
releases it from ConfigSmmCodeAccessCheckOnCurrentProcessor() when done.
**/
VOID
ConfigSmmCodeAccessCheck (
  VOID
  )
{
  UINTN       Index;
  EFI_STATUS  Status;

  //
  // Check to see if the Feature Control MSR is supported on this CPU
  //
  Index = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
  if (!SmmCpuFeaturesIsSmmRegisterSupported (Index, SmmRegFeatureControl)) {
    mSmmCodeAccessCheckEnable = FALSE;
    return;
  }

  //
  // Check to see if the CPU supports the SMM Code Access Check feature
  // Do not access this MSR unless the CPU supports the SmmRegFeatureControl
  //
  if ((AsmReadMsr64 (EFI_MSR_SMM_MCA_CAP) & SMM_CODE_ACCESS_CHK_BIT) == 0) {
    mSmmCodeAccessCheckEnable = FALSE;
    return;
  }

  //
  // Initialize the lock used to serialize the MSR programming in BSP and all APs
  //
  InitializeSpinLock (mConfigSmmCodeAccessCheckLock);

  //
  // Acquire Config SMM Code Access Check spin lock. The BSP will release the
  // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
  //
  AcquireSpinLock (mConfigSmmCodeAccessCheckLock);

  //
  // Enable SMM Code Access Check feature on the BSP.
  // Note: Index currently holds the BSP's CPU index and is passed as context.
  //
  ConfigSmmCodeAccessCheckOnCurrentProcessor (&Index);

  //
  // Enable SMM Code Access Check feature for the APs.
  //
  for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {
    if (Index != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
      if (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId == INVALID_APIC_ID) {
        //
        // If this processor does not exist
        //
        continue;
      }

      //
      // Acquire Config SMM Code Access Check spin lock. The AP will release the
      // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
      //
      AcquireSpinLock (mConfigSmmCodeAccessCheckLock);

      //
      // Call SmmStartupThisAp() to enable SMM Code Access Check on an AP.
      // Passing &Index is safe because the BSP blocks below until the AP
      // releases the lock, so Index is not advanced while the AP reads it.
      //
      Status = gSmst->SmmStartupThisAp (ConfigSmmCodeAccessCheckOnCurrentProcessor, Index, &Index);
      ASSERT_EFI_ERROR (Status);

      //
      // Wait for the AP to release the Config SMM Code Access Check spin lock.
      //
      while (!AcquireSpinLockOrFail (mConfigSmmCodeAccessCheckLock)) {
        CpuPause ();
      }

      //
      // Release the Config SMM Code Access Check spin lock.
      //
      ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);
    }
  }
}
1323\r
717fb604
JY
1324/**\r
1325 Allocate pages for code.\r
1326\r
1327 @param[in] Pages Number of pages to be allocated.\r
1328\r
1329 @return Allocated memory.\r
1330**/\r
1331VOID *\r
1332AllocateCodePages (\r
053e878b 1333 IN UINTN Pages\r
717fb604
JY
1334 )\r
1335{\r
1336 EFI_STATUS Status;\r
1337 EFI_PHYSICAL_ADDRESS Memory;\r
1338\r
1339 if (Pages == 0) {\r
1340 return NULL;\r
1341 }\r
1342\r
1343 Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);\r
1344 if (EFI_ERROR (Status)) {\r
1345 return NULL;\r
1346 }\r
053e878b
MK
1347\r
1348 return (VOID *)(UINTN)Memory;\r
717fb604
JY
1349}\r
1350\r
/**
  Allocate aligned pages for code.

  When Alignment exceeds the page size, extra pages are over-allocated and the
  unaligned head and tail portions are freed back, leaving exactly Pages pages
  at an Alignment-aligned address. Pages are allocated from SMRAM with type
  EfiRuntimeServicesCode.

  @param[in] Pages      Number of pages to be allocated.
  @param[in] Alignment  The requested alignment of the allocation.
                        Must be a power of two.
                        If Alignment is zero, then byte alignment is used.

  @return Allocated memory, or NULL when Pages is zero or allocation fails.
**/
VOID *
AllocateAlignedCodePages (
  IN UINTN  Pages,
  IN UINTN  Alignment
  )
{
  EFI_STATUS            Status;
  EFI_PHYSICAL_ADDRESS  Memory;
  UINTN                 AlignedMemory;
  UINTN                 AlignmentMask;
  UINTN                 UnalignedPages;
  UINTN                 RealPages;

  //
  // Alignment must be a power of two or zero.
  //
  ASSERT ((Alignment & (Alignment - 1)) == 0);

  if (Pages == 0) {
    return NULL;
  }

  if (Alignment > EFI_PAGE_SIZE) {
    //
    // Calculate the total number of pages since alignment is larger than page size.
    // Over-allocating by EFI_SIZE_TO_PAGES (Alignment) guarantees an aligned
    // address exists somewhere inside the allocation.
    //
    AlignmentMask = Alignment - 1;
    RealPages     = Pages + EFI_SIZE_TO_PAGES (Alignment);
    //
    // Make sure that Pages plus EFI_SIZE_TO_PAGES (Alignment) does not overflow.
    // NOTE(review): this is ASSERT-only; in a build without asserts an overflow
    // would proceed unchecked.
    //
    ASSERT (RealPages > Pages);

    Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, RealPages, &Memory);
    if (EFI_ERROR (Status)) {
      return NULL;
    }

    //
    // Round up to the first aligned address inside the allocation, then free
    // the unused pages in front of it.
    //
    AlignedMemory  = ((UINTN)Memory + AlignmentMask) & ~AlignmentMask;
    UnalignedPages = EFI_SIZE_TO_PAGES (AlignedMemory - (UINTN)Memory);
    if (UnalignedPages > 0) {
      //
      // Free first unaligned page(s).
      //
      Status = gSmst->SmmFreePages (Memory, UnalignedPages);
      ASSERT_EFI_ERROR (Status);
    }

    //
    // Free whatever remains past the end of the aligned Pages-page region.
    //
    Memory         = AlignedMemory + EFI_PAGES_TO_SIZE (Pages);
    UnalignedPages = RealPages - Pages - UnalignedPages;
    if (UnalignedPages > 0) {
      //
      // Free last unaligned page(s).
      //
      Status = gSmst->SmmFreePages (Memory, UnalignedPages);
      ASSERT_EFI_ERROR (Status);
    }
  } else {
    //
    // Do not over-allocate pages in this case: page allocations are already
    // page-aligned, which satisfies any Alignment <= EFI_PAGE_SIZE.
    //
    Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);
    if (EFI_ERROR (Status)) {
      return NULL;
    }

    AlignedMemory = (UINTN)Memory;
  }

  return (VOID *)AlignedMemory;
}
1432\r
529a5a86
MK
1433/**\r
1434 Perform the remaining tasks.\r
1435\r
1436**/\r
1437VOID\r
1438PerformRemainingTasks (\r
1439 VOID\r
1440 )\r
1441{\r
1442 if (mSmmReadyToLock) {\r
1443 //\r
1444 // Start SMM Profile feature\r
1445 //\r
1446 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {\r
1447 SmmProfileStart ();\r
1448 }\r
053e878b 1449\r
529a5a86
MK
1450 //\r
1451 // Create a mix of 2MB and 4KB page table. Update some memory ranges absent and execute-disable.\r
1452 //\r
1453 InitPaging ();\r
717fb604
JY
1454\r
1455 //\r
1456 // Mark critical region to be read-only in page table\r
1457 //\r
d2fc7711
JY
1458 SetMemMapAttributes ();\r
1459\r
79186ddc
RN
1460 if (IsRestrictedMemoryAccess ()) {\r
1461 //\r
1462 // For outside SMRAM, we only map SMM communication buffer or MMIO.\r
1463 //\r
1464 SetUefiMemMapAttributes ();\r
30f61485 1465\r
79186ddc
RN
1466 //\r
1467 // Set page table itself to be read-only\r
1468 //\r
1469 SetPageTableAttributes ();\r
1470 }\r
717fb604 1471\r
529a5a86
MK
1472 //\r
1473 // Configure SMM Code Access Check feature if available.\r
1474 //\r
1475 ConfigSmmCodeAccessCheck ();\r
1476\r
21c17193
JY
1477 SmmCpuFeaturesCompleteSmmReadyToLock ();\r
1478\r
529a5a86
MK
1479 //\r
1480 // Clean SMM ready to lock flag\r
1481 //\r
1482 mSmmReadyToLock = FALSE;\r
1483 }\r
1484}\r
9f419739
JY
1485\r
/**
  Perform the pre tasks.

  Currently this only restores the SMM configuration when resuming along the
  ACPI S3 boot path; it is a no-op on a normal boot.

**/
VOID
PerformPreTasks (
  VOID
  )
{
  RestoreSmmConfigurationInS3 ();
}