/** @file
Agent Module to load other modules to deploy SMM Entry Vector for X86 CPU.

Copyright (c) 2009 - 2023, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include "PiSmmCpuDxeSmm.h"

//
// SMM CPU Private Data structure that contains SMM Configuration Protocol
// along with its supporting fields.
//
SMM_CPU_PRIVATE_DATA  mSmmCpuPrivateData = {
  SMM_CPU_PRIVATE_DATA_SIGNATURE,               // Signature
  NULL,                                         // SmmCpuHandle
  NULL,                                         // Pointer to ProcessorInfo array
  NULL,                                         // Pointer to Operation array
  NULL,                                         // Pointer to CpuSaveStateSize array
  NULL,                                         // Pointer to CpuSaveState array
  {
    { 0 }
  },                                            // SmmReservedSmramRegion
  {
    SmmStartupThisAp,                           // SmmCoreEntryContext.SmmStartupThisAp
    0,                                          // SmmCoreEntryContext.CurrentlyExecutingCpu
    0,                                          // SmmCoreEntryContext.NumberOfCpus
    NULL,                                       // SmmCoreEntryContext.CpuSaveStateSize
    NULL                                        // SmmCoreEntryContext.CpuSaveState
  },
  NULL,                                         // SmmCoreEntry
  {
    mSmmCpuPrivateData.SmmReservedSmramRegion,  // SmmConfiguration.SmramReservedRegions
    RegisterSmmEntry                            // SmmConfiguration.RegisterSmmEntry
  },
  NULL,                                         // pointer to Ap Wrapper Func array
  { NULL, NULL },                               // List_Entry for Tokens.
};

CPU_HOT_PLUG_DATA  mCpuHotPlugData = {
  CPU_HOT_PLUG_DATA_REVISION_1,                 // Revision
  0,                                            // Array Length of SmBase and APIC ID
  NULL,                                         // Pointer to APIC ID array
  NULL,                                         // Pointer to SMBASE array
  0,                                            // Reserved
  0,                                            // SmrrBase
  0                                             // SmrrSize
};

//
// Global pointer used to access mSmmCpuPrivateData from outside and inside SMM
//
SMM_CPU_PRIVATE_DATA  *gSmmCpuPrivate = &mSmmCpuPrivateData;

//
// SMM Relocation variables
//
volatile BOOLEAN  *mRebased;
volatile BOOLEAN  mIsBsp;

///
/// Handle for the SMM CPU Protocol
///
EFI_HANDLE  mSmmCpuHandle = NULL;

///
/// SMM CPU Protocol instance
///
EFI_SMM_CPU_PROTOCOL  mSmmCpu = {
  SmmReadSaveState,
  SmmWriteSaveState
};

///
/// SMM Memory Attribute Protocol instance
///
EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL  mSmmMemoryAttribute = {
  EdkiiSmmGetMemoryAttributes,
  EdkiiSmmSetMemoryAttributes,
  EdkiiSmmClearMemoryAttributes
};

EFI_CPU_INTERRUPT_HANDLER  mExternalVectorTable[EXCEPTION_VECTOR_NUMBER];

//
// SMM stack information
//
UINTN  mSmmStackArrayBase;
UINTN  mSmmStackArrayEnd;
UINTN  mSmmStackSize;

UINTN    mSmmShadowStackSize;
BOOLEAN  mCetSupported = TRUE;

UINTN  mMaxNumberOfCpus = 1;
UINTN  mNumberOfCpus    = 1;

//
// SMM ready to lock flag
//
BOOLEAN  mSmmReadyToLock = FALSE;

//
// Global used to cache PCD for SMM Code Access Check enable
//
BOOLEAN  mSmmCodeAccessCheckEnable = FALSE;

//
// Global copy of the PcdPteMemoryEncryptionAddressOrMask
//
UINT64  mAddressEncMask = 0;

//
// Spin lock used to serialize setting of SMM Code Access Check feature
//
SPIN_LOCK  *mConfigSmmCodeAccessCheckLock = NULL;

//
// Saved SMM ranges information
//
EFI_SMRAM_DESCRIPTOR  *mSmmCpuSmramRanges;
UINTN                 mSmmCpuSmramRangeCount;

UINT8  mPhysicalAddressBits;

//
// Control register contents saved for SMM S3 resume state initialization.
//
UINT32  mSmmCr0;
UINT32  mSmmCr4;

/**
  Initialize IDT to setup exception handlers for SMM.

**/
VOID
InitializeSmmIdt (
  VOID
  )
{
  EFI_STATUS       Status;
  BOOLEAN          InterruptState;
  IA32_DESCRIPTOR  DxeIdtr;

  //
  // There are 32 (not 255) entries in it since only processor
  // generated exceptions will be handled.
  //
  gcSmiIdtr.Limit = (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32) - 1;
  //
  // Allocate page aligned IDT, because it might be set as read only.
  //
  gcSmiIdtr.Base = (UINTN)AllocateCodePages (EFI_SIZE_TO_PAGES (gcSmiIdtr.Limit + 1));
  ASSERT (gcSmiIdtr.Base != 0);
  ZeroMem ((VOID *)gcSmiIdtr.Base, gcSmiIdtr.Limit + 1);

  //
  // Disable Interrupt and save DXE IDT table
  //
  InterruptState = SaveAndDisableInterrupts ();
  AsmReadIdtr (&DxeIdtr);
  //
  // Load SMM temporary IDT table
  //
  AsmWriteIdtr (&gcSmiIdtr);
  //
  // Setup SMM default exception handlers, SMM IDT table
  // will be updated and saved in gcSmiIdtr
  //
  Status = InitializeCpuExceptionHandlers (NULL);
  ASSERT_EFI_ERROR (Status);
  //
  // Restore DXE IDT table and CPU interrupt
  //
  AsmWriteIdtr ((IA32_DESCRIPTOR *)&DxeIdtr);
  SetInterruptState (InterruptState);
}

/**
  Search module name by input IP address and output it.

  @param CallerIpAddress   Caller instruction pointer.

**/
VOID
DumpModuleInfoByIp (
  IN UINTN  CallerIpAddress
  )
{
  UINTN  Pe32Data;
  VOID   *PdbPointer;

  //
  // Find Image Base
  //
  Pe32Data = PeCoffSearchImageBase (CallerIpAddress);
  if (Pe32Data != 0) {
    DEBUG ((DEBUG_ERROR, "It is invoked from the instruction before IP(0x%p)", (VOID *)CallerIpAddress));
    PdbPointer = PeCoffLoaderGetPdbPointer ((VOID *)Pe32Data);
    if (PdbPointer != NULL) {
      DEBUG ((DEBUG_ERROR, " in module (%a)\n", PdbPointer));
    }
  }
}

/**
  Read information from the CPU save state.

  @param  This      EFI_SMM_CPU_PROTOCOL instance
  @param  Width     The number of bytes to read from the CPU save state.
  @param  Register  Specifies the CPU register to read from the save state.
  @param  CpuIndex  Specifies the zero-based index of the CPU save state.
  @param  Buffer    Upon return, this holds the CPU register value read from the save state.

  @retval EFI_SUCCESS            The register was read from Save State
  @retval EFI_NOT_FOUND          The register is not defined for the Save State of Processor
  @retval EFI_INVALID_PARAMETER  This or Buffer is NULL.

**/
EFI_STATUS
EFIAPI
SmmReadSaveState (
  IN CONST EFI_SMM_CPU_PROTOCOL   *This,
  IN UINTN                        Width,
  IN EFI_SMM_SAVE_STATE_REGISTER  Register,
  IN UINTN                        CpuIndex,
  OUT VOID                        *Buffer
  )
{
  EFI_STATUS  Status;

  //
  // Retrieve pointer to the specified CPU's SMM Save State buffer
  //
  if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {
    return EFI_INVALID_PARAMETER;
  }

  //
  // The SpeculationBarrier() call here is to ensure the above check for the
  // CpuIndex has been completed before the execution of subsequent codes.
  //
  SpeculationBarrier ();

  //
  // Check for special EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID
  //
  if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {
    //
    // The pseudo-register only supports the 64-bit size specified by Width.
    //
    if (Width != sizeof (UINT64)) {
      return EFI_INVALID_PARAMETER;
    }

    //
    // If the processor is in SMM at the time the SMI occurred,
    // the pseudo register value for EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID is returned in Buffer.
    // Otherwise, EFI_NOT_FOUND is returned.
    //
    if (*(mSmmMpSyncData->CpuData[CpuIndex].Present)) {
      *(UINT64 *)Buffer = gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId;
      return EFI_SUCCESS;
    } else {
      return EFI_NOT_FOUND;
    }
  }

  if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
    return EFI_INVALID_PARAMETER;
  }

  Status = SmmCpuFeaturesReadSaveStateRegister (CpuIndex, Register, Width, Buffer);
  if (Status == EFI_UNSUPPORTED) {
    Status = ReadSaveStateRegister (CpuIndex, Register, Width, Buffer);
  }

  return Status;
}

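//
// Illustrative usage sketch (not part of this driver): another SMM driver could
// read a save state register through the EFI_SMM_CPU_PROTOCOL installed by this
// module. The protocol, GUID, and register name below exist in the PI spec; the
// local variable names and the choice of CpuIndex are hypothetical.
//
//   EFI_SMM_CPU_PROTOCOL  *SmmCpu;
//   UINT64                Rax;
//   EFI_STATUS            Status;
//
//   Status = gSmst->SmmLocateProtocol (&gEfiSmmCpuProtocolGuid, NULL, (VOID **)&SmmCpu);
//   if (!EFI_ERROR (Status)) {
//     Status = SmmCpu->ReadSaveState (
//                        SmmCpu,
//                        sizeof (UINT64),
//                        EFI_SMM_SAVE_STATE_REGISTER_RAX,
//                        gSmst->CurrentlyExecutingCpu,  // zero-based CPU index
//                        &Rax
//                        );
//   }
//
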
/**
  Write data to the CPU save state.

  @param  This      EFI_SMM_CPU_PROTOCOL instance
  @param  Width     The number of bytes to write to the CPU save state.
  @param  Register  Specifies the CPU register to write to the save state.
  @param  CpuIndex  Specifies the zero-based index of the CPU save state
  @param  Buffer    Upon entry, this holds the new CPU register value.

  @retval EFI_SUCCESS            The register was written to Save State
  @retval EFI_NOT_FOUND          The register is not defined for the Save State of Processor
  @retval EFI_INVALID_PARAMETER  ProcessorIndex or Width is not correct

**/
EFI_STATUS
EFIAPI
SmmWriteSaveState (
  IN CONST EFI_SMM_CPU_PROTOCOL   *This,
  IN UINTN                        Width,
  IN EFI_SMM_SAVE_STATE_REGISTER  Register,
  IN UINTN                        CpuIndex,
  IN CONST VOID                   *Buffer
  )
{
  EFI_STATUS  Status;

  //
  // Retrieve pointer to the specified CPU's SMM Save State buffer
  //
  if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {
    return EFI_INVALID_PARAMETER;
  }

  //
  // Writes to EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID are ignored
  //
  if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {
    return EFI_SUCCESS;
  }

  if (!mSmmMpSyncData->CpuData[CpuIndex].Present) {
    return EFI_INVALID_PARAMETER;
  }

  Status = SmmCpuFeaturesWriteSaveStateRegister (CpuIndex, Register, Width, Buffer);
  if (Status == EFI_UNSUPPORTED) {
    Status = WriteSaveStateRegister (CpuIndex, Register, Width, Buffer);
  }

  return Status;
}

/**
  C function for SMI handler. To change all processors' SMMBase Register.

**/
VOID
EFIAPI
SmmInitHandler (
  VOID
  )
{
  UINT32  ApicId;
  UINTN   Index;

  //
  // Update SMM IDT entries' code segment and load IDT
  //
  AsmWriteIdtr (&gcSmiIdtr);
  ApicId = GetApicId ();

  ASSERT (mNumberOfCpus <= mMaxNumberOfCpus);

  for (Index = 0; Index < mNumberOfCpus; Index++) {
    if (ApicId == (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
      //
      // Initialize SMM specific features on the currently executing CPU
      //
      SmmCpuFeaturesInitializeProcessor (
        Index,
        mIsBsp,
        gSmmCpuPrivate->ProcessorInfo,
        &mCpuHotPlugData
        );

      if (!mSmmS3Flag) {
        //
        // Check XD and BTS features on each processor on normal boot
        //
        CheckFeatureSupported ();
      } else if (mIsBsp) {
        //
        // BSP rebase is already done above.
        // Initialize private data during S3 resume
        //
        InitializeMpSyncData ();
      }

      //
      // Hook return after RSM to set SMM re-based flag
      //
      SemaphoreHook (Index, &mRebased[Index]);

      return;
    }
  }

  ASSERT (FALSE);
}

/**
  Relocate SmmBases for each processor.

  Execute on first boot and all S3 resumes

**/
VOID
EFIAPI
SmmRelocateBases (
  VOID
  )
{
  UINT8                 BakBuf[BACK_BUF_SIZE];
  SMRAM_SAVE_STATE_MAP  BakBuf2;
  SMRAM_SAVE_STATE_MAP  *CpuStatePtr;
  UINT8                 *U8Ptr;
  UINT32                ApicId;
  UINTN                 Index;
  UINTN                 BspIndex;

  //
  // Make sure the reserved size is large enough for procedure SmmInitTemplate.
  //
  ASSERT (sizeof (BakBuf) >= gcSmmInitSize);

  //
  // Patch ASM code template with current CR0, CR3, and CR4 values
  //
  mSmmCr0 = (UINT32)AsmReadCr0 ();
  PatchInstructionX86 (gPatchSmmCr0, mSmmCr0, 4);
  PatchInstructionX86 (gPatchSmmCr3, AsmReadCr3 (), 4);
  mSmmCr4 = (UINT32)AsmReadCr4 ();
  PatchInstructionX86 (gPatchSmmCr4, mSmmCr4 & (~CR4_CET_ENABLE), 4);

  //
  // Patch GDTR for SMM base relocation
  //
  gcSmiInitGdtr.Base  = gcSmiGdtr.Base;
  gcSmiInitGdtr.Limit = gcSmiGdtr.Limit;

  U8Ptr       = (UINT8 *)(UINTN)(SMM_DEFAULT_SMBASE + SMM_HANDLER_OFFSET);
  CpuStatePtr = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);

  //
  // Backup original contents at address 0x38000
  //
  CopyMem (BakBuf, U8Ptr, sizeof (BakBuf));
  CopyMem (&BakBuf2, CpuStatePtr, sizeof (BakBuf2));

  //
  // Load image for relocation
  //
  CopyMem (U8Ptr, gcSmmInitTemplate, gcSmmInitSize);

  //
  // Retrieve the local APIC ID of current processor
  //
  ApicId = GetApicId ();

  //
  // Relocate SM bases for all APs
  // This is APs' 1st SMI - rebase will be done here, and APs' default SMI handler will be overridden by gcSmmInitTemplate
  //
  mIsBsp   = FALSE;
  BspIndex = (UINTN)-1;
  for (Index = 0; Index < mNumberOfCpus; Index++) {
    mRebased[Index] = FALSE;
    if (ApicId != (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
      SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
      //
      // Wait for this AP to finish its 1st SMI
      //
      while (!mRebased[Index]) {
      }
    } else {
      //
      // BSP will be relocated later
      //
      BspIndex = Index;
    }
  }

  //
  // Relocate BSP's SMM base
  //
  ASSERT (BspIndex != (UINTN)-1);
  mIsBsp = TRUE;
  SendSmiIpi (ApicId);
  //
  // Wait for the BSP to finish its 1st SMI
  //
  while (!mRebased[BspIndex]) {
  }

  //
  // Restore contents at address 0x38000
  //
  CopyMem (CpuStatePtr, &BakBuf2, sizeof (BakBuf2));
  CopyMem (U8Ptr, BakBuf, sizeof (BakBuf));
}

/**
  SMM Ready To Lock event notification handler.

  The CPU S3 data is copied to SMRAM for security and mSmmReadyToLock is set to
  perform additional lock actions that must be performed from SMM on the next SMI.

  @param[in] Protocol   Points to the protocol's unique identifier.
  @param[in] Interface  Points to the interface instance.
  @param[in] Handle     The handle on which the interface was installed.

  @retval EFI_SUCCESS   Notification handler runs successfully.
**/
EFI_STATUS
EFIAPI
SmmReadyToLockEventNotify (
  IN CONST EFI_GUID  *Protocol,
  IN VOID            *Interface,
  IN EFI_HANDLE      Handle
  )
{
  GetAcpiCpuData ();

  //
  // Cache a copy of UEFI memory map before we start profiling feature.
  //
  GetUefiMemoryMap ();

  //
  // Set SMM ready to lock flag and return
  //
  mSmmReadyToLock = TRUE;
  return EFI_SUCCESS;
}

/**
  The module Entry Point of the CPU SMM driver.

  @param  ImageHandle  The firmware allocated handle for the EFI image.
  @param  SystemTable  A pointer to the EFI System Table.

  @retval EFI_SUCCESS  The entry point is executed successfully.
  @retval Other        Some error occurs when executing this entry point.

**/
EFI_STATUS
EFIAPI
PiCpuSmmEntry (
  IN EFI_HANDLE        ImageHandle,
  IN EFI_SYSTEM_TABLE  *SystemTable
  )
{
  EFI_STATUS                Status;
  EFI_MP_SERVICES_PROTOCOL  *MpServices;
  UINTN                     NumberOfEnabledProcessors;
  UINTN                     Index;
  VOID                      *Buffer;
  UINTN                     BufferPages;
  UINTN                     TileCodeSize;
  UINTN                     TileDataSize;
  UINTN                     TileSize;
  UINT8                     *Stacks;
  VOID                      *Registration;
  UINT32                    RegEax;
  UINT32                    RegEbx;
  UINT32                    RegEcx;
  UINT32                    RegEdx;
  UINTN                     FamilyId;
  UINTN                     ModelId;
  UINT32                    Cr3;

  //
  // Initialize address fixup
  //
  PiSmmCpuSmmInitFixupAddress ();
  PiSmmCpuSmiEntryFixupAddress ();

  //
  // Initialize Debug Agent to support source level debug in SMM code
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_SMM, NULL, NULL);

  //
  // Report the start of CPU SMM initialization.
  //
  REPORT_STATUS_CODE (
    EFI_PROGRESS_CODE,
    EFI_COMPUTING_UNIT_HOST_PROCESSOR | EFI_CU_HP_PC_SMM_INIT
    );

  //
  // Find out SMRR Base and SMRR Size
  //
  FindSmramInfo (&mCpuHotPlugData.SmrrBase, &mCpuHotPlugData.SmrrSize);

  //
  // Get MP Services Protocol
  //
  Status = SystemTable->BootServices->LocateProtocol (&gEfiMpServiceProtocolGuid, NULL, (VOID **)&MpServices);
  ASSERT_EFI_ERROR (Status);

  //
  // Use MP Services Protocol to retrieve the number of processors and number of enabled processors
  //
  Status = MpServices->GetNumberOfProcessors (MpServices, &mNumberOfCpus, &NumberOfEnabledProcessors);
  ASSERT_EFI_ERROR (Status);
  ASSERT (mNumberOfCpus <= PcdGet32 (PcdCpuMaxLogicalProcessorNumber));

  //
  // If support CPU hot plug, PcdCpuSmmEnableBspElection should be set to TRUE.
  // A constant BSP index makes no sense because it may be hot removed.
  //
  DEBUG_CODE_BEGIN ();
  if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
    ASSERT (FeaturePcdGet (PcdCpuSmmEnableBspElection));
  }

  DEBUG_CODE_END ();

  //
  // Save the PcdCpuSmmCodeAccessCheckEnable value into a global variable.
  //
  mSmmCodeAccessCheckEnable = PcdGetBool (PcdCpuSmmCodeAccessCheckEnable);
  DEBUG ((DEBUG_INFO, "PcdCpuSmmCodeAccessCheckEnable = %d\n", mSmmCodeAccessCheckEnable));

  //
  // Save the PcdPteMemoryEncryptionAddressOrMask value into a global variable.
  // Make sure AddressEncMask is contained to smallest supported address field.
  //
  mAddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;
  DEBUG ((DEBUG_INFO, "mAddressEncMask = 0x%lx\n", mAddressEncMask));

  //
  // If support CPU hot plug, we need to allocate resources for possibly hot-added processors
  //
  if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
    mMaxNumberOfCpus = PcdGet32 (PcdCpuMaxLogicalProcessorNumber);
  } else {
    mMaxNumberOfCpus = mNumberOfCpus;
  }

  gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus = mMaxNumberOfCpus;

  //
  // The CPU save state and code for the SMI entry point are tiled within an SMRAM
  // allocated buffer. The minimum size of this buffer for a uniprocessor system
  // is 32 KB, because the entry point is SMBASE + 32KB, and CPU save state area
  // just below SMBASE + 64KB. If more than one CPU is present in the platform,
  // then the SMI entry point and the CPU save state areas can be tiled to minimize
  // the total amount of SMRAM required for all the CPUs. The tile size can be computed
  // by adding the CPU save state size, any extra CPU specific context, and
  // the size of code that must be placed at the SMI entry point to transfer
  // control to a C function in the native SMM execution mode. This size is
  // rounded up to the nearest power of 2 to give the tile size for each CPU.
  // The total amount of memory required is the maximum number of CPUs that
  // platform supports times the tile size. The picture below shows the tiling,
  // where m is the number of tiles that fit in 32KB.
  //
  //  +-----------------------------+ <-- 2^n offset from Base of allocated buffer
  //  |   CPU m+1 Save State        |
  //  +-----------------------------+
  //  |   CPU m+1 Extra Data        |
  //  +-----------------------------+
  //  |         Padding             |
  //  +-----------------------------+
  //  |   CPU 2m  SMI Entry         |
  //  +#############################+ <-- Base of allocated buffer + 64 KB
  //  |   CPU m-1 Save State        |
  //  +-----------------------------+
  //  |   CPU m-1 Extra Data        |
  //  +-----------------------------+
  //  |         Padding             |
  //  +-----------------------------+
  //  |   CPU 2m-1 SMI Entry        |
  //  +=============================+ <-- 2^n offset from Base of allocated buffer
  //  |   . . . . . . . . . . . .   |
  //  +=============================+ <-- 2^n offset from Base of allocated buffer
  //  |   CPU 2 Save State          |
  //  +-----------------------------+
  //  |   CPU 2 Extra Data          |
  //  +-----------------------------+
  //  |         Padding             |
  //  +-----------------------------+
  //  |   CPU m+1 SMI Entry         |
  //  +=============================+ <-- Base of allocated buffer + 32 KB
  //  |   CPU 1 Save State          |
  //  +-----------------------------+
  //  |   CPU 1 Extra Data          |
  //  +-----------------------------+
  //  |         Padding             |
  //  +-----------------------------+
  //  |   CPU m SMI Entry           |
  //  +#############################+ <-- Base of allocated buffer + 32 KB == CPU 0 SMBASE + 64 KB
  //  |   CPU 0 Save State          |
  //  +-----------------------------+
  //  |   CPU 0 Extra Data          |
  //  +-----------------------------+
  //  |         Padding             |
  //  +-----------------------------+
  //  |   CPU m-1 SMI Entry         |
  //  +=============================+ <-- 2^n offset from Base of allocated buffer
  //  |   . . . . . . . . . . . .   |
  //  +=============================+ <-- 2^n offset from Base of allocated buffer
  //  |         Padding             |
  //  +-----------------------------+
  //  |   CPU 1 SMI Entry           |
  //  +=============================+ <-- 2^n offset from Base of allocated buffer
  //  |         Padding             |
  //  +-----------------------------+
  //  |   CPU 0 SMI Entry           |
  //  +#############################+ <-- Base of allocated buffer == CPU 0 SMBASE + 32 KB
  //

  //
  // Retrieve CPU Family
  //
  AsmCpuid (CPUID_VERSION_INFO, &RegEax, NULL, NULL, NULL);
  FamilyId = (RegEax >> 8) & 0xf;
  ModelId  = (RegEax >> 4) & 0xf;
  if ((FamilyId == 0x06) || (FamilyId == 0x0f)) {
    ModelId = ModelId | ((RegEax >> 12) & 0xf0);
  }

  RegEdx = 0;
  AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
  if (RegEax >= CPUID_EXTENDED_CPU_SIG) {
    AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);
  }

  //
  // Determine the mode of the CPU at the time an SMI occurs
  //   Intel(R) 64 and IA-32 Architectures Software Developer's Manual
  //   Volume 3C, Section 34.4.1.1
  //
  mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT;
  if ((RegEdx & BIT29) != 0) {
    mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;
  }

  if (FamilyId == 0x06) {
    if ((ModelId == 0x17) || (ModelId == 0x0f) || (ModelId == 0x1c)) {
      mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;
    }
  }

  DEBUG ((DEBUG_INFO, "PcdControlFlowEnforcementPropertyMask = %d\n", PcdGet32 (PcdControlFlowEnforcementPropertyMask)));
  if (PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) {
    AsmCpuid (CPUID_SIGNATURE, &RegEax, NULL, NULL, NULL);
    if (RegEax >= CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS) {
      AsmCpuidEx (CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS, CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO, NULL, NULL, &RegEcx, &RegEdx);
      DEBUG ((DEBUG_INFO, "CPUID[7/0] ECX - 0x%08x\n", RegEcx));
      DEBUG ((DEBUG_INFO, " CET_SS - 0x%08x\n", RegEcx & CPUID_CET_SS));
      DEBUG ((DEBUG_INFO, " CET_IBT - 0x%08x\n", RegEdx & CPUID_CET_IBT));
      if ((RegEcx & CPUID_CET_SS) == 0) {
        mCetSupported = FALSE;
        PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);
      }

      if (mCetSupported) {
        AsmCpuidEx (CPUID_EXTENDED_STATE, CPUID_EXTENDED_STATE_SUB_LEAF, NULL, &RegEbx, &RegEcx, NULL);
        DEBUG ((DEBUG_INFO, "CPUID[D/1] EBX - 0x%08x, ECX - 0x%08x\n", RegEbx, RegEcx));
        AsmCpuidEx (CPUID_EXTENDED_STATE, 11, &RegEax, NULL, &RegEcx, NULL);
        DEBUG ((DEBUG_INFO, "CPUID[D/11] EAX - 0x%08x, ECX - 0x%08x\n", RegEax, RegEcx));
        AsmCpuidEx (CPUID_EXTENDED_STATE, 12, &RegEax, NULL, &RegEcx, NULL);
        DEBUG ((DEBUG_INFO, "CPUID[D/12] EAX - 0x%08x, ECX - 0x%08x\n", RegEax, RegEcx));
      }
    } else {
      mCetSupported = FALSE;
      PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);
    }
  } else {
    mCetSupported = FALSE;
    PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);
  }

  //
  // Compute tile size of buffer required to hold the CPU SMRAM Save State Map, extra CPU
  // specific context that starts at SMBASE + SMM_PSD_OFFSET, and the SMI entry point.
  // This size is rounded up to nearest power of 2.
  //
  TileCodeSize = GetSmiHandlerSize ();
  TileCodeSize = ALIGN_VALUE (TileCodeSize, SIZE_4KB);
  TileDataSize = (SMRAM_SAVE_STATE_MAP_OFFSET - SMM_PSD_OFFSET) + sizeof (SMRAM_SAVE_STATE_MAP);
  TileDataSize = ALIGN_VALUE (TileDataSize, SIZE_4KB);
  TileSize     = TileDataSize + TileCodeSize - 1;
  TileSize     = 2 * GetPowerOfTwo32 ((UINT32)TileSize);
  DEBUG ((DEBUG_INFO, "SMRAM TileSize = 0x%08x (0x%08x, 0x%08x)\n", TileSize, TileCodeSize, TileDataSize));
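
  //
  // Illustrative example (hypothetical sizes): with TileCodeSize = 0x1000 and
  // TileDataSize = 0x3000, TileSize is first set to 0x3FFF and then becomes
  // 2 * GetPowerOfTwo32 (0x3FFF) = 2 * 0x2000 = 0x4000, i.e. the smallest power
  // of two that can hold TileDataSize + TileCodeSize.
  //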

  //
  // If the TileSize is larger than space available for the SMI Handler of
  // CPU[i], the extra CPU specific context of CPU[i+1], and the SMRAM Save
  // State Map of CPU[i+1], then ASSERT(). If this ASSERT() is triggered, then
  // the SMI Handler size must be reduced or the size of the extra CPU specific
  // context must be reduced.
  //
  ASSERT (TileSize <= (SMRAM_SAVE_STATE_MAP_OFFSET + sizeof (SMRAM_SAVE_STATE_MAP) - SMM_HANDLER_OFFSET));

  //
  // Allocate buffer for all of the tiles.
  //
  // Intel(R) 64 and IA-32 Architectures Software Developer's Manual
  // Volume 3C, Section 34.11 SMBASE Relocation
  //   For Pentium and Intel486 processors, the SMBASE values must be
  //   aligned on a 32-KByte boundary or the processor will enter shutdown
  //   state during the execution of a RSM instruction.
  //
  // Intel486 processors: FamilyId is 4
  // Pentium processors : FamilyId is 5
  //
  BufferPages = EFI_SIZE_TO_PAGES (SIZE_32KB + TileSize * (mMaxNumberOfCpus - 1));
  if ((FamilyId == 4) || (FamilyId == 5)) {
    Buffer = AllocateAlignedCodePages (BufferPages, SIZE_32KB);
  } else {
    Buffer = AllocateAlignedCodePages (BufferPages, SIZE_4KB);
  }

  ASSERT (Buffer != NULL);
  DEBUG ((DEBUG_INFO, "SMRAM SaveState Buffer (0x%08x, 0x%08x)\n", Buffer, EFI_PAGES_TO_SIZE (BufferPages)));

  //
  // Allocate buffer for pointers to array in SMM_CPU_PRIVATE_DATA.
  //
  gSmmCpuPrivate->ProcessorInfo = (EFI_PROCESSOR_INFORMATION *)AllocatePool (sizeof (EFI_PROCESSOR_INFORMATION) * mMaxNumberOfCpus);
  ASSERT (gSmmCpuPrivate->ProcessorInfo != NULL);

  gSmmCpuPrivate->Operation = (SMM_CPU_OPERATION *)AllocatePool (sizeof (SMM_CPU_OPERATION) * mMaxNumberOfCpus);
  ASSERT (gSmmCpuPrivate->Operation != NULL);

  gSmmCpuPrivate->CpuSaveStateSize = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);
  ASSERT (gSmmCpuPrivate->CpuSaveStateSize != NULL);

  gSmmCpuPrivate->CpuSaveState = (VOID **)AllocatePool (sizeof (VOID *) * mMaxNumberOfCpus);
  ASSERT (gSmmCpuPrivate->CpuSaveState != NULL);

  mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveStateSize = gSmmCpuPrivate->CpuSaveStateSize;
  mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveState     = gSmmCpuPrivate->CpuSaveState;

  //
  // Allocate buffer for pointers to array in CPU_HOT_PLUG_DATA.
  //
  mCpuHotPlugData.ApicId = (UINT64 *)AllocatePool (sizeof (UINT64) * mMaxNumberOfCpus);
  ASSERT (mCpuHotPlugData.ApicId != NULL);
  mCpuHotPlugData.SmBase = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);
  ASSERT (mCpuHotPlugData.SmBase != NULL);
  mCpuHotPlugData.ArrayLength = (UINT32)mMaxNumberOfCpus;

  //
  // Retrieve APIC ID of each enabled processor from the MP Services protocol.
  // Also compute the SMBASE address, CPU Save State address, and CPU Save state
  // size for each CPU in the platform
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
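    //
    // Note: SmBase[Index] is chosen so that this CPU's SMI entry point
    // (SmBase[Index] + SMM_HANDLER_OFFSET) lands exactly at the start of tile
    // Index within the allocated buffer; the save state pointer computed below
    // is then SmBase[Index] + SMRAM_SAVE_STATE_MAP_OFFSET.
    //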
    mCpuHotPlugData.SmBase[Index]           = (UINTN)Buffer + Index * TileSize - SMM_HANDLER_OFFSET;
    gSmmCpuPrivate->CpuSaveStateSize[Index] = sizeof (SMRAM_SAVE_STATE_MAP);
    gSmmCpuPrivate->CpuSaveState[Index]     = (VOID *)(mCpuHotPlugData.SmBase[Index] + SMRAM_SAVE_STATE_MAP_OFFSET);
    gSmmCpuPrivate->Operation[Index]        = SmmCpuNone;

    if (Index < mNumberOfCpus) {
      Status = MpServices->GetProcessorInfo (MpServices, Index, &gSmmCpuPrivate->ProcessorInfo[Index]);
      ASSERT_EFI_ERROR (Status);
      mCpuHotPlugData.ApicId[Index] = gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId;

      DEBUG ((
        DEBUG_INFO,
        "CPU[%03x] APIC ID=%04x SMBASE=%08x SaveState=%08x Size=%08x\n",
        Index,
        (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId,
        mCpuHotPlugData.SmBase[Index],
        gSmmCpuPrivate->CpuSaveState[Index],
        gSmmCpuPrivate->CpuSaveStateSize[Index]
        ));
    } else {
      gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId = INVALID_APIC_ID;
      mCpuHotPlugData.ApicId[Index]                    = INVALID_APIC_ID;
    }
  }

  //
  // Allocate SMI stacks for all processors.
  //
  mSmmStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)));
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    //
    // SMM Stack Guard Enabled
    //   2 more pages are allocated for each processor: one is the guard page and the other is the known good stack.
    //
    // +--------------------------------------------------+-----+--------------------------------------------------+
    // | Known Good Stack | Guard Page |     SMM Stack     | ... | Known Good Stack | Guard Page |     SMM Stack     |
    // +--------------------------------------------------+-----+--------------------------------------------------+
    // |        4K        |     4K     | PcdCpuSmmStackSize|     |        4K        |     4K     | PcdCpuSmmStackSize|
    // |<---------------- mSmmStackSize ----------------->|     |<---------------- mSmmStackSize ----------------->|
    // |                                                   |     |                                                   |
    // |<------------------ Processor 0 ----------------->|     |<------------------ Processor n ----------------->|
    //
    mSmmStackSize += EFI_PAGES_TO_SIZE (2);
  }

  mSmmShadowStackSize = 0;
  if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
    mSmmShadowStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmShadowStackSize)));

    if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
      //
      // SMM Stack Guard Enabled
      //   Append the shadow stack after the normal stack.
      //   2 more pages are allocated for each processor: one is the guard page and the other is the known good shadow stack.
      //
      // |= Stacks
      // +--------------------------------------------------+---------------------------------------------------------------+
      // | Known Good Stack | Guard Page |     SMM Stack     | Known Good Shadow Stack | Guard Page |    SMM Shadow Stack    |
      // +--------------------------------------------------+---------------------------------------------------------------+
      // |        4K        |     4K     | PcdCpuSmmStackSize|           4K            |     4K     |PcdCpuSmmShadowStackSize|
      // |<---------------- mSmmStackSize ----------------->|<--------------------- mSmmShadowStackSize ------------------->|
      // |                                                                                                                   |
      // |<-------------------------------------------- Processor N ------------------------------------------------------->|
      //
      mSmmShadowStackSize += EFI_PAGES_TO_SIZE (2);
    } else {
      //
      // SMM Stack Guard Disabled (a known good stack is still required for a potential stack switch).
      //   Append the shadow stack after the normal stack, with 1 more page as the known good shadow stack.
      //   1 more page is allocated for each processor as the known good stack.
      //
      // |= Stacks
      // +-------------------------------------+--------------------------------------------------+
      // | Known Good Stack |    SMM Stack     | Known Good Shadow Stack |    SMM Shadow Stack     |
      // +-------------------------------------+--------------------------------------------------+
      // |        4K        |PcdCpuSmmStackSize|           4K            |PcdCpuSmmShadowStackSize |
      // |<---------- mSmmStackSize ---------->|<--------------- mSmmShadowStackSize ------------->|
      // |                                                                                         |
      // |<-------------------------------- Processor N ----------------------------------------->|
      //
      mSmmShadowStackSize += EFI_PAGES_TO_SIZE (1);
      mSmmStackSize       += EFI_PAGES_TO_SIZE (1);
    }
  }

  Stacks = (UINT8 *)AllocatePages (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (EFI_SIZE_TO_PAGES (mSmmStackSize + mSmmShadowStackSize)));
  ASSERT (Stacks != NULL);
  mSmmStackArrayBase = (UINTN)Stacks;
  mSmmStackArrayEnd  = mSmmStackArrayBase + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (mSmmStackSize + mSmmShadowStackSize) - 1;

  DEBUG ((DEBUG_INFO, "Stacks - 0x%x\n", Stacks));
  DEBUG ((DEBUG_INFO, "mSmmStackSize - 0x%x\n", mSmmStackSize));
  DEBUG ((DEBUG_INFO, "PcdCpuSmmStackGuard - 0x%x\n", FeaturePcdGet (PcdCpuSmmStackGuard)));
  if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
    DEBUG ((DEBUG_INFO, "mSmmShadowStackSize - 0x%x\n", mSmmShadowStackSize));
  }

  //
  // Set SMI stack for SMM base relocation
  //
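  //
  // Note: SmmRelocateBases() performs the relocation one processor at a time and
  // waits for each rebase to complete, so a single temporary SmmInit stack patched
  // here is assumed to be sufficient for all processors.
  //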
  PatchInstructionX86 (
    gPatchSmmInitStack,
    (UINTN)(Stacks + mSmmStackSize - sizeof (UINTN)),
    sizeof (UINTN)
    );

  //
  // Initialize IDT
  //
  InitializeSmmIdt ();

  //
  // Relocate SMM Base addresses to the ones allocated from SMRAM
  //
  mRebased = (BOOLEAN *)AllocateZeroPool (sizeof (BOOLEAN) * mMaxNumberOfCpus);
  ASSERT (mRebased != NULL);
  SmmRelocateBases ();

  //
  // Call hook for BSP to perform extra actions in normal mode after all
  // SMM base addresses have been relocated on all CPUs
  //
  SmmCpuFeaturesSmmRelocationComplete ();

  DEBUG ((DEBUG_INFO, "mXdSupported - 0x%x\n", mXdSupported));

  //
  // SMM Time initialization
  //
  InitializeSmmTimer ();

  //
  // Initialize MP globals
  //
  Cr3 = InitializeMpServiceData (Stacks, mSmmStackSize, mSmmShadowStackSize);

  if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
    for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {
      SetShadowStack (
        Cr3,
        (EFI_PHYSICAL_ADDRESS)(UINTN)Stacks + mSmmStackSize + (mSmmStackSize + mSmmShadowStackSize) * Index,
        mSmmShadowStackSize
        );
      if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
        SetNotPresentPage (
          Cr3,
          (EFI_PHYSICAL_ADDRESS)(UINTN)Stacks + mSmmStackSize + EFI_PAGES_TO_SIZE (1) + (mSmmStackSize + mSmmShadowStackSize) * Index,
          EFI_PAGES_TO_SIZE (1)
          );
      }
    }
  }

  //
  // Fill in SMM Reserved Regions
  //
  gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedStart = 0;
  gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedSize  = 0;

  //
  // Install the SMM Configuration Protocol onto a new handle on the handle database.
  // The entire SMM Configuration Protocol is allocated from SMRAM, so only a pointer
  // to an SMRAM address will be present in the handle database
  //
  Status = SystemTable->BootServices->InstallMultipleProtocolInterfaces (
                                        &gSmmCpuPrivate->SmmCpuHandle,
                                        &gEfiSmmConfigurationProtocolGuid,
                                        &gSmmCpuPrivate->SmmConfiguration,
                                        NULL
                                        );
  ASSERT_EFI_ERROR (Status);

  //
  // Install the SMM CPU Protocol into SMM protocol database
  //
  Status = gSmst->SmmInstallProtocolInterface (
                    &mSmmCpuHandle,
                    &gEfiSmmCpuProtocolGuid,
                    EFI_NATIVE_INTERFACE,
                    &mSmmCpu
                    );
  ASSERT_EFI_ERROR (Status);

  //
  // Install the SMM Memory Attribute Protocol into SMM protocol database
  //
  Status = gSmst->SmmInstallProtocolInterface (
                    &mSmmCpuHandle,
                    &gEdkiiSmmMemoryAttributeProtocolGuid,
                    EFI_NATIVE_INTERFACE,
                    &mSmmMemoryAttribute
                    );
  ASSERT_EFI_ERROR (Status);

  //
  // Initialize global buffer for MM MP.
  //
  InitializeDataForMmMp ();

  //
  // Initialize Package First Thread Index Info.
  //
  InitPackageFirstThreadIndexInfo ();

  //
  // Install the SMM Mp Protocol into SMM protocol database
  //
  Status = gSmst->SmmInstallProtocolInterface (
                    &mSmmCpuHandle,
                    &gEfiMmMpProtocolGuid,
                    EFI_NATIVE_INTERFACE,
                    &mSmmMp
                    );
  ASSERT_EFI_ERROR (Status);

  //
  // Expose address of CPU Hot Plug Data structure if CPU hot plug is supported.
  //
  if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
    Status = PcdSet64S (PcdCpuHotPlugDataAddress, (UINT64)(UINTN)&mCpuHotPlugData);
    ASSERT_EFI_ERROR (Status);
  }

  //
  // Initialize SMM CPU Services Support
  //
  Status = InitializeSmmCpuServices (mSmmCpuHandle);
  ASSERT_EFI_ERROR (Status);

  //
  // Register SMM Ready To Lock Protocol notification
  //
  Status = gSmst->SmmRegisterProtocolNotify (
                    &gEfiSmmReadyToLockProtocolGuid,
                    SmmReadyToLockEventNotify,
                    &Registration
                    );
  ASSERT_EFI_ERROR (Status);

  //
  // Initialize SMM Profile feature
  //
  InitSmmProfile (Cr3);

  GetAcpiS3EnableFlag ();
  InitSmmS3ResumeState (Cr3);

  DEBUG ((DEBUG_INFO, "SMM CPU Module exit from SMRAM with EFI_SUCCESS\n"));

  return EFI_SUCCESS;
}

/**
  Find out SMRAM information including SMRR base and SMRR size.

  @param SmrrBase   SMRR base
  @param SmrrSize   SMRR size

**/
VOID
FindSmramInfo (
  OUT UINT32  *SmrrBase,
  OUT UINT32  *SmrrSize
  )
{
  EFI_STATUS                Status;
  UINTN                     Size;
  EFI_SMM_ACCESS2_PROTOCOL  *SmmAccess;
  EFI_SMRAM_DESCRIPTOR      *CurrentSmramRange;
  UINTN                     Index;
  UINT64                    MaxSize;
  BOOLEAN                   Found;

  //
  // Get SMM Access Protocol
  //
  Status = gBS->LocateProtocol (&gEfiSmmAccess2ProtocolGuid, NULL, (VOID **)&SmmAccess);
  ASSERT_EFI_ERROR (Status);

  //
  // Get SMRAM information
  //
  Size   = 0;
  Status = SmmAccess->GetCapabilities (SmmAccess, &Size, NULL);
  ASSERT (Status == EFI_BUFFER_TOO_SMALL);

  mSmmCpuSmramRanges = (EFI_SMRAM_DESCRIPTOR *)AllocatePool (Size);
  ASSERT (mSmmCpuSmramRanges != NULL);

  Status = SmmAccess->GetCapabilities (SmmAccess, &Size, mSmmCpuSmramRanges);
  ASSERT_EFI_ERROR (Status);

  mSmmCpuSmramRangeCount = Size / sizeof (EFI_SMRAM_DESCRIPTOR);

  //
  // Find the largest SMRAM range between 1MB and 4GB that is at least 256K - 4K in size
  //
  CurrentSmramRange = NULL;
  for (Index = 0, MaxSize = SIZE_256KB - EFI_PAGE_SIZE; Index < mSmmCpuSmramRangeCount; Index++) {
    //
    // Skip any SMRAM region that is already allocated, needs testing, or needs ECC initialization
    //
    if ((mSmmCpuSmramRanges[Index].RegionState & (EFI_ALLOCATED | EFI_NEEDS_TESTING | EFI_NEEDS_ECC_INITIALIZATION)) != 0) {
      continue;
    }

    if (mSmmCpuSmramRanges[Index].CpuStart >= BASE_1MB) {
      if ((mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize) <= SMRR_MAX_ADDRESS) {
        if (mSmmCpuSmramRanges[Index].PhysicalSize >= MaxSize) {
          MaxSize           = mSmmCpuSmramRanges[Index].PhysicalSize;
          CurrentSmramRange = &mSmmCpuSmramRanges[Index];
        }
      }
    }
  }

  ASSERT (CurrentSmramRange != NULL);

  *SmrrBase = (UINT32)CurrentSmramRange->CpuStart;
  *SmrrSize = (UINT32)CurrentSmramRange->PhysicalSize;

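  //
  // Extend the SMRR range by repeatedly merging any SMRAM descriptor that is
  // immediately adjacent to the current [*SmrrBase, *SmrrBase + *SmrrSize) window,
  // either just below or just above it, until no adjacent descriptor remains.
  //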
  do {
    Found = FALSE;
    for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {
      if ((mSmmCpuSmramRanges[Index].CpuStart < *SmrrBase) &&
          (*SmrrBase == (mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize)))
      {
        *SmrrBase = (UINT32)mSmmCpuSmramRanges[Index].CpuStart;
        *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);
        Found     = TRUE;
      } else if (((*SmrrBase + *SmrrSize) == mSmmCpuSmramRanges[Index].CpuStart) && (mSmmCpuSmramRanges[Index].PhysicalSize > 0)) {
        *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);
        Found     = TRUE;
      }
    }
  } while (Found);

  DEBUG ((DEBUG_INFO, "SMRR Base: 0x%x, SMRR Size: 0x%x\n", *SmrrBase, *SmrrSize));
}

/**
  Configure SMM Code Access Check feature on an AP.
  SMM Feature Control MSR will be locked after configuration.

  @param[in,out] Buffer  Pointer to private data buffer.
**/
VOID
EFIAPI
ConfigSmmCodeAccessCheckOnCurrentProcessor (
  IN OUT VOID  *Buffer
  )
{
  UINTN   CpuIndex;
  UINT64  SmmFeatureControlMsr;
  UINT64  NewSmmFeatureControlMsr;

  //
  // Retrieve the CPU Index from the context passed in
  //
  CpuIndex = *(UINTN *)Buffer;

  //
  // Get the current SMM Feature Control MSR value
  //
  SmmFeatureControlMsr = SmmCpuFeaturesGetSmmRegister (CpuIndex, SmmRegFeatureControl);

  //
  // Compute the new SMM Feature Control MSR value
  //
  NewSmmFeatureControlMsr = SmmFeatureControlMsr;
  if (mSmmCodeAccessCheckEnable) {
    NewSmmFeatureControlMsr |= SMM_CODE_CHK_EN_BIT;
    if (FeaturePcdGet (PcdCpuSmmFeatureControlMsrLock)) {
      NewSmmFeatureControlMsr |= SMM_FEATURE_CONTROL_LOCK_BIT;
    }
  }

  //
  // Only set the SMM Feature Control MSR value if the new value is different than the current value
  //
  if (NewSmmFeatureControlMsr != SmmFeatureControlMsr) {
    SmmCpuFeaturesSetSmmRegister (CpuIndex, SmmRegFeatureControl, NewSmmFeatureControlMsr);
  }

  //
  // Release the spin lock used to serialize the updates to the SMM Feature Control MSR
  //
  ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);
}

/**
  Configure SMM Code Access Check feature for all processors.
  SMM Feature Control MSR will be locked after configuration.
**/
VOID
ConfigSmmCodeAccessCheck (
  VOID
  )
{
  UINTN       Index;
  EFI_STATUS  Status;

  //
  // Check to see if the Feature Control MSR is supported on this CPU
  //
  Index = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
  if (!SmmCpuFeaturesIsSmmRegisterSupported (Index, SmmRegFeatureControl)) {
    mSmmCodeAccessCheckEnable = FALSE;
    return;
  }

  //
  // Check to see if the CPU supports the SMM Code Access Check feature
  // Do not access this MSR unless the CPU supports the SmmRegFeatureControl
  //
  if ((AsmReadMsr64 (EFI_MSR_SMM_MCA_CAP) & SMM_CODE_ACCESS_CHK_BIT) == 0) {
    mSmmCodeAccessCheckEnable = FALSE;
    return;
  }

  //
  // Initialize the lock used to serialize the MSR programming in BSP and all APs
  //
  InitializeSpinLock (mConfigSmmCodeAccessCheckLock);

  //
  // Acquire Config SMM Code Access Check spin lock. The BSP will release the
  // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
  //
  AcquireSpinLock (mConfigSmmCodeAccessCheckLock);

  //
  // Enable SMM Code Access Check feature on the BSP.
  //
  ConfigSmmCodeAccessCheckOnCurrentProcessor (&Index);

  //
  // Enable SMM Code Access Check feature for the APs.
  //
  for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {
    if (Index != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
      if (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId == INVALID_APIC_ID) {
        //
        // If this processor does not exist
        //
        continue;
      }

      //
      // Acquire Config SMM Code Access Check spin lock. The AP will release the
      // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
      //
      AcquireSpinLock (mConfigSmmCodeAccessCheckLock);

      //
      // Call SmmStartupThisAp() to enable SMM Code Access Check on an AP.
      //
      Status = gSmst->SmmStartupThisAp (ConfigSmmCodeAccessCheckOnCurrentProcessor, Index, &Index);
      ASSERT_EFI_ERROR (Status);

      //
      // Wait for the AP to release the Config SMM Code Access Check spin lock.
      //
      while (!AcquireSpinLockOrFail (mConfigSmmCodeAccessCheckLock)) {
        CpuPause ();
      }

      //
      // Release the Config SMM Code Access Check spin lock.
      //
      ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);
    }
  }
}

/**
  Allocate pages for code.

  @param[in]  Pages  Number of pages to be allocated.

  @return Allocated memory.
**/
VOID *
AllocateCodePages (
  IN UINTN  Pages
  )
{
  EFI_STATUS            Status;
  EFI_PHYSICAL_ADDRESS  Memory;

  if (Pages == 0) {
    return NULL;
  }

  Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);
  if (EFI_ERROR (Status)) {
    return NULL;
  }

  return (VOID *)(UINTN)Memory;
}

/**
  Allocate aligned pages for code.

  @param[in]  Pages      Number of pages to be allocated.
  @param[in]  Alignment  The requested alignment of the allocation.
                         Must be a power of two.
                         If Alignment is zero, then byte alignment is used.

  @return Allocated memory.
**/
VOID *
AllocateAlignedCodePages (
  IN UINTN  Pages,
  IN UINTN  Alignment
  )
{
  EFI_STATUS            Status;
  EFI_PHYSICAL_ADDRESS  Memory;
  UINTN                 AlignedMemory;
  UINTN                 AlignmentMask;
  UINTN                 UnalignedPages;
  UINTN                 RealPages;

  //
  // Alignment must be a power of two or zero.
  //
  ASSERT ((Alignment & (Alignment - 1)) == 0);

  if (Pages == 0) {
    return NULL;
  }

  if (Alignment > EFI_PAGE_SIZE) {
    //
    // Calculate the total number of pages since alignment is larger than page size.
    //
    AlignmentMask = Alignment - 1;
    RealPages     = Pages + EFI_SIZE_TO_PAGES (Alignment);
    //
    // Make sure that Pages plus EFI_SIZE_TO_PAGES (Alignment) does not overflow.
    //
    ASSERT (RealPages > Pages);

    Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, RealPages, &Memory);
    if (EFI_ERROR (Status)) {
      return NULL;
    }

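    //
    // Round the allocation up to the requested alignment and free the unused
    // head and tail pages. Illustrative example (hypothetical numbers): with
    // Alignment = 0x8000 and Memory = 0x12345000, AlignmentMask = 0x7FFF and
    // AlignedMemory = (0x12345000 + 0x7FFF) & ~0x7FFF = 0x12348000, so the
    // three pages in [0x12345000, 0x12348000) are freed below.
    //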
    AlignedMemory  = ((UINTN)Memory + AlignmentMask) & ~AlignmentMask;
    UnalignedPages = EFI_SIZE_TO_PAGES (AlignedMemory - (UINTN)Memory);
    if (UnalignedPages > 0) {
      //
      // Free first unaligned page(s).
      //
      Status = gSmst->SmmFreePages (Memory, UnalignedPages);
      ASSERT_EFI_ERROR (Status);
    }

    Memory         = AlignedMemory + EFI_PAGES_TO_SIZE (Pages);
    UnalignedPages = RealPages - Pages - UnalignedPages;
    if (UnalignedPages > 0) {
      //
      // Free last unaligned page(s).
      //
      Status = gSmst->SmmFreePages (Memory, UnalignedPages);
      ASSERT_EFI_ERROR (Status);
    }
  } else {
    //
    // Do not over-allocate pages in this case.
    //
    Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);
    if (EFI_ERROR (Status)) {
      return NULL;
    }

    AlignedMemory = (UINTN)Memory;
  }

  return (VOID *)AlignedMemory;
}

/**
  Perform the remaining tasks.

**/
VOID
PerformRemainingTasks (
  VOID
  )
{
  if (mSmmReadyToLock) {
    //
    // Start SMM Profile feature
    //
    if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
      SmmProfileStart ();
    }

    //
    // Create a mix of 2MB and 4KB page table. Update some memory ranges to be
    // absent and execute-disabled.
    //
    InitPaging ();

    //
    // Mark critical region to be read-only in page table
    //
    SetMemMapAttributes ();

    if (IsRestrictedMemoryAccess ()) {
      //
      // For outside SMRAM, we only map SMM communication buffer or MMIO.
      //
      SetUefiMemMapAttributes ();

      //
      // Set page table itself to be read-only
      //
      SetPageTableAttributes ();
    }

    //
    // Configure SMM Code Access Check feature if available.
    //
    ConfigSmmCodeAccessCheck ();

    SmmCpuFeaturesCompleteSmmReadyToLock ();

    //
    // Clean SMM ready to lock flag
    //
    mSmmReadyToLock = FALSE;
  }
}

/**
  Perform the pre tasks.

**/
VOID
PerformPreTasks (
  VOID
  )
{
  RestoreSmmConfigurationInS3 ();
}