]> git.proxmox.com Git - mirror_edk2.git/blame - UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.c
UefiCpuPkg: Change use of EFI_D_* to DEBUG_*
[mirror_edk2.git] / UefiCpuPkg / PiSmmCpuDxeSmm / PiSmmCpuDxeSmm.c
CommitLineData
529a5a86
MK
1/** @file\r
2Agent Module to load other modules to deploy SMM Entry Vector for X86 CPU.\r
3\r
3eb69b08 4Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>\r
241f9149
LD
5Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>\r
6\r
0acd8697 7SPDX-License-Identifier: BSD-2-Clause-Patent\r
529a5a86
MK
8\r
9**/\r
10\r
11#include "PiSmmCpuDxeSmm.h"\r
12\r
13//\r
14// SMM CPU Private Data structure that contains SMM Configuration Protocol\r
15// along its supporting fields.\r
16//\r
17SMM_CPU_PRIVATE_DATA mSmmCpuPrivateData = {\r
18 SMM_CPU_PRIVATE_DATA_SIGNATURE, // Signature\r
19 NULL, // SmmCpuHandle\r
20 NULL, // Pointer to ProcessorInfo array\r
21 NULL, // Pointer to Operation array\r
22 NULL, // Pointer to CpuSaveStateSize array\r
23 NULL, // Pointer to CpuSaveState array\r
24 { {0} }, // SmmReservedSmramRegion\r
25 {\r
26 SmmStartupThisAp, // SmmCoreEntryContext.SmmStartupThisAp\r
27 0, // SmmCoreEntryContext.CurrentlyExecutingCpu\r
28 0, // SmmCoreEntryContext.NumberOfCpus\r
29 NULL, // SmmCoreEntryContext.CpuSaveStateSize\r
30 NULL // SmmCoreEntryContext.CpuSaveState\r
31 },\r
32 NULL, // SmmCoreEntry\r
33 {\r
34 mSmmCpuPrivateData.SmmReservedSmramRegion, // SmmConfiguration.SmramReservedRegions\r
35 RegisterSmmEntry // SmmConfiguration.RegisterSmmEntry\r
36 },\r
51dd408a
ED
37 NULL, // pointer to Ap Wrapper Func array\r
38 {NULL, NULL}, // List_Entry for Tokens.\r
529a5a86
MK
39};\r
40\r
41CPU_HOT_PLUG_DATA mCpuHotPlugData = {\r
42 CPU_HOT_PLUG_DATA_REVISION_1, // Revision\r
43 0, // Array Length of SmBase and APIC ID\r
44 NULL, // Pointer to APIC ID array\r
45 NULL, // Pointer to SMBASE array\r
46 0, // Reserved\r
47 0, // SmrrBase\r
48 0 // SmrrSize\r
49};\r
50\r
51//\r
52// Global pointer used to access mSmmCpuPrivateData from outside and inside SMM\r
53//\r
54SMM_CPU_PRIVATE_DATA *gSmmCpuPrivate = &mSmmCpuPrivateData;\r
55\r
56//\r
57// SMM Relocation variables\r
58//\r
59volatile BOOLEAN *mRebased;\r
60volatile BOOLEAN mIsBsp;\r
61\r
62///\r
63/// Handle for the SMM CPU Protocol\r
64///\r
65EFI_HANDLE mSmmCpuHandle = NULL;\r
66\r
67///\r
68/// SMM CPU Protocol instance\r
69///\r
70EFI_SMM_CPU_PROTOCOL mSmmCpu = {\r
71 SmmReadSaveState,\r
72 SmmWriteSaveState\r
73};\r
74\r
827330cc
JW
75///\r
76/// SMM Memory Attribute Protocol instance\r
77///\r
78EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL mSmmMemoryAttribute = {\r
79 EdkiiSmmGetMemoryAttributes,\r
80 EdkiiSmmSetMemoryAttributes,\r
81 EdkiiSmmClearMemoryAttributes\r
82};\r
83\r
529a5a86
MK
84EFI_CPU_INTERRUPT_HANDLER mExternalVectorTable[EXCEPTION_VECTOR_NUMBER];\r
85\r
529a5a86
MK
86//\r
87// SMM stack information\r
88//\r
89UINTN mSmmStackArrayBase;\r
90UINTN mSmmStackArrayEnd;\r
91UINTN mSmmStackSize;\r
92\r
3eb69b08
JY
93UINTN mSmmShadowStackSize;\r
94BOOLEAN mCetSupported = TRUE;\r
95\r
529a5a86
MK
96UINTN mMaxNumberOfCpus = 1;\r
97UINTN mNumberOfCpus = 1;\r
98\r
99//\r
100// SMM ready to lock flag\r
101//\r
102BOOLEAN mSmmReadyToLock = FALSE;\r
103\r
104//\r
105// Global used to cache PCD for SMM Code Access Check enable\r
106//\r
107BOOLEAN mSmmCodeAccessCheckEnable = FALSE;\r
108\r
241f9149
LD
109//\r
110// Global copy of the PcdPteMemoryEncryptionAddressOrMask\r
111//\r
112UINT64 mAddressEncMask = 0;\r
113\r
529a5a86
MK
114//\r
115// Spin lock used to serialize setting of SMM Code Access Check feature\r
116//\r
fe3a75bc 117SPIN_LOCK *mConfigSmmCodeAccessCheckLock = NULL;\r
529a5a86 118\r
7ed6f781
JF
119//\r
120// Saved SMM ranges information\r
121//\r
122EFI_SMRAM_DESCRIPTOR *mSmmCpuSmramRanges;\r
123UINTN mSmmCpuSmramRangeCount;\r
124\r
51ce27fd
SZ
125UINT8 mPhysicalAddressBits;\r
126\r
351b49c1
LE
127//\r
128// Control register contents saved for SMM S3 resume state initialization.\r
129//\r
f0053e83 130UINT32 mSmmCr0;\r
351b49c1
LE
131UINT32 mSmmCr4;\r
132\r
529a5a86
MK
133/**\r
134 Initialize IDT to setup exception handlers for SMM.\r
135\r
136**/\r
137VOID\r
138InitializeSmmIdt (\r
139 VOID\r
140 )\r
141{\r
142 EFI_STATUS Status;\r
143 BOOLEAN InterruptState;\r
144 IA32_DESCRIPTOR DxeIdtr;\r
717fb604
JY
145\r
146 //\r
147 // There are 32 (not 255) entries in it since only processor\r
148 // generated exceptions will be handled.\r
149 //\r
150 gcSmiIdtr.Limit = (sizeof(IA32_IDT_GATE_DESCRIPTOR) * 32) - 1;\r
151 //\r
152 // Allocate page aligned IDT, because it might be set as read only.\r
153 //\r
154 gcSmiIdtr.Base = (UINTN)AllocateCodePages (EFI_SIZE_TO_PAGES(gcSmiIdtr.Limit + 1));\r
155 ASSERT (gcSmiIdtr.Base != 0);\r
156 ZeroMem ((VOID *)gcSmiIdtr.Base, gcSmiIdtr.Limit + 1);\r
157\r
529a5a86
MK
158 //\r
159 // Disable Interrupt and save DXE IDT table\r
160 //\r
161 InterruptState = SaveAndDisableInterrupts ();\r
162 AsmReadIdtr (&DxeIdtr);\r
163 //\r
164 // Load SMM temporary IDT table\r
165 //\r
166 AsmWriteIdtr (&gcSmiIdtr);\r
167 //\r
168 // Setup SMM default exception handlers, SMM IDT table\r
169 // will be updated and saved in gcSmiIdtr\r
170 //\r
171 Status = InitializeCpuExceptionHandlers (NULL);\r
172 ASSERT_EFI_ERROR (Status);\r
173 //\r
174 // Restore DXE IDT table and CPU interrupt\r
175 //\r
176 AsmWriteIdtr ((IA32_DESCRIPTOR *) &DxeIdtr);\r
177 SetInterruptState (InterruptState);\r
178}\r
179\r
180/**\r
181 Search module name by input IP address and output it.\r
182\r
183 @param CallerIpAddress Caller instruction pointer.\r
184\r
185**/\r
186VOID\r
187DumpModuleInfoByIp (\r
188 IN UINTN CallerIpAddress\r
189 )\r
190{\r
191 UINTN Pe32Data;\r
529a5a86 192 VOID *PdbPointer;\r
529a5a86
MK
193\r
194 //\r
195 // Find Image Base\r
196 //\r
9e981317 197 Pe32Data = PeCoffSearchImageBase (CallerIpAddress);\r
529a5a86 198 if (Pe32Data != 0) {\r
b8caae19 199 DEBUG ((DEBUG_ERROR, "It is invoked from the instruction before IP(0x%p)", (VOID *) CallerIpAddress));\r
529a5a86
MK
200 PdbPointer = PeCoffLoaderGetPdbPointer ((VOID *) Pe32Data);\r
201 if (PdbPointer != NULL) {\r
b8caae19 202 DEBUG ((DEBUG_ERROR, " in module (%a)\n", PdbPointer));\r
529a5a86
MK
203 }\r
204 }\r
205}\r
206\r
207/**\r
208 Read information from the CPU save state.\r
209\r
210 @param This EFI_SMM_CPU_PROTOCOL instance\r
211 @param Width The number of bytes to read from the CPU save state.\r
212 @param Register Specifies the CPU register to read form the save state.\r
213 @param CpuIndex Specifies the zero-based index of the CPU save state.\r
214 @param Buffer Upon return, this holds the CPU register value read from the save state.\r
215\r
216 @retval EFI_SUCCESS The register was read from Save State\r
217 @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor\r
ef62da4f 218 @retval EFI_INVALID_PARAMETER This or Buffer is NULL.\r
529a5a86
MK
219\r
220**/\r
221EFI_STATUS\r
222EFIAPI\r
223SmmReadSaveState (\r
224 IN CONST EFI_SMM_CPU_PROTOCOL *This,\r
225 IN UINTN Width,\r
226 IN EFI_SMM_SAVE_STATE_REGISTER Register,\r
227 IN UINTN CpuIndex,\r
228 OUT VOID *Buffer\r
229 )\r
230{\r
231 EFI_STATUS Status;\r
232\r
233 //\r
234 // Retrieve pointer to the specified CPU's SMM Save State buffer\r
235 //\r
236 if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {\r
237 return EFI_INVALID_PARAMETER;\r
238 }\r
5b02be4d 239 //\r
b70ec0de
HW
240 // The SpeculationBarrier() call here is to ensure the above check for the\r
241 // CpuIndex has been completed before the execution of subsequent codes.\r
5b02be4d 242 //\r
b70ec0de 243 SpeculationBarrier ();\r
529a5a86
MK
244\r
245 //\r
246 // Check for special EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID\r
247 //\r
248 if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {\r
249 //\r
250 // The pseudo-register only supports the 64-bit size specified by Width.\r
251 //\r
252 if (Width != sizeof (UINT64)) {\r
253 return EFI_INVALID_PARAMETER;\r
254 }\r
255 //\r
256 // If the processor is in SMM at the time the SMI occurred,\r
257 // the pseudo register value for EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID is returned in Buffer.\r
258 // Otherwise, EFI_NOT_FOUND is returned.\r
259 //\r
ed3d5ecb 260 if (*(mSmmMpSyncData->CpuData[CpuIndex].Present)) {\r
529a5a86
MK
261 *(UINT64 *)Buffer = gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId;\r
262 return EFI_SUCCESS;\r
263 } else {\r
264 return EFI_NOT_FOUND;\r
265 }\r
266 }\r
267\r
ed3d5ecb 268 if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {\r
529a5a86
MK
269 return EFI_INVALID_PARAMETER;\r
270 }\r
271\r
272 Status = SmmCpuFeaturesReadSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
273 if (Status == EFI_UNSUPPORTED) {\r
274 Status = ReadSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
275 }\r
276 return Status;\r
277}\r
278\r
279/**\r
280 Write data to the CPU save state.\r
281\r
282 @param This EFI_SMM_CPU_PROTOCOL instance\r
283 @param Width The number of bytes to read from the CPU save state.\r
284 @param Register Specifies the CPU register to write to the save state.\r
285 @param CpuIndex Specifies the zero-based index of the CPU save state\r
286 @param Buffer Upon entry, this holds the new CPU register value.\r
287\r
288 @retval EFI_SUCCESS The register was written from Save State\r
289 @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor\r
ef62da4f 290 @retval EFI_INVALID_PARAMETER ProcessorIndex or Width is not correct\r
529a5a86
MK
291\r
292**/\r
293EFI_STATUS\r
294EFIAPI\r
295SmmWriteSaveState (\r
296 IN CONST EFI_SMM_CPU_PROTOCOL *This,\r
297 IN UINTN Width,\r
298 IN EFI_SMM_SAVE_STATE_REGISTER Register,\r
299 IN UINTN CpuIndex,\r
300 IN CONST VOID *Buffer\r
301 )\r
302{\r
303 EFI_STATUS Status;\r
304\r
305 //\r
306 // Retrieve pointer to the specified CPU's SMM Save State buffer\r
307 //\r
308 if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {\r
309 return EFI_INVALID_PARAMETER;\r
310 }\r
311\r
312 //\r
313 // Writes to EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID are ignored\r
314 //\r
315 if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {\r
316 return EFI_SUCCESS;\r
317 }\r
318\r
319 if (!mSmmMpSyncData->CpuData[CpuIndex].Present) {\r
320 return EFI_INVALID_PARAMETER;\r
321 }\r
322\r
323 Status = SmmCpuFeaturesWriteSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
324 if (Status == EFI_UNSUPPORTED) {\r
325 Status = WriteSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
326 }\r
327 return Status;\r
328}\r
329\r
330\r
331/**\r
332 C function for SMI handler. To change all processor's SMMBase Register.\r
333\r
334**/\r
335VOID\r
336EFIAPI\r
337SmmInitHandler (\r
338 VOID\r
339 )\r
340{\r
341 UINT32 ApicId;\r
342 UINTN Index;\r
343\r
344 //\r
345 // Update SMM IDT entries' code segment and load IDT\r
346 //\r
347 AsmWriteIdtr (&gcSmiIdtr);\r
348 ApicId = GetApicId ();\r
349\r
bb767506 350 ASSERT (mNumberOfCpus <= mMaxNumberOfCpus);\r
529a5a86
MK
351\r
352 for (Index = 0; Index < mNumberOfCpus; Index++) {\r
353 if (ApicId == (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {\r
354 //\r
355 // Initialize SMM specific features on the currently executing CPU\r
356 //\r
357 SmmCpuFeaturesInitializeProcessor (\r
358 Index,\r
359 mIsBsp,\r
360 gSmmCpuPrivate->ProcessorInfo,\r
361 &mCpuHotPlugData\r
362 );\r
363\r
a46a4c90
JF
364 if (!mSmmS3Flag) {\r
365 //\r
366 // Check XD and BTS features on each processor on normal boot\r
367 //\r
51773d49 368 CheckFeatureSupported ();\r
a46a4c90
JF
369 }\r
370\r
529a5a86
MK
371 if (mIsBsp) {\r
372 //\r
373 // BSP rebase is already done above.\r
374 // Initialize private data during S3 resume\r
375 //\r
376 InitializeMpSyncData ();\r
377 }\r
378\r
379 //\r
380 // Hook return after RSM to set SMM re-based flag\r
381 //\r
382 SemaphoreHook (Index, &mRebased[Index]);\r
383\r
384 return;\r
385 }\r
386 }\r
387 ASSERT (FALSE);\r
388}\r
389\r
390/**\r
391 Relocate SmmBases for each processor.\r
392\r
393 Execute on first boot and all S3 resumes\r
394\r
395**/\r
396VOID\r
397EFIAPI\r
398SmmRelocateBases (\r
399 VOID\r
400 )\r
401{\r
402 UINT8 BakBuf[BACK_BUF_SIZE];\r
403 SMRAM_SAVE_STATE_MAP BakBuf2;\r
404 SMRAM_SAVE_STATE_MAP *CpuStatePtr;\r
405 UINT8 *U8Ptr;\r
406 UINT32 ApicId;\r
407 UINTN Index;\r
408 UINTN BspIndex;\r
409\r
410 //\r
411 // Make sure the reserved size is large enough for procedure SmmInitTemplate.\r
412 //\r
413 ASSERT (sizeof (BakBuf) >= gcSmmInitSize);\r
414\r
415 //\r
416 // Patch ASM code template with current CR0, CR3, and CR4 values\r
417 //\r
f0053e83
LE
418 mSmmCr0 = (UINT32)AsmReadCr0 ();\r
419 PatchInstructionX86 (gPatchSmmCr0, mSmmCr0, 4);\r
6b0841c1 420 PatchInstructionX86 (gPatchSmmCr3, AsmReadCr3 (), 4);\r
351b49c1 421 mSmmCr4 = (UINT32)AsmReadCr4 ();\r
3eb69b08 422 PatchInstructionX86 (gPatchSmmCr4, mSmmCr4 & (~CR4_CET_ENABLE), 4);\r
529a5a86
MK
423\r
424 //\r
425 // Patch GDTR for SMM base relocation\r
426 //\r
427 gcSmiInitGdtr.Base = gcSmiGdtr.Base;\r
428 gcSmiInitGdtr.Limit = gcSmiGdtr.Limit;\r
429\r
430 U8Ptr = (UINT8*)(UINTN)(SMM_DEFAULT_SMBASE + SMM_HANDLER_OFFSET);\r
431 CpuStatePtr = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);\r
432\r
433 //\r
434 // Backup original contents at address 0x38000\r
435 //\r
436 CopyMem (BakBuf, U8Ptr, sizeof (BakBuf));\r
437 CopyMem (&BakBuf2, CpuStatePtr, sizeof (BakBuf2));\r
438\r
439 //\r
440 // Load image for relocation\r
441 //\r
442 CopyMem (U8Ptr, gcSmmInitTemplate, gcSmmInitSize);\r
443\r
444 //\r
445 // Retrieve the local APIC ID of current processor\r
446 //\r
447 ApicId = GetApicId ();\r
448\r
449 //\r
450 // Relocate SM bases for all APs\r
451 // This is APs' 1st SMI - rebase will be done here, and APs' default SMI handler will be overridden by gcSmmInitTemplate\r
452 //\r
453 mIsBsp = FALSE;\r
454 BspIndex = (UINTN)-1;\r
455 for (Index = 0; Index < mNumberOfCpus; Index++) {\r
456 mRebased[Index] = FALSE;\r
457 if (ApicId != (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {\r
458 SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);\r
459 //\r
460 // Wait for this AP to finish its 1st SMI\r
461 //\r
462 while (!mRebased[Index]);\r
463 } else {\r
464 //\r
465 // BSP will be Relocated later\r
466 //\r
467 BspIndex = Index;\r
468 }\r
469 }\r
470\r
471 //\r
472 // Relocate BSP's SMM base\r
473 //\r
474 ASSERT (BspIndex != (UINTN)-1);\r
475 mIsBsp = TRUE;\r
476 SendSmiIpi (ApicId);\r
477 //\r
478 // Wait for the BSP to finish its 1st SMI\r
479 //\r
480 while (!mRebased[BspIndex]);\r
481\r
482 //\r
483 // Restore contents at address 0x38000\r
484 //\r
485 CopyMem (CpuStatePtr, &BakBuf2, sizeof (BakBuf2));\r
486 CopyMem (U8Ptr, BakBuf, sizeof (BakBuf));\r
487}\r
488\r
529a5a86
MK
489/**\r
490 SMM Ready To Lock event notification handler.\r
491\r
492 The CPU S3 data is copied to SMRAM for security and mSmmReadyToLock is set to\r
493 perform additional lock actions that must be performed from SMM on the next SMI.\r
494\r
495 @param[in] Protocol Points to the protocol's unique identifier.\r
496 @param[in] Interface Points to the interface instance.\r
497 @param[in] Handle The handle on which the interface was installed.\r
498\r
499 @retval EFI_SUCCESS Notification handler runs successfully.\r
500 **/\r
501EFI_STATUS\r
502EFIAPI\r
503SmmReadyToLockEventNotify (\r
504 IN CONST EFI_GUID *Protocol,\r
505 IN VOID *Interface,\r
506 IN EFI_HANDLE Handle\r
507 )\r
508{\r
0bdc9e75 509 GetAcpiCpuData ();\r
529a5a86 510\r
d2fc7711
JY
511 //\r
512 // Cache a copy of UEFI memory map before we start profiling feature.\r
513 //\r
514 GetUefiMemoryMap ();\r
515\r
529a5a86
MK
516 //\r
517 // Set SMM ready to lock flag and return\r
518 //\r
519 mSmmReadyToLock = TRUE;\r
520 return EFI_SUCCESS;\r
521}\r
522\r
523/**\r
524 The module Entry Point of the CPU SMM driver.\r
525\r
526 @param ImageHandle The firmware allocated handle for the EFI image.\r
527 @param SystemTable A pointer to the EFI System Table.\r
528\r
529 @retval EFI_SUCCESS The entry point is executed successfully.\r
530 @retval Other Some error occurs when executing this entry point.\r
531\r
532**/\r
533EFI_STATUS\r
534EFIAPI\r
535PiCpuSmmEntry (\r
536 IN EFI_HANDLE ImageHandle,\r
537 IN EFI_SYSTEM_TABLE *SystemTable\r
538 )\r
539{\r
540 EFI_STATUS Status;\r
541 EFI_MP_SERVICES_PROTOCOL *MpServices;\r
542 UINTN NumberOfEnabledProcessors;\r
543 UINTN Index;\r
544 VOID *Buffer;\r
ae82a30b
JY
545 UINTN BufferPages;\r
546 UINTN TileCodeSize;\r
547 UINTN TileDataSize;\r
529a5a86 548 UINTN TileSize;\r
529a5a86
MK
549 UINT8 *Stacks;\r
550 VOID *Registration;\r
551 UINT32 RegEax;\r
3eb69b08
JY
552 UINT32 RegEbx;\r
553 UINT32 RegEcx;\r
529a5a86
MK
554 UINT32 RegEdx;\r
555 UINTN FamilyId;\r
556 UINTN ModelId;\r
557 UINT32 Cr3;\r
558\r
e21e355e
LG
559 //\r
560 // Initialize address fixup\r
561 //\r
562 PiSmmCpuSmmInitFixupAddress ();\r
563 PiSmmCpuSmiEntryFixupAddress ();\r
564\r
529a5a86
MK
565 //\r
566 // Initialize Debug Agent to support source level debug in SMM code\r
567 //\r
568 InitializeDebugAgent (DEBUG_AGENT_INIT_SMM, NULL, NULL);\r
569\r
570 //\r
571 // Report the start of CPU SMM initialization.\r
572 //\r
573 REPORT_STATUS_CODE (\r
574 EFI_PROGRESS_CODE,\r
575 EFI_COMPUTING_UNIT_HOST_PROCESSOR | EFI_CU_HP_PC_SMM_INIT\r
576 );\r
577\r
529a5a86
MK
578 //\r
579 // Find out SMRR Base and SMRR Size\r
580 //\r
581 FindSmramInfo (&mCpuHotPlugData.SmrrBase, &mCpuHotPlugData.SmrrSize);\r
582\r
583 //\r
584 // Get MP Services Protocol\r
585 //\r
586 Status = SystemTable->BootServices->LocateProtocol (&gEfiMpServiceProtocolGuid, NULL, (VOID **)&MpServices);\r
587 ASSERT_EFI_ERROR (Status);\r
588\r
589 //\r
590 // Use MP Services Protocol to retrieve the number of processors and number of enabled processors\r
591 //\r
592 Status = MpServices->GetNumberOfProcessors (MpServices, &mNumberOfCpus, &NumberOfEnabledProcessors);\r
593 ASSERT_EFI_ERROR (Status);\r
594 ASSERT (mNumberOfCpus <= PcdGet32 (PcdCpuMaxLogicalProcessorNumber));\r
595\r
596 //\r
597 // If support CPU hot plug, PcdCpuSmmEnableBspElection should be set to TRUE.\r
598 // A constant BSP index makes no sense because it may be hot removed.\r
599 //\r
600 DEBUG_CODE (\r
601 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {\r
602\r
603 ASSERT (FeaturePcdGet (PcdCpuSmmEnableBspElection));\r
604 }\r
605 );\r
606\r
607 //\r
608 // Save the PcdCpuSmmCodeAccessCheckEnable value into a global variable.\r
609 //\r
610 mSmmCodeAccessCheckEnable = PcdGetBool (PcdCpuSmmCodeAccessCheckEnable);\r
96e1cba5 611 DEBUG ((DEBUG_INFO, "PcdCpuSmmCodeAccessCheckEnable = %d\n", mSmmCodeAccessCheckEnable));\r
529a5a86 612\r
241f9149
LD
613 //\r
614 // Save the PcdPteMemoryEncryptionAddressOrMask value into a global variable.\r
615 // Make sure AddressEncMask is contained to smallest supported address field.\r
616 //\r
617 mAddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;\r
96e1cba5 618 DEBUG ((DEBUG_INFO, "mAddressEncMask = 0x%lx\n", mAddressEncMask));\r
241f9149 619\r
529a5a86
MK
620 //\r
621 // If support CPU hot plug, we need to allocate resources for possibly hot-added processors\r
622 //\r
623 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {\r
624 mMaxNumberOfCpus = PcdGet32 (PcdCpuMaxLogicalProcessorNumber);\r
625 } else {\r
626 mMaxNumberOfCpus = mNumberOfCpus;\r
627 }\r
628 gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus = mMaxNumberOfCpus;\r
629\r
630 //\r
631 // The CPU save state and code for the SMI entry point are tiled within an SMRAM\r
632 // allocated buffer. The minimum size of this buffer for a uniprocessor system\r
633 // is 32 KB, because the entry point is SMBASE + 32KB, and CPU save state area\r
634 // just below SMBASE + 64KB. If more than one CPU is present in the platform,\r
635 // then the SMI entry point and the CPU save state areas can be tiles to minimize\r
636 // the total amount SMRAM required for all the CPUs. The tile size can be computed\r
637 // by adding the // CPU save state size, any extra CPU specific context, and\r
638 // the size of code that must be placed at the SMI entry point to transfer\r
639 // control to a C function in the native SMM execution mode. This size is\r
640 // rounded up to the nearest power of 2 to give the tile size for a each CPU.\r
641 // The total amount of memory required is the maximum number of CPUs that\r
642 // platform supports times the tile size. The picture below shows the tiling,\r
643 // where m is the number of tiles that fit in 32KB.\r
644 //\r
645 // +-----------------------------+ <-- 2^n offset from Base of allocated buffer\r
646 // | CPU m+1 Save State |\r
647 // +-----------------------------+\r
648 // | CPU m+1 Extra Data |\r
649 // +-----------------------------+\r
650 // | Padding |\r
651 // +-----------------------------+\r
652 // | CPU 2m SMI Entry |\r
653 // +#############################+ <-- Base of allocated buffer + 64 KB\r
654 // | CPU m-1 Save State |\r
655 // +-----------------------------+\r
656 // | CPU m-1 Extra Data |\r
657 // +-----------------------------+\r
658 // | Padding |\r
659 // +-----------------------------+\r
660 // | CPU 2m-1 SMI Entry |\r
661 // +=============================+ <-- 2^n offset from Base of allocated buffer\r
662 // | . . . . . . . . . . . . |\r
663 // +=============================+ <-- 2^n offset from Base of allocated buffer\r
664 // | CPU 2 Save State |\r
665 // +-----------------------------+\r
666 // | CPU 2 Extra Data |\r
667 // +-----------------------------+\r
668 // | Padding |\r
669 // +-----------------------------+\r
670 // | CPU m+1 SMI Entry |\r
671 // +=============================+ <-- Base of allocated buffer + 32 KB\r
672 // | CPU 1 Save State |\r
673 // +-----------------------------+\r
674 // | CPU 1 Extra Data |\r
675 // +-----------------------------+\r
676 // | Padding |\r
677 // +-----------------------------+\r
678 // | CPU m SMI Entry |\r
679 // +#############################+ <-- Base of allocated buffer + 32 KB == CPU 0 SMBASE + 64 KB\r
680 // | CPU 0 Save State |\r
681 // +-----------------------------+\r
682 // | CPU 0 Extra Data |\r
683 // +-----------------------------+\r
684 // | Padding |\r
685 // +-----------------------------+\r
686 // | CPU m-1 SMI Entry |\r
687 // +=============================+ <-- 2^n offset from Base of allocated buffer\r
688 // | . . . . . . . . . . . . |\r
689 // +=============================+ <-- 2^n offset from Base of allocated buffer\r
690 // | Padding |\r
691 // +-----------------------------+\r
692 // | CPU 1 SMI Entry |\r
693 // +=============================+ <-- 2^n offset from Base of allocated buffer\r
694 // | Padding |\r
695 // +-----------------------------+\r
696 // | CPU 0 SMI Entry |\r
697 // +#############################+ <-- Base of allocated buffer == CPU 0 SMBASE + 32 KB\r
698 //\r
699\r
700 //\r
701 // Retrieve CPU Family\r
702 //\r
e9b3a6c9 703 AsmCpuid (CPUID_VERSION_INFO, &RegEax, NULL, NULL, NULL);\r
529a5a86
MK
704 FamilyId = (RegEax >> 8) & 0xf;\r
705 ModelId = (RegEax >> 4) & 0xf;\r
706 if (FamilyId == 0x06 || FamilyId == 0x0f) {\r
707 ModelId = ModelId | ((RegEax >> 12) & 0xf0);\r
708 }\r
709\r
e9b3a6c9
MK
710 RegEdx = 0;\r
711 AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);\r
712 if (RegEax >= CPUID_EXTENDED_CPU_SIG) {\r
713 AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);\r
714 }\r
529a5a86
MK
715 //\r
716 // Determine the mode of the CPU at the time an SMI occurs\r
717 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual\r
718 // Volume 3C, Section 34.4.1.1\r
719 //\r
720 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT;\r
721 if ((RegEdx & BIT29) != 0) {\r
722 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;\r
723 }\r
724 if (FamilyId == 0x06) {\r
725 if (ModelId == 0x17 || ModelId == 0x0f || ModelId == 0x1c) {\r
726 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;\r
727 }\r
728 }\r
729\r
3eb69b08
JY
730 DEBUG ((DEBUG_INFO, "PcdControlFlowEnforcementPropertyMask = %d\n", PcdGet32 (PcdControlFlowEnforcementPropertyMask)));\r
731 if (PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) {\r
5d34cc49
WH
732 AsmCpuid (CPUID_SIGNATURE, &RegEax, NULL, NULL, NULL);\r
733 if (RegEax >= CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS) {\r
3eb69b08
JY
734 AsmCpuidEx (CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS, CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO, NULL, NULL, &RegEcx, &RegEdx);\r
735 DEBUG ((DEBUG_INFO, "CPUID[7/0] ECX - 0x%08x\n", RegEcx));\r
736 DEBUG ((DEBUG_INFO, " CET_SS - 0x%08x\n", RegEcx & CPUID_CET_SS));\r
737 DEBUG ((DEBUG_INFO, " CET_IBT - 0x%08x\n", RegEdx & CPUID_CET_IBT));\r
738 if ((RegEcx & CPUID_CET_SS) == 0) {\r
739 mCetSupported = FALSE;\r
740 PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);\r
741 }\r
742 if (mCetSupported) {\r
743 AsmCpuidEx (CPUID_EXTENDED_STATE, CPUID_EXTENDED_STATE_SUB_LEAF, NULL, &RegEbx, &RegEcx, NULL);\r
744 DEBUG ((DEBUG_INFO, "CPUID[D/1] EBX - 0x%08x, ECX - 0x%08x\n", RegEbx, RegEcx));\r
745 AsmCpuidEx (CPUID_EXTENDED_STATE, 11, &RegEax, NULL, &RegEcx, NULL);\r
746 DEBUG ((DEBUG_INFO, "CPUID[D/11] EAX - 0x%08x, ECX - 0x%08x\n", RegEax, RegEcx));\r
747 AsmCpuidEx(CPUID_EXTENDED_STATE, 12, &RegEax, NULL, &RegEcx, NULL);\r
748 DEBUG ((DEBUG_INFO, "CPUID[D/12] EAX - 0x%08x, ECX - 0x%08x\n", RegEax, RegEcx));\r
749 }\r
5d34cc49
WH
750 } else {\r
751 mCetSupported = FALSE;\r
752 PatchInstructionX86(mPatchCetSupported, mCetSupported, 1);\r
3eb69b08
JY
753 }\r
754 } else {\r
755 mCetSupported = FALSE;\r
756 PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);\r
757 }\r
758\r
529a5a86
MK
759 //\r
760 // Compute tile size of buffer required to hold the CPU SMRAM Save State Map, extra CPU\r
f12367a0
MK
761 // specific context start starts at SMBASE + SMM_PSD_OFFSET, and the SMI entry point.\r
762 // This size is rounded up to nearest power of 2.\r
529a5a86 763 //\r
ae82a30b
JY
764 TileCodeSize = GetSmiHandlerSize ();\r
765 TileCodeSize = ALIGN_VALUE(TileCodeSize, SIZE_4KB);\r
f12367a0 766 TileDataSize = (SMRAM_SAVE_STATE_MAP_OFFSET - SMM_PSD_OFFSET) + sizeof (SMRAM_SAVE_STATE_MAP);\r
ae82a30b
JY
767 TileDataSize = ALIGN_VALUE(TileDataSize, SIZE_4KB);\r
768 TileSize = TileDataSize + TileCodeSize - 1;\r
529a5a86 769 TileSize = 2 * GetPowerOfTwo32 ((UINT32)TileSize);\r
96e1cba5 770 DEBUG ((DEBUG_INFO, "SMRAM TileSize = 0x%08x (0x%08x, 0x%08x)\n", TileSize, TileCodeSize, TileDataSize));\r
529a5a86
MK
771\r
772 //\r
f12367a0
MK
773 // If the TileSize is larger than space available for the SMI Handler of\r
774 // CPU[i], the extra CPU specific context of CPU[i+1], and the SMRAM Save\r
775 // State Map of CPU[i+1], then ASSERT(). If this ASSERT() is triggered, then\r
776 // the SMI Handler size must be reduced or the size of the extra CPU specific\r
777 // context must be reduced.\r
529a5a86
MK
778 //\r
779 ASSERT (TileSize <= (SMRAM_SAVE_STATE_MAP_OFFSET + sizeof (SMRAM_SAVE_STATE_MAP) - SMM_HANDLER_OFFSET));\r
780\r
781 //\r
782 // Allocate buffer for all of the tiles.\r
783 //\r
784 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual\r
785 // Volume 3C, Section 34.11 SMBASE Relocation\r
786 // For Pentium and Intel486 processors, the SMBASE values must be\r
787 // aligned on a 32-KByte boundary or the processor will enter shutdown\r
788 // state during the execution of a RSM instruction.\r
789 //\r
790 // Intel486 processors: FamilyId is 4\r
791 // Pentium processors : FamilyId is 5\r
792 //\r
ae82a30b 793 BufferPages = EFI_SIZE_TO_PAGES (SIZE_32KB + TileSize * (mMaxNumberOfCpus - 1));\r
529a5a86 794 if ((FamilyId == 4) || (FamilyId == 5)) {\r
717fb604 795 Buffer = AllocateAlignedCodePages (BufferPages, SIZE_32KB);\r
529a5a86 796 } else {\r
717fb604 797 Buffer = AllocateAlignedCodePages (BufferPages, SIZE_4KB);\r
529a5a86
MK
798 }\r
799 ASSERT (Buffer != NULL);\r
96e1cba5 800 DEBUG ((DEBUG_INFO, "SMRAM SaveState Buffer (0x%08x, 0x%08x)\n", Buffer, EFI_PAGES_TO_SIZE(BufferPages)));\r
529a5a86
MK
801\r
802 //\r
803 // Allocate buffer for pointers to array in SMM_CPU_PRIVATE_DATA.\r
804 //\r
805 gSmmCpuPrivate->ProcessorInfo = (EFI_PROCESSOR_INFORMATION *)AllocatePool (sizeof (EFI_PROCESSOR_INFORMATION) * mMaxNumberOfCpus);\r
806 ASSERT (gSmmCpuPrivate->ProcessorInfo != NULL);\r
807\r
808 gSmmCpuPrivate->Operation = (SMM_CPU_OPERATION *)AllocatePool (sizeof (SMM_CPU_OPERATION) * mMaxNumberOfCpus);\r
809 ASSERT (gSmmCpuPrivate->Operation != NULL);\r
810\r
811 gSmmCpuPrivate->CpuSaveStateSize = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);\r
812 ASSERT (gSmmCpuPrivate->CpuSaveStateSize != NULL);\r
813\r
814 gSmmCpuPrivate->CpuSaveState = (VOID **)AllocatePool (sizeof (VOID *) * mMaxNumberOfCpus);\r
815 ASSERT (gSmmCpuPrivate->CpuSaveState != NULL);\r
816\r
817 mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveStateSize = gSmmCpuPrivate->CpuSaveStateSize;\r
818 mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveState = gSmmCpuPrivate->CpuSaveState;\r
529a5a86
MK
819\r
820 //\r
821 // Allocate buffer for pointers to array in CPU_HOT_PLUG_DATA.\r
822 //\r
823 mCpuHotPlugData.ApicId = (UINT64 *)AllocatePool (sizeof (UINT64) * mMaxNumberOfCpus);\r
824 ASSERT (mCpuHotPlugData.ApicId != NULL);\r
825 mCpuHotPlugData.SmBase = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);\r
826 ASSERT (mCpuHotPlugData.SmBase != NULL);\r
827 mCpuHotPlugData.ArrayLength = (UINT32)mMaxNumberOfCpus;\r
828\r
829 //\r
830 // Retrieve APIC ID of each enabled processor from the MP Services protocol.\r
831 // Also compute the SMBASE address, CPU Save State address, and CPU Save state\r
832 // size for each CPU in the platform\r
833 //\r
834 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {\r
835 mCpuHotPlugData.SmBase[Index] = (UINTN)Buffer + Index * TileSize - SMM_HANDLER_OFFSET;\r
836 gSmmCpuPrivate->CpuSaveStateSize[Index] = sizeof(SMRAM_SAVE_STATE_MAP);\r
837 gSmmCpuPrivate->CpuSaveState[Index] = (VOID *)(mCpuHotPlugData.SmBase[Index] + SMRAM_SAVE_STATE_MAP_OFFSET);\r
838 gSmmCpuPrivate->Operation[Index] = SmmCpuNone;\r
839\r
840 if (Index < mNumberOfCpus) {\r
841 Status = MpServices->GetProcessorInfo (MpServices, Index, &gSmmCpuPrivate->ProcessorInfo[Index]);\r
842 ASSERT_EFI_ERROR (Status);\r
843 mCpuHotPlugData.ApicId[Index] = gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId;\r
844\r
96e1cba5 845 DEBUG ((DEBUG_INFO, "CPU[%03x] APIC ID=%04x SMBASE=%08x SaveState=%08x Size=%08x\n",\r
529a5a86
MK
846 Index,\r
847 (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId,\r
848 mCpuHotPlugData.SmBase[Index],\r
849 gSmmCpuPrivate->CpuSaveState[Index],\r
850 gSmmCpuPrivate->CpuSaveStateSize[Index]\r
851 ));\r
852 } else {\r
853 gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId = INVALID_APIC_ID;\r
854 mCpuHotPlugData.ApicId[Index] = INVALID_APIC_ID;\r
855 }\r
856 }\r
857\r
858 //\r
859 // Allocate SMI stacks for all processors.\r
860 //\r
3eb69b08 861 mSmmStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)));\r
529a5a86
MK
862 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
863 //\r
455b0347
S
864 // SMM Stack Guard Enabled\r
865 // 2 more pages is allocated for each processor, one is guard page and the other is known good stack.\r
529a5a86 866 //\r
455b0347
S
867 // +--------------------------------------------------+-----+--------------------------------------------------+\r
868 // | Known Good Stack | Guard Page | SMM Stack | ... | Known Good Stack | Guard Page | SMM Stack |\r
869 // +--------------------------------------------------+-----+--------------------------------------------------+\r
870 // | 4K | 4K PcdCpuSmmStackSize| | 4K | 4K PcdCpuSmmStackSize|\r
871 // |<---------------- mSmmStackSize ----------------->| |<---------------- mSmmStackSize ----------------->|\r
872 // | | | |\r
873 // |<------------------ Processor 0 ----------------->| |<------------------ Processor n ----------------->|\r
529a5a86 874 //\r
3eb69b08
JY
875 mSmmStackSize += EFI_PAGES_TO_SIZE (2);\r
876 }\r
877\r
878 mSmmShadowStackSize = 0;\r
879 if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {\r
3eb69b08 880 mSmmShadowStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmShadowStackSize)));\r
455b0347 881\r
3eb69b08 882 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
455b0347
S
883 //\r
884 // SMM Stack Guard Enabled\r
885 // Append Shadow Stack after normal stack\r
886 // 2 more pages is allocated for each processor, one is guard page and the other is known good shadow stack.\r
887 //\r
888 // |= Stacks\r
889 // +--------------------------------------------------+---------------------------------------------------------------+\r
890 // | Known Good Stack | Guard Page | SMM Stack | Known Good Shadow Stack | Guard Page | SMM Shadow Stack |\r
891 // +--------------------------------------------------+---------------------------------------------------------------+\r
892 // | 4K | 4K |PcdCpuSmmStackSize| 4K | 4K |PcdCpuSmmShadowStackSize|\r
893 // |<---------------- mSmmStackSize ----------------->|<--------------------- mSmmShadowStackSize ------------------->|\r
894 // | |\r
895 // |<-------------------------------------------- Processor N ------------------------------------------------------->|\r
896 //\r
3eb69b08 897 mSmmShadowStackSize += EFI_PAGES_TO_SIZE (2);\r
455b0347
S
898 } else {\r
899 //\r
900 // SMM Stack Guard Disabled (Known Good Stack is still required for potential stack switch.)\r
901 // Append Shadow Stack after normal stack with 1 more page as known good shadow stack.\r
902 // 1 more pages is allocated for each processor, it is known good stack.\r
903 //\r
904 //\r
905 // |= Stacks\r
906 // +-------------------------------------+--------------------------------------------------+\r
907 // | Known Good Stack | SMM Stack | Known Good Shadow Stack | SMM Shadow Stack |\r
908 // +-------------------------------------+--------------------------------------------------+\r
909 // | 4K |PcdCpuSmmStackSize| 4K |PcdCpuSmmShadowStackSize|\r
910 // |<---------- mSmmStackSize ---------->|<--------------- mSmmShadowStackSize ------------>|\r
911 // | |\r
912 // |<-------------------------------- Processor N ----------------------------------------->|\r
913 //\r
914 mSmmShadowStackSize += EFI_PAGES_TO_SIZE (1);\r
915 mSmmStackSize += EFI_PAGES_TO_SIZE (1);\r
3eb69b08
JY
916 }\r
917 }\r
918\r
919 Stacks = (UINT8 *) AllocatePages (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (EFI_SIZE_TO_PAGES (mSmmStackSize + mSmmShadowStackSize)));\r
920 ASSERT (Stacks != NULL);\r
921 mSmmStackArrayBase = (UINTN)Stacks;\r
922 mSmmStackArrayEnd = mSmmStackArrayBase + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (mSmmStackSize + mSmmShadowStackSize) - 1;\r
923\r
924 DEBUG ((DEBUG_INFO, "Stacks - 0x%x\n", Stacks));\r
925 DEBUG ((DEBUG_INFO, "mSmmStackSize - 0x%x\n", mSmmStackSize));\r
926 DEBUG ((DEBUG_INFO, "PcdCpuSmmStackGuard - 0x%x\n", FeaturePcdGet (PcdCpuSmmStackGuard)));\r
927 if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {\r
928 DEBUG ((DEBUG_INFO, "mSmmShadowStackSize - 0x%x\n", mSmmShadowStackSize));\r
529a5a86
MK
929 }\r
930\r
931 //\r
932 // Set SMI stack for SMM base relocation\r
933 //\r
5830d2c3
LE
934 PatchInstructionX86 (\r
935 gPatchSmmInitStack,\r
936 (UINTN) (Stacks + mSmmStackSize - sizeof (UINTN)),\r
937 sizeof (UINTN)\r
938 );\r
529a5a86
MK
939\r
940 //\r
941 // Initialize IDT\r
942 //\r
943 InitializeSmmIdt ();\r
944\r
945 //\r
946 // Relocate SMM Base addresses to the ones allocated from SMRAM\r
947 //\r
948 mRebased = (BOOLEAN *)AllocateZeroPool (sizeof (BOOLEAN) * mMaxNumberOfCpus);\r
949 ASSERT (mRebased != NULL);\r
950 SmmRelocateBases ();\r
951\r
952 //\r
953 // Call hook for BSP to perform extra actions in normal mode after all\r
954 // SMM base addresses have been relocated on all CPUs\r
955 //\r
956 SmmCpuFeaturesSmmRelocationComplete ();\r
957\r
717fb604
JY
958 DEBUG ((DEBUG_INFO, "mXdSupported - 0x%x\n", mXdSupported));\r
959\r
529a5a86
MK
960 //\r
961 // SMM Time initialization\r
962 //\r
963 InitializeSmmTimer ();\r
964\r
965 //\r
966 // Initialize MP globals\r
967 //\r
3eb69b08
JY
968 Cr3 = InitializeMpServiceData (Stacks, mSmmStackSize, mSmmShadowStackSize);\r
969\r
970 if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {\r
971 for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {\r
972 SetShadowStack (\r
973 Cr3,\r
974 (EFI_PHYSICAL_ADDRESS)(UINTN)Stacks + mSmmStackSize + (mSmmStackSize + mSmmShadowStackSize) * Index,\r
975 mSmmShadowStackSize\r
976 );\r
977 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
978 SetNotPresentPage (\r
979 Cr3,\r
980 (EFI_PHYSICAL_ADDRESS)(UINTN)Stacks + mSmmStackSize + EFI_PAGES_TO_SIZE(1) + (mSmmStackSize + mSmmShadowStackSize) * Index,\r
981 EFI_PAGES_TO_SIZE(1)\r
982 );\r
983 }\r
984 }\r
985 }\r
529a5a86
MK
986\r
987 //\r
988 // Fill in SMM Reserved Regions\r
989 //\r
990 gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedStart = 0;\r
991 gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedSize = 0;\r
992\r
993 //\r
994 // Install the SMM Configuration Protocol onto a new handle on the handle database.\r
995 // The entire SMM Configuration Protocol is allocated from SMRAM, so only a pointer\r
996 // to an SMRAM address will be present in the handle database\r
997 //\r
998 Status = SystemTable->BootServices->InstallMultipleProtocolInterfaces (\r
999 &gSmmCpuPrivate->SmmCpuHandle,\r
1000 &gEfiSmmConfigurationProtocolGuid, &gSmmCpuPrivate->SmmConfiguration,\r
1001 NULL\r
1002 );\r
1003 ASSERT_EFI_ERROR (Status);\r
1004\r
1005 //\r
1006 // Install the SMM CPU Protocol into SMM protocol database\r
1007 //\r
1008 Status = gSmst->SmmInstallProtocolInterface (\r
1009 &mSmmCpuHandle,\r
1010 &gEfiSmmCpuProtocolGuid,\r
1011 EFI_NATIVE_INTERFACE,\r
1012 &mSmmCpu\r
1013 );\r
1014 ASSERT_EFI_ERROR (Status);\r
1015\r
827330cc
JW
1016 //\r
1017 // Install the SMM Memory Attribute Protocol into SMM protocol database\r
1018 //\r
1019 Status = gSmst->SmmInstallProtocolInterface (\r
1020 &mSmmCpuHandle,\r
1021 &gEdkiiSmmMemoryAttributeProtocolGuid,\r
1022 EFI_NATIVE_INTERFACE,\r
1023 &mSmmMemoryAttribute\r
1024 );\r
1025 ASSERT_EFI_ERROR (Status);\r
1026\r
51dd408a
ED
1027 //\r
1028 // Initialize global buffer for MM MP.\r
1029 //\r
1030 InitializeDataForMmMp ();\r
1031\r
1032 //\r
1033 // Install the SMM Mp Protocol into SMM protocol database\r
1034 //\r
1035 Status = gSmst->SmmInstallProtocolInterface (\r
1036 &mSmmCpuHandle,\r
1037 &gEfiMmMpProtocolGuid,\r
1038 EFI_NATIVE_INTERFACE,\r
1039 &mSmmMp\r
1040 );\r
1041 ASSERT_EFI_ERROR (Status);\r
1042\r
529a5a86
MK
1043 //\r
1044 // Expose address of CPU Hot Plug Data structure if CPU hot plug is supported.\r
1045 //\r
1046 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {\r
9838b016
MK
1047 Status = PcdSet64S (PcdCpuHotPlugDataAddress, (UINT64)(UINTN)&mCpuHotPlugData);\r
1048 ASSERT_EFI_ERROR (Status);\r
529a5a86
MK
1049 }\r
1050\r
1051 //\r
1052 // Initialize SMM CPU Services Support\r
1053 //\r
1054 Status = InitializeSmmCpuServices (mSmmCpuHandle);\r
1055 ASSERT_EFI_ERROR (Status);\r
1056\r
529a5a86
MK
1057 //\r
1058 // register SMM Ready To Lock Protocol notification\r
1059 //\r
1060 Status = gSmst->SmmRegisterProtocolNotify (\r
1061 &gEfiSmmReadyToLockProtocolGuid,\r
1062 SmmReadyToLockEventNotify,\r
1063 &Registration\r
1064 );\r
1065 ASSERT_EFI_ERROR (Status);\r
1066\r
529a5a86
MK
1067 //\r
1068 // Initialize SMM Profile feature\r
1069 //\r
1070 InitSmmProfile (Cr3);\r
1071\r
b10d5ddc 1072 GetAcpiS3EnableFlag ();\r
0bdc9e75 1073 InitSmmS3ResumeState (Cr3);\r
529a5a86 1074\r
96e1cba5 1075 DEBUG ((DEBUG_INFO, "SMM CPU Module exit from SMRAM with EFI_SUCCESS\n"));\r
529a5a86
MK
1076\r
1077 return EFI_SUCCESS;\r
1078}\r
1079\r
1080/**\r
1081\r
1082 Find out SMRAM information including SMRR base and SMRR size.\r
1083\r
1084 @param SmrrBase SMRR base\r
1085 @param SmrrSize SMRR size\r
1086\r
1087**/\r
1088VOID\r
1089FindSmramInfo (\r
1090 OUT UINT32 *SmrrBase,\r
1091 OUT UINT32 *SmrrSize\r
1092 )\r
1093{\r
1094 EFI_STATUS Status;\r
1095 UINTN Size;\r
1096 EFI_SMM_ACCESS2_PROTOCOL *SmmAccess;\r
1097 EFI_SMRAM_DESCRIPTOR *CurrentSmramRange;\r
529a5a86
MK
1098 UINTN Index;\r
1099 UINT64 MaxSize;\r
1100 BOOLEAN Found;\r
1101\r
1102 //\r
1103 // Get SMM Access Protocol\r
1104 //\r
1105 Status = gBS->LocateProtocol (&gEfiSmmAccess2ProtocolGuid, NULL, (VOID **)&SmmAccess);\r
1106 ASSERT_EFI_ERROR (Status);\r
1107\r
1108 //\r
1109 // Get SMRAM information\r
1110 //\r
1111 Size = 0;\r
1112 Status = SmmAccess->GetCapabilities (SmmAccess, &Size, NULL);\r
1113 ASSERT (Status == EFI_BUFFER_TOO_SMALL);\r
1114\r
7ed6f781
JF
1115 mSmmCpuSmramRanges = (EFI_SMRAM_DESCRIPTOR *)AllocatePool (Size);\r
1116 ASSERT (mSmmCpuSmramRanges != NULL);\r
529a5a86 1117\r
7ed6f781 1118 Status = SmmAccess->GetCapabilities (SmmAccess, &Size, mSmmCpuSmramRanges);\r
529a5a86
MK
1119 ASSERT_EFI_ERROR (Status);\r
1120\r
7ed6f781 1121 mSmmCpuSmramRangeCount = Size / sizeof (EFI_SMRAM_DESCRIPTOR);\r
529a5a86
MK
1122\r
1123 //\r
1124 // Find the largest SMRAM range between 1MB and 4GB that is at least 256K - 4K in size\r
1125 //\r
1126 CurrentSmramRange = NULL;\r
7ed6f781 1127 for (Index = 0, MaxSize = SIZE_256KB - EFI_PAGE_SIZE; Index < mSmmCpuSmramRangeCount; Index++) {\r
529a5a86
MK
1128 //\r
1129 // Skip any SMRAM region that is already allocated, needs testing, or needs ECC initialization\r
1130 //\r
7ed6f781 1131 if ((mSmmCpuSmramRanges[Index].RegionState & (EFI_ALLOCATED | EFI_NEEDS_TESTING | EFI_NEEDS_ECC_INITIALIZATION)) != 0) {\r
529a5a86
MK
1132 continue;\r
1133 }\r
1134\r
7ed6f781
JF
1135 if (mSmmCpuSmramRanges[Index].CpuStart >= BASE_1MB) {\r
1136 if ((mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize) <= SMRR_MAX_ADDRESS) {\r
1137 if (mSmmCpuSmramRanges[Index].PhysicalSize >= MaxSize) {\r
1138 MaxSize = mSmmCpuSmramRanges[Index].PhysicalSize;\r
1139 CurrentSmramRange = &mSmmCpuSmramRanges[Index];\r
529a5a86
MK
1140 }\r
1141 }\r
1142 }\r
1143 }\r
1144\r
1145 ASSERT (CurrentSmramRange != NULL);\r
1146\r
1147 *SmrrBase = (UINT32)CurrentSmramRange->CpuStart;\r
1148 *SmrrSize = (UINT32)CurrentSmramRange->PhysicalSize;\r
1149\r
1150 do {\r
1151 Found = FALSE;\r
7ed6f781
JF
1152 for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {\r
1153 if (mSmmCpuSmramRanges[Index].CpuStart < *SmrrBase &&\r
1154 *SmrrBase == (mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize)) {\r
1155 *SmrrBase = (UINT32)mSmmCpuSmramRanges[Index].CpuStart;\r
1156 *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);\r
529a5a86 1157 Found = TRUE;\r
7ed6f781
JF
1158 } else if ((*SmrrBase + *SmrrSize) == mSmmCpuSmramRanges[Index].CpuStart && mSmmCpuSmramRanges[Index].PhysicalSize > 0) {\r
1159 *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);\r
529a5a86
MK
1160 Found = TRUE;\r
1161 }\r
1162 }\r
1163 } while (Found);\r
1164\r
96e1cba5 1165 DEBUG ((DEBUG_INFO, "SMRR Base: 0x%x, SMRR Size: 0x%x\n", *SmrrBase, *SmrrSize));\r
529a5a86
MK
1166}\r
1167\r
1168/**\r
1169Configure SMM Code Access Check feature on an AP.\r
1170SMM Feature Control MSR will be locked after configuration.\r
1171\r
1172@param[in,out] Buffer Pointer to private data buffer.\r
1173**/\r
1174VOID\r
1175EFIAPI\r
1176ConfigSmmCodeAccessCheckOnCurrentProcessor (\r
1177 IN OUT VOID *Buffer\r
1178 )\r
1179{\r
1180 UINTN CpuIndex;\r
1181 UINT64 SmmFeatureControlMsr;\r
1182 UINT64 NewSmmFeatureControlMsr;\r
1183\r
1184 //\r
1185 // Retrieve the CPU Index from the context passed in\r
1186 //\r
1187 CpuIndex = *(UINTN *)Buffer;\r
1188\r
1189 //\r
1190 // Get the current SMM Feature Control MSR value\r
1191 //\r
1192 SmmFeatureControlMsr = SmmCpuFeaturesGetSmmRegister (CpuIndex, SmmRegFeatureControl);\r
1193\r
1194 //\r
1195 // Compute the new SMM Feature Control MSR value\r
1196 //\r
1197 NewSmmFeatureControlMsr = SmmFeatureControlMsr;\r
1198 if (mSmmCodeAccessCheckEnable) {\r
1199 NewSmmFeatureControlMsr |= SMM_CODE_CHK_EN_BIT;\r
f6bc3a6d
JF
1200 if (FeaturePcdGet (PcdCpuSmmFeatureControlMsrLock)) {\r
1201 NewSmmFeatureControlMsr |= SMM_FEATURE_CONTROL_LOCK_BIT;\r
1202 }\r
529a5a86
MK
1203 }\r
1204\r
1205 //\r
1206 // Only set the SMM Feature Control MSR value if the new value is different than the current value\r
1207 //\r
1208 if (NewSmmFeatureControlMsr != SmmFeatureControlMsr) {\r
1209 SmmCpuFeaturesSetSmmRegister (CpuIndex, SmmRegFeatureControl, NewSmmFeatureControlMsr);\r
1210 }\r
1211\r
1212 //\r
1213 // Release the spin lock user to serialize the updates to the SMM Feature Control MSR\r
1214 //\r
fe3a75bc 1215 ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);\r
529a5a86
MK
1216}\r
1217\r
/**
Configure SMM Code Access Check feature for all processors.
SMM Feature Control MSR will be locked after configuration.

Uses a spin-lock hand-off protocol: the BSP acquires
mConfigSmmCodeAccessCheckLock before starting each processor, and
ConfigSmmCodeAccessCheckOnCurrentProcessor() releases it when done, so the
MSR updates are fully serialized across CPUs.
**/
VOID
ConfigSmmCodeAccessCheck (
  VOID
  )
{
  UINTN Index;
  EFI_STATUS Status;

  //
  // Check to see if the Feature Control MSR is supported on this CPU
  //
  Index = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
  if (!SmmCpuFeaturesIsSmmRegisterSupported (Index, SmmRegFeatureControl)) {
    mSmmCodeAccessCheckEnable = FALSE;
    return;
  }

  //
  // Check to see if the CPU supports the SMM Code Access Check feature
  // Do not access this MSR unless the CPU supports the SmmRegFeatureControl
  //
  if ((AsmReadMsr64 (EFI_MSR_SMM_MCA_CAP) & SMM_CODE_ACCESS_CHK_BIT) == 0) {
    mSmmCodeAccessCheckEnable = FALSE;
    return;
  }

  //
  // Initialize the lock used to serialize the MSR programming in BSP and all APs
  //
  InitializeSpinLock (mConfigSmmCodeAccessCheckLock);

  //
  // Acquire Config SMM Code Access Check spin lock.  The BSP will release the
  // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
  //
  AcquireSpinLock (mConfigSmmCodeAccessCheckLock);

  //
  // Enable SMM Code Access Check feature on the BSP.
  // (Called directly; this also releases the lock acquired just above.)
  //
  ConfigSmmCodeAccessCheckOnCurrentProcessor (&Index);

  //
  // Enable SMM Code Access Check feature for the APs.
  //
  for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {
    if (Index != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
      if (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId == INVALID_APIC_ID) {
        //
        // If this processor does not exist
        //
        continue;
      }
      //
      // Acquire Config SMM Code Access Check spin lock.  The AP will release the
      // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
      //
      AcquireSpinLock (mConfigSmmCodeAccessCheckLock);

      //
      // Call SmmStartupThisAp() to enable SMM Code Access Check on an AP.
      // Note: Index is passed by address and reused as the loop counter;
      // this is safe because the BSP blocks below until the AP is finished.
      //
      Status = gSmst->SmmStartupThisAp (ConfigSmmCodeAccessCheckOnCurrentProcessor, Index, &Index);
      ASSERT_EFI_ERROR (Status);

      //
      // Wait for the AP to release the Config SMM Code Access Check spin lock.
      //
      while (!AcquireSpinLockOrFail (mConfigSmmCodeAccessCheckLock)) {
        CpuPause ();
      }

      //
      // Release the Config SMM Code Access Check spin lock.
      // (The successful AcquireSpinLockOrFail above re-took the lock, so it
      // must be released before the next loop iteration acquires it again.)
      //
      ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);
    }
  }
}
1301\r
21c17193
JY
1302/**\r
1303 This API provides a way to allocate memory for page table.\r
1304\r
1305 This API can be called more once to allocate memory for page tables.\r
1306\r
1307 Allocates the number of 4KB pages of type EfiRuntimeServicesData and returns a pointer to the\r
1308 allocated buffer. The buffer returned is aligned on a 4KB boundary. If Pages is 0, then NULL\r
1309 is returned. If there is not enough memory remaining to satisfy the request, then NULL is\r
1310 returned.\r
1311\r
1312 @param Pages The number of 4 KB pages to allocate.\r
1313\r
1314 @return A pointer to the allocated buffer or NULL if allocation fails.\r
1315\r
1316**/\r
1317VOID *\r
1318AllocatePageTableMemory (\r
1319 IN UINTN Pages\r
1320 )\r
1321{\r
1322 VOID *Buffer;\r
1323\r
1324 Buffer = SmmCpuFeaturesAllocatePageTableMemory (Pages);\r
1325 if (Buffer != NULL) {\r
1326 return Buffer;\r
1327 }\r
1328 return AllocatePages (Pages);\r
1329}\r
1330\r
717fb604
JY
1331/**\r
1332 Allocate pages for code.\r
1333\r
1334 @param[in] Pages Number of pages to be allocated.\r
1335\r
1336 @return Allocated memory.\r
1337**/\r
1338VOID *\r
1339AllocateCodePages (\r
1340 IN UINTN Pages\r
1341 )\r
1342{\r
1343 EFI_STATUS Status;\r
1344 EFI_PHYSICAL_ADDRESS Memory;\r
1345\r
1346 if (Pages == 0) {\r
1347 return NULL;\r
1348 }\r
1349\r
1350 Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);\r
1351 if (EFI_ERROR (Status)) {\r
1352 return NULL;\r
1353 }\r
1354 return (VOID *) (UINTN) Memory;\r
1355}\r
1356\r
1357/**\r
1358 Allocate aligned pages for code.\r
1359\r
1360 @param[in] Pages Number of pages to be allocated.\r
1361 @param[in] Alignment The requested alignment of the allocation.\r
1362 Must be a power of two.\r
1363 If Alignment is zero, then byte alignment is used.\r
1364\r
1365 @return Allocated memory.\r
1366**/\r
1367VOID *\r
1368AllocateAlignedCodePages (\r
1369 IN UINTN Pages,\r
1370 IN UINTN Alignment\r
1371 )\r
1372{\r
1373 EFI_STATUS Status;\r
1374 EFI_PHYSICAL_ADDRESS Memory;\r
1375 UINTN AlignedMemory;\r
1376 UINTN AlignmentMask;\r
1377 UINTN UnalignedPages;\r
1378 UINTN RealPages;\r
1379\r
1380 //\r
1381 // Alignment must be a power of two or zero.\r
1382 //\r
1383 ASSERT ((Alignment & (Alignment - 1)) == 0);\r
1384\r
1385 if (Pages == 0) {\r
1386 return NULL;\r
1387 }\r
1388 if (Alignment > EFI_PAGE_SIZE) {\r
1389 //\r
1390 // Calculate the total number of pages since alignment is larger than page size.\r
1391 //\r
1392 AlignmentMask = Alignment - 1;\r
1393 RealPages = Pages + EFI_SIZE_TO_PAGES (Alignment);\r
1394 //\r
1395 // Make sure that Pages plus EFI_SIZE_TO_PAGES (Alignment) does not overflow.\r
1396 //\r
1397 ASSERT (RealPages > Pages);\r
1398\r
1399 Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, RealPages, &Memory);\r
1400 if (EFI_ERROR (Status)) {\r
1401 return NULL;\r
1402 }\r
1403 AlignedMemory = ((UINTN) Memory + AlignmentMask) & ~AlignmentMask;\r
1404 UnalignedPages = EFI_SIZE_TO_PAGES (AlignedMemory - (UINTN) Memory);\r
1405 if (UnalignedPages > 0) {\r
1406 //\r
1407 // Free first unaligned page(s).\r
1408 //\r
1409 Status = gSmst->SmmFreePages (Memory, UnalignedPages);\r
1410 ASSERT_EFI_ERROR (Status);\r
1411 }\r
8491e302 1412 Memory = AlignedMemory + EFI_PAGES_TO_SIZE (Pages);\r
717fb604
JY
1413 UnalignedPages = RealPages - Pages - UnalignedPages;\r
1414 if (UnalignedPages > 0) {\r
1415 //\r
1416 // Free last unaligned page(s).\r
1417 //\r
1418 Status = gSmst->SmmFreePages (Memory, UnalignedPages);\r
1419 ASSERT_EFI_ERROR (Status);\r
1420 }\r
1421 } else {\r
1422 //\r
1423 // Do not over-allocate pages in this case.\r
1424 //\r
1425 Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);\r
1426 if (EFI_ERROR (Status)) {\r
1427 return NULL;\r
1428 }\r
1429 AlignedMemory = (UINTN) Memory;\r
1430 }\r
1431 return (VOID *) AlignedMemory;\r
1432}\r
1433\r
529a5a86
MK
/**
  Perform the remaining tasks.

  Runs the SMM lock-down sequence once mSmmReadyToLock has been set by the
  SMM Ready To Lock notification.  The ordering below matters: all page
  table contents must be finalized before the page table itself is made
  read-only.
**/
VOID
PerformRemainingTasks (
  VOID
  )
{
  if (mSmmReadyToLock) {
    //
    // Start SMM Profile feature
    //
    if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
      SmmProfileStart ();
    }
    //
    // Create a mix of 2MB and 4KB page table. Update some memory ranges absent and execute-disable.
    //
    InitPaging ();

    //
    // Mark critical region to be read-only in page table
    //
    SetMemMapAttributes ();

    if (IsRestrictedMemoryAccess ()) {
      //
      // For outside SMRAM, we only map SMM communication buffer or MMIO.
      //
      SetUefiMemMapAttributes ();

      //
      // Set page table itself to be read-only
      // (must be the last page-table modification in this sequence)
      //
      SetPageTableAttributes ();
    }

    //
    // Configure SMM Code Access Check feature if available.
    //
    ConfigSmmCodeAccessCheck ();

    //
    // Let the platform CPU features library perform its own ready-to-lock actions.
    //
    SmmCpuFeaturesCompleteSmmReadyToLock ();

    //
    // Clean SMM ready to lock flag so this sequence runs only once.
    //
    mSmmReadyToLock = FALSE;
  }
}
9f419739
JY
1485\r
/**
  Perform the pre tasks.

  Currently the only pre task is restoring the SMM configuration on the
  ACPI S3 resume path (see RestoreSmmConfigurationInS3).
**/
VOID
PerformPreTasks (
  VOID
  )
{
  RestoreSmmConfigurationInS3 ();
}