]> git.proxmox.com Git - mirror_edk2.git/blame_incremental - UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.c
UefiCpuPkg: Replace BSD License with BSD+Patent License
[mirror_edk2.git] / UefiCpuPkg / PiSmmCpuDxeSmm / PiSmmCpuDxeSmm.c
... / ...
CommitLineData
1/** @file\r
2Agent Module to load other modules to deploy SMM Entry Vector for X86 CPU.\r
3\r
4Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>\r
5Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>\r
6\r
7SPDX-License-Identifier: BSD-2-Clause-Patent\r
8\r
9**/\r
10\r
11#include "PiSmmCpuDxeSmm.h"\r
12\r
13//\r
14// SMM CPU Private Data structure that contains SMM Configuration Protocol\r
15// along its supporting fields.\r
16//\r
//
// SMM CPU Private Data structure that contains SMM Configuration Protocol
// along its supporting fields.
//
// NOTE: SmmConfiguration.SmramReservedRegions is a self-reference into this
// same structure; the reserved-region entry itself is filled in later by
// PiCpuSmmEntry.  The array pointers are allocated in PiCpuSmmEntry as well.
//
SMM_CPU_PRIVATE_DATA mSmmCpuPrivateData = {
  SMM_CPU_PRIVATE_DATA_SIGNATURE,               // Signature
  NULL,                                         // SmmCpuHandle
  NULL,                                         // Pointer to ProcessorInfo array
  NULL,                                         // Pointer to Operation array
  NULL,                                         // Pointer to CpuSaveStateSize array
  NULL,                                         // Pointer to CpuSaveState array
  { {0} },                                      // SmmReservedSmramRegion
  {
    SmmStartupThisAp,                           // SmmCoreEntryContext.SmmStartupThisAp
    0,                                          // SmmCoreEntryContext.CurrentlyExecutingCpu
    0,                                          // SmmCoreEntryContext.NumberOfCpus
    NULL,                                       // SmmCoreEntryContext.CpuSaveStateSize
    NULL                                        // SmmCoreEntryContext.CpuSaveState
  },
  NULL,                                         // SmmCoreEntry
  {
    mSmmCpuPrivateData.SmmReservedSmramRegion,  // SmmConfiguration.SmramReservedRegions
    RegisterSmmEntry                            // SmmConfiguration.RegisterSmmEntry
  },
};
38\r
//
// CPU hot-plug data handed to SmmCpuFeaturesInitializeProcessor().  The
// ApicId/SmBase arrays, ArrayLength, and the SMRR range are all populated
// by PiCpuSmmEntry before relocation.
//
CPU_HOT_PLUG_DATA mCpuHotPlugData = {
  CPU_HOT_PLUG_DATA_REVISION_1, // Revision
  0,                            // Array Length of SmBase and APIC ID
  NULL,                         // Pointer to APIC ID array
  NULL,                         // Pointer to SMBASE array
  0,                            // Reserved
  0,                            // SmrrBase
  0                             // SmrrSize
};
48\r
//
// Global pointer used to access mSmmCpuPrivateData from outside and inside SMM
//
SMM_CPU_PRIVATE_DATA *gSmmCpuPrivate = &mSmmCpuPrivateData;

//
// SMM Relocation variables.
//
// mRebased[Index] is set via SemaphoreHook() from each CPU's first SMI
// (SmmInitHandler) and is spin-polled by SmmRelocateBases; mIsBsp tells
// SmmInitHandler whether the CPU being relocated is the BSP.  Both are
// volatile because they are written inside SMM and polled outside it.
//
volatile BOOLEAN *mRebased;
volatile BOOLEAN mIsBsp;

///
/// Handle for the SMM CPU Protocol
///
EFI_HANDLE mSmmCpuHandle = NULL;

///
/// SMM CPU Protocol instance (save-state read/write accessors)
///
EFI_SMM_CPU_PROTOCOL mSmmCpu = {
  SmmReadSaveState,
  SmmWriteSaveState
};

///
/// SMM Memory Attribute Protocol instance
///
EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL mSmmMemoryAttribute = {
  EdkiiSmmGetMemoryAttributes,
  EdkiiSmmSetMemoryAttributes,
  EdkiiSmmClearMemoryAttributes
};

//
// Externally registered interrupt handlers, one slot per exception vector.
// NOTE(review): not referenced in this chunk -- populated elsewhere.
//
EFI_CPU_INTERRUPT_HANDLER mExternalVectorTable[EXCEPTION_VECTOR_NUMBER];

//
// SMM stack information: base/end of the per-CPU stack array and the size
// of one CPU's stack region (set up in PiCpuSmmEntry).
//
UINTN mSmmStackArrayBase;
UINTN mSmmStackArrayEnd;
UINTN mSmmStackSize;

// Per-CPU CET shadow-stack region size; stays 0 when CET is not enabled.
UINTN mSmmShadowStackSize;
// Assume CET is usable until the CPUID probe in PiCpuSmmEntry proves otherwise.
BOOLEAN mCetSupported = TRUE;

// Processor counts; both default to 1 and are refreshed from the MP Services
// protocol (and PcdCpuMaxLogicalProcessorNumber) in PiCpuSmmEntry.
UINTN mMaxNumberOfCpus = 1;
UINTN mNumberOfCpus = 1;

//
// SMM ready to lock flag, set by SmmReadyToLockEventNotify
//
BOOLEAN mSmmReadyToLock = FALSE;

//
// Global used to cache PCD for SMM Code Access Check enable
//
BOOLEAN mSmmCodeAccessCheckEnable = FALSE;

//
// Global copy of the PcdPteMemoryEncryptionAddressOrMask
//
UINT64 mAddressEncMask = 0;

//
// Spin lock used to serialize setting of SMM Code Access Check feature
//
SPIN_LOCK *mConfigSmmCodeAccessCheckLock = NULL;

//
// Saved SMM ranges information
//
EFI_SMRAM_DESCRIPTOR *mSmmCpuSmramRanges;
UINTN mSmmCpuSmramRangeCount;

// Physical address width of the processor.
// NOTE(review): the assignment is not visible in this chunk -- confirm it is
// initialized before any consumer runs.
UINT8 mPhysicalAddressBits;

//
// Control register contents saved for SMM S3 resume state initialization.
// Captured in SmmRelocateBases when patching the SmmInit ASM template.
//
UINT32 mSmmCr0;
UINT32 mSmmCr4;
/**
  Initialize IDT to setup exception handlers for SMM.

  Allocates a page-aligned IDT covering the 32 processor exception vectors,
  temporarily loads it (with interrupts disabled) so that
  InitializeCpuExceptionHandlers() can populate its gates, then restores the
  previous DXE IDT and the saved interrupt state.  The populated table
  remains available through gcSmiIdtr for use while in SMM.

**/
VOID
InitializeSmmIdt (
  VOID
  )
{
  EFI_STATUS               Status;
  BOOLEAN                  InterruptState;
  IA32_DESCRIPTOR          DxeIdtr;

  //
  // There are 32 (not 255) entries in it since only processor
  // generated exceptions will be handled.
  //
  gcSmiIdtr.Limit = (sizeof(IA32_IDT_GATE_DESCRIPTOR) * 32) - 1;
  //
  // Allocate page aligned IDT, because it might be set as read only.
  //
  gcSmiIdtr.Base = (UINTN)AllocateCodePages (EFI_SIZE_TO_PAGES(gcSmiIdtr.Limit + 1));
  ASSERT (gcSmiIdtr.Base != 0);
  ZeroMem ((VOID *)gcSmiIdtr.Base, gcSmiIdtr.Limit + 1);

  //
  // Disable Interrupt and save DXE IDT table.  Interrupts must stay off
  // while the temporary IDT is active.
  //
  InterruptState = SaveAndDisableInterrupts ();
  AsmReadIdtr (&DxeIdtr);
  //
  // Load SMM temporary IDT table
  //
  AsmWriteIdtr (&gcSmiIdtr);
  //
  // Setup SMM default exception handlers, SMM IDT table
  // will be updated and saved in gcSmiIdtr
  //
  Status = InitializeCpuExceptionHandlers (NULL);
  ASSERT_EFI_ERROR (Status);
  //
  // Restore DXE IDT table and CPU interrupt
  //
  AsmWriteIdtr ((IA32_DESCRIPTOR *) &DxeIdtr);
  SetInterruptState (InterruptState);
}
177\r
178/**\r
179 Search module name by input IP address and output it.\r
180\r
181 @param CallerIpAddress Caller instruction pointer.\r
182\r
183**/\r
184VOID\r
185DumpModuleInfoByIp (\r
186 IN UINTN CallerIpAddress\r
187 )\r
188{\r
189 UINTN Pe32Data;\r
190 VOID *PdbPointer;\r
191\r
192 //\r
193 // Find Image Base\r
194 //\r
195 Pe32Data = PeCoffSearchImageBase (CallerIpAddress);\r
196 if (Pe32Data != 0) {\r
197 DEBUG ((DEBUG_ERROR, "It is invoked from the instruction before IP(0x%p)", (VOID *) CallerIpAddress));\r
198 PdbPointer = PeCoffLoaderGetPdbPointer ((VOID *) Pe32Data);\r
199 if (PdbPointer != NULL) {\r
200 DEBUG ((DEBUG_ERROR, " in module (%a)\n", PdbPointer));\r
201 }\r
202 }\r
203}\r
204\r
205/**\r
206 Read information from the CPU save state.\r
207\r
208 @param This EFI_SMM_CPU_PROTOCOL instance\r
209 @param Width The number of bytes to read from the CPU save state.\r
210 @param Register Specifies the CPU register to read form the save state.\r
211 @param CpuIndex Specifies the zero-based index of the CPU save state.\r
212 @param Buffer Upon return, this holds the CPU register value read from the save state.\r
213\r
214 @retval EFI_SUCCESS The register was read from Save State\r
215 @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor\r
216 @retval EFI_INVALID_PARAMTER This or Buffer is NULL.\r
217\r
218**/\r
219EFI_STATUS\r
220EFIAPI\r
221SmmReadSaveState (\r
222 IN CONST EFI_SMM_CPU_PROTOCOL *This,\r
223 IN UINTN Width,\r
224 IN EFI_SMM_SAVE_STATE_REGISTER Register,\r
225 IN UINTN CpuIndex,\r
226 OUT VOID *Buffer\r
227 )\r
228{\r
229 EFI_STATUS Status;\r
230\r
231 //\r
232 // Retrieve pointer to the specified CPU's SMM Save State buffer\r
233 //\r
234 if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {\r
235 return EFI_INVALID_PARAMETER;\r
236 }\r
237 //\r
238 // The SpeculationBarrier() call here is to ensure the above check for the\r
239 // CpuIndex has been completed before the execution of subsequent codes.\r
240 //\r
241 SpeculationBarrier ();\r
242\r
243 //\r
244 // Check for special EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID\r
245 //\r
246 if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {\r
247 //\r
248 // The pseudo-register only supports the 64-bit size specified by Width.\r
249 //\r
250 if (Width != sizeof (UINT64)) {\r
251 return EFI_INVALID_PARAMETER;\r
252 }\r
253 //\r
254 // If the processor is in SMM at the time the SMI occurred,\r
255 // the pseudo register value for EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID is returned in Buffer.\r
256 // Otherwise, EFI_NOT_FOUND is returned.\r
257 //\r
258 if (*(mSmmMpSyncData->CpuData[CpuIndex].Present)) {\r
259 *(UINT64 *)Buffer = gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId;\r
260 return EFI_SUCCESS;\r
261 } else {\r
262 return EFI_NOT_FOUND;\r
263 }\r
264 }\r
265\r
266 if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {\r
267 return EFI_INVALID_PARAMETER;\r
268 }\r
269\r
270 Status = SmmCpuFeaturesReadSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
271 if (Status == EFI_UNSUPPORTED) {\r
272 Status = ReadSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
273 }\r
274 return Status;\r
275}\r
276\r
277/**\r
278 Write data to the CPU save state.\r
279\r
280 @param This EFI_SMM_CPU_PROTOCOL instance\r
281 @param Width The number of bytes to read from the CPU save state.\r
282 @param Register Specifies the CPU register to write to the save state.\r
283 @param CpuIndex Specifies the zero-based index of the CPU save state\r
284 @param Buffer Upon entry, this holds the new CPU register value.\r
285\r
286 @retval EFI_SUCCESS The register was written from Save State\r
287 @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor\r
288 @retval EFI_INVALID_PARAMTER ProcessorIndex or Width is not correct\r
289\r
290**/\r
291EFI_STATUS\r
292EFIAPI\r
293SmmWriteSaveState (\r
294 IN CONST EFI_SMM_CPU_PROTOCOL *This,\r
295 IN UINTN Width,\r
296 IN EFI_SMM_SAVE_STATE_REGISTER Register,\r
297 IN UINTN CpuIndex,\r
298 IN CONST VOID *Buffer\r
299 )\r
300{\r
301 EFI_STATUS Status;\r
302\r
303 //\r
304 // Retrieve pointer to the specified CPU's SMM Save State buffer\r
305 //\r
306 if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {\r
307 return EFI_INVALID_PARAMETER;\r
308 }\r
309\r
310 //\r
311 // Writes to EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID are ignored\r
312 //\r
313 if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {\r
314 return EFI_SUCCESS;\r
315 }\r
316\r
317 if (!mSmmMpSyncData->CpuData[CpuIndex].Present) {\r
318 return EFI_INVALID_PARAMETER;\r
319 }\r
320\r
321 Status = SmmCpuFeaturesWriteSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
322 if (Status == EFI_UNSUPPORTED) {\r
323 Status = WriteSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
324 }\r
325 return Status;\r
326}\r
327\r
328\r
/**
  C function for SMI handler. To change all processor's SMMBase Register.

  Runs on each CPU during its first (SMBASE-relocation) SMI, which
  SmmRelocateBases triggers after installing gcSmmInitTemplate at the
  default SMBASE.  Identifies the executing CPU by APIC ID, performs
  per-CPU SMM feature initialization, and signals completion back to
  SmmRelocateBases through mRebased[] via SemaphoreHook().

**/
VOID
EFIAPI
SmmInitHandler (
  VOID
  )
{
  UINT32 ApicId;
  UINTN Index;

  //
  // Update SMM IDT entries' code segment and load IDT
  //
  AsmWriteIdtr (&gcSmiIdtr);
  ApicId = GetApicId ();

  ASSERT (mNumberOfCpus <= mMaxNumberOfCpus);

  //
  // Locate this CPU's index by matching its APIC ID in ProcessorInfo.
  //
  for (Index = 0; Index < mNumberOfCpus; Index++) {
    if (ApicId == (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
      //
      // Initialize SMM specific features on the currently executing CPU
      //
      SmmCpuFeaturesInitializeProcessor (
        Index,
        mIsBsp,
        gSmmCpuPrivate->ProcessorInfo,
        &mCpuHotPlugData
        );

      if (!mSmmS3Flag) {
        //
        // Check XD and BTS features on each processor on normal boot
        //
        CheckFeatureSupported ();
      }

      if (mIsBsp) {
        //
        // BSP rebase is already done above.
        // Initialize private data during S3 resume
        //
        InitializeMpSyncData ();
      }

      //
      // Hook return after RSM to set SMM re-based flag
      //
      SemaphoreHook (Index, &mRebased[Index]);

      return;
    }
  }

  //
  // Reaching here means the executing CPU's APIC ID was not found in the
  // ProcessorInfo table, which should never happen.
  //
  ASSERT (FALSE);
}
387\r
/**
  Relocate SmmBases for each processor.

  Execute on first boot and all S3 resumes

  Patches the SmmInit ASM template with the current CR0/CR3/CR4 and GDTR,
  installs the template at the default SMBASE handler address, then sends
  one SMI to each AP (and finally to the BSP) so each CPU runs
  SmmInitHandler and relocates its own SMBASE.  The original memory at the
  default SMBASE is backed up beforehand and restored afterwards.

**/
VOID
EFIAPI
SmmRelocateBases (
  VOID
  )
{
  UINT8                 BakBuf[BACK_BUF_SIZE];
  SMRAM_SAVE_STATE_MAP  BakBuf2;
  SMRAM_SAVE_STATE_MAP  *CpuStatePtr;
  UINT8                 *U8Ptr;
  UINT32                ApicId;
  UINTN                 Index;
  UINTN                 BspIndex;

  //
  // Make sure the reserved size is large enough for procedure SmmInitTemplate.
  //
  ASSERT (sizeof (BakBuf) >= gcSmmInitSize);

  //
  // Patch ASM code template with current CR0, CR3, and CR4 values.
  // mSmmCr0/mSmmCr4 are also kept in globals for S3 resume use.
  //
  mSmmCr0 = (UINT32)AsmReadCr0 ();
  PatchInstructionX86 (gPatchSmmCr0, mSmmCr0, 4);
  PatchInstructionX86 (gPatchSmmCr3, AsmReadCr3 (), 4);
  mSmmCr4 = (UINT32)AsmReadCr4 ();
  PatchInstructionX86 (gPatchSmmCr4, mSmmCr4 & (~CR4_CET_ENABLE), 4);

  //
  // Patch GDTR for SMM base relocation
  //
  gcSmiInitGdtr.Base  = gcSmiGdtr.Base;
  gcSmiInitGdtr.Limit = gcSmiGdtr.Limit;

  U8Ptr = (UINT8*)(UINTN)(SMM_DEFAULT_SMBASE + SMM_HANDLER_OFFSET);
  CpuStatePtr = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);

  //
  // Backup original contents at address 0x38000
  //
  CopyMem (BakBuf, U8Ptr, sizeof (BakBuf));
  CopyMem (&BakBuf2, CpuStatePtr, sizeof (BakBuf2));

  //
  // Load image for relocation
  //
  CopyMem (U8Ptr, gcSmmInitTemplate, gcSmmInitSize);

  //
  // Retrieve the local APIC ID of current processor
  //
  ApicId = GetApicId ();

  //
  // Relocate SM bases for all APs
  // This is APs' 1st SMI - rebase will be done here, and APs' default SMI handler will be overridden by gcSmmInitTemplate
  //
  mIsBsp = FALSE;
  BspIndex = (UINTN)-1;
  for (Index = 0; Index < mNumberOfCpus; Index++) {
    mRebased[Index] = FALSE;
    if (ApicId != (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
      SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
      //
      // Wait for this AP to finish its 1st SMI.
      // mRebased[Index] is set by SemaphoreHook() in SmmInitHandler.
      //
      while (!mRebased[Index]);
    } else {
      //
      // BSP will be Relocated later
      //
      BspIndex = Index;
    }
  }

  //
  // Relocate BSP's SMM base
  //
  ASSERT (BspIndex != (UINTN)-1);
  mIsBsp = TRUE;
  SendSmiIpi (ApicId);
  //
  // Wait for the BSP to finish its 1st SMI
  //
  while (!mRebased[BspIndex]);

  //
  // Restore contents at address 0x38000
  //
  CopyMem (CpuStatePtr, &BakBuf2, sizeof (BakBuf2));
  CopyMem (U8Ptr, BakBuf, sizeof (BakBuf));
}
486\r
487/**\r
488 SMM Ready To Lock event notification handler.\r
489\r
490 The CPU S3 data is copied to SMRAM for security and mSmmReadyToLock is set to\r
491 perform additional lock actions that must be performed from SMM on the next SMI.\r
492\r
493 @param[in] Protocol Points to the protocol's unique identifier.\r
494 @param[in] Interface Points to the interface instance.\r
495 @param[in] Handle The handle on which the interface was installed.\r
496\r
497 @retval EFI_SUCCESS Notification handler runs successfully.\r
498 **/\r
499EFI_STATUS\r
500EFIAPI\r
501SmmReadyToLockEventNotify (\r
502 IN CONST EFI_GUID *Protocol,\r
503 IN VOID *Interface,\r
504 IN EFI_HANDLE Handle\r
505 )\r
506{\r
507 GetAcpiCpuData ();\r
508\r
509 //\r
510 // Cache a copy of UEFI memory map before we start profiling feature.\r
511 //\r
512 GetUefiMemoryMap ();\r
513\r
514 //\r
515 // Set SMM ready to lock flag and return\r
516 //\r
517 mSmmReadyToLock = TRUE;\r
518 return EFI_SUCCESS;\r
519}\r
520\r
521/**\r
522 The module Entry Point of the CPU SMM driver.\r
523\r
524 @param ImageHandle The firmware allocated handle for the EFI image.\r
525 @param SystemTable A pointer to the EFI System Table.\r
526\r
527 @retval EFI_SUCCESS The entry point is executed successfully.\r
528 @retval Other Some error occurs when executing this entry point.\r
529\r
530**/\r
531EFI_STATUS\r
532EFIAPI\r
533PiCpuSmmEntry (\r
534 IN EFI_HANDLE ImageHandle,\r
535 IN EFI_SYSTEM_TABLE *SystemTable\r
536 )\r
537{\r
538 EFI_STATUS Status;\r
539 EFI_MP_SERVICES_PROTOCOL *MpServices;\r
540 UINTN NumberOfEnabledProcessors;\r
541 UINTN Index;\r
542 VOID *Buffer;\r
543 UINTN BufferPages;\r
544 UINTN TileCodeSize;\r
545 UINTN TileDataSize;\r
546 UINTN TileSize;\r
547 UINT8 *Stacks;\r
548 VOID *Registration;\r
549 UINT32 RegEax;\r
550 UINT32 RegEbx;\r
551 UINT32 RegEcx;\r
552 UINT32 RegEdx;\r
553 UINTN FamilyId;\r
554 UINTN ModelId;\r
555 UINT32 Cr3;\r
556\r
557 //\r
558 // Initialize address fixup\r
559 //\r
560 PiSmmCpuSmmInitFixupAddress ();\r
561 PiSmmCpuSmiEntryFixupAddress ();\r
562\r
563 //\r
564 // Initialize Debug Agent to support source level debug in SMM code\r
565 //\r
566 InitializeDebugAgent (DEBUG_AGENT_INIT_SMM, NULL, NULL);\r
567\r
568 //\r
569 // Report the start of CPU SMM initialization.\r
570 //\r
571 REPORT_STATUS_CODE (\r
572 EFI_PROGRESS_CODE,\r
573 EFI_COMPUTING_UNIT_HOST_PROCESSOR | EFI_CU_HP_PC_SMM_INIT\r
574 );\r
575\r
576 //\r
577 // Find out SMRR Base and SMRR Size\r
578 //\r
579 FindSmramInfo (&mCpuHotPlugData.SmrrBase, &mCpuHotPlugData.SmrrSize);\r
580\r
581 //\r
582 // Get MP Services Protocol\r
583 //\r
584 Status = SystemTable->BootServices->LocateProtocol (&gEfiMpServiceProtocolGuid, NULL, (VOID **)&MpServices);\r
585 ASSERT_EFI_ERROR (Status);\r
586\r
587 //\r
588 // Use MP Services Protocol to retrieve the number of processors and number of enabled processors\r
589 //\r
590 Status = MpServices->GetNumberOfProcessors (MpServices, &mNumberOfCpus, &NumberOfEnabledProcessors);\r
591 ASSERT_EFI_ERROR (Status);\r
592 ASSERT (mNumberOfCpus <= PcdGet32 (PcdCpuMaxLogicalProcessorNumber));\r
593\r
594 //\r
595 // If support CPU hot plug, PcdCpuSmmEnableBspElection should be set to TRUE.\r
596 // A constant BSP index makes no sense because it may be hot removed.\r
597 //\r
598 DEBUG_CODE (\r
599 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {\r
600\r
601 ASSERT (FeaturePcdGet (PcdCpuSmmEnableBspElection));\r
602 }\r
603 );\r
604\r
605 //\r
606 // Save the PcdCpuSmmCodeAccessCheckEnable value into a global variable.\r
607 //\r
608 mSmmCodeAccessCheckEnable = PcdGetBool (PcdCpuSmmCodeAccessCheckEnable);\r
609 DEBUG ((EFI_D_INFO, "PcdCpuSmmCodeAccessCheckEnable = %d\n", mSmmCodeAccessCheckEnable));\r
610\r
611 //\r
612 // Save the PcdPteMemoryEncryptionAddressOrMask value into a global variable.\r
613 // Make sure AddressEncMask is contained to smallest supported address field.\r
614 //\r
615 mAddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;\r
616 DEBUG ((EFI_D_INFO, "mAddressEncMask = 0x%lx\n", mAddressEncMask));\r
617\r
618 //\r
619 // If support CPU hot plug, we need to allocate resources for possibly hot-added processors\r
620 //\r
621 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {\r
622 mMaxNumberOfCpus = PcdGet32 (PcdCpuMaxLogicalProcessorNumber);\r
623 } else {\r
624 mMaxNumberOfCpus = mNumberOfCpus;\r
625 }\r
626 gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus = mMaxNumberOfCpus;\r
627\r
628 //\r
629 // The CPU save state and code for the SMI entry point are tiled within an SMRAM\r
630 // allocated buffer. The minimum size of this buffer for a uniprocessor system\r
631 // is 32 KB, because the entry point is SMBASE + 32KB, and CPU save state area\r
632 // just below SMBASE + 64KB. If more than one CPU is present in the platform,\r
633 // then the SMI entry point and the CPU save state areas can be tiles to minimize\r
634 // the total amount SMRAM required for all the CPUs. The tile size can be computed\r
635 // by adding the // CPU save state size, any extra CPU specific context, and\r
636 // the size of code that must be placed at the SMI entry point to transfer\r
637 // control to a C function in the native SMM execution mode. This size is\r
638 // rounded up to the nearest power of 2 to give the tile size for a each CPU.\r
639 // The total amount of memory required is the maximum number of CPUs that\r
640 // platform supports times the tile size. The picture below shows the tiling,\r
641 // where m is the number of tiles that fit in 32KB.\r
642 //\r
643 // +-----------------------------+ <-- 2^n offset from Base of allocated buffer\r
644 // | CPU m+1 Save State |\r
645 // +-----------------------------+\r
646 // | CPU m+1 Extra Data |\r
647 // +-----------------------------+\r
648 // | Padding |\r
649 // +-----------------------------+\r
650 // | CPU 2m SMI Entry |\r
651 // +#############################+ <-- Base of allocated buffer + 64 KB\r
652 // | CPU m-1 Save State |\r
653 // +-----------------------------+\r
654 // | CPU m-1 Extra Data |\r
655 // +-----------------------------+\r
656 // | Padding |\r
657 // +-----------------------------+\r
658 // | CPU 2m-1 SMI Entry |\r
659 // +=============================+ <-- 2^n offset from Base of allocated buffer\r
660 // | . . . . . . . . . . . . |\r
661 // +=============================+ <-- 2^n offset from Base of allocated buffer\r
662 // | CPU 2 Save State |\r
663 // +-----------------------------+\r
664 // | CPU 2 Extra Data |\r
665 // +-----------------------------+\r
666 // | Padding |\r
667 // +-----------------------------+\r
668 // | CPU m+1 SMI Entry |\r
669 // +=============================+ <-- Base of allocated buffer + 32 KB\r
670 // | CPU 1 Save State |\r
671 // +-----------------------------+\r
672 // | CPU 1 Extra Data |\r
673 // +-----------------------------+\r
674 // | Padding |\r
675 // +-----------------------------+\r
676 // | CPU m SMI Entry |\r
677 // +#############################+ <-- Base of allocated buffer + 32 KB == CPU 0 SMBASE + 64 KB\r
678 // | CPU 0 Save State |\r
679 // +-----------------------------+\r
680 // | CPU 0 Extra Data |\r
681 // +-----------------------------+\r
682 // | Padding |\r
683 // +-----------------------------+\r
684 // | CPU m-1 SMI Entry |\r
685 // +=============================+ <-- 2^n offset from Base of allocated buffer\r
686 // | . . . . . . . . . . . . |\r
687 // +=============================+ <-- 2^n offset from Base of allocated buffer\r
688 // | Padding |\r
689 // +-----------------------------+\r
690 // | CPU 1 SMI Entry |\r
691 // +=============================+ <-- 2^n offset from Base of allocated buffer\r
692 // | Padding |\r
693 // +-----------------------------+\r
694 // | CPU 0 SMI Entry |\r
695 // +#############################+ <-- Base of allocated buffer == CPU 0 SMBASE + 32 KB\r
696 //\r
697\r
698 //\r
699 // Retrieve CPU Family\r
700 //\r
701 AsmCpuid (CPUID_VERSION_INFO, &RegEax, NULL, NULL, NULL);\r
702 FamilyId = (RegEax >> 8) & 0xf;\r
703 ModelId = (RegEax >> 4) & 0xf;\r
704 if (FamilyId == 0x06 || FamilyId == 0x0f) {\r
705 ModelId = ModelId | ((RegEax >> 12) & 0xf0);\r
706 }\r
707\r
708 RegEdx = 0;\r
709 AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);\r
710 if (RegEax >= CPUID_EXTENDED_CPU_SIG) {\r
711 AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);\r
712 }\r
713 //\r
714 // Determine the mode of the CPU at the time an SMI occurs\r
715 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual\r
716 // Volume 3C, Section 34.4.1.1\r
717 //\r
718 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT;\r
719 if ((RegEdx & BIT29) != 0) {\r
720 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;\r
721 }\r
722 if (FamilyId == 0x06) {\r
723 if (ModelId == 0x17 || ModelId == 0x0f || ModelId == 0x1c) {\r
724 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;\r
725 }\r
726 }\r
727\r
728 DEBUG ((DEBUG_INFO, "PcdControlFlowEnforcementPropertyMask = %d\n", PcdGet32 (PcdControlFlowEnforcementPropertyMask)));\r
729 if (PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) {\r
730 AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);\r
731 if (RegEax > CPUID_EXTENDED_FUNCTION) {\r
732 AsmCpuidEx (CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS, CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO, NULL, NULL, &RegEcx, &RegEdx);\r
733 DEBUG ((DEBUG_INFO, "CPUID[7/0] ECX - 0x%08x\n", RegEcx));\r
734 DEBUG ((DEBUG_INFO, " CET_SS - 0x%08x\n", RegEcx & CPUID_CET_SS));\r
735 DEBUG ((DEBUG_INFO, " CET_IBT - 0x%08x\n", RegEdx & CPUID_CET_IBT));\r
736 if ((RegEcx & CPUID_CET_SS) == 0) {\r
737 mCetSupported = FALSE;\r
738 PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);\r
739 }\r
740 if (mCetSupported) {\r
741 AsmCpuidEx (CPUID_EXTENDED_STATE, CPUID_EXTENDED_STATE_SUB_LEAF, NULL, &RegEbx, &RegEcx, NULL);\r
742 DEBUG ((DEBUG_INFO, "CPUID[D/1] EBX - 0x%08x, ECX - 0x%08x\n", RegEbx, RegEcx));\r
743 AsmCpuidEx (CPUID_EXTENDED_STATE, 11, &RegEax, NULL, &RegEcx, NULL);\r
744 DEBUG ((DEBUG_INFO, "CPUID[D/11] EAX - 0x%08x, ECX - 0x%08x\n", RegEax, RegEcx));\r
745 AsmCpuidEx(CPUID_EXTENDED_STATE, 12, &RegEax, NULL, &RegEcx, NULL);\r
746 DEBUG ((DEBUG_INFO, "CPUID[D/12] EAX - 0x%08x, ECX - 0x%08x\n", RegEax, RegEcx));\r
747 }\r
748 }\r
749 } else {\r
750 mCetSupported = FALSE;\r
751 PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);\r
752 }\r
753\r
754 //\r
755 // Compute tile size of buffer required to hold the CPU SMRAM Save State Map, extra CPU\r
756 // specific context start starts at SMBASE + SMM_PSD_OFFSET, and the SMI entry point.\r
757 // This size is rounded up to nearest power of 2.\r
758 //\r
759 TileCodeSize = GetSmiHandlerSize ();\r
760 TileCodeSize = ALIGN_VALUE(TileCodeSize, SIZE_4KB);\r
761 TileDataSize = (SMRAM_SAVE_STATE_MAP_OFFSET - SMM_PSD_OFFSET) + sizeof (SMRAM_SAVE_STATE_MAP);\r
762 TileDataSize = ALIGN_VALUE(TileDataSize, SIZE_4KB);\r
763 TileSize = TileDataSize + TileCodeSize - 1;\r
764 TileSize = 2 * GetPowerOfTwo32 ((UINT32)TileSize);\r
765 DEBUG ((EFI_D_INFO, "SMRAM TileSize = 0x%08x (0x%08x, 0x%08x)\n", TileSize, TileCodeSize, TileDataSize));\r
766\r
767 //\r
768 // If the TileSize is larger than space available for the SMI Handler of\r
769 // CPU[i], the extra CPU specific context of CPU[i+1], and the SMRAM Save\r
770 // State Map of CPU[i+1], then ASSERT(). If this ASSERT() is triggered, then\r
771 // the SMI Handler size must be reduced or the size of the extra CPU specific\r
772 // context must be reduced.\r
773 //\r
774 ASSERT (TileSize <= (SMRAM_SAVE_STATE_MAP_OFFSET + sizeof (SMRAM_SAVE_STATE_MAP) - SMM_HANDLER_OFFSET));\r
775\r
776 //\r
777 // Allocate buffer for all of the tiles.\r
778 //\r
779 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual\r
780 // Volume 3C, Section 34.11 SMBASE Relocation\r
781 // For Pentium and Intel486 processors, the SMBASE values must be\r
782 // aligned on a 32-KByte boundary or the processor will enter shutdown\r
783 // state during the execution of a RSM instruction.\r
784 //\r
785 // Intel486 processors: FamilyId is 4\r
786 // Pentium processors : FamilyId is 5\r
787 //\r
788 BufferPages = EFI_SIZE_TO_PAGES (SIZE_32KB + TileSize * (mMaxNumberOfCpus - 1));\r
789 if ((FamilyId == 4) || (FamilyId == 5)) {\r
790 Buffer = AllocateAlignedCodePages (BufferPages, SIZE_32KB);\r
791 } else {\r
792 Buffer = AllocateAlignedCodePages (BufferPages, SIZE_4KB);\r
793 }\r
794 ASSERT (Buffer != NULL);\r
795 DEBUG ((EFI_D_INFO, "SMRAM SaveState Buffer (0x%08x, 0x%08x)\n", Buffer, EFI_PAGES_TO_SIZE(BufferPages)));\r
796\r
797 //\r
798 // Allocate buffer for pointers to array in SMM_CPU_PRIVATE_DATA.\r
799 //\r
800 gSmmCpuPrivate->ProcessorInfo = (EFI_PROCESSOR_INFORMATION *)AllocatePool (sizeof (EFI_PROCESSOR_INFORMATION) * mMaxNumberOfCpus);\r
801 ASSERT (gSmmCpuPrivate->ProcessorInfo != NULL);\r
802\r
803 gSmmCpuPrivate->Operation = (SMM_CPU_OPERATION *)AllocatePool (sizeof (SMM_CPU_OPERATION) * mMaxNumberOfCpus);\r
804 ASSERT (gSmmCpuPrivate->Operation != NULL);\r
805\r
806 gSmmCpuPrivate->CpuSaveStateSize = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);\r
807 ASSERT (gSmmCpuPrivate->CpuSaveStateSize != NULL);\r
808\r
809 gSmmCpuPrivate->CpuSaveState = (VOID **)AllocatePool (sizeof (VOID *) * mMaxNumberOfCpus);\r
810 ASSERT (gSmmCpuPrivate->CpuSaveState != NULL);\r
811\r
812 mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveStateSize = gSmmCpuPrivate->CpuSaveStateSize;\r
813 mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveState = gSmmCpuPrivate->CpuSaveState;\r
814\r
815 //\r
816 // Allocate buffer for pointers to array in CPU_HOT_PLUG_DATA.\r
817 //\r
818 mCpuHotPlugData.ApicId = (UINT64 *)AllocatePool (sizeof (UINT64) * mMaxNumberOfCpus);\r
819 ASSERT (mCpuHotPlugData.ApicId != NULL);\r
820 mCpuHotPlugData.SmBase = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);\r
821 ASSERT (mCpuHotPlugData.SmBase != NULL);\r
822 mCpuHotPlugData.ArrayLength = (UINT32)mMaxNumberOfCpus;\r
823\r
824 //\r
825 // Retrieve APIC ID of each enabled processor from the MP Services protocol.\r
826 // Also compute the SMBASE address, CPU Save State address, and CPU Save state\r
827 // size for each CPU in the platform\r
828 //\r
829 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {\r
830 mCpuHotPlugData.SmBase[Index] = (UINTN)Buffer + Index * TileSize - SMM_HANDLER_OFFSET;\r
831 gSmmCpuPrivate->CpuSaveStateSize[Index] = sizeof(SMRAM_SAVE_STATE_MAP);\r
832 gSmmCpuPrivate->CpuSaveState[Index] = (VOID *)(mCpuHotPlugData.SmBase[Index] + SMRAM_SAVE_STATE_MAP_OFFSET);\r
833 gSmmCpuPrivate->Operation[Index] = SmmCpuNone;\r
834\r
835 if (Index < mNumberOfCpus) {\r
836 Status = MpServices->GetProcessorInfo (MpServices, Index, &gSmmCpuPrivate->ProcessorInfo[Index]);\r
837 ASSERT_EFI_ERROR (Status);\r
838 mCpuHotPlugData.ApicId[Index] = gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId;\r
839\r
840 DEBUG ((EFI_D_INFO, "CPU[%03x] APIC ID=%04x SMBASE=%08x SaveState=%08x Size=%08x\n",\r
841 Index,\r
842 (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId,\r
843 mCpuHotPlugData.SmBase[Index],\r
844 gSmmCpuPrivate->CpuSaveState[Index],\r
845 gSmmCpuPrivate->CpuSaveStateSize[Index]\r
846 ));\r
847 } else {\r
848 gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId = INVALID_APIC_ID;\r
849 mCpuHotPlugData.ApicId[Index] = INVALID_APIC_ID;\r
850 }\r
851 }\r
852\r
853 //\r
854 // Allocate SMI stacks for all processors.\r
855 //\r
856 mSmmStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)));\r
857 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
858 //\r
859 // 2 more pages is allocated for each processor.\r
860 // one is guard page and the other is known good stack.\r
861 //\r
862 // +-------------------------------------------+-----+-------------------------------------------+\r
863 // | Known Good Stack | Guard Page | SMM Stack | ... | Known Good Stack | Guard Page | SMM Stack |\r
864 // +-------------------------------------------+-----+-------------------------------------------+\r
865 // | | | |\r
866 // |<-------------- Processor 0 -------------->| |<-------------- Processor n -------------->|\r
867 //\r
868 mSmmStackSize += EFI_PAGES_TO_SIZE (2);\r
869 }\r
870\r
871 mSmmShadowStackSize = 0;\r
872 if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {\r
873 //\r
874 // Append Shadow Stack after normal stack\r
875 //\r
876 // |= Stacks\r
877 // +--------------------------------------------------+---------------------------------------------------------------+\r
878 // | Known Good Stack | Guard Page | SMM Stack | Known Good Shadow Stack | Guard Page | SMM Shadow Stack |\r
879 // +--------------------------------------------------+---------------------------------------------------------------+\r
880 // | |PcdCpuSmmStackSize| |PcdCpuSmmShadowStackSize|\r
881 // |<---------------- mSmmStackSize ----------------->|<--------------------- mSmmShadowStackSize ------------------->|\r
882 // | |\r
883 // |<-------------------------------------------- Processor N ------------------------------------------------------->|\r
884 //\r
885 mSmmShadowStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmShadowStackSize)));\r
886 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
887 mSmmShadowStackSize += EFI_PAGES_TO_SIZE (2);\r
888 }\r
889 }\r
890\r
891 Stacks = (UINT8 *) AllocatePages (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (EFI_SIZE_TO_PAGES (mSmmStackSize + mSmmShadowStackSize)));\r
892 ASSERT (Stacks != NULL);\r
893 mSmmStackArrayBase = (UINTN)Stacks;\r
894 mSmmStackArrayEnd = mSmmStackArrayBase + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (mSmmStackSize + mSmmShadowStackSize) - 1;\r
895\r
896 DEBUG ((DEBUG_INFO, "Stacks - 0x%x\n", Stacks));\r
897 DEBUG ((DEBUG_INFO, "mSmmStackSize - 0x%x\n", mSmmStackSize));\r
898 DEBUG ((DEBUG_INFO, "PcdCpuSmmStackGuard - 0x%x\n", FeaturePcdGet (PcdCpuSmmStackGuard)));\r
899 if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {\r
900 DEBUG ((DEBUG_INFO, "mSmmShadowStackSize - 0x%x\n", mSmmShadowStackSize));\r
901 }\r
902\r
903 //\r
904 // Set SMI stack for SMM base relocation\r
905 //\r
906 PatchInstructionX86 (\r
907 gPatchSmmInitStack,\r
908 (UINTN) (Stacks + mSmmStackSize - sizeof (UINTN)),\r
909 sizeof (UINTN)\r
910 );\r
911\r
912 //\r
913 // Initialize IDT\r
914 //\r
915 InitializeSmmIdt ();\r
916\r
917 //\r
918 // Relocate SMM Base addresses to the ones allocated from SMRAM\r
919 //\r
920 mRebased = (BOOLEAN *)AllocateZeroPool (sizeof (BOOLEAN) * mMaxNumberOfCpus);\r
921 ASSERT (mRebased != NULL);\r
922 SmmRelocateBases ();\r
923\r
924 //\r
925 // Call hook for BSP to perform extra actions in normal mode after all\r
926 // SMM base addresses have been relocated on all CPUs\r
927 //\r
928 SmmCpuFeaturesSmmRelocationComplete ();\r
929\r
930 DEBUG ((DEBUG_INFO, "mXdSupported - 0x%x\n", mXdSupported));\r
931\r
932 //\r
933 // SMM Time initialization\r
934 //\r
935 InitializeSmmTimer ();\r
936\r
937 //\r
938 // Initialize MP globals\r
939 //\r
940 Cr3 = InitializeMpServiceData (Stacks, mSmmStackSize, mSmmShadowStackSize);\r
941\r
942 if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {\r
943 for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {\r
944 SetShadowStack (\r
945 Cr3,\r
946 (EFI_PHYSICAL_ADDRESS)(UINTN)Stacks + mSmmStackSize + (mSmmStackSize + mSmmShadowStackSize) * Index,\r
947 mSmmShadowStackSize\r
948 );\r
949 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
950 SetNotPresentPage (\r
951 Cr3,\r
952 (EFI_PHYSICAL_ADDRESS)(UINTN)Stacks + mSmmStackSize + EFI_PAGES_TO_SIZE(1) + (mSmmStackSize + mSmmShadowStackSize) * Index,\r
953 EFI_PAGES_TO_SIZE(1)\r
954 );\r
955 }\r
956 }\r
957 }\r
958\r
959 //\r
960 // Fill in SMM Reserved Regions\r
961 //\r
962 gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedStart = 0;\r
963 gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedSize = 0;\r
964\r
965 //\r
966 // Install the SMM Configuration Protocol onto a new handle on the handle database.\r
967 // The entire SMM Configuration Protocol is allocated from SMRAM, so only a pointer\r
968 // to an SMRAM address will be present in the handle database\r
969 //\r
970 Status = SystemTable->BootServices->InstallMultipleProtocolInterfaces (\r
971 &gSmmCpuPrivate->SmmCpuHandle,\r
972 &gEfiSmmConfigurationProtocolGuid, &gSmmCpuPrivate->SmmConfiguration,\r
973 NULL\r
974 );\r
975 ASSERT_EFI_ERROR (Status);\r
976\r
977 //\r
978 // Install the SMM CPU Protocol into SMM protocol database\r
979 //\r
980 Status = gSmst->SmmInstallProtocolInterface (\r
981 &mSmmCpuHandle,\r
982 &gEfiSmmCpuProtocolGuid,\r
983 EFI_NATIVE_INTERFACE,\r
984 &mSmmCpu\r
985 );\r
986 ASSERT_EFI_ERROR (Status);\r
987\r
988 //\r
989 // Install the SMM Memory Attribute Protocol into SMM protocol database\r
990 //\r
991 Status = gSmst->SmmInstallProtocolInterface (\r
992 &mSmmCpuHandle,\r
993 &gEdkiiSmmMemoryAttributeProtocolGuid,\r
994 EFI_NATIVE_INTERFACE,\r
995 &mSmmMemoryAttribute\r
996 );\r
997 ASSERT_EFI_ERROR (Status);\r
998\r
999 //\r
1000 // Expose address of CPU Hot Plug Data structure if CPU hot plug is supported.\r
1001 //\r
1002 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {\r
1003 Status = PcdSet64S (PcdCpuHotPlugDataAddress, (UINT64)(UINTN)&mCpuHotPlugData);\r
1004 ASSERT_EFI_ERROR (Status);\r
1005 }\r
1006\r
1007 //\r
1008 // Initialize SMM CPU Services Support\r
1009 //\r
1010 Status = InitializeSmmCpuServices (mSmmCpuHandle);\r
1011 ASSERT_EFI_ERROR (Status);\r
1012\r
1013 //\r
1014 // register SMM Ready To Lock Protocol notification\r
1015 //\r
1016 Status = gSmst->SmmRegisterProtocolNotify (\r
1017 &gEfiSmmReadyToLockProtocolGuid,\r
1018 SmmReadyToLockEventNotify,\r
1019 &Registration\r
1020 );\r
1021 ASSERT_EFI_ERROR (Status);\r
1022\r
1023 //\r
1024 // Initialize SMM Profile feature\r
1025 //\r
1026 InitSmmProfile (Cr3);\r
1027\r
1028 GetAcpiS3EnableFlag ();\r
1029 InitSmmS3ResumeState (Cr3);\r
1030\r
1031 DEBUG ((EFI_D_INFO, "SMM CPU Module exit from SMRAM with EFI_SUCCESS\n"));\r
1032\r
1033 return EFI_SUCCESS;\r
1034}\r
1035\r
1036/**\r
1037\r
1038 Find out SMRAM information including SMRR base and SMRR size.\r
1039\r
1040 @param SmrrBase SMRR base\r
1041 @param SmrrSize SMRR size\r
1042\r
1043**/\r
1044VOID\r
1045FindSmramInfo (\r
1046 OUT UINT32 *SmrrBase,\r
1047 OUT UINT32 *SmrrSize\r
1048 )\r
1049{\r
1050 EFI_STATUS Status;\r
1051 UINTN Size;\r
1052 EFI_SMM_ACCESS2_PROTOCOL *SmmAccess;\r
1053 EFI_SMRAM_DESCRIPTOR *CurrentSmramRange;\r
1054 UINTN Index;\r
1055 UINT64 MaxSize;\r
1056 BOOLEAN Found;\r
1057\r
1058 //\r
1059 // Get SMM Access Protocol\r
1060 //\r
1061 Status = gBS->LocateProtocol (&gEfiSmmAccess2ProtocolGuid, NULL, (VOID **)&SmmAccess);\r
1062 ASSERT_EFI_ERROR (Status);\r
1063\r
1064 //\r
1065 // Get SMRAM information\r
1066 //\r
1067 Size = 0;\r
1068 Status = SmmAccess->GetCapabilities (SmmAccess, &Size, NULL);\r
1069 ASSERT (Status == EFI_BUFFER_TOO_SMALL);\r
1070\r
1071 mSmmCpuSmramRanges = (EFI_SMRAM_DESCRIPTOR *)AllocatePool (Size);\r
1072 ASSERT (mSmmCpuSmramRanges != NULL);\r
1073\r
1074 Status = SmmAccess->GetCapabilities (SmmAccess, &Size, mSmmCpuSmramRanges);\r
1075 ASSERT_EFI_ERROR (Status);\r
1076\r
1077 mSmmCpuSmramRangeCount = Size / sizeof (EFI_SMRAM_DESCRIPTOR);\r
1078\r
1079 //\r
1080 // Find the largest SMRAM range between 1MB and 4GB that is at least 256K - 4K in size\r
1081 //\r
1082 CurrentSmramRange = NULL;\r
1083 for (Index = 0, MaxSize = SIZE_256KB - EFI_PAGE_SIZE; Index < mSmmCpuSmramRangeCount; Index++) {\r
1084 //\r
1085 // Skip any SMRAM region that is already allocated, needs testing, or needs ECC initialization\r
1086 //\r
1087 if ((mSmmCpuSmramRanges[Index].RegionState & (EFI_ALLOCATED | EFI_NEEDS_TESTING | EFI_NEEDS_ECC_INITIALIZATION)) != 0) {\r
1088 continue;\r
1089 }\r
1090\r
1091 if (mSmmCpuSmramRanges[Index].CpuStart >= BASE_1MB) {\r
1092 if ((mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize) <= SMRR_MAX_ADDRESS) {\r
1093 if (mSmmCpuSmramRanges[Index].PhysicalSize >= MaxSize) {\r
1094 MaxSize = mSmmCpuSmramRanges[Index].PhysicalSize;\r
1095 CurrentSmramRange = &mSmmCpuSmramRanges[Index];\r
1096 }\r
1097 }\r
1098 }\r
1099 }\r
1100\r
1101 ASSERT (CurrentSmramRange != NULL);\r
1102\r
1103 *SmrrBase = (UINT32)CurrentSmramRange->CpuStart;\r
1104 *SmrrSize = (UINT32)CurrentSmramRange->PhysicalSize;\r
1105\r
1106 do {\r
1107 Found = FALSE;\r
1108 for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {\r
1109 if (mSmmCpuSmramRanges[Index].CpuStart < *SmrrBase &&\r
1110 *SmrrBase == (mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize)) {\r
1111 *SmrrBase = (UINT32)mSmmCpuSmramRanges[Index].CpuStart;\r
1112 *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);\r
1113 Found = TRUE;\r
1114 } else if ((*SmrrBase + *SmrrSize) == mSmmCpuSmramRanges[Index].CpuStart && mSmmCpuSmramRanges[Index].PhysicalSize > 0) {\r
1115 *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);\r
1116 Found = TRUE;\r
1117 }\r
1118 }\r
1119 } while (Found);\r
1120\r
1121 DEBUG ((EFI_D_INFO, "SMRR Base: 0x%x, SMRR Size: 0x%x\n", *SmrrBase, *SmrrSize));\r
1122}\r
1123\r
1124/**\r
1125Configure SMM Code Access Check feature on an AP.\r
1126SMM Feature Control MSR will be locked after configuration.\r
1127\r
1128@param[in,out] Buffer Pointer to private data buffer.\r
1129**/\r
1130VOID\r
1131EFIAPI\r
1132ConfigSmmCodeAccessCheckOnCurrentProcessor (\r
1133 IN OUT VOID *Buffer\r
1134 )\r
1135{\r
1136 UINTN CpuIndex;\r
1137 UINT64 SmmFeatureControlMsr;\r
1138 UINT64 NewSmmFeatureControlMsr;\r
1139\r
1140 //\r
1141 // Retrieve the CPU Index from the context passed in\r
1142 //\r
1143 CpuIndex = *(UINTN *)Buffer;\r
1144\r
1145 //\r
1146 // Get the current SMM Feature Control MSR value\r
1147 //\r
1148 SmmFeatureControlMsr = SmmCpuFeaturesGetSmmRegister (CpuIndex, SmmRegFeatureControl);\r
1149\r
1150 //\r
1151 // Compute the new SMM Feature Control MSR value\r
1152 //\r
1153 NewSmmFeatureControlMsr = SmmFeatureControlMsr;\r
1154 if (mSmmCodeAccessCheckEnable) {\r
1155 NewSmmFeatureControlMsr |= SMM_CODE_CHK_EN_BIT;\r
1156 if (FeaturePcdGet (PcdCpuSmmFeatureControlMsrLock)) {\r
1157 NewSmmFeatureControlMsr |= SMM_FEATURE_CONTROL_LOCK_BIT;\r
1158 }\r
1159 }\r
1160\r
1161 //\r
1162 // Only set the SMM Feature Control MSR value if the new value is different than the current value\r
1163 //\r
1164 if (NewSmmFeatureControlMsr != SmmFeatureControlMsr) {\r
1165 SmmCpuFeaturesSetSmmRegister (CpuIndex, SmmRegFeatureControl, NewSmmFeatureControlMsr);\r
1166 }\r
1167\r
1168 //\r
1169 // Release the spin lock user to serialize the updates to the SMM Feature Control MSR\r
1170 //\r
1171 ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);\r
1172}\r
1173\r
/**
  Configure SMM Code Access Check feature for all processors.
  SMM Feature Control MSR will be locked after configuration.

  The BSP programs its own SMM Feature Control MSR first, then dispatches
  each present AP one at a time and uses mConfigSmmCodeAccessCheckLock as a
  handshake: the dispatcher holds the lock while an AP runs, and the AP
  releases it from ConfigSmmCodeAccessCheckOnCurrentProcessor() when done.
  Returns early (disabling the feature) if the Feature Control register or
  the code access check capability is not supported.
**/
VOID
ConfigSmmCodeAccessCheck (
  VOID
  )
{
  UINTN Index;
  EFI_STATUS Status;

  //
  // Check to see if the Feature Control MSR is supported on this CPU
  //
  Index = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
  if (!SmmCpuFeaturesIsSmmRegisterSupported (Index, SmmRegFeatureControl)) {
    mSmmCodeAccessCheckEnable = FALSE;
    return;
  }

  //
  // Check to see if the CPU supports the SMM Code Access Check feature
  // Do not access this MSR unless the CPU supports the SmmRegFeatureControl
  //
  if ((AsmReadMsr64 (EFI_MSR_SMM_MCA_CAP) & SMM_CODE_ACCESS_CHK_BIT) == 0) {
    mSmmCodeAccessCheckEnable = FALSE;
    return;
  }

  //
  // Initialize the lock used to serialize the MSR programming in BSP and all APs
  //
  InitializeSpinLock (mConfigSmmCodeAccessCheckLock);

  //
  // Acquire Config SMM Code Access Check spin lock. The BSP will release the
  // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
  //
  AcquireSpinLock (mConfigSmmCodeAccessCheckLock);

  //
  // Enable SMM Code Access Check feature on the BSP.
  // Note: Index still holds the BSP's CPU index here; the worker releases
  // the spin lock before returning.
  //
  ConfigSmmCodeAccessCheckOnCurrentProcessor (&Index);

  //
  // Enable SMM Code Access Check feature for the APs.
  //
  for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {
    if (Index != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
      if (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId == INVALID_APIC_ID) {
        //
        // If this processor does not exist
        //
        continue;
      }
      //
      // Acquire Config SMM Code Access Check spin lock. The AP will release the
      // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
      //
      AcquireSpinLock (mConfigSmmCodeAccessCheckLock);

      //
      // Call SmmStartupThisAp() to enable SMM Code Access Check on an AP.
      // &Index is passed as the AP's context, so Index must not be modified
      // until the AP signals completion by releasing the lock below.
      //
      Status = gSmst->SmmStartupThisAp (ConfigSmmCodeAccessCheckOnCurrentProcessor, Index, &Index);
      ASSERT_EFI_ERROR (Status);

      //
      // Wait for the AP to release the Config SMM Code Access Check spin lock.
      //
      while (!AcquireSpinLockOrFail (mConfigSmmCodeAccessCheckLock)) {
        CpuPause ();
      }

      //
      // Release the Config SMM Code Access Check spin lock.
      //
      ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);
    }
  }
}
1257\r
1258/**\r
1259 This API provides a way to allocate memory for page table.\r
1260\r
1261 This API can be called more once to allocate memory for page tables.\r
1262\r
1263 Allocates the number of 4KB pages of type EfiRuntimeServicesData and returns a pointer to the\r
1264 allocated buffer. The buffer returned is aligned on a 4KB boundary. If Pages is 0, then NULL\r
1265 is returned. If there is not enough memory remaining to satisfy the request, then NULL is\r
1266 returned.\r
1267\r
1268 @param Pages The number of 4 KB pages to allocate.\r
1269\r
1270 @return A pointer to the allocated buffer or NULL if allocation fails.\r
1271\r
1272**/\r
1273VOID *\r
1274AllocatePageTableMemory (\r
1275 IN UINTN Pages\r
1276 )\r
1277{\r
1278 VOID *Buffer;\r
1279\r
1280 Buffer = SmmCpuFeaturesAllocatePageTableMemory (Pages);\r
1281 if (Buffer != NULL) {\r
1282 return Buffer;\r
1283 }\r
1284 return AllocatePages (Pages);\r
1285}\r
1286\r
1287/**\r
1288 Allocate pages for code.\r
1289\r
1290 @param[in] Pages Number of pages to be allocated.\r
1291\r
1292 @return Allocated memory.\r
1293**/\r
1294VOID *\r
1295AllocateCodePages (\r
1296 IN UINTN Pages\r
1297 )\r
1298{\r
1299 EFI_STATUS Status;\r
1300 EFI_PHYSICAL_ADDRESS Memory;\r
1301\r
1302 if (Pages == 0) {\r
1303 return NULL;\r
1304 }\r
1305\r
1306 Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);\r
1307 if (EFI_ERROR (Status)) {\r
1308 return NULL;\r
1309 }\r
1310 return (VOID *) (UINTN) Memory;\r
1311}\r
1312\r
/**
  Allocate aligned pages for code.

  Allocates EfiRuntimeServicesCode pages such that the returned address is
  aligned to Alignment. When Alignment exceeds the page size, the function
  over-allocates, picks the aligned sub-range, and frees the unaligned head
  and tail pages back to the SMM pool.

  @param[in] Pages Number of pages to be allocated.
  @param[in] Alignment The requested alignment of the allocation.
                       Must be a power of two.
                       If Alignment is zero, then byte alignment is used.

  @return Allocated memory, or NULL if Pages is 0 or allocation fails.
**/
VOID *
AllocateAlignedCodePages (
  IN UINTN Pages,
  IN UINTN Alignment
  )
{
  EFI_STATUS Status;
  EFI_PHYSICAL_ADDRESS Memory;
  UINTN AlignedMemory;
  UINTN AlignmentMask;
  UINTN UnalignedPages;
  UINTN RealPages;

  //
  // Alignment must be a power of two or zero.
  //
  ASSERT ((Alignment & (Alignment - 1)) == 0);

  if (Pages == 0) {
    return NULL;
  }
  if (Alignment > EFI_PAGE_SIZE) {
    //
    // Calculate the total number of pages since alignment is larger than page size.
    // Over-allocating by EFI_SIZE_TO_PAGES (Alignment) guarantees an aligned
    // sub-range of Pages pages exists within the allocation.
    //
    AlignmentMask = Alignment - 1;
    RealPages = Pages + EFI_SIZE_TO_PAGES (Alignment);
    //
    // Make sure that Pages plus EFI_SIZE_TO_PAGES (Alignment) does not overflow.
    //
    ASSERT (RealPages > Pages);

    Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, RealPages, &Memory);
    if (EFI_ERROR (Status)) {
      return NULL;
    }
    //
    // Round the start of the allocation up to the next Alignment boundary.
    //
    AlignedMemory = ((UINTN) Memory + AlignmentMask) & ~AlignmentMask;
    UnalignedPages = EFI_SIZE_TO_PAGES (AlignedMemory - (UINTN) Memory);
    if (UnalignedPages > 0) {
      //
      // Free first unaligned page(s).
      //
      Status = gSmst->SmmFreePages (Memory, UnalignedPages);
      ASSERT_EFI_ERROR (Status);
    }
    //
    // Memory now points just past the aligned region; whatever remains of
    // the over-allocation is the tail to give back.
    //
    Memory = AlignedMemory + EFI_PAGES_TO_SIZE (Pages);
    UnalignedPages = RealPages - Pages - UnalignedPages;
    if (UnalignedPages > 0) {
      //
      // Free last unaligned page(s).
      //
      Status = gSmst->SmmFreePages (Memory, UnalignedPages);
      ASSERT_EFI_ERROR (Status);
    }
  } else {
    //
    // Do not over-allocate pages in this case: page allocations are already
    // at least page-aligned, which satisfies any Alignment <= EFI_PAGE_SIZE.
    //
    Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);
    if (EFI_ERROR (Status)) {
      return NULL;
    }
    AlignedMemory = (UINTN) Memory;
  }
  return (VOID *) AlignedMemory;
}
1389\r
1390/**\r
1391 Perform the remaining tasks.\r
1392\r
1393**/\r
1394VOID\r
1395PerformRemainingTasks (\r
1396 VOID\r
1397 )\r
1398{\r
1399 if (mSmmReadyToLock) {\r
1400 //\r
1401 // Start SMM Profile feature\r
1402 //\r
1403 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {\r
1404 SmmProfileStart ();\r
1405 }\r
1406 //\r
1407 // Create a mix of 2MB and 4KB page table. Update some memory ranges absent and execute-disable.\r
1408 //\r
1409 InitPaging ();\r
1410\r
1411 //\r
1412 // Mark critical region to be read-only in page table\r
1413 //\r
1414 SetMemMapAttributes ();\r
1415\r
1416 //\r
1417 // For outside SMRAM, we only map SMM communication buffer or MMIO.\r
1418 //\r
1419 SetUefiMemMapAttributes ();\r
1420\r
1421 //\r
1422 // Set page table itself to be read-only\r
1423 //\r
1424 SetPageTableAttributes ();\r
1425\r
1426 //\r
1427 // Configure SMM Code Access Check feature if available.\r
1428 //\r
1429 ConfigSmmCodeAccessCheck ();\r
1430\r
1431 SmmCpuFeaturesCompleteSmmReadyToLock ();\r
1432\r
1433 //\r
1434 // Clean SMM ready to lock flag\r
1435 //\r
1436 mSmmReadyToLock = FALSE;\r
1437 }\r
1438}\r
1439\r
1440/**\r
1441 Perform the pre tasks.\r
1442\r
1443**/\r
1444VOID\r
1445PerformPreTasks (\r
1446 VOID\r
1447 )\r
1448{\r
1449 RestoreSmmConfigurationInS3 ();\r
1450}\r