/** @file
Agent Module to load other modules to deploy SMM Entry Vector for X86 CPU.

Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "PiSmmCpuDxeSmm.h"

//
// SMM CPU Private Data structure that contains SMM Configuration Protocol
// along with its supporting fields.
//
SMM_CPU_PRIVATE_DATA  mSmmCpuPrivateData = {
  SMM_CPU_PRIVATE_DATA_SIGNATURE,               // Signature
  NULL,                                         // SmmCpuHandle
  NULL,                                         // Pointer to ProcessorInfo array
  NULL,                                         // Pointer to Operation array
  NULL,                                         // Pointer to CpuSaveStateSize array
  NULL,                                         // Pointer to CpuSaveState array
  { {0} },                                      // SmmReservedSmramRegion
  {
    SmmStartupThisAp,                           // SmmCoreEntryContext.SmmStartupThisAp
    0,                                          // SmmCoreEntryContext.CurrentlyExecutingCpu
    0,                                          // SmmCoreEntryContext.NumberOfCpus
    NULL,                                       // SmmCoreEntryContext.CpuSaveStateSize
    NULL                                        // SmmCoreEntryContext.CpuSaveState
  },
  NULL,                                         // SmmCoreEntry
  {
    mSmmCpuPrivateData.SmmReservedSmramRegion,  // SmmConfiguration.SmramReservedRegions
    RegisterSmmEntry                            // SmmConfiguration.RegisterSmmEntry
  },
};

CPU_HOT_PLUG_DATA  mCpuHotPlugData = {
  CPU_HOT_PLUG_DATA_REVISION_1,                 // Revision
  0,                                            // Array Length of SmBase and APIC ID
  NULL,                                         // Pointer to APIC ID array
  NULL,                                         // Pointer to SMBASE array
  0,                                            // Reserved
  0,                                            // SmrrBase
  0                                             // SmrrSize
};

//
// Global pointer used to access mSmmCpuPrivateData from outside and inside SMM
//
SMM_CPU_PRIVATE_DATA  *gSmmCpuPrivate = &mSmmCpuPrivateData;

//
// SMM Relocation variables
//
volatile BOOLEAN  *mRebased;
volatile BOOLEAN  mIsBsp;

///
/// Handle for the SMM CPU Protocol
///
EFI_HANDLE  mSmmCpuHandle = NULL;

///
/// SMM CPU Protocol instance
///
EFI_SMM_CPU_PROTOCOL  mSmmCpu = {
  SmmReadSaveState,
  SmmWriteSaveState
};

///
/// SMM Memory Attribute Protocol instance
///
EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL  mSmmMemoryAttribute = {
  EdkiiSmmGetMemoryAttributes,
  EdkiiSmmSetMemoryAttributes,
  EdkiiSmmClearMemoryAttributes
};

EFI_CPU_INTERRUPT_HANDLER  mExternalVectorTable[EXCEPTION_VECTOR_NUMBER];

//
// SMM stack information
//
UINTN    mSmmStackArrayBase;
UINTN    mSmmStackArrayEnd;
UINTN    mSmmStackSize;

UINTN    mSmmShadowStackSize;
BOOLEAN  mCetSupported = TRUE;

UINTN    mMaxNumberOfCpus = 1;
UINTN    mNumberOfCpus    = 1;

//
// SMM ready to lock flag
//
BOOLEAN  mSmmReadyToLock = FALSE;

//
// Global used to cache PCD for SMM Code Access Check enable
//
BOOLEAN  mSmmCodeAccessCheckEnable = FALSE;

//
// Global copy of the PcdPteMemoryEncryptionAddressOrMask
//
UINT64   mAddressEncMask = 0;

//
// Spin lock used to serialize setting of SMM Code Access Check feature
//
SPIN_LOCK  *mConfigSmmCodeAccessCheckLock = NULL;

//
// Saved SMM ranges information
//
EFI_SMRAM_DESCRIPTOR  *mSmmCpuSmramRanges;
UINTN                 mSmmCpuSmramRangeCount;

UINT8  mPhysicalAddressBits;

//
// Control register contents saved for SMM S3 resume state initialization.
//
UINT32  mSmmCr0;
UINT32  mSmmCr4;

/**
  Initialize IDT to setup exception handlers for SMM.

**/
VOID
InitializeSmmIdt (
  VOID
  )
{
  EFI_STATUS       Status;
  BOOLEAN          InterruptState;
  IA32_DESCRIPTOR  DxeIdtr;

  //
  // There are 32 (not 255) entries in it since only processor-generated
  // exceptions will be handled.
  //
  gcSmiIdtr.Limit = (sizeof(IA32_IDT_GATE_DESCRIPTOR) * 32) - 1;
  //
  // Allocate page aligned IDT, because it might be set as read only.
  //
  gcSmiIdtr.Base = (UINTN)AllocateCodePages (EFI_SIZE_TO_PAGES(gcSmiIdtr.Limit + 1));
  ASSERT (gcSmiIdtr.Base != 0);
  ZeroMem ((VOID *)gcSmiIdtr.Base, gcSmiIdtr.Limit + 1);

  //
  // Disable Interrupt and save DXE IDT table
  //
  InterruptState = SaveAndDisableInterrupts ();
  AsmReadIdtr (&DxeIdtr);
  //
  // Load SMM temporary IDT table
  //
  AsmWriteIdtr (&gcSmiIdtr);
  //
  // Setup SMM default exception handlers, SMM IDT table
  // will be updated and saved in gcSmiIdtr
  //
  Status = InitializeCpuExceptionHandlers (NULL);
  ASSERT_EFI_ERROR (Status);
  //
  // Restore DXE IDT table and CPU interrupt
  //
  AsmWriteIdtr ((IA32_DESCRIPTOR *) &DxeIdtr);
  SetInterruptState (InterruptState);
}

/**
  Search module name by input IP address and output it.

  @param CallerIpAddress    Caller instruction pointer.

**/
VOID
DumpModuleInfoByIp (
  IN  UINTN              CallerIpAddress
  )
{
  UINTN  Pe32Data;
  VOID   *PdbPointer;

  //
  // Find Image Base
  //
  Pe32Data = PeCoffSearchImageBase (CallerIpAddress);
  if (Pe32Data != 0) {
    DEBUG ((DEBUG_ERROR, "It is invoked from the instruction before IP(0x%p)", (VOID *) CallerIpAddress));
    PdbPointer = PeCoffLoaderGetPdbPointer ((VOID *) Pe32Data);
    if (PdbPointer != NULL) {
      DEBUG ((DEBUG_ERROR, " in module (%a)\n", PdbPointer));
    }
  }
}

/**
  Read information from the CPU save state.

  @param  This      EFI_SMM_CPU_PROTOCOL instance
  @param  Width     The number of bytes to read from the CPU save state.
  @param  Register  Specifies the CPU register to read from the save state.
  @param  CpuIndex  Specifies the zero-based index of the CPU save state.
  @param  Buffer    Upon return, this holds the CPU register value read from the save state.

  @retval EFI_SUCCESS            The register was read from Save State
  @retval EFI_NOT_FOUND          The register is not defined for the Save State of Processor
  @retval EFI_INVALID_PARAMETER  This or Buffer is NULL.

**/
EFI_STATUS
EFIAPI
SmmReadSaveState (
  IN CONST EFI_SMM_CPU_PROTOCOL   *This,
  IN UINTN                        Width,
  IN EFI_SMM_SAVE_STATE_REGISTER  Register,
  IN UINTN                        CpuIndex,
  OUT VOID                        *Buffer
  )
{
  EFI_STATUS  Status;

  //
  // Retrieve pointer to the specified CPU's SMM Save State buffer
  //
  if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {
    return EFI_INVALID_PARAMETER;
  }
  //
  // The SpeculationBarrier() call here is to ensure the above check for the
  // CpuIndex has been completed before the execution of subsequent codes.
  //
  SpeculationBarrier ();

  //
  // Check for special EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID
  //
  if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {
    //
    // The pseudo-register only supports the 64-bit size specified by Width.
    //
    if (Width != sizeof (UINT64)) {
      return EFI_INVALID_PARAMETER;
    }
    //
    // If the processor is in SMM at the time the SMI occurred,
    // the pseudo register value for EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID is returned in Buffer.
    // Otherwise, EFI_NOT_FOUND is returned.
    //
    if (*(mSmmMpSyncData->CpuData[CpuIndex].Present)) {
      *(UINT64 *)Buffer = gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId;
      return EFI_SUCCESS;
    } else {
      return EFI_NOT_FOUND;
    }
  }

  if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
    return EFI_INVALID_PARAMETER;
  }

  Status = SmmCpuFeaturesReadSaveStateRegister (CpuIndex, Register, Width, Buffer);
  if (Status == EFI_UNSUPPORTED) {
    Status = ReadSaveStateRegister (CpuIndex, Register, Width, Buffer);
  }
  return Status;
}

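//
// Illustrative sketch (not part of this driver): how another SMM driver could
// consume the EFI_SMM_CPU_PROTOCOL installed below to read a register from a
// CPU's save state.  The choice of RAX and CPU 0 here is hypothetical; the
// protocol members and gSmst->SmmLocateProtocol() are the real interfaces.
//
//   EFI_SMM_CPU_PROTOCOL  *SmmCpu;
//   UINT64                Rax;
//   EFI_STATUS            Status;
//
//   Status = gSmst->SmmLocateProtocol (&gEfiSmmCpuProtocolGuid, NULL, (VOID **)&SmmCpu);
//   if (!EFI_ERROR (Status)) {
//     Status = SmmCpu->ReadSaveState (
//                        SmmCpu,
//                        sizeof (UINT64),
//                        EFI_SMM_SAVE_STATE_REGISTER_RAX,
//                        0,                                 // zero-based CPU index
//                        &Rax
//                        );
//   }
//
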
/**
  Write data to the CPU save state.

  @param  This      EFI_SMM_CPU_PROTOCOL instance
  @param  Width     The number of bytes to write to the CPU save state.
  @param  Register  Specifies the CPU register to write to the save state.
  @param  CpuIndex  Specifies the zero-based index of the CPU save state
  @param  Buffer    Upon entry, this holds the new CPU register value.

  @retval EFI_SUCCESS            The register was written to Save State
  @retval EFI_NOT_FOUND          The register is not defined for the Save State of Processor
  @retval EFI_INVALID_PARAMETER  ProcessorIndex or Width is not correct

**/
EFI_STATUS
EFIAPI
SmmWriteSaveState (
  IN CONST EFI_SMM_CPU_PROTOCOL   *This,
  IN UINTN                        Width,
  IN EFI_SMM_SAVE_STATE_REGISTER  Register,
  IN UINTN                        CpuIndex,
  IN CONST VOID                   *Buffer
  )
{
  EFI_STATUS  Status;

  //
  // Retrieve pointer to the specified CPU's SMM Save State buffer
  //
  if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {
    return EFI_INVALID_PARAMETER;
  }

  //
  // Writes to EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID are ignored
  //
  if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {
    return EFI_SUCCESS;
  }

  if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
    return EFI_INVALID_PARAMETER;
  }

  Status = SmmCpuFeaturesWriteSaveStateRegister (CpuIndex, Register, Width, Buffer);
  if (Status == EFI_UNSUPPORTED) {
    Status = WriteSaveStateRegister (CpuIndex, Register, Width, Buffer);
  }
  return Status;
}

/**
  C function for the SMI handler, used to change each processor's SMBASE register.

**/
VOID
EFIAPI
SmmInitHandler (
  VOID
  )
{
  UINT32  ApicId;
  UINTN   Index;

  //
  // Update SMM IDT entries' code segment and load IDT
  //
  AsmWriteIdtr (&gcSmiIdtr);
  ApicId = GetApicId ();

  ASSERT (mNumberOfCpus <= mMaxNumberOfCpus);

  for (Index = 0; Index < mNumberOfCpus; Index++) {
    if (ApicId == (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
      //
      // Initialize SMM specific features on the currently executing CPU
      //
      SmmCpuFeaturesInitializeProcessor (
        Index,
        mIsBsp,
        gSmmCpuPrivate->ProcessorInfo,
        &mCpuHotPlugData
        );

      if (!mSmmS3Flag) {
        //
        // Check XD and BTS features on each processor on normal boot
        //
        CheckFeatureSupported ();
      }

      if (mIsBsp) {
        //
        // BSP rebase is already done above.
        // Initialize private data during S3 resume
        //
        InitializeMpSyncData ();
      }

      //
      // Hook return after RSM to set SMM re-based flag
      //
      SemaphoreHook (Index, &mRebased[Index]);

      return;
    }
  }
  ASSERT (FALSE);
}

/**
  Relocate SmmBases for each processor.

  Execute on first boot and all S3 resumes

**/
VOID
EFIAPI
SmmRelocateBases (
  VOID
  )
{
  UINT8                 BakBuf[BACK_BUF_SIZE];
  SMRAM_SAVE_STATE_MAP  BakBuf2;
  SMRAM_SAVE_STATE_MAP  *CpuStatePtr;
  UINT8                 *U8Ptr;
  UINT32                ApicId;
  UINTN                 Index;
  UINTN                 BspIndex;

  //
  // Make sure the reserved size is large enough for procedure SmmInitTemplate.
  //
  ASSERT (sizeof (BakBuf) >= gcSmmInitSize);

  //
  // Patch ASM code template with current CR0, CR3, and CR4 values
  //
  mSmmCr0 = (UINT32)AsmReadCr0 ();
  PatchInstructionX86 (gPatchSmmCr0, mSmmCr0, 4);
  PatchInstructionX86 (gPatchSmmCr3, AsmReadCr3 (), 4);
  mSmmCr4 = (UINT32)AsmReadCr4 ();
  PatchInstructionX86 (gPatchSmmCr4, mSmmCr4 & (~CR4_CET_ENABLE), 4);
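  //
  // Note: CR4.CET is masked out of the value patched above because the SMBASE
  // relocation code runs before any SMM shadow stack has been allocated or
  // initialized; shadow stack enforcement (when supported) is enabled later,
  // after the SMM shadow stacks are set up in PiCpuSmmEntry().
  //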

  //
  // Patch GDTR for SMM base relocation
  //
  gcSmiInitGdtr.Base  = gcSmiGdtr.Base;
  gcSmiInitGdtr.Limit = gcSmiGdtr.Limit;

  U8Ptr = (UINT8*)(UINTN)(SMM_DEFAULT_SMBASE + SMM_HANDLER_OFFSET);
  CpuStatePtr = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);

  //
  // Backup original contents at address 0x38000
  //
  CopyMem (BakBuf, U8Ptr, sizeof (BakBuf));
  CopyMem (&BakBuf2, CpuStatePtr, sizeof (BakBuf2));

  //
  // Load image for relocation
  //
  CopyMem (U8Ptr, gcSmmInitTemplate, gcSmmInitSize);

  //
  // Retrieve the local APIC ID of current processor
  //
  ApicId = GetApicId ();

  //
  // Relocate SM bases for all APs
  // This is APs' 1st SMI - rebase will be done here, and APs' default SMI handler will be overridden by gcSmmInitTemplate
  //
  mIsBsp = FALSE;
  BspIndex = (UINTN)-1;
  for (Index = 0; Index < mNumberOfCpus; Index++) {
    mRebased[Index] = FALSE;
    if (ApicId != (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
      SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
      //
      // Wait for this AP to finish its 1st SMI
      //
      while (!mRebased[Index]);
    } else {
      //
      // BSP will be Relocated later
      //
      BspIndex = Index;
    }
  }

  //
  // Relocate BSP's SMM base
  //
  ASSERT (BspIndex != (UINTN)-1);
  mIsBsp = TRUE;
  SendSmiIpi (ApicId);
  //
  // Wait for the BSP to finish its 1st SMI
  //
  while (!mRebased[BspIndex]);

  //
  // Restore contents at address 0x38000
  //
  CopyMem (CpuStatePtr, &BakBuf2, sizeof (BakBuf2));
  CopyMem (U8Ptr, BakBuf, sizeof (BakBuf));
}

/**
  SMM Ready To Lock event notification handler.

  The CPU S3 data is copied to SMRAM for security and mSmmReadyToLock is set to
  perform additional lock actions that must be performed from SMM on the next SMI.

  @param[in] Protocol   Points to the protocol's unique identifier.
  @param[in] Interface  Points to the interface instance.
  @param[in] Handle     The handle on which the interface was installed.

  @retval EFI_SUCCESS   Notification handler runs successfully.
**/
EFI_STATUS
EFIAPI
SmmReadyToLockEventNotify (
  IN CONST EFI_GUID  *Protocol,
  IN VOID            *Interface,
  IN EFI_HANDLE      Handle
  )
{
  GetAcpiCpuData ();

  //
  // Cache a copy of UEFI memory map before we start profiling feature.
  //
  GetUefiMemoryMap ();

  //
  // Set SMM ready to lock flag and return
  //
  mSmmReadyToLock = TRUE;
  return EFI_SUCCESS;
}

/**
  The module Entry Point of the CPU SMM driver.

  @param  ImageHandle    The firmware allocated handle for the EFI image.
  @param  SystemTable    A pointer to the EFI System Table.

  @retval EFI_SUCCESS    The entry point is executed successfully.
  @retval Other          Some error occurs when executing this entry point.

**/
EFI_STATUS
EFIAPI
PiCpuSmmEntry (
  IN EFI_HANDLE        ImageHandle,
  IN EFI_SYSTEM_TABLE  *SystemTable
  )
{
  EFI_STATUS                Status;
  EFI_MP_SERVICES_PROTOCOL  *MpServices;
  UINTN                     NumberOfEnabledProcessors;
  UINTN                     Index;
  VOID                      *Buffer;
  UINTN                     BufferPages;
  UINTN                     TileCodeSize;
  UINTN                     TileDataSize;
  UINTN                     TileSize;
  UINT8                     *Stacks;
  VOID                      *Registration;
  UINT32                    RegEax;
  UINT32                    RegEbx;
  UINT32                    RegEcx;
  UINT32                    RegEdx;
  UINTN                     FamilyId;
  UINTN                     ModelId;
  UINT32                    Cr3;

  //
  // Initialize address fixup
  //
  PiSmmCpuSmmInitFixupAddress ();
  PiSmmCpuSmiEntryFixupAddress ();

  //
  // Initialize Debug Agent to support source level debug in SMM code
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_SMM, NULL, NULL);

  //
  // Report the start of CPU SMM initialization.
  //
  REPORT_STATUS_CODE (
    EFI_PROGRESS_CODE,
    EFI_COMPUTING_UNIT_HOST_PROCESSOR | EFI_CU_HP_PC_SMM_INIT
    );

  //
  // Find out SMRR Base and SMRR Size
  //
  FindSmramInfo (&mCpuHotPlugData.SmrrBase, &mCpuHotPlugData.SmrrSize);

  //
  // Get MP Services Protocol
  //
  Status = SystemTable->BootServices->LocateProtocol (&gEfiMpServiceProtocolGuid, NULL, (VOID **)&MpServices);
  ASSERT_EFI_ERROR (Status);

  //
  // Use MP Services Protocol to retrieve the number of processors and number of enabled processors
  //
  Status = MpServices->GetNumberOfProcessors (MpServices, &mNumberOfCpus, &NumberOfEnabledProcessors);
  ASSERT_EFI_ERROR (Status);
  ASSERT (mNumberOfCpus <= PcdGet32 (PcdCpuMaxLogicalProcessorNumber));

  //
  // If CPU hot plug is supported, then PcdCpuSmmEnableBspElection should be set to TRUE.
  // A constant BSP index makes no sense because it may be hot removed.
  //
  DEBUG_CODE (
    if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
      ASSERT (FeaturePcdGet (PcdCpuSmmEnableBspElection));
    }
  );

  //
  // Save the PcdCpuSmmCodeAccessCheckEnable value into a global variable.
  //
  mSmmCodeAccessCheckEnable = PcdGetBool (PcdCpuSmmCodeAccessCheckEnable);
  DEBUG ((EFI_D_INFO, "PcdCpuSmmCodeAccessCheckEnable = %d\n", mSmmCodeAccessCheckEnable));

  //
  // Save the PcdPteMemoryEncryptionAddressOrMask value into a global variable.
  // Make sure AddressEncMask is contained to smallest supported address field.
  //
  mAddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;
  DEBUG ((EFI_D_INFO, "mAddressEncMask = 0x%lx\n", mAddressEncMask));

  //
  // If CPU hot plug is supported, we need to allocate resources for possibly hot-added processors
  //
  if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
    mMaxNumberOfCpus = PcdGet32 (PcdCpuMaxLogicalProcessorNumber);
  } else {
    mMaxNumberOfCpus = mNumberOfCpus;
  }
  gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus = mMaxNumberOfCpus;

  //
  // The CPU save state and code for the SMI entry point are tiled within an SMRAM
  // allocated buffer.  The minimum size of this buffer for a uniprocessor system
  // is 32 KB, because the entry point is SMBASE + 32KB, and the CPU save state area
  // is just below SMBASE + 64KB.  If more than one CPU is present in the platform,
  // then the SMI entry point and the CPU save state areas can be tiled to minimize
  // the total amount of SMRAM required for all the CPUs.  The tile size can be computed
  // by adding the CPU save state size, any extra CPU specific context, and
  // the size of code that must be placed at the SMI entry point to transfer
  // control to a C function in the native SMM execution mode.  This size is
  // rounded up to the nearest power of 2 to give the tile size for each CPU.
  // The total amount of memory required is the maximum number of CPUs that the
  // platform supports times the tile size.  The picture below shows the tiling,
  // where m is the number of tiles that fit in 32KB.
  //
  //  +-----------------------------+ <-- 2^n offset from Base of allocated buffer
  //  |   CPU m+1 Save State        |
  //  +-----------------------------+
  //  |   CPU m+1 Extra Data        |
  //  +-----------------------------+
  //  |         Padding             |
  //  +-----------------------------+
  //  |   CPU 2m  SMI Entry         |
  //  +#############################+ <-- Base of allocated buffer + 64 KB
  //  |   CPU m-1 Save State        |
  //  +-----------------------------+
  //  |   CPU m-1 Extra Data        |
  //  +-----------------------------+
  //  |         Padding             |
  //  +-----------------------------+
  //  |   CPU 2m-1 SMI Entry        |
  //  +=============================+ <-- 2^n offset from Base of allocated buffer
  //  |   . . . . . . . . . . . .   |
  //  +=============================+ <-- 2^n offset from Base of allocated buffer
  //  |   CPU 2 Save State          |
  //  +-----------------------------+
  //  |   CPU 2 Extra Data          |
  //  +-----------------------------+
  //  |         Padding             |
  //  +-----------------------------+
  //  |   CPU m+1 SMI Entry         |
  //  +=============================+ <-- Base of allocated buffer + 32 KB
  //  |   CPU 1 Save State          |
  //  +-----------------------------+
  //  |   CPU 1 Extra Data          |
  //  +-----------------------------+
  //  |         Padding             |
  //  +-----------------------------+
  //  |   CPU m SMI Entry           |
  //  +#############################+ <-- Base of allocated buffer + 32 KB == CPU 0 SMBASE + 64 KB
  //  |   CPU 0 Save State          |
  //  +-----------------------------+
  //  |   CPU 0 Extra Data          |
  //  +-----------------------------+
  //  |         Padding             |
  //  +-----------------------------+
  //  |   CPU m-1 SMI Entry         |
  //  +=============================+ <-- 2^n offset from Base of allocated buffer
  //  |   . . . . . . . . . . . .   |
  //  +=============================+ <-- 2^n offset from Base of allocated buffer
  //  |         Padding             |
  //  +-----------------------------+
  //  |   CPU 1 SMI Entry           |
  //  +=============================+ <-- 2^n offset from Base of allocated buffer
  //  |         Padding             |
  //  +-----------------------------+
  //  |   CPU 0 SMI Entry           |
  //  +#############################+ <-- Base of allocated buffer == CPU 0 SMBASE + 32 KB
  //

  //
  // Retrieve CPU Family
  //
  AsmCpuid (CPUID_VERSION_INFO, &RegEax, NULL, NULL, NULL);
  FamilyId = (RegEax >> 8) & 0xf;
  ModelId  = (RegEax >> 4) & 0xf;
  if (FamilyId == 0x06 || FamilyId == 0x0f) {
    ModelId = ModelId | ((RegEax >> 12) & 0xf0);
  }

  RegEdx = 0;
  AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
  if (RegEax >= CPUID_EXTENDED_CPU_SIG) {
    AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);
  }
  //
  // Determine the mode of the CPU at the time an SMI occurs
  //   Intel(R) 64 and IA-32 Architectures Software Developer's Manual
  //   Volume 3C, Section 34.4.1.1
  //
  mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT;
  if ((RegEdx & BIT29) != 0) {
    mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;
  }
  if (FamilyId == 0x06) {
    if (ModelId == 0x17 || ModelId == 0x0f || ModelId == 0x1c) {
      mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;
    }
  }

  DEBUG ((DEBUG_INFO, "PcdControlFlowEnforcementPropertyMask = %d\n", PcdGet32 (PcdControlFlowEnforcementPropertyMask)));
  if (PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) {
    AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
    if (RegEax > CPUID_EXTENDED_FUNCTION) {
      AsmCpuidEx (CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS, CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO, NULL, NULL, &RegEcx, &RegEdx);
      DEBUG ((DEBUG_INFO, "CPUID[7/0] ECX - 0x%08x\n", RegEcx));
      DEBUG ((DEBUG_INFO, "  CET_SS  - 0x%08x\n", RegEcx & CPUID_CET_SS));
      DEBUG ((DEBUG_INFO, "  CET_IBT - 0x%08x\n", RegEdx & CPUID_CET_IBT));
      if ((RegEcx & CPUID_CET_SS) == 0) {
        mCetSupported = FALSE;
        PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);
      }
      if (mCetSupported) {
        AsmCpuidEx (CPUID_EXTENDED_STATE, CPUID_EXTENDED_STATE_SUB_LEAF, NULL, &RegEbx, &RegEcx, NULL);
        DEBUG ((DEBUG_INFO, "CPUID[D/1] EBX - 0x%08x, ECX - 0x%08x\n", RegEbx, RegEcx));
        AsmCpuidEx (CPUID_EXTENDED_STATE, 11, &RegEax, NULL, &RegEcx, NULL);
        DEBUG ((DEBUG_INFO, "CPUID[D/11] EAX - 0x%08x, ECX - 0x%08x\n", RegEax, RegEcx));
        AsmCpuidEx (CPUID_EXTENDED_STATE, 12, &RegEax, NULL, &RegEcx, NULL);
        DEBUG ((DEBUG_INFO, "CPUID[D/12] EAX - 0x%08x, ECX - 0x%08x\n", RegEax, RegEcx));
      }
    }
  } else {
    mCetSupported = FALSE;
    PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);
  }

  //
  // Compute tile size of buffer required to hold the CPU SMRAM Save State Map, extra CPU
  // specific context that starts at SMBASE + SMM_PSD_OFFSET, and the SMI entry point.
  // This size is rounded up to nearest power of 2.
  //
  TileCodeSize = GetSmiHandlerSize ();
  TileCodeSize = ALIGN_VALUE(TileCodeSize, SIZE_4KB);
  TileDataSize = (SMRAM_SAVE_STATE_MAP_OFFSET - SMM_PSD_OFFSET) + sizeof (SMRAM_SAVE_STATE_MAP);
  TileDataSize = ALIGN_VALUE(TileDataSize, SIZE_4KB);
  TileSize = TileDataSize + TileCodeSize - 1;
  TileSize = 2 * GetPowerOfTwo32 ((UINT32)TileSize);
  DEBUG ((EFI_D_INFO, "SMRAM TileSize = 0x%08x (0x%08x, 0x%08x)\n", TileSize, TileCodeSize, TileDataSize));

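  //
  // Worked example (values are illustrative, not guaranteed for any given
  // build): if GetSmiHandlerSize() returns less than 4 KB, TileCodeSize
  // aligns up to 0x1000.  If the PSD-to-save-state gap plus
  // sizeof (SMRAM_SAVE_STATE_MAP) is also under 4 KB, TileDataSize aligns up
  // to 0x1000 as well.  Then:
  //
  //   TileSize = 2 * GetPowerOfTwo32 (0x1000 + 0x1000 - 1)
  //            = 2 * 0x1000
  //            = 0x2000
  //
  // i.e. each additional CPU consumes one 8 KB tile of SMRAM in this case.
  //
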
  //
  // If the TileSize is larger than space available for the SMI Handler of
  // CPU[i], the extra CPU specific context of CPU[i+1], and the SMRAM Save
  // State Map of CPU[i+1], then ASSERT().  If this ASSERT() is triggered, then
  // the SMI Handler size must be reduced or the size of the extra CPU specific
  // context must be reduced.
  //
  ASSERT (TileSize <= (SMRAM_SAVE_STATE_MAP_OFFSET + sizeof (SMRAM_SAVE_STATE_MAP) - SMM_HANDLER_OFFSET));

  //
  // Allocate buffer for all of the tiles.
  //
  // Intel(R) 64 and IA-32 Architectures Software Developer's Manual
  // Volume 3C, Section 34.11 SMBASE Relocation
  //   For Pentium and Intel486 processors, the SMBASE values must be
  //   aligned on a 32-KByte boundary or the processor will enter shutdown
  //   state during the execution of a RSM instruction.
  //
  // Intel486 processors: FamilyId is 4
  // Pentium processors : FamilyId is 5
  //
  BufferPages = EFI_SIZE_TO_PAGES (SIZE_32KB + TileSize * (mMaxNumberOfCpus - 1));
  if ((FamilyId == 4) || (FamilyId == 5)) {
    Buffer = AllocateAlignedCodePages (BufferPages, SIZE_32KB);
  } else {
    Buffer = AllocateAlignedCodePages (BufferPages, SIZE_4KB);
  }
  ASSERT (Buffer != NULL);
  DEBUG ((EFI_D_INFO, "SMRAM SaveState Buffer (0x%08x, 0x%08x)\n", Buffer, EFI_PAGES_TO_SIZE(BufferPages)));

  //
  // Allocate buffer for pointers to array in SMM_CPU_PRIVATE_DATA.
  //
  gSmmCpuPrivate->ProcessorInfo = (EFI_PROCESSOR_INFORMATION *)AllocatePool (sizeof (EFI_PROCESSOR_INFORMATION) * mMaxNumberOfCpus);
  ASSERT (gSmmCpuPrivate->ProcessorInfo != NULL);

  gSmmCpuPrivate->Operation = (SMM_CPU_OPERATION *)AllocatePool (sizeof (SMM_CPU_OPERATION) * mMaxNumberOfCpus);
  ASSERT (gSmmCpuPrivate->Operation != NULL);

  gSmmCpuPrivate->CpuSaveStateSize = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);
  ASSERT (gSmmCpuPrivate->CpuSaveStateSize != NULL);

  gSmmCpuPrivate->CpuSaveState = (VOID **)AllocatePool (sizeof (VOID *) * mMaxNumberOfCpus);
  ASSERT (gSmmCpuPrivate->CpuSaveState != NULL);

  mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveStateSize = gSmmCpuPrivate->CpuSaveStateSize;
  mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveState     = gSmmCpuPrivate->CpuSaveState;

  //
  // Allocate buffer for pointers to array in CPU_HOT_PLUG_DATA.
  //
  mCpuHotPlugData.ApicId = (UINT64 *)AllocatePool (sizeof (UINT64) * mMaxNumberOfCpus);
  ASSERT (mCpuHotPlugData.ApicId != NULL);
  mCpuHotPlugData.SmBase = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);
  ASSERT (mCpuHotPlugData.SmBase != NULL);
  mCpuHotPlugData.ArrayLength = (UINT32)mMaxNumberOfCpus;

  //
  // Retrieve APIC ID of each enabled processor from the MP Services protocol.
  // Also compute the SMBASE address, CPU Save State address, and CPU Save state
  // size for each CPU in the platform
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    mCpuHotPlugData.SmBase[Index]           = (UINTN)Buffer + Index * TileSize - SMM_HANDLER_OFFSET;
    gSmmCpuPrivate->CpuSaveStateSize[Index] = sizeof(SMRAM_SAVE_STATE_MAP);
    gSmmCpuPrivate->CpuSaveState[Index]     = (VOID *)(mCpuHotPlugData.SmBase[Index] + SMRAM_SAVE_STATE_MAP_OFFSET);
    gSmmCpuPrivate->Operation[Index]        = SmmCpuNone;

    if (Index < mNumberOfCpus) {
      Status = MpServices->GetProcessorInfo (MpServices, Index, &gSmmCpuPrivate->ProcessorInfo[Index]);
      ASSERT_EFI_ERROR (Status);
      mCpuHotPlugData.ApicId[Index] = gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId;

      DEBUG ((EFI_D_INFO, "CPU[%03x]  APIC ID=%04x  SMBASE=%08x  SaveState=%08x  Size=%08x\n",
        Index,
        (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId,
        mCpuHotPlugData.SmBase[Index],
        gSmmCpuPrivate->CpuSaveState[Index],
        gSmmCpuPrivate->CpuSaveStateSize[Index]
        ));
    } else {
      gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId = INVALID_APIC_ID;
      mCpuHotPlugData.ApicId[Index] = INVALID_APIC_ID;
    }
  }

  //
  // Allocate SMI stacks for all processors.
  //
  mSmmStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)));
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    //
    // Two more pages are allocated for each processor:
    // one is the guard page and the other is the known good stack.
    //
    // +-------------------------------------------+-----+-------------------------------------------+
    // | Known Good Stack | Guard Page | SMM Stack | ... | Known Good Stack | Guard Page | SMM Stack |
    // +-------------------------------------------+-----+-------------------------------------------+
    // |                                           |     |                                           |
    // |<-------------- Processor 0 -------------->|     |<-------------- Processor n -------------->|
    //
    mSmmStackSize += EFI_PAGES_TO_SIZE (2);
  }

  mSmmShadowStackSize = 0;
  if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
    //
    // Append Shadow Stack after normal stack
    //
    // |= Stacks
    // +--------------------------------------------------+---------------------------------------------------------------+
    // | Known Good Stack | Guard Page |     SMM Stack     | Known Good Shadow Stack | Guard Page |    SMM Shadow Stack    |
    // +--------------------------------------------------+---------------------------------------------------------------+
    // |                               |PcdCpuSmmStackSize |                                       |PcdCpuSmmShadowStackSize|
    // |<---------------- mSmmStackSize ----------------->|<--------------------- mSmmShadowStackSize ------------------->|
    // |                                                                                                                   |
    // |<-------------------------------------------- Processor N ------------------------------------------------------->|
    //
    mSmmShadowStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmShadowStackSize)));
    if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
      mSmmShadowStackSize += EFI_PAGES_TO_SIZE (2);
    }
  }

  Stacks = (UINT8 *) AllocatePages (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (EFI_SIZE_TO_PAGES (mSmmStackSize + mSmmShadowStackSize)));
  ASSERT (Stacks != NULL);
  mSmmStackArrayBase = (UINTN)Stacks;
  mSmmStackArrayEnd  = mSmmStackArrayBase + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (mSmmStackSize + mSmmShadowStackSize) - 1;

  DEBUG ((DEBUG_INFO, "Stacks                   - 0x%x\n", Stacks));
  DEBUG ((DEBUG_INFO, "mSmmStackSize            - 0x%x\n", mSmmStackSize));
  DEBUG ((DEBUG_INFO, "PcdCpuSmmStackGuard      - 0x%x\n", FeaturePcdGet (PcdCpuSmmStackGuard)));
  if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
    DEBUG ((DEBUG_INFO, "mSmmShadowStackSize      - 0x%x\n", mSmmShadowStackSize));
  }

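  //
  // Worked example of the per-CPU stack layout (illustrative values only):
  // with PcdCpuSmmStackSize = 16 KB, PcdCpuSmmStackGuard enabled, and
  // PcdCpuSmmShadowStackSize = 8 KB on a CET-capable part, each processor gets
  // mSmmStackSize = 16 KB + 8 KB = 24 KB and mSmmShadowStackSize = 8 KB + 8 KB
  // = 16 KB, so CPU i's region within Stacks is:
  //
  //   normal stack:  Stacks + i * (24 KB + 16 KB)           .. + 24 KB
  //   shadow stack:  Stacks + i * (24 KB + 16 KB) + 24 KB   .. + 16 KB
  //
  // matching the address arithmetic used for SetShadowStack() and
  // SetNotPresentPage() below.
  //
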
  //
  // Set SMI stack for SMM base relocation
  //
  PatchInstructionX86 (
    gPatchSmmInitStack,
    (UINTN) (Stacks + mSmmStackSize - sizeof (UINTN)),
    sizeof (UINTN)
    );

  //
  // Initialize IDT
  //
  InitializeSmmIdt ();

  //
  // Relocate SMM Base addresses to the ones allocated from SMRAM
  //
  mRebased = (BOOLEAN *)AllocateZeroPool (sizeof (BOOLEAN) * mMaxNumberOfCpus);
  ASSERT (mRebased != NULL);
  SmmRelocateBases ();

  //
  // Call hook for BSP to perform extra actions in normal mode after all
  // SMM base addresses have been relocated on all CPUs
  //
  SmmCpuFeaturesSmmRelocationComplete ();

  DEBUG ((DEBUG_INFO, "mXdSupported - 0x%x\n", mXdSupported));

  //
  // SMM Time initialization
  //
  InitializeSmmTimer ();

  //
  // Initialize MP globals
  //
  Cr3 = InitializeMpServiceData (Stacks, mSmmStackSize, mSmmShadowStackSize);

  if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
    for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {
      SetShadowStack (
        Cr3,
        (EFI_PHYSICAL_ADDRESS)(UINTN)Stacks + mSmmStackSize + (mSmmStackSize + mSmmShadowStackSize) * Index,
        mSmmShadowStackSize
        );
      if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
        SetNotPresentPage (
          Cr3,
          (EFI_PHYSICAL_ADDRESS)(UINTN)Stacks + mSmmStackSize + EFI_PAGES_TO_SIZE(1) + (mSmmStackSize + mSmmShadowStackSize) * Index,
          EFI_PAGES_TO_SIZE(1)
          );
      }
    }
  }
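  //
  // Note: for CET, shadow stack pages must carry the shadow-stack page-table
  // encoding (read-only with the dirty bit set) so CALL/RET shadow stack
  // accesses are permitted while ordinary writes fault; the guard page inside
  // each shadow stack region is additionally mapped not-present so an
  // overflow traps immediately.
  //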

  //
  // Fill in SMM Reserved Regions
  //
  gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedStart = 0;
  gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedSize  = 0;

  //
  // Install the SMM Configuration Protocol onto a new handle on the handle database.
  // The entire SMM Configuration Protocol is allocated from SMRAM, so only a pointer
  // to an SMRAM address will be present in the handle database
  //
  Status = SystemTable->BootServices->InstallMultipleProtocolInterfaces (
             &gSmmCpuPrivate->SmmCpuHandle,
             &gEfiSmmConfigurationProtocolGuid, &gSmmCpuPrivate->SmmConfiguration,
             NULL
             );
  ASSERT_EFI_ERROR (Status);

  //
  // Install the SMM CPU Protocol into SMM protocol database
  //
  Status = gSmst->SmmInstallProtocolInterface (
             &mSmmCpuHandle,
             &gEfiSmmCpuProtocolGuid,
             EFI_NATIVE_INTERFACE,
             &mSmmCpu
             );
  ASSERT_EFI_ERROR (Status);

  //
  // Install the SMM Memory Attribute Protocol into SMM protocol database
  //
  Status = gSmst->SmmInstallProtocolInterface (
             &mSmmCpuHandle,
             &gEdkiiSmmMemoryAttributeProtocolGuid,
             EFI_NATIVE_INTERFACE,
             &mSmmMemoryAttribute
             );
  ASSERT_EFI_ERROR (Status);

  //
  // Expose address of CPU Hot Plug Data structure if CPU hot plug is supported.
  //
  if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
    Status = PcdSet64S (PcdCpuHotPlugDataAddress, (UINT64)(UINTN)&mCpuHotPlugData);
    ASSERT_EFI_ERROR (Status);
  }

  //
  // Initialize SMM CPU Services Support
  //
  Status = InitializeSmmCpuServices (mSmmCpuHandle);
  ASSERT_EFI_ERROR (Status);

  //
  // register SMM Ready To Lock Protocol notification
  //
  Status = gSmst->SmmRegisterProtocolNotify (
             &gEfiSmmReadyToLockProtocolGuid,
             SmmReadyToLockEventNotify,
             &Registration
             );
  ASSERT_EFI_ERROR (Status);

  //
  // Initialize SMM Profile feature
  //
  InitSmmProfile (Cr3);

  GetAcpiS3EnableFlag ();
  InitSmmS3ResumeState (Cr3);

  DEBUG ((EFI_D_INFO, "SMM CPU Module exit from SMRAM with EFI_SUCCESS\n"));

  return EFI_SUCCESS;
}

/**

  Find out SMRAM information including SMRR base and SMRR size.

  @param          SmrrBase          SMRR base
  @param          SmrrSize          SMRR size

**/
VOID
FindSmramInfo (
  OUT UINT32   *SmrrBase,
  OUT UINT32   *SmrrSize
  )
{
  EFI_STATUS                Status;
  UINTN                     Size;
  EFI_SMM_ACCESS2_PROTOCOL  *SmmAccess;
  EFI_SMRAM_DESCRIPTOR      *CurrentSmramRange;
  UINTN                     Index;
  UINT64                    MaxSize;
  BOOLEAN                   Found;

  //
  // Get SMM Access Protocol
  //
  Status = gBS->LocateProtocol (&gEfiSmmAccess2ProtocolGuid, NULL, (VOID **)&SmmAccess);
  ASSERT_EFI_ERROR (Status);

  //
  // Get SMRAM information
  //
  Size = 0;
  Status = SmmAccess->GetCapabilities (SmmAccess, &Size, NULL);
  ASSERT (Status == EFI_BUFFER_TOO_SMALL);

  mSmmCpuSmramRanges = (EFI_SMRAM_DESCRIPTOR *)AllocatePool (Size);
  ASSERT (mSmmCpuSmramRanges != NULL);

  Status = SmmAccess->GetCapabilities (SmmAccess, &Size, mSmmCpuSmramRanges);
  ASSERT_EFI_ERROR (Status);

  mSmmCpuSmramRangeCount = Size / sizeof (EFI_SMRAM_DESCRIPTOR);

  //
  // Find the largest SMRAM range between 1MB and 4GB that is at least 256K - 4K in size
  //
  CurrentSmramRange = NULL;
  for (Index = 0, MaxSize = SIZE_256KB - EFI_PAGE_SIZE; Index < mSmmCpuSmramRangeCount; Index++) {
    //
    // Skip any SMRAM region that is already allocated, needs testing, or needs ECC initialization
    //
    if ((mSmmCpuSmramRanges[Index].RegionState & (EFI_ALLOCATED | EFI_NEEDS_TESTING | EFI_NEEDS_ECC_INITIALIZATION)) != 0) {
      continue;
    }

    if (mSmmCpuSmramRanges[Index].CpuStart >= BASE_1MB) {
      if ((mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize) <= SMRR_MAX_ADDRESS) {
        if (mSmmCpuSmramRanges[Index].PhysicalSize >= MaxSize) {
          MaxSize = mSmmCpuSmramRanges[Index].PhysicalSize;
          CurrentSmramRange = &mSmmCpuSmramRanges[Index];
        }
      }
    }
  }

  ASSERT (CurrentSmramRange != NULL);

  *SmrrBase = (UINT32)CurrentSmramRange->CpuStart;
  *SmrrSize = (UINT32)CurrentSmramRange->PhysicalSize;

  //
  // Repeatedly extend the selected range to cover any SMRAM ranges that are
  // physically adjacent, below or above, until no further merge is possible.
  //
  do {
    Found = FALSE;
    for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {
      if (mSmmCpuSmramRanges[Index].CpuStart < *SmrrBase &&
          *SmrrBase == (mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize)) {
        *SmrrBase = (UINT32)mSmmCpuSmramRanges[Index].CpuStart;
        *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);
        Found = TRUE;
      } else if ((*SmrrBase + *SmrrSize) == mSmmCpuSmramRanges[Index].CpuStart && mSmmCpuSmramRanges[Index].PhysicalSize > 0) {
        *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);
        Found = TRUE;
      }
    }
  } while (Found);

  DEBUG ((EFI_D_INFO, "SMRR Base: 0x%x, SMRR Size: 0x%x\n", *SmrrBase, *SmrrSize));
}

/**
  Configure SMM Code Access Check feature on an AP.
  SMM Feature Control MSR will be locked after configuration.

  @param[in,out] Buffer  Pointer to private data buffer.
**/
VOID
EFIAPI
ConfigSmmCodeAccessCheckOnCurrentProcessor (
  IN OUT VOID  *Buffer
  )
{
  UINTN   CpuIndex;
  UINT64  SmmFeatureControlMsr;
  UINT64  NewSmmFeatureControlMsr;

  //
  // Retrieve the CPU Index from the context passed in
  //
  CpuIndex = *(UINTN *)Buffer;

  //
  // Get the current SMM Feature Control MSR value
  //
  SmmFeatureControlMsr = SmmCpuFeaturesGetSmmRegister (CpuIndex, SmmRegFeatureControl);

  //
  // Compute the new SMM Feature Control MSR value
  //
  NewSmmFeatureControlMsr = SmmFeatureControlMsr;
  if (mSmmCodeAccessCheckEnable) {
    NewSmmFeatureControlMsr |= SMM_CODE_CHK_EN_BIT;
    if (FeaturePcdGet (PcdCpuSmmFeatureControlMsrLock)) {
      NewSmmFeatureControlMsr |= SMM_FEATURE_CONTROL_LOCK_BIT;
    }
  }

  //
  // Only set the SMM Feature Control MSR value if the new value is different than the current value
  //
  if (NewSmmFeatureControlMsr != SmmFeatureControlMsr) {
    SmmCpuFeaturesSetSmmRegister (CpuIndex, SmmRegFeatureControl, NewSmmFeatureControlMsr);
  }

  //
  // Release the spin lock used to serialize the updates to the SMM Feature Control MSR
  //
  ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);
}
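
//
// Note: SMM_CODE_CHK_EN_BIT and SMM_FEATURE_CONTROL_LOCK_BIT correspond to the
// SMM-Code-Check-Enable and Lock bits of the SMM Feature Control MSR described
// in the Intel SDM.  Once the Lock bit is set the MSR can no longer be written
// until the next reset, which is why the lock is only applied here when
// PcdCpuSmmFeatureControlMsrLock is TRUE.
//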

/**
  Configure SMM Code Access Check feature for all processors.
  SMM Feature Control MSR will be locked after configuration.
**/
VOID
ConfigSmmCodeAccessCheck (
  VOID
  )
{
  UINTN       Index;
  EFI_STATUS  Status;

  //
  // Check to see if the Feature Control MSR is supported on this CPU
  //
  Index = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
  if (!SmmCpuFeaturesIsSmmRegisterSupported (Index, SmmRegFeatureControl)) {
    mSmmCodeAccessCheckEnable = FALSE;
    return;
  }

  //
  // Check to see if the CPU supports the SMM Code Access Check feature
  // Do not access this MSR unless the CPU supports the SmmRegFeatureControl
  //
  if ((AsmReadMsr64 (EFI_MSR_SMM_MCA_CAP) & SMM_CODE_ACCESS_CHK_BIT) == 0) {
    mSmmCodeAccessCheckEnable = FALSE;
    return;
  }

  //
  // Initialize the lock used to serialize the MSR programming in BSP and all APs
  //
  InitializeSpinLock (mConfigSmmCodeAccessCheckLock);

  //
  // Acquire Config SMM Code Access Check spin lock.  The BSP will release the
  // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
  //
  AcquireSpinLock (mConfigSmmCodeAccessCheckLock);

  //
  // Enable SMM Code Access Check feature on the BSP.
  //
  ConfigSmmCodeAccessCheckOnCurrentProcessor (&Index);

  //
  // Enable SMM Code Access Check feature for the APs.
  //
  for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {
    if (Index != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
      if (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId == INVALID_APIC_ID) {
        //
        // If this processor does not exist
        //
        continue;
      }
      //
      // Acquire Config SMM Code Access Check spin lock.  The AP will release the
      // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
      //
      AcquireSpinLock (mConfigSmmCodeAccessCheckLock);

      //
      // Call SmmStartupThisAp() to enable SMM Code Access Check on an AP.
      //
      Status = gSmst->SmmStartupThisAp (ConfigSmmCodeAccessCheckOnCurrentProcessor, Index, &Index);
      ASSERT_EFI_ERROR (Status);

      //
      // Wait for the AP to release the Config SMM Code Access Check spin lock.
      //
      while (!AcquireSpinLockOrFail (mConfigSmmCodeAccessCheckLock)) {
        CpuPause ();
      }

      //
      // Release the Config SMM Code Access Check spin lock.
      //
      ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);
    }
  }
}

/**
  This API provides a way to allocate memory for page table.

  This API can be called more than once to allocate memory for page tables.

  Allocates the number of 4KB pages of type EfiRuntimeServicesData and returns a pointer to the
  allocated buffer.  The buffer returned is aligned on a 4KB boundary.  If Pages is 0, then NULL
  is returned.  If there is not enough memory remaining to satisfy the request, then NULL is
  returned.

  @param  Pages                 The number of 4 KB pages to allocate.

  @return A pointer to the allocated buffer or NULL if allocation fails.

**/
VOID *
AllocatePageTableMemory (
  IN UINTN           Pages
  )
{
  VOID  *Buffer;

  Buffer = SmmCpuFeaturesAllocatePageTableMemory (Pages);
  if (Buffer != NULL) {
    return Buffer;
  }
  return AllocatePages (Pages);
}

/**
  Allocate pages for code.

  @param[in]  Pages  Number of pages to be allocated.

  @return Allocated memory.
**/
VOID *
AllocateCodePages (
  IN UINTN           Pages
  )
{
  EFI_STATUS            Status;
  EFI_PHYSICAL_ADDRESS  Memory;

  if (Pages == 0) {
    return NULL;
  }

  Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);
  if (EFI_ERROR (Status)) {
    return NULL;
  }
  return (VOID *) (UINTN) Memory;
}

/**
  Allocate aligned pages for code.

  @param[in]  Pages      Number of pages to be allocated.
  @param[in]  Alignment  The requested alignment of the allocation.
                         Must be a power of two.
                         If Alignment is zero, then byte alignment is used.

  @return Allocated memory.
**/
VOID *
AllocateAlignedCodePages (
  IN UINTN            Pages,
  IN UINTN            Alignment
  )
{
  EFI_STATUS            Status;
  EFI_PHYSICAL_ADDRESS  Memory;
  UINTN                 AlignedMemory;
  UINTN                 AlignmentMask;
  UINTN                 UnalignedPages;
  UINTN                 RealPages;

  //
  // Alignment must be a power of two or zero.
  //
  ASSERT ((Alignment & (Alignment - 1)) == 0);

  if (Pages == 0) {
    return NULL;
  }
  if (Alignment > EFI_PAGE_SIZE) {
    //
    // Calculate the total number of pages since alignment is larger than page size.
    //
    AlignmentMask = Alignment - 1;
    RealPages     = Pages + EFI_SIZE_TO_PAGES (Alignment);
    //
    // Make sure that Pages plus EFI_SIZE_TO_PAGES (Alignment) does not overflow.
    //
    ASSERT (RealPages > Pages);

    Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, RealPages, &Memory);
    if (EFI_ERROR (Status)) {
      return NULL;
    }
    AlignedMemory  = ((UINTN) Memory + AlignmentMask) & ~AlignmentMask;
    UnalignedPages = EFI_SIZE_TO_PAGES (AlignedMemory - (UINTN) Memory);
    if (UnalignedPages > 0) {
      //
      // Free first unaligned page(s).
      //
      Status = gSmst->SmmFreePages (Memory, UnalignedPages);
      ASSERT_EFI_ERROR (Status);
    }
    Memory = AlignedMemory + EFI_PAGES_TO_SIZE (Pages);
    UnalignedPages = RealPages - Pages - UnalignedPages;
    if (UnalignedPages > 0) {
      //
      // Free last unaligned page(s).
      //
      Status = gSmst->SmmFreePages (Memory, UnalignedPages);
      ASSERT_EFI_ERROR (Status);
    }
  } else {
    //
    // Do not over-allocate pages in this case.
    //
    Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);
    if (EFI_ERROR (Status)) {
      return NULL;
    }
    AlignedMemory = (UINTN) Memory;
  }
  return (VOID *) AlignedMemory;
}

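//
// Worked example of the trimming above (illustrative numbers only): a request
// for Pages = 2 with Alignment = SIZE_32KB over-allocates RealPages = 2 + 8 =
// 10 pages.  If the allocation happens to start 3 pages below a 32 KB
// boundary, the first 3 pages and the last 10 - 2 - 3 = 5 pages are freed
// back, leaving exactly 2 pages that start on a 32 KB boundary.
//
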
/**
  Perform the remaining tasks.

**/
VOID
PerformRemainingTasks (
  VOID
  )
{
  if (mSmmReadyToLock) {
    //
    // Start SMM Profile feature
    //
    if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
      SmmProfileStart ();
    }
    //
    // Create a mix of 2MB and 4KB page tables. Mark some memory ranges as not-present and execute-disable.
    //
    InitPaging ();

    //
    // Mark critical region to be read-only in page table
    //
    SetMemMapAttributes ();

    //
    // For outside SMRAM, we only map SMM communication buffer or MMIO.
    //
    SetUefiMemMapAttributes ();

    //
    // Set page table itself to be read-only
    //
    SetPageTableAttributes ();

    //
    // Configure SMM Code Access Check feature if available.
    //
    ConfigSmmCodeAccessCheck ();

    SmmCpuFeaturesCompleteSmmReadyToLock ();

    //
    // Clean SMM ready to lock flag
    //
    mSmmReadyToLock = FALSE;
  }
}

/**
  Perform the pre tasks.

**/
VOID
PerformPreTasks (
  VOID
  )
{
  RestoreSmmConfigurationInS3 ();
}