1/** @file\r
2Agent Module to load other modules to deploy SMM Entry Vector for X86 CPU.\r
3\r
4Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>\r
5This program and the accompanying materials\r
6are licensed and made available under the terms and conditions of the BSD License\r
7which accompanies this distribution. The full text of the license may be found at\r
8http://opensource.org/licenses/bsd-license.php\r
9\r
10THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
11WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
12\r
13**/\r
14\r
15#include "PiSmmCpuDxeSmm.h"\r
16\r
17//\r
18// SMM CPU Private Data structure that contains SMM Configuration Protocol\r
19// along with its supporting fields.\r
20//\r
21SMM_CPU_PRIVATE_DATA mSmmCpuPrivateData = {\r
22 SMM_CPU_PRIVATE_DATA_SIGNATURE, // Signature\r
23 NULL, // SmmCpuHandle\r
24 NULL, // Pointer to ProcessorInfo array\r
25 NULL, // Pointer to Operation array\r
26 NULL, // Pointer to CpuSaveStateSize array\r
27 NULL, // Pointer to CpuSaveState array\r
28 { {0} }, // SmmReservedSmramRegion\r
29 {\r
30 SmmStartupThisAp, // SmmCoreEntryContext.SmmStartupThisAp\r
31 0, // SmmCoreEntryContext.CurrentlyExecutingCpu\r
32 0, // SmmCoreEntryContext.NumberOfCpus\r
33 NULL, // SmmCoreEntryContext.CpuSaveStateSize\r
34 NULL // SmmCoreEntryContext.CpuSaveState\r
35 },\r
36 NULL, // SmmCoreEntry\r
37 {\r
38 mSmmCpuPrivateData.SmmReservedSmramRegion, // SmmConfiguration.SmramReservedRegions\r
39 RegisterSmmEntry // SmmConfiguration.RegisterSmmEntry\r
40 },\r
41};\r
42\r
43CPU_HOT_PLUG_DATA mCpuHotPlugData = {\r
44 CPU_HOT_PLUG_DATA_REVISION_1, // Revision\r
45 0, // Array Length of SmBase and APIC ID\r
46 NULL, // Pointer to APIC ID array\r
47 NULL, // Pointer to SMBASE array\r
48 0, // Reserved\r
49 0, // SmrrBase\r
50 0 // SmrrSize\r
51};\r
52\r
53//\r
54// Global pointer used to access mSmmCpuPrivateData from outside and inside SMM\r
55//\r
56SMM_CPU_PRIVATE_DATA *gSmmCpuPrivate = &mSmmCpuPrivateData;\r
57\r
58//\r
59// SMM Relocation variables\r
60//\r
61volatile BOOLEAN *mRebased;\r
62volatile BOOLEAN mIsBsp;\r
63\r
64///\r
65/// Handle for the SMM CPU Protocol\r
66///\r
67EFI_HANDLE mSmmCpuHandle = NULL;\r
68\r
69///\r
70/// SMM CPU Protocol instance\r
71///\r
72EFI_SMM_CPU_PROTOCOL mSmmCpu = {\r
73 SmmReadSaveState,\r
74 SmmWriteSaveState\r
75};\r
76\r
77EFI_CPU_INTERRUPT_HANDLER mExternalVectorTable[EXCEPTION_VECTOR_NUMBER];\r
78\r
79//\r
80// SMM stack information\r
81//\r
82UINTN mSmmStackArrayBase;\r
83UINTN mSmmStackArrayEnd;\r
84UINTN mSmmStackSize;\r
85\r
86//\r
87// Pointer to structure used during S3 Resume\r
88//\r
89SMM_S3_RESUME_STATE *mSmmS3ResumeState = NULL;\r
90\r
91UINTN mMaxNumberOfCpus = 1;\r
92UINTN mNumberOfCpus = 1;\r
93\r
94//\r
95// SMM ready to lock flag\r
96//\r
97BOOLEAN mSmmReadyToLock = FALSE;\r
98\r
99//\r
100// S3 boot flag\r
101//\r
102BOOLEAN mSmmS3Flag = FALSE;\r
103\r
104//\r
105// Global used to cache PCD for SMM Code Access Check enable\r
106//\r
107BOOLEAN mSmmCodeAccessCheckEnable = FALSE;\r
108\r
109//\r
110// Spin lock used to serialize setting of SMM Code Access Check feature\r
111//\r
112SPIN_LOCK *mConfigSmmCodeAccessCheckLock = NULL;\r
113\r
114/**\r
115 Initialize IDT to setup exception handlers for SMM.\r
116\r
117**/\r
118VOID\r
119InitializeSmmIdt (\r
120 VOID\r
121 )\r
122{\r
123 EFI_STATUS Status;\r
124 BOOLEAN InterruptState;\r
125 IA32_DESCRIPTOR DxeIdtr;\r
126 //\r
127 // Disable Interrupt and save DXE IDT table\r
128 //\r
129 InterruptState = SaveAndDisableInterrupts ();\r
130 AsmReadIdtr (&DxeIdtr);\r
131 //\r
132 // Load SMM temporary IDT table\r
133 //\r
134 AsmWriteIdtr (&gcSmiIdtr);\r
135 //\r
136 // Setup SMM default exception handlers, SMM IDT table\r
137 // will be updated and saved in gcSmiIdtr\r
138 //\r
139 Status = InitializeCpuExceptionHandlers (NULL);\r
140 ASSERT_EFI_ERROR (Status);\r
141 //\r
142 // Restore DXE IDT table and CPU interrupt\r
143 //\r
144 AsmWriteIdtr ((IA32_DESCRIPTOR *) &DxeIdtr);\r
145 SetInterruptState (InterruptState);\r
146}\r
147\r
148/**\r
149 Search for the module that contains the given instruction pointer (IP) address and print its name.\r
150\r
151 @param CallerIpAddress Caller instruction pointer.\r
152\r
153**/\r
154VOID\r
155DumpModuleInfoByIp (\r
156 IN UINTN CallerIpAddress\r
157 )\r
158{\r
159 UINTN Pe32Data;\r
160 EFI_IMAGE_DOS_HEADER *DosHdr;\r
161 EFI_IMAGE_OPTIONAL_HEADER_PTR_UNION Hdr;\r
162 VOID *PdbPointer;\r
163 UINT64 DumpIpAddress;\r
164\r
165 //\r
166 // Find Image Base\r
167 //\r
168 Pe32Data = CallerIpAddress & ~(SIZE_4KB - 1);\r
169 while (Pe32Data != 0) {\r
170 DosHdr = (EFI_IMAGE_DOS_HEADER *) Pe32Data;\r
171 if (DosHdr->e_magic == EFI_IMAGE_DOS_SIGNATURE) {\r
172 //\r
173 // DOS image header is present, so read the PE header after the DOS image header.\r
174 //\r
175 Hdr.Pe32 = (EFI_IMAGE_NT_HEADERS32 *)(Pe32Data + (UINTN) ((DosHdr->e_lfanew) & 0x0ffff));\r
176 //\r
177 // Make sure PE header address does not overflow and is less than the initial address.\r
178 //\r
179 if (((UINTN)Hdr.Pe32 > Pe32Data) && ((UINTN)Hdr.Pe32 < CallerIpAddress)) {\r
180 if (Hdr.Pe32->Signature == EFI_IMAGE_NT_SIGNATURE) {\r
181 //\r
182 // It's PE image.\r
183 //\r
184 break;\r
185 }\r
186 }\r
187 }\r
188\r
189 //\r
 190 // Image base not found yet; check the previous 4KB-aligned address\r
191 //\r
192 Pe32Data -= SIZE_4KB;\r
193 }\r
194\r
195 DumpIpAddress = CallerIpAddress;\r
196 DEBUG ((EFI_D_ERROR, "It is invoked from the instruction before IP(0x%lx)", DumpIpAddress));\r
197\r
198 if (Pe32Data != 0) {\r
199 PdbPointer = PeCoffLoaderGetPdbPointer ((VOID *) Pe32Data);\r
200 if (PdbPointer != NULL) {\r
201 DEBUG ((EFI_D_ERROR, " in module (%a)", PdbPointer));\r
202 }\r
203 }\r
204}\r
205\r
206/**\r
207 Read information from the CPU save state.\r
208\r
209 @param This EFI_SMM_CPU_PROTOCOL instance\r
210 @param Width The number of bytes to read from the CPU save state.\r
 211 @param Register Specifies the CPU register to read from the save state.\r
212 @param CpuIndex Specifies the zero-based index of the CPU save state.\r
213 @param Buffer Upon return, this holds the CPU register value read from the save state.\r
214\r
215 @retval EFI_SUCCESS The register was read from Save State\r
216 @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor\r
 217 @retval EFI_INVALID_PARAMETER Buffer is NULL, or CpuIndex is out of range.\r
218\r
219**/\r
220EFI_STATUS\r
221EFIAPI\r
222SmmReadSaveState (\r
223 IN CONST EFI_SMM_CPU_PROTOCOL *This,\r
224 IN UINTN Width,\r
225 IN EFI_SMM_SAVE_STATE_REGISTER Register,\r
226 IN UINTN CpuIndex,\r
227 OUT VOID *Buffer\r
228 )\r
229{\r
230 EFI_STATUS Status;\r
231\r
232 //\r
233 // Retrieve pointer to the specified CPU's SMM Save State buffer\r
234 //\r
235 if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {\r
236 return EFI_INVALID_PARAMETER;\r
237 }\r
238\r
239 //\r
240 // Check for special EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID\r
241 //\r
242 if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {\r
243 //\r
244 // The pseudo-register only supports the 64-bit size specified by Width.\r
245 //\r
246 if (Width != sizeof (UINT64)) {\r
247 return EFI_INVALID_PARAMETER;\r
248 }\r
249 //\r
250 // If the processor is in SMM at the time the SMI occurred,\r
251 // the pseudo register value for EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID is returned in Buffer.\r
252 // Otherwise, EFI_NOT_FOUND is returned.\r
253 //\r
 254 if (*(mSmmMpSyncData->CpuData[CpuIndex].Present)) {\r
255 *(UINT64 *)Buffer = gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId;\r
256 return EFI_SUCCESS;\r
257 } else {\r
258 return EFI_NOT_FOUND;\r
259 }\r
260 }\r
261\r
 262 if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {\r
263 return EFI_INVALID_PARAMETER;\r
264 }\r
265\r
266 Status = SmmCpuFeaturesReadSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
267 if (Status == EFI_UNSUPPORTED) {\r
268 Status = ReadSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
269 }\r
270 return Status;\r
271}\r
272\r
273/**\r
274 Write data to the CPU save state.\r
275\r
276 @param This EFI_SMM_CPU_PROTOCOL instance\r
 277 @param Width The number of bytes to write to the CPU save state.\r
278 @param Register Specifies the CPU register to write to the save state.\r
279 @param CpuIndex Specifies the zero-based index of the CPU save state\r
280 @param Buffer Upon entry, this holds the new CPU register value.\r
281\r
 282 @retval EFI_SUCCESS The register was written to Save State\r
 283 @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor\r
 284 @retval EFI_INVALID_PARAMETER CpuIndex or Width is not correct\r
285\r
286**/\r
287EFI_STATUS\r
288EFIAPI\r
289SmmWriteSaveState (\r
290 IN CONST EFI_SMM_CPU_PROTOCOL *This,\r
291 IN UINTN Width,\r
292 IN EFI_SMM_SAVE_STATE_REGISTER Register,\r
293 IN UINTN CpuIndex,\r
294 IN CONST VOID *Buffer\r
295 )\r
296{\r
297 EFI_STATUS Status;\r
298\r
299 //\r
300 // Retrieve pointer to the specified CPU's SMM Save State buffer\r
301 //\r
302 if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {\r
303 return EFI_INVALID_PARAMETER;\r
304 }\r
305\r
306 //\r
307 // Writes to EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID are ignored\r
308 //\r
309 if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {\r
310 return EFI_SUCCESS;\r
311 }\r
312\r
 313 if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {\r
314 return EFI_INVALID_PARAMETER;\r
315 }\r
316\r
317 Status = SmmCpuFeaturesWriteSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
318 if (Status == EFI_UNSUPPORTED) {\r
319 Status = WriteSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
320 }\r
321 return Status;\r
322}\r
323\r
324\r
325/**\r
326 C function for the SMI handler used to change each processor's SMBASE register.\r
327\r
328**/\r
329VOID\r
330EFIAPI\r
331SmmInitHandler (\r
332 VOID\r
333 )\r
334{\r
335 UINT32 ApicId;\r
336 UINTN Index;\r
337\r
338 //\r
339 // Update SMM IDT entries' code segment and load IDT\r
340 //\r
341 AsmWriteIdtr (&gcSmiIdtr);\r
342 ApicId = GetApicId ();\r
343\r
344 ASSERT (mNumberOfCpus <= PcdGet32 (PcdCpuMaxLogicalProcessorNumber));\r
345\r
346 for (Index = 0; Index < mNumberOfCpus; Index++) {\r
347 if (ApicId == (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {\r
348 //\r
349 // Initialize SMM specific features on the currently executing CPU\r
350 //\r
351 SmmCpuFeaturesInitializeProcessor (\r
352 Index,\r
353 mIsBsp,\r
354 gSmmCpuPrivate->ProcessorInfo,\r
355 &mCpuHotPlugData\r
356 );\r
357\r
358 if (!mSmmS3Flag) {\r
359 //\r
360 // Check XD and BTS features on each processor on normal boot\r
361 //\r
 362 CheckFeatureSupported ();\r
363 }\r
364\r
365 if (mIsBsp) {\r
366 //\r
367 // BSP rebase is already done above.\r
368 // Initialize private data during S3 resume\r
369 //\r
370 InitializeMpSyncData ();\r
371 }\r
372\r
373 //\r
374 // Hook return after RSM to set SMM re-based flag\r
375 //\r
376 SemaphoreHook (Index, &mRebased[Index]);\r
377\r
378 return;\r
379 }\r
380 }\r
381 ASSERT (FALSE);\r
382}\r
383\r
384/**\r
385 Relocate SmmBases for each processor.\r
386\r
387 Execute on first boot and all S3 resumes\r
388\r
389**/\r
390VOID\r
391EFIAPI\r
392SmmRelocateBases (\r
393 VOID\r
394 )\r
395{\r
396 UINT8 BakBuf[BACK_BUF_SIZE];\r
397 SMRAM_SAVE_STATE_MAP BakBuf2;\r
398 SMRAM_SAVE_STATE_MAP *CpuStatePtr;\r
399 UINT8 *U8Ptr;\r
400 UINT32 ApicId;\r
401 UINTN Index;\r
402 UINTN BspIndex;\r
403\r
404 //\r
405 // Make sure the reserved size is large enough for procedure SmmInitTemplate.\r
406 //\r
407 ASSERT (sizeof (BakBuf) >= gcSmmInitSize);\r
408\r
409 //\r
410 // Patch ASM code template with current CR0, CR3, and CR4 values\r
411 //\r
412 gSmmCr0 = (UINT32)AsmReadCr0 ();\r
413 gSmmCr3 = (UINT32)AsmReadCr3 ();\r
414 gSmmCr4 = (UINT32)AsmReadCr4 ();\r
415\r
416 //\r
417 // Patch GDTR for SMM base relocation\r
418 //\r
419 gcSmiInitGdtr.Base = gcSmiGdtr.Base;\r
420 gcSmiInitGdtr.Limit = gcSmiGdtr.Limit;\r
421\r
422 U8Ptr = (UINT8*)(UINTN)(SMM_DEFAULT_SMBASE + SMM_HANDLER_OFFSET);\r
423 CpuStatePtr = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);\r
424\r
425 //\r
426 // Backup original contents at address 0x38000\r
427 //\r
428 CopyMem (BakBuf, U8Ptr, sizeof (BakBuf));\r
429 CopyMem (&BakBuf2, CpuStatePtr, sizeof (BakBuf2));\r
430\r
431 //\r
432 // Load image for relocation\r
433 //\r
434 CopyMem (U8Ptr, gcSmmInitTemplate, gcSmmInitSize);\r
435\r
436 //\r
437 // Retrieve the local APIC ID of current processor\r
438 //\r
439 ApicId = GetApicId ();\r
440\r
441 //\r
442 // Relocate SM bases for all APs\r
443 // This is APs' 1st SMI - rebase will be done here, and APs' default SMI handler will be overridden by gcSmmInitTemplate\r
444 //\r
445 mIsBsp = FALSE;\r
446 BspIndex = (UINTN)-1;\r
447 for (Index = 0; Index < mNumberOfCpus; Index++) {\r
448 mRebased[Index] = FALSE;\r
449 if (ApicId != (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {\r
450 SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);\r
451 //\r
452 // Wait for this AP to finish its 1st SMI\r
453 //\r
454 while (!mRebased[Index]);\r
455 } else {\r
456 //\r
457 // BSP will be Relocated later\r
458 //\r
459 BspIndex = Index;\r
460 }\r
461 }\r
462\r
463 //\r
464 // Relocate BSP's SMM base\r
465 //\r
466 ASSERT (BspIndex != (UINTN)-1);\r
467 mIsBsp = TRUE;\r
468 SendSmiIpi (ApicId);\r
469 //\r
470 // Wait for the BSP to finish its 1st SMI\r
471 //\r
472 while (!mRebased[BspIndex]);\r
473\r
474 //\r
475 // Restore contents at address 0x38000\r
476 //\r
477 CopyMem (CpuStatePtr, &BakBuf2, sizeof (BakBuf2));\r
478 CopyMem (U8Ptr, BakBuf, sizeof (BakBuf));\r
479}\r
480\r
481/**\r
482 Perform SMM initialization for all processors in the S3 boot path.\r
483\r
484 For a native platform, MP initialization in the S3 boot path is also performed in this function.\r
485**/\r
486VOID\r
487EFIAPI\r
488SmmRestoreCpu (\r
489 VOID\r
490 )\r
491{\r
492 SMM_S3_RESUME_STATE *SmmS3ResumeState;\r
493 IA32_DESCRIPTOR Ia32Idtr;\r
494 IA32_DESCRIPTOR X64Idtr;\r
495 IA32_IDT_GATE_DESCRIPTOR IdtEntryTable[EXCEPTION_VECTOR_NUMBER];\r
496 EFI_STATUS Status;\r
497\r
498 DEBUG ((EFI_D_INFO, "SmmRestoreCpu()\n"));\r
499\r
500 mSmmS3Flag = TRUE;\r
501\r
502 InitializeSpinLock (mMemoryMappedLock);\r
503\r
504 //\r
505 // See if there is enough context to resume PEI Phase\r
506 //\r
507 if (mSmmS3ResumeState == NULL) {\r
508 DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));\r
509 CpuDeadLoop ();\r
510 }\r
511\r
512 SmmS3ResumeState = mSmmS3ResumeState;\r
513 ASSERT (SmmS3ResumeState != NULL);\r
514\r
515 if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {\r
516 //\r
517 // Save the IA32 IDT Descriptor\r
518 //\r
519 AsmReadIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);\r
520\r
521 //\r
522 // Setup X64 IDT table\r
523 //\r
524 ZeroMem (IdtEntryTable, sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32);\r
525 X64Idtr.Base = (UINTN) IdtEntryTable;\r
526 X64Idtr.Limit = (UINT16) (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32 - 1);\r
527 AsmWriteIdtr ((IA32_DESCRIPTOR *) &X64Idtr);\r
528\r
529 //\r
530 // Setup the default exception handler\r
531 //\r
532 Status = InitializeCpuExceptionHandlers (NULL);\r
533 ASSERT_EFI_ERROR (Status);\r
534\r
535 //\r
536 // Initialize Debug Agent to support source level debug\r
537 //\r
538 InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64, (VOID *)&Ia32Idtr, NULL);\r
539 }\r
540\r
541 //\r
 542 // Skip initialization if mAcpiCpuData is not valid\r
 543 //\r
 544 if (mAcpiCpuData.NumberOfCpus > 0) {\r
 545 //\r
 546 // First time microcode load and restore MTRRs\r
 547 //\r
 548 EarlyInitializeCpu ();\r
549 }\r
550\r
551 //\r
552 // Restore SMBASE for BSP and all APs\r
553 //\r
554 SmmRelocateBases ();\r
555\r
556 //\r
 557 // Skip initialization if mAcpiCpuData is not valid\r
 558 //\r
 559 if (mAcpiCpuData.NumberOfCpus > 0) {\r
 560 //\r
 561 // Restore MSRs for BSP and all APs\r
 562 //\r
 563 InitializeCpu ();\r
564 }\r
565\r
566 //\r
567 // Set a flag to restore SMM configuration in S3 path.\r
568 //\r
569 mRestoreSmmConfigurationInS3 = TRUE;\r
570\r
571 DEBUG (( EFI_D_INFO, "SMM S3 Return CS = %x\n", SmmS3ResumeState->ReturnCs));\r
572 DEBUG (( EFI_D_INFO, "SMM S3 Return Entry Point = %x\n", SmmS3ResumeState->ReturnEntryPoint));\r
573 DEBUG (( EFI_D_INFO, "SMM S3 Return Context1 = %x\n", SmmS3ResumeState->ReturnContext1));\r
574 DEBUG (( EFI_D_INFO, "SMM S3 Return Context2 = %x\n", SmmS3ResumeState->ReturnContext2));\r
575 DEBUG (( EFI_D_INFO, "SMM S3 Return Stack Pointer = %x\n", SmmS3ResumeState->ReturnStackPointer));\r
576\r
577 //\r
578 // If SMM is in 32-bit mode, then use SwitchStack() to resume PEI Phase\r
579 //\r
580 if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_32) {\r
581 DEBUG ((EFI_D_INFO, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));\r
582\r
583 SwitchStack (\r
584 (SWITCH_STACK_ENTRY_POINT)(UINTN)SmmS3ResumeState->ReturnEntryPoint,\r
585 (VOID *)(UINTN)SmmS3ResumeState->ReturnContext1,\r
586 (VOID *)(UINTN)SmmS3ResumeState->ReturnContext2,\r
587 (VOID *)(UINTN)SmmS3ResumeState->ReturnStackPointer\r
588 );\r
589 }\r
590\r
591 //\r
592 // If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase\r
593 //\r
594 if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {\r
595 DEBUG ((EFI_D_INFO, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));\r
596 //\r
597 // Disable interrupt of Debug timer, since new IDT table is for IA32 and will not work in long mode.\r
598 //\r
599 SaveAndSetDebugTimerInterrupt (FALSE);\r
600 //\r
601 // Restore IA32 IDT table\r
602 //\r
603 AsmWriteIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);\r
604 AsmDisablePaging64 (\r
605 SmmS3ResumeState->ReturnCs,\r
606 (UINT32)SmmS3ResumeState->ReturnEntryPoint,\r
607 (UINT32)SmmS3ResumeState->ReturnContext1,\r
608 (UINT32)SmmS3ResumeState->ReturnContext2,\r
609 (UINT32)SmmS3ResumeState->ReturnStackPointer\r
610 );\r
611 }\r
612\r
613 //\r
614 // Can not resume PEI Phase\r
615 //\r
616 DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));\r
617 CpuDeadLoop ();\r
618}\r
619\r
620/**\r
621 Copy register table from ACPI NVS memory into SMRAM.\r
622\r
623 @param[in] DestinationRegisterTableList Points to destination register table.\r
624 @param[in] SourceRegisterTableList Points to source register table.\r
625 @param[in] NumberOfCpus Number of CPUs.\r
626\r
627**/\r
628VOID\r
629CopyRegisterTable (\r
630 IN CPU_REGISTER_TABLE *DestinationRegisterTableList,\r
631 IN CPU_REGISTER_TABLE *SourceRegisterTableList,\r
632 IN UINT32 NumberOfCpus\r
633 )\r
634{\r
635 UINTN Index;\r
636 UINTN Index1;\r
637 CPU_REGISTER_TABLE_ENTRY *RegisterTableEntry;\r
638\r
639 CopyMem (DestinationRegisterTableList, SourceRegisterTableList, NumberOfCpus * sizeof (CPU_REGISTER_TABLE));\r
640 for (Index = 0; Index < NumberOfCpus; Index++) {\r
641 DestinationRegisterTableList[Index].RegisterTableEntry = AllocatePool (DestinationRegisterTableList[Index].AllocatedSize);\r
642 ASSERT (DestinationRegisterTableList[Index].RegisterTableEntry != NULL);\r
643 CopyMem (DestinationRegisterTableList[Index].RegisterTableEntry, SourceRegisterTableList[Index].RegisterTableEntry, DestinationRegisterTableList[Index].AllocatedSize);\r
644 //\r
 645 // Go through all MSRs in the register table to initialize the MSR spin locks\r
646 //\r
647 RegisterTableEntry = DestinationRegisterTableList[Index].RegisterTableEntry;\r
648 for (Index1 = 0; Index1 < DestinationRegisterTableList[Index].TableLength; Index1++, RegisterTableEntry++) {\r
649 if ((RegisterTableEntry->RegisterType == Msr) && (RegisterTableEntry->ValidBitLength < 64)) {\r
650 //\r
 651 // Initialize an MSR spin lock only for those MSRs that need bit field writes\r
652 //\r
653 InitMsrSpinLockByIndex (RegisterTableEntry->Index);\r
654 }\r
655 }\r
656 }\r
657}\r
658\r
659/**\r
660 SMM Ready To Lock event notification handler.\r
661\r
662 The CPU S3 data is copied to SMRAM for security and mSmmReadyToLock is set to\r
663 perform additional lock actions that must be performed from SMM on the next SMI.\r
664\r
665 @param[in] Protocol Points to the protocol's unique identifier.\r
666 @param[in] Interface Points to the interface instance.\r
667 @param[in] Handle The handle on which the interface was installed.\r
668\r
669 @retval EFI_SUCCESS Notification handler runs successfully.\r
670 **/\r
671EFI_STATUS\r
672EFIAPI\r
673SmmReadyToLockEventNotify (\r
674 IN CONST EFI_GUID *Protocol,\r
675 IN VOID *Interface,\r
676 IN EFI_HANDLE Handle\r
677 )\r
678{\r
679 ACPI_CPU_DATA *AcpiCpuData;\r
680 IA32_DESCRIPTOR *Gdtr;\r
681 IA32_DESCRIPTOR *Idtr;\r
682\r
683 //\r
 684 // Prevent use of mAcpiCpuData by initializing NumberOfCpus to 0\r
685 //\r
686 mAcpiCpuData.NumberOfCpus = 0;\r
687\r
688 //\r
689 // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM\r
690 //\r
691 AcpiCpuData = (ACPI_CPU_DATA *)(UINTN)PcdGet64 (PcdCpuS3DataAddress);\r
692 if (AcpiCpuData == 0) {\r
693 goto Done;\r
694 }\r
695\r
696 //\r
697 // For a native platform, copy the CPU S3 data into SMRAM for use on CPU S3 Resume.\r
698 //\r
699 CopyMem (&mAcpiCpuData, AcpiCpuData, sizeof (mAcpiCpuData));\r
700\r
701 mAcpiCpuData.MtrrTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (MTRR_SETTINGS));\r
702 ASSERT (mAcpiCpuData.MtrrTable != 0);\r
703\r
704 CopyMem ((VOID *)(UINTN)mAcpiCpuData.MtrrTable, (VOID *)(UINTN)AcpiCpuData->MtrrTable, sizeof (MTRR_SETTINGS));\r
705\r
706 mAcpiCpuData.GdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));\r
707 ASSERT (mAcpiCpuData.GdtrProfile != 0);\r
708\r
709 CopyMem ((VOID *)(UINTN)mAcpiCpuData.GdtrProfile, (VOID *)(UINTN)AcpiCpuData->GdtrProfile, sizeof (IA32_DESCRIPTOR));\r
710\r
711 mAcpiCpuData.IdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));\r
712 ASSERT (mAcpiCpuData.IdtrProfile != 0);\r
713\r
714 CopyMem ((VOID *)(UINTN)mAcpiCpuData.IdtrProfile, (VOID *)(UINTN)AcpiCpuData->IdtrProfile, sizeof (IA32_DESCRIPTOR));\r
715\r
716 mAcpiCpuData.PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));\r
717 ASSERT (mAcpiCpuData.PreSmmInitRegisterTable != 0);\r
718\r
719 CopyRegisterTable (\r
720 (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable,\r
721 (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->PreSmmInitRegisterTable,\r
722 mAcpiCpuData.NumberOfCpus\r
723 );\r
724\r
725 mAcpiCpuData.RegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));\r
726 ASSERT (mAcpiCpuData.RegisterTable != 0);\r
727\r
728 CopyRegisterTable (\r
729 (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable,\r
730 (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->RegisterTable,\r
731 mAcpiCpuData.NumberOfCpus\r
732 );\r
733\r
734 //\r
735 // Copy AP's GDT, IDT and Machine Check handler into SMRAM.\r
736 //\r
737 Gdtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.GdtrProfile;\r
738 Idtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.IdtrProfile;\r
739\r
740 mGdtForAp = AllocatePool ((Gdtr->Limit + 1) + (Idtr->Limit + 1) + mAcpiCpuData.ApMachineCheckHandlerSize);\r
741 ASSERT (mGdtForAp != NULL);\r
742 mIdtForAp = (VOID *) ((UINTN)mGdtForAp + (Gdtr->Limit + 1));\r
743 mMachineCheckHandlerForAp = (VOID *) ((UINTN)mIdtForAp + (Idtr->Limit + 1));\r
744\r
745 CopyMem (mGdtForAp, (VOID *)Gdtr->Base, Gdtr->Limit + 1);\r
746 CopyMem (mIdtForAp, (VOID *)Idtr->Base, Idtr->Limit + 1);\r
747 CopyMem (mMachineCheckHandlerForAp, (VOID *)(UINTN)mAcpiCpuData.ApMachineCheckHandlerBase, mAcpiCpuData.ApMachineCheckHandlerSize);\r
748\r
749Done:\r
750 //\r
751 // Set SMM ready to lock flag and return\r
752 //\r
753 mSmmReadyToLock = TRUE;\r
754 return EFI_SUCCESS;\r
755}\r
756\r
757/**\r
758 The module Entry Point of the CPU SMM driver.\r
759\r
760 @param ImageHandle The firmware allocated handle for the EFI image.\r
761 @param SystemTable A pointer to the EFI System Table.\r
762\r
763 @retval EFI_SUCCESS The entry point is executed successfully.\r
764 @retval Other Some error occurs when executing this entry point.\r
765\r
766**/\r
767EFI_STATUS\r
768EFIAPI\r
769PiCpuSmmEntry (\r
770 IN EFI_HANDLE ImageHandle,\r
771 IN EFI_SYSTEM_TABLE *SystemTable\r
772 )\r
773{\r
774 EFI_STATUS Status;\r
775 EFI_MP_SERVICES_PROTOCOL *MpServices;\r
776 UINTN NumberOfEnabledProcessors;\r
777 UINTN Index;\r
778 VOID *Buffer;\r
779 UINTN BufferPages;\r
780 UINTN TileCodeSize;\r
781 UINTN TileDataSize;\r
782 UINTN TileSize;\r
783 VOID *GuidHob;\r
784 EFI_SMRAM_DESCRIPTOR *SmramDescriptor;\r
785 SMM_S3_RESUME_STATE *SmmS3ResumeState;\r
786 UINT8 *Stacks;\r
787 VOID *Registration;\r
788 UINT32 RegEax;\r
789 UINT32 RegEdx;\r
790 UINTN FamilyId;\r
791 UINTN ModelId;\r
792 UINT32 Cr3;\r
793\r
794 //\r
795 // Initialize Debug Agent to support source level debug in SMM code\r
796 //\r
797 InitializeDebugAgent (DEBUG_AGENT_INIT_SMM, NULL, NULL);\r
798\r
799 //\r
800 // Report the start of CPU SMM initialization.\r
801 //\r
802 REPORT_STATUS_CODE (\r
803 EFI_PROGRESS_CODE,\r
804 EFI_COMPUTING_UNIT_HOST_PROCESSOR | EFI_CU_HP_PC_SMM_INIT\r
805 );\r
806\r
807 //\r
808 // Fix segment address of the long-mode-switch jump\r
809 //\r
810 if (sizeof (UINTN) == sizeof (UINT64)) {\r
811 gSmmJmpAddr.Segment = LONG_MODE_CODE_SEGMENT;\r
812 }\r
813\r
814 //\r
815 // Find out SMRR Base and SMRR Size\r
816 //\r
817 FindSmramInfo (&mCpuHotPlugData.SmrrBase, &mCpuHotPlugData.SmrrSize);\r
818\r
819 //\r
820 // Get MP Services Protocol\r
821 //\r
822 Status = SystemTable->BootServices->LocateProtocol (&gEfiMpServiceProtocolGuid, NULL, (VOID **)&MpServices);\r
823 ASSERT_EFI_ERROR (Status);\r
824\r
825 //\r
826 // Use MP Services Protocol to retrieve the number of processors and number of enabled processors\r
827 //\r
828 Status = MpServices->GetNumberOfProcessors (MpServices, &mNumberOfCpus, &NumberOfEnabledProcessors);\r
829 ASSERT_EFI_ERROR (Status);\r
830 ASSERT (mNumberOfCpus <= PcdGet32 (PcdCpuMaxLogicalProcessorNumber));\r
831\r
832 //\r
 833 // If CPU hot plug is supported, PcdCpuSmmEnableBspElection should be set to TRUE.\r
 834 // A fixed BSP index makes no sense because that CPU may be hot removed.\r
835 //\r
836 DEBUG_CODE (\r
837 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {\r
838\r
839 ASSERT (FeaturePcdGet (PcdCpuSmmEnableBspElection));\r
840 }\r
841 );\r
842\r
843 //\r
844 // Save the PcdCpuSmmCodeAccessCheckEnable value into a global variable.\r
845 //\r
846 mSmmCodeAccessCheckEnable = PcdGetBool (PcdCpuSmmCodeAccessCheckEnable);\r
847 DEBUG ((EFI_D_INFO, "PcdCpuSmmCodeAccessCheckEnable = %d\n", mSmmCodeAccessCheckEnable));\r
848\r
849 //\r
 850 // If CPU hot plug is supported, we need to allocate resources for processors that may be hot added\r
851 //\r
852 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {\r
853 mMaxNumberOfCpus = PcdGet32 (PcdCpuMaxLogicalProcessorNumber);\r
854 } else {\r
855 mMaxNumberOfCpus = mNumberOfCpus;\r
856 }\r
857 gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus = mMaxNumberOfCpus;\r
858\r
859 //\r
860 // The CPU save state and code for the SMI entry point are tiled within an SMRAM\r
861 // allocated buffer. The minimum size of this buffer for a uniprocessor system\r
862 // is 32 KB, because the entry point is SMBASE + 32KB, and CPU save state area\r
863 // just below SMBASE + 64KB. If more than one CPU is present in the platform,\r
 864 // then the SMI entry point and the CPU save state areas can be tiled to minimize\r
 865 // the total amount of SMRAM required for all the CPUs. The tile size can be computed\r
 866 // by adding the CPU save state size, any extra CPU specific context, and\r
 867 // the size of code that must be placed at the SMI entry point to transfer\r
 868 // control to a C function in the native SMM execution mode. This size is\r
 869 // rounded up to the nearest power of 2 to give the tile size for each CPU.\r
870 // The total amount of memory required is the maximum number of CPUs that\r
871 // platform supports times the tile size. The picture below shows the tiling,\r
872 // where m is the number of tiles that fit in 32KB.\r
873 //\r
874 // +-----------------------------+ <-- 2^n offset from Base of allocated buffer\r
875 // | CPU m+1 Save State |\r
876 // +-----------------------------+\r
877 // | CPU m+1 Extra Data |\r
878 // +-----------------------------+\r
879 // | Padding |\r
880 // +-----------------------------+\r
881 // | CPU 2m SMI Entry |\r
882 // +#############################+ <-- Base of allocated buffer + 64 KB\r
883 // | CPU m-1 Save State |\r
884 // +-----------------------------+\r
885 // | CPU m-1 Extra Data |\r
886 // +-----------------------------+\r
887 // | Padding |\r
888 // +-----------------------------+\r
889 // | CPU 2m-1 SMI Entry |\r
890 // +=============================+ <-- 2^n offset from Base of allocated buffer\r
891 // | . . . . . . . . . . . . |\r
892 // +=============================+ <-- 2^n offset from Base of allocated buffer\r
893 // | CPU 2 Save State |\r
894 // +-----------------------------+\r
895 // | CPU 2 Extra Data |\r
896 // +-----------------------------+\r
897 // | Padding |\r
898 // +-----------------------------+\r
899 // | CPU m+1 SMI Entry |\r
900 // +=============================+ <-- Base of allocated buffer + 32 KB\r
901 // | CPU 1 Save State |\r
902 // +-----------------------------+\r
903 // | CPU 1 Extra Data |\r
904 // +-----------------------------+\r
905 // | Padding |\r
906 // +-----------------------------+\r
907 // | CPU m SMI Entry |\r
908 // +#############################+ <-- Base of allocated buffer + 32 KB == CPU 0 SMBASE + 64 KB\r
909 // | CPU 0 Save State |\r
910 // +-----------------------------+\r
911 // | CPU 0 Extra Data |\r
912 // +-----------------------------+\r
913 // | Padding |\r
914 // +-----------------------------+\r
915 // | CPU m-1 SMI Entry |\r
916 // +=============================+ <-- 2^n offset from Base of allocated buffer\r
917 // | . . . . . . . . . . . . |\r
918 // +=============================+ <-- 2^n offset from Base of allocated buffer\r
919 // | Padding |\r
920 // +-----------------------------+\r
921 // | CPU 1 SMI Entry |\r
922 // +=============================+ <-- 2^n offset from Base of allocated buffer\r
923 // | Padding |\r
924 // +-----------------------------+\r
925 // | CPU 0 SMI Entry |\r
926 // +#############################+ <-- Base of allocated buffer == CPU 0 SMBASE + 32 KB\r
927 //\r
928\r
929 //\r
930 // Retrieve CPU Family\r
931 //\r
 932 AsmCpuid (CPUID_VERSION_INFO, &RegEax, NULL, NULL, NULL);\r
933 FamilyId = (RegEax >> 8) & 0xf;\r
934 ModelId = (RegEax >> 4) & 0xf;\r
935 if (FamilyId == 0x06 || FamilyId == 0x0f) {\r
936 ModelId = ModelId | ((RegEax >> 12) & 0xf0);\r
937 }\r
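 //\r
 // Per CPUID leaf 01h, EAX[11:8] is the Family and EAX[7:4] is the Model;\r
 // for Family 06h and 0Fh the Extended Model in EAX[19:16] supplies the upper\r
 // nibble of the model number, which is what the OR above reconstructs.\r
 //\r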
938\r
939 RegEdx = 0;\r
940 AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);\r
941 if (RegEax >= CPUID_EXTENDED_CPU_SIG) {\r
942 AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);\r
943 }\r
944 //\r
945 // Determine the mode of the CPU at the time an SMI occurs\r
946 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual\r
947 // Volume 3C, Section 34.4.1.1\r
948 //\r
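 // CPUID.80000001H:EDX[29] is the Intel 64 (long-mode) support bit; when it is\r
 // set the processor saves state in the 64-bit SMRAM save state map layout.\r
 // A few Family 06h models (0x0F, 0x17, 0x1C) are handled below as using the\r
 // 64-bit layout as well.\r
 //\r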
949 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT;\r
950 if ((RegEdx & BIT29) != 0) {\r
951 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;\r
952 }\r
953 if (FamilyId == 0x06) {\r
954 if (ModelId == 0x17 || ModelId == 0x0f || ModelId == 0x1c) {\r
955 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;\r
956 }\r
957 }\r
958\r
959 //\r
960 // Compute tile size of buffer required to hold the CPU SMRAM Save State Map, extra CPU\r
961 // specific context in a PROCESSOR_SMM_DESCRIPTOR, and the SMI entry point. This size\r
962 // is rounded up to nearest power of 2.\r
963 //\r
964 TileCodeSize = GetSmiHandlerSize ();\r
965 TileCodeSize = ALIGN_VALUE(TileCodeSize, SIZE_4KB);\r
966 TileDataSize = sizeof (SMRAM_SAVE_STATE_MAP) + sizeof (PROCESSOR_SMM_DESCRIPTOR);\r
967 TileDataSize = ALIGN_VALUE(TileDataSize, SIZE_4KB);\r
968 TileSize = TileDataSize + TileCodeSize - 1;\r
 969 TileSize = 2 * GetPowerOfTwo32 ((UINT32)TileSize);\r
 970 DEBUG ((EFI_D_INFO, "SMRAM TileSize = 0x%08x (0x%08x, 0x%08x)\n", TileSize, TileCodeSize, TileDataSize));\r
971\r
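 //\r
 // For example, if TileCodeSize and TileDataSize each round up to 0x1000, then\r
 // TileSize = 2 * GetPowerOfTwo32 (0x1FFF) = 0x2000, i.e. the smallest power of\r
 // two that can hold both regions.\r
 //\r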
972 //\r
 973 // If the TileSize is larger than the space available for the SMI Handler of CPU[i],\r
 974 // the PROCESSOR_SMM_DESCRIPTOR of CPU[i+1], and the SMRAM Save State Map of CPU[i+1],\r
 975 // then the ASSERT() below is triggered. If this ASSERT() is triggered, the SMI Handler\r
 976 // size must be reduced.\r
977 //\r
978 ASSERT (TileSize <= (SMRAM_SAVE_STATE_MAP_OFFSET + sizeof (SMRAM_SAVE_STATE_MAP) - SMM_HANDLER_OFFSET));\r
979\r
980 //\r
981 // Allocate buffer for all of the tiles.\r
982 //\r
983 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual\r
984 // Volume 3C, Section 34.11 SMBASE Relocation\r
985 // For Pentium and Intel486 processors, the SMBASE values must be\r
986 // aligned on a 32-KByte boundary or the processor will enter shutdown\r
987 // state during the execution of a RSM instruction.\r
988 //\r
989 // Intel486 processors: FamilyId is 4\r
990 // Pentium processors : FamilyId is 5\r
991 //\r
 992 BufferPages = EFI_SIZE_TO_PAGES (SIZE_32KB + TileSize * (mMaxNumberOfCpus - 1));\r
 993 if ((FamilyId == 4) || (FamilyId == 5)) {\r
 994 Buffer = AllocateAlignedPages (BufferPages, SIZE_32KB);\r
 995 } else {\r
 996 Buffer = AllocateAlignedPages (BufferPages, SIZE_4KB);\r
997 }\r
998 ASSERT (Buffer != NULL);\r
 999 DEBUG ((EFI_D_INFO, "SMRAM SaveState Buffer (0x%08x, 0x%08x)\n", Buffer, EFI_PAGES_TO_SIZE(BufferPages)));\r
1000\r
1001 //\r
1002 // Allocate buffer for pointers to array in SMM_CPU_PRIVATE_DATA.\r
1003 //\r
1004 gSmmCpuPrivate->ProcessorInfo = (EFI_PROCESSOR_INFORMATION *)AllocatePool (sizeof (EFI_PROCESSOR_INFORMATION) * mMaxNumberOfCpus);\r
1005 ASSERT (gSmmCpuPrivate->ProcessorInfo != NULL);\r
1006\r
1007 gSmmCpuPrivate->Operation = (SMM_CPU_OPERATION *)AllocatePool (sizeof (SMM_CPU_OPERATION) * mMaxNumberOfCpus);\r
1008 ASSERT (gSmmCpuPrivate->Operation != NULL);\r
1009\r
1010 gSmmCpuPrivate->CpuSaveStateSize = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);\r
1011 ASSERT (gSmmCpuPrivate->CpuSaveStateSize != NULL);\r
1012\r
1013 gSmmCpuPrivate->CpuSaveState = (VOID **)AllocatePool (sizeof (VOID *) * mMaxNumberOfCpus);\r
1014 ASSERT (gSmmCpuPrivate->CpuSaveState != NULL);\r
1015\r
1016 mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveStateSize = gSmmCpuPrivate->CpuSaveStateSize;\r
1017 mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveState = gSmmCpuPrivate->CpuSaveState;\r
1018\r
1019 //\r
1020 // Allocate buffer for pointers to array in CPU_HOT_PLUG_DATA.\r
1021 //\r
1022 mCpuHotPlugData.ApicId = (UINT64 *)AllocatePool (sizeof (UINT64) * mMaxNumberOfCpus);\r
1023 ASSERT (mCpuHotPlugData.ApicId != NULL);\r
1024 mCpuHotPlugData.SmBase = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);\r
1025 ASSERT (mCpuHotPlugData.SmBase != NULL);\r
1026 mCpuHotPlugData.ArrayLength = (UINT32)mMaxNumberOfCpus;\r
1027\r
1028 //\r
1029 // Retrieve APIC ID of each enabled processor from the MP Services protocol.\r
1030 // Also compute the SMBASE address, CPU Save State address, and CPU Save state\r
1031 // size for each CPU in the platform\r
1032 //\r
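 // Note: SmBase[Index] is computed below so that the SMI entry code for\r
 // CPU[Index] (at SMBASE + SMM_HANDLER_OFFSET, i.e. SMBASE + 32KB) lands at\r
 // the start of tile Index within the allocated buffer, matching the tiling\r
 // diagram above.\r
 //\r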
1033 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {\r
1034 mCpuHotPlugData.SmBase[Index] = (UINTN)Buffer + Index * TileSize - SMM_HANDLER_OFFSET;\r
1035 gSmmCpuPrivate->CpuSaveStateSize[Index] = sizeof(SMRAM_SAVE_STATE_MAP);\r
1036 gSmmCpuPrivate->CpuSaveState[Index] = (VOID *)(mCpuHotPlugData.SmBase[Index] + SMRAM_SAVE_STATE_MAP_OFFSET);\r
1037 gSmmCpuPrivate->Operation[Index] = SmmCpuNone;\r
1038\r
1039 if (Index < mNumberOfCpus) {\r
1040 Status = MpServices->GetProcessorInfo (MpServices, Index, &gSmmCpuPrivate->ProcessorInfo[Index]);\r
1041 ASSERT_EFI_ERROR (Status);\r
1042 mCpuHotPlugData.ApicId[Index] = gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId;\r
1043\r
1044 DEBUG ((EFI_D_INFO, "CPU[%03x] APIC ID=%04x SMBASE=%08x SaveState=%08x Size=%08x\n",\r
1045 Index,\r
1046 (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId,\r
1047 mCpuHotPlugData.SmBase[Index],\r
1048 gSmmCpuPrivate->CpuSaveState[Index],\r
1049 gSmmCpuPrivate->CpuSaveStateSize[Index]\r
1050 ));\r
1051 } else {\r
1052 gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId = INVALID_APIC_ID;\r
1053 mCpuHotPlugData.ApicId[Index] = INVALID_APIC_ID;\r
1054 }\r
1055 }\r
1056\r
1057 //\r
1058 // Allocate SMI stacks for all processors.\r
1059 //\r
1060 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
1061 //\r
 1062 // Two more pages are allocated for each processor:\r
 1063 // one is the guard page and the other is the known good stack.\r
1064 //\r
1065 // +-------------------------------------------+-----+-------------------------------------------+\r
1066 // | Known Good Stack | Guard Page | SMM Stack | ... | Known Good Stack | Guard Page | SMM Stack |\r
1067 // +-------------------------------------------+-----+-------------------------------------------+\r
1068 // | | | |\r
1069 // |<-------------- Processor 0 -------------->| |<-------------- Processor n -------------->|\r
1070 //\r
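 // For example, with PcdCpuSmmStackSize = 0x2000 (two 4KB pages), each\r
 // processor gets 0x4000 bytes here: one known good stack page, one guard\r
 // page, and two pages of SMM stack.\r
 //\r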
1071 mSmmStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)) + 2);\r
1072 Stacks = (UINT8 *) AllocatePages (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)) + 2));\r
1073 ASSERT (Stacks != NULL);\r
1074 mSmmStackArrayBase = (UINTN)Stacks;\r
1075 mSmmStackArrayEnd = mSmmStackArrayBase + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * mSmmStackSize - 1;\r
1076 } else {\r
1077 mSmmStackSize = PcdGet32 (PcdCpuSmmStackSize);\r
1078 Stacks = (UINT8 *) AllocatePages (EFI_SIZE_TO_PAGES (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * mSmmStackSize));\r
1079 ASSERT (Stacks != NULL);\r
1080 }\r
1081\r
1082 //\r
1083 // Set SMI stack for SMM base relocation\r
1084 //\r
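 // gSmmInitStack points at the last UINTN-sized slot of the first stack; the\r
 // SmmInit entry code uses it as its stack pointer during SMBASE relocation.\r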
1085 gSmmInitStack = (UINTN) (Stacks + mSmmStackSize - sizeof (UINTN));\r
1086\r
1087 //\r
1088 // Initialize IDT\r
1089 //\r
1090 InitializeSmmIdt ();\r
1091\r
1092 //\r
1093 // Relocate SMM Base addresses to the ones allocated from SMRAM\r
1094 //\r
1095 mRebased = (BOOLEAN *)AllocateZeroPool (sizeof (BOOLEAN) * mMaxNumberOfCpus);\r
1096 ASSERT (mRebased != NULL);\r
1097 SmmRelocateBases ();\r
1098\r
1099 //\r
1100 // Call hook for BSP to perform extra actions in normal mode after all\r
1101 // SMM base addresses have been relocated on all CPUs\r
1102 //\r
1103 SmmCpuFeaturesSmmRelocationComplete ();\r
1104\r
1105 //\r
1106 // SMM Time initialization\r
1107 //\r
1108 InitializeSmmTimer ();\r
1109\r
1110 //\r
1111 // Initialize MP globals\r
1112 //\r
1113 Cr3 = InitializeMpServiceData (Stacks, mSmmStackSize);\r
1114\r
1115 //\r
1116 // Fill in SMM Reserved Regions\r
1117 //\r
1118 gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedStart = 0;\r
1119 gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedSize = 0;\r
1120\r
1121 //\r
1122 // Install the SMM Configuration Protocol onto a new handle on the handle database.\r
1123 // The entire SMM Configuration Protocol is allocated from SMRAM, so only a pointer\r
1124 // to an SMRAM address will be present in the handle database\r
1125 //\r
1126 Status = SystemTable->BootServices->InstallMultipleProtocolInterfaces (\r
1127 &gSmmCpuPrivate->SmmCpuHandle,\r
1128 &gEfiSmmConfigurationProtocolGuid, &gSmmCpuPrivate->SmmConfiguration,\r
1129 NULL\r
1130 );\r
1131 ASSERT_EFI_ERROR (Status);\r
1132\r
1133 //\r
1134 // Install the SMM CPU Protocol into SMM protocol database\r
1135 //\r
1136 Status = gSmst->SmmInstallProtocolInterface (\r
1137 &mSmmCpuHandle,\r
1138 &gEfiSmmCpuProtocolGuid,\r
1139 EFI_NATIVE_INTERFACE,\r
1140 &mSmmCpu\r
1141 );\r
1142 ASSERT_EFI_ERROR (Status);\r
1143\r
1144 //\r
1145 // Expose address of CPU Hot Plug Data structure if CPU hot plug is supported.\r
1146 //\r
1147 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {\r
1148 Status = PcdSet64S (PcdCpuHotPlugDataAddress, (UINT64)(UINTN)&mCpuHotPlugData);\r
1149 ASSERT_EFI_ERROR (Status);\r
1150 }\r
1151\r
1152 //\r
1153 // Initialize SMM CPU Services Support\r
1154 //\r
1155 Status = InitializeSmmCpuServices (mSmmCpuHandle);\r
1156 ASSERT_EFI_ERROR (Status);\r
1157\r
1158 //\r
1159 // register SMM Ready To Lock Protocol notification\r
1160 //\r
1161 Status = gSmst->SmmRegisterProtocolNotify (\r
1162 &gEfiSmmReadyToLockProtocolGuid,\r
1163 SmmReadyToLockEventNotify,\r
1164 &Registration\r
1165 );\r
1166 ASSERT_EFI_ERROR (Status);\r
1167\r
1168 GuidHob = GetFirstGuidHob (&gEfiAcpiVariableGuid);\r
1169 if (GuidHob != NULL) {\r
1170 SmramDescriptor = (EFI_SMRAM_DESCRIPTOR *) GET_GUID_HOB_DATA (GuidHob);\r
1171\r
1172 DEBUG ((EFI_D_INFO, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor));\r
1173 DEBUG ((EFI_D_INFO, "SMM S3 Structure = %x\n", SmramDescriptor->CpuStart));\r
1174\r
1175 SmmS3ResumeState = (SMM_S3_RESUME_STATE *)(UINTN)SmramDescriptor->CpuStart;\r
1176 ZeroMem (SmmS3ResumeState, sizeof (SMM_S3_RESUME_STATE));\r
1177\r
1178 mSmmS3ResumeState = SmmS3ResumeState;\r
1179 SmmS3ResumeState->Smst = (EFI_PHYSICAL_ADDRESS)(UINTN)gSmst;\r
1180\r
1181 SmmS3ResumeState->SmmS3ResumeEntryPoint = (EFI_PHYSICAL_ADDRESS)(UINTN)SmmRestoreCpu;\r
1182\r
1183 SmmS3ResumeState->SmmS3StackSize = SIZE_32KB;\r
1184 SmmS3ResumeState->SmmS3StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN)SmmS3ResumeState->SmmS3StackSize));\r
1185 if (SmmS3ResumeState->SmmS3StackBase == 0) {\r
1186 SmmS3ResumeState->SmmS3StackSize = 0;\r
1187 }\r
1188\r
1189 SmmS3ResumeState->SmmS3Cr0 = gSmmCr0;\r
1190 SmmS3ResumeState->SmmS3Cr3 = Cr3;\r
1191 SmmS3ResumeState->SmmS3Cr4 = gSmmCr4;\r
1192\r
1193 if (sizeof (UINTN) == sizeof (UINT64)) {\r
1194 SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_64;\r
1195 }\r
1196 if (sizeof (UINTN) == sizeof (UINT32)) {\r
1197 SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_32;\r
1198 }\r
1199 }\r
1200\r
1201 //\r
1202 // Initialize SMM Profile feature\r
1203 //\r
1204 InitSmmProfile (Cr3);\r
1205\r
1206 //\r
1207 // Patch SmmS3ResumeState->SmmS3Cr3\r
1208 //\r
1209 InitSmmS3Cr3 ();\r
1210\r
1211 DEBUG ((EFI_D_INFO, "SMM CPU Module exit from SMRAM with EFI_SUCCESS\n"));\r
1212\r
1213 return EFI_SUCCESS;\r
1214}\r
1215\r
1216/**\r
1217\r
1218 Find out SMRAM information including SMRR base and SMRR size.\r
1219\r
1220 @param SmrrBase SMRR base\r
1221 @param SmrrSize SMRR size\r
1222\r
1223**/\r
1224VOID\r
1225FindSmramInfo (\r
1226 OUT UINT32 *SmrrBase,\r
1227 OUT UINT32 *SmrrSize\r
1228 )\r
1229{\r
1230 EFI_STATUS Status;\r
1231 UINTN Size;\r
1232 EFI_SMM_ACCESS2_PROTOCOL *SmmAccess;\r
1233 EFI_SMRAM_DESCRIPTOR *CurrentSmramRange;\r
1234 EFI_SMRAM_DESCRIPTOR *SmramRanges;\r
1235 UINTN SmramRangeCount;\r
1236 UINTN Index;\r
1237 UINT64 MaxSize;\r
1238 BOOLEAN Found;\r
1239\r
1240 //\r
1241 // Get SMM Access Protocol\r
1242 //\r
1243 Status = gBS->LocateProtocol (&gEfiSmmAccess2ProtocolGuid, NULL, (VOID **)&SmmAccess);\r
1244 ASSERT_EFI_ERROR (Status);\r
1245\r
1246 //\r
1247 // Get SMRAM information\r
1248 //\r
1249 Size = 0;\r
1250 Status = SmmAccess->GetCapabilities (SmmAccess, &Size, NULL);\r
1251 ASSERT (Status == EFI_BUFFER_TOO_SMALL);\r
1252\r
1253 SmramRanges = (EFI_SMRAM_DESCRIPTOR *)AllocatePool (Size);\r
1254 ASSERT (SmramRanges != NULL);\r
1255\r
1256 Status = SmmAccess->GetCapabilities (SmmAccess, &Size, SmramRanges);\r
1257 ASSERT_EFI_ERROR (Status);\r
1258\r
1259 SmramRangeCount = Size / sizeof (EFI_SMRAM_DESCRIPTOR);\r
1260\r
1261 //\r
1262 // Find the largest SMRAM range between 1MB and 4GB that is at least 256K - 4K in size\r
1263 //\r
1264 CurrentSmramRange = NULL;\r
1265 for (Index = 0, MaxSize = SIZE_256KB - EFI_PAGE_SIZE; Index < SmramRangeCount; Index++) {\r
1266 //\r
1267 // Skip any SMRAM region that is already allocated, needs testing, or needs ECC initialization\r
1268 //\r
1269 if ((SmramRanges[Index].RegionState & (EFI_ALLOCATED | EFI_NEEDS_TESTING | EFI_NEEDS_ECC_INITIALIZATION)) != 0) {\r
1270 continue;\r
1271 }\r
1272\r
1273 if (SmramRanges[Index].CpuStart >= BASE_1MB) {\r
1274 if ((SmramRanges[Index].CpuStart + SmramRanges[Index].PhysicalSize) <= BASE_4GB) {\r
1275 if (SmramRanges[Index].PhysicalSize >= MaxSize) {\r
1276 MaxSize = SmramRanges[Index].PhysicalSize;\r
1277 CurrentSmramRange = &SmramRanges[Index];\r
1278 }\r
1279 }\r
1280 }\r
1281 }\r
1282\r
1283 ASSERT (CurrentSmramRange != NULL);\r
1284\r
1285 *SmrrBase = (UINT32)CurrentSmramRange->CpuStart;\r
1286 *SmrrSize = (UINT32)CurrentSmramRange->PhysicalSize;\r
1287\r
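 //\r
 // Extend the SMRR range to cover any other SMRAM descriptors that are\r
 // physically contiguous with it, either immediately below or immediately\r
 // above, and repeat until no more ranges can be merged.\r
 //\r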
1288 do {\r
1289 Found = FALSE;\r
1290 for (Index = 0; Index < SmramRangeCount; Index++) {\r
1291 if (SmramRanges[Index].CpuStart < *SmrrBase && *SmrrBase == (SmramRanges[Index].CpuStart + SmramRanges[Index].PhysicalSize)) {\r
1292 *SmrrBase = (UINT32)SmramRanges[Index].CpuStart;\r
1293 *SmrrSize = (UINT32)(*SmrrSize + SmramRanges[Index].PhysicalSize);\r
1294 Found = TRUE;\r
1295 } else if ((*SmrrBase + *SmrrSize) == SmramRanges[Index].CpuStart && SmramRanges[Index].PhysicalSize > 0) {\r
1296 *SmrrSize = (UINT32)(*SmrrSize + SmramRanges[Index].PhysicalSize);\r
1297 Found = TRUE;\r
1298 }\r
1299 }\r
1300 } while (Found);\r
1301\r
1302 DEBUG ((EFI_D_INFO, "SMRR Base: 0x%x, SMRR Size: 0x%x\n", *SmrrBase, *SmrrSize));\r
1303}\r
1304\r
1305/**\r
1306Configure SMM Code Access Check feature on an AP.\r
1307SMM Feature Control MSR will be locked after configuration.\r
1308\r
1309@param[in,out] Buffer Pointer to private data buffer.\r
1310**/\r
1311VOID\r
1312EFIAPI\r
1313ConfigSmmCodeAccessCheckOnCurrentProcessor (\r
1314 IN OUT VOID *Buffer\r
1315 )\r
1316{\r
1317 UINTN CpuIndex;\r
1318 UINT64 SmmFeatureControlMsr;\r
1319 UINT64 NewSmmFeatureControlMsr;\r
1320\r
1321 //\r
1322 // Retrieve the CPU Index from the context passed in\r
1323 //\r
1324 CpuIndex = *(UINTN *)Buffer;\r
1325\r
1326 //\r
1327 // Get the current SMM Feature Control MSR value\r
1328 //\r
1329 SmmFeatureControlMsr = SmmCpuFeaturesGetSmmRegister (CpuIndex, SmmRegFeatureControl);\r
1330\r
1331 //\r
1332 // Compute the new SMM Feature Control MSR value\r
1333 //\r
1334 NewSmmFeatureControlMsr = SmmFeatureControlMsr;\r
1335 if (mSmmCodeAccessCheckEnable) {\r
1336 NewSmmFeatureControlMsr |= SMM_CODE_CHK_EN_BIT;\r
1337 if (FeaturePcdGet (PcdCpuSmmFeatureControlMsrLock)) {\r
1338 NewSmmFeatureControlMsr |= SMM_FEATURE_CONTROL_LOCK_BIT;\r
1339 }\r
1340 }\r
1341\r
1342 //\r
1343 // Only set the SMM Feature Control MSR value if the new value is different than the current value\r
1344 //\r
1345 if (NewSmmFeatureControlMsr != SmmFeatureControlMsr) {\r
1346 SmmCpuFeaturesSetSmmRegister (CpuIndex, SmmRegFeatureControl, NewSmmFeatureControlMsr);\r
1347 }\r
1348\r
1349 //\r
 1350 // Release the spin lock used to serialize the updates to the SMM Feature Control MSR\r
1351 //\r
 1352 ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);\r
1353}\r
1354\r
1355/**\r
1356Configure SMM Code Access Check feature for all processors.\r
1357SMM Feature Control MSR will be locked after configuration.\r
1358**/\r
1359VOID\r
1360ConfigSmmCodeAccessCheck (\r
1361 VOID\r
1362 )\r
1363{\r
1364 UINTN Index;\r
1365 EFI_STATUS Status;\r
1366\r
1367 //\r
1368 // Check to see if the Feature Control MSR is supported on this CPU\r
1369 //\r
 1370 Index = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;\r
1371 if (!SmmCpuFeaturesIsSmmRegisterSupported (Index, SmmRegFeatureControl)) {\r
1372 mSmmCodeAccessCheckEnable = FALSE;\r
1373 return;\r
1374 }\r
1375\r
1376 //\r
1377 // Check to see if the CPU supports the SMM Code Access Check feature\r
1378 // Do not access this MSR unless the CPU supports the SmmRegFeatureControl\r
1379 //\r
1380 if ((AsmReadMsr64 (EFI_MSR_SMM_MCA_CAP) & SMM_CODE_ACCESS_CHK_BIT) == 0) {\r
1381 mSmmCodeAccessCheckEnable = FALSE;\r
1382 return;\r
1383 }\r
1384\r
1385 //\r
1386 // Initialize the lock used to serialize the MSR programming in BSP and all APs\r
1387 //\r
 1388 InitializeSpinLock (mConfigSmmCodeAccessCheckLock);\r
1389\r
1390 //\r
1391 // Acquire Config SMM Code Access Check spin lock. The BSP will release the\r
1392 // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().\r
1393 //\r
 1394 AcquireSpinLock (mConfigSmmCodeAccessCheckLock);\r
1395\r
1396 //\r
1397 // Enable SMM Code Access Check feature on the BSP.\r
1398 //\r
1399 ConfigSmmCodeAccessCheckOnCurrentProcessor (&Index);\r
1400\r
1401 //\r
1402 // Enable SMM Code Access Check feature for the APs.\r
1403 //\r
1404 for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {\r
 1405 if (Index != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {\r
1406\r
1407 //\r
1408 // Acquire Config SMM Code Access Check spin lock. The AP will release the\r
1409 // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().\r
1410 //\r
 1411 AcquireSpinLock (mConfigSmmCodeAccessCheckLock);\r
1412\r
1413 //\r
1414 // Call SmmStartupThisAp() to enable SMM Code Access Check on an AP.\r
1415 //\r
1416 Status = gSmst->SmmStartupThisAp (ConfigSmmCodeAccessCheckOnCurrentProcessor, Index, &Index);\r
1417 ASSERT_EFI_ERROR (Status);\r
1418\r
1419 //\r
1420 // Wait for the AP to release the Config SMM Code Access Check spin lock.\r
1421 //\r
 1422 while (!AcquireSpinLockOrFail (mConfigSmmCodeAccessCheckLock)) {\r
1423 CpuPause ();\r
1424 }\r
1425\r
1426 //\r
1427 // Release the Config SMM Code Access Check spin lock.\r
1428 //\r
 1429 ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);\r
1430 }\r
1431 }\r
1432}\r
1433\r
1434/**\r
 1435 This API provides a way to allocate memory for page tables.\r
 1436\r
 1437 This API can be called more than once to allocate memory for page tables.\r
1438\r
1439 Allocates the number of 4KB pages of type EfiRuntimeServicesData and returns a pointer to the\r
1440 allocated buffer. The buffer returned is aligned on a 4KB boundary. If Pages is 0, then NULL\r
1441 is returned. If there is not enough memory remaining to satisfy the request, then NULL is\r
1442 returned.\r
1443\r
1444 @param Pages The number of 4 KB pages to allocate.\r
1445\r
1446 @return A pointer to the allocated buffer or NULL if allocation fails.\r
1447\r
1448**/\r
1449VOID *\r
1450AllocatePageTableMemory (\r
1451 IN UINTN Pages\r
1452 )\r
1453{\r
1454 VOID *Buffer;\r
1455\r
1456 Buffer = SmmCpuFeaturesAllocatePageTableMemory (Pages);\r
1457 if (Buffer != NULL) {\r
1458 return Buffer;\r
1459 }\r
1460 return AllocatePages (Pages);\r
1461}\r
1462\r
1463/**\r
1464 Perform the remaining tasks.\r
1465\r
1466**/\r
1467VOID\r
1468PerformRemainingTasks (\r
1469 VOID\r
1470 )\r
1471{\r
1472 if (mSmmReadyToLock) {\r
1473 //\r
1474 // Start SMM Profile feature\r
1475 //\r
1476 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {\r
1477 SmmProfileStart ();\r
1478 }\r
1479 //\r
 1480 // Create a mix of 2MB and 4KB page tables. Mark some memory ranges as absent and/or execute-disable.\r
1481 //\r
1482 InitPaging ();\r
1483 //\r
1484 // Configure SMM Code Access Check feature if available.\r
1485 //\r
1486 ConfigSmmCodeAccessCheck ();\r
1487\r
1488 SmmCpuFeaturesCompleteSmmReadyToLock ();\r
1489\r
1490 //\r
1491 // Clean SMM ready to lock flag\r
1492 //\r
1493 mSmmReadyToLock = FALSE;\r
1494 }\r
1495}\r
1496\r
1497/**\r
 1498 Perform the pre-tasks.\r
1499\r
1500**/\r
1501VOID\r
1502PerformPreTasks (\r
1503 VOID\r
1504 )\r
1505{\r
1506 //\r
1507 // Restore SMM Configuration in S3 boot path.\r
1508 //\r
1509 if (mRestoreSmmConfigurationInS3) {\r
1510 //\r
 1511 // Make sure gSmst is correct because the functions below may use it.\r
1512 //\r
1513 gSmst->SmmStartupThisAp = gSmmCpuPrivate->SmmCoreEntryContext.SmmStartupThisAp;\r
1514 gSmst->CurrentlyExecutingCpu = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;\r
1515 gSmst->NumberOfCpus = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;\r
1516 gSmst->CpuSaveStateSize = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveStateSize;\r
1517 gSmst->CpuSaveState = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveState;\r
1518\r
1519 //\r
1520 // Configure SMM Code Access Check feature if available.\r
1521 //\r
1522 ConfigSmmCodeAccessCheck ();\r
1523\r
1524 SmmCpuFeaturesCompleteSmmReadyToLock ();\r
1525\r
1526 mRestoreSmmConfigurationInS3 = FALSE;\r
1527 }\r
1528}\r