UefiCpuPkg / PiSmmCpuDxeSmm / PiSmmCpuDxeSmm.c
1/** @file\r
2Agent Module to load other modules to deploy SMM Entry Vector for X86 CPU.\r
3\r
4Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
5This program and the accompanying materials\r
6are licensed and made available under the terms and conditions of the BSD License\r
7which accompanies this distribution. The full text of the license may be found at\r
8http://opensource.org/licenses/bsd-license.php\r
9\r
10THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
11WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
12\r
13**/\r
14\r
15#include "PiSmmCpuDxeSmm.h"\r
16\r
17//\r
18// SMM CPU Private Data structure that contains SMM Configuration Protocol\r
19 // along with its supporting fields.
20//\r
21SMM_CPU_PRIVATE_DATA mSmmCpuPrivateData = {\r
22 SMM_CPU_PRIVATE_DATA_SIGNATURE, // Signature\r
23 NULL, // SmmCpuHandle\r
24 NULL, // Pointer to ProcessorInfo array\r
25 NULL, // Pointer to Operation array\r
26 NULL, // Pointer to CpuSaveStateSize array\r
27 NULL, // Pointer to CpuSaveState array\r
28 { {0} }, // SmmReservedSmramRegion\r
29 {\r
30 SmmStartupThisAp, // SmmCoreEntryContext.SmmStartupThisAp\r
31 0, // SmmCoreEntryContext.CurrentlyExecutingCpu\r
32 0, // SmmCoreEntryContext.NumberOfCpus\r
33 NULL, // SmmCoreEntryContext.CpuSaveStateSize\r
34 NULL // SmmCoreEntryContext.CpuSaveState\r
35 },\r
36 NULL, // SmmCoreEntry\r
37 {\r
38 mSmmCpuPrivateData.SmmReservedSmramRegion, // SmmConfiguration.SmramReservedRegions\r
39 RegisterSmmEntry // SmmConfiguration.RegisterSmmEntry\r
40 },\r
41};\r
42\r
43CPU_HOT_PLUG_DATA mCpuHotPlugData = {\r
44 CPU_HOT_PLUG_DATA_REVISION_1, // Revision\r
45 0, // Array Length of SmBase and APIC ID\r
46 NULL, // Pointer to APIC ID array\r
47 NULL, // Pointer to SMBASE array\r
48 0, // Reserved\r
49 0, // SmrrBase\r
50 0 // SmrrSize\r
51};\r
52\r
53//\r
54// Global pointer used to access mSmmCpuPrivateData from outside and inside SMM\r
55//\r
56SMM_CPU_PRIVATE_DATA *gSmmCpuPrivate = &mSmmCpuPrivateData;\r
57\r
58//\r
59// SMM Relocation variables\r
60//\r
61volatile BOOLEAN *mRebased;\r
62volatile BOOLEAN mIsBsp;\r
63\r
64///\r
65/// Handle for the SMM CPU Protocol\r
66///\r
67EFI_HANDLE mSmmCpuHandle = NULL;\r
68\r
69///\r
70/// SMM CPU Protocol instance\r
71///\r
72EFI_SMM_CPU_PROTOCOL mSmmCpu = {\r
73 SmmReadSaveState,\r
74 SmmWriteSaveState\r
75};\r
76\r
77EFI_CPU_INTERRUPT_HANDLER mExternalVectorTable[EXCEPTION_VECTOR_NUMBER];\r
78\r
79//\r
80// SMM stack information\r
81//\r
82UINTN mSmmStackArrayBase;\r
83UINTN mSmmStackArrayEnd;\r
84UINTN mSmmStackSize;\r
85\r
86//\r
87// Pointer to structure used during S3 Resume\r
88//\r
89SMM_S3_RESUME_STATE *mSmmS3ResumeState = NULL;\r
90\r
91UINTN mMaxNumberOfCpus = 1;\r
92UINTN mNumberOfCpus = 1;\r
93\r
94//\r
95// SMM ready to lock flag\r
96//\r
97BOOLEAN mSmmReadyToLock = FALSE;\r
98\r
99//\r
100// S3 boot flag\r
101//\r
102BOOLEAN mSmmS3Flag = FALSE;\r
103\r
104//\r
105// Global used to cache PCD for SMM Code Access Check enable\r
106//\r
107BOOLEAN mSmmCodeAccessCheckEnable = FALSE;\r
108\r
109//\r
110// Spin lock used to serialize setting of SMM Code Access Check feature\r
111//\r
112SPIN_LOCK *mConfigSmmCodeAccessCheckLock = NULL;
113\r
114/**\r
115 Initialize IDT to setup exception handlers for SMM.\r
116\r
117**/\r
118VOID\r
119InitializeSmmIdt (\r
120 VOID\r
121 )\r
122{\r
123 EFI_STATUS Status;\r
124 BOOLEAN InterruptState;\r
125 IA32_DESCRIPTOR DxeIdtr;\r
126 //\r
127 // Disable Interrupt and save DXE IDT table\r
128 //\r
129 InterruptState = SaveAndDisableInterrupts ();\r
130 AsmReadIdtr (&DxeIdtr);\r
131 //\r
132 // Load SMM temporary IDT table\r
133 //\r
134 AsmWriteIdtr (&gcSmiIdtr);\r
135 //\r
136 // Setup SMM default exception handlers, SMM IDT table\r
137 // will be updated and saved in gcSmiIdtr\r
138 //\r
139 Status = InitializeCpuExceptionHandlers (NULL);\r
140 ASSERT_EFI_ERROR (Status);\r
141 //\r
142 // Restore DXE IDT table and CPU interrupt\r
143 //\r
144 AsmWriteIdtr ((IA32_DESCRIPTOR *) &DxeIdtr);\r
145 SetInterruptState (InterruptState);\r
146}\r
147\r
148/**\r
149 Search for the name of the module that contains the input IP address and print it.
150\r
151 @param CallerIpAddress Caller instruction pointer.\r
152\r
153**/\r
154VOID\r
155DumpModuleInfoByIp (\r
156 IN UINTN CallerIpAddress\r
157 )\r
158{\r
159 UINTN Pe32Data;\r
160 EFI_IMAGE_DOS_HEADER *DosHdr;\r
161 EFI_IMAGE_OPTIONAL_HEADER_PTR_UNION Hdr;\r
162 VOID *PdbPointer;\r
163 UINT64 DumpIpAddress;\r
164\r
165 //\r
166 // Find Image Base\r
167 //\r
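// The search walks backward one 4 KB page at a time from the page containing the
// caller IP, checking each candidate for a DOS header followed by a valid PE
// signature, until the image base is found or the address reaches zero.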
168 Pe32Data = CallerIpAddress & ~(SIZE_4KB - 1);\r
169 while (Pe32Data != 0) {\r
170 DosHdr = (EFI_IMAGE_DOS_HEADER *) Pe32Data;\r
171 if (DosHdr->e_magic == EFI_IMAGE_DOS_SIGNATURE) {\r
172 //\r
173 // DOS image header is present, so read the PE header after the DOS image header.\r
174 //\r
175 Hdr.Pe32 = (EFI_IMAGE_NT_HEADERS32 *)(Pe32Data + (UINTN) ((DosHdr->e_lfanew) & 0x0ffff));\r
176 //\r
177 // Make sure PE header address does not overflow and is less than the initial address.\r
178 //\r
179 if (((UINTN)Hdr.Pe32 > Pe32Data) && ((UINTN)Hdr.Pe32 < CallerIpAddress)) {\r
180 if (Hdr.Pe32->Signature == EFI_IMAGE_NT_SIGNATURE) {\r
181 //\r
182 // It's PE image.\r
183 //\r
184 break;\r
185 }\r
186 }\r
187 }\r
188\r
189 //\r
190 // Image base not found; check the previous 4KB-aligned address
191 //\r
192 Pe32Data -= SIZE_4KB;\r
193 }\r
194\r
195 DumpIpAddress = CallerIpAddress;\r
196 DEBUG ((EFI_D_ERROR, "It is invoked from the instruction before IP(0x%lx)", DumpIpAddress));\r
197\r
198 if (Pe32Data != 0) {\r
199 PdbPointer = PeCoffLoaderGetPdbPointer ((VOID *) Pe32Data);\r
200 if (PdbPointer != NULL) {\r
201 DEBUG ((EFI_D_ERROR, " in module (%a)", PdbPointer));\r
202 }\r
203 }\r
204}\r
205\r
206/**\r
207 Read information from the CPU save state.\r
208\r
209 @param This EFI_SMM_CPU_PROTOCOL instance\r
210 @param Width The number of bytes to read from the CPU save state.\r
211 @param Register Specifies the CPU register to read from the save state.
212 @param CpuIndex Specifies the zero-based index of the CPU save state.\r
213 @param Buffer Upon return, this holds the CPU register value read from the save state.\r
214\r
215 @retval EFI_SUCCESS The register was read from Save State\r
216 @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor\r
217 @retval EFI_INVALID_PARAMETER This or Buffer is NULL.
218\r
219**/\r
220EFI_STATUS\r
221EFIAPI\r
222SmmReadSaveState (\r
223 IN CONST EFI_SMM_CPU_PROTOCOL *This,\r
224 IN UINTN Width,\r
225 IN EFI_SMM_SAVE_STATE_REGISTER Register,\r
226 IN UINTN CpuIndex,\r
227 OUT VOID *Buffer\r
228 )\r
229{\r
230 EFI_STATUS Status;\r
231\r
232 //\r
233 // Retrieve pointer to the specified CPU's SMM Save State buffer\r
234 //\r
235 if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {\r
236 return EFI_INVALID_PARAMETER;\r
237 }\r
238\r
239 //\r
240 // Check for special EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID\r
241 //\r
242 if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {\r
243 //\r
244 // The pseudo-register only supports the 64-bit size specified by Width.\r
245 //\r
246 if (Width != sizeof (UINT64)) {\r
247 return EFI_INVALID_PARAMETER;\r
248 }\r
249 //\r
250 // If the processor is in SMM at the time the SMI occurred,\r
251 // the pseudo register value for EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID is returned in Buffer.\r
252 // Otherwise, EFI_NOT_FOUND is returned.\r
253 //\r
254 if (*(mSmmMpSyncData->CpuData[CpuIndex].Present)) {
255 *(UINT64 *)Buffer = gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId;\r
256 return EFI_SUCCESS;\r
257 } else {\r
258 return EFI_NOT_FOUND;\r
259 }\r
260 }\r
261\r
262 if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
263 return EFI_INVALID_PARAMETER;\r
264 }\r
265\r
266 Status = SmmCpuFeaturesReadSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
267 if (Status == EFI_UNSUPPORTED) {\r
268 Status = ReadSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
269 }\r
270 return Status;\r
271}\r
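//
// Illustrative usage sketch (not part of this driver): a caller that has located the
// EFI_SMM_CPU_PROTOCOL instance installed below (assumed here to be in a local
// variable named SmmCpu, with CpuIndex supplied by the caller) could read a save
// state register like this:
//
//   UINT64      Rax;
//   EFI_STATUS  Status;
//
//   Status = SmmCpu->ReadSaveState (
//                      SmmCpu,
//                      sizeof (UINT64),
//                      EFI_SMM_SAVE_STATE_REGISTER_RAX,
//                      CpuIndex,
//                      &Rax
//                      );
//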
272\r
273/**\r
274 Write data to the CPU save state.\r
275\r
276 @param This EFI_SMM_CPU_PROTOCOL instance\r
277 @param Width The number of bytes to write to the CPU save state.
278 @param Register Specifies the CPU register to write to the save state.\r
279 @param CpuIndex Specifies the zero-based index of the CPU save state\r
280 @param Buffer Upon entry, this holds the new CPU register value.\r
281\r
282 @retval EFI_SUCCESS The register was written to the Save State
283 @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor
284 @retval EFI_INVALID_PARAMETER ProcessorIndex or Width is not correct
285\r
286**/\r
287EFI_STATUS\r
288EFIAPI\r
289SmmWriteSaveState (\r
290 IN CONST EFI_SMM_CPU_PROTOCOL *This,\r
291 IN UINTN Width,\r
292 IN EFI_SMM_SAVE_STATE_REGISTER Register,\r
293 IN UINTN CpuIndex,\r
294 IN CONST VOID *Buffer\r
295 )\r
296{\r
297 EFI_STATUS Status;\r
298\r
299 //\r
300 // Retrieve pointer to the specified CPU's SMM Save State buffer\r
301 //\r
302 if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {\r
303 return EFI_INVALID_PARAMETER;\r
304 }\r
305\r
306 //\r
307 // Writes to EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID are ignored\r
308 //\r
309 if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {\r
310 return EFI_SUCCESS;\r
311 }\r
312\r
313 if (!mSmmMpSyncData->CpuData[CpuIndex].Present) {\r
314 return EFI_INVALID_PARAMETER;\r
315 }\r
316\r
317 Status = SmmCpuFeaturesWriteSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
318 if (Status == EFI_UNSUPPORTED) {\r
319 Status = WriteSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
320 }\r
321 return Status;\r
322}\r
323\r
324\r
325/**\r
326 C function for the SMI handler, used to change every processor's SMBASE register.
327\r
328**/\r
329VOID\r
330EFIAPI\r
331SmmInitHandler (\r
332 VOID\r
333 )\r
334{\r
335 UINT32 ApicId;\r
336 UINTN Index;\r
337\r
338 //\r
339 // Update SMM IDT entries' code segment and load IDT\r
340 //\r
341 AsmWriteIdtr (&gcSmiIdtr);\r
342 ApicId = GetApicId ();\r
343\r
344 ASSERT (mNumberOfCpus <= PcdGet32 (PcdCpuMaxLogicalProcessorNumber));\r
345\r
346 for (Index = 0; Index < mNumberOfCpus; Index++) {\r
347 if (ApicId == (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {\r
348 //\r
349 // Initialize SMM specific features on the currently executing CPU\r
350 //\r
351 SmmCpuFeaturesInitializeProcessor (\r
352 Index,\r
353 mIsBsp,\r
354 gSmmCpuPrivate->ProcessorInfo,\r
355 &mCpuHotPlugData\r
356 );\r
357\r
358 if (mIsBsp) {\r
359 //\r
360 // BSP rebase is already done above.\r
361 // Initialize private data during S3 resume\r
362 //\r
363 InitializeMpSyncData ();\r
364 }\r
365\r
366 //\r
367 // Hook return after RSM to set SMM re-based flag\r
368 //\r
369 SemaphoreHook (Index, &mRebased[Index]);\r
370\r
371 return;\r
372 }\r
373 }\r
374 ASSERT (FALSE);\r
375}\r
376\r
377/**\r
378 Relocate SmmBases for each processor.\r
379\r
380 Executed on first boot and on every S3 resume.
381\r
382**/\r
383VOID\r
384EFIAPI\r
385SmmRelocateBases (\r
386 VOID\r
387 )\r
388{\r
389 UINT8 BakBuf[BACK_BUF_SIZE];\r
390 SMRAM_SAVE_STATE_MAP BakBuf2;\r
391 SMRAM_SAVE_STATE_MAP *CpuStatePtr;\r
392 UINT8 *U8Ptr;\r
393 UINT32 ApicId;\r
394 UINTN Index;\r
395 UINTN BspIndex;\r
396\r
397 //\r
398 // Make sure the reserved size is large enough for procedure SmmInitTemplate.\r
399 //\r
400 ASSERT (sizeof (BakBuf) >= gcSmmInitSize);\r
401\r
402 //\r
403 // Patch ASM code template with current CR0, CR3, and CR4 values\r
404 //\r
405 gSmmCr0 = (UINT32)AsmReadCr0 ();\r
406 gSmmCr3 = (UINT32)AsmReadCr3 ();\r
407 gSmmCr4 = (UINT32)AsmReadCr4 ();\r
408\r
409 //\r
410 // Patch GDTR for SMM base relocation\r
411 //\r
412 gcSmiInitGdtr.Base = gcSmiGdtr.Base;\r
413 gcSmiInitGdtr.Limit = gcSmiGdtr.Limit;\r
414\r
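//
// The relocation template is staged at the default SMBASE handler entry
// (SMM_DEFAULT_SMBASE + SMM_HANDLER_OFFSET, the 0x38000 area that is backed up below),
// and the default CPU save state map sits at SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET.
//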
415 U8Ptr = (UINT8*)(UINTN)(SMM_DEFAULT_SMBASE + SMM_HANDLER_OFFSET);\r
416 CpuStatePtr = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);\r
417\r
418 //\r
419 // Backup original contents at address 0x38000\r
420 //\r
421 CopyMem (BakBuf, U8Ptr, sizeof (BakBuf));\r
422 CopyMem (&BakBuf2, CpuStatePtr, sizeof (BakBuf2));\r
423\r
424 //\r
425 // Load image for relocation\r
426 //\r
427 CopyMem (U8Ptr, gcSmmInitTemplate, gcSmmInitSize);\r
428\r
429 //\r
430 // Retrieve the local APIC ID of current processor\r
431 //\r
432 ApicId = GetApicId ();\r
433\r
434 //\r
435 // Relocate SM bases for all APs\r
436 // This is APs' 1st SMI - rebase will be done here, and APs' default SMI handler will be overridden by gcSmmInitTemplate\r
437 //\r
438 mIsBsp = FALSE;\r
439 BspIndex = (UINTN)-1;\r
440 for (Index = 0; Index < mNumberOfCpus; Index++) {\r
441 mRebased[Index] = FALSE;\r
442 if (ApicId != (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {\r
443 SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);\r
444 //\r
445 // Wait for this AP to finish its 1st SMI\r
446 //\r
447 while (!mRebased[Index]);\r
448 } else {\r
449 //\r
450 // The BSP will be relocated later
451 //\r
452 BspIndex = Index;\r
453 }\r
454 }\r
455\r
456 //\r
457 // Relocate BSP's SMM base\r
458 //\r
459 ASSERT (BspIndex != (UINTN)-1);\r
460 mIsBsp = TRUE;\r
461 SendSmiIpi (ApicId);\r
462 //\r
463 // Wait for the BSP to finish its 1st SMI\r
464 //\r
465 while (!mRebased[BspIndex]);\r
466\r
467 //\r
468 // Restore contents at address 0x38000\r
469 //\r
470 CopyMem (CpuStatePtr, &BakBuf2, sizeof (BakBuf2));\r
471 CopyMem (U8Ptr, BakBuf, sizeof (BakBuf));\r
472}\r
473\r
474/**\r
475 Perform SMM initialization for all processors in the S3 boot path.\r
476\r
477 For a native platform, MP initialization in the S3 boot path is also performed in this function.\r
478**/\r
479VOID\r
480EFIAPI\r
481SmmRestoreCpu (\r
482 VOID\r
483 )\r
484{\r
485 SMM_S3_RESUME_STATE *SmmS3ResumeState;\r
486 IA32_DESCRIPTOR Ia32Idtr;\r
487 IA32_DESCRIPTOR X64Idtr;\r
488 IA32_IDT_GATE_DESCRIPTOR IdtEntryTable[EXCEPTION_VECTOR_NUMBER];\r
489 EFI_STATUS Status;\r
490\r
491 DEBUG ((EFI_D_INFO, "SmmRestoreCpu()\n"));\r
492\r
493 mSmmS3Flag = TRUE;\r
494\r
495 InitializeSpinLock (mMemoryMappedLock);\r
496\r
497 //\r
498 // See if there is enough context to resume PEI Phase\r
499 //\r
500 if (mSmmS3ResumeState == NULL) {\r
501 DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));\r
502 CpuDeadLoop ();\r
503 }\r
504\r
505 SmmS3ResumeState = mSmmS3ResumeState;\r
506 ASSERT (SmmS3ResumeState != NULL);\r
507\r
508 if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {\r
509 //\r
510 // Save the IA32 IDT Descriptor\r
511 //\r
512 AsmReadIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);\r
513\r
514 //\r
515 // Setup X64 IDT table\r
516 //\r
517 ZeroMem (IdtEntryTable, sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32);\r
518 X64Idtr.Base = (UINTN) IdtEntryTable;\r
519 X64Idtr.Limit = (UINT16) (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32 - 1);\r
520 AsmWriteIdtr ((IA32_DESCRIPTOR *) &X64Idtr);\r
521\r
522 //\r
523 // Setup the default exception handler\r
524 //\r
525 Status = InitializeCpuExceptionHandlers (NULL);\r
526 ASSERT_EFI_ERROR (Status);\r
527\r
528 //\r
529 // Initialize Debug Agent to support source level debug\r
530 //\r
531 InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64, (VOID *)&Ia32Idtr, NULL);\r
532 }\r
533\r
534 //\r
535 // Skip initialization if mAcpiCpuData is not valid
536 //
537 if (mAcpiCpuData.NumberOfCpus > 0) {
538 //
539 // First time microcode load and restore MTRRs
540 //
541 EarlyInitializeCpu ();
542 }\r
543\r
544 //\r
545 // Restore SMBASE for BSP and all APs\r
546 //\r
547 SmmRelocateBases ();\r
548\r
549 //\r
550 // Skip initialization if mAcpiCpuData is not valid
551 //
552 if (mAcpiCpuData.NumberOfCpus > 0) {
553 //
554 // Restore MSRs for BSP and all APs
555 //
556 InitializeCpu ();
557 }\r
558\r
559 //\r
560 // Set a flag to restore SMM configuration in S3 path.\r
561 //\r
562 mRestoreSmmConfigurationInS3 = TRUE;\r
563\r
564 DEBUG (( EFI_D_INFO, "SMM S3 Return CS = %x\n", SmmS3ResumeState->ReturnCs));\r
565 DEBUG (( EFI_D_INFO, "SMM S3 Return Entry Point = %x\n", SmmS3ResumeState->ReturnEntryPoint));\r
566 DEBUG (( EFI_D_INFO, "SMM S3 Return Context1 = %x\n", SmmS3ResumeState->ReturnContext1));\r
567 DEBUG (( EFI_D_INFO, "SMM S3 Return Context2 = %x\n", SmmS3ResumeState->ReturnContext2));\r
568 DEBUG (( EFI_D_INFO, "SMM S3 Return Stack Pointer = %x\n", SmmS3ResumeState->ReturnStackPointer));\r
569\r
570 //\r
571 // If SMM is in 32-bit mode, then use SwitchStack() to resume PEI Phase\r
572 //\r
573 if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_32) {\r
574 DEBUG ((EFI_D_INFO, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));\r
575\r
576 SwitchStack (\r
577 (SWITCH_STACK_ENTRY_POINT)(UINTN)SmmS3ResumeState->ReturnEntryPoint,\r
578 (VOID *)(UINTN)SmmS3ResumeState->ReturnContext1,\r
579 (VOID *)(UINTN)SmmS3ResumeState->ReturnContext2,\r
580 (VOID *)(UINTN)SmmS3ResumeState->ReturnStackPointer\r
581 );\r
582 }\r
583\r
584 //\r
585 // If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase\r
586 //\r
587 if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {\r
588 DEBUG ((EFI_D_INFO, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));\r
589 //\r
590 // Disable interrupt of Debug timer, since new IDT table is for IA32 and will not work in long mode.\r
591 //\r
592 SaveAndSetDebugTimerInterrupt (FALSE);\r
593 //\r
594 // Restore IA32 IDT table\r
595 //\r
596 AsmWriteIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);\r
597 AsmDisablePaging64 (\r
598 SmmS3ResumeState->ReturnCs,\r
599 (UINT32)SmmS3ResumeState->ReturnEntryPoint,\r
600 (UINT32)SmmS3ResumeState->ReturnContext1,\r
601 (UINT32)SmmS3ResumeState->ReturnContext2,\r
602 (UINT32)SmmS3ResumeState->ReturnStackPointer\r
603 );\r
604 }\r
605\r
606 //\r
607 // Can not resume PEI Phase\r
608 //\r
609 DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));\r
610 CpuDeadLoop ();\r
611}\r
612\r
613/**\r
614 Copy register table from ACPI NVS memory into SMRAM.\r
615\r
616 @param[in] DestinationRegisterTableList Points to destination register table.\r
617 @param[in] SourceRegisterTableList Points to source register table.\r
618 @param[in] NumberOfCpus Number of CPUs.\r
619\r
620**/\r
621VOID\r
622CopyRegisterTable (\r
623 IN CPU_REGISTER_TABLE *DestinationRegisterTableList,\r
624 IN CPU_REGISTER_TABLE *SourceRegisterTableList,\r
625 IN UINT32 NumberOfCpus\r
626 )\r
627{\r
628 UINTN Index;\r
629 UINTN Index1;\r
630 CPU_REGISTER_TABLE_ENTRY *RegisterTableEntry;\r
631\r
632 CopyMem (DestinationRegisterTableList, SourceRegisterTableList, NumberOfCpus * sizeof (CPU_REGISTER_TABLE));\r
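//
// The CopyMem above duplicates only the CPU_REGISTER_TABLE headers; the per-CPU
// RegisterTableEntry arrays still point into ACPI NVS memory, so each one is
// deep-copied into an SMRAM allocation in the loop below.
//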
633 for (Index = 0; Index < NumberOfCpus; Index++) {\r
634 DestinationRegisterTableList[Index].RegisterTableEntry = AllocatePool (DestinationRegisterTableList[Index].AllocatedSize);\r
635 ASSERT (DestinationRegisterTableList[Index].RegisterTableEntry != NULL);\r
636 CopyMem (DestinationRegisterTableList[Index].RegisterTableEntry, SourceRegisterTableList[Index].RegisterTableEntry, DestinationRegisterTableList[Index].AllocatedSize);\r
637 //\r
638 // Go through all MSRs in the register table to initialize MSR spin locks
639 //\r
640 RegisterTableEntry = DestinationRegisterTableList[Index].RegisterTableEntry;\r
641 for (Index1 = 0; Index1 < DestinationRegisterTableList[Index].TableLength; Index1++, RegisterTableEntry++) {\r
642 if ((RegisterTableEntry->RegisterType == Msr) && (RegisterTableEntry->ValidBitLength < 64)) {\r
643 //\r
644 // Initialize MSR spin lock only for those MSRs that need bit field writes
645 //\r
646 InitMsrSpinLockByIndex (RegisterTableEntry->Index);\r
647 }\r
648 }\r
649 }\r
650}\r
651\r
652/**\r
653 SMM Ready To Lock event notification handler.\r
654\r
655 The CPU S3 data is copied to SMRAM for security and mSmmReadyToLock is set to\r
656 perform additional lock actions that must be performed from SMM on the next SMI.\r
657\r
658 @param[in] Protocol Points to the protocol's unique identifier.\r
659 @param[in] Interface Points to the interface instance.\r
660 @param[in] Handle The handle on which the interface was installed.\r
661\r
662 @retval EFI_SUCCESS Notification handler runs successfully.\r
663 **/\r
664EFI_STATUS\r
665EFIAPI\r
666SmmReadyToLockEventNotify (\r
667 IN CONST EFI_GUID *Protocol,\r
668 IN VOID *Interface,\r
669 IN EFI_HANDLE Handle\r
670 )\r
671{\r
672 ACPI_CPU_DATA *AcpiCpuData;\r
673 IA32_DESCRIPTOR *Gdtr;\r
674 IA32_DESCRIPTOR *Idtr;\r
675\r
676 //\r
677 // Prevent use of mAcpiCpuData by initializing NumberOfCpus to 0
678 //\r
679 mAcpiCpuData.NumberOfCpus = 0;\r
680\r
681 //\r
682 // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM\r
683 //\r
684 AcpiCpuData = (ACPI_CPU_DATA *)(UINTN)PcdGet64 (PcdCpuS3DataAddress);\r
685 if (AcpiCpuData == 0) {\r
686 goto Done;\r
687 }\r
688\r
689 //\r
690 // For a native platform, copy the CPU S3 data into SMRAM for use on CPU S3 Resume.\r
691 //\r
692 CopyMem (&mAcpiCpuData, AcpiCpuData, sizeof (mAcpiCpuData));\r
693\r
694 mAcpiCpuData.MtrrTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (MTRR_SETTINGS));\r
695 ASSERT (mAcpiCpuData.MtrrTable != 0);\r
696\r
697 CopyMem ((VOID *)(UINTN)mAcpiCpuData.MtrrTable, (VOID *)(UINTN)AcpiCpuData->MtrrTable, sizeof (MTRR_SETTINGS));\r
698\r
699 mAcpiCpuData.GdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));\r
700 ASSERT (mAcpiCpuData.GdtrProfile != 0);\r
701\r
702 CopyMem ((VOID *)(UINTN)mAcpiCpuData.GdtrProfile, (VOID *)(UINTN)AcpiCpuData->GdtrProfile, sizeof (IA32_DESCRIPTOR));\r
703\r
704 mAcpiCpuData.IdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));\r
705 ASSERT (mAcpiCpuData.IdtrProfile != 0);\r
706\r
707 CopyMem ((VOID *)(UINTN)mAcpiCpuData.IdtrProfile, (VOID *)(UINTN)AcpiCpuData->IdtrProfile, sizeof (IA32_DESCRIPTOR));\r
708\r
709 mAcpiCpuData.PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));\r
710 ASSERT (mAcpiCpuData.PreSmmInitRegisterTable != 0);\r
711\r
712 CopyRegisterTable (\r
713 (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable,\r
714 (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->PreSmmInitRegisterTable,\r
715 mAcpiCpuData.NumberOfCpus\r
716 );\r
717\r
718 mAcpiCpuData.RegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));\r
719 ASSERT (mAcpiCpuData.RegisterTable != 0);\r
720\r
721 CopyRegisterTable (\r
722 (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable,\r
723 (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->RegisterTable,\r
724 mAcpiCpuData.NumberOfCpus\r
725 );\r
726\r
727 //\r
728 // Copy AP's GDT, IDT and Machine Check handler into SMRAM.\r
729 //\r
730 Gdtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.GdtrProfile;\r
731 Idtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.IdtrProfile;\r
732\r
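//
// A single SMRAM buffer holds the AP GDT, IDT, and machine check handler back to
// back; mIdtForAp and mMachineCheckHandlerForAp are carved out of the same
// mGdtForAp allocation rather than allocated separately.
//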
733 mGdtForAp = AllocatePool ((Gdtr->Limit + 1) + (Idtr->Limit + 1) + mAcpiCpuData.ApMachineCheckHandlerSize);\r
734 ASSERT (mGdtForAp != NULL);\r
735 mIdtForAp = (VOID *) ((UINTN)mGdtForAp + (Gdtr->Limit + 1));\r
736 mMachineCheckHandlerForAp = (VOID *) ((UINTN)mIdtForAp + (Idtr->Limit + 1));\r
737\r
738 CopyMem (mGdtForAp, (VOID *)Gdtr->Base, Gdtr->Limit + 1);\r
739 CopyMem (mIdtForAp, (VOID *)Idtr->Base, Idtr->Limit + 1);\r
740 CopyMem (mMachineCheckHandlerForAp, (VOID *)(UINTN)mAcpiCpuData.ApMachineCheckHandlerBase, mAcpiCpuData.ApMachineCheckHandlerSize);\r
741\r
742Done:\r
743 //\r
744 // Set SMM ready to lock flag and return\r
745 //\r
746 mSmmReadyToLock = TRUE;\r
747 return EFI_SUCCESS;\r
748}\r
749\r
750/**\r
751 The module Entry Point of the CPU SMM driver.\r
752\r
753 @param ImageHandle The firmware allocated handle for the EFI image.\r
754 @param SystemTable A pointer to the EFI System Table.\r
755\r
756 @retval EFI_SUCCESS The entry point is executed successfully.\r
757 @retval Other Some error occurs when executing this entry point.\r
758\r
759**/\r
760EFI_STATUS\r
761EFIAPI\r
762PiCpuSmmEntry (\r
763 IN EFI_HANDLE ImageHandle,\r
764 IN EFI_SYSTEM_TABLE *SystemTable\r
765 )\r
766{\r
767 EFI_STATUS Status;\r
768 EFI_MP_SERVICES_PROTOCOL *MpServices;\r
769 UINTN NumberOfEnabledProcessors;\r
770 UINTN Index;\r
771 VOID *Buffer;\r
772 UINTN BufferPages;\r
773 UINTN TileCodeSize;\r
774 UINTN TileDataSize;\r
775 UINTN TileSize;\r
776 VOID *GuidHob;\r
777 EFI_SMRAM_DESCRIPTOR *SmramDescriptor;\r
778 SMM_S3_RESUME_STATE *SmmS3ResumeState;\r
779 UINT8 *Stacks;\r
780 VOID *Registration;\r
781 UINT32 RegEax;\r
782 UINT32 RegEdx;\r
783 UINTN FamilyId;\r
784 UINTN ModelId;\r
785 UINT32 Cr3;\r
786\r
787 //\r
788 // Initialize Debug Agent to support source level debug in SMM code\r
789 //\r
790 InitializeDebugAgent (DEBUG_AGENT_INIT_SMM, NULL, NULL);\r
791\r
792 //\r
793 // Report the start of CPU SMM initialization.\r
794 //\r
795 REPORT_STATUS_CODE (\r
796 EFI_PROGRESS_CODE,\r
797 EFI_COMPUTING_UNIT_HOST_PROCESSOR | EFI_CU_HP_PC_SMM_INIT\r
798 );\r
799\r
800 //\r
801 // Fix segment address of the long-mode-switch jump\r
802 //\r
803 if (sizeof (UINTN) == sizeof (UINT64)) {\r
804 gSmmJmpAddr.Segment = LONG_MODE_CODE_SEGMENT;\r
805 }\r
806\r
807 //\r
808 // Find out SMRR Base and SMRR Size\r
809 //\r
810 FindSmramInfo (&mCpuHotPlugData.SmrrBase, &mCpuHotPlugData.SmrrSize);\r
811\r
812 //\r
813 // Get MP Services Protocol\r
814 //\r
815 Status = SystemTable->BootServices->LocateProtocol (&gEfiMpServiceProtocolGuid, NULL, (VOID **)&MpServices);\r
816 ASSERT_EFI_ERROR (Status);\r
817\r
818 //\r
819 // Use MP Services Protocol to retrieve the number of processors and number of enabled processors\r
820 //\r
821 Status = MpServices->GetNumberOfProcessors (MpServices, &mNumberOfCpus, &NumberOfEnabledProcessors);\r
822 ASSERT_EFI_ERROR (Status);\r
823 ASSERT (mNumberOfCpus <= PcdGet32 (PcdCpuMaxLogicalProcessorNumber));\r
824\r
825 //\r
826 // If CPU hot plug is supported, PcdCpuSmmEnableBspElection should be set to TRUE.
827 // A constant BSP index makes no sense because that processor may be hot removed.
828 //\r
829 DEBUG_CODE (\r
830 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {\r
831\r
832 ASSERT (FeaturePcdGet (PcdCpuSmmEnableBspElection));\r
833 }\r
834 );\r
835\r
836 //\r
837 // Save the PcdCpuSmmCodeAccessCheckEnable value into a global variable.\r
838 //\r
839 mSmmCodeAccessCheckEnable = PcdGetBool (PcdCpuSmmCodeAccessCheckEnable);\r
840 DEBUG ((EFI_D_INFO, "PcdCpuSmmCodeAccessCheckEnable = %d\n", mSmmCodeAccessCheckEnable));\r
841\r
842 //\r
843 // If CPU hot plug is supported, allocate resources for possibly hot-added processors
844 //\r
845 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {\r
846 mMaxNumberOfCpus = PcdGet32 (PcdCpuMaxLogicalProcessorNumber);\r
847 } else {\r
848 mMaxNumberOfCpus = mNumberOfCpus;\r
849 }\r
850 gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus = mMaxNumberOfCpus;\r
851\r
852 //\r
853 // The CPU save state and code for the SMI entry point are tiled within an SMRAM\r
854 // allocated buffer. The minimum size of this buffer for a uniprocessor system\r
855 // is 32 KB, because the entry point is SMBASE + 32KB, and the CPU save state area is
856 // just below SMBASE + 64KB. If more than one CPU is present in the platform,
857 // then the SMI entry point and the CPU save state areas can be tiled to minimize
858 // the total amount of SMRAM required for all the CPUs. The tile size can be computed
859 // by adding the CPU save state size, any extra CPU specific context, and
860 // the size of code that must be placed at the SMI entry point to transfer
861 // control to a C function in the native SMM execution mode. This size is
862 // rounded up to the nearest power of 2 to give the tile size for each CPU.
863 // The total amount of memory required is the maximum number of CPUs that the
864 // platform supports times the tile size. The picture below shows the tiling,
865 // where m is the number of tiles that fit in 32KB.\r
866 //\r
867 // +-----------------------------+ <-- 2^n offset from Base of allocated buffer\r
868 // | CPU m+1 Save State |\r
869 // +-----------------------------+\r
870 // | CPU m+1 Extra Data |\r
871 // +-----------------------------+\r
872 // | Padding |\r
873 // +-----------------------------+\r
874 // | CPU 2m SMI Entry |\r
875 // +#############################+ <-- Base of allocated buffer + 64 KB\r
876 // | CPU m-1 Save State |\r
877 // +-----------------------------+\r
878 // | CPU m-1 Extra Data |\r
879 // +-----------------------------+\r
880 // | Padding |\r
881 // +-----------------------------+\r
882 // | CPU 2m-1 SMI Entry |\r
883 // +=============================+ <-- 2^n offset from Base of allocated buffer\r
884 // | . . . . . . . . . . . . |\r
885 // +=============================+ <-- 2^n offset from Base of allocated buffer\r
886 // | CPU 2 Save State |\r
887 // +-----------------------------+\r
888 // | CPU 2 Extra Data |\r
889 // +-----------------------------+\r
890 // | Padding |\r
891 // +-----------------------------+\r
892 // | CPU m+1 SMI Entry |\r
893 // +=============================+ <-- Base of allocated buffer + 32 KB\r
894 // | CPU 1 Save State |\r
895 // +-----------------------------+\r
896 // | CPU 1 Extra Data |\r
897 // +-----------------------------+\r
898 // | Padding |\r
899 // +-----------------------------+\r
900 // | CPU m SMI Entry |\r
901 // +#############################+ <-- Base of allocated buffer + 32 KB == CPU 0 SMBASE + 64 KB\r
902 // | CPU 0 Save State |\r
903 // +-----------------------------+\r
904 // | CPU 0 Extra Data |\r
905 // +-----------------------------+\r
906 // | Padding |\r
907 // +-----------------------------+\r
908 // | CPU m-1 SMI Entry |\r
909 // +=============================+ <-- 2^n offset from Base of allocated buffer\r
910 // | . . . . . . . . . . . . |\r
911 // +=============================+ <-- 2^n offset from Base of allocated buffer\r
912 // | Padding |\r
913 // +-----------------------------+\r
914 // | CPU 1 SMI Entry |\r
915 // +=============================+ <-- 2^n offset from Base of allocated buffer\r
916 // | Padding |\r
917 // +-----------------------------+\r
918 // | CPU 0 SMI Entry |\r
919 // +#############################+ <-- Base of allocated buffer == CPU 0 SMBASE + 32 KB\r
920 //\r
921\r
922 //\r
923 // Retrieve CPU Family\r
924 //\r
925 AsmCpuid (CPUID_VERSION_INFO, &RegEax, NULL, NULL, NULL);
926 FamilyId = (RegEax >> 8) & 0xf;\r
927 ModelId = (RegEax >> 4) & 0xf;\r
928 if (FamilyId == 0x06 || FamilyId == 0x0f) {\r
929 ModelId = ModelId | ((RegEax >> 12) & 0xf0);\r
930 }\r
931\r
932 RegEdx = 0;\r
933 AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);\r
934 if (RegEax >= CPUID_EXTENDED_CPU_SIG) {\r
935 AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);\r
936 }\r
937 //\r
938 // Determine the mode of the CPU at the time an SMI occurs\r
939 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual\r
940 // Volume 3C, Section 34.4.1.1\r
941 //\r
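// BIT29 of CPUID.80000001h:EDX is the Intel 64 (long mode) capability bit; when it is
// set, this driver treats the save state map as using the 64-bit layout.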
942 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT;\r
943 if ((RegEdx & BIT29) != 0) {\r
944 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;\r
945 }\r
946 if (FamilyId == 0x06) {\r
947 if (ModelId == 0x17 || ModelId == 0x0f || ModelId == 0x1c) {\r
948 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;\r
949 }\r
950 }\r
951\r
952 //\r
953 // Compute tile size of buffer required to hold the CPU SMRAM Save State Map, extra CPU\r
954 // specific context in a PROCESSOR_SMM_DESCRIPTOR, and the SMI entry point. This size\r
955 // is rounded up to nearest power of 2.\r
956 //\r
957 TileCodeSize = GetSmiHandlerSize ();\r
958 TileCodeSize = ALIGN_VALUE(TileCodeSize, SIZE_4KB);\r
959 TileDataSize = sizeof (SMRAM_SAVE_STATE_MAP) + sizeof (PROCESSOR_SMM_DESCRIPTOR);\r
960 TileDataSize = ALIGN_VALUE(TileDataSize, SIZE_4KB);\r
961 TileSize = TileDataSize + TileCodeSize - 1;\r
962 TileSize = 2 * GetPowerOfTwo32 ((UINT32)TileSize);
963 DEBUG ((EFI_D_INFO, "SMRAM TileSize = 0x%08x (0x%08x, 0x%08x)\n", TileSize, TileCodeSize, TileDataSize));
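//
// Illustrative arithmetic: if both aligned sizes come out to 0x1000 bytes, TileSize is
// 0x1FFF before rounding, GetPowerOfTwo32() returns 0x1000, and the final tile is
// 0x2000 bytes. The "- 1" above keeps a sum that is already a power of two from being
// doubled a second time.
//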
964\r
965 //\r
966 // If the TileSize is larger than the space available for the SMI Handler of CPU[i],
967 // the PROCESSOR_SMM_DESCRIPTOR of CPU[i+1], and the SMRAM Save State Map of CPU[i+1],
968 // then the ASSERT() below is triggered. If this ASSERT() is triggered, the SMI Handler
969 // size must be reduced.
970 //\r
971 ASSERT (TileSize <= (SMRAM_SAVE_STATE_MAP_OFFSET + sizeof (SMRAM_SAVE_STATE_MAP) - SMM_HANDLER_OFFSET));\r
972\r
973 //\r
974 // Allocate buffer for all of the tiles.\r
975 //\r
976 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual\r
977 // Volume 3C, Section 34.11 SMBASE Relocation\r
978 // For Pentium and Intel486 processors, the SMBASE values must be\r
979 // aligned on a 32-KByte boundary or the processor will enter shutdown\r
980 // state during the execution of a RSM instruction.\r
981 //\r
982 // Intel486 processors: FamilyId is 4\r
983 // Pentium processors : FamilyId is 5\r
984 //\r
985 BufferPages = EFI_SIZE_TO_PAGES (SIZE_32KB + TileSize * (mMaxNumberOfCpus - 1));
986 if ((FamilyId == 4) || (FamilyId == 5)) {
987 Buffer = AllocateAlignedPages (BufferPages, SIZE_32KB);
988 } else {
989 Buffer = AllocateAlignedPages (BufferPages, SIZE_4KB);
990 }\r
991 ASSERT (Buffer != NULL);\r
992 DEBUG ((EFI_D_INFO, "SMRAM SaveState Buffer (0x%08x, 0x%08x)\n", Buffer, EFI_PAGES_TO_SIZE(BufferPages)));
993\r
994 //\r
995 // Allocate buffer for pointers to array in SMM_CPU_PRIVATE_DATA.\r
996 //\r
997 gSmmCpuPrivate->ProcessorInfo = (EFI_PROCESSOR_INFORMATION *)AllocatePool (sizeof (EFI_PROCESSOR_INFORMATION) * mMaxNumberOfCpus);\r
998 ASSERT (gSmmCpuPrivate->ProcessorInfo != NULL);\r
999\r
1000 gSmmCpuPrivate->Operation = (SMM_CPU_OPERATION *)AllocatePool (sizeof (SMM_CPU_OPERATION) * mMaxNumberOfCpus);\r
1001 ASSERT (gSmmCpuPrivate->Operation != NULL);\r
1002\r
1003 gSmmCpuPrivate->CpuSaveStateSize = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);\r
1004 ASSERT (gSmmCpuPrivate->CpuSaveStateSize != NULL);\r
1005\r
1006 gSmmCpuPrivate->CpuSaveState = (VOID **)AllocatePool (sizeof (VOID *) * mMaxNumberOfCpus);\r
1007 ASSERT (gSmmCpuPrivate->CpuSaveState != NULL);\r
1008\r
1009 mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveStateSize = gSmmCpuPrivate->CpuSaveStateSize;\r
1010 mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveState = gSmmCpuPrivate->CpuSaveState;\r
1011\r
1012 //\r
1013 // Allocate buffer for pointers to array in CPU_HOT_PLUG_DATA.\r
1014 //\r
1015 mCpuHotPlugData.ApicId = (UINT64 *)AllocatePool (sizeof (UINT64) * mMaxNumberOfCpus);\r
1016 ASSERT (mCpuHotPlugData.ApicId != NULL);\r
1017 mCpuHotPlugData.SmBase = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);\r
1018 ASSERT (mCpuHotPlugData.SmBase != NULL);\r
1019 mCpuHotPlugData.ArrayLength = (UINT32)mMaxNumberOfCpus;\r
1020\r
1021 //\r
1022 // Retrieve APIC ID of each enabled processor from the MP Services protocol.\r
1023 // Also compute the SMBASE address, CPU Save State address, and CPU Save state\r
1024 // size for each CPU in the platform\r
1025 //\r
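// Each CPU's SMBASE is placed SMM_HANDLER_OFFSET below the start of its tile, so the
// SMI entry point (SMBASE + 32 KB) lands at the start of the tile and the save state
// map (SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET) lands higher up in the tiled buffer, as
// pictured in the layout diagram above.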
1026 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {\r
1027 mCpuHotPlugData.SmBase[Index] = (UINTN)Buffer + Index * TileSize - SMM_HANDLER_OFFSET;\r
1028 gSmmCpuPrivate->CpuSaveStateSize[Index] = sizeof(SMRAM_SAVE_STATE_MAP);\r
1029 gSmmCpuPrivate->CpuSaveState[Index] = (VOID *)(mCpuHotPlugData.SmBase[Index] + SMRAM_SAVE_STATE_MAP_OFFSET);\r
1030 gSmmCpuPrivate->Operation[Index] = SmmCpuNone;\r
1031\r
1032 if (Index < mNumberOfCpus) {\r
1033 Status = MpServices->GetProcessorInfo (MpServices, Index, &gSmmCpuPrivate->ProcessorInfo[Index]);\r
1034 ASSERT_EFI_ERROR (Status);\r
1035 mCpuHotPlugData.ApicId[Index] = gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId;\r
1036\r
1037 DEBUG ((EFI_D_INFO, "CPU[%03x] APIC ID=%04x SMBASE=%08x SaveState=%08x Size=%08x\n",\r
1038 Index,\r
1039 (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId,\r
1040 mCpuHotPlugData.SmBase[Index],\r
1041 gSmmCpuPrivate->CpuSaveState[Index],\r
1042 gSmmCpuPrivate->CpuSaveStateSize[Index]\r
1043 ));\r
1044 } else {\r
1045 gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId = INVALID_APIC_ID;\r
1046 mCpuHotPlugData.ApicId[Index] = INVALID_APIC_ID;\r
1047 }\r
1048 }\r
1049\r
1050 //\r
1051 // Allocate SMI stacks for all processors.\r
1052 //\r
1053 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
1054 //\r
1055 // Two more pages are allocated for each processor:
1056 // one is the guard page and the other is the known good stack.
1057 //\r
1058 // +-------------------------------------------+-----+-------------------------------------------+\r
1059 // | Known Good Stack | Guard Page | SMM Stack | ... | Known Good Stack | Guard Page | SMM Stack |\r
1060 // +-------------------------------------------+-----+-------------------------------------------+\r
1061 // | | | |\r
1062 // |<-------------- Processor 0 -------------->| |<-------------- Processor n -------------->|\r
1063 //\r
1064 mSmmStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)) + 2);\r
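//
// Worked example (illustrative values only): a PcdCpuSmmStackSize of 8 KB is 2 pages,
// so with the guard page and known good stack page added, mSmmStackSize becomes
// 4 pages (16 KB) per processor.
//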
1065 Stacks = (UINT8 *) AllocatePages (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)) + 2));\r
1066 ASSERT (Stacks != NULL);\r
1067 mSmmStackArrayBase = (UINTN)Stacks;\r
1068 mSmmStackArrayEnd = mSmmStackArrayBase + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * mSmmStackSize - 1;\r
1069 } else {\r
1070 mSmmStackSize = PcdGet32 (PcdCpuSmmStackSize);\r
1071 Stacks = (UINT8 *) AllocatePages (EFI_SIZE_TO_PAGES (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * mSmmStackSize));\r
1072 ASSERT (Stacks != NULL);\r
1073 }\r
1074\r
1075 //\r
1076 // Set SMI stack for SMM base relocation\r
1077 //\r
1078 gSmmInitStack = (UINTN) (Stacks + mSmmStackSize - sizeof (UINTN));\r
1079\r
1080 //\r
1081 // Initialize IDT\r
1082 //\r
1083 InitializeSmmIdt ();\r
1084\r
1085 //\r
1086 // Relocate SMM Base addresses to the ones allocated from SMRAM\r
1087 //\r
1088 mRebased = (BOOLEAN *)AllocateZeroPool (sizeof (BOOLEAN) * mMaxNumberOfCpus);\r
1089 ASSERT (mRebased != NULL);\r
1090 SmmRelocateBases ();\r
1091\r
1092 //\r
1093 // Call hook for BSP to perform extra actions in normal mode after all\r
1094 // SMM base addresses have been relocated on all CPUs\r
1095 //\r
1096 SmmCpuFeaturesSmmRelocationComplete ();\r
1097\r
1098 //\r
1099 // SMM Time initialization\r
1100 //\r
1101 InitializeSmmTimer ();\r
1102\r
1103 //\r
1104 // Initialize MP globals\r
1105 //\r
1106 Cr3 = InitializeMpServiceData (Stacks, mSmmStackSize);\r
1107\r
1108 //\r
1109 // Fill in SMM Reserved Regions\r
1110 //\r
1111 gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedStart = 0;\r
1112 gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedSize = 0;\r
1113\r
1114 //\r
1115 // Install the SMM Configuration Protocol onto a new handle on the handle database.\r
1116 // The entire SMM Configuration Protocol is allocated from SMRAM, so only a pointer\r
1117 // to an SMRAM address will be present in the handle database\r
1118 //\r
1119 Status = SystemTable->BootServices->InstallMultipleProtocolInterfaces (\r
1120 &gSmmCpuPrivate->SmmCpuHandle,\r
1121 &gEfiSmmConfigurationProtocolGuid, &gSmmCpuPrivate->SmmConfiguration,\r
1122 NULL\r
1123 );\r
1124 ASSERT_EFI_ERROR (Status);\r
1125\r
1126 //\r
1127 // Install the SMM CPU Protocol into SMM protocol database\r
1128 //\r
1129 Status = gSmst->SmmInstallProtocolInterface (\r
1130 &mSmmCpuHandle,\r
1131 &gEfiSmmCpuProtocolGuid,\r
1132 EFI_NATIVE_INTERFACE,\r
1133 &mSmmCpu\r
1134 );\r
1135 ASSERT_EFI_ERROR (Status);\r
1136\r
1137 //\r
1138 // Expose address of CPU Hot Plug Data structure if CPU hot plug is supported.\r
1139 //\r
1140 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {\r
1141 Status = PcdSet64S (PcdCpuHotPlugDataAddress, (UINT64)(UINTN)&mCpuHotPlugData);\r
1142 ASSERT_EFI_ERROR (Status);\r
1143 }\r
1144\r
1145 //\r
1146 // Initialize SMM CPU Services Support\r
1147 //\r
1148 Status = InitializeSmmCpuServices (mSmmCpuHandle);\r
1149 ASSERT_EFI_ERROR (Status);\r
1150\r
1151 //\r
1152 // register SMM Ready To Lock Protocol notification\r
1153 //\r
1154 Status = gSmst->SmmRegisterProtocolNotify (\r
1155 &gEfiSmmReadyToLockProtocolGuid,\r
1156 SmmReadyToLockEventNotify,\r
1157 &Registration\r
1158 );\r
1159 ASSERT_EFI_ERROR (Status);\r
1160\r
1161 GuidHob = GetFirstGuidHob (&gEfiAcpiVariableGuid);\r
1162 if (GuidHob != NULL) {\r
1163 SmramDescriptor = (EFI_SMRAM_DESCRIPTOR *) GET_GUID_HOB_DATA (GuidHob);\r
1164\r
1165 DEBUG ((EFI_D_INFO, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor));\r
1166 DEBUG ((EFI_D_INFO, "SMM S3 Structure = %x\n", SmramDescriptor->CpuStart));\r
1167\r
1168 SmmS3ResumeState = (SMM_S3_RESUME_STATE *)(UINTN)SmramDescriptor->CpuStart;\r
1169 ZeroMem (SmmS3ResumeState, sizeof (SMM_S3_RESUME_STATE));\r
1170\r
1171 mSmmS3ResumeState = SmmS3ResumeState;\r
1172 SmmS3ResumeState->Smst = (EFI_PHYSICAL_ADDRESS)(UINTN)gSmst;\r
1173\r
1174 SmmS3ResumeState->SmmS3ResumeEntryPoint = (EFI_PHYSICAL_ADDRESS)(UINTN)SmmRestoreCpu;\r
1175\r
1176 SmmS3ResumeState->SmmS3StackSize = SIZE_32KB;\r
1177 SmmS3ResumeState->SmmS3StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN)SmmS3ResumeState->SmmS3StackSize));\r
1178 if (SmmS3ResumeState->SmmS3StackBase == 0) {\r
1179 SmmS3ResumeState->SmmS3StackSize = 0;\r
1180 }\r
1181\r
1182 SmmS3ResumeState->SmmS3Cr0 = gSmmCr0;\r
1183 SmmS3ResumeState->SmmS3Cr3 = Cr3;\r
1184 SmmS3ResumeState->SmmS3Cr4 = gSmmCr4;\r
1185\r
1186 if (sizeof (UINTN) == sizeof (UINT64)) {\r
1187 SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_64;\r
1188 }\r
1189 if (sizeof (UINTN) == sizeof (UINT32)) {\r
1190 SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_32;\r
1191 }\r
1192 }\r
1193\r
1194 //\r
1195 // Check XD and BTS features\r
1196 //\r
1197 CheckProcessorFeature ();\r
1198\r
1199 //\r
1200 // Initialize SMM Profile feature\r
1201 //\r
1202 InitSmmProfile (Cr3);\r
1203\r
1204 //\r
1205 // Patch SmmS3ResumeState->SmmS3Cr3\r
1206 //\r
1207 InitSmmS3Cr3 ();\r
1208\r
1209 DEBUG ((EFI_D_INFO, "SMM CPU Module exit from SMRAM with EFI_SUCCESS\n"));\r
1210\r
1211 return EFI_SUCCESS;\r
1212}\r
1213\r
1214/**\r
1215\r
1216 Find out SMRAM information including SMRR base and SMRR size.\r
1217\r
1218 @param SmrrBase SMRR base\r
1219 @param SmrrSize SMRR size\r
1220\r
1221**/\r
1222VOID\r
1223FindSmramInfo (\r
1224 OUT UINT32 *SmrrBase,\r
1225 OUT UINT32 *SmrrSize\r
1226 )\r
1227{\r
1228 EFI_STATUS Status;\r
1229 UINTN Size;\r
1230 EFI_SMM_ACCESS2_PROTOCOL *SmmAccess;\r
1231 EFI_SMRAM_DESCRIPTOR *CurrentSmramRange;\r
1232 EFI_SMRAM_DESCRIPTOR *SmramRanges;\r
1233 UINTN SmramRangeCount;\r
1234 UINTN Index;\r
1235 UINT64 MaxSize;\r
1236 BOOLEAN Found;\r
1237\r
1238 //\r
1239 // Get SMM Access Protocol\r
1240 //\r
1241 Status = gBS->LocateProtocol (&gEfiSmmAccess2ProtocolGuid, NULL, (VOID **)&SmmAccess);\r
1242 ASSERT_EFI_ERROR (Status);\r
1243\r
1244 //\r
1245 // Get SMRAM information\r
1246 //\r
1247 Size = 0;\r
1248 Status = SmmAccess->GetCapabilities (SmmAccess, &Size, NULL);\r
1249 ASSERT (Status == EFI_BUFFER_TOO_SMALL);\r
1250\r
1251 SmramRanges = (EFI_SMRAM_DESCRIPTOR *)AllocatePool (Size);\r
1252 ASSERT (SmramRanges != NULL);\r
1253\r
1254 Status = SmmAccess->GetCapabilities (SmmAccess, &Size, SmramRanges);\r
1255 ASSERT_EFI_ERROR (Status);\r
1256\r
1257 SmramRangeCount = Size / sizeof (EFI_SMRAM_DESCRIPTOR);\r
1258\r
1259 //\r
1260 // Find the largest SMRAM range between 1MB and 4GB that is at least 256K - 4K in size\r
1261 //\r
1262 CurrentSmramRange = NULL;\r
1263 for (Index = 0, MaxSize = SIZE_256KB - EFI_PAGE_SIZE; Index < SmramRangeCount; Index++) {\r
1264 //\r
1265 // Skip any SMRAM region that is already allocated, needs testing, or needs ECC initialization\r
1266 //\r
1267 if ((SmramRanges[Index].RegionState & (EFI_ALLOCATED | EFI_NEEDS_TESTING | EFI_NEEDS_ECC_INITIALIZATION)) != 0) {\r
1268 continue;\r
1269 }\r
1270\r
1271 if (SmramRanges[Index].CpuStart >= BASE_1MB) {\r
1272 if ((SmramRanges[Index].CpuStart + SmramRanges[Index].PhysicalSize) <= BASE_4GB) {\r
1273 if (SmramRanges[Index].PhysicalSize >= MaxSize) {\r
1274 MaxSize = SmramRanges[Index].PhysicalSize;\r
1275 CurrentSmramRange = &SmramRanges[Index];\r
1276 }\r
1277 }\r
1278 }\r
1279 }\r
1280\r
1281 ASSERT (CurrentSmramRange != NULL);\r
1282\r
1283 *SmrrBase = (UINT32)CurrentSmramRange->CpuStart;\r
1284 *SmrrSize = (UINT32)CurrentSmramRange->PhysicalSize;\r
1285\r
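//
// Grow the selected range by repeatedly merging any SMRAM descriptor that ends exactly
// at the current base or starts exactly at the current end, until a full pass over the
// descriptors makes no further change.
//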
1286 do {\r
1287 Found = FALSE;\r
1288 for (Index = 0; Index < SmramRangeCount; Index++) {\r
1289 if (SmramRanges[Index].CpuStart < *SmrrBase && *SmrrBase == (SmramRanges[Index].CpuStart + SmramRanges[Index].PhysicalSize)) {\r
1290 *SmrrBase = (UINT32)SmramRanges[Index].CpuStart;\r
1291 *SmrrSize = (UINT32)(*SmrrSize + SmramRanges[Index].PhysicalSize);\r
1292 Found = TRUE;\r
1293 } else if ((*SmrrBase + *SmrrSize) == SmramRanges[Index].CpuStart && SmramRanges[Index].PhysicalSize > 0) {\r
1294 *SmrrSize = (UINT32)(*SmrrSize + SmramRanges[Index].PhysicalSize);\r
1295 Found = TRUE;\r
1296 }\r
1297 }\r
1298 } while (Found);\r
1299\r
1300 DEBUG ((EFI_D_INFO, "SMRR Base: 0x%x, SMRR Size: 0x%x\n", *SmrrBase, *SmrrSize));\r
1301}\r
1302\r
1303/**\r
1304Configure SMM Code Access Check feature on the currently executing processor.
1305SMM Feature Control MSR will be locked after configuration.\r
1306\r
1307@param[in,out] Buffer Pointer to private data buffer.\r
1308**/\r
1309VOID\r
1310EFIAPI\r
1311ConfigSmmCodeAccessCheckOnCurrentProcessor (\r
1312 IN OUT VOID *Buffer\r
1313 )\r
1314{\r
1315 UINTN CpuIndex;\r
1316 UINT64 SmmFeatureControlMsr;\r
1317 UINT64 NewSmmFeatureControlMsr;\r
1318\r
1319 //\r
1320 // Retrieve the CPU Index from the context passed in\r
1321 //\r
1322 CpuIndex = *(UINTN *)Buffer;\r
1323\r
1324 //\r
1325 // Get the current SMM Feature Control MSR value\r
1326 //\r
1327 SmmFeatureControlMsr = SmmCpuFeaturesGetSmmRegister (CpuIndex, SmmRegFeatureControl);\r
1328\r
1329 //\r
1330 // Compute the new SMM Feature Control MSR value\r
1331 //\r
1332 NewSmmFeatureControlMsr = SmmFeatureControlMsr;\r
1333 if (mSmmCodeAccessCheckEnable) {\r
1334 NewSmmFeatureControlMsr |= SMM_CODE_CHK_EN_BIT;\r
1335 if (FeaturePcdGet (PcdCpuSmmFeatureControlMsrLock)) {\r
1336 NewSmmFeatureControlMsr |= SMM_FEATURE_CONTROL_LOCK_BIT;\r
1337 }\r
1338 }\r
1339\r
1340 //\r
1341 // Only set the SMM Feature Control MSR value if the new value is different than the current value\r
1342 //\r
1343 if (NewSmmFeatureControlMsr != SmmFeatureControlMsr) {\r
1344 SmmCpuFeaturesSetSmmRegister (CpuIndex, SmmRegFeatureControl, NewSmmFeatureControlMsr);\r
1345 }\r
1346\r
1347 //\r
1348 // Release the spin lock used to serialize the updates to the SMM Feature Control MSR
1349 //
1350 ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);
1351}\r
1352\r
1353/**\r
1354Configure SMM Code Access Check feature for all processors.\r
1355SMM Feature Control MSR will be locked after configuration.\r
1356**/\r
1357VOID\r
1358ConfigSmmCodeAccessCheck (\r
1359 VOID\r
1360 )\r
1361{\r
1362 UINTN Index;\r
1363 EFI_STATUS Status;\r
1364\r
1365 //\r
1366 // Check to see if the Feature Control MSR is supported on this CPU\r
1367 //\r
1368 Index = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
1369 if (!SmmCpuFeaturesIsSmmRegisterSupported (Index, SmmRegFeatureControl)) {\r
1370 mSmmCodeAccessCheckEnable = FALSE;\r
1371 return;\r
1372 }\r
1373\r
1374 //\r
1375 // Check to see if the CPU supports the SMM Code Access Check feature\r
1376 // Do not access this MSR unless the CPU supports the SmmRegFeatureControl\r
1377 //\r
1378 if ((AsmReadMsr64 (EFI_MSR_SMM_MCA_CAP) & SMM_CODE_ACCESS_CHK_BIT) == 0) {\r
1379 mSmmCodeAccessCheckEnable = FALSE;\r
1380 return;\r
1381 }\r
1382\r
1383 //\r
1384 // Initialize the lock used to serialize the MSR programming in BSP and all APs\r
1385 //\r
1386 InitializeSpinLock (mConfigSmmCodeAccessCheckLock);
1387\r
1388 //\r
1389 // Acquire Config SMM Code Access Check spin lock. The BSP will release the\r
1390 // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().\r
1391 //\r
1392 AcquireSpinLock (mConfigSmmCodeAccessCheckLock);
1393\r
1394 //\r
1395 // Enable SMM Code Access Check feature on the BSP.\r
1396 //\r
1397 ConfigSmmCodeAccessCheckOnCurrentProcessor (&Index);\r
1398\r
1399 //\r
1400 // Enable SMM Code Access Check feature for the APs.\r
1401 //\r
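// Per-AP handshake: the BSP acquires mConfigSmmCodeAccessCheckLock, starts the AP, and
// the AP releases the lock at the end of ConfigSmmCodeAccessCheckOnCurrentProcessor().
// The BSP then spins on AcquireSpinLockOrFail() to detect that the AP has finished and
// releases the lock again before moving on to the next AP.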
1402 for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {\r
1403 if (Index != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
1404\r
1405 //\r
1406 // Acquire Config SMM Code Access Check spin lock. The AP will release the\r
1407 // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().\r
1408 //\r
1409 AcquireSpinLock (mConfigSmmCodeAccessCheckLock);
1410\r
1411 //\r
1412 // Call SmmStartupThisAp() to enable SMM Code Access Check on an AP.\r
1413 //\r
1414 Status = gSmst->SmmStartupThisAp (ConfigSmmCodeAccessCheckOnCurrentProcessor, Index, &Index);\r
1415 ASSERT_EFI_ERROR (Status);\r
1416\r
1417 //\r
1418 // Wait for the AP to release the Config SMM Code Access Check spin lock.\r
1419 //\r
1420 while (!AcquireSpinLockOrFail (mConfigSmmCodeAccessCheckLock)) {
1421 CpuPause ();\r
1422 }\r
1423\r
1424 //\r
1425 // Release the Config SMM Code Access Check spin lock.\r
1426 //\r
1427 ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);
1428 }\r
1429 }\r
1430}\r
1431\r
1432/**\r
1433 This API provides a way to allocate memory for page tables.
1434
1435 This API can be called more than once to allocate memory for page tables.
1436\r
1437 Allocates the number of 4KB pages of type EfiRuntimeServicesData and returns a pointer to the\r
1438 allocated buffer. The buffer returned is aligned on a 4KB boundary. If Pages is 0, then NULL\r
1439 is returned. If there is not enough memory remaining to satisfy the request, then NULL is\r
1440 returned.\r
1441\r
1442 @param Pages The number of 4 KB pages to allocate.\r
1443\r
1444 @return A pointer to the allocated buffer or NULL if allocation fails.\r
1445\r
1446**/\r
1447VOID *\r
1448AllocatePageTableMemory (\r
1449 IN UINTN Pages\r
1450 )\r
1451{\r
1452 VOID *Buffer;\r
1453\r
1454 Buffer = SmmCpuFeaturesAllocatePageTableMemory (Pages);\r
1455 if (Buffer != NULL) {\r
1456 return Buffer;\r
1457 }\r
1458 return AllocatePages (Pages);\r
1459}\r
1460\r
1461/**\r
1462 Perform the remaining tasks.\r
1463\r
1464**/\r
1465VOID\r
1466PerformRemainingTasks (\r
1467 VOID\r
1468 )\r
1469{\r
1470 if (mSmmReadyToLock) {\r
1471 //\r
1472 // Start SMM Profile feature\r
1473 //\r
1474 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {\r
1475 SmmProfileStart ();\r
1476 }\r
1477 //\r
1478 // Create a mix of 2MB and 4KB page tables. Mark some memory ranges as not-present and execute-disable.
1479 //\r
1480 InitPaging ();\r
1481 //\r
1482 // Configure SMM Code Access Check feature if available.\r
1483 //\r
1484 ConfigSmmCodeAccessCheck ();\r
1485\r
1486 SmmCpuFeaturesCompleteSmmReadyToLock ();\r
1487\r
1488 //\r
1489 // Clean SMM ready to lock flag\r
1490 //\r
1491 mSmmReadyToLock = FALSE;\r
1492 }\r
1493}\r
1494\r
1495/**\r
1496 Perform the pre-tasks.
1497\r
1498**/\r
1499VOID\r
1500PerformPreTasks (\r
1501 VOID\r
1502 )\r
1503{\r
1504 //\r
1505 // Restore SMM Configuration in S3 boot path.\r
1506 //\r
1507 if (mRestoreSmmConfigurationInS3) {\r
1508 //\r
1509 // Need to make sure gSmst is correct because the functions below may use it.
1510 //\r
1511 gSmst->SmmStartupThisAp = gSmmCpuPrivate->SmmCoreEntryContext.SmmStartupThisAp;\r
1512 gSmst->CurrentlyExecutingCpu = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;\r
1513 gSmst->NumberOfCpus = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;\r
1514 gSmst->CpuSaveStateSize = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveStateSize;\r
1515 gSmst->CpuSaveState = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveState;\r
1516\r
1517 //\r
1518 // Configure SMM Code Access Check feature if available.\r
1519 //\r
1520 ConfigSmmCodeAccessCheck ();\r
1521\r
1522 SmmCpuFeaturesCompleteSmmReadyToLock ();\r
1523\r
1524 mRestoreSmmConfigurationInS3 = FALSE;\r
1525 }\r
1526}\r