1/** @file\r
2Agent Module to load other modules to deploy SMM Entry Vector for X86 CPU.\r
3\r
4Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>\r
5This program and the accompanying materials\r
6are licensed and made available under the terms and conditions of the BSD License\r
7which accompanies this distribution. The full text of the license may be found at\r
8http://opensource.org/licenses/bsd-license.php\r
9\r
10THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
11WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
12\r
13**/\r
14\r
15#include "PiSmmCpuDxeSmm.h"\r
16\r
17//\r
18// SMM CPU Private Data structure that contains SMM Configuration Protocol\r
19 // along with its supporting fields.\r
20//\r
21SMM_CPU_PRIVATE_DATA mSmmCpuPrivateData = {\r
22 SMM_CPU_PRIVATE_DATA_SIGNATURE, // Signature\r
23 NULL, // SmmCpuHandle\r
24 NULL, // Pointer to ProcessorInfo array\r
25 NULL, // Pointer to Operation array\r
26 NULL, // Pointer to CpuSaveStateSize array\r
27 NULL, // Pointer to CpuSaveState array\r
28 { {0} }, // SmmReservedSmramRegion\r
29 {\r
30 SmmStartupThisAp, // SmmCoreEntryContext.SmmStartupThisAp\r
31 0, // SmmCoreEntryContext.CurrentlyExecutingCpu\r
32 0, // SmmCoreEntryContext.NumberOfCpus\r
33 NULL, // SmmCoreEntryContext.CpuSaveStateSize\r
34 NULL // SmmCoreEntryContext.CpuSaveState\r
35 },\r
36 NULL, // SmmCoreEntry\r
37 {\r
38 mSmmCpuPrivateData.SmmReservedSmramRegion, // SmmConfiguration.SmramReservedRegions\r
39 RegisterSmmEntry // SmmConfiguration.RegisterSmmEntry\r
40 },\r
41};\r
42\r
43CPU_HOT_PLUG_DATA mCpuHotPlugData = {\r
44 CPU_HOT_PLUG_DATA_REVISION_1, // Revision\r
45 0, // Array Length of SmBase and APIC ID\r
46 NULL, // Pointer to APIC ID array\r
47 NULL, // Pointer to SMBASE array\r
48 0, // Reserved\r
49 0, // SmrrBase\r
50 0 // SmrrSize\r
51};\r
52\r
53//\r
54// Global pointer used to access mSmmCpuPrivateData from outside and inside SMM\r
55//\r
56SMM_CPU_PRIVATE_DATA *gSmmCpuPrivate = &mSmmCpuPrivateData;\r
57\r
58//\r
59// SMM Relocation variables\r
60//\r
61volatile BOOLEAN *mRebased;\r
62volatile BOOLEAN mIsBsp;\r
63\r
64///\r
65/// Handle for the SMM CPU Protocol\r
66///\r
67EFI_HANDLE mSmmCpuHandle = NULL;\r
68\r
69///\r
70/// SMM CPU Protocol instance\r
71///\r
72EFI_SMM_CPU_PROTOCOL mSmmCpu = {\r
73 SmmReadSaveState,\r
74 SmmWriteSaveState\r
75};\r
76\r
77EFI_CPU_INTERRUPT_HANDLER mExternalVectorTable[EXCEPTION_VECTOR_NUMBER];\r
78\r
79//\r
80// SMM stack information\r
81//\r
82UINTN mSmmStackArrayBase;\r
83UINTN mSmmStackArrayEnd;\r
84UINTN mSmmStackSize;\r
85\r
86//\r
87// Pointer to structure used during S3 Resume\r
88//\r
89SMM_S3_RESUME_STATE *mSmmS3ResumeState = NULL;\r
90\r
91UINTN mMaxNumberOfCpus = 1;\r
92UINTN mNumberOfCpus = 1;\r
93\r
94//\r
95// SMM ready to lock flag\r
96//\r
97BOOLEAN mSmmReadyToLock = FALSE;\r
98\r
99//\r
100// Global used to cache PCD for SMM Code Access Check enable\r
101//\r
102BOOLEAN mSmmCodeAccessCheckEnable = FALSE;\r
103\r
104//\r
105// Spin lock used to serialize setting of SMM Code Access Check feature\r
106//\r
107SPIN_LOCK *mConfigSmmCodeAccessCheckLock = NULL;\r
108\r
109/**\r
110 Initialize IDT to setup exception handlers for SMM.\r
111\r
112**/\r
113VOID\r
114InitializeSmmIdt (\r
115 VOID\r
116 )\r
117{\r
118 EFI_STATUS Status;\r
119 BOOLEAN InterruptState;\r
120 IA32_DESCRIPTOR DxeIdtr;\r
121 //\r
122 // Disable Interrupt and save DXE IDT table\r
123 //\r
124 InterruptState = SaveAndDisableInterrupts ();\r
125 AsmReadIdtr (&DxeIdtr);\r
126 //\r
127 // Load SMM temporary IDT table\r
128 //\r
129 AsmWriteIdtr (&gcSmiIdtr);\r
130 //\r
131 // Setup SMM default exception handlers, SMM IDT table\r
132 // will be updated and saved in gcSmiIdtr\r
133 //\r
134 Status = InitializeCpuExceptionHandlers (NULL);\r
135 ASSERT_EFI_ERROR (Status);\r
136 //\r
137 // Restore DXE IDT table and CPU interrupt\r
138 //\r
139 AsmWriteIdtr ((IA32_DESCRIPTOR *) &DxeIdtr);\r
140 SetInterruptState (InterruptState);\r
141}\r
142\r
143/**\r
144 Search for the module name corresponding to the input IP address and output it.\r
145\r
146 @param CallerIpAddress Caller instruction pointer.\r
147\r
148**/\r
149VOID\r
150DumpModuleInfoByIp (\r
151 IN UINTN CallerIpAddress\r
152 )\r
153{\r
154 UINTN Pe32Data;\r
155 EFI_IMAGE_DOS_HEADER *DosHdr;\r
156 EFI_IMAGE_OPTIONAL_HEADER_PTR_UNION Hdr;\r
157 VOID *PdbPointer;\r
158 UINT64 DumpIpAddress;\r
159\r
160 //\r
161 // Find Image Base\r
162 //\r
163 Pe32Data = CallerIpAddress & ~(SIZE_4KB - 1);\r
164 while (Pe32Data != 0) {\r
165 DosHdr = (EFI_IMAGE_DOS_HEADER *) Pe32Data;\r
166 if (DosHdr->e_magic == EFI_IMAGE_DOS_SIGNATURE) {\r
167 //\r
168 // DOS image header is present, so read the PE header after the DOS image header.\r
169 //\r
170 Hdr.Pe32 = (EFI_IMAGE_NT_HEADERS32 *)(Pe32Data + (UINTN) ((DosHdr->e_lfanew) & 0x0ffff));\r
171 //\r
172 // Make sure PE header address does not overflow and is less than the initial address.\r
173 //\r
174 if (((UINTN)Hdr.Pe32 > Pe32Data) && ((UINTN)Hdr.Pe32 < CallerIpAddress)) {\r
175 if (Hdr.Pe32->Signature == EFI_IMAGE_NT_SIGNATURE) {\r
176 //\r
177 // It's PE image.\r
178 //\r
179 break;\r
180 }\r
181 }\r
182 }\r
183\r
184 //\r
185 // Image base not found; check the previous aligned address\r
186 //\r
187 Pe32Data -= SIZE_4KB;\r
188 }\r
189\r
190 DumpIpAddress = CallerIpAddress;\r
191 DEBUG ((EFI_D_ERROR, "It is invoked from the instruction before IP(0x%lx)", DumpIpAddress));\r
192\r
193 if (Pe32Data != 0) {\r
194 PdbPointer = PeCoffLoaderGetPdbPointer ((VOID *) Pe32Data);\r
195 if (PdbPointer != NULL) {\r
196 DEBUG ((EFI_D_ERROR, " in module (%a)", PdbPointer));\r
197 }\r
198 }\r
199}\r
200\r
201/**\r
202 Read information from the CPU save state.\r
203\r
204 @param This EFI_SMM_CPU_PROTOCOL instance\r
205 @param Width The number of bytes to read from the CPU save state.\r
206 @param Register Specifies the CPU register to read from the save state.\r
207 @param CpuIndex Specifies the zero-based index of the CPU save state.\r
208 @param Buffer Upon return, this holds the CPU register value read from the save state.\r
209\r
210 @retval EFI_SUCCESS The register was read from Save State\r
211 @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor\r
212 @retval EFI_INVALID_PARAMETER This or Buffer is NULL.\r
213\r
214**/\r
215EFI_STATUS\r
216EFIAPI\r
217SmmReadSaveState (\r
218 IN CONST EFI_SMM_CPU_PROTOCOL *This,\r
219 IN UINTN Width,\r
220 IN EFI_SMM_SAVE_STATE_REGISTER Register,\r
221 IN UINTN CpuIndex,\r
222 OUT VOID *Buffer\r
223 )\r
224{\r
225 EFI_STATUS Status;\r
226\r
227 //\r
228 // Retrieve pointer to the specified CPU's SMM Save State buffer\r
229 //\r
230 if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {\r
231 return EFI_INVALID_PARAMETER;\r
232 }\r
233\r
234 //\r
235 // Check for special EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID\r
236 //\r
237 if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {\r
238 //\r
239 // The pseudo-register only supports the 64-bit size specified by Width.\r
240 //\r
241 if (Width != sizeof (UINT64)) {\r
242 return EFI_INVALID_PARAMETER;\r
243 }\r
244 //\r
245 // If the processor is in SMM at the time the SMI occurred,\r
246 // the pseudo register value for EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID is returned in Buffer.\r
247 // Otherwise, EFI_NOT_FOUND is returned.\r
248 //\r
249 if (mSmmMpSyncData->CpuData[CpuIndex].Present) {\r
250 *(UINT64 *)Buffer = gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId;\r
251 return EFI_SUCCESS;\r
252 } else {\r
253 return EFI_NOT_FOUND;\r
254 }\r
255 }\r
256\r
257 if (!mSmmMpSyncData->CpuData[CpuIndex].Present) {\r
258 return EFI_INVALID_PARAMETER;\r
259 }\r
260\r
261 Status = SmmCpuFeaturesReadSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
262 if (Status == EFI_UNSUPPORTED) {\r
263 Status = ReadSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
264 }\r
265 return Status;\r
266}\r
267\r
268/**\r
269 Write data to the CPU save state.\r
270\r
271 @param This EFI_SMM_CPU_PROTOCOL instance\r
272 @param Width The number of bytes to write to the CPU save state.\r
273 @param Register Specifies the CPU register to write to the save state.\r
274 @param CpuIndex Specifies the zero-based index of the CPU save state\r
275 @param Buffer Upon entry, this holds the new CPU register value.\r
276\r
277 @retval EFI_SUCCESS The register was written to the save state.\r
278 @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor\r
279 @retval EFI_INVALID_PARAMETER ProcessorIndex or Width is not correct\r
280\r
281**/\r
282EFI_STATUS\r
283EFIAPI\r
284SmmWriteSaveState (\r
285 IN CONST EFI_SMM_CPU_PROTOCOL *This,\r
286 IN UINTN Width,\r
287 IN EFI_SMM_SAVE_STATE_REGISTER Register,\r
288 IN UINTN CpuIndex,\r
289 IN CONST VOID *Buffer\r
290 )\r
291{\r
292 EFI_STATUS Status;\r
293\r
294 //\r
295 // Retrieve pointer to the specified CPU's SMM Save State buffer\r
296 //\r
297 if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {\r
298 return EFI_INVALID_PARAMETER;\r
299 }\r
300\r
301 //\r
302 // Writes to EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID are ignored\r
303 //\r
304 if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {\r
305 return EFI_SUCCESS;\r
306 }\r
307\r
308 if (!mSmmMpSyncData->CpuData[CpuIndex].Present) {\r
309 return EFI_INVALID_PARAMETER;\r
310 }\r
311\r
312 Status = SmmCpuFeaturesWriteSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
313 if (Status == EFI_UNSUPPORTED) {\r
314 Status = WriteSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
315 }\r
316 return Status;\r
317}\r
318\r
319\r
320/**\r
321 C function for the SMI handler used to change each processor's SMBASE register.\r
322\r
323**/\r
324VOID\r
325EFIAPI\r
326SmmInitHandler (\r
327 VOID\r
328 )\r
329{\r
330 UINT32 ApicId;\r
331 UINTN Index;\r
332\r
333 //\r
334 // Update SMM IDT entries' code segment and load IDT\r
335 //\r
336 AsmWriteIdtr (&gcSmiIdtr);\r
337 ApicId = GetApicId ();\r
338\r
339 ASSERT (mNumberOfCpus <= PcdGet32 (PcdCpuMaxLogicalProcessorNumber));\r
340\r
341 for (Index = 0; Index < mNumberOfCpus; Index++) {\r
342 if (ApicId == (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {\r
343 //\r
344 // Initialize SMM specific features on the currently executing CPU\r
345 //\r
346 SmmCpuFeaturesInitializeProcessor (\r
347 Index,\r
348 mIsBsp,\r
349 gSmmCpuPrivate->ProcessorInfo,\r
350 &mCpuHotPlugData\r
351 );\r
352\r
353 if (mIsBsp) {\r
354 //\r
355 // BSP rebase is already done above.\r
356 // Initialize private data during S3 resume\r
357 //\r
358 InitializeMpSyncData ();\r
359 }\r
360\r
361 //\r
362 // Hook return after RSM to set SMM re-based flag\r
363 //\r
364 SemaphoreHook (Index, &mRebased[Index]);\r
365\r
366 return;\r
367 }\r
368 }\r
369 ASSERT (FALSE);\r
370}\r
371\r
372/**\r
373 Relocate SmmBases for each processor.\r
374\r
375 Execute on first boot and all S3 resumes\r
376\r
377**/\r
378VOID\r
379EFIAPI\r
380SmmRelocateBases (\r
381 VOID\r
382 )\r
383{\r
384 UINT8 BakBuf[BACK_BUF_SIZE];\r
385 SMRAM_SAVE_STATE_MAP BakBuf2;\r
386 SMRAM_SAVE_STATE_MAP *CpuStatePtr;\r
387 UINT8 *U8Ptr;\r
388 UINT32 ApicId;\r
389 UINTN Index;\r
390 UINTN BspIndex;\r
391\r
392 //\r
393 // Make sure the reserved size is large enough for procedure SmmInitTemplate.\r
394 //\r
395 ASSERT (sizeof (BakBuf) >= gcSmmInitSize);\r
396\r
397 //\r
398 // Patch ASM code template with current CR0, CR3, and CR4 values\r
399 //\r
400 gSmmCr0 = (UINT32)AsmReadCr0 ();\r
401 gSmmCr3 = (UINT32)AsmReadCr3 ();\r
402 gSmmCr4 = (UINT32)AsmReadCr4 ();\r
403\r
404 //\r
405 // Patch GDTR for SMM base relocation\r
406 //\r
407 gcSmiInitGdtr.Base = gcSmiGdtr.Base;\r
408 gcSmiInitGdtr.Limit = gcSmiGdtr.Limit;\r
409\r
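  //\r
  // Note: SMM_DEFAULT_SMBASE + SMM_HANDLER_OFFSET is the power-on default SMI entry\r
  // point (the default SMBASE of 0x30000 plus the 0x8000 handler offset, i.e. 0x38000,\r
  // matching the address referenced in the backup/restore comments below).\r
  //\r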
410 U8Ptr = (UINT8*)(UINTN)(SMM_DEFAULT_SMBASE + SMM_HANDLER_OFFSET);\r
411 CpuStatePtr = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);\r
412\r
413 //\r
414 // Backup original contents at address 0x38000\r
415 //\r
416 CopyMem (BakBuf, U8Ptr, sizeof (BakBuf));\r
417 CopyMem (&BakBuf2, CpuStatePtr, sizeof (BakBuf2));\r
418\r
419 //\r
420 // Load image for relocation\r
421 //\r
422 CopyMem (U8Ptr, gcSmmInitTemplate, gcSmmInitSize);\r
423\r
424 //\r
425 // Retrieve the local APIC ID of current processor\r
426 //\r
427 ApicId = GetApicId ();\r
428\r
429 //\r
430 // Relocate SM bases for all APs\r
431 // This is APs' 1st SMI - rebase will be done here, and APs' default SMI handler will be overridden by gcSmmInitTemplate\r
432 //\r
433 mIsBsp = FALSE;\r
434 BspIndex = (UINTN)-1;\r
435 for (Index = 0; Index < mNumberOfCpus; Index++) {\r
436 mRebased[Index] = FALSE;\r
437 if (ApicId != (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {\r
438 SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);\r
439 //\r
440 // Wait for this AP to finish its 1st SMI\r
441 //\r
442 while (!mRebased[Index]);\r
443 } else {\r
444 //\r
445 // BSP will be Relocated later\r
446 //\r
447 BspIndex = Index;\r
448 }\r
449 }\r
450\r
451 //\r
452 // Relocate BSP's SMM base\r
453 //\r
454 ASSERT (BspIndex != (UINTN)-1);\r
455 mIsBsp = TRUE;\r
456 SendSmiIpi (ApicId);\r
457 //\r
458 // Wait for the BSP to finish its 1st SMI\r
459 //\r
460 while (!mRebased[BspIndex]);\r
461\r
462 //\r
463 // Restore contents at address 0x38000\r
464 //\r
465 CopyMem (CpuStatePtr, &BakBuf2, sizeof (BakBuf2));\r
466 CopyMem (U8Ptr, BakBuf, sizeof (BakBuf));\r
467}\r
468\r
469/**\r
470 Perform SMM initialization for all processors in the S3 boot path.\r
471\r
472 For a native platform, MP initialization in the S3 boot path is also performed in this function.\r
473**/\r
474VOID\r
475EFIAPI\r
476SmmRestoreCpu (\r
477 VOID\r
478 )\r
479{\r
480 SMM_S3_RESUME_STATE *SmmS3ResumeState;\r
481 IA32_DESCRIPTOR Ia32Idtr;\r
482 IA32_DESCRIPTOR X64Idtr;\r
483 IA32_IDT_GATE_DESCRIPTOR IdtEntryTable[EXCEPTION_VECTOR_NUMBER];\r
484 EFI_STATUS Status;\r
485\r
486 DEBUG ((EFI_D_INFO, "SmmRestoreCpu()\n"));\r
487\r
488 //\r
489 // See if there is enough context to resume PEI Phase\r
490 //\r
491 if (mSmmS3ResumeState == NULL) {\r
492 DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));\r
493 CpuDeadLoop ();\r
494 }\r
495\r
496 SmmS3ResumeState = mSmmS3ResumeState;\r
497 ASSERT (SmmS3ResumeState != NULL);\r
498\r
499 if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {\r
500 //\r
501 // Save the IA32 IDT Descriptor\r
502 //\r
503 AsmReadIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);\r
504\r
505 //\r
506 // Setup X64 IDT table\r
507 //\r
508 ZeroMem (IdtEntryTable, sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32);\r
509 X64Idtr.Base = (UINTN) IdtEntryTable;\r
510 X64Idtr.Limit = (UINT16) (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32 - 1);\r
511 AsmWriteIdtr ((IA32_DESCRIPTOR *) &X64Idtr);\r
512\r
513 //\r
514 // Setup the default exception handler\r
515 //\r
516 Status = InitializeCpuExceptionHandlers (NULL);\r
517 ASSERT_EFI_ERROR (Status);\r
518\r
519 //\r
520 // Initialize Debug Agent to support source level debug\r
521 //\r
522 InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64, (VOID *)&Ia32Idtr, NULL);\r
523 }\r
524\r
525 //\r
526 // Skip initialization if mAcpiCpuData is not valid\r
527 //\r
528 if (mAcpiCpuData.NumberOfCpus > 0) {\r
529 //\r
530 // First time microcode load and restore MTRRs\r
531 //\r
532 EarlyInitializeCpu ();\r
533 }\r
534\r
535 //\r
536 // Restore SMBASE for BSP and all APs\r
537 //\r
538 SmmRelocateBases ();\r
539\r
540 //\r
541 // Skip initialization if mAcpiCpuData is not valid\r
542 //\r
543 if (mAcpiCpuData.NumberOfCpus > 0) {\r
544 //\r
545 // Restore MSRs for BSP and all APs\r
546 //\r
547 InitializeCpu ();\r
548 }\r
549\r
550 //\r
551 // Set a flag to restore SMM configuration in S3 path.\r
552 //\r
553 mRestoreSmmConfigurationInS3 = TRUE;\r
554\r
555 DEBUG (( EFI_D_INFO, "SMM S3 Return CS = %x\n", SmmS3ResumeState->ReturnCs));\r
556 DEBUG (( EFI_D_INFO, "SMM S3 Return Entry Point = %x\n", SmmS3ResumeState->ReturnEntryPoint));\r
557 DEBUG (( EFI_D_INFO, "SMM S3 Return Context1 = %x\n", SmmS3ResumeState->ReturnContext1));\r
558 DEBUG (( EFI_D_INFO, "SMM S3 Return Context2 = %x\n", SmmS3ResumeState->ReturnContext2));\r
559 DEBUG (( EFI_D_INFO, "SMM S3 Return Stack Pointer = %x\n", SmmS3ResumeState->ReturnStackPointer));\r
560\r
561 //\r
562 // If SMM is in 32-bit mode, then use SwitchStack() to resume PEI Phase\r
563 //\r
564 if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_32) {\r
565 DEBUG ((EFI_D_INFO, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));\r
566\r
567 SwitchStack (\r
568 (SWITCH_STACK_ENTRY_POINT)(UINTN)SmmS3ResumeState->ReturnEntryPoint,\r
569 (VOID *)(UINTN)SmmS3ResumeState->ReturnContext1,\r
570 (VOID *)(UINTN)SmmS3ResumeState->ReturnContext2,\r
571 (VOID *)(UINTN)SmmS3ResumeState->ReturnStackPointer\r
572 );\r
573 }\r
574\r
575 //\r
576 // If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase\r
577 //\r
578 if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {\r
579 DEBUG ((EFI_D_INFO, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));\r
580 //\r
581 // Disable interrupt of Debug timer, since new IDT table is for IA32 and will not work in long mode.\r
582 //\r
583 SaveAndSetDebugTimerInterrupt (FALSE);\r
584 //\r
585 // Restore IA32 IDT table\r
586 //\r
587 AsmWriteIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);\r
588 AsmDisablePaging64 (\r
589 SmmS3ResumeState->ReturnCs,\r
590 (UINT32)SmmS3ResumeState->ReturnEntryPoint,\r
591 (UINT32)SmmS3ResumeState->ReturnContext1,\r
592 (UINT32)SmmS3ResumeState->ReturnContext2,\r
593 (UINT32)SmmS3ResumeState->ReturnStackPointer\r
594 );\r
595 }\r
596\r
597 //\r
598 // Can not resume PEI Phase\r
599 //\r
600 DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));\r
601 CpuDeadLoop ();\r
602}\r
603\r
604/**\r
605 Copy register table from ACPI NVS memory into SMRAM.\r
606\r
607 @param[in] DestinationRegisterTableList Points to destination register table.\r
608 @param[in] SourceRegisterTableList Points to source register table.\r
609 @param[in] NumberOfCpus Number of CPUs.\r
610\r
611**/\r
612VOID\r
613CopyRegisterTable (\r
614 IN CPU_REGISTER_TABLE *DestinationRegisterTableList,\r
615 IN CPU_REGISTER_TABLE *SourceRegisterTableList,\r
616 IN UINT32 NumberOfCpus\r
617 )\r
618{\r
619 UINTN Index;\r
620 UINTN Index1;\r
621 CPU_REGISTER_TABLE_ENTRY *RegisterTableEntry;\r
622\r
623 CopyMem (DestinationRegisterTableList, SourceRegisterTableList, NumberOfCpus * sizeof (CPU_REGISTER_TABLE));\r
624 for (Index = 0; Index < NumberOfCpus; Index++) {\r
625 DestinationRegisterTableList[Index].RegisterTableEntry = AllocatePool (DestinationRegisterTableList[Index].AllocatedSize);\r
626 ASSERT (DestinationRegisterTableList[Index].RegisterTableEntry != NULL);\r
627 CopyMem (DestinationRegisterTableList[Index].RegisterTableEntry, SourceRegisterTableList[Index].RegisterTableEntry, DestinationRegisterTableList[Index].AllocatedSize);\r
628 //\r
629 // Go through all MSRs in the register table to initialize the MSR spin locks\r
630 //\r
631 RegisterTableEntry = DestinationRegisterTableList[Index].RegisterTableEntry;\r
632 for (Index1 = 0; Index1 < DestinationRegisterTableList[Index].TableLength; Index1++, RegisterTableEntry++) {\r
633 if ((RegisterTableEntry->RegisterType == Msr) && (RegisterTableEntry->ValidBitLength < 64)) {\r
634 //\r
635 // Initialize an MSR spin lock only for those MSRs that need bit field writes\r
636 //\r
637 InitMsrSpinLockByIndex (RegisterTableEntry->Index);\r
638 }\r
639 }\r
640 }\r
641}\r
642\r
643/**\r
644 SMM Ready To Lock event notification handler.\r
645\r
646 The CPU S3 data is copied to SMRAM for security and mSmmReadyToLock is set to\r
647 perform additional lock actions that must be performed from SMM on the next SMI.\r
648\r
649 @param[in] Protocol Points to the protocol's unique identifier.\r
650 @param[in] Interface Points to the interface instance.\r
651 @param[in] Handle The handle on which the interface was installed.\r
652\r
653 @retval EFI_SUCCESS Notification handler runs successfully.\r
654 **/\r
655EFI_STATUS\r
656EFIAPI\r
657SmmReadyToLockEventNotify (\r
658 IN CONST EFI_GUID *Protocol,\r
659 IN VOID *Interface,\r
660 IN EFI_HANDLE Handle\r
661 )\r
662{\r
663 ACPI_CPU_DATA *AcpiCpuData;\r
664 IA32_DESCRIPTOR *Gdtr;\r
665 IA32_DESCRIPTOR *Idtr;\r
666\r
667 //\r
668 // Prevent use of mAcpiCpuData by initializing NumberOfCpus to 0\r
669 //\r
670 mAcpiCpuData.NumberOfCpus = 0;\r
671\r
672 //\r
673 // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM\r
674 //\r
675 AcpiCpuData = (ACPI_CPU_DATA *)(UINTN)PcdGet64 (PcdCpuS3DataAddress);\r
676 if (AcpiCpuData == NULL) {\r
677 goto Done;\r
678 }\r
679\r
680 //\r
681 // For a native platform, copy the CPU S3 data into SMRAM for use on CPU S3 Resume.\r
682 //\r
683 CopyMem (&mAcpiCpuData, AcpiCpuData, sizeof (mAcpiCpuData));\r
684\r
685 mAcpiCpuData.MtrrTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (MTRR_SETTINGS));\r
686 ASSERT (mAcpiCpuData.MtrrTable != 0);\r
687\r
688 CopyMem ((VOID *)(UINTN)mAcpiCpuData.MtrrTable, (VOID *)(UINTN)AcpiCpuData->MtrrTable, sizeof (MTRR_SETTINGS));\r
689\r
690 mAcpiCpuData.GdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));\r
691 ASSERT (mAcpiCpuData.GdtrProfile != 0);\r
692\r
693 CopyMem ((VOID *)(UINTN)mAcpiCpuData.GdtrProfile, (VOID *)(UINTN)AcpiCpuData->GdtrProfile, sizeof (IA32_DESCRIPTOR));\r
694\r
695 mAcpiCpuData.IdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));\r
696 ASSERT (mAcpiCpuData.IdtrProfile != 0);\r
697\r
698 CopyMem ((VOID *)(UINTN)mAcpiCpuData.IdtrProfile, (VOID *)(UINTN)AcpiCpuData->IdtrProfile, sizeof (IA32_DESCRIPTOR));\r
699\r
700 mAcpiCpuData.PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));\r
701 ASSERT (mAcpiCpuData.PreSmmInitRegisterTable != 0);\r
702\r
703 CopyRegisterTable (\r
704 (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable,\r
705 (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->PreSmmInitRegisterTable,\r
706 mAcpiCpuData.NumberOfCpus\r
707 );\r
708\r
709 mAcpiCpuData.RegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));\r
710 ASSERT (mAcpiCpuData.RegisterTable != 0);\r
711\r
712 CopyRegisterTable (\r
713 (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable,\r
714 (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->RegisterTable,\r
715 mAcpiCpuData.NumberOfCpus\r
716 );\r
717\r
718 //\r
719 // Copy AP's GDT, IDT and Machine Check handler into SMRAM.\r
720 //\r
721 Gdtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.GdtrProfile;\r
722 Idtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.IdtrProfile;\r
723\r
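  //\r
  // A single pool allocation below holds the AP GDT, the AP IDT, and the AP machine\r
  // check handler laid out back to back; mIdtForAp and mMachineCheckHandlerForAp\r
  // simply point into that buffer.\r
  //\r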
724 mGdtForAp = AllocatePool ((Gdtr->Limit + 1) + (Idtr->Limit + 1) + mAcpiCpuData.ApMachineCheckHandlerSize);\r
725 ASSERT (mGdtForAp != NULL);\r
726 mIdtForAp = (VOID *) ((UINTN)mGdtForAp + (Gdtr->Limit + 1));\r
727 mMachineCheckHandlerForAp = (VOID *) ((UINTN)mIdtForAp + (Idtr->Limit + 1));\r
728\r
729 CopyMem (mGdtForAp, (VOID *)Gdtr->Base, Gdtr->Limit + 1);\r
730 CopyMem (mIdtForAp, (VOID *)Idtr->Base, Idtr->Limit + 1);\r
731 CopyMem (mMachineCheckHandlerForAp, (VOID *)(UINTN)mAcpiCpuData.ApMachineCheckHandlerBase, mAcpiCpuData.ApMachineCheckHandlerSize);\r
732\r
733Done:\r
734 //\r
735 // Set SMM ready to lock flag and return\r
736 //\r
737 mSmmReadyToLock = TRUE;\r
738 return EFI_SUCCESS;\r
739}\r
740\r
741/**\r
742 The module Entry Point of the CPU SMM driver.\r
743\r
744 @param ImageHandle The firmware allocated handle for the EFI image.\r
745 @param SystemTable A pointer to the EFI System Table.\r
746\r
747 @retval EFI_SUCCESS The entry point is executed successfully.\r
748 @retval Other Some error occurs when executing this entry point.\r
749\r
750**/\r
751EFI_STATUS\r
752EFIAPI\r
753PiCpuSmmEntry (\r
754 IN EFI_HANDLE ImageHandle,\r
755 IN EFI_SYSTEM_TABLE *SystemTable\r
756 )\r
757{\r
758 EFI_STATUS Status;\r
759 EFI_MP_SERVICES_PROTOCOL *MpServices;\r
760 UINTN NumberOfEnabledProcessors;\r
761 UINTN Index;\r
762 VOID *Buffer;\r
763 UINTN BufferPages;\r
764 UINTN TileCodeSize;\r
765 UINTN TileDataSize;\r
766 UINTN TileSize;\r
767 VOID *GuidHob;\r
768 EFI_SMRAM_DESCRIPTOR *SmramDescriptor;\r
769 SMM_S3_RESUME_STATE *SmmS3ResumeState;\r
770 UINT8 *Stacks;\r
771 VOID *Registration;\r
772 UINT32 RegEax;\r
773 UINT32 RegEdx;\r
774 UINTN FamilyId;\r
775 UINTN ModelId;\r
776 UINT32 Cr3;\r
777\r
778 //\r
779 // Initialize Debug Agent to support source level debug in SMM code\r
780 //\r
781 InitializeDebugAgent (DEBUG_AGENT_INIT_SMM, NULL, NULL);\r
782\r
783 //\r
784 // Report the start of CPU SMM initialization.\r
785 //\r
786 REPORT_STATUS_CODE (\r
787 EFI_PROGRESS_CODE,\r
788 EFI_COMPUTING_UNIT_HOST_PROCESSOR | EFI_CU_HP_PC_SMM_INIT\r
789 );\r
790\r
791 //\r
792 // Fix segment address of the long-mode-switch jump\r
793 //\r
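  // (sizeof (UINTN) == sizeof (UINT64) is only true in the X64 build, which is the\r
  // only case where the SMI startup code performs a switch to long mode.)\r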
794 if (sizeof (UINTN) == sizeof (UINT64)) {\r
795 gSmmJmpAddr.Segment = LONG_MODE_CODE_SEGMENT;\r
796 }\r
797\r
798 //\r
799 // Find out SMRR Base and SMRR Size\r
800 //\r
801 FindSmramInfo (&mCpuHotPlugData.SmrrBase, &mCpuHotPlugData.SmrrSize);\r
802\r
803 //\r
804 // Get MP Services Protocol\r
805 //\r
806 Status = SystemTable->BootServices->LocateProtocol (&gEfiMpServiceProtocolGuid, NULL, (VOID **)&MpServices);\r
807 ASSERT_EFI_ERROR (Status);\r
808\r
809 //\r
810 // Use MP Services Protocol to retrieve the number of processors and number of enabled processors\r
811 //\r
812 Status = MpServices->GetNumberOfProcessors (MpServices, &mNumberOfCpus, &NumberOfEnabledProcessors);\r
813 ASSERT_EFI_ERROR (Status);\r
814 ASSERT (mNumberOfCpus <= PcdGet32 (PcdCpuMaxLogicalProcessorNumber));\r
815\r
816 //\r
817 // If CPU hot plug is supported, PcdCpuSmmEnableBspElection should be set to TRUE.\r
818 // A constant BSP index makes no sense because the BSP may be hot removed.\r
819 //\r
820 DEBUG_CODE (\r
821 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {\r
822\r
823 ASSERT (FeaturePcdGet (PcdCpuSmmEnableBspElection));\r
824 }\r
825 );\r
826\r
827 //\r
828 // Save the PcdCpuSmmCodeAccessCheckEnable value into a global variable.\r
829 //\r
830 mSmmCodeAccessCheckEnable = PcdGetBool (PcdCpuSmmCodeAccessCheckEnable);\r
831 DEBUG ((EFI_D_INFO, "PcdCpuSmmCodeAccessCheckEnable = %d\n", mSmmCodeAccessCheckEnable));\r
832\r
833 //\r
834 // If CPU hot plug is supported, we need to allocate resources for processors that may be hot added\r
835 //\r
836 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {\r
837 mMaxNumberOfCpus = PcdGet32 (PcdCpuMaxLogicalProcessorNumber);\r
838 } else {\r
839 mMaxNumberOfCpus = mNumberOfCpus;\r
840 }\r
841 gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus = mMaxNumberOfCpus;\r
842\r
843 //\r
844 // The CPU save state and code for the SMI entry point are tiled within an SMRAM\r
845 // allocated buffer. The minimum size of this buffer for a uniprocessor system\r
846 // is 32 KB, because the entry point is SMBASE + 32KB, and the CPU save state area\r
847 // is just below SMBASE + 64KB. If more than one CPU is present in the platform,\r
848 // then the SMI entry point and the CPU save state areas can be tiled to minimize\r
849 // the total amount of SMRAM required for all the CPUs. The tile size can be computed\r
850 // by adding the CPU save state size, any extra CPU specific context, and\r
851 // the size of code that must be placed at the SMI entry point to transfer\r
852 // control to a C function in the native SMM execution mode. This size is\r
853 // rounded up to the nearest power of 2 to give the tile size for each CPU.\r
854 // The total amount of memory required is the maximum number of CPUs that the\r
855 // platform supports times the tile size. The picture below shows the tiling,\r
856 // where m is the number of tiles that fit in 32KB.\r
857 //\r
858 // +-----------------------------+ <-- 2^n offset from Base of allocated buffer\r
859 // | CPU m+1 Save State |\r
860 // +-----------------------------+\r
861 // | CPU m+1 Extra Data |\r
862 // +-----------------------------+\r
863 // | Padding |\r
864 // +-----------------------------+\r
865 // | CPU 2m SMI Entry |\r
866 // +#############################+ <-- Base of allocated buffer + 64 KB\r
867 // | CPU m-1 Save State |\r
868 // +-----------------------------+\r
869 // | CPU m-1 Extra Data |\r
870 // +-----------------------------+\r
871 // | Padding |\r
872 // +-----------------------------+\r
873 // | CPU 2m-1 SMI Entry |\r
874 // +=============================+ <-- 2^n offset from Base of allocated buffer\r
875 // | . . . . . . . . . . . . |\r
876 // +=============================+ <-- 2^n offset from Base of allocated buffer\r
877 // | CPU 2 Save State |\r
878 // +-----------------------------+\r
879 // | CPU 2 Extra Data |\r
880 // +-----------------------------+\r
881 // | Padding |\r
882 // +-----------------------------+\r
883 // | CPU m+1 SMI Entry |\r
884 // +=============================+ <-- Base of allocated buffer + 32 KB\r
885 // | CPU 1 Save State |\r
886 // +-----------------------------+\r
887 // | CPU 1 Extra Data |\r
888 // +-----------------------------+\r
889 // | Padding |\r
890 // +-----------------------------+\r
891 // | CPU m SMI Entry |\r
892 // +#############################+ <-- Base of allocated buffer + 32 KB == CPU 0 SMBASE + 64 KB\r
893 // | CPU 0 Save State |\r
894 // +-----------------------------+\r
895 // | CPU 0 Extra Data |\r
896 // +-----------------------------+\r
897 // | Padding |\r
898 // +-----------------------------+\r
899 // | CPU m-1 SMI Entry |\r
900 // +=============================+ <-- 2^n offset from Base of allocated buffer\r
901 // | . . . . . . . . . . . . |\r
902 // +=============================+ <-- 2^n offset from Base of allocated buffer\r
903 // | Padding |\r
904 // +-----------------------------+\r
905 // | CPU 1 SMI Entry |\r
906 // +=============================+ <-- 2^n offset from Base of allocated buffer\r
907 // | Padding |\r
908 // +-----------------------------+\r
909 // | CPU 0 SMI Entry |\r
910 // +#############################+ <-- Base of allocated buffer == CPU 0 SMBASE + 32 KB\r
911 //\r
912\r
913 //\r
914 // Retrieve CPU Family\r
915 //\r
916 AsmCpuid (CPUID_VERSION_INFO, &RegEax, NULL, NULL, NULL);\r
917 FamilyId = (RegEax >> 8) & 0xf;\r
918 ModelId = (RegEax >> 4) & 0xf;\r
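  //\r
  // For family 06h and 0Fh, fold the extended model ID (CPUID.01h:EAX[19:16])\r
  // into bits [7:4] of ModelId.\r
  //\r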
919 if (FamilyId == 0x06 || FamilyId == 0x0f) {\r
920 ModelId = ModelId | ((RegEax >> 12) & 0xf0);\r
921 }\r
922\r
923 RegEdx = 0;\r
924 AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);\r
925 if (RegEax >= CPUID_EXTENDED_CPU_SIG) {\r
926 AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);\r
927 }\r
928 //\r
929 // Determine the mode of the CPU at the time an SMI occurs\r
930 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual\r
931 // Volume 3C, Section 34.4.1.1\r
932 //\r
933 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT;\r
934 if ((RegEdx & BIT29) != 0) {\r
935 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;\r
936 }\r
937 if (FamilyId == 0x06) {\r
938 if (ModelId == 0x17 || ModelId == 0x0f || ModelId == 0x1c) {\r
939 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;\r
940 }\r
941 }\r
942\r
943 //\r
944 // Compute tile size of buffer required to hold the CPU SMRAM Save State Map, extra CPU\r
945 // specific context in a PROCESSOR_SMM_DESCRIPTOR, and the SMI entry point. This size\r
946 // is rounded up to nearest power of 2.\r
947 //\r
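  //\r
  // Note: GetPowerOfTwo32() returns the largest power of two that is less than or\r
  // equal to its argument, so computing (TileDataSize + TileCodeSize - 1) and then\r
  // doubling GetPowerOfTwo32() of that value rounds the combined size up to the\r
  // next power of two.\r
  //\r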
948 TileCodeSize = GetSmiHandlerSize ();\r
949 TileCodeSize = ALIGN_VALUE(TileCodeSize, SIZE_4KB);\r
950 TileDataSize = sizeof (SMRAM_SAVE_STATE_MAP) + sizeof (PROCESSOR_SMM_DESCRIPTOR);\r
951 TileDataSize = ALIGN_VALUE(TileDataSize, SIZE_4KB);\r
952 TileSize = TileDataSize + TileCodeSize - 1;\r
953 TileSize = 2 * GetPowerOfTwo32 ((UINT32)TileSize);\r
954 DEBUG ((EFI_D_INFO, "SMRAM TileSize = 0x%08x (0x%08x, 0x%08x)\n", TileSize, TileCodeSize, TileDataSize));\r
955\r
956 //\r
957 // If the TileSize is larger than the space available for the SMI Handler of\r
958 // CPU[i], the PROCESSOR_SMM_DESCRIPTOR of CPU[i+1], and the SMRAM Save State Map\r
959 // of CPU[i+1], then the ASSERT() below is triggered. If this ASSERT() is\r
960 // triggered, then the SMI Handler size must be reduced.\r
961 //\r
962 ASSERT (TileSize <= (SMRAM_SAVE_STATE_MAP_OFFSET + sizeof (SMRAM_SAVE_STATE_MAP) - SMM_HANDLER_OFFSET));\r
963\r
964 //\r
965 // Allocate buffer for all of the tiles.\r
966 //\r
967 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual\r
968 // Volume 3C, Section 34.11 SMBASE Relocation\r
969 // For Pentium and Intel486 processors, the SMBASE values must be\r
970 // aligned on a 32-KByte boundary or the processor will enter shutdown\r
971 // state during the execution of a RSM instruction.\r
972 //\r
973 // Intel486 processors: FamilyId is 4\r
974 // Pentium processors : FamilyId is 5\r
975 //\r
976 BufferPages = EFI_SIZE_TO_PAGES (SIZE_32KB + TileSize * (mMaxNumberOfCpus - 1));\r
977 if ((FamilyId == 4) || (FamilyId == 5)) {\r
978 Buffer = AllocateAlignedPages (BufferPages, SIZE_32KB);\r
979 } else {\r
980 Buffer = AllocateAlignedPages (BufferPages, SIZE_4KB);\r
981 }\r
982 ASSERT (Buffer != NULL);\r
983 DEBUG ((EFI_D_INFO, "SMRAM SaveState Buffer (0x%08x, 0x%08x)\n", Buffer, EFI_PAGES_TO_SIZE(BufferPages)));\r
984\r
985 //\r
986 // Allocate buffer for pointers to array in SMM_CPU_PRIVATE_DATA.\r
987 //\r
988 gSmmCpuPrivate->ProcessorInfo = (EFI_PROCESSOR_INFORMATION *)AllocatePool (sizeof (EFI_PROCESSOR_INFORMATION) * mMaxNumberOfCpus);\r
989 ASSERT (gSmmCpuPrivate->ProcessorInfo != NULL);\r
990\r
991 gSmmCpuPrivate->Operation = (SMM_CPU_OPERATION *)AllocatePool (sizeof (SMM_CPU_OPERATION) * mMaxNumberOfCpus);\r
992 ASSERT (gSmmCpuPrivate->Operation != NULL);\r
993\r
994 gSmmCpuPrivate->CpuSaveStateSize = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);\r
995 ASSERT (gSmmCpuPrivate->CpuSaveStateSize != NULL);\r
996\r
997 gSmmCpuPrivate->CpuSaveState = (VOID **)AllocatePool (sizeof (VOID *) * mMaxNumberOfCpus);\r
998 ASSERT (gSmmCpuPrivate->CpuSaveState != NULL);\r
999\r
1000 mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveStateSize = gSmmCpuPrivate->CpuSaveStateSize;\r
1001 mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveState = gSmmCpuPrivate->CpuSaveState;\r
1002\r
1003 //\r
1004 // Allocate buffer for pointers to array in CPU_HOT_PLUG_DATA.\r
1005 //\r
1006 mCpuHotPlugData.ApicId = (UINT64 *)AllocatePool (sizeof (UINT64) * mMaxNumberOfCpus);\r
1007 ASSERT (mCpuHotPlugData.ApicId != NULL);\r
1008 mCpuHotPlugData.SmBase = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);\r
1009 ASSERT (mCpuHotPlugData.SmBase != NULL);\r
1010 mCpuHotPlugData.ArrayLength = (UINT32)mMaxNumberOfCpus;\r
1011\r
1012 //\r
1013 // Retrieve APIC ID of each enabled processor from the MP Services protocol.\r
1014 // Also compute the SMBASE address, CPU Save State address, and CPU Save state\r
1015 // size for each CPU in the platform\r
1016 //\r
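  //\r
  // Note: SMBASE for CPU[Index] is set SMM_HANDLER_OFFSET bytes below the start of\r
  // its tile, so the SMI entry point (SMBASE + SMM_HANDLER_OFFSET) lands exactly at\r
  // the start of the tile, while the save state map at SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET\r
  // sits just below SMBASE + 64 KB, as shown in the tiling diagram above.\r
  //\r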
1017 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {\r
1018 mCpuHotPlugData.SmBase[Index] = (UINTN)Buffer + Index * TileSize - SMM_HANDLER_OFFSET;\r
1019 gSmmCpuPrivate->CpuSaveStateSize[Index] = sizeof(SMRAM_SAVE_STATE_MAP);\r
1020 gSmmCpuPrivate->CpuSaveState[Index] = (VOID *)(mCpuHotPlugData.SmBase[Index] + SMRAM_SAVE_STATE_MAP_OFFSET);\r
1021 gSmmCpuPrivate->Operation[Index] = SmmCpuNone;\r
1022\r
1023 if (Index < mNumberOfCpus) {\r
1024 Status = MpServices->GetProcessorInfo (MpServices, Index, &gSmmCpuPrivate->ProcessorInfo[Index]);\r
1025 ASSERT_EFI_ERROR (Status);\r
1026 mCpuHotPlugData.ApicId[Index] = gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId;\r
1027\r
1028 DEBUG ((EFI_D_INFO, "CPU[%03x] APIC ID=%04x SMBASE=%08x SaveState=%08x Size=%08x\n",\r
1029 Index,\r
1030 (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId,\r
1031 mCpuHotPlugData.SmBase[Index],\r
1032 gSmmCpuPrivate->CpuSaveState[Index],\r
1033 gSmmCpuPrivate->CpuSaveStateSize[Index]\r
1034 ));\r
1035 } else {\r
1036 gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId = INVALID_APIC_ID;\r
1037 mCpuHotPlugData.ApicId[Index] = INVALID_APIC_ID;\r
1038 }\r
1039 }\r
1040\r
1041 //\r
1042 // Allocate SMI stacks for all processors.\r
1043 //\r
1044 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
1045 //\r
1046 // Two more pages are allocated for each processor:\r
1047 // one is a guard page and the other is a known good stack.\r
1048 //\r
1049 // +-------------------------------------------+-----+-------------------------------------------+\r
1050 // | Known Good Stack | Guard Page | SMM Stack | ... | Known Good Stack | Guard Page | SMM Stack |\r
1051 // +-------------------------------------------+-----+-------------------------------------------+\r
1052 // | | | |\r
1053 // |<-------------- Processor 0 -------------->| |<-------------- Processor n -------------->|\r
1054 //\r
1055 mSmmStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)) + 2);\r
1056 Stacks = (UINT8 *) AllocatePages (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)) + 2));\r
1057 ASSERT (Stacks != NULL);\r
1058 mSmmStackArrayBase = (UINTN)Stacks;\r
1059 mSmmStackArrayEnd = mSmmStackArrayBase + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * mSmmStackSize - 1;\r
1060 } else {\r
1061 mSmmStackSize = PcdGet32 (PcdCpuSmmStackSize);\r
1062 Stacks = (UINT8 *) AllocatePages (EFI_SIZE_TO_PAGES (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * mSmmStackSize));\r
1063 ASSERT (Stacks != NULL);\r
1064 }\r
1065\r
1066 //\r
1067 // Set SMI stack for SMM base relocation\r
1068 //\r
1069 gSmmInitStack = (UINTN) (Stacks + mSmmStackSize - sizeof (UINTN));\r
1070\r
1071 //\r
1072 // Initialize IDT\r
1073 //\r
1074 InitializeSmmIdt ();\r
1075\r
1076 //\r
1077 // Relocate SMM Base addresses to the ones allocated from SMRAM\r
1078 //\r
1079 mRebased = (BOOLEAN *)AllocateZeroPool (sizeof (BOOLEAN) * mMaxNumberOfCpus);\r
1080 ASSERT (mRebased != NULL);\r
1081 SmmRelocateBases ();\r
1082\r
1083 //\r
1084 // Call hook for BSP to perform extra actions in normal mode after all\r
1085 // SMM base addresses have been relocated on all CPUs\r
1086 //\r
1087 SmmCpuFeaturesSmmRelocationComplete ();\r
1088\r
1089 //\r
1090 // SMM Time initialization\r
1091 //\r
1092 InitializeSmmTimer ();\r
1093\r
1094 //\r
1095 // Initialize MP globals\r
1096 //\r
1097 Cr3 = InitializeMpServiceData (Stacks, mSmmStackSize);\r
1098\r
1099 //\r
1100 // Fill in SMM Reserved Regions\r
1101 //\r
1102 gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedStart = 0;\r
1103 gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedSize = 0;\r
1104\r
1105 //\r
1106 // Install the SMM Configuration Protocol onto a new handle on the handle database.\r
1107 // The entire SMM Configuration Protocol is allocated from SMRAM, so only a pointer\r
1108 // to an SMRAM address will be present in the handle database\r
1109 //\r
1110 Status = SystemTable->BootServices->InstallMultipleProtocolInterfaces (\r
1111 &gSmmCpuPrivate->SmmCpuHandle,\r
1112 &gEfiSmmConfigurationProtocolGuid, &gSmmCpuPrivate->SmmConfiguration,\r
1113 NULL\r
1114 );\r
1115 ASSERT_EFI_ERROR (Status);\r
1116\r
1117 //\r
1118 // Install the SMM CPU Protocol into SMM protocol database\r
1119 //\r
1120 Status = gSmst->SmmInstallProtocolInterface (\r
1121 &mSmmCpuHandle,\r
1122 &gEfiSmmCpuProtocolGuid,\r
1123 EFI_NATIVE_INTERFACE,\r
1124 &mSmmCpu\r
1125 );\r
1126 ASSERT_EFI_ERROR (Status);\r
1127\r
1128 //\r
1129 // Expose address of CPU Hot Plug Data structure if CPU hot plug is supported.\r
1130 //\r
1131 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {\r
1132 Status = PcdSet64S (PcdCpuHotPlugDataAddress, (UINT64)(UINTN)&mCpuHotPlugData);\r
1133 ASSERT_EFI_ERROR (Status);\r
1134 }\r
1135\r
1136 //\r
1137 // Initialize SMM CPU Services Support\r
1138 //\r
1139 Status = InitializeSmmCpuServices (mSmmCpuHandle);\r
1140 ASSERT_EFI_ERROR (Status);\r
1141\r
1142 //\r
1143 // Register SMM Ready To Lock Protocol notification\r
1144 //\r
1145 Status = gSmst->SmmRegisterProtocolNotify (\r
1146 &gEfiSmmReadyToLockProtocolGuid,\r
1147 SmmReadyToLockEventNotify,\r
1148 &Registration\r
1149 );\r
1150 ASSERT_EFI_ERROR (Status);\r
1151\r
1152 GuidHob = GetFirstGuidHob (&gEfiAcpiVariableGuid);\r
1153 if (GuidHob != NULL) {\r
1154 SmramDescriptor = (EFI_SMRAM_DESCRIPTOR *) GET_GUID_HOB_DATA (GuidHob);\r
1155\r
1156 DEBUG ((EFI_D_INFO, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor));\r
1157 DEBUG ((EFI_D_INFO, "SMM S3 Structure = %x\n", SmramDescriptor->CpuStart));\r
1158\r
1159 SmmS3ResumeState = (SMM_S3_RESUME_STATE *)(UINTN)SmramDescriptor->CpuStart;\r
1160 ZeroMem (SmmS3ResumeState, sizeof (SMM_S3_RESUME_STATE));\r
1161\r
1162 mSmmS3ResumeState = SmmS3ResumeState;\r
1163 SmmS3ResumeState->Smst = (EFI_PHYSICAL_ADDRESS)(UINTN)gSmst;\r
1164\r
1165 SmmS3ResumeState->SmmS3ResumeEntryPoint = (EFI_PHYSICAL_ADDRESS)(UINTN)SmmRestoreCpu;\r
1166\r
1167 SmmS3ResumeState->SmmS3StackSize = SIZE_32KB;\r
1168 SmmS3ResumeState->SmmS3StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN)SmmS3ResumeState->SmmS3StackSize));\r
1169 if (SmmS3ResumeState->SmmS3StackBase == 0) {\r
1170 SmmS3ResumeState->SmmS3StackSize = 0;\r
1171 }\r
1172\r
1173 SmmS3ResumeState->SmmS3Cr0 = gSmmCr0;\r
1174 SmmS3ResumeState->SmmS3Cr3 = Cr3;\r
1175 SmmS3ResumeState->SmmS3Cr4 = gSmmCr4;\r
1176\r
1177 if (sizeof (UINTN) == sizeof (UINT64)) {\r
1178 SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_64;\r
1179 }\r
1180 if (sizeof (UINTN) == sizeof (UINT32)) {\r
1181 SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_32;\r
1182 }\r
1183 }\r
1184\r
1185 //\r
1186 // Check XD and BTS features\r
1187 //\r
1188 CheckProcessorFeature ();\r
1189\r
1190 //\r
1191 // Initialize SMM Profile feature\r
1192 //\r
1193 InitSmmProfile (Cr3);\r
1194\r
1195 //\r
1196 // Patch SmmS3ResumeState->SmmS3Cr3\r
1197 //\r
1198 InitSmmS3Cr3 ();\r
1199\r
1200 DEBUG ((EFI_D_INFO, "SMM CPU Module exit from SMRAM with EFI_SUCCESS\n"));\r
1201\r
1202 return EFI_SUCCESS;\r
1203}\r
1204\r
1205/**\r
1206\r
1207 Find out SMRAM information including SMRR base and SMRR size.\r
1208\r
1209 @param SmrrBase SMRR base\r
1210 @param SmrrSize SMRR size\r
1211\r
1212**/\r
1213VOID\r
1214FindSmramInfo (\r
1215 OUT UINT32 *SmrrBase,\r
1216 OUT UINT32 *SmrrSize\r
1217 )\r
1218{\r
1219 EFI_STATUS Status;\r
1220 UINTN Size;\r
1221 EFI_SMM_ACCESS2_PROTOCOL *SmmAccess;\r
1222 EFI_SMRAM_DESCRIPTOR *CurrentSmramRange;\r
1223 EFI_SMRAM_DESCRIPTOR *SmramRanges;\r
1224 UINTN SmramRangeCount;\r
1225 UINTN Index;\r
1226 UINT64 MaxSize;\r
1227 BOOLEAN Found;\r
1228\r
1229 //\r
1230 // Get SMM Access Protocol\r
1231 //\r
1232 Status = gBS->LocateProtocol (&gEfiSmmAccess2ProtocolGuid, NULL, (VOID **)&SmmAccess);\r
1233 ASSERT_EFI_ERROR (Status);\r
1234\r
1235 //\r
1236 // Get SMRAM information\r
1237 //\r
1238 Size = 0;\r
1239 Status = SmmAccess->GetCapabilities (SmmAccess, &Size, NULL);\r
1240 ASSERT (Status == EFI_BUFFER_TOO_SMALL);\r
1241\r
1242 SmramRanges = (EFI_SMRAM_DESCRIPTOR *)AllocatePool (Size);\r
1243 ASSERT (SmramRanges != NULL);\r
1244\r
1245 Status = SmmAccess->GetCapabilities (SmmAccess, &Size, SmramRanges);\r
1246 ASSERT_EFI_ERROR (Status);\r
1247\r
1248 SmramRangeCount = Size / sizeof (EFI_SMRAM_DESCRIPTOR);\r
1249\r
1250 //\r
1251 // Find the largest SMRAM range between 1MB and 4GB that is at least 256K - 4K in size\r
1252 //\r
1253 CurrentSmramRange = NULL;\r
1254 for (Index = 0, MaxSize = SIZE_256KB - EFI_PAGE_SIZE; Index < SmramRangeCount; Index++) {\r
1255 //\r
1256 // Skip any SMRAM region that is already allocated, needs testing, or needs ECC initialization\r
1257 //\r
1258 if ((SmramRanges[Index].RegionState & (EFI_ALLOCATED | EFI_NEEDS_TESTING | EFI_NEEDS_ECC_INITIALIZATION)) != 0) {\r
1259 continue;\r
1260 }\r
1261\r
1262 if (SmramRanges[Index].CpuStart >= BASE_1MB) {\r
1263 if ((SmramRanges[Index].CpuStart + SmramRanges[Index].PhysicalSize) <= BASE_4GB) {\r
1264 if (SmramRanges[Index].PhysicalSize >= MaxSize) {\r
1265 MaxSize = SmramRanges[Index].PhysicalSize;\r
1266 CurrentSmramRange = &SmramRanges[Index];\r
1267 }\r
1268 }\r
1269 }\r
1270 }\r
1271\r
1272 ASSERT (CurrentSmramRange != NULL);\r
1273\r
1274 *SmrrBase = (UINT32)CurrentSmramRange->CpuStart;\r
1275 *SmrrSize = (UINT32)CurrentSmramRange->PhysicalSize;\r
1276\r
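  //\r
  // Grow the SMRR range by repeatedly merging SMRAM descriptors that are physically\r
  // adjacent to the current [SmrrBase, SmrrBase + SmrrSize) range, either immediately\r
  // below it or immediately above it, until no adjacent descriptor remains.\r
  //\r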
1277 do {\r
1278 Found = FALSE;\r
1279 for (Index = 0; Index < SmramRangeCount; Index++) {\r
1280 if (SmramRanges[Index].CpuStart < *SmrrBase && *SmrrBase == (SmramRanges[Index].CpuStart + SmramRanges[Index].PhysicalSize)) {\r
1281 *SmrrBase = (UINT32)SmramRanges[Index].CpuStart;\r
1282 *SmrrSize = (UINT32)(*SmrrSize + SmramRanges[Index].PhysicalSize);\r
1283 Found = TRUE;\r
1284 } else if ((*SmrrBase + *SmrrSize) == SmramRanges[Index].CpuStart && SmramRanges[Index].PhysicalSize > 0) {\r
1285 *SmrrSize = (UINT32)(*SmrrSize + SmramRanges[Index].PhysicalSize);\r
1286 Found = TRUE;\r
1287 }\r
1288 }\r
1289 } while (Found);\r
1290\r
1291 DEBUG ((EFI_D_INFO, "SMRR Base: 0x%x, SMRR Size: 0x%x\n", *SmrrBase, *SmrrSize));\r
1292}\r
1293\r
1294/**\r
1295Configure SMM Code Access Check feature on an AP.\r
1296SMM Feature Control MSR will be locked after configuration.\r
1297\r
1298@param[in,out] Buffer Pointer to private data buffer.\r
1299**/\r
1300VOID\r
1301EFIAPI\r
1302ConfigSmmCodeAccessCheckOnCurrentProcessor (\r
1303 IN OUT VOID *Buffer\r
1304 )\r
1305{\r
1306 UINTN CpuIndex;\r
1307 UINT64 SmmFeatureControlMsr;\r
1308 UINT64 NewSmmFeatureControlMsr;\r
1309\r
1310 //\r
1311 // Retrieve the CPU Index from the context passed in\r
1312 //\r
1313 CpuIndex = *(UINTN *)Buffer;\r
1314\r
1315 //\r
1316 // Get the current SMM Feature Control MSR value\r
1317 //\r
1318 SmmFeatureControlMsr = SmmCpuFeaturesGetSmmRegister (CpuIndex, SmmRegFeatureControl);\r
1319\r
1320 //\r
1321 // Compute the new SMM Feature Control MSR value\r
1322 //\r
1323 NewSmmFeatureControlMsr = SmmFeatureControlMsr;\r
1324 if (mSmmCodeAccessCheckEnable) {\r
1325 NewSmmFeatureControlMsr |= SMM_CODE_CHK_EN_BIT;\r
1326 if (FeaturePcdGet (PcdCpuSmmFeatureControlMsrLock)) {\r
1327 NewSmmFeatureControlMsr |= SMM_FEATURE_CONTROL_LOCK_BIT;\r
1328 }\r
1329 }\r
1330\r
1331 //\r
1332 // Only set the SMM Feature Control MSR value if the new value is different than the current value\r
1333 //\r
1334 if (NewSmmFeatureControlMsr != SmmFeatureControlMsr) {\r
1335 SmmCpuFeaturesSetSmmRegister (CpuIndex, SmmRegFeatureControl, NewSmmFeatureControlMsr);\r
1336 }\r
1337\r
1338 //\r
1339 // Release the spin lock used to serialize the updates to the SMM Feature Control MSR\r
1340 //\r
1341 ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);\r
1342}\r
1343\r
1344/**\r
1345Configure SMM Code Access Check feature for all processors.\r
1346SMM Feature Control MSR will be locked after configuration.\r
1347**/\r
1348VOID\r
1349ConfigSmmCodeAccessCheck (\r
1350 VOID\r
1351 )\r
1352{\r
1353 UINTN Index;\r
1354 EFI_STATUS Status;\r
1355\r
1356 //\r
1357 // Check to see if the Feature Control MSR is supported on this CPU\r
1358 //\r
1359 Index = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;\r
1360 if (!SmmCpuFeaturesIsSmmRegisterSupported (Index, SmmRegFeatureControl)) {\r
1361 mSmmCodeAccessCheckEnable = FALSE;\r
1362 return;\r
1363 }\r
1364\r
1365 //\r
1366 // Check to see if the CPU supports the SMM Code Access Check feature\r
1367 // Do not access this MSR unless the CPU supports the SmmRegFeatureControl\r
1368 //\r
1369 if ((AsmReadMsr64 (EFI_MSR_SMM_MCA_CAP) & SMM_CODE_ACCESS_CHK_BIT) == 0) {\r
1370 mSmmCodeAccessCheckEnable = FALSE;\r
1371 return;\r
1372 }\r
1373\r
1374 //\r
1375 // Initialize the lock used to serialize the MSR programming in BSP and all APs\r
1376 //\r
1377 InitializeSpinLock (mConfigSmmCodeAccessCheckLock);\r
1378\r
1379 //\r
1380 // Acquire Config SMM Code Access Check spin lock. The BSP will release the\r
1381 // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().\r
1382 //\r
1383 AcquireSpinLock (mConfigSmmCodeAccessCheckLock);\r
1384\r
1385 //\r
1386 // Enable SMM Code Access Check feature on the BSP.\r
1387 //\r
1388 ConfigSmmCodeAccessCheckOnCurrentProcessor (&Index);\r
1389\r
1390 //\r
1391 // Enable SMM Code Access Check feature for the APs.\r
1392 //\r
1393 for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {\r
1394 if (Index != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {\r
1395\r
1396 //\r
1397 // Acquire Config SMM Code Access Check spin lock. The AP will release the\r
1398 // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().\r
1399 //\r
1400 AcquireSpinLock (mConfigSmmCodeAccessCheckLock);\r
1401\r
1402 //\r
1403 // Call SmmStartupThisAp() to enable SMM Code Access Check on an AP.\r
1404 //\r
1405 Status = gSmst->SmmStartupThisAp (ConfigSmmCodeAccessCheckOnCurrentProcessor, Index, &Index);\r
1406 ASSERT_EFI_ERROR (Status);\r
1407\r
1408 //\r
1409 // Wait for the AP to release the Config SMM Code Access Check spin lock.\r
1410 //\r
1411 while (!AcquireSpinLockOrFail (mConfigSmmCodeAccessCheckLock)) {\r
1412 CpuPause ();\r
1413 }\r
1414\r
1415 //\r
1416 // Release the Config SMM Code Access Check spin lock.\r
1417 //\r
1418 ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);\r
1419 }\r
1420 }\r
1421}\r
1422\r
1423/**\r
1424 This API provides a way to allocate memory for page tables.\r
1425\r
1426 This API can be called more than once to allocate memory for page tables.\r
1427\r
1428 Allocates the number of 4KB pages of type EfiRuntimeServicesData and returns a pointer to the\r
1429 allocated buffer. The buffer returned is aligned on a 4KB boundary. If Pages is 0, then NULL\r
1430 is returned. If there is not enough memory remaining to satisfy the request, then NULL is\r
1431 returned.\r
1432\r
1433 @param Pages The number of 4 KB pages to allocate.\r
1434\r
1435 @return A pointer to the allocated buffer or NULL if allocation fails.\r
1436\r
1437**/\r
1438VOID *\r
1439AllocatePageTableMemory (\r
1440 IN UINTN Pages\r
1441 )\r
1442{\r
1443 VOID *Buffer;\r
1444\r
1445 Buffer = SmmCpuFeaturesAllocatePageTableMemory (Pages);\r
1446 if (Buffer != NULL) {\r
1447 return Buffer;\r
1448 }\r
1449 return AllocatePages (Pages);\r
1450}\r
1451\r
1452/**\r
1453 Perform the remaining tasks.\r
1454\r
1455**/\r
1456VOID\r
1457PerformRemainingTasks (\r
1458 VOID\r
1459 )\r
1460{\r
1461 if (mSmmReadyToLock) {\r
1462 //\r
1463 // Start SMM Profile feature\r
1464 //\r
1465 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {\r
1466 SmmProfileStart ();\r
1467 }\r
1468 //\r
1469 // Create a mix of 2MB and 4KB page tables. Mark some memory ranges as absent (not present) and execute-disable.\r
1470 //\r
1471 InitPaging ();\r
1472 //\r
1473 // Configure SMM Code Access Check feature if available.\r
1474 //\r
1475 ConfigSmmCodeAccessCheck ();\r
1476\r
1477 SmmCpuFeaturesCompleteSmmReadyToLock ();\r
1478\r
1479 //\r
1480 // Clean SMM ready to lock flag\r
1481 //\r
1482 mSmmReadyToLock = FALSE;\r
1483 }\r
1484}\r
1485\r
1486/**\r
1487 Perform the pre-tasks.\r
1488\r
1489**/\r
1490VOID\r
1491PerformPreTasks (\r
1492 VOID\r
1493 )\r
1494{\r
1495 //\r
1496 // Restore SMM Configuration in S3 boot path.\r
1497 //\r
1498 if (mRestoreSmmConfigurationInS3) {\r
1499 //\r
1500 // Need to make sure gSmst is correct because the functions below may use it.\r
1501 //\r
1502 gSmst->SmmStartupThisAp = gSmmCpuPrivate->SmmCoreEntryContext.SmmStartupThisAp;\r
1503 gSmst->CurrentlyExecutingCpu = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;\r
1504 gSmst->NumberOfCpus = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;\r
1505 gSmst->CpuSaveStateSize = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveStateSize;\r
1506 gSmst->CpuSaveState = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveState;\r
1507\r
1508 //\r
1509 // Configure SMM Code Access Check feature if available.\r
1510 //\r
1511 ConfigSmmCodeAccessCheck ();\r
1512\r
1513 SmmCpuFeaturesCompleteSmmReadyToLock ();\r
1514\r
1515 mRestoreSmmConfigurationInS3 = FALSE;\r
1516 }\r
1517}\r