1/** @file\r
2Agent Module to load other modules to deploy SMM Entry Vector for X86 CPU.\r
3\r
4Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>\r
5This program and the accompanying materials\r
6are licensed and made available under the terms and conditions of the BSD License\r
7which accompanies this distribution. The full text of the license may be found at\r
8http://opensource.org/licenses/bsd-license.php\r
9\r
10THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
11WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
12\r
13**/\r
14\r
15#include "PiSmmCpuDxeSmm.h"\r
16\r
17//\r
18// SMM CPU Private Data structure that contains SMM Configuration Protocol\r
19// along with its supporting fields.\r
20//\r
21SMM_CPU_PRIVATE_DATA mSmmCpuPrivateData = {\r
22 SMM_CPU_PRIVATE_DATA_SIGNATURE, // Signature\r
23 NULL, // SmmCpuHandle\r
24 NULL, // Pointer to ProcessorInfo array\r
25 NULL, // Pointer to Operation array\r
26 NULL, // Pointer to CpuSaveStateSize array\r
27 NULL, // Pointer to CpuSaveState array\r
28 { {0} }, // SmmReservedSmramRegion\r
29 {\r
30 SmmStartupThisAp, // SmmCoreEntryContext.SmmStartupThisAp\r
31 0, // SmmCoreEntryContext.CurrentlyExecutingCpu\r
32 0, // SmmCoreEntryContext.NumberOfCpus\r
33 NULL, // SmmCoreEntryContext.CpuSaveStateSize\r
34 NULL // SmmCoreEntryContext.CpuSaveState\r
35 },\r
36 NULL, // SmmCoreEntry\r
37 {\r
38 mSmmCpuPrivateData.SmmReservedSmramRegion, // SmmConfiguration.SmramReservedRegions\r
39 RegisterSmmEntry // SmmConfiguration.RegisterSmmEntry\r
40 },\r
41};\r
42\r
43CPU_HOT_PLUG_DATA mCpuHotPlugData = {\r
44 CPU_HOT_PLUG_DATA_REVISION_1, // Revision\r
45 0, // Array Length of SmBase and APIC ID\r
46 NULL, // Pointer to APIC ID array\r
47 NULL, // Pointer to SMBASE array\r
48 0, // Reserved\r
49 0, // SmrrBase\r
50 0 // SmrrSize\r
51};\r
52\r
53//\r
54// Global pointer used to access mSmmCpuPrivateData from outside and inside SMM\r
55//\r
56SMM_CPU_PRIVATE_DATA *gSmmCpuPrivate = &mSmmCpuPrivateData;\r
57\r
58//\r
59// SMM Relocation variables\r
60//\r
61volatile BOOLEAN *mRebased;\r
62volatile BOOLEAN mIsBsp;\r
63\r
64///\r
65/// Handle for the SMM CPU Protocol\r
66///\r
67EFI_HANDLE mSmmCpuHandle = NULL;\r
68\r
69///\r
70/// SMM CPU Protocol instance\r
71///\r
72EFI_SMM_CPU_PROTOCOL mSmmCpu = {\r
73 SmmReadSaveState,\r
74 SmmWriteSaveState\r
75};\r
76\r
77EFI_CPU_INTERRUPT_HANDLER mExternalVectorTable[EXCEPTION_VECTOR_NUMBER];\r
78\r
79//\r
80// SMM stack information\r
81//\r
82UINTN mSmmStackArrayBase;\r
83UINTN mSmmStackArrayEnd;\r
84UINTN mSmmStackSize;\r
85\r
86//\r
87// Pointer to structure used during S3 Resume\r
88//\r
89SMM_S3_RESUME_STATE *mSmmS3ResumeState = NULL;\r
90\r
91UINTN mMaxNumberOfCpus = 1;\r
92UINTN mNumberOfCpus = 1;\r
93\r
94//\r
95// SMM ready to lock flag\r
96//\r
97BOOLEAN mSmmReadyToLock = FALSE;\r
98\r
99//\r
100// Global used to cache PCD for SMM Code Access Check enable\r
101//\r
102BOOLEAN mSmmCodeAccessCheckEnable = FALSE;\r
103\r
104//\r
105// Spin lock used to serialize setting of SMM Code Access Check feature\r
106//\r
107SPIN_LOCK mConfigSmmCodeAccessCheckLock;\r
108\r
109/**\r
110 Initialize IDT to setup exception handlers for SMM.\r
111\r
112**/\r
113VOID\r
114InitializeSmmIdt (\r
115 VOID\r
116 )\r
117{\r
118 EFI_STATUS Status;\r
119 BOOLEAN InterruptState;\r
120 IA32_DESCRIPTOR DxeIdtr;\r
121 //\r
122 // Disable Interrupt and save DXE IDT table\r
123 //\r
124 InterruptState = SaveAndDisableInterrupts ();\r
125 AsmReadIdtr (&DxeIdtr);\r
126 //\r
127 // Load SMM temporary IDT table\r
128 //\r
129 AsmWriteIdtr (&gcSmiIdtr);\r
130 //\r
131 // Setup SMM default exception handlers, SMM IDT table\r
132 // will be updated and saved in gcSmiIdtr\r
133 //\r
134 Status = InitializeCpuExceptionHandlers (NULL);\r
135 ASSERT_EFI_ERROR (Status);\r
136 //\r
137 // Restore DXE IDT table and CPU interrupt\r
138 //\r
139 AsmWriteIdtr ((IA32_DESCRIPTOR *) &DxeIdtr);\r
140 SetInterruptState (InterruptState);\r
141}\r
142\r
143/**\r
144 Find the module name from the given instruction pointer address and print it.\r
145\r
146 @param CallerIpAddress Caller instruction pointer.\r
147\r
148**/\r
149VOID\r
150DumpModuleInfoByIp (\r
151 IN UINTN CallerIpAddress\r
152 )\r
153{\r
154 UINTN Pe32Data;\r
155 EFI_IMAGE_DOS_HEADER *DosHdr;\r
156 EFI_IMAGE_OPTIONAL_HEADER_PTR_UNION Hdr;\r
157 VOID *PdbPointer;\r
158 UINT64 DumpIpAddress;\r
159\r
160 //\r
161 // Find Image Base\r
162 //\r
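  //
  // Note: the loop below scans backwards in 4KB steps from the 4KB-aligned
  // address containing the caller IP until a DOS header with a valid PE
  // signature is found, relying on PE/COFF images being loaded at 4KB-aligned
  // addresses; if none is found, the loop terminates when Pe32Data reaches 0.
  //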
163 Pe32Data = CallerIpAddress & ~(SIZE_4KB - 1);\r
164 while (Pe32Data != 0) {\r
165 DosHdr = (EFI_IMAGE_DOS_HEADER *) Pe32Data;\r
166 if (DosHdr->e_magic == EFI_IMAGE_DOS_SIGNATURE) {\r
167 //\r
168 // DOS image header is present, so read the PE header after the DOS image header.\r
169 //\r
170 Hdr.Pe32 = (EFI_IMAGE_NT_HEADERS32 *)(Pe32Data + (UINTN) ((DosHdr->e_lfanew) & 0x0ffff));\r
171 //\r
172 // Make sure PE header address does not overflow and is less than the initial address.\r
173 //\r
174 if (((UINTN)Hdr.Pe32 > Pe32Data) && ((UINTN)Hdr.Pe32 < CallerIpAddress)) {\r
175 if (Hdr.Pe32->Signature == EFI_IMAGE_NT_SIGNATURE) {\r
176 //\r
177 // It's PE image.\r
178 //\r
179 break;\r
180 }\r
181 }\r
182 }\r
183\r
184 //\r
185 // Image base not found yet; check the previous 4KB-aligned address\r
186 //\r
187 Pe32Data -= SIZE_4KB;\r
188 }\r
189\r
190 DumpIpAddress = CallerIpAddress;\r
191 DEBUG ((EFI_D_ERROR, "It is invoked from the instruction before IP(0x%lx)", DumpIpAddress));\r
192\r
193 if (Pe32Data != 0) {\r
194 PdbPointer = PeCoffLoaderGetPdbPointer ((VOID *) Pe32Data);\r
195 if (PdbPointer != NULL) {\r
196 DEBUG ((EFI_D_ERROR, " in module (%a)", PdbPointer));\r
197 }\r
198 }\r
199}\r
200\r
201/**\r
202 Read information from the CPU save state.\r
203\r
204 @param This EFI_SMM_CPU_PROTOCOL instance\r
205 @param Width The number of bytes to read from the CPU save state.\r
206 @param Register Specifies the CPU register to read from the save state.\r
207 @param CpuIndex Specifies the zero-based index of the CPU save state.\r
208 @param Buffer Upon return, this holds the CPU register value read from the save state.\r
209\r
210 @retval EFI_SUCCESS The register was read from Save State\r
211 @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor\r
212 @retval EFI_INVALID_PARAMETER This or Buffer is NULL.\r
213\r
214**/\r
215EFI_STATUS\r
216EFIAPI\r
217SmmReadSaveState (\r
218 IN CONST EFI_SMM_CPU_PROTOCOL *This,\r
219 IN UINTN Width,\r
220 IN EFI_SMM_SAVE_STATE_REGISTER Register,\r
221 IN UINTN CpuIndex,\r
222 OUT VOID *Buffer\r
223 )\r
224{\r
225 EFI_STATUS Status;\r
226\r
227 //\r
228 // Retrieve pointer to the specified CPU's SMM Save State buffer\r
229 //\r
230 if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {\r
231 return EFI_INVALID_PARAMETER;\r
232 }\r
233\r
234 //\r
235 // Check for special EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID\r
236 //\r
237 if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {\r
238 //\r
239 // The pseudo-register only supports the 64-bit size specified by Width.\r
240 //\r
241 if (Width != sizeof (UINT64)) {\r
242 return EFI_INVALID_PARAMETER;\r
243 }\r
244 //\r
245 // If the processor is in SMM at the time the SMI occurred,\r
246 // the pseudo register value for EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID is returned in Buffer.\r
247 // Otherwise, EFI_NOT_FOUND is returned.\r
248 //\r
249 if (mSmmMpSyncData->CpuData[CpuIndex].Present) {\r
250 *(UINT64 *)Buffer = gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId;\r
251 return EFI_SUCCESS;\r
252 } else {\r
253 return EFI_NOT_FOUND;\r
254 }\r
255 }\r
256\r
257 if (!mSmmMpSyncData->CpuData[CpuIndex].Present) {\r
258 return EFI_INVALID_PARAMETER;\r
259 }\r
260\r
261 Status = SmmCpuFeaturesReadSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
262 if (Status == EFI_UNSUPPORTED) {\r
263 Status = ReadSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
264 }\r
265 return Status;\r
266}\r
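//
// Illustrative usage sketch (not part of this driver): a child SMI handler could
// read a register from the save state of a CPU that entered SMM, for example:
//
//   UINT64 Rip;
//   Status = SmmCpu->ReadSaveState (
//                      SmmCpu,
//                      sizeof (Rip),
//                      EFI_SMM_SAVE_STATE_REGISTER_RIP,
//                      CpuIndex,
//                      &Rip
//                      );
//
// where SmmCpu is a located EFI_SMM_CPU_PROTOCOL instance and CpuIndex is a
// hypothetical CPU index chosen by the caller.
//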
267\r
268/**\r
269 Write data to the CPU save state.\r
270\r
271 @param This EFI_SMM_CPU_PROTOCOL instance\r
272 @param Width The number of bytes to write to the CPU save state.\r
273 @param Register Specifies the CPU register to write to the save state.\r
274 @param CpuIndex Specifies the zero-based index of the CPU save state\r
275 @param Buffer Upon entry, this holds the new CPU register value.\r
276\r
277 @retval EFI_SUCCESS The register was written to the Save State\r
278 @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor\r
279 @retval EFI_INVALID_PARAMETER ProcessorIndex or Width is not correct\r
280\r
281**/\r
282EFI_STATUS\r
283EFIAPI\r
284SmmWriteSaveState (\r
285 IN CONST EFI_SMM_CPU_PROTOCOL *This,\r
286 IN UINTN Width,\r
287 IN EFI_SMM_SAVE_STATE_REGISTER Register,\r
288 IN UINTN CpuIndex,\r
289 IN CONST VOID *Buffer\r
290 )\r
291{\r
292 EFI_STATUS Status;\r
293\r
294 //\r
295 // Retrieve pointer to the specified CPU's SMM Save State buffer\r
296 //\r
297 if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {\r
298 return EFI_INVALID_PARAMETER;\r
299 }\r
300\r
301 //\r
302 // Writes to EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID are ignored\r
303 //\r
304 if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {\r
305 return EFI_SUCCESS;\r
306 }\r
307\r
308 if (!mSmmMpSyncData->CpuData[CpuIndex].Present) {\r
309 return EFI_INVALID_PARAMETER;\r
310 }\r
311\r
312 Status = SmmCpuFeaturesWriteSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
313 if (Status == EFI_UNSUPPORTED) {\r
314 Status = WriteSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
315 }\r
316 return Status;\r
317}\r
318\r
319\r
320/**\r
321 C function for the SMI handler that changes each processor's SMBASE register.\r
322\r
323**/\r
324VOID\r
325EFIAPI\r
326SmmInitHandler (\r
327 VOID\r
328 )\r
329{\r
330 UINT32 ApicId;\r
331 UINTN Index;\r
332\r
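  //
  // This handler runs on each CPU's SMBASE-relocation SMI (issued from
  // SmmRelocateBases() while the CPU still uses the default SMBASE). It
  // matches the local APIC ID against ProcessorInfo to find the CPU index,
  // lets SmmCpuFeaturesInitializeProcessor() program the new SMBASE from
  // mCpuHotPlugData, and then signals completion through mRebased[] via
  // SemaphoreHook().
  //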
333 //\r
334 // Update SMM IDT entries' code segment and load IDT\r
335 //\r
336 AsmWriteIdtr (&gcSmiIdtr);\r
337 ApicId = GetApicId ();\r
338\r
339 ASSERT (mNumberOfCpus <= PcdGet32 (PcdCpuMaxLogicalProcessorNumber));\r
340\r
341 for (Index = 0; Index < mNumberOfCpus; Index++) {\r
342 if (ApicId == (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {\r
343 //\r
344 // Initialize SMM specific features on the currently executing CPU\r
345 //\r
346 SmmCpuFeaturesInitializeProcessor (\r
347 Index,\r
348 mIsBsp,\r
349 gSmmCpuPrivate->ProcessorInfo,\r
350 &mCpuHotPlugData\r
351 );\r
352\r
353 if (mIsBsp) {\r
354 //\r
355 // BSP rebase is already done above.\r
356 // Initialize private data during S3 resume\r
357 //\r
358 InitializeMpSyncData ();\r
359 }\r
360\r
361 //\r
362 // Hook return after RSM to set SMM re-based flag\r
363 //\r
364 SemaphoreHook (Index, &mRebased[Index]);\r
365\r
366 return;\r
367 }\r
368 }\r
369 ASSERT (FALSE);\r
370}\r
371\r
372/**\r
373 Relocate SmmBases for each processor.\r
374\r
375 Executed during first boot and on all S3 resumes.\r
376\r
377**/\r
378VOID\r
379EFIAPI\r
380SmmRelocateBases (\r
381 VOID\r
382 )\r
383{\r
384 UINT8 BakBuf[BACK_BUF_SIZE];\r
385 SMRAM_SAVE_STATE_MAP BakBuf2;\r
386 SMRAM_SAVE_STATE_MAP *CpuStatePtr;\r
387 UINT8 *U8Ptr;\r
388 UINT32 ApicId;\r
389 UINTN Index;\r
390 UINTN BspIndex;\r
391\r
392 //\r
393 // Make sure the reserved size is large enough for procedure SmmInitTemplate.\r
394 //\r
395 ASSERT (sizeof (BakBuf) >= gcSmmInitSize);\r
396\r
397 //\r
398 // Patch ASM code template with current CR0, CR3, and CR4 values\r
399 //\r
400 gSmmCr0 = (UINT32)AsmReadCr0 ();\r
401 gSmmCr3 = (UINT32)AsmReadCr3 ();\r
402 gSmmCr4 = (UINT32)AsmReadCr4 ();\r
403\r
404 //\r
405 // Patch GDTR for SMM base relocation\r
406 //\r
407 gcSmiInitGdtr.Base = gcSmiGdtr.Base;\r
408 gcSmiInitGdtr.Limit = gcSmiGdtr.Limit;\r
409\r
410 U8Ptr = (UINT8*)(UINTN)(SMM_DEFAULT_SMBASE + SMM_HANDLER_OFFSET);\r
411 CpuStatePtr = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);\r
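  //
  // SMM_DEFAULT_SMBASE is the architectural default SMBASE (0x30000), so U8Ptr
  // points at the default SMI entry (0x38000) and CpuStatePtr at the default
  // save state map near the top of that 64 KB SMRAM window.
  //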
412\r
413 //\r
414 // Backup original contents at address 0x38000\r
415 //\r
416 CopyMem (BakBuf, U8Ptr, sizeof (BakBuf));\r
417 CopyMem (&BakBuf2, CpuStatePtr, sizeof (BakBuf2));\r
418\r
419 //\r
420 // Load image for relocation\r
421 //\r
422 CopyMem (U8Ptr, gcSmmInitTemplate, gcSmmInitSize);\r
423\r
424 //\r
425 // Retrieve the local APIC ID of current processor\r
426 //\r
427 ApicId = GetApicId ();\r
428\r
429 //\r
430 // Relocate SM bases for all APs\r
431 // This is APs' 1st SMI - rebase will be done here, and APs' default SMI handler will be overridden by gcSmmInitTemplate\r
432 //\r
433 mIsBsp = FALSE;\r
434 BspIndex = (UINTN)-1;\r
435 for (Index = 0; Index < mNumberOfCpus; Index++) {\r
436 mRebased[Index] = FALSE;\r
437 if (ApicId != (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {\r
438 SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);\r
439 //\r
440 // Wait for this AP to finish its 1st SMI\r
441 //\r
442 while (!mRebased[Index]);\r
443 } else {\r
444 //\r
445 // The BSP will be relocated later\r
446 //\r
447 BspIndex = Index;\r
448 }\r
449 }\r
450\r
451 //\r
452 // Relocate BSP's SMM base\r
453 //\r
454 ASSERT (BspIndex != (UINTN)-1);\r
455 mIsBsp = TRUE;\r
456 SendSmiIpi (ApicId);\r
457 //\r
458 // Wait for the BSP to finish its 1st SMI\r
459 //\r
460 while (!mRebased[BspIndex]);\r
461\r
462 //\r
463 // Restore contents at address 0x38000\r
464 //\r
465 CopyMem (CpuStatePtr, &BakBuf2, sizeof (BakBuf2));\r
466 CopyMem (U8Ptr, BakBuf, sizeof (BakBuf));\r
467}\r
468\r
469/**\r
470 Perform SMM initialization for all processors in the S3 boot path.\r
471\r
472 For a native platform, MP initialization in the S3 boot path is also performed in this function.\r
473**/\r
474VOID\r
475EFIAPI\r
476SmmRestoreCpu (\r
477 VOID\r
478 )\r
479{\r
480 SMM_S3_RESUME_STATE *SmmS3ResumeState;\r
481 IA32_DESCRIPTOR Ia32Idtr;\r
482 IA32_DESCRIPTOR X64Idtr;\r
483 IA32_IDT_GATE_DESCRIPTOR IdtEntryTable[EXCEPTION_VECTOR_NUMBER];\r
484 EFI_STATUS Status;\r
485\r
486 DEBUG ((EFI_D_INFO, "SmmRestoreCpu()\n"));\r
487\r
488 //\r
489 // See if there is enough context to resume PEI Phase\r
490 //\r
491 if (mSmmS3ResumeState == NULL) {\r
492 DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));\r
493 CpuDeadLoop ();\r
494 }\r
495\r
496 SmmS3ResumeState = mSmmS3ResumeState;\r
497 ASSERT (SmmS3ResumeState != NULL);\r
498\r
499 if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {\r
500 //\r
501 // Save the IA32 IDT Descriptor\r
502 //\r
503 AsmReadIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);\r
504\r
505 //\r
506 // Setup X64 IDT table\r
507 //\r
508 ZeroMem (IdtEntryTable, sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32);\r
509 X64Idtr.Base = (UINTN) IdtEntryTable;\r
510 X64Idtr.Limit = (UINT16) (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32 - 1);\r
511 AsmWriteIdtr ((IA32_DESCRIPTOR *) &X64Idtr);\r
512\r
513 //\r
514 // Setup the default exception handler\r
515 //\r
516 Status = InitializeCpuExceptionHandlers (NULL);\r
517 ASSERT_EFI_ERROR (Status);\r
518\r
519 //\r
520 // Initialize Debug Agent to support source level debug\r
521 //\r
522 InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64, (VOID *)&Ia32Idtr, NULL);\r
523 }\r
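  //
  // Note: in the SMM_S3_RESUME_SMM_64 case the PEI resume context is 32-bit,
  // so the IA32 IDT saved above is restored just before AsmDisablePaging64()
  // drops back to the 32-bit PEI environment later in this function.
  //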
524\r
525 //\r
526 // Skip initialization if mAcpiCpuData is not valid\r
527 //\r
528 if (mAcpiCpuData.NumberOfCpus > 0) {\r
529 //\r
530 // First time microcode load and restore MTRRs\r
531 //\r
532 EarlyInitializeCpu ();\r
533 }\r
534\r
535 //\r
536 // Restore SMBASE for BSP and all APs\r
537 //\r
538 SmmRelocateBases ();\r
539\r
540 //\r
541 // Skip initialization if mAcpiCpuData is not valid\r
542 //\r
543 if (mAcpiCpuData.NumberOfCpus > 0) {\r
544 //\r
545 // Restore MSRs for BSP and all APs\r
546 //\r
547 InitializeCpu ();\r
548 }\r
549\r
550 //\r
551 // Set a flag to restore SMM configuration in S3 path.\r
552 //\r
553 mRestoreSmmConfigurationInS3 = TRUE;\r
554\r
555 DEBUG (( EFI_D_INFO, "SMM S3 Return CS = %x\n", SmmS3ResumeState->ReturnCs));\r
556 DEBUG (( EFI_D_INFO, "SMM S3 Return Entry Point = %x\n", SmmS3ResumeState->ReturnEntryPoint));\r
557 DEBUG (( EFI_D_INFO, "SMM S3 Return Context1 = %x\n", SmmS3ResumeState->ReturnContext1));\r
558 DEBUG (( EFI_D_INFO, "SMM S3 Return Context2 = %x\n", SmmS3ResumeState->ReturnContext2));\r
559 DEBUG (( EFI_D_INFO, "SMM S3 Return Stack Pointer = %x\n", SmmS3ResumeState->ReturnStackPointer));\r
560\r
561 //\r
562 // If SMM is in 32-bit mode, then use SwitchStack() to resume PEI Phase\r
563 //\r
564 if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_32) {\r
565 DEBUG ((EFI_D_INFO, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));\r
566\r
567 SwitchStack (\r
568 (SWITCH_STACK_ENTRY_POINT)(UINTN)SmmS3ResumeState->ReturnEntryPoint,\r
569 (VOID *)(UINTN)SmmS3ResumeState->ReturnContext1,\r
570 (VOID *)(UINTN)SmmS3ResumeState->ReturnContext2,\r
571 (VOID *)(UINTN)SmmS3ResumeState->ReturnStackPointer\r
572 );\r
573 }\r
574\r
575 //\r
576 // If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase\r
577 //\r
578 if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {\r
579 DEBUG ((EFI_D_INFO, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));\r
580 //\r
581 // Disable the debug timer interrupt, since the new IDT table is for IA32 and will not work in long mode.\r
582 //\r
583 SaveAndSetDebugTimerInterrupt (FALSE);\r
584 //\r
585 // Restore IA32 IDT table\r
586 //\r
587 AsmWriteIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);\r
588 AsmDisablePaging64 (\r
589 SmmS3ResumeState->ReturnCs,\r
590 (UINT32)SmmS3ResumeState->ReturnEntryPoint,\r
591 (UINT32)SmmS3ResumeState->ReturnContext1,\r
592 (UINT32)SmmS3ResumeState->ReturnContext2,\r
593 (UINT32)SmmS3ResumeState->ReturnStackPointer\r
594 );\r
595 }\r
596\r
597 //\r
598 // Can not resume PEI Phase\r
599 //\r
600 DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));\r
601 CpuDeadLoop ();\r
602}\r
603\r
604/**\r
605 Copy register table from ACPI NVS memory into SMRAM.\r
606\r
607 @param[in] DestinationRegisterTableList Points to destination register table.\r
608 @param[in] SourceRegisterTableList Points to source register table.\r
609 @param[in] NumberOfCpus Number of CPUs.\r
610\r
611**/\r
612VOID\r
613CopyRegisterTable (\r
614 IN CPU_REGISTER_TABLE *DestinationRegisterTableList,\r
615 IN CPU_REGISTER_TABLE *SourceRegisterTableList,\r
616 IN UINT32 NumberOfCpus\r
617 )\r
618{\r
619 UINTN Index;\r
620 UINTN Index1;\r
621 CPU_REGISTER_TABLE_ENTRY *RegisterTableEntry;\r
622\r
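  //
  // Deep-copy the per-CPU register tables: first the array of CPU_REGISTER_TABLE
  // headers, then each table's RegisterTableEntry buffer, so the S3 register
  // data resides entirely in SMRAM-allocated pools.
  //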
623 CopyMem (DestinationRegisterTableList, SourceRegisterTableList, NumberOfCpus * sizeof (CPU_REGISTER_TABLE));\r
624 for (Index = 0; Index < NumberOfCpus; Index++) {\r
625 DestinationRegisterTableList[Index].RegisterTableEntry = AllocatePool (DestinationRegisterTableList[Index].AllocatedSize);\r
626 ASSERT (DestinationRegisterTableList[Index].RegisterTableEntry != NULL);\r
627 CopyMem (DestinationRegisterTableList[Index].RegisterTableEntry, SourceRegisterTableList[Index].RegisterTableEntry, DestinationRegisterTableList[Index].AllocatedSize);\r
628 //\r
629 // Go through all MSRs in the register table to initialize the MSR spin locks\r
630 //\r
631 RegisterTableEntry = DestinationRegisterTableList[Index].RegisterTableEntry;\r
632 for (Index1 = 0; Index1 < DestinationRegisterTableList[Index].TableLength; Index1++, RegisterTableEntry++) {\r
633 if ((RegisterTableEntry->RegisterType == Msr) && (RegisterTableEntry->ValidBitLength < 64)) {\r
634 //\r
635 // Initialize an MSR spin lock only for those MSRs that need bit-field writes\r
636 //\r
637 InitMsrSpinLockByIndex (RegisterTableEntry->Index);\r
638 }\r
639 }\r
640 }\r
641}\r
642\r
643/**\r
644 SMM Ready To Lock event notification handler.\r
645\r
646 The CPU S3 data is copied to SMRAM for security and mSmmReadyToLock is set to\r
647 perform additional lock actions that must be performed from SMM on the next SMI.\r
648\r
649 @param[in] Protocol Points to the protocol's unique identifier.\r
650 @param[in] Interface Points to the interface instance.\r
651 @param[in] Handle The handle on which the interface was installed.\r
652\r
653 @retval EFI_SUCCESS Notification handler runs successfully.\r
654 **/\r
655EFI_STATUS\r
656EFIAPI\r
657SmmReadyToLockEventNotify (\r
658 IN CONST EFI_GUID *Protocol,\r
659 IN VOID *Interface,\r
660 IN EFI_HANDLE Handle\r
661 )\r
662{\r
663 ACPI_CPU_DATA *AcpiCpuData;\r
664 IA32_DESCRIPTOR *Gdtr;\r
665 IA32_DESCRIPTOR *Idtr;\r
666\r
667 //\r
668 // Prevent use of mAcpiCpuData by initializing NumberOfCpus to 0\r
669 //\r
670 mAcpiCpuData.NumberOfCpus = 0;\r
671\r
672 //\r
673 // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM\r
674 //\r
675 AcpiCpuData = (ACPI_CPU_DATA *)(UINTN)PcdGet64 (PcdCpuS3DataAddress);\r
676 if (AcpiCpuData == NULL) {\r
677 goto Done;\r
678 }\r
679\r
680 //\r
681 // For a native platform, copy the CPU S3 data into SMRAM for use on CPU S3 Resume.\r
682 //\r
683 CopyMem (&mAcpiCpuData, AcpiCpuData, sizeof (mAcpiCpuData));\r
684\r
685 mAcpiCpuData.MtrrTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (MTRR_SETTINGS));\r
686 ASSERT (mAcpiCpuData.MtrrTable != 0);\r
687\r
688 CopyMem ((VOID *)(UINTN)mAcpiCpuData.MtrrTable, (VOID *)(UINTN)AcpiCpuData->MtrrTable, sizeof (MTRR_SETTINGS));\r
689\r
690 mAcpiCpuData.GdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));\r
691 ASSERT (mAcpiCpuData.GdtrProfile != 0);\r
692\r
693 CopyMem ((VOID *)(UINTN)mAcpiCpuData.GdtrProfile, (VOID *)(UINTN)AcpiCpuData->GdtrProfile, sizeof (IA32_DESCRIPTOR));\r
694\r
695 mAcpiCpuData.IdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));\r
696 ASSERT (mAcpiCpuData.IdtrProfile != 0);\r
697\r
698 CopyMem ((VOID *)(UINTN)mAcpiCpuData.IdtrProfile, (VOID *)(UINTN)AcpiCpuData->IdtrProfile, sizeof (IA32_DESCRIPTOR));\r
699\r
700 mAcpiCpuData.PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));\r
701 ASSERT (mAcpiCpuData.PreSmmInitRegisterTable != 0);\r
702\r
703 CopyRegisterTable (\r
704 (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable,\r
705 (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->PreSmmInitRegisterTable,\r
706 mAcpiCpuData.NumberOfCpus\r
707 );\r
708\r
709 mAcpiCpuData.RegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));\r
710 ASSERT (mAcpiCpuData.RegisterTable != 0);\r
711\r
712 CopyRegisterTable (\r
713 (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable,\r
714 (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->RegisterTable,\r
715 mAcpiCpuData.NumberOfCpus\r
716 );\r
717\r
718 //\r
719 // Copy AP's GDT, IDT and Machine Check handler into SMRAM.\r
720 //\r
721 Gdtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.GdtrProfile;\r
722 Idtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.IdtrProfile;\r
723\r
724 mGdtForAp = AllocatePool ((Gdtr->Limit + 1) + (Idtr->Limit + 1) + mAcpiCpuData.ApMachineCheckHandlerSize);\r
725 ASSERT (mGdtForAp != NULL);\r
726 mIdtForAp = (VOID *) ((UINTN)mGdtForAp + (Gdtr->Limit + 1));\r
727 mMachineCheckHandlerForAp = (VOID *) ((UINTN)mIdtForAp + (Idtr->Limit + 1));\r
728\r
729 CopyMem (mGdtForAp, (VOID *)Gdtr->Base, Gdtr->Limit + 1);\r
730 CopyMem (mIdtForAp, (VOID *)Idtr->Base, Idtr->Limit + 1);\r
731 CopyMem (mMachineCheckHandlerForAp, (VOID *)(UINTN)mAcpiCpuData.ApMachineCheckHandlerBase, mAcpiCpuData.ApMachineCheckHandlerSize);\r
732\r
733Done:\r
734 //\r
735 // Set SMM ready to lock flag and return\r
736 //\r
737 mSmmReadyToLock = TRUE;\r
738 return EFI_SUCCESS;\r
739}\r
740\r
741/**\r
742 The module Entry Point of the CPU SMM driver.\r
743\r
744 @param ImageHandle The firmware allocated handle for the EFI image.\r
745 @param SystemTable A pointer to the EFI System Table.\r
746\r
747 @retval EFI_SUCCESS The entry point is executed successfully.\r
748 @retval Other Some error occurs when executing this entry point.\r
749\r
750**/\r
751EFI_STATUS\r
752EFIAPI\r
753PiCpuSmmEntry (\r
754 IN EFI_HANDLE ImageHandle,\r
755 IN EFI_SYSTEM_TABLE *SystemTable\r
756 )\r
757{\r
758 EFI_STATUS Status;\r
759 EFI_MP_SERVICES_PROTOCOL *MpServices;\r
760 UINTN NumberOfEnabledProcessors;\r
761 UINTN Index;\r
762 VOID *Buffer;\r
763 UINTN TileSize;\r
764 VOID *GuidHob;\r
765 EFI_SMRAM_DESCRIPTOR *SmramDescriptor;\r
766 SMM_S3_RESUME_STATE *SmmS3ResumeState;\r
767 UINT8 *Stacks;\r
768 VOID *Registration;\r
769 UINT32 RegEax;\r
770 UINT32 RegEdx;\r
771 UINTN FamilyId;\r
772 UINTN ModelId;\r
773 UINT32 Cr3;\r
774\r
775 //\r
776 // Initialize Debug Agent to support source level debug in SMM code\r
777 //\r
778 InitializeDebugAgent (DEBUG_AGENT_INIT_SMM, NULL, NULL);\r
779\r
780 //\r
781 // Report the start of CPU SMM initialization.\r
782 //\r
783 REPORT_STATUS_CODE (\r
784 EFI_PROGRESS_CODE,\r
785 EFI_COMPUTING_UNIT_HOST_PROCESSOR | EFI_CU_HP_PC_SMM_INIT\r
786 );\r
787\r
788 //\r
789 // Fix segment address of the long-mode-switch jump\r
790 //\r
791 if (sizeof (UINTN) == sizeof (UINT64)) {\r
792 gSmmJmpAddr.Segment = LONG_MODE_CODE_SEGMENT;\r
793 }\r
794\r
795 //\r
796 // Find out SMRR Base and SMRR Size\r
797 //\r
798 FindSmramInfo (&mCpuHotPlugData.SmrrBase, &mCpuHotPlugData.SmrrSize);\r
799\r
800 //\r
801 // Get MP Services Protocol\r
802 //\r
803 Status = SystemTable->BootServices->LocateProtocol (&gEfiMpServiceProtocolGuid, NULL, (VOID **)&MpServices);\r
804 ASSERT_EFI_ERROR (Status);\r
805\r
806 //\r
807 // Use MP Services Protocol to retrieve the number of processors and number of enabled processors\r
808 //\r
809 Status = MpServices->GetNumberOfProcessors (MpServices, &mNumberOfCpus, &NumberOfEnabledProcessors);\r
810 ASSERT_EFI_ERROR (Status);\r
811 ASSERT (mNumberOfCpus <= PcdGet32 (PcdCpuMaxLogicalProcessorNumber));\r
812\r
813 //\r
814 // If CPU hot plug is supported, PcdCpuSmmEnableBspElection should be set to TRUE.\r
815 // A fixed BSP index makes no sense because that CPU may be hot removed.\r
816 //\r
817 DEBUG_CODE (\r
818 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {\r
819\r
820 ASSERT (FeaturePcdGet (PcdCpuSmmEnableBspElection));\r
821 }\r
822 );\r
823\r
824 //\r
825 // Save the PcdCpuSmmCodeAccessCheckEnable value into a global variable.\r
826 //\r
827 mSmmCodeAccessCheckEnable = PcdGetBool (PcdCpuSmmCodeAccessCheckEnable);\r
828 DEBUG ((EFI_D_INFO, "PcdCpuSmmCodeAccessCheckEnable = %d\n", mSmmCodeAccessCheckEnable));\r
829\r
830 //\r
831 // If CPU hot plug is supported, allocate resources for processors that may be hot added\r
832 //\r
833 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {\r
834 mMaxNumberOfCpus = PcdGet32 (PcdCpuMaxLogicalProcessorNumber);\r
835 } else {\r
836 mMaxNumberOfCpus = mNumberOfCpus;\r
837 }\r
838 gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus = mMaxNumberOfCpus;\r
839\r
840 //\r
841 // The CPU save state and the code for the SMI entry point are tiled within an SMRAM\r
842 // allocated buffer. The minimum size of this buffer for a uniprocessor system\r
843 // is 32 KB, because the entry point is at SMBASE + 32 KB, and the CPU save state area\r
844 // is just below SMBASE + 64 KB. If more than one CPU is present in the platform,\r
845 // then the SMI entry point and the CPU save state areas can be tiled to minimize\r
846 // the total amount of SMRAM required for all the CPUs. The tile size can be computed\r
847 // by adding the CPU save state size, any extra CPU specific context, and\r
848 // the size of code that must be placed at the SMI entry point to transfer\r
849 // control to a C function in the native SMM execution mode. This size is\r
850 // rounded up to the nearest power of 2 to give the tile size for each CPU.\r
851 // The total amount of memory required is the maximum number of CPUs that the\r
852 // platform supports times the tile size. The picture below shows the tiling,\r
853 // where m is the number of tiles that fit in 32 KB.\r
854 //\r
855 // +-----------------------------+ <-- 2^n offset from Base of allocated buffer\r
856 // | CPU m+1 Save State |\r
857 // +-----------------------------+\r
858 // | CPU m+1 Extra Data |\r
859 // +-----------------------------+\r
860 // | Padding |\r
861 // +-----------------------------+\r
862 // | CPU 2m SMI Entry |\r
863 // +#############################+ <-- Base of allocated buffer + 64 KB\r
864 // | CPU m-1 Save State |\r
865 // +-----------------------------+\r
866 // | CPU m-1 Extra Data |\r
867 // +-----------------------------+\r
868 // | Padding |\r
869 // +-----------------------------+\r
870 // | CPU 2m-1 SMI Entry |\r
871 // +=============================+ <-- 2^n offset from Base of allocated buffer\r
872 // | . . . . . . . . . . . . |\r
873 // +=============================+ <-- 2^n offset from Base of allocated buffer\r
874 // | CPU 2 Save State |\r
875 // +-----------------------------+\r
876 // | CPU 2 Extra Data |\r
877 // +-----------------------------+\r
878 // | Padding |\r
879 // +-----------------------------+\r
880 // | CPU m+1 SMI Entry |\r
881 // +=============================+ <-- Base of allocated buffer + 32 KB\r
882 // | CPU 1 Save State |\r
883 // +-----------------------------+\r
884 // | CPU 1 Extra Data |\r
885 // +-----------------------------+\r
886 // | Padding |\r
887 // +-----------------------------+\r
888 // | CPU m SMI Entry |\r
889 // +#############################+ <-- Base of allocated buffer + 32 KB == CPU 0 SMBASE + 64 KB\r
890 // | CPU 0 Save State |\r
891 // +-----------------------------+\r
892 // | CPU 0 Extra Data |\r
893 // +-----------------------------+\r
894 // | Padding |\r
895 // +-----------------------------+\r
896 // | CPU m-1 SMI Entry |\r
897 // +=============================+ <-- 2^n offset from Base of allocated buffer\r
898 // | . . . . . . . . . . . . |\r
899 // +=============================+ <-- 2^n offset from Base of allocated buffer\r
900 // | Padding |\r
901 // +-----------------------------+\r
902 // | CPU 1 SMI Entry |\r
903 // +=============================+ <-- 2^n offset from Base of allocated buffer\r
904 // | Padding |\r
905 // +-----------------------------+\r
906 // | CPU 0 SMI Entry |\r
907 // +#############################+ <-- Base of allocated buffer == CPU 0 SMBASE + 32 KB\r
908 //\r
909\r
910 //\r
911 // Retrieve CPU Family\r
912 //\r
913 AsmCpuid (CPUID_VERSION_INFO, &RegEax, NULL, NULL, &RegEdx);\r
914 FamilyId = (RegEax >> 8) & 0xf;\r
915 ModelId = (RegEax >> 4) & 0xf;\r
916 if (FamilyId == 0x06 || FamilyId == 0x0f) {\r
917 ModelId = ModelId | ((RegEax >> 12) & 0xf0);\r
918 }\r
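  //
  // FamilyId is CPUID.01h:EAX[11:8] and ModelId is EAX[7:4]; for families 0x6
  // and 0xF the extended model bits EAX[19:16] are folded into the high nibble
  // of ModelId, per the CPUID signature encoding.
  //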
919\r
920 //\r
921 // Determine the mode of the CPU at the time an SMI occurs\r
922 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual\r
923 // Volume 3C, Section 34.4.1.1\r
924 //\r
925 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT;\r
926 if ((RegEdx & BIT29) != 0) {\r
927 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;\r
928 }\r
929 if (FamilyId == 0x06) {\r
930 if (ModelId == 0x17 || ModelId == 0x0f || ModelId == 0x1c) {\r
931 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;\r
932 }\r
933 }\r
934\r
935 //\r
936 // Compute tile size of buffer required to hold the CPU SMRAM Save State Map, extra CPU\r
937 // specific context in a PROCESSOR_SMM_DESCRIPTOR, and the SMI entry point. This size\r
938 // is rounded up to nearest power of 2.\r
939 //\r
940 TileSize = sizeof (SMRAM_SAVE_STATE_MAP) + sizeof (PROCESSOR_SMM_DESCRIPTOR) + GetSmiHandlerSize () - 1;\r
941 TileSize = 2 * GetPowerOfTwo32 ((UINT32)TileSize);\r
942 DEBUG ((EFI_D_INFO, "SMRAM TileSize = %08x\n", TileSize));\r
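  //
  // Subtracting 1 and then doubling the highest set bit rounds the combined size
  // up to the next power of two. For example (hypothetical sizes), a combined
  // size of 0x2400 bytes gives GetPowerOfTwo32 (0x23FF) = 0x2000, so TileSize
  // becomes 0x4000.
  //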
943\r
944 //\r
945 // If the TileSize is larger than the space available for the SMI Handler of CPU[i],\r
946 // the PROCESSOR_SMM_DESCRIPTOR of CPU[i+1], and the SMRAM Save State Map of CPU[i+1],\r
947 // then ASSERT(). If this ASSERT() is triggered, the SMI Handler size must be\r
948 // reduced.\r
949 //\r
950 ASSERT (TileSize <= (SMRAM_SAVE_STATE_MAP_OFFSET + sizeof (SMRAM_SAVE_STATE_MAP) - SMM_HANDLER_OFFSET));\r
951\r
952 //\r
953 // Allocate buffer for all of the tiles.\r
954 //\r
955 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual\r
956 // Volume 3C, Section 34.11 SMBASE Relocation\r
957 // For Pentium and Intel486 processors, the SMBASE values must be\r
958 // aligned on a 32-KByte boundary or the processor will enter shutdown\r
959 // state during the execution of a RSM instruction.\r
960 //\r
961 // Intel486 processors: FamilyId is 4\r
962 // Pentium processors : FamilyId is 5\r
963 //\r
964 if ((FamilyId == 4) || (FamilyId == 5)) {\r
965 Buffer = AllocateAlignedPages (EFI_SIZE_TO_PAGES (SIZE_32KB + TileSize * (mMaxNumberOfCpus - 1)), SIZE_32KB);\r
966 } else {\r
967 Buffer = AllocatePages (EFI_SIZE_TO_PAGES (SIZE_32KB + TileSize * (mMaxNumberOfCpus - 1)));\r
968 }\r
969 ASSERT (Buffer != NULL);\r
970\r
971 //\r
972 // Allocate buffer for pointers to array in SMM_CPU_PRIVATE_DATA.\r
973 //\r
974 gSmmCpuPrivate->ProcessorInfo = (EFI_PROCESSOR_INFORMATION *)AllocatePool (sizeof (EFI_PROCESSOR_INFORMATION) * mMaxNumberOfCpus);\r
975 ASSERT (gSmmCpuPrivate->ProcessorInfo != NULL);\r
976\r
977 gSmmCpuPrivate->Operation = (SMM_CPU_OPERATION *)AllocatePool (sizeof (SMM_CPU_OPERATION) * mMaxNumberOfCpus);\r
978 ASSERT (gSmmCpuPrivate->Operation != NULL);\r
979\r
980 gSmmCpuPrivate->CpuSaveStateSize = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);\r
981 ASSERT (gSmmCpuPrivate->CpuSaveStateSize != NULL);\r
982\r
983 gSmmCpuPrivate->CpuSaveState = (VOID **)AllocatePool (sizeof (VOID *) * mMaxNumberOfCpus);\r
984 ASSERT (gSmmCpuPrivate->CpuSaveState != NULL);\r
985\r
986 mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveStateSize = gSmmCpuPrivate->CpuSaveStateSize;\r
987 mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveState = gSmmCpuPrivate->CpuSaveState;\r
988\r
989 //\r
990 // Allocate buffer for pointers to array in CPU_HOT_PLUG_DATA.\r
991 //\r
992 mCpuHotPlugData.ApicId = (UINT64 *)AllocatePool (sizeof (UINT64) * mMaxNumberOfCpus);\r
993 ASSERT (mCpuHotPlugData.ApicId != NULL);\r
994 mCpuHotPlugData.SmBase = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);\r
995 ASSERT (mCpuHotPlugData.SmBase != NULL);\r
996 mCpuHotPlugData.ArrayLength = (UINT32)mMaxNumberOfCpus;\r
997\r
998 //\r
999 // Retrieve APIC ID of each enabled processor from the MP Services protocol.\r
1000 // Also compute the SMBASE address, CPU Save State address, and CPU Save state\r
1001 // size for each CPU in the platform\r
1002 //\r
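  //
  // SmBase[Index] is chosen so that the SMI entry code for CPU[Index], fetched
  // from SMBASE + SMM_HANDLER_OFFSET (SMBASE + 32 KB), lands at the start of that
  // CPU's tile inside the allocated buffer, matching the layout pictured above.
  //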
1003 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {\r
1004 mCpuHotPlugData.SmBase[Index] = (UINTN)Buffer + Index * TileSize - SMM_HANDLER_OFFSET;\r
1005 gSmmCpuPrivate->CpuSaveStateSize[Index] = sizeof(SMRAM_SAVE_STATE_MAP);\r
1006 gSmmCpuPrivate->CpuSaveState[Index] = (VOID *)(mCpuHotPlugData.SmBase[Index] + SMRAM_SAVE_STATE_MAP_OFFSET);\r
1007 gSmmCpuPrivate->Operation[Index] = SmmCpuNone;\r
1008\r
1009 if (Index < mNumberOfCpus) {\r
1010 Status = MpServices->GetProcessorInfo (MpServices, Index, &gSmmCpuPrivate->ProcessorInfo[Index]);\r
1011 ASSERT_EFI_ERROR (Status);\r
1012 mCpuHotPlugData.ApicId[Index] = gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId;\r
1013\r
1014 DEBUG ((EFI_D_INFO, "CPU[%03x] APIC ID=%04x SMBASE=%08x SaveState=%08x Size=%08x\n",\r
1015 Index,\r
1016 (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId,\r
1017 mCpuHotPlugData.SmBase[Index],\r
1018 gSmmCpuPrivate->CpuSaveState[Index],\r
1019 gSmmCpuPrivate->CpuSaveStateSize[Index]\r
1020 ));\r
1021 } else {\r
1022 gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId = INVALID_APIC_ID;\r
1023 mCpuHotPlugData.ApicId[Index] = INVALID_APIC_ID;\r
1024 }\r
1025 }\r
1026\r
1027 //\r
1028 // Allocate SMI stacks for all processors.\r
1029 //\r
1030 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
1031 //\r
1032 // Two more pages are allocated for each processor:\r
1033 // one is a guard page and the other is a known-good stack.\r
1034 //\r
1035 // +-------------------------------------------+-----+-------------------------------------------+\r
1036 // | Known Good Stack | Guard Page | SMM Stack | ... | Known Good Stack | Guard Page | SMM Stack |\r
1037 // +-------------------------------------------+-----+-------------------------------------------+\r
1038 // | | | |\r
1039 // |<-------------- Processor 0 -------------->| |<-------------- Processor n -------------->|\r
1040 //\r
1041 mSmmStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)) + 2);\r
1042 Stacks = (UINT8 *) AllocatePages (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)) + 2));\r
1043 ASSERT (Stacks != NULL);\r
1044 mSmmStackArrayBase = (UINTN)Stacks;\r
1045 mSmmStackArrayEnd = mSmmStackArrayBase + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * mSmmStackSize - 1;\r
1046 } else {\r
1047 mSmmStackSize = PcdGet32 (PcdCpuSmmStackSize);\r
1048 Stacks = (UINT8 *) AllocatePages (EFI_SIZE_TO_PAGES (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * mSmmStackSize));\r
1049 ASSERT (Stacks != NULL);\r
1050 }\r
1051\r
1052 //\r
1053 // Set SMI stack for SMM base relocation\r
1054 //\r
1055 gSmmInitStack = (UINTN) (Stacks + mSmmStackSize - sizeof (UINTN));\r
1056\r
1057 //\r
1058 // Initialize IDT\r
1059 //\r
1060 InitializeSmmIdt ();\r
1061\r
1062 //\r
1063 // Relocate SMM Base addresses to the ones allocated from SMRAM\r
1064 //\r
1065 mRebased = (BOOLEAN *)AllocateZeroPool (sizeof (BOOLEAN) * mMaxNumberOfCpus);\r
1066 ASSERT (mRebased != NULL);\r
1067 SmmRelocateBases ();\r
1068\r
1069 //\r
1070 // Call hook for BSP to perform extra actions in normal mode after all\r
1071 // SMM base addresses have been relocated on all CPUs\r
1072 //\r
1073 SmmCpuFeaturesSmmRelocationComplete ();\r
1074\r
1075 //\r
1076 // SMM Time initialization\r
1077 //\r
1078 InitializeSmmTimer ();\r
1079\r
1080 //\r
1081 // Initialize MP globals\r
1082 //\r
1083 Cr3 = InitializeMpServiceData (Stacks, mSmmStackSize);\r
1084\r
1085 //\r
1086 // Fill in SMM Reserved Regions\r
1087 //\r
1088 gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedStart = 0;\r
1089 gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedSize = 0;\r
1090\r
1091 //\r
1092 // Install the SMM Configuration Protocol onto a new handle on the handle database.\r
1093 // The entire SMM Configuration Protocol is allocated from SMRAM, so only a pointer\r
1094 // to an SMRAM address will be present in the handle database\r
1095 //\r
1096 Status = SystemTable->BootServices->InstallMultipleProtocolInterfaces (\r
1097 &gSmmCpuPrivate->SmmCpuHandle,\r
1098 &gEfiSmmConfigurationProtocolGuid, &gSmmCpuPrivate->SmmConfiguration,\r
1099 NULL\r
1100 );\r
1101 ASSERT_EFI_ERROR (Status);\r
1102\r
1103 //\r
1104 // Install the SMM CPU Protocol into SMM protocol database\r
1105 //\r
1106 Status = gSmst->SmmInstallProtocolInterface (\r
1107 &mSmmCpuHandle,\r
1108 &gEfiSmmCpuProtocolGuid,\r
1109 EFI_NATIVE_INTERFACE,\r
1110 &mSmmCpu\r
1111 );\r
1112 ASSERT_EFI_ERROR (Status);\r
1113\r
1114 //\r
1115 // Expose address of CPU Hot Plug Data structure if CPU hot plug is supported.\r
1116 //\r
1117 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {\r
1118 Status = PcdSet64S (PcdCpuHotPlugDataAddress, (UINT64)(UINTN)&mCpuHotPlugData);\r
1119 ASSERT_EFI_ERROR (Status);\r
1120 }\r
1121\r
1122 //\r
1123 // Initialize SMM CPU Services Support\r
1124 //\r
1125 Status = InitializeSmmCpuServices (mSmmCpuHandle);\r
1126 ASSERT_EFI_ERROR (Status);\r
1127\r
1128 //\r
1129 // Register SMM Ready To Lock Protocol notification\r
1130 //\r
1131 Status = gSmst->SmmRegisterProtocolNotify (\r
1132 &gEfiSmmReadyToLockProtocolGuid,\r
1133 SmmReadyToLockEventNotify,\r
1134 &Registration\r
1135 );\r
1136 ASSERT_EFI_ERROR (Status);\r
1137\r
1138 GuidHob = GetFirstGuidHob (&gEfiAcpiVariableGuid);\r
1139 if (GuidHob != NULL) {\r
1140 SmramDescriptor = (EFI_SMRAM_DESCRIPTOR *) GET_GUID_HOB_DATA (GuidHob);\r
1141\r
1142 DEBUG ((EFI_D_INFO, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor));\r
1143 DEBUG ((EFI_D_INFO, "SMM S3 Structure = %x\n", SmramDescriptor->CpuStart));\r
1144\r
1145 SmmS3ResumeState = (SMM_S3_RESUME_STATE *)(UINTN)SmramDescriptor->CpuStart;\r
1146 ZeroMem (SmmS3ResumeState, sizeof (SMM_S3_RESUME_STATE));\r
1147\r
1148 mSmmS3ResumeState = SmmS3ResumeState;\r
1149 SmmS3ResumeState->Smst = (EFI_PHYSICAL_ADDRESS)(UINTN)gSmst;\r
1150\r
1151 SmmS3ResumeState->SmmS3ResumeEntryPoint = (EFI_PHYSICAL_ADDRESS)(UINTN)SmmRestoreCpu;\r
1152\r
1153 SmmS3ResumeState->SmmS3StackSize = SIZE_32KB;\r
1154 SmmS3ResumeState->SmmS3StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN)SmmS3ResumeState->SmmS3StackSize));\r
1155 if (SmmS3ResumeState->SmmS3StackBase == 0) {\r
1156 SmmS3ResumeState->SmmS3StackSize = 0;\r
1157 }\r
1158\r
1159 SmmS3ResumeState->SmmS3Cr0 = gSmmCr0;\r
1160 SmmS3ResumeState->SmmS3Cr3 = Cr3;\r
1161 SmmS3ResumeState->SmmS3Cr4 = gSmmCr4;\r
1162\r
1163 if (sizeof (UINTN) == sizeof (UINT64)) {\r
1164 SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_64;\r
1165 }\r
1166 if (sizeof (UINTN) == sizeof (UINT32)) {\r
1167 SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_32;\r
1168 }\r
1169 }\r
1170\r
1171 //\r
1172 // Check XD and BTS features\r
1173 //\r
1174 CheckProcessorFeature ();\r
1175\r
1176 //\r
1177 // Initialize SMM Profile feature\r
1178 //\r
1179 InitSmmProfile (Cr3);\r
1180\r
1181 //\r
1182 // Patch SmmS3ResumeState->SmmS3Cr3\r
1183 //\r
1184 InitSmmS3Cr3 ();\r
1185\r
1186 DEBUG ((EFI_D_INFO, "SMM CPU Module exit from SMRAM with EFI_SUCCESS\n"));\r
1187\r
1188 return EFI_SUCCESS;\r
1189}\r
1190\r
1191/**\r
1192\r
1193 Find out SMRAM information including SMRR base and SMRR size.\r
1194\r
1195 @param SmrrBase SMRR base\r
1196 @param SmrrSize SMRR size\r
1197\r
1198**/\r
1199VOID\r
1200FindSmramInfo (\r
1201 OUT UINT32 *SmrrBase,\r
1202 OUT UINT32 *SmrrSize\r
1203 )\r
1204{\r
1205 EFI_STATUS Status;\r
1206 UINTN Size;\r
1207 EFI_SMM_ACCESS2_PROTOCOL *SmmAccess;\r
1208 EFI_SMRAM_DESCRIPTOR *CurrentSmramRange;\r
1209 EFI_SMRAM_DESCRIPTOR *SmramRanges;\r
1210 UINTN SmramRangeCount;\r
1211 UINTN Index;\r
1212 UINT64 MaxSize;\r
1213 BOOLEAN Found;\r
1214\r
1215 //\r
1216 // Get SMM Access Protocol\r
1217 //\r
1218 Status = gBS->LocateProtocol (&gEfiSmmAccess2ProtocolGuid, NULL, (VOID **)&SmmAccess);\r
1219 ASSERT_EFI_ERROR (Status);\r
1220\r
1221 //\r
1222 // Get SMRAM information\r
1223 //\r
1224 Size = 0;\r
1225 Status = SmmAccess->GetCapabilities (SmmAccess, &Size, NULL);\r
1226 ASSERT (Status == EFI_BUFFER_TOO_SMALL);\r
1227\r
1228 SmramRanges = (EFI_SMRAM_DESCRIPTOR *)AllocatePool (Size);\r
1229 ASSERT (SmramRanges != NULL);\r
1230\r
1231 Status = SmmAccess->GetCapabilities (SmmAccess, &Size, SmramRanges);\r
1232 ASSERT_EFI_ERROR (Status);\r
1233\r
1234 SmramRangeCount = Size / sizeof (EFI_SMRAM_DESCRIPTOR);\r
1235\r
1236 //\r
1237 // Find the largest SMRAM range between 1MB and 4GB that is at least 256K - 4K in size\r
1238 //\r
1239 CurrentSmramRange = NULL;\r
1240 for (Index = 0, MaxSize = SIZE_256KB - EFI_PAGE_SIZE; Index < SmramRangeCount; Index++) {\r
1241 //\r
1242 // Skip any SMRAM region that is already allocated, needs testing, or needs ECC initialization\r
1243 //\r
1244 if ((SmramRanges[Index].RegionState & (EFI_ALLOCATED | EFI_NEEDS_TESTING | EFI_NEEDS_ECC_INITIALIZATION)) != 0) {\r
1245 continue;\r
1246 }\r
1247\r
1248 if (SmramRanges[Index].CpuStart >= BASE_1MB) {\r
1249 if ((SmramRanges[Index].CpuStart + SmramRanges[Index].PhysicalSize) <= BASE_4GB) {\r
1250 if (SmramRanges[Index].PhysicalSize >= MaxSize) {\r
1251 MaxSize = SmramRanges[Index].PhysicalSize;\r
1252 CurrentSmramRange = &SmramRanges[Index];\r
1253 }\r
1254 }\r
1255 }\r
1256 }\r
1257\r
1258 ASSERT (CurrentSmramRange != NULL);\r
1259\r
1260 *SmrrBase = (UINT32)CurrentSmramRange->CpuStart;\r
1261 *SmrrSize = (UINT32)CurrentSmramRange->PhysicalSize;\r
1262\r
1263 do {\r
1264 Found = FALSE;\r
1265 for (Index = 0; Index < SmramRangeCount; Index++) {\r
1266 if (SmramRanges[Index].CpuStart < *SmrrBase && *SmrrBase == (SmramRanges[Index].CpuStart + SmramRanges[Index].PhysicalSize)) {\r
1267 *SmrrBase = (UINT32)SmramRanges[Index].CpuStart;\r
1268 *SmrrSize = (UINT32)(*SmrrSize + SmramRanges[Index].PhysicalSize);\r
1269 Found = TRUE;\r
1270 } else if ((*SmrrBase + *SmrrSize) == SmramRanges[Index].CpuStart && SmramRanges[Index].PhysicalSize > 0) {\r
1271 *SmrrSize = (UINT32)(*SmrrSize + SmramRanges[Index].PhysicalSize);\r
1272 Found = TRUE;\r
1273 }\r
1274 }\r
1275 } while (Found);\r
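  //
  // The loop above repeatedly merges any SMRAM descriptor that is immediately
  // below or immediately above the current [SmrrBase, SmrrBase + SmrrSize)
  // range, so the reported SMRR region covers one contiguous block of SMRAM.
  //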
1276\r
1277 DEBUG ((EFI_D_INFO, "SMRR Base: 0x%x, SMRR Size: 0x%x\n", *SmrrBase, *SmrrSize));\r
1278}\r
1279\r
1280/**\r
1281Configure SMM Code Access Check feature on an AP.\r
1282SMM Feature Control MSR will be locked after configuration.\r
1283\r
1284@param[in,out] Buffer Pointer to private data buffer.\r
1285**/\r
1286VOID\r
1287EFIAPI\r
1288ConfigSmmCodeAccessCheckOnCurrentProcessor (\r
1289 IN OUT VOID *Buffer\r
1290 )\r
1291{\r
1292 UINTN CpuIndex;\r
1293 UINT64 SmmFeatureControlMsr;\r
1294 UINT64 NewSmmFeatureControlMsr;\r
1295\r
1296 //\r
1297 // Retrieve the CPU Index from the context passed in\r
1298 //\r
1299 CpuIndex = *(UINTN *)Buffer;\r
1300\r
1301 //\r
1302 // Get the current SMM Feature Control MSR value\r
1303 //\r
1304 SmmFeatureControlMsr = SmmCpuFeaturesGetSmmRegister (CpuIndex, SmmRegFeatureControl);\r
1305\r
1306 //\r
1307 // Compute the new SMM Feature Control MSR value\r
1308 //\r
1309 NewSmmFeatureControlMsr = SmmFeatureControlMsr;\r
1310 if (mSmmCodeAccessCheckEnable) {\r
1311 NewSmmFeatureControlMsr |= SMM_CODE_CHK_EN_BIT;\r
1312 if (FeaturePcdGet (PcdCpuSmmFeatureControlMsrLock)) {\r
1313 NewSmmFeatureControlMsr |= SMM_FEATURE_CONTROL_LOCK_BIT;\r
1314 }\r
1315 }\r
1316\r
1317 //\r
1318 // Only set the SMM Feature Control MSR value if the new value is different than the current value\r
1319 //\r
1320 if (NewSmmFeatureControlMsr != SmmFeatureControlMsr) {\r
1321 SmmCpuFeaturesSetSmmRegister (CpuIndex, SmmRegFeatureControl, NewSmmFeatureControlMsr);\r
1322 }\r
1323\r
1324 //\r
1325 // Release the spin lock used to serialize the updates to the SMM Feature Control MSR\r
1326 //\r
1327 ReleaseSpinLock (&mConfigSmmCodeAccessCheckLock);\r
1328}\r
1329\r
1330/**\r
1331Configure SMM Code Access Check feature for all processors.\r
1332SMM Feature Control MSR will be locked after configuration.\r
1333**/\r
1334VOID\r
1335ConfigSmmCodeAccessCheck (\r
1336 VOID\r
1337 )\r
1338{\r
1339 UINTN Index;\r
1340 EFI_STATUS Status;\r
1341\r
1342 //\r
1343 // Check to see if the Feature Control MSR is supported on this CPU\r
1344 //\r
1345 Index = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;\r
1346 if (!SmmCpuFeaturesIsSmmRegisterSupported (Index, SmmRegFeatureControl)) {\r
1347 mSmmCodeAccessCheckEnable = FALSE;\r
1348 return;\r
1349 }\r
1350\r
1351 //\r
1352 // Check to see if the CPU supports the SMM Code Access Check feature\r
1353 // Do not access this MSR unless the CPU supports the SmmRegFeatureControl\r
1354 //\r
1355 if ((AsmReadMsr64 (EFI_MSR_SMM_MCA_CAP) & SMM_CODE_ACCESS_CHK_BIT) == 0) {\r
1356 mSmmCodeAccessCheckEnable = FALSE;\r
1357 return;\r
1358 }\r
1359\r
1360 //\r
1361 // Initialize the lock used to serialize the MSR programming in BSP and all APs\r
1362 //\r
1363 InitializeSpinLock (&mConfigSmmCodeAccessCheckLock);\r
1364\r
1365 //\r
1366 // Acquire Config SMM Code Access Check spin lock. The BSP will release the\r
1367 // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().\r
1368 //\r
1369 AcquireSpinLock (&mConfigSmmCodeAccessCheckLock);\r
1370\r
1371 //\r
1372 // Enable SMM Code Access Check feature on the BSP.\r
1373 //\r
1374 ConfigSmmCodeAccessCheckOnCurrentProcessor (&Index);\r
1375\r
1376 //\r
1377 // Enable SMM Code Access Check feature for the APs.\r
1378 //\r
1379 for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {\r
1380 if (Index != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {\r
1381\r
1382 //\r
1383 // Acquire Config SMM Code Access Check spin lock. The AP will release the\r
1384 // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().\r
1385 //\r
1386 AcquireSpinLock (&mConfigSmmCodeAccessCheckLock);\r
1387\r
1388 //\r
1389 // Call SmmStartupThisAp() to enable SMM Code Access Check on an AP.\r
1390 //\r
1391 Status = gSmst->SmmStartupThisAp (ConfigSmmCodeAccessCheckOnCurrentProcessor, Index, &Index);\r
1392 ASSERT_EFI_ERROR (Status);\r
1393\r
1394 //\r
1395 // Wait for the AP to release the Config SMM Code Access Check spin lock.\r
1396 //\r
1397 while (!AcquireSpinLockOrFail (&mConfigSmmCodeAccessCheckLock)) {\r
1398 CpuPause ();\r
1399 }\r
1400\r
1401 //\r
1402 // Release the Config SMM Code Access Check spin lock.\r
1403 //\r
1404 ReleaseSpinLock (&mConfigSmmCodeAccessCheckLock);\r
1405 }\r
1406 }\r
1407}\r
1408\r
1409/**\r
1410 Perform the remaining tasks.\r
1411\r
1412**/\r
1413VOID\r
1414PerformRemainingTasks (\r
1415 VOID\r
1416 )\r
1417{\r
1418 if (mSmmReadyToLock) {\r
1419 //\r
1420 // Start SMM Profile feature\r
1421 //\r
1422 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {\r
1423 SmmProfileStart ();\r
1424 }\r
1425 //\r
1426 // Create a mix of 2MB and 4KB page tables. Mark some memory ranges as not present and execute-disable.\r
1427 //\r
1428 InitPaging ();\r
1429 //\r
1430 // Configure SMM Code Access Check feature if available.\r
1431 //\r
1432 ConfigSmmCodeAccessCheck ();\r
1433\r
1434 //\r
1435 // Clean SMM ready to lock flag\r
1436 //\r
1437 mSmmReadyToLock = FALSE;\r
1438 }\r
1439}\r
1440\r
1441/**\r
1442 Perform the pre-tasks.\r
1443\r
1444**/\r
1445VOID\r
1446PerformPreTasks (\r
1447 VOID\r
1448 )\r
1449{\r
1450 //\r
1451 // Restore SMM Configuration in S3 boot path.\r
1452 //\r
1453 if (mRestoreSmmConfigurationInS3) {\r
1454 //\r
1455 // Configure SMM Code Access Check feature if available.\r
1456 //\r
1457 ConfigSmmCodeAccessCheck ();\r
1458\r
1459 mRestoreSmmConfigurationInS3 = FALSE;\r
1460 }\r
1461}\r