UefiCpuPkg: Add PiSmmCpuDxeSmm module no IA32/X64 files
[mirror_edk2.git] / UefiCpuPkg / PiSmmCpuDxeSmm / PiSmmCpuDxeSmm.c
CommitLineData
529a5a86
MK
1/** @file\r
2Agent Module to load other modules to deploy SMM Entry Vector for X86 CPU.\r
3\r
4Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>\r
5This program and the accompanying materials\r
6are licensed and made available under the terms and conditions of the BSD License\r
7which accompanies this distribution. The full text of the license may be found at\r
8http://opensource.org/licenses/bsd-license.php\r
9\r
10THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
11WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
12\r
13**/\r
14\r
15#include "PiSmmCpuDxeSmm.h"\r
16\r
//
// SMM CPU Private Data structure that contains SMM Configuration Protocol
// along its supporting fields.
//
SMM_CPU_PRIVATE_DATA mSmmCpuPrivateData = {
  SMM_CPU_PRIVATE_DATA_SIGNATURE,               // Signature
  NULL,                                         // SmmCpuHandle
  NULL,                                         // Pointer to ProcessorInfo array
  NULL,                                         // Pointer to Operation array
  NULL,                                         // Pointer to CpuSaveStateSize array
  NULL,                                         // Pointer to CpuSaveState array
  { {0} },                                      // SmmReservedSmramRegion
  {
    SmmStartupThisAp,                           // SmmCoreEntryContext.SmmStartupThisAp
    0,                                          // SmmCoreEntryContext.CurrentlyExecutingCpu
    0,                                          // SmmCoreEntryContext.NumberOfCpus
    NULL,                                       // SmmCoreEntryContext.CpuSaveStateSize
    NULL                                        // SmmCoreEntryContext.CpuSaveState
  },
  NULL,                                         // SmmCoreEntry
  {
    mSmmCpuPrivateData.SmmReservedSmramRegion,  // SmmConfiguration.SmramReservedRegions
    RegisterSmmEntry                            // SmmConfiguration.RegisterSmmEntry
  },
};

//
// Hot plug bookkeeping shared with the SmmCpuFeatures library:
// per-CPU APIC ID / SMBASE arrays plus the SMRR range.
//
CPU_HOT_PLUG_DATA mCpuHotPlugData = {
  CPU_HOT_PLUG_DATA_REVISION_1,                 // Revision
  0,                                            // Array Length of SmBase and APIC ID
  NULL,                                         // Pointer to APIC ID array
  NULL,                                         // Pointer to SMBASE array
  0,                                            // Reserved
  0,                                            // SmrrBase
  0                                             // SmrrSize
};

//
// Global pointer used to access mSmmCpuPrivateData from outside and inside SMM
//
SMM_CPU_PRIVATE_DATA *gSmmCpuPrivate = &mSmmCpuPrivateData;

//
// SMM Relocation variables
//
// mRebased: per-CPU flags; entry [i] is set TRUE (via SemaphoreHook) once
// CPU i has completed its first SMI and its SMBASE has been relocated.
// mIsBsp:   TRUE while the CPU being relocated is the BSP.
// Both are written and polled across CPUs, hence volatile.
//
volatile BOOLEAN *mRebased;
volatile BOOLEAN mIsBsp;

///
/// Handle for the SMM CPU Protocol
///
EFI_HANDLE mSmmCpuHandle = NULL;

///
/// SMM CPU Protocol instance
///
EFI_SMM_CPU_PROTOCOL mSmmCpu = {
  SmmReadSaveState,
  SmmWriteSaveState
};

//
// Table of externally registered interrupt handlers, one slot per vector.
//
EFI_CPU_INTERRUPT_HANDLER mExternalVectorTable[EXCEPTION_VECTOR_NUMBER];

///
/// SMM CPU Save State Protocol instance
///
EFI_SMM_CPU_SAVE_STATE_PROTOCOL mSmmCpuSaveState = {
  NULL
};

//
// SMM stack information: base/end of the per-CPU stack array and the
// size of each CPU's stack within it.
//
UINTN mSmmStackArrayBase;
UINTN mSmmStackArrayEnd;
UINTN mSmmStackSize;

//
// Pointer to structure used during S3 Resume
//
SMM_S3_RESUME_STATE *mSmmS3ResumeState = NULL;

UINTN mMaxNumberOfCpus = 1;
UINTN mNumberOfCpus = 1;

//
// SMM ready to lock flag
//
BOOLEAN mSmmReadyToLock = FALSE;

//
// Global used to cache PCD for SMM Code Access Check enable
//
BOOLEAN mSmmCodeAccessCheckEnable = FALSE;

//
// Spin lock used to serialize setting of SMM Code Access Check feature
//
SPIN_LOCK mConfigSmmCodeAccessCheckLock;
115\r
/**
  Initialize IDT to setup exception handlers for SMM.

  Temporarily loads the SMM IDT (gcSmiIdtr) so that
  InitializeCpuExceptionHandlers() populates it with the default SMM
  exception handlers, then restores the DXE IDT.  Interrupts are disabled
  for the whole sequence so no vector can fire through a half-built table.

**/
VOID
InitializeSmmIdt (
  VOID
  )
{
  EFI_STATUS       Status;
  BOOLEAN          InterruptState;
  IA32_DESCRIPTOR  DxeIdtr;

  //
  // Disable Interrupt and save DXE IDT table
  //
  InterruptState = SaveAndDisableInterrupts ();
  AsmReadIdtr (&DxeIdtr);
  //
  // Load SMM temporary IDT table
  //
  AsmWriteIdtr (&gcSmiIdtr);
  //
  // Setup SMM default exception handlers, SMM IDT table
  // will be updated and saved in gcSmiIdtr
  //
  Status = InitializeCpuExceptionHandlers (NULL);
  ASSERT_EFI_ERROR (Status);
  //
  // Restore DXE IDT table and CPU interrupt
  //
  AsmWriteIdtr ((IA32_DESCRIPTOR *) &DxeIdtr);
  SetInterruptState (InterruptState);
}
149\r
150/**\r
151 Search module name by input IP address and output it.\r
152\r
153 @param CallerIpAddress Caller instruction pointer.\r
154\r
155**/\r
156VOID\r
157DumpModuleInfoByIp (\r
158 IN UINTN CallerIpAddress\r
159 )\r
160{\r
161 UINTN Pe32Data;\r
162 EFI_IMAGE_DOS_HEADER *DosHdr;\r
163 EFI_IMAGE_OPTIONAL_HEADER_PTR_UNION Hdr;\r
164 VOID *PdbPointer;\r
165 UINT64 DumpIpAddress;\r
166\r
167 //\r
168 // Find Image Base\r
169 //\r
170 Pe32Data = CallerIpAddress & ~(SIZE_4KB - 1);\r
171 while (Pe32Data != 0) {\r
172 DosHdr = (EFI_IMAGE_DOS_HEADER *) Pe32Data;\r
173 if (DosHdr->e_magic == EFI_IMAGE_DOS_SIGNATURE) {\r
174 //\r
175 // DOS image header is present, so read the PE header after the DOS image header.\r
176 //\r
177 Hdr.Pe32 = (EFI_IMAGE_NT_HEADERS32 *)(Pe32Data + (UINTN) ((DosHdr->e_lfanew) & 0x0ffff));\r
178 //\r
179 // Make sure PE header address does not overflow and is less than the initial address.\r
180 //\r
181 if (((UINTN)Hdr.Pe32 > Pe32Data) && ((UINTN)Hdr.Pe32 < CallerIpAddress)) {\r
182 if (Hdr.Pe32->Signature == EFI_IMAGE_NT_SIGNATURE) {\r
183 //\r
184 // It's PE image.\r
185 //\r
186 break;\r
187 }\r
188 }\r
189 }\r
190\r
191 //\r
192 // Not found the image base, check the previous aligned address\r
193 //\r
194 Pe32Data -= SIZE_4KB;\r
195 }\r
196\r
197 DumpIpAddress = CallerIpAddress;\r
198 DEBUG ((EFI_D_ERROR, "It is invoked from the instruction before IP(0x%lx)", DumpIpAddress));\r
199\r
200 if (Pe32Data != 0) {\r
201 PdbPointer = PeCoffLoaderGetPdbPointer ((VOID *) Pe32Data);\r
202 if (PdbPointer != NULL) {\r
203 DEBUG ((EFI_D_ERROR, " in module (%a)", PdbPointer));\r
204 }\r
205 }\r
206}\r
207\r
/**
  Read information from the CPU save state.

  @param  This      EFI_SMM_CPU_PROTOCOL instance
  @param  Width     The number of bytes to read from the CPU save state.
  @param  Register  Specifies the CPU register to read from the save state.
  @param  CpuIndex  Specifies the zero-based index of the CPU save state.
  @param  Buffer    Upon return, this holds the CPU register value read from the save state.

  @retval EFI_SUCCESS            The register was read from Save State
  @retval EFI_NOT_FOUND          The register is not defined for the Save State of Processor
  @retval EFI_INVALID_PARAMETER  This or Buffer is NULL.

**/
EFI_STATUS
EFIAPI
SmmReadSaveState (
  IN CONST EFI_SMM_CPU_PROTOCOL   *This,
  IN UINTN                        Width,
  IN EFI_SMM_SAVE_STATE_REGISTER  Register,
  IN UINTN                        CpuIndex,
  OUT VOID                        *Buffer
  )
{
  EFI_STATUS  Status;

  //
  // Retrieve pointer to the specified CPU's SMM Save State buffer
  //
  if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {
    return EFI_INVALID_PARAMETER;
  }

  //
  // Check for special EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID
  //
  if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {
    //
    // The pseudo-register only supports the 64-bit size specified by Width.
    //
    if (Width != sizeof (UINT64)) {
      return EFI_INVALID_PARAMETER;
    }
    //
    // If the processor is in SMM at the time the SMI occurred,
    // the pseudo register value for EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID is returned in Buffer.
    // Otherwise, EFI_NOT_FOUND is returned.
    //
    if (mSmmMpSyncData->CpuData[CpuIndex].Present) {
      *(UINT64 *)Buffer = gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId;
      return EFI_SUCCESS;
    } else {
      return EFI_NOT_FOUND;
    }
  }

  //
  // A CPU that did not enter SMM for this SMI has no valid save state to read.
  //
  if (!mSmmMpSyncData->CpuData[CpuIndex].Present) {
    return EFI_INVALID_PARAMETER;
  }

  //
  // Give the platform SmmCpuFeatures library first chance at the register;
  // fall back to the generic save state map reader only if it is unsupported.
  //
  Status = SmmCpuFeaturesReadSaveStateRegister (CpuIndex, Register, Width, Buffer);
  if (Status == EFI_UNSUPPORTED) {
    Status = ReadSaveStateRegister (CpuIndex, Register, Width, Buffer);
  }
  return Status;
}
274\r
275/**\r
276 Write data to the CPU save state.\r
277\r
278 @param This EFI_SMM_CPU_PROTOCOL instance\r
279 @param Width The number of bytes to read from the CPU save state.\r
280 @param Register Specifies the CPU register to write to the save state.\r
281 @param CpuIndex Specifies the zero-based index of the CPU save state\r
282 @param Buffer Upon entry, this holds the new CPU register value.\r
283\r
284 @retval EFI_SUCCESS The register was written from Save State\r
285 @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor\r
286 @retval EFI_INVALID_PARAMTER ProcessorIndex or Width is not correct\r
287\r
288**/\r
289EFI_STATUS\r
290EFIAPI\r
291SmmWriteSaveState (\r
292 IN CONST EFI_SMM_CPU_PROTOCOL *This,\r
293 IN UINTN Width,\r
294 IN EFI_SMM_SAVE_STATE_REGISTER Register,\r
295 IN UINTN CpuIndex,\r
296 IN CONST VOID *Buffer\r
297 )\r
298{\r
299 EFI_STATUS Status;\r
300\r
301 //\r
302 // Retrieve pointer to the specified CPU's SMM Save State buffer\r
303 //\r
304 if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {\r
305 return EFI_INVALID_PARAMETER;\r
306 }\r
307\r
308 //\r
309 // Writes to EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID are ignored\r
310 //\r
311 if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {\r
312 return EFI_SUCCESS;\r
313 }\r
314\r
315 if (!mSmmMpSyncData->CpuData[CpuIndex].Present) {\r
316 return EFI_INVALID_PARAMETER;\r
317 }\r
318\r
319 Status = SmmCpuFeaturesWriteSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
320 if (Status == EFI_UNSUPPORTED) {\r
321 Status = WriteSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
322 }\r
323 return Status;\r
324}\r
325\r
326\r
/**
  C function for SMI handler. To change all processor's SMMBase Register.

  Runs on each CPU during its first SMI (triggered from SmmRelocateBases).
  Identifies the executing CPU by APIC ID, performs CPU-specific SMM
  feature initialization (including SMBASE relocation), and hooks the
  return-from-RSM path so the per-CPU mRebased[] flag is set when the
  relocation completes.

**/
VOID
EFIAPI
SmmInitHandler (
  VOID
  )
{
  UINT32  ApicId;
  UINTN   Index;

  //
  // Update SMM IDT entries' code segment and load IDT
  //
  AsmWriteIdtr (&gcSmiIdtr);
  ApicId = GetApicId ();

  ASSERT (mNumberOfCpus <= PcdGet32 (PcdCpuMaxLogicalProcessorNumber));

  //
  // Find the slot whose recorded APIC ID matches the CPU executing this SMI.
  //
  for (Index = 0; Index < mNumberOfCpus; Index++) {
    if (ApicId == (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
      //
      // Initialize SMM specific features on the currently executing CPU
      //
      SmmCpuFeaturesInitializeProcessor (
        Index,
        mIsBsp,
        gSmmCpuPrivate->ProcessorInfo,
        &mCpuHotPlugData
        );

      if (mIsBsp) {
        //
        // BSP rebase is already done above.
        // Initialize private data during S3 resume
        //
        InitializeMpSyncData ();
      }

      //
      // Hook return after RSM to set SMM re-based flag
      //
      SemaphoreHook (Index, &mRebased[Index]);

      return;
    }
  }

  //
  // The executing CPU's APIC ID was not found in the ProcessorInfo array.
  //
  ASSERT (FALSE);
}
378\r
/**
  Relocate SmmBases for each processor.

  Execute on first boot and all S3 resumes

  Temporarily installs the SmmInitTemplate relocation stub at the default
  SMBASE (0x30000 + SMM_HANDLER_OFFSET = 0x38000), sends an SMI to each AP
  in turn and waits for its mRebased[] flag, relocates the BSP last, then
  restores the original memory contents at the default SMBASE.

**/
VOID
EFIAPI
SmmRelocateBases (
  VOID
  )
{
  UINT8                 BakBuf[BACK_BUF_SIZE];
  SMRAM_SAVE_STATE_MAP  BakBuf2;
  SMRAM_SAVE_STATE_MAP  *CpuStatePtr;
  UINT8                 *U8Ptr;
  UINT32                ApicId;
  UINTN                 Index;
  UINTN                 BspIndex;

  //
  // Make sure the reserved size is large enough for procedure SmmInitTemplate.
  //
  ASSERT (sizeof (BakBuf) >= gcSmmInitSize);

  //
  // Patch ASM code template with current CR0, CR3, and CR4 values
  //
  gSmmCr0 = (UINT32)AsmReadCr0 ();
  gSmmCr3 = (UINT32)AsmReadCr3 ();
  gSmmCr4 = (UINT32)AsmReadCr4 ();

  //
  // Patch GDTR for SMM base relocation
  //
  gcSmiInitGdtr.Base  = gcSmiGdtr.Base;
  gcSmiInitGdtr.Limit = gcSmiGdtr.Limit;

  U8Ptr = (UINT8*)(UINTN)(SMM_DEFAULT_SMBASE + SMM_HANDLER_OFFSET);
  CpuStatePtr = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);

  //
  // Backup original contents at address 0x38000
  //
  CopyMem (BakBuf, U8Ptr, sizeof (BakBuf));
  CopyMem (&BakBuf2, CpuStatePtr, sizeof (BakBuf2));

  //
  // Load image for relocation
  //
  CopyMem (U8Ptr, gcSmmInitTemplate, gcSmmInitSize);

  //
  // Retrieve the local APIC ID of current processor
  //
  ApicId = GetApicId ();

  //
  // Relocate SM bases for all APs
  // This is APs' 1st SMI - rebase will be done here, and APs' default SMI handler will be overridden by gcSmmInitTemplate
  //
  mIsBsp   = FALSE;
  BspIndex = (UINTN)-1;
  for (Index = 0; Index < mNumberOfCpus; Index++) {
    mRebased[Index] = FALSE;
    if (ApicId != (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
      SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
      //
      // Wait for this AP to finish its 1st SMI
      //
      while (!mRebased[Index]);
    } else {
      //
      // BSP will be Relocated later
      //
      BspIndex = Index;
    }
  }

  //
  // Relocate BSP's SMM base
  //
  ASSERT (BspIndex != (UINTN)-1);
  mIsBsp = TRUE;
  SendSmiIpi (ApicId);
  //
  // Wait for the BSP to finish its 1st SMI
  //
  while (!mRebased[BspIndex]);

  //
  // Restore contents at address 0x38000
  //
  CopyMem (CpuStatePtr, &BakBuf2, sizeof (BakBuf2));
  CopyMem (U8Ptr, BakBuf, sizeof (BakBuf));
}
475\r
/**
  Perform SMM initialization for all processors in the S3 boot path.

  For a native platform, MP initialization in the S3 boot path is also performed in this function.

  Never returns: it hands control back to the PEI S3 resume entry point via
  SwitchStack() (32-bit SMM) or AsmDisablePaging64() (64-bit SMM), and dead
  loops if no resume context exists.
**/
VOID
EFIAPI
SmmRestoreCpu (
  VOID
  )
{
  SMM_S3_RESUME_STATE       *SmmS3ResumeState;
  IA32_DESCRIPTOR           Ia32Idtr;
  IA32_DESCRIPTOR           X64Idtr;
  IA32_IDT_GATE_DESCRIPTOR  IdtEntryTable[EXCEPTION_VECTOR_NUMBER];
  EFI_STATUS                Status;

  DEBUG ((EFI_D_INFO, "SmmRestoreCpu()\n"));

  //
  // See if there is enough context to resume PEI Phase
  //
  if (mSmmS3ResumeState == NULL) {
    DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
    CpuDeadLoop ();
  }

  SmmS3ResumeState = mSmmS3ResumeState;
  ASSERT (SmmS3ResumeState != NULL);

  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    //
    // Save the IA32 IDT Descriptor
    //
    AsmReadIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);

    //
    // Setup X64 IDT table
    //
    ZeroMem (IdtEntryTable, sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32);
    X64Idtr.Base = (UINTN) IdtEntryTable;
    X64Idtr.Limit = (UINT16) (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32 - 1);
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &X64Idtr);

    //
    // Setup the default exception handler
    //
    Status = InitializeCpuExceptionHandlers (NULL);
    ASSERT_EFI_ERROR (Status);

    //
    // Initialize Debug Agent to support source level debug
    //
    InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64, (VOID *)&Ia32Idtr, NULL);
  }

  //
  // Do below CPU things for native platform only
  //
  if (!FeaturePcdGet(PcdFrameworkCompatibilitySupport)) {
    //
    // Skip initialization if mAcpiCpuData is not valid
    //
    if (mAcpiCpuData.NumberOfCpus > 0) {
      //
      // First time microcode load and restore MTRRs
      //
      EarlyInitializeCpu ();
    }
  }

  //
  // Restore SMBASE for BSP and all APs
  //
  SmmRelocateBases ();

  //
  // Do below CPU things for native platform only
  //
  if (!FeaturePcdGet(PcdFrameworkCompatibilitySupport)) {
    //
    // Skip initialization if mAcpiCpuData is not valid
    //
    if (mAcpiCpuData.NumberOfCpus > 0) {
      //
      // Restore MSRs for BSP and all APs
      //
      InitializeCpu ();
    }
  }

  //
  // Set a flag to restore SMM configuration in S3 path.
  //
  mRestoreSmmConfigurationInS3 = TRUE;

  DEBUG (( EFI_D_INFO, "SMM S3 Return CS = %x\n", SmmS3ResumeState->ReturnCs));
  DEBUG (( EFI_D_INFO, "SMM S3 Return Entry Point = %x\n", SmmS3ResumeState->ReturnEntryPoint));
  DEBUG (( EFI_D_INFO, "SMM S3 Return Context1 = %x\n", SmmS3ResumeState->ReturnContext1));
  DEBUG (( EFI_D_INFO, "SMM S3 Return Context2 = %x\n", SmmS3ResumeState->ReturnContext2));
  DEBUG (( EFI_D_INFO, "SMM S3 Return Stack Pointer = %x\n", SmmS3ResumeState->ReturnStackPointer));

  //
  // If SMM is in 32-bit mode, then use SwitchStack() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_32) {
    DEBUG ((EFI_D_INFO, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));

    SwitchStack (
      (SWITCH_STACK_ENTRY_POINT)(UINTN)SmmS3ResumeState->ReturnEntryPoint,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext1,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext2,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    DEBUG ((EFI_D_INFO, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));
    //
    // Disable interrupt of Debug timer, since new IDT table is for IA32 and will not work in long mode.
    //
    SaveAndSetDebugTimerInterrupt (FALSE);
    //
    // Restore IA32 IDT table
    //
    AsmWriteIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);
    AsmDisablePaging64 (
      SmmS3ResumeState->ReturnCs,
      (UINT32)SmmS3ResumeState->ReturnEntryPoint,
      (UINT32)SmmS3ResumeState->ReturnContext1,
      (UINT32)SmmS3ResumeState->ReturnContext2,
      (UINT32)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // Can not resume PEI Phase
  //
  DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
  CpuDeadLoop ();
}
620\r
621/**\r
622 Copy register table from ACPI NVS memory into SMRAM.\r
623\r
624 @param[in] DestinationRegisterTableList Points to destination register table.\r
625 @param[in] SourceRegisterTableList Points to source register table.\r
626 @param[in] NumberOfCpus Number of CPUs.\r
627\r
628**/\r
629VOID\r
630CopyRegisterTable (\r
631 IN CPU_REGISTER_TABLE *DestinationRegisterTableList,\r
632 IN CPU_REGISTER_TABLE *SourceRegisterTableList,\r
633 IN UINT32 NumberOfCpus\r
634 )\r
635{\r
636 UINTN Index;\r
637 UINTN Index1;\r
638 CPU_REGISTER_TABLE_ENTRY *RegisterTableEntry;\r
639\r
640 CopyMem (DestinationRegisterTableList, SourceRegisterTableList, NumberOfCpus * sizeof (CPU_REGISTER_TABLE));\r
641 for (Index = 0; Index < NumberOfCpus; Index++) {\r
642 DestinationRegisterTableList[Index].RegisterTableEntry = AllocatePool (DestinationRegisterTableList[Index].AllocatedSize);\r
643 ASSERT (DestinationRegisterTableList[Index].RegisterTableEntry != NULL);\r
644 CopyMem (DestinationRegisterTableList[Index].RegisterTableEntry, SourceRegisterTableList[Index].RegisterTableEntry, DestinationRegisterTableList[Index].AllocatedSize);\r
645 //\r
646 // Go though all MSRs in register table to initialize MSR spin lock\r
647 //\r
648 RegisterTableEntry = DestinationRegisterTableList[Index].RegisterTableEntry;\r
649 for (Index1 = 0; Index1 < DestinationRegisterTableList[Index].TableLength; Index1++, RegisterTableEntry++) {\r
650 if ((RegisterTableEntry->RegisterType == Msr) && (RegisterTableEntry->ValidBitLength < 64)) {\r
651 //\r
652 // Initialize MSR spin lock only for those MSRs need bit field writing\r
653 //\r
654 InitMsrSpinLockByIndex (RegisterTableEntry->Index);\r
655 }\r
656 }\r
657 }\r
658}\r
659\r
660/**\r
661 SMM Ready To Lock event notification handler.\r
662\r
663 The CPU S3 data is copied to SMRAM for security and mSmmReadyToLock is set to\r
664 perform additional lock actions that must be performed from SMM on the next SMI.\r
665\r
666 @param[in] Protocol Points to the protocol's unique identifier.\r
667 @param[in] Interface Points to the interface instance.\r
668 @param[in] Handle The handle on which the interface was installed.\r
669\r
670 @retval EFI_SUCCESS Notification handler runs successfully.\r
671 **/\r
672EFI_STATUS\r
673EFIAPI\r
674SmmReadyToLockEventNotify (\r
675 IN CONST EFI_GUID *Protocol,\r
676 IN VOID *Interface,\r
677 IN EFI_HANDLE Handle\r
678 )\r
679{\r
680 ACPI_CPU_DATA *AcpiCpuData;\r
681 IA32_DESCRIPTOR *Gdtr;\r
682 IA32_DESCRIPTOR *Idtr;\r
683\r
684 //\r
685 // Prevent use of mAcpiCpuData by initialize NumberOfCpus to 0\r
686 //\r
687 mAcpiCpuData.NumberOfCpus = 0;\r
688\r
689 //\r
690 // If FrameworkCompatibilitySspport is enabled, then do not copy CPU S3 Data into SMRAM\r
691 //\r
692 if (FeaturePcdGet (PcdFrameworkCompatibilitySupport)) {\r
693 goto Done;\r
694 }\r
695\r
696 //\r
697 // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM\r
698 //\r
699 AcpiCpuData = (ACPI_CPU_DATA *)(UINTN)PcdGet64 (PcdCpuS3DataAddress);\r
700 if (AcpiCpuData == 0) {\r
701 goto Done;\r
702 }\r
703\r
704 //\r
705 // For a native platform, copy the CPU S3 data into SMRAM for use on CPU S3 Resume.\r
706 //\r
707 CopyMem (&mAcpiCpuData, AcpiCpuData, sizeof (mAcpiCpuData));\r
708\r
709 mAcpiCpuData.MtrrTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (MTRR_SETTINGS));\r
710 ASSERT (mAcpiCpuData.MtrrTable != 0);\r
711\r
712 CopyMem ((VOID *)(UINTN)mAcpiCpuData.MtrrTable, (VOID *)(UINTN)AcpiCpuData->MtrrTable, sizeof (MTRR_SETTINGS));\r
713\r
714 mAcpiCpuData.GdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));\r
715 ASSERT (mAcpiCpuData.GdtrProfile != 0);\r
716\r
717 CopyMem ((VOID *)(UINTN)mAcpiCpuData.GdtrProfile, (VOID *)(UINTN)AcpiCpuData->GdtrProfile, sizeof (IA32_DESCRIPTOR));\r
718\r
719 mAcpiCpuData.IdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));\r
720 ASSERT (mAcpiCpuData.IdtrProfile != 0);\r
721\r
722 CopyMem ((VOID *)(UINTN)mAcpiCpuData.IdtrProfile, (VOID *)(UINTN)AcpiCpuData->IdtrProfile, sizeof (IA32_DESCRIPTOR));\r
723\r
724 mAcpiCpuData.PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));\r
725 ASSERT (mAcpiCpuData.PreSmmInitRegisterTable != 0);\r
726\r
727 CopyRegisterTable (\r
728 (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable,\r
729 (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->PreSmmInitRegisterTable,\r
730 mAcpiCpuData.NumberOfCpus\r
731 );\r
732\r
733 mAcpiCpuData.RegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));\r
734 ASSERT (mAcpiCpuData.RegisterTable != 0);\r
735\r
736 CopyRegisterTable (\r
737 (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable,\r
738 (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->RegisterTable,\r
739 mAcpiCpuData.NumberOfCpus\r
740 );\r
741\r
742 //\r
743 // Copy AP's GDT, IDT and Machine Check handler into SMRAM.\r
744 //\r
745 Gdtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.GdtrProfile;\r
746 Idtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.IdtrProfile;\r
747\r
748 mGdtForAp = AllocatePool ((Gdtr->Limit + 1) + (Idtr->Limit + 1) + mAcpiCpuData.ApMachineCheckHandlerSize);\r
749 ASSERT (mGdtForAp != NULL);\r
750 mIdtForAp = (VOID *) ((UINTN)mGdtForAp + (Gdtr->Limit + 1));\r
751 mMachineCheckHandlerForAp = (VOID *) ((UINTN)mIdtForAp + (Idtr->Limit + 1));\r
752\r
753 CopyMem (mGdtForAp, (VOID *)Gdtr->Base, Gdtr->Limit + 1);\r
754 CopyMem (mIdtForAp, (VOID *)Idtr->Base, Idtr->Limit + 1);\r
755 CopyMem (mMachineCheckHandlerForAp, (VOID *)(UINTN)mAcpiCpuData.ApMachineCheckHandlerBase, mAcpiCpuData.ApMachineCheckHandlerSize);\r
756\r
757Done:\r
758 //\r
759 // Set SMM ready to lock flag and return\r
760 //\r
761 mSmmReadyToLock = TRUE;\r
762 return EFI_SUCCESS;\r
763}\r
764\r
765/**\r
766 The module Entry Point of the CPU SMM driver.\r
767\r
768 @param ImageHandle The firmware allocated handle for the EFI image.\r
769 @param SystemTable A pointer to the EFI System Table.\r
770\r
771 @retval EFI_SUCCESS The entry point is executed successfully.\r
772 @retval Other Some error occurs when executing this entry point.\r
773\r
774**/\r
775EFI_STATUS\r
776EFIAPI\r
777PiCpuSmmEntry (\r
778 IN EFI_HANDLE ImageHandle,\r
779 IN EFI_SYSTEM_TABLE *SystemTable\r
780 )\r
781{\r
782 EFI_STATUS Status;\r
783 EFI_MP_SERVICES_PROTOCOL *MpServices;\r
784 UINTN NumberOfEnabledProcessors;\r
785 UINTN Index;\r
786 VOID *Buffer;\r
787 UINTN TileSize;\r
788 VOID *GuidHob;\r
789 EFI_SMRAM_DESCRIPTOR *SmramDescriptor;\r
790 SMM_S3_RESUME_STATE *SmmS3ResumeState;\r
791 UINT8 *Stacks;\r
792 VOID *Registration;\r
793 UINT32 RegEax;\r
794 UINT32 RegEdx;\r
795 UINTN FamilyId;\r
796 UINTN ModelId;\r
797 UINT32 Cr3;\r
798\r
799 //\r
800 // Initialize Debug Agent to support source level debug in SMM code\r
801 //\r
802 InitializeDebugAgent (DEBUG_AGENT_INIT_SMM, NULL, NULL);\r
803\r
804 //\r
805 // Report the start of CPU SMM initialization.\r
806 //\r
807 REPORT_STATUS_CODE (\r
808 EFI_PROGRESS_CODE,\r
809 EFI_COMPUTING_UNIT_HOST_PROCESSOR | EFI_CU_HP_PC_SMM_INIT\r
810 );\r
811\r
812 //\r
813 // Fix segment address of the long-mode-switch jump\r
814 //\r
815 if (sizeof (UINTN) == sizeof (UINT64)) {\r
816 gSmmJmpAddr.Segment = LONG_MODE_CODE_SEGMENT;\r
817 }\r
818\r
819 //\r
820 // Find out SMRR Base and SMRR Size\r
821 //\r
822 FindSmramInfo (&mCpuHotPlugData.SmrrBase, &mCpuHotPlugData.SmrrSize);\r
823\r
824 //\r
825 // Get MP Services Protocol\r
826 //\r
827 Status = SystemTable->BootServices->LocateProtocol (&gEfiMpServiceProtocolGuid, NULL, (VOID **)&MpServices);\r
828 ASSERT_EFI_ERROR (Status);\r
829\r
830 //\r
831 // Use MP Services Protocol to retrieve the number of processors and number of enabled processors\r
832 //\r
833 Status = MpServices->GetNumberOfProcessors (MpServices, &mNumberOfCpus, &NumberOfEnabledProcessors);\r
834 ASSERT_EFI_ERROR (Status);\r
835 ASSERT (mNumberOfCpus <= PcdGet32 (PcdCpuMaxLogicalProcessorNumber));\r
836\r
837 //\r
838 // If support CPU hot plug, PcdCpuSmmEnableBspElection should be set to TRUE.\r
839 // A constant BSP index makes no sense because it may be hot removed.\r
840 //\r
841 DEBUG_CODE (\r
842 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {\r
843\r
844 ASSERT (FeaturePcdGet (PcdCpuSmmEnableBspElection));\r
845 }\r
846 );\r
847\r
848 //\r
849 // Save the PcdCpuSmmCodeAccessCheckEnable value into a global variable.\r
850 //\r
851 mSmmCodeAccessCheckEnable = PcdGetBool (PcdCpuSmmCodeAccessCheckEnable);\r
852 DEBUG ((EFI_D_INFO, "PcdCpuSmmCodeAccessCheckEnable = %d\n", mSmmCodeAccessCheckEnable));\r
853\r
854 //\r
855 // If support CPU hot plug, we need to allocate resources for possibly hot-added processors\r
856 //\r
857 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {\r
858 mMaxNumberOfCpus = PcdGet32 (PcdCpuMaxLogicalProcessorNumber);\r
859 } else {\r
860 mMaxNumberOfCpus = mNumberOfCpus;\r
861 }\r
862 gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus = mMaxNumberOfCpus;\r
863\r
864 //\r
865 // The CPU save state and code for the SMI entry point are tiled within an SMRAM\r
866 // allocated buffer. The minimum size of this buffer for a uniprocessor system\r
867 // is 32 KB, because the entry point is SMBASE + 32KB, and CPU save state area\r
868 // just below SMBASE + 64KB. If more than one CPU is present in the platform,\r
869 // then the SMI entry point and the CPU save state areas can be tiles to minimize\r
870 // the total amount SMRAM required for all the CPUs. The tile size can be computed\r
871 // by adding the // CPU save state size, any extra CPU specific context, and\r
872 // the size of code that must be placed at the SMI entry point to transfer\r
873 // control to a C function in the native SMM execution mode. This size is\r
874 // rounded up to the nearest power of 2 to give the tile size for a each CPU.\r
875 // The total amount of memory required is the maximum number of CPUs that\r
876 // platform supports times the tile size. The picture below shows the tiling,\r
877 // where m is the number of tiles that fit in 32KB.\r
878 //\r
879 // +-----------------------------+ <-- 2^n offset from Base of allocated buffer\r
880 // | CPU m+1 Save State |\r
881 // +-----------------------------+\r
882 // | CPU m+1 Extra Data |\r
883 // +-----------------------------+\r
884 // | Padding |\r
885 // +-----------------------------+\r
886 // | CPU 2m SMI Entry |\r
887 // +#############################+ <-- Base of allocated buffer + 64 KB\r
888 // | CPU m-1 Save State |\r
889 // +-----------------------------+\r
890 // | CPU m-1 Extra Data |\r
891 // +-----------------------------+\r
892 // | Padding |\r
893 // +-----------------------------+\r
894 // | CPU 2m-1 SMI Entry |\r
895 // +=============================+ <-- 2^n offset from Base of allocated buffer\r
896 // | . . . . . . . . . . . . |\r
897 // +=============================+ <-- 2^n offset from Base of allocated buffer\r
898 // | CPU 2 Save State |\r
899 // +-----------------------------+\r
900 // | CPU 2 Extra Data |\r
901 // +-----------------------------+\r
902 // | Padding |\r
903 // +-----------------------------+\r
904 // | CPU m+1 SMI Entry |\r
905 // +=============================+ <-- Base of allocated buffer + 32 KB\r
906 // | CPU 1 Save State |\r
907 // +-----------------------------+\r
908 // | CPU 1 Extra Data |\r
909 // +-----------------------------+\r
910 // | Padding |\r
911 // +-----------------------------+\r
912 // | CPU m SMI Entry |\r
913 // +#############################+ <-- Base of allocated buffer + 32 KB == CPU 0 SMBASE + 64 KB\r
914 // | CPU 0 Save State |\r
915 // +-----------------------------+\r
916 // | CPU 0 Extra Data |\r
917 // +-----------------------------+\r
918 // | Padding |\r
919 // +-----------------------------+\r
920 // | CPU m-1 SMI Entry |\r
921 // +=============================+ <-- 2^n offset from Base of allocated buffer\r
922 // | . . . . . . . . . . . . |\r
923 // +=============================+ <-- 2^n offset from Base of allocated buffer\r
924 // | Padding |\r
925 // +-----------------------------+\r
926 // | CPU 1 SMI Entry |\r
927 // +=============================+ <-- 2^n offset from Base of allocated buffer\r
928 // | Padding |\r
929 // +-----------------------------+\r
930 // | CPU 0 SMI Entry |\r
931 // +#############################+ <-- Base of allocated buffer == CPU 0 SMBASE + 32 KB\r
932 //\r
933\r
934 //\r
935 // Retrieve CPU Family\r
936 //\r
937 AsmCpuid (CPUID_VERSION_INFO, &RegEax, NULL, NULL, &RegEdx);\r
938 FamilyId = (RegEax >> 8) & 0xf;\r
939 ModelId = (RegEax >> 4) & 0xf;\r
940 if (FamilyId == 0x06 || FamilyId == 0x0f) {\r
941 ModelId = ModelId | ((RegEax >> 12) & 0xf0);\r
942 }\r
943\r
944 //\r
945 // Determine the mode of the CPU at the time an SMI occurs\r
946 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual\r
947 // Volume 3C, Section 34.4.1.1\r
948 //\r
949 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT;\r
950 if ((RegEdx & BIT29) != 0) {\r
951 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;\r
952 }\r
953 if (FamilyId == 0x06) {\r
954 if (ModelId == 0x17 || ModelId == 0x0f || ModelId == 0x1c) {\r
955 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;\r
956 }\r
957 }\r
958\r
959 //\r
960 // Compute tile size of buffer required to hold the CPU SMRAM Save State Map, extra CPU\r
961 // specific context in a PROCESSOR_SMM_DESCRIPTOR, and the SMI entry point. This size\r
962 // is rounded up to nearest power of 2.\r
963 //\r
964 TileSize = sizeof (SMRAM_SAVE_STATE_MAP) + sizeof (PROCESSOR_SMM_DESCRIPTOR) + GetSmiHandlerSize () - 1;\r
965 TileSize = 2 * GetPowerOfTwo32 ((UINT32)TileSize);\r
966 DEBUG ((EFI_D_INFO, "SMRAM TileSize = %08x\n", TileSize));\r
967\r
968 //\r
969 // If the TileSize is larger than space available for the SMI Handler of CPU[i],\r
970 // the PROCESSOR_SMM_DESCRIPTOR of CPU[i+1] and the SMRAM Save State Map of CPU[i+1],\r
971 // the ASSERT(). If this ASSERT() is triggered, then the SMI Handler size must be\r
972 // reduced.\r
973 //\r
974 ASSERT (TileSize <= (SMRAM_SAVE_STATE_MAP_OFFSET + sizeof (SMRAM_SAVE_STATE_MAP) - SMM_HANDLER_OFFSET));\r
975\r
976 //\r
977 // Allocate buffer for all of the tiles.\r
978 //\r
979 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual\r
980 // Volume 3C, Section 34.11 SMBASE Relocation\r
981 // For Pentium and Intel486 processors, the SMBASE values must be\r
982 // aligned on a 32-KByte boundary or the processor will enter shutdown\r
983 // state during the execution of a RSM instruction.\r
984 //\r
985 // Intel486 processors: FamilyId is 4\r
986 // Pentium processors : FamilyId is 5\r
987 //\r
988 if ((FamilyId == 4) || (FamilyId == 5)) {\r
989 Buffer = AllocateAlignedPages (EFI_SIZE_TO_PAGES (SIZE_32KB + TileSize * (mMaxNumberOfCpus - 1)), SIZE_32KB);\r
990 } else {\r
991 Buffer = AllocatePages (EFI_SIZE_TO_PAGES (SIZE_32KB + TileSize * (mMaxNumberOfCpus - 1)));\r
992 }\r
993 ASSERT (Buffer != NULL);\r
994\r
995 //\r
996 // Allocate buffer for pointers to array in SMM_CPU_PRIVATE_DATA.\r
997 //\r
998 gSmmCpuPrivate->ProcessorInfo = (EFI_PROCESSOR_INFORMATION *)AllocatePool (sizeof (EFI_PROCESSOR_INFORMATION) * mMaxNumberOfCpus);\r
999 ASSERT (gSmmCpuPrivate->ProcessorInfo != NULL);\r
1000\r
1001 gSmmCpuPrivate->Operation = (SMM_CPU_OPERATION *)AllocatePool (sizeof (SMM_CPU_OPERATION) * mMaxNumberOfCpus);\r
1002 ASSERT (gSmmCpuPrivate->Operation != NULL);\r
1003\r
1004 gSmmCpuPrivate->CpuSaveStateSize = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);\r
1005 ASSERT (gSmmCpuPrivate->CpuSaveStateSize != NULL);\r
1006\r
1007 gSmmCpuPrivate->CpuSaveState = (VOID **)AllocatePool (sizeof (VOID *) * mMaxNumberOfCpus);\r
1008 ASSERT (gSmmCpuPrivate->CpuSaveState != NULL);\r
1009\r
1010 mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveStateSize = gSmmCpuPrivate->CpuSaveStateSize;\r
1011 mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveState = gSmmCpuPrivate->CpuSaveState;\r
1012 mSmmCpuSaveState.CpuSaveState = (EFI_SMM_CPU_STATE **)gSmmCpuPrivate->CpuSaveState;\r
1013\r
1014 //\r
1015 // Allocate buffer for pointers to array in CPU_HOT_PLUG_DATA.\r
1016 //\r
1017 mCpuHotPlugData.ApicId = (UINT64 *)AllocatePool (sizeof (UINT64) * mMaxNumberOfCpus);\r
1018 ASSERT (mCpuHotPlugData.ApicId != NULL);\r
1019 mCpuHotPlugData.SmBase = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);\r
1020 ASSERT (mCpuHotPlugData.SmBase != NULL);\r
1021 mCpuHotPlugData.ArrayLength = (UINT32)mMaxNumberOfCpus;\r
1022\r
1023 //\r
1024 // Retrieve APIC ID of each enabled processor from the MP Services protocol.\r
1025 // Also compute the SMBASE address, CPU Save State address, and CPU Save state\r
1026 // size for each CPU in the platform\r
1027 //\r
1028 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {\r
1029 mCpuHotPlugData.SmBase[Index] = (UINTN)Buffer + Index * TileSize - SMM_HANDLER_OFFSET;\r
1030 gSmmCpuPrivate->CpuSaveStateSize[Index] = sizeof(SMRAM_SAVE_STATE_MAP);\r
1031 gSmmCpuPrivate->CpuSaveState[Index] = (VOID *)(mCpuHotPlugData.SmBase[Index] + SMRAM_SAVE_STATE_MAP_OFFSET);\r
1032 gSmmCpuPrivate->Operation[Index] = SmmCpuNone;\r
1033\r
1034 if (Index < mNumberOfCpus) {\r
1035 Status = MpServices->GetProcessorInfo (MpServices, Index, &gSmmCpuPrivate->ProcessorInfo[Index]);\r
1036 ASSERT_EFI_ERROR (Status);\r
1037 mCpuHotPlugData.ApicId[Index] = gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId;\r
1038\r
1039 DEBUG ((EFI_D_INFO, "CPU[%03x] APIC ID=%04x SMBASE=%08x SaveState=%08x Size=%08x\n",\r
1040 Index,\r
1041 (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId,\r
1042 mCpuHotPlugData.SmBase[Index],\r
1043 gSmmCpuPrivate->CpuSaveState[Index],\r
1044 gSmmCpuPrivate->CpuSaveStateSize[Index]\r
1045 ));\r
1046 } else {\r
1047 gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId = INVALID_APIC_ID;\r
1048 mCpuHotPlugData.ApicId[Index] = INVALID_APIC_ID;\r
1049 }\r
1050 }\r
1051\r
1052 //\r
1053 // Allocate SMI stacks for all processors.\r
1054 //\r
1055 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
1056 //\r
1057 // 2 more pages is allocated for each processor.\r
1058 // one is guard page and the other is known good stack.\r
1059 //\r
1060 // +-------------------------------------------+-----+-------------------------------------------+\r
1061 // | Known Good Stack | Guard Page | SMM Stack | ... | Known Good Stack | Guard Page | SMM Stack |\r
1062 // +-------------------------------------------+-----+-------------------------------------------+\r
1063 // | | | |\r
1064 // |<-------------- Processor 0 -------------->| |<-------------- Processor n -------------->|\r
1065 //\r
1066 mSmmStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)) + 2);\r
1067 Stacks = (UINT8 *) AllocatePages (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)) + 2));\r
1068 ASSERT (Stacks != NULL);\r
1069 mSmmStackArrayBase = (UINTN)Stacks;\r
1070 mSmmStackArrayEnd = mSmmStackArrayBase + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * mSmmStackSize - 1;\r
1071 } else {\r
1072 mSmmStackSize = PcdGet32 (PcdCpuSmmStackSize);\r
1073 Stacks = (UINT8 *) AllocatePages (EFI_SIZE_TO_PAGES (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * mSmmStackSize));\r
1074 ASSERT (Stacks != NULL);\r
1075 }\r
1076\r
1077 //\r
1078 // Set SMI stack for SMM base relocation\r
1079 //\r
1080 gSmmInitStack = (UINTN) (Stacks + mSmmStackSize - sizeof (UINTN));\r
1081\r
1082 //\r
1083 // Initialize IDT\r
1084 //\r
1085 InitializeSmmIdt ();\r
1086\r
1087 //\r
1088 // Relocate SMM Base addresses to the ones allocated from SMRAM\r
1089 //\r
1090 mRebased = (BOOLEAN *)AllocateZeroPool (sizeof (BOOLEAN) * mMaxNumberOfCpus);\r
1091 ASSERT (mRebased != NULL);\r
1092 SmmRelocateBases ();\r
1093\r
1094 //\r
1095 // Call hook for BSP to perform extra actions in normal mode after all\r
1096 // SMM base addresses have been relocated on all CPUs\r
1097 //\r
1098 SmmCpuFeaturesSmmRelocationComplete ();\r
1099\r
1100 //\r
1101 // SMM Time initialization\r
1102 //\r
1103 InitializeSmmTimer ();\r
1104\r
1105 //\r
1106 // Initialize MP globals\r
1107 //\r
1108 Cr3 = InitializeMpServiceData (Stacks, mSmmStackSize);\r
1109\r
1110 //\r
1111 // Fill in SMM Reserved Regions\r
1112 //\r
1113 gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedStart = 0;\r
1114 gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedSize = 0;\r
1115\r
1116 //\r
1117 // Install the SMM Configuration Protocol onto a new handle on the handle database.\r
1118 // The entire SMM Configuration Protocol is allocated from SMRAM, so only a pointer\r
1119 // to an SMRAM address will be present in the handle database\r
1120 //\r
1121 Status = SystemTable->BootServices->InstallMultipleProtocolInterfaces (\r
1122 &gSmmCpuPrivate->SmmCpuHandle,\r
1123 &gEfiSmmConfigurationProtocolGuid, &gSmmCpuPrivate->SmmConfiguration,\r
1124 NULL\r
1125 );\r
1126 ASSERT_EFI_ERROR (Status);\r
1127\r
1128 //\r
1129 // Install the SMM CPU Protocol into SMM protocol database\r
1130 //\r
1131 Status = gSmst->SmmInstallProtocolInterface (\r
1132 &mSmmCpuHandle,\r
1133 &gEfiSmmCpuProtocolGuid,\r
1134 EFI_NATIVE_INTERFACE,\r
1135 &mSmmCpu\r
1136 );\r
1137 ASSERT_EFI_ERROR (Status);\r
1138\r
1139 //\r
1140 // Expose address of CPU Hot Plug Data structure if CPU hot plug is supported.\r
1141 //\r
1142 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {\r
1143 PcdSet64 (PcdCpuHotPlugDataAddress, (UINT64)(UINTN)&mCpuHotPlugData);\r
1144 }\r
1145\r
1146 //\r
1147 // Initialize SMM CPU Services Support\r
1148 //\r
1149 Status = InitializeSmmCpuServices (mSmmCpuHandle);\r
1150 ASSERT_EFI_ERROR (Status);\r
1151\r
1152 if (FeaturePcdGet (PcdFrameworkCompatibilitySupport)) {\r
1153 //\r
1154 // Install Framework SMM Save State Protocol into UEFI protocol database for backward compatibility\r
1155 //\r
1156 Status = SystemTable->BootServices->InstallMultipleProtocolInterfaces (\r
1157 &gSmmCpuPrivate->SmmCpuHandle,\r
1158 &gEfiSmmCpuSaveStateProtocolGuid,\r
1159 &mSmmCpuSaveState,\r
1160 NULL\r
1161 );\r
1162 ASSERT_EFI_ERROR (Status);\r
1163 //\r
1164 // The SmmStartupThisAp service in Framework SMST should always be non-null.\r
1165 // Update SmmStartupThisAp pointer in PI SMST here so that PI/Framework SMM thunk\r
1166 // can have it ready when constructing Framework SMST.\r
1167 //\r
1168 gSmst->SmmStartupThisAp = SmmStartupThisAp;\r
1169 }\r
1170\r
1171 //\r
1172 // register SMM Ready To Lock Protocol notification\r
1173 //\r
1174 Status = gSmst->SmmRegisterProtocolNotify (\r
1175 &gEfiSmmReadyToLockProtocolGuid,\r
1176 SmmReadyToLockEventNotify,\r
1177 &Registration\r
1178 );\r
1179 ASSERT_EFI_ERROR (Status);\r
1180\r
1181 GuidHob = GetFirstGuidHob (&gEfiAcpiVariableGuid);\r
1182 if (GuidHob != NULL) {\r
1183 SmramDescriptor = (EFI_SMRAM_DESCRIPTOR *) GET_GUID_HOB_DATA (GuidHob);\r
1184\r
1185 DEBUG ((EFI_D_INFO, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor));\r
1186 DEBUG ((EFI_D_INFO, "SMM S3 Structure = %x\n", SmramDescriptor->CpuStart));\r
1187\r
1188 SmmS3ResumeState = (SMM_S3_RESUME_STATE *)(UINTN)SmramDescriptor->CpuStart;\r
1189 ZeroMem (SmmS3ResumeState, sizeof (SMM_S3_RESUME_STATE));\r
1190\r
1191 mSmmS3ResumeState = SmmS3ResumeState;\r
1192 SmmS3ResumeState->Smst = (EFI_PHYSICAL_ADDRESS)(UINTN)gSmst;\r
1193\r
1194 SmmS3ResumeState->SmmS3ResumeEntryPoint = (EFI_PHYSICAL_ADDRESS)(UINTN)SmmRestoreCpu;\r
1195\r
1196 SmmS3ResumeState->SmmS3StackSize = SIZE_32KB;\r
1197 SmmS3ResumeState->SmmS3StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN)SmmS3ResumeState->SmmS3StackSize));\r
1198 if (SmmS3ResumeState->SmmS3StackBase == 0) {\r
1199 SmmS3ResumeState->SmmS3StackSize = 0;\r
1200 }\r
1201\r
1202 SmmS3ResumeState->SmmS3Cr0 = gSmmCr0;\r
1203 SmmS3ResumeState->SmmS3Cr3 = Cr3;\r
1204 SmmS3ResumeState->SmmS3Cr4 = gSmmCr4;\r
1205\r
1206 if (sizeof (UINTN) == sizeof (UINT64)) {\r
1207 SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_64;\r
1208 }\r
1209 if (sizeof (UINTN) == sizeof (UINT32)) {\r
1210 SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_32;\r
1211 }\r
1212 }\r
1213\r
1214 //\r
1215 // Check XD and BTS features\r
1216 //\r
1217 CheckProcessorFeature ();\r
1218\r
1219 //\r
1220 // Initialize SMM Profile feature\r
1221 //\r
1222 InitSmmProfile (Cr3);\r
1223\r
1224 //\r
1225 // Patch SmmS3ResumeState->SmmS3Cr3\r
1226 //\r
1227 InitSmmS3Cr3 ();\r
1228\r
1229 DEBUG ((EFI_D_INFO, "SMM CPU Module exit from SMRAM with EFI_SUCCESS\n"));\r
1230\r
1231 return EFI_SUCCESS;\r
1232}\r
1233\r
1234/**\r
1235\r
1236 Find out SMRAM information including SMRR base and SMRR size.\r
1237\r
1238 @param SmrrBase SMRR base\r
1239 @param SmrrSize SMRR size\r
1240\r
1241**/\r
1242VOID\r
1243FindSmramInfo (\r
1244 OUT UINT32 *SmrrBase,\r
1245 OUT UINT32 *SmrrSize\r
1246 )\r
1247{\r
1248 EFI_STATUS Status;\r
1249 UINTN Size;\r
1250 EFI_SMM_ACCESS2_PROTOCOL *SmmAccess;\r
1251 EFI_SMRAM_DESCRIPTOR *CurrentSmramRange;\r
1252 EFI_SMRAM_DESCRIPTOR *SmramRanges;\r
1253 UINTN SmramRangeCount;\r
1254 UINTN Index;\r
1255 UINT64 MaxSize;\r
1256 BOOLEAN Found;\r
1257\r
1258 //\r
1259 // Get SMM Access Protocol\r
1260 //\r
1261 Status = gBS->LocateProtocol (&gEfiSmmAccess2ProtocolGuid, NULL, (VOID **)&SmmAccess);\r
1262 ASSERT_EFI_ERROR (Status);\r
1263\r
1264 //\r
1265 // Get SMRAM information\r
1266 //\r
1267 Size = 0;\r
1268 Status = SmmAccess->GetCapabilities (SmmAccess, &Size, NULL);\r
1269 ASSERT (Status == EFI_BUFFER_TOO_SMALL);\r
1270\r
1271 SmramRanges = (EFI_SMRAM_DESCRIPTOR *)AllocatePool (Size);\r
1272 ASSERT (SmramRanges != NULL);\r
1273\r
1274 Status = SmmAccess->GetCapabilities (SmmAccess, &Size, SmramRanges);\r
1275 ASSERT_EFI_ERROR (Status);\r
1276\r
1277 SmramRangeCount = Size / sizeof (EFI_SMRAM_DESCRIPTOR);\r
1278\r
1279 //\r
1280 // Find the largest SMRAM range between 1MB and 4GB that is at least 256K - 4K in size\r
1281 //\r
1282 CurrentSmramRange = NULL;\r
1283 for (Index = 0, MaxSize = SIZE_256KB - EFI_PAGE_SIZE; Index < SmramRangeCount; Index++) {\r
1284 //\r
1285 // Skip any SMRAM region that is already allocated, needs testing, or needs ECC initialization\r
1286 //\r
1287 if ((SmramRanges[Index].RegionState & (EFI_ALLOCATED | EFI_NEEDS_TESTING | EFI_NEEDS_ECC_INITIALIZATION)) != 0) {\r
1288 continue;\r
1289 }\r
1290\r
1291 if (SmramRanges[Index].CpuStart >= BASE_1MB) {\r
1292 if ((SmramRanges[Index].CpuStart + SmramRanges[Index].PhysicalSize) <= BASE_4GB) {\r
1293 if (SmramRanges[Index].PhysicalSize >= MaxSize) {\r
1294 MaxSize = SmramRanges[Index].PhysicalSize;\r
1295 CurrentSmramRange = &SmramRanges[Index];\r
1296 }\r
1297 }\r
1298 }\r
1299 }\r
1300\r
1301 ASSERT (CurrentSmramRange != NULL);\r
1302\r
1303 *SmrrBase = (UINT32)CurrentSmramRange->CpuStart;\r
1304 *SmrrSize = (UINT32)CurrentSmramRange->PhysicalSize;\r
1305\r
1306 do {\r
1307 Found = FALSE;\r
1308 for (Index = 0; Index < SmramRangeCount; Index++) {\r
1309 if (SmramRanges[Index].CpuStart < *SmrrBase && *SmrrBase == (SmramRanges[Index].CpuStart + SmramRanges[Index].PhysicalSize)) {\r
1310 *SmrrBase = (UINT32)SmramRanges[Index].CpuStart;\r
1311 *SmrrSize = (UINT32)(*SmrrSize + SmramRanges[Index].PhysicalSize);\r
1312 Found = TRUE;\r
1313 } else if ((*SmrrBase + *SmrrSize) == SmramRanges[Index].CpuStart && SmramRanges[Index].PhysicalSize > 0) {\r
1314 *SmrrSize = (UINT32)(*SmrrSize + SmramRanges[Index].PhysicalSize);\r
1315 Found = TRUE;\r
1316 }\r
1317 }\r
1318 } while (Found);\r
1319\r
1320 DEBUG ((EFI_D_INFO, "SMRR Base: 0x%x, SMRR Size: 0x%x\n", *SmrrBase, *SmrrSize));\r
1321}\r
1322\r
1323/**\r
1324Configure SMM Code Access Check feature on an AP.\r
1325SMM Feature Control MSR will be locked after configuration.\r
1326\r
1327@param[in,out] Buffer Pointer to private data buffer.\r
1328**/\r
1329VOID\r
1330EFIAPI\r
1331ConfigSmmCodeAccessCheckOnCurrentProcessor (\r
1332 IN OUT VOID *Buffer\r
1333 )\r
1334{\r
1335 UINTN CpuIndex;\r
1336 UINT64 SmmFeatureControlMsr;\r
1337 UINT64 NewSmmFeatureControlMsr;\r
1338\r
1339 //\r
1340 // Retrieve the CPU Index from the context passed in\r
1341 //\r
1342 CpuIndex = *(UINTN *)Buffer;\r
1343\r
1344 //\r
1345 // Get the current SMM Feature Control MSR value\r
1346 //\r
1347 SmmFeatureControlMsr = SmmCpuFeaturesGetSmmRegister (CpuIndex, SmmRegFeatureControl);\r
1348\r
1349 //\r
1350 // Compute the new SMM Feature Control MSR value\r
1351 //\r
1352 NewSmmFeatureControlMsr = SmmFeatureControlMsr;\r
1353 if (mSmmCodeAccessCheckEnable) {\r
1354 NewSmmFeatureControlMsr |= SMM_CODE_CHK_EN_BIT;\r
1355 }\r
1356 if (FeaturePcdGet (PcdCpuSmmFeatureControlMsrLock)) {\r
1357 NewSmmFeatureControlMsr |= SMM_FEATURE_CONTROL_LOCK_BIT;\r
1358 }\r
1359\r
1360 //\r
1361 // Only set the SMM Feature Control MSR value if the new value is different than the current value\r
1362 //\r
1363 if (NewSmmFeatureControlMsr != SmmFeatureControlMsr) {\r
1364 SmmCpuFeaturesSetSmmRegister (CpuIndex, SmmRegFeatureControl, NewSmmFeatureControlMsr);\r
1365 }\r
1366\r
1367 //\r
1368 // Release the spin lock user to serialize the updates to the SMM Feature Control MSR\r
1369 //\r
1370 ReleaseSpinLock (&mConfigSmmCodeAccessCheckLock);\r
1371}\r
1372\r
1373/**\r
1374Configure SMM Code Access Check feature for all processors.\r
1375SMM Feature Control MSR will be locked after configuration.\r
1376**/\r
1377VOID\r
1378ConfigSmmCodeAccessCheck (\r
1379 VOID\r
1380 )\r
1381{\r
1382 UINTN Index;\r
1383 EFI_STATUS Status;\r
1384\r
1385 //\r
1386 // Check to see if the Feature Control MSR is supported on this CPU\r
1387 //\r
1388 Index = gSmst->CurrentlyExecutingCpu;\r
1389 if (!SmmCpuFeaturesIsSmmRegisterSupported (Index, SmmRegFeatureControl)) {\r
1390 mSmmCodeAccessCheckEnable = FALSE;\r
1391 return;\r
1392 }\r
1393\r
1394 //\r
1395 // Check to see if the CPU supports the SMM Code Access Check feature\r
1396 // Do not access this MSR unless the CPU supports the SmmRegFeatureControl\r
1397 //\r
1398 if ((AsmReadMsr64 (EFI_MSR_SMM_MCA_CAP) & SMM_CODE_ACCESS_CHK_BIT) == 0) {\r
1399 mSmmCodeAccessCheckEnable = FALSE;\r
1400 }\r
1401\r
1402 //\r
1403 // If the SMM Code Access Check feature is disabled and the Feature Control MSR\r
1404 // is not being locked, then no additional work is required\r
1405 //\r
1406 if (!mSmmCodeAccessCheckEnable && !FeaturePcdGet (PcdCpuSmmFeatureControlMsrLock)) {\r
1407 return;\r
1408 }\r
1409\r
1410 //\r
1411 // Initialize the lock used to serialize the MSR programming in BSP and all APs\r
1412 //\r
1413 InitializeSpinLock (&mConfigSmmCodeAccessCheckLock);\r
1414\r
1415 //\r
1416 // Acquire Config SMM Code Access Check spin lock. The BSP will release the\r
1417 // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().\r
1418 //\r
1419 AcquireSpinLock (&mConfigSmmCodeAccessCheckLock);\r
1420\r
1421 //\r
1422 // Enable SMM Code Access Check feature on the BSP.\r
1423 //\r
1424 ConfigSmmCodeAccessCheckOnCurrentProcessor (&Index);\r
1425\r
1426 //\r
1427 // Enable SMM Code Access Check feature for the APs.\r
1428 //\r
1429 for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {\r
1430 if (Index != gSmst->CurrentlyExecutingCpu) {\r
1431\r
1432 //\r
1433 // Acquire Config SMM Code Access Check spin lock. The AP will release the\r
1434 // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().\r
1435 //\r
1436 AcquireSpinLock (&mConfigSmmCodeAccessCheckLock);\r
1437\r
1438 //\r
1439 // Call SmmStartupThisAp() to enable SMM Code Access Check on an AP.\r
1440 //\r
1441 Status = gSmst->SmmStartupThisAp (ConfigSmmCodeAccessCheckOnCurrentProcessor, Index, &Index);\r
1442 ASSERT_EFI_ERROR (Status);\r
1443\r
1444 //\r
1445 // Wait for the AP to release the Config SMM Code Access Check spin lock.\r
1446 //\r
1447 while (!AcquireSpinLockOrFail (&mConfigSmmCodeAccessCheckLock)) {\r
1448 CpuPause ();\r
1449 }\r
1450\r
1451 //\r
1452 // Release the Config SMM Code Access Check spin lock.\r
1453 //\r
1454 ReleaseSpinLock (&mConfigSmmCodeAccessCheckLock);\r
1455 }\r
1456 }\r
1457}\r
1458\r
1459/**\r
1460 Perform the remaining tasks.\r
1461\r
1462**/\r
1463VOID\r
1464PerformRemainingTasks (\r
1465 VOID\r
1466 )\r
1467{\r
1468 if (mSmmReadyToLock) {\r
1469 //\r
1470 // Start SMM Profile feature\r
1471 //\r
1472 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {\r
1473 SmmProfileStart ();\r
1474 }\r
1475 //\r
1476 // Create a mix of 2MB and 4KB page table. Update some memory ranges absent and execute-disable.\r
1477 //\r
1478 InitPaging ();\r
1479 //\r
1480 // Configure SMM Code Access Check feature if available.\r
1481 //\r
1482 ConfigSmmCodeAccessCheck ();\r
1483\r
1484 //\r
1485 // Clean SMM ready to lock flag\r
1486 //\r
1487 mSmmReadyToLock = FALSE;\r
1488 }\r
1489}\r