/** @file
Agent Module to load other modules to deploy SMM Entry Vector for X86 CPU.

Copyright (c) 2009 - 2018, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution.  The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "PiSmmCpuDxeSmm.h"

//
// SMM CPU Private Data structure that contains SMM Configuration Protocol
// along with its supporting fields.
//
SMM_CPU_PRIVATE_DATA  mSmmCpuPrivateData = {
  SMM_CPU_PRIVATE_DATA_SIGNATURE,               // Signature
  NULL,                                         // SmmCpuHandle
  NULL,                                         // Pointer to ProcessorInfo array
  NULL,                                         // Pointer to Operation array
  NULL,                                         // Pointer to CpuSaveStateSize array
  NULL,                                         // Pointer to CpuSaveState array
  { {0} },                                      // SmmReservedSmramRegion
  {
    SmmStartupThisAp,                           // SmmCoreEntryContext.SmmStartupThisAp
    0,                                          // SmmCoreEntryContext.CurrentlyExecutingCpu
    0,                                          // SmmCoreEntryContext.NumberOfCpus
    NULL,                                       // SmmCoreEntryContext.CpuSaveStateSize
    NULL                                        // SmmCoreEntryContext.CpuSaveState
  },
  NULL,                                         // SmmCoreEntry
  {
    mSmmCpuPrivateData.SmmReservedSmramRegion,  // SmmConfiguration.SmramReservedRegions
    RegisterSmmEntry                            // SmmConfiguration.RegisterSmmEntry
  },
};

CPU_HOT_PLUG_DATA  mCpuHotPlugData = {
  CPU_HOT_PLUG_DATA_REVISION_1,                 // Revision
  0,                                            // Array Length of SmBase and APIC ID
  NULL,                                         // Pointer to APIC ID array
  NULL,                                         // Pointer to SMBASE array
  0,                                            // Reserved
  0,                                            // SmrrBase
  0                                             // SmrrSize
};

//
// Global pointer used to access mSmmCpuPrivateData from outside and inside SMM
//
SMM_CPU_PRIVATE_DATA  *gSmmCpuPrivate = &mSmmCpuPrivateData;

//
// SMM Relocation variables
//
volatile BOOLEAN  *mRebased;
volatile BOOLEAN  mIsBsp;

///
/// Handle for the SMM CPU Protocol
///
EFI_HANDLE  mSmmCpuHandle = NULL;

///
/// SMM CPU Protocol instance
///
EFI_SMM_CPU_PROTOCOL  mSmmCpu = {
  SmmReadSaveState,
  SmmWriteSaveState
};

///
/// SMM Memory Attribute Protocol instance
///
EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL  mSmmMemoryAttribute = {
  EdkiiSmmGetMemoryAttributes,
  EdkiiSmmSetMemoryAttributes,
  EdkiiSmmClearMemoryAttributes
};

EFI_CPU_INTERRUPT_HANDLER  mExternalVectorTable[EXCEPTION_VECTOR_NUMBER];

//
// SMM stack information
//
UINTN  mSmmStackArrayBase;
UINTN  mSmmStackArrayEnd;
UINTN  mSmmStackSize;

UINTN  mMaxNumberOfCpus = 1;
UINTN  mNumberOfCpus = 1;

//
// SMM ready to lock flag
//
BOOLEAN  mSmmReadyToLock = FALSE;

//
// Global used to cache PCD for SMM Code Access Check enable
//
BOOLEAN  mSmmCodeAccessCheckEnable = FALSE;

//
// Global copy of the PcdPteMemoryEncryptionAddressOrMask
//
UINT64  mAddressEncMask = 0;

//
// Spin lock used to serialize setting of SMM Code Access Check feature
//
SPIN_LOCK  *mConfigSmmCodeAccessCheckLock = NULL;

//
// Saved SMM ranges information
//
EFI_SMRAM_DESCRIPTOR  *mSmmCpuSmramRanges;
UINTN                 mSmmCpuSmramRangeCount;

UINT8  mPhysicalAddressBits;

//
// Control register contents saved for SMM S3 resume state initialization.
//
UINT32  mSmmCr4;

/**
  Initialize IDT to setup exception handlers for SMM.

**/
VOID
InitializeSmmIdt (
  VOID
  )
{
  EFI_STATUS       Status;
  BOOLEAN          InterruptState;
  IA32_DESCRIPTOR  DxeIdtr;

  //
  // There are 32 (not 255) entries in it since only processor
  // generated exceptions will be handled.
  //
  gcSmiIdtr.Limit = (sizeof(IA32_IDT_GATE_DESCRIPTOR) * 32) - 1;
  //
  // Allocate page aligned IDT, because it might be set as read only.
  //
  gcSmiIdtr.Base = (UINTN)AllocateCodePages (EFI_SIZE_TO_PAGES(gcSmiIdtr.Limit + 1));
  ASSERT (gcSmiIdtr.Base != 0);
  ZeroMem ((VOID *)gcSmiIdtr.Base, gcSmiIdtr.Limit + 1);

  //
  // Disable Interrupt and save DXE IDT table
  //
  InterruptState = SaveAndDisableInterrupts ();
  AsmReadIdtr (&DxeIdtr);
  //
  // Load SMM temporary IDT table
  //
  AsmWriteIdtr (&gcSmiIdtr);
  //
  // Setup SMM default exception handlers, SMM IDT table
  // will be updated and saved in gcSmiIdtr
  //
  Status = InitializeCpuExceptionHandlers (NULL);
  ASSERT_EFI_ERROR (Status);
  //
  // Restore DXE IDT table and CPU interrupt
  //
  AsmWriteIdtr ((IA32_DESCRIPTOR *) &DxeIdtr);
  SetInterruptState (InterruptState);
}

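//
// Sizing note for the IDT built in InitializeSmmIdt() above (illustrative figures):
// with 32 exception vectors, the limit is (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32) - 1,
// i.e. 0x1FF on X64 (16-byte gates) or 0xFF on IA32 (8-byte gates), so the page-aligned
// allocation is a single 4 KB page in either build.
//
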
/**
  Search module name by input IP address and output it.

  @param CallerIpAddress    Caller instruction pointer.

**/
VOID
DumpModuleInfoByIp (
  IN  UINTN              CallerIpAddress
  )
{
  UINTN                               Pe32Data;
  VOID                                *PdbPointer;

  //
  // Find Image Base
  //
  Pe32Data = PeCoffSearchImageBase (CallerIpAddress);
  if (Pe32Data != 0) {
    DEBUG ((DEBUG_ERROR, "It is invoked from the instruction before IP(0x%p)", (VOID *) CallerIpAddress));
    PdbPointer = PeCoffLoaderGetPdbPointer ((VOID *) Pe32Data);
    if (PdbPointer != NULL) {
      DEBUG ((DEBUG_ERROR, " in module (%a)\n", PdbPointer));
    }
  }
}

/**
  Read information from the CPU save state.

  @param  This      EFI_SMM_CPU_PROTOCOL instance
  @param  Width     The number of bytes to read from the CPU save state.
  @param  Register  Specifies the CPU register to read from the save state.
  @param  CpuIndex  Specifies the zero-based index of the CPU save state.
  @param  Buffer    Upon return, this holds the CPU register value read from the save state.

  @retval EFI_SUCCESS           The register was read from Save State
  @retval EFI_NOT_FOUND         The register is not defined for the Save State of Processor
  @retval EFI_INVALID_PARAMETER This or Buffer is NULL.

**/
EFI_STATUS
EFIAPI
SmmReadSaveState (
  IN CONST EFI_SMM_CPU_PROTOCOL         *This,
  IN UINTN                              Width,
  IN EFI_SMM_SAVE_STATE_REGISTER        Register,
  IN UINTN                              CpuIndex,
  OUT VOID                              *Buffer
  )
{
  EFI_STATUS  Status;

  //
  // Retrieve pointer to the specified CPU's SMM Save State buffer
  //
  if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {
    return EFI_INVALID_PARAMETER;
  }

  //
  // Check for special EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID
  //
  if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {
    //
    // The pseudo-register only supports the 64-bit size specified by Width.
    //
    if (Width != sizeof (UINT64)) {
      return EFI_INVALID_PARAMETER;
    }
    //
    // If the processor is in SMM at the time the SMI occurred,
    // the pseudo register value for EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID is returned in Buffer.
    // Otherwise, EFI_NOT_FOUND is returned.
    //
    if (*(mSmmMpSyncData->CpuData[CpuIndex].Present)) {
      *(UINT64 *)Buffer = gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId;
      return EFI_SUCCESS;
    } else {
      return EFI_NOT_FOUND;
    }
  }

  if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
    return EFI_INVALID_PARAMETER;
  }

  Status = SmmCpuFeaturesReadSaveStateRegister (CpuIndex, Register, Width, Buffer);
  if (Status == EFI_UNSUPPORTED) {
    Status = ReadSaveStateRegister (CpuIndex, Register, Width, Buffer);
  }
  return Status;
}

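//
// Usage sketch for the SMM CPU Protocol produced by this driver (variable names here
// are illustrative and not part of this file): another SMM driver can read a register
// from a CPU's save state, for example RAX of the CPU identified by CpuIndex:
//
//   EFI_SMM_CPU_PROTOCOL  *SmmCpu;
//   UINT64                Rax;
//   EFI_STATUS            Status;
//
//   Status = gSmst->SmmLocateProtocol (&gEfiSmmCpuProtocolGuid, NULL, (VOID **)&SmmCpu);
//   if (!EFI_ERROR (Status)) {
//     Status = SmmCpu->ReadSaveState (
//                        SmmCpu,
//                        sizeof (UINT64),
//                        EFI_SMM_SAVE_STATE_REGISTER_RAX,
//                        CpuIndex,
//                        &Rax
//                        );
//   }
//
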
/**
  Write data to the CPU save state.

  @param  This      EFI_SMM_CPU_PROTOCOL instance
  @param  Width     The number of bytes to write to the CPU save state.
  @param  Register  Specifies the CPU register to write to the save state.
  @param  CpuIndex  Specifies the zero-based index of the CPU save state
  @param  Buffer    Upon entry, this holds the new CPU register value.

  @retval EFI_SUCCESS           The register was written to Save State
  @retval EFI_NOT_FOUND         The register is not defined for the Save State of Processor
  @retval EFI_INVALID_PARAMETER ProcessorIndex or Width is not correct

**/
EFI_STATUS
EFIAPI
SmmWriteSaveState (
  IN CONST EFI_SMM_CPU_PROTOCOL         *This,
  IN UINTN                              Width,
  IN EFI_SMM_SAVE_STATE_REGISTER        Register,
  IN UINTN                              CpuIndex,
  IN CONST VOID                         *Buffer
  )
{
  EFI_STATUS  Status;

  //
  // Retrieve pointer to the specified CPU's SMM Save State buffer
  //
  if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {
    return EFI_INVALID_PARAMETER;
  }

  //
  // Writes to EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID are ignored
  //
  if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {
    return EFI_SUCCESS;
  }

  if (!mSmmMpSyncData->CpuData[CpuIndex].Present) {
    return EFI_INVALID_PARAMETER;
  }

  Status = SmmCpuFeaturesWriteSaveStateRegister (CpuIndex, Register, Width, Buffer);
  if (Status == EFI_UNSUPPORTED) {
    Status = WriteSaveStateRegister (CpuIndex, Register, Width, Buffer);
  }
  return Status;
}


/**
  C function for SMI handler. Used to change all processors' SMMBase Register.

**/
VOID
EFIAPI
SmmInitHandler (
  VOID
  )
{
  UINT32             ApicId;
  UINTN              Index;

  //
  // Update SMM IDT entries' code segment and load IDT
  //
  AsmWriteIdtr (&gcSmiIdtr);
  ApicId = GetApicId ();

  ASSERT (mNumberOfCpus <= mMaxNumberOfCpus);

  for (Index = 0; Index < mNumberOfCpus; Index++) {
    if (ApicId == (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
      //
      // Initialize SMM specific features on the currently executing CPU
      //
      SmmCpuFeaturesInitializeProcessor (
        Index,
        mIsBsp,
        gSmmCpuPrivate->ProcessorInfo,
        &mCpuHotPlugData
        );

      if (!mSmmS3Flag) {
        //
        // Check XD and BTS features on each processor on normal boot
        //
        CheckFeatureSupported ();
      }

      if (mIsBsp) {
        //
        // BSP rebase is already done above.
        // Initialize private data during S3 resume
        //
        InitializeMpSyncData ();
      }

      //
      // Hook return after RSM to set SMM re-based flag
      //
      SemaphoreHook (Index, &mRebased[Index]);

      return;
    }
  }
  ASSERT (FALSE);
}

/**
  Relocate SmmBases for each processor.

  Executed on first boot and all S3 resumes.

**/
VOID
EFIAPI
SmmRelocateBases (
  VOID
  )
{
  UINT8                 BakBuf[BACK_BUF_SIZE];
  SMRAM_SAVE_STATE_MAP  BakBuf2;
  SMRAM_SAVE_STATE_MAP  *CpuStatePtr;
  UINT8                 *U8Ptr;
  UINT32                ApicId;
  UINTN                 Index;
  UINTN                 BspIndex;

  //
  // Make sure the reserved size is large enough for procedure SmmInitTemplate.
  //
  ASSERT (sizeof (BakBuf) >= gcSmmInitSize);

  //
  // Patch ASM code template with current CR0, CR3, and CR4 values
  //
  gSmmCr0 = (UINT32)AsmReadCr0 ();
  PatchInstructionX86 (gPatchSmmCr3, AsmReadCr3 (), 4);
  mSmmCr4 = (UINT32)AsmReadCr4 ();
  PatchInstructionX86 (gPatchSmmCr4, mSmmCr4, 4);

  //
  // Patch GDTR for SMM base relocation
  //
  gcSmiInitGdtr.Base  = gcSmiGdtr.Base;
  gcSmiInitGdtr.Limit = gcSmiGdtr.Limit;

  U8Ptr = (UINT8*)(UINTN)(SMM_DEFAULT_SMBASE + SMM_HANDLER_OFFSET);
  CpuStatePtr = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);

  //
  // Backup original contents at address 0x38000
  //
  CopyMem (BakBuf, U8Ptr, sizeof (BakBuf));
  CopyMem (&BakBuf2, CpuStatePtr, sizeof (BakBuf2));

  //
  // Load image for relocation
  //
  CopyMem (U8Ptr, gcSmmInitTemplate, gcSmmInitSize);

  //
  // Retrieve the local APIC ID of current processor
  //
  ApicId = GetApicId ();

  //
  // Relocate SM bases for all APs
  // This is APs' 1st SMI - rebase will be done here, and APs' default SMI handler will be overridden by gcSmmInitTemplate
  //
  mIsBsp   = FALSE;
  BspIndex = (UINTN)-1;
  for (Index = 0; Index < mNumberOfCpus; Index++) {
    mRebased[Index] = FALSE;
    if (ApicId != (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
      SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
      //
      // Wait for this AP to finish its 1st SMI
      //
      while (!mRebased[Index]);
    } else {
      //
      // BSP will be relocated later
      //
      BspIndex = Index;
    }
  }

  //
  // Relocate BSP's SMM base
  //
  ASSERT (BspIndex != (UINTN)-1);
  mIsBsp = TRUE;
  SendSmiIpi (ApicId);
  //
  // Wait for the BSP to finish its 1st SMI
  //
  while (!mRebased[BspIndex]);

  //
  // Restore contents at address 0x38000
  //
  CopyMem (CpuStatePtr, &BakBuf2, sizeof (BakBuf2));
  CopyMem (U8Ptr, BakBuf, sizeof (BakBuf));
}

/**
  SMM Ready To Lock event notification handler.

  The CPU S3 data is copied to SMRAM for security and mSmmReadyToLock is set to
  perform additional lock actions that must be performed from SMM on the next SMI.

  @param[in] Protocol   Points to the protocol's unique identifier.
  @param[in] Interface  Points to the interface instance.
  @param[in] Handle     The handle on which the interface was installed.

  @retval EFI_SUCCESS   Notification handler runs successfully.
 **/
EFI_STATUS
EFIAPI
SmmReadyToLockEventNotify (
  IN CONST EFI_GUID  *Protocol,
  IN VOID            *Interface,
  IN EFI_HANDLE      Handle
  )
{
  GetAcpiCpuData ();

  //
  // Cache a copy of UEFI memory map before we start profiling feature.
  //
  GetUefiMemoryMap ();

  //
  // Set SMM ready to lock flag and return
  //
  mSmmReadyToLock = TRUE;
  return EFI_SUCCESS;
}

/**
  The module Entry Point of the CPU SMM driver.

  @param  ImageHandle    The firmware allocated handle for the EFI image.
  @param  SystemTable    A pointer to the EFI System Table.

  @retval EFI_SUCCESS    The entry point is executed successfully.
  @retval Other          Some error occurs when executing this entry point.

**/
EFI_STATUS
EFIAPI
PiCpuSmmEntry (
  IN EFI_HANDLE        ImageHandle,
  IN EFI_SYSTEM_TABLE  *SystemTable
  )
{
  EFI_STATUS                 Status;
  EFI_MP_SERVICES_PROTOCOL   *MpServices;
  UINTN                      NumberOfEnabledProcessors;
  UINTN                      Index;
  VOID                       *Buffer;
  UINTN                      BufferPages;
  UINTN                      TileCodeSize;
  UINTN                      TileDataSize;
  UINTN                      TileSize;
  UINT8                      *Stacks;
  VOID                       *Registration;
  UINT32                     RegEax;
  UINT32                     RegEdx;
  UINTN                      FamilyId;
  UINTN                      ModelId;
  UINT32                     Cr3;

  //
  // Initialize address fixup
  //
  PiSmmCpuSmmInitFixupAddress ();
  PiSmmCpuSmiEntryFixupAddress ();

  //
  // Initialize Debug Agent to support source level debug in SMM code
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_SMM, NULL, NULL);

  //
  // Report the start of CPU SMM initialization.
  //
  REPORT_STATUS_CODE (
    EFI_PROGRESS_CODE,
    EFI_COMPUTING_UNIT_HOST_PROCESSOR | EFI_CU_HP_PC_SMM_INIT
    );

  //
  // Fix segment address of the long-mode-switch jump
  //
  if (sizeof (UINTN) == sizeof (UINT64)) {
    gSmmJmpAddr.Segment = LONG_MODE_CODE_SEGMENT;
  }

  //
  // Find out SMRR Base and SMRR Size
  //
  FindSmramInfo (&mCpuHotPlugData.SmrrBase, &mCpuHotPlugData.SmrrSize);

  //
  // Get MP Services Protocol
  //
  Status = SystemTable->BootServices->LocateProtocol (&gEfiMpServiceProtocolGuid, NULL, (VOID **)&MpServices);
  ASSERT_EFI_ERROR (Status);

  //
  // Use MP Services Protocol to retrieve the number of processors and number of enabled processors
  //
  Status = MpServices->GetNumberOfProcessors (MpServices, &mNumberOfCpus, &NumberOfEnabledProcessors);
  ASSERT_EFI_ERROR (Status);
  ASSERT (mNumberOfCpus <= PcdGet32 (PcdCpuMaxLogicalProcessorNumber));

  //
  // If CPU hot plug is supported, PcdCpuSmmEnableBspElection should be set to TRUE.
  // A constant BSP index makes no sense because it may be hot removed.
  //
  DEBUG_CODE (
    if (FeaturePcdGet (PcdCpuHotPlugSupport)) {

      ASSERT (FeaturePcdGet (PcdCpuSmmEnableBspElection));
    }
  );

  //
  // Save the PcdCpuSmmCodeAccessCheckEnable value into a global variable.
  //
  mSmmCodeAccessCheckEnable = PcdGetBool (PcdCpuSmmCodeAccessCheckEnable);
  DEBUG ((EFI_D_INFO, "PcdCpuSmmCodeAccessCheckEnable = %d\n", mSmmCodeAccessCheckEnable));

  //
  // Save the PcdPteMemoryEncryptionAddressOrMask value into a global variable.
  // Make sure AddressEncMask is contained to smallest supported address field.
  //
  mAddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;
  DEBUG ((EFI_D_INFO, "mAddressEncMask = 0x%lx\n", mAddressEncMask));

  //
  // If CPU hot plug is supported, we need to allocate resources for possibly hot-added processors
  //
  if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
    mMaxNumberOfCpus = PcdGet32 (PcdCpuMaxLogicalProcessorNumber);
  } else {
    mMaxNumberOfCpus = mNumberOfCpus;
  }
  gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus = mMaxNumberOfCpus;

  //
  // The CPU save state and code for the SMI entry point are tiled within an SMRAM
  // allocated buffer. The minimum size of this buffer for a uniprocessor system
  // is 32 KB, because the entry point is SMBASE + 32KB, and CPU save state area
  // just below SMBASE + 64KB. If more than one CPU is present in the platform,
  // then the SMI entry point and the CPU save state areas can be tiled to minimize
  // the total amount of SMRAM required for all the CPUs. The tile size can be computed
  // by adding the CPU save state size, any extra CPU specific context, and
  // the size of code that must be placed at the SMI entry point to transfer
  // control to a C function in the native SMM execution mode. This size is
  // rounded up to the nearest power of 2 to give the tile size for each CPU.
  // The total amount of memory required is the maximum number of CPUs that
  // platform supports times the tile size. The picture below shows the tiling,
  // where m is the number of tiles that fit in 32KB.
  //
  //  +-----------------------------+ <-- 2^n offset from Base of allocated buffer
  //  |   CPU m+1 Save State        |
  //  +-----------------------------+
  //  |   CPU m+1 Extra Data        |
  //  +-----------------------------+
  //  |   Padding                   |
  //  +-----------------------------+
  //  |   CPU 2m  SMI Entry         |
  //  +#############################+ <-- Base of allocated buffer + 64 KB
  //  |   CPU m-1 Save State        |
  //  +-----------------------------+
  //  |   CPU m-1 Extra Data        |
  //  +-----------------------------+
  //  |   Padding                   |
  //  +-----------------------------+
  //  |   CPU 2m-1 SMI Entry        |
  //  +=============================+ <-- 2^n offset from Base of allocated buffer
  //  |   . . . . . . . . . . . .   |
  //  +=============================+ <-- 2^n offset from Base of allocated buffer
  //  |   CPU 2 Save State          |
  //  +-----------------------------+
  //  |   CPU 2 Extra Data          |
  //  +-----------------------------+
  //  |   Padding                   |
  //  +-----------------------------+
  //  |   CPU m+1 SMI Entry         |
  //  +=============================+ <-- Base of allocated buffer + 32 KB
  //  |   CPU 1 Save State          |
  //  +-----------------------------+
  //  |   CPU 1 Extra Data          |
  //  +-----------------------------+
  //  |   Padding                   |
  //  +-----------------------------+
  //  |   CPU m SMI Entry           |
  //  +#############################+ <-- Base of allocated buffer + 32 KB == CPU 0 SMBASE + 64 KB
  //  |   CPU 0 Save State          |
  //  +-----------------------------+
  //  |   CPU 0 Extra Data          |
  //  +-----------------------------+
  //  |   Padding                   |
  //  +-----------------------------+
  //  |   CPU m-1 SMI Entry         |
  //  +=============================+ <-- 2^n offset from Base of allocated buffer
  //  |   . . . . . . . . . . . .   |
  //  +=============================+ <-- 2^n offset from Base of allocated buffer
  //  |   Padding                   |
  //  +-----------------------------+
  //  |   CPU 1 SMI Entry           |
  //  +=============================+ <-- 2^n offset from Base of allocated buffer
  //  |   Padding                   |
  //  +-----------------------------+
  //  |   CPU 0 SMI Entry           |
  //  +#############################+ <-- Base of allocated buffer == CPU 0 SMBASE + 32 KB
  //

  //
  // Retrieve CPU Family
  //
  AsmCpuid (CPUID_VERSION_INFO, &RegEax, NULL, NULL, NULL);
  FamilyId = (RegEax >> 8) & 0xf;
  ModelId  = (RegEax >> 4) & 0xf;
  if (FamilyId == 0x06 || FamilyId == 0x0f) {
    ModelId = ModelId | ((RegEax >> 12) & 0xf0);
  }

  RegEdx = 0;
  AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
  if (RegEax >= CPUID_EXTENDED_CPU_SIG) {
    AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);
  }
  //
  // Determine the mode of the CPU at the time an SMI occurs
  //   Intel(R) 64 and IA-32 Architectures Software Developer's Manual
  //   Volume 3C, Section 34.4.1.1
  //
  mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT;
  if ((RegEdx & BIT29) != 0) {
    mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;
  }
  if (FamilyId == 0x06) {
    if (ModelId == 0x17 || ModelId == 0x0f || ModelId == 0x1c) {
      mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;
    }
  }

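  //
  // Note: BIT29 tested above is the Long Mode (Intel 64) support bit reported by
  // CPUID.80000001h:EDX (CPUID_EXTENDED_CPU_SIG); processors that set it use the
  // 64-bit SMM save state map layout, which is why mSmmSaveStateRegisterLma is
  // promoted to EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT.
  //
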
  //
  // Compute tile size of buffer required to hold the CPU SMRAM Save State Map, extra CPU
  // specific context that starts at SMBASE + SMM_PSD_OFFSET, and the SMI entry point. This
  // size is rounded up to nearest power of 2.
  //
  TileCodeSize = GetSmiHandlerSize ();
  TileCodeSize = ALIGN_VALUE(TileCodeSize, SIZE_4KB);
  TileDataSize = (SMRAM_SAVE_STATE_MAP_OFFSET - SMM_PSD_OFFSET) + sizeof (SMRAM_SAVE_STATE_MAP);
  TileDataSize = ALIGN_VALUE(TileDataSize, SIZE_4KB);
  TileSize = TileDataSize + TileCodeSize - 1;
  TileSize = 2 * GetPowerOfTwo32 ((UINT32)TileSize);
  DEBUG ((EFI_D_INFO, "SMRAM TileSize = 0x%08x (0x%08x, 0x%08x)\n", TileSize, TileCodeSize, TileDataSize));

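  //
  // Worked example with illustrative values: if GetSmiHandlerSize () and the data
  // portion each round up to one 4 KB page, then TileSize = 0x1000 + 0x1000 - 1 =
  // 0x1FFF, and 2 * GetPowerOfTwo32 (0x1FFF) = 2 * 0x1000 = 0x2000, i.e. an 8 KB
  // tile per CPU.
  //
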
  //
  // If the TileSize is larger than space available for the SMI Handler of
  // CPU[i], the extra CPU specific context of CPU[i+1], and the SMRAM Save
  // State Map of CPU[i+1], then ASSERT(). If this ASSERT() is triggered, then
  // the SMI Handler size must be reduced or the size of the extra CPU specific
  // context must be reduced.
  //
  ASSERT (TileSize <= (SMRAM_SAVE_STATE_MAP_OFFSET + sizeof (SMRAM_SAVE_STATE_MAP) - SMM_HANDLER_OFFSET));

  //
  // Allocate buffer for all of the tiles.
  //
  // Intel(R) 64 and IA-32 Architectures Software Developer's Manual
  // Volume 3C, Section 34.11 SMBASE Relocation
  //   For Pentium and Intel486 processors, the SMBASE values must be
  //   aligned on a 32-KByte boundary or the processor will enter shutdown
  //   state during the execution of a RSM instruction.
  //
  // Intel486 processors: FamilyId is 4
  // Pentium processors : FamilyId is 5
  //
  BufferPages = EFI_SIZE_TO_PAGES (SIZE_32KB + TileSize * (mMaxNumberOfCpus - 1));
  if ((FamilyId == 4) || (FamilyId == 5)) {
    Buffer = AllocateAlignedCodePages (BufferPages, SIZE_32KB);
  } else {
    Buffer = AllocateAlignedCodePages (BufferPages, SIZE_4KB);
  }
  ASSERT (Buffer != NULL);
  DEBUG ((EFI_D_INFO, "SMRAM SaveState Buffer (0x%08x, 0x%08x)\n", Buffer, EFI_PAGES_TO_SIZE(BufferPages)));

  //
  // Allocate buffer for pointers to array in SMM_CPU_PRIVATE_DATA.
  //
  gSmmCpuPrivate->ProcessorInfo = (EFI_PROCESSOR_INFORMATION *)AllocatePool (sizeof (EFI_PROCESSOR_INFORMATION) * mMaxNumberOfCpus);
  ASSERT (gSmmCpuPrivate->ProcessorInfo != NULL);

  gSmmCpuPrivate->Operation = (SMM_CPU_OPERATION *)AllocatePool (sizeof (SMM_CPU_OPERATION) * mMaxNumberOfCpus);
  ASSERT (gSmmCpuPrivate->Operation != NULL);

  gSmmCpuPrivate->CpuSaveStateSize = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);
  ASSERT (gSmmCpuPrivate->CpuSaveStateSize != NULL);

  gSmmCpuPrivate->CpuSaveState = (VOID **)AllocatePool (sizeof (VOID *) * mMaxNumberOfCpus);
  ASSERT (gSmmCpuPrivate->CpuSaveState != NULL);

  mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveStateSize = gSmmCpuPrivate->CpuSaveStateSize;
  mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveState     = gSmmCpuPrivate->CpuSaveState;

  //
  // Allocate buffer for pointers to array in CPU_HOT_PLUG_DATA.
  //
  mCpuHotPlugData.ApicId = (UINT64 *)AllocatePool (sizeof (UINT64) * mMaxNumberOfCpus);
  ASSERT (mCpuHotPlugData.ApicId != NULL);
  mCpuHotPlugData.SmBase = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);
  ASSERT (mCpuHotPlugData.SmBase != NULL);
  mCpuHotPlugData.ArrayLength = (UINT32)mMaxNumberOfCpus;

  //
  // Retrieve APIC ID of each enabled processor from the MP Services protocol.
  // Also compute the SMBASE address, CPU Save State address, and CPU Save state
  // size for each CPU in the platform
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    mCpuHotPlugData.SmBase[Index]           = (UINTN)Buffer + Index * TileSize - SMM_HANDLER_OFFSET;
    gSmmCpuPrivate->CpuSaveStateSize[Index] = sizeof(SMRAM_SAVE_STATE_MAP);
    gSmmCpuPrivate->CpuSaveState[Index]     = (VOID *)(mCpuHotPlugData.SmBase[Index] + SMRAM_SAVE_STATE_MAP_OFFSET);
    gSmmCpuPrivate->Operation[Index]        = SmmCpuNone;

    if (Index < mNumberOfCpus) {
      Status = MpServices->GetProcessorInfo (MpServices, Index, &gSmmCpuPrivate->ProcessorInfo[Index]);
      ASSERT_EFI_ERROR (Status);
      mCpuHotPlugData.ApicId[Index] = gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId;

      DEBUG ((EFI_D_INFO, "CPU[%03x]  APIC ID=%04x  SMBASE=%08x  SaveState=%08x  Size=%08x\n",
        Index,
        (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId,
        mCpuHotPlugData.SmBase[Index],
        gSmmCpuPrivate->CpuSaveState[Index],
        gSmmCpuPrivate->CpuSaveStateSize[Index]
        ));
    } else {
      gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId = INVALID_APIC_ID;
      mCpuHotPlugData.ApicId[Index] = INVALID_APIC_ID;
    }
  }

  //
  // Allocate SMI stacks for all processors.
  //
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    //
    // Two more pages are allocated for each processor:
    // one is a guard page and the other is a known good stack.
    //
    // +-------------------------------------------+-----+-------------------------------------------+
    // | Known Good Stack | Guard Page | SMM Stack | ... | Known Good Stack | Guard Page | SMM Stack |
    // +-------------------------------------------+-----+-------------------------------------------+
    // |                                           |     |                                           |
    // |<-------------- Processor 0 -------------->|     |<-------------- Processor n -------------->|
    //
    mSmmStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)) + 2);
    Stacks = (UINT8 *) AllocatePages (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)) + 2));
    ASSERT (Stacks != NULL);
    mSmmStackArrayBase = (UINTN)Stacks;
    mSmmStackArrayEnd  = mSmmStackArrayBase + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * mSmmStackSize - 1;
  } else {
    mSmmStackSize = PcdGet32 (PcdCpuSmmStackSize);
    Stacks = (UINT8 *) AllocatePages (EFI_SIZE_TO_PAGES (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * mSmmStackSize));
    ASSERT (Stacks != NULL);
  }

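  //
  // Sizing example (PcdCpuSmmStackSize is platform-specific; 16 KB is only an
  // illustrative value): EFI_SIZE_TO_PAGES (16 KB) = 4 pages, plus the guard page
  // and the known good stack page gives 6 pages, so mSmmStackSize = 24 KB per
  // processor when PcdCpuSmmStackGuard is TRUE; without the guard it stays 16 KB.
  //
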
  //
  // Set SMI stack for SMM base relocation
  //
  gSmmInitStack = (UINTN) (Stacks + mSmmStackSize - sizeof (UINTN));

  //
  // Initialize IDT
  //
  InitializeSmmIdt ();

  //
  // Relocate SMM Base addresses to the ones allocated from SMRAM
  //
  mRebased = (BOOLEAN *)AllocateZeroPool (sizeof (BOOLEAN) * mMaxNumberOfCpus);
  ASSERT (mRebased != NULL);
  SmmRelocateBases ();

  //
  // Call hook for BSP to perform extra actions in normal mode after all
  // SMM base addresses have been relocated on all CPUs
  //
  SmmCpuFeaturesSmmRelocationComplete ();

  DEBUG ((DEBUG_INFO, "mXdSupported - 0x%x\n", mXdSupported));

  //
  // SMM Time initialization
  //
  InitializeSmmTimer ();

  //
  // Initialize MP globals
  //
  Cr3 = InitializeMpServiceData (Stacks, mSmmStackSize);

  //
  // Fill in SMM Reserved Regions
  //
  gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedStart = 0;
  gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedSize  = 0;

  //
  // Install the SMM Configuration Protocol onto a new handle on the handle database.
  // The entire SMM Configuration Protocol is allocated from SMRAM, so only a pointer
  // to an SMRAM address will be present in the handle database
  //
  Status = SystemTable->BootServices->InstallMultipleProtocolInterfaces (
             &gSmmCpuPrivate->SmmCpuHandle,
             &gEfiSmmConfigurationProtocolGuid, &gSmmCpuPrivate->SmmConfiguration,
             NULL
             );
  ASSERT_EFI_ERROR (Status);

  //
  // Install the SMM CPU Protocol into SMM protocol database
  //
  Status = gSmst->SmmInstallProtocolInterface (
             &mSmmCpuHandle,
             &gEfiSmmCpuProtocolGuid,
             EFI_NATIVE_INTERFACE,
             &mSmmCpu
             );
  ASSERT_EFI_ERROR (Status);

  //
  // Install the SMM Memory Attribute Protocol into SMM protocol database
  //
  Status = gSmst->SmmInstallProtocolInterface (
             &mSmmCpuHandle,
             &gEdkiiSmmMemoryAttributeProtocolGuid,
             EFI_NATIVE_INTERFACE,
             &mSmmMemoryAttribute
             );
  ASSERT_EFI_ERROR (Status);

  //
  // Expose address of CPU Hot Plug Data structure if CPU hot plug is supported.
  //
  if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
    Status = PcdSet64S (PcdCpuHotPlugDataAddress, (UINT64)(UINTN)&mCpuHotPlugData);
    ASSERT_EFI_ERROR (Status);
  }

  //
  // Initialize SMM CPU Services Support
  //
  Status = InitializeSmmCpuServices (mSmmCpuHandle);
  ASSERT_EFI_ERROR (Status);

  //
  // register SMM Ready To Lock Protocol notification
  //
  Status = gSmst->SmmRegisterProtocolNotify (
             &gEfiSmmReadyToLockProtocolGuid,
             SmmReadyToLockEventNotify,
             &Registration
             );
  ASSERT_EFI_ERROR (Status);

  //
  // Initialize SMM Profile feature
  //
  InitSmmProfile (Cr3);

  GetAcpiS3EnableFlag ();
  InitSmmS3ResumeState (Cr3);

  DEBUG ((EFI_D_INFO, "SMM CPU Module exit from SMRAM with EFI_SUCCESS\n"));

  return EFI_SUCCESS;
}

/**

  Find out SMRAM information including SMRR base and SMRR size.

  @param          SmrrBase          SMRR base
  @param          SmrrSize          SMRR size

**/
VOID
FindSmramInfo (
  OUT UINT32  *SmrrBase,
  OUT UINT32  *SmrrSize
  )
{
  EFI_STATUS                Status;
  UINTN                     Size;
  EFI_SMM_ACCESS2_PROTOCOL  *SmmAccess;
  EFI_SMRAM_DESCRIPTOR      *CurrentSmramRange;
  UINTN                     Index;
  UINT64                    MaxSize;
  BOOLEAN                   Found;

  //
  // Get SMM Access Protocol
  //
  Status = gBS->LocateProtocol (&gEfiSmmAccess2ProtocolGuid, NULL, (VOID **)&SmmAccess);
  ASSERT_EFI_ERROR (Status);

  //
  // Get SMRAM information
  //
  Size = 0;
  Status = SmmAccess->GetCapabilities (SmmAccess, &Size, NULL);
  ASSERT (Status == EFI_BUFFER_TOO_SMALL);

  mSmmCpuSmramRanges = (EFI_SMRAM_DESCRIPTOR *)AllocatePool (Size);
  ASSERT (mSmmCpuSmramRanges != NULL);

  Status = SmmAccess->GetCapabilities (SmmAccess, &Size, mSmmCpuSmramRanges);
  ASSERT_EFI_ERROR (Status);

  mSmmCpuSmramRangeCount = Size / sizeof (EFI_SMRAM_DESCRIPTOR);

  //
  // Find the largest SMRAM range between 1MB and 4GB that is at least 256K - 4K in size
  //
  CurrentSmramRange = NULL;
  for (Index = 0, MaxSize = SIZE_256KB - EFI_PAGE_SIZE; Index < mSmmCpuSmramRangeCount; Index++) {
    //
    // Skip any SMRAM region that is already allocated, needs testing, or needs ECC initialization
    //
    if ((mSmmCpuSmramRanges[Index].RegionState & (EFI_ALLOCATED | EFI_NEEDS_TESTING | EFI_NEEDS_ECC_INITIALIZATION)) != 0) {
      continue;
    }

    if (mSmmCpuSmramRanges[Index].CpuStart >= BASE_1MB) {
      if ((mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize) <= SMRR_MAX_ADDRESS) {
        if (mSmmCpuSmramRanges[Index].PhysicalSize >= MaxSize) {
          MaxSize = mSmmCpuSmramRanges[Index].PhysicalSize;
          CurrentSmramRange = &mSmmCpuSmramRanges[Index];
        }
      }
    }
  }

  ASSERT (CurrentSmramRange != NULL);

  *SmrrBase = (UINT32)CurrentSmramRange->CpuStart;
  *SmrrSize = (UINT32)CurrentSmramRange->PhysicalSize;

  do {
    Found = FALSE;
    for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {
      if (mSmmCpuSmramRanges[Index].CpuStart < *SmrrBase &&
          *SmrrBase == (mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize)) {
        *SmrrBase = (UINT32)mSmmCpuSmramRanges[Index].CpuStart;
        *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);
        Found = TRUE;
      } else if ((*SmrrBase + *SmrrSize) == mSmmCpuSmramRanges[Index].CpuStart && mSmmCpuSmramRanges[Index].PhysicalSize > 0) {
        *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);
        Found = TRUE;
      }
    }
  } while (Found);

  DEBUG ((EFI_D_INFO, "SMRR Base: 0x%x, SMRR Size: 0x%x\n", *SmrrBase, *SmrrSize));
}

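//
// Merging example for FindSmramInfo() above (hypothetical addresses): if
// GetCapabilities() reports adjacent open SMRAM descriptors at 0x7F000000
// (size 0x300000) and 0x7F300000 (size 0x500000), the search loop picks the
// larger descriptor and the do/while loop then grows it across the shared
// boundary, yielding SMRR Base = 0x7F000000 and SMRR Size = 0x800000.
//
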
/**
Configure SMM Code Access Check feature on an AP.
SMM Feature Control MSR will be locked after configuration.

@param[in,out] Buffer  Pointer to private data buffer.
**/
VOID
EFIAPI
ConfigSmmCodeAccessCheckOnCurrentProcessor (
  IN OUT VOID  *Buffer
  )
{
  UINTN   CpuIndex;
  UINT64  SmmFeatureControlMsr;
  UINT64  NewSmmFeatureControlMsr;

  //
  // Retrieve the CPU Index from the context passed in
  //
  CpuIndex = *(UINTN *)Buffer;

  //
  // Get the current SMM Feature Control MSR value
  //
  SmmFeatureControlMsr = SmmCpuFeaturesGetSmmRegister (CpuIndex, SmmRegFeatureControl);

  //
  // Compute the new SMM Feature Control MSR value
  //
  NewSmmFeatureControlMsr = SmmFeatureControlMsr;
  if (mSmmCodeAccessCheckEnable) {
    NewSmmFeatureControlMsr |= SMM_CODE_CHK_EN_BIT;
    if (FeaturePcdGet (PcdCpuSmmFeatureControlMsrLock)) {
      NewSmmFeatureControlMsr |= SMM_FEATURE_CONTROL_LOCK_BIT;
    }
  }

  //
  // Only set the SMM Feature Control MSR value if the new value is different than the current value
  //
  if (NewSmmFeatureControlMsr != SmmFeatureControlMsr) {
    SmmCpuFeaturesSetSmmRegister (CpuIndex, SmmRegFeatureControl, NewSmmFeatureControlMsr);
  }

  //
  // Release the spin lock used to serialize the updates to the SMM Feature Control MSR
  //
  ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);
}

/**
Configure SMM Code Access Check feature for all processors.
SMM Feature Control MSR will be locked after configuration.
**/
VOID
ConfigSmmCodeAccessCheck (
  VOID
  )
{
  UINTN       Index;
  EFI_STATUS  Status;

  //
  // Check to see if the Feature Control MSR is supported on this CPU
  //
  Index = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
  if (!SmmCpuFeaturesIsSmmRegisterSupported (Index, SmmRegFeatureControl)) {
    mSmmCodeAccessCheckEnable = FALSE;
    return;
  }

  //
  // Check to see if the CPU supports the SMM Code Access Check feature
  // Do not access this MSR unless the CPU supports the SmmRegFeatureControl
  //
  if ((AsmReadMsr64 (EFI_MSR_SMM_MCA_CAP) & SMM_CODE_ACCESS_CHK_BIT) == 0) {
    mSmmCodeAccessCheckEnable = FALSE;
    return;
  }

  //
  // Initialize the lock used to serialize the MSR programming in BSP and all APs
  //
  InitializeSpinLock (mConfigSmmCodeAccessCheckLock);

  //
  // Acquire Config SMM Code Access Check spin lock.  The BSP will release the
  // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
  //
  AcquireSpinLock (mConfigSmmCodeAccessCheckLock);

  //
  // Enable SMM Code Access Check feature on the BSP.
  //
  ConfigSmmCodeAccessCheckOnCurrentProcessor (&Index);

  //
  // Enable SMM Code Access Check feature for the APs.
  //
  for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {
    if (Index != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
      if (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId == INVALID_APIC_ID) {
        //
        // If this processor does not exist
        //
        continue;
      }
      //
      // Acquire Config SMM Code Access Check spin lock.  The AP will release the
      // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
      //
      AcquireSpinLock (mConfigSmmCodeAccessCheckLock);

      //
      // Call SmmStartupThisAp() to enable SMM Code Access Check on an AP.
      //
      Status = gSmst->SmmStartupThisAp (ConfigSmmCodeAccessCheckOnCurrentProcessor, Index, &Index);
      ASSERT_EFI_ERROR (Status);

      //
      // Wait for the AP to release the Config SMM Code Access Check spin lock.
      //
      while (!AcquireSpinLockOrFail (mConfigSmmCodeAccessCheckLock)) {
        CpuPause ();
      }

      //
      // Release the Config SMM Code Access Check spin lock.
      //
      ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);
    }
  }
}

/**
  This API provides a way to allocate memory for page table.

  This API can be called more than once to allocate memory for page tables.

  Allocates the number of 4KB pages of type EfiRuntimeServicesData and returns a pointer to the
  allocated buffer.  The buffer returned is aligned on a 4KB boundary.  If Pages is 0, then NULL
  is returned.  If there is not enough memory remaining to satisfy the request, then NULL is
  returned.

  @param  Pages                 The number of 4 KB pages to allocate.

  @return A pointer to the allocated buffer or NULL if allocation fails.

**/
VOID *
AllocatePageTableMemory (
  IN UINTN           Pages
  )
{
  VOID  *Buffer;

  Buffer = SmmCpuFeaturesAllocatePageTableMemory (Pages);
  if (Buffer != NULL) {
    return Buffer;
  }
  return AllocatePages (Pages);
}

/**
  Allocate pages for code.

  @param[in]  Pages Number of pages to be allocated.

  @return Allocated memory.
**/
VOID *
AllocateCodePages (
  IN UINTN           Pages
  )
{
  EFI_STATUS            Status;
  EFI_PHYSICAL_ADDRESS  Memory;

  if (Pages == 0) {
    return NULL;
  }

  Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);
  if (EFI_ERROR (Status)) {
    return NULL;
  }
  return (VOID *) (UINTN) Memory;
}

/**
  Allocate aligned pages for code.

  @param[in]  Pages                 Number of pages to be allocated.
  @param[in]  Alignment             The requested alignment of the allocation.
                                    Must be a power of two.
                                    If Alignment is zero, then byte alignment is used.

  @return Allocated memory.
**/
VOID *
AllocateAlignedCodePages (
  IN UINTN            Pages,
  IN UINTN            Alignment
  )
{
  EFI_STATUS            Status;
  EFI_PHYSICAL_ADDRESS  Memory;
  UINTN                 AlignedMemory;
  UINTN                 AlignmentMask;
  UINTN                 UnalignedPages;
  UINTN                 RealPages;

  //
  // Alignment must be a power of two or zero.
  //
  ASSERT ((Alignment & (Alignment - 1)) == 0);

  if (Pages == 0) {
    return NULL;
  }
  if (Alignment > EFI_PAGE_SIZE) {
    //
    // Calculate the total number of pages since alignment is larger than page size.
    //
    AlignmentMask = Alignment - 1;
    RealPages     = Pages + EFI_SIZE_TO_PAGES (Alignment);
    //
    // Make sure that Pages plus EFI_SIZE_TO_PAGES (Alignment) does not overflow.
    //
    ASSERT (RealPages > Pages);

    Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, RealPages, &Memory);
    if (EFI_ERROR (Status)) {
      return NULL;
    }
    AlignedMemory  = ((UINTN) Memory + AlignmentMask) & ~AlignmentMask;
    UnalignedPages = EFI_SIZE_TO_PAGES (AlignedMemory - (UINTN) Memory);
    if (UnalignedPages > 0) {
      //
      // Free first unaligned page(s).
      //
      Status = gSmst->SmmFreePages (Memory, UnalignedPages);
      ASSERT_EFI_ERROR (Status);
    }
    Memory         = AlignedMemory + EFI_PAGES_TO_SIZE (Pages);
    UnalignedPages = RealPages - Pages - UnalignedPages;
    if (UnalignedPages > 0) {
      //
      // Free last unaligned page(s).
      //
      Status = gSmst->SmmFreePages (Memory, UnalignedPages);
      ASSERT_EFI_ERROR (Status);
    }
  } else {
    //
    // Do not over-allocate pages in this case.
    //
    Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);
    if (EFI_ERROR (Status)) {
      return NULL;
    }
    AlignedMemory = (UINTN) Memory;
  }
  return (VOID *) AlignedMemory;
}

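//
// Alignment arithmetic example (illustrative numbers only): a request for
// AllocateAlignedCodePages (8, SIZE_32KB) over-allocates RealPages = 8 +
// EFI_SIZE_TO_PAGES (SIZE_32KB) = 16 pages, rounds the returned address up to the
// next 32 KB boundary, and frees the unaligned head and tail pages so that exactly
// 8 aligned pages remain; this is how PiCpuSmmEntry() satisfies the Pentium/Intel486
// 32 KB SMBASE alignment requirement.
//
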
/**
  Perform the remaining tasks.

**/
VOID
PerformRemainingTasks (
  VOID
  )
{
  if (mSmmReadyToLock) {
    //
    // Start SMM Profile feature
    //
    if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
      SmmProfileStart ();
    }
    //
    // Create a mix of 2MB and 4KB page tables. Mark some memory ranges as absent and execute-disable.
    //
    InitPaging ();

    //
    // Mark the critical region as read-only in the page table
    //
    SetMemMapAttributes ();

    //
    // For outside SMRAM, we only map SMM communication buffer or MMIO.
    //
    SetUefiMemMapAttributes ();

    //
    // Set page table itself to be read-only
    //
    SetPageTableAttributes ();

    //
    // Configure SMM Code Access Check feature if available.
    //
    ConfigSmmCodeAccessCheck ();

    SmmCpuFeaturesCompleteSmmReadyToLock ();

    //
    // Clean SMM ready to lock flag
    //
    mSmmReadyToLock = FALSE;
  }
}

/**
  Perform the pre-tasks.

**/
VOID
PerformPreTasks (
  VOID
  )
{
  RestoreSmmConfigurationInS3 ();
}