UefiCpuPkg/PiSmmCpuDxeSmm: patch "gSmmCr0" with PatchInstructionX86()
1/** @file\r
2Agent Module to load other modules to deploy SMM Entry Vector for X86 CPU.\r
3\r
4Copyright (c) 2009 - 2018, Intel Corporation. All rights reserved.<BR>\r
5Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>\r
6\r
7This program and the accompanying materials\r
8are licensed and made available under the terms and conditions of the BSD License\r
9which accompanies this distribution. The full text of the license may be found at\r
10http://opensource.org/licenses/bsd-license.php\r
11\r
12THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
13WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
14\r
15**/\r
16\r
17#include "PiSmmCpuDxeSmm.h"\r
18\r
19//\r
20// SMM CPU Private Data structure that contains SMM Configuration Protocol\r
21// along with its supporting fields.\r
22//\r
23SMM_CPU_PRIVATE_DATA mSmmCpuPrivateData = {\r
24 SMM_CPU_PRIVATE_DATA_SIGNATURE, // Signature\r
25 NULL, // SmmCpuHandle\r
26 NULL, // Pointer to ProcessorInfo array\r
27 NULL, // Pointer to Operation array\r
28 NULL, // Pointer to CpuSaveStateSize array\r
29 NULL, // Pointer to CpuSaveState array\r
30 { {0} }, // SmmReservedSmramRegion\r
31 {\r
32 SmmStartupThisAp, // SmmCoreEntryContext.SmmStartupThisAp\r
33 0, // SmmCoreEntryContext.CurrentlyExecutingCpu\r
34 0, // SmmCoreEntryContext.NumberOfCpus\r
35 NULL, // SmmCoreEntryContext.CpuSaveStateSize\r
36 NULL // SmmCoreEntryContext.CpuSaveState\r
37 },\r
38 NULL, // SmmCoreEntry\r
39 {\r
40 mSmmCpuPrivateData.SmmReservedSmramRegion, // SmmConfiguration.SmramReservedRegions\r
41 RegisterSmmEntry // SmmConfiguration.RegisterSmmEntry\r
42 },\r
43};\r
44\r
45CPU_HOT_PLUG_DATA mCpuHotPlugData = {\r
46 CPU_HOT_PLUG_DATA_REVISION_1, // Revision\r
47 0, // Array Length of SmBase and APIC ID\r
48 NULL, // Pointer to APIC ID array\r
49 NULL, // Pointer to SMBASE array\r
50 0, // Reserved\r
51 0, // SmrrBase\r
52 0 // SmrrSize\r
53};\r
54\r
55//\r
56// Global pointer used to access mSmmCpuPrivateData from outside and inside SMM\r
57//\r
58SMM_CPU_PRIVATE_DATA *gSmmCpuPrivate = &mSmmCpuPrivateData;\r
59\r
60//\r
61// SMM Relocation variables\r
62//\r
63volatile BOOLEAN *mRebased;\r
64volatile BOOLEAN mIsBsp;\r
65\r
66///\r
67/// Handle for the SMM CPU Protocol\r
68///\r
69EFI_HANDLE mSmmCpuHandle = NULL;\r
70\r
71///\r
72/// SMM CPU Protocol instance\r
73///\r
74EFI_SMM_CPU_PROTOCOL mSmmCpu = {\r
75 SmmReadSaveState,\r
76 SmmWriteSaveState\r
77};\r
78\r
79///\r
80/// SMM Memory Attribute Protocol instance\r
81///\r
82EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL mSmmMemoryAttribute = {\r
83 EdkiiSmmGetMemoryAttributes,\r
84 EdkiiSmmSetMemoryAttributes,\r
85 EdkiiSmmClearMemoryAttributes\r
86};\r
87\r
88EFI_CPU_INTERRUPT_HANDLER mExternalVectorTable[EXCEPTION_VECTOR_NUMBER];\r
89\r
90//\r
91// SMM stack information\r
92//\r
93UINTN mSmmStackArrayBase;\r
94UINTN mSmmStackArrayEnd;\r
95UINTN mSmmStackSize;\r
96\r
97UINTN mMaxNumberOfCpus = 1;\r
98UINTN mNumberOfCpus = 1;\r
99\r
100//\r
101// SMM ready to lock flag\r
102//\r
103BOOLEAN mSmmReadyToLock = FALSE;\r
104\r
105//\r
106// Global used to cache PCD for SMM Code Access Check enable\r
107//\r
108BOOLEAN mSmmCodeAccessCheckEnable = FALSE;\r
109\r
110//\r
111// Global copy of the PcdPteMemoryEncryptionAddressOrMask\r
112//\r
113UINT64 mAddressEncMask = 0;\r
114\r
115//\r
116// Spin lock used to serialize setting of SMM Code Access Check feature\r
117//\r
118SPIN_LOCK *mConfigSmmCodeAccessCheckLock = NULL;\r
119\r
120//\r
121// Saved SMM ranges information\r
122//\r
123EFI_SMRAM_DESCRIPTOR *mSmmCpuSmramRanges;\r
124UINTN mSmmCpuSmramRangeCount;\r
125\r
126UINT8 mPhysicalAddressBits;\r
127\r
128//\r
129// Control register contents saved for SMM S3 resume state initialization.\r
130//\r
131UINT32 mSmmCr0;\r
132UINT32 mSmmCr4;\r
133\r
134/**\r
135 Initialize IDT to setup exception handlers for SMM.\r
136\r
137**/\r
138VOID\r
139InitializeSmmIdt (\r
140 VOID\r
141 )\r
142{\r
143 EFI_STATUS Status;\r
144 BOOLEAN InterruptState;\r
145 IA32_DESCRIPTOR DxeIdtr;\r
146\r
147 //\r
148 // There are 32 (not 255) entries in it since only processor\r
149 // generated exceptions will be handled.\r
150 //\r
151 gcSmiIdtr.Limit = (sizeof(IA32_IDT_GATE_DESCRIPTOR) * 32) - 1;\r
152 //\r
153 // Allocate page aligned IDT, because it might be set as read only.\r
154 //\r
155 gcSmiIdtr.Base = (UINTN)AllocateCodePages (EFI_SIZE_TO_PAGES(gcSmiIdtr.Limit + 1));\r
156 ASSERT (gcSmiIdtr.Base != 0);\r
157 ZeroMem ((VOID *)gcSmiIdtr.Base, gcSmiIdtr.Limit + 1);\r
158\r
159 //\r
160 // Disable Interrupt and save DXE IDT table\r
161 //\r
162 InterruptState = SaveAndDisableInterrupts ();\r
163 AsmReadIdtr (&DxeIdtr);\r
164 //\r
165 // Load SMM temporary IDT table\r
166 //\r
167 AsmWriteIdtr (&gcSmiIdtr);\r
168 //\r
169 // Setup SMM default exception handlers, SMM IDT table\r
170 // will be updated and saved in gcSmiIdtr\r
171 //\r
172 Status = InitializeCpuExceptionHandlers (NULL);\r
173 ASSERT_EFI_ERROR (Status);\r
174 //\r
175 // Restore DXE IDT table and CPU interrupt\r
176 //\r
177 AsmWriteIdtr ((IA32_DESCRIPTOR *) &DxeIdtr);\r
178 SetInterruptState (InterruptState);\r
179}\r
180\r
181/**\r
182 Search for the module that contains the input IP address and output its name.\r
183\r
184 @param CallerIpAddress Caller instruction pointer.\r
185\r
186**/\r
187VOID\r
188DumpModuleInfoByIp (\r
189 IN UINTN CallerIpAddress\r
190 )\r
191{\r
192 UINTN Pe32Data;\r
193 VOID *PdbPointer;\r
194\r
195 //\r
196 // Find Image Base\r
197 //\r
198 Pe32Data = PeCoffSearchImageBase (CallerIpAddress);\r
199 if (Pe32Data != 0) {\r
200 DEBUG ((DEBUG_ERROR, "It is invoked from the instruction before IP(0x%p)", (VOID *) CallerIpAddress));\r
201 PdbPointer = PeCoffLoaderGetPdbPointer ((VOID *) Pe32Data);\r
202 if (PdbPointer != NULL) {\r
203 DEBUG ((DEBUG_ERROR, " in module (%a)\n", PdbPointer));\r
204 }\r
205 }\r
206}\r
207\r
208/**\r
209 Read information from the CPU save state.\r
210\r
211 @param This EFI_SMM_CPU_PROTOCOL instance\r
212 @param Width The number of bytes to read from the CPU save state.\r
213 @param Register Specifies the CPU register to read from the save state.\r
214 @param CpuIndex Specifies the zero-based index of the CPU save state.\r
215 @param Buffer Upon return, this holds the CPU register value read from the save state.\r
216\r
217 @retval EFI_SUCCESS The register was read from Save State\r
218 @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor\r
219 @retval EFI_INVALID_PARAMETER This or Buffer is NULL.\r
220\r
221**/\r
222EFI_STATUS\r
223EFIAPI\r
224SmmReadSaveState (\r
225 IN CONST EFI_SMM_CPU_PROTOCOL *This,\r
226 IN UINTN Width,\r
227 IN EFI_SMM_SAVE_STATE_REGISTER Register,\r
228 IN UINTN CpuIndex,\r
229 OUT VOID *Buffer\r
230 )\r
231{\r
232 EFI_STATUS Status;\r
233\r
234 //\r
235 // Retrieve pointer to the specified CPU's SMM Save State buffer\r
236 //\r
237 if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {\r
238 return EFI_INVALID_PARAMETER;\r
239 }\r
240\r
241 //\r
242 // Check for special EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID\r
243 //\r
244 if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {\r
245 //\r
246 // The pseudo-register only supports the 64-bit size specified by Width.\r
247 //\r
248 if (Width != sizeof (UINT64)) {\r
249 return EFI_INVALID_PARAMETER;\r
250 }\r
251 //\r
252 // If the processor is in SMM at the time the SMI occurred,\r
253 // the pseudo register value for EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID is returned in Buffer.\r
254 // Otherwise, EFI_NOT_FOUND is returned.\r
255 //\r
256 if (*(mSmmMpSyncData->CpuData[CpuIndex].Present)) {\r
257 *(UINT64 *)Buffer = gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId;\r
258 return EFI_SUCCESS;\r
259 } else {\r
260 return EFI_NOT_FOUND;\r
261 }\r
262 }\r
263\r
264 if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {\r
265 return EFI_INVALID_PARAMETER;\r
266 }\r
267\r
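 //\r
 // Let the platform SmmCpuFeatures library handle the register first; fall\r
 // back to the generic save state map access if it returns EFI_UNSUPPORTED.\r
 //\r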
268 Status = SmmCpuFeaturesReadSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
269 if (Status == EFI_UNSUPPORTED) {\r
270 Status = ReadSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
271 }\r
272 return Status;\r
273}\r
274\r
275/**\r
276 Write data to the CPU save state.\r
277\r
278 @param This EFI_SMM_CPU_PROTOCOL instance\r
279 @param Width The number of bytes to write to the CPU save state.\r
280 @param Register Specifies the CPU register to write to the save state.\r
281 @param CpuIndex Specifies the zero-based index of the CPU save state\r
282 @param Buffer Upon entry, this holds the new CPU register value.\r
283\r
284 @retval EFI_SUCCESS The register was written to the save state.\r
285 @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor\r
286 @retval EFI_INVALID_PARAMETER ProcessorIndex or Width is not correct\r
287\r
288**/\r
289EFI_STATUS\r
290EFIAPI\r
291SmmWriteSaveState (\r
292 IN CONST EFI_SMM_CPU_PROTOCOL *This,\r
293 IN UINTN Width,\r
294 IN EFI_SMM_SAVE_STATE_REGISTER Register,\r
295 IN UINTN CpuIndex,\r
296 IN CONST VOID *Buffer\r
297 )\r
298{\r
299 EFI_STATUS Status;\r
300\r
301 //\r
302 // Retrieve pointer to the specified CPU's SMM Save State buffer\r
303 //\r
304 if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {\r
305 return EFI_INVALID_PARAMETER;\r
306 }\r
307\r
308 //\r
309 // Writes to EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID are ignored\r
310 //\r
311 if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {\r
312 return EFI_SUCCESS;\r
313 }\r
314\r
315 if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {\r
316 return EFI_INVALID_PARAMETER;\r
317 }\r
318\r
319 Status = SmmCpuFeaturesWriteSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
320 if (Status == EFI_UNSUPPORTED) {\r
321 Status = WriteSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
322 }\r
323 return Status;\r
324}\r
325\r
326\r
327/**\r
328 C function for the SMI handler, used to change each processor's SMBASE register.\r
329\r
330**/\r
331VOID\r
332EFIAPI\r
333SmmInitHandler (\r
334 VOID\r
335 )\r
336{\r
337 UINT32 ApicId;\r
338 UINTN Index;\r
339\r
340 //\r
341 // Update SMM IDT entries' code segment and load IDT\r
342 //\r
343 AsmWriteIdtr (&gcSmiIdtr);\r
344 ApicId = GetApicId ();\r
345\r
346 ASSERT (mNumberOfCpus <= mMaxNumberOfCpus);\r
347\r
348 for (Index = 0; Index < mNumberOfCpus; Index++) {\r
349 if (ApicId == (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {\r
350 //\r
351 // Initialize SMM specific features on the currently executing CPU\r
352 //\r
353 SmmCpuFeaturesInitializeProcessor (\r
354 Index,\r
355 mIsBsp,\r
356 gSmmCpuPrivate->ProcessorInfo,\r
357 &mCpuHotPlugData\r
358 );\r
359\r
360 if (!mSmmS3Flag) {\r
361 //\r
362 // Check XD and BTS features on each processor on normal boot\r
363 //\r
364 CheckFeatureSupported ();\r
365 }\r
366\r
367 if (mIsBsp) {\r
368 //\r
369 // BSP rebase is already done above.\r
370 // Initialize private data during S3 resume\r
371 //\r
372 InitializeMpSyncData ();\r
373 }\r
374\r
375 //\r
376 // Hook return after RSM to set SMM re-based flag\r
377 //\r
378 SemaphoreHook (Index, &mRebased[Index]);\r
379\r
380 return;\r
381 }\r
382 }\r
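 //\r
 // The executing CPU's APIC ID was not found in the processor list; this is\r
 // not expected to happen.\r
 //\r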
383 ASSERT (FALSE);\r
384}\r
385\r
386/**\r
387 Relocate SmmBases for each processor.\r
388\r
389 Execute on first boot and all S3 resumes\r
390\r
391**/\r
392VOID\r
393EFIAPI\r
394SmmRelocateBases (\r
395 VOID\r
396 )\r
397{\r
398 UINT8 BakBuf[BACK_BUF_SIZE];\r
399 SMRAM_SAVE_STATE_MAP BakBuf2;\r
400 SMRAM_SAVE_STATE_MAP *CpuStatePtr;\r
401 UINT8 *U8Ptr;\r
402 UINT32 ApicId;\r
403 UINTN Index;\r
404 UINTN BspIndex;\r
405\r
406 //\r
407 // Make sure the reserved size is large enough for procedure SmmInitTemplate.\r
408 //\r
409 ASSERT (sizeof (BakBuf) >= gcSmmInitSize);\r
410\r
411 //\r
412 // Patch ASM code template with current CR0, CR3, and CR4 values\r
413 //\r
414 mSmmCr0 = (UINT32)AsmReadCr0 ();\r
415 PatchInstructionX86 (gPatchSmmCr0, mSmmCr0, 4);\r
416 PatchInstructionX86 (gPatchSmmCr3, AsmReadCr3 (), 4);\r
417 mSmmCr4 = (UINT32)AsmReadCr4 ();\r
418 PatchInstructionX86 (gPatchSmmCr4, mSmmCr4, 4);\r
419\r
420 //\r
421 // Patch GDTR for SMM base relocation\r
422 //\r
423 gcSmiInitGdtr.Base = gcSmiGdtr.Base;\r
424 gcSmiInitGdtr.Limit = gcSmiGdtr.Limit;\r
425\r
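 //\r
 // SMM_DEFAULT_SMBASE plus SMM_HANDLER_OFFSET gives the default SMI handler\r
 // entry at the 0x38000 address that is backed up and patched below.\r
 //\r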
426 U8Ptr = (UINT8*)(UINTN)(SMM_DEFAULT_SMBASE + SMM_HANDLER_OFFSET);\r
427 CpuStatePtr = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);\r
428\r
429 //\r
430 // Backup original contents at address 0x38000\r
431 //\r
432 CopyMem (BakBuf, U8Ptr, sizeof (BakBuf));\r
433 CopyMem (&BakBuf2, CpuStatePtr, sizeof (BakBuf2));\r
434\r
435 //\r
436 // Load image for relocation\r
437 //\r
438 CopyMem (U8Ptr, gcSmmInitTemplate, gcSmmInitSize);\r
439\r
440 //\r
441 // Retrieve the local APIC ID of current processor\r
442 //\r
443 ApicId = GetApicId ();\r
444\r
445 //\r
446 // Relocate SM bases for all APs\r
447 // This is APs' 1st SMI - rebase will be done here, and APs' default SMI handler will be overridden by gcSmmInitTemplate\r
448 //\r
449 mIsBsp = FALSE;\r
450 BspIndex = (UINTN)-1;\r
451 for (Index = 0; Index < mNumberOfCpus; Index++) {\r
452 mRebased[Index] = FALSE;\r
453 if (ApicId != (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {\r
454 SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);\r
455 //\r
456 // Wait for this AP to finish its 1st SMI\r
457 //\r
458 while (!mRebased[Index]);\r
459 } else {\r
460 //\r
461 // BSP will be Relocated later\r
462 //\r
463 BspIndex = Index;\r
464 }\r
465 }\r
466\r
467 //\r
468 // Relocate BSP's SMM base\r
469 //\r
470 ASSERT (BspIndex != (UINTN)-1);\r
471 mIsBsp = TRUE;\r
472 SendSmiIpi (ApicId);\r
473 //\r
474 // Wait for the BSP to finish its 1st SMI\r
475 //\r
476 while (!mRebased[BspIndex]);\r
477\r
478 //\r
479 // Restore contents at address 0x38000\r
480 //\r
481 CopyMem (CpuStatePtr, &BakBuf2, sizeof (BakBuf2));\r
482 CopyMem (U8Ptr, BakBuf, sizeof (BakBuf));\r
483}\r
484\r
485/**\r
486 SMM Ready To Lock event notification handler.\r
487\r
488 The CPU S3 data is copied to SMRAM for security and mSmmReadyToLock is set to\r
489 perform additional lock actions that must be performed from SMM on the next SMI.\r
490\r
491 @param[in] Protocol Points to the protocol's unique identifier.\r
492 @param[in] Interface Points to the interface instance.\r
493 @param[in] Handle The handle on which the interface was installed.\r
494\r
495 @retval EFI_SUCCESS Notification handler runs successfully.\r
496 **/\r
497EFI_STATUS\r
498EFIAPI\r
499SmmReadyToLockEventNotify (\r
500 IN CONST EFI_GUID *Protocol,\r
501 IN VOID *Interface,\r
502 IN EFI_HANDLE Handle\r
503 )\r
504{\r
505 GetAcpiCpuData ();\r
506\r
507 //\r
508 // Cache a copy of UEFI memory map before we start profiling feature.\r
509 //\r
510 GetUefiMemoryMap ();\r
511\r
512 //\r
513 // Set SMM ready to lock flag and return\r
514 //\r
515 mSmmReadyToLock = TRUE;\r
516 return EFI_SUCCESS;\r
517}\r
518\r
519/**\r
520 The module Entry Point of the CPU SMM driver.\r
521\r
522 @param ImageHandle The firmware allocated handle for the EFI image.\r
523 @param SystemTable A pointer to the EFI System Table.\r
524\r
525 @retval EFI_SUCCESS The entry point is executed successfully.\r
526 @retval Other Some error occurs when executing this entry point.\r
527\r
528**/\r
529EFI_STATUS\r
530EFIAPI\r
531PiCpuSmmEntry (\r
532 IN EFI_HANDLE ImageHandle,\r
533 IN EFI_SYSTEM_TABLE *SystemTable\r
534 )\r
535{\r
536 EFI_STATUS Status;\r
537 EFI_MP_SERVICES_PROTOCOL *MpServices;\r
538 UINTN NumberOfEnabledProcessors;\r
539 UINTN Index;\r
540 VOID *Buffer;\r
541 UINTN BufferPages;\r
542 UINTN TileCodeSize;\r
543 UINTN TileDataSize;\r
544 UINTN TileSize;\r
545 UINT8 *Stacks;\r
546 VOID *Registration;\r
547 UINT32 RegEax;\r
548 UINT32 RegEdx;\r
549 UINTN FamilyId;\r
550 UINTN ModelId;\r
551 UINT32 Cr3;\r
552\r
553 //\r
554 // Initialize address fixup\r
555 //\r
556 PiSmmCpuSmmInitFixupAddress ();\r
557 PiSmmCpuSmiEntryFixupAddress ();\r
558\r
559 //\r
560 // Initialize Debug Agent to support source level debug in SMM code\r
561 //\r
562 InitializeDebugAgent (DEBUG_AGENT_INIT_SMM, NULL, NULL);\r
563\r
564 //\r
565 // Report the start of CPU SMM initialization.\r
566 //\r
567 REPORT_STATUS_CODE (\r
568 EFI_PROGRESS_CODE,\r
569 EFI_COMPUTING_UNIT_HOST_PROCESSOR | EFI_CU_HP_PC_SMM_INIT\r
570 );\r
571\r
572 //\r
573 // Fix segment address of the long-mode-switch jump\r
574 //\r
575 if (sizeof (UINTN) == sizeof (UINT64)) {\r
576 gSmmJmpAddr.Segment = LONG_MODE_CODE_SEGMENT;\r
577 }\r
578\r
579 //\r
580 // Find out SMRR Base and SMRR Size\r
581 //\r
582 FindSmramInfo (&mCpuHotPlugData.SmrrBase, &mCpuHotPlugData.SmrrSize);\r
583\r
584 //\r
585 // Get MP Services Protocol\r
586 //\r
587 Status = SystemTable->BootServices->LocateProtocol (&gEfiMpServiceProtocolGuid, NULL, (VOID **)&MpServices);\r
588 ASSERT_EFI_ERROR (Status);\r
589\r
590 //\r
591 // Use MP Services Protocol to retrieve the number of processors and number of enabled processors\r
592 //\r
593 Status = MpServices->GetNumberOfProcessors (MpServices, &mNumberOfCpus, &NumberOfEnabledProcessors);\r
594 ASSERT_EFI_ERROR (Status);\r
595 ASSERT (mNumberOfCpus <= PcdGet32 (PcdCpuMaxLogicalProcessorNumber));\r
596\r
597 //\r
598 // If CPU hot plug is supported, PcdCpuSmmEnableBspElection should be set to TRUE.\r
599 // A fixed BSP index makes no sense because the BSP may be hot removed.\r
600 //\r
601 DEBUG_CODE (\r
602 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {\r
603\r
604 ASSERT (FeaturePcdGet (PcdCpuSmmEnableBspElection));\r
605 }\r
606 );\r
607\r
608 //\r
609 // Save the PcdCpuSmmCodeAccessCheckEnable value into a global variable.\r
610 //\r
611 mSmmCodeAccessCheckEnable = PcdGetBool (PcdCpuSmmCodeAccessCheckEnable);\r
612 DEBUG ((EFI_D_INFO, "PcdCpuSmmCodeAccessCheckEnable = %d\n", mSmmCodeAccessCheckEnable));\r
613\r
614 //\r
615 // Save the PcdPteMemoryEncryptionAddressOrMask value into a global variable.\r
616 // Make sure AddressEncMask is contained within the smallest supported address field.\r
617 //\r
618 mAddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;\r
619 DEBUG ((EFI_D_INFO, "mAddressEncMask = 0x%lx\n", mAddressEncMask));\r
620\r
621 //\r
622 // If CPU hot plug is supported, allocate resources for processors that may be hot added\r
623 //\r
624 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {\r
625 mMaxNumberOfCpus = PcdGet32 (PcdCpuMaxLogicalProcessorNumber);\r
626 } else {\r
627 mMaxNumberOfCpus = mNumberOfCpus;\r
628 }\r
629 gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus = mMaxNumberOfCpus;\r
630\r
631 //\r
632 // The CPU save state and code for the SMI entry point are tiled within an SMRAM\r
633 // allocated buffer. The minimum size of this buffer for a uniprocessor system\r
634 // is 32 KB, because the entry point is SMBASE + 32KB, and the CPU save state\r
635 // area is just below SMBASE + 64KB. If more than one CPU is present in the platform,\r
636 // then the SMI entry point and the CPU save state areas can be tiled to minimize\r
637 // the total amount of SMRAM required for all the CPUs. The tile size can be computed\r
638 // by adding the CPU save state size, any extra CPU specific context, and\r
639 // the size of code that must be placed at the SMI entry point to transfer\r
640 // control to a C function in the native SMM execution mode. This size is\r
641 // rounded up to the nearest power of 2 to give the tile size for each CPU.\r
642 // The total amount of memory required is the maximum number of CPUs that the\r
643 // platform supports times the tile size. The picture below shows the tiling,\r
644 // where m is the number of tiles that fit in 32KB.\r
645 //\r
646 // +-----------------------------+ <-- 2^n offset from Base of allocated buffer\r
647 // | CPU m+1 Save State |\r
648 // +-----------------------------+\r
649 // | CPU m+1 Extra Data |\r
650 // +-----------------------------+\r
651 // | Padding |\r
652 // +-----------------------------+\r
653 // | CPU 2m SMI Entry |\r
654 // +#############################+ <-- Base of allocated buffer + 64 KB\r
655 // | CPU m-1 Save State |\r
656 // +-----------------------------+\r
657 // | CPU m-1 Extra Data |\r
658 // +-----------------------------+\r
659 // | Padding |\r
660 // +-----------------------------+\r
661 // | CPU 2m-1 SMI Entry |\r
662 // +=============================+ <-- 2^n offset from Base of allocated buffer\r
663 // | . . . . . . . . . . . . |\r
664 // +=============================+ <-- 2^n offset from Base of allocated buffer\r
665 // | CPU 2 Save State |\r
666 // +-----------------------------+\r
667 // | CPU 2 Extra Data |\r
668 // +-----------------------------+\r
669 // | Padding |\r
670 // +-----------------------------+\r
671 // | CPU m+1 SMI Entry |\r
672 // +=============================+ <-- Base of allocated buffer + 32 KB\r
673 // | CPU 1 Save State |\r
674 // +-----------------------------+\r
675 // | CPU 1 Extra Data |\r
676 // +-----------------------------+\r
677 // | Padding |\r
678 // +-----------------------------+\r
679 // | CPU m SMI Entry |\r
680 // +#############################+ <-- Base of allocated buffer + 32 KB == CPU 0 SMBASE + 64 KB\r
681 // | CPU 0 Save State |\r
682 // +-----------------------------+\r
683 // | CPU 0 Extra Data |\r
684 // +-----------------------------+\r
685 // | Padding |\r
686 // +-----------------------------+\r
687 // | CPU m-1 SMI Entry |\r
688 // +=============================+ <-- 2^n offset from Base of allocated buffer\r
689 // | . . . . . . . . . . . . |\r
690 // +=============================+ <-- 2^n offset from Base of allocated buffer\r
691 // | Padding |\r
692 // +-----------------------------+\r
693 // | CPU 1 SMI Entry |\r
694 // +=============================+ <-- 2^n offset from Base of allocated buffer\r
695 // | Padding |\r
696 // +-----------------------------+\r
697 // | CPU 0 SMI Entry |\r
698 // +#############################+ <-- Base of allocated buffer == CPU 0 SMBASE + 32 KB\r
699 //\r
700\r
701 //\r
702 // Retrieve CPU Family\r
703 //\r
704 AsmCpuid (CPUID_VERSION_INFO, &RegEax, NULL, NULL, NULL);\r
705 FamilyId = (RegEax >> 8) & 0xf;\r
706 ModelId = (RegEax >> 4) & 0xf;\r
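 //\r
 // For family 0x06 and 0x0F, fold the extended model ID (CPUID.01h:EAX[19:16])\r
 // into the upper bits of the model ID.\r
 //\r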
707 if (FamilyId == 0x06 || FamilyId == 0x0f) {\r
708 ModelId = ModelId | ((RegEax >> 12) & 0xf0);\r
709 }\r
710\r
711 RegEdx = 0;\r
712 AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);\r
713 if (RegEax >= CPUID_EXTENDED_CPU_SIG) {\r
714 AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);\r
715 }\r
716 //\r
717 // Determine the mode of the CPU at the time an SMI occurs\r
718 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual\r
719 // Volume 3C, Section 34.4.1.1\r
720 //\r
721 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT;\r
722 if ((RegEdx & BIT29) != 0) {\r
723 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;\r
724 }\r
725 if (FamilyId == 0x06) {\r
726 if (ModelId == 0x17 || ModelId == 0x0f || ModelId == 0x1c) {\r
727 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;\r
728 }\r
729 }\r
730\r
731 //\r
732 // Compute tile size of buffer required to hold the CPU SMRAM Save State Map, extra CPU\r
733 // specific context that starts at SMBASE + SMM_PSD_OFFSET, and the SMI entry point.\r
734 // This size is rounded up to the nearest power of 2.\r
735 //\r
736 TileCodeSize = GetSmiHandlerSize ();\r
737 TileCodeSize = ALIGN_VALUE(TileCodeSize, SIZE_4KB);\r
738 TileDataSize = (SMRAM_SAVE_STATE_MAP_OFFSET - SMM_PSD_OFFSET) + sizeof (SMRAM_SAVE_STATE_MAP);\r
739 TileDataSize = ALIGN_VALUE(TileDataSize, SIZE_4KB);\r
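 //\r
 // Round TileDataSize + TileCodeSize up to the next power of two:\r
 // GetPowerOfTwo32() isolates the highest set bit, so doubling its result on\r
 // (TileDataSize + TileCodeSize - 1) yields the smallest power of two large\r
 // enough to hold both the data and the code portions of a tile.\r
 //\r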
740 TileSize = TileDataSize + TileCodeSize - 1;\r
741 TileSize = 2 * GetPowerOfTwo32 ((UINT32)TileSize);\r
742 DEBUG ((EFI_D_INFO, "SMRAM TileSize = 0x%08x (0x%08x, 0x%08x)\n", TileSize, TileCodeSize, TileDataSize));\r
743\r
744 //\r
745 // If the TileSize is larger than space available for the SMI Handler of\r
746 // CPU[i], the extra CPU specific context of CPU[i+1], and the SMRAM Save\r
747 // State Map of CPU[i+1], then ASSERT(). If this ASSERT() is triggered, then\r
748 // the SMI Handler size must be reduced or the size of the extra CPU specific\r
749 // context must be reduced.\r
750 //\r
751 ASSERT (TileSize <= (SMRAM_SAVE_STATE_MAP_OFFSET + sizeof (SMRAM_SAVE_STATE_MAP) - SMM_HANDLER_OFFSET));\r
752\r
753 //\r
754 // Allocate buffer for all of the tiles.\r
755 //\r
756 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual\r
757 // Volume 3C, Section 34.11 SMBASE Relocation\r
758 // For Pentium and Intel486 processors, the SMBASE values must be\r
759 // aligned on a 32-KByte boundary or the processor will enter shutdown\r
760 // state during the execution of a RSM instruction.\r
761 //\r
762 // Intel486 processors: FamilyId is 4\r
763 // Pentium processors : FamilyId is 5\r
764 //\r
765 BufferPages = EFI_SIZE_TO_PAGES (SIZE_32KB + TileSize * (mMaxNumberOfCpus - 1));\r
766 if ((FamilyId == 4) || (FamilyId == 5)) {\r
767 Buffer = AllocateAlignedCodePages (BufferPages, SIZE_32KB);\r
768 } else {\r
769 Buffer = AllocateAlignedCodePages (BufferPages, SIZE_4KB);\r
770 }\r
771 ASSERT (Buffer != NULL);\r
772 DEBUG ((EFI_D_INFO, "SMRAM SaveState Buffer (0x%08x, 0x%08x)\n", Buffer, EFI_PAGES_TO_SIZE(BufferPages)));\r
773\r
774 //\r
775 // Allocate buffer for pointers to array in SMM_CPU_PRIVATE_DATA.\r
776 //\r
777 gSmmCpuPrivate->ProcessorInfo = (EFI_PROCESSOR_INFORMATION *)AllocatePool (sizeof (EFI_PROCESSOR_INFORMATION) * mMaxNumberOfCpus);\r
778 ASSERT (gSmmCpuPrivate->ProcessorInfo != NULL);\r
779\r
780 gSmmCpuPrivate->Operation = (SMM_CPU_OPERATION *)AllocatePool (sizeof (SMM_CPU_OPERATION) * mMaxNumberOfCpus);\r
781 ASSERT (gSmmCpuPrivate->Operation != NULL);\r
782\r
783 gSmmCpuPrivate->CpuSaveStateSize = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);\r
784 ASSERT (gSmmCpuPrivate->CpuSaveStateSize != NULL);\r
785\r
786 gSmmCpuPrivate->CpuSaveState = (VOID **)AllocatePool (sizeof (VOID *) * mMaxNumberOfCpus);\r
787 ASSERT (gSmmCpuPrivate->CpuSaveState != NULL);\r
788\r
789 mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveStateSize = gSmmCpuPrivate->CpuSaveStateSize;\r
790 mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveState = gSmmCpuPrivate->CpuSaveState;\r
791\r
792 //\r
793 // Allocate buffer for pointers to array in CPU_HOT_PLUG_DATA.\r
794 //\r
795 mCpuHotPlugData.ApicId = (UINT64 *)AllocatePool (sizeof (UINT64) * mMaxNumberOfCpus);\r
796 ASSERT (mCpuHotPlugData.ApicId != NULL);\r
797 mCpuHotPlugData.SmBase = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);\r
798 ASSERT (mCpuHotPlugData.SmBase != NULL);\r
799 mCpuHotPlugData.ArrayLength = (UINT32)mMaxNumberOfCpus;\r
800\r
801 //\r
802 // Retrieve APIC ID of each enabled processor from the MP Services protocol.\r
803 // Also compute the SMBASE address, CPU Save State address, and CPU Save state\r
804 // size for each CPU in the platform\r
805 //\r
806 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {\r
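 //\r
 // Each tile in the allocated buffer begins at SMBASE + SMM_HANDLER_OFFSET\r
 // (the SMI entry point), so the SMBASE for this CPU lies SMM_HANDLER_OFFSET\r
 // bytes below the start of its tile.\r
 //\r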
807 mCpuHotPlugData.SmBase[Index] = (UINTN)Buffer + Index * TileSize - SMM_HANDLER_OFFSET;\r
808 gSmmCpuPrivate->CpuSaveStateSize[Index] = sizeof(SMRAM_SAVE_STATE_MAP);\r
809 gSmmCpuPrivate->CpuSaveState[Index] = (VOID *)(mCpuHotPlugData.SmBase[Index] + SMRAM_SAVE_STATE_MAP_OFFSET);\r
810 gSmmCpuPrivate->Operation[Index] = SmmCpuNone;\r
811\r
812 if (Index < mNumberOfCpus) {\r
813 Status = MpServices->GetProcessorInfo (MpServices, Index, &gSmmCpuPrivate->ProcessorInfo[Index]);\r
814 ASSERT_EFI_ERROR (Status);\r
815 mCpuHotPlugData.ApicId[Index] = gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId;\r
816\r
817 DEBUG ((EFI_D_INFO, "CPU[%03x] APIC ID=%04x SMBASE=%08x SaveState=%08x Size=%08x\n",\r
818 Index,\r
819 (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId,\r
820 mCpuHotPlugData.SmBase[Index],\r
821 gSmmCpuPrivate->CpuSaveState[Index],\r
822 gSmmCpuPrivate->CpuSaveStateSize[Index]\r
823 ));\r
824 } else {\r
825 gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId = INVALID_APIC_ID;\r
826 mCpuHotPlugData.ApicId[Index] = INVALID_APIC_ID;\r
827 }\r
828 }\r
829\r
830 //\r
831 // Allocate SMI stacks for all processors.\r
832 //\r
833 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
834 //\r
835 // Two more pages are allocated for each processor:\r
836 // one is the guard page and the other is the known good stack.\r
837 //\r
838 // +-------------------------------------------+-----+-------------------------------------------+\r
839 // | Known Good Stack | Guard Page | SMM Stack | ... | Known Good Stack | Guard Page | SMM Stack |\r
840 // +-------------------------------------------+-----+-------------------------------------------+\r
841 // | | | |\r
842 // |<-------------- Processor 0 -------------->| |<-------------- Processor n -------------->|\r
843 //\r
844 mSmmStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)) + 2);\r
845 Stacks = (UINT8 *) AllocatePages (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)) + 2));\r
846 ASSERT (Stacks != NULL);\r
847 mSmmStackArrayBase = (UINTN)Stacks;\r
848 mSmmStackArrayEnd = mSmmStackArrayBase + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * mSmmStackSize - 1;\r
849 } else {\r
850 mSmmStackSize = PcdGet32 (PcdCpuSmmStackSize);\r
851 Stacks = (UINT8 *) AllocatePages (EFI_SIZE_TO_PAGES (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * mSmmStackSize));\r
852 ASSERT (Stacks != NULL);\r
853 }\r
854\r
855 //\r
856 // Set SMI stack for SMM base relocation\r
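 // The stack grows downward, so gSmmInitStack points at the last UINTN-sized\r
 // slot of the first processor's stack area.\r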
857 //\r
858 gSmmInitStack = (UINTN) (Stacks + mSmmStackSize - sizeof (UINTN));\r
859\r
860 //\r
861 // Initialize IDT\r
862 //\r
863 InitializeSmmIdt ();\r
864\r
865 //\r
866 // Relocate SMM Base addresses to the ones allocated from SMRAM\r
867 //\r
868 mRebased = (BOOLEAN *)AllocateZeroPool (sizeof (BOOLEAN) * mMaxNumberOfCpus);\r
869 ASSERT (mRebased != NULL);\r
870 SmmRelocateBases ();\r
871\r
872 //\r
873 // Call hook for BSP to perform extra actions in normal mode after all\r
874 // SMM base addresses have been relocated on all CPUs\r
875 //\r
876 SmmCpuFeaturesSmmRelocationComplete ();\r
877\r
878 DEBUG ((DEBUG_INFO, "mXdSupported - 0x%x\n", mXdSupported));\r
879\r
880 //\r
881 // SMM Time initialization\r
882 //\r
883 InitializeSmmTimer ();\r
884\r
885 //\r
886 // Initialize MP globals\r
887 //\r
888 Cr3 = InitializeMpServiceData (Stacks, mSmmStackSize);\r
889\r
890 //\r
891 // Fill in SMM Reserved Regions\r
892 //\r
893 gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedStart = 0;\r
894 gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedSize = 0;\r
895\r
896 //\r
897 // Install the SMM Configuration Protocol onto a new handle on the handle database.\r
898 // The entire SMM Configuration Protocol is allocated from SMRAM, so only a pointer\r
899 // to an SMRAM address will be present in the handle database\r
900 //\r
901 Status = SystemTable->BootServices->InstallMultipleProtocolInterfaces (\r
902 &gSmmCpuPrivate->SmmCpuHandle,\r
903 &gEfiSmmConfigurationProtocolGuid, &gSmmCpuPrivate->SmmConfiguration,\r
904 NULL\r
905 );\r
906 ASSERT_EFI_ERROR (Status);\r
907\r
908 //\r
909 // Install the SMM CPU Protocol into SMM protocol database\r
910 //\r
911 Status = gSmst->SmmInstallProtocolInterface (\r
912 &mSmmCpuHandle,\r
913 &gEfiSmmCpuProtocolGuid,\r
914 EFI_NATIVE_INTERFACE,\r
915 &mSmmCpu\r
916 );\r
917 ASSERT_EFI_ERROR (Status);\r
918\r
919 //\r
920 // Install the SMM Memory Attribute Protocol into SMM protocol database\r
921 //\r
922 Status = gSmst->SmmInstallProtocolInterface (\r
923 &mSmmCpuHandle,\r
924 &gEdkiiSmmMemoryAttributeProtocolGuid,\r
925 EFI_NATIVE_INTERFACE,\r
926 &mSmmMemoryAttribute\r
927 );\r
928 ASSERT_EFI_ERROR (Status);\r
929\r
930 //\r
931 // Expose address of CPU Hot Plug Data structure if CPU hot plug is supported.\r
932 //\r
933 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {\r
934 Status = PcdSet64S (PcdCpuHotPlugDataAddress, (UINT64)(UINTN)&mCpuHotPlugData);\r
935 ASSERT_EFI_ERROR (Status);\r
936 }\r
937\r
938 //\r
939 // Initialize SMM CPU Services Support\r
940 //\r
941 Status = InitializeSmmCpuServices (mSmmCpuHandle);\r
942 ASSERT_EFI_ERROR (Status);\r
943\r
944 //\r
945 // register SMM Ready To Lock Protocol notification\r
946 //\r
947 Status = gSmst->SmmRegisterProtocolNotify (\r
948 &gEfiSmmReadyToLockProtocolGuid,\r
949 SmmReadyToLockEventNotify,\r
950 &Registration\r
951 );\r
952 ASSERT_EFI_ERROR (Status);\r
953\r
954 //\r
955 // Initialize SMM Profile feature\r
956 //\r
957 InitSmmProfile (Cr3);\r
958\r
959 GetAcpiS3EnableFlag ();\r
960 InitSmmS3ResumeState (Cr3);\r
961\r
962 DEBUG ((EFI_D_INFO, "SMM CPU Module exit from SMRAM with EFI_SUCCESS\n"));\r
963\r
964 return EFI_SUCCESS;\r
965}\r
966\r
967/**\r
968\r
969 Find out SMRAM information including SMRR base and SMRR size.\r
970\r
971 @param SmrrBase SMRR base\r
972 @param SmrrSize SMRR size\r
973\r
974**/\r
975VOID\r
976FindSmramInfo (\r
977 OUT UINT32 *SmrrBase,\r
978 OUT UINT32 *SmrrSize\r
979 )\r
980{\r
981 EFI_STATUS Status;\r
982 UINTN Size;\r
983 EFI_SMM_ACCESS2_PROTOCOL *SmmAccess;\r
984 EFI_SMRAM_DESCRIPTOR *CurrentSmramRange;\r
985 UINTN Index;\r
986 UINT64 MaxSize;\r
987 BOOLEAN Found;\r
988\r
989 //\r
990 // Get SMM Access Protocol\r
991 //\r
992 Status = gBS->LocateProtocol (&gEfiSmmAccess2ProtocolGuid, NULL, (VOID **)&SmmAccess);\r
993 ASSERT_EFI_ERROR (Status);\r
994\r
995 //\r
996 // Get SMRAM information\r
997 //\r
998 Size = 0;\r
999 Status = SmmAccess->GetCapabilities (SmmAccess, &Size, NULL);\r
1000 ASSERT (Status == EFI_BUFFER_TOO_SMALL);\r
1001\r
1002 mSmmCpuSmramRanges = (EFI_SMRAM_DESCRIPTOR *)AllocatePool (Size);\r
1003 ASSERT (mSmmCpuSmramRanges != NULL);\r
1004\r
1005 Status = SmmAccess->GetCapabilities (SmmAccess, &Size, mSmmCpuSmramRanges);\r
1006 ASSERT_EFI_ERROR (Status);\r
1007\r
1008 mSmmCpuSmramRangeCount = Size / sizeof (EFI_SMRAM_DESCRIPTOR);\r
1009\r
1010 //\r
1011 // Find the largest SMRAM range between 1MB and 4GB that is at least 256K - 4K in size\r
1012 //\r
1013 CurrentSmramRange = NULL;\r
1014 for (Index = 0, MaxSize = SIZE_256KB - EFI_PAGE_SIZE; Index < mSmmCpuSmramRangeCount; Index++) {\r
1015 //\r
1016 // Skip any SMRAM region that is already allocated, needs testing, or needs ECC initialization\r
1017 //\r
1018 if ((mSmmCpuSmramRanges[Index].RegionState & (EFI_ALLOCATED | EFI_NEEDS_TESTING | EFI_NEEDS_ECC_INITIALIZATION)) != 0) {\r
1019 continue;\r
1020 }\r
1021\r
1022 if (mSmmCpuSmramRanges[Index].CpuStart >= BASE_1MB) {\r
1023 if ((mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize) <= SMRR_MAX_ADDRESS) {\r
1024 if (mSmmCpuSmramRanges[Index].PhysicalSize >= MaxSize) {\r
1025 MaxSize = mSmmCpuSmramRanges[Index].PhysicalSize;\r
1026 CurrentSmramRange = &mSmmCpuSmramRanges[Index];\r
1027 }\r
1028 }\r
1029 }\r
1030 }\r
1031\r
1032 ASSERT (CurrentSmramRange != NULL);\r
1033\r
1034 *SmrrBase = (UINT32)CurrentSmramRange->CpuStart;\r
1035 *SmrrSize = (UINT32)CurrentSmramRange->PhysicalSize;\r
1036\r
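 //\r
 // Grow the SMRR range by repeatedly merging any SMRAM descriptor that is\r
 // immediately below or immediately above the current [SmrrBase, SmrrBase +\r
 // SmrrSize) range, until no adjacent descriptor remains.\r
 //\r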
1037 do {\r
1038 Found = FALSE;\r
1039 for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {\r
1040 if (mSmmCpuSmramRanges[Index].CpuStart < *SmrrBase &&\r
1041 *SmrrBase == (mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize)) {\r
1042 *SmrrBase = (UINT32)mSmmCpuSmramRanges[Index].CpuStart;\r
1043 *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);\r
1044 Found = TRUE;\r
1045 } else if ((*SmrrBase + *SmrrSize) == mSmmCpuSmramRanges[Index].CpuStart && mSmmCpuSmramRanges[Index].PhysicalSize > 0) {\r
1046 *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);\r
1047 Found = TRUE;\r
1048 }\r
1049 }\r
1050 } while (Found);\r
1051\r
1052 DEBUG ((EFI_D_INFO, "SMRR Base: 0x%x, SMRR Size: 0x%x\n", *SmrrBase, *SmrrSize));\r
1053}\r
1054\r
1055/**\r
1056Configure SMM Code Access Check feature on the currently executing processor.\r
1057SMM Feature Control MSR will be locked after configuration.\r
1058\r
1059@param[in,out] Buffer Pointer to private data buffer.\r
1060**/\r
1061VOID\r
1062EFIAPI\r
1063ConfigSmmCodeAccessCheckOnCurrentProcessor (\r
1064 IN OUT VOID *Buffer\r
1065 )\r
1066{\r
1067 UINTN CpuIndex;\r
1068 UINT64 SmmFeatureControlMsr;\r
1069 UINT64 NewSmmFeatureControlMsr;\r
1070\r
1071 //\r
1072 // Retrieve the CPU Index from the context passed in\r
1073 //\r
1074 CpuIndex = *(UINTN *)Buffer;\r
1075\r
1076 //\r
1077 // Get the current SMM Feature Control MSR value\r
1078 //\r
1079 SmmFeatureControlMsr = SmmCpuFeaturesGetSmmRegister (CpuIndex, SmmRegFeatureControl);\r
1080\r
1081 //\r
1082 // Compute the new SMM Feature Control MSR value\r
1083 //\r
1084 NewSmmFeatureControlMsr = SmmFeatureControlMsr;\r
1085 if (mSmmCodeAccessCheckEnable) {\r
1086 NewSmmFeatureControlMsr |= SMM_CODE_CHK_EN_BIT;\r
1087 if (FeaturePcdGet (PcdCpuSmmFeatureControlMsrLock)) {\r
1088 NewSmmFeatureControlMsr |= SMM_FEATURE_CONTROL_LOCK_BIT;\r
1089 }\r
1090 }\r
1091\r
1092 //\r
1093 // Only set the SMM Feature Control MSR value if the new value is different than the current value\r
1094 //\r
1095 if (NewSmmFeatureControlMsr != SmmFeatureControlMsr) {\r
1096 SmmCpuFeaturesSetSmmRegister (CpuIndex, SmmRegFeatureControl, NewSmmFeatureControlMsr);\r
1097 }\r
1098\r
1099 //\r
1100 // Release the spin lock used to serialize the updates to the SMM Feature Control MSR\r
1101 //\r
1102 ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);\r
1103}\r
1104\r
1105/**\r
1106Configure SMM Code Access Check feature for all processors.\r
1107SMM Feature Control MSR will be locked after configuration.\r
1108**/\r
1109VOID\r
1110ConfigSmmCodeAccessCheck (\r
1111 VOID\r
1112 )\r
1113{\r
1114 UINTN Index;\r
1115 EFI_STATUS Status;\r
1116\r
1117 //\r
1118 // Check to see if the Feature Control MSR is supported on this CPU\r
1119 //\r
1120 Index = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;\r
1121 if (!SmmCpuFeaturesIsSmmRegisterSupported (Index, SmmRegFeatureControl)) {\r
1122 mSmmCodeAccessCheckEnable = FALSE;\r
1123 return;\r
1124 }\r
1125\r
1126 //\r
1127 // Check to see if the CPU supports the SMM Code Access Check feature\r
1128 // Do not access this MSR unless the CPU supports the SmmRegFeatureControl\r
1129 //\r
1130 if ((AsmReadMsr64 (EFI_MSR_SMM_MCA_CAP) & SMM_CODE_ACCESS_CHK_BIT) == 0) {\r
1131 mSmmCodeAccessCheckEnable = FALSE;\r
1132 return;\r
1133 }\r
1134\r
1135 //\r
1136 // Initialize the lock used to serialize the MSR programming in BSP and all APs\r
1137 //\r
1138 InitializeSpinLock (mConfigSmmCodeAccessCheckLock);\r
1139\r
1140 //\r
1141 // Acquire Config SMM Code Access Check spin lock. The BSP will release the\r
1142 // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().\r
1143 //\r
1144 AcquireSpinLock (mConfigSmmCodeAccessCheckLock);\r
1145\r
1146 //\r
1147 // Enable SMM Code Access Check feature on the BSP.\r
1148 //\r
1149 ConfigSmmCodeAccessCheckOnCurrentProcessor (&Index);\r
1150\r
1151 //\r
1152 // Enable SMM Code Access Check feature for the APs.\r
1153 //\r
1154 for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {\r
1155 if (Index != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {\r
1156 if (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId == INVALID_APIC_ID) {\r
1157 //\r
1158 // If this processor does not exist\r
1159 //\r
1160 continue;\r
1161 }\r
1162 //\r
1163 // Acquire Config SMM Code Access Check spin lock. The AP will release the\r
1164 // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().\r
1165 //\r
1166 AcquireSpinLock (mConfigSmmCodeAccessCheckLock);\r
1167\r
1168 //\r
1169 // Call SmmStartupThisAp() to enable SMM Code Access Check on an AP.\r
1170 //\r
1171 Status = gSmst->SmmStartupThisAp (ConfigSmmCodeAccessCheckOnCurrentProcessor, Index, &Index);\r
1172 ASSERT_EFI_ERROR (Status);\r
1173\r
1174 //\r
1175 // Wait for the AP to release the Config SMM Code Access Check spin lock.\r
1176 //\r
1177 while (!AcquireSpinLockOrFail (mConfigSmmCodeAccessCheckLock)) {\r
1178 CpuPause ();\r
1179 }\r
1180\r
1181 //\r
1182 // Release the Config SMM Code Access Check spin lock.\r
1183 //\r
1184 ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);\r
1185 }\r
1186 }\r
1187}\r
1188\r
1189/**\r
1190 This API provides a way to allocate memory for page tables.\r
1191\r
1192 This API can be called more than once to allocate memory for page tables.\r
1193\r
1194 Allocates the number of 4KB pages of type EfiRuntimeServicesData and returns a pointer to the\r
1195 allocated buffer. The buffer returned is aligned on a 4KB boundary. If Pages is 0, then NULL\r
1196 is returned. If there is not enough memory remaining to satisfy the request, then NULL is\r
1197 returned.\r
1198\r
1199 @param Pages The number of 4 KB pages to allocate.\r
1200\r
1201 @return A pointer to the allocated buffer or NULL if allocation fails.\r
1202\r
1203**/\r
1204VOID *\r
1205AllocatePageTableMemory (\r
1206 IN UINTN Pages\r
1207 )\r
1208{\r
1209 VOID *Buffer;\r
1210\r
1211 Buffer = SmmCpuFeaturesAllocatePageTableMemory (Pages);\r
1212 if (Buffer != NULL) {\r
1213 return Buffer;\r
1214 }\r
1215 return AllocatePages (Pages);\r
1216}\r
1217\r
1218/**\r
1219 Allocate pages for code.\r
1220\r
1221 @param[in] Pages Number of pages to be allocated.\r
1222\r
1223 @return Allocated memory.\r
1224**/\r
1225VOID *\r
1226AllocateCodePages (\r
1227 IN UINTN Pages\r
1228 )\r
1229{\r
1230 EFI_STATUS Status;\r
1231 EFI_PHYSICAL_ADDRESS Memory;\r
1232\r
1233 if (Pages == 0) {\r
1234 return NULL;\r
1235 }\r
1236\r
1237 Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);\r
1238 if (EFI_ERROR (Status)) {\r
1239 return NULL;\r
1240 }\r
1241 return (VOID *) (UINTN) Memory;\r
1242}\r
1243\r
1244/**\r
1245 Allocate aligned pages for code.\r
1246\r
1247 @param[in] Pages Number of pages to be allocated.\r
1248 @param[in] Alignment The requested alignment of the allocation.\r
1249 Must be a power of two.\r
1250 If Alignment is zero, then byte alignment is used.\r
1251\r
1252 @return Allocated memory.\r
1253**/\r
1254VOID *\r
1255AllocateAlignedCodePages (\r
1256 IN UINTN Pages,\r
1257 IN UINTN Alignment\r
1258 )\r
1259{\r
1260 EFI_STATUS Status;\r
1261 EFI_PHYSICAL_ADDRESS Memory;\r
1262 UINTN AlignedMemory;\r
1263 UINTN AlignmentMask;\r
1264 UINTN UnalignedPages;\r
1265 UINTN RealPages;\r
1266\r
1267 //\r
1268 // Alignment must be a power of two or zero.\r
1269 //\r
1270 ASSERT ((Alignment & (Alignment - 1)) == 0);\r
1271\r
1272 if (Pages == 0) {\r
1273 return NULL;\r
1274 }\r
1275 if (Alignment > EFI_PAGE_SIZE) {\r
1276 //\r
1277 // Calculate the total number of pages since alignment is larger than page size.\r
1278 //\r
1279 AlignmentMask = Alignment - 1;\r
1280 RealPages = Pages + EFI_SIZE_TO_PAGES (Alignment);\r
1281 //\r
1282 // Make sure that Pages plus EFI_SIZE_TO_PAGES (Alignment) does not overflow.\r
1283 //\r
1284 ASSERT (RealPages > Pages);\r
1285\r
1286 Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, RealPages, &Memory);\r
1287 if (EFI_ERROR (Status)) {\r
1288 return NULL;\r
1289 }\r
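 //\r
 // Round the allocated address up to the requested alignment boundary; the\r
 // unaligned pages below and above the aligned region are freed afterwards.\r
 //\r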
1290 AlignedMemory = ((UINTN) Memory + AlignmentMask) & ~AlignmentMask;\r
1291 UnalignedPages = EFI_SIZE_TO_PAGES (AlignedMemory - (UINTN) Memory);\r
1292 if (UnalignedPages > 0) {\r
1293 //\r
1294 // Free first unaligned page(s).\r
1295 //\r
1296 Status = gSmst->SmmFreePages (Memory, UnalignedPages);\r
1297 ASSERT_EFI_ERROR (Status);\r
1298 }\r
1299 Memory = AlignedMemory + EFI_PAGES_TO_SIZE (Pages);\r
1300 UnalignedPages = RealPages - Pages - UnalignedPages;\r
1301 if (UnalignedPages > 0) {\r
1302 //\r
1303 // Free last unaligned page(s).\r
1304 //\r
1305 Status = gSmst->SmmFreePages (Memory, UnalignedPages);\r
1306 ASSERT_EFI_ERROR (Status);\r
1307 }\r
1308 } else {\r
1309 //\r
1310 // Do not over-allocate pages in this case.\r
1311 //\r
1312 Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);\r
1313 if (EFI_ERROR (Status)) {\r
1314 return NULL;\r
1315 }\r
1316 AlignedMemory = (UINTN) Memory;\r
1317 }\r
1318 return (VOID *) AlignedMemory;\r
1319}\r
1320\r
1321/**\r
1322 Perform the remaining tasks.\r
1323\r
1324**/\r
1325VOID\r
1326PerformRemainingTasks (\r
1327 VOID\r
1328 )\r
1329{\r
1330 if (mSmmReadyToLock) {\r
1331 //\r
1332 // Start SMM Profile feature\r
1333 //\r
1334 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {\r
1335 SmmProfileStart ();\r
1336 }\r
1337 //\r
1338 // Create a mix of 2MB and 4KB page table entries. Mark some memory ranges as not-present and execute-disable.\r
1339 //\r
1340 InitPaging ();\r
1341\r
1342 //\r
1343 // Mark critical region to be read-only in page table\r
1344 //\r
1345 SetMemMapAttributes ();\r
1346\r
1347 //\r
1348 // For outside SMRAM, we only map SMM communication buffer or MMIO.\r
1349 //\r
1350 SetUefiMemMapAttributes ();\r
1351\r
1352 //\r
1353 // Set page table itself to be read-only\r
1354 //\r
1355 SetPageTableAttributes ();\r
1356\r
1357 //\r
1358 // Configure SMM Code Access Check feature if available.\r
1359 //\r
1360 ConfigSmmCodeAccessCheck ();\r
1361\r
1362 SmmCpuFeaturesCompleteSmmReadyToLock ();\r
1363\r
1364 //\r
1365 // Clear SMM ready to lock flag\r
1366 //\r
1367 mSmmReadyToLock = FALSE;\r
1368 }\r
1369}\r
1370\r
1371/**\r
1372 Perform the pre tasks.\r
1373\r
1374**/\r
1375VOID\r
1376PerformPreTasks (\r
1377 VOID\r
1378 )\r
1379{\r
1380 RestoreSmmConfigurationInS3 ();\r
1381}\r