1/** @file\r
2Agent Module to load other modules to deploy SMM Entry Vector for X86 CPU.\r
3\r
4Copyright (c) 2009 - 2018, Intel Corporation. All rights reserved.<BR>\r
5Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>\r
6\r
7This program and the accompanying materials\r
8are licensed and made available under the terms and conditions of the BSD License\r
9which accompanies this distribution. The full text of the license may be found at\r
10http://opensource.org/licenses/bsd-license.php\r
11\r
12THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
13WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
14\r
15**/\r
16\r
17#include "PiSmmCpuDxeSmm.h"\r
18\r
19//\r
20// SMM CPU Private Data structure that contains SMM Configuration Protocol\r
21// along with its supporting fields.\r
22//\r
23SMM_CPU_PRIVATE_DATA mSmmCpuPrivateData = {\r
24 SMM_CPU_PRIVATE_DATA_SIGNATURE, // Signature\r
25 NULL, // SmmCpuHandle\r
26 NULL, // Pointer to ProcessorInfo array\r
27 NULL, // Pointer to Operation array\r
28 NULL, // Pointer to CpuSaveStateSize array\r
29 NULL, // Pointer to CpuSaveState array\r
30 { {0} }, // SmmReservedSmramRegion\r
31 {\r
32 SmmStartupThisAp, // SmmCoreEntryContext.SmmStartupThisAp\r
33 0, // SmmCoreEntryContext.CurrentlyExecutingCpu\r
34 0, // SmmCoreEntryContext.NumberOfCpus\r
35 NULL, // SmmCoreEntryContext.CpuSaveStateSize\r
36 NULL // SmmCoreEntryContext.CpuSaveState\r
37 },\r
38 NULL, // SmmCoreEntry\r
39 {\r
40 mSmmCpuPrivateData.SmmReservedSmramRegion, // SmmConfiguration.SmramReservedRegions\r
41 RegisterSmmEntry // SmmConfiguration.RegisterSmmEntry\r
42 },\r
43};\r
44\r
45CPU_HOT_PLUG_DATA mCpuHotPlugData = {\r
46 CPU_HOT_PLUG_DATA_REVISION_1, // Revision\r
47 0, // Array Length of SmBase and APIC ID\r
48 NULL, // Pointer to APIC ID array\r
49 NULL, // Pointer to SMBASE array\r
50 0, // Reserved\r
51 0, // SmrrBase\r
52 0 // SmrrSize\r
53};\r
54\r
55//\r
56// Global pointer used to access mSmmCpuPrivateData from outside and inside SMM\r
57//\r
58SMM_CPU_PRIVATE_DATA *gSmmCpuPrivate = &mSmmCpuPrivateData;\r
59\r
60//\r
61// SMM Relocation variables\r
62//\r
63volatile BOOLEAN *mRebased;\r
64volatile BOOLEAN mIsBsp;\r
65\r
66///\r
67/// Handle for the SMM CPU Protocol\r
68///\r
69EFI_HANDLE mSmmCpuHandle = NULL;\r
70\r
71///\r
72/// SMM CPU Protocol instance\r
73///\r
74EFI_SMM_CPU_PROTOCOL mSmmCpu = {\r
75 SmmReadSaveState,\r
76 SmmWriteSaveState\r
77};\r
78\r
79///\r
80/// SMM Memory Attribute Protocol instance\r
81///\r
82EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL mSmmMemoryAttribute = {\r
83 EdkiiSmmGetMemoryAttributes,\r
84 EdkiiSmmSetMemoryAttributes,\r
85 EdkiiSmmClearMemoryAttributes\r
86};\r
87\r
88EFI_CPU_INTERRUPT_HANDLER mExternalVectorTable[EXCEPTION_VECTOR_NUMBER];\r
89\r
90//\r
91// SMM stack information\r
92//\r
93UINTN mSmmStackArrayBase;\r
94UINTN mSmmStackArrayEnd;\r
95UINTN mSmmStackSize;\r
96\r
97UINTN mMaxNumberOfCpus = 1;\r
98UINTN mNumberOfCpus = 1;\r
99\r
100//\r
101// SMM ready to lock flag\r
102//\r
103BOOLEAN mSmmReadyToLock = FALSE;\r
104\r
105//\r
106// Global used to cache PCD for SMM Code Access Check enable\r
107//\r
108BOOLEAN mSmmCodeAccessCheckEnable = FALSE;\r
109\r
110//\r
111// Global copy of the PcdPteMemoryEncryptionAddressOrMask\r
112//\r
113UINT64 mAddressEncMask = 0;\r
114\r
115//\r
116// Spin lock used to serialize setting of SMM Code Access Check feature\r
117//\r
118SPIN_LOCK *mConfigSmmCodeAccessCheckLock = NULL;\r
119\r
120//\r
121// Saved SMM ranges information\r
122//\r
123EFI_SMRAM_DESCRIPTOR *mSmmCpuSmramRanges;\r
124UINTN mSmmCpuSmramRangeCount;\r
125\r
126UINT8 mPhysicalAddressBits;\r
127\r
128//\r
129// Control register contents saved for SMM S3 resume state initialization.\r
130//\r
131UINT32 mSmmCr0;\r
132UINT32 mSmmCr4;\r
133\r
134/**\r
135 Initialize IDT to setup exception handlers for SMM.\r
136\r
137**/\r
138VOID\r
139InitializeSmmIdt (\r
140 VOID\r
141 )\r
142{\r
143 EFI_STATUS Status;\r
144 BOOLEAN InterruptState;\r
145 IA32_DESCRIPTOR DxeIdtr;\r
146\r
147 //\r
148 // There are 32 (not 256) entries in it since only processor\r
149 // generated exceptions will be handled.\r
150 //\r
151 gcSmiIdtr.Limit = (sizeof(IA32_IDT_GATE_DESCRIPTOR) * 32) - 1;\r
152 //\r
153 // Allocate page aligned IDT, because it might be set as read only.\r
154 //\r
155 gcSmiIdtr.Base = (UINTN)AllocateCodePages (EFI_SIZE_TO_PAGES(gcSmiIdtr.Limit + 1));\r
156 ASSERT (gcSmiIdtr.Base != 0);\r
157 ZeroMem ((VOID *)gcSmiIdtr.Base, gcSmiIdtr.Limit + 1);\r
158\r
159 //\r
160 // Disable Interrupt and save DXE IDT table\r
161 //\r
162 InterruptState = SaveAndDisableInterrupts ();\r
163 AsmReadIdtr (&DxeIdtr);\r
164 //\r
165 // Load SMM temporary IDT table\r
166 //\r
167 AsmWriteIdtr (&gcSmiIdtr);\r
168 //\r
169 // Setup SMM default exception handlers, SMM IDT table\r
170 // will be updated and saved in gcSmiIdtr\r
171 //\r
172 Status = InitializeCpuExceptionHandlers (NULL);\r
173 ASSERT_EFI_ERROR (Status);\r
174 //\r
175 // Restore DXE IDT table and CPU interrupt\r
176 //\r
177 AsmWriteIdtr ((IA32_DESCRIPTOR *) &DxeIdtr);\r
178 SetInterruptState (InterruptState);\r
179}\r
180\r
181/**\r
182 Search module name by input IP address and output it.\r
183\r
184 @param CallerIpAddress Caller instruction pointer.\r
185\r
186**/\r
187VOID\r
188DumpModuleInfoByIp (\r
189 IN UINTN CallerIpAddress\r
190 )\r
191{\r
192 UINTN Pe32Data;\r
193 VOID *PdbPointer;\r
194\r
195 //\r
196 // Find Image Base\r
197 //\r
198 Pe32Data = PeCoffSearchImageBase (CallerIpAddress);\r
199 if (Pe32Data != 0) {\r
200 DEBUG ((DEBUG_ERROR, "It is invoked from the instruction before IP(0x%p)", (VOID *) CallerIpAddress));\r
201 PdbPointer = PeCoffLoaderGetPdbPointer ((VOID *) Pe32Data);\r
202 if (PdbPointer != NULL) {\r
203 DEBUG ((DEBUG_ERROR, " in module (%a)\n", PdbPointer));\r
204 }\r
205 }\r
206}\r
207\r
208/**\r
209 Read information from the CPU save state.\r
210\r
211 @param This EFI_SMM_CPU_PROTOCOL instance\r
212 @param Width The number of bytes to read from the CPU save state.\r
213 @param Register Specifies the CPU register to read from the save state.\r
214 @param CpuIndex Specifies the zero-based index of the CPU save state.\r
215 @param Buffer Upon return, this holds the CPU register value read from the save state.\r
216\r
217 @retval EFI_SUCCESS The register was read from Save State\r
218 @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor\r
219 @retval EFI_INVALID_PARAMETER This or Buffer is NULL.\r
220\r
221**/\r
222EFI_STATUS\r
223EFIAPI\r
224SmmReadSaveState (\r
225 IN CONST EFI_SMM_CPU_PROTOCOL *This,\r
226 IN UINTN Width,\r
227 IN EFI_SMM_SAVE_STATE_REGISTER Register,\r
228 IN UINTN CpuIndex,\r
229 OUT VOID *Buffer\r
230 )\r
231{\r
232 EFI_STATUS Status;\r
233\r
234 //\r
235 // Retrieve pointer to the specified CPU's SMM Save State buffer\r
236 //\r
237 if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {\r
238 return EFI_INVALID_PARAMETER;\r
239 }\r
240\r
241 //\r
242 // Check for special EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID\r
243 //\r
244 if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {\r
245 //\r
246 // The pseudo-register only supports the 64-bit size specified by Width.\r
247 //\r
248 if (Width != sizeof (UINT64)) {\r
249 return EFI_INVALID_PARAMETER;\r
250 }\r
251 //\r
252 // If the processor is in SMM at the time the SMI occurred,\r
253 // the pseudo register value for EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID is returned in Buffer.\r
254 // Otherwise, EFI_NOT_FOUND is returned.\r
255 //\r
256 if (*(mSmmMpSyncData->CpuData[CpuIndex].Present)) {\r
257 *(UINT64 *)Buffer = gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId;\r
258 return EFI_SUCCESS;\r
259 } else {\r
260 return EFI_NOT_FOUND;\r
261 }\r
262 }\r
263\r
264 if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {\r
265 return EFI_INVALID_PARAMETER;\r
266 }\r
267\r
268 Status = SmmCpuFeaturesReadSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
269 if (Status == EFI_UNSUPPORTED) {\r
270 Status = ReadSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
271 }\r
272 return Status;\r
273}\r
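//
// Illustrative usage sketch (not part of this driver): a hedged example of how
// another SMM driver might read the ProcessorId pseudo-register through the
// EFI_SMM_CPU_PROTOCOL produced by this module. The local variable names are
// assumptions for illustration only.
//
//   EFI_SMM_CPU_PROTOCOL  *SmmCpu;
//   UINT64                ProcessorId;
//   UINTN                 CpuIndex;
//   EFI_STATUS            Status;
//
//   CpuIndex = gSmst->CurrentlyExecutingCpu;
//   Status   = gSmst->SmmLocateProtocol (&gEfiSmmCpuProtocolGuid, NULL, (VOID **)&SmmCpu);
//   if (!EFI_ERROR (Status)) {
//     //
//     // Width must be sizeof (UINT64) for this pseudo-register; EFI_NOT_FOUND
//     // means the selected CPU was not in SMM when the SMI occurred.
//     //
//     Status = SmmCpu->ReadSaveState (
//                SmmCpu,
//                sizeof (UINT64),
//                EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID,
//                CpuIndex,
//                &ProcessorId
//                );
//   }
//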
274\r
275/**\r
276 Write data to the CPU save state.\r
277\r
278 @param This EFI_SMM_CPU_PROTOCOL instance\r
279 @param Width The number of bytes to write to the CPU save state.\r
280 @param Register Specifies the CPU register to write to the save state.\r
281 @param CpuIndex Specifies the zero-based index of the CPU save state\r
282 @param Buffer Upon entry, this holds the new CPU register value.\r
283\r
284 @retval EFI_SUCCESS The register was written to Save State\r
285 @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor\r
286 @retval EFI_INVALID_PARAMETER ProcessorIndex or Width is not correct\r
287\r
288**/\r
289EFI_STATUS\r
290EFIAPI\r
291SmmWriteSaveState (\r
292 IN CONST EFI_SMM_CPU_PROTOCOL *This,\r
293 IN UINTN Width,\r
294 IN EFI_SMM_SAVE_STATE_REGISTER Register,\r
295 IN UINTN CpuIndex,\r
296 IN CONST VOID *Buffer\r
297 )\r
298{\r
299 EFI_STATUS Status;\r
300\r
301 //\r
302 // Retrieve pointer to the specified CPU's SMM Save State buffer\r
303 //\r
304 if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {\r
305 return EFI_INVALID_PARAMETER;\r
306 }\r
307\r
308 //\r
309 // Writes to EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID are ignored\r
310 //\r
311 if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {\r
312 return EFI_SUCCESS;\r
313 }\r
314\r
315 if (!mSmmMpSyncData->CpuData[CpuIndex].Present) {\r
316 return EFI_INVALID_PARAMETER;\r
317 }\r
318\r
319 Status = SmmCpuFeaturesWriteSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
320 if (Status == EFI_UNSUPPORTED) {\r
321 Status = WriteSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
322 }\r
323 return Status;\r
324}\r
325\r
326\r
327/**\r
328 C function for SMI handler. To change all processors' SMMBase Register.\r
329\r
330**/\r
331VOID\r
332EFIAPI\r
333SmmInitHandler (\r
334 VOID\r
335 )\r
336{\r
337 UINT32 ApicId;\r
338 UINTN Index;\r
339\r
340 //\r
341 // Update SMM IDT entries' code segment and load IDT\r
342 //\r
343 AsmWriteIdtr (&gcSmiIdtr);\r
344 ApicId = GetApicId ();\r
345\r
346 ASSERT (mNumberOfCpus <= mMaxNumberOfCpus);\r
347\r
348 for (Index = 0; Index < mNumberOfCpus; Index++) {\r
349 if (ApicId == (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {\r
350 //\r
351 // Initialize SMM specific features on the currently executing CPU\r
352 //\r
353 SmmCpuFeaturesInitializeProcessor (\r
354 Index,\r
355 mIsBsp,\r
356 gSmmCpuPrivate->ProcessorInfo,\r
357 &mCpuHotPlugData\r
358 );\r
359\r
360 if (!mSmmS3Flag) {\r
361 //\r
362 // Check XD and BTS features on each processor on normal boot\r
363 //\r
364 CheckFeatureSupported ();\r
365 }\r
366\r
367 if (mIsBsp) {\r
368 //\r
369 // BSP rebase is already done above.\r
370 // Initialize private data during S3 resume\r
371 //\r
372 InitializeMpSyncData ();\r
373 }\r
374\r
375 //\r
376 // Hook return after RSM to set SMM re-based flag\r
377 //\r
378 SemaphoreHook (Index, &mRebased[Index]);\r
379\r
380 return;\r
381 }\r
382 }\r
383 ASSERT (FALSE);\r
384}\r
385\r
386/**\r
387 Relocate SmmBases for each processor.\r
388\r
389 Execute on first boot and all S3 resumes\r
390\r
391**/\r
392VOID\r
393EFIAPI\r
394SmmRelocateBases (\r
395 VOID\r
396 )\r
397{\r
398 UINT8 BakBuf[BACK_BUF_SIZE];\r
399 SMRAM_SAVE_STATE_MAP BakBuf2;\r
400 SMRAM_SAVE_STATE_MAP *CpuStatePtr;\r
401 UINT8 *U8Ptr;\r
402 UINT32 ApicId;\r
403 UINTN Index;\r
404 UINTN BspIndex;\r
405\r
406 //\r
407 // Make sure the reserved size is large enough for procedure SmmInitTemplate.\r
408 //\r
409 ASSERT (sizeof (BakBuf) >= gcSmmInitSize);\r
410\r
411 //\r
412 // Patch ASM code template with current CR0, CR3, and CR4 values\r
413 //\r
414 mSmmCr0 = (UINT32)AsmReadCr0 ();\r
415 PatchInstructionX86 (gPatchSmmCr0, mSmmCr0, 4);\r
416 PatchInstructionX86 (gPatchSmmCr3, AsmReadCr3 (), 4);\r
417 mSmmCr4 = (UINT32)AsmReadCr4 ();\r
418 PatchInstructionX86 (gPatchSmmCr4, mSmmCr4, 4);\r
419\r
420 //\r
421 // Patch GDTR for SMM base relocation\r
422 //\r
423 gcSmiInitGdtr.Base = gcSmiGdtr.Base;\r
424 gcSmiInitGdtr.Limit = gcSmiGdtr.Limit;\r
425\r
426 U8Ptr = (UINT8*)(UINTN)(SMM_DEFAULT_SMBASE + SMM_HANDLER_OFFSET);\r
427 CpuStatePtr = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);\r
428\r
429 //\r
430 // Backup original contents at address 0x38000\r
431 //\r
432 CopyMem (BakBuf, U8Ptr, sizeof (BakBuf));\r
433 CopyMem (&BakBuf2, CpuStatePtr, sizeof (BakBuf2));\r
434\r
435 //\r
436 // Load image for relocation\r
437 //\r
438 CopyMem (U8Ptr, gcSmmInitTemplate, gcSmmInitSize);\r
439\r
440 //\r
441 // Retrieve the local APIC ID of current processor\r
442 //\r
443 ApicId = GetApicId ();\r
444\r
445 //\r
446 // Relocate SM bases for all APs\r
447 // This is APs' 1st SMI - rebase will be done here, and APs' default SMI handler will be overridden by gcSmmInitTemplate\r
448 //\r
449 mIsBsp = FALSE;\r
450 BspIndex = (UINTN)-1;\r
451 for (Index = 0; Index < mNumberOfCpus; Index++) {\r
452 mRebased[Index] = FALSE;\r
453 if (ApicId != (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {\r
454 SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);\r
455 //\r
456 // Wait for this AP to finish its 1st SMI\r
457 //\r
458 while (!mRebased[Index]);\r
459 } else {\r
460 //\r
461 // BSP will be Relocated later\r
462 //\r
463 BspIndex = Index;\r
464 }\r
465 }\r
466\r
467 //\r
468 // Relocate BSP's SMM base\r
469 //\r
470 ASSERT (BspIndex != (UINTN)-1);\r
471 mIsBsp = TRUE;\r
472 SendSmiIpi (ApicId);\r
473 //\r
474 // Wait for the BSP to finish its 1st SMI\r
475 //\r
476 while (!mRebased[BspIndex]);\r
477\r
478 //\r
479 // Restore contents at address 0x38000\r
480 //\r
481 CopyMem (CpuStatePtr, &BakBuf2, sizeof (BakBuf2));\r
482 CopyMem (U8Ptr, BakBuf, sizeof (BakBuf));\r
483}\r
484\r
485/**\r
486 SMM Ready To Lock event notification handler.\r
487\r
488 The CPU S3 data is copied to SMRAM for security, and mSmmReadyToLock is set so\r
489 that additional lock actions are performed from SMM on the next SMI.\r
490\r
491 @param[in] Protocol Points to the protocol's unique identifier.\r
492 @param[in] Interface Points to the interface instance.\r
493 @param[in] Handle The handle on which the interface was installed.\r
494\r
495 @retval EFI_SUCCESS Notification handler runs successfully.\r
496 **/\r
497EFI_STATUS\r
498EFIAPI\r
499SmmReadyToLockEventNotify (\r
500 IN CONST EFI_GUID *Protocol,\r
501 IN VOID *Interface,\r
502 IN EFI_HANDLE Handle\r
503 )\r
504{\r
505 GetAcpiCpuData ();\r
506\r
507 //\r
508 // Cache a copy of UEFI memory map before we start profiling feature.\r
509 //\r
510 GetUefiMemoryMap ();\r
511\r
512 //\r
513 // Set SMM ready to lock flag and return\r
514 //\r
515 mSmmReadyToLock = TRUE;\r
516 return EFI_SUCCESS;\r
517}\r
518\r
519/**\r
520 The module Entry Point of the CPU SMM driver.\r
521\r
522 @param ImageHandle The firmware allocated handle for the EFI image.\r
523 @param SystemTable A pointer to the EFI System Table.\r
524\r
525 @retval EFI_SUCCESS The entry point is executed successfully.\r
526 @retval Other Some error occurs when executing this entry point.\r
527\r
528**/\r
529EFI_STATUS\r
530EFIAPI\r
531PiCpuSmmEntry (\r
532 IN EFI_HANDLE ImageHandle,\r
533 IN EFI_SYSTEM_TABLE *SystemTable\r
534 )\r
535{\r
536 EFI_STATUS Status;\r
537 EFI_MP_SERVICES_PROTOCOL *MpServices;\r
538 UINTN NumberOfEnabledProcessors;\r
539 UINTN Index;\r
540 VOID *Buffer;\r
541 UINTN BufferPages;\r
542 UINTN TileCodeSize;\r
543 UINTN TileDataSize;\r
544 UINTN TileSize;\r
545 UINT8 *Stacks;\r
546 VOID *Registration;\r
547 UINT32 RegEax;\r
548 UINT32 RegEdx;\r
549 UINTN FamilyId;\r
550 UINTN ModelId;\r
551 UINT32 Cr3;\r
552\r
553 //\r
554 // Initialize address fixup\r
555 //\r
556 PiSmmCpuSmmInitFixupAddress ();\r
557 PiSmmCpuSmiEntryFixupAddress ();\r
558\r
559 //\r
560 // Initialize Debug Agent to support source level debug in SMM code\r
561 //\r
562 InitializeDebugAgent (DEBUG_AGENT_INIT_SMM, NULL, NULL);\r
563\r
564 //\r
565 // Report the start of CPU SMM initialization.\r
566 //\r
567 REPORT_STATUS_CODE (\r
568 EFI_PROGRESS_CODE,\r
569 EFI_COMPUTING_UNIT_HOST_PROCESSOR | EFI_CU_HP_PC_SMM_INIT\r
570 );\r
571\r
572 //\r
573 // Find out SMRR Base and SMRR Size\r
574 //\r
575 FindSmramInfo (&mCpuHotPlugData.SmrrBase, &mCpuHotPlugData.SmrrSize);\r
576\r
577 //\r
578 // Get MP Services Protocol\r
579 //\r
580 Status = SystemTable->BootServices->LocateProtocol (&gEfiMpServiceProtocolGuid, NULL, (VOID **)&MpServices);\r
581 ASSERT_EFI_ERROR (Status);\r
582\r
583 //\r
584 // Use MP Services Protocol to retrieve the number of processors and number of enabled processors\r
585 //\r
586 Status = MpServices->GetNumberOfProcessors (MpServices, &mNumberOfCpus, &NumberOfEnabledProcessors);\r
587 ASSERT_EFI_ERROR (Status);\r
588 ASSERT (mNumberOfCpus <= PcdGet32 (PcdCpuMaxLogicalProcessorNumber));\r
589\r
590 //\r
591 // If CPU hot plug is supported, PcdCpuSmmEnableBspElection should be set to TRUE.\r
592 // A constant BSP index makes no sense because the BSP may be hot removed.\r
593 //\r
594 DEBUG_CODE (\r
595 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {\r
596\r
597 ASSERT (FeaturePcdGet (PcdCpuSmmEnableBspElection));\r
598 }\r
599 );\r
600\r
601 //\r
602 // Save the PcdCpuSmmCodeAccessCheckEnable value into a global variable.\r
603 //\r
604 mSmmCodeAccessCheckEnable = PcdGetBool (PcdCpuSmmCodeAccessCheckEnable);\r
605 DEBUG ((EFI_D_INFO, "PcdCpuSmmCodeAccessCheckEnable = %d\n", mSmmCodeAccessCheckEnable));\r
606\r
607 //\r
608 // Save the PcdPteMemoryEncryptionAddressOrMask value into a global variable.\r
609 // Make sure AddressEncMask is contained to smallest supported address field.\r
610 //\r
611 mAddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;\r
612 DEBUG ((EFI_D_INFO, "mAddressEncMask = 0x%lx\n", mAddressEncMask));\r
613\r
614 //\r
615 // If CPU hot plug is supported, we need to allocate resources for possibly hot-added processors\r
616 //\r
617 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {\r
618 mMaxNumberOfCpus = PcdGet32 (PcdCpuMaxLogicalProcessorNumber);\r
619 } else {\r
620 mMaxNumberOfCpus = mNumberOfCpus;\r
621 }\r
622 gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus = mMaxNumberOfCpus;\r
623\r
624 //\r
625 // The CPU save state and code for the SMI entry point are tiled within an SMRAM\r
626 // allocated buffer. The minimum size of this buffer for a uniprocessor system\r
627 // is 32 KB, because the entry point is SMBASE + 32KB, and the CPU save state area is\r
628 // just below SMBASE + 64KB. If more than one CPU is present in the platform,\r
629 // then the SMI entry point and the CPU save state areas can be tiled to minimize\r
630 // the total amount of SMRAM required for all the CPUs. The tile size can be computed\r
631 // by adding the CPU save state size, any extra CPU specific context, and\r
632 // the size of code that must be placed at the SMI entry point to transfer\r
633 // control to a C function in the native SMM execution mode. This size is\r
634 // rounded up to the nearest power of 2 to give the tile size for each CPU.\r
635 // The total amount of memory required is the maximum number of CPUs that the\r
636 // platform supports times the tile size. The picture below shows the tiling,\r
637 // where m is the number of tiles that fit in 32KB.\r
638 //\r
639 // +-----------------------------+ <-- 2^n offset from Base of allocated buffer\r
640 // | CPU m+1 Save State |\r
641 // +-----------------------------+\r
642 // | CPU m+1 Extra Data |\r
643 // +-----------------------------+\r
644 // | Padding |\r
645 // +-----------------------------+\r
646 // | CPU 2m SMI Entry |\r
647 // +#############################+ <-- Base of allocated buffer + 64 KB\r
648 // | CPU m-1 Save State |\r
649 // +-----------------------------+\r
650 // | CPU m-1 Extra Data |\r
651 // +-----------------------------+\r
652 // | Padding |\r
653 // +-----------------------------+\r
654 // | CPU 2m-1 SMI Entry |\r
655 // +=============================+ <-- 2^n offset from Base of allocated buffer\r
656 // | . . . . . . . . . . . . |\r
657 // +=============================+ <-- 2^n offset from Base of allocated buffer\r
658 // | CPU 2 Save State |\r
659 // +-----------------------------+\r
660 // | CPU 2 Extra Data |\r
661 // +-----------------------------+\r
662 // | Padding |\r
663 // +-----------------------------+\r
664 // | CPU m+1 SMI Entry |\r
665 // +=============================+ <-- Base of allocated buffer + 32 KB\r
666 // | CPU 1 Save State |\r
667 // +-----------------------------+\r
668 // | CPU 1 Extra Data |\r
669 // +-----------------------------+\r
670 // | Padding |\r
671 // +-----------------------------+\r
672 // | CPU m SMI Entry |\r
673 // +#############################+ <-- Base of allocated buffer + 32 KB == CPU 0 SMBASE + 64 KB\r
674 // | CPU 0 Save State |\r
675 // +-----------------------------+\r
676 // | CPU 0 Extra Data |\r
677 // +-----------------------------+\r
678 // | Padding |\r
679 // +-----------------------------+\r
680 // | CPU m-1 SMI Entry |\r
681 // +=============================+ <-- 2^n offset from Base of allocated buffer\r
682 // | . . . . . . . . . . . . |\r
683 // +=============================+ <-- 2^n offset from Base of allocated buffer\r
684 // | Padding |\r
685 // +-----------------------------+\r
686 // | CPU 1 SMI Entry |\r
687 // +=============================+ <-- 2^n offset from Base of allocated buffer\r
688 // | Padding |\r
689 // +-----------------------------+\r
690 // | CPU 0 SMI Entry |\r
691 // +#############################+ <-- Base of allocated buffer == CPU 0 SMBASE + 32 KB\r
692 //\r
693\r
694 //\r
695 // Retrieve CPU Family\r
696 //\r
697 AsmCpuid (CPUID_VERSION_INFO, &RegEax, NULL, NULL, NULL);\r
698 FamilyId = (RegEax >> 8) & 0xf;\r
699 ModelId = (RegEax >> 4) & 0xf;\r
700 if (FamilyId == 0x06 || FamilyId == 0x0f) {\r
701 ModelId = ModelId | ((RegEax >> 12) & 0xf0);\r
702 }\r
703\r
704 RegEdx = 0;\r
705 AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);\r
706 if (RegEax >= CPUID_EXTENDED_CPU_SIG) {\r
707 AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);\r
708 }\r
709 //\r
710 // Determine the mode of the CPU at the time an SMI occurs\r
711 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual\r
712 // Volume 3C, Section 34.4.1.1\r
713 //\r
714 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT;\r
715 if ((RegEdx & BIT29) != 0) {\r
716 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;\r
717 }\r
718 if (FamilyId == 0x06) {\r
719 if (ModelId == 0x17 || ModelId == 0x0f || ModelId == 0x1c) {\r
720 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;\r
721 }\r
722 }\r
723\r
724 //\r
725 // Compute tile size of buffer required to hold the CPU SMRAM Save State Map, extra CPU\r
726 // specific context that starts at SMBASE + SMM_PSD_OFFSET, and the SMI entry point.\r
727 // This size is rounded up to nearest power of 2.\r
728 //\r
729 TileCodeSize = GetSmiHandlerSize ();\r
730 TileCodeSize = ALIGN_VALUE(TileCodeSize, SIZE_4KB);\r
731 TileDataSize = (SMRAM_SAVE_STATE_MAP_OFFSET - SMM_PSD_OFFSET) + sizeof (SMRAM_SAVE_STATE_MAP);\r
732 TileDataSize = ALIGN_VALUE(TileDataSize, SIZE_4KB);\r
733 TileSize = TileDataSize + TileCodeSize - 1;\r
734 TileSize = 2 * GetPowerOfTwo32 ((UINT32)TileSize);\r
735 DEBUG ((EFI_D_INFO, "SMRAM TileSize = 0x%08x (0x%08x, 0x%08x)\n", TileSize, TileCodeSize, TileDataSize));\r
736\r
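//
// Worked example (illustrative only; the constants below are assumptions,
// not values guaranteed by this module): with SMM_PSD_OFFSET = 0xFB00,
// SMRAM_SAVE_STATE_MAP_OFFSET = 0xFC00, sizeof (SMRAM_SAVE_STATE_MAP) = 0x400,
// and an SMI handler that fits in one 4 KB page:
//   TileDataSize = (0xFC00 - 0xFB00) + 0x400 = 0x500  -> aligned to 0x1000
//   TileCodeSize = GetSmiHandlerSize ()               -> aligned to 0x1000
//   TileSize     = 0x1000 + 0x1000 - 1 = 0x1FFF
//   TileSize     = 2 * GetPowerOfTwo32 (0x1FFF) = 2 * 0x1000 = 0x2000 (8 KB per CPU)
//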
737 //\r
738 // If the TileSize is larger than space available for the SMI Handler of\r
739 // CPU[i], the extra CPU specific context of CPU[i+1], and the SMRAM Save\r
740 // State Map of CPU[i+1], then ASSERT(). If this ASSERT() is triggered, then\r
741 // the SMI Handler size must be reduced or the size of the extra CPU specific\r
742 // context must be reduced.\r
743 //\r
744 ASSERT (TileSize <= (SMRAM_SAVE_STATE_MAP_OFFSET + sizeof (SMRAM_SAVE_STATE_MAP) - SMM_HANDLER_OFFSET));\r
745\r
746 //\r
747 // Allocate buffer for all of the tiles.\r
748 //\r
749 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual\r
750 // Volume 3C, Section 34.11 SMBASE Relocation\r
751 // For Pentium and Intel486 processors, the SMBASE values must be\r
752 // aligned on a 32-KByte boundary or the processor will enter shutdown\r
753 // state during the execution of a RSM instruction.\r
754 //\r
755 // Intel486 processors: FamilyId is 4\r
756 // Pentium processors : FamilyId is 5\r
757 //\r
758 BufferPages = EFI_SIZE_TO_PAGES (SIZE_32KB + TileSize * (mMaxNumberOfCpus - 1));\r
759 if ((FamilyId == 4) || (FamilyId == 5)) {\r
760 Buffer = AllocateAlignedCodePages (BufferPages, SIZE_32KB);\r
761 } else {\r
762 Buffer = AllocateAlignedCodePages (BufferPages, SIZE_4KB);\r
763 }\r
764 ASSERT (Buffer != NULL);\r
765 DEBUG ((EFI_D_INFO, "SMRAM SaveState Buffer (0x%08x, 0x%08x)\n", Buffer, EFI_PAGES_TO_SIZE(BufferPages)));\r
766\r
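//
// Continuing the illustrative example above (assumed TileSize = 0x2000 and,
// say, mMaxNumberOfCpus = 4):
//   BufferPages = EFI_SIZE_TO_PAGES (SIZE_32KB + 0x2000 * 3)
//               = EFI_SIZE_TO_PAGES (0xE000) = 14 pages
// i.e. the first CPU consumes a full 32 KB region and each additional CPU
// adds one tile.
//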
767 //\r
768 // Allocate buffer for pointers to array in SMM_CPU_PRIVATE_DATA.\r
769 //\r
770 gSmmCpuPrivate->ProcessorInfo = (EFI_PROCESSOR_INFORMATION *)AllocatePool (sizeof (EFI_PROCESSOR_INFORMATION) * mMaxNumberOfCpus);\r
771 ASSERT (gSmmCpuPrivate->ProcessorInfo != NULL);\r
772\r
773 gSmmCpuPrivate->Operation = (SMM_CPU_OPERATION *)AllocatePool (sizeof (SMM_CPU_OPERATION) * mMaxNumberOfCpus);\r
774 ASSERT (gSmmCpuPrivate->Operation != NULL);\r
775\r
776 gSmmCpuPrivate->CpuSaveStateSize = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);\r
777 ASSERT (gSmmCpuPrivate->CpuSaveStateSize != NULL);\r
778\r
779 gSmmCpuPrivate->CpuSaveState = (VOID **)AllocatePool (sizeof (VOID *) * mMaxNumberOfCpus);\r
780 ASSERT (gSmmCpuPrivate->CpuSaveState != NULL);\r
781\r
782 mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveStateSize = gSmmCpuPrivate->CpuSaveStateSize;\r
783 mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveState = gSmmCpuPrivate->CpuSaveState;\r
784\r
785 //\r
786 // Allocate buffer for pointers to array in CPU_HOT_PLUG_DATA.\r
787 //\r
788 mCpuHotPlugData.ApicId = (UINT64 *)AllocatePool (sizeof (UINT64) * mMaxNumberOfCpus);\r
789 ASSERT (mCpuHotPlugData.ApicId != NULL);\r
790 mCpuHotPlugData.SmBase = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);\r
791 ASSERT (mCpuHotPlugData.SmBase != NULL);\r
792 mCpuHotPlugData.ArrayLength = (UINT32)mMaxNumberOfCpus;\r
793\r
794 //\r
795 // Retrieve APIC ID of each enabled processor from the MP Services protocol.\r
796 // Also compute the SMBASE address, CPU Save State address, and CPU Save state\r
797 // size for each CPU in the platform\r
798 //\r
799 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {\r
800 mCpuHotPlugData.SmBase[Index] = (UINTN)Buffer + Index * TileSize - SMM_HANDLER_OFFSET;\r
801 gSmmCpuPrivate->CpuSaveStateSize[Index] = sizeof(SMRAM_SAVE_STATE_MAP);\r
802 gSmmCpuPrivate->CpuSaveState[Index] = (VOID *)(mCpuHotPlugData.SmBase[Index] + SMRAM_SAVE_STATE_MAP_OFFSET);\r
803 gSmmCpuPrivate->Operation[Index] = SmmCpuNone;\r
804\r
805 if (Index < mNumberOfCpus) {\r
806 Status = MpServices->GetProcessorInfo (MpServices, Index, &gSmmCpuPrivate->ProcessorInfo[Index]);\r
807 ASSERT_EFI_ERROR (Status);\r
808 mCpuHotPlugData.ApicId[Index] = gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId;\r
809\r
810 DEBUG ((EFI_D_INFO, "CPU[%03x] APIC ID=%04x SMBASE=%08x SaveState=%08x Size=%08x\n",\r
811 Index,\r
812 (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId,\r
813 mCpuHotPlugData.SmBase[Index],\r
814 gSmmCpuPrivate->CpuSaveState[Index],\r
815 gSmmCpuPrivate->CpuSaveStateSize[Index]\r
816 ));\r
817 } else {\r
818 gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId = INVALID_APIC_ID;\r
819 mCpuHotPlugData.ApicId[Index] = INVALID_APIC_ID;\r
820 }\r
821 }\r
822\r
823 //\r
824 // Allocate SMI stacks for all processors.\r
825 //\r
826 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
827 //\r
828 // Two more pages are allocated for each processor:\r
829 // one is the guard page and the other is the known good stack.\r
830 //\r
831 // +-------------------------------------------+-----+-------------------------------------------+\r
832 // | Known Good Stack | Guard Page | SMM Stack | ... | Known Good Stack | Guard Page | SMM Stack |\r
833 // +-------------------------------------------+-----+-------------------------------------------+\r
834 // | | | |\r
835 // |<-------------- Processor 0 -------------->| |<-------------- Processor n -------------->|\r
836 //\r
837 mSmmStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)) + 2);\r
838 Stacks = (UINT8 *) AllocatePages (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)) + 2));\r
839 ASSERT (Stacks != NULL);\r
840 mSmmStackArrayBase = (UINTN)Stacks;\r
841 mSmmStackArrayEnd = mSmmStackArrayBase + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * mSmmStackSize - 1;\r
842 } else {\r
843 mSmmStackSize = PcdGet32 (PcdCpuSmmStackSize);\r
844 Stacks = (UINT8 *) AllocatePages (EFI_SIZE_TO_PAGES (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * mSmmStackSize));\r
845 ASSERT (Stacks != NULL);\r
846 }\r
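//
// Illustrative sizing (assumed values, not defaults guaranteed here): with
// PcdCpuSmmStackSize = 0x2000 and PcdCpuSmmStackGuard enabled,
//   mSmmStackSize = EFI_PAGES_TO_SIZE (2 + 2) = 0x4000 per CPU
// (8 KB of stack plus one guard page and one known good stack page); with the
// guard disabled, mSmmStackSize = 0x2000 and the stacks are allocated back to
// back without guard pages.
//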
847\r
848 //\r
849 // Set SMI stack for SMM base relocation\r
850 //\r
851 gSmmInitStack = (UINTN) (Stacks + mSmmStackSize - sizeof (UINTN));\r
852\r
853 //\r
854 // Initialize IDT\r
855 //\r
856 InitializeSmmIdt ();\r
857\r
858 //\r
859 // Relocate SMM Base addresses to the ones allocated from SMRAM\r
860 //\r
861 mRebased = (BOOLEAN *)AllocateZeroPool (sizeof (BOOLEAN) * mMaxNumberOfCpus);\r
862 ASSERT (mRebased != NULL);\r
863 SmmRelocateBases ();\r
864\r
865 //\r
866 // Call hook for BSP to perform extra actions in normal mode after all\r
867 // SMM base addresses have been relocated on all CPUs\r
868 //\r
869 SmmCpuFeaturesSmmRelocationComplete ();\r
870\r
871 DEBUG ((DEBUG_INFO, "mXdSupported - 0x%x\n", mXdSupported));\r
872\r
873 //\r
874 // SMM Time initialization\r
875 //\r
876 InitializeSmmTimer ();\r
877\r
878 //\r
879 // Initialize MP globals\r
880 //\r
881 Cr3 = InitializeMpServiceData (Stacks, mSmmStackSize);\r
882\r
883 //\r
884 // Fill in SMM Reserved Regions\r
885 //\r
886 gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedStart = 0;\r
887 gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedSize = 0;\r
888\r
889 //\r
890 // Install the SMM Configuration Protocol onto a new handle on the handle database.\r
891 // The entire SMM Configuration Protocol is allocated from SMRAM, so only a pointer\r
892 // to an SMRAM address will be present in the handle database\r
893 //\r
894 Status = SystemTable->BootServices->InstallMultipleProtocolInterfaces (\r
895 &gSmmCpuPrivate->SmmCpuHandle,\r
896 &gEfiSmmConfigurationProtocolGuid, &gSmmCpuPrivate->SmmConfiguration,\r
897 NULL\r
898 );\r
899 ASSERT_EFI_ERROR (Status);\r
900\r
901 //\r
902 // Install the SMM CPU Protocol into SMM protocol database\r
903 //\r
904 Status = gSmst->SmmInstallProtocolInterface (\r
905 &mSmmCpuHandle,\r
906 &gEfiSmmCpuProtocolGuid,\r
907 EFI_NATIVE_INTERFACE,\r
908 &mSmmCpu\r
909 );\r
910 ASSERT_EFI_ERROR (Status);\r
911\r
912 //\r
913 // Install the SMM Memory Attribute Protocol into SMM protocol database\r
914 //\r
915 Status = gSmst->SmmInstallProtocolInterface (\r
916 &mSmmCpuHandle,\r
917 &gEdkiiSmmMemoryAttributeProtocolGuid,\r
918 EFI_NATIVE_INTERFACE,\r
919 &mSmmMemoryAttribute\r
920 );\r
921 ASSERT_EFI_ERROR (Status);\r
922\r
923 //\r
924 // Expose address of CPU Hot Plug Data structure if CPU hot plug is supported.\r
925 //\r
926 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {\r
927 Status = PcdSet64S (PcdCpuHotPlugDataAddress, (UINT64)(UINTN)&mCpuHotPlugData);\r
928 ASSERT_EFI_ERROR (Status);\r
929 }\r
930\r
931 //\r
932 // Initialize SMM CPU Services Support\r
933 //\r
934 Status = InitializeSmmCpuServices (mSmmCpuHandle);\r
935 ASSERT_EFI_ERROR (Status);\r
936\r
937 //\r
938 // register SMM Ready To Lock Protocol notification\r
939 //\r
940 Status = gSmst->SmmRegisterProtocolNotify (\r
941 &gEfiSmmReadyToLockProtocolGuid,\r
942 SmmReadyToLockEventNotify,\r
943 &Registration\r
944 );\r
945 ASSERT_EFI_ERROR (Status);\r
946\r
947 //\r
948 // Initialize SMM Profile feature\r
949 //\r
950 InitSmmProfile (Cr3);\r
951\r
952 GetAcpiS3EnableFlag ();\r
953 InitSmmS3ResumeState (Cr3);\r
954\r
955 DEBUG ((EFI_D_INFO, "SMM CPU Module exit from SMRAM with EFI_SUCCESS\n"));\r
956\r
957 return EFI_SUCCESS;\r
958}\r
959\r
960/**\r
961\r
962 Find out SMRAM information including SMRR base and SMRR size.\r
963\r
964 @param SmrrBase SMRR base\r
965 @param SmrrSize SMRR size\r
966\r
967**/\r
968VOID\r
969FindSmramInfo (\r
970 OUT UINT32 *SmrrBase,\r
971 OUT UINT32 *SmrrSize\r
972 )\r
973{\r
974 EFI_STATUS Status;\r
975 UINTN Size;\r
976 EFI_SMM_ACCESS2_PROTOCOL *SmmAccess;\r
977 EFI_SMRAM_DESCRIPTOR *CurrentSmramRange;\r
978 UINTN Index;\r
979 UINT64 MaxSize;\r
980 BOOLEAN Found;\r
981\r
982 //\r
983 // Get SMM Access Protocol\r
984 //\r
985 Status = gBS->LocateProtocol (&gEfiSmmAccess2ProtocolGuid, NULL, (VOID **)&SmmAccess);\r
986 ASSERT_EFI_ERROR (Status);\r
987\r
988 //\r
989 // Get SMRAM information\r
990 //\r
991 Size = 0;\r
992 Status = SmmAccess->GetCapabilities (SmmAccess, &Size, NULL);\r
993 ASSERT (Status == EFI_BUFFER_TOO_SMALL);\r
994\r
995 mSmmCpuSmramRanges = (EFI_SMRAM_DESCRIPTOR *)AllocatePool (Size);\r
996 ASSERT (mSmmCpuSmramRanges != NULL);\r
997\r
998 Status = SmmAccess->GetCapabilities (SmmAccess, &Size, mSmmCpuSmramRanges);\r
999 ASSERT_EFI_ERROR (Status);\r
1000\r
1001 mSmmCpuSmramRangeCount = Size / sizeof (EFI_SMRAM_DESCRIPTOR);\r
1002\r
1003 //\r
1004 // Find the largest SMRAM range between 1MB and 4GB that is at least 256K - 4K in size\r
1005 //\r
1006 CurrentSmramRange = NULL;\r
1007 for (Index = 0, MaxSize = SIZE_256KB - EFI_PAGE_SIZE; Index < mSmmCpuSmramRangeCount; Index++) {\r
1008 //\r
1009 // Skip any SMRAM region that is already allocated, needs testing, or needs ECC initialization\r
1010 //\r
1011 if ((mSmmCpuSmramRanges[Index].RegionState & (EFI_ALLOCATED | EFI_NEEDS_TESTING | EFI_NEEDS_ECC_INITIALIZATION)) != 0) {\r
1012 continue;\r
1013 }\r
1014\r
1015 if (mSmmCpuSmramRanges[Index].CpuStart >= BASE_1MB) {\r
1016 if ((mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize) <= SMRR_MAX_ADDRESS) {\r
1017 if (mSmmCpuSmramRanges[Index].PhysicalSize >= MaxSize) {\r
1018 MaxSize = mSmmCpuSmramRanges[Index].PhysicalSize;\r
1019 CurrentSmramRange = &mSmmCpuSmramRanges[Index];\r
1020 }\r
1021 }\r
1022 }\r
1023 }\r
1024\r
1025 ASSERT (CurrentSmramRange != NULL);\r
1026\r
1027 *SmrrBase = (UINT32)CurrentSmramRange->CpuStart;\r
1028 *SmrrSize = (UINT32)CurrentSmramRange->PhysicalSize;\r
1029\r
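//
// Extend the range to cover any SMRAM descriptors that are physically
// adjacent to it, repeating until no neighbor is left. Illustrative example
// (hypothetical addresses): if the largest range is CpuStart = 0x7F200000,
// PhysicalSize = 0x600000, and another descriptor covers
// 0x7F000000..0x7F1FFFFF, the first pass lowers *SmrrBase to 0x7F000000 and
// grows *SmrrSize to 0x800000; the loop then repeats in case that merge made
// yet another descriptor adjacent.
//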
1030 do {\r
1031 Found = FALSE;\r
1032 for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {\r
1033 if (mSmmCpuSmramRanges[Index].CpuStart < *SmrrBase &&\r
1034 *SmrrBase == (mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize)) {\r
1035 *SmrrBase = (UINT32)mSmmCpuSmramRanges[Index].CpuStart;\r
1036 *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);\r
1037 Found = TRUE;\r
1038 } else if ((*SmrrBase + *SmrrSize) == mSmmCpuSmramRanges[Index].CpuStart && mSmmCpuSmramRanges[Index].PhysicalSize > 0) {\r
1039 *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);\r
1040 Found = TRUE;\r
1041 }\r
1042 }\r
1043 } while (Found);\r
1044\r
1045 DEBUG ((EFI_D_INFO, "SMRR Base: 0x%x, SMRR Size: 0x%x\n", *SmrrBase, *SmrrSize));\r
1046}\r
1047\r
1048/**\r
1049Configure SMM Code Access Check feature on an AP.\r
1050SMM Feature Control MSR will be locked after configuration.\r
1051\r
1052@param[in,out] Buffer Pointer to private data buffer.\r
1053**/\r
1054VOID\r
1055EFIAPI\r
1056ConfigSmmCodeAccessCheckOnCurrentProcessor (\r
1057 IN OUT VOID *Buffer\r
1058 )\r
1059{\r
1060 UINTN CpuIndex;\r
1061 UINT64 SmmFeatureControlMsr;\r
1062 UINT64 NewSmmFeatureControlMsr;\r
1063\r
1064 //\r
1065 // Retrieve the CPU Index from the context passed in\r
1066 //\r
1067 CpuIndex = *(UINTN *)Buffer;\r
1068\r
1069 //\r
1070 // Get the current SMM Feature Control MSR value\r
1071 //\r
1072 SmmFeatureControlMsr = SmmCpuFeaturesGetSmmRegister (CpuIndex, SmmRegFeatureControl);\r
1073\r
1074 //\r
1075 // Compute the new SMM Feature Control MSR value\r
1076 //\r
1077 NewSmmFeatureControlMsr = SmmFeatureControlMsr;\r
1078 if (mSmmCodeAccessCheckEnable) {\r
1079 NewSmmFeatureControlMsr |= SMM_CODE_CHK_EN_BIT;\r
1080 if (FeaturePcdGet (PcdCpuSmmFeatureControlMsrLock)) {\r
1081 NewSmmFeatureControlMsr |= SMM_FEATURE_CONTROL_LOCK_BIT;\r
1082 }\r
1083 }\r
1084\r
1085 //\r
1086 // Only set the SMM Feature Control MSR value if the new value is different than the current value\r
1087 //\r
1088 if (NewSmmFeatureControlMsr != SmmFeatureControlMsr) {\r
1089 SmmCpuFeaturesSetSmmRegister (CpuIndex, SmmRegFeatureControl, NewSmmFeatureControlMsr);\r
1090 }\r
1091\r
1092 //\r
1093 // Release the spin lock used to serialize the updates to the SMM Feature Control MSR\r
1094 //\r
1095 ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);\r
1096}\r
1097\r
1098/**\r
1099Configure SMM Code Access Check feature for all processors.\r
1100SMM Feature Control MSR will be locked after configuration.\r
1101**/\r
1102VOID\r
1103ConfigSmmCodeAccessCheck (\r
1104 VOID\r
1105 )\r
1106{\r
1107 UINTN Index;\r
1108 EFI_STATUS Status;\r
1109\r
1110 //\r
1111 // Check to see if the Feature Control MSR is supported on this CPU\r
1112 //\r
1113 Index = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;\r
1114 if (!SmmCpuFeaturesIsSmmRegisterSupported (Index, SmmRegFeatureControl)) {\r
1115 mSmmCodeAccessCheckEnable = FALSE;\r
1116 return;\r
1117 }\r
1118\r
1119 //\r
1120 // Check to see if the CPU supports the SMM Code Access Check feature\r
1121 // Do not access this MSR unless the CPU supports the SmmRegFeatureControl\r
1122 //\r
1123 if ((AsmReadMsr64 (EFI_MSR_SMM_MCA_CAP) & SMM_CODE_ACCESS_CHK_BIT) == 0) {\r
1124 mSmmCodeAccessCheckEnable = FALSE;\r
1125 return;\r
1126 }\r
1127\r
1128 //\r
1129 // Initialize the lock used to serialize the MSR programming in BSP and all APs\r
1130 //\r
1131 InitializeSpinLock (mConfigSmmCodeAccessCheckLock);\r
1132\r
1133 //\r
1134 // Acquire Config SMM Code Access Check spin lock. The BSP will release the\r
1135 // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().\r
1136 //\r
1137 AcquireSpinLock (mConfigSmmCodeAccessCheckLock);\r
1138\r
1139 //\r
1140 // Enable SMM Code Access Check feature on the BSP.\r
1141 //\r
1142 ConfigSmmCodeAccessCheckOnCurrentProcessor (&Index);\r
1143\r
1144 //\r
1145 // Enable SMM Code Access Check feature for the APs.\r
1146 //\r
1147 for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {\r
1148 if (Index != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {\r
1149 if (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId == INVALID_APIC_ID) {\r
1150 //\r
1151 // If this processor does not exist\r
1152 //\r
1153 continue;\r
1154 }\r
1155 //\r
1156 // Acquire Config SMM Code Access Check spin lock. The AP will release the\r
1157 // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().\r
1158 //\r
1159 AcquireSpinLock (mConfigSmmCodeAccessCheckLock);\r
1160\r
1161 //\r
1162 // Call SmmStartupThisAp() to enable SMM Code Access Check on an AP.\r
1163 //\r
1164 Status = gSmst->SmmStartupThisAp (ConfigSmmCodeAccessCheckOnCurrentProcessor, Index, &Index);\r
1165 ASSERT_EFI_ERROR (Status);\r
1166\r
1167 //\r
1168 // Wait for the AP to release the Config SMM Code Access Check spin lock.\r
1169 //\r
1170 while (!AcquireSpinLockOrFail (mConfigSmmCodeAccessCheckLock)) {\r
1171 CpuPause ();\r
1172 }\r
1173\r
1174 //\r
1175 // Release the Config SMM Code Access Check spin lock.\r
1176 //\r
1177 ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);\r
1178 }\r
1179 }\r
1180}\r
1181\r
1182/**\r
1183 This API provides a way to allocate memory for page table.\r
1184\r
1185 This API can be called more than once to allocate memory for page tables.\r
1186\r
1187 Allocates the number of 4KB pages of type EfiRuntimeServicesData and returns a pointer to the\r
1188 allocated buffer. The buffer returned is aligned on a 4KB boundary. If Pages is 0, then NULL\r
1189 is returned. If there is not enough memory remaining to satisfy the request, then NULL is\r
1190 returned.\r
1191\r
1192 @param Pages The number of 4 KB pages to allocate.\r
1193\r
1194 @return A pointer to the allocated buffer or NULL if allocation fails.\r
1195\r
1196**/\r
1197VOID *\r
1198AllocatePageTableMemory (\r
1199 IN UINTN Pages\r
1200 )\r
1201{\r
1202 VOID *Buffer;\r
1203\r
1204 Buffer = SmmCpuFeaturesAllocatePageTableMemory (Pages);\r
1205 if (Buffer != NULL) {\r
1206 return Buffer;\r
1207 }\r
1208 return AllocatePages (Pages);\r
1209}\r
1210\r
1211/**\r
1212 Allocate pages for code.\r
1213\r
1214 @param[in] Pages Number of pages to be allocated.\r
1215\r
1216 @return Allocated memory.\r
1217**/\r
1218VOID *\r
1219AllocateCodePages (\r
1220 IN UINTN Pages\r
1221 )\r
1222{\r
1223 EFI_STATUS Status;\r
1224 EFI_PHYSICAL_ADDRESS Memory;\r
1225\r
1226 if (Pages == 0) {\r
1227 return NULL;\r
1228 }\r
1229\r
1230 Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);\r
1231 if (EFI_ERROR (Status)) {\r
1232 return NULL;\r
1233 }\r
1234 return (VOID *) (UINTN) Memory;\r
1235}\r
1236\r
1237/**\r
1238 Allocate aligned pages for code.\r
1239\r
1240 @param[in] Pages Number of pages to be allocated.\r
1241 @param[in] Alignment The requested alignment of the allocation.\r
1242 Must be a power of two.\r
1243 If Alignment is zero, then byte alignment is used.\r
1244\r
1245 @return Allocated memory.\r
1246**/\r
1247VOID *\r
1248AllocateAlignedCodePages (\r
1249 IN UINTN Pages,\r
1250 IN UINTN Alignment\r
1251 )\r
1252{\r
1253 EFI_STATUS Status;\r
1254 EFI_PHYSICAL_ADDRESS Memory;\r
1255 UINTN AlignedMemory;\r
1256 UINTN AlignmentMask;\r
1257 UINTN UnalignedPages;\r
1258 UINTN RealPages;\r
1259\r
1260 //\r
1261 // Alignment must be a power of two or zero.\r
1262 //\r
1263 ASSERT ((Alignment & (Alignment - 1)) == 0);\r
1264\r
1265 if (Pages == 0) {\r
1266 return NULL;\r
1267 }\r
1268 if (Alignment > EFI_PAGE_SIZE) {\r
1269 //\r
1270 // Calculate the total number of pages since alignment is larger than page size.\r
1271 //\r
1272 AlignmentMask = Alignment - 1;\r
1273 RealPages = Pages + EFI_SIZE_TO_PAGES (Alignment);\r
1274 //\r
1275 // Make sure that Pages plus EFI_SIZE_TO_PAGES (Alignment) does not overflow.\r
1276 //\r
1277 ASSERT (RealPages > Pages);\r
1278\r
1279 Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, RealPages, &Memory);\r
1280 if (EFI_ERROR (Status)) {\r
1281 return NULL;\r
1282 }\r
1283 AlignedMemory = ((UINTN) Memory + AlignmentMask) & ~AlignmentMask;\r
1284 UnalignedPages = EFI_SIZE_TO_PAGES (AlignedMemory - (UINTN) Memory);\r
1285 if (UnalignedPages > 0) {\r
1286 //\r
1287 // Free first unaligned page(s).\r
1288 //\r
1289 Status = gSmst->SmmFreePages (Memory, UnalignedPages);\r
1290 ASSERT_EFI_ERROR (Status);\r
1291 }\r
1292 Memory = AlignedMemory + EFI_PAGES_TO_SIZE (Pages);\r
1293 UnalignedPages = RealPages - Pages - UnalignedPages;\r
1294 if (UnalignedPages > 0) {\r
1295 //\r
1296 // Free last unaligned page(s).\r
1297 //\r
1298 Status = gSmst->SmmFreePages (Memory, UnalignedPages);\r
1299 ASSERT_EFI_ERROR (Status);\r
1300 }\r
1301 } else {\r
1302 //\r
1303 // Do not over-allocate pages in this case.\r
1304 //\r
1305 Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);\r
1306 if (EFI_ERROR (Status)) {\r
1307 return NULL;\r
1308 }\r
1309 AlignedMemory = (UINTN) Memory;\r
1310 }\r
1311 return (VOID *) AlignedMemory;\r
1312}\r
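//
// Worked example (hypothetical addresses, for illustration only): with
// Pages = 8 and Alignment = SIZE_32KB, RealPages = 8 + EFI_SIZE_TO_PAGES (SIZE_32KB) = 16.
// If SmmAllocatePages () returns Memory = 0x7F123000, then
// AlignedMemory = 0x7F128000, the 5 head pages at 0x7F123000 and the
// 3 tail pages at 0x7F130000 are freed, and the caller receives an 8-page,
// 32 KB aligned code buffer at 0x7F128000.
//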
1313\r
1314/**\r
1315 Perform the remaining tasks.\r
1316\r
1317**/\r
1318VOID\r
1319PerformRemainingTasks (\r
1320 VOID\r
1321 )\r
1322{\r
1323 if (mSmmReadyToLock) {\r
1324 //\r
1325 // Start SMM Profile feature\r
1326 //\r
1327 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {\r
1328 SmmProfileStart ();\r
1329 }\r
1330 //\r
1331 // Create a mix of 2MB and 4KB page tables. Mark some memory ranges as absent and execute-disable.\r
1332 //\r
1333 InitPaging ();\r
1334\r
1335 //\r
1336 // Mark critical region to be read-only in page table\r
1337 //\r
1338 SetMemMapAttributes ();\r
1339\r
1340 //\r
1341 // For outside SMRAM, we only map SMM communication buffer or MMIO.\r
1342 //\r
1343 SetUefiMemMapAttributes ();\r
1344\r
1345 //\r
1346 // Set page table itself to be read-only\r
1347 //\r
1348 SetPageTableAttributes ();\r
1349\r
1350 //\r
1351 // Configure SMM Code Access Check feature if available.\r
1352 //\r
1353 ConfigSmmCodeAccessCheck ();\r
1354\r
1355 SmmCpuFeaturesCompleteSmmReadyToLock ();\r
1356\r
1357 //\r
1358 // Clean SMM ready to lock flag\r
1359 //\r
1360 mSmmReadyToLock = FALSE;\r
1361 }\r
1362}\r
1363\r
1364/**\r
1365 Perform the pre tasks.\r
1366\r
1367**/\r
1368VOID\r
1369PerformPreTasks (\r
1370 VOID\r
1371 )\r
1372{\r
1373 RestoreSmmConfigurationInS3 ();\r
1374}\r