]> git.proxmox.com Git - mirror_edk2.git/blame - UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.c
UefiCpuPkg/PiSmmCpuDxeSmm: patch "gSmmCr3" with PatchInstructionX86()
[mirror_edk2.git] / UefiCpuPkg / PiSmmCpuDxeSmm / PiSmmCpuDxeSmm.c
CommitLineData
529a5a86
MK
1/** @file\r
2Agent Module to load other modules to deploy SMM Entry Vector for X86 CPU.\r
3\r
e21e355e 4Copyright (c) 2009 - 2018, Intel Corporation. All rights reserved.<BR>\r
241f9149
LD
5Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>\r
6\r
529a5a86
MK
7This program and the accompanying materials\r
8are licensed and made available under the terms and conditions of the BSD License\r
9which accompanies this distribution. The full text of the license may be found at\r
10http://opensource.org/licenses/bsd-license.php\r
11\r
12THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
13WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
14\r
15**/\r
16\r
17#include "PiSmmCpuDxeSmm.h"\r
18\r
//
// SMM CPU Private Data structure that contains SMM Configuration Protocol
// along its supporting fields.
//
SMM_CPU_PRIVATE_DATA  mSmmCpuPrivateData = {
  SMM_CPU_PRIVATE_DATA_SIGNATURE,               // Signature
  NULL,                                         // SmmCpuHandle
  NULL,                                         // Pointer to ProcessorInfo array
  NULL,                                         // Pointer to Operation array
  NULL,                                         // Pointer to CpuSaveStateSize array
  NULL,                                         // Pointer to CpuSaveState array
  { {0} },                                      // SmmReservedSmramRegion
  {
    SmmStartupThisAp,                           // SmmCoreEntryContext.SmmStartupThisAp
    0,                                          // SmmCoreEntryContext.CurrentlyExecutingCpu
    0,                                          // SmmCoreEntryContext.NumberOfCpus
    NULL,                                       // SmmCoreEntryContext.CpuSaveStateSize
    NULL                                        // SmmCoreEntryContext.CpuSaveState
  },
  NULL,                                         // SmmCoreEntry
  {
    // Self-referential: SmmConfiguration exposes the reserved-region array above.
    mSmmCpuPrivateData.SmmReservedSmramRegion,  // SmmConfiguration.SmramReservedRegions
    RegisterSmmEntry                            // SmmConfiguration.RegisterSmmEntry
  },
};
44\r
//
// CPU hot-plug bookkeeping: per-CPU APIC ID / SMBASE arrays plus the SMRR
// range. ApicId and SmBase are allocated and filled in PiCpuSmmEntry().
//
CPU_HOT_PLUG_DATA  mCpuHotPlugData = {
  CPU_HOT_PLUG_DATA_REVISION_1,                 // Revision
  0,                                            // Array Length of SmBase and APIC ID
  NULL,                                         // Pointer to APIC ID array
  NULL,                                         // Pointer to SMBASE array
  0,                                            // Reserved
  0,                                            // SmrrBase
  0                                             // SmrrSize
};
54\r
//
// Global pointer used to access mSmmCpuPrivateData from outside and inside SMM
//
SMM_CPU_PRIVATE_DATA  *gSmmCpuPrivate = &mSmmCpuPrivateData;

//
// SMM Relocation variables
//
volatile BOOLEAN  *mRebased;  // Per-CPU flags; set via SemaphoreHook() once a CPU's SMBASE is relocated
volatile BOOLEAN  mIsBsp;     // TRUE while the BSP is the CPU currently being relocated

///
/// Handle for the SMM CPU Protocol
///
EFI_HANDLE  mSmmCpuHandle = NULL;

///
/// SMM CPU Protocol instance
///
EFI_SMM_CPU_PROTOCOL  mSmmCpu = {
  SmmReadSaveState,
  SmmWriteSaveState
};

///
/// SMM Memory Attribute Protocol instance
///
EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL  mSmmMemoryAttribute = {
  EdkiiSmmGetMemoryAttributes,
  EdkiiSmmSetMemoryAttributes,
  EdkiiSmmClearMemoryAttributes
};

//
// NOTE(review): presumably holds handlers registered through the SMM CPU
// Service protocol; population is not visible in this file — confirm.
//
EFI_CPU_INTERRUPT_HANDLER  mExternalVectorTable[EXCEPTION_VECTOR_NUMBER];

//
// SMM stack information
//
UINTN  mSmmStackArrayBase;
UINTN  mSmmStackArrayEnd;
UINTN  mSmmStackSize;

//
// CPU counts; both default to 1 until PiCpuSmmEntry() queries MP Services.
//
UINTN  mMaxNumberOfCpus = 1;
UINTN  mNumberOfCpus    = 1;

//
// SMM ready to lock flag, set by SmmReadyToLockEventNotify()
//
BOOLEAN  mSmmReadyToLock = FALSE;

//
// Global used to cache PCD for SMM Code Access Check enable
//
BOOLEAN  mSmmCodeAccessCheckEnable = FALSE;

//
// Global copy of the PcdPteMemoryEncryptionAddressOrMask
//
UINT64  mAddressEncMask = 0;

//
// Spin lock used to serialize setting of SMM Code Access Check feature
//
SPIN_LOCK  *mConfigSmmCodeAccessCheckLock = NULL;

//
// Saved SMM ranges information
//
EFI_SMRAM_DESCRIPTOR  *mSmmCpuSmramRanges;
UINTN                 mSmmCpuSmramRangeCount;

UINT8  mPhysicalAddressBits;
127\r
529a5a86
MK
128/**\r
129 Initialize IDT to setup exception handlers for SMM.\r
130\r
131**/\r
132VOID\r
133InitializeSmmIdt (\r
134 VOID\r
135 )\r
136{\r
137 EFI_STATUS Status;\r
138 BOOLEAN InterruptState;\r
139 IA32_DESCRIPTOR DxeIdtr;\r
717fb604
JY
140\r
141 //\r
142 // There are 32 (not 255) entries in it since only processor\r
143 // generated exceptions will be handled.\r
144 //\r
145 gcSmiIdtr.Limit = (sizeof(IA32_IDT_GATE_DESCRIPTOR) * 32) - 1;\r
146 //\r
147 // Allocate page aligned IDT, because it might be set as read only.\r
148 //\r
149 gcSmiIdtr.Base = (UINTN)AllocateCodePages (EFI_SIZE_TO_PAGES(gcSmiIdtr.Limit + 1));\r
150 ASSERT (gcSmiIdtr.Base != 0);\r
151 ZeroMem ((VOID *)gcSmiIdtr.Base, gcSmiIdtr.Limit + 1);\r
152\r
529a5a86
MK
153 //\r
154 // Disable Interrupt and save DXE IDT table\r
155 //\r
156 InterruptState = SaveAndDisableInterrupts ();\r
157 AsmReadIdtr (&DxeIdtr);\r
158 //\r
159 // Load SMM temporary IDT table\r
160 //\r
161 AsmWriteIdtr (&gcSmiIdtr);\r
162 //\r
163 // Setup SMM default exception handlers, SMM IDT table\r
164 // will be updated and saved in gcSmiIdtr\r
165 //\r
166 Status = InitializeCpuExceptionHandlers (NULL);\r
167 ASSERT_EFI_ERROR (Status);\r
168 //\r
169 // Restore DXE IDT table and CPU interrupt\r
170 //\r
171 AsmWriteIdtr ((IA32_DESCRIPTOR *) &DxeIdtr);\r
172 SetInterruptState (InterruptState);\r
173}\r
174\r
175/**\r
176 Search module name by input IP address and output it.\r
177\r
178 @param CallerIpAddress Caller instruction pointer.\r
179\r
180**/\r
181VOID\r
182DumpModuleInfoByIp (\r
183 IN UINTN CallerIpAddress\r
184 )\r
185{\r
186 UINTN Pe32Data;\r
529a5a86 187 VOID *PdbPointer;\r
529a5a86
MK
188\r
189 //\r
190 // Find Image Base\r
191 //\r
9e981317 192 Pe32Data = PeCoffSearchImageBase (CallerIpAddress);\r
529a5a86 193 if (Pe32Data != 0) {\r
b8caae19 194 DEBUG ((DEBUG_ERROR, "It is invoked from the instruction before IP(0x%p)", (VOID *) CallerIpAddress));\r
529a5a86
MK
195 PdbPointer = PeCoffLoaderGetPdbPointer ((VOID *) Pe32Data);\r
196 if (PdbPointer != NULL) {\r
b8caae19 197 DEBUG ((DEBUG_ERROR, " in module (%a)\n", PdbPointer));\r
529a5a86
MK
198 }\r
199 }\r
200}\r
201\r
202/**\r
203 Read information from the CPU save state.\r
204\r
205 @param This EFI_SMM_CPU_PROTOCOL instance\r
206 @param Width The number of bytes to read from the CPU save state.\r
207 @param Register Specifies the CPU register to read form the save state.\r
208 @param CpuIndex Specifies the zero-based index of the CPU save state.\r
209 @param Buffer Upon return, this holds the CPU register value read from the save state.\r
210\r
211 @retval EFI_SUCCESS The register was read from Save State\r
212 @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor\r
213 @retval EFI_INVALID_PARAMTER This or Buffer is NULL.\r
214\r
215**/\r
216EFI_STATUS\r
217EFIAPI\r
218SmmReadSaveState (\r
219 IN CONST EFI_SMM_CPU_PROTOCOL *This,\r
220 IN UINTN Width,\r
221 IN EFI_SMM_SAVE_STATE_REGISTER Register,\r
222 IN UINTN CpuIndex,\r
223 OUT VOID *Buffer\r
224 )\r
225{\r
226 EFI_STATUS Status;\r
227\r
228 //\r
229 // Retrieve pointer to the specified CPU's SMM Save State buffer\r
230 //\r
231 if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {\r
232 return EFI_INVALID_PARAMETER;\r
233 }\r
234\r
235 //\r
236 // Check for special EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID\r
237 //\r
238 if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {\r
239 //\r
240 // The pseudo-register only supports the 64-bit size specified by Width.\r
241 //\r
242 if (Width != sizeof (UINT64)) {\r
243 return EFI_INVALID_PARAMETER;\r
244 }\r
245 //\r
246 // If the processor is in SMM at the time the SMI occurred,\r
247 // the pseudo register value for EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID is returned in Buffer.\r
248 // Otherwise, EFI_NOT_FOUND is returned.\r
249 //\r
ed3d5ecb 250 if (*(mSmmMpSyncData->CpuData[CpuIndex].Present)) {\r
529a5a86
MK
251 *(UINT64 *)Buffer = gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId;\r
252 return EFI_SUCCESS;\r
253 } else {\r
254 return EFI_NOT_FOUND;\r
255 }\r
256 }\r
257\r
ed3d5ecb 258 if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {\r
529a5a86
MK
259 return EFI_INVALID_PARAMETER;\r
260 }\r
261\r
262 Status = SmmCpuFeaturesReadSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
263 if (Status == EFI_UNSUPPORTED) {\r
264 Status = ReadSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
265 }\r
266 return Status;\r
267}\r
268\r
269/**\r
270 Write data to the CPU save state.\r
271\r
272 @param This EFI_SMM_CPU_PROTOCOL instance\r
273 @param Width The number of bytes to read from the CPU save state.\r
274 @param Register Specifies the CPU register to write to the save state.\r
275 @param CpuIndex Specifies the zero-based index of the CPU save state\r
276 @param Buffer Upon entry, this holds the new CPU register value.\r
277\r
278 @retval EFI_SUCCESS The register was written from Save State\r
279 @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor\r
280 @retval EFI_INVALID_PARAMTER ProcessorIndex or Width is not correct\r
281\r
282**/\r
283EFI_STATUS\r
284EFIAPI\r
285SmmWriteSaveState (\r
286 IN CONST EFI_SMM_CPU_PROTOCOL *This,\r
287 IN UINTN Width,\r
288 IN EFI_SMM_SAVE_STATE_REGISTER Register,\r
289 IN UINTN CpuIndex,\r
290 IN CONST VOID *Buffer\r
291 )\r
292{\r
293 EFI_STATUS Status;\r
294\r
295 //\r
296 // Retrieve pointer to the specified CPU's SMM Save State buffer\r
297 //\r
298 if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {\r
299 return EFI_INVALID_PARAMETER;\r
300 }\r
301\r
302 //\r
303 // Writes to EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID are ignored\r
304 //\r
305 if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {\r
306 return EFI_SUCCESS;\r
307 }\r
308\r
309 if (!mSmmMpSyncData->CpuData[CpuIndex].Present) {\r
310 return EFI_INVALID_PARAMETER;\r
311 }\r
312\r
313 Status = SmmCpuFeaturesWriteSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
314 if (Status == EFI_UNSUPPORTED) {\r
315 Status = WriteSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
316 }\r
317 return Status;\r
318}\r
319\r
320\r
/**
  C function for SMI handler. To change all processor's SMMBase Register.

  Runs on each CPU during its first SMI: locates the executing CPU's slot by
  APIC ID, performs CPU-specific SMM feature initialization, and hooks the
  RSM return path so SmmRelocateBases() can observe rebase completion via
  mRebased[Index].
**/
VOID
EFIAPI
SmmInitHandler (
  VOID
  )
{
  UINT32  ApicId;
  UINTN   Index;

  //
  // Update SMM IDT entries' code segment and load IDT
  //
  AsmWriteIdtr (&gcSmiIdtr);
  ApicId = GetApicId ();

  ASSERT (mNumberOfCpus <= mMaxNumberOfCpus);

  //
  // Find this CPU's slot by matching its local APIC ID against the
  // processor table filled in by PiCpuSmmEntry().
  //
  for (Index = 0; Index < mNumberOfCpus; Index++) {
    if (ApicId == (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
      //
      // Initialize SMM specific features on the currently executing CPU
      //
      SmmCpuFeaturesInitializeProcessor (
        Index,
        mIsBsp,
        gSmmCpuPrivate->ProcessorInfo,
        &mCpuHotPlugData
        );

      if (!mSmmS3Flag) {
        //
        // Check XD and BTS features on each processor on normal boot
        //
        CheckFeatureSupported ();
      }

      if (mIsBsp) {
        //
        // BSP rebase is already done above.
        // Initialize private data during S3 resume
        //
        InitializeMpSyncData ();
      }

      //
      // Hook return after RSM to set SMM re-based flag
      //
      SemaphoreHook (Index, &mRebased[Index]);

      return;
    }
  }

  //
  // The executing CPU's APIC ID was not found in the processor table.
  //
  ASSERT (FALSE);
}
379\r
/**
  Relocate SmmBases for each processor.

  Execute on first boot and all S3 resumes.

  Temporarily installs gcSmmInitTemplate at the default SMBASE handler
  address (0x38000), triggers one SMI per CPU (APs first, BSP last) so each
  runs SmmInitHandler() and rebases itself, then restores the clobbered
  memory at the default SMBASE.
**/
VOID
EFIAPI
SmmRelocateBases (
  VOID
  )
{
  UINT8                 BakBuf[BACK_BUF_SIZE];
  SMRAM_SAVE_STATE_MAP  BakBuf2;
  SMRAM_SAVE_STATE_MAP  *CpuStatePtr;
  UINT8                 *U8Ptr;
  UINT32                ApicId;
  UINTN                 Index;
  UINTN                 BspIndex;

  //
  // Make sure the reserved size is large enough for procedure SmmInitTemplate.
  //
  ASSERT (sizeof (BakBuf) >= gcSmmInitSize);

  //
  // Patch ASM code template with current CR0, CR3, and CR4 values
  //
  gSmmCr0 = (UINT32)AsmReadCr0 ();
  PatchInstructionX86 (gPatchSmmCr3, AsmReadCr3 (), 4);
  gSmmCr4 = (UINT32)AsmReadCr4 ();

  //
  // Patch GDTR for SMM base relocation
  //
  gcSmiInitGdtr.Base  = gcSmiGdtr.Base;
  gcSmiInitGdtr.Limit = gcSmiGdtr.Limit;

  U8Ptr = (UINT8*)(UINTN)(SMM_DEFAULT_SMBASE + SMM_HANDLER_OFFSET);
  CpuStatePtr = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);

  //
  // Backup original contents at address 0x38000
  //
  CopyMem (BakBuf, U8Ptr, sizeof (BakBuf));
  CopyMem (&BakBuf2, CpuStatePtr, sizeof (BakBuf2));

  //
  // Load image for relocation
  //
  CopyMem (U8Ptr, gcSmmInitTemplate, gcSmmInitSize);

  //
  // Retrieve the local APIC ID of current processor
  //
  ApicId = GetApicId ();

  //
  // Relocate SM bases for all APs
  // This is APs' 1st SMI - rebase will be done here, and APs' default SMI handler will be overridden by gcSmmInitTemplate
  //
  mIsBsp = FALSE;
  BspIndex = (UINTN)-1;
  for (Index = 0; Index < mNumberOfCpus; Index++) {
    mRebased[Index] = FALSE;
    if (ApicId != (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
      SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
      //
      // Wait for this AP to finish its 1st SMI.
      // Busy-wait: mRebased[Index] is volatile and set by SmmInitHandler()'s
      // SemaphoreHook on the AP's RSM return.
      //
      while (!mRebased[Index]);
    } else {
      //
      // BSP will be Relocated later
      //
      BspIndex = Index;
    }
  }

  //
  // Relocate BSP's SMM base
  //
  ASSERT (BspIndex != (UINTN)-1);
  mIsBsp = TRUE;
  SendSmiIpi (ApicId);
  //
  // Wait for the BSP to finish its 1st SMI
  //
  while (!mRebased[BspIndex]);

  //
  // Restore contents at address 0x38000
  //
  CopyMem (CpuStatePtr, &BakBuf2, sizeof (BakBuf2));
  CopyMem (U8Ptr, BakBuf, sizeof (BakBuf));
}
476\r
529a5a86
MK
/**
  SMM Ready To Lock event notification handler.

  The CPU S3 data is copied to SMRAM for security and mSmmReadyToLock is set to
  perform additional lock actions that must be performed from SMM on the next SMI.

  @param[in] Protocol   Points to the protocol's unique identifier.
  @param[in] Interface  Points to the interface instance.
  @param[in] Handle     The handle on which the interface was installed.

  @retval EFI_SUCCESS   Notification handler runs successfully.
 **/
EFI_STATUS
EFIAPI
SmmReadyToLockEventNotify (
  IN CONST EFI_GUID  *Protocol,
  IN VOID            *Interface,
  IN EFI_HANDLE      Handle
  )
{
  //
  // Copy the CPU S3 data into SMRAM before lockdown (see function header).
  //
  GetAcpiCpuData ();

  //
  // Cache a copy of UEFI memory map before we start profiling feature.
  //
  GetUefiMemoryMap ();

  //
  // Set SMM ready to lock flag and return
  //
  mSmmReadyToLock = TRUE;
  return EFI_SUCCESS;
}
510\r
/**
  The module Entry Point of the CPU SMM driver.

  @param ImageHandle The firmware allocated handle for the EFI image.
  @param SystemTable A pointer to the EFI System Table.

  @retval EFI_SUCCESS The entry point is executed successfully.
  @retval Other       Some error occurs when executing this entry point.

**/
EFI_STATUS
EFIAPI
PiCpuSmmEntry (
  IN EFI_HANDLE        ImageHandle,
  IN EFI_SYSTEM_TABLE  *SystemTable
  )
{
  EFI_STATUS                Status;
  EFI_MP_SERVICES_PROTOCOL  *MpServices;
  UINTN                     NumberOfEnabledProcessors;
  UINTN                     Index;
  VOID                      *Buffer;
  UINTN                     BufferPages;
  UINTN                     TileCodeSize;
  UINTN                     TileDataSize;
  UINTN                     TileSize;
  UINT8                     *Stacks;
  VOID                      *Registration;
  UINT32                    RegEax;
  UINT32                    RegEdx;
  UINTN                     FamilyId;
  UINTN                     ModelId;
  UINT32                    Cr3;

  //
  // Initialize address fixup
  //
  PiSmmCpuSmmInitFixupAddress ();
  PiSmmCpuSmiEntryFixupAddress ();

  //
  // Initialize Debug Agent to support source level debug in SMM code
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_SMM, NULL, NULL);

  //
  // Report the start of CPU SMM initialization.
  //
  REPORT_STATUS_CODE (
    EFI_PROGRESS_CODE,
    EFI_COMPUTING_UNIT_HOST_PROCESSOR | EFI_CU_HP_PC_SMM_INIT
    );

  //
  // Fix segment address of the long-mode-switch jump (only relevant when
  // building for 64-bit, detected via sizeof (UINTN)).
  //
  if (sizeof (UINTN) == sizeof (UINT64)) {
    gSmmJmpAddr.Segment = LONG_MODE_CODE_SEGMENT;
  }

  //
  // Find out SMRR Base and SMRR Size
  //
  FindSmramInfo (&mCpuHotPlugData.SmrrBase, &mCpuHotPlugData.SmrrSize);

  //
  // Get MP Services Protocol
  //
  Status = SystemTable->BootServices->LocateProtocol (&gEfiMpServiceProtocolGuid, NULL, (VOID **)&MpServices);
  ASSERT_EFI_ERROR (Status);

  //
  // Use MP Services Protocol to retrieve the number of processors and number of enabled processors
  //
  Status = MpServices->GetNumberOfProcessors (MpServices, &mNumberOfCpus, &NumberOfEnabledProcessors);
  ASSERT_EFI_ERROR (Status);
  ASSERT (mNumberOfCpus <= PcdGet32 (PcdCpuMaxLogicalProcessorNumber));

  //
  // If support CPU hot plug, PcdCpuSmmEnableBspElection should be set to TRUE.
  // A constant BSP index makes no sense because it may be hot removed.
  //
  DEBUG_CODE (
    if (FeaturePcdGet (PcdCpuHotPlugSupport)) {

      ASSERT (FeaturePcdGet (PcdCpuSmmEnableBspElection));
    }
  );

  //
  // Save the PcdCpuSmmCodeAccessCheckEnable value into a global variable.
  //
  mSmmCodeAccessCheckEnable = PcdGetBool (PcdCpuSmmCodeAccessCheckEnable);
  DEBUG ((EFI_D_INFO, "PcdCpuSmmCodeAccessCheckEnable = %d\n", mSmmCodeAccessCheckEnable));

  //
  // Save the PcdPteMemoryEncryptionAddressOrMask value into a global variable.
  // Make sure AddressEncMask is contained to smallest supported address field.
  //
  mAddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;
  DEBUG ((EFI_D_INFO, "mAddressEncMask = 0x%lx\n", mAddressEncMask));

  //
  // If support CPU hot plug, we need to allocate resources for possibly hot-added processors
  //
  if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
    mMaxNumberOfCpus = PcdGet32 (PcdCpuMaxLogicalProcessorNumber);
  } else {
    mMaxNumberOfCpus = mNumberOfCpus;
  }
  gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus = mMaxNumberOfCpus;

  //
  // The CPU save state and code for the SMI entry point are tiled within an SMRAM
  // allocated buffer. The minimum size of this buffer for a uniprocessor system
  // is 32 KB, because the entry point is SMBASE + 32KB, and CPU save state area
  // just below SMBASE + 64KB. If more than one CPU is present in the platform,
  // then the SMI entry point and the CPU save state areas can be tiles to minimize
  // the total amount SMRAM required for all the CPUs. The tile size can be computed
  // by adding the // CPU save state size, any extra CPU specific context, and
  // the size of code that must be placed at the SMI entry point to transfer
  // control to a C function in the native SMM execution mode. This size is
  // rounded up to the nearest power of 2 to give the tile size for a each CPU.
  // The total amount of memory required is the maximum number of CPUs that
  // platform supports times the tile size. The picture below shows the tiling,
  // where m is the number of tiles that fit in 32KB.
  //
  // +-----------------------------+ <-- 2^n offset from Base of allocated buffer
  // |   CPU m+1 Save State        |
  // +-----------------------------+
  // |   CPU m+1 Extra Data        |
  // +-----------------------------+
  // |   Padding                   |
  // +-----------------------------+
  // |   CPU 2m  SMI Entry         |
  // +#############################+ <-- Base of allocated buffer + 64 KB
  // |   CPU m-1 Save State        |
  // +-----------------------------+
  // |   CPU m-1 Extra Data        |
  // +-----------------------------+
  // |   Padding                   |
  // +-----------------------------+
  // |   CPU 2m-1 SMI Entry        |
  // +=============================+ <-- 2^n offset from Base of allocated buffer
  // |   . . . . . . . . . . . .   |
  // +=============================+ <-- 2^n offset from Base of allocated buffer
  // |   CPU 2 Save State          |
  // +-----------------------------+
  // |   CPU 2 Extra Data          |
  // +-----------------------------+
  // |   Padding                   |
  // +-----------------------------+
  // |   CPU m+1 SMI Entry         |
  // +=============================+ <-- Base of allocated buffer + 32 KB
  // |   CPU 1 Save State          |
  // +-----------------------------+
  // |   CPU 1 Extra Data          |
  // +-----------------------------+
  // |   Padding                   |
  // +-----------------------------+
  // |   CPU m SMI Entry           |
  // +#############################+ <-- Base of allocated buffer + 32 KB == CPU 0 SMBASE + 64 KB
  // |   CPU 0 Save State          |
  // +-----------------------------+
  // |   CPU 0 Extra Data          |
  // +-----------------------------+
  // |   Padding                   |
  // +-----------------------------+
  // |   CPU m-1 SMI Entry         |
  // +=============================+ <-- 2^n offset from Base of allocated buffer
  // |   . . . . . . . . . . . .   |
  // +=============================+ <-- 2^n offset from Base of allocated buffer
  // |   Padding                   |
  // +-----------------------------+
  // |   CPU 1 SMI Entry           |
  // +=============================+ <-- 2^n offset from Base of allocated buffer
  // |   Padding                   |
  // +-----------------------------+
  // |   CPU 0 SMI Entry           |
  // +#############################+ <-- Base of allocated buffer == CPU 0 SMBASE + 32 KB
  //

  //
  // Retrieve CPU Family
  //
  AsmCpuid (CPUID_VERSION_INFO, &RegEax, NULL, NULL, NULL);
  FamilyId = (RegEax >> 8) & 0xf;
  ModelId = (RegEax >> 4) & 0xf;
  if (FamilyId == 0x06 || FamilyId == 0x0f) {
    //
    // Fold in the extended model bits for family 6/15 per CPUID convention.
    //
    ModelId = ModelId | ((RegEax >> 12) & 0xf0);
  }

  RegEdx = 0;
  AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
  if (RegEax >= CPUID_EXTENDED_CPU_SIG) {
    AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);
  }
  //
  // Determine the mode of the CPU at the time an SMI occurs
  // Intel(R) 64 and IA-32 Architectures Software Developer's Manual
  // Volume 3C, Section 34.4.1.1
  //
  mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT;
  if ((RegEdx & BIT29) != 0) {
    mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;
  }
  if (FamilyId == 0x06) {
    if (ModelId == 0x17 || ModelId == 0x0f || ModelId == 0x1c) {
      mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;
    }
  }

  //
  // Compute tile size of buffer required to hold the CPU SMRAM Save State Map, extra CPU
  // specific context start starts at SMBASE + SMM_PSD_OFFSET, and the SMI entry point.
  // This size is rounded up to nearest power of 2.
  //
  TileCodeSize = GetSmiHandlerSize ();
  TileCodeSize = ALIGN_VALUE(TileCodeSize, SIZE_4KB);
  TileDataSize = (SMRAM_SAVE_STATE_MAP_OFFSET - SMM_PSD_OFFSET) + sizeof (SMRAM_SAVE_STATE_MAP);
  TileDataSize = ALIGN_VALUE(TileDataSize, SIZE_4KB);
  TileSize = TileDataSize + TileCodeSize - 1;
  TileSize = 2 * GetPowerOfTwo32 ((UINT32)TileSize);
  DEBUG ((EFI_D_INFO, "SMRAM TileSize = 0x%08x (0x%08x, 0x%08x)\n", TileSize, TileCodeSize, TileDataSize));

  //
  // If the TileSize is larger than space available for the SMI Handler of
  // CPU[i], the extra CPU specific context of CPU[i+1], and the SMRAM Save
  // State Map of CPU[i+1], then ASSERT(). If this ASSERT() is triggered, then
  // the SMI Handler size must be reduced or the size of the extra CPU specific
  // context must be reduced.
  //
  ASSERT (TileSize <= (SMRAM_SAVE_STATE_MAP_OFFSET + sizeof (SMRAM_SAVE_STATE_MAP) - SMM_HANDLER_OFFSET));

  //
  // Allocate buffer for all of the tiles.
  //
  // Intel(R) 64 and IA-32 Architectures Software Developer's Manual
  // Volume 3C, Section 34.11 SMBASE Relocation
  //   For Pentium and Intel486 processors, the SMBASE values must be
  //   aligned on a 32-KByte boundary or the processor will enter shutdown
  //   state during the execution of a RSM instruction.
  //
  // Intel486 processors: FamilyId is 4
  // Pentium processors : FamilyId is 5
  //
  BufferPages = EFI_SIZE_TO_PAGES (SIZE_32KB + TileSize * (mMaxNumberOfCpus - 1));
  if ((FamilyId == 4) || (FamilyId == 5)) {
    Buffer = AllocateAlignedCodePages (BufferPages, SIZE_32KB);
  } else {
    Buffer = AllocateAlignedCodePages (BufferPages, SIZE_4KB);
  }
  ASSERT (Buffer != NULL);
  DEBUG ((EFI_D_INFO, "SMRAM SaveState Buffer (0x%08x, 0x%08x)\n", Buffer, EFI_PAGES_TO_SIZE(BufferPages)));

  //
  // Allocate buffer for pointers to array in  SMM_CPU_PRIVATE_DATA.
  //
  gSmmCpuPrivate->ProcessorInfo = (EFI_PROCESSOR_INFORMATION *)AllocatePool (sizeof (EFI_PROCESSOR_INFORMATION) * mMaxNumberOfCpus);
  ASSERT (gSmmCpuPrivate->ProcessorInfo != NULL);

  gSmmCpuPrivate->Operation = (SMM_CPU_OPERATION *)AllocatePool (sizeof (SMM_CPU_OPERATION) * mMaxNumberOfCpus);
  ASSERT (gSmmCpuPrivate->Operation != NULL);

  gSmmCpuPrivate->CpuSaveStateSize = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);
  ASSERT (gSmmCpuPrivate->CpuSaveStateSize != NULL);

  gSmmCpuPrivate->CpuSaveState = (VOID **)AllocatePool (sizeof (VOID *) * mMaxNumberOfCpus);
  ASSERT (gSmmCpuPrivate->CpuSaveState != NULL);

  mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveStateSize = gSmmCpuPrivate->CpuSaveStateSize;
  mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveState = gSmmCpuPrivate->CpuSaveState;

  //
  // Allocate buffer for pointers to array in CPU_HOT_PLUG_DATA.
  //
  mCpuHotPlugData.ApicId = (UINT64 *)AllocatePool (sizeof (UINT64) * mMaxNumberOfCpus);
  ASSERT (mCpuHotPlugData.ApicId != NULL);
  mCpuHotPlugData.SmBase = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);
  ASSERT (mCpuHotPlugData.SmBase != NULL);
  mCpuHotPlugData.ArrayLength = (UINT32)mMaxNumberOfCpus;

  //
  // Retrieve APIC ID of each enabled processor from the MP Services protocol.
  // Also compute the SMBASE address, CPU Save State address, and CPU Save state
  // size for each CPU in the platform
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    mCpuHotPlugData.SmBase[Index] = (UINTN)Buffer + Index * TileSize - SMM_HANDLER_OFFSET;
    gSmmCpuPrivate->CpuSaveStateSize[Index] = sizeof(SMRAM_SAVE_STATE_MAP);
    gSmmCpuPrivate->CpuSaveState[Index] = (VOID *)(mCpuHotPlugData.SmBase[Index] + SMRAM_SAVE_STATE_MAP_OFFSET);
    gSmmCpuPrivate->Operation[Index] = SmmCpuNone;

    if (Index < mNumberOfCpus) {
      Status = MpServices->GetProcessorInfo (MpServices, Index, &gSmmCpuPrivate->ProcessorInfo[Index]);
      ASSERT_EFI_ERROR (Status);
      mCpuHotPlugData.ApicId[Index] = gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId;

      DEBUG ((EFI_D_INFO, "CPU[%03x]  APIC ID=%04x  SMBASE=%08x  SaveState=%08x  Size=%08x\n",
        Index,
        (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId,
        mCpuHotPlugData.SmBase[Index],
        gSmmCpuPrivate->CpuSaveState[Index],
        gSmmCpuPrivate->CpuSaveStateSize[Index]
        ));
    } else {
      //
      // Slots beyond the currently enabled CPUs are reserved for hot-add;
      // mark them with INVALID_APIC_ID until a CPU arrives.
      //
      gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId = INVALID_APIC_ID;
      mCpuHotPlugData.ApicId[Index] = INVALID_APIC_ID;
    }
  }

  //
  // Allocate SMI stacks for all processors.
  //
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    //
    // 2 more pages is allocated for each processor.
    // one is guard page and the other is known good stack.
    //
    // +-------------------------------------------+-----+-------------------------------------------+
    // | Known Good Stack | Guard Page | SMM Stack | ... | Known Good Stack | Guard Page | SMM Stack |
    // +-------------------------------------------+-----+-------------------------------------------+
    // |                                           |     |                                           |
    // |<-------------- Processor 0 -------------->|     |<-------------- Processor n -------------->|
    //
    mSmmStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)) + 2);
    Stacks = (UINT8 *) AllocatePages (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)) + 2));
    ASSERT (Stacks != NULL);
    mSmmStackArrayBase = (UINTN)Stacks;
    mSmmStackArrayEnd = mSmmStackArrayBase + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * mSmmStackSize - 1;
  } else {
    mSmmStackSize = PcdGet32 (PcdCpuSmmStackSize);
    Stacks = (UINT8 *) AllocatePages (EFI_SIZE_TO_PAGES (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * mSmmStackSize));
    ASSERT (Stacks != NULL);
  }

  //
  // Set SMI stack for SMM base relocation
  //
  gSmmInitStack = (UINTN) (Stacks + mSmmStackSize - sizeof (UINTN));

  //
  // Initialize IDT
  //
  InitializeSmmIdt ();

  //
  // Relocate SMM Base addresses to the ones allocated from SMRAM
  //
  mRebased = (BOOLEAN *)AllocateZeroPool (sizeof (BOOLEAN) * mMaxNumberOfCpus);
  ASSERT (mRebased != NULL);
  SmmRelocateBases ();

  //
  // Call hook for BSP to perform extra actions in normal mode after all
  // SMM base addresses have been relocated on all CPUs
  //
  SmmCpuFeaturesSmmRelocationComplete ();

  DEBUG ((DEBUG_INFO, "mXdSupported - 0x%x\n", mXdSupported));

  //
  // SMM Time initialization
  //
  InitializeSmmTimer ();

  //
  // Initialize MP globals
  //
  Cr3 = InitializeMpServiceData (Stacks, mSmmStackSize);

  //
  // Fill in SMM Reserved Regions
  //
  gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedStart = 0;
  gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedSize  = 0;

  //
  // Install the SMM Configuration Protocol onto a new handle on the handle database.
  // The entire SMM Configuration Protocol is allocated from SMRAM, so only a pointer
  // to an SMRAM address will be present in the handle database
  //
  Status = SystemTable->BootServices->InstallMultipleProtocolInterfaces (
             &gSmmCpuPrivate->SmmCpuHandle,
             &gEfiSmmConfigurationProtocolGuid, &gSmmCpuPrivate->SmmConfiguration,
             NULL
             );
  ASSERT_EFI_ERROR (Status);

  //
  // Install the SMM CPU Protocol into SMM protocol database
  //
  Status = gSmst->SmmInstallProtocolInterface (
             &mSmmCpuHandle,
             &gEfiSmmCpuProtocolGuid,
             EFI_NATIVE_INTERFACE,
             &mSmmCpu
             );
  ASSERT_EFI_ERROR (Status);

  //
  // Install the SMM Memory Attribute Protocol into SMM protocol database
  //
  Status = gSmst->SmmInstallProtocolInterface (
             &mSmmCpuHandle,
             &gEdkiiSmmMemoryAttributeProtocolGuid,
             EFI_NATIVE_INTERFACE,
             &mSmmMemoryAttribute
             );
  ASSERT_EFI_ERROR (Status);

  //
  // Expose address of CPU Hot Plug Data structure if CPU hot plug is supported.
  //
  if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
    Status = PcdSet64S (PcdCpuHotPlugDataAddress, (UINT64)(UINTN)&mCpuHotPlugData);
    ASSERT_EFI_ERROR (Status);
  }

  //
  // Initialize SMM CPU Services Support
  //
  Status = InitializeSmmCpuServices (mSmmCpuHandle);
  ASSERT_EFI_ERROR (Status);

  //
  // register SMM Ready To Lock Protocol notification
  //
  Status = gSmst->SmmRegisterProtocolNotify (
                    &gEfiSmmReadyToLockProtocolGuid,
                    SmmReadyToLockEventNotify,
                    &Registration
                    );
  ASSERT_EFI_ERROR (Status);

  //
  // Initialize SMM Profile feature
  //
  InitSmmProfile (Cr3);

  GetAcpiS3EnableFlag ();
  InitSmmS3ResumeState (Cr3);

  DEBUG ((EFI_D_INFO, "SMM CPU Module exit from SMRAM with EFI_SUCCESS\n"));

  return EFI_SUCCESS;
}
958\r
/**

  Find out SMRAM information including SMRR base and SMRR size.

  Queries the SMM Access2 protocol for all SMRAM ranges, picks the largest
  usable range between 1 MB and 4 GB, and then greedily merges any other
  ranges that are physically adjacent to it so the final SMRR window covers
  the whole contiguous region.

  @param SmrrBase   SMRR base
  @param SmrrSize   SMRR size

**/
VOID
FindSmramInfo (
  OUT UINT32   *SmrrBase,
  OUT UINT32   *SmrrSize
  )
{
  EFI_STATUS                Status;
  UINTN                     Size;
  EFI_SMM_ACCESS2_PROTOCOL  *SmmAccess;
  EFI_SMRAM_DESCRIPTOR      *CurrentSmramRange;
  UINTN                     Index;
  UINT64                    MaxSize;
  BOOLEAN                   Found;

  //
  // Get SMM Access Protocol
  //
  Status = gBS->LocateProtocol (&gEfiSmmAccess2ProtocolGuid, NULL, (VOID **)&SmmAccess);
  ASSERT_EFI_ERROR (Status);

  //
  // Get SMRAM information: first call with Size = 0 is expected to fail with
  // EFI_BUFFER_TOO_SMALL and return the required buffer size.
  //
  Size = 0;
  Status = SmmAccess->GetCapabilities (SmmAccess, &Size, NULL);
  ASSERT (Status == EFI_BUFFER_TOO_SMALL);

  mSmmCpuSmramRanges = (EFI_SMRAM_DESCRIPTOR *)AllocatePool (Size);
  ASSERT (mSmmCpuSmramRanges != NULL);

  Status = SmmAccess->GetCapabilities (SmmAccess, &Size, mSmmCpuSmramRanges);
  ASSERT_EFI_ERROR (Status);

  mSmmCpuSmramRangeCount = Size / sizeof (EFI_SMRAM_DESCRIPTOR);

  //
  // Find the largest SMRAM range between 1MB and 4GB that is at least 256K - 4K in size.
  // MaxSize starts at SIZE_256KB - EFI_PAGE_SIZE so any smaller range is rejected.
  //
  CurrentSmramRange = NULL;
  for (Index = 0, MaxSize = SIZE_256KB - EFI_PAGE_SIZE; Index < mSmmCpuSmramRangeCount; Index++) {
    //
    // Skip any SMRAM region that is already allocated, needs testing, or needs ECC initialization
    //
    if ((mSmmCpuSmramRanges[Index].RegionState & (EFI_ALLOCATED | EFI_NEEDS_TESTING | EFI_NEEDS_ECC_INITIALIZATION)) != 0) {
      continue;
    }

    if (mSmmCpuSmramRanges[Index].CpuStart >= BASE_1MB) {
      if ((mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize) <= SMRR_MAX_ADDRESS) {
        if (mSmmCpuSmramRanges[Index].PhysicalSize >= MaxSize) {
          MaxSize = mSmmCpuSmramRanges[Index].PhysicalSize;
          CurrentSmramRange = &mSmmCpuSmramRanges[Index];
        }
      }
    }
  }

  ASSERT (CurrentSmramRange != NULL);

  *SmrrBase = (UINT32)CurrentSmramRange->CpuStart;
  *SmrrSize = (UINT32)CurrentSmramRange->PhysicalSize;

  //
  // Repeatedly absorb ranges adjacent to the current [*SmrrBase, *SmrrBase + *SmrrSize)
  // window, below or above it, until no more merges occur. The PhysicalSize > 0 check
  // on the upper-adjacency branch prevents a zero-size range from causing an
  // infinite loop (a merge that changes nothing would keep Found == TRUE forever).
  //
  do {
    Found = FALSE;
    for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {
      if (mSmmCpuSmramRanges[Index].CpuStart < *SmrrBase &&
          *SmrrBase == (mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize)) {
        *SmrrBase = (UINT32)mSmmCpuSmramRanges[Index].CpuStart;
        *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);
        Found = TRUE;
      } else if ((*SmrrBase + *SmrrSize) == mSmmCpuSmramRanges[Index].CpuStart && mSmmCpuSmramRanges[Index].PhysicalSize > 0) {
        *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);
        Found = TRUE;
      }
    }
  } while (Found);

  DEBUG ((EFI_D_INFO, "SMRR Base: 0x%x, SMRR Size: 0x%x\n", *SmrrBase, *SmrrSize));
}
1046\r
1047/**\r
1048Configure SMM Code Access Check feature on an AP.\r
1049SMM Feature Control MSR will be locked after configuration.\r
1050\r
1051@param[in,out] Buffer Pointer to private data buffer.\r
1052**/\r
1053VOID\r
1054EFIAPI\r
1055ConfigSmmCodeAccessCheckOnCurrentProcessor (\r
1056 IN OUT VOID *Buffer\r
1057 )\r
1058{\r
1059 UINTN CpuIndex;\r
1060 UINT64 SmmFeatureControlMsr;\r
1061 UINT64 NewSmmFeatureControlMsr;\r
1062\r
1063 //\r
1064 // Retrieve the CPU Index from the context passed in\r
1065 //\r
1066 CpuIndex = *(UINTN *)Buffer;\r
1067\r
1068 //\r
1069 // Get the current SMM Feature Control MSR value\r
1070 //\r
1071 SmmFeatureControlMsr = SmmCpuFeaturesGetSmmRegister (CpuIndex, SmmRegFeatureControl);\r
1072\r
1073 //\r
1074 // Compute the new SMM Feature Control MSR value\r
1075 //\r
1076 NewSmmFeatureControlMsr = SmmFeatureControlMsr;\r
1077 if (mSmmCodeAccessCheckEnable) {\r
1078 NewSmmFeatureControlMsr |= SMM_CODE_CHK_EN_BIT;\r
f6bc3a6d
JF
1079 if (FeaturePcdGet (PcdCpuSmmFeatureControlMsrLock)) {\r
1080 NewSmmFeatureControlMsr |= SMM_FEATURE_CONTROL_LOCK_BIT;\r
1081 }\r
529a5a86
MK
1082 }\r
1083\r
1084 //\r
1085 // Only set the SMM Feature Control MSR value if the new value is different than the current value\r
1086 //\r
1087 if (NewSmmFeatureControlMsr != SmmFeatureControlMsr) {\r
1088 SmmCpuFeaturesSetSmmRegister (CpuIndex, SmmRegFeatureControl, NewSmmFeatureControlMsr);\r
1089 }\r
1090\r
1091 //\r
1092 // Release the spin lock user to serialize the updates to the SMM Feature Control MSR\r
1093 //\r
fe3a75bc 1094 ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);\r
529a5a86
MK
1095}\r
1096\r
/**
Configure SMM Code Access Check feature for all processors.
SMM Feature Control MSR will be locked after configuration.

The BSP configures itself first, then walks every AP, using
mConfigSmmCodeAccessCheckLock as a handshake: the lock is acquired before
dispatching each CPU and released by ConfigSmmCodeAccessCheckOnCurrentProcessor()
when that CPU finishes, which serializes the MSR programming one CPU at a time.
**/
VOID
ConfigSmmCodeAccessCheck (
  VOID
  )
{
  UINTN       Index;
  EFI_STATUS  Status;

  //
  // Check to see if the Feature Control MSR is supported on this CPU
  //
  Index = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
  if (!SmmCpuFeaturesIsSmmRegisterSupported (Index, SmmRegFeatureControl)) {
    mSmmCodeAccessCheckEnable = FALSE;
    return;
  }

  //
  // Check to see if the CPU supports the SMM Code Access Check feature
  // Do not access this MSR unless the CPU supports the SmmRegFeatureControl
  //
  if ((AsmReadMsr64 (EFI_MSR_SMM_MCA_CAP) & SMM_CODE_ACCESS_CHK_BIT) == 0) {
    mSmmCodeAccessCheckEnable = FALSE;
    return;
  }

  //
  // Initialize the lock used to serialize the MSR programming in BSP and all APs
  //
  InitializeSpinLock (mConfigSmmCodeAccessCheckLock);

  //
  // Acquire Config SMM Code Access Check spin lock. The BSP will release the
  // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
  //
  AcquireSpinLock (mConfigSmmCodeAccessCheckLock);

  //
  // Enable SMM Code Access Check feature on the BSP.
  // Note: Index still holds the BSP's CPU index at this point.
  //
  ConfigSmmCodeAccessCheckOnCurrentProcessor (&Index);

  //
  // Enable SMM Code Access Check feature for the APs.
  //
  for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {
    if (Index != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
      if (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId == INVALID_APIC_ID) {
        //
        // If this processor does not exist
        //
        continue;
      }
      //
      // Acquire Config SMM Code Access Check spin lock. The AP will release the
      // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
      //
      AcquireSpinLock (mConfigSmmCodeAccessCheckLock);

      //
      // Call SmmStartupThisAp() to enable SMM Code Access Check on an AP.
      // Index doubles as the procedure argument (the AP's own CPU index).
      //
      Status = gSmst->SmmStartupThisAp (ConfigSmmCodeAccessCheckOnCurrentProcessor, Index, &Index);
      ASSERT_EFI_ERROR (Status);

      //
      // Wait for the AP to release the Config SMM Code Access Check spin lock.
      //
      while (!AcquireSpinLockOrFail (mConfigSmmCodeAccessCheckLock)) {
        CpuPause ();
      }

      //
      // Release the Config SMM Code Access Check spin lock.
      //
      ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);
    }
  }
}
1180\r
21c17193
JY
1181/**\r
1182 This API provides a way to allocate memory for page table.\r
1183\r
1184 This API can be called more once to allocate memory for page tables.\r
1185\r
1186 Allocates the number of 4KB pages of type EfiRuntimeServicesData and returns a pointer to the\r
1187 allocated buffer. The buffer returned is aligned on a 4KB boundary. If Pages is 0, then NULL\r
1188 is returned. If there is not enough memory remaining to satisfy the request, then NULL is\r
1189 returned.\r
1190\r
1191 @param Pages The number of 4 KB pages to allocate.\r
1192\r
1193 @return A pointer to the allocated buffer or NULL if allocation fails.\r
1194\r
1195**/\r
1196VOID *\r
1197AllocatePageTableMemory (\r
1198 IN UINTN Pages\r
1199 )\r
1200{\r
1201 VOID *Buffer;\r
1202\r
1203 Buffer = SmmCpuFeaturesAllocatePageTableMemory (Pages);\r
1204 if (Buffer != NULL) {\r
1205 return Buffer;\r
1206 }\r
1207 return AllocatePages (Pages);\r
1208}\r
1209\r
717fb604
JY
1210/**\r
1211 Allocate pages for code.\r
1212\r
1213 @param[in] Pages Number of pages to be allocated.\r
1214\r
1215 @return Allocated memory.\r
1216**/\r
1217VOID *\r
1218AllocateCodePages (\r
1219 IN UINTN Pages\r
1220 )\r
1221{\r
1222 EFI_STATUS Status;\r
1223 EFI_PHYSICAL_ADDRESS Memory;\r
1224\r
1225 if (Pages == 0) {\r
1226 return NULL;\r
1227 }\r
1228\r
1229 Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);\r
1230 if (EFI_ERROR (Status)) {\r
1231 return NULL;\r
1232 }\r
1233 return (VOID *) (UINTN) Memory;\r
1234}\r
1235\r
1236/**\r
1237 Allocate aligned pages for code.\r
1238\r
1239 @param[in] Pages Number of pages to be allocated.\r
1240 @param[in] Alignment The requested alignment of the allocation.\r
1241 Must be a power of two.\r
1242 If Alignment is zero, then byte alignment is used.\r
1243\r
1244 @return Allocated memory.\r
1245**/\r
1246VOID *\r
1247AllocateAlignedCodePages (\r
1248 IN UINTN Pages,\r
1249 IN UINTN Alignment\r
1250 )\r
1251{\r
1252 EFI_STATUS Status;\r
1253 EFI_PHYSICAL_ADDRESS Memory;\r
1254 UINTN AlignedMemory;\r
1255 UINTN AlignmentMask;\r
1256 UINTN UnalignedPages;\r
1257 UINTN RealPages;\r
1258\r
1259 //\r
1260 // Alignment must be a power of two or zero.\r
1261 //\r
1262 ASSERT ((Alignment & (Alignment - 1)) == 0);\r
1263\r
1264 if (Pages == 0) {\r
1265 return NULL;\r
1266 }\r
1267 if (Alignment > EFI_PAGE_SIZE) {\r
1268 //\r
1269 // Calculate the total number of pages since alignment is larger than page size.\r
1270 //\r
1271 AlignmentMask = Alignment - 1;\r
1272 RealPages = Pages + EFI_SIZE_TO_PAGES (Alignment);\r
1273 //\r
1274 // Make sure that Pages plus EFI_SIZE_TO_PAGES (Alignment) does not overflow.\r
1275 //\r
1276 ASSERT (RealPages > Pages);\r
1277\r
1278 Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, RealPages, &Memory);\r
1279 if (EFI_ERROR (Status)) {\r
1280 return NULL;\r
1281 }\r
1282 AlignedMemory = ((UINTN) Memory + AlignmentMask) & ~AlignmentMask;\r
1283 UnalignedPages = EFI_SIZE_TO_PAGES (AlignedMemory - (UINTN) Memory);\r
1284 if (UnalignedPages > 0) {\r
1285 //\r
1286 // Free first unaligned page(s).\r
1287 //\r
1288 Status = gSmst->SmmFreePages (Memory, UnalignedPages);\r
1289 ASSERT_EFI_ERROR (Status);\r
1290 }\r
8491e302 1291 Memory = AlignedMemory + EFI_PAGES_TO_SIZE (Pages);\r
717fb604
JY
1292 UnalignedPages = RealPages - Pages - UnalignedPages;\r
1293 if (UnalignedPages > 0) {\r
1294 //\r
1295 // Free last unaligned page(s).\r
1296 //\r
1297 Status = gSmst->SmmFreePages (Memory, UnalignedPages);\r
1298 ASSERT_EFI_ERROR (Status);\r
1299 }\r
1300 } else {\r
1301 //\r
1302 // Do not over-allocate pages in this case.\r
1303 //\r
1304 Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);\r
1305 if (EFI_ERROR (Status)) {\r
1306 return NULL;\r
1307 }\r
1308 AlignedMemory = (UINTN) Memory;\r
1309 }\r
1310 return (VOID *) AlignedMemory;\r
1311}\r
1312\r
529a5a86
MK
1313/**\r
1314 Perform the remaining tasks.\r
1315\r
1316**/\r
1317VOID\r
1318PerformRemainingTasks (\r
1319 VOID\r
1320 )\r
1321{\r
1322 if (mSmmReadyToLock) {\r
1323 //\r
1324 // Start SMM Profile feature\r
1325 //\r
1326 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {\r
1327 SmmProfileStart ();\r
1328 }\r
1329 //\r
1330 // Create a mix of 2MB and 4KB page table. Update some memory ranges absent and execute-disable.\r
1331 //\r
1332 InitPaging ();\r
717fb604
JY
1333\r
1334 //\r
1335 // Mark critical region to be read-only in page table\r
1336 //\r
d2fc7711
JY
1337 SetMemMapAttributes ();\r
1338\r
1339 //\r
1340 // For outside SMRAM, we only map SMM communication buffer or MMIO.\r
1341 //\r
1342 SetUefiMemMapAttributes ();\r
717fb604
JY
1343\r
1344 //\r
1345 // Set page table itself to be read-only\r
1346 //\r
1347 SetPageTableAttributes ();\r
1348\r
529a5a86
MK
1349 //\r
1350 // Configure SMM Code Access Check feature if available.\r
1351 //\r
1352 ConfigSmmCodeAccessCheck ();\r
1353\r
21c17193
JY
1354 SmmCpuFeaturesCompleteSmmReadyToLock ();\r
1355\r
529a5a86
MK
1356 //\r
1357 // Clean SMM ready to lock flag\r
1358 //\r
1359 mSmmReadyToLock = FALSE;\r
1360 }\r
1361}\r
/**
  Perform the pre tasks.

  Thin wrapper that restores the SMM configuration during an S3 resume;
  all work is delegated to RestoreSmmConfigurationInS3().

**/
VOID
PerformPreTasks (
  VOID
  )
{
  RestoreSmmConfigurationInS3 ();
}