]> git.proxmox.com Git - mirror_edk2.git/blame_incremental - UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.c
UefiCpuPkg: Refine casting expression result to bigger size
[mirror_edk2.git] / UefiCpuPkg / PiSmmCpuDxeSmm / PiSmmCpuDxeSmm.c
... / ...
CommitLineData
1/** @file\r
2Agent Module to load other modules to deploy SMM Entry Vector for X86 CPU.\r
3\r
4Copyright (c) 2009 - 2017, Intel Corporation. All rights reserved.<BR>\r
5Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>\r
6\r
7This program and the accompanying materials\r
8are licensed and made available under the terms and conditions of the BSD License\r
9which accompanies this distribution. The full text of the license may be found at\r
10http://opensource.org/licenses/bsd-license.php\r
11\r
12THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
13WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
14\r
15**/\r
16\r
17#include "PiSmmCpuDxeSmm.h"\r
18\r
//
// SMM CPU Private Data structure that contains SMM Configuration Protocol
// along its supporting fields.
//
SMM_CPU_PRIVATE_DATA mSmmCpuPrivateData = {
  SMM_CPU_PRIVATE_DATA_SIGNATURE,               // Signature
  NULL,                                         // SmmCpuHandle
  NULL,                                         // Pointer to ProcessorInfo array
  NULL,                                         // Pointer to Operation array
  NULL,                                         // Pointer to CpuSaveStateSize array
  NULL,                                         // Pointer to CpuSaveState array
  { {0} },                                      // SmmReservedSmramRegion
  {
    SmmStartupThisAp,                           // SmmCoreEntryContext.SmmStartupThisAp
    0,                                          // SmmCoreEntryContext.CurrentlyExecutingCpu
    0,                                          // SmmCoreEntryContext.NumberOfCpus
    NULL,                                       // SmmCoreEntryContext.CpuSaveStateSize
    NULL                                        // SmmCoreEntryContext.CpuSaveState
  },
  NULL,                                         // SmmCoreEntry
  {
    // Self-referential: points at the SmmReservedSmramRegion member above.
    mSmmCpuPrivateData.SmmReservedSmramRegion,  // SmmConfiguration.SmramReservedRegions
    RegisterSmmEntry                            // SmmConfiguration.RegisterSmmEntry
  },
};

//
// CPU hot plug bookkeeping: APIC ID and SMBASE per processor, plus the
// SMRR range.  Arrays are allocated in PiCpuSmmEntry.
//
CPU_HOT_PLUG_DATA mCpuHotPlugData = {
  CPU_HOT_PLUG_DATA_REVISION_1,                 // Revision
  0,                                            // Array Length of SmBase and APIC ID
  NULL,                                         // Pointer to APIC ID array
  NULL,                                         // Pointer to SMBASE array
  0,                                            // Reserved
  0,                                            // SmrrBase
  0                                             // SmrrSize
};

//
// Global pointer used to access mSmmCpuPrivateData from outside and inside SMM
//
SMM_CPU_PRIVATE_DATA *gSmmCpuPrivate = &mSmmCpuPrivateData;

//
// SMM Relocation variables
//
// mRebased[Index] is set by the SMI handler (SemaphoreHook) when CPU Index
// has finished its first SMI; mIsBsp tells SmmInitHandler whether the
// current relocation SMI targets the BSP.  Both are polled across
// SMI/normal-mode boundaries, hence volatile.
//
volatile BOOLEAN *mRebased;
volatile BOOLEAN mIsBsp;

///
/// Handle for the SMM CPU Protocol
///
EFI_HANDLE mSmmCpuHandle = NULL;

///
/// SMM CPU Protocol instance
///
EFI_SMM_CPU_PROTOCOL mSmmCpu = {
  SmmReadSaveState,
  SmmWriteSaveState
};

// Externally-registered interrupt handlers, one slot per exception vector.
EFI_CPU_INTERRUPT_HANDLER mExternalVectorTable[EXCEPTION_VECTOR_NUMBER];

//
// SMM stack information (bounds of the per-CPU SMI stack array and the
// per-CPU stack size, set up in PiCpuSmmEntry)
//
UINTN mSmmStackArrayBase;
UINTN mSmmStackArrayEnd;
UINTN mSmmStackSize;

UINTN mMaxNumberOfCpus = 1;
UINTN mNumberOfCpus = 1;

//
// SMM ready to lock flag
//
BOOLEAN mSmmReadyToLock = FALSE;

//
// Global used to cache PCD for SMM Code Access Check enable
//
BOOLEAN mSmmCodeAccessCheckEnable = FALSE;

//
// Global copy of the PcdPteMemoryEncryptionAddressOrMask
//
UINT64  mAddressEncMask = 0;

//
// Spin lock used to serialize setting of SMM Code Access Check feature
//
SPIN_LOCK *mConfigSmmCodeAccessCheckLock = NULL;
110\r
111/**\r
112 Initialize IDT to setup exception handlers for SMM.\r
113\r
114**/\r
115VOID\r
116InitializeSmmIdt (\r
117 VOID\r
118 )\r
119{\r
120 EFI_STATUS Status;\r
121 BOOLEAN InterruptState;\r
122 IA32_DESCRIPTOR DxeIdtr;\r
123\r
124 //\r
125 // There are 32 (not 255) entries in it since only processor\r
126 // generated exceptions will be handled.\r
127 //\r
128 gcSmiIdtr.Limit = (sizeof(IA32_IDT_GATE_DESCRIPTOR) * 32) - 1;\r
129 //\r
130 // Allocate page aligned IDT, because it might be set as read only.\r
131 //\r
132 gcSmiIdtr.Base = (UINTN)AllocateCodePages (EFI_SIZE_TO_PAGES(gcSmiIdtr.Limit + 1));\r
133 ASSERT (gcSmiIdtr.Base != 0);\r
134 ZeroMem ((VOID *)gcSmiIdtr.Base, gcSmiIdtr.Limit + 1);\r
135\r
136 //\r
137 // Disable Interrupt and save DXE IDT table\r
138 //\r
139 InterruptState = SaveAndDisableInterrupts ();\r
140 AsmReadIdtr (&DxeIdtr);\r
141 //\r
142 // Load SMM temporary IDT table\r
143 //\r
144 AsmWriteIdtr (&gcSmiIdtr);\r
145 //\r
146 // Setup SMM default exception handlers, SMM IDT table\r
147 // will be updated and saved in gcSmiIdtr\r
148 //\r
149 Status = InitializeCpuExceptionHandlers (NULL);\r
150 ASSERT_EFI_ERROR (Status);\r
151 //\r
152 // Restore DXE IDT table and CPU interrupt\r
153 //\r
154 AsmWriteIdtr ((IA32_DESCRIPTOR *) &DxeIdtr);\r
155 SetInterruptState (InterruptState);\r
156}\r
157\r
158/**\r
159 Search module name by input IP address and output it.\r
160\r
161 @param CallerIpAddress Caller instruction pointer.\r
162\r
163**/\r
164VOID\r
165DumpModuleInfoByIp (\r
166 IN UINTN CallerIpAddress\r
167 )\r
168{\r
169 UINTN Pe32Data;\r
170 EFI_IMAGE_DOS_HEADER *DosHdr;\r
171 EFI_IMAGE_OPTIONAL_HEADER_PTR_UNION Hdr;\r
172 VOID *PdbPointer;\r
173 UINT64 DumpIpAddress;\r
174\r
175 //\r
176 // Find Image Base\r
177 //\r
178 Pe32Data = CallerIpAddress & ~(SIZE_4KB - 1);\r
179 while (Pe32Data != 0) {\r
180 DosHdr = (EFI_IMAGE_DOS_HEADER *) Pe32Data;\r
181 if (DosHdr->e_magic == EFI_IMAGE_DOS_SIGNATURE) {\r
182 //\r
183 // DOS image header is present, so read the PE header after the DOS image header.\r
184 //\r
185 Hdr.Pe32 = (EFI_IMAGE_NT_HEADERS32 *)(Pe32Data + (UINTN) ((DosHdr->e_lfanew) & 0x0ffff));\r
186 //\r
187 // Make sure PE header address does not overflow and is less than the initial address.\r
188 //\r
189 if (((UINTN)Hdr.Pe32 > Pe32Data) && ((UINTN)Hdr.Pe32 < CallerIpAddress)) {\r
190 if (Hdr.Pe32->Signature == EFI_IMAGE_NT_SIGNATURE) {\r
191 //\r
192 // It's PE image.\r
193 //\r
194 break;\r
195 }\r
196 }\r
197 }\r
198\r
199 //\r
200 // Not found the image base, check the previous aligned address\r
201 //\r
202 Pe32Data -= SIZE_4KB;\r
203 }\r
204\r
205 DumpIpAddress = CallerIpAddress;\r
206 DEBUG ((EFI_D_ERROR, "It is invoked from the instruction before IP(0x%lx)", DumpIpAddress));\r
207\r
208 if (Pe32Data != 0) {\r
209 PdbPointer = PeCoffLoaderGetPdbPointer ((VOID *) Pe32Data);\r
210 if (PdbPointer != NULL) {\r
211 DEBUG ((EFI_D_ERROR, " in module (%a)", PdbPointer));\r
212 }\r
213 }\r
214}\r
215\r
216/**\r
217 Read information from the CPU save state.\r
218\r
219 @param This EFI_SMM_CPU_PROTOCOL instance\r
220 @param Width The number of bytes to read from the CPU save state.\r
221 @param Register Specifies the CPU register to read form the save state.\r
222 @param CpuIndex Specifies the zero-based index of the CPU save state.\r
223 @param Buffer Upon return, this holds the CPU register value read from the save state.\r
224\r
225 @retval EFI_SUCCESS The register was read from Save State\r
226 @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor\r
227 @retval EFI_INVALID_PARAMTER This or Buffer is NULL.\r
228\r
229**/\r
230EFI_STATUS\r
231EFIAPI\r
232SmmReadSaveState (\r
233 IN CONST EFI_SMM_CPU_PROTOCOL *This,\r
234 IN UINTN Width,\r
235 IN EFI_SMM_SAVE_STATE_REGISTER Register,\r
236 IN UINTN CpuIndex,\r
237 OUT VOID *Buffer\r
238 )\r
239{\r
240 EFI_STATUS Status;\r
241\r
242 //\r
243 // Retrieve pointer to the specified CPU's SMM Save State buffer\r
244 //\r
245 if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {\r
246 return EFI_INVALID_PARAMETER;\r
247 }\r
248\r
249 //\r
250 // Check for special EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID\r
251 //\r
252 if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {\r
253 //\r
254 // The pseudo-register only supports the 64-bit size specified by Width.\r
255 //\r
256 if (Width != sizeof (UINT64)) {\r
257 return EFI_INVALID_PARAMETER;\r
258 }\r
259 //\r
260 // If the processor is in SMM at the time the SMI occurred,\r
261 // the pseudo register value for EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID is returned in Buffer.\r
262 // Otherwise, EFI_NOT_FOUND is returned.\r
263 //\r
264 if (*(mSmmMpSyncData->CpuData[CpuIndex].Present)) {\r
265 *(UINT64 *)Buffer = gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId;\r
266 return EFI_SUCCESS;\r
267 } else {\r
268 return EFI_NOT_FOUND;\r
269 }\r
270 }\r
271\r
272 if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {\r
273 return EFI_INVALID_PARAMETER;\r
274 }\r
275\r
276 Status = SmmCpuFeaturesReadSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
277 if (Status == EFI_UNSUPPORTED) {\r
278 Status = ReadSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
279 }\r
280 return Status;\r
281}\r
282\r
283/**\r
284 Write data to the CPU save state.\r
285\r
286 @param This EFI_SMM_CPU_PROTOCOL instance\r
287 @param Width The number of bytes to read from the CPU save state.\r
288 @param Register Specifies the CPU register to write to the save state.\r
289 @param CpuIndex Specifies the zero-based index of the CPU save state\r
290 @param Buffer Upon entry, this holds the new CPU register value.\r
291\r
292 @retval EFI_SUCCESS The register was written from Save State\r
293 @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor\r
294 @retval EFI_INVALID_PARAMTER ProcessorIndex or Width is not correct\r
295\r
296**/\r
297EFI_STATUS\r
298EFIAPI\r
299SmmWriteSaveState (\r
300 IN CONST EFI_SMM_CPU_PROTOCOL *This,\r
301 IN UINTN Width,\r
302 IN EFI_SMM_SAVE_STATE_REGISTER Register,\r
303 IN UINTN CpuIndex,\r
304 IN CONST VOID *Buffer\r
305 )\r
306{\r
307 EFI_STATUS Status;\r
308\r
309 //\r
310 // Retrieve pointer to the specified CPU's SMM Save State buffer\r
311 //\r
312 if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {\r
313 return EFI_INVALID_PARAMETER;\r
314 }\r
315\r
316 //\r
317 // Writes to EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID are ignored\r
318 //\r
319 if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {\r
320 return EFI_SUCCESS;\r
321 }\r
322\r
323 if (!mSmmMpSyncData->CpuData[CpuIndex].Present) {\r
324 return EFI_INVALID_PARAMETER;\r
325 }\r
326\r
327 Status = SmmCpuFeaturesWriteSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
328 if (Status == EFI_UNSUPPORTED) {\r
329 Status = WriteSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
330 }\r
331 return Status;\r
332}\r
333\r
334\r
/**
  C function for SMI handler. To change all processor's SMMBase Register.

  Runs in SMI context during SMM base relocation (first SMI per CPU).
  Identifies the current CPU by APIC ID, initializes its SMM features,
  and hooks the RSM return so the rebase-complete flag gets set.

**/
VOID
EFIAPI
SmmInitHandler (
  VOID
  )
{
  UINT32  ApicId;
  UINTN   Index;

  //
  // Update SMM IDT entries' code segment and load IDT
  //
  AsmWriteIdtr (&gcSmiIdtr);
  ApicId = GetApicId ();

  ASSERT (mNumberOfCpus <= mMaxNumberOfCpus);

  //
  // Find this CPU's slot by matching the local APIC ID against the
  // ProcessorInfo table built in PiCpuSmmEntry.
  //
  for (Index = 0; Index < mNumberOfCpus; Index++) {
    if (ApicId == (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
      //
      // Initialize SMM specific features on the currently executing CPU
      //
      SmmCpuFeaturesInitializeProcessor (
        Index,
        mIsBsp,
        gSmmCpuPrivate->ProcessorInfo,
        &mCpuHotPlugData
        );

      if (!mSmmS3Flag) {
        //
        // Check XD and BTS features on each processor on normal boot
        //
        CheckFeatureSupported ();
      }

      if (mIsBsp) {
        //
        // BSP rebase is already done above.
        // Initialize private data during S3 resume
        //
        InitializeMpSyncData ();
      }

      //
      // Hook return after RSM to set SMM re-based flag
      // (SmmRelocateBases busy-waits on mRebased[Index]).
      //
      SemaphoreHook (Index, &mRebased[Index]);

      return;
    }
  }

  //
  // The executing CPU's APIC ID must be in the table; reaching here means
  // the processor info is inconsistent.
  //
  ASSERT (FALSE);
}
393\r
/**
  Relocate SmmBases for each processor.

  Execute on first boot and all S3 resumes

  Temporarily installs the SmmInitTemplate relocation handler at the
  default SMBASE (0x38000 + SMM_HANDLER_OFFSET), then sends each AP an
  SMI (its first) so it rebases itself, and finally rebases the BSP.
  The clobbered memory at the default SMBASE is restored afterwards.

**/
VOID
EFIAPI
SmmRelocateBases (
  VOID
  )
{
  UINT8                 BakBuf[BACK_BUF_SIZE];
  SMRAM_SAVE_STATE_MAP  BakBuf2;
  SMRAM_SAVE_STATE_MAP  *CpuStatePtr;
  UINT8                 *U8Ptr;
  UINT32                ApicId;
  UINTN                 Index;
  UINTN                 BspIndex;

  //
  // Make sure the reserved size is large enough for procedure SmmInitTemplate.
  //
  ASSERT (sizeof (BakBuf) >= gcSmmInitSize);

  //
  // Patch ASM code template with current CR0, CR3, and CR4 values
  //
  gSmmCr0 = (UINT32)AsmReadCr0 ();
  gSmmCr3 = (UINT32)AsmReadCr3 ();
  gSmmCr4 = (UINT32)AsmReadCr4 ();

  //
  // Patch GDTR for SMM base relocation
  //
  gcSmiInitGdtr.Base  = gcSmiGdtr.Base;
  gcSmiInitGdtr.Limit = gcSmiGdtr.Limit;

  U8Ptr = (UINT8*)(UINTN)(SMM_DEFAULT_SMBASE + SMM_HANDLER_OFFSET);
  CpuStatePtr = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);

  //
  // Backup original contents at address 0x38000
  //
  CopyMem (BakBuf, U8Ptr, sizeof (BakBuf));
  CopyMem (&BakBuf2, CpuStatePtr, sizeof (BakBuf2));

  //
  // Load image for relocation
  //
  CopyMem (U8Ptr, gcSmmInitTemplate, gcSmmInitSize);

  //
  // Retrieve the local APIC ID of current processor
  //
  ApicId = GetApicId ();

  //
  // Relocate SM bases for all APs
  // This is APs' 1st SMI - rebase will be done here, and APs' default SMI handler will be overridden by gcSmmInitTemplate
  //
  mIsBsp = FALSE;
  BspIndex = (UINTN)-1;
  for (Index = 0; Index < mNumberOfCpus; Index++) {
    mRebased[Index] = FALSE;
    if (ApicId != (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
      SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
      //
      // Wait for this AP to finish its 1st SMI
      // (busy-wait; mRebased[Index] is set by SemaphoreHook in SmmInitHandler)
      //
      while (!mRebased[Index]);
    } else {
      //
      // BSP will be Relocated later
      //
      BspIndex = Index;
    }
  }

  //
  // Relocate BSP's SMM base
  //
  ASSERT (BspIndex != (UINTN)-1);
  mIsBsp = TRUE;
  SendSmiIpi (ApicId);
  //
  // Wait for the BSP to finish its 1st SMI
  //
  while (!mRebased[BspIndex]);

  //
  // Restore contents at address 0x38000
  //
  CopyMem (CpuStatePtr, &BakBuf2, sizeof (BakBuf2));
  CopyMem (U8Ptr, BakBuf, sizeof (BakBuf));
}
490\r
/**
  SMM Ready To Lock event notification handler.

  The CPU S3 data is copied to SMRAM for security and mSmmReadyToLock is set to
  perform additional lock actions that must be performed from SMM on the next SMI.

  @param[in] Protocol   Points to the protocol's unique identifier.
  @param[in] Interface  Points to the interface instance.
  @param[in] Handle     The handle on which the interface was installed.

  @retval EFI_SUCCESS   Notification handler runs successfully.
 **/
EFI_STATUS
EFIAPI
SmmReadyToLockEventNotify (
  IN CONST EFI_GUID  *Protocol,
  IN VOID            *Interface,
  IN EFI_HANDLE      Handle
  )
{
  //
  // Copy the CPU S3 (ACPI) data into SMRAM before the lock takes effect.
  //
  GetAcpiCpuData ();

  //
  // Cache a copy of UEFI memory map before we start profiling feature.
  //
  GetUefiMemoryMap ();

  //
  // Set SMM ready to lock flag and return
  //
  mSmmReadyToLock = TRUE;
  return EFI_SUCCESS;
}
524\r
/**
  The module Entry Point of the CPU SMM driver.

  @param ImageHandle The firmware allocated handle for the EFI image.
  @param SystemTable A pointer to the EFI System Table.

  @retval EFI_SUCCESS The entry point is executed successfully.
  @retval Other Some error occurs when executing this entry point.

**/
EFI_STATUS
EFIAPI
PiCpuSmmEntry (
  IN EFI_HANDLE        ImageHandle,
  IN EFI_SYSTEM_TABLE  *SystemTable
  )
{
  EFI_STATUS                Status;
  EFI_MP_SERVICES_PROTOCOL  *MpServices;
  UINTN                     NumberOfEnabledProcessors;
  UINTN                     Index;
  VOID                      *Buffer;
  UINTN                     BufferPages;
  UINTN                     TileCodeSize;
  UINTN                     TileDataSize;
  UINTN                     TileSize;
  UINT8                     *Stacks;
  VOID                      *Registration;
  UINT32                    RegEax;
  UINT32                    RegEdx;
  UINTN                     FamilyId;
  UINTN                     ModelId;
  UINT32                    Cr3;

  //
  // Initialize Debug Agent to support source level debug in SMM code
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_SMM, NULL, NULL);

  //
  // Report the start of CPU SMM initialization.
  //
  REPORT_STATUS_CODE (
    EFI_PROGRESS_CODE,
    EFI_COMPUTING_UNIT_HOST_PROCESSOR | EFI_CU_HP_PC_SMM_INIT
    );

  //
  // Fix segment address of the long-mode-switch jump
  // (only meaningful when this driver is built for X64).
  //
  if (sizeof (UINTN) == sizeof (UINT64)) {
    gSmmJmpAddr.Segment = LONG_MODE_CODE_SEGMENT;
  }

  //
  // Find out SMRR Base and SMRR Size
  //
  FindSmramInfo (&mCpuHotPlugData.SmrrBase, &mCpuHotPlugData.SmrrSize);

  //
  // Get MP Services Protocol
  //
  Status = SystemTable->BootServices->LocateProtocol (&gEfiMpServiceProtocolGuid, NULL, (VOID **)&MpServices);
  ASSERT_EFI_ERROR (Status);

  //
  // Use MP Services Protocol to retrieve the number of processors and number of enabled processors
  //
  Status = MpServices->GetNumberOfProcessors (MpServices, &mNumberOfCpus, &NumberOfEnabledProcessors);
  ASSERT_EFI_ERROR (Status);
  ASSERT (mNumberOfCpus <= PcdGet32 (PcdCpuMaxLogicalProcessorNumber));

  //
  // If support CPU hot plug, PcdCpuSmmEnableBspElection should be set to TRUE.
  // A constant BSP index makes no sense because it may be hot removed.
  //
  DEBUG_CODE (
    if (FeaturePcdGet (PcdCpuHotPlugSupport)) {

      ASSERT (FeaturePcdGet (PcdCpuSmmEnableBspElection));
    }
  );

  //
  // Save the PcdCpuSmmCodeAccessCheckEnable value into a global variable.
  //
  mSmmCodeAccessCheckEnable = PcdGetBool (PcdCpuSmmCodeAccessCheckEnable);
  DEBUG ((EFI_D_INFO, "PcdCpuSmmCodeAccessCheckEnable = %d\n", mSmmCodeAccessCheckEnable));

  //
  // Save the PcdPteMemoryEncryptionAddressOrMask value into a global variable.
  // Make sure AddressEncMask is contained to smallest supported address field.
  //
  mAddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;
  DEBUG ((EFI_D_INFO, "mAddressEncMask = 0x%lx\n", mAddressEncMask));

  //
  // If support CPU hot plug, we need to allocate resources for possibly hot-added processors
  //
  if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
    mMaxNumberOfCpus = PcdGet32 (PcdCpuMaxLogicalProcessorNumber);
  } else {
    mMaxNumberOfCpus = mNumberOfCpus;
  }
  gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus = mMaxNumberOfCpus;

  //
  // The CPU save state and code for the SMI entry point are tiled within an SMRAM
  // allocated buffer. The minimum size of this buffer for a uniprocessor system
  // is 32 KB, because the entry point is SMBASE + 32KB, and CPU save state area
  // just below SMBASE + 64KB. If more than one CPU is present in the platform,
  // then the SMI entry point and the CPU save state areas can be tiles to minimize
  // the total amount SMRAM required for all the CPUs. The tile size can be computed
  // by adding the // CPU save state size, any extra CPU specific context, and
  // the size of code that must be placed at the SMI entry point to transfer
  // control to a C function in the native SMM execution mode. This size is
  // rounded up to the nearest power of 2 to give the tile size for a each CPU.
  // The total amount of memory required is the maximum number of CPUs that
  // platform supports times the tile size. The picture below shows the tiling,
  // where m is the number of tiles that fit in 32KB.
  //
  // +-----------------------------+ <-- 2^n offset from Base of allocated buffer
  // |   CPU m+1 Save State        |
  // +-----------------------------+
  // |   CPU m+1 Extra Data        |
  // +-----------------------------+
  // |   Padding                   |
  // +-----------------------------+
  // |   CPU 2m  SMI Entry         |
  // +#############################+ <-- Base of allocated buffer + 64 KB
  // |   CPU m-1 Save State        |
  // +-----------------------------+
  // |   CPU m-1 Extra Data        |
  // +-----------------------------+
  // |   Padding                   |
  // +-----------------------------+
  // |   CPU 2m-1 SMI Entry        |
  // +=============================+ <-- 2^n offset from Base of allocated buffer
  // |   . . . . . . . . . . . .   |
  // +=============================+ <-- 2^n offset from Base of allocated buffer
  // |   CPU 2 Save State          |
  // +-----------------------------+
  // |   CPU 2 Extra Data          |
  // +-----------------------------+
  // |   Padding                   |
  // +-----------------------------+
  // |   CPU m+1 SMI Entry         |
  // +=============================+ <-- Base of allocated buffer + 32 KB
  // |   CPU 1 Save State          |
  // +-----------------------------+
  // |   CPU 1 Extra Data          |
  // +-----------------------------+
  // |   Padding                   |
  // +-----------------------------+
  // |   CPU m SMI Entry           |
  // +#############################+ <-- Base of allocated buffer + 32 KB == CPU 0 SMBASE + 64 KB
  // |   CPU 0 Save State          |
  // +-----------------------------+
  // |   CPU 0 Extra Data          |
  // +-----------------------------+
  // |   Padding                   |
  // +-----------------------------+
  // |   CPU m-1 SMI Entry         |
  // +=============================+ <-- 2^n offset from Base of allocated buffer
  // |   . . . . . . . . . . . .   |
  // +=============================+ <-- 2^n offset from Base of allocated buffer
  // |   Padding                   |
  // +-----------------------------+
  // |   CPU 1 SMI Entry           |
  // +=============================+ <-- 2^n offset from Base of allocated buffer
  // |   Padding                   |
  // +-----------------------------+
  // |   CPU 0 SMI Entry           |
  // +#############################+ <-- Base of allocated buffer == CPU 0 SMBASE + 32 KB
  //

  //
  // Retrieve CPU Family
  //
  AsmCpuid (CPUID_VERSION_INFO, &RegEax, NULL, NULL, NULL);
  FamilyId = (RegEax >> 8) & 0xf;
  ModelId = (RegEax >> 4) & 0xf;
  if (FamilyId == 0x06 || FamilyId == 0x0f) {
    // Fold in the extended model bits for family 6/15 parts.
    ModelId = ModelId | ((RegEax >> 12) & 0xf0);
  }

  RegEdx = 0;
  AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
  if (RegEax >= CPUID_EXTENDED_CPU_SIG) {
    AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);
  }
  //
  // Determine the mode of the CPU at the time an SMI occurs
  // Intel(R) 64 and IA-32 Architectures Software Developer's Manual
  // Volume 3C, Section 34.4.1.1
  //
  mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT;
  if ((RegEdx & BIT29) != 0) {
    mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;
  }
  if (FamilyId == 0x06) {
    // NOTE(review): specific family-6 models use the 64-bit save state
    // layout even without CPUID LM reported here.
    if (ModelId == 0x17 || ModelId == 0x0f || ModelId == 0x1c) {
      mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;
    }
  }

  //
  // Compute tile size of buffer required to hold the CPU SMRAM Save State Map, extra CPU
  // specific context start starts at SMBASE + SMM_PSD_OFFSET, and the SMI entry point.
  // This size is rounded up to nearest power of 2.
  //
  TileCodeSize = GetSmiHandlerSize ();
  TileCodeSize = ALIGN_VALUE(TileCodeSize, SIZE_4KB);
  TileDataSize = (SMRAM_SAVE_STATE_MAP_OFFSET - SMM_PSD_OFFSET) + sizeof (SMRAM_SAVE_STATE_MAP);
  TileDataSize = ALIGN_VALUE(TileDataSize, SIZE_4KB);
  TileSize = TileDataSize + TileCodeSize - 1;
  TileSize = 2 * GetPowerOfTwo32 ((UINT32)TileSize);
  DEBUG ((EFI_D_INFO, "SMRAM TileSize = 0x%08x (0x%08x, 0x%08x)\n", TileSize, TileCodeSize, TileDataSize));

  //
  // If the TileSize is larger than space available for the SMI Handler of
  // CPU[i], the extra CPU specific context of CPU[i+1], and the SMRAM Save
  // State Map of CPU[i+1], then ASSERT(). If this ASSERT() is triggered, then
  // the SMI Handler size must be reduced or the size of the extra CPU specific
  // context must be reduced.
  //
  ASSERT (TileSize <= (SMRAM_SAVE_STATE_MAP_OFFSET + sizeof (SMRAM_SAVE_STATE_MAP) - SMM_HANDLER_OFFSET));

  //
  // Allocate buffer for all of the tiles.
  //
  // Intel(R) 64 and IA-32 Architectures Software Developer's Manual
  // Volume 3C, Section 34.11 SMBASE Relocation
  //   For Pentium and Intel486 processors, the SMBASE values must be
  //   aligned on a 32-KByte boundary or the processor will enter shutdown
  //   state during the execution of a RSM instruction.
  //
  // Intel486 processors: FamilyId is 4
  // Pentium processors : FamilyId is 5
  //
  BufferPages = EFI_SIZE_TO_PAGES (SIZE_32KB + TileSize * (mMaxNumberOfCpus - 1));
  if ((FamilyId == 4) || (FamilyId == 5)) {
    Buffer = AllocateAlignedCodePages (BufferPages, SIZE_32KB);
  } else {
    Buffer = AllocateAlignedCodePages (BufferPages, SIZE_4KB);
  }
  ASSERT (Buffer != NULL);
  DEBUG ((EFI_D_INFO, "SMRAM SaveState Buffer (0x%08x, 0x%08x)\n", Buffer, EFI_PAGES_TO_SIZE(BufferPages)));

  //
  // Allocate buffer for pointers to array in  SMM_CPU_PRIVATE_DATA.
  //
  gSmmCpuPrivate->ProcessorInfo = (EFI_PROCESSOR_INFORMATION *)AllocatePool (sizeof (EFI_PROCESSOR_INFORMATION) * mMaxNumberOfCpus);
  ASSERT (gSmmCpuPrivate->ProcessorInfo != NULL);

  gSmmCpuPrivate->Operation = (SMM_CPU_OPERATION *)AllocatePool (sizeof (SMM_CPU_OPERATION) * mMaxNumberOfCpus);
  ASSERT (gSmmCpuPrivate->Operation != NULL);

  gSmmCpuPrivate->CpuSaveStateSize = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);
  ASSERT (gSmmCpuPrivate->CpuSaveStateSize != NULL);

  gSmmCpuPrivate->CpuSaveState = (VOID **)AllocatePool (sizeof (VOID *) * mMaxNumberOfCpus);
  ASSERT (gSmmCpuPrivate->CpuSaveState != NULL);

  mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveStateSize = gSmmCpuPrivate->CpuSaveStateSize;
  mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveState = gSmmCpuPrivate->CpuSaveState;

  //
  // Allocate buffer for pointers to array in CPU_HOT_PLUG_DATA.
  //
  mCpuHotPlugData.ApicId = (UINT64 *)AllocatePool (sizeof (UINT64) * mMaxNumberOfCpus);
  ASSERT (mCpuHotPlugData.ApicId != NULL);
  mCpuHotPlugData.SmBase = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);
  ASSERT (mCpuHotPlugData.SmBase != NULL);
  mCpuHotPlugData.ArrayLength = (UINT32)mMaxNumberOfCpus;

  //
  // Retrieve APIC ID of each enabled processor from the MP Services protocol.
  // Also compute the SMBASE address, CPU Save State address, and CPU Save state
  // size for each CPU in the platform
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    mCpuHotPlugData.SmBase[Index] = (UINTN)Buffer + Index * TileSize - SMM_HANDLER_OFFSET;
    gSmmCpuPrivate->CpuSaveStateSize[Index] = sizeof(SMRAM_SAVE_STATE_MAP);
    gSmmCpuPrivate->CpuSaveState[Index] = (VOID *)(mCpuHotPlugData.SmBase[Index] + SMRAM_SAVE_STATE_MAP_OFFSET);
    gSmmCpuPrivate->Operation[Index] = SmmCpuNone;

    if (Index < mNumberOfCpus) {
      Status = MpServices->GetProcessorInfo (MpServices, Index, &gSmmCpuPrivate->ProcessorInfo[Index]);
      ASSERT_EFI_ERROR (Status);
      mCpuHotPlugData.ApicId[Index] = gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId;

      DEBUG ((EFI_D_INFO, "CPU[%03x]  APIC ID=%04x  SMBASE=%08x  SaveState=%08x  Size=%08x\n",
        Index,
        (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId,
        mCpuHotPlugData.SmBase[Index],
        gSmmCpuPrivate->CpuSaveState[Index],
        gSmmCpuPrivate->CpuSaveStateSize[Index]
        ));
    } else {
      //
      // Slots beyond the currently-installed CPUs are reserved for hot add;
      // mark them invalid until a processor appears.
      //
      gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId = INVALID_APIC_ID;
      mCpuHotPlugData.ApicId[Index] = INVALID_APIC_ID;
    }
  }

  //
  // Allocate SMI stacks for all processors.
  //
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    //
    // 2 more pages is allocated for each processor.
    // one is guard page and the other is known good stack.
    //
    // +-------------------------------------------+-----+-------------------------------------------+
    // | Known Good Stack | Guard Page | SMM Stack | ... | Known Good Stack | Guard Page | SMM Stack |
    // +-------------------------------------------+-----+-------------------------------------------+
    // |                                           |     |                                           |
    // |<-------------- Processor 0 -------------->|     |<-------------- Processor n -------------->|
    //
    mSmmStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)) + 2);
    Stacks = (UINT8 *) AllocatePages (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)) + 2));
    ASSERT (Stacks != NULL);
    mSmmStackArrayBase = (UINTN)Stacks;
    mSmmStackArrayEnd = mSmmStackArrayBase + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * mSmmStackSize - 1;
  } else {
    mSmmStackSize = PcdGet32 (PcdCpuSmmStackSize);
    Stacks = (UINT8 *) AllocatePages (EFI_SIZE_TO_PAGES (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * mSmmStackSize));
    ASSERT (Stacks != NULL);
  }

  //
  // Set SMI stack for SMM base relocation
  //
  gSmmInitStack = (UINTN) (Stacks + mSmmStackSize - sizeof (UINTN));

  //
  // Initialize IDT
  //
  InitializeSmmIdt ();

  //
  // Relocate SMM Base addresses to the ones allocated from SMRAM
  //
  mRebased = (BOOLEAN *)AllocateZeroPool (sizeof (BOOLEAN) * mMaxNumberOfCpus);
  ASSERT (mRebased != NULL);
  SmmRelocateBases ();

  //
  // Call hook for BSP to perform extra actions in normal mode after all
  // SMM base addresses have been relocated on all CPUs
  //
  SmmCpuFeaturesSmmRelocationComplete ();

  DEBUG ((DEBUG_INFO, "mXdSupported - 0x%x\n", mXdSupported));

  //
  // SMM Time initialization
  //
  InitializeSmmTimer ();

  //
  // Initialize MP globals
  //
  Cr3 = InitializeMpServiceData (Stacks, mSmmStackSize);

  //
  // Fill in SMM Reserved Regions
  //
  gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedStart = 0;
  gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedSize  = 0;

  //
  // Install the SMM Configuration Protocol onto a new handle on the handle database.
  // The entire SMM Configuration Protocol is allocated from SMRAM, so only a pointer
  // to an SMRAM address will be present in the handle database
  //
  Status = SystemTable->BootServices->InstallMultipleProtocolInterfaces (
                                        &gSmmCpuPrivate->SmmCpuHandle,
                                        &gEfiSmmConfigurationProtocolGuid, &gSmmCpuPrivate->SmmConfiguration,
                                        NULL
                                        );
  ASSERT_EFI_ERROR (Status);

  //
  // Install the SMM CPU Protocol into SMM protocol database
  //
  Status = gSmst->SmmInstallProtocolInterface (
                    &mSmmCpuHandle,
                    &gEfiSmmCpuProtocolGuid,
                    EFI_NATIVE_INTERFACE,
                    &mSmmCpu
                    );
  ASSERT_EFI_ERROR (Status);

  //
  // Expose address of CPU Hot Plug Data structure if CPU hot plug is supported.
  //
  if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
    Status = PcdSet64S (PcdCpuHotPlugDataAddress, (UINT64)(UINTN)&mCpuHotPlugData);
    ASSERT_EFI_ERROR (Status);
  }

  //
  // Initialize SMM CPU Services Support
  //
  Status = InitializeSmmCpuServices (mSmmCpuHandle);
  ASSERT_EFI_ERROR (Status);

  //
  // register SMM Ready To Lock Protocol notification
  //
  Status = gSmst->SmmRegisterProtocolNotify (
                    &gEfiSmmReadyToLockProtocolGuid,
                    SmmReadyToLockEventNotify,
                    &Registration
                    );
  ASSERT_EFI_ERROR (Status);

  //
  // Initialize SMM Profile feature
  //
  InitSmmProfile (Cr3);

  GetAcpiS3EnableFlag ();
  InitSmmS3ResumeState (Cr3);

  DEBUG ((EFI_D_INFO, "SMM CPU Module exit from SMRAM with EFI_SUCCESS\n"));

  return EFI_SUCCESS;
}
955\r
956/**\r
957\r
958 Find out SMRAM information including SMRR base and SMRR size.\r
959\r
960 @param SmrrBase SMRR base\r
961 @param SmrrSize SMRR size\r
962\r
963**/\r
964VOID\r
965FindSmramInfo (\r
966 OUT UINT32 *SmrrBase,\r
967 OUT UINT32 *SmrrSize\r
968 )\r
969{\r
970 EFI_STATUS Status;\r
971 UINTN Size;\r
972 EFI_SMM_ACCESS2_PROTOCOL *SmmAccess;\r
973 EFI_SMRAM_DESCRIPTOR *CurrentSmramRange;\r
974 EFI_SMRAM_DESCRIPTOR *SmramRanges;\r
975 UINTN SmramRangeCount;\r
976 UINTN Index;\r
977 UINT64 MaxSize;\r
978 BOOLEAN Found;\r
979\r
980 //\r
981 // Get SMM Access Protocol\r
982 //\r
983 Status = gBS->LocateProtocol (&gEfiSmmAccess2ProtocolGuid, NULL, (VOID **)&SmmAccess);\r
984 ASSERT_EFI_ERROR (Status);\r
985\r
986 //\r
987 // Get SMRAM information\r
988 //\r
989 Size = 0;\r
990 Status = SmmAccess->GetCapabilities (SmmAccess, &Size, NULL);\r
991 ASSERT (Status == EFI_BUFFER_TOO_SMALL);\r
992\r
993 SmramRanges = (EFI_SMRAM_DESCRIPTOR *)AllocatePool (Size);\r
994 ASSERT (SmramRanges != NULL);\r
995\r
996 Status = SmmAccess->GetCapabilities (SmmAccess, &Size, SmramRanges);\r
997 ASSERT_EFI_ERROR (Status);\r
998\r
999 SmramRangeCount = Size / sizeof (EFI_SMRAM_DESCRIPTOR);\r
1000\r
1001 //\r
1002 // Find the largest SMRAM range between 1MB and 4GB that is at least 256K - 4K in size\r
1003 //\r
1004 CurrentSmramRange = NULL;\r
1005 for (Index = 0, MaxSize = SIZE_256KB - EFI_PAGE_SIZE; Index < SmramRangeCount; Index++) {\r
1006 //\r
1007 // Skip any SMRAM region that is already allocated, needs testing, or needs ECC initialization\r
1008 //\r
1009 if ((SmramRanges[Index].RegionState & (EFI_ALLOCATED | EFI_NEEDS_TESTING | EFI_NEEDS_ECC_INITIALIZATION)) != 0) {\r
1010 continue;\r
1011 }\r
1012\r
1013 if (SmramRanges[Index].CpuStart >= BASE_1MB) {\r
1014 if ((SmramRanges[Index].CpuStart + SmramRanges[Index].PhysicalSize) <= BASE_4GB) {\r
1015 if (SmramRanges[Index].PhysicalSize >= MaxSize) {\r
1016 MaxSize = SmramRanges[Index].PhysicalSize;\r
1017 CurrentSmramRange = &SmramRanges[Index];\r
1018 }\r
1019 }\r
1020 }\r
1021 }\r
1022\r
1023 ASSERT (CurrentSmramRange != NULL);\r
1024\r
1025 *SmrrBase = (UINT32)CurrentSmramRange->CpuStart;\r
1026 *SmrrSize = (UINT32)CurrentSmramRange->PhysicalSize;\r
1027\r
1028 do {\r
1029 Found = FALSE;\r
1030 for (Index = 0; Index < SmramRangeCount; Index++) {\r
1031 if (SmramRanges[Index].CpuStart < *SmrrBase && *SmrrBase == (SmramRanges[Index].CpuStart + SmramRanges[Index].PhysicalSize)) {\r
1032 *SmrrBase = (UINT32)SmramRanges[Index].CpuStart;\r
1033 *SmrrSize = (UINT32)(*SmrrSize + SmramRanges[Index].PhysicalSize);\r
1034 Found = TRUE;\r
1035 } else if ((*SmrrBase + *SmrrSize) == SmramRanges[Index].CpuStart && SmramRanges[Index].PhysicalSize > 0) {\r
1036 *SmrrSize = (UINT32)(*SmrrSize + SmramRanges[Index].PhysicalSize);\r
1037 Found = TRUE;\r
1038 }\r
1039 }\r
1040 } while (Found);\r
1041\r
1042 FreePool (SmramRanges);\r
1043 DEBUG ((EFI_D_INFO, "SMRR Base: 0x%x, SMRR Size: 0x%x\n", *SmrrBase, *SmrrSize));\r
1044}\r
1045\r
1046/**\r
1047Configure SMM Code Access Check feature on an AP.\r
1048SMM Feature Control MSR will be locked after configuration.\r
1049\r
1050@param[in,out] Buffer Pointer to private data buffer.\r
1051**/\r
1052VOID\r
1053EFIAPI\r
1054ConfigSmmCodeAccessCheckOnCurrentProcessor (\r
1055 IN OUT VOID *Buffer\r
1056 )\r
1057{\r
1058 UINTN CpuIndex;\r
1059 UINT64 SmmFeatureControlMsr;\r
1060 UINT64 NewSmmFeatureControlMsr;\r
1061\r
1062 //\r
1063 // Retrieve the CPU Index from the context passed in\r
1064 //\r
1065 CpuIndex = *(UINTN *)Buffer;\r
1066\r
1067 //\r
1068 // Get the current SMM Feature Control MSR value\r
1069 //\r
1070 SmmFeatureControlMsr = SmmCpuFeaturesGetSmmRegister (CpuIndex, SmmRegFeatureControl);\r
1071\r
1072 //\r
1073 // Compute the new SMM Feature Control MSR value\r
1074 //\r
1075 NewSmmFeatureControlMsr = SmmFeatureControlMsr;\r
1076 if (mSmmCodeAccessCheckEnable) {\r
1077 NewSmmFeatureControlMsr |= SMM_CODE_CHK_EN_BIT;\r
1078 if (FeaturePcdGet (PcdCpuSmmFeatureControlMsrLock)) {\r
1079 NewSmmFeatureControlMsr |= SMM_FEATURE_CONTROL_LOCK_BIT;\r
1080 }\r
1081 }\r
1082\r
1083 //\r
1084 // Only set the SMM Feature Control MSR value if the new value is different than the current value\r
1085 //\r
1086 if (NewSmmFeatureControlMsr != SmmFeatureControlMsr) {\r
1087 SmmCpuFeaturesSetSmmRegister (CpuIndex, SmmRegFeatureControl, NewSmmFeatureControlMsr);\r
1088 }\r
1089\r
1090 //\r
1091 // Release the spin lock user to serialize the updates to the SMM Feature Control MSR\r
1092 //\r
1093 ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);\r
1094}\r
1095\r
1096/**\r
1097Configure SMM Code Access Check feature for all processors.\r
1098SMM Feature Control MSR will be locked after configuration.\r
1099**/\r
1100VOID\r
1101ConfigSmmCodeAccessCheck (\r
1102 VOID\r
1103 )\r
1104{\r
1105 UINTN Index;\r
1106 EFI_STATUS Status;\r
1107\r
1108 //\r
1109 // Check to see if the Feature Control MSR is supported on this CPU\r
1110 //\r
1111 Index = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;\r
1112 if (!SmmCpuFeaturesIsSmmRegisterSupported (Index, SmmRegFeatureControl)) {\r
1113 mSmmCodeAccessCheckEnable = FALSE;\r
1114 return;\r
1115 }\r
1116\r
1117 //\r
1118 // Check to see if the CPU supports the SMM Code Access Check feature\r
1119 // Do not access this MSR unless the CPU supports the SmmRegFeatureControl\r
1120 //\r
1121 if ((AsmReadMsr64 (EFI_MSR_SMM_MCA_CAP) & SMM_CODE_ACCESS_CHK_BIT) == 0) {\r
1122 mSmmCodeAccessCheckEnable = FALSE;\r
1123 return;\r
1124 }\r
1125\r
1126 //\r
1127 // Initialize the lock used to serialize the MSR programming in BSP and all APs\r
1128 //\r
1129 InitializeSpinLock (mConfigSmmCodeAccessCheckLock);\r
1130\r
1131 //\r
1132 // Acquire Config SMM Code Access Check spin lock. The BSP will release the\r
1133 // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().\r
1134 //\r
1135 AcquireSpinLock (mConfigSmmCodeAccessCheckLock);\r
1136\r
1137 //\r
1138 // Enable SMM Code Access Check feature on the BSP.\r
1139 //\r
1140 ConfigSmmCodeAccessCheckOnCurrentProcessor (&Index);\r
1141\r
1142 //\r
1143 // Enable SMM Code Access Check feature for the APs.\r
1144 //\r
1145 for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {\r
1146 if (Index != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {\r
1147\r
1148 //\r
1149 // Acquire Config SMM Code Access Check spin lock. The AP will release the\r
1150 // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().\r
1151 //\r
1152 AcquireSpinLock (mConfigSmmCodeAccessCheckLock);\r
1153\r
1154 //\r
1155 // Call SmmStartupThisAp() to enable SMM Code Access Check on an AP.\r
1156 //\r
1157 Status = gSmst->SmmStartupThisAp (ConfigSmmCodeAccessCheckOnCurrentProcessor, Index, &Index);\r
1158 ASSERT_EFI_ERROR (Status);\r
1159\r
1160 //\r
1161 // Wait for the AP to release the Config SMM Code Access Check spin lock.\r
1162 //\r
1163 while (!AcquireSpinLockOrFail (mConfigSmmCodeAccessCheckLock)) {\r
1164 CpuPause ();\r
1165 }\r
1166\r
1167 //\r
1168 // Release the Config SMM Code Access Check spin lock.\r
1169 //\r
1170 ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);\r
1171 }\r
1172 }\r
1173}\r
1174\r
1175/**\r
1176 This API provides a way to allocate memory for page table.\r
1177\r
1178 This API can be called more once to allocate memory for page tables.\r
1179\r
1180 Allocates the number of 4KB pages of type EfiRuntimeServicesData and returns a pointer to the\r
1181 allocated buffer. The buffer returned is aligned on a 4KB boundary. If Pages is 0, then NULL\r
1182 is returned. If there is not enough memory remaining to satisfy the request, then NULL is\r
1183 returned.\r
1184\r
1185 @param Pages The number of 4 KB pages to allocate.\r
1186\r
1187 @return A pointer to the allocated buffer or NULL if allocation fails.\r
1188\r
1189**/\r
1190VOID *\r
1191AllocatePageTableMemory (\r
1192 IN UINTN Pages\r
1193 )\r
1194{\r
1195 VOID *Buffer;\r
1196\r
1197 Buffer = SmmCpuFeaturesAllocatePageTableMemory (Pages);\r
1198 if (Buffer != NULL) {\r
1199 return Buffer;\r
1200 }\r
1201 return AllocatePages (Pages);\r
1202}\r
1203\r
1204/**\r
1205 Allocate pages for code.\r
1206\r
1207 @param[in] Pages Number of pages to be allocated.\r
1208\r
1209 @return Allocated memory.\r
1210**/\r
1211VOID *\r
1212AllocateCodePages (\r
1213 IN UINTN Pages\r
1214 )\r
1215{\r
1216 EFI_STATUS Status;\r
1217 EFI_PHYSICAL_ADDRESS Memory;\r
1218\r
1219 if (Pages == 0) {\r
1220 return NULL;\r
1221 }\r
1222\r
1223 Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);\r
1224 if (EFI_ERROR (Status)) {\r
1225 return NULL;\r
1226 }\r
1227 return (VOID *) (UINTN) Memory;\r
1228}\r
1229\r
1230/**\r
1231 Allocate aligned pages for code.\r
1232\r
1233 @param[in] Pages Number of pages to be allocated.\r
1234 @param[in] Alignment The requested alignment of the allocation.\r
1235 Must be a power of two.\r
1236 If Alignment is zero, then byte alignment is used.\r
1237\r
1238 @return Allocated memory.\r
1239**/\r
1240VOID *\r
1241AllocateAlignedCodePages (\r
1242 IN UINTN Pages,\r
1243 IN UINTN Alignment\r
1244 )\r
1245{\r
1246 EFI_STATUS Status;\r
1247 EFI_PHYSICAL_ADDRESS Memory;\r
1248 UINTN AlignedMemory;\r
1249 UINTN AlignmentMask;\r
1250 UINTN UnalignedPages;\r
1251 UINTN RealPages;\r
1252\r
1253 //\r
1254 // Alignment must be a power of two or zero.\r
1255 //\r
1256 ASSERT ((Alignment & (Alignment - 1)) == 0);\r
1257\r
1258 if (Pages == 0) {\r
1259 return NULL;\r
1260 }\r
1261 if (Alignment > EFI_PAGE_SIZE) {\r
1262 //\r
1263 // Calculate the total number of pages since alignment is larger than page size.\r
1264 //\r
1265 AlignmentMask = Alignment - 1;\r
1266 RealPages = Pages + EFI_SIZE_TO_PAGES (Alignment);\r
1267 //\r
1268 // Make sure that Pages plus EFI_SIZE_TO_PAGES (Alignment) does not overflow.\r
1269 //\r
1270 ASSERT (RealPages > Pages);\r
1271\r
1272 Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, RealPages, &Memory);\r
1273 if (EFI_ERROR (Status)) {\r
1274 return NULL;\r
1275 }\r
1276 AlignedMemory = ((UINTN) Memory + AlignmentMask) & ~AlignmentMask;\r
1277 UnalignedPages = EFI_SIZE_TO_PAGES (AlignedMemory - (UINTN) Memory);\r
1278 if (UnalignedPages > 0) {\r
1279 //\r
1280 // Free first unaligned page(s).\r
1281 //\r
1282 Status = gSmst->SmmFreePages (Memory, UnalignedPages);\r
1283 ASSERT_EFI_ERROR (Status);\r
1284 }\r
1285 Memory = AlignedMemory + EFI_PAGES_TO_SIZE (Pages);\r
1286 UnalignedPages = RealPages - Pages - UnalignedPages;\r
1287 if (UnalignedPages > 0) {\r
1288 //\r
1289 // Free last unaligned page(s).\r
1290 //\r
1291 Status = gSmst->SmmFreePages (Memory, UnalignedPages);\r
1292 ASSERT_EFI_ERROR (Status);\r
1293 }\r
1294 } else {\r
1295 //\r
1296 // Do not over-allocate pages in this case.\r
1297 //\r
1298 Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);\r
1299 if (EFI_ERROR (Status)) {\r
1300 return NULL;\r
1301 }\r
1302 AlignedMemory = (UINTN) Memory;\r
1303 }\r
1304 return (VOID *) AlignedMemory;\r
1305}\r
1306\r
1307/**\r
1308 Perform the remaining tasks.\r
1309\r
1310**/\r
1311VOID\r
1312PerformRemainingTasks (\r
1313 VOID\r
1314 )\r
1315{\r
1316 if (mSmmReadyToLock) {\r
1317 //\r
1318 // Start SMM Profile feature\r
1319 //\r
1320 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {\r
1321 SmmProfileStart ();\r
1322 }\r
1323 //\r
1324 // Create a mix of 2MB and 4KB page table. Update some memory ranges absent and execute-disable.\r
1325 //\r
1326 InitPaging ();\r
1327\r
1328 //\r
1329 // Mark critical region to be read-only in page table\r
1330 //\r
1331 SetMemMapAttributes ();\r
1332\r
1333 //\r
1334 // For outside SMRAM, we only map SMM communication buffer or MMIO.\r
1335 //\r
1336 SetUefiMemMapAttributes ();\r
1337\r
1338 //\r
1339 // Set page table itself to be read-only\r
1340 //\r
1341 SetPageTableAttributes ();\r
1342\r
1343 //\r
1344 // Configure SMM Code Access Check feature if available.\r
1345 //\r
1346 ConfigSmmCodeAccessCheck ();\r
1347\r
1348 SmmCpuFeaturesCompleteSmmReadyToLock ();\r
1349\r
1350 //\r
1351 // Clean SMM ready to lock flag\r
1352 //\r
1353 mSmmReadyToLock = FALSE;\r
1354 }\r
1355}\r
1356\r
1357/**\r
1358 Perform the pre tasks.\r
1359\r
1360**/\r
1361VOID\r
1362PerformPreTasks (\r
1363 VOID\r
1364 )\r
1365{\r
1366 RestoreSmmConfigurationInS3 ();\r
1367}\r