1/** @file\r
2Agent Module to load other modules to deploy SMM Entry Vector for X86 CPU.\r
3\r
4Copyright (c) 2009 - 2017, Intel Corporation. All rights reserved.<BR>\r
5Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>\r
6\r
7This program and the accompanying materials\r
8are licensed and made available under the terms and conditions of the BSD License\r
9which accompanies this distribution. The full text of the license may be found at\r
10http://opensource.org/licenses/bsd-license.php\r
11\r
12THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
13WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
14\r
15**/\r
16\r
17#include "PiSmmCpuDxeSmm.h"\r
18\r
19//\r
20// SMM CPU Private Data structure that contains SMM Configuration Protocol\r
21 // along with its supporting fields.\r
22//\r
23SMM_CPU_PRIVATE_DATA mSmmCpuPrivateData = {\r
24 SMM_CPU_PRIVATE_DATA_SIGNATURE, // Signature\r
25 NULL, // SmmCpuHandle\r
26 NULL, // Pointer to ProcessorInfo array\r
27 NULL, // Pointer to Operation array\r
28 NULL, // Pointer to CpuSaveStateSize array\r
29 NULL, // Pointer to CpuSaveState array\r
30 { {0} }, // SmmReservedSmramRegion\r
31 {\r
32 SmmStartupThisAp, // SmmCoreEntryContext.SmmStartupThisAp\r
33 0, // SmmCoreEntryContext.CurrentlyExecutingCpu\r
34 0, // SmmCoreEntryContext.NumberOfCpus\r
35 NULL, // SmmCoreEntryContext.CpuSaveStateSize\r
36 NULL // SmmCoreEntryContext.CpuSaveState\r
37 },\r
38 NULL, // SmmCoreEntry\r
39 {\r
40 mSmmCpuPrivateData.SmmReservedSmramRegion, // SmmConfiguration.SmramReservedRegions\r
41 RegisterSmmEntry // SmmConfiguration.RegisterSmmEntry\r
42 },\r
43};\r
44\r
45CPU_HOT_PLUG_DATA mCpuHotPlugData = {\r
46 CPU_HOT_PLUG_DATA_REVISION_1, // Revision\r
47 0, // Array Length of SmBase and APIC ID\r
48 NULL, // Pointer to APIC ID array\r
49 NULL, // Pointer to SMBASE array\r
50 0, // Reserved\r
51 0, // SmrrBase\r
52 0 // SmrrSize\r
53};\r
54\r
55//\r
56// Global pointer used to access mSmmCpuPrivateData from outside and inside SMM\r
57//\r
58SMM_CPU_PRIVATE_DATA *gSmmCpuPrivate = &mSmmCpuPrivateData;\r
59\r
60//\r
61// SMM Relocation variables\r
62//\r
63volatile BOOLEAN *mRebased;\r
64volatile BOOLEAN mIsBsp;\r
65\r
66///\r
67/// Handle for the SMM CPU Protocol\r
68///\r
69EFI_HANDLE mSmmCpuHandle = NULL;\r
70\r
71///\r
72/// SMM CPU Protocol instance\r
73///\r
74EFI_SMM_CPU_PROTOCOL mSmmCpu = {\r
75 SmmReadSaveState,\r
76 SmmWriteSaveState\r
77};\r
78\r
79EFI_CPU_INTERRUPT_HANDLER mExternalVectorTable[EXCEPTION_VECTOR_NUMBER];\r
80\r
81//\r
82// SMM stack information\r
83//\r
84UINTN mSmmStackArrayBase;\r
85UINTN mSmmStackArrayEnd;\r
86UINTN mSmmStackSize;\r
87\r
88UINTN mMaxNumberOfCpus = 1;\r
89UINTN mNumberOfCpus = 1;\r
90\r
91//\r
92// SMM ready to lock flag\r
93//\r
94BOOLEAN mSmmReadyToLock = FALSE;\r
95\r
96//\r
97// Global used to cache PCD for SMM Code Access Check enable\r
98//\r
99BOOLEAN mSmmCodeAccessCheckEnable = FALSE;\r
100\r
101//\r
102// Global copy of the PcdPteMemoryEncryptionAddressOrMask\r
103//\r
104UINT64 mAddressEncMask = 0;\r
105\r
106//\r
107// Spin lock used to serialize setting of SMM Code Access Check feature\r
108//\r
109SPIN_LOCK *mConfigSmmCodeAccessCheckLock = NULL;\r
110\r
111//\r
112// Saved SMM ranges information\r
113//\r
114EFI_SMRAM_DESCRIPTOR *mSmmCpuSmramRanges;\r
115UINTN mSmmCpuSmramRangeCount;\r
116\r
117/**\r
118 Initialize IDT to setup exception handlers for SMM.\r
119\r
120**/\r
121VOID\r
122InitializeSmmIdt (\r
123 VOID\r
124 )\r
125{\r
126 EFI_STATUS Status;\r
127 BOOLEAN InterruptState;\r
128 IA32_DESCRIPTOR DxeIdtr;\r
129\r
130 //\r
131 // There are 32 (not 255) entries in it since only processor\r
132 // generated exceptions will be handled.\r
133 //\r
134 gcSmiIdtr.Limit = (sizeof(IA32_IDT_GATE_DESCRIPTOR) * 32) - 1;\r
135 //\r
136 // Allocate page aligned IDT, because it might be set as read only.\r
137 //\r
138 gcSmiIdtr.Base = (UINTN)AllocateCodePages (EFI_SIZE_TO_PAGES(gcSmiIdtr.Limit + 1));\r
139 ASSERT (gcSmiIdtr.Base != 0);\r
140 ZeroMem ((VOID *)gcSmiIdtr.Base, gcSmiIdtr.Limit + 1);\r
141\r
142 //\r
143 // Disable Interrupt and save DXE IDT table\r
144 //\r
145 InterruptState = SaveAndDisableInterrupts ();\r
146 AsmReadIdtr (&DxeIdtr);\r
147 //\r
148 // Load SMM temporary IDT table\r
149 //\r
150 AsmWriteIdtr (&gcSmiIdtr);\r
151 //\r
152 // Setup SMM default exception handlers, SMM IDT table\r
153 // will be updated and saved in gcSmiIdtr\r
154 //\r
155 Status = InitializeCpuExceptionHandlers (NULL);\r
156 ASSERT_EFI_ERROR (Status);\r
157 //\r
158 // Restore DXE IDT table and CPU interrupt\r
159 //\r
160 AsmWriteIdtr ((IA32_DESCRIPTOR *) &DxeIdtr);\r
161 SetInterruptState (InterruptState);\r
162}\r
163\r
164/**\r
165 Search module name by input IP address and output it.\r
166\r
167 @param CallerIpAddress Caller instruction pointer.\r
168\r
169**/\r
170VOID\r
171DumpModuleInfoByIp (\r
172 IN UINTN CallerIpAddress\r
173 )\r
174{\r
175 UINTN Pe32Data;\r
176 EFI_IMAGE_DOS_HEADER *DosHdr;\r
177 EFI_IMAGE_OPTIONAL_HEADER_PTR_UNION Hdr;\r
178 VOID *PdbPointer;\r
179 UINT64 DumpIpAddress;\r
180\r
181 //\r
182 // Find Image Base\r
183 //\r
184 Pe32Data = CallerIpAddress & ~(SIZE_4KB - 1);\r
185 while (Pe32Data != 0) {\r
186 DosHdr = (EFI_IMAGE_DOS_HEADER *) Pe32Data;\r
187 if (DosHdr->e_magic == EFI_IMAGE_DOS_SIGNATURE) {\r
188 //\r
189 // DOS image header is present, so read the PE header after the DOS image header.\r
190 //\r
191 Hdr.Pe32 = (EFI_IMAGE_NT_HEADERS32 *)(Pe32Data + (UINTN) ((DosHdr->e_lfanew) & 0x0ffff));\r
192 //\r
193 // Make sure PE header address does not overflow and is less than the initial address.\r
194 //\r
195 if (((UINTN)Hdr.Pe32 > Pe32Data) && ((UINTN)Hdr.Pe32 < CallerIpAddress)) {\r
196 if (Hdr.Pe32->Signature == EFI_IMAGE_NT_SIGNATURE) {\r
197 //\r
198 // It's PE image.\r
199 //\r
200 break;\r
201 }\r
202 }\r
203 }\r
204\r
205 //\r
206 // Image base not found; check the previous aligned address\r
207 //\r
208 Pe32Data -= SIZE_4KB;\r
209 }\r
210\r
211 DumpIpAddress = CallerIpAddress;\r
212 DEBUG ((EFI_D_ERROR, "It is invoked from the instruction before IP(0x%lx)", DumpIpAddress));\r
213\r
214 if (Pe32Data != 0) {\r
215 PdbPointer = PeCoffLoaderGetPdbPointer ((VOID *) Pe32Data);\r
216 if (PdbPointer != NULL) {\r
217 DEBUG ((EFI_D_ERROR, " in module (%a)", PdbPointer));\r
218 }\r
219 }\r
220}\r
221\r
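//
// Illustrative usage sketch (not part of the original flow in this file): an
// exception or page-fault handler that has a saved EFI_SYSTEM_CONTEXT can pass
// the faulting instruction pointer to DumpModuleInfoByIp() to report the
// offending module. The handler name below is hypothetical, and the use of
// SystemContextX64/Rip assumes an X64 build (IA32 builds would use
// SystemContextIa32/Eip instead).
//
//   VOID
//   EFIAPI
//   ExampleSmiFaultHandler (
//     IN EFI_EXCEPTION_TYPE  ExceptionType,
//     IN EFI_SYSTEM_CONTEXT  SystemContext
//     )
//   {
//     DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
//   }
//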
222/**\r
223 Read information from the CPU save state.\r
224\r
225 @param This EFI_SMM_CPU_PROTOCOL instance\r
226 @param Width The number of bytes to read from the CPU save state.\r
227 @param Register Specifies the CPU register to read from the save state.\r
228 @param CpuIndex Specifies the zero-based index of the CPU save state.\r
229 @param Buffer Upon return, this holds the CPU register value read from the save state.\r
230\r
231 @retval EFI_SUCCESS The register was read from Save State\r
232 @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor\r
233 @retval EFI_INVALID_PARAMETER This or Buffer is NULL.\r
234\r
235**/\r
236EFI_STATUS\r
237EFIAPI\r
238SmmReadSaveState (\r
239 IN CONST EFI_SMM_CPU_PROTOCOL *This,\r
240 IN UINTN Width,\r
241 IN EFI_SMM_SAVE_STATE_REGISTER Register,\r
242 IN UINTN CpuIndex,\r
243 OUT VOID *Buffer\r
244 )\r
245{\r
246 EFI_STATUS Status;\r
247\r
248 //\r
249 // Retrieve pointer to the specified CPU's SMM Save State buffer\r
250 //\r
251 if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {\r
252 return EFI_INVALID_PARAMETER;\r
253 }\r
254\r
255 //\r
256 // Check for special EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID\r
257 //\r
258 if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {\r
259 //\r
260 // The pseudo-register only supports the 64-bit size specified by Width.\r
261 //\r
262 if (Width != sizeof (UINT64)) {\r
263 return EFI_INVALID_PARAMETER;\r
264 }\r
265 //\r
266 // If the processor is in SMM at the time the SMI occurred,\r
267 // the pseudo register value for EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID is returned in Buffer.\r
268 // Otherwise, EFI_NOT_FOUND is returned.\r
269 //\r
270 if (*(mSmmMpSyncData->CpuData[CpuIndex].Present)) {\r
271 *(UINT64 *)Buffer = gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId;\r
272 return EFI_SUCCESS;\r
273 } else {\r
274 return EFI_NOT_FOUND;\r
275 }\r
276 }\r
277\r
278 if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {\r
279 return EFI_INVALID_PARAMETER;\r
280 }\r
281\r
282 Status = SmmCpuFeaturesReadSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
283 if (Status == EFI_UNSUPPORTED) {\r
284 Status = ReadSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
285 }\r
286 return Status;\r
287}\r
288\r
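//
// Illustrative usage sketch (assumed caller, not part of this driver): an SMI
// handler can read a register from a specific CPU's save state through the
// EFI_SMM_CPU_PROTOCOL instance installed by this module. CpuIndex would
// typically come from gSmst->CurrentlyExecutingCpu or from walking all CPUs.
//
//   UINT64      Rax;
//   EFI_STATUS  Status;
//
//   Status = mSmmCpu.ReadSaveState (
//              &mSmmCpu,
//              sizeof (UINT64),
//              EFI_SMM_SAVE_STATE_REGISTER_RAX,
//              CpuIndex,
//              &Rax
//              );
//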
289/**\r
290 Write data to the CPU save state.\r
291\r
292 @param This EFI_SMM_CPU_PROTOCOL instance\r
293 @param Width The number of bytes to write to the CPU save state.\r
294 @param Register Specifies the CPU register to write to the save state.\r
295 @param CpuIndex Specifies the zero-based index of the CPU save state\r
296 @param Buffer Upon entry, this holds the new CPU register value.\r
297\r
298 @retval EFI_SUCCESS The register was written to Save State\r
299 @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor\r
300 @retval EFI_INVALID_PARAMETER ProcessorIndex or Width is not correct\r
301\r
302**/\r
303EFI_STATUS\r
304EFIAPI\r
305SmmWriteSaveState (\r
306 IN CONST EFI_SMM_CPU_PROTOCOL *This,\r
307 IN UINTN Width,\r
308 IN EFI_SMM_SAVE_STATE_REGISTER Register,\r
309 IN UINTN CpuIndex,\r
310 IN CONST VOID *Buffer\r
311 )\r
312{\r
313 EFI_STATUS Status;\r
314\r
315 //\r
316 // Retrieve pointer to the specified CPU's SMM Save State buffer\r
317 //\r
318 if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {\r
319 return EFI_INVALID_PARAMETER;\r
320 }\r
321\r
322 //\r
323 // Writes to EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID are ignored\r
324 //\r
325 if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {\r
326 return EFI_SUCCESS;\r
327 }\r
328\r
329 if (!mSmmMpSyncData->CpuData[CpuIndex].Present) {\r
330 return EFI_INVALID_PARAMETER;\r
331 }\r
332\r
333 Status = SmmCpuFeaturesWriteSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
334 if (Status == EFI_UNSUPPORTED) {\r
335 Status = WriteSaveStateRegister (CpuIndex, Register, Width, Buffer);\r
336 }\r
337 return Status;\r
338}\r
339\r
340\r
341/**\r
342 C function for the SMI handler, used to change each processor's SMBASE register.\r
343\r
344**/\r
345VOID\r
346EFIAPI\r
347SmmInitHandler (\r
348 VOID\r
349 )\r
350{\r
351 UINT32 ApicId;\r
352 UINTN Index;\r
353\r
354 //\r
355 // Update SMM IDT entries' code segment and load IDT\r
356 //\r
357 AsmWriteIdtr (&gcSmiIdtr);\r
358 ApicId = GetApicId ();\r
359\r
360 ASSERT (mNumberOfCpus <= mMaxNumberOfCpus);\r
361\r
362 for (Index = 0; Index < mNumberOfCpus; Index++) {\r
363 if (ApicId == (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {\r
364 //\r
365 // Initialize SMM specific features on the currently executing CPU\r
366 //\r
367 SmmCpuFeaturesInitializeProcessor (\r
368 Index,\r
369 mIsBsp,\r
370 gSmmCpuPrivate->ProcessorInfo,\r
371 &mCpuHotPlugData\r
372 );\r
373\r
374 if (!mSmmS3Flag) {\r
375 //\r
376 // Check XD and BTS features on each processor on normal boot\r
377 //\r
378 CheckFeatureSupported ();\r
379 }\r
380\r
381 if (mIsBsp) {\r
382 //\r
383 // BSP rebase is already done above.\r
384 // Initialize private data during S3 resume\r
385 //\r
386 InitializeMpSyncData ();\r
387 }\r
388\r
389 //\r
390 // Hook return after RSM to set SMM re-based flag\r
391 //\r
392 SemaphoreHook (Index, &mRebased[Index]);\r
393\r
394 return;\r
395 }\r
396 }\r
397 ASSERT (FALSE);\r
398}\r
399\r
400/**\r
401 Relocate SmmBases for each processor.\r
402\r
403 Execute on first boot and all S3 resumes\r
404\r
405**/\r
406VOID\r
407EFIAPI\r
408SmmRelocateBases (\r
409 VOID\r
410 )\r
411{\r
412 UINT8 BakBuf[BACK_BUF_SIZE];\r
413 SMRAM_SAVE_STATE_MAP BakBuf2;\r
414 SMRAM_SAVE_STATE_MAP *CpuStatePtr;\r
415 UINT8 *U8Ptr;\r
416 UINT32 ApicId;\r
417 UINTN Index;\r
418 UINTN BspIndex;\r
419\r
420 //\r
421 // Make sure the reserved size is large enough for procedure SmmInitTemplate.\r
422 //\r
423 ASSERT (sizeof (BakBuf) >= gcSmmInitSize);\r
424\r
425 //\r
426 // Patch ASM code template with current CR0, CR3, and CR4 values\r
427 //\r
428 gSmmCr0 = (UINT32)AsmReadCr0 ();\r
429 gSmmCr3 = (UINT32)AsmReadCr3 ();\r
430 gSmmCr4 = (UINT32)AsmReadCr4 ();\r
431\r
432 //\r
433 // Patch GDTR for SMM base relocation\r
434 //\r
435 gcSmiInitGdtr.Base = gcSmiGdtr.Base;\r
436 gcSmiInitGdtr.Limit = gcSmiGdtr.Limit;\r
437\r
438 U8Ptr = (UINT8*)(UINTN)(SMM_DEFAULT_SMBASE + SMM_HANDLER_OFFSET);\r
439 CpuStatePtr = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);\r
440\r
441 //\r
442 // Backup original contents at address 0x38000\r
443 //\r
444 CopyMem (BakBuf, U8Ptr, sizeof (BakBuf));\r
445 CopyMem (&BakBuf2, CpuStatePtr, sizeof (BakBuf2));\r
446\r
447 //\r
448 // Load image for relocation\r
449 //\r
450 CopyMem (U8Ptr, gcSmmInitTemplate, gcSmmInitSize);\r
451\r
452 //\r
453 // Retrieve the local APIC ID of current processor\r
454 //\r
455 ApicId = GetApicId ();\r
456\r
457 //\r
458 // Relocate SM bases for all APs\r
459 // This is APs' 1st SMI - rebase will be done here, and APs' default SMI handler will be overridden by gcSmmInitTemplate\r
460 //\r
461 mIsBsp = FALSE;\r
462 BspIndex = (UINTN)-1;\r
463 for (Index = 0; Index < mNumberOfCpus; Index++) {\r
464 mRebased[Index] = FALSE;\r
465 if (ApicId != (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {\r
466 SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);\r
467 //\r
468 // Wait for this AP to finish its 1st SMI\r
469 //\r
470 while (!mRebased[Index]);\r
471 } else {\r
472 //\r
473 // BSP will be Relocated later\r
474 //\r
475 BspIndex = Index;\r
476 }\r
477 }\r
478\r
479 //\r
480 // Relocate BSP's SMM base\r
481 //\r
482 ASSERT (BspIndex != (UINTN)-1);\r
483 mIsBsp = TRUE;\r
484 SendSmiIpi (ApicId);\r
485 //\r
486 // Wait for the BSP to finish its 1st SMI\r
487 //\r
488 while (!mRebased[BspIndex]);\r
489\r
490 //\r
491 // Restore contents at address 0x38000\r
492 //\r
493 CopyMem (CpuStatePtr, &BakBuf2, sizeof (BakBuf2));\r
494 CopyMem (U8Ptr, BakBuf, sizeof (BakBuf));\r
495}\r
496\r
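//
// Worked example of the addresses used above, assuming the typical values
// SMM_DEFAULT_SMBASE = 0x30000, SMM_HANDLER_OFFSET = 0x8000 and
// SMRAM_SAVE_STATE_MAP_OFFSET = 0xFC00 (definitions assumed to come from
// PiSmmCpuDxeSmm.h and SmramSaveStateMap.h):
//
//   U8Ptr       = 0x30000 + 0x8000 = 0x38000  (default SMI entry point)
//   CpuStatePtr = 0x30000 + 0xFC00 = 0x3FC00  (default save state map)
//
// gcSmmInitTemplate is copied over the default entry point, each CPU takes its
// first SMI there to rebase itself, and the original contents are restored
// once all CPUs have signaled mRebased[].
//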
497/**\r
498 SMM Ready To Lock event notification handler.\r
499\r
500 The CPU S3 data is copied to SMRAM for security and mSmmReadyToLock is set to\r
501 perform additional lock actions that must be performed from SMM on the next SMI.\r
502\r
503 @param[in] Protocol Points to the protocol's unique identifier.\r
504 @param[in] Interface Points to the interface instance.\r
505 @param[in] Handle The handle on which the interface was installed.\r
506\r
507 @retval EFI_SUCCESS Notification handler runs successfully.\r
508 **/\r
509EFI_STATUS\r
510EFIAPI\r
511SmmReadyToLockEventNotify (\r
512 IN CONST EFI_GUID *Protocol,\r
513 IN VOID *Interface,\r
514 IN EFI_HANDLE Handle\r
515 )\r
516{\r
517 GetAcpiCpuData ();\r
518\r
519 //\r
520 // Cache a copy of UEFI memory map before we start profiling feature.\r
521 //\r
522 GetUefiMemoryMap ();\r
523\r
524 //\r
525 // Set SMM ready to lock flag and return\r
526 //\r
527 mSmmReadyToLock = TRUE;\r
528 return EFI_SUCCESS;\r
529}\r
530\r
531/**\r
532 The module Entry Point of the CPU SMM driver.\r
533\r
534 @param ImageHandle The firmware allocated handle for the EFI image.\r
535 @param SystemTable A pointer to the EFI System Table.\r
536\r
537 @retval EFI_SUCCESS The entry point is executed successfully.\r
538 @retval Other Some error occurs when executing this entry point.\r
539\r
540**/\r
541EFI_STATUS\r
542EFIAPI\r
543PiCpuSmmEntry (\r
544 IN EFI_HANDLE ImageHandle,\r
545 IN EFI_SYSTEM_TABLE *SystemTable\r
546 )\r
547{\r
548 EFI_STATUS Status;\r
549 EFI_MP_SERVICES_PROTOCOL *MpServices;\r
550 UINTN NumberOfEnabledProcessors;\r
551 UINTN Index;\r
552 VOID *Buffer;\r
553 UINTN BufferPages;\r
554 UINTN TileCodeSize;\r
555 UINTN TileDataSize;\r
556 UINTN TileSize;\r
557 UINT8 *Stacks;\r
558 VOID *Registration;\r
559 UINT32 RegEax;\r
560 UINT32 RegEdx;\r
561 UINTN FamilyId;\r
562 UINTN ModelId;\r
563 UINT32 Cr3;\r
564\r
565 //\r
566 // Initialize Debug Agent to support source level debug in SMM code\r
567 //\r
568 InitializeDebugAgent (DEBUG_AGENT_INIT_SMM, NULL, NULL);\r
569\r
570 //\r
571 // Report the start of CPU SMM initialization.\r
572 //\r
573 REPORT_STATUS_CODE (\r
574 EFI_PROGRESS_CODE,\r
575 EFI_COMPUTING_UNIT_HOST_PROCESSOR | EFI_CU_HP_PC_SMM_INIT\r
576 );\r
577\r
578 //\r
579 // Fix segment address of the long-mode-switch jump\r
580 //\r
581 if (sizeof (UINTN) == sizeof (UINT64)) {\r
582 gSmmJmpAddr.Segment = LONG_MODE_CODE_SEGMENT;\r
583 }\r
584\r
585 //\r
586 // Find out SMRR Base and SMRR Size\r
587 //\r
588 FindSmramInfo (&mCpuHotPlugData.SmrrBase, &mCpuHotPlugData.SmrrSize);\r
589\r
590 //\r
591 // Get MP Services Protocol\r
592 //\r
593 Status = SystemTable->BootServices->LocateProtocol (&gEfiMpServiceProtocolGuid, NULL, (VOID **)&MpServices);\r
594 ASSERT_EFI_ERROR (Status);\r
595\r
596 //\r
597 // Use MP Services Protocol to retrieve the number of processors and number of enabled processors\r
598 //\r
599 Status = MpServices->GetNumberOfProcessors (MpServices, &mNumberOfCpus, &NumberOfEnabledProcessors);\r
600 ASSERT_EFI_ERROR (Status);\r
601 ASSERT (mNumberOfCpus <= PcdGet32 (PcdCpuMaxLogicalProcessorNumber));\r
602\r
603 //\r
604 // If CPU hot plug is supported, PcdCpuSmmEnableBspElection should be set to TRUE.\r
605 // A constant BSP index makes no sense because that CPU may be hot removed.\r
606 //\r
607 DEBUG_CODE (\r
608 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {\r
609\r
610 ASSERT (FeaturePcdGet (PcdCpuSmmEnableBspElection));\r
611 }\r
612 );\r
613\r
614 //\r
615 // Save the PcdCpuSmmCodeAccessCheckEnable value into a global variable.\r
616 //\r
617 mSmmCodeAccessCheckEnable = PcdGetBool (PcdCpuSmmCodeAccessCheckEnable);\r
618 DEBUG ((EFI_D_INFO, "PcdCpuSmmCodeAccessCheckEnable = %d\n", mSmmCodeAccessCheckEnable));\r
619\r
620 //\r
621 // Save the PcdPteMemoryEncryptionAddressOrMask value into a global variable.\r
622 // Make sure AddressEncMask is contained to smallest supported address field.\r
623 //\r
624 mAddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;\r
625 DEBUG ((EFI_D_INFO, "mAddressEncMask = 0x%lx\n", mAddressEncMask));\r
626\r
627 //\r
628 // If CPU hot plug is supported, we need to allocate resources for possibly hot-added processors\r
629 //\r
630 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {\r
631 mMaxNumberOfCpus = PcdGet32 (PcdCpuMaxLogicalProcessorNumber);\r
632 } else {\r
633 mMaxNumberOfCpus = mNumberOfCpus;\r
634 }\r
635 gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus = mMaxNumberOfCpus;\r
636\r
637 //\r
638 // The CPU save state and code for the SMI entry point are tiled within an SMRAM\r
639 // allocated buffer. The minimum size of this buffer for a uniprocessor system\r
640 // is 32 KB, because the entry point is SMBASE + 32KB, and the CPU save state area is\r
641 // just below SMBASE + 64KB. If more than one CPU is present in the platform,\r
642 // then the SMI entry point and the CPU save state areas can be tiled to minimize\r
643 // the total amount of SMRAM required for all the CPUs. The tile size can be computed\r
644 // by adding the CPU save state size, any extra CPU specific context, and\r
645 // the size of code that must be placed at the SMI entry point to transfer\r
646 // control to a C function in the native SMM execution mode. This size is\r
647 // rounded up to the nearest power of 2 to give the tile size for each CPU.\r
648 // The total amount of memory required is the maximum number of CPUs that the\r
649 // platform supports times the tile size. The picture below shows the tiling,\r
650 // where m is the number of tiles that fit in 32KB.\r
651 //\r
652 // +-----------------------------+ <-- 2^n offset from Base of allocated buffer\r
653 // | CPU m+1 Save State |\r
654 // +-----------------------------+\r
655 // | CPU m+1 Extra Data |\r
656 // +-----------------------------+\r
657 // | Padding |\r
658 // +-----------------------------+\r
659 // | CPU 2m SMI Entry |\r
660 // +#############################+ <-- Base of allocated buffer + 64 KB\r
661 // | CPU m-1 Save State |\r
662 // +-----------------------------+\r
663 // | CPU m-1 Extra Data |\r
664 // +-----------------------------+\r
665 // | Padding |\r
666 // +-----------------------------+\r
667 // | CPU 2m-1 SMI Entry |\r
668 // +=============================+ <-- 2^n offset from Base of allocated buffer\r
669 // | . . . . . . . . . . . . |\r
670 // +=============================+ <-- 2^n offset from Base of allocated buffer\r
671 // | CPU 2 Save State |\r
672 // +-----------------------------+\r
673 // | CPU 2 Extra Data |\r
674 // +-----------------------------+\r
675 // | Padding |\r
676 // +-----------------------------+\r
677 // | CPU m+1 SMI Entry |\r
678 // +=============================+ <-- Base of allocated buffer + 32 KB\r
679 // | CPU 1 Save State |\r
680 // +-----------------------------+\r
681 // | CPU 1 Extra Data |\r
682 // +-----------------------------+\r
683 // | Padding |\r
684 // +-----------------------------+\r
685 // | CPU m SMI Entry |\r
686 // +#############################+ <-- Base of allocated buffer + 32 KB == CPU 0 SMBASE + 64 KB\r
687 // | CPU 0 Save State |\r
688 // +-----------------------------+\r
689 // | CPU 0 Extra Data |\r
690 // +-----------------------------+\r
691 // | Padding |\r
692 // +-----------------------------+\r
693 // | CPU m-1 SMI Entry |\r
694 // +=============================+ <-- 2^n offset from Base of allocated buffer\r
695 // | . . . . . . . . . . . . |\r
696 // +=============================+ <-- 2^n offset from Base of allocated buffer\r
697 // | Padding |\r
698 // +-----------------------------+\r
699 // | CPU 1 SMI Entry |\r
700 // +=============================+ <-- 2^n offset from Base of allocated buffer\r
701 // | Padding |\r
702 // +-----------------------------+\r
703 // | CPU 0 SMI Entry |\r
704 // +#############################+ <-- Base of allocated buffer == CPU 0 SMBASE + 32 KB\r
705 //\r
706\r
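//
// Worked example of the tiling math used below, with assumed values: if
// GetSmiHandlerSize() rounds up to one 4 KB page (TileCodeSize = 0x1000) and
// the PSD area plus save state map rounds up to 0x2000 (TileDataSize), then
//
//   TileSize = 2 * GetPowerOfTwo32 (0x2000 + 0x1000 - 1) = 2 * 0x2000 = 0x4000
//
// Assuming SMM_HANDLER_OFFSET = 0x8000, CPU 0 then gets SMBASE = Buffer - 0x8000,
// so its SMI entry (SMBASE + 0x8000) lands at Buffer and its save state map
// (SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET) sits just below Buffer + 0x8000;
// CPU N simply adds N * TileSize to that SMBASE.
//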
707 //\r
708 // Retrieve CPU Family\r
709 //\r
710 AsmCpuid (CPUID_VERSION_INFO, &RegEax, NULL, NULL, NULL);\r
711 FamilyId = (RegEax >> 8) & 0xf;\r
712 ModelId = (RegEax >> 4) & 0xf;\r
713 if (FamilyId == 0x06 || FamilyId == 0x0f) {\r
714 ModelId = ModelId | ((RegEax >> 12) & 0xf0);\r
715 }\r
716\r
717 RegEdx = 0;\r
718 AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);\r
719 if (RegEax >= CPUID_EXTENDED_CPU_SIG) {\r
720 AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);\r
721 }\r
722 //\r
723 // Determine the mode of the CPU at the time an SMI occurs\r
724 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual\r
725 // Volume 3C, Section 34.4.1.1\r
726 //\r
727 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT;\r
728 if ((RegEdx & BIT29) != 0) {\r
729 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;\r
730 }\r
731 if (FamilyId == 0x06) {\r
732 if (ModelId == 0x17 || ModelId == 0x0f || ModelId == 0x1c) {\r
733 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;\r
734 }\r
735 }\r
736\r
737 //\r
738 // Compute tile size of buffer required to hold the CPU SMRAM Save State Map, extra CPU\r
739 // specific context that starts at SMBASE + SMM_PSD_OFFSET, and the SMI entry point.\r
740 // This size is rounded up to the nearest power of 2.\r
741 //\r
742 TileCodeSize = GetSmiHandlerSize ();\r
743 TileCodeSize = ALIGN_VALUE(TileCodeSize, SIZE_4KB);\r
744 TileDataSize = (SMRAM_SAVE_STATE_MAP_OFFSET - SMM_PSD_OFFSET) + sizeof (SMRAM_SAVE_STATE_MAP);\r
745 TileDataSize = ALIGN_VALUE(TileDataSize, SIZE_4KB);\r
746 TileSize = TileDataSize + TileCodeSize - 1;\r
747 TileSize = 2 * GetPowerOfTwo32 ((UINT32)TileSize);\r
748 DEBUG ((EFI_D_INFO, "SMRAM TileSize = 0x%08x (0x%08x, 0x%08x)\n", TileSize, TileCodeSize, TileDataSize));\r
749\r
750 //\r
751 // If the TileSize is larger than space available for the SMI Handler of\r
752 // CPU[i], the extra CPU specific context of CPU[i+1], and the SMRAM Save\r
753 // State Map of CPU[i+1], then ASSERT(). If this ASSERT() is triggered, then\r
754 // the SMI Handler size must be reduced or the size of the extra CPU specific\r
755 // context must be reduced.\r
756 //\r
757 ASSERT (TileSize <= (SMRAM_SAVE_STATE_MAP_OFFSET + sizeof (SMRAM_SAVE_STATE_MAP) - SMM_HANDLER_OFFSET));\r
758\r
759 //\r
760 // Allocate buffer for all of the tiles.\r
761 //\r
762 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual\r
763 // Volume 3C, Section 34.11 SMBASE Relocation\r
764 // For Pentium and Intel486 processors, the SMBASE values must be\r
765 // aligned on a 32-KByte boundary or the processor will enter shutdown\r
766 // state during the execution of a RSM instruction.\r
767 //\r
768 // Intel486 processors: FamilyId is 4\r
769 // Pentium processors : FamilyId is 5\r
770 //\r
771 BufferPages = EFI_SIZE_TO_PAGES (SIZE_32KB + TileSize * (mMaxNumberOfCpus - 1));\r
772 if ((FamilyId == 4) || (FamilyId == 5)) {\r
773 Buffer = AllocateAlignedCodePages (BufferPages, SIZE_32KB);\r
774 } else {\r
775 Buffer = AllocateAlignedCodePages (BufferPages, SIZE_4KB);\r
776 }\r
777 ASSERT (Buffer != NULL);\r
778 DEBUG ((EFI_D_INFO, "SMRAM SaveState Buffer (0x%08x, 0x%08x)\n", Buffer, EFI_PAGES_TO_SIZE(BufferPages)));\r
779\r
780 //\r
781 // Allocate buffer for pointers to array in SMM_CPU_PRIVATE_DATA.\r
782 //\r
783 gSmmCpuPrivate->ProcessorInfo = (EFI_PROCESSOR_INFORMATION *)AllocatePool (sizeof (EFI_PROCESSOR_INFORMATION) * mMaxNumberOfCpus);\r
784 ASSERT (gSmmCpuPrivate->ProcessorInfo != NULL);\r
785\r
786 gSmmCpuPrivate->Operation = (SMM_CPU_OPERATION *)AllocatePool (sizeof (SMM_CPU_OPERATION) * mMaxNumberOfCpus);\r
787 ASSERT (gSmmCpuPrivate->Operation != NULL);\r
788\r
789 gSmmCpuPrivate->CpuSaveStateSize = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);\r
790 ASSERT (gSmmCpuPrivate->CpuSaveStateSize != NULL);\r
791\r
792 gSmmCpuPrivate->CpuSaveState = (VOID **)AllocatePool (sizeof (VOID *) * mMaxNumberOfCpus);\r
793 ASSERT (gSmmCpuPrivate->CpuSaveState != NULL);\r
794\r
795 mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveStateSize = gSmmCpuPrivate->CpuSaveStateSize;\r
796 mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveState = gSmmCpuPrivate->CpuSaveState;\r
797\r
798 //\r
799 // Allocate buffer for pointers to array in CPU_HOT_PLUG_DATA.\r
800 //\r
801 mCpuHotPlugData.ApicId = (UINT64 *)AllocatePool (sizeof (UINT64) * mMaxNumberOfCpus);\r
802 ASSERT (mCpuHotPlugData.ApicId != NULL);\r
803 mCpuHotPlugData.SmBase = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);\r
804 ASSERT (mCpuHotPlugData.SmBase != NULL);\r
805 mCpuHotPlugData.ArrayLength = (UINT32)mMaxNumberOfCpus;\r
806\r
807 //\r
808 // Retrieve APIC ID of each enabled processor from the MP Services protocol.\r
809 // Also compute the SMBASE address, CPU Save State address, and CPU Save state\r
810 // size for each CPU in the platform\r
811 //\r
812 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {\r
813 mCpuHotPlugData.SmBase[Index] = (UINTN)Buffer + Index * TileSize - SMM_HANDLER_OFFSET;\r
814 gSmmCpuPrivate->CpuSaveStateSize[Index] = sizeof(SMRAM_SAVE_STATE_MAP);\r
815 gSmmCpuPrivate->CpuSaveState[Index] = (VOID *)(mCpuHotPlugData.SmBase[Index] + SMRAM_SAVE_STATE_MAP_OFFSET);\r
816 gSmmCpuPrivate->Operation[Index] = SmmCpuNone;\r
817\r
818 if (Index < mNumberOfCpus) {\r
819 Status = MpServices->GetProcessorInfo (MpServices, Index, &gSmmCpuPrivate->ProcessorInfo[Index]);\r
820 ASSERT_EFI_ERROR (Status);\r
821 mCpuHotPlugData.ApicId[Index] = gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId;\r
822\r
823 DEBUG ((EFI_D_INFO, "CPU[%03x] APIC ID=%04x SMBASE=%08x SaveState=%08x Size=%08x\n",\r
824 Index,\r
825 (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId,\r
826 mCpuHotPlugData.SmBase[Index],\r
827 gSmmCpuPrivate->CpuSaveState[Index],\r
828 gSmmCpuPrivate->CpuSaveStateSize[Index]\r
829 ));\r
830 } else {\r
831 gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId = INVALID_APIC_ID;\r
832 mCpuHotPlugData.ApicId[Index] = INVALID_APIC_ID;\r
833 }\r
834 }\r
835\r
836 //\r
837 // Allocate SMI stacks for all processors.\r
838 //\r
839 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
840 //\r
841 // Two more pages are allocated for each processor:\r
842 // one is the guard page and the other is the known good stack.\r
843 //\r
844 // +-------------------------------------------+-----+-------------------------------------------+\r
845 // | Known Good Stack | Guard Page | SMM Stack | ... | Known Good Stack | Guard Page | SMM Stack |\r
846 // +-------------------------------------------+-----+-------------------------------------------+\r
847 // | | | |\r
848 // |<-------------- Processor 0 -------------->| |<-------------- Processor n -------------->|\r
849 //\r
850 mSmmStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)) + 2);\r
851 Stacks = (UINT8 *) AllocatePages (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)) + 2));\r
852 ASSERT (Stacks != NULL);\r
853 mSmmStackArrayBase = (UINTN)Stacks;\r
854 mSmmStackArrayEnd = mSmmStackArrayBase + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * mSmmStackSize - 1;\r
855 } else {\r
856 mSmmStackSize = PcdGet32 (PcdCpuSmmStackSize);\r
857 Stacks = (UINT8 *) AllocatePages (EFI_SIZE_TO_PAGES (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * mSmmStackSize));\r
858 ASSERT (Stacks != NULL);\r
859 }\r
860\r
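//
// Worked example with an assumed PcdCpuSmmStackSize of 16 KB (4 pages) and
// PcdCpuSmmStackGuard enabled:
//
//   mSmmStackSize = EFI_PAGES_TO_SIZE (4 + 2) = 24 KB per processor
//
// so each processor owns [known good stack | guard page | 16 KB SMM stack]
// and N processors consume N * 24 KB in total. Without the stack guard, each
// processor owns exactly the 16 KB SMM stack and no extra pages.
//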
861 //\r
862 // Set SMI stack for SMM base relocation\r
863 //\r
864 gSmmInitStack = (UINTN) (Stacks + mSmmStackSize - sizeof (UINTN));\r
865\r
866 //\r
867 // Initialize IDT\r
868 //\r
869 InitializeSmmIdt ();\r
870\r
871 //\r
872 // Relocate SMM Base addresses to the ones allocated from SMRAM\r
873 //\r
874 mRebased = (BOOLEAN *)AllocateZeroPool (sizeof (BOOLEAN) * mMaxNumberOfCpus);\r
875 ASSERT (mRebased != NULL);\r
876 SmmRelocateBases ();\r
877\r
878 //\r
879 // Call hook for BSP to perform extra actions in normal mode after all\r
880 // SMM base addresses have been relocated on all CPUs\r
881 //\r
882 SmmCpuFeaturesSmmRelocationComplete ();\r
883\r
884 DEBUG ((DEBUG_INFO, "mXdSupported - 0x%x\n", mXdSupported));\r
885\r
886 //\r
887 // SMM Time initialization\r
888 //\r
889 InitializeSmmTimer ();\r
890\r
891 //\r
892 // Initialize MP globals\r
893 //\r
894 Cr3 = InitializeMpServiceData (Stacks, mSmmStackSize);\r
895\r
896 //\r
897 // Fill in SMM Reserved Regions\r
898 //\r
899 gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedStart = 0;\r
900 gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedSize = 0;\r
901\r
902 //\r
903 // Install the SMM Configuration Protocol onto a new handle on the handle database.\r
904 // The entire SMM Configuration Protocol is allocated from SMRAM, so only a pointer\r
905 // to an SMRAM address will be present in the handle database\r
906 //\r
907 Status = SystemTable->BootServices->InstallMultipleProtocolInterfaces (\r
908 &gSmmCpuPrivate->SmmCpuHandle,\r
909 &gEfiSmmConfigurationProtocolGuid, &gSmmCpuPrivate->SmmConfiguration,\r
910 NULL\r
911 );\r
912 ASSERT_EFI_ERROR (Status);\r
913\r
914 //\r
915 // Install the SMM CPU Protocol into SMM protocol database\r
916 //\r
917 Status = gSmst->SmmInstallProtocolInterface (\r
918 &mSmmCpuHandle,\r
919 &gEfiSmmCpuProtocolGuid,\r
920 EFI_NATIVE_INTERFACE,\r
921 &mSmmCpu\r
922 );\r
923 ASSERT_EFI_ERROR (Status);\r
924\r
925 //\r
926 // Expose address of CPU Hot Plug Data structure if CPU hot plug is supported.\r
927 //\r
928 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {\r
929 Status = PcdSet64S (PcdCpuHotPlugDataAddress, (UINT64)(UINTN)&mCpuHotPlugData);\r
930 ASSERT_EFI_ERROR (Status);\r
931 }\r
932\r
933 //\r
934 // Initialize SMM CPU Services Support\r
935 //\r
936 Status = InitializeSmmCpuServices (mSmmCpuHandle);\r
937 ASSERT_EFI_ERROR (Status);\r
938\r
939 //\r
940 // register SMM Ready To Lock Protocol notification\r
941 //\r
942 Status = gSmst->SmmRegisterProtocolNotify (\r
943 &gEfiSmmReadyToLockProtocolGuid,\r
944 SmmReadyToLockEventNotify,\r
945 &Registration\r
946 );\r
947 ASSERT_EFI_ERROR (Status);\r
948\r
949 //\r
950 // Initialize SMM Profile feature\r
951 //\r
952 InitSmmProfile (Cr3);\r
953\r
954 GetAcpiS3EnableFlag ();\r
955 InitSmmS3ResumeState (Cr3);\r
956\r
957 DEBUG ((EFI_D_INFO, "SMM CPU Module exit from SMRAM with EFI_SUCCESS\n"));\r
958\r
959 return EFI_SUCCESS;\r
960}\r
961\r
962/**\r
963\r
964 Find out SMRAM information including SMRR base and SMRR size.\r
965\r
966 @param SmrrBase SMRR base\r
967 @param SmrrSize SMRR size\r
968\r
969**/\r
970VOID\r
971FindSmramInfo (\r
972 OUT UINT32 *SmrrBase,\r
973 OUT UINT32 *SmrrSize\r
974 )\r
975{\r
976 EFI_STATUS Status;\r
977 UINTN Size;\r
978 EFI_SMM_ACCESS2_PROTOCOL *SmmAccess;\r
979 EFI_SMRAM_DESCRIPTOR *CurrentSmramRange;\r
980 UINTN Index;\r
981 UINT64 MaxSize;\r
982 BOOLEAN Found;\r
983\r
984 //\r
985 // Get SMM Access Protocol\r
986 //\r
987 Status = gBS->LocateProtocol (&gEfiSmmAccess2ProtocolGuid, NULL, (VOID **)&SmmAccess);\r
988 ASSERT_EFI_ERROR (Status);\r
989\r
990 //\r
991 // Get SMRAM information\r
992 //\r
993 Size = 0;\r
994 Status = SmmAccess->GetCapabilities (SmmAccess, &Size, NULL);\r
995 ASSERT (Status == EFI_BUFFER_TOO_SMALL);\r
996\r
997 mSmmCpuSmramRanges = (EFI_SMRAM_DESCRIPTOR *)AllocatePool (Size);\r
998 ASSERT (mSmmCpuSmramRanges != NULL);\r
999\r
1000 Status = SmmAccess->GetCapabilities (SmmAccess, &Size, mSmmCpuSmramRanges);\r
1001 ASSERT_EFI_ERROR (Status);\r
1002\r
1003 mSmmCpuSmramRangeCount = Size / sizeof (EFI_SMRAM_DESCRIPTOR);\r
1004\r
1005 //\r
1006 // Find the largest SMRAM range between 1MB and 4GB that is at least 256K - 4K in size\r
1007 //\r
1008 CurrentSmramRange = NULL;\r
1009 for (Index = 0, MaxSize = SIZE_256KB - EFI_PAGE_SIZE; Index < mSmmCpuSmramRangeCount; Index++) {\r
1010 //\r
1011 // Skip any SMRAM region that is already allocated, needs testing, or needs ECC initialization\r
1012 //\r
1013 if ((mSmmCpuSmramRanges[Index].RegionState & (EFI_ALLOCATED | EFI_NEEDS_TESTING | EFI_NEEDS_ECC_INITIALIZATION)) != 0) {\r
1014 continue;\r
1015 }\r
1016\r
1017 if (mSmmCpuSmramRanges[Index].CpuStart >= BASE_1MB) {\r
1018 if ((mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize) <= SMRR_MAX_ADDRESS) {\r
1019 if (mSmmCpuSmramRanges[Index].PhysicalSize >= MaxSize) {\r
1020 MaxSize = mSmmCpuSmramRanges[Index].PhysicalSize;\r
1021 CurrentSmramRange = &mSmmCpuSmramRanges[Index];\r
1022 }\r
1023 }\r
1024 }\r
1025 }\r
1026\r
1027 ASSERT (CurrentSmramRange != NULL);\r
1028\r
1029 *SmrrBase = (UINT32)CurrentSmramRange->CpuStart;\r
1030 *SmrrSize = (UINT32)CurrentSmramRange->PhysicalSize;\r
1031\r
1032 do {\r
1033 Found = FALSE;\r
1034 for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {\r
1035 if (mSmmCpuSmramRanges[Index].CpuStart < *SmrrBase &&\r
1036 *SmrrBase == (mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize)) {\r
1037 *SmrrBase = (UINT32)mSmmCpuSmramRanges[Index].CpuStart;\r
1038 *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);\r
1039 Found = TRUE;\r
1040 } else if ((*SmrrBase + *SmrrSize) == mSmmCpuSmramRanges[Index].CpuStart && mSmmCpuSmramRanges[Index].PhysicalSize > 0) {\r
1041 *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);\r
1042 Found = TRUE;\r
1043 }\r
1044 }\r
1045 } while (Found);\r
1046\r
1047 DEBUG ((EFI_D_INFO, "SMRR Base: 0x%x, SMRR Size: 0x%x\n", *SmrrBase, *SmrrSize));\r
1048}\r
1049\r
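//
// Worked example of the merge loop above, with hypothetical descriptors: if
// the largest open range found is Base = 0x7F200000, Size = 0x600000, and
// another descriptor covers 0x7F000000..0x7F1FFFFF (its end equals *SmrrBase),
// the first pass extends the result to Base = 0x7F000000, Size = 0x800000.
// The loop repeats until no adjacent SMRAM descriptor can be merged, so the
// final SMRR range covers all SMRAM contiguous with the selected region.
//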
1050/**\r
1051Configure SMM Code Access Check feature on an AP.\r
1052SMM Feature Control MSR will be locked after configuration.\r
1053\r
1054@param[in,out] Buffer Pointer to private data buffer.\r
1055**/\r
1056VOID\r
1057EFIAPI\r
1058ConfigSmmCodeAccessCheckOnCurrentProcessor (\r
1059 IN OUT VOID *Buffer\r
1060 )\r
1061{\r
1062 UINTN CpuIndex;\r
1063 UINT64 SmmFeatureControlMsr;\r
1064 UINT64 NewSmmFeatureControlMsr;\r
1065\r
1066 //\r
1067 // Retrieve the CPU Index from the context passed in\r
1068 //\r
1069 CpuIndex = *(UINTN *)Buffer;\r
1070\r
1071 //\r
1072 // Get the current SMM Feature Control MSR value\r
1073 //\r
1074 SmmFeatureControlMsr = SmmCpuFeaturesGetSmmRegister (CpuIndex, SmmRegFeatureControl);\r
1075\r
1076 //\r
1077 // Compute the new SMM Feature Control MSR value\r
1078 //\r
1079 NewSmmFeatureControlMsr = SmmFeatureControlMsr;\r
1080 if (mSmmCodeAccessCheckEnable) {\r
1081 NewSmmFeatureControlMsr |= SMM_CODE_CHK_EN_BIT;\r
1082 if (FeaturePcdGet (PcdCpuSmmFeatureControlMsrLock)) {\r
1083 NewSmmFeatureControlMsr |= SMM_FEATURE_CONTROL_LOCK_BIT;\r
1084 }\r
1085 }\r
1086\r
1087 //\r
1088 // Only set the SMM Feature Control MSR value if the new value is different than the current value\r
1089 //\r
1090 if (NewSmmFeatureControlMsr != SmmFeatureControlMsr) {\r
1091 SmmCpuFeaturesSetSmmRegister (CpuIndex, SmmRegFeatureControl, NewSmmFeatureControlMsr);\r
1092 }\r
1093\r
1094 //\r
1095 // Release the spin lock used to serialize the updates to the SMM Feature Control MSR\r
1096 //\r
1097 ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);\r
1098}\r
1099\r
1100/**\r
1101Configure SMM Code Access Check feature for all processors.\r
1102SMM Feature Control MSR will be locked after configuration.\r
1103**/\r
1104VOID\r
1105ConfigSmmCodeAccessCheck (\r
1106 VOID\r
1107 )\r
1108{\r
1109 UINTN Index;\r
1110 EFI_STATUS Status;\r
1111\r
1112 //\r
1113 // Check to see if the Feature Control MSR is supported on this CPU\r
1114 //\r
1115 Index = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;\r
1116 if (!SmmCpuFeaturesIsSmmRegisterSupported (Index, SmmRegFeatureControl)) {\r
1117 mSmmCodeAccessCheckEnable = FALSE;\r
1118 return;\r
1119 }\r
1120\r
1121 //\r
1122 // Check to see if the CPU supports the SMM Code Access Check feature\r
1123 // Do not access this MSR unless the CPU supports the SmmRegFeatureControl\r
1124 //\r
1125 if ((AsmReadMsr64 (EFI_MSR_SMM_MCA_CAP) & SMM_CODE_ACCESS_CHK_BIT) == 0) {\r
1126 mSmmCodeAccessCheckEnable = FALSE;\r
1127 return;\r
1128 }\r
1129\r
1130 //\r
1131 // Initialize the lock used to serialize the MSR programming in BSP and all APs\r
1132 //\r
1133 InitializeSpinLock (mConfigSmmCodeAccessCheckLock);\r
1134\r
1135 //\r
1136 // Acquire Config SMM Code Access Check spin lock. The BSP will release the\r
1137 // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().\r
1138 //\r
1139 AcquireSpinLock (mConfigSmmCodeAccessCheckLock);\r
1140\r
1141 //\r
1142 // Enable SMM Code Access Check feature on the BSP.\r
1143 //\r
1144 ConfigSmmCodeAccessCheckOnCurrentProcessor (&Index);\r
1145\r
1146 //\r
1147 // Enable SMM Code Access Check feature for the APs.\r
1148 //\r
1149 for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {\r
1150 if (Index != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {\r
1151\r
1152 //\r
1153 // Acquire Config SMM Code Access Check spin lock. The AP will release the\r
1154 // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().\r
1155 //\r
1156 AcquireSpinLock (mConfigSmmCodeAccessCheckLock);\r
1157\r
1158 //\r
1159 // Call SmmStartupThisAp() to enable SMM Code Access Check on an AP.\r
1160 //\r
1161 Status = gSmst->SmmStartupThisAp (ConfigSmmCodeAccessCheckOnCurrentProcessor, Index, &Index);\r
1162 ASSERT_EFI_ERROR (Status);\r
1163\r
1164 //\r
1165 // Wait for the AP to release the Config SMM Code Access Check spin lock.\r
1166 //\r
1167 while (!AcquireSpinLockOrFail (mConfigSmmCodeAccessCheckLock)) {\r
1168 CpuPause ();\r
1169 }\r
1170\r
1171 //\r
1172 // Release the Config SMM Code Access Check spin lock.\r
1173 //\r
1174 ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);\r
1175 }\r
1176 }\r
1177}\r
1178\r
1179/**\r
1180 This API provides a way to allocate memory for page table.\r
1181\r
1182 This API can be called more than once to allocate memory for page tables.\r
1183\r
1184 Allocates the number of 4KB pages of type EfiRuntimeServicesData and returns a pointer to the\r
1185 allocated buffer. The buffer returned is aligned on a 4KB boundary. If Pages is 0, then NULL\r
1186 is returned. If there is not enough memory remaining to satisfy the request, then NULL is\r
1187 returned.\r
1188\r
1189 @param Pages The number of 4 KB pages to allocate.\r
1190\r
1191 @return A pointer to the allocated buffer or NULL if allocation fails.\r
1192\r
1193**/\r
1194VOID *\r
1195AllocatePageTableMemory (\r
1196 IN UINTN Pages\r
1197 )\r
1198{\r
1199 VOID *Buffer;\r
1200\r
1201 Buffer = SmmCpuFeaturesAllocatePageTableMemory (Pages);\r
1202 if (Buffer != NULL) {\r
1203 return Buffer;\r
1204 }\r
1205 return AllocatePages (Pages);\r
1206}\r
1207\r
1208/**\r
1209 Allocate pages for code.\r
1210\r
1211 @param[in] Pages Number of pages to be allocated.\r
1212\r
1213 @return Allocated memory.\r
1214**/\r
1215VOID *\r
1216AllocateCodePages (\r
1217 IN UINTN Pages\r
1218 )\r
1219{\r
1220 EFI_STATUS Status;\r
1221 EFI_PHYSICAL_ADDRESS Memory;\r
1222\r
1223 if (Pages == 0) {\r
1224 return NULL;\r
1225 }\r
1226\r
1227 Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);\r
1228 if (EFI_ERROR (Status)) {\r
1229 return NULL;\r
1230 }\r
1231 return (VOID *) (UINTN) Memory;\r
1232}\r
1233\r
1234/**\r
1235 Allocate aligned pages for code.\r
1236\r
1237 @param[in] Pages Number of pages to be allocated.\r
1238 @param[in] Alignment The requested alignment of the allocation.\r
1239 Must be a power of two.\r
1240 If Alignment is zero, then byte alignment is used.\r
1241\r
1242 @return Allocated memory.\r
1243**/\r
1244VOID *\r
1245AllocateAlignedCodePages (\r
1246 IN UINTN Pages,\r
1247 IN UINTN Alignment\r
1248 )\r
1249{\r
1250 EFI_STATUS Status;\r
1251 EFI_PHYSICAL_ADDRESS Memory;\r
1252 UINTN AlignedMemory;\r
1253 UINTN AlignmentMask;\r
1254 UINTN UnalignedPages;\r
1255 UINTN RealPages;\r
1256\r
1257 //\r
1258 // Alignment must be a power of two or zero.\r
1259 //\r
1260 ASSERT ((Alignment & (Alignment - 1)) == 0);\r
1261\r
1262 if (Pages == 0) {\r
1263 return NULL;\r
1264 }\r
1265 if (Alignment > EFI_PAGE_SIZE) {\r
1266 //\r
1267 // Calculate the total number of pages since alignment is larger than page size.\r
1268 //\r
1269 AlignmentMask = Alignment - 1;\r
1270 RealPages = Pages + EFI_SIZE_TO_PAGES (Alignment);\r
1271 //\r
1272 // Make sure that Pages plus EFI_SIZE_TO_PAGES (Alignment) does not overflow.\r
1273 //\r
1274 ASSERT (RealPages > Pages);\r
1275\r
1276 Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, RealPages, &Memory);\r
1277 if (EFI_ERROR (Status)) {\r
1278 return NULL;\r
1279 }\r
1280 AlignedMemory = ((UINTN) Memory + AlignmentMask) & ~AlignmentMask;\r
1281 UnalignedPages = EFI_SIZE_TO_PAGES (AlignedMemory - (UINTN) Memory);\r
1282 if (UnalignedPages > 0) {\r
1283 //\r
1284 // Free first unaligned page(s).\r
1285 //\r
1286 Status = gSmst->SmmFreePages (Memory, UnalignedPages);\r
1287 ASSERT_EFI_ERROR (Status);\r
1288 }\r
1289 Memory = AlignedMemory + EFI_PAGES_TO_SIZE (Pages);\r
1290 UnalignedPages = RealPages - Pages - UnalignedPages;\r
1291 if (UnalignedPages > 0) {\r
1292 //\r
1293 // Free last unaligned page(s).\r
1294 //\r
1295 Status = gSmst->SmmFreePages (Memory, UnalignedPages);\r
1296 ASSERT_EFI_ERROR (Status);\r
1297 }\r
1298 } else {\r
1299 //\r
1300 // Do not over-allocate pages in this case.\r
1301 //\r
1302 Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);\r
1303 if (EFI_ERROR (Status)) {\r
1304 return NULL;\r
1305 }\r
1306 AlignedMemory = (UINTN) Memory;\r
1307 }\r
1308 return (VOID *) AlignedMemory;\r
1309}\r
1310\r
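//
// Worked example of the head/tail trimming above, with assumed arguments
// Pages = 4 and Alignment = SIZE_32KB:
//
//   RealPages = 4 + EFI_SIZE_TO_PAGES (SIZE_32KB) = 12 pages
//
// If SmmAllocatePages() returns a buffer starting 4 KB past a 32 KB boundary,
// AlignedMemory is rounded up by 28 KB, the 7 leading pages are freed, and
// the 1 trailing page (12 - 4 - 7) is freed, leaving exactly 4 pages aligned
// on 32 KB. If the buffer happens to be aligned already, all 8 extra pages
// are trailing and are freed in a single call.
//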
1311/**\r
1312 Perform the remaining tasks.\r
1313\r
1314**/\r
1315VOID\r
1316PerformRemainingTasks (\r
1317 VOID\r
1318 )\r
1319{\r
1320 if (mSmmReadyToLock) {\r
1321 //\r
1322 // Start SMM Profile feature\r
1323 //\r
1324 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {\r
1325 SmmProfileStart ();\r
1326 }\r
1327 //\r
1328 // Create a mix of 2MB and 4KB page tables. Mark some memory ranges as absent and execute-disable.\r
1329 //\r
1330 InitPaging ();\r
1331\r
1332 //\r
1333 // Mark critical region to be read-only in page table\r
1334 //\r
1335 SetMemMapAttributes ();\r
1336\r
1337 //\r
1338 // For outside SMRAM, we only map SMM communication buffer or MMIO.\r
1339 //\r
1340 SetUefiMemMapAttributes ();\r
1341\r
1342 //\r
1343 // Set page table itself to be read-only\r
1344 //\r
1345 SetPageTableAttributes ();\r
1346\r
1347 //\r
1348 // Configure SMM Code Access Check feature if available.\r
1349 //\r
1350 ConfigSmmCodeAccessCheck ();\r
1351\r
1352 SmmCpuFeaturesCompleteSmmReadyToLock ();\r
1353\r
1354 //\r
1355 // Clean SMM ready to lock flag\r
1356 //\r
1357 mSmmReadyToLock = FALSE;\r
1358 }\r
1359}\r
1360\r
1361/**\r
1362 Perform the pre tasks.\r
1363\r
1364**/\r
1365VOID\r
1366PerformPreTasks (\r
1367 VOID\r
1368 )\r
1369{\r
1370 RestoreSmmConfigurationInS3 ();\r
1371 }\r