/** @file
Agent Module to load other modules to deploy SMM Entry Vector for X86 CPU.

Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "PiSmmCpuDxeSmm.h"

//
// SMM CPU Private Data structure that contains SMM Configuration Protocol
// along with its supporting fields.
//
SMM_CPU_PRIVATE_DATA  mSmmCpuPrivateData = {
  SMM_CPU_PRIVATE_DATA_SIGNATURE,               // Signature
  NULL,                                         // SmmCpuHandle
  NULL,                                         // Pointer to ProcessorInfo array
  NULL,                                         // Pointer to Operation array
  NULL,                                         // Pointer to CpuSaveStateSize array
  NULL,                                         // Pointer to CpuSaveState array
  { {0} },                                      // SmmReservedSmramRegion
  {
    SmmStartupThisAp,                           // SmmCoreEntryContext.SmmStartupThisAp
    0,                                          // SmmCoreEntryContext.CurrentlyExecutingCpu
    0,                                          // SmmCoreEntryContext.NumberOfCpus
    NULL,                                       // SmmCoreEntryContext.CpuSaveStateSize
    NULL                                        // SmmCoreEntryContext.CpuSaveState
  },
  NULL,                                         // SmmCoreEntry
  {
    mSmmCpuPrivateData.SmmReservedSmramRegion,  // SmmConfiguration.SmramReservedRegions
    RegisterSmmEntry                            // SmmConfiguration.RegisterSmmEntry
  },
};

CPU_HOT_PLUG_DATA mCpuHotPlugData = {
  CPU_HOT_PLUG_DATA_REVISION_1, // Revision
  0,                            // Array Length of SmBase and APIC ID
  NULL,                         // Pointer to APIC ID array
  NULL,                         // Pointer to SMBASE array
  0,                            // Reserved
  0,                            // SmrrBase
  0                             // SmrrSize
};

//
// Global pointer used to access mSmmCpuPrivateData from outside and inside SMM
//
SMM_CPU_PRIVATE_DATA  *gSmmCpuPrivate = &mSmmCpuPrivateData;

//
// SMM Relocation variables
//
volatile BOOLEAN  *mRebased;
volatile BOOLEAN  mIsBsp;

///
/// Handle for the SMM CPU Protocol
///
EFI_HANDLE  mSmmCpuHandle = NULL;

///
/// SMM CPU Protocol instance
///
EFI_SMM_CPU_PROTOCOL  mSmmCpu = {
  SmmReadSaveState,
  SmmWriteSaveState
};

EFI_CPU_INTERRUPT_HANDLER  mExternalVectorTable[EXCEPTION_VECTOR_NUMBER];

//
// SMM stack information
//
UINTN  mSmmStackArrayBase;
UINTN  mSmmStackArrayEnd;
UINTN  mSmmStackSize;

UINTN  mMaxNumberOfCpus = 1;
UINTN  mNumberOfCpus = 1;

//
// SMM ready to lock flag
//
BOOLEAN  mSmmReadyToLock = FALSE;

//
// Global used to cache PCD for SMM Code Access Check enable
//
BOOLEAN  mSmmCodeAccessCheckEnable = FALSE;

//
// Spin lock used to serialize setting of SMM Code Access Check feature
//
SPIN_LOCK  *mConfigSmmCodeAccessCheckLock = NULL;

/**
  Initialize the IDT to set up exception handlers for SMM.

**/
VOID
InitializeSmmIdt (
  VOID
  )
{
  EFI_STATUS       Status;
  BOOLEAN          InterruptState;
  IA32_DESCRIPTOR  DxeIdtr;

  //
  // There are 32 (not 255) entries in the IDT since only processor
  // generated exceptions will be handled.
  //
  gcSmiIdtr.Limit = (sizeof(IA32_IDT_GATE_DESCRIPTOR) * 32) - 1;
  //
  // Allocate page aligned IDT, because it might be set as read only.
  //
  gcSmiIdtr.Base = (UINTN)AllocateCodePages (EFI_SIZE_TO_PAGES(gcSmiIdtr.Limit + 1));
  ASSERT (gcSmiIdtr.Base != 0);
  ZeroMem ((VOID *)gcSmiIdtr.Base, gcSmiIdtr.Limit + 1);

  //
  // Disable interrupts and save the DXE IDT
  //
  InterruptState = SaveAndDisableInterrupts ();
  AsmReadIdtr (&DxeIdtr);
  //
  // Load SMM temporary IDT table
  //
  AsmWriteIdtr (&gcSmiIdtr);
  //
  // Setup SMM default exception handlers, SMM IDT table
  // will be updated and saved in gcSmiIdtr
  //
  Status = InitializeCpuExceptionHandlers (NULL);
  ASSERT_EFI_ERROR (Status);
  //
  // Restore the DXE IDT and the CPU interrupt state
  //
  AsmWriteIdtr ((IA32_DESCRIPTOR *) &DxeIdtr);
  SetInterruptState (InterruptState);
}

/**
  Search for the module name by the input IP address and output it.

  @param CallerIpAddress    Caller instruction pointer.

**/
VOID
DumpModuleInfoByIp (
  IN UINTN  CallerIpAddress
  )
{
  UINTN                                Pe32Data;
  EFI_IMAGE_DOS_HEADER                 *DosHdr;
  EFI_IMAGE_OPTIONAL_HEADER_PTR_UNION  Hdr;
  VOID                                 *PdbPointer;
  UINT64                               DumpIpAddress;

  //
  // Find Image Base
  //
  Pe32Data = CallerIpAddress & ~(SIZE_4KB - 1);
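  //
  // UEFI PE/COFF images are loaded at page-aligned addresses, so walk backwards
  // one 4 KB page at a time from the caller IP until a DOS header followed by a
  // valid PE signature is found.
  //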
  while (Pe32Data != 0) {
    DosHdr = (EFI_IMAGE_DOS_HEADER *) Pe32Data;
    if (DosHdr->e_magic == EFI_IMAGE_DOS_SIGNATURE) {
      //
      // DOS image header is present, so read the PE header after the DOS image header.
      //
      Hdr.Pe32 = (EFI_IMAGE_NT_HEADERS32 *)(Pe32Data + (UINTN) ((DosHdr->e_lfanew) & 0x0ffff));
      //
      // Make sure PE header address does not overflow and is less than the initial address.
      //
      if (((UINTN)Hdr.Pe32 > Pe32Data) && ((UINTN)Hdr.Pe32 < CallerIpAddress)) {
        if (Hdr.Pe32->Signature == EFI_IMAGE_NT_SIGNATURE) {
          //
          // It's a PE image.
          //
          break;
        }
      }
    }

    //
    // Image base not found yet; check the previous aligned address.
    //
    Pe32Data -= SIZE_4KB;
  }

  DumpIpAddress = CallerIpAddress;
  DEBUG ((EFI_D_ERROR, "It is invoked from the instruction before IP(0x%lx)", DumpIpAddress));

  if (Pe32Data != 0) {
    PdbPointer = PeCoffLoaderGetPdbPointer ((VOID *) Pe32Data);
    if (PdbPointer != NULL) {
      DEBUG ((EFI_D_ERROR, " in module (%a)", PdbPointer));
    }
  }
}

/**
  Read information from the CPU save state.

  @param  This      EFI_SMM_CPU_PROTOCOL instance
  @param  Width     The number of bytes to read from the CPU save state.
  @param  Register  Specifies the CPU register to read from the save state.
  @param  CpuIndex  Specifies the zero-based index of the CPU save state.
  @param  Buffer    Upon return, this holds the CPU register value read from the save state.

  @retval EFI_SUCCESS            The register was read from the save state.
  @retval EFI_NOT_FOUND          The register is not defined for the save state of the processor.
  @retval EFI_INVALID_PARAMETER  This or Buffer is NULL.

**/
EFI_STATUS
EFIAPI
SmmReadSaveState (
  IN CONST EFI_SMM_CPU_PROTOCOL   *This,
  IN UINTN                        Width,
  IN EFI_SMM_SAVE_STATE_REGISTER  Register,
  IN UINTN                        CpuIndex,
  OUT VOID                        *Buffer
  )
{
  EFI_STATUS  Status;

  //
  // Retrieve pointer to the specified CPU's SMM Save State buffer
  //
  if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {
    return EFI_INVALID_PARAMETER;
  }

  //
  // Check for special EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID
  //
  if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {
    //
    // The pseudo-register only supports the 64-bit size specified by Width.
    //
    if (Width != sizeof (UINT64)) {
      return EFI_INVALID_PARAMETER;
    }
    //
    // If the processor is in SMM at the time the SMI occurred,
    // the pseudo register value for EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID is returned in Buffer.
    // Otherwise, EFI_NOT_FOUND is returned.
    //
    if (*(mSmmMpSyncData->CpuData[CpuIndex].Present)) {
      *(UINT64 *)Buffer = gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId;
      return EFI_SUCCESS;
    } else {
      return EFI_NOT_FOUND;
    }
  }

  if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
    return EFI_INVALID_PARAMETER;
  }

  Status = SmmCpuFeaturesReadSaveStateRegister (CpuIndex, Register, Width, Buffer);
  if (Status == EFI_UNSUPPORTED) {
    Status = ReadSaveStateRegister (CpuIndex, Register, Width, Buffer);
  }
  return Status;
}

/**
  Write data to the CPU save state.

  @param  This      EFI_SMM_CPU_PROTOCOL instance
  @param  Width     The number of bytes to write to the CPU save state.
  @param  Register  Specifies the CPU register to write to the save state.
  @param  CpuIndex  Specifies the zero-based index of the CPU save state.
  @param  Buffer    Upon entry, this holds the new CPU register value.

  @retval EFI_SUCCESS            The register was written to the save state.
  @retval EFI_NOT_FOUND          The register is not defined for the save state of the processor.
  @retval EFI_INVALID_PARAMETER  ProcessorIndex or Width is not correct.

**/
EFI_STATUS
EFIAPI
SmmWriteSaveState (
  IN CONST EFI_SMM_CPU_PROTOCOL   *This,
  IN UINTN                        Width,
  IN EFI_SMM_SAVE_STATE_REGISTER  Register,
  IN UINTN                        CpuIndex,
  IN CONST VOID                   *Buffer
  )
{
  EFI_STATUS  Status;

  //
  // Retrieve pointer to the specified CPU's SMM Save State buffer
  //
  if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {
    return EFI_INVALID_PARAMETER;
  }

  //
  // Writes to EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID are ignored
  //
  if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {
    return EFI_SUCCESS;
  }

  if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
    return EFI_INVALID_PARAMETER;
  }

  Status = SmmCpuFeaturesWriteSaveStateRegister (CpuIndex, Register, Width, Buffer);
  if (Status == EFI_UNSUPPORTED) {
    Status = WriteSaveStateRegister (CpuIndex, Register, Width, Buffer);
  }
  return Status;
}


/**
  C function for the SMI handler, used to change all processors' SMBASE registers.

**/
VOID
EFIAPI
SmmInitHandler (
  VOID
  )
{
  UINT32  ApicId;
  UINTN   Index;

  //
  // Update SMM IDT entries' code segment and load IDT
  //
  AsmWriteIdtr (&gcSmiIdtr);
  ApicId = GetApicId ();

  ASSERT (mNumberOfCpus <= mMaxNumberOfCpus);

  for (Index = 0; Index < mNumberOfCpus; Index++) {
    if (ApicId == (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
      //
      // Initialize SMM specific features on the currently executing CPU
      //
      SmmCpuFeaturesInitializeProcessor (
        Index,
        mIsBsp,
        gSmmCpuPrivate->ProcessorInfo,
        &mCpuHotPlugData
        );

      if (!mSmmS3Flag) {
        //
        // Check XD and BTS features on each processor on normal boot
        //
        CheckFeatureSupported ();
      }

      if (mIsBsp) {
        //
        // BSP rebase is already done above.
        // Initialize private data during S3 resume
        //
        InitializeMpSyncData ();
      }

      //
      // Hook the return point after RSM to set the SMM re-based flag
      //
      SemaphoreHook (Index, &mRebased[Index]);

      return;
    }
  }
  ASSERT (FALSE);
}

/**
  Relocate SMBASE addresses for each processor.

  Executed on the first boot and on all S3 resumes.

**/
VOID
EFIAPI
SmmRelocateBases (
  VOID
  )
{
  UINT8                 BakBuf[BACK_BUF_SIZE];
  SMRAM_SAVE_STATE_MAP  BakBuf2;
  SMRAM_SAVE_STATE_MAP  *CpuStatePtr;
  UINT8                 *U8Ptr;
  UINT32                ApicId;
  UINTN                 Index;
  UINTN                 BspIndex;

  //
  // Make sure the reserved size is large enough for procedure SmmInitTemplate.
  //
  ASSERT (sizeof (BakBuf) >= gcSmmInitSize);

  //
  // Patch ASM code template with current CR0, CR3, and CR4 values
  //
  gSmmCr0 = (UINT32)AsmReadCr0 ();
  gSmmCr3 = (UINT32)AsmReadCr3 ();
  gSmmCr4 = (UINT32)AsmReadCr4 ();

  //
  // Patch GDTR for SMM base relocation
  //
  gcSmiInitGdtr.Base  = gcSmiGdtr.Base;
  gcSmiInitGdtr.Limit = gcSmiGdtr.Limit;

  U8Ptr = (UINT8*)(UINTN)(SMM_DEFAULT_SMBASE + SMM_HANDLER_OFFSET);
  CpuStatePtr = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);

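  //
  // With the architectural default SMBASE of 0x30000, the initial SMI entry
  // point (SMBASE + SMM_HANDLER_OFFSET) is at physical address 0x38000 and the
  // default save state map sits just below SMBASE + 0x10000.
  //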
  //
  // Backup original contents at address 0x38000
  //
  CopyMem (BakBuf, U8Ptr, sizeof (BakBuf));
  CopyMem (&BakBuf2, CpuStatePtr, sizeof (BakBuf2));

  //
  // Load image for relocation
  //
  CopyMem (U8Ptr, gcSmmInitTemplate, gcSmmInitSize);

  //
  // Retrieve the local APIC ID of current processor
  //
  ApicId = GetApicId ();

  //
  // Relocate SM bases for all APs
  // This is the APs' 1st SMI - rebasing is done here, and the APs' default SMI handler is overridden by gcSmmInitTemplate
  //
  mIsBsp = FALSE;
  BspIndex = (UINTN)-1;
  for (Index = 0; Index < mNumberOfCpus; Index++) {
    mRebased[Index] = FALSE;
    if (ApicId != (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
      SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
      //
      // Wait for this AP to finish its 1st SMI
      //
      while (!mRebased[Index]);
    } else {
      //
      // The BSP will be relocated later
      //
      BspIndex = Index;
    }
  }

  //
  // Relocate the BSP's SMM base
  //
  ASSERT (BspIndex != (UINTN)-1);
  mIsBsp = TRUE;
  SendSmiIpi (ApicId);
  //
  // Wait for the BSP to finish its 1st SMI
  //
  while (!mRebased[BspIndex]);

  //
  // Restore contents at address 0x38000
  //
  CopyMem (CpuStatePtr, &BakBuf2, sizeof (BakBuf2));
  CopyMem (U8Ptr, BakBuf, sizeof (BakBuf));
}

/**
  SMM Ready To Lock event notification handler.

  The CPU S3 data is copied to SMRAM for security and mSmmReadyToLock is set to
  perform additional lock actions that must be performed from SMM on the next SMI.

  @param[in] Protocol   Points to the protocol's unique identifier.
  @param[in] Interface  Points to the interface instance.
  @param[in] Handle     The handle on which the interface was installed.

  @retval EFI_SUCCESS   Notification handler runs successfully.
**/
EFI_STATUS
EFIAPI
SmmReadyToLockEventNotify (
  IN CONST EFI_GUID  *Protocol,
  IN VOID            *Interface,
  IN EFI_HANDLE      Handle
  )
{
  GetAcpiCpuData ();

  //
  // Cache a copy of the UEFI memory map before we start the profiling feature.
  //
  GetUefiMemoryMap ();

  //
  // Set the SMM ready to lock flag and return
  //
  mSmmReadyToLock = TRUE;
  return EFI_SUCCESS;
}

/**
  The module Entry Point of the CPU SMM driver.

  @param  ImageHandle    The firmware allocated handle for the EFI image.
  @param  SystemTable    A pointer to the EFI System Table.

  @retval EFI_SUCCESS    The entry point is executed successfully.
  @retval Other          Some error occurs when executing this entry point.

**/
EFI_STATUS
EFIAPI
PiCpuSmmEntry (
  IN EFI_HANDLE        ImageHandle,
  IN EFI_SYSTEM_TABLE  *SystemTable
  )
{
  EFI_STATUS                Status;
  EFI_MP_SERVICES_PROTOCOL  *MpServices;
  UINTN                     NumberOfEnabledProcessors;
  UINTN                     Index;
  VOID                      *Buffer;
  UINTN                     BufferPages;
  UINTN                     TileCodeSize;
  UINTN                     TileDataSize;
  UINTN                     TileSize;
  UINT8                     *Stacks;
  VOID                      *Registration;
  UINT32                    RegEax;
  UINT32                    RegEdx;
  UINTN                     FamilyId;
  UINTN                     ModelId;
  UINT32                    Cr3;

  //
  // Initialize Debug Agent to support source level debug in SMM code
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_SMM, NULL, NULL);

  //
  // Report the start of CPU SMM initialization.
  //
  REPORT_STATUS_CODE (
    EFI_PROGRESS_CODE,
    EFI_COMPUTING_UNIT_HOST_PROCESSOR | EFI_CU_HP_PC_SMM_INIT
    );

  //
  // Fix the segment address of the long-mode-switch jump
  //
  if (sizeof (UINTN) == sizeof (UINT64)) {
    gSmmJmpAddr.Segment = LONG_MODE_CODE_SEGMENT;
  }

  //
  // Find out SMRR Base and SMRR Size
  //
  FindSmramInfo (&mCpuHotPlugData.SmrrBase, &mCpuHotPlugData.SmrrSize);

  //
  // Get MP Services Protocol
  //
  Status = SystemTable->BootServices->LocateProtocol (&gEfiMpServiceProtocolGuid, NULL, (VOID **)&MpServices);
  ASSERT_EFI_ERROR (Status);

  //
  // Use the MP Services Protocol to retrieve the number of processors and number of enabled processors
  //
  Status = MpServices->GetNumberOfProcessors (MpServices, &mNumberOfCpus, &NumberOfEnabledProcessors);
  ASSERT_EFI_ERROR (Status);
  ASSERT (mNumberOfCpus <= PcdGet32 (PcdCpuMaxLogicalProcessorNumber));

  //
  // If CPU hot plug is supported, PcdCpuSmmEnableBspElection should be set to TRUE.
  // A constant BSP index makes no sense because it may be hot removed.
  //
  DEBUG_CODE (
    if (FeaturePcdGet (PcdCpuHotPlugSupport)) {

      ASSERT (FeaturePcdGet (PcdCpuSmmEnableBspElection));
    }
  );

  //
  // Save the PcdCpuSmmCodeAccessCheckEnable value into a global variable.
  //
  mSmmCodeAccessCheckEnable = PcdGetBool (PcdCpuSmmCodeAccessCheckEnable);
  DEBUG ((EFI_D_INFO, "PcdCpuSmmCodeAccessCheckEnable = %d\n", mSmmCodeAccessCheckEnable));

  //
  // If CPU hot plug is supported, we need to allocate resources for possibly hot-added processors
  //
  if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
    mMaxNumberOfCpus = PcdGet32 (PcdCpuMaxLogicalProcessorNumber);
  } else {
    mMaxNumberOfCpus = mNumberOfCpus;
  }
  gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus = mMaxNumberOfCpus;

  //
  // The CPU save state and code for the SMI entry point are tiled within an SMRAM
  // allocated buffer. The minimum size of this buffer for a uniprocessor system
  // is 32 KB, because the entry point is SMBASE + 32KB, and the CPU save state area
  // is just below SMBASE + 64KB. If more than one CPU is present in the platform,
  // then the SMI entry point and the CPU save state areas can be tiled to minimize
  // the total amount of SMRAM required for all the CPUs. The tile size can be computed
  // by adding the CPU save state size, any extra CPU specific context, and
  // the size of code that must be placed at the SMI entry point to transfer
  // control to a C function in the native SMM execution mode. This size is
  // rounded up to the nearest power of 2 to give the tile size for each CPU.
  // The total amount of memory required is the maximum number of CPUs that the
  // platform supports times the tile size. The picture below shows the tiling,
  // where m is the number of tiles that fit in 32KB.
  //
  //  +-----------------------------+ <-- 2^n offset from Base of allocated buffer
  //  |    CPU m+1 Save State       |
  //  +-----------------------------+
  //  |    CPU m+1 Extra Data       |
  //  +-----------------------------+
  //  |           Padding           |
  //  +-----------------------------+
  //  |    CPU 2m  SMI Entry        |
  //  +#############################+ <-- Base of allocated buffer + 64 KB
  //  |    CPU m-1 Save State       |
  //  +-----------------------------+
  //  |    CPU m-1 Extra Data       |
  //  +-----------------------------+
  //  |           Padding           |
  //  +-----------------------------+
  //  |    CPU 2m-1 SMI Entry       |
  //  +=============================+ <-- 2^n offset from Base of allocated buffer
  //  |   . . . . . . . . . . . .   |
  //  +=============================+ <-- 2^n offset from Base of allocated buffer
  //  |    CPU 2 Save State         |
  //  +-----------------------------+
  //  |    CPU 2 Extra Data         |
  //  +-----------------------------+
  //  |           Padding           |
  //  +-----------------------------+
  //  |    CPU m+1 SMI Entry        |
  //  +=============================+ <-- Base of allocated buffer + 32 KB
  //  |    CPU 1 Save State         |
  //  +-----------------------------+
  //  |    CPU 1 Extra Data         |
  //  +-----------------------------+
  //  |           Padding           |
  //  +-----------------------------+
  //  |    CPU m   SMI Entry        |
  //  +#############################+ <-- Base of allocated buffer + 32 KB == CPU 0 SMBASE + 64 KB
  //  |    CPU 0 Save State         |
  //  +-----------------------------+
  //  |    CPU 0 Extra Data         |
  //  +-----------------------------+
  //  |           Padding           |
  //  +-----------------------------+
  //  |    CPU m-1 SMI Entry        |
  //  +=============================+ <-- 2^n offset from Base of allocated buffer
  //  |   . . . . . . . . . . . .   |
  //  +=============================+ <-- 2^n offset from Base of allocated buffer
  //  |           Padding           |
  //  +-----------------------------+
  //  |    CPU 1   SMI Entry        |
  //  +=============================+ <-- 2^n offset from Base of allocated buffer
  //  |           Padding           |
  //  +-----------------------------+
  //  |    CPU 0   SMI Entry        |
  //  +#############################+ <-- Base of allocated buffer == CPU 0 SMBASE + 32 KB
  //

  //
  // Retrieve CPU Family
  //
  AsmCpuid (CPUID_VERSION_INFO, &RegEax, NULL, NULL, NULL);
  FamilyId = (RegEax >> 8) & 0xf;
  ModelId = (RegEax >> 4) & 0xf;
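  //
  // For Family 06h and 0Fh parts, fold in the Extended Model ID
  // (CPUID.01h:EAX[19:16]) as the high nibble of ModelId.
  //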
  if (FamilyId == 0x06 || FamilyId == 0x0f) {
    ModelId = ModelId | ((RegEax >> 12) & 0xf0);
  }

  RegEdx = 0;
  AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
  if (RegEax >= CPUID_EXTENDED_CPU_SIG) {
    AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);
  }
  //
  // Determine the mode of the CPU at the time an SMI occurs
  //   Intel(R) 64 and IA-32 Architectures Software Developer's Manual
  //   Volume 3C, Section 34.4.1.1
  //
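  // CPUID.80000001h:EDX[29] is the Intel 64 (long mode) capability bit; when it
  // is set, the processor saves its state using the 64-bit save state map layout.
  //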
  mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT;
  if ((RegEdx & BIT29) != 0) {
    mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;
  }
  if (FamilyId == 0x06) {
    if (ModelId == 0x17 || ModelId == 0x0f || ModelId == 0x1c) {
      mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;
    }
  }

  //
  // Compute the tile size of the buffer required to hold the CPU SMRAM Save State Map, the extra CPU
  // specific context that starts at SMBASE + SMM_PSD_OFFSET, and the SMI entry point.
  // This size is rounded up to the nearest power of 2.
  //
  TileCodeSize = GetSmiHandlerSize ();
  TileCodeSize = ALIGN_VALUE(TileCodeSize, SIZE_4KB);
  TileDataSize = (SMRAM_SAVE_STATE_MAP_OFFSET - SMM_PSD_OFFSET) + sizeof (SMRAM_SAVE_STATE_MAP);
  TileDataSize = ALIGN_VALUE(TileDataSize, SIZE_4KB);
  TileSize = TileDataSize + TileCodeSize - 1;
  TileSize = 2 * GetPowerOfTwo32 ((UINT32)TileSize);
  DEBUG ((EFI_D_INFO, "SMRAM TileSize = 0x%08x (0x%08x, 0x%08x)\n", TileSize, TileCodeSize, TileDataSize));
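  //
  // GetPowerOfTwo32() returns the highest set bit of its argument, so the two
  // lines above round TileDataSize + TileCodeSize up to the next power of two.
  // For example (illustrative values only): TileCodeSize = 0x1000 and
  // TileDataSize = 0x3000 give TileSize = 2 * GetPowerOfTwo32 (0x3FFF) = 0x4000.
  //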

  //
  // If the TileSize is larger than space available for the SMI Handler of
  // CPU[i], the extra CPU specific context of CPU[i+1], and the SMRAM Save
  // State Map of CPU[i+1], then ASSERT(). If this ASSERT() is triggered, then
  // the SMI Handler size must be reduced or the size of the extra CPU specific
  // context must be reduced.
  //
  ASSERT (TileSize <= (SMRAM_SAVE_STATE_MAP_OFFSET + sizeof (SMRAM_SAVE_STATE_MAP) - SMM_HANDLER_OFFSET));

  //
  // Allocate buffer for all of the tiles.
  //
  // Intel(R) 64 and IA-32 Architectures Software Developer's Manual
  // Volume 3C, Section 34.11 SMBASE Relocation
  //   For Pentium and Intel486 processors, the SMBASE values must be
  //   aligned on a 32-KByte boundary or the processor will enter shutdown
  //   state during the execution of a RSM instruction.
  //
  // Intel486 processors: FamilyId is 4
  // Pentium processors : FamilyId is 5
  //
  BufferPages = EFI_SIZE_TO_PAGES (SIZE_32KB + TileSize * (mMaxNumberOfCpus - 1));
  if ((FamilyId == 4) || (FamilyId == 5)) {
    Buffer = AllocateAlignedCodePages (BufferPages, SIZE_32KB);
  } else {
    Buffer = AllocateAlignedCodePages (BufferPages, SIZE_4KB);
  }
  ASSERT (Buffer != NULL);
  DEBUG ((EFI_D_INFO, "SMRAM SaveState Buffer (0x%08x, 0x%08x)\n", Buffer, EFI_PAGES_TO_SIZE(BufferPages)));

  //
  // Allocate buffer for pointers to array in SMM_CPU_PRIVATE_DATA.
  //
  gSmmCpuPrivate->ProcessorInfo = (EFI_PROCESSOR_INFORMATION *)AllocatePool (sizeof (EFI_PROCESSOR_INFORMATION) * mMaxNumberOfCpus);
  ASSERT (gSmmCpuPrivate->ProcessorInfo != NULL);

  gSmmCpuPrivate->Operation = (SMM_CPU_OPERATION *)AllocatePool (sizeof (SMM_CPU_OPERATION) * mMaxNumberOfCpus);
  ASSERT (gSmmCpuPrivate->Operation != NULL);

  gSmmCpuPrivate->CpuSaveStateSize = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);
  ASSERT (gSmmCpuPrivate->CpuSaveStateSize != NULL);

  gSmmCpuPrivate->CpuSaveState = (VOID **)AllocatePool (sizeof (VOID *) * mMaxNumberOfCpus);
  ASSERT (gSmmCpuPrivate->CpuSaveState != NULL);

  mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveStateSize = gSmmCpuPrivate->CpuSaveStateSize;
  mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveState = gSmmCpuPrivate->CpuSaveState;

  //
  // Allocate buffer for pointers to array in CPU_HOT_PLUG_DATA.
  //
  mCpuHotPlugData.ApicId = (UINT64 *)AllocatePool (sizeof (UINT64) * mMaxNumberOfCpus);
  ASSERT (mCpuHotPlugData.ApicId != NULL);
  mCpuHotPlugData.SmBase = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);
  ASSERT (mCpuHotPlugData.SmBase != NULL);
  mCpuHotPlugData.ArrayLength = (UINT32)mMaxNumberOfCpus;

  //
  // Retrieve APIC ID of each enabled processor from the MP Services protocol.
  // Also compute the SMBASE address, CPU Save State address, and CPU Save state
  // size for each CPU in the platform
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
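    //
    // Each tile starts at the SMI entry point (SMBASE + SMM_HANDLER_OFFSET), so
    // the logical SMBASE of CPU[Index] is SMM_HANDLER_OFFSET bytes below the
    // start of its tile; only the region from the entry point up through the
    // save state map needs to be backed by the allocated buffer.
    //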
    mCpuHotPlugData.SmBase[Index] = (UINTN)Buffer + Index * TileSize - SMM_HANDLER_OFFSET;
    gSmmCpuPrivate->CpuSaveStateSize[Index] = sizeof(SMRAM_SAVE_STATE_MAP);
    gSmmCpuPrivate->CpuSaveState[Index] = (VOID *)(mCpuHotPlugData.SmBase[Index] + SMRAM_SAVE_STATE_MAP_OFFSET);
    gSmmCpuPrivate->Operation[Index] = SmmCpuNone;

    if (Index < mNumberOfCpus) {
      Status = MpServices->GetProcessorInfo (MpServices, Index, &gSmmCpuPrivate->ProcessorInfo[Index]);
      ASSERT_EFI_ERROR (Status);
      mCpuHotPlugData.ApicId[Index] = gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId;

      DEBUG ((EFI_D_INFO, "CPU[%03x]  APIC ID=%04x  SMBASE=%08x  SaveState=%08x  Size=%08x\n",
        Index,
        (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId,
        mCpuHotPlugData.SmBase[Index],
        gSmmCpuPrivate->CpuSaveState[Index],
        gSmmCpuPrivate->CpuSaveStateSize[Index]
        ));
    } else {
      gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId = INVALID_APIC_ID;
      mCpuHotPlugData.ApicId[Index] = INVALID_APIC_ID;
    }
  }

  //
  // Allocate SMI stacks for all processors.
  //
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    //
    // Two more pages are allocated for each processor:
    // one is a guard page and the other is a known good stack.
    //
    // +-------------------------------------------+-----+-------------------------------------------+
    // | Known Good Stack | Guard Page | SMM Stack  | ... | Known Good Stack | Guard Page | SMM Stack  |
    // +-------------------------------------------+-----+-------------------------------------------+
    // |                                           |     |                                           |
    // |<-------------- Processor 0 -------------->|     |<-------------- Processor n -------------->|
    //
    mSmmStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)) + 2);
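    //
    // For example (illustrative value only): PcdCpuSmmStackSize = 0x2000 (2 pages)
    // gives mSmmStackSize = EFI_PAGES_TO_SIZE (2 + 2) = 16 KB per processor.
    //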
    Stacks = (UINT8 *) AllocatePages (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)) + 2));
    ASSERT (Stacks != NULL);
    mSmmStackArrayBase = (UINTN)Stacks;
    mSmmStackArrayEnd = mSmmStackArrayBase + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * mSmmStackSize - 1;
  } else {
    mSmmStackSize = PcdGet32 (PcdCpuSmmStackSize);
    Stacks = (UINT8 *) AllocatePages (EFI_SIZE_TO_PAGES (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * mSmmStackSize));
    ASSERT (Stacks != NULL);
  }

  //
  // Set SMI stack for SMM base relocation
  //
  gSmmInitStack = (UINTN) (Stacks + mSmmStackSize - sizeof (UINTN));

  //
  // Initialize IDT
  //
  InitializeSmmIdt ();

  //
  // Relocate SMM Base addresses to the ones allocated from SMRAM
  //
  mRebased = (BOOLEAN *)AllocateZeroPool (sizeof (BOOLEAN) * mMaxNumberOfCpus);
  ASSERT (mRebased != NULL);
  SmmRelocateBases ();

  //
  // Call hook for BSP to perform extra actions in normal mode after all
  // SMM base addresses have been relocated on all CPUs
  //
  SmmCpuFeaturesSmmRelocationComplete ();

  DEBUG ((DEBUG_INFO, "mXdSupported - 0x%x\n", mXdSupported));

  //
  // SMM Time initialization
  //
  InitializeSmmTimer ();

  //
  // Initialize MP globals
  //
  Cr3 = InitializeMpServiceData (Stacks, mSmmStackSize);

  //
  // Fill in SMM Reserved Regions
  //
  gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedStart = 0;
  gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedSize  = 0;

  //
  // Install the SMM Configuration Protocol onto a new handle on the handle database.
  // The entire SMM Configuration Protocol is allocated from SMRAM, so only a pointer
  // to an SMRAM address will be present in the handle database
  //
  Status = SystemTable->BootServices->InstallMultipleProtocolInterfaces (
             &gSmmCpuPrivate->SmmCpuHandle,
             &gEfiSmmConfigurationProtocolGuid, &gSmmCpuPrivate->SmmConfiguration,
             NULL
             );
  ASSERT_EFI_ERROR (Status);

  //
  // Install the SMM CPU Protocol into SMM protocol database
  //
  Status = gSmst->SmmInstallProtocolInterface (
             &mSmmCpuHandle,
             &gEfiSmmCpuProtocolGuid,
             EFI_NATIVE_INTERFACE,
             &mSmmCpu
             );
  ASSERT_EFI_ERROR (Status);

  //
  // Expose address of CPU Hot Plug Data structure if CPU hot plug is supported.
  //
  if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
    Status = PcdSet64S (PcdCpuHotPlugDataAddress, (UINT64)(UINTN)&mCpuHotPlugData);
    ASSERT_EFI_ERROR (Status);
  }

  //
  // Initialize SMM CPU Services Support
  //
  Status = InitializeSmmCpuServices (mSmmCpuHandle);
  ASSERT_EFI_ERROR (Status);

  //
  // Register SMM Ready To Lock Protocol notification
  //
  Status = gSmst->SmmRegisterProtocolNotify (
             &gEfiSmmReadyToLockProtocolGuid,
             SmmReadyToLockEventNotify,
             &Registration
             );
  ASSERT_EFI_ERROR (Status);

  //
  // Initialize SMM Profile feature
  //
  InitSmmProfile (Cr3);

  GetAcpiS3EnableFlag ();
  InitSmmS3ResumeState (Cr3);

  DEBUG ((EFI_D_INFO, "SMM CPU Module exit from SMRAM with EFI_SUCCESS\n"));

  return EFI_SUCCESS;
}

/**

  Find out SMRAM information including SMRR base and SMRR size.

  @param          SmrrBase          SMRR base
  @param          SmrrSize          SMRR size

**/
VOID
FindSmramInfo (
  OUT UINT32  *SmrrBase,
  OUT UINT32  *SmrrSize
  )
{
  EFI_STATUS                Status;
  UINTN                     Size;
  EFI_SMM_ACCESS2_PROTOCOL  *SmmAccess;
  EFI_SMRAM_DESCRIPTOR      *CurrentSmramRange;
  EFI_SMRAM_DESCRIPTOR      *SmramRanges;
  UINTN                     SmramRangeCount;
  UINTN                     Index;
  UINT64                    MaxSize;
  BOOLEAN                   Found;

  //
  // Get SMM Access Protocol
  //
  Status = gBS->LocateProtocol (&gEfiSmmAccess2ProtocolGuid, NULL, (VOID **)&SmmAccess);
  ASSERT_EFI_ERROR (Status);

  //
  // Get SMRAM information
  //
  Size = 0;
  Status = SmmAccess->GetCapabilities (SmmAccess, &Size, NULL);
  ASSERT (Status == EFI_BUFFER_TOO_SMALL);

  SmramRanges = (EFI_SMRAM_DESCRIPTOR *)AllocatePool (Size);
  ASSERT (SmramRanges != NULL);

  Status = SmmAccess->GetCapabilities (SmmAccess, &Size, SmramRanges);
  ASSERT_EFI_ERROR (Status);

  SmramRangeCount = Size / sizeof (EFI_SMRAM_DESCRIPTOR);

  //
  // Find the largest SMRAM range between 1MB and 4GB that is at least 256KB - 4KB in size
  //
  CurrentSmramRange = NULL;
  for (Index = 0, MaxSize = SIZE_256KB - EFI_PAGE_SIZE; Index < SmramRangeCount; Index++) {
    //
    // Skip any SMRAM region that is already allocated, needs testing, or needs ECC initialization
    //
    if ((SmramRanges[Index].RegionState & (EFI_ALLOCATED | EFI_NEEDS_TESTING | EFI_NEEDS_ECC_INITIALIZATION)) != 0) {
      continue;
    }

    if (SmramRanges[Index].CpuStart >= BASE_1MB) {
      if ((SmramRanges[Index].CpuStart + SmramRanges[Index].PhysicalSize) <= BASE_4GB) {
        if (SmramRanges[Index].PhysicalSize >= MaxSize) {
          MaxSize = SmramRanges[Index].PhysicalSize;
          CurrentSmramRange = &SmramRanges[Index];
        }
      }
    }
  }

  ASSERT (CurrentSmramRange != NULL);

  *SmrrBase = (UINT32)CurrentSmramRange->CpuStart;
  *SmrrSize = (UINT32)CurrentSmramRange->PhysicalSize;

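  //
  // Grow the selected range by repeatedly merging any SMRAM descriptor that is
  // physically contiguous with its current start or end, until no adjacent
  // descriptor remains.
  //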
  do {
    Found = FALSE;
    for (Index = 0; Index < SmramRangeCount; Index++) {
      if (SmramRanges[Index].CpuStart < *SmrrBase && *SmrrBase == (SmramRanges[Index].CpuStart + SmramRanges[Index].PhysicalSize)) {
        *SmrrBase = (UINT32)SmramRanges[Index].CpuStart;
        *SmrrSize = (UINT32)(*SmrrSize + SmramRanges[Index].PhysicalSize);
        Found = TRUE;
      } else if ((*SmrrBase + *SmrrSize) == SmramRanges[Index].CpuStart && SmramRanges[Index].PhysicalSize > 0) {
        *SmrrSize = (UINT32)(*SmrrSize + SmramRanges[Index].PhysicalSize);
        Found = TRUE;
      }
    }
  } while (Found);

  FreePool (SmramRanges);

  DEBUG ((EFI_D_INFO, "SMRR Base: 0x%x, SMRR Size: 0x%x\n", *SmrrBase, *SmrrSize));
}

/**
Configure SMM Code Access Check feature on an AP.
SMM Feature Control MSR will be locked after configuration.

@param[in,out] Buffer  Pointer to private data buffer.
**/
VOID
EFIAPI
ConfigSmmCodeAccessCheckOnCurrentProcessor (
  IN OUT VOID  *Buffer
  )
{
  UINTN   CpuIndex;
  UINT64  SmmFeatureControlMsr;
  UINT64  NewSmmFeatureControlMsr;

  //
  // Retrieve the CPU Index from the context passed in
  //
  CpuIndex = *(UINTN *)Buffer;

  //
  // Get the current SMM Feature Control MSR value
  //
  SmmFeatureControlMsr = SmmCpuFeaturesGetSmmRegister (CpuIndex, SmmRegFeatureControl);

  //
  // Compute the new SMM Feature Control MSR value
  //
  NewSmmFeatureControlMsr = SmmFeatureControlMsr;
  if (mSmmCodeAccessCheckEnable) {
    NewSmmFeatureControlMsr |= SMM_CODE_CHK_EN_BIT;
    if (FeaturePcdGet (PcdCpuSmmFeatureControlMsrLock)) {
      NewSmmFeatureControlMsr |= SMM_FEATURE_CONTROL_LOCK_BIT;
    }
  }

  //
  // Only set the SMM Feature Control MSR value if the new value is different than the current value
  //
  if (NewSmmFeatureControlMsr != SmmFeatureControlMsr) {
    SmmCpuFeaturesSetSmmRegister (CpuIndex, SmmRegFeatureControl, NewSmmFeatureControlMsr);
  }

  //
  // Release the spin lock used to serialize the updates to the SMM Feature Control MSR
  //
  ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);
}

/**
Configure SMM Code Access Check feature for all processors.
SMM Feature Control MSR will be locked after configuration.
**/
VOID
ConfigSmmCodeAccessCheck (
  VOID
  )
{
  UINTN       Index;
  EFI_STATUS  Status;

  //
  // Check to see if the Feature Control MSR is supported on this CPU
  //
  Index = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
  if (!SmmCpuFeaturesIsSmmRegisterSupported (Index, SmmRegFeatureControl)) {
    mSmmCodeAccessCheckEnable = FALSE;
    return;
  }

  //
  // Check to see if the CPU supports the SMM Code Access Check feature
  // Do not access this MSR unless the CPU supports the SmmRegFeatureControl
  //
  if ((AsmReadMsr64 (EFI_MSR_SMM_MCA_CAP) & SMM_CODE_ACCESS_CHK_BIT) == 0) {
    mSmmCodeAccessCheckEnable = FALSE;
    return;
  }

  //
  // Initialize the lock used to serialize the MSR programming in BSP and all APs
  //
  InitializeSpinLock (mConfigSmmCodeAccessCheckLock);

  //
  // Acquire Config SMM Code Access Check spin lock.  The BSP will release the
  // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
  //
  AcquireSpinLock (mConfigSmmCodeAccessCheckLock);

  //
  // Enable SMM Code Access Check feature on the BSP.
  //
  ConfigSmmCodeAccessCheckOnCurrentProcessor (&Index);

  //
  // Enable SMM Code Access Check feature for the APs.
  //
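  // The spin lock doubles as a completion flag for each AP: the BSP acquires it
  // before dispatching the AP, the AP releases it when it finishes, and the BSP
  // then re-acquires and releases it before moving on to the next AP.
  //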
  for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {
    if (Index != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {

      //
      // Acquire Config SMM Code Access Check spin lock.  The AP will release the
      // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
      //
      AcquireSpinLock (mConfigSmmCodeAccessCheckLock);

      //
      // Call SmmStartupThisAp() to enable SMM Code Access Check on an AP.
      //
      Status = gSmst->SmmStartupThisAp (ConfigSmmCodeAccessCheckOnCurrentProcessor, Index, &Index);
      ASSERT_EFI_ERROR (Status);

      //
      // Wait for the AP to release the Config SMM Code Access Check spin lock.
      //
      while (!AcquireSpinLockOrFail (mConfigSmmCodeAccessCheckLock)) {
        CpuPause ();
      }

      //
      // Release the Config SMM Code Access Check spin lock.
      //
      ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);
    }
  }
}

/**
  This API provides a way to allocate memory for page tables.

  This API can be called more than once to allocate memory for page tables.

  Allocates the number of 4KB pages of type EfiRuntimeServicesData and returns a pointer to the
  allocated buffer.  The buffer returned is aligned on a 4KB boundary.  If Pages is 0, then NULL
  is returned.  If there is not enough memory remaining to satisfy the request, then NULL is
  returned.

  @param  Pages                 The number of 4 KB pages to allocate.

  @return A pointer to the allocated buffer or NULL if allocation fails.

**/
VOID *
AllocatePageTableMemory (
  IN UINTN  Pages
  )
{
  VOID  *Buffer;

  Buffer = SmmCpuFeaturesAllocatePageTableMemory (Pages);
  if (Buffer != NULL) {
    return Buffer;
  }
  return AllocatePages (Pages);
}

/**
  Allocate pages for code.

  @param[in]  Pages Number of pages to be allocated.

  @return Allocated memory.
**/
VOID *
AllocateCodePages (
  IN UINTN  Pages
  )
{
  EFI_STATUS            Status;
  EFI_PHYSICAL_ADDRESS  Memory;

  if (Pages == 0) {
    return NULL;
  }

  Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);
  if (EFI_ERROR (Status)) {
    return NULL;
  }
  return (VOID *) (UINTN) Memory;
}

/**
  Allocate aligned pages for code.

  @param[in]  Pages     Number of pages to be allocated.
  @param[in]  Alignment The requested alignment of the allocation.
                        Must be a power of two.
                        If Alignment is zero, then byte alignment is used.

  @return Allocated memory.
**/
VOID *
AllocateAlignedCodePages (
  IN UINTN  Pages,
  IN UINTN  Alignment
  )
{
  EFI_STATUS            Status;
  EFI_PHYSICAL_ADDRESS  Memory;
  UINTN                 AlignedMemory;
  UINTN                 AlignmentMask;
  UINTN                 UnalignedPages;
  UINTN                 RealPages;

  //
  // Alignment must be a power of two or zero.
  //
  ASSERT ((Alignment & (Alignment - 1)) == 0);

  if (Pages == 0) {
    return NULL;
  }
  if (Alignment > EFI_PAGE_SIZE) {
    //
    // Calculate the total number of pages since alignment is larger than page size.
    //
    AlignmentMask = Alignment - 1;
    RealPages = Pages + EFI_SIZE_TO_PAGES (Alignment);
    //
    // Make sure that Pages plus EFI_SIZE_TO_PAGES (Alignment) does not overflow.
    //
    ASSERT (RealPages > Pages);
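    //
    // Over-allocate and then trim.  For example (illustrative values only):
    // Pages = 4 and Alignment = SIZE_32KB give RealPages = 4 + 8 = 12; the first
    // 32 KB aligned address inside the 12-page block becomes AlignedMemory, and
    // the unaligned head and tail pages are freed below.
    //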

    Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, RealPages, &Memory);
    if (EFI_ERROR (Status)) {
      return NULL;
    }
    AlignedMemory = ((UINTN) Memory + AlignmentMask) & ~AlignmentMask;
    UnalignedPages = EFI_SIZE_TO_PAGES (AlignedMemory - (UINTN) Memory);
    if (UnalignedPages > 0) {
      //
      // Free first unaligned page(s).
      //
      Status = gSmst->SmmFreePages (Memory, UnalignedPages);
      ASSERT_EFI_ERROR (Status);
    }
    Memory = (EFI_PHYSICAL_ADDRESS) (AlignedMemory + EFI_PAGES_TO_SIZE (Pages));
    UnalignedPages = RealPages - Pages - UnalignedPages;
    if (UnalignedPages > 0) {
      //
      // Free last unaligned page(s).
      //
      Status = gSmst->SmmFreePages (Memory, UnalignedPages);
      ASSERT_EFI_ERROR (Status);
    }
  } else {
    //
    // Do not over-allocate pages in this case.
    //
    Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);
    if (EFI_ERROR (Status)) {
      return NULL;
    }
    AlignedMemory = (UINTN) Memory;
  }
  return (VOID *) AlignedMemory;
}

/**
  Perform the remaining tasks.

**/
VOID
PerformRemainingTasks (
  VOID
  )
{
  if (mSmmReadyToLock) {
    //
    // Start SMM Profile feature
    //
    if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
      SmmProfileStart ();
    }
    //
    // Create a mix of 2MB and 4KB page tables. Mark some memory ranges as not-present and execute-disable.
    //
    InitPaging ();

    //
    // Mark critical regions as read-only in the page table
    //
    SetMemMapAttributes ();

    //
    // Outside SMRAM, only the SMM communication buffer and MMIO are mapped.
    //
    SetUefiMemMapAttributes ();

    //
    // Set the page table itself to be read-only
    //
    SetPageTableAttributes ();
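    //
    // Note: the page table pages are made read-only only after the preceding
    // steps have finished editing the mappings.
    //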

    //
    // Configure SMM Code Access Check feature if available.
    //
    ConfigSmmCodeAccessCheck ();

    SmmCpuFeaturesCompleteSmmReadyToLock ();

    //
    // Clear the SMM ready to lock flag
    //
    mSmmReadyToLock = FALSE;
  }
}

/**
  Perform the pre tasks.

**/
VOID
PerformPreTasks (
  VOID
  )
{
  RestoreSmmConfigurationInS3 ();
}