1 /** @file
2 Agent Module to load other modules to deploy SMM Entry Vector for X86 CPU.
3
4 Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
5 Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
6
7 This program and the accompanying materials
8 are licensed and made available under the terms and conditions of the BSD License
9 which accompanies this distribution. The full text of the license may be found at
10 http://opensource.org/licenses/bsd-license.php
11
12 THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
13 WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
14
15 **/
16
17 #include "PiSmmCpuDxeSmm.h"
18
19 //
20 // SMM CPU Private Data structure that contains SMM Configuration Protocol
21 // along with its supporting fields.
22 //
23 SMM_CPU_PRIVATE_DATA mSmmCpuPrivateData = {
24 SMM_CPU_PRIVATE_DATA_SIGNATURE, // Signature
25 NULL, // SmmCpuHandle
26 NULL, // Pointer to ProcessorInfo array
27 NULL, // Pointer to Operation array
28 NULL, // Pointer to CpuSaveStateSize array
29 NULL, // Pointer to CpuSaveState array
30 { {0} }, // SmmReservedSmramRegion
31 {
32 SmmStartupThisAp, // SmmCoreEntryContext.SmmStartupThisAp
33 0, // SmmCoreEntryContext.CurrentlyExecutingCpu
34 0, // SmmCoreEntryContext.NumberOfCpus
35 NULL, // SmmCoreEntryContext.CpuSaveStateSize
36 NULL // SmmCoreEntryContext.CpuSaveState
37 },
38 NULL, // SmmCoreEntry
39 {
40 mSmmCpuPrivateData.SmmReservedSmramRegion, // SmmConfiguration.SmramReservedRegions
41 RegisterSmmEntry // SmmConfiguration.RegisterSmmEntry
42 },
43 };
44
45 CPU_HOT_PLUG_DATA mCpuHotPlugData = {
46 CPU_HOT_PLUG_DATA_REVISION_1, // Revision
47 0, // Array Length of SmBase and APIC ID
48 NULL, // Pointer to APIC ID array
49 NULL, // Pointer to SMBASE array
50 0, // Reserved
51 0, // SmrrBase
52 0 // SmrrSize
53 };
54
55 //
56 // Global pointer used to access mSmmCpuPrivateData from outside and inside SMM
57 //
58 SMM_CPU_PRIVATE_DATA *gSmmCpuPrivate = &mSmmCpuPrivateData;
59
60 //
61 // SMM Relocation variables
62 //
63 volatile BOOLEAN *mRebased;
64 volatile BOOLEAN mIsBsp;
65
66 ///
67 /// Handle for the SMM CPU Protocol
68 ///
69 EFI_HANDLE mSmmCpuHandle = NULL;
70
71 ///
72 /// SMM CPU Protocol instance
73 ///
74 EFI_SMM_CPU_PROTOCOL mSmmCpu = {
75 SmmReadSaveState,
76 SmmWriteSaveState
77 };
78
79 EFI_CPU_INTERRUPT_HANDLER mExternalVectorTable[EXCEPTION_VECTOR_NUMBER];
80
81 //
82 // SMM stack information
83 //
84 UINTN mSmmStackArrayBase;
85 UINTN mSmmStackArrayEnd;
86 UINTN mSmmStackSize;
87
88 UINTN mMaxNumberOfCpus = 1;
89 UINTN mNumberOfCpus = 1;
90
91 //
92 // SMM ready to lock flag
93 //
94 BOOLEAN mSmmReadyToLock = FALSE;
95
96 //
97 // Global used to cache PCD for SMM Code Access Check enable
98 //
99 BOOLEAN mSmmCodeAccessCheckEnable = FALSE;
100
101 //
102 // Global copy of the PcdPteMemoryEncryptionAddressOrMask
103 //
104 UINT64 mAddressEncMask = 0;
105
106 //
107 // Spin lock used to serialize setting of SMM Code Access Check feature
108 //
109 SPIN_LOCK *mConfigSmmCodeAccessCheckLock = NULL;
110
111 /**
112 Initialize the IDT to set up exception handlers for SMM.
113
114 **/
115 VOID
116 InitializeSmmIdt (
117 VOID
118 )
119 {
120 EFI_STATUS Status;
121 BOOLEAN InterruptState;
122 IA32_DESCRIPTOR DxeIdtr;
123
124 //
125 // There are 32 (not 255) entries in the SMM IDT because only processor-
126 // generated exceptions are handled.
127 //
128 gcSmiIdtr.Limit = (sizeof(IA32_IDT_GATE_DESCRIPTOR) * 32) - 1;
129 //
130 // Allocate a page-aligned IDT because it might later be set read-only.
131 //
132 gcSmiIdtr.Base = (UINTN)AllocateCodePages (EFI_SIZE_TO_PAGES(gcSmiIdtr.Limit + 1));
133 ASSERT (gcSmiIdtr.Base != 0);
134 ZeroMem ((VOID *)gcSmiIdtr.Base, gcSmiIdtr.Limit + 1);
135
136 //
137 // Disable interrupts and save the DXE IDT
138 //
139 InterruptState = SaveAndDisableInterrupts ();
140 AsmReadIdtr (&DxeIdtr);
141 //
142 // Load the temporary SMM IDT
143 //
144 AsmWriteIdtr (&gcSmiIdtr);
145 //
146 // Set up the default SMM exception handlers; the SMM IDT
147 // will be updated and saved in gcSmiIdtr
148 //
149 Status = InitializeCpuExceptionHandlers (NULL);
150 ASSERT_EFI_ERROR (Status);
151 //
152 // Restore the DXE IDT and the CPU interrupt state
153 //
154 AsmWriteIdtr ((IA32_DESCRIPTOR *) &DxeIdtr);
155 SetInterruptState (InterruptState);
156 }
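//
// Editorial note (worked example, not part of the original module): on X64,
// where IA32_IDT_GATE_DESCRIPTOR is 16 bytes, the code above sets
// gcSmiIdtr.Limit to (16 * 32) - 1 = 0x1FF, so the whole SMM IDT fits in the
// single 4KB code page allocated for it.
//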
157
158 /**
159 Find the module that contains the given instruction pointer and print its name.
160
161 @param CallerIpAddress Caller instruction pointer.
162
163 **/
164 VOID
165 DumpModuleInfoByIp (
166 IN UINTN CallerIpAddress
167 )
168 {
169 UINTN Pe32Data;
170 EFI_IMAGE_DOS_HEADER *DosHdr;
171 EFI_IMAGE_OPTIONAL_HEADER_PTR_UNION Hdr;
172 VOID *PdbPointer;
173 UINT64 DumpIpAddress;
174
175 //
176 // Find Image Base
177 //
178 Pe32Data = CallerIpAddress & ~(SIZE_4KB - 1);
179 while (Pe32Data != 0) {
180 DosHdr = (EFI_IMAGE_DOS_HEADER *) Pe32Data;
181 if (DosHdr->e_magic == EFI_IMAGE_DOS_SIGNATURE) {
182 //
183 // DOS image header is present, so read the PE header after the DOS image header.
184 //
185 Hdr.Pe32 = (EFI_IMAGE_NT_HEADERS32 *)(Pe32Data + (UINTN) ((DosHdr->e_lfanew) & 0x0ffff));
186 //
187 // Make sure PE header address does not overflow and is less than the initial address.
188 //
189 if (((UINTN)Hdr.Pe32 > Pe32Data) && ((UINTN)Hdr.Pe32 < CallerIpAddress)) {
190 if (Hdr.Pe32->Signature == EFI_IMAGE_NT_SIGNATURE) {
191 //
192 // It's a PE image.
193 //
194 break;
195 }
196 }
197 }
198
199 //
200 // Image base not found yet; check the previous aligned address
201 //
202 Pe32Data -= SIZE_4KB;
203 }
204
205 DumpIpAddress = CallerIpAddress;
206 DEBUG ((EFI_D_ERROR, "It is invoked from the instruction before IP(0x%lx)", DumpIpAddress));
207
208 if (Pe32Data != 0) {
209 PdbPointer = PeCoffLoaderGetPdbPointer ((VOID *) Pe32Data);
210 if (PdbPointer != NULL) {
211 DEBUG ((EFI_D_ERROR, " in module (%a)", PdbPointer));
212 }
213 }
214 }
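//
// Editorial note (hypothetical example): if CallerIpAddress is 0x7F123456 and
// the containing image was loaded at 0x7F100000, the loop above probes
// 0x7F123000, 0x7F122000, ... downwards in 4KB steps until it reaches
// 0x7F100000, where the DOS ('MZ') and PE signatures are found, and then the
// module name taken from the image's PDB pointer is printed.
//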
215
216 /**
217 Read information from the CPU save state.
218
219 @param This EFI_SMM_CPU_PROTOCOL instance
220 @param Width The number of bytes to read from the CPU save state.
221 @param Register Specifies the CPU register to read from the save state.
222 @param CpuIndex Specifies the zero-based index of the CPU save state.
223 @param Buffer Upon return, this holds the CPU register value read from the save state.
224
225 @retval EFI_SUCCESS The register was read from the save state.
226 @retval EFI_NOT_FOUND The register is not defined for the save state of the processor.
227 @retval EFI_INVALID_PARAMETER Buffer is NULL, or CpuIndex is out of range.
228
229 **/
230 EFI_STATUS
231 EFIAPI
232 SmmReadSaveState (
233 IN CONST EFI_SMM_CPU_PROTOCOL *This,
234 IN UINTN Width,
235 IN EFI_SMM_SAVE_STATE_REGISTER Register,
236 IN UINTN CpuIndex,
237 OUT VOID *Buffer
238 )
239 {
240 EFI_STATUS Status;
241
242 //
243 // Retrieve pointer to the specified CPU's SMM Save State buffer
244 //
245 if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {
246 return EFI_INVALID_PARAMETER;
247 }
248
249 //
250 // Check for special EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID
251 //
252 if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {
253 //
254 // The pseudo-register only supports the 64-bit size specified by Width.
255 //
256 if (Width != sizeof (UINT64)) {
257 return EFI_INVALID_PARAMETER;
258 }
259 //
260 // If the processor is in SMM at the time the SMI occurred,
261 // the pseudo register value for EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID is returned in Buffer.
262 // Otherwise, EFI_NOT_FOUND is returned.
263 //
264 if (*(mSmmMpSyncData->CpuData[CpuIndex].Present)) {
265 *(UINT64 *)Buffer = gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId;
266 return EFI_SUCCESS;
267 } else {
268 return EFI_NOT_FOUND;
269 }
270 }
271
272 if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
273 return EFI_INVALID_PARAMETER;
274 }
275
276 Status = SmmCpuFeaturesReadSaveStateRegister (CpuIndex, Register, Width, Buffer);
277 if (Status == EFI_UNSUPPORTED) {
278 Status = ReadSaveStateRegister (CpuIndex, Register, Width, Buffer);
279 }
280 return Status;
281 }
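//
// Editorial usage sketch (hedged, not part of the original module): once the
// protocol instance mSmmCpu is installed later in this file, a hypothetical
// SMI handler could read RAX of the CPU it is running on like this:
//
//   UINT64      Rax;
//   EFI_STATUS  SketchStatus;
//
//   SketchStatus = mSmmCpu.ReadSaveState (
//                    &mSmmCpu,
//                    sizeof (UINT64),
//                    EFI_SMM_SAVE_STATE_REGISTER_RAX,
//                    gSmst->CurrentlyExecutingCpu,
//                    &Rax
//                    );
//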
282
283 /**
284 Write data to the CPU save state.
285
286 @param This EFI_SMM_CPU_PROTOCOL instance
287 @param Width The number of bytes to write to the CPU save state.
288 @param Register Specifies the CPU register to write to the save state.
289 @param CpuIndex Specifies the zero-based index of the CPU save state
290 @param Buffer Upon entry, this holds the new CPU register value.
291
292 @retval EFI_SUCCESS The register was written to the save state.
293 @retval EFI_NOT_FOUND The register is not defined for the save state of the processor.
294 @retval EFI_INVALID_PARAMETER CpuIndex or Width is not correct.
295
296 **/
297 EFI_STATUS
298 EFIAPI
299 SmmWriteSaveState (
300 IN CONST EFI_SMM_CPU_PROTOCOL *This,
301 IN UINTN Width,
302 IN EFI_SMM_SAVE_STATE_REGISTER Register,
303 IN UINTN CpuIndex,
304 IN CONST VOID *Buffer
305 )
306 {
307 EFI_STATUS Status;
308
309 //
310 // Retrieve pointer to the specified CPU's SMM Save State buffer
311 //
312 if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {
313 return EFI_INVALID_PARAMETER;
314 }
315
316 //
317 // Writes to EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID are ignored
318 //
319 if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {
320 return EFI_SUCCESS;
321 }
322
323 if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
324 return EFI_INVALID_PARAMETER;
325 }
326
327 Status = SmmCpuFeaturesWriteSaveStateRegister (CpuIndex, Register, Width, Buffer);
328 if (Status == EFI_UNSUPPORTED) {
329 Status = WriteSaveStateRegister (CpuIndex, Register, Width, Buffer);
330 }
331 return Status;
332 }
333
334
335 /**
336 C function for the SMI handler used to change each processor's SMBASE register.
337
338 **/
339 VOID
340 EFIAPI
341 SmmInitHandler (
342 VOID
343 )
344 {
345 UINT32 ApicId;
346 UINTN Index;
347
348 //
349 // Update SMM IDT entries' code segment and load IDT
350 //
351 AsmWriteIdtr (&gcSmiIdtr);
352 ApicId = GetApicId ();
353
354 ASSERT (mNumberOfCpus <= mMaxNumberOfCpus);
355
356 for (Index = 0; Index < mNumberOfCpus; Index++) {
357 if (ApicId == (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
358 //
359 // Initialize SMM specific features on the currently executing CPU
360 //
361 SmmCpuFeaturesInitializeProcessor (
362 Index,
363 mIsBsp,
364 gSmmCpuPrivate->ProcessorInfo,
365 &mCpuHotPlugData
366 );
367
368 if (!mSmmS3Flag) {
369 //
370 // Check XD and BTS features on each processor on normal boot
371 //
372 CheckFeatureSupported ();
373 }
374
375 if (mIsBsp) {
376 //
377 // BSP rebase is already done above.
378 // Initialize private data during S3 resume
379 //
380 InitializeMpSyncData ();
381 }
382
383 //
384 // Hook return after RSM to set SMM re-based flag
385 //
386 SemaphoreHook (Index, &mRebased[Index]);
387
388 return;
389 }
390 }
391 ASSERT (FALSE);
392 }
393
394 /**
395 Relocate the SMBASE for each processor.
396
397 Executed on first boot and on all S3 resumes.
398
399 **/
400 VOID
401 EFIAPI
402 SmmRelocateBases (
403 VOID
404 )
405 {
406 UINT8 BakBuf[BACK_BUF_SIZE];
407 SMRAM_SAVE_STATE_MAP BakBuf2;
408 SMRAM_SAVE_STATE_MAP *CpuStatePtr;
409 UINT8 *U8Ptr;
410 UINT32 ApicId;
411 UINTN Index;
412 UINTN BspIndex;
413
414 //
415 // Make sure the reserved size is large enough for procedure SmmInitTemplate.
416 //
417 ASSERT (sizeof (BakBuf) >= gcSmmInitSize);
418
419 //
420 // Patch ASM code template with current CR0, CR3, and CR4 values
421 //
422 gSmmCr0 = (UINT32)AsmReadCr0 ();
423 gSmmCr3 = (UINT32)AsmReadCr3 ();
424 gSmmCr4 = (UINT32)AsmReadCr4 ();
425
426 //
427 // Patch GDTR for SMM base relocation
428 //
429 gcSmiInitGdtr.Base = gcSmiGdtr.Base;
430 gcSmiInitGdtr.Limit = gcSmiGdtr.Limit;
431
432 U8Ptr = (UINT8*)(UINTN)(SMM_DEFAULT_SMBASE + SMM_HANDLER_OFFSET);
433 CpuStatePtr = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);
434
435 //
436 // Backup original contents at address 0x38000
437 //
438 CopyMem (BakBuf, U8Ptr, sizeof (BakBuf));
439 CopyMem (&BakBuf2, CpuStatePtr, sizeof (BakBuf2));
440
441 //
442 // Load image for relocation
443 //
444 CopyMem (U8Ptr, gcSmmInitTemplate, gcSmmInitSize);
445
446 //
447 // Retrieve the local APIC ID of current processor
448 //
449 ApicId = GetApicId ();
450
451 //
452 // Relocate SM bases for all APs
453 // This is the APs' first SMI; rebasing is done here, and each AP's default SMI handler is overridden by gcSmmInitTemplate
454 //
455 mIsBsp = FALSE;
456 BspIndex = (UINTN)-1;
457 for (Index = 0; Index < mNumberOfCpus; Index++) {
458 mRebased[Index] = FALSE;
459 if (ApicId != (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
460 SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
461 //
462 // Wait for this AP to finish its 1st SMI
463 //
464 while (!mRebased[Index]);
465 } else {
466 //
467 // The BSP will be relocated later
468 //
469 BspIndex = Index;
470 }
471 }
472
473 //
474 // Relocate BSP's SMM base
475 //
476 ASSERT (BspIndex != (UINTN)-1);
477 mIsBsp = TRUE;
478 SendSmiIpi (ApicId);
479 //
480 // Wait for the BSP to finish its 1st SMI
481 //
482 while (!mRebased[BspIndex]);
483
484 //
485 // Restore contents at address 0x38000
486 //
487 CopyMem (CpuStatePtr, &BakBuf2, sizeof (BakBuf2));
488 CopyMem (U8Ptr, BakBuf, sizeof (BakBuf));
489 }
490
491 /**
492 SMM Ready To Lock event notification handler.
493
494 The CPU S3 data is copied to SMRAM for security and mSmmReadyToLock is set to
495 perform additional lock actions that must be performed from SMM on the next SMI.
496
497 @param[in] Protocol Points to the protocol's unique identifier.
498 @param[in] Interface Points to the interface instance.
499 @param[in] Handle The handle on which the interface was installed.
500
501 @retval EFI_SUCCESS Notification handler runs successfully.
502 **/
503 EFI_STATUS
504 EFIAPI
505 SmmReadyToLockEventNotify (
506 IN CONST EFI_GUID *Protocol,
507 IN VOID *Interface,
508 IN EFI_HANDLE Handle
509 )
510 {
511 GetAcpiCpuData ();
512
513 //
514 // Cache a copy of UEFI memory map before we start profiling feature.
515 //
516 GetUefiMemoryMap ();
517
518 //
519 // Set SMM ready to lock flag and return
520 //
521 mSmmReadyToLock = TRUE;
522 return EFI_SUCCESS;
523 }
524
525 /**
526 The module Entry Point of the CPU SMM driver.
527
528 @param ImageHandle The firmware allocated handle for the EFI image.
529 @param SystemTable A pointer to the EFI System Table.
530
531 @retval EFI_SUCCESS The entry point is executed successfully.
532 @retval Other Some error occurs when executing this entry point.
533
534 **/
535 EFI_STATUS
536 EFIAPI
537 PiCpuSmmEntry (
538 IN EFI_HANDLE ImageHandle,
539 IN EFI_SYSTEM_TABLE *SystemTable
540 )
541 {
542 EFI_STATUS Status;
543 EFI_MP_SERVICES_PROTOCOL *MpServices;
544 UINTN NumberOfEnabledProcessors;
545 UINTN Index;
546 VOID *Buffer;
547 UINTN BufferPages;
548 UINTN TileCodeSize;
549 UINTN TileDataSize;
550 UINTN TileSize;
551 UINT8 *Stacks;
552 VOID *Registration;
553 UINT32 RegEax;
554 UINT32 RegEdx;
555 UINTN FamilyId;
556 UINTN ModelId;
557 UINT32 Cr3;
558
559 //
560 // Initialize Debug Agent to support source level debug in SMM code
561 //
562 InitializeDebugAgent (DEBUG_AGENT_INIT_SMM, NULL, NULL);
563
564 //
565 // Report the start of CPU SMM initialization.
566 //
567 REPORT_STATUS_CODE (
568 EFI_PROGRESS_CODE,
569 EFI_COMPUTING_UNIT_HOST_PROCESSOR | EFI_CU_HP_PC_SMM_INIT
570 );
571
572 //
573 // Fix segment address of the long-mode-switch jump
574 //
575 if (sizeof (UINTN) == sizeof (UINT64)) {
576 gSmmJmpAddr.Segment = LONG_MODE_CODE_SEGMENT;
577 }
578
579 //
580 // Find out SMRR Base and SMRR Size
581 //
582 FindSmramInfo (&mCpuHotPlugData.SmrrBase, &mCpuHotPlugData.SmrrSize);
583
584 //
585 // Get MP Services Protocol
586 //
587 Status = SystemTable->BootServices->LocateProtocol (&gEfiMpServiceProtocolGuid, NULL, (VOID **)&MpServices);
588 ASSERT_EFI_ERROR (Status);
589
590 //
591 // Use MP Services Protocol to retrieve the number of processors and number of enabled processors
592 //
593 Status = MpServices->GetNumberOfProcessors (MpServices, &mNumberOfCpus, &NumberOfEnabledProcessors);
594 ASSERT_EFI_ERROR (Status);
595 ASSERT (mNumberOfCpus <= PcdGet32 (PcdCpuMaxLogicalProcessorNumber));
596
597 //
598 // If CPU hot plug is supported, PcdCpuSmmEnableBspElection should be set to TRUE.
599 // A constant BSP index makes no sense because the BSP may be hot removed.
600 //
601 DEBUG_CODE (
602 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
603
604 ASSERT (FeaturePcdGet (PcdCpuSmmEnableBspElection));
605 }
606 );
607
608 //
609 // Save the PcdCpuSmmCodeAccessCheckEnable value into a global variable.
610 //
611 mSmmCodeAccessCheckEnable = PcdGetBool (PcdCpuSmmCodeAccessCheckEnable);
612 DEBUG ((EFI_D_INFO, "PcdCpuSmmCodeAccessCheckEnable = %d\n", mSmmCodeAccessCheckEnable));
613
614 //
615 // Save the PcdPteMemoryEncryptionAddressOrMask value into a global variable.
616 // Make sure AddressEncMask is confined to the smallest supported address field.
617 //
618 mAddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;
619 DEBUG ((EFI_D_INFO, "mAddressEncMask = 0x%lx\n", mAddressEncMask));
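//
// Editorial note (hedged): mAddressEncMask carries the memory encryption bit
// taken from PcdPteMemoryEncryptionAddressOrMask. When this driver later
// builds its SMM page tables, the mask is OR-ed into the physical address
// stored in each page table entry, conceptually:
//
//   PageEntry = PhysicalAddress | mAddressEncMask | AttributeBits;
//
// "AttributeBits" is a placeholder name used only for this illustration.
//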
620
621 //
622 // If CPU hot plug is supported, allocate resources for processors that may be hot added
623 //
624 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
625 mMaxNumberOfCpus = PcdGet32 (PcdCpuMaxLogicalProcessorNumber);
626 } else {
627 mMaxNumberOfCpus = mNumberOfCpus;
628 }
629 gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus = mMaxNumberOfCpus;
630
631 //
632 // The CPU save state and code for the SMI entry point are tiled within an SMRAM
633 // allocated buffer. The minimum size of this buffer for a uniprocessor system
634 // is 32 KB, because the entry point is at SMBASE + 32KB and the CPU save state area is
635 // just below SMBASE + 64KB. If more than one CPU is present in the platform,
636 // then the SMI entry point and the CPU save state areas can be tiled to minimize
637 // the total amount of SMRAM required for all the CPUs. The tile size can be computed
638 // by adding the CPU save state size, any extra CPU specific context, and
639 // the size of code that must be placed at the SMI entry point to transfer
640 // control to a C function in the native SMM execution mode. This size is
641 // rounded up to the nearest power of 2 to give the tile size for each CPU.
642 // The total amount of memory required is the maximum number of CPUs that the
643 // platform supports times the tile size. The picture below shows the tiling,
644 // where m is the number of tiles that fit in 32KB.
645 //
646 // +-----------------------------+ <-- 2^n offset from Base of allocated buffer
647 // | CPU m+1 Save State |
648 // +-----------------------------+
649 // | CPU m+1 Extra Data |
650 // +-----------------------------+
651 // | Padding |
652 // +-----------------------------+
653 // | CPU 2m SMI Entry |
654 // +#############################+ <-- Base of allocated buffer + 64 KB
655 // | CPU m-1 Save State |
656 // +-----------------------------+
657 // | CPU m-1 Extra Data |
658 // +-----------------------------+
659 // | Padding |
660 // +-----------------------------+
661 // | CPU 2m-1 SMI Entry |
662 // +=============================+ <-- 2^n offset from Base of allocated buffer
663 // | . . . . . . . . . . . . |
664 // +=============================+ <-- 2^n offset from Base of allocated buffer
665 // | CPU 2 Save State |
666 // +-----------------------------+
667 // | CPU 2 Extra Data |
668 // +-----------------------------+
669 // | Padding |
670 // +-----------------------------+
671 // | CPU m+1 SMI Entry |
672 // +=============================+ <-- Base of allocated buffer + 32 KB
673 // | CPU 1 Save State |
674 // +-----------------------------+
675 // | CPU 1 Extra Data |
676 // +-----------------------------+
677 // | Padding |
678 // +-----------------------------+
679 // | CPU m SMI Entry |
680 // +#############################+ <-- Base of allocated buffer + 32 KB == CPU 0 SMBASE + 64 KB
681 // | CPU 0 Save State |
682 // +-----------------------------+
683 // | CPU 0 Extra Data |
684 // +-----------------------------+
685 // | Padding |
686 // +-----------------------------+
687 // | CPU m-1 SMI Entry |
688 // +=============================+ <-- 2^n offset from Base of allocated buffer
689 // | . . . . . . . . . . . . |
690 // +=============================+ <-- 2^n offset from Base of allocated buffer
691 // | Padding |
692 // +-----------------------------+
693 // | CPU 1 SMI Entry |
694 // +=============================+ <-- 2^n offset from Base of allocated buffer
695 // | Padding |
696 // +-----------------------------+
697 // | CPU 0 SMI Entry |
698 // +#############################+ <-- Base of allocated buffer == CPU 0 SMBASE + 32 KB
699 //
700
701 //
702 // Retrieve CPU Family
703 //
704 AsmCpuid (CPUID_VERSION_INFO, &RegEax, NULL, NULL, NULL);
705 FamilyId = (RegEax >> 8) & 0xf;
706 ModelId = (RegEax >> 4) & 0xf;
707 if (FamilyId == 0x06 || FamilyId == 0x0f) {
708 ModelId = ModelId | ((RegEax >> 12) & 0xf0);
709 }
710
711 RegEdx = 0;
712 AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
713 if (RegEax >= CPUID_EXTENDED_CPU_SIG) {
714 AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);
715 }
716 //
717 // Determine the mode of the CPU at the time an SMI occurs
718 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual
719 // Volume 3C, Section 34.4.1.1
720 //
721 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT;
722 if ((RegEdx & BIT29) != 0) {
723 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;
724 }
725 if (FamilyId == 0x06) {
726 if (ModelId == 0x17 || ModelId == 0x0f || ModelId == 0x1c) {
727 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;
728 }
729 }
730
731 //
732 // Compute tile size of buffer required to hold the CPU SMRAM Save State Map, extra CPU
733 // specific context, which starts at SMBASE + SMM_PSD_OFFSET, and the SMI entry point.
734 // This size is rounded up to nearest power of 2.
735 //
736 TileCodeSize = GetSmiHandlerSize ();
737 TileCodeSize = ALIGN_VALUE(TileCodeSize, SIZE_4KB);
738 TileDataSize = (SMRAM_SAVE_STATE_MAP_OFFSET - SMM_PSD_OFFSET) + sizeof (SMRAM_SAVE_STATE_MAP);
739 TileDataSize = ALIGN_VALUE(TileDataSize, SIZE_4KB);
740 TileSize = TileDataSize + TileCodeSize - 1;
741 TileSize = 2 * GetPowerOfTwo32 ((UINT32)TileSize);
742 DEBUG ((EFI_D_INFO, "SMRAM TileSize = 0x%08x (0x%08x, 0x%08x)\n", TileSize, TileCodeSize, TileDataSize));
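//
// Editorial worked example (hypothetical sizes): with TileCodeSize rounded up
// to 0x1000 and TileDataSize rounded up to 0x3000, the two statements above
// give TileSize = 2 * GetPowerOfTwo32 (0x3FFF) = 2 * 0x2000 = 0x4000, i.e. the
// smallest power of two that can hold TileDataSize + TileCodeSize.
//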
743
744 //
745 // If the TileSize is larger than space available for the SMI Handler of
746 // CPU[i], the extra CPU specific context of CPU[i+1], and the SMRAM Save
747 // State Map of CPU[i+1], then ASSERT(). If this ASSERT() is triggered, then
748 // the SMI Handler size must be reduced or the size of the extra CPU specific
749 // context must be reduced.
750 //
751 ASSERT (TileSize <= (SMRAM_SAVE_STATE_MAP_OFFSET + sizeof (SMRAM_SAVE_STATE_MAP) - SMM_HANDLER_OFFSET));
752
753 //
754 // Allocate buffer for all of the tiles.
755 //
756 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual
757 // Volume 3C, Section 34.11 SMBASE Relocation
758 // For Pentium and Intel486 processors, the SMBASE values must be
759 // aligned on a 32-KByte boundary or the processor will enter shutdown
760 // state during the execution of a RSM instruction.
761 //
762 // Intel486 processors: FamilyId is 4
763 // Pentium processors : FamilyId is 5
764 //
765 BufferPages = EFI_SIZE_TO_PAGES (SIZE_32KB + TileSize * (mMaxNumberOfCpus - 1));
766 if ((FamilyId == 4) || (FamilyId == 5)) {
767 Buffer = AllocateAlignedCodePages (BufferPages, SIZE_32KB);
768 } else {
769 Buffer = AllocateAlignedCodePages (BufferPages, SIZE_4KB);
770 }
771 ASSERT (Buffer != NULL);
772 DEBUG ((EFI_D_INFO, "SMRAM SaveState Buffer (0x%08x, 0x%08x)\n", Buffer, EFI_PAGES_TO_SIZE(BufferPages)));
773
774 //
775 // Allocate buffer for pointers to array in SMM_CPU_PRIVATE_DATA.
776 //
777 gSmmCpuPrivate->ProcessorInfo = (EFI_PROCESSOR_INFORMATION *)AllocatePool (sizeof (EFI_PROCESSOR_INFORMATION) * mMaxNumberOfCpus);
778 ASSERT (gSmmCpuPrivate->ProcessorInfo != NULL);
779
780 gSmmCpuPrivate->Operation = (SMM_CPU_OPERATION *)AllocatePool (sizeof (SMM_CPU_OPERATION) * mMaxNumberOfCpus);
781 ASSERT (gSmmCpuPrivate->Operation != NULL);
782
783 gSmmCpuPrivate->CpuSaveStateSize = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);
784 ASSERT (gSmmCpuPrivate->CpuSaveStateSize != NULL);
785
786 gSmmCpuPrivate->CpuSaveState = (VOID **)AllocatePool (sizeof (VOID *) * mMaxNumberOfCpus);
787 ASSERT (gSmmCpuPrivate->CpuSaveState != NULL);
788
789 mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveStateSize = gSmmCpuPrivate->CpuSaveStateSize;
790 mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveState = gSmmCpuPrivate->CpuSaveState;
791
792 //
793 // Allocate buffer for pointers to array in CPU_HOT_PLUG_DATA.
794 //
795 mCpuHotPlugData.ApicId = (UINT64 *)AllocatePool (sizeof (UINT64) * mMaxNumberOfCpus);
796 ASSERT (mCpuHotPlugData.ApicId != NULL);
797 mCpuHotPlugData.SmBase = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);
798 ASSERT (mCpuHotPlugData.SmBase != NULL);
799 mCpuHotPlugData.ArrayLength = (UINT32)mMaxNumberOfCpus;
800
801 //
802 // Retrieve APIC ID of each enabled processor from the MP Services protocol.
803 // Also compute the SMBASE address, CPU Save State address, and CPU Save state
804 // size for each CPU in the platform
805 //
806 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
807 mCpuHotPlugData.SmBase[Index] = (UINTN)Buffer + Index * TileSize - SMM_HANDLER_OFFSET;
808 gSmmCpuPrivate->CpuSaveStateSize[Index] = sizeof(SMRAM_SAVE_STATE_MAP);
809 gSmmCpuPrivate->CpuSaveState[Index] = (VOID *)(mCpuHotPlugData.SmBase[Index] + SMRAM_SAVE_STATE_MAP_OFFSET);
810 gSmmCpuPrivate->Operation[Index] = SmmCpuNone;
811
812 if (Index < mNumberOfCpus) {
813 Status = MpServices->GetProcessorInfo (MpServices, Index, &gSmmCpuPrivate->ProcessorInfo[Index]);
814 ASSERT_EFI_ERROR (Status);
815 mCpuHotPlugData.ApicId[Index] = gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId;
816
817 DEBUG ((EFI_D_INFO, "CPU[%03x] APIC ID=%04x SMBASE=%08x SaveState=%08x Size=%08x\n",
818 Index,
819 (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId,
820 mCpuHotPlugData.SmBase[Index],
821 gSmmCpuPrivate->CpuSaveState[Index],
822 gSmmCpuPrivate->CpuSaveStateSize[Index]
823 ));
824 } else {
825 gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId = INVALID_APIC_ID;
826 mCpuHotPlugData.ApicId[Index] = INVALID_APIC_ID;
827 }
828 }
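//
// Editorial worked example (hypothetical addresses): if Buffer is at
// 0x7F000000 and TileSize is 0x4000, the loop above produces
//   SmBase[0] = 0x7F000000 - SMM_HANDLER_OFFSET
//   SmBase[1] = 0x7F004000 - SMM_HANDLER_OFFSET
// so CPU 0's SMI entry (SmBase[0] + SMM_HANDLER_OFFSET) starts exactly at the
// beginning of the allocated buffer and each later CPU's entry starts one tile
// higher, matching the tiling picture earlier in this function.
//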
829
830 //
831 // Allocate SMI stacks for all processors.
832 //
833 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
834 //
835 // Two more pages are allocated for each processor:
836 // one is the guard page and the other is the known good stack.
837 //
838 // +-------------------------------------------+-----+-------------------------------------------+
839 // | Known Good Stack | Guard Page | SMM Stack | ... | Known Good Stack | Guard Page | SMM Stack |
840 // +-------------------------------------------+-----+-------------------------------------------+
841 // | | | |
842 // |<-------------- Processor 0 -------------->| |<-------------- Processor n -------------->|
843 //
844 mSmmStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)) + 2);
845 Stacks = (UINT8 *) AllocatePages (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)) + 2));
846 ASSERT (Stacks != NULL);
847 mSmmStackArrayBase = (UINTN)Stacks;
848 mSmmStackArrayEnd = mSmmStackArrayBase + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * mSmmStackSize - 1;
849 } else {
850 mSmmStackSize = PcdGet32 (PcdCpuSmmStackSize);
851 Stacks = (UINT8 *) AllocatePages (EFI_SIZE_TO_PAGES (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * mSmmStackSize));
852 ASSERT (Stacks != NULL);
853 }
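//
// Editorial worked example (hypothetical PCD value): with PcdCpuSmmStackSize
// set to 0x2000 (2 pages) and PcdCpuSmmStackGuard enabled, the branch above
// yields mSmmStackSize = EFI_PAGES_TO_SIZE (2 + 2) = 0x4000 per processor:
// one known good stack page, one guard page, and the 0x2000-byte SMM stack.
//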
854
855 //
856 // Set SMI stack for SMM base relocation
857 //
858 gSmmInitStack = (UINTN) (Stacks + mSmmStackSize - sizeof (UINTN));
859
860 //
861 // Initialize IDT
862 //
863 InitializeSmmIdt ();
864
865 //
866 // Relocate SMM Base addresses to the ones allocated from SMRAM
867 //
868 mRebased = (BOOLEAN *)AllocateZeroPool (sizeof (BOOLEAN) * mMaxNumberOfCpus);
869 ASSERT (mRebased != NULL);
870 SmmRelocateBases ();
871
872 //
873 // Call hook for BSP to perform extra actions in normal mode after all
874 // SMM base addresses have been relocated on all CPUs
875 //
876 SmmCpuFeaturesSmmRelocationComplete ();
877
878 DEBUG ((DEBUG_INFO, "mXdSupported - 0x%x\n", mXdSupported));
879
880 //
881 // SMM Time initialization
882 //
883 InitializeSmmTimer ();
884
885 //
886 // Initialize MP globals
887 //
888 Cr3 = InitializeMpServiceData (Stacks, mSmmStackSize);
889
890 //
891 // Fill in SMM Reserved Regions
892 //
893 gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedStart = 0;
894 gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedSize = 0;
895
896 //
897 // Install the SMM Configuration Protocol onto a new handle on the handle database.
898 // The entire SMM Configuration Protocol is allocated from SMRAM, so only a pointer
899 // to an SMRAM address will be present in the handle database
900 //
901 Status = SystemTable->BootServices->InstallMultipleProtocolInterfaces (
902 &gSmmCpuPrivate->SmmCpuHandle,
903 &gEfiSmmConfigurationProtocolGuid, &gSmmCpuPrivate->SmmConfiguration,
904 NULL
905 );
906 ASSERT_EFI_ERROR (Status);
907
908 //
909 // Install the SMM CPU Protocol into SMM protocol database
910 //
911 Status = gSmst->SmmInstallProtocolInterface (
912 &mSmmCpuHandle,
913 &gEfiSmmCpuProtocolGuid,
914 EFI_NATIVE_INTERFACE,
915 &mSmmCpu
916 );
917 ASSERT_EFI_ERROR (Status);
918
919 //
920 // Expose address of CPU Hot Plug Data structure if CPU hot plug is supported.
921 //
922 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
923 Status = PcdSet64S (PcdCpuHotPlugDataAddress, (UINT64)(UINTN)&mCpuHotPlugData);
924 ASSERT_EFI_ERROR (Status);
925 }
926
927 //
928 // Initialize SMM CPU Services Support
929 //
930 Status = InitializeSmmCpuServices (mSmmCpuHandle);
931 ASSERT_EFI_ERROR (Status);
932
933 //
934 // register SMM Ready To Lock Protocol notification
935 //
936 Status = gSmst->SmmRegisterProtocolNotify (
937 &gEfiSmmReadyToLockProtocolGuid,
938 SmmReadyToLockEventNotify,
939 &Registration
940 );
941 ASSERT_EFI_ERROR (Status);
942
943 //
944 // Initialize SMM Profile feature
945 //
946 InitSmmProfile (Cr3);
947
948 GetAcpiS3EnableFlag ();
949 InitSmmS3ResumeState (Cr3);
950
951 DEBUG ((EFI_D_INFO, "SMM CPU Module exit from SMRAM with EFI_SUCCESS\n"));
952
953 return EFI_SUCCESS;
954 }
955
956 /**
957
958 Find out SMRAM information including SMRR base and SMRR size.
959
960 @param SmrrBase SMRR base
961 @param SmrrSize SMRR size
962
963 **/
964 VOID
965 FindSmramInfo (
966 OUT UINT32 *SmrrBase,
967 OUT UINT32 *SmrrSize
968 )
969 {
970 EFI_STATUS Status;
971 UINTN Size;
972 EFI_SMM_ACCESS2_PROTOCOL *SmmAccess;
973 EFI_SMRAM_DESCRIPTOR *CurrentSmramRange;
974 EFI_SMRAM_DESCRIPTOR *SmramRanges;
975 UINTN SmramRangeCount;
976 UINTN Index;
977 UINT64 MaxSize;
978 BOOLEAN Found;
979
980 //
981 // Get SMM Access Protocol
982 //
983 Status = gBS->LocateProtocol (&gEfiSmmAccess2ProtocolGuid, NULL, (VOID **)&SmmAccess);
984 ASSERT_EFI_ERROR (Status);
985
986 //
987 // Get SMRAM information
988 //
989 Size = 0;
990 Status = SmmAccess->GetCapabilities (SmmAccess, &Size, NULL);
991 ASSERT (Status == EFI_BUFFER_TOO_SMALL);
992
993 SmramRanges = (EFI_SMRAM_DESCRIPTOR *)AllocatePool (Size);
994 ASSERT (SmramRanges != NULL);
995
996 Status = SmmAccess->GetCapabilities (SmmAccess, &Size, SmramRanges);
997 ASSERT_EFI_ERROR (Status);
998
999 SmramRangeCount = Size / sizeof (EFI_SMRAM_DESCRIPTOR);
1000
1001 //
1002 // Find the largest SMRAM range between 1MB and 4GB that is at least 256K - 4K in size
1003 //
1004 CurrentSmramRange = NULL;
1005 for (Index = 0, MaxSize = SIZE_256KB - EFI_PAGE_SIZE; Index < SmramRangeCount; Index++) {
1006 //
1007 // Skip any SMRAM region that is already allocated, needs testing, or needs ECC initialization
1008 //
1009 if ((SmramRanges[Index].RegionState & (EFI_ALLOCATED | EFI_NEEDS_TESTING | EFI_NEEDS_ECC_INITIALIZATION)) != 0) {
1010 continue;
1011 }
1012
1013 if (SmramRanges[Index].CpuStart >= BASE_1MB) {
1014 if ((SmramRanges[Index].CpuStart + SmramRanges[Index].PhysicalSize) <= BASE_4GB) {
1015 if (SmramRanges[Index].PhysicalSize >= MaxSize) {
1016 MaxSize = SmramRanges[Index].PhysicalSize;
1017 CurrentSmramRange = &SmramRanges[Index];
1018 }
1019 }
1020 }
1021 }
1022
1023 ASSERT (CurrentSmramRange != NULL);
1024
1025 *SmrrBase = (UINT32)CurrentSmramRange->CpuStart;
1026 *SmrrSize = (UINT32)CurrentSmramRange->PhysicalSize;
1027
1028 do {
1029 Found = FALSE;
1030 for (Index = 0; Index < SmramRangeCount; Index++) {
1031 if (SmramRanges[Index].CpuStart < *SmrrBase && *SmrrBase == (SmramRanges[Index].CpuStart + SmramRanges[Index].PhysicalSize)) {
1032 *SmrrBase = (UINT32)SmramRanges[Index].CpuStart;
1033 *SmrrSize = (UINT32)(*SmrrSize + SmramRanges[Index].PhysicalSize);
1034 Found = TRUE;
1035 } else if ((*SmrrBase + *SmrrSize) == SmramRanges[Index].CpuStart && SmramRanges[Index].PhysicalSize > 0) {
1036 *SmrrSize = (UINT32)(*SmrrSize + SmramRanges[Index].PhysicalSize);
1037 Found = TRUE;
1038 }
1039 }
1040 } while (Found);
1041
1042 FreePool (SmramRanges);
1043 DEBUG ((EFI_D_INFO, "SMRR Base: 0x%x, SMRR Size: 0x%x\n", *SmrrBase, *SmrrSize));
1044 }
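//
// Editorial worked example (hypothetical ranges): if GetCapabilities reports
// two usable descriptors 0x7F000000..0x7F7FFFFF and 0x7F800000..0x7FFFFFFF,
// the selection loop above picks one of them as the current range and the
// merge loop then absorbs the adjacent one, so FindSmramInfo reports
// SmrrBase = 0x7F000000 and SmrrSize = 0x1000000 (16 MB).
//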
1045
1046 /**
1047 Configure SMM Code Access Check feature on the currently executing CPU.
1048 SMM Feature Control MSR will be locked after configuration.
1049
1050 @param[in,out] Buffer Pointer to private data buffer.
1051 **/
1052 VOID
1053 EFIAPI
1054 ConfigSmmCodeAccessCheckOnCurrentProcessor (
1055 IN OUT VOID *Buffer
1056 )
1057 {
1058 UINTN CpuIndex;
1059 UINT64 SmmFeatureControlMsr;
1060 UINT64 NewSmmFeatureControlMsr;
1061
1062 //
1063 // Retrieve the CPU Index from the context passed in
1064 //
1065 CpuIndex = *(UINTN *)Buffer;
1066
1067 //
1068 // Get the current SMM Feature Control MSR value
1069 //
1070 SmmFeatureControlMsr = SmmCpuFeaturesGetSmmRegister (CpuIndex, SmmRegFeatureControl);
1071
1072 //
1073 // Compute the new SMM Feature Control MSR value
1074 //
1075 NewSmmFeatureControlMsr = SmmFeatureControlMsr;
1076 if (mSmmCodeAccessCheckEnable) {
1077 NewSmmFeatureControlMsr |= SMM_CODE_CHK_EN_BIT;
1078 if (FeaturePcdGet (PcdCpuSmmFeatureControlMsrLock)) {
1079 NewSmmFeatureControlMsr |= SMM_FEATURE_CONTROL_LOCK_BIT;
1080 }
1081 }
1082
1083 //
1084 // Only set the SMM Feature Control MSR value if the new value is different than the current value
1085 //
1086 if (NewSmmFeatureControlMsr != SmmFeatureControlMsr) {
1087 SmmCpuFeaturesSetSmmRegister (CpuIndex, SmmRegFeatureControl, NewSmmFeatureControlMsr);
1088 }
1089
1090 //
1091 // Release the spin lock used to serialize the updates to the SMM Feature Control MSR
1092 //
1093 ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);
1094 }
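//
// Editorial worked example (hypothetical MSR value): if the SMM Feature
// Control MSR currently reads 0 and both mSmmCodeAccessCheckEnable and
// PcdCpuSmmFeatureControlMsrLock are TRUE, the function above writes
// SMM_CODE_CHK_EN_BIT | SMM_FEATURE_CONTROL_LOCK_BIT, enabling the check and
// locking the MSR against further writes.
//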
1095
1096 /**
1097 Configure SMM Code Access Check feature for all processors.
1098 SMM Feature Control MSR will be locked after configuration.
1099 **/
1100 VOID
1101 ConfigSmmCodeAccessCheck (
1102 VOID
1103 )
1104 {
1105 UINTN Index;
1106 EFI_STATUS Status;
1107
1108 //
1109 // Check to see if the Feature Control MSR is supported on this CPU
1110 //
1111 Index = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
1112 if (!SmmCpuFeaturesIsSmmRegisterSupported (Index, SmmRegFeatureControl)) {
1113 mSmmCodeAccessCheckEnable = FALSE;
1114 return;
1115 }
1116
1117 //
1118 // Check to see if the CPU supports the SMM Code Access Check feature
1119 // Do not access this MSR unless the CPU supports the SmmRegFeatureControl
1120 //
1121 if ((AsmReadMsr64 (EFI_MSR_SMM_MCA_CAP) & SMM_CODE_ACCESS_CHK_BIT) == 0) {
1122 mSmmCodeAccessCheckEnable = FALSE;
1123 return;
1124 }
1125
1126 //
1127 // Initialize the lock used to serialize the MSR programming in BSP and all APs
1128 //
1129 InitializeSpinLock (mConfigSmmCodeAccessCheckLock);
1130
1131 //
1132 // Acquire Config SMM Code Access Check spin lock. The BSP will release the
1133 // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
1134 //
1135 AcquireSpinLock (mConfigSmmCodeAccessCheckLock);
1136
1137 //
1138 // Enable SMM Code Access Check feature on the BSP.
1139 //
1140 ConfigSmmCodeAccessCheckOnCurrentProcessor (&Index);
1141
1142 //
1143 // Enable SMM Code Access Check feature for the APs.
1144 //
1145 for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {
1146 if (Index != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
1147
1148 //
1149 // Acquire Config SMM Code Access Check spin lock. The AP will release the
1150 // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
1151 //
1152 AcquireSpinLock (mConfigSmmCodeAccessCheckLock);
1153
1154 //
1155 // Call SmmStartupThisAp() to enable SMM Code Access Check on an AP.
1156 //
1157 Status = gSmst->SmmStartupThisAp (ConfigSmmCodeAccessCheckOnCurrentProcessor, Index, &Index);
1158 ASSERT_EFI_ERROR (Status);
1159
1160 //
1161 // Wait for the AP to release the Config SMM Code Access Check spin lock.
1162 //
1163 while (!AcquireSpinLockOrFail (mConfigSmmCodeAccessCheckLock)) {
1164 CpuPause ();
1165 }
1166
1167 //
1168 // Release the Config SMM Code Access Check spin lock.
1169 //
1170 ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);
1171 }
1172 }
1173 }
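//
// Editorial sequence sketch: for each AP, the BSP in the function above
// (1) acquires mConfigSmmCodeAccessCheckLock, (2) starts
// ConfigSmmCodeAccessCheckOnCurrentProcessor on that AP via SmmStartupThisAp,
// (3) spins with AcquireSpinLockOrFail until the AP releases the lock from
// inside the handler, and (4) releases the lock again before moving on to the
// next AP.
//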
1174
1175 /**
1176 This API provides a way to allocate memory for page tables.
1177
1178 This API can be called more than once to allocate memory for page tables.
1179
1180 Allocates the number of 4KB pages of type EfiRuntimeServicesData and returns a pointer to the
1181 allocated buffer. The buffer returned is aligned on a 4KB boundary. If Pages is 0, then NULL
1182 is returned. If there is not enough memory remaining to satisfy the request, then NULL is
1183 returned.
1184
1185 @param Pages The number of 4 KB pages to allocate.
1186
1187 @return A pointer to the allocated buffer or NULL if allocation fails.
1188
1189 **/
1190 VOID *
1191 AllocatePageTableMemory (
1192 IN UINTN Pages
1193 )
1194 {
1195 VOID *Buffer;
1196
1197 Buffer = SmmCpuFeaturesAllocatePageTableMemory (Pages);
1198 if (Buffer != NULL) {
1199 return Buffer;
1200 }
1201 return AllocatePages (Pages);
1202 }
1203
1204 /**
1205 Allocate pages for code.
1206
1207 @param[in] Pages Number of pages to be allocated.
1208
1209 @return Allocated memory.
1210 **/
1211 VOID *
1212 AllocateCodePages (
1213 IN UINTN Pages
1214 )
1215 {
1216 EFI_STATUS Status;
1217 EFI_PHYSICAL_ADDRESS Memory;
1218
1219 if (Pages == 0) {
1220 return NULL;
1221 }
1222
1223 Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);
1224 if (EFI_ERROR (Status)) {
1225 return NULL;
1226 }
1227 return (VOID *) (UINTN) Memory;
1228 }
1229
1230 /**
1231 Allocate aligned pages for code.
1232
1233 @param[in] Pages Number of pages to be allocated.
1234 @param[in] Alignment The requested alignment of the allocation.
1235 Must be a power of two.
1236 If Alignment is zero, then byte alignment is used.
1237
1238 @return Allocated memory.
1239 **/
1240 VOID *
1241 AllocateAlignedCodePages (
1242 IN UINTN Pages,
1243 IN UINTN Alignment
1244 )
1245 {
1246 EFI_STATUS Status;
1247 EFI_PHYSICAL_ADDRESS Memory;
1248 UINTN AlignedMemory;
1249 UINTN AlignmentMask;
1250 UINTN UnalignedPages;
1251 UINTN RealPages;
1252
1253 //
1254 // Alignment must be a power of two or zero.
1255 //
1256 ASSERT ((Alignment & (Alignment - 1)) == 0);
1257
1258 if (Pages == 0) {
1259 return NULL;
1260 }
1261 if (Alignment > EFI_PAGE_SIZE) {
1262 //
1263 // Calculate the total number of pages since alignment is larger than page size.
1264 //
1265 AlignmentMask = Alignment - 1;
1266 RealPages = Pages + EFI_SIZE_TO_PAGES (Alignment);
1267 //
1268 // Make sure that Pages plus EFI_SIZE_TO_PAGES (Alignment) does not overflow.
1269 //
1270 ASSERT (RealPages > Pages);
1271
1272 Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, RealPages, &Memory);
1273 if (EFI_ERROR (Status)) {
1274 return NULL;
1275 }
1276 AlignedMemory = ((UINTN) Memory + AlignmentMask) & ~AlignmentMask;
1277 UnalignedPages = EFI_SIZE_TO_PAGES (AlignedMemory - (UINTN) Memory);
1278 if (UnalignedPages > 0) {
1279 //
1280 // Free first unaligned page(s).
1281 //
1282 Status = gSmst->SmmFreePages (Memory, UnalignedPages);
1283 ASSERT_EFI_ERROR (Status);
1284 }
1285 Memory = (EFI_PHYSICAL_ADDRESS) (AlignedMemory + EFI_PAGES_TO_SIZE (Pages));
1286 UnalignedPages = RealPages - Pages - UnalignedPages;
1287 if (UnalignedPages > 0) {
1288 //
1289 // Free last unaligned page(s).
1290 //
1291 Status = gSmst->SmmFreePages (Memory, UnalignedPages);
1292 ASSERT_EFI_ERROR (Status);
1293 }
1294 } else {
1295 //
1296 // Do not over-allocate pages in this case.
1297 //
1298 Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);
1299 if (EFI_ERROR (Status)) {
1300 return NULL;
1301 }
1302 AlignedMemory = (UINTN) Memory;
1303 }
1304 return (VOID *) AlignedMemory;
1305 }
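//
// Editorial worked example (hypothetical values): for Pages = 2 and
// Alignment = SIZE_32KB, RealPages = 2 + 8 = 10. If SmmAllocatePages returns
// 0x7F005000, then AlignedMemory = 0x7F008000, so the 3 leading pages and the
// 5 trailing pages are freed and the caller receives the aligned 2-page
// buffer at 0x7F008000.
//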
1306
1307 /**
1308 Perform the remaining tasks.
1309
1310 **/
1311 VOID
1312 PerformRemainingTasks (
1313 VOID
1314 )
1315 {
1316 if (mSmmReadyToLock) {
1317 //
1318 // Start SMM Profile feature
1319 //
1320 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
1321 SmmProfileStart ();
1322 }
1323 //
1324 // Create a mix of 2MB and 4KB page tables. Mark some memory ranges as not-present and execute-disable.
1325 //
1326 InitPaging ();
1327
1328 //
1329 // Mark critical region to be read-only in page table
1330 //
1331 SetMemMapAttributes ();
1332
1333 //
1334 // For outside SMRAM, we only map SMM communication buffer or MMIO.
1335 //
1336 SetUefiMemMapAttributes ();
1337
1338 //
1339 // Set page table itself to be read-only
1340 //
1341 SetPageTableAttributes ();
1342
1343 //
1344 // Configure SMM Code Access Check feature if available.
1345 //
1346 ConfigSmmCodeAccessCheck ();
1347
1348 SmmCpuFeaturesCompleteSmmReadyToLock ();
1349
1350 //
1351 // Clean SMM ready to lock flag
1352 //
1353 mSmmReadyToLock = FALSE;
1354 }
1355 }
1356
1357 /**
1358 Perform the pre-tasks.
1359
1360 **/
1361 VOID
1362 PerformPreTasks (
1363 VOID
1364 )
1365 {
1366 RestoreSmmConfigurationInS3 ();
1367 }