}\r
\r
/**\r
- The Entry point of the MP CPU PEIM.\r
+ Get GDT register value.\r
\r
- This function will wakeup APs and collect CPU AP count and install the\r
- Mp Service Ppi.\r
+ This function is mainly for AP purpose because AP may have different GDT\r
+ table than BSP.\r
\r
- @param FileHandle Handle of the file being invoked.\r
- @param PeiServices Describes the list of possible PEI Services.\r
+ @param[in,out] Buffer The pointer to private data buffer.\r
\r
- @retval EFI_SUCCESS MpServicePpi is installed successfully.\r
+**/\r
+VOID\r
+EFIAPI\r
+GetGdtr (\r
+ IN OUT VOID *Buffer\r
+ )\r
+{\r
+ AsmReadGdtr ((IA32_DESCRIPTOR *)Buffer);\r
+}\r
+\r
+/**\r
+ Initializes CPU exceptions handlers for the sake of stack switch requirement.\r
+\r
+ This function is a wrapper of InitializeCpuExceptionHandlersEx. It's mainly\r
+ for the sake of AP's init because of EFI_AP_PROCEDURE API requirement.\r
+\r
+ @param[in,out] Buffer The pointer to private data buffer.\r
\r
**/\r
-EFI_STATUS\r
+VOID\r
EFIAPI\r
-CpuMpPeimInit (\r
- IN EFI_PEI_FILE_HANDLE FileHandle,\r
+InitializeExceptionStackSwitchHandlers (\r
+ IN OUT VOID *Buffer\r
+ )\r
+{\r
+ CPU_EXCEPTION_INIT_DATA *EssData;\r
+ IA32_DESCRIPTOR Idtr;\r
+ EFI_STATUS Status;\r
+\r
+ EssData = Buffer;\r
+ //\r
+ // We don't plan to replace IDT table with a new one, but we should not assume\r
+ // the AP's IDT is the same as BSP's IDT either.\r
+ //\r
+ AsmReadIdtr (&Idtr);\r
+ EssData->Ia32.IdtTable = (VOID *)Idtr.Base;\r
+ EssData->Ia32.IdtTableSize = Idtr.Limit + 1;\r
+ Status = InitializeCpuExceptionHandlersEx (NULL, EssData);\r
+ ASSERT_EFI_ERROR (Status);\r
+}\r
+\r
+/**\r
+ Initializes MP exceptions handlers for the sake of stack switch requirement.\r
+\r
+ This function will allocate required resources required to setup stack switch\r
+ and pass them through CPU_EXCEPTION_INIT_DATA to each logic processor.\r
+\r
+**/\r
+VOID\r
+InitializeMpExceptionStackSwitchHandlers (\r
+ VOID\r
+ )\r
+{\r
+ EFI_STATUS Status;\r
+ UINTN Index;\r
+ UINTN Bsp;\r
+ UINTN ExceptionNumber;\r
+ UINTN OldGdtSize;\r
+ UINTN NewGdtSize;\r
+ UINTN NewStackSize;\r
+ IA32_DESCRIPTOR Gdtr;\r
+ CPU_EXCEPTION_INIT_DATA EssData;\r
+ UINT8 *GdtBuffer;\r
+ UINT8 *StackTop;\r
+ UINTN NumberOfProcessors;\r
+\r
+ if (!PcdGetBool (PcdCpuStackGuard)) {\r
+ return;\r
+ }\r
+\r
+ MpInitLibGetNumberOfProcessors(&NumberOfProcessors, NULL);\r
+ MpInitLibWhoAmI (&Bsp);\r
+\r
+ ExceptionNumber = FixedPcdGetSize (PcdCpuStackSwitchExceptionList);\r
+ NewStackSize = FixedPcdGet32 (PcdCpuKnownGoodStackSize) * ExceptionNumber;\r
+\r
+ Status = PeiServicesAllocatePool (\r
+ NewStackSize * NumberOfProcessors,\r
+ (VOID **)&StackTop\r
+ );\r
+ ASSERT(StackTop != NULL);\r
+ if (EFI_ERROR (Status)) {\r
+ ASSERT_EFI_ERROR (Status);\r
+ return;\r
+ }\r
+ StackTop += NewStackSize * NumberOfProcessors;\r
+\r
+ //\r
+ // The default exception handlers must have been initialized. Let's just skip\r
+ // it in this method.\r
+ //\r
+ EssData.Ia32.Revision = CPU_EXCEPTION_INIT_DATA_REV;\r
+ EssData.Ia32.InitDefaultHandlers = FALSE;\r
+\r
+ EssData.Ia32.StackSwitchExceptions = FixedPcdGetPtr(PcdCpuStackSwitchExceptionList);\r
+ EssData.Ia32.StackSwitchExceptionNumber = ExceptionNumber;\r
+ EssData.Ia32.KnownGoodStackSize = FixedPcdGet32(PcdCpuKnownGoodStackSize);\r
+\r
+ //\r
+ // Initialize Gdtr to suppress incorrect compiler/analyzer warnings.\r
+ //\r
+ Gdtr.Base = 0;\r
+ Gdtr.Limit = 0;\r
+ for (Index = 0; Index < NumberOfProcessors; ++Index) {\r
+ //\r
+ // To support stack switch, we need to re-construct GDT but not IDT.\r
+ //\r
+ if (Index == Bsp) {\r
+ GetGdtr(&Gdtr);\r
+ } else {\r
+ //\r
+ // AP might have different size of GDT from BSP.\r
+ //\r
+ MpInitLibStartupThisAP (GetGdtr, Index, NULL, 0, (VOID *)&Gdtr, NULL);\r
+ }\r
+\r
+ //\r
+ // X64 needs only one TSS of current task working for all exceptions\r
+ // because of its IST feature. IA32 needs one TSS for each exception\r
+ // in addition to current task. Since AP is not supposed to allocate\r
+ // memory, we have to do it in BSP. To simplify the code, we allocate\r
+ // memory for IA32 case to cover both IA32 and X64 exception stack\r
+ // switch.\r
+ //\r
+ // Layout of memory to allocate for each processor:\r
+ // --------------------------------\r
+ // | Alignment | (just in case)\r
+ // --------------------------------\r
+ // | |\r
+ // | Original GDT |\r
+ // | |\r
+ // --------------------------------\r
+ // | Current task descriptor |\r
+ // --------------------------------\r
+ // | |\r
+ // | Exception task descriptors | X ExceptionNumber\r
+ // | |\r
+ // --------------------------------\r
+ // | Current task-state segment |\r
+ // --------------------------------\r
+ // | |\r
+ // | Exception task-state segment | X ExceptionNumber\r
+ // | |\r
+ // --------------------------------\r
+ //\r
+ OldGdtSize = Gdtr.Limit + 1;\r
+ EssData.Ia32.ExceptionTssDescSize = sizeof (IA32_TSS_DESCRIPTOR) *\r
+ (ExceptionNumber + 1);\r
+ EssData.Ia32.ExceptionTssSize = sizeof (IA32_TASK_STATE_SEGMENT) *\r
+ (ExceptionNumber + 1);\r
+ NewGdtSize = sizeof (IA32_TSS_DESCRIPTOR) +\r
+ OldGdtSize +\r
+ EssData.Ia32.ExceptionTssDescSize +\r
+ EssData.Ia32.ExceptionTssSize;\r
+\r
+ Status = PeiServicesAllocatePool (\r
+ NewGdtSize,\r
+ (VOID **)&GdtBuffer\r
+ );\r
+ ASSERT (GdtBuffer != NULL);\r
+ if (EFI_ERROR (Status)) {\r
+ ASSERT_EFI_ERROR (Status);\r
+ return;\r
+ }\r
+\r
+ //\r
+ // Make sure the GDT table is properly aligned\r
+ //\r
+ EssData.Ia32.GdtTable = ALIGN_POINTER(GdtBuffer, sizeof (IA32_TSS_DESCRIPTOR));\r
+ NewGdtSize -= ((UINT8 *)EssData.Ia32.GdtTable - GdtBuffer);\r
+ EssData.Ia32.GdtTableSize = NewGdtSize;\r
+\r
+ EssData.Ia32.ExceptionTssDesc = ((UINT8 *)EssData.Ia32.GdtTable + OldGdtSize);\r
+ EssData.Ia32.ExceptionTss = ((UINT8 *)EssData.Ia32.GdtTable + OldGdtSize +\r
+ EssData.Ia32.ExceptionTssDescSize);\r
+\r
+ EssData.Ia32.KnownGoodStackTop = (UINTN)StackTop;\r
+ DEBUG ((DEBUG_INFO,\r
+ "Exception stack top[cpu%lu]: 0x%lX\n",\r
+ (UINT64)(UINTN)Index,\r
+ (UINT64)(UINTN)StackTop));\r
+\r
+ if (Index == Bsp) {\r
+ InitializeExceptionStackSwitchHandlers (&EssData);\r
+ } else {\r
+ MpInitLibStartupThisAP (\r
+ InitializeExceptionStackSwitchHandlers,\r
+ Index,\r
+ NULL,\r
+ 0,\r
+ (VOID *)&EssData,\r
+ NULL\r
+ );\r
+ }\r
+\r
+ StackTop -= NewStackSize;\r
+ }\r
+}\r
+\r
+/**\r
+ Initializes MP and exceptions handlers.\r
+\r
+ @param PeiServices The pointer to the PEI Services Table.\r
+\r
+ @retval EFI_SUCCESS MP was successfully initialized.\r
+ @retval others Error occurred in MP initialization.\r
+\r
+**/\r
+EFI_STATUS\r
+InitializeCpuMpWorker (\r
IN CONST EFI_PEI_SERVICES **PeiServices\r
)\r
{\r
- EFI_STATUS Status;\r
+ EFI_STATUS Status;\r
EFI_VECTOR_HANDOFF_INFO *VectorInfo;\r
EFI_PEI_VECTOR_HANDOFF_INFO_PPI *VectorHandoffInfoPpi;\r
\r
if (Status == EFI_SUCCESS) {\r
VectorInfo = VectorHandoffInfoPpi->Info;\r
}\r
- Status = InitializeCpuExceptionHandlers (VectorInfo);\r
- ASSERT_EFI_ERROR (Status);\r
\r
//\r
- // Wakeup APs to do initialization\r
+ // Initialize default handlers\r
//\r
+ Status = InitializeCpuExceptionHandlers (VectorInfo);\r
+ if (EFI_ERROR (Status)) {\r
+ return Status;\r
+ }\r
+\r
Status = MpInitLibInitialize ();\r
- ASSERT_EFI_ERROR (Status);\r
+ if (EFI_ERROR (Status)) {\r
+ return Status;\r
+ }\r
+\r
+ //\r
+ // Special initialization for the sake of Stack Guard\r
+ //\r
+ InitializeMpExceptionStackSwitchHandlers ();\r
\r
//\r
// Update and publish CPU BIST information\r
\r
return Status;\r
}\r
+\r
+/**\r
+ The Entry point of the MP CPU PEIM.\r
+\r
+ This function will wakeup APs and collect CPU AP count and install the\r
+ Mp Service Ppi.\r
+\r
+ @param FileHandle Handle of the file being invoked.\r
+ @param PeiServices Describes the list of possible PEI Services.\r
+\r
+ @retval EFI_SUCCESS MpServicePpi is installed successfully.\r
+\r
+**/\r
+EFI_STATUS\r
+EFIAPI\r
+CpuMpPeimInit (\r
+ IN EFI_PEI_FILE_HANDLE FileHandle,\r
+ IN CONST EFI_PEI_SERVICES **PeiServices\r
+ )\r
+{\r
+ EFI_STATUS Status;\r
+\r
+ //\r
+ // For the sake of special initialization needing to be done right after\r
+ // memory discovery.\r
+ //\r
+ Status = PeiServicesNotifyPpi (&mPostMemNotifyList[0]);\r
+ ASSERT_EFI_ERROR (Status);\r
+\r
+ return Status;\r
+}\r
--- /dev/null
+/** @file\r
+ Basic paging support for the CPU to enable Stack Guard.\r
+\r
+Copyright (c) 2018, Intel Corporation. All rights reserved.<BR>\r
+\r
+This program and the accompanying materials\r
+are licensed and made available under the terms and conditions of the BSD License\r
+which accompanies this distribution. The full text of the license may be found at\r
+http://opensource.org/licenses/bsd-license.php\r
+\r
+THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+\r
+**/\r
+\r
+#include <Register/Cpuid.h>\r
+#include <Register/Msr.h>\r
+#include <Library/MemoryAllocationLib.h>\r
+#include <Library/CpuLib.h>\r
+#include <Library/BaseLib.h>\r
+\r
+#include "CpuMpPei.h"\r
+\r
+#define IA32_PG_P BIT0\r
+#define IA32_PG_RW BIT1\r
+#define IA32_PG_U BIT2\r
+#define IA32_PG_A BIT5\r
+#define IA32_PG_D BIT6\r
+#define IA32_PG_PS BIT7\r
+#define IA32_PG_NX BIT63\r
+\r
+#define PAGE_ATTRIBUTE_BITS (IA32_PG_RW | IA32_PG_P)\r
+#define PAGE_PROGATE_BITS (IA32_PG_D | IA32_PG_A | IA32_PG_NX | IA32_PG_U |\\r
+ PAGE_ATTRIBUTE_BITS)\r
+\r
+#define PAGING_PAE_INDEX_MASK 0x1FF\r
+#define PAGING_4K_ADDRESS_MASK_64 0x000FFFFFFFFFF000ull\r
+#define PAGING_2M_ADDRESS_MASK_64 0x000FFFFFFFE00000ull\r
+#define PAGING_1G_ADDRESS_MASK_64 0x000FFFFFC0000000ull\r
+#define PAGING_512G_ADDRESS_MASK_64 0x000FFF8000000000ull\r
+\r
+typedef enum {\r
+ PageNone = 0,\r
+ PageMin = 1,\r
+ Page4K = PageMin,\r
+ Page2M = 2,\r
+ Page1G = 3,\r
+ Page512G = 4,\r
+ PageMax = Page512G\r
+} PAGE_ATTRIBUTE;\r
+\r
+typedef struct {\r
+ PAGE_ATTRIBUTE Attribute;\r
+ UINT64 Length;\r
+ UINT64 AddressMask;\r
+ UINTN AddressBitOffset;\r
+ UINTN AddressBitLength;\r
+} PAGE_ATTRIBUTE_TABLE;\r
+\r
+PAGE_ATTRIBUTE_TABLE mPageAttributeTable[] = {\r
+ {PageNone, 0, 0, 0, 0},\r
+ {Page4K, SIZE_4KB, PAGING_4K_ADDRESS_MASK_64, 12, 9},\r
+ {Page2M, SIZE_2MB, PAGING_2M_ADDRESS_MASK_64, 21, 9},\r
+ {Page1G, SIZE_1GB, PAGING_1G_ADDRESS_MASK_64, 30, 9},\r
+ {Page512G, SIZE_512GB, PAGING_512G_ADDRESS_MASK_64, 39, 9},\r
+};\r
+\r
+EFI_PEI_NOTIFY_DESCRIPTOR mPostMemNotifyList[] = {\r
+ {\r
+ (EFI_PEI_PPI_DESCRIPTOR_NOTIFY_CALLBACK | EFI_PEI_PPI_DESCRIPTOR_TERMINATE_LIST),\r
+ &gEfiPeiMemoryDiscoveredPpiGuid,\r
+ MemoryDiscoveredPpiNotifyCallback\r
+ }\r
+};\r
+\r
+/**\r
+ The function will check if IA32 PAE is supported.\r
+\r
+ @retval TRUE IA32 PAE is supported.\r
+ @retval FALSE IA32 PAE is not supported.\r
+\r
+**/\r
+BOOLEAN\r
+IsIa32PaeSupported (\r
+ VOID\r
+ )\r
+{\r
+ UINT32 RegEax;\r
+ CPUID_VERSION_INFO_EDX RegEdx;\r
+\r
+ AsmCpuid (CPUID_SIGNATURE, &RegEax, NULL, NULL, NULL);\r
+ if (RegEax >= CPUID_VERSION_INFO) {\r
+ AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx.Uint32);\r
+ if (RegEdx.Bits.PAE != 0) {\r
+ return TRUE;\r
+ }\r
+ }\r
+\r
+ return FALSE;\r
+}\r
+\r
+/**\r
+ This API provides a way to allocate memory for page table.\r
+\r
+ @param Pages The number of 4 KB pages to allocate.\r
+\r
+ @return A pointer to the allocated buffer or NULL if allocation fails.\r
+\r
+**/\r
+VOID *\r
+AllocatePageTableMemory (\r
+ IN UINTN Pages\r
+ )\r
+{\r
+ VOID *Address;\r
+\r
+ Address = AllocatePages(Pages);\r
+ if (Address != NULL) {\r
+ ZeroMem(Address, EFI_PAGES_TO_SIZE (Pages));\r
+ }\r
+\r
+ return Address;\r
+}\r
+\r
+/**\r
+ Get the address width supported by current processor.\r
+\r
+ @retval 32 If processor is in 32-bit mode.\r
+ @retval 36-48 If processor is in 64-bit mode.\r
+\r
+**/\r
+UINTN\r
+GetPhysicalAddressWidth (\r
+ VOID\r
+ )\r
+{\r
+ UINT32 RegEax;\r
+\r
+ if (sizeof(UINTN) == 4) {\r
+ return 32;\r
+ }\r
+\r
+ AsmCpuid(CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);\r
+ if (RegEax >= CPUID_VIR_PHY_ADDRESS_SIZE) {\r
+ AsmCpuid (CPUID_VIR_PHY_ADDRESS_SIZE, &RegEax, NULL, NULL, NULL);\r
+ RegEax &= 0xFF;\r
+ if (RegEax > 48) {\r
+ return 48;\r
+ }\r
+\r
+ return (UINTN)RegEax;\r
+ }\r
+\r
+ return 36;\r
+}\r
+\r
+/**\r
+ Get the type of top level page table.\r
+\r
+ @retval Page512G PML4 paging.\r
+ @retval Page1G PAE paging.\r
+\r
+**/\r
+PAGE_ATTRIBUTE\r
+GetPageTableTopLevelType (\r
+ VOID\r
+ )\r
+{\r
+ MSR_IA32_EFER_REGISTER MsrEfer;\r
+\r
+ MsrEfer.Uint64 = AsmReadMsr64 (MSR_CORE_IA32_EFER);\r
+\r
+ return (MsrEfer.Bits.LMA == 1) ? Page512G : Page1G;\r
+}\r
+\r
+/**\r
+ Return page table entry matching the address.\r
+\r
+ @param[in] Address The address to be checked.\r
+ @param[out] PageAttributes The page attribute of the page entry.\r
+\r
+ @return The page entry.\r
+**/\r
+VOID *\r
+GetPageTableEntry (\r
+ IN PHYSICAL_ADDRESS Address,\r
+ OUT PAGE_ATTRIBUTE *PageAttribute\r
+ )\r
+{\r
+ INTN Level;\r
+ UINTN Index;\r
+ UINT64 *PageTable;\r
+ UINT64 AddressEncMask;\r
+\r
+ AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask);\r
+ PageTable = (UINT64 *)(UINTN)(AsmReadCr3 () & PAGING_4K_ADDRESS_MASK_64);\r
+ for (Level = (INTN)GetPageTableTopLevelType (); Level > 0; --Level) {\r
+ Index = (UINTN)RShiftU64 (Address, mPageAttributeTable[Level].AddressBitOffset);\r
+ Index &= PAGING_PAE_INDEX_MASK;\r
+\r
+ //\r
+ // No mapping?\r
+ //\r
+ if (PageTable[Index] == 0) {\r
+ *PageAttribute = PageNone;\r
+ return NULL;\r
+ }\r
+\r
+ //\r
+ // Page memory?\r
+ //\r
+ if ((PageTable[Index] & IA32_PG_PS) != 0 || Level == PageMin) {\r
+ *PageAttribute = (PAGE_ATTRIBUTE)Level;\r
+ return &PageTable[Index];\r
+ }\r
+\r
+ //\r
+ // Page directory or table\r
+ //\r
+ PageTable = (UINT64 *)(UINTN)(PageTable[Index] &\r
+ ~AddressEncMask &\r
+ PAGING_4K_ADDRESS_MASK_64);\r
+ }\r
+\r
+ *PageAttribute = PageNone;\r
+ return NULL;\r
+}\r
+\r
+/**\r
+ This function splits one page entry to smaller page entries.\r
+\r
+ @param[in] PageEntry The page entry to be split.\r
+ @param[in] PageAttribute The page attribute of the page entry.\r
+ @param[in] SplitAttribute How to split the page entry.\r
+ @param[in] Recursively Do the split recursively or not.\r
+\r
+ @retval RETURN_SUCCESS The page entry is split.\r
+ @retval RETURN_INVALID_PARAMETER If target page attribute is invalid\r
+ @retval RETURN_OUT_OF_RESOURCES No resource to split page entry.\r
+**/\r
+RETURN_STATUS\r
+SplitPage (\r
+ IN UINT64 *PageEntry,\r
+ IN PAGE_ATTRIBUTE PageAttribute,\r
+ IN PAGE_ATTRIBUTE SplitAttribute,\r
+ IN BOOLEAN Recursively\r
+ )\r
+{\r
+ UINT64 BaseAddress;\r
+ UINT64 *NewPageEntry;\r
+ UINTN Index;\r
+ UINT64 AddressEncMask;\r
+ PAGE_ATTRIBUTE SplitTo;\r
+\r
+ if (SplitAttribute == PageNone || SplitAttribute >= PageAttribute) {\r
+ ASSERT (SplitAttribute != PageNone);\r
+ ASSERT (SplitAttribute < PageAttribute);\r
+ return RETURN_INVALID_PARAMETER;\r
+ }\r
+\r
+ NewPageEntry = AllocatePageTableMemory (1);\r
+ if (NewPageEntry == NULL) {\r
+ ASSERT (NewPageEntry != NULL);\r
+ return RETURN_OUT_OF_RESOURCES;\r
+ }\r
+\r
+ //\r
+ // One level down each step to achieve more compact page table.\r
+ //\r
+ SplitTo = PageAttribute - 1;\r
+ AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) &\r
+ mPageAttributeTable[SplitTo].AddressMask;\r
+ BaseAddress = *PageEntry &\r
+ ~PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) &\r
+ mPageAttributeTable[PageAttribute].AddressMask;\r
+ for (Index = 0; Index < SIZE_4KB / sizeof(UINT64); Index++) {\r
+ NewPageEntry[Index] = BaseAddress | AddressEncMask |\r
+ ((*PageEntry) & PAGE_PROGATE_BITS);\r
+\r
+ if (SplitTo != PageMin) {\r
+ NewPageEntry[Index] |= IA32_PG_PS;\r
+ }\r
+\r
+ if (Recursively && SplitTo > SplitAttribute) {\r
+ SplitPage (&NewPageEntry[Index], SplitTo, SplitAttribute, Recursively);\r
+ }\r
+\r
+ BaseAddress += mPageAttributeTable[SplitTo].Length;\r
+ }\r
+\r
+ (*PageEntry) = (UINT64)(UINTN)NewPageEntry | AddressEncMask | PAGE_ATTRIBUTE_BITS;\r
+\r
+ return RETURN_SUCCESS;\r
+}\r
+\r
+/**\r
+ This function modifies the page attributes for the memory region specified\r
+ by BaseAddress and Length from their current attributes to the attributes\r
+ specified by Attributes.\r
+\r
+ Caller should make sure BaseAddress and Length is at page boundary.\r
+\r
+ @param[in] BaseAddress Start address of a memory region.\r
+ @param[in] Length Size in bytes of the memory region.\r
+ @param[in] Attributes Bit mask of attributes to modify.\r
+\r
+ @retval RETURN_SUCCESS The attributes were modified for the memory\r
+ region.\r
+ @retval RETURN_INVALID_PARAMETER Length is zero; or,\r
+ Attributes specified an illegal combination\r
+ of attributes that cannot be set together; or\r
+ Address is not 4KB aligned.\r
+ @retval RETURN_OUT_OF_RESOURCES There are not enough system resources to modify\r
+ the attributes.\r
+ @retval RETURN_UNSUPPORTED Cannot modify the attributes of given memory.\r
+\r
+**/\r
+RETURN_STATUS\r
+EFIAPI\r
+ConvertMemoryPageAttributes (\r
+ IN PHYSICAL_ADDRESS BaseAddress,\r
+ IN UINT64 Length,\r
+ IN UINT64 Attributes\r
+ )\r
+{\r
+ UINT64 *PageEntry;\r
+ PAGE_ATTRIBUTE PageAttribute;\r
+ RETURN_STATUS Status;\r
+ EFI_PHYSICAL_ADDRESS MaximumAddress;\r
+\r
+ if (Length == 0 ||\r
+ (BaseAddress & (SIZE_4KB - 1)) != 0 ||\r
+ (Length & (SIZE_4KB - 1)) != 0) {\r
+\r
+ ASSERT (Length > 0);\r
+ ASSERT ((BaseAddress & (SIZE_4KB - 1)) == 0);\r
+ ASSERT ((Length & (SIZE_4KB - 1)) == 0);\r
+\r
+ return RETURN_INVALID_PARAMETER;\r
+ }\r
+\r
+ MaximumAddress = (EFI_PHYSICAL_ADDRESS)MAX_UINT32;\r
+ if (BaseAddress > MaximumAddress ||\r
+ Length > MaximumAddress ||\r
+ (BaseAddress > MaximumAddress - (Length - 1))) {\r
+ return RETURN_UNSUPPORTED;\r
+ }\r
+\r
+ //\r
+ // Below logic is to check 2M/4K page to make sure we do not waste memory.\r
+ //\r
+ while (Length != 0) {\r
+ PageEntry = GetPageTableEntry (BaseAddress, &PageAttribute);\r
+ if (PageEntry == NULL) {\r
+ return RETURN_UNSUPPORTED;\r
+ }\r
+\r
+ if (PageAttribute != Page4K) {\r
+ Status = SplitPage (PageEntry, PageAttribute, Page4K, FALSE);\r
+ if (RETURN_ERROR (Status)) {\r
+ return Status;\r
+ }\r
+ //\r
+ // Do it again until the page is 4K.\r
+ //\r
+ continue;\r
+ }\r
+\r
+ //\r
+ // Just take care of 'present' bit for Stack Guard.\r
+ //\r
+ if ((Attributes & IA32_PG_P) != 0) {\r
+ *PageEntry |= (UINT64)IA32_PG_P;\r
+ } else {\r
+ *PageEntry &= ~((UINT64)IA32_PG_P);\r
+ }\r
+\r
+ //\r
+ // Convert success, move to next\r
+ //\r
+ BaseAddress += SIZE_4KB;\r
+ Length -= SIZE_4KB;\r
+ }\r
+\r
+ return RETURN_SUCCESS;\r
+}\r
+\r
+/**\r
+ Get maximum size of page memory supported by current processor.\r
+\r
+ @param[in] TopLevelType The type of top level page entry.\r
+\r
+ @retval Page1G If processor supports 1G page and PML4.\r
+ @retval Page2M For all other situations.\r
+\r
+**/\r
+PAGE_ATTRIBUTE\r
+GetMaxMemoryPage (\r
+ IN PAGE_ATTRIBUTE TopLevelType\r
+ )\r
+{\r
+ UINT32 RegEax;\r
+ UINT32 RegEdx;\r
+\r
+ if (TopLevelType == Page512G) {\r
+ AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);\r
+ if (RegEax >= CPUID_EXTENDED_CPU_SIG) {\r
+ AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);\r
+ if ((RegEdx & BIT26) != 0) {\r
+ return Page1G;\r
+ }\r
+ }\r
+ }\r
+\r
+ return Page2M;\r
+}\r
+\r
+/**\r
+ Create PML4 or PAE page table.\r
+\r
+ @return The address of page table.\r
+\r
+**/\r
+UINTN\r
+CreatePageTable (\r
+ VOID\r
+ )\r
+{\r
+ RETURN_STATUS Status;\r
+ UINTN PhysicalAddressBits;\r
+ UINTN NumberOfEntries;\r
+ PAGE_ATTRIBUTE TopLevelPageAttr;\r
+ UINTN PageTable;\r
+ PAGE_ATTRIBUTE MaxMemoryPage;\r
+ UINTN Index;\r
+ UINT64 AddressEncMask;\r
+ UINT64 *PageEntry;\r
+ EFI_PHYSICAL_ADDRESS PhysicalAddress;\r
+\r
+ TopLevelPageAttr = (PAGE_ATTRIBUTE)GetPageTableTopLevelType ();\r
+ PhysicalAddressBits = GetPhysicalAddressWidth ();\r
+ NumberOfEntries = (UINTN)1 << (PhysicalAddressBits -\r
+ mPageAttributeTable[TopLevelPageAttr].AddressBitOffset);\r
+\r
+ PageTable = (UINTN) AllocatePageTableMemory (1);\r
+ if (PageTable == 0) {\r
+ return 0;\r
+ }\r
+\r
+ AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask);\r
+ AddressEncMask &= mPageAttributeTable[TopLevelPageAttr].AddressMask;\r
+ MaxMemoryPage = GetMaxMemoryPage (TopLevelPageAttr);\r
+ PageEntry = (UINT64 *)PageTable;\r
+\r
+ PhysicalAddress = 0;\r
+ for (Index = 0; Index < NumberOfEntries; ++Index) {\r
+ *PageEntry = PhysicalAddress | AddressEncMask | PAGE_ATTRIBUTE_BITS;\r
+\r
+ //\r
+ // Split the top page table down to the maximum page size supported\r
+ //\r
+ if (MaxMemoryPage < TopLevelPageAttr) {\r
+ Status = SplitPage(PageEntry, TopLevelPageAttr, MaxMemoryPage, TRUE);\r
+ ASSERT_EFI_ERROR (Status);\r
+ }\r
+\r
+ if (TopLevelPageAttr == Page1G) {\r
+ //\r
+ // PDPTE[2:1] (PAE Paging) must be 0. SplitPage() might change them to 1.\r
+ //\r
+ *PageEntry &= ~(UINT64)(IA32_PG_RW | IA32_PG_U);\r
+ }\r
+\r
+ PageEntry += 1;\r
+ PhysicalAddress += mPageAttributeTable[TopLevelPageAttr].Length;\r
+ }\r
+\r
+\r
+ return PageTable;\r
+}\r
+\r
+/**\r
+ Setup page tables and make them work.\r
+\r
+**/\r
+VOID\r
+EnablePaging (\r
+ VOID\r
+ )\r
+{\r
+ UINTN PageTable;\r
+\r
+ PageTable = CreatePageTable ();\r
+ ASSERT (PageTable != 0);\r
+ if (PageTable != 0) {\r
+ AsmWriteCr3(PageTable);\r
+ AsmWriteCr4 (AsmReadCr4 () | BIT5); // CR4.PAE\r
+ AsmWriteCr0 (AsmReadCr0 () | BIT31); // CR0.PG\r
+ }\r
+}\r
+\r
+/**\r
+ Get the base address of current AP's stack.\r
+\r
+ This function is called in AP's context and assumes that whole calling stacks\r
+ (till this function) consumed by AP's wakeup procedure will not exceed 4KB.\r
+\r
+ PcdCpuApStackSize must be configured with value taking the Guard page into\r
+ account.\r
+\r
+ @param[in,out] Buffer The pointer to private data buffer.\r
+\r
+**/\r
+VOID\r
+EFIAPI\r
+GetStackBase (\r
+ IN OUT VOID *Buffer\r
+ )\r
+{\r
+ EFI_PHYSICAL_ADDRESS StackBase;\r
+\r
+ StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)&StackBase;\r
+ StackBase += BASE_4KB;\r
+ StackBase &= ~((EFI_PHYSICAL_ADDRESS)BASE_4KB - 1);\r
+ StackBase -= PcdGet32(PcdCpuApStackSize);\r
+\r
+ *(EFI_PHYSICAL_ADDRESS *)Buffer = StackBase;\r
+}\r
+\r
+/**\r
+ Setup stack Guard page at the stack base of each processor. BSP and APs have\r
+ different way to get stack base address.\r
+\r
+**/\r
+VOID\r
+SetupStackGuardPage (\r
+ VOID\r
+ )\r
+{\r
+ EFI_PEI_HOB_POINTERS Hob;\r
+ EFI_PHYSICAL_ADDRESS StackBase;\r
+ UINTN NumberOfProcessors;\r
+ UINTN Bsp;\r
+ UINTN Index;\r
+\r
+ //\r
+ // One extra page at the bottom of the stack is needed for Guard page.\r
+ //\r
+ if (PcdGet32(PcdCpuApStackSize) <= EFI_PAGE_SIZE) {\r
+ DEBUG ((DEBUG_ERROR, "PcdCpuApStackSize is not big enough for Stack Guard!\n"));\r
+ ASSERT (FALSE);\r
+ }\r
+\r
+ MpInitLibGetNumberOfProcessors(&NumberOfProcessors, NULL);\r
+ MpInitLibWhoAmI (&Bsp);\r
+ for (Index = 0; Index < NumberOfProcessors; ++Index) {\r
+ if (Index == Bsp) {\r
+ Hob.Raw = GetHobList ();\r
+ while ((Hob.Raw = GetNextHob (EFI_HOB_TYPE_MEMORY_ALLOCATION, Hob.Raw)) != NULL) {\r
+ if (CompareGuid (&gEfiHobMemoryAllocStackGuid,\r
+ &(Hob.MemoryAllocationStack->AllocDescriptor.Name))) {\r
+ StackBase = Hob.MemoryAllocationStack->AllocDescriptor.MemoryBaseAddress;\r
+ break;\r
+ }\r
+ Hob.Raw = GET_NEXT_HOB (Hob);\r
+ }\r
+ } else {\r
+ //\r
+ // Ask AP to return its stack base address.\r
+ //\r
+ MpInitLibStartupThisAP(GetStackBase, Index, NULL, 0, (VOID *)&StackBase, NULL);\r
+ }\r
+ //\r
+ // Set Guard page at stack base address.\r
+ //\r
+ ConvertMemoryPageAttributes(StackBase, EFI_PAGE_SIZE, 0);\r
+ DEBUG ((DEBUG_INFO, "Stack Guard set at %lx [cpu%lu]!\n",\r
+ (UINT64)StackBase, (UINT64)Index));\r
+ }\r
+\r
+ //\r
+ // Publish the changes of page table.\r
+ //\r
+ CpuFlushTlb ();\r
+}\r
+\r
+/**\r
+ Enable/setup stack guard for each processor if PcdCpuStackGuard is set to TRUE.\r
+\r
+ Doing this in the memory-discovered callback is to make sure the Stack Guard\r
+ feature to cover as most PEI code as possible.\r
+\r
+ @param[in] PeiServices General purpose services available to every PEIM.\r
+ @param[in] NotifyDescriptor The notification structure this PEIM registered on install.\r
+ @param[in] Ppi The memory discovered PPI. Not used.\r
+\r
+ @retval EFI_SUCCESS The function completed successfully.\r
+ @retval others There's error in MP initialization.\r
+**/\r
+EFI_STATUS\r
+EFIAPI\r
+MemoryDiscoveredPpiNotifyCallback (\r
+ IN EFI_PEI_SERVICES **PeiServices,\r
+ IN EFI_PEI_NOTIFY_DESCRIPTOR *NotifyDescriptor,\r
+ IN VOID *Ppi\r
+ )\r
+{\r
+ EFI_STATUS Status;\r
+ BOOLEAN InitStackGuard;\r
+\r
+ //\r
+ // Paging must be setup first. Otherwise the exception TSS setup during MP\r
+ // initialization later will not contain paging information and then fail\r
+ // the task switch (for the sake of stack switch).\r
+ //\r
+ InitStackGuard = FALSE;\r
+ if (IsIa32PaeSupported () && PcdGetBool (PcdCpuStackGuard)) {\r
+ EnablePaging ();\r
+ InitStackGuard = TRUE;\r
+ }\r
+\r
+ Status = InitializeCpuMpWorker ((CONST EFI_PEI_SERVICES **)PeiServices);\r
+ ASSERT_EFI_ERROR (Status);\r
+\r
+ if (InitStackGuard) {\r
+ SetupStackGuardPage ();\r
+ }\r
+\r
+ return Status;\r
+}\r
+\r