--- /dev/null
+/** @file\r
+ Basic paging support for the CPU to enable Stack Guard.\r
+\r
+Copyright (c) 2018, Intel Corporation. All rights reserved.<BR>\r
+\r
+This program and the accompanying materials\r
+are licensed and made available under the terms and conditions of the BSD License\r
+which accompanies this distribution. The full text of the license may be found at\r
+http://opensource.org/licenses/bsd-license.php\r
+\r
+THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+\r
+**/\r
+\r
+#include <Register/Cpuid.h>\r
+#include <Register/Msr.h>\r
+#include <Library/MemoryAllocationLib.h>\r
+#include <Library/CpuLib.h>\r
+#include <Library/BaseLib.h>\r
+\r
+#include "CpuMpPei.h"\r
+\r
+//
+// IA32 paging-structure entry bits (Intel SDM Vol. 3A, Chapter 4).
+//
+#define IA32_PG_P BIT0
+#define IA32_PG_RW BIT1
+#define IA32_PG_U BIT2
+#define IA32_PG_A BIT5
+#define IA32_PG_D BIT6
+#define IA32_PG_PS BIT7
+#define IA32_PG_NX BIT63
+
+// Default attributes for newly created entries: present and writable.
+#define PAGE_ATTRIBUTE_BITS (IA32_PG_RW | IA32_PG_P)
+// Bits copied from a large page entry into the entries it is split into.
+// NOTE(review): "PROGATE" appears to be a typo for "PROPAGATE"; kept as-is
+// because renaming would touch every user of the macro.
+#define PAGE_PROGATE_BITS (IA32_PG_D | IA32_PG_A | IA32_PG_NX | IA32_PG_U |\
+ PAGE_ATTRIBUTE_BITS)
+
+// 9-bit table index mask and per-level page-frame address masks.
+#define PAGING_PAE_INDEX_MASK 0x1FF
+#define PAGING_4K_ADDRESS_MASK_64 0x000FFFFFFFFFF000ull
+#define PAGING_2M_ADDRESS_MASK_64 0x000FFFFFFFE00000ull
+#define PAGING_1G_ADDRESS_MASK_64 0x000FFFFFC0000000ull
+#define PAGING_512G_ADDRESS_MASK_64 0x000FFF8000000000ull
+\r
+//
+// Page mapping granularities handled by this module, ordered smallest to
+// largest. PageNone is a sentinel meaning "no mapping"/invalid level.
+//
+typedef enum {
+ PageNone = 0,
+ PageMin = 1,
+ Page4K = PageMin,
+ Page2M = 2,
+ Page1G = 3,
+ Page512G = 4,
+ PageMax = Page512G
+} PAGE_ATTRIBUTE;
+\r
+//
+// Describes one paging level: the page size, the page-frame address mask,
+// and which linear-address bits index entries at this level.
+//
+typedef struct {
+ PAGE_ATTRIBUTE Attribute; // Paging level this row describes
+ UINT64 Length; // Size in bytes of one page at this level
+ UINT64 AddressMask; // Mask extracting the page frame address
+ UINTN AddressBitOffset; // Lowest linear-address bit used as index
+ UINTN AddressBitLength; // Number of index bits (9 at every level)
+} PAGE_ATTRIBUTE_TABLE;
+\r
+//
+// Per-level paging parameters, indexed by PAGE_ATTRIBUTE value.
+//
+PAGE_ATTRIBUTE_TABLE mPageAttributeTable[] = {
+ {PageNone, 0, 0, 0, 0},
+ {Page4K, SIZE_4KB, PAGING_4K_ADDRESS_MASK_64, 12, 9},
+ {Page2M, SIZE_2MB, PAGING_2M_ADDRESS_MASK_64, 21, 9},
+ {Page1G, SIZE_1GB, PAGING_1G_ADDRESS_MASK_64, 30, 9},
+ {Page512G, SIZE_512GB, PAGING_512G_ADDRESS_MASK_64, 39, 9},
+};
+\r
+//
+// Run MemoryDiscoveredPpiNotifyCallback once permanent memory is installed,
+// so paging and Stack Guard setup happen in the post-memory PEI phase.
+//
+EFI_PEI_NOTIFY_DESCRIPTOR mPostMemNotifyList[] = {
+ {
+ (EFI_PEI_PPI_DESCRIPTOR_NOTIFY_CALLBACK | EFI_PEI_PPI_DESCRIPTOR_TERMINATE_LIST),
+ &gEfiPeiMemoryDiscoveredPpiGuid,
+ MemoryDiscoveredPpiNotifyCallback
+ }
+};
+\r
+/**
+ Check whether the processor supports IA32 PAE paging.
+
+ Queries CPUID leaf 01H (when available) and tests the PAE feature flag
+ in EDX.
+
+ @retval TRUE IA32 PAE is supported.
+ @retval FALSE IA32 PAE is not supported.
+
+**/
+BOOLEAN
+IsIa32PaeSupported (
+ VOID
+ )
+{
+ UINT32 MaxLeaf;
+ CPUID_VERSION_INFO_EDX VersionInfoEdx;
+
+ AsmCpuid (CPUID_SIGNATURE, &MaxLeaf, NULL, NULL, NULL);
+ if (MaxLeaf < CPUID_VERSION_INFO) {
+ return FALSE;
+ }
+
+ AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &VersionInfoEdx.Uint32);
+ return (BOOLEAN)(VersionInfoEdx.Bits.PAE != 0);
+}
+\r
+/**
+ Allocate zero-initialized memory to be used as page table storage.
+
+ @param Pages The number of 4 KB pages to allocate.
+
+ @return A pointer to the allocated buffer or NULL if allocation fails.
+
+**/
+VOID *
+AllocatePageTableMemory (
+ IN UINTN Pages
+ )
+{
+ VOID *Buffer;
+
+ Buffer = AllocatePages (Pages);
+ if (Buffer == NULL) {
+ return NULL;
+ }
+
+ //
+ // Page table entries must start out clear (not-present).
+ //
+ ZeroMem (Buffer, EFI_PAGES_TO_SIZE (Pages));
+ return Buffer;
+}
+\r
+/**
+ Get the physical address width supported by the current processor.
+
+ The result is capped at 48 bits, the maximum a 4-level page table covers.
+
+ @retval 32 If processor is in 32-bit mode.
+ @retval 36-48 If processor is in 64-bit mode.
+
+**/
+UINTN
+GetPhysicalAddressWidth (
+ VOID
+ )
+{
+ UINT32 MaxExtLeaf;
+ UINT32 AddressSize;
+
+ if (sizeof (UINTN) == 4) {
+ return 32;
+ }
+
+ AsmCpuid (CPUID_EXTENDED_FUNCTION, &MaxExtLeaf, NULL, NULL, NULL);
+ if (MaxExtLeaf < CPUID_VIR_PHY_ADDRESS_SIZE) {
+ //
+ // Leaf 80000008H not available; assume the architectural minimum
+ // for PAE-capable processors.
+ //
+ return 36;
+ }
+
+ //
+ // CPUID.80000008H:EAX[7:0] holds the physical address width.
+ //
+ AsmCpuid (CPUID_VIR_PHY_ADDRESS_SIZE, &AddressSize, NULL, NULL, NULL);
+ AddressSize &= 0xFF;
+ return (AddressSize > 48) ? 48 : (UINTN)AddressSize;
+}
+\r
+/**
+ Determine the type of the top level page table from the current CPU mode.
+
+ @retval Page512G PML4 paging (long mode active).
+ @retval Page1G PAE paging.
+
+**/
+PAGE_ATTRIBUTE
+GetPageTableTopLevelType (
+ VOID
+ )
+{
+ MSR_IA32_EFER_REGISTER Efer;
+
+ //
+ // EFER.LMA distinguishes 4-level (long mode) from PAE paging.
+ //
+ Efer.Uint64 = AsmReadMsr64 (MSR_CORE_IA32_EFER);
+ if (Efer.Bits.LMA == 1) {
+ return Page512G;
+ }
+
+ return Page1G;
+}
+\r
+/**
+ Return page table entry matching the address.
+
+ Walks the page table pointed to by CR3 from the top level downwards until
+ it reaches either a leaf entry (large page or 4K PTE) or an absent entry.
+
+ @param[in] Address The address to be checked.
+ @param[out] PageAttribute The paging level of the returned entry;
+ PageNone if the address is not mapped.
+
+ @return The page entry, or NULL if Address has no mapping.
+**/
+VOID *
+GetPageTableEntry (
+ IN PHYSICAL_ADDRESS Address,
+ OUT PAGE_ATTRIBUTE *PageAttribute
+ )
+{
+ INTN Level;
+ UINTN Index;
+ UINT64 *PageTable;
+ UINT64 AddressEncMask;
+
+ //
+ // Memory-encryption bits (e.g. SEV "C-bit") must be masked out before an
+ // entry's address field is used as a pointer to the next-level table.
+ //
+ AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask);
+ PageTable = (UINT64 *)(UINTN)(AsmReadCr3 () & PAGING_4K_ADDRESS_MASK_64);
+ for (Level = (INTN)GetPageTableTopLevelType (); Level > 0; --Level) {
+ // Each level consumes 9 linear-address bits as the table index.
+ Index = (UINTN)RShiftU64 (Address, mPageAttributeTable[Level].AddressBitOffset);
+ Index &= PAGING_PAE_INDEX_MASK;
+
+ //
+ // No mapping?
+ //
+ if (PageTable[Index] == 0) {
+ *PageAttribute = PageNone;
+ return NULL;
+ }
+
+ //
+ // Page memory? PS set marks a large-page leaf; at the lowest level
+ // every entry maps a 4K page.
+ //
+ if ((PageTable[Index] & IA32_PG_PS) != 0 || Level == PageMin) {
+ *PageAttribute = (PAGE_ATTRIBUTE)Level;
+ return &PageTable[Index];
+ }
+
+ //
+ // Page directory or table
+ //
+ PageTable = (UINT64 *)(UINTN)(PageTable[Index] &
+ ~AddressEncMask &
+ PAGING_4K_ADDRESS_MASK_64);
+ }
+
+ *PageAttribute = PageNone;
+ return NULL;
+}
+\r
+/**
+ This function splits one page entry to smaller page entries.
+
+ A new table page is allocated and filled with 512 entries that together
+ cover the same range with the same attributes as the original entry; the
+ original entry is then re-pointed at the new table.
+
+ @param[in] PageEntry The page entry to be split.
+ @param[in] PageAttribute The page attribute of the page entry.
+ @param[in] SplitAttribute How to split the page entry.
+ @param[in] Recursively Do the split recursively or not.
+
+ @retval RETURN_SUCCESS The page entry is split.
+ @retval RETURN_INVALID_PARAMETER If target page attribute is invalid
+ @retval RETURN_OUT_OF_RESOURCES No resource to split page entry.
+**/
+RETURN_STATUS
+SplitPage (
+ IN UINT64 *PageEntry,
+ IN PAGE_ATTRIBUTE PageAttribute,
+ IN PAGE_ATTRIBUTE SplitAttribute,
+ IN BOOLEAN Recursively
+ )
+{
+ UINT64 BaseAddress;
+ UINT64 *NewPageEntry;
+ UINTN Index;
+ UINT64 AddressEncMask;
+ PAGE_ATTRIBUTE SplitTo;
+
+ // Target granularity must be valid and strictly smaller than current.
+ if (SplitAttribute == PageNone || SplitAttribute >= PageAttribute) {
+ ASSERT (SplitAttribute != PageNone);
+ ASSERT (SplitAttribute < PageAttribute);
+ return RETURN_INVALID_PARAMETER;
+ }
+
+ NewPageEntry = AllocatePageTableMemory (1);
+ if (NewPageEntry == NULL) {
+ ASSERT (NewPageEntry != NULL);
+ return RETURN_OUT_OF_RESOURCES;
+ }
+
+ //
+ // One level down each step to achieve more compact page table.
+ //
+ SplitTo = PageAttribute - 1;
+ AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) &
+ mPageAttributeTable[SplitTo].AddressMask;
+ BaseAddress = *PageEntry &
+ ~PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) &
+ mPageAttributeTable[PageAttribute].AddressMask;
+ for (Index = 0; Index < SIZE_4KB / sizeof(UINT64); Index++) {
+ // Inherit D/A/NX/U and R/W/P bits from the entry being split.
+ NewPageEntry[Index] = BaseAddress | AddressEncMask |
+ ((*PageEntry) & PAGE_PROGATE_BITS);
+
+ // PS must be set on 2M/1G leaf entries but not on 4K PTEs.
+ if (SplitTo != PageMin) {
+ NewPageEntry[Index] |= IA32_PG_PS;
+ }
+
+ // NOTE(review): the recursive call's status is ignored; on allocation
+ // failure the entry is simply left at the larger granularity.
+ if (Recursively && SplitTo > SplitAttribute) {
+ SplitPage (&NewPageEntry[Index], SplitTo, SplitAttribute, Recursively);
+ }
+
+ BaseAddress += mPageAttributeTable[SplitTo].Length;
+ }
+
+ // Re-point the original entry at the newly built table.
+ (*PageEntry) = (UINT64)(UINTN)NewPageEntry | AddressEncMask | PAGE_ATTRIBUTE_BITS;
+
+ return RETURN_SUCCESS;
+}
+\r
+/**
+ This function modifies the page attributes for the memory region specified
+ by BaseAddress and Length from their current attributes to the attributes
+ specified by Attributes.
+
+ Caller should make sure BaseAddress and Length is at page boundary.
+
+ Only the present bit (IA32_PG_P) of Attributes is honored; all other bits
+ are ignored. That is sufficient for setting/clearing a stack Guard page.
+
+ @param[in] BaseAddress Start address of a memory region.
+ @param[in] Length Size in bytes of the memory region.
+ @param[in] Attributes Bit mask of attributes to modify.
+
+ @retval RETURN_SUCCESS The attributes were modified for the memory
+ region.
+ @retval RETURN_INVALID_PARAMETER Length is zero; or,
+ BaseAddress or Length is not 4KB aligned.
+ @retval RETURN_OUT_OF_RESOURCES There are not enough system resources to modify
+ the attributes.
+ @retval RETURN_UNSUPPORTED Cannot modify the attributes of given memory,
+ or the region lies above 4GB.
+
+**/
+RETURN_STATUS
+EFIAPI
+ConvertMemoryPageAttributes (
+ IN PHYSICAL_ADDRESS BaseAddress,
+ IN UINT64 Length,
+ IN UINT64 Attributes
+ )
+{
+ UINT64 *PageEntry;
+ PAGE_ATTRIBUTE PageAttribute;
+ RETURN_STATUS Status;
+ EFI_PHYSICAL_ADDRESS MaximumAddress;
+
+ if (Length == 0 ||
+ (BaseAddress & (SIZE_4KB - 1)) != 0 ||
+ (Length & (SIZE_4KB - 1)) != 0) {
+
+ ASSERT (Length > 0);
+ ASSERT ((BaseAddress & (SIZE_4KB - 1)) == 0);
+ ASSERT ((Length & (SIZE_4KB - 1)) == 0);
+
+ return RETURN_INVALID_PARAMETER;
+ }
+
+ //
+ // Only regions below 4GB are handled. NOTE(review): presumably because
+ // PEI stacks live below 4GB — confirm against platform usage.
+ //
+ MaximumAddress = (EFI_PHYSICAL_ADDRESS)MAX_UINT32;
+ if (BaseAddress > MaximumAddress ||
+ Length > MaximumAddress ||
+ (BaseAddress > MaximumAddress - (Length - 1))) {
+ return RETURN_UNSUPPORTED;
+ }
+
+ //
+ // Below logic is to check 2M/4K page to make sure we do not waste memory.
+ //
+ while (Length != 0) {
+ PageEntry = GetPageTableEntry (BaseAddress, &PageAttribute);
+ if (PageEntry == NULL) {
+ return RETURN_UNSUPPORTED;
+ }
+
+ // Large pages are split toward 4K one level per iteration; the entry
+ // for BaseAddress is re-fetched on the next pass of the loop.
+ if (PageAttribute != Page4K) {
+ Status = SplitPage (PageEntry, PageAttribute, Page4K, FALSE);
+ if (RETURN_ERROR (Status)) {
+ return Status;
+ }
+ //
+ // Do it again until the page is 4K.
+ //
+ continue;
+ }
+
+ //
+ // Just take care of 'present' bit for Stack Guard.
+ //
+ if ((Attributes & IA32_PG_P) != 0) {
+ *PageEntry |= (UINT64)IA32_PG_P;
+ } else {
+ *PageEntry &= ~((UINT64)IA32_PG_P);
+ }
+
+ //
+ // Convert success, move to next
+ //
+ BaseAddress += SIZE_4KB;
+ Length -= SIZE_4KB;
+ }
+
+ return RETURN_SUCCESS;
+}
+\r
+/**
+ Get maximum size of page memory supported by current processor.
+
+ @param[in] TopLevelType The type of top level page entry.
+
+ @retval Page1G If processor supports 1G page and PML4.
+ @retval Page2M For all other situations.
+
+**/
+PAGE_ATTRIBUTE
+GetMaxMemoryPage (
+ IN PAGE_ATTRIBUTE TopLevelType
+ )
+{
+ UINT32 MaxExtLeaf;
+ UINT32 ExtFeatureEdx;
+
+ if (TopLevelType != Page512G) {
+ return Page2M;
+ }
+
+ AsmCpuid (CPUID_EXTENDED_FUNCTION, &MaxExtLeaf, NULL, NULL, NULL);
+ if (MaxExtLeaf < CPUID_EXTENDED_CPU_SIG) {
+ return Page2M;
+ }
+
+ //
+ // CPUID.80000001H:EDX[bit 26] reports 1-GByte page support.
+ //
+ AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &ExtFeatureEdx);
+ return ((ExtFeatureEdx & BIT26) != 0) ? Page1G : Page2M;
+}
+\r
+/**
+ Create PML4 or PAE page table identity-mapping the processor's whole
+ physical address range with the largest supported page size.
+
+ @return The address of page table, or 0 if memory allocation failed.
+
+**/
+UINTN
+CreatePageTable (
+ VOID
+ )
+{
+ RETURN_STATUS Status;
+ UINTN PhysicalAddressBits;
+ UINTN NumberOfEntries;
+ PAGE_ATTRIBUTE TopLevelPageAttr;
+ UINTN PageTable;
+ PAGE_ATTRIBUTE MaxMemoryPage;
+ UINTN Index;
+ UINT64 AddressEncMask;
+ UINT64 *PageEntry;
+ EFI_PHYSICAL_ADDRESS PhysicalAddress;
+
+ //
+ // Number of top-level entries needed to span the physical address space:
+ // a PML4 entry maps 512GB, a PAE PDPT entry maps 1GB.
+ //
+ TopLevelPageAttr = (PAGE_ATTRIBUTE)GetPageTableTopLevelType ();
+ PhysicalAddressBits = GetPhysicalAddressWidth ();
+ NumberOfEntries = (UINTN)1 << (PhysicalAddressBits -
+ mPageAttributeTable[TopLevelPageAttr].AddressBitOffset);
+
+ PageTable = (UINTN) AllocatePageTableMemory (1);
+ if (PageTable == 0) {
+ return 0;
+ }
+
+ AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask);
+ AddressEncMask &= mPageAttributeTable[TopLevelPageAttr].AddressMask;
+ MaxMemoryPage = GetMaxMemoryPage (TopLevelPageAttr);
+ PageEntry = (UINT64 *)PageTable;
+
+ // Fill the top-level table with an identity mapping, one entry at a time.
+ PhysicalAddress = 0;
+ for (Index = 0; Index < NumberOfEntries; ++Index) {
+ *PageEntry = PhysicalAddress | AddressEncMask | PAGE_ATTRIBUTE_BITS;
+
+ //
+ // Split the top page table down to the maximum page size supported
+ //
+ if (MaxMemoryPage < TopLevelPageAttr) {
+ Status = SplitPage(PageEntry, TopLevelPageAttr, MaxMemoryPage, TRUE);
+ ASSERT_EFI_ERROR (Status);
+ }
+
+ if (TopLevelPageAttr == Page1G) {\
+ //
+ // PDPTE[2:1] (PAE Paging) must be 0. SplitPage() might change them to 1.
+ //
+ *PageEntry &= ~(UINT64)(IA32_PG_RW | IA32_PG_U);
+ }
+
+ PageEntry += 1;
+ PhysicalAddress += mPageAttributeTable[TopLevelPageAttr].Length;
+ }
+
+
+ return PageTable;
+}
+\r
+/**
+ Setup page tables and make them work.
+
+ Builds an identity-mapped page table, then turns paging on by loading CR3
+ before setting CR4.PAE and finally CR0.PG — the table and PAE mode must be
+ in place before PG is set.
+
+**/
+VOID
+EnablePaging (
+ VOID
+ )
+{
+ UINTN PageTable;
+
+ PageTable = CreatePageTable ();
+ ASSERT (PageTable != 0);
+ if (PageTable != 0) {
+ AsmWriteCr3(PageTable);
+ AsmWriteCr4 (AsmReadCr4 () | BIT5); // CR4.PAE
+ AsmWriteCr0 (AsmReadCr0 () | BIT31); // CR0.PG
+ }
+}
+\r
+/**
+ Get the base address of current AP's stack.
+
+ This function is called in AP's context and assumes that whole calling stacks
+ (till this function) consumed by AP's wakeup procedure will not exceed 4KB.
+
+ PcdCpuApStackSize must be configured with value taking the Guard page into
+ account.
+
+ @param[in,out] Buffer The pointer to private data buffer.
+
+**/
+VOID
+EFIAPI
+GetStackBase (
+ IN OUT VOID *Buffer
+ )
+{
+ EFI_PHYSICAL_ADDRESS Marker;
+ EFI_PHYSICAL_ADDRESS Base;
+
+ //
+ // The address of a local variable approximates the current stack pointer.
+ // Round it up to the next 4KB boundary (the stack top area), then step
+ // back by the configured stack size to reach the stack base.
+ //
+ Marker = (EFI_PHYSICAL_ADDRESS)(UINTN)&Marker;
+ Base = (Marker + BASE_4KB) & ~((EFI_PHYSICAL_ADDRESS)BASE_4KB - 1);
+ Base -= PcdGet32 (PcdCpuApStackSize);
+
+ *(EFI_PHYSICAL_ADDRESS *)Buffer = Base;
+}
+\r
+/**
+ Setup stack Guard page at the stack base of each processor. BSP and APs have
+ different way to get stack base address.
+
+ The BSP stack base is taken from the memory allocation HOB carrying the
+ well-known stack GUID; each AP reports its own stack base via
+ MpInitLibStartupThisAP().
+
+**/
+VOID
+SetupStackGuardPage (
+ VOID
+ )
+{
+ EFI_PEI_HOB_POINTERS Hob;
+ EFI_PHYSICAL_ADDRESS StackBase;
+ UINTN NumberOfProcessors;
+ UINTN Bsp;
+ UINTN Index;
+
+ //
+ // One extra page at the bottom of the stack is needed for Guard page.
+ //
+ if (PcdGet32(PcdCpuApStackSize) <= EFI_PAGE_SIZE) {
+ DEBUG ((DEBUG_ERROR, "PcdCpuApStackSize is not big enough for Stack Guard!\n"));
+ ASSERT (FALSE);
+ }
+
+ MpInitLibGetNumberOfProcessors(&NumberOfProcessors, NULL);
+ MpInitLibWhoAmI (&Bsp);
+ for (Index = 0; Index < NumberOfProcessors; ++Index) {
+ //
+ // Initialize so a processor whose stack base could not be determined is
+ // caught below, instead of a garbage stack address being unmapped.
+ //
+ StackBase = 0;
+
+ if (Index == Bsp) {
+ Hob.Raw = GetHobList ();
+ while ((Hob.Raw = GetNextHob (EFI_HOB_TYPE_MEMORY_ALLOCATION, Hob.Raw)) != NULL) {
+ if (CompareGuid (&gEfiHobMemoryAllocStackGuid,
+ &(Hob.MemoryAllocationStack->AllocDescriptor.Name))) {
+ StackBase = Hob.MemoryAllocationStack->AllocDescriptor.MemoryBaseAddress;
+ break;
+ }
+ Hob.Raw = GET_NEXT_HOB (Hob);
+ }
+ } else {
+ //
+ // Ask the AP to report its own stack base address.
+ //
+ MpInitLibStartupThisAP(GetStackBase, Index, NULL, 0, (VOID *)&StackBase, NULL);
+ }
+
+ //
+ // Set Guard page at stack base address.
+ //
+ ASSERT (StackBase != 0);
+ ConvertMemoryPageAttributes(StackBase, EFI_PAGE_SIZE, 0);
+ DEBUG ((DEBUG_INFO, "Stack Guard set at %lx [cpu%lu]!\n",
+ (UINT64)StackBase, (UINT64)Index));
+ }
+
+ //
+ // Publish the changes of page table.
+ //
+ CpuFlushTlb ();
+}
+\r
+/**
+ Enable/setup stack guard for each processor if PcdCpuStackGuard is set to TRUE.
+
+ Doing this in the memory-discovered callback is to make sure the Stack Guard
+ feature to cover as most PEI code as possible.
+
+ @param[in] PeiServices General purpose services available to every PEIM.
+ @param[in] NotifyDescriptor The notification structure this PEIM registered on install.
+ @param[in] Ppi The memory discovered PPI. Not used.
+
+ @retval EFI_SUCCESS The function completed successfully.
+ @retval others There's error in MP initialization.
+**/
+EFI_STATUS
+EFIAPI
+MemoryDiscoveredPpiNotifyCallback (
+ IN EFI_PEI_SERVICES **PeiServices,
+ IN EFI_PEI_NOTIFY_DESCRIPTOR *NotifyDescriptor,
+ IN VOID *Ppi
+ )
+{
+ EFI_STATUS Status;
+ BOOLEAN StackGuardOn;
+
+ //
+ // Paging must be setup first. Otherwise the exception TSS setup during MP
+ // initialization later will not contain paging information and then fail
+ // the task switch (for the sake of stack switch).
+ //
+ StackGuardOn = (BOOLEAN)(IsIa32PaeSupported () && PcdGetBool (PcdCpuStackGuard));
+ if (StackGuardOn) {
+ EnablePaging ();
+ }
+
+ Status = InitializeCpuMpWorker ((CONST EFI_PEI_SERVICES **)PeiServices);
+ ASSERT_EFI_ERROR (Status);
+
+ if (StackGuardOn) {
+ SetupStackGuardPage ();
+ }
+
+ return Status;
+}
+\r