/** @file\r
SMM MP service implementation\r
\r
-Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>\r
+Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>\r
This program and the accompanying materials\r
are licensed and made available under the terms and conditions of the BSD License\r
which accompanies this distribution. The full text of the license may be found at\r
Create 4G PageTable in SMRAM.\r
\r
  @param ExtraPages       Additional pages to allocate besides the 4G memory page table\r
+ @param Is32BitPageTable Whether the page table is 32-bit PAE\r
@return PageTable Address\r
\r
**/\r
UINT32\r
Gen4GPageTable (\r
- IN UINTN ExtraPages\r
+ IN UINTN ExtraPages,\r
+ IN BOOLEAN Is32BitPageTable\r
)\r
{\r
VOID *PageTable;\r
//\r
// Allocate the page table\r
//\r
- PageTable = AllocatePages (ExtraPages + 5 + PagesNeeded);\r
+ PageTable = AllocatePageTableMemory (ExtraPages + 5 + PagesNeeded);\r
ASSERT (PageTable != NULL);\r
\r
PageTable = (VOID *)((UINTN)PageTable + EFI_PAGES_TO_SIZE (ExtraPages));\r
// Set Page Directory Pointers\r
//\r
for (Index = 0; Index < 4; Index++) {\r
- Pte[Index] = (UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1) + IA32_PG_P;\r
+ Pte[Index] = (UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1) + (Is32BitPageTable ? IA32_PAE_PDPTE_ATTRIBUTE_BITS : PAGE_ATTRIBUTE_BITS);\r
}\r
Pte += EFI_PAGE_SIZE / sizeof (*Pte);\r
\r
// Fill in Page Directory Entries\r
//\r
for (Index = 0; Index < EFI_PAGE_SIZE * 4 / sizeof (*Pte); Index++) {\r
- Pte[Index] = (Index << 21) + IA32_PG_PS + IA32_PG_RW + IA32_PG_P;\r
+ Pte[Index] = (Index << 21) | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;\r
}\r
\r
if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
Pdpte = (UINT64*)PageTable;\r
for (PageIndex = Low2MBoundary; PageIndex <= High2MBoundary; PageIndex += SIZE_2MB) {\r
Pte = (UINT64*)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~(EFI_PAGE_SIZE - 1));\r
- Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages + IA32_PG_RW + IA32_PG_P;\r
+ Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages | PAGE_ATTRIBUTE_BITS;\r
//\r
// Fill in Page Table Entries\r
//\r
GuardPage = 0;\r
}\r
} else {\r
- Pte[Index] = PageAddress + IA32_PG_RW + IA32_PG_P;\r
+ Pte[Index] = PageAddress | PAGE_ATTRIBUTE_BITS;\r
}\r
PageAddress+= EFI_PAGE_SIZE;\r
}\r
//\r
// Allocate a page from SMRAM\r
//\r
- NewPageTableAddress = AllocatePages (1);\r
+ NewPageTableAddress = AllocatePageTableMemory (1);\r
ASSERT (NewPageTableAddress != NULL);\r
\r
NewPageTable = (UINT64 *)NewPageTableAddress;\r
NewPageTable[Index] |= (UINT64)(Index << EFI_PAGE_SHIFT);\r
}\r
\r
- PageTable[PTIndex] = ((UINTN)NewPageTableAddress & gPhyMask) | IA32_PG_P;\r
+ PageTable[PTIndex] = ((UINTN)NewPageTableAddress & gPhyMask) | PAGE_ATTRIBUTE_BITS;\r
}\r
\r
ASSERT (PageTable[PTIndex] & IA32_PG_P);\r
}\r
\r
/**\r
- This funciton sets DR6 & DR7 according to SMM save state, before running SMM C code.\r
+ This function sets DR6 & DR7 according to SMM save state, before running SMM C code.\r
  They are useful when you want to enable hardware breakpoints in SMM without entering SMM mode.\r
\r
NOTE: It might not be appreciated in runtime since it might\r
SMRAM_SAVE_STATE_MAP *CpuSaveState;\r
\r
if (FeaturePcdGet (PcdCpuSmmDebug)) {\r
- CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmst->CpuSaveState[CpuIndex];\r
+ CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];\r
if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {\r
AsmWriteDr6 (CpuSaveState->x86._DR6);\r
AsmWriteDr7 (CpuSaveState->x86._DR7);\r
}\r
\r
/**\r
- This funciton restores DR6 & DR7 to SMM save state.\r
+ This function restores DR6 & DR7 to SMM save state.\r
\r
NOTE: It might not be appreciated in runtime since it might\r
conflict with OS debugging facilities. Turn them off in RELEASE.\r
SMRAM_SAVE_STATE_MAP *CpuSaveState;\r
\r
if (FeaturePcdGet (PcdCpuSmmDebug)) {\r
- CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmst->CpuSaveState[CpuIndex];\r
+ CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];\r
if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {\r
CpuSaveState->x86._DR7 = (UINT32)AsmReadDr7 ();\r
CpuSaveState->x86._DR6 = (UINT32)AsmReadDr6 ();\r
BOOLEAN BspInProgress;\r
UINTN Index;\r
UINTN Cr2;\r
+ BOOLEAN XdDisableFlag;\r
\r
//\r
// Save Cr2 because Page Fault exception in SMM may override its value\r
}\r
\r
//\r
- // Try to enable NX\r
+ // Try to enable XD\r
//\r
+ XdDisableFlag = FALSE;\r
if (mXdSupported) {\r
+ if ((AsmReadMsr64 (MSR_IA32_MISC_ENABLE) & B_XD_DISABLE_BIT) != 0) {\r
+ XdDisableFlag = TRUE;\r
+ AsmMsrAnd64 (MSR_IA32_MISC_ENABLE, ~B_XD_DISABLE_BIT);\r
+ }\r
ActivateXd ();\r
}\r
\r
// BSP Handler is always called with a ValidSmi == TRUE\r
//\r
BSPHandler (CpuIndex, mSmmMpSyncData->EffectiveSyncMode);\r
-\r
} else {\r
APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);\r
}\r
//\r
while (mSmmMpSyncData->AllCpusInSync) {\r
CpuPause ();\r
+ }\r
+\r
+ //\r
+ // Restore XD\r
+ //\r
+ if (XdDisableFlag) {\r
+ AsmMsrOr64 (MSR_IA32_MISC_ENABLE, B_XD_DISABLE_BIT);\r
}\r
}\r
\r