\r
#include "VirtualMemory.h"\r
\r
-STATIC BOOLEAN mAddressEncMaskChecked = FALSE;\r
-STATIC UINT64 mAddressEncMask;\r
-STATIC PAGE_TABLE_POOL *mPageTablePool = NULL;\r
+STATIC BOOLEAN mAddressEncMaskChecked = FALSE;\r
+STATIC UINT64 mAddressEncMask;\r
+STATIC PAGE_TABLE_POOL *mPageTablePool = NULL;\r
\r
typedef enum {\r
- SetCBit,\r
- ClearCBit\r
+ SetCBit,\r
+ ClearCBit\r
} MAP_RANGE_MODE;\r
\r
/**\r
- Get the memory encryption mask\r
+ Return the pagetable memory encryption mask.\r
\r
- @param[out] EncryptionMask contains the pte mask.\r
+ @return The pagetable memory encryption mask.\r
\r
**/\r
-STATIC\r
UINT64\r
-GetMemEncryptionAddressMask (\r
+EFIAPI\r
+InternalGetMemEncryptionAddressMask (\r
VOID\r
)\r
{\r
- UINT64 EncryptionMask;\r
+ UINT64 EncryptionMask;\r
\r
if (mAddressEncMaskChecked) {\r
return mAddressEncMask;\r
\r
EncryptionMask = MemEncryptSevGetEncryptionMask ();\r
\r
- mAddressEncMask = EncryptionMask & PAGING_1G_ADDRESS_MASK_64;\r
+ mAddressEncMask = EncryptionMask & PAGING_1G_ADDRESS_MASK_64;\r
mAddressEncMaskChecked = TRUE;\r
\r
return mAddressEncMask;\r
STATIC\r
BOOLEAN\r
InitializePageTablePool (\r
- IN UINTN PoolPages\r
+ IN UINTN PoolPages\r
)\r
{\r
- VOID *Buffer;\r
+ VOID *Buffer;\r
\r
//\r
// Always reserve at least PAGE_TABLE_POOL_UNIT_PAGES, including one page for\r
// header.\r
//\r
PoolPages += 1; // Add one page for header.\r
- PoolPages = ((PoolPages - 1) / PAGE_TABLE_POOL_UNIT_PAGES + 1) *\r
- PAGE_TABLE_POOL_UNIT_PAGES;\r
+ PoolPages = ((PoolPages - 1) / PAGE_TABLE_POOL_UNIT_PAGES + 1) *\r
+ PAGE_TABLE_POOL_UNIT_PAGES;\r
Buffer = AllocateAlignedPages (PoolPages, PAGE_TABLE_POOL_ALIGNMENT);\r
if (Buffer == NULL) {\r
DEBUG ((DEBUG_ERROR, "ERROR: Out of aligned pages\r\n"));\r
// Link all pools into a list for easier track later.\r
//\r
if (mPageTablePool == NULL) {\r
- mPageTablePool = Buffer;\r
+ mPageTablePool = Buffer;\r
mPageTablePool->NextPool = mPageTablePool;\r
} else {\r
((PAGE_TABLE_POOL *)Buffer)->NextPool = mPageTablePool->NextPool;\r
- mPageTablePool->NextPool = Buffer;\r
- mPageTablePool = Buffer;\r
+ mPageTablePool->NextPool = Buffer;\r
+ mPageTablePool = Buffer;\r
}\r
\r
//\r
// Reserve one page for pool header.\r
//\r
- mPageTablePool->FreePages = PoolPages - 1;\r
- mPageTablePool->Offset = EFI_PAGES_TO_SIZE (1);\r
+ mPageTablePool->FreePages = PoolPages - 1;\r
+ mPageTablePool->Offset = EFI_PAGES_TO_SIZE (1);\r
\r
return TRUE;\r
}\r
VOID *\r
EFIAPI\r
AllocatePageTableMemory (\r
- IN UINTN Pages\r
+ IN UINTN Pages\r
)\r
{\r
- VOID *Buffer;\r
+ VOID *Buffer;\r
\r
if (Pages == 0) {\r
return NULL;\r
//\r
// Renew the pool if necessary.\r
//\r
- if (mPageTablePool == NULL ||\r
- Pages > mPageTablePool->FreePages) {\r
+ if ((mPageTablePool == NULL) ||\r
+ (Pages > mPageTablePool->FreePages))\r
+ {\r
if (!InitializePageTablePool (Pages)) {\r
return NULL;\r
}\r
\r
Buffer = (UINT8 *)mPageTablePool + mPageTablePool->Offset;\r
\r
- mPageTablePool->Offset += EFI_PAGES_TO_SIZE (Pages);\r
- mPageTablePool->FreePages -= Pages;\r
+ mPageTablePool->Offset += EFI_PAGES_TO_SIZE (Pages);\r
+ mPageTablePool->FreePages -= Pages;\r
\r
DEBUG ((\r
DEBUG_VERBOSE,\r
return Buffer;\r
}\r
\r
-\r
/**\r
Split 2M page to 4K.\r
\r
STATIC\r
VOID\r
Split2MPageTo4K (\r
- IN PHYSICAL_ADDRESS PhysicalAddress,\r
- IN OUT UINT64 *PageEntry2M,\r
- IN PHYSICAL_ADDRESS StackBase,\r
- IN UINTN StackSize\r
+ IN PHYSICAL_ADDRESS PhysicalAddress,\r
+ IN OUT UINT64 *PageEntry2M,\r
+ IN PHYSICAL_ADDRESS StackBase,\r
+ IN UINTN StackSize\r
)\r
{\r
- PHYSICAL_ADDRESS PhysicalAddress4K;\r
- UINTN IndexOfPageTableEntries;\r
- PAGE_TABLE_4K_ENTRY *PageTableEntry;\r
- PAGE_TABLE_4K_ENTRY *PageTableEntry1;\r
- UINT64 AddressEncMask;\r
+ PHYSICAL_ADDRESS PhysicalAddress4K;\r
+ UINTN IndexOfPageTableEntries;\r
+ PAGE_TABLE_4K_ENTRY *PageTableEntry;\r
+ PAGE_TABLE_4K_ENTRY *PageTableEntry1;\r
+ UINT64 AddressEncMask;\r
\r
- PageTableEntry = AllocatePageTableMemory(1);\r
+ PageTableEntry = AllocatePageTableMemory (1);\r
\r
PageTableEntry1 = PageTableEntry;\r
\r
- AddressEncMask = GetMemEncryptionAddressMask ();\r
+ AddressEncMask = InternalGetMemEncryptionAddressMask ();\r
\r
ASSERT (PageTableEntry != NULL);\r
ASSERT (*PageEntry2M & AddressEncMask);\r
IndexOfPageTableEntries < 512;\r
(IndexOfPageTableEntries++,\r
PageTableEntry++,\r
- PhysicalAddress4K += SIZE_4KB)) {\r
+ PhysicalAddress4K += SIZE_4KB))\r
+ {\r
//\r
// Fill in the Page Table entries\r
//\r
- PageTableEntry->Uint64 = (UINT64) PhysicalAddress4K | AddressEncMask;\r
+ PageTableEntry->Uint64 = (UINT64)PhysicalAddress4K | AddressEncMask;\r
PageTableEntry->Bits.ReadWrite = 1;\r
- PageTableEntry->Bits.Present = 1;\r
+ PageTableEntry->Bits.Present = 1;\r
if ((PhysicalAddress4K >= StackBase) &&\r
- (PhysicalAddress4K < StackBase + StackSize)) {\r
+ (PhysicalAddress4K < StackBase + StackSize))\r
+ {\r
//\r
// Set Nx bit for stack.\r
//\r
STATIC\r
VOID\r
SetPageTablePoolReadOnly (\r
- IN UINTN PageTableBase,\r
- IN EFI_PHYSICAL_ADDRESS Address,\r
- IN BOOLEAN Level4Paging\r
+ IN UINTN PageTableBase,\r
+ IN EFI_PHYSICAL_ADDRESS Address,\r
+ IN BOOLEAN Level4Paging\r
)\r
{\r
UINTN Index;\r
LevelSize[3] = SIZE_1GB;\r
LevelSize[4] = SIZE_512GB;\r
\r
- AddressEncMask = GetMemEncryptionAddressMask();\r
- PageTable = (UINT64 *)(UINTN)PageTableBase;\r
- PoolUnitSize = PAGE_TABLE_POOL_UNIT_SIZE;\r
+ AddressEncMask = InternalGetMemEncryptionAddressMask ();\r
+ PageTable = (UINT64 *)(UINTN)PageTableBase;\r
+ PoolUnitSize = PAGE_TABLE_POOL_UNIT_SIZE;\r
\r
for (Level = (Level4Paging) ? 4 : 3; Level > 0; --Level) {\r
- Index = ((UINTN)RShiftU64 (Address, LevelShift[Level]));\r
+ Index = ((UINTN)RShiftU64 (Address, LevelShift[Level]));\r
Index &= PAGING_PAE_INDEX_MASK;\r
\r
PageAttr = PageTable[Index];\r
ASSERT (Index < EFI_PAGE_SIZE/sizeof (UINT64));\r
\r
PageTable[Index] &= ~(UINT64)IA32_PG_RW;\r
- PoolUnitSize -= LevelSize[Level];\r
+ PoolUnitSize -= LevelSize[Level];\r
\r
++Index;\r
}\r
}\r
\r
break;\r
-\r
} else {\r
//\r
// The smaller granularity of page must be needed.\r
\r
PhysicalAddress = PageAttr & LevelMask[Level];\r
for (EntryIndex = 0;\r
- EntryIndex < EFI_PAGE_SIZE/sizeof (UINT64);\r
- ++EntryIndex) {\r
+ EntryIndex < EFI_PAGE_SIZE/sizeof (UINT64);\r
+ ++EntryIndex)\r
+ {\r
NewPageTable[EntryIndex] = PhysicalAddress | AddressEncMask |\r
IA32_PG_P | IA32_PG_RW;\r
if (Level > 2) {\r
NewPageTable[EntryIndex] |= IA32_PG_PS;\r
}\r
+\r
PhysicalAddress += LevelSize[Level - 1];\r
}\r
\r
PageTable[Index] = (UINT64)(UINTN)NewPageTable | AddressEncMask |\r
- IA32_PG_P | IA32_PG_RW;\r
+ IA32_PG_P | IA32_PG_RW;\r
PageTable = NewPageTable;\r
}\r
}\r
STATIC\r
VOID\r
EnablePageTableProtection (\r
- IN UINTN PageTableBase,\r
- IN BOOLEAN Level4Paging\r
+ IN UINTN PageTableBase,\r
+ IN BOOLEAN Level4Paging\r
)\r
{\r
- PAGE_TABLE_POOL *HeadPool;\r
- PAGE_TABLE_POOL *Pool;\r
- UINT64 PoolSize;\r
- EFI_PHYSICAL_ADDRESS Address;\r
+ PAGE_TABLE_POOL *HeadPool;\r
+ PAGE_TABLE_POOL *Pool;\r
+ UINT64 PoolSize;\r
+ EFI_PHYSICAL_ADDRESS Address;\r
\r
if (mPageTablePool == NULL) {\r
return;\r
// remember original one in advance.\r
//\r
HeadPool = mPageTablePool;\r
- Pool = HeadPool;\r
+ Pool = HeadPool;\r
do {\r
Address = (EFI_PHYSICAL_ADDRESS)(UINTN)Pool;\r
PoolSize = Pool->Offset + EFI_PAGES_TO_SIZE (Pool->FreePages);\r
// the protection to them one by one.\r
//\r
while (PoolSize > 0) {\r
- SetPageTablePoolReadOnly(PageTableBase, Address, Level4Paging);\r
- Address += PAGE_TABLE_POOL_UNIT_SIZE;\r
- PoolSize -= PAGE_TABLE_POOL_UNIT_SIZE;\r
+ SetPageTablePoolReadOnly (PageTableBase, Address, Level4Paging);\r
+ Address += PAGE_TABLE_POOL_UNIT_SIZE;\r
+ PoolSize -= PAGE_TABLE_POOL_UNIT_SIZE;\r
}\r
\r
Pool = Pool->NextPool;\r
} while (Pool != HeadPool);\r
-\r
}\r
\r
-\r
/**\r
Split 1G page to 2M.\r
\r
STATIC\r
VOID\r
Split1GPageTo2M (\r
- IN PHYSICAL_ADDRESS PhysicalAddress,\r
- IN OUT UINT64 *PageEntry1G,\r
- IN PHYSICAL_ADDRESS StackBase,\r
- IN UINTN StackSize\r
+ IN PHYSICAL_ADDRESS PhysicalAddress,\r
+ IN OUT UINT64 *PageEntry1G,\r
+ IN PHYSICAL_ADDRESS StackBase,\r
+ IN UINTN StackSize\r
)\r
{\r
- PHYSICAL_ADDRESS PhysicalAddress2M;\r
- UINTN IndexOfPageDirectoryEntries;\r
- PAGE_TABLE_ENTRY *PageDirectoryEntry;\r
- UINT64 AddressEncMask;\r
+ PHYSICAL_ADDRESS PhysicalAddress2M;\r
+ UINTN IndexOfPageDirectoryEntries;\r
+ PAGE_TABLE_ENTRY *PageDirectoryEntry;\r
+ UINT64 AddressEncMask;\r
\r
- PageDirectoryEntry = AllocatePageTableMemory(1);\r
+ PageDirectoryEntry = AllocatePageTableMemory (1);\r
\r
- AddressEncMask = GetMemEncryptionAddressMask ();\r
+ AddressEncMask = InternalGetMemEncryptionAddressMask ();\r
ASSERT (PageDirectoryEntry != NULL);\r
ASSERT (*PageEntry1G & AddressEncMask);\r
//\r
IndexOfPageDirectoryEntries < 512;\r
(IndexOfPageDirectoryEntries++,\r
PageDirectoryEntry++,\r
- PhysicalAddress2M += SIZE_2MB)) {\r
+ PhysicalAddress2M += SIZE_2MB))\r
+ {\r
if ((PhysicalAddress2M < StackBase + StackSize) &&\r
- ((PhysicalAddress2M + SIZE_2MB) > StackBase)) {\r
+ ((PhysicalAddress2M + SIZE_2MB) > StackBase))\r
+ {\r
//\r
// Need to split this 2M page that covers stack range.\r
//\r
//\r
// Fill in the Page Directory entries\r
//\r
- PageDirectoryEntry->Uint64 = (UINT64) PhysicalAddress2M | AddressEncMask;\r
+ PageDirectoryEntry->Uint64 = (UINT64)PhysicalAddress2M | AddressEncMask;\r
PageDirectoryEntry->Bits.ReadWrite = 1;\r
- PageDirectoryEntry->Bits.Present = 1;\r
- PageDirectoryEntry->Bits.MustBe1 = 1;\r
+ PageDirectoryEntry->Bits.Present = 1;\r
+ PageDirectoryEntry->Bits.MustBe1 = 1;\r
}\r
}\r
}\r
\r
-\r
/**\r
Set or Clear the memory encryption bit\r
\r
\r
**/\r
STATIC VOID\r
-SetOrClearCBit(\r
- IN OUT UINT64* PageTablePointer,\r
- IN MAP_RANGE_MODE Mode\r
+SetOrClearCBit (\r
+ IN OUT UINT64 *PageTablePointer,\r
+ IN MAP_RANGE_MODE Mode\r
)\r
{\r
- UINT64 AddressEncMask;\r
+ UINT64 AddressEncMask;\r
\r
- AddressEncMask = GetMemEncryptionAddressMask ();\r
+ AddressEncMask = InternalGetMemEncryptionAddressMask ();\r
\r
if (Mode == SetCBit) {\r
*PageTablePointer |= AddressEncMask;\r
} else {\r
*PageTablePointer &= ~AddressEncMask;\r
}\r
-\r
}\r
\r
/**\r
return ((AsmReadCr0 () & BIT16) != 0);\r
}\r
\r
-\r
/**\r
Disable Write Protect on pages marked as read-only.\r
**/\r
VOID\r
)\r
{\r
- AsmWriteCr0 (AsmReadCr0() & ~BIT16);\r
+ AsmWriteCr0 (AsmReadCr0 () & ~BIT16);\r
}\r
\r
/**\r
Enable Write Protect on pages marked as read-only.\r
**/\r
+STATIC\r
VOID\r
EnableReadOnlyPageWriteProtect (\r
VOID\r
)\r
{\r
- AsmWriteCr0 (AsmReadCr0() | BIT16);\r
+ AsmWriteCr0 (AsmReadCr0 () | BIT16);\r
}\r
\r
-\r
/**\r
This function either sets or clears memory encryption bit for the memory\r
region specified by PhysicalAddress and Length from the current page table\r
RETURN_STATUS\r
EFIAPI\r
SetMemoryEncDec (\r
- IN PHYSICAL_ADDRESS Cr3BaseAddress,\r
- IN PHYSICAL_ADDRESS PhysicalAddress,\r
- IN UINTN Length,\r
- IN MAP_RANGE_MODE Mode,\r
- IN BOOLEAN CacheFlush\r
+ IN PHYSICAL_ADDRESS Cr3BaseAddress,\r
+ IN PHYSICAL_ADDRESS PhysicalAddress,\r
+ IN UINTN Length,\r
+ IN MAP_RANGE_MODE Mode,\r
+ IN BOOLEAN CacheFlush\r
)\r
{\r
- PAGE_MAP_AND_DIRECTORY_POINTER *PageMapLevel4Entry;\r
- PAGE_MAP_AND_DIRECTORY_POINTER *PageUpperDirectoryPointerEntry;\r
- PAGE_MAP_AND_DIRECTORY_POINTER *PageDirectoryPointerEntry;\r
- PAGE_TABLE_1G_ENTRY *PageDirectory1GEntry;\r
- PAGE_TABLE_ENTRY *PageDirectory2MEntry;\r
- PAGE_TABLE_4K_ENTRY *PageTableEntry;\r
- UINT64 PgTableMask;\r
- UINT64 AddressEncMask;\r
- BOOLEAN IsWpEnabled;\r
- RETURN_STATUS Status;\r
+ PAGE_MAP_AND_DIRECTORY_POINTER *PageMapLevel4Entry;\r
+ PAGE_MAP_AND_DIRECTORY_POINTER *PageUpperDirectoryPointerEntry;\r
+ PAGE_MAP_AND_DIRECTORY_POINTER *PageDirectoryPointerEntry;\r
+ PAGE_TABLE_1G_ENTRY *PageDirectory1GEntry;\r
+ PAGE_TABLE_ENTRY *PageDirectory2MEntry;\r
+ PAGE_TABLE_4K_ENTRY *PageTableEntry;\r
+ UINT64 PgTableMask;\r
+ UINT64 AddressEncMask;\r
+ BOOLEAN IsWpEnabled;\r
+ RETURN_STATUS Status;\r
\r
//\r
// Set PageMapLevel4Entry to suppress incorrect compiler/analyzer warnings.\r
//\r
// Check if we have a valid memory encryption mask\r
//\r
- AddressEncMask = GetMemEncryptionAddressMask ();\r
+ AddressEncMask = InternalGetMemEncryptionAddressMask ();\r
if (!AddressEncMask) {\r
return RETURN_ACCESS_DENIED;\r
}\r
// with correct C-bit\r
//\r
if (CacheFlush) {\r
- WriteBackInvalidateDataCacheRange((VOID*) (UINTN)PhysicalAddress, Length);\r
+ WriteBackInvalidateDataCacheRange ((VOID *)(UINTN)PhysicalAddress, Length);\r
}\r
\r
//\r
\r
Status = EFI_SUCCESS;\r
\r
- while (Length != 0)\r
- {\r
+ while (Length != 0) {\r
//\r
// If Cr3BaseAddress is not specified then read the current CR3\r
//\r
if (Cr3BaseAddress == 0) {\r
- Cr3BaseAddress = AsmReadCr3();\r
+ Cr3BaseAddress = AsmReadCr3 ();\r
}\r
\r
- PageMapLevel4Entry = (VOID*) (Cr3BaseAddress & ~PgTableMask);\r
- PageMapLevel4Entry += PML4_OFFSET(PhysicalAddress);\r
+ PageMapLevel4Entry = (VOID *)(Cr3BaseAddress & ~PgTableMask);\r
+ PageMapLevel4Entry += PML4_OFFSET (PhysicalAddress);\r
if (!PageMapLevel4Entry->Bits.Present) {\r
DEBUG ((\r
DEBUG_ERROR,\r
}\r
\r
PageDirectory1GEntry = (VOID *)(\r
- (PageMapLevel4Entry->Bits.PageTableBaseAddress <<\r
- 12) & ~PgTableMask\r
- );\r
- PageDirectory1GEntry += PDP_OFFSET(PhysicalAddress);\r
+ (PageMapLevel4Entry->Bits.PageTableBaseAddress <<\r
+ 12) & ~PgTableMask\r
+ );\r
+ PageDirectory1GEntry += PDP_OFFSET (PhysicalAddress);\r
if (!PageDirectory1GEntry->Bits.Present) {\r
DEBUG ((\r
DEBUG_ERROR,\r
// Valid 1GB page\r
// If we have at least 1GB to go, we can just update this entry\r
//\r
- if ((PhysicalAddress & (BIT30 - 1)) == 0 && Length >= BIT30) {\r
- SetOrClearCBit(&PageDirectory1GEntry->Uint64, Mode);\r
+ if (((PhysicalAddress & (BIT30 - 1)) == 0) && (Length >= BIT30)) {\r
+ SetOrClearCBit (&PageDirectory1GEntry->Uint64, Mode);\r
DEBUG ((\r
DEBUG_VERBOSE,\r
"%a:%a: updated 1GB entry for Physical=0x%Lx\n",\r
PhysicalAddress\r
));\r
PhysicalAddress += BIT30;\r
- Length -= BIT30;\r
+ Length -= BIT30;\r
} else {\r
//\r
// We must split the page\r
(PAGE_MAP_AND_DIRECTORY_POINTER *)PageDirectory1GEntry;\r
PageDirectory2MEntry =\r
(VOID *)(\r
- (PageUpperDirectoryPointerEntry->Bits.PageTableBaseAddress <<\r
- 12) & ~PgTableMask\r
- );\r
- PageDirectory2MEntry += PDE_OFFSET(PhysicalAddress);\r
+ (PageUpperDirectoryPointerEntry->Bits.PageTableBaseAddress <<\r
+ 12) & ~PgTableMask\r
+ );\r
+ PageDirectory2MEntry += PDE_OFFSET (PhysicalAddress);\r
if (!PageDirectory2MEntry->Bits.Present) {\r
DEBUG ((\r
DEBUG_ERROR,\r
Status = RETURN_NO_MAPPING;\r
goto Done;\r
}\r
+\r
//\r
// If the MustBe1 bit is not a 1, it's not a 2MB entry\r
//\r
// Valid 2MB page\r
// If we have at least 2MB left to go, we can just update this entry\r
//\r
- if ((PhysicalAddress & (BIT21-1)) == 0 && Length >= BIT21) {\r
+ if (((PhysicalAddress & (BIT21-1)) == 0) && (Length >= BIT21)) {\r
SetOrClearCBit (&PageDirectory2MEntry->Uint64, Mode);\r
PhysicalAddress += BIT21;\r
- Length -= BIT21;\r
+ Length -= BIT21;\r
} else {\r
//\r
// We must split up this page into 4K pages\r
(PAGE_MAP_AND_DIRECTORY_POINTER *)PageDirectory2MEntry;\r
PageTableEntry =\r
(VOID *)(\r
- (PageDirectoryPointerEntry->Bits.PageTableBaseAddress <<\r
- 12) & ~PgTableMask\r
- );\r
- PageTableEntry += PTE_OFFSET(PhysicalAddress);\r
+ (PageDirectoryPointerEntry->Bits.PageTableBaseAddress <<\r
+ 12) & ~PgTableMask\r
+ );\r
+ PageTableEntry += PTE_OFFSET (PhysicalAddress);\r
if (!PageTableEntry->Bits.Present) {\r
DEBUG ((\r
DEBUG_ERROR,\r
Status = RETURN_NO_MAPPING;\r
goto Done;\r
}\r
+\r
SetOrClearCBit (&PageTableEntry->Uint64, Mode);\r
PhysicalAddress += EFI_PAGE_SIZE;\r
- Length -= EFI_PAGE_SIZE;\r
+ Length -= EFI_PAGE_SIZE;\r
}\r
}\r
}\r
//\r
// Flush TLB\r
//\r
- CpuFlushTlb();\r
+ CpuFlushTlb ();\r
\r
Done:\r
//\r
@param[in] PhysicalAddress The physical address that is the start\r
address of a memory region.\r
@param[in] Length The length of memory region\r
- @param[in] Flush Flush the caches before applying the\r
- encryption mask\r
\r
@retval RETURN_SUCCESS The attributes were cleared for the\r
memory region.\r
RETURN_STATUS\r
EFIAPI\r
InternalMemEncryptSevSetMemoryDecrypted (\r
- IN PHYSICAL_ADDRESS Cr3BaseAddress,\r
- IN PHYSICAL_ADDRESS PhysicalAddress,\r
- IN UINTN Length,\r
- IN BOOLEAN Flush\r
+ IN PHYSICAL_ADDRESS Cr3BaseAddress,\r
+ IN PHYSICAL_ADDRESS PhysicalAddress,\r
+ IN UINTN Length\r
)\r
{\r
-\r
return SetMemoryEncDec (\r
Cr3BaseAddress,\r
PhysicalAddress,\r
Length,\r
ClearCBit,\r
- Flush\r
+ TRUE\r
);\r
}\r
\r
@param[in] PhysicalAddress The physical address that is the start\r
address of a memory region.\r
@param[in] Length The length of memory region\r
- @param[in] Flush Flush the caches before applying the\r
- encryption mask\r
\r
@retval RETURN_SUCCESS The attributes were set for the memory\r
region.\r
RETURN_STATUS\r
EFIAPI\r
InternalMemEncryptSevSetMemoryEncrypted (\r
- IN PHYSICAL_ADDRESS Cr3BaseAddress,\r
- IN PHYSICAL_ADDRESS PhysicalAddress,\r
- IN UINTN Length,\r
- IN BOOLEAN Flush\r
+ IN PHYSICAL_ADDRESS Cr3BaseAddress,\r
+ IN PHYSICAL_ADDRESS PhysicalAddress,\r
+ IN UINTN Length\r
)\r
{\r
return SetMemoryEncDec (\r
PhysicalAddress,\r
Length,\r
SetCBit,\r
- Flush\r
+ TRUE\r
+ );\r
+}\r
+\r
+/**\r
+ This function clears memory encryption bit for the MMIO region specified by\r
+ PhysicalAddress and Length.\r
+\r
+ @param[in] Cr3BaseAddress Cr3 Base Address (if zero then use\r
+ current CR3)\r
+ @param[in] PhysicalAddress The physical address that is the start\r
+ address of a MMIO region.\r
+ @param[in] Length The length of memory region\r
+\r
+ @retval RETURN_SUCCESS The attributes were cleared for the\r
+ memory region.\r
+ @retval RETURN_INVALID_PARAMETER Length is zero.\r
+  @retval RETURN_UNSUPPORTED       Clearing the memory encryption attribute\r
+ is not supported\r
+**/\r
+RETURN_STATUS\r
+EFIAPI\r
+InternalMemEncryptSevClearMmioPageEncMask (\r
+ IN PHYSICAL_ADDRESS Cr3BaseAddress,\r
+ IN PHYSICAL_ADDRESS PhysicalAddress,\r
+ IN UINTN Length\r
+ )\r
+{\r
+ return SetMemoryEncDec (\r
+ Cr3BaseAddress,\r
+ PhysicalAddress,\r
+ Length,\r
+ ClearCBit,\r
+ FALSE\r
);\r
}\r