/** @file\r
UEFI Heap Guard functions.\r
\r
-Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>\r
+Copyright (c) 2017-2018, Intel Corporation. All rights reserved.<BR>\r
This program and the accompanying materials\r
are licensed and made available under the terms and conditions of the BSD License\r
which accompanies this distribution. The full text of the license may be found at\r
GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH]\r
= GUARDED_HEAP_MAP_TABLE_DEPTH_MASKS;\r
\r
+//\r
+// Used for promoting freed but not used pages.\r
+//\r
+GLOBAL_REMOVE_IF_UNREFERENCED EFI_PHYSICAL_ADDRESS mLastPromotedPage = BASE_4GB;\r
+\r
/**\r
Set corresponding bits in bitmap table to 1 according to the address.\r
\r
StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);\r
EndBit = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;\r
\r
- if ((StartBit + BitNumber) > GUARDED_HEAP_MAP_ENTRY_BITS) {\r
+ if ((StartBit + BitNumber) >= GUARDED_HEAP_MAP_ENTRY_BITS) {\r
Msbs = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %\r
GUARDED_HEAP_MAP_ENTRY_BITS;\r
Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;\r
StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);\r
EndBit = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;\r
\r
- if ((StartBit + BitNumber) > GUARDED_HEAP_MAP_ENTRY_BITS) {\r
+ if ((StartBit + BitNumber) >= GUARDED_HEAP_MAP_ENTRY_BITS) {\r
Msbs = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %\r
GUARDED_HEAP_MAP_ENTRY_BITS;\r
Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;\r
Lsbs = 0;\r
}\r
\r
- Result = RShiftU64 ((*BitMap), StartBit) & (LShiftU64 (1, Msbs) - 1);\r
- if (Lsbs > 0) {\r
- BitMap += 1;\r
- Result |= LShiftU64 ((*BitMap) & (LShiftU64 (1, Lsbs) - 1), Msbs);\r
+ if (StartBit == 0 && BitNumber == GUARDED_HEAP_MAP_ENTRY_BITS) {\r
+ Result = *BitMap;\r
+ } else {\r
+ Result = RShiftU64((*BitMap), StartBit) & (LShiftU64(1, Msbs) - 1);\r
+ if (Lsbs > 0) {\r
+ BitMap += 1;\r
+ Result |= LShiftU64 ((*BitMap) & (LShiftU64 (1, Lsbs) - 1), Msbs);\r
+ }\r
}\r
\r
return Result;\r
//\r
// Adjust current map table depth according to the address to access\r
//\r
- while (mMapLevel < GUARDED_HEAP_MAP_TABLE_DEPTH\r
- &&\r
+ while (AllocMapUnit &&\r
+ mMapLevel < GUARDED_HEAP_MAP_TABLE_DEPTH &&\r
RShiftU64 (\r
Address,\r
mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1]\r
\r
@return An integer containing the guarded memory bitmap.\r
**/\r
-UINTN\r
+UINT64\r
GetGuardedMemoryBits (\r
IN EFI_PHYSICAL_ADDRESS Address,\r
IN UINTN NumberOfPages\r
{\r
UINT64 *BitMap;\r
UINTN Bits;\r
- UINTN Result;\r
+ UINT64 Result;\r
UINTN Shift;\r
UINTN BitsToUnitEnd;\r
\r
return 0;\r
}\r
\r
-/**\r
- Set the bit in bitmap table for the given address.\r
-\r
- @param[in] Address The address to set for.\r
-\r
- @return VOID.\r
-**/\r
-VOID\r
-EFIAPI\r
-SetGuardMapBit (\r
- IN EFI_PHYSICAL_ADDRESS Address\r
- )\r
-{\r
- UINT64 *GuardMap;\r
- UINT64 BitMask;\r
-\r
- FindGuardedMemoryMap (Address, TRUE, &GuardMap);\r
- if (GuardMap != NULL) {\r
- BitMask = LShiftU64 (1, GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address));\r
- *GuardMap |= BitMask;\r
- }\r
-}\r
-\r
-/**\r
- Clear the bit in bitmap table for the given address.\r
-\r
- @param[in] Address The address to clear for.\r
-\r
- @return VOID.\r
-**/\r
-VOID\r
-EFIAPI\r
-ClearGuardMapBit (\r
- IN EFI_PHYSICAL_ADDRESS Address\r
- )\r
-{\r
- UINT64 *GuardMap;\r
- UINT64 BitMask;\r
-\r
- FindGuardedMemoryMap (Address, TRUE, &GuardMap);\r
- if (GuardMap != NULL) {\r
- BitMask = LShiftU64 (1, GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address));\r
- *GuardMap &= ~BitMask;\r
- }\r
-}\r
\r
/**\r
Check to see if the page at the given address is a Guard page or not.\r
IN EFI_PHYSICAL_ADDRESS Address\r
)\r
{\r
- UINTN BitMap;\r
+ UINT64 BitMap;\r
\r
//\r
// There must be at least one guarded page before and/or after given\r
return ((BitMap == BIT0) || (BitMap == BIT2) || (BitMap == (BIT2 | BIT0)));\r
}\r
\r
-/**\r
- Check to see if the page at the given address is a head Guard page or not.\r
-\r
- @param[in] Address The address to check for\r
-\r
- @return TRUE The page at Address is a head Guard page\r
- @return FALSE The page at Address is not a head Guard page\r
-**/\r
-BOOLEAN\r
-EFIAPI\r
-IsHeadGuard (\r
- IN EFI_PHYSICAL_ADDRESS Address\r
- )\r
-{\r
- return (GetGuardedMemoryBits (Address, 2) == BIT1);\r
-}\r
-\r
-/**\r
- Check to see if the page at the given address is a tail Guard page or not.\r
-\r
- @param[in] Address The address to check for.\r
-\r
- @return TRUE The page at Address is a tail Guard page.\r
- @return FALSE The page at Address is not a tail Guard page.\r
-**/\r
-BOOLEAN\r
-EFIAPI\r
-IsTailGuard (\r
- IN EFI_PHYSICAL_ADDRESS Address\r
- )\r
-{\r
- return (GetGuardedMemoryBits (Address - EFI_PAGE_SIZE, 2) == BIT0);\r
-}\r
\r
/**\r
Check to see if the page at the given address is guarded or not.\r
IN EFI_PHYSICAL_ADDRESS BaseAddress\r
)\r
{\r
+ EFI_STATUS Status;\r
+\r
+ if (gCpu == NULL) {\r
+ return;\r
+ }\r
+\r
//\r
// Set flag to make sure allocating memory without GUARD for page table\r
// operation; otherwise infinite loops could be caused.\r
mOnGuarding = TRUE;\r
//\r
// Note: This might overwrite other attributes needed by other features,\r
- // such as memory protection (NX). Please make sure they are not enabled\r
- // at the same time.\r
+ // such as NX memory protection.\r
//\r
- gCpu->SetMemoryAttributes (gCpu, BaseAddress, EFI_PAGE_SIZE, EFI_MEMORY_RP);\r
+ Status = gCpu->SetMemoryAttributes (gCpu, BaseAddress, EFI_PAGE_SIZE, EFI_MEMORY_RP);\r
+ ASSERT_EFI_ERROR (Status);\r
mOnGuarding = FALSE;\r
}\r
\r
IN EFI_PHYSICAL_ADDRESS BaseAddress\r
)\r
{\r
+ UINT64 Attributes;\r
+ EFI_STATUS Status;\r
+\r
+ if (gCpu == NULL) {\r
+ return;\r
+ }\r
+\r
+ //\r
+ // Once the Guard page is unset, it will be freed back to memory pool. NX\r
+ // memory protection must be restored for this page if NX is enabled for free\r
+ // memory.\r
+ //\r
+ Attributes = 0;\r
+ if ((PcdGet64 (PcdDxeNxMemoryProtectionPolicy) & (1 << EfiConventionalMemory)) != 0) {\r
+ Attributes |= EFI_MEMORY_XP;\r
+ }\r
+\r
//\r
// Set flag to make sure allocating memory without GUARD for page table\r
// operation; otherwise infinite loops could be caused.\r
// such as memory protection (NX). Please make sure they are not enabled\r
// at the same time.\r
//\r
- gCpu->SetMemoryAttributes (gCpu, BaseAddress, EFI_PAGE_SIZE, 0);\r
+ Status = gCpu->SetMemoryAttributes (gCpu, BaseAddress, EFI_PAGE_SIZE, Attributes);\r
+ ASSERT_EFI_ERROR (Status);\r
mOnGuarding = FALSE;\r
}\r
\r
{\r
UINT64 TestBit;\r
UINT64 ConfigBit;\r
- BOOLEAN InSmm;\r
\r
- if (gCpu == NULL || AllocateType == AllocateAddress) {\r
- return FALSE;\r
- }\r
-\r
- InSmm = FALSE;\r
- if (gSmmBase2 != NULL) {\r
- gSmmBase2->InSmm (gSmmBase2, &InSmm);\r
- }\r
-\r
- if (InSmm) {\r
+ if (AllocateType == AllocateAddress) {\r
return FALSE;\r
}\r
\r
return IsMemoryTypeToGuard (MemoryType, AllocateType, GUARD_HEAP_TYPE_PAGE);\r
}\r
\r
+/**
+  Check to see if the heap guard is enabled for page and/or pool allocation.
+
+  @param[in] GuardType Specify the sub-type(s) of Heap Guard.
+
+  @return TRUE/FALSE.
+**/
+BOOLEAN
+IsHeapGuardEnabled (
+  UINT8 GuardType
+  )
+{
+  //
+  // EfiMaxMemoryType/AllocateAnyPages act as wildcard arguments here --
+  // presumably only the guard policy PCD bits matching GuardType are
+  // consulted (IsMemoryTypeToGuard is not fully visible in this chunk).
+  //
+  return IsMemoryTypeToGuard (EfiMaxMemoryType, AllocateAnyPages, GuardType);
+}
+\r
/**\r
Set head Guard and tail Guard for the given memory range.\r
\r
)\r
{\r
EFI_PHYSICAL_ADDRESS GuardPage;\r
+ UINT64 GuardBitmap;\r
\r
if (NumberOfPages == 0) {\r
return;\r
//\r
// Head Guard must be one page before, if any.\r
//\r
+ // MSB-> 1 0 <-LSB\r
+ // -------------------\r
+ // Head Guard -> 0 1 -> Don't free Head Guard (shared Guard)\r
+ // Head Guard -> 0 0 -> Free Head Guard either (not shared Guard)\r
+ // 1 X -> Don't free first page (need a new Guard)\r
+ // (it'll be turned into a Guard page later)\r
+ // -------------------\r
+ // Start -> -1 -2\r
+ //\r
GuardPage = Memory - EFI_PAGES_TO_SIZE (1);\r
- if (IsHeadGuard (GuardPage)) {\r
- if (!IsMemoryGuarded (GuardPage - EFI_PAGES_TO_SIZE (1))) {\r
+ GuardBitmap = GetGuardedMemoryBits (Memory - EFI_PAGES_TO_SIZE (2), 2);\r
+ if ((GuardBitmap & BIT1) == 0) {\r
+ //\r
+ // Head Guard exists.\r
+ //\r
+ if ((GuardBitmap & BIT0) == 0) {\r
//\r
// If the head Guard is not a tail Guard of adjacent memory block,\r
// unset it.\r
//\r
UnsetGuardPage (GuardPage);\r
}\r
- } else if (IsMemoryGuarded (GuardPage)) {\r
+ } else {\r
//\r
// Pages before memory to free are still in Guard. It's a partial free\r
// case. Turn first page of memory block to free into a new Guard.\r
//\r
// Tail Guard must be the page after this memory block to free, if any.\r
//\r
+ // MSB-> 1 0 <-LSB\r
+ // --------------------\r
+ // 1 0 <- Tail Guard -> Don't free Tail Guard (shared Guard)\r
+ // 0 0 <- Tail Guard -> Free Tail Guard either (not shared Guard)\r
+ // X 1 -> Don't free last page (need a new Guard)\r
+ // (it'll be turned into a Guard page later)\r
+ // --------------------\r
+ // +1 +0 <- End\r
+ //\r
GuardPage = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);\r
- if (IsTailGuard (GuardPage)) {\r
- if (!IsMemoryGuarded (GuardPage + EFI_PAGES_TO_SIZE (1))) {\r
+ GuardBitmap = GetGuardedMemoryBits (GuardPage, 2);\r
+ if ((GuardBitmap & BIT0) == 0) {\r
+ //\r
+ // Tail Guard exists.\r
+ //\r
+ if ((GuardBitmap & BIT1) == 0) {\r
//\r
// If the tail Guard is not a head Guard of adjacent memory block,\r
// free it; otherwise, keep it.\r
//\r
UnsetGuardPage (GuardPage);\r
}\r
- } else if (IsMemoryGuarded (GuardPage)) {\r
+ } else {\r
//\r
// Pages after memory to free are still in Guard. It's a partial free\r
// case. We need to keep one page to be a head Guard.\r
{\r
UINT64 Target;\r
\r
- Target = Start + Size - SizeRequested;\r
-\r
//\r
- // At least one more page needed for Guard page.\r
+ // UEFI spec requires that allocated pool must be 8-byte aligned. If it's\r
+ // indicated to put the pool near the Tail Guard, we need extra bytes to\r
+ // make sure alignment of the returned pool address.\r
//\r
- if (Size < (SizeRequested + EFI_PAGES_TO_SIZE (1))) {\r
+ if ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) == 0) {\r
+ SizeRequested = ALIGN_VALUE(SizeRequested, 8);\r
+ }\r
+\r
+ Target = Start + Size - SizeRequested;\r
+ ASSERT (Target >= Start);\r
+ if (Target == 0) {\r
return 0;\r
}\r
\r
EFI_PHYSICAL_ADDRESS Start;\r
EFI_PHYSICAL_ADDRESS MemoryToTest;\r
UINTN PagesToFree;\r
+ UINT64 GuardBitmap;\r
\r
if (Memory == NULL || NumberOfPages == NULL || *NumberOfPages == 0) {\r
return;\r
//\r
// Head Guard must be one page before, if any.\r
//\r
- MemoryToTest = Start - EFI_PAGES_TO_SIZE (1);\r
- if (IsHeadGuard (MemoryToTest)) {\r
- if (!IsMemoryGuarded (MemoryToTest - EFI_PAGES_TO_SIZE (1))) {\r
+ // MSB-> 1 0 <-LSB\r
+ // -------------------\r
+ // Head Guard -> 0 1 -> Don't free Head Guard (shared Guard)\r
+ // Head Guard -> 0 0 -> Free Head Guard either (not shared Guard)\r
+ // 1 X -> Don't free first page (need a new Guard)\r
+ // (it'll be turned into a Guard page later)\r
+ // -------------------\r
+ // Start -> -1 -2\r
+ //\r
+ MemoryToTest = Start - EFI_PAGES_TO_SIZE (2);\r
+ GuardBitmap = GetGuardedMemoryBits (MemoryToTest, 2);\r
+ if ((GuardBitmap & BIT1) == 0) {\r
+ //\r
+ // Head Guard exists.\r
+ //\r
+ if ((GuardBitmap & BIT0) == 0) {\r
//\r
// If the head Guard is not a tail Guard of adjacent memory block,\r
// free it; otherwise, keep it.\r
Start -= EFI_PAGES_TO_SIZE (1);\r
PagesToFree += 1;\r
}\r
- } else if (IsMemoryGuarded (MemoryToTest)) {\r
+ } else {\r
//\r
- // Pages before memory to free are still in Guard. It's a partial free\r
- // case. We need to keep one page to be a tail Guard.\r
+ // No Head Guard, and pages before memory to free are still in Guard. It's a\r
+ // partial free case. We need to keep one page to be a tail Guard.\r
//\r
Start += EFI_PAGES_TO_SIZE (1);\r
PagesToFree -= 1;\r
//\r
// Tail Guard must be the page after this memory block to free, if any.\r
//\r
+ // MSB-> 1 0 <-LSB\r
+ // --------------------\r
+ // 1 0 <- Tail Guard -> Don't free Tail Guard (shared Guard)\r
+ // 0 0 <- Tail Guard -> Free Tail Guard either (not shared Guard)\r
+ // X 1 -> Don't free last page (need a new Guard)\r
+ // (it'll be turned into a Guard page later)\r
+ // --------------------\r
+ // +1 +0 <- End\r
+ //\r
MemoryToTest = Start + EFI_PAGES_TO_SIZE (PagesToFree);\r
- if (IsTailGuard (MemoryToTest)) {\r
- if (!IsMemoryGuarded (MemoryToTest + EFI_PAGES_TO_SIZE (1))) {\r
+ GuardBitmap = GetGuardedMemoryBits (MemoryToTest, 2);\r
+ if ((GuardBitmap & BIT0) == 0) {\r
+ //\r
+ // Tail Guard exists.\r
+ //\r
+ if ((GuardBitmap & BIT1) == 0) {\r
//\r
// If the tail Guard is not a head Guard of adjacent memory block,\r
// free it; otherwise, keep it.\r
//\r
PagesToFree += 1;\r
}\r
- } else if (IsMemoryGuarded (MemoryToTest)) {\r
+ } else if (PagesToFree > 0) {\r
//\r
- // Pages after memory to free are still in Guard. It's a partial free\r
- // case. We need to keep one page to be a head Guard.\r
+ // No Tail Guard, and pages after memory to free are still in Guard. It's a\r
+ // partial free case. We need to keep one page to be a head Guard.\r
//\r
PagesToFree -= 1;\r
}\r
IN UINTN Size\r
)\r
{\r
- if ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0) {\r
+ if (Memory == 0 || (PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0) {\r
//\r
// Pool head is put near the head Guard\r
//\r
//\r
// Pool head is put near the tail Guard\r
//\r
+ Size = ALIGN_VALUE (Size, 8);\r
return (VOID *)(UINTN)(Memory + EFI_PAGES_TO_SIZE (NoPages) - Size);\r
}\r
\r
IN EFI_PHYSICAL_ADDRESS Memory\r
)\r
{\r
- if ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0) {\r
+ if (Memory == 0 || (PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0) {\r
//\r
// Pool head is put near the head Guard\r
//\r
IN EFI_MEMORY_TYPE NewType\r
)\r
{\r
+ UINT64 OldStart;\r
+ UINTN OldPages;\r
+\r
if (NewType == EfiConventionalMemory) {\r
+ OldStart = Start;\r
+ OldPages = NumberOfPages;\r
+\r
AdjustMemoryF (&Start, &NumberOfPages);\r
+ //\r
+ // It's safe to unset Guard page inside memory lock because there should\r
+ // be no memory allocation occurred in updating memory page attribute at\r
+ // this point. And unsetting Guard page before free will prevent Guard\r
+ // page just freed back to pool from being allocated right away before\r
+ // marking it usable (from non-present to present).\r
+ //\r
+ UnsetGuardForMemory (OldStart, OldPages);\r
+ if (NumberOfPages == 0) {\r
+ return EFI_SUCCESS;\r
+ }\r
} else {\r
AdjustMemoryA (&Start, &NumberOfPages);\r
}\r
\r
- return CoreConvertPages(Start, NumberOfPages, NewType);\r
+ return CoreConvertPages (Start, NumberOfPages, NewType);\r
+}\r
+\r
+/**
+  Set all Guard pages which cannot be set before CPU Arch Protocol installed.
+**/
+VOID
+SetAllGuardPages (
+  VOID
+  )
+{
+  UINTN Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];
+  UINTN Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];
+  UINTN Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
+  UINT64 Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];
+  UINT64 Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];
+  UINT64 TableEntry;
+  UINT64 Address;
+  UINT64 GuardPage;
+  INTN Level;
+  UINTN Index;
+  BOOLEAN OnGuarding;
+
+  //
+  // Nothing to do if the guarded-memory bitmap was never initialized.
+  //
+  if (mGuardedMemoryMap == 0 ||
+      mMapLevel == 0 ||
+      mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH) {
+    return;
+  }
+
+  CopyMem (Entries, mLevelMask, sizeof (Entries));
+  CopyMem (Shifts, mLevelShift, sizeof (Shifts));
+
+  SetMem (Tables, sizeof(Tables), 0);
+  SetMem (Addresses, sizeof(Addresses), 0);
+  SetMem (Indices, sizeof(Indices), 0);
+
+  Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
+  Tables[Level] = mGuardedMemoryMap;
+  Address = 0;
+  OnGuarding = FALSE;
+
+  DEBUG_CODE (
+    DumpGuardedMemoryBitmap ();
+  );
+
+  //
+  // Non-recursive depth-first walk of the multi-level bitmap table.
+  // Tables[]/Indices[]/Addresses[] act as an explicit traversal stack.
+  //
+  while (TRUE) {
+    if (Indices[Level] > Entries[Level]) {
+      //
+      // Current table exhausted; pop back up one level.
+      //
+      Tables[Level] = 0;
+      Level -= 1;
+    } else {
+
+      TableEntry = ((UINT64 *)(UINTN)(Tables[Level]))[Indices[Level]];
+      Address = Addresses[Level];
+
+      if (TableEntry == 0) {
+
+        //
+        // Empty entry: no guarded pages anywhere in this address range.
+        //
+        OnGuarding = FALSE;
+
+      } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
+
+        //
+        // Non-leaf entry: descend into the next-level table.
+        //
+        Level += 1;
+        Tables[Level] = TableEntry;
+        Addresses[Level] = Address;
+        Indices[Level] = 0;
+
+        continue;
+
+      } else {
+
+        //
+        // Leaf bitmap: scan bit by bit. A 0->1 transition means the page
+        // just before Address is a (head) Guard page; a 1->0 transition
+        // means Address itself is a (tail) Guard page.
+        //
+        Index = 0;
+        while (Index < GUARDED_HEAP_MAP_ENTRY_BITS) {
+          if ((TableEntry & 1) == 1) {
+            if (OnGuarding) {
+              GuardPage = 0;
+            } else {
+              GuardPage = Address - EFI_PAGE_SIZE;
+            }
+            OnGuarding = TRUE;
+          } else {
+            if (OnGuarding) {
+              GuardPage = Address;
+            } else {
+              GuardPage = 0;
+            }
+            OnGuarding = FALSE;
+          }
+
+          if (GuardPage != 0) {
+            SetGuardPage (GuardPage);
+          }
+
+          //
+          // All remaining bits are zero; no further transitions possible
+          // in this entry.
+          //
+          if (TableEntry == 0) {
+            break;
+          }
+
+          TableEntry = RShiftU64 (TableEntry, 1);
+          Address += EFI_PAGE_SIZE;
+          Index += 1;
+        }
+      }
+    }
+
+    //
+    // Stop once we have popped above the top-most in-use table level.
+    //
+    if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
+      break;
+    }
+
+    Indices[Level] += 1;
+    Address = (Level == 0) ? 0 : Addresses[Level - 1];
+    Addresses[Level] = Address | LShiftU64(Indices[Level], Shifts[Level]);
+
+  }
+}
+\r
+/**
+  Find the address of top-most guarded free page.
+
+  @param[out] Address Start address of top-most guarded free page.
+
+  @return VOID.
+**/
+VOID
+GetLastGuardedFreePageAddress (
+  OUT EFI_PHYSICAL_ADDRESS *Address
+  )
+{
+  EFI_PHYSICAL_ADDRESS AddressGranularity;
+  EFI_PHYSICAL_ADDRESS BaseAddress;
+  UINTN Level;
+  UINT64 Map;
+  INTN Index;
+
+  ASSERT (mMapLevel >= 1);
+
+  BaseAddress = 0;
+  Map = mGuardedMemoryMap;
+  //
+  // Walk down the map levels; at each level follow the non-NULL entry with
+  // the largest index, i.e. the highest covered address range.
+  //
+  for (Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
+       Level < GUARDED_HEAP_MAP_TABLE_DEPTH;
+       ++Level) {
+    AddressGranularity = LShiftU64 (1, mLevelShift[Level]);
+
+    //
+    // Find the non-NULL entry at largest index.
+    //
+    for (Index = (INTN)mLevelMask[Level]; Index >= 0 ; --Index) {
+      if (((UINT64 *)(UINTN)Map)[Index] != 0) {
+        BaseAddress += MultU64x32 (AddressGranularity, (UINT32)Index);
+        Map = ((UINT64 *)(UINTN)Map)[Index];
+        break;
+      }
+    }
+  }
+
+  //
+  // Find the non-zero MSB then get the page address.
+  // NOTE(review): the loop adds one page per remaining bit, so the result
+  // lands just past the most-significant set bit (the end, not the start,
+  // of the top-most freed page) -- confirm this matches callers' use.
+  //
+  while (Map != 0) {
+    Map = RShiftU64 (Map, 1);
+    BaseAddress += EFI_PAGES_TO_SIZE (1);
+  }
+
+  *Address = BaseAddress;
+}
+\r
+/**
+  Record freed pages.
+
+  @param[in] BaseAddress Base address of just freed pages.
+  @param[in] Pages Number of freed pages.
+
+  @return VOID.
+**/
+VOID
+MarkFreedPages (
+  IN EFI_PHYSICAL_ADDRESS BaseAddress,
+  IN UINTN Pages
+  )
+{
+  //
+  // Freed-page tracking reuses the guarded-memory bitmap: a set bit marks a
+  // page as freed-and-guarded.
+  //
+  SetGuardedMemoryBits (BaseAddress, Pages);
+}
+\r
+/**
+  Record freed pages as well as mark them as not-present.
+
+  @param[in] BaseAddress Base address of just freed pages.
+  @param[in] Pages Number of freed pages.
+
+  @return VOID.
+**/
+VOID
+EFIAPI
+GuardFreedPages (
+  IN EFI_PHYSICAL_ADDRESS BaseAddress,
+  IN UINTN Pages
+  )
+{
+  EFI_STATUS Status;
+
+  //
+  // Legacy memory lower than 1MB might be accessed with no allocation. Leave
+  // them alone.
+  //
+  if (BaseAddress < BASE_1MB) {
+    return;
+  }
+
+  MarkFreedPages (BaseAddress, Pages);
+  if (gCpu != NULL) {
+    //
+    // Set flag to make sure allocating memory without GUARD for page table
+    // operation; otherwise infinite loops could be caused.
+    //
+    mOnGuarding = TRUE;
+    //
+    // Note: This might overwrite other attributes needed by other features,
+    // such as NX memory protection.
+    //
+    Status = gCpu->SetMemoryAttributes (
+                     gCpu,
+                     BaseAddress,
+                     EFI_PAGES_TO_SIZE (Pages),
+                     EFI_MEMORY_RP
+                     );
+    //
+    // Normally we should ASSERT the returned Status. But there might be memory
+    // alloc/free involved in SetMemoryAttributes(), which might fail this
+    // call. It's a rare case, so it's OK to leave a few tiny holes unguarded.
+    //
+    if (EFI_ERROR (Status)) {
+      DEBUG ((DEBUG_WARN, "Failed to guard freed pages: %p (%lu)\n", BaseAddress, (UINT64)Pages));
+    }
+    mOnGuarding = FALSE;
+  }
+}
+\r
+/**
+  Record freed pages as well as mark them as not-present, if enabled.
+
+  @param[in] BaseAddress Base address of just freed pages.
+  @param[in] Pages Number of freed pages.
+
+  @return VOID.
+**/
+VOID
+EFIAPI
+GuardFreedPagesChecked (
+  IN EFI_PHYSICAL_ADDRESS BaseAddress,
+  IN UINTN Pages
+  )
+{
+  //
+  // Only take effect when the freed-memory (use-after-free detection) guard
+  // policy is enabled.
+  //
+  if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED)) {
+    GuardFreedPages (BaseAddress, Pages);
+  }
+}
+\r
+/**
+  Mark all pages freed before CPU Arch Protocol as not-present.
+
+**/
+VOID
+GuardAllFreedPages (
+  VOID
+  )
+{
+  UINTN Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];
+  UINTN Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];
+  UINTN Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
+  UINT64 Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];
+  UINT64 Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];
+  UINT64 TableEntry;
+  UINT64 Address;
+  UINT64 GuardPage;
+  INTN Level;
+  UINT64 BitIndex;
+  UINTN GuardPageNumber;
+
+  //
+  // Nothing to do if the guarded-memory bitmap was never initialized.
+  //
+  if (mGuardedMemoryMap == 0 ||
+      mMapLevel == 0 ||
+      mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH) {
+    return;
+  }
+
+  CopyMem (Entries, mLevelMask, sizeof (Entries));
+  CopyMem (Shifts, mLevelShift, sizeof (Shifts));
+
+  SetMem (Tables, sizeof(Tables), 0);
+  SetMem (Addresses, sizeof(Addresses), 0);
+  SetMem (Indices, sizeof(Indices), 0);
+
+  Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
+  Tables[Level] = mGuardedMemoryMap;
+  Address = 0;
+  GuardPage = (UINT64)-1;
+  GuardPageNumber = 0;
+
+  //
+  // Non-recursive depth-first walk of the multi-level bitmap table;
+  // Tables[]/Indices[]/Addresses[] act as an explicit traversal stack.
+  //
+  while (TRUE) {
+    if (Indices[Level] > Entries[Level]) {
+      Tables[Level] = 0;
+      Level -= 1;
+    } else {
+      TableEntry = ((UINT64 *)(UINTN)(Tables[Level]))[Indices[Level]];
+      Address = Addresses[Level];
+
+      //
+      // NOTE(review): unlike SetAllGuardPages there is no TableEntry == 0
+      // early-out here before descending, so a zero non-leaf entry is pushed
+      // and would be dereferenced as a table pointer on the next iteration --
+      // confirm this is safe / matches upstream.
+      //
+      if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
+        Level += 1;
+        Tables[Level] = TableEntry;
+        Addresses[Level] = Address;
+        Indices[Level] = 0;
+
+        continue;
+      } else {
+        //
+        // Leaf bitmap: accumulate runs of set bits (freed pages) into
+        // [GuardPage, GuardPageNumber]; guard each run when a clear bit
+        // ends it.
+        //
+        BitIndex = 1;
+        while (BitIndex != 0) {
+          if ((TableEntry & BitIndex) != 0) {
+            if (GuardPage == (UINT64)-1) {
+              GuardPage = Address;
+            }
+            ++GuardPageNumber;
+          } else if (GuardPageNumber > 0) {
+            GuardFreedPages (GuardPage, GuardPageNumber);
+            GuardPageNumber = 0;
+            GuardPage = (UINT64)-1;
+          }
+
+          //
+          // All remaining bits are zero; stop scanning this entry early.
+          //
+          if (TableEntry == 0) {
+            break;
+          }
+
+          Address += EFI_PAGES_TO_SIZE (1);
+          BitIndex = LShiftU64 (BitIndex, 1);
+        }
+      }
+    }
+
+    //
+    // Stop once we have popped above the top-most in-use table level.
+    //
+    if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
+      break;
+    }
+
+    Indices[Level] += 1;
+    Address = (Level == 0) ? 0 : Addresses[Level - 1];
+    Addresses[Level] = Address | LShiftU64 (Indices[Level], Shifts[Level]);
+
+  }
+
+  //
+  // Update the maximum address of freed page which can be used for memory
+  // promotion upon out-of-memory-space.
+  //
+  GetLastGuardedFreePageAddress (&Address);
+  if (Address != 0) {
+    mLastPromotedPage = Address;
+  }
+}
+\r
+/**
+  This function checks to see if the given memory map descriptor in a memory map
+  can be merged with any guarded free pages.
+
+  @param MemoryMapEntry A pointer to a descriptor in MemoryMap.
+  @param MaxAddress Maximum address to stop the merge.
+
+  @return VOID
+
+**/
+VOID
+MergeGuardPages (
+  IN EFI_MEMORY_DESCRIPTOR *MemoryMapEntry,
+  IN EFI_PHYSICAL_ADDRESS MaxAddress
+  )
+{
+  EFI_PHYSICAL_ADDRESS EndAddress;
+  UINT64 Bitmap;
+  INTN Pages;
+
+  //
+  // Only applies when the freed-memory guard is active, and only to regular
+  // memory descriptors (MMIO and above are never merged).
+  //
+  if (!IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED) ||
+      MemoryMapEntry->Type >= EfiMemoryMappedIO) {
+    return;
+  }
+
+  //
+  // Pages is the gap between the end of this descriptor and MaxAddress;
+  // absorb pages from that gap for as long as they are marked freed.
+  //
+  Bitmap = 0;
+  Pages = EFI_SIZE_TO_PAGES ((UINTN)(MaxAddress - MemoryMapEntry->PhysicalStart));
+  Pages -= (INTN)MemoryMapEntry->NumberOfPages;
+  while (Pages > 0) {
+    if (Bitmap == 0) {
+      //
+      // Fetch the next 64-page bitmap chunk at the current descriptor end.
+      //
+      EndAddress = MemoryMapEntry->PhysicalStart +
+                   EFI_PAGES_TO_SIZE ((UINTN)MemoryMapEntry->NumberOfPages);
+      Bitmap = GetGuardedMemoryBits (EndAddress, GUARDED_HEAP_MAP_ENTRY_BITS);
+    }
+
+    if ((Bitmap & 1) == 0) {
+      //
+      // Next page is not freed-and-guarded; stop merging here.
+      //
+      break;
+    }
+
+    Pages--;
+    MemoryMapEntry->NumberOfPages++;
+    Bitmap = RShiftU64 (Bitmap, 1);
+  }
+}
+\r
+/**
+  Put part (at most 64 pages a time) guarded free pages back to free page pool.
+
+  Freed memory guard is used to detect Use-After-Free (UAF) memory issue, which
+  makes use of 'Used then throw away' way to detect any illegal access to freed
+  memory. The thrown-away memory will be marked as not-present so that any access
+  to those memory (after free) will be caught by page-fault exception.
+
+  The problem is that this will consume lots of memory space. Once no memory
+  left in pool to allocate, we have to restore part of the freed pages to their
+  normal function. Otherwise the whole system will stop functioning.
+
+  @param StartAddress Start address of promoted memory.
+  @param EndAddress End address of promoted memory.
+
+  @return TRUE Succeeded to promote memory.
+  @return FALSE No free memory found.
+
+**/
+BOOLEAN
+PromoteGuardedFreePages (
+  OUT EFI_PHYSICAL_ADDRESS *StartAddress,
+  OUT EFI_PHYSICAL_ADDRESS *EndAddress
+  )
+{
+  EFI_STATUS Status;
+  UINTN AvailablePages;
+  UINT64 Bitmap;
+  EFI_PHYSICAL_ADDRESS Start;
+
+  if (!IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED)) {
+    return FALSE;
+  }
+
+  //
+  // Similar to memory allocation service, always search the freed pages in
+  // descending direction.
+  //
+  Start = mLastPromotedPage;
+  AvailablePages = 0;
+  while (AvailablePages == 0) {
+    //
+    // Step down one 64-page bitmap chunk at a time.
+    //
+    Start -= EFI_PAGES_TO_SIZE (GUARDED_HEAP_MAP_ENTRY_BITS);
+    //
+    // If the address wraps around, try the really freed pages at top.
+    //
+    if (Start > mLastPromotedPage) {
+      GetLastGuardedFreePageAddress (&Start);
+      ASSERT (Start != 0);
+      Start -= EFI_PAGES_TO_SIZE (GUARDED_HEAP_MAP_ENTRY_BITS);
+    }
+
+    //
+    // Collect the run of contiguous freed pages beginning at the lowest set
+    // bit of this chunk; leading clear bits just advance Start.
+    //
+    Bitmap = GetGuardedMemoryBits (Start, GUARDED_HEAP_MAP_ENTRY_BITS);
+    while (Bitmap > 0) {
+      if ((Bitmap & 1) != 0) {
+        ++AvailablePages;
+      } else if (AvailablePages == 0) {
+        Start += EFI_PAGES_TO_SIZE (1);
+      } else {
+        break;
+      }
+
+      Bitmap = RShiftU64 (Bitmap, 1);
+    }
+  }
+
+  if (AvailablePages) {
+    DEBUG ((DEBUG_INFO, "Promoted pages: %lX (%lx)\r\n", Start, (UINT64)AvailablePages));
+    //
+    // Clear the freed markers so these pages are no longer tracked as guarded.
+    //
+    ClearGuardedMemoryBits (Start, AvailablePages);
+
+    if (gCpu != NULL) {
+      //
+      // Set flag to make sure allocating memory without GUARD for page table
+      // operation; otherwise infinite loops could be caused.
+      //
+      mOnGuarding = TRUE;
+      //
+      // Attribute 0 restores the pages to present (promotes them back to
+      // usable memory).
+      //
+      Status = gCpu->SetMemoryAttributes (gCpu, Start, EFI_PAGES_TO_SIZE(AvailablePages), 0);
+      ASSERT_EFI_ERROR (Status);
+      mOnGuarding = FALSE;
+    }
+
+    mLastPromotedPage = Start;
+    *StartAddress = Start;
+    *EndAddress = Start + EFI_PAGES_TO_SIZE (AvailablePages) - 1;
+    return TRUE;
+  }
+
+  return FALSE;
+}
+\r
+/**
+  Notify function used to set all Guard pages before CPU Arch Protocol installed.
+**/
+VOID
+HeapGuardCpuArchProtocolNotify (
+  VOID
+  )
+{
+  ASSERT (gCpu != NULL);
+
+  //
+  // Heap Guard (page/pool) and the freed-memory (UAF) guard are mutually
+  // exclusive; halt hard if the platform enabled both.
+  //
+  if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_PAGE|GUARD_HEAP_TYPE_POOL) &&
+      IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED)) {
+    DEBUG ((DEBUG_ERROR, "Heap guard and freed memory guard cannot be enabled at the same time.\n"));
+    CpuDeadLoop ();
+  }
+
+  if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_PAGE|GUARD_HEAP_TYPE_POOL)) {
+    SetAllGuardPages ();
+  }
+
+  if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED)) {
+    GuardAllFreedPages ();
+  }
}
\r
/**\r
CHAR8 *Ruler1;\r
CHAR8 *Ruler2;\r
\r
- if (mGuardedMemoryMap == 0) {\r
+ if (!IsHeapGuardEnabled (GUARD_HEAP_TYPE_ALL)) {\r
+ return;\r
+ }\r
+\r
+ if (mGuardedMemoryMap == 0 ||\r
+ mMapLevel == 0 ||\r
+ mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH) {\r
return;\r
}\r
\r