UEFI Heap Guard functions.\r
\r
Copyright (c) 2017-2018, Intel Corporation. All rights reserved.<BR>\r
-This program and the accompanying materials\r
-are licensed and made available under the terms and conditions of the BSD License\r
-which accompanies this distribution. The full text of the license may be found at\r
-http://opensource.org/licenses/bsd-license.php\r
-\r
-THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
-WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+SPDX-License-Identifier: BSD-2-Clause-Patent\r
\r
**/\r
\r
GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH]\r
= GUARDED_HEAP_MAP_TABLE_DEPTH_MASKS;\r
\r
+//\r
+// Used for promoting freed but not used pages.\r
+//\r
+GLOBAL_REMOVE_IF_UNREFERENCED EFI_PHYSICAL_ADDRESS mLastPromotedPage = BASE_4GB;\r
+\r
/**\r
Set corresponding bits in bitmap table to 1 according to the address.\r
\r
UINTN BitsToUnitEnd;\r
EFI_STATUS Status;\r
\r
+ MapMemory = 0;\r
+\r
//\r
// Adjust current map table depth according to the address to access\r
//\r
\r
@return An integer containing the guarded memory bitmap.\r
**/\r
-UINTN\r
+UINT64\r
GetGuardedMemoryBits (\r
IN EFI_PHYSICAL_ADDRESS Address,\r
IN UINTN NumberOfPages\r
{\r
UINT64 *BitMap;\r
UINTN Bits;\r
- UINTN Result;\r
+ UINT64 Result;\r
UINTN Shift;\r
UINTN BitsToUnitEnd;\r
\r
return 0;\r
}\r
\r
-/**\r
- Set the bit in bitmap table for the given address.\r
-\r
- @param[in] Address The address to set for.\r
-\r
- @return VOID.\r
-**/\r
-VOID\r
-EFIAPI\r
-SetGuardMapBit (\r
- IN EFI_PHYSICAL_ADDRESS Address\r
- )\r
-{\r
- UINT64 *GuardMap;\r
- UINT64 BitMask;\r
-\r
- FindGuardedMemoryMap (Address, TRUE, &GuardMap);\r
- if (GuardMap != NULL) {\r
- BitMask = LShiftU64 (1, GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address));\r
- *GuardMap |= BitMask;\r
- }\r
-}\r
-\r
-/**\r
- Clear the bit in bitmap table for the given address.\r
-\r
- @param[in] Address The address to clear for.\r
-\r
- @return VOID.\r
-**/\r
-VOID\r
-EFIAPI\r
-ClearGuardMapBit (\r
- IN EFI_PHYSICAL_ADDRESS Address\r
- )\r
-{\r
- UINT64 *GuardMap;\r
- UINT64 BitMask;\r
-\r
- FindGuardedMemoryMap (Address, TRUE, &GuardMap);\r
- if (GuardMap != NULL) {\r
- BitMask = LShiftU64 (1, GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address));\r
- *GuardMap &= ~BitMask;\r
- }\r
-}\r
\r
/**\r
Check to see if the page at the given address is a Guard page or not.\r
IN EFI_PHYSICAL_ADDRESS Address\r
)\r
{\r
- UINTN BitMap;\r
+ UINT64 BitMap;\r
\r
//\r
// There must be at least one guarded page before and/or after given\r
return ((BitMap == BIT0) || (BitMap == BIT2) || (BitMap == (BIT2 | BIT0)));\r
}\r
\r
-/**\r
- Check to see if the page at the given address is a head Guard page or not.\r
-\r
- @param[in] Address The address to check for\r
-\r
- @return TRUE The page at Address is a head Guard page\r
- @return FALSE The page at Address is not a head Guard page\r
-**/\r
-BOOLEAN\r
-EFIAPI\r
-IsHeadGuard (\r
- IN EFI_PHYSICAL_ADDRESS Address\r
- )\r
-{\r
- return (GetGuardedMemoryBits (Address, 2) == BIT1);\r
-}\r
-\r
-/**\r
- Check to see if the page at the given address is a tail Guard page or not.\r
-\r
- @param[in] Address The address to check for.\r
-\r
- @return TRUE The page at Address is a tail Guard page.\r
- @return FALSE The page at Address is not a tail Guard page.\r
-**/\r
-BOOLEAN\r
-EFIAPI\r
-IsTailGuard (\r
- IN EFI_PHYSICAL_ADDRESS Address\r
- )\r
-{\r
- return (GetGuardedMemoryBits (Address - EFI_PAGE_SIZE, 2) == BIT0);\r
-}\r
\r
/**\r
Check to see if the page at the given address is guarded or not.\r
{\r
UINT64 TestBit;\r
UINT64 ConfigBit;\r
- BOOLEAN InSmm;\r
\r
if (AllocateType == AllocateAddress) {\r
return FALSE;\r
}\r
\r
- InSmm = FALSE;\r
- if (gSmmBase2 != NULL) {\r
- gSmmBase2->InSmm (gSmmBase2, &InSmm);\r
- }\r
-\r
- if (InSmm) {\r
- return FALSE;\r
- }\r
-\r
if ((PcdGet8 (PcdHeapGuardPropertyMask) & PageOrPool) == 0) {\r
return FALSE;\r
}\r
/**\r
Check to see if the heap guard is enabled for page and/or pool allocation.\r
\r
+ @param[in] GuardType Specify the sub-type(s) of Heap Guard.\r
+\r
@return TRUE/FALSE.\r
**/\r
BOOLEAN\r
IsHeapGuardEnabled (\r
- VOID\r
+ UINT8 GuardType\r
)\r
{\r
- return IsMemoryTypeToGuard (EfiMaxMemoryType, AllocateAnyPages,\r
- GUARD_HEAP_TYPE_POOL|GUARD_HEAP_TYPE_PAGE);\r
+ return IsMemoryTypeToGuard (EfiMaxMemoryType, AllocateAnyPages, GuardType);\r
}\r
\r
/**\r
}\r
}\r
\r
+/**\r
+  Find the address of top-most guarded free page.\r
+\r
+  @param[out] Address Start address of top-most guarded free page.\r
+\r
+  @return VOID.\r
+**/\r
+VOID\r
+GetLastGuardedFreePageAddress (\r
+  OUT EFI_PHYSICAL_ADDRESS *Address\r
+  )\r
+{\r
+  EFI_PHYSICAL_ADDRESS AddressGranularity;\r
+  EFI_PHYSICAL_ADDRESS BaseAddress;\r
+  UINTN Level;\r
+  UINT64 Map;\r
+  INTN Index;\r
+\r
+  ASSERT (mMapLevel >= 1);\r
+\r
+  //\r
+  // Descend the bitmap from the top level, at each level following the\r
+  // highest-indexed non-empty entry and accumulating its base address.\r
+  //\r
+  BaseAddress = 0;\r
+  Map = mGuardedMemoryMap;\r
+  for (Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;\r
+       Level < GUARDED_HEAP_MAP_TABLE_DEPTH;\r
+       ++Level) {\r
+    AddressGranularity = LShiftU64 (1, mLevelShift[Level]);\r
+\r
+    //\r
+    // Find the non-NULL entry at largest index.\r
+    //\r
+    for (Index = (INTN)mLevelMask[Level]; Index >= 0 ; --Index) {\r
+      if (((UINT64 *)(UINTN)Map)[Index] != 0) {\r
+        BaseAddress += MultU64x32 (AddressGranularity, (UINT32)Index);\r
+        Map = ((UINT64 *)(UINTN)Map)[Index];\r
+        break;\r
+      }\r
+    }\r
+  }\r
+\r
+  //\r
+  // Find the non-zero MSB then get the page address. Each shift accounts\r
+  // for one page, so BaseAddress ends up just past the highest set bit,\r
+  // i.e. at the end of the top-most guarded free page.\r
+  //\r
+  while (Map != 0) {\r
+    Map = RShiftU64 (Map, 1);\r
+    BaseAddress += EFI_PAGES_TO_SIZE (1);\r
+  }\r
+\r
+  *Address = BaseAddress;\r
+}\r
+\r
+/**\r
+  Record freed pages.\r
+\r
+  @param[in] BaseAddress Base address of just freed pages.\r
+  @param[in] Pages Number of freed pages.\r
+\r
+  @return VOID.\r
+**/\r
+VOID\r
+MarkFreedPages (\r
+  IN EFI_PHYSICAL_ADDRESS BaseAddress,\r
+  IN UINTN Pages\r
+  )\r
+{\r
+  //\r
+  // Freed pages are tracked with the same bitmap used for Guard pages.\r
+  //\r
+  SetGuardedMemoryBits (BaseAddress, Pages);\r
+}\r
+\r
+/**\r
+  Record freed pages as well as mark them as not-present.\r
+\r
+  @param[in] BaseAddress Base address of just freed pages.\r
+  @param[in] Pages Number of freed pages.\r
+\r
+  @return VOID.\r
+**/\r
+VOID\r
+EFIAPI\r
+GuardFreedPages (\r
+  IN EFI_PHYSICAL_ADDRESS BaseAddress,\r
+  IN UINTN Pages\r
+  )\r
+{\r
+  EFI_STATUS Status;\r
+\r
+  //\r
+  // Legacy memory lower than 1MB might be accessed with no allocation. Leave\r
+  // them alone.\r
+  //\r
+  if (BaseAddress < BASE_1MB) {\r
+    return;\r
+  }\r
+\r
+  //\r
+  // Record the pages first, so they are tracked even when the CPU Arch\r
+  // Protocol is not yet available to change page attributes.\r
+  //\r
+  MarkFreedPages (BaseAddress, Pages);\r
+  if (gCpu != NULL) {\r
+    //\r
+    // Set flag to make sure allocating memory without GUARD for page table\r
+    // operation; otherwise infinite loops could be caused.\r
+    //\r
+    mOnGuarding = TRUE;\r
+    //\r
+    // Note: This might overwrite other attributes needed by other features,\r
+    // such as NX memory protection.\r
+    //\r
+    // EFI_MEMORY_RP marks the range not-present, so any later access to the\r
+    // freed pages triggers a page fault.\r
+    //\r
+    Status = gCpu->SetMemoryAttributes (\r
+                     gCpu,\r
+                     BaseAddress,\r
+                     EFI_PAGES_TO_SIZE (Pages),\r
+                     EFI_MEMORY_RP\r
+                     );\r
+    //\r
+    // Normally we should ASSERT the returned Status. But there might be memory\r
+    // alloc/free involved in SetMemoryAttributes(), which might fail this\r
+    // calling. It's rare case so it's OK to let a few tiny holes be not-guarded.\r
+    //\r
+    if (EFI_ERROR (Status)) {\r
+      DEBUG ((DEBUG_WARN, "Failed to guard freed pages: %p (%lu)\n", BaseAddress, (UINT64)Pages));\r
+    }\r
+    mOnGuarding = FALSE;\r
+  }\r
+}\r
+\r
+/**\r
+  Record freed pages as well as mark them as not-present, if enabled.\r
+\r
+  @param[in] BaseAddress Base address of just freed pages.\r
+  @param[in] Pages Number of freed pages.\r
+\r
+  @return VOID.\r
+**/\r
+VOID\r
+EFIAPI\r
+GuardFreedPagesChecked (\r
+  IN EFI_PHYSICAL_ADDRESS BaseAddress,\r
+  IN UINTN Pages\r
+  )\r
+{\r
+  //\r
+  // Only act when the freed-memory (Use-After-Free detection) guard is on.\r
+  //\r
+  if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED)) {\r
+    GuardFreedPages (BaseAddress, Pages);\r
+  }\r
+}\r
+\r
+/**\r
+  Mark all pages freed before CPU Arch Protocol as not-present.\r
+\r
+**/\r
+VOID\r
+GuardAllFreedPages (\r
+  VOID\r
+  )\r
+{\r
+  UINTN Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];\r
+  UINTN Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];\r
+  UINTN Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];\r
+  UINT64 Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];\r
+  UINT64 Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];\r
+  UINT64 TableEntry;\r
+  UINT64 Address;\r
+  UINT64 GuardPage;\r
+  INTN Level;\r
+  UINT64 BitIndex;\r
+  UINTN GuardPageNumber;\r
+\r
+  if (mGuardedMemoryMap == 0 ||\r
+      mMapLevel == 0 ||\r
+      mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH) {\r
+    return;\r
+  }\r
+\r
+  CopyMem (Entries, mLevelMask, sizeof (Entries));\r
+  CopyMem (Shifts, mLevelShift, sizeof (Shifts));\r
+\r
+  SetMem (Tables, sizeof(Tables), 0);\r
+  SetMem (Addresses, sizeof(Addresses), 0);\r
+  SetMem (Indices, sizeof(Indices), 0);\r
+\r
+  Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;\r
+  Tables[Level] = mGuardedMemoryMap;\r
+  Address = 0;\r
+  // GuardPage == (UINT64)-1 means no run of freed pages is currently open.\r
+  GuardPage = (UINT64)-1;\r
+  GuardPageNumber = 0;\r
+\r
+  //\r
+  // Non-recursive walk of the map: Tables/Addresses/Indices act as an\r
+  // explicit per-level cursor stack; decrementing Level pops one level.\r
+  //\r
+  while (TRUE) {\r
+    if (Indices[Level] > Entries[Level]) {\r
+      Tables[Level] = 0;\r
+      Level -= 1;\r
+    } else {\r
+      TableEntry = ((UINT64 *)(UINTN)(Tables[Level]))[Indices[Level]];\r
+      Address = Addresses[Level];\r
+\r
+      if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {\r
+        //\r
+        // NOTE(review): a zero TableEntry is still descended into, leaving\r
+        // Tables[Level] == 0 for the next read; verify that empty sub-table\r
+        // entries cannot occur at non-leaf levels here.\r
+        //\r
+        Level += 1;\r
+        Tables[Level] = TableEntry;\r
+        Addresses[Level] = Address;\r
+        Indices[Level] = 0;\r
+\r
+        continue;\r
+      } else {\r
+        //\r
+        // Leaf level: scan the 64-bit map, coalescing runs of consecutive\r
+        // set bits (freed pages) into single GuardFreedPages() calls.\r
+        //\r
+        BitIndex = 1;\r
+        while (BitIndex != 0) {\r
+          if ((TableEntry & BitIndex) != 0) {\r
+            if (GuardPage == (UINT64)-1) {\r
+              GuardPage = Address;\r
+            }\r
+            ++GuardPageNumber;\r
+          } else if (GuardPageNumber > 0) {\r
+            GuardFreedPages (GuardPage, GuardPageNumber);\r
+            GuardPageNumber = 0;\r
+            GuardPage = (UINT64)-1;\r
+          }\r
+\r
+          if (TableEntry == 0) {\r
+            break;\r
+          }\r
+\r
+          Address += EFI_PAGES_TO_SIZE (1);\r
+          BitIndex = LShiftU64 (BitIndex, 1);\r
+        }\r
+      }\r
+    }\r
+\r
+    if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {\r
+      break;\r
+    }\r
+\r
+    Indices[Level] += 1;\r
+    Address = (Level == 0) ? 0 : Addresses[Level - 1];\r
+    Addresses[Level] = Address | LShiftU64 (Indices[Level], Shifts[Level]);\r
+\r
+  }\r
+\r
+  //\r
+  // Update the maximum address of freed page which can be used for memory\r
+  // promotion upon out-of-memory-space.\r
+  //\r
+  // NOTE(review): a run of freed pages still open when the traversal ends is\r
+  // never flushed; confirm the top of the map always ends with clear bits.\r
+  //\r
+  GetLastGuardedFreePageAddress (&Address);\r
+  if (Address != 0) {\r
+    mLastPromotedPage = Address;\r
+  }\r
+}\r
+\r
+/**\r
+  This function checks to see if the given memory map descriptor in a memory map\r
+  can be merged with any guarded free pages.\r
+\r
+  @param MemoryMapEntry A pointer to a descriptor in MemoryMap.\r
+  @param MaxAddress Maximum address to stop the merge.\r
+\r
+  @return VOID\r
+\r
+**/\r
+VOID\r
+MergeGuardPages (\r
+  IN EFI_MEMORY_DESCRIPTOR *MemoryMapEntry,\r
+  IN EFI_PHYSICAL_ADDRESS MaxAddress\r
+  )\r
+{\r
+  EFI_PHYSICAL_ADDRESS EndAddress;\r
+  UINT64 Bitmap;\r
+  INTN Pages;\r
+\r
+  if (!IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED) ||\r
+      MemoryMapEntry->Type >= EfiMemoryMappedIO) {\r
+    return;\r
+  }\r
+\r
+  //\r
+  // Pages = number of pages between the end of this descriptor and\r
+  // MaxAddress; absorb them into the descriptor for as long as the bitmap\r
+  // marks them as freed.\r
+  //\r
+  Bitmap = 0;\r
+  Pages = EFI_SIZE_TO_PAGES ((UINTN)(MaxAddress - MemoryMapEntry->PhysicalStart));\r
+  Pages -= (INTN)MemoryMapEntry->NumberOfPages;\r
+  while (Pages > 0) {\r
+    if (Bitmap == 0) {\r
+      //\r
+      // (Re)load a batch of 64 bitmap bits at the current descriptor end.\r
+      //\r
+      EndAddress = MemoryMapEntry->PhysicalStart +\r
+                   EFI_PAGES_TO_SIZE ((UINTN)MemoryMapEntry->NumberOfPages);\r
+      Bitmap = GetGuardedMemoryBits (EndAddress, GUARDED_HEAP_MAP_ENTRY_BITS);\r
+    }\r
+\r
+    if ((Bitmap & 1) == 0) {\r
+      break;\r
+    }\r
+\r
+    Pages--;\r
+    MemoryMapEntry->NumberOfPages++;\r
+    Bitmap = RShiftU64 (Bitmap, 1);\r
+  }\r
+}\r
+\r
+/**\r
+  Put part (at most 64 pages a time) guarded free pages back to free page pool.\r
+\r
+  Freed memory guard is used to detect Use-After-Free (UAF) memory issue, which\r
+  makes use of 'Used then throw away' way to detect any illegal access to freed\r
+  memory. The thrown-away memory will be marked as not-present so that any access\r
+  to those memory (after free) will be caught by page-fault exception.\r
+\r
+  The problem is that this will consume lots of memory space. Once no memory\r
+  left in pool to allocate, we have to restore part of the freed pages to their\r
+  normal function. Otherwise the whole system will stop functioning.\r
+\r
+  @param StartAddress Start address of promoted memory.\r
+  @param EndAddress End address of promoted memory.\r
+\r
+  @return TRUE Succeeded to promote memory.\r
+  @return FALSE No free memory found.\r
+\r
+**/\r
+BOOLEAN\r
+PromoteGuardedFreePages (\r
+  OUT EFI_PHYSICAL_ADDRESS *StartAddress,\r
+  OUT EFI_PHYSICAL_ADDRESS *EndAddress\r
+  )\r
+{\r
+  EFI_STATUS Status;\r
+  UINTN AvailablePages;\r
+  UINT64 Bitmap;\r
+  EFI_PHYSICAL_ADDRESS Start;\r
+\r
+  if (!IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED)) {\r
+    return FALSE;\r
+  }\r
+\r
+  //\r
+  // Similar to memory allocation service, always search the freed pages in\r
+  // descending direction.\r
+  //\r
+  Start = mLastPromotedPage;\r
+  AvailablePages = 0;\r
+  while (AvailablePages == 0) {\r
+    Start -= EFI_PAGES_TO_SIZE (GUARDED_HEAP_MAP_ENTRY_BITS);\r
+    //\r
+    // If the address wraps around, try the really freed pages at top.\r
+    //\r
+    if (Start > mLastPromotedPage) {\r
+      GetLastGuardedFreePageAddress (&Start);\r
+      ASSERT (Start != 0);\r
+      Start -= EFI_PAGES_TO_SIZE (GUARDED_HEAP_MAP_ENTRY_BITS);\r
+    }\r
+\r
+    //\r
+    // Find the first run of freed (set) bits in this 64-page window: skip\r
+    // clear bits before the run (advancing Start), then stop at the first\r
+    // clear bit after it.\r
+    //\r
+    Bitmap = GetGuardedMemoryBits (Start, GUARDED_HEAP_MAP_ENTRY_BITS);\r
+    while (Bitmap > 0) {\r
+      if ((Bitmap & 1) != 0) {\r
+        ++AvailablePages;\r
+      } else if (AvailablePages == 0) {\r
+        Start += EFI_PAGES_TO_SIZE (1);\r
+      } else {\r
+        break;\r
+      }\r
+\r
+      Bitmap = RShiftU64 (Bitmap, 1);\r
+    }\r
+  }\r
+\r
+  if (AvailablePages != 0) {\r
+    DEBUG ((DEBUG_INFO, "Promoted pages: %lX (%lx)\r\n", Start, (UINT64)AvailablePages));\r
+    ClearGuardedMemoryBits (Start, AvailablePages);\r
+\r
+    if (gCpu != NULL) {\r
+      //\r
+      // Set flag to make sure allocating memory without GUARD for page table\r
+      // operation; otherwise infinite loops could be caused.\r
+      //\r
+      mOnGuarding = TRUE;\r
+      //\r
+      // Attribute 0 clears EFI_MEMORY_RP, restoring the pages to a normal\r
+      // (present) state.\r
+      //\r
+      Status = gCpu->SetMemoryAttributes (gCpu, Start, EFI_PAGES_TO_SIZE(AvailablePages), 0);\r
+      ASSERT_EFI_ERROR (Status);\r
+      mOnGuarding = FALSE;\r
+    }\r
+\r
+    mLastPromotedPage = Start;\r
+    *StartAddress = Start;\r
+    *EndAddress = Start + EFI_PAGES_TO_SIZE (AvailablePages) - 1;\r
+    return TRUE;\r
+  }\r
+\r
+  return FALSE;\r
+}\r
+\r
/**\r
Notify function used to set all Guard pages before CPU Arch Protocol installed.\r
**/\r
)\r
{\r
ASSERT (gCpu != NULL);\r
- SetAllGuardPages ();\r
+\r
+ if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_PAGE|GUARD_HEAP_TYPE_POOL) &&\r
+ IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED)) {\r
+ DEBUG ((DEBUG_ERROR, "Heap guard and freed memory guard cannot be enabled at the same time.\n"));\r
+ CpuDeadLoop ();\r
+ }\r
+\r
+ if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_PAGE|GUARD_HEAP_TYPE_POOL)) {\r
+ SetAllGuardPages ();\r
+ }\r
+\r
+ if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED)) {\r
+ GuardAllFreedPages ();\r
+ }\r
}\r
\r
/**\r
CHAR8 *Ruler1;\r
CHAR8 *Ruler2;\r
\r
+ if (!IsHeapGuardEnabled (GUARD_HEAP_TYPE_ALL)) {\r
+ return;\r
+ }\r
+\r
if (mGuardedMemoryMap == 0 ||\r
mMapLevel == 0 ||\r
mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH) {\r