+ return CoreConvertPages (Start, NumberOfPages, NewType);\r
+}\r
+\r
/**
  Set all Guard pages which cannot be set before CPU Arch Protocol installed.

  Performs an iterative (non-recursive) depth-first walk of the multi-level
  guarded-memory bitmap.  At the leaf level each bit appears to represent one
  page; runs of set bits mark guarded allocations, and a guard page is applied
  at each run boundary (the page just below the first set bit and the page at
  the first clear bit after a run).
**/
VOID
SetAllGuardPages (
  VOID
  )
{
  UINTN    Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN    Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN    Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64   Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64   Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64   TableEntry;
  UINT64   Address;
  UINT64   GuardPage;
  INTN     Level;
  UINTN    Index;
  BOOLEAN  OnGuarding;

  //
  // Nothing to do if the guard bitmap was never initialized or its level
  // bookkeeping is out of range.
  //
  if ((mGuardedMemoryMap == 0) ||
      (mMapLevel == 0) ||
      (mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH))
  {
    return;
  }

  //
  // Snapshot the per-level index masks and address shifts.
  //
  CopyMem (Entries, mLevelMask, sizeof (Entries));
  CopyMem (Shifts, mLevelShift, sizeof (Shifts));

  SetMem (Tables, sizeof (Tables), 0);
  SetMem (Addresses, sizeof (Addresses), 0);
  SetMem (Indices, sizeof (Indices), 0);

  //
  // The walk starts at the top-most level actually in use; levels above it
  // are unused when mMapLevel < GUARDED_HEAP_MAP_TABLE_DEPTH.
  //
  Level         = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
  Tables[Level] = mGuardedMemoryMap;
  Address       = 0;
  OnGuarding    = FALSE;

  DEBUG_CODE (
    DumpGuardedMemoryBitmap ();
  );

  while (TRUE) {
    if (Indices[Level] > Entries[Level]) {
      //
      // Current table exhausted: pop back to the parent level.
      //
      Tables[Level] = 0;
      Level        -= 1;
    } else {
      TableEntry = ((UINT64 *)(UINTN)(Tables[Level]))[Indices[Level]];
      Address    = Addresses[Level];

      if (TableEntry == 0) {
        //
        // Empty entry/subtree: equivalent to a run of clear bits, so any
        // in-progress guarded run ends here.
        //
        OnGuarding = FALSE;
      } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
        //
        // Non-leaf entry: descend into the child table.
        //
        Level           += 1;
        Tables[Level]    = TableEntry;
        Addresses[Level] = Address;
        Indices[Level]   = 0;

        continue;
      } else {
        //
        // Leaf entry: scan its bits LSB-first, one page per bit.
        //
        Index = 0;
        while (Index < GUARDED_HEAP_MAP_ENTRY_BITS) {
          if ((TableEntry & 1) == 1) {
            if (OnGuarding) {
              GuardPage = 0;
            } else {
              //
              // Transition clear->set: guard the page just below the run.
              //
              GuardPage = Address - EFI_PAGE_SIZE;
            }

            OnGuarding = TRUE;
          } else {
            if (OnGuarding) {
              //
              // Transition set->clear: guard the page just after the run.
              //
              GuardPage = Address;
            } else {
              GuardPage = 0;
            }

            OnGuarding = FALSE;
          }

          if (GuardPage != 0) {
            SetGuardPage (GuardPage);
          }

          //
          // All remaining bits are clear; no further transitions possible
          // within this entry.
          //
          if (TableEntry == 0) {
            break;
          }

          TableEntry = RShiftU64 (TableEntry, 1);
          Address   += EFI_PAGE_SIZE;
          Index     += 1;
        }
      }
    }

    //
    // Popped above the top-most used level: the whole map has been walked.
    //
    if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
      break;
    }

    //
    // Advance to the next sibling entry and recompute its base address from
    // the parent's base plus the entry index shifted into position.
    //
    Indices[Level]  += 1;
    Address          = (Level == 0) ? 0 : Addresses[Level - 1];
    Addresses[Level] = Address | LShiftU64 (Indices[Level], Shifts[Level]);
  }
}
+\r
/**
  Find the address of top-most guarded free page.

  Descends the multi-level guarded-memory bitmap, at each level following the
  highest-index non-zero entry, then advances past every bit up to and
  including the most-significant set bit of the leaf word.

  @param[out] Address  Start address of top-most guarded free page.

  @return VOID.
**/
VOID
GetLastGuardedFreePageAddress (
  OUT EFI_PHYSICAL_ADDRESS  *Address
  )
{
  EFI_PHYSICAL_ADDRESS  AddressGranularity;
  EFI_PHYSICAL_ADDRESS  BaseAddress;
  UINTN                 Level;
  UINT64                Map;
  INTN                  Index;

  ASSERT (mMapLevel >= 1);

  BaseAddress = 0;
  Map         = mGuardedMemoryMap;
  for (Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
       Level < GUARDED_HEAP_MAP_TABLE_DEPTH;
       ++Level)
  {
    //
    // Number of bytes of address space covered by one entry at this level.
    //
    AddressGranularity = LShiftU64 (1, mLevelShift[Level]);

    //
    // Find the non-NULL entry at largest index.
    //
    for (Index = (INTN)mLevelMask[Level]; Index >= 0; --Index) {
      if (((UINT64 *)(UINTN)Map)[Index] != 0) {
        BaseAddress += MultU64x32 (AddressGranularity, (UINT32)Index);
        Map          = ((UINT64 *)(UINTN)Map)[Index];
        break;
      }
    }
  }

  //
  // Find the non-zero MSB then get the page address.
  //
  // NOTE(review): this advances one page per shift until Map is zero, so the
  // result lands one page PAST the most-significant set bit (the end of the
  // top-most run).  Callers (e.g. PromoteGuardedFreePages) subtract before
  // use, which suggests this is intentional — confirm against callers.
  //
  while (Map != 0) {
    Map          = RShiftU64 (Map, 1);
    BaseAddress += EFI_PAGES_TO_SIZE (1);
  }

  *Address = BaseAddress;
}
+\r
+/**\r
+ Record freed pages.\r
+\r
+ @param[in] BaseAddress Base address of just freed pages.\r
+ @param[in] Pages Number of freed pages.\r
+\r
+ @return VOID.\r
+**/\r
+VOID\r
+MarkFreedPages (\r
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,\r
+ IN UINTN Pages\r
+ )\r
+{\r
+ SetGuardedMemoryBits (BaseAddress, Pages);\r
+}\r
+\r
+/**\r
+ Record freed pages as well as mark them as not-present.\r
+\r
+ @param[in] BaseAddress Base address of just freed pages.\r
+ @param[in] Pages Number of freed pages.\r
+\r
+ @return VOID.\r
+**/\r
+VOID\r
+EFIAPI\r
+GuardFreedPages (\r
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,\r
+ IN UINTN Pages\r
+ )\r
+{\r
+ EFI_STATUS Status;\r
+\r
+ //\r
+ // Legacy memory lower than 1MB might be accessed with no allocation. Leave\r
+ // them alone.\r
+ //\r
+ if (BaseAddress < BASE_1MB) {\r
+ return;\r
+ }\r
+\r
+ MarkFreedPages (BaseAddress, Pages);\r
+ if (gCpu != NULL) {\r
+ //\r
+ // Set flag to make sure allocating memory without GUARD for page table\r
+ // operation; otherwise infinite loops could be caused.\r
+ //\r
+ mOnGuarding = TRUE;\r
+ //\r
+ // Note: This might overwrite other attributes needed by other features,\r
+ // such as NX memory protection.\r
+ //\r
+ Status = gCpu->SetMemoryAttributes (\r
+ gCpu,\r
+ BaseAddress,\r
+ EFI_PAGES_TO_SIZE (Pages),\r
+ EFI_MEMORY_RP\r
+ );\r
+ //\r
+ // Normally we should ASSERT the returned Status. But there might be memory\r
+ // alloc/free involved in SetMemoryAttributes(), which might fail this\r
+ // calling. It's rare case so it's OK to let a few tiny holes be not-guarded.\r
+ //\r
+ if (EFI_ERROR (Status)) {\r
+ DEBUG ((DEBUG_WARN, "Failed to guard freed pages: %p (%lu)\n", BaseAddress, (UINT64)Pages));\r
+ }\r
+\r
+ mOnGuarding = FALSE;\r
+ }\r
+}\r
+\r
+/**\r
+ Record freed pages as well as mark them as not-present, if enabled.\r
+\r
+ @param[in] BaseAddress Base address of just freed pages.\r
+ @param[in] Pages Number of freed pages.\r
+\r
+ @return VOID.\r
+**/\r
+VOID\r
+EFIAPI\r
+GuardFreedPagesChecked (\r
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,\r
+ IN UINTN Pages\r
+ )\r
+{\r
+ if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED)) {\r
+ GuardFreedPages (BaseAddress, Pages);\r
+ }\r
+}\r
+\r
/**
  Mark all pages freed before CPU Arch Protocol as not-present.

  Walks the multi-level guarded-memory bitmap iteratively (same traversal
  shape as SetAllGuardPages), coalescing consecutive set bits — including
  runs that span table-entry boundaries — into (GuardPage, GuardPageNumber)
  ranges that are handed to GuardFreedPages().
**/
VOID
GuardAllFreedPages (
  VOID
  )
{
  UINTN   Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN   Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN   Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64  Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64  Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64  TableEntry;
  UINT64  Address;
  UINT64  GuardPage;
  INTN    Level;
  UINT64  BitIndex;
  UINTN   GuardPageNumber;

  //
  // Nothing to do if the guard bitmap was never initialized or its level
  // bookkeeping is out of range.
  //
  if ((mGuardedMemoryMap == 0) ||
      (mMapLevel == 0) ||
      (mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH))
  {
    return;
  }

  CopyMem (Entries, mLevelMask, sizeof (Entries));
  CopyMem (Shifts, mLevelShift, sizeof (Shifts));

  SetMem (Tables, sizeof (Tables), 0);
  SetMem (Addresses, sizeof (Addresses), 0);
  SetMem (Indices, sizeof (Indices), 0);

  //
  // Start at the top-most level in use; GuardPage == (UINT64)-1 is the
  // sentinel for "no run of freed pages currently open".
  //
  Level           = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
  Tables[Level]   = mGuardedMemoryMap;
  Address         = 0;
  GuardPage       = (UINT64)-1;
  GuardPageNumber = 0;

  while (TRUE) {
    if (Indices[Level] > Entries[Level]) {
      //
      // Current table exhausted: pop back to the parent level.
      //
      Tables[Level] = 0;
      Level        -= 1;
    } else {
      TableEntry = ((UINT64 *)(UINTN)(Tables[Level]))[Indices[Level]];
      Address    = Addresses[Level];

      if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
        //
        // Non-leaf entry: descend into the child table.
        //
        Level           += 1;
        Tables[Level]    = TableEntry;
        Addresses[Level] = Address;
        Indices[Level]   = 0;

        continue;
      } else {
        //
        // Leaf entry: scan bits LSB-first with a moving one-bit mask.
        //
        BitIndex = 1;
        while (BitIndex != 0) {
          if ((TableEntry & BitIndex) != 0) {
            //
            // Set bit: open a new run (if none is open) and extend it.
            //
            if (GuardPage == (UINT64)-1) {
              GuardPage = Address;
            }

            ++GuardPageNumber;
          } else if (GuardPageNumber > 0) {
            //
            // Clear bit ends a run: flush the accumulated range.
            //
            GuardFreedPages (GuardPage, GuardPageNumber);
            GuardPageNumber = 0;
            GuardPage       = (UINT64)-1;
          }

          //
          // All bits in this entry were clear; skip the rest of the word.
          //
          if (TableEntry == 0) {
            break;
          }

          Address += EFI_PAGES_TO_SIZE (1);
          BitIndex = LShiftU64 (BitIndex, 1);
        }
      }
    }

    //
    // Popped above the top-most used level: the whole map has been walked.
    //
    if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
      break;
    }

    //
    // Advance to the next sibling entry and recompute its base address.
    //
    Indices[Level]  += 1;
    Address          = (Level == 0) ? 0 : Addresses[Level - 1];
    Addresses[Level] = Address | LShiftU64 (Indices[Level], Shifts[Level]);
  }

  //
  // Update the maximum address of freed page which can be used for memory
  // promotion upon out-of-memory-space.
  //
  GetLastGuardedFreePageAddress (&Address);
  if (Address != 0) {
    mLastPromotedPage = Address;
  }
}
+\r
/**
  This function checks to see if the given memory map descriptor in a memory map
  can be merged with any guarded free pages.

  Pages immediately above the descriptor's current end that are marked in the
  guarded memory bitmap are absorbed into the descriptor by growing its
  NumberOfPages, up to MaxAddress.

  @param MemoryMapEntry    A pointer to a descriptor in MemoryMap.
  @param MaxAddress        Maximum address to stop the merge.

  @return VOID

**/
VOID
MergeGuardPages (
  IN EFI_MEMORY_DESCRIPTOR  *MemoryMapEntry,
  IN EFI_PHYSICAL_ADDRESS   MaxAddress
  )
{
  EFI_PHYSICAL_ADDRESS  EndAddress;
  UINT64                Bitmap;
  INTN                  Pages;

  //
  // Only relevant when the freed-memory guard is on, and only for
  // memory-type descriptors below EfiMemoryMappedIO.
  //
  if (!IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED) ||
      (MemoryMapEntry->Type >= EfiMemoryMappedIO))
  {
    return;
  }

  //
  // Pages = number of pages between the descriptor's current end and
  // MaxAddress, i.e. the maximum room available for merging.
  //
  Bitmap = 0;
  Pages  = EFI_SIZE_TO_PAGES ((UINTN)(MaxAddress - MemoryMapEntry->PhysicalStart));
  Pages -= (INTN)MemoryMapEntry->NumberOfPages;
  while (Pages > 0) {
    //
    // Refill the 64-bit bitmap cache from the page just past the
    // (possibly already grown) descriptor end.
    //
    if (Bitmap == 0) {
      EndAddress = MemoryMapEntry->PhysicalStart +
                   EFI_PAGES_TO_SIZE ((UINTN)MemoryMapEntry->NumberOfPages);
      Bitmap = GetGuardedMemoryBits (EndAddress, GUARDED_HEAP_MAP_ENTRY_BITS);
    }

    //
    // Stop at the first page that is not a guarded free page.
    //
    if ((Bitmap & 1) == 0) {
      break;
    }

    Pages--;
    MemoryMapEntry->NumberOfPages++;
    Bitmap = RShiftU64 (Bitmap, 1);
  }
}
+\r
/**
  Put part (at most 64 pages a time) guarded free pages back to free page pool.

  Freed memory guard is used to detect Use-After-Free (UAF) memory issue, which
  makes use of 'Used then throw away' way to detect any illegal access to freed
  memory. The thrown-away memory will be marked as not-present so that any access
  to those memory (after free) will be caught by page-fault exception.

  The problem is that this will consume lots of memory space. Once no memory
  left in pool to allocate, we have to restore part of the freed pages to their
  normal function. Otherwise the whole system will stop functioning.

  @param  StartAddress    Start address of promoted memory.
  @param  EndAddress      End address of promoted memory.

  @return TRUE    Succeeded to promote memory.
  @return FALSE   No free memory found.

**/
BOOLEAN
PromoteGuardedFreePages (
  OUT EFI_PHYSICAL_ADDRESS  *StartAddress,
  OUT EFI_PHYSICAL_ADDRESS  *EndAddress
  )
{
  EFI_STATUS            Status;
  UINTN                 AvailablePages;
  UINT64                Bitmap;
  EFI_PHYSICAL_ADDRESS  Start;

  if (!IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED)) {
    return FALSE;
  }

  //
  // Similar to memory allocation service, always search the freed pages in
  // descending direction.
  //
  Start          = mLastPromotedPage;
  AvailablePages = 0;
  while (AvailablePages == 0) {
    //
    // Step down one 64-page window per iteration.
    //
    Start -= EFI_PAGES_TO_SIZE (GUARDED_HEAP_MAP_ENTRY_BITS);
    //
    // If the address wraps around, try the really freed pages at top.
    //
    if (Start > mLastPromotedPage) {
      GetLastGuardedFreePageAddress (&Start);
      ASSERT (Start != 0);
      Start -= EFI_PAGES_TO_SIZE (GUARDED_HEAP_MAP_ENTRY_BITS);
    }

    //
    // Fetch 64 guard bits and take the first (lowest) contiguous run of set
    // bits, sliding Start up past leading clear bits.
    //
    Bitmap = GetGuardedMemoryBits (Start, GUARDED_HEAP_MAP_ENTRY_BITS);
    while (Bitmap > 0) {
      if ((Bitmap & 1) != 0) {
        ++AvailablePages;
      } else if (AvailablePages == 0) {
        Start += EFI_PAGES_TO_SIZE (1);
      } else {
        break;
      }

      Bitmap = RShiftU64 (Bitmap, 1);
    }
  }

  if (AvailablePages != 0) {
    DEBUG ((DEBUG_INFO, "Promoted pages: %lX (%lx)\r\n", Start, (UINT64)AvailablePages));
    //
    // Forget these pages in the bitmap: they are normal free pages again.
    //
    ClearGuardedMemoryBits (Start, AvailablePages);

    if (gCpu != NULL) {
      //
      // Set flag to make sure allocating memory without GUARD for page table
      // operation; otherwise infinite loops could be caused.
      //
      mOnGuarding = TRUE;
      //
      // Restore default attributes (clear EFI_MEMORY_RP) so the pages are
      // accessible again.
      //
      Status = gCpu->SetMemoryAttributes (gCpu, Start, EFI_PAGES_TO_SIZE (AvailablePages), 0);
      ASSERT_EFI_ERROR (Status);
      mOnGuarding = FALSE;
    }

    //
    // Next search resumes below this point; EndAddress is inclusive.
    //
    mLastPromotedPage = Start;
    *StartAddress     = Start;
    *EndAddress       = Start + EFI_PAGES_TO_SIZE (AvailablePages) - 1;
    return TRUE;
  }

  return FALSE;
}
+\r
+/**\r
+ Notify function used to set all Guard pages before CPU Arch Protocol installed.\r
+**/\r
+VOID\r
+HeapGuardCpuArchProtocolNotify (\r
+ VOID\r
+ )\r
+{\r
+ ASSERT (gCpu != NULL);\r
+\r
+ if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_PAGE|GUARD_HEAP_TYPE_POOL) &&\r
+ IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED))\r
+ {\r
+ DEBUG ((DEBUG_ERROR, "Heap guard and freed memory guard cannot be enabled at the same time.\n"));\r
+ CpuDeadLoop ();\r
+ }\r
+\r
+ if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_PAGE|GUARD_HEAP_TYPE_POOL)) {\r
+ SetAllGuardPages ();\r
+ }\r
+\r
+ if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED)) {\r
+ GuardAllFreedPages ();\r
+ }\r