X-Git-Url: https://git.proxmox.com/?p=mirror_edk2.git;a=blobdiff_plain;f=MdeModulePkg%2FCore%2FDxe%2FMem%2FHeapGuard.c;h=f6068c459ca75385c242da9639146a67508c5bba;hp=0f035043e15a2ead446f9b8d0de89f08a7df2dff;hb=7fef06af4ec100f3f8856e3fa08ef067a9fd40d2;hpb=c44218e5f40880e3100bdf4d112672e8dd56b94a

diff --git a/MdeModulePkg/Core/Dxe/Mem/HeapGuard.c b/MdeModulePkg/Core/Dxe/Mem/HeapGuard.c
index 0f035043e1..f6068c459c 100644
--- a/MdeModulePkg/Core/Dxe/Mem/HeapGuard.c
+++ b/MdeModulePkg/Core/Dxe/Mem/HeapGuard.c
@@ -1,7 +1,7 @@
 /** @file
   UEFI Heap Guard functions.
 
-Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>
+Copyright (c) 2017-2018, Intel Corporation. All rights reserved.<BR>
 This program and the accompanying materials
 are licensed and made available under the terms and conditions of the BSD License
 which accompanies this distribution.  The full text of the license may be found at
@@ -225,8 +225,8 @@ FindGuardedMemoryMap (
   //
   // Adjust current map table depth according to the address to access
   //
-  while (mMapLevel < GUARDED_HEAP_MAP_TABLE_DEPTH
-         &&
+  while (AllocMapUnit &&
+         mMapLevel < GUARDED_HEAP_MAP_TABLE_DEPTH &&
          RShiftU64 (
            Address,
            mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1]
@@ -576,6 +576,10 @@ SetGuardPage (
   IN  EFI_PHYSICAL_ADDRESS      BaseAddress
   )
 {
+  if (gCpu == NULL) {
+    return;
+  }
+
   //
   // Set flag to make sure allocating memory without GUARD for page table
   // operation; otherwise infinite loops could be caused.
@@ -606,6 +610,10 @@ UnsetGuardPage (
 {
   UINT64          Attributes;
 
+  if (gCpu == NULL) {
+    return;
+  }
+
   //
   // Once the Guard page is unset, it will be freed back to memory pool. NX
   // memory protection must be restored for this page if NX is enabled for free
@@ -652,7 +660,7 @@ IsMemoryTypeToGuard (
   UINT64          ConfigBit;
   BOOLEAN         InSmm;
 
-  if (gCpu == NULL || AllocateType == AllocateAddress) {
+  if (AllocateType == AllocateAddress) {
     return FALSE;
   }
 
@@ -728,6 +736,20 @@ IsPageTypeToGuard (
   return IsMemoryTypeToGuard (MemoryType, AllocateType, GUARD_HEAP_TYPE_PAGE);
 }
 
+/**
+  Check to see if the heap guard is enabled for page and/or pool allocation.
+
+  @return TRUE if heap guard is enabled for page and/or pool; FALSE otherwise.
+**/
+BOOLEAN
+IsHeapGuardEnabled (
+  VOID
+  )
+{
+  return IsMemoryTypeToGuard (EfiMaxMemoryType, AllocateAnyPages,
+                              GUARD_HEAP_TYPE_POOL|GUARD_HEAP_TYPE_PAGE);
+}
+
 /**
   Set head Guard and tail Guard for the given memory range.
@@ -890,11 +912,8 @@ AdjustMemoryS (
   }
 
   Target = Start + Size - SizeRequested;
-
-  //
-  // At least one more page needed for Guard page.
-  //
-  if (Size < (SizeRequested + EFI_PAGES_TO_SIZE (1))) {
+  ASSERT (Target >= Start);
+  if (Target == 0) {
     return 0;
   }
 
@@ -1127,8 +1146,22 @@ CoreConvertPagesWithGuard (
   IN EFI_MEMORY_TYPE  NewType
   )
 {
+  UINT64  OldStart;
+  UINTN   OldPages;
+
   if (NewType == EfiConventionalMemory) {
+    OldStart = Start;
+    OldPages = NumberOfPages;
+
     AdjustMemoryF (&Start, &NumberOfPages);
+    //
+    // It's safe to unset the Guard pages while holding the memory lock, because
+    // no memory allocation should occur while the page attributes are being
+    // updated at this point. Unsetting the Guard pages before the free also
+    // prevents a Guard page just freed back to the pool from being allocated
+    // again before it is marked usable (changed from non-present to present).
+    //
+    UnsetGuardForMemory (OldStart, OldPages);
     if (NumberOfPages == 0) {
       return EFI_SUCCESS;
     }
@@ -1139,6 +1172,128 @@ CoreConvertPagesWithGuard (
   return CoreConvertPages (Start, NumberOfPages, NewType);
 }
 
+/**
+  Set all Guard pages which cannot be set before the CPU Arch Protocol is installed.
+**/
+VOID
+SetAllGuardPages (
+  VOID
+  )
+{
+  UINTN     Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];
+  UINTN     Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];
+  UINTN     Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
+  UINT64    Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];
+  UINT64    Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];
+  UINT64    TableEntry;
+  UINT64    Address;
+  UINT64    GuardPage;
+  INTN      Level;
+  UINTN     Index;
+  BOOLEAN   OnGuarding;
+
+  if (mGuardedMemoryMap == 0 ||
+      mMapLevel == 0 ||
+      mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH) {
+    return;
+  }
+
+  CopyMem (Entries, mLevelMask, sizeof (Entries));
+  CopyMem (Shifts, mLevelShift, sizeof (Shifts));
+
+  SetMem (Tables, sizeof (Tables), 0);
+  SetMem (Addresses, sizeof (Addresses), 0);
+  SetMem (Indices, sizeof (Indices), 0);
+
+  Level         = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
+  Tables[Level] = mGuardedMemoryMap;
+  Address       = 0;
+  OnGuarding    = FALSE;
+
+  DEBUG_CODE (
+    DumpGuardedMemoryBitmap ();
+  );
+
+  while (TRUE) {
+    if (Indices[Level] > Entries[Level]) {
+      Tables[Level] = 0;
+      Level        -= 1;
+    } else {
+
+      TableEntry  = ((UINT64 *)(UINTN)(Tables[Level]))[Indices[Level]];
+      Address     = Addresses[Level];
+
+      if (TableEntry == 0) {
+
+        OnGuarding = FALSE;
+
+      } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
+
+        Level            += 1;
+        Tables[Level]     = TableEntry;
+        Addresses[Level]  = Address;
+        Indices[Level]    = 0;
+
+        continue;
+
+      } else {
+
+        Index = 0;
+        while (Index < GUARDED_HEAP_MAP_ENTRY_BITS) {
+          if ((TableEntry & 1) == 1) {
+            if (OnGuarding) {
+              GuardPage = 0;
+            } else {
+              GuardPage = Address - EFI_PAGE_SIZE;
+            }
+            OnGuarding = TRUE;
+          } else {
+            if (OnGuarding) {
+              GuardPage = Address;
+            } else {
+              GuardPage = 0;
+            }
+            OnGuarding = FALSE;
+          }
+
+          if (GuardPage != 0) {
+            SetGuardPage (GuardPage);
+          }
+
+          if (TableEntry == 0) {
+            break;
+          }
+
+          TableEntry = RShiftU64 (TableEntry, 1);
+          Address   += EFI_PAGE_SIZE;
+          Index     += 1;
+        }
+      }
+    }
+
+    if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
+      break;
+    }
+
+    Indices[Level]  += 1;
+    Address          = (Level == 0) ? 0 : Addresses[Level - 1];
+    Addresses[Level] = Address | LShiftU64 (Indices[Level], Shifts[Level]);
+
+  }
+}
+
+/**
+  Notify function to set all Guard pages once the CPU Arch Protocol is installed.
+**/
+VOID
+HeapGuardCpuArchProtocolNotify (
+  VOID
+  )
+{
+  ASSERT (gCpu != NULL);
+  SetAllGuardPages ();
+}
+
 /**
   Helper function to convert a UINT64 value in binary to a string.
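
Note on the SetAllGuardPages bit-walk above: at the deepest level of mGuardedMemoryMap, each 64-bit word has one bit per page, and a set bit marks a guarded data page. The OnGuarding state machine emits a Guard page address only at the edges of a run of set bits: the page just below the first guarded page (head Guard) and the page right after the last guarded page (tail Guard). The standalone sketch below (plain C, not EDK2 code; WalkBitmapWord and the PAGE_SIZE macro are hypothetical names used only for illustration) reproduces that logic for a single map word:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE  0x1000ULL

/* Walk one 64-bit bitmap word whose bit N describes the page at
   Base + N * PAGE_SIZE, printing the Guard pages the EDK2 loop would set. */
static void
WalkBitmapWord (
  uint64_t  Entry,
  uint64_t  Base
  )
{
  uint64_t  Address    = Base;
  uint64_t  GuardPage;
  int       OnGuarding = 0;
  int       Index;

  for (Index = 0; Index < 64; Index++) {
    if ((Entry & 1) != 0) {
      /* First page of a guarded run: the head Guard sits just below it. */
      GuardPage  = OnGuarding ? 0 : (Address - PAGE_SIZE);
      OnGuarding = 1;
    } else {
      /* First free page after a guarded run: it becomes the tail Guard. */
      GuardPage  = OnGuarding ? Address : 0;
      OnGuarding = 0;
    }

    if (GuardPage != 0) {
      printf ("Guard page at 0x%llx\n", (unsigned long long)GuardPage);
    }

    /* Like the EDK2 loop, stop only after processing the bit that follows
       the last set bit, so the final tail Guard is not missed. */
    if (Entry == 0) {
      break;
    }

    Entry   >>= 1;
    Address  += PAGE_SIZE;
  }
}

int
main (void)
{
  /* 0x4C guards pages 2, 3 and 6 of the window at 0x100000; this prints
     Guard pages at page offsets 1, 4, 5 and 7. */
  WalkBitmapWord (0x4CULL, 0x100000ULL);
  return 0;
}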
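A related note: with the gCpu check moved out of IsMemoryTypeToGuard and into SetGuardPage/UnsetGuardPage, guarded allocations made before the CPU Arch Protocol exists are still recorded in the bitmap; the two early returns merely defer the page-attribute changes until HeapGuardCpuArchProtocolNotify replays them. In the full patch series that call is expected to come from DxeCore's CPU Arch Protocol notification path; the sketch below shows only the general registration pattern, and RegisterHeapGuardCpuArchNotify, CpuArchProtocolInstalledNotify, and the module-level variables are hypothetical names, not part of this patch.

#include <Uefi.h>
#include <Library/UefiBootServicesTableLib.h>
#include <Library/DebugLib.h>
#include <Protocol/Cpu.h>

// DxeCore's global CPU Arch Protocol pointer; assumed to be declared elsewhere.
extern EFI_CPU_ARCH_PROTOCOL  *gCpu;

// Declared in HeapGuard.h by this patch.
VOID HeapGuardCpuArchProtocolNotify (VOID);

STATIC EFI_EVENT  mCpuArchEvent;
STATIC VOID       *mCpuArchRegistration;

STATIC
VOID
EFIAPI
CpuArchProtocolInstalledNotify (
  IN EFI_EVENT  Event,
  IN VOID       *Context
  )
{
  EFI_STATUS  Status;

  Status = gBS->LocateProtocol (
                  &gEfiCpuArchProtocolGuid,
                  mCpuArchRegistration,
                  (VOID **)&gCpu
                  );
  if (EFI_ERROR (Status)) {
    return;
  }

  //
  // gCpu is now valid, so the Guard pages recorded in the bitmap while no
  // page-attribute control existed can finally be marked non-present.
  //
  HeapGuardCpuArchProtocolNotify ();
  gBS->CloseEvent (Event);
}

VOID
RegisterHeapGuardCpuArchNotify (
  VOID
  )
{
  EFI_STATUS  Status;

  Status = gBS->CreateEvent (
                  EVT_NOTIFY_SIGNAL,
                  TPL_CALLBACK,
                  CpuArchProtocolInstalledNotify,
                  NULL,
                  &mCpuArchEvent
                  );
  ASSERT_EFI_ERROR (Status);

  Status = gBS->RegisterProtocolNotify (
                  &gEfiCpuArchProtocolGuid,
                  mCpuArchEvent,
                  &mCpuArchRegistration
                  );
  ASSERT_EFI_ERROR (Status);
}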