2 UEFI Heap Guard functions.
4 Copyright (c) 2017-2018, Intel Corporation. All rights reserved.<BR>
5 SPDX-License-Identifier: BSD-2-Clause-Patent
11 #include "HeapGuard.h"
14 // Global to avoid infinite reentrance of memory allocation when updating
15 // page table attributes, which may need allocate pages for new PDE/PTE.
17 GLOBAL_REMOVE_IF_UNREFERENCED BOOLEAN mOnGuarding
= FALSE
;
20 // Pointer to table tracking the Guarded memory with bitmap, in which '1'
21 // is used to indicate memory guarded. '0' might be free memory or Guard
22 // page itself, depending on status of memory adjacent to it.
24 GLOBAL_REMOVE_IF_UNREFERENCED UINT64 mGuardedMemoryMap
= 0;
27 // Current depth level of map table pointed by mGuardedMemoryMap.
28 // mMapLevel must be initialized at least by 1. It will be automatically
29 // updated according to the address of memory just tracked.
31 GLOBAL_REMOVE_IF_UNREFERENCED UINTN mMapLevel
= 1;
34 // Shift and mask for each level of map table
36 GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelShift
[GUARDED_HEAP_MAP_TABLE_DEPTH
]
37 = GUARDED_HEAP_MAP_TABLE_DEPTH_SHIFTS
;
38 GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelMask
[GUARDED_HEAP_MAP_TABLE_DEPTH
]
39 = GUARDED_HEAP_MAP_TABLE_DEPTH_MASKS
;
42 // Used for promoting freed but not used pages.
44 GLOBAL_REMOVE_IF_UNREFERENCED EFI_PHYSICAL_ADDRESS mLastPromotedPage
= BASE_4GB
;
// NOTE(review): this file was damaged in extraction — each line retains an
// upstream line-number prefix and many original lines (signature, locals,
// braces) are missing from this fragment. Restore from upstream before
// compiling; comments below only annotate what the visible fragment shows.
47 Set corresponding bits in bitmap table to 1 according to the address.
49 @param[in] Address Start address to set for.
50 @param[in] BitNumber Number of bits to set.
51 @param[in] BitMap Pointer to bitmap which covers the Address.
58 IN EFI_PHYSICAL_ADDRESS Address
,
// StartBit: bit offset of Address inside its 64-bit map entry.
69 StartBit
= (UINTN
)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address
);
// EndBit: offset of the last bit of the run, modulo the entry width.
70 EndBit
= (StartBit
+ BitNumber
- 1) % GUARDED_HEAP_MAP_ENTRY_BITS
;
// If the run spills past the first 64-bit entry, split it into three parts:
// Msbs bits in the first entry, Qwords whole entries, Lsbs bits in the last.
72 if ((StartBit
+ BitNumber
) >= GUARDED_HEAP_MAP_ENTRY_BITS
) {
73 Msbs
= (GUARDED_HEAP_MAP_ENTRY_BITS
- StartBit
) %
74 GUARDED_HEAP_MAP_ENTRY_BITS
;
75 Lsbs
= (EndBit
+ 1) % GUARDED_HEAP_MAP_ENTRY_BITS
;
76 Qwords
= (BitNumber
- Msbs
) / GUARDED_HEAP_MAP_ENTRY_BITS
;
// Set the Msbs high bits of the first entry (mask of Msbs ones at StartBit).
84 *BitMap
|= LShiftU64 (LShiftU64 (1, Msbs
) - 1, StartBit
);
// Fill the whole 64-bit entries in the middle in one shot.
89 SetMem64 ((VOID
*)BitMap
, Qwords
* GUARDED_HEAP_MAP_ENTRY_BYTES
,
// Set the trailing Lsbs bits of the final entry.
95 *BitMap
|= (LShiftU64 (1, Lsbs
) - 1);
// NOTE(review): damaged extraction — lines keep upstream number prefixes and
// the signature/locals are missing from this fragment. Mirror of SetBits
// above, but clearing bits (&= ~mask / SetMem64 with 0) instead of setting.
100 Set corresponding bits in bitmap table to 0 according to the address.
102 @param[in] Address Start address to set for.
103 @param[in] BitNumber Number of bits to set.
104 @param[in] BitMap Pointer to bitmap which covers the Address.
111 IN EFI_PHYSICAL_ADDRESS Address
,
// Bit offset of Address within its 64-bit map entry.
122 StartBit
= (UINTN
)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address
);
123 EndBit
= (StartBit
+ BitNumber
- 1) % GUARDED_HEAP_MAP_ENTRY_BITS
;
// Split a run that crosses entry boundaries into Msbs / Qwords / Lsbs,
// exactly as in SetBits.
125 if ((StartBit
+ BitNumber
) >= GUARDED_HEAP_MAP_ENTRY_BITS
) {
126 Msbs
= (GUARDED_HEAP_MAP_ENTRY_BITS
- StartBit
) %
127 GUARDED_HEAP_MAP_ENTRY_BITS
;
128 Lsbs
= (EndBit
+ 1) % GUARDED_HEAP_MAP_ENTRY_BITS
;
129 Qwords
= (BitNumber
- Msbs
) / GUARDED_HEAP_MAP_ENTRY_BITS
;
// Clear the Msbs bits at StartBit in the first entry.
137 *BitMap
&= ~LShiftU64 (LShiftU64 (1, Msbs
) - 1, StartBit
);
// Zero the whole 64-bit entries in the middle.
142 SetMem64 ((VOID
*)BitMap
, Qwords
* GUARDED_HEAP_MAP_ENTRY_BYTES
, 0);
// Clear the trailing Lsbs bits of the final entry.
147 *BitMap
&= ~(LShiftU64 (1, Lsbs
) - 1);
// NOTE(review): damaged extraction — signature and some statements missing.
// Reads up to 64 bits from the bitmap, returning them right-aligned so that
// result bit 0 corresponds to the page at Address.
152 Get corresponding bits in bitmap table according to the address.
154 The value of bit 0 corresponds to the status of memory at given Address.
155 No more than 64 bits can be retrieved in one call.
157 @param[in] Address Start address to retrieve bits for.
158 @param[in] BitNumber Number of bits to get.
159 @param[in] BitMap Pointer to bitmap which covers the Address.
161 @return An integer containing the bits information.
166 IN EFI_PHYSICAL_ADDRESS Address
,
// At most one full 64-bit entry can be returned per call.
177 ASSERT (BitNumber
<= GUARDED_HEAP_MAP_ENTRY_BITS
);
179 StartBit
= (UINTN
)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address
);
180 EndBit
= (StartBit
+ BitNumber
- 1) % GUARDED_HEAP_MAP_ENTRY_BITS
;
// When the requested run crosses into the next 64-bit entry, read Msbs bits
// from the first entry and Lsbs bits from the second. Note this uses '>'
// (not '>=' as in SetBits/ClearBits): an exactly-aligned full entry does
// not cross a boundary here.
182 if ((StartBit
+ BitNumber
) > GUARDED_HEAP_MAP_ENTRY_BITS
) {
183 Msbs
= GUARDED_HEAP_MAP_ENTRY_BITS
- StartBit
;
184 Lsbs
= (EndBit
+ 1) % GUARDED_HEAP_MAP_ENTRY_BITS
;
// Whole-entry, aligned request — presumably returned directly without
// shifting/masking (the branch body is missing from this fragment; confirm
// against upstream).
190 if (StartBit
== 0 && BitNumber
== GUARDED_HEAP_MAP_ENTRY_BITS
) {
// Low part: Msbs bits taken from the first entry, shifted down to bit 0.
193 Result
= RShiftU64((*BitMap
), StartBit
) & (LShiftU64(1, Msbs
) - 1);
// High part: Lsbs bits from the following entry, placed above the low part.
196 Result
|= LShiftU64 ((*BitMap
) & (LShiftU64 (1, Lsbs
) - 1), Msbs
);
204 Locate the pointer of bitmap from the guarded memory bitmap tables, which
205 covers the given Address.
207 @param[in] Address Start address to search the bitmap for.
208 @param[in] AllocMapUnit Flag to indicate memory allocation for the table.
209 @param[out] BitMap Pointer to bitmap which covers the Address.
211 @return The bit number from given Address to the end of current map table.
214 FindGuardedMemoryMap (
215 IN EFI_PHYSICAL_ADDRESS Address
,
216 IN BOOLEAN AllocMapUnit
,
231 // Adjust current map table depth according to the address to access
233 while (AllocMapUnit
&&
234 mMapLevel
< GUARDED_HEAP_MAP_TABLE_DEPTH
&&
237 mLevelShift
[GUARDED_HEAP_MAP_TABLE_DEPTH
- mMapLevel
- 1]
240 if (mGuardedMemoryMap
!= 0) {
241 Size
= (mLevelMask
[GUARDED_HEAP_MAP_TABLE_DEPTH
- mMapLevel
- 1] + 1)
242 * GUARDED_HEAP_MAP_ENTRY_BYTES
;
243 Status
= CoreInternalAllocatePages (
246 EFI_SIZE_TO_PAGES (Size
),
250 ASSERT_EFI_ERROR (Status
);
251 ASSERT (MapMemory
!= 0);
253 SetMem ((VOID
*)(UINTN
)MapMemory
, Size
, 0);
255 *(UINT64
*)(UINTN
)MapMemory
= mGuardedMemoryMap
;
256 mGuardedMemoryMap
= MapMemory
;
263 GuardMap
= &mGuardedMemoryMap
;
264 for (Level
= GUARDED_HEAP_MAP_TABLE_DEPTH
- mMapLevel
;
265 Level
< GUARDED_HEAP_MAP_TABLE_DEPTH
;
268 if (*GuardMap
== 0) {
274 Size
= (mLevelMask
[Level
] + 1) * GUARDED_HEAP_MAP_ENTRY_BYTES
;
275 Status
= CoreInternalAllocatePages (
278 EFI_SIZE_TO_PAGES (Size
),
282 ASSERT_EFI_ERROR (Status
);
283 ASSERT (MapMemory
!= 0);
285 SetMem ((VOID
*)(UINTN
)MapMemory
, Size
, 0);
286 *GuardMap
= MapMemory
;
289 Index
= (UINTN
)RShiftU64 (Address
, mLevelShift
[Level
]);
290 Index
&= mLevelMask
[Level
];
291 GuardMap
= (UINT64
*)(UINTN
)((*GuardMap
) + Index
* sizeof (UINT64
));
295 BitsToUnitEnd
= GUARDED_HEAP_MAP_BITS
- GUARDED_HEAP_MAP_BIT_INDEX (Address
);
298 return BitsToUnitEnd
;
302 Set corresponding bits in bitmap table to 1 according to given memory range.
304 @param[in] Address Memory address to guard from.
305 @param[in] NumberOfPages Number of pages to guard.
311 SetGuardedMemoryBits (
312 IN EFI_PHYSICAL_ADDRESS Address
,
313 IN UINTN NumberOfPages
320 while (NumberOfPages
> 0) {
321 BitsToUnitEnd
= FindGuardedMemoryMap (Address
, TRUE
, &BitMap
);
322 ASSERT (BitMap
!= NULL
);
324 if (NumberOfPages
> BitsToUnitEnd
) {
326 Bits
= BitsToUnitEnd
;
328 Bits
= NumberOfPages
;
331 SetBits (Address
, Bits
, BitMap
);
333 NumberOfPages
-= Bits
;
334 Address
+= EFI_PAGES_TO_SIZE (Bits
);
339 Clear corresponding bits in bitmap table according to given memory range.
341 @param[in] Address Memory address to unset from.
342 @param[in] NumberOfPages Number of pages to unset guard.
348 ClearGuardedMemoryBits (
349 IN EFI_PHYSICAL_ADDRESS Address
,
350 IN UINTN NumberOfPages
357 while (NumberOfPages
> 0) {
358 BitsToUnitEnd
= FindGuardedMemoryMap (Address
, TRUE
, &BitMap
);
359 ASSERT (BitMap
!= NULL
);
361 if (NumberOfPages
> BitsToUnitEnd
) {
363 Bits
= BitsToUnitEnd
;
365 Bits
= NumberOfPages
;
368 ClearBits (Address
, Bits
, BitMap
);
370 NumberOfPages
-= Bits
;
371 Address
+= EFI_PAGES_TO_SIZE (Bits
);
376 Retrieve corresponding bits in bitmap table according to given memory range.
378 @param[in] Address Memory address to retrieve from.
379 @param[in] NumberOfPages Number of pages to retrieve.
381 @return An integer containing the guarded memory bitmap.
384 GetGuardedMemoryBits (
385 IN EFI_PHYSICAL_ADDRESS Address
,
386 IN UINTN NumberOfPages
395 ASSERT (NumberOfPages
<= GUARDED_HEAP_MAP_ENTRY_BITS
);
399 while (NumberOfPages
> 0) {
400 BitsToUnitEnd
= FindGuardedMemoryMap (Address
, FALSE
, &BitMap
);
402 if (NumberOfPages
> BitsToUnitEnd
) {
404 Bits
= BitsToUnitEnd
;
406 Bits
= NumberOfPages
;
409 if (BitMap
!= NULL
) {
410 Result
|= LShiftU64 (GetBits (Address
, Bits
, BitMap
), Shift
);
414 NumberOfPages
-= Bits
;
415 Address
+= EFI_PAGES_TO_SIZE (Bits
);
// NOTE(review): damaged extraction — return type, signature and the branch
// bodies are missing from this fragment; restore from upstream.
422 Get bit value in bitmap table for the given address.
424 @param[in] Address The address to retrieve for.
431 IN EFI_PHYSICAL_ADDRESS Address
// Look up the bitmap entry covering Address without allocating new map
// tables (AllocMapUnit == FALSE).
436 FindGuardedMemoryMap (Address
, FALSE
, &GuardMap
);
// A NULL GuardMap means no map unit covers Address (never tracked).
437 if (GuardMap
!= NULL
) {
// Test the single bit that corresponds to the page at Address.
438 if (RShiftU64 (*GuardMap
,
439 GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address
)) & 1) {
// NOTE(review): damaged extraction — signature missing from this fragment.
449 Check to see if the page at the given address is a Guard page or not.
451 @param[in] Address The address to check for.
453 @return TRUE The page at Address is a Guard page.
454 @return FALSE The page at Address is not a Guard page.
459 IN EFI_PHYSICAL_ADDRESS Address
465 // There must be at least one guarded page before and/or after given
466 // address if it's a Guard page. The bitmap pattern should be one of
// Fetch 3 bits starting one page below Address: bit0 = previous page,
// bit1 = Address's page, bit2 = next page.
469 BitMap
= GetGuardedMemoryBits (Address
- EFI_PAGE_SIZE
, 3);
// Guard-page patterns: guarded page only before (001b), only after (100b),
// or on both sides (101b) — the middle bit (this page) is always 0.
470 return ((BitMap
== BIT0
) || (BitMap
== BIT2
) || (BitMap
== (BIT2
| BIT0
)));
// NOTE(review): damaged extraction — signature missing from this fragment.
475 Check to see if the page at the given address is guarded or not.
477 @param[in] Address The address to check for.
479 @return TRUE The page at Address is guarded.
480 @return FALSE The page at Address is not guarded.
485 IN EFI_PHYSICAL_ADDRESS Address
// Thin wrapper: a page is guarded iff its bit in the guard bitmap is 1.
488 return (GetGuardMapBit (Address
) == 1);
// NOTE(review): damaged extraction — signature and the mOnGuarding set/clear
// statements referenced by the comments below are missing from this fragment.
// (Typo "PRSENT" is upstream text; left untouched here.)
492 Set the page at the given address to be a Guard page.
494 This is done by changing the page table attribute to be NOT PRSENT.
496 @param[in] BaseAddress Page address to Guard at
503 IN EFI_PHYSICAL_ADDRESS BaseAddress
513 // Set flag to make sure allocating memory without GUARD for page table
514 // operation; otherwise infinite loops could be caused.
518 // Note: This might overwrite other attributes needed by other features,
519 // such as NX memory protection.
// Mark the page read-protected (EFI_MEMORY_RP) so any access faults.
521 Status
= gCpu
->SetMemoryAttributes (gCpu
, BaseAddress
, EFI_PAGE_SIZE
, EFI_MEMORY_RP
);
522 ASSERT_EFI_ERROR (Status
);
// NOTE(review): damaged extraction — signature, locals and the initial value
// of Attributes are missing from this fragment; restore from upstream.
// (Typo "PRSENT" is upstream text; left untouched here.)
527 Unset the Guard page at the given address to the normal memory.
529 This is done by changing the page table attribute to be PRSENT.
531 @param[in] BaseAddress Page address to Guard at.
538 IN EFI_PHYSICAL_ADDRESS BaseAddress
549 // Once the Guard page is unset, it will be freed back to memory pool. NX
550 // memory protection must be restored for this page if NX is enabled for free
// If the NX policy covers EfiConventionalMemory, re-apply execute-protect
// when returning the page to the free pool.
554 if ((PcdGet64 (PcdDxeNxMemoryProtectionPolicy
) & (1 << EfiConventionalMemory
)) != 0) {
555 Attributes
|= EFI_MEMORY_XP
;
559 // Set flag to make sure allocating memory without GUARD for page table
560 // operation; otherwise infinite loops could be caused.
564 // Note: This might overwrite other attributes needed by other features,
565 // such as memory protection (NX). Please make sure they are not enabled
// Restore normal (present) mapping with the computed attributes.
568 Status
= gCpu
->SetMemoryAttributes (gCpu
, BaseAddress
, EFI_PAGE_SIZE
, Attributes
);
569 ASSERT_EFI_ERROR (Status
);
574 Check to see if the memory at the given address should be guarded or not.
576 @param[in] MemoryType Memory type to check.
577 @param[in] AllocateType Allocation type to check.
578 @param[in] PageOrPool Indicate a page allocation or pool allocation.
581 @return TRUE The given type of memory should be guarded.
582 @return FALSE The given type of memory should not be guarded.
585 IsMemoryTypeToGuard (
586 IN EFI_MEMORY_TYPE MemoryType
,
587 IN EFI_ALLOCATE_TYPE AllocateType
,
594 if (AllocateType
== AllocateAddress
) {
598 if ((PcdGet8 (PcdHeapGuardPropertyMask
) & PageOrPool
) == 0) {
602 if (PageOrPool
== GUARD_HEAP_TYPE_POOL
) {
603 ConfigBit
= PcdGet64 (PcdHeapGuardPoolType
);
604 } else if (PageOrPool
== GUARD_HEAP_TYPE_PAGE
) {
605 ConfigBit
= PcdGet64 (PcdHeapGuardPageType
);
607 ConfigBit
= (UINT64
)-1;
610 if ((UINT32
)MemoryType
>= MEMORY_TYPE_OS_RESERVED_MIN
) {
612 } else if ((UINT32
) MemoryType
>= MEMORY_TYPE_OEM_RESERVED_MIN
) {
614 } else if (MemoryType
< EfiMaxMemoryType
) {
615 TestBit
= LShiftU64 (1, MemoryType
);
616 } else if (MemoryType
== EfiMaxMemoryType
) {
617 TestBit
= (UINT64
)-1;
622 return ((ConfigBit
& TestBit
) != 0);
// NOTE(review): damaged extraction — signature missing from this fragment.
626 Check to see if the pool at the given address should be guarded or not.
628 @param[in] MemoryType Pool type to check.
631 @return TRUE The given type of pool should be guarded.
632 @return FALSE The given type of pool should not be guarded.
636 IN EFI_MEMORY_TYPE MemoryType
// Delegate to the common policy check with the pool guard sub-type;
// AllocateAnyPages is passed since allocation type is irrelevant for pool.
639 return IsMemoryTypeToGuard (MemoryType
, AllocateAnyPages
,
640 GUARD_HEAP_TYPE_POOL
);
// NOTE(review): damaged extraction — signature missing from this fragment.
644 Check to see if the page at the given address should be guarded or not.
646 @param[in] MemoryType Page type to check.
647 @param[in] AllocateType Allocation type to check.
649 @return TRUE The given type of page should be guarded.
650 @return FALSE The given type of page should not be guarded.
654 IN EFI_MEMORY_TYPE MemoryType
,
655 IN EFI_ALLOCATE_TYPE AllocateType
// Delegate to the common policy check with the page guard sub-type.
658 return IsMemoryTypeToGuard (MemoryType
, AllocateType
, GUARD_HEAP_TYPE_PAGE
);
// NOTE(review): damaged extraction — signature missing from this fragment.
662 Check to see if the heap guard is enabled for page and/or pool allocation.
664 @param[in] GuardType Specify the sub-type(s) of Heap Guard.
// EfiMaxMemoryType + AllocateAnyPages act as wildcards so only the
// GuardType sub-type bits decide the result.
673 return IsMemoryTypeToGuard (EfiMaxMemoryType
, AllocateAnyPages
, GuardType
);
677 Set head Guard and tail Guard for the given memory range.
679 @param[in] Memory Base address of memory to set guard for.
680 @param[in] NumberOfPages Memory size in pages.
686 IN EFI_PHYSICAL_ADDRESS Memory
,
687 IN UINTN NumberOfPages
690 EFI_PHYSICAL_ADDRESS GuardPage
;
695 GuardPage
= Memory
+ EFI_PAGES_TO_SIZE (NumberOfPages
);
696 if (!IsGuardPage (GuardPage
)) {
697 SetGuardPage (GuardPage
);
701 GuardPage
= Memory
- EFI_PAGES_TO_SIZE (1);
702 if (!IsGuardPage (GuardPage
)) {
703 SetGuardPage (GuardPage
);
707 // Mark the memory range as Guarded
709 SetGuardedMemoryBits (Memory
, NumberOfPages
);
713 Unset head Guard and tail Guard for the given memory range.
715 @param[in] Memory Base address of memory to unset guard for.
716 @param[in] NumberOfPages Memory size in pages.
721 UnsetGuardForMemory (
722 IN EFI_PHYSICAL_ADDRESS Memory
,
723 IN UINTN NumberOfPages
726 EFI_PHYSICAL_ADDRESS GuardPage
;
729 if (NumberOfPages
== 0) {
734 // Head Guard must be one page before, if any.
737 // -------------------
738 // Head Guard -> 0 1 -> Don't free Head Guard (shared Guard)
739 // Head Guard -> 0 0 -> Free Head Guard either (not shared Guard)
740 // 1 X -> Don't free first page (need a new Guard)
741 // (it'll be turned into a Guard page later)
742 // -------------------
745 GuardPage
= Memory
- EFI_PAGES_TO_SIZE (1);
746 GuardBitmap
= GetGuardedMemoryBits (Memory
- EFI_PAGES_TO_SIZE (2), 2);
747 if ((GuardBitmap
& BIT1
) == 0) {
749 // Head Guard exists.
751 if ((GuardBitmap
& BIT0
) == 0) {
753 // If the head Guard is not a tail Guard of adjacent memory block,
756 UnsetGuardPage (GuardPage
);
760 // Pages before memory to free are still in Guard. It's a partial free
761 // case. Turn first page of memory block to free into a new Guard.
763 SetGuardPage (Memory
);
767 // Tail Guard must be the page after this memory block to free, if any.
770 // --------------------
771 // 1 0 <- Tail Guard -> Don't free Tail Guard (shared Guard)
772 // 0 0 <- Tail Guard -> Free Tail Guard either (not shared Guard)
773 // X 1 -> Don't free last page (need a new Guard)
774 // (it'll be turned into a Guard page later)
775 // --------------------
778 GuardPage
= Memory
+ EFI_PAGES_TO_SIZE (NumberOfPages
);
779 GuardBitmap
= GetGuardedMemoryBits (GuardPage
, 2);
780 if ((GuardBitmap
& BIT0
) == 0) {
782 // Tail Guard exists.
784 if ((GuardBitmap
& BIT1
) == 0) {
786 // If the tail Guard is not a head Guard of adjacent memory block,
787 // free it; otherwise, keep it.
789 UnsetGuardPage (GuardPage
);
793 // Pages after memory to free are still in Guard. It's a partial free
794 // case. We need to keep one page to be a head Guard.
796 SetGuardPage (GuardPage
- EFI_PAGES_TO_SIZE (1));
800 // No matter what, we just clear the mark of the Guarded memory.
802 ClearGuardedMemoryBits(Memory
, NumberOfPages
);
806 Adjust address of free memory according to existing and/or required Guard.
808 This function will check if there're existing Guard pages of adjacent
809 memory blocks, and try to use it as the Guard page of the memory to be
812 @param[in] Start Start address of free memory block.
813 @param[in] Size Size of free memory block.
814 @param[in] SizeRequested Size of memory to allocate.
816 @return The end address of memory block found.
817 @return 0 if no enough space for the required size of memory and its Guard.
823 IN UINT64 SizeRequested
829 // UEFI spec requires that allocated pool must be 8-byte aligned. If it's
830 // indicated to put the pool near the Tail Guard, we need extra bytes to
831 // make sure alignment of the returned pool address.
833 if ((PcdGet8 (PcdHeapGuardPropertyMask
) & BIT7
) == 0) {
834 SizeRequested
= ALIGN_VALUE(SizeRequested
, 8);
837 Target
= Start
+ Size
- SizeRequested
;
838 ASSERT (Target
>= Start
);
843 if (!IsGuardPage (Start
+ Size
)) {
844 // No Guard at tail to share. One more page is needed.
845 Target
-= EFI_PAGES_TO_SIZE (1);
849 if (Target
< Start
) {
854 if (Target
== Start
) {
855 if (!IsGuardPage (Target
- EFI_PAGES_TO_SIZE (1))) {
856 // No enough space for a new head Guard if no Guard at head to share.
861 // OK, we have enough pages for memory and its Guards. Return the End of the
863 return Target
+ SizeRequested
- 1;
867 Adjust the start address and number of pages to free according to Guard.
869 The purpose of this function is to keep the shared Guard page with adjacent
870 memory block if it's still in guard, or free it if no more sharing. Another
871 is to reserve pages as Guard pages in partial page free situation.
873 @param[in,out] Memory Base address of memory to free.
874 @param[in,out] NumberOfPages Size of memory to free.
880 IN OUT EFI_PHYSICAL_ADDRESS
*Memory
,
881 IN OUT UINTN
*NumberOfPages
884 EFI_PHYSICAL_ADDRESS Start
;
885 EFI_PHYSICAL_ADDRESS MemoryToTest
;
889 if (Memory
== NULL
|| NumberOfPages
== NULL
|| *NumberOfPages
== 0) {
894 PagesToFree
= *NumberOfPages
;
897 // Head Guard must be one page before, if any.
900 // -------------------
901 // Head Guard -> 0 1 -> Don't free Head Guard (shared Guard)
902 // Head Guard -> 0 0 -> Free Head Guard either (not shared Guard)
903 // 1 X -> Don't free first page (need a new Guard)
904 // (it'll be turned into a Guard page later)
905 // -------------------
908 MemoryToTest
= Start
- EFI_PAGES_TO_SIZE (2);
909 GuardBitmap
= GetGuardedMemoryBits (MemoryToTest
, 2);
910 if ((GuardBitmap
& BIT1
) == 0) {
912 // Head Guard exists.
914 if ((GuardBitmap
& BIT0
) == 0) {
916 // If the head Guard is not a tail Guard of adjacent memory block,
917 // free it; otherwise, keep it.
919 Start
-= EFI_PAGES_TO_SIZE (1);
924 // No Head Guard, and pages before memory to free are still in Guard. It's a
925 // partial free case. We need to keep one page to be a tail Guard.
927 Start
+= EFI_PAGES_TO_SIZE (1);
932 // Tail Guard must be the page after this memory block to free, if any.
935 // --------------------
936 // 1 0 <- Tail Guard -> Don't free Tail Guard (shared Guard)
937 // 0 0 <- Tail Guard -> Free Tail Guard either (not shared Guard)
938 // X 1 -> Don't free last page (need a new Guard)
939 // (it'll be turned into a Guard page later)
940 // --------------------
943 MemoryToTest
= Start
+ EFI_PAGES_TO_SIZE (PagesToFree
);
944 GuardBitmap
= GetGuardedMemoryBits (MemoryToTest
, 2);
945 if ((GuardBitmap
& BIT0
) == 0) {
947 // Tail Guard exists.
949 if ((GuardBitmap
& BIT1
) == 0) {
951 // If the tail Guard is not a head Guard of adjacent memory block,
952 // free it; otherwise, keep it.
956 } else if (PagesToFree
> 0) {
958 // No Tail Guard, and pages after memory to free are still in Guard. It's a
959 // partial free case. We need to keep one page to be a head Guard.
965 *NumberOfPages
= PagesToFree
;
969 Adjust the base and number of pages to really allocate according to Guard.
971 @param[in,out] Memory Base address of free memory.
972 @param[in,out] NumberOfPages Size of memory to allocate.
978 IN OUT EFI_PHYSICAL_ADDRESS
*Memory
,
979 IN OUT UINTN
*NumberOfPages
983 // FindFreePages() has already taken the Guard into account. It's safe to
984 // adjust the start address and/or number of pages here, to make sure that
985 // the Guards are also "allocated".
987 if (!IsGuardPage (*Memory
+ EFI_PAGES_TO_SIZE (*NumberOfPages
))) {
988 // No tail Guard, add one.
992 if (!IsGuardPage (*Memory
- EFI_PAGE_SIZE
)) {
993 // No head Guard, add one.
994 *Memory
-= EFI_PAGE_SIZE
;
1000 Adjust the pool head position to make sure the Guard page is adjavent to
1001 pool tail or pool head.
1003 @param[in] Memory Base address of memory allocated.
1004 @param[in] NoPages Number of pages actually allocated.
1005 @param[in] Size Size of memory requested.
1006 (plus pool head/tail overhead)
1008 @return Address of pool head.
1012 IN EFI_PHYSICAL_ADDRESS Memory
,
1017 if (Memory
== 0 || (PcdGet8 (PcdHeapGuardPropertyMask
) & BIT7
) != 0) {
1019 // Pool head is put near the head Guard
1021 return (VOID
*)(UINTN
)Memory
;
1025 // Pool head is put near the tail Guard
1027 Size
= ALIGN_VALUE (Size
, 8);
1028 return (VOID
*)(UINTN
)(Memory
+ EFI_PAGES_TO_SIZE (NoPages
) - Size
);
1032 Get the page base address according to pool head address.
1034 @param[in] Memory Head address of pool to free.
1036 @return Address of pool head.
1040 IN EFI_PHYSICAL_ADDRESS Memory
1043 if (Memory
== 0 || (PcdGet8 (PcdHeapGuardPropertyMask
) & BIT7
) != 0) {
1045 // Pool head is put near the head Guard
1047 return (VOID
*)(UINTN
)Memory
;
1051 // Pool head is put near the tail Guard
1053 return (VOID
*)(UINTN
)(Memory
& ~EFI_PAGE_MASK
);
1057 Allocate or free guarded memory.
1059 @param[in] Start Start address of memory to allocate or free.
1060 @param[in] NumberOfPages Memory size in pages.
1061 @param[in] NewType Memory type to convert to.
1066 CoreConvertPagesWithGuard (
1068 IN UINTN NumberOfPages
,
1069 IN EFI_MEMORY_TYPE NewType
1075 if (NewType
== EfiConventionalMemory
) {
1077 OldPages
= NumberOfPages
;
1079 AdjustMemoryF (&Start
, &NumberOfPages
);
1081 // It's safe to unset Guard page inside memory lock because there should
1082 // be no memory allocation occurred in updating memory page attribute at
1083 // this point. And unsetting Guard page before free will prevent Guard
1084 // page just freed back to pool from being allocated right away before
1085 // marking it usable (from non-present to present).
1087 UnsetGuardForMemory (OldStart
, OldPages
);
1088 if (NumberOfPages
== 0) {
1092 AdjustMemoryA (&Start
, &NumberOfPages
);
1095 return CoreConvertPages (Start
, NumberOfPages
, NewType
);
1099 Set all Guard pages which cannot be set before CPU Arch Protocol installed.
1106 UINTN Entries
[GUARDED_HEAP_MAP_TABLE_DEPTH
];
1107 UINTN Shifts
[GUARDED_HEAP_MAP_TABLE_DEPTH
];
1108 UINTN Indices
[GUARDED_HEAP_MAP_TABLE_DEPTH
];
1109 UINT64 Tables
[GUARDED_HEAP_MAP_TABLE_DEPTH
];
1110 UINT64 Addresses
[GUARDED_HEAP_MAP_TABLE_DEPTH
];
1118 if (mGuardedMemoryMap
== 0 ||
1120 mMapLevel
> GUARDED_HEAP_MAP_TABLE_DEPTH
) {
1124 CopyMem (Entries
, mLevelMask
, sizeof (Entries
));
1125 CopyMem (Shifts
, mLevelShift
, sizeof (Shifts
));
1127 SetMem (Tables
, sizeof(Tables
), 0);
1128 SetMem (Addresses
, sizeof(Addresses
), 0);
1129 SetMem (Indices
, sizeof(Indices
), 0);
1131 Level
= GUARDED_HEAP_MAP_TABLE_DEPTH
- mMapLevel
;
1132 Tables
[Level
] = mGuardedMemoryMap
;
1137 DumpGuardedMemoryBitmap ();
1141 if (Indices
[Level
] > Entries
[Level
]) {
1146 TableEntry
= ((UINT64
*)(UINTN
)(Tables
[Level
]))[Indices
[Level
]];
1147 Address
= Addresses
[Level
];
1149 if (TableEntry
== 0) {
1153 } else if (Level
< GUARDED_HEAP_MAP_TABLE_DEPTH
- 1) {
1156 Tables
[Level
] = TableEntry
;
1157 Addresses
[Level
] = Address
;
1165 while (Index
< GUARDED_HEAP_MAP_ENTRY_BITS
) {
1166 if ((TableEntry
& 1) == 1) {
1170 GuardPage
= Address
- EFI_PAGE_SIZE
;
1175 GuardPage
= Address
;
1182 if (GuardPage
!= 0) {
1183 SetGuardPage (GuardPage
);
1186 if (TableEntry
== 0) {
1190 TableEntry
= RShiftU64 (TableEntry
, 1);
1191 Address
+= EFI_PAGE_SIZE
;
1197 if (Level
< (GUARDED_HEAP_MAP_TABLE_DEPTH
- (INTN
)mMapLevel
)) {
1201 Indices
[Level
] += 1;
1202 Address
= (Level
== 0) ? 0 : Addresses
[Level
- 1];
1203 Addresses
[Level
] = Address
| LShiftU64(Indices
[Level
], Shifts
[Level
]);
1209 Find the address of top-most guarded free page.
1211 @param[out] Address Start address of top-most guarded free page.
1216 GetLastGuardedFreePageAddress (
1217 OUT EFI_PHYSICAL_ADDRESS
*Address
1220 EFI_PHYSICAL_ADDRESS AddressGranularity
;
1221 EFI_PHYSICAL_ADDRESS BaseAddress
;
1226 ASSERT (mMapLevel
>= 1);
1229 Map
= mGuardedMemoryMap
;
1230 for (Level
= GUARDED_HEAP_MAP_TABLE_DEPTH
- mMapLevel
;
1231 Level
< GUARDED_HEAP_MAP_TABLE_DEPTH
;
1233 AddressGranularity
= LShiftU64 (1, mLevelShift
[Level
]);
1236 // Find the non-NULL entry at largest index.
1238 for (Index
= (INTN
)mLevelMask
[Level
]; Index
>= 0 ; --Index
) {
1239 if (((UINT64
*)(UINTN
)Map
)[Index
] != 0) {
1240 BaseAddress
+= MultU64x32 (AddressGranularity
, (UINT32
)Index
);
1241 Map
= ((UINT64
*)(UINTN
)Map
)[Index
];
1248 // Find the non-zero MSB then get the page address.
1251 Map
= RShiftU64 (Map
, 1);
1252 BaseAddress
+= EFI_PAGES_TO_SIZE (1);
1255 *Address
= BaseAddress
;
// NOTE(review): damaged extraction — purpose line and signature missing from
// this fragment. Visible body only records the freed range in the guard
// bitmap; it does not touch page-table attributes.
1261 @param[in] BaseAddress Base address of just freed pages.
1262 @param[in] Pages Number of freed pages.
1268 IN EFI_PHYSICAL_ADDRESS BaseAddress
,
// Mark the whole freed range as guarded in the bitmap.
1272 SetGuardedMemoryBits (BaseAddress
, Pages
);
1276 Record freed pages as well as mark them as not-present.
1278 @param[in] BaseAddress Base address of just freed pages.
1279 @param[in] Pages Number of freed pages.
1286 IN EFI_PHYSICAL_ADDRESS BaseAddress
,
1293 // Legacy memory lower than 1MB might be accessed with no allocation. Leave
1296 if (BaseAddress
< BASE_1MB
) {
1300 MarkFreedPages (BaseAddress
, Pages
);
1303 // Set flag to make sure allocating memory without GUARD for page table
1304 // operation; otherwise infinite loops could be caused.
1308 // Note: This might overwrite other attributes needed by other features,
1309 // such as NX memory protection.
1311 Status
= gCpu
->SetMemoryAttributes (
1314 EFI_PAGES_TO_SIZE (Pages
),
1318 // Normally we should ASSERT the returned Status. But there might be memory
1319 // alloc/free involved in SetMemoryAttributes(), which might fail this
1320 // calling. It's rare case so it's OK to let a few tiny holes be not-guarded.
1322 if (EFI_ERROR (Status
)) {
1323 DEBUG ((DEBUG_WARN
, "Failed to guard freed pages: %p (%lu)\n", BaseAddress
, (UINT64
)Pages
));
1325 mOnGuarding
= FALSE
;
// NOTE(review): damaged extraction — return type and closing lines missing
// from this fragment.
1330 Record freed pages as well as mark them as not-present, if enabled.
1332 @param[in] BaseAddress Base address of just freed pages.
1333 @param[in] Pages Number of freed pages.
1339 GuardFreedPagesChecked (
1340 IN EFI_PHYSICAL_ADDRESS BaseAddress
,
// Guard freed pages only when the freed-memory guard feature is on;
// otherwise this is a no-op.
1344 if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED
)) {
1345 GuardFreedPages (BaseAddress
, Pages
);
1350 Mark all pages freed before CPU Arch Protocol as not-present.
1354 GuardAllFreedPages (
1358 UINTN Entries
[GUARDED_HEAP_MAP_TABLE_DEPTH
];
1359 UINTN Shifts
[GUARDED_HEAP_MAP_TABLE_DEPTH
];
1360 UINTN Indices
[GUARDED_HEAP_MAP_TABLE_DEPTH
];
1361 UINT64 Tables
[GUARDED_HEAP_MAP_TABLE_DEPTH
];
1362 UINT64 Addresses
[GUARDED_HEAP_MAP_TABLE_DEPTH
];
1368 UINTN GuardPageNumber
;
1370 if (mGuardedMemoryMap
== 0 ||
1372 mMapLevel
> GUARDED_HEAP_MAP_TABLE_DEPTH
) {
1376 CopyMem (Entries
, mLevelMask
, sizeof (Entries
));
1377 CopyMem (Shifts
, mLevelShift
, sizeof (Shifts
));
1379 SetMem (Tables
, sizeof(Tables
), 0);
1380 SetMem (Addresses
, sizeof(Addresses
), 0);
1381 SetMem (Indices
, sizeof(Indices
), 0);
1383 Level
= GUARDED_HEAP_MAP_TABLE_DEPTH
- mMapLevel
;
1384 Tables
[Level
] = mGuardedMemoryMap
;
1386 GuardPage
= (UINT64
)-1;
1387 GuardPageNumber
= 0;
1390 if (Indices
[Level
] > Entries
[Level
]) {
1394 TableEntry
= ((UINT64
*)(UINTN
)(Tables
[Level
]))[Indices
[Level
]];
1395 Address
= Addresses
[Level
];
1397 if (Level
< GUARDED_HEAP_MAP_TABLE_DEPTH
- 1) {
1399 Tables
[Level
] = TableEntry
;
1400 Addresses
[Level
] = Address
;
1406 while (BitIndex
!= 0) {
1407 if ((TableEntry
& BitIndex
) != 0) {
1408 if (GuardPage
== (UINT64
)-1) {
1409 GuardPage
= Address
;
1412 } else if (GuardPageNumber
> 0) {
1413 GuardFreedPages (GuardPage
, GuardPageNumber
);
1414 GuardPageNumber
= 0;
1415 GuardPage
= (UINT64
)-1;
1418 if (TableEntry
== 0) {
1422 Address
+= EFI_PAGES_TO_SIZE (1);
1423 BitIndex
= LShiftU64 (BitIndex
, 1);
1428 if (Level
< (GUARDED_HEAP_MAP_TABLE_DEPTH
- (INTN
)mMapLevel
)) {
1432 Indices
[Level
] += 1;
1433 Address
= (Level
== 0) ? 0 : Addresses
[Level
- 1];
1434 Addresses
[Level
] = Address
| LShiftU64 (Indices
[Level
], Shifts
[Level
]);
1439 // Update the maximum address of freed page which can be used for memory
1440 // promotion upon out-of-memory-space.
1442 GetLastGuardedFreePageAddress (&Address
);
1444 mLastPromotedPage
= Address
;
1449 This function checks to see if the given memory map descriptor in a memory map
1450 can be merged with any guarded free pages.
1452 @param MemoryMapEntry A pointer to a descriptor in MemoryMap.
1453 @param MaxAddress Maximum address to stop the merge.
1460 IN EFI_MEMORY_DESCRIPTOR
*MemoryMapEntry
,
1461 IN EFI_PHYSICAL_ADDRESS MaxAddress
1464 EFI_PHYSICAL_ADDRESS EndAddress
;
1468 if (!IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED
) ||
1469 MemoryMapEntry
->Type
>= EfiMemoryMappedIO
) {
1474 Pages
= EFI_SIZE_TO_PAGES ((UINTN
)(MaxAddress
- MemoryMapEntry
->PhysicalStart
));
1475 Pages
-= (INTN
)MemoryMapEntry
->NumberOfPages
;
1478 EndAddress
= MemoryMapEntry
->PhysicalStart
+
1479 EFI_PAGES_TO_SIZE ((UINTN
)MemoryMapEntry
->NumberOfPages
);
1480 Bitmap
= GetGuardedMemoryBits (EndAddress
, GUARDED_HEAP_MAP_ENTRY_BITS
);
1483 if ((Bitmap
& 1) == 0) {
1488 MemoryMapEntry
->NumberOfPages
++;
1489 Bitmap
= RShiftU64 (Bitmap
, 1);
1494 Put part (at most 64 pages a time) guarded free pages back to free page pool.
1496 Freed memory guard is used to detect Use-After-Free (UAF) memory issue, which
1497 makes use of 'Used then throw away' way to detect any illegal access to freed
1498 memory. The thrown-away memory will be marked as not-present so that any access
1499 to those memory (after free) will be caught by page-fault exception.
1501 The problem is that this will consume lots of memory space. Once no memory
1502 left in pool to allocate, we have to restore part of the freed pages to their
1503 normal function. Otherwise the whole system will stop functioning.
1505 @param StartAddress Start address of promoted memory.
1506 @param EndAddress End address of promoted memory.
1508 @return TRUE Succeeded to promote memory.
1509 @return FALSE No free memory found.
1513 PromoteGuardedFreePages (
1514 OUT EFI_PHYSICAL_ADDRESS
*StartAddress
,
1515 OUT EFI_PHYSICAL_ADDRESS
*EndAddress
1519 UINTN AvailablePages
;
1521 EFI_PHYSICAL_ADDRESS Start
;
1523 if (!IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED
)) {
1528 // Similar to memory allocation service, always search the freed pages in
1529 // descending direction.
1531 Start
= mLastPromotedPage
;
1533 while (AvailablePages
== 0) {
1534 Start
-= EFI_PAGES_TO_SIZE (GUARDED_HEAP_MAP_ENTRY_BITS
);
1536 // If the address wraps around, try the really freed pages at top.
1538 if (Start
> mLastPromotedPage
) {
1539 GetLastGuardedFreePageAddress (&Start
);
1540 ASSERT (Start
!= 0);
1541 Start
-= EFI_PAGES_TO_SIZE (GUARDED_HEAP_MAP_ENTRY_BITS
);
1544 Bitmap
= GetGuardedMemoryBits (Start
, GUARDED_HEAP_MAP_ENTRY_BITS
);
1545 while (Bitmap
> 0) {
1546 if ((Bitmap
& 1) != 0) {
1548 } else if (AvailablePages
== 0) {
1549 Start
+= EFI_PAGES_TO_SIZE (1);
1554 Bitmap
= RShiftU64 (Bitmap
, 1);
1558 if (AvailablePages
!= 0) {
1559 DEBUG ((DEBUG_INFO
, "Promoted pages: %lX (%lx)\r\n", Start
, (UINT64
)AvailablePages
));
1560 ClearGuardedMemoryBits (Start
, AvailablePages
);
1564 // Set flag to make sure allocating memory without GUARD for page table
1565 // operation; otherwise infinite loops could be caused.
1568 Status
= gCpu
->SetMemoryAttributes (gCpu
, Start
, EFI_PAGES_TO_SIZE(AvailablePages
), 0);
1569 ASSERT_EFI_ERROR (Status
);
1570 mOnGuarding
= FALSE
;
1573 mLastPromotedPage
= Start
;
1574 *StartAddress
= Start
;
1575 *EndAddress
= Start
+ EFI_PAGES_TO_SIZE (AvailablePages
) - 1;
// NOTE(review): damaged extraction — return type, parameter list and branch
// bodies (e.g. the early return after the DEBUG error) are missing from this
// fragment; restore from upstream.
1583 Notify function used to set all Guard pages before CPU Arch Protocol installed.
1586 HeapGuardCpuArchProtocolNotify (
// gCpu must be available: this notify runs once the CPU Arch Protocol
// (needed for SetMemoryAttributes) is installed.
1590 ASSERT (gCpu
!= NULL
);
// Heap guard (page/pool) and freed-memory guard are mutually exclusive.
1592 if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_PAGE
|GUARD_HEAP_TYPE_POOL
) &&
1593 IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED
)) {
1594 DEBUG ((DEBUG_ERROR
, "Heap guard and freed memory guard cannot be enabled at the same time.\n"));
// Apply guard attributes that were deferred until gCpu became available.
1598 if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_PAGE
|GUARD_HEAP_TYPE_POOL
)) {
1599 SetAllGuardPages ();
1602 if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED
)) {
1603 GuardAllFreedPages ();
// NOTE(review): damaged extraction — return type, Value parameter line and
// locals are missing from this fragment. BinString must hold at least 65
// CHAR8s (64 digits + terminator), per the visible indexing.
1608 Helper function to convert a UINT64 value in binary to a string.
1610 @param[in] Value Value of a UINT64 integer.
1611 @param[out] BinString String buffer to contain the conversion result.
1618 OUT CHAR8
*BinString
// Nothing to do for a NULL output buffer.
1623 if (BinString
== NULL
) {
// Emit digits least-significant-bit last: walk indices 63..0, writing the
// current low bit and shifting Value right each step.
1627 for (Index
= 64; Index
> 0; --Index
) {
1628 BinString
[Index
- 1] = '0' + (Value
& 1);
1629 Value
= RShiftU64 (Value
, 1);
// NUL-terminate after the 64 binary digits.
1631 BinString
[64] = '\0';
1635 Dump the guarded memory bit map.
1639 DumpGuardedMemoryBitmap (
1643 UINTN Entries
[GUARDED_HEAP_MAP_TABLE_DEPTH
];
1644 UINTN Shifts
[GUARDED_HEAP_MAP_TABLE_DEPTH
];
1645 UINTN Indices
[GUARDED_HEAP_MAP_TABLE_DEPTH
];
1646 UINT64 Tables
[GUARDED_HEAP_MAP_TABLE_DEPTH
];
1647 UINT64 Addresses
[GUARDED_HEAP_MAP_TABLE_DEPTH
];
1652 CHAR8 String
[GUARDED_HEAP_MAP_ENTRY_BITS
+ 1];
1656 if (!IsHeapGuardEnabled (GUARD_HEAP_TYPE_ALL
)) {
1660 if (mGuardedMemoryMap
== 0 ||
1662 mMapLevel
> GUARDED_HEAP_MAP_TABLE_DEPTH
) {
1666 Ruler1
= " 3 2 1 0";
1667 Ruler2
= "FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210";
1669 DEBUG ((HEAP_GUARD_DEBUG_LEVEL
, "============================="
1670 " Guarded Memory Bitmap "
1671 "==============================\r\n"));
1672 DEBUG ((HEAP_GUARD_DEBUG_LEVEL
, " %a\r\n", Ruler1
));
1673 DEBUG ((HEAP_GUARD_DEBUG_LEVEL
, " %a\r\n", Ruler2
));
1675 CopyMem (Entries
, mLevelMask
, sizeof (Entries
));
1676 CopyMem (Shifts
, mLevelShift
, sizeof (Shifts
));
1678 SetMem (Indices
, sizeof(Indices
), 0);
1679 SetMem (Tables
, sizeof(Tables
), 0);
1680 SetMem (Addresses
, sizeof(Addresses
), 0);
1682 Level
= GUARDED_HEAP_MAP_TABLE_DEPTH
- mMapLevel
;
1683 Tables
[Level
] = mGuardedMemoryMap
;
1688 if (Indices
[Level
] > Entries
[Level
]) {
1695 HEAP_GUARD_DEBUG_LEVEL
,
1696 "========================================="
1697 "=========================================\r\n"
1702 TableEntry
= ((UINT64
*)(UINTN
)Tables
[Level
])[Indices
[Level
]];
1703 Address
= Addresses
[Level
];
1705 if (TableEntry
== 0) {
1707 if (Level
== GUARDED_HEAP_MAP_TABLE_DEPTH
- 1) {
1708 if (RepeatZero
== 0) {
1709 Uint64ToBinString(TableEntry
, String
);
1710 DEBUG ((HEAP_GUARD_DEBUG_LEVEL
, "%016lx: %a\r\n", Address
, String
));
1711 } else if (RepeatZero
== 1) {
1712 DEBUG ((HEAP_GUARD_DEBUG_LEVEL
, "... : ...\r\n"));
1717 } else if (Level
< GUARDED_HEAP_MAP_TABLE_DEPTH
- 1) {
1720 Tables
[Level
] = TableEntry
;
1721 Addresses
[Level
] = Address
;
1730 Uint64ToBinString(TableEntry
, String
);
1731 DEBUG ((HEAP_GUARD_DEBUG_LEVEL
, "%016lx: %a\r\n", Address
, String
));
1736 if (Level
< (GUARDED_HEAP_MAP_TABLE_DEPTH
- (INTN
)mMapLevel
)) {
1740 Indices
[Level
] += 1;
1741 Address
= (Level
== 0) ? 0 : Addresses
[Level
- 1];
1742 Addresses
[Level
] = Address
| LShiftU64(Indices
[Level
], Shifts
[Level
]);