/** @file
  UEFI Heap Guard functions.

Copyright (c) 2017-2018, Intel Corporation. All rights reserved.<BR>
SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include "DxeMain.h"
#include "Imem.h"
#include "HeapGuard.h"

//
// Global to avoid infinite reentrance of memory allocation when updating
// page table attributes, which may need to allocate pages for new PDE/PTE.
//
GLOBAL_REMOVE_IF_UNREFERENCED BOOLEAN mOnGuarding = FALSE;

//
// Pointer to table tracking the Guarded memory with bitmap, in which '1'
// is used to indicate memory guarded. '0' might be free memory or Guard
// page itself, depending on status of memory adjacent to it.
//
GLOBAL_REMOVE_IF_UNREFERENCED UINT64 mGuardedMemoryMap = 0;

//
// Current depth level of map table pointed by mGuardedMemoryMap.
// mMapLevel must be initialized to at least 1. It will be automatically
// updated according to the address of memory just tracked.
//
GLOBAL_REMOVE_IF_UNREFERENCED UINTN mMapLevel = 1;

//
// Shift and mask for each level of map table
//
GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH]
                                    = GUARDED_HEAP_MAP_TABLE_DEPTH_SHIFTS;
GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH]
                                    = GUARDED_HEAP_MAP_TABLE_DEPTH_MASKS;

//
// Used for promoting freed but not used pages.
//
GLOBAL_REMOVE_IF_UNREFERENCED EFI_PHYSICAL_ADDRESS mLastPromotedPage = BASE_4GB;

/**
  Set corresponding bits in bitmap table to 1 according to the address.

  @param[in]  Address    Start address to set for.
  @param[in]  BitNumber  Number of bits to set.
  @param[in]  BitMap     Pointer to bitmap which covers the Address.

  @return VOID.
**/
STATIC
VOID
SetBits (
  IN EFI_PHYSICAL_ADDRESS  Address,
  IN UINTN                 BitNumber,
  IN UINT64                *BitMap
  )
{
  UINTN  Lsbs;
  UINTN  Qwords;
  UINTN  Msbs;
  UINTN  StartBit;
  UINTN  EndBit;

  StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
  EndBit   = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;

  if ((StartBit + BitNumber) >= GUARDED_HEAP_MAP_ENTRY_BITS) {
    Msbs   = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %
             GUARDED_HEAP_MAP_ENTRY_BITS;
    Lsbs   = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
    Qwords = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;
  } else {
    Msbs   = BitNumber;
    Lsbs   = 0;
    Qwords = 0;
  }

  if (Msbs > 0) {
    *BitMap |= LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);
    BitMap  += 1;
  }

  if (Qwords > 0) {
    SetMem64 ((VOID *)BitMap, Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES,
              (UINT64)-1);
    BitMap += Qwords;
  }

  if (Lsbs > 0) {
    *BitMap |= (LShiftU64 (1, Lsbs) - 1);
  }
}
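
//
// Editor's note: the following worked example is an illustration added for
// clarity; it is not part of the original EDK II source. With 64-bit map
// words, a call such as SetBits (Address, 70, BitMap) where Address maps to
// StartBit = 60 splits the work into three parts:
//
//   Msbs   = (64 - 60) % 64 = 4             bits set in the first word;
//   Qwords = (70 - 4) / 64  = 1             full word filled by SetMem64();
//   Lsbs   = ((60 + 70 - 1) % 64 + 1) % 64
//          = 2                              bits set in the last word.
//
// 4 + 64 + 2 == 70, so every requested bit is covered exactly once. The
// same split drives ClearBits() below.
//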

/**
  Set corresponding bits in bitmap table to 0 according to the address.

  @param[in]  Address    Start address to set for.
  @param[in]  BitNumber  Number of bits to set.
  @param[in]  BitMap     Pointer to bitmap which covers the Address.

  @return VOID.
**/
STATIC
VOID
ClearBits (
  IN EFI_PHYSICAL_ADDRESS  Address,
  IN UINTN                 BitNumber,
  IN UINT64                *BitMap
  )
{
  UINTN  Lsbs;
  UINTN  Qwords;
  UINTN  Msbs;
  UINTN  StartBit;
  UINTN  EndBit;

  StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
  EndBit   = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;

  if ((StartBit + BitNumber) >= GUARDED_HEAP_MAP_ENTRY_BITS) {
    Msbs   = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %
             GUARDED_HEAP_MAP_ENTRY_BITS;
    Lsbs   = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
    Qwords = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;
  } else {
    Msbs   = BitNumber;
    Lsbs   = 0;
    Qwords = 0;
  }

  if (Msbs > 0) {
    *BitMap &= ~LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);
    BitMap  += 1;
  }

  if (Qwords > 0) {
    SetMem64 ((VOID *)BitMap, Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES, 0);
    BitMap += Qwords;
  }

  if (Lsbs > 0) {
    *BitMap &= ~(LShiftU64 (1, Lsbs) - 1);
  }
}

/**
  Get corresponding bits in bitmap table according to the address.

  The value of bit 0 corresponds to the status of memory at given Address.
  No more than 64 bits can be retrieved in one call.

  @param[in]  Address    Start address to retrieve bits for.
  @param[in]  BitNumber  Number of bits to get.
  @param[in]  BitMap     Pointer to bitmap which covers the Address.

  @return An integer containing the bits information.
**/
STATIC
UINT64
GetBits (
  IN EFI_PHYSICAL_ADDRESS  Address,
  IN UINTN                 BitNumber,
  IN UINT64                *BitMap
  )
{
  UINTN   StartBit;
  UINTN   EndBit;
  UINTN   Lsbs;
  UINTN   Msbs;
  UINT64  Result;

  ASSERT (BitNumber <= GUARDED_HEAP_MAP_ENTRY_BITS);

  StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
  EndBit   = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;

  if ((StartBit + BitNumber) > GUARDED_HEAP_MAP_ENTRY_BITS) {
    Msbs = GUARDED_HEAP_MAP_ENTRY_BITS - StartBit;
    Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
  } else {
    Msbs = BitNumber;
    Lsbs = 0;
  }

  if (StartBit == 0 && BitNumber == GUARDED_HEAP_MAP_ENTRY_BITS) {
    Result = *BitMap;
  } else {
    Result = RShiftU64 ((*BitMap), StartBit) & (LShiftU64 (1, Msbs) - 1);
    if (Lsbs > 0) {
      BitMap += 1;
      Result |= LShiftU64 ((*BitMap) & (LShiftU64 (1, Lsbs) - 1), Msbs);
    }
  }

  return Result;
}
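
//
// Editor's note: an illustrative sketch, not part of the original source.
// When a read crosses a word boundary, GetBits() stitches the two fragments
// back together. For StartBit = 62 and BitNumber = 8 (Msbs = 2, Lsbs = 6):
//
//   Result  = RShiftU64 (BitMap[0], 62) & 0x3;    // bits 62..63 -> bits 0..1
//   Result |= LShiftU64 (BitMap[1] & 0x3F, 2);    // bits 0..5   -> bits 2..7
//
// Bit 0 of the result always describes the page at the given Address.
//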

/**
  Locate the pointer of bitmap from the guarded memory bitmap tables, which
  covers the given Address.

  @param[in]   Address       Start address to search the bitmap for.
  @param[in]   AllocMapUnit  Flag to indicate whether a map unit should be
                             allocated if it's not found.
  @param[out]  BitMap        Pointer to bitmap which covers the Address.

  @return The bit number from given Address to the end of current map table.
**/
UINTN
FindGuardedMemoryMap (
  IN  EFI_PHYSICAL_ADDRESS  Address,
  IN  BOOLEAN               AllocMapUnit,
  OUT UINT64                **BitMap
  )
{
  UINTN       Level;
  UINT64      *GuardMap;
  UINT64      MapMemory;
  UINTN       Index;
  UINTN       Size;
  UINTN       BitsToUnitEnd;
  EFI_STATUS  Status;

  //
  // Adjust current map table depth according to the address to access
  //
  while (AllocMapUnit &&
         mMapLevel < GUARDED_HEAP_MAP_TABLE_DEPTH &&
         RShiftU64 (
           Address,
           mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1]
           ) != 0) {

    if (mGuardedMemoryMap != 0) {
      Size = (mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1] + 1)
             * GUARDED_HEAP_MAP_ENTRY_BYTES;
      Status = CoreInternalAllocatePages (
                 AllocateAnyPages,
                 EfiBootServicesData,
                 EFI_SIZE_TO_PAGES (Size),
                 &MapMemory,
                 FALSE
                 );
      ASSERT_EFI_ERROR (Status);
      ASSERT (MapMemory != 0);

      SetMem ((VOID *)(UINTN)MapMemory, Size, 0);

      *(UINT64 *)(UINTN)MapMemory = mGuardedMemoryMap;
      mGuardedMemoryMap           = MapMemory;
    }

    mMapLevel++;
  }

  GuardMap = &mGuardedMemoryMap;
  for (Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
       Level < GUARDED_HEAP_MAP_TABLE_DEPTH;
       ++Level) {

    if (*GuardMap == 0) {
      if (!AllocMapUnit) {
        GuardMap = NULL;
        break;
      }

      Size   = (mLevelMask[Level] + 1) * GUARDED_HEAP_MAP_ENTRY_BYTES;
      Status = CoreInternalAllocatePages (
                 AllocateAnyPages,
                 EfiBootServicesData,
                 EFI_SIZE_TO_PAGES (Size),
                 &MapMemory,
                 FALSE
                 );
      ASSERT_EFI_ERROR (Status);
      ASSERT (MapMemory != 0);

      SetMem ((VOID *)(UINTN)MapMemory, Size, 0);
      *GuardMap = MapMemory;
    }

    Index     = (UINTN)RShiftU64 (Address, mLevelShift[Level]);
    Index    &= mLevelMask[Level];
    GuardMap  = (UINT64 *)(UINTN)((*GuardMap) + Index * sizeof (UINT64));
  }

  BitsToUnitEnd = GUARDED_HEAP_MAP_BITS - GUARDED_HEAP_MAP_BIT_INDEX (Address);
  *BitMap       = GuardMap;

  return BitsToUnitEnd;
}
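
//
// Editor's note: an illustrative sketch of the lookup, not part of the
// original source. The bitmap is kept as a multi-level table (a trie): each
// level consumes one slice of the page address, selected by the per-level
// shift and mask,
//
//   Index = (UINTN)RShiftU64 (Address, mLevelShift[Level]) & mLevelMask[Level];
//
// Upper levels store pointers to next-level tables; the deepest level stores
// the 64-bit bitmap words themselves, one bit per page. mMapLevel grows on
// demand (the loop above), so tracking only low addresses keeps the table
// shallow and cheap.
//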

/**
  Set corresponding bits in bitmap table to 1 according to given memory range.

  @param[in]  Address        Memory address to guard from.
  @param[in]  NumberOfPages  Number of pages to guard.

  @return VOID.
**/
VOID
EFIAPI
SetGuardedMemoryBits (
  IN EFI_PHYSICAL_ADDRESS  Address,
  IN UINTN                 NumberOfPages
  )
{
  UINT64  *BitMap;
  UINTN   Bits;
  UINTN   BitsToUnitEnd;

  while (NumberOfPages > 0) {
    BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);
    ASSERT (BitMap != NULL);

    if (NumberOfPages > BitsToUnitEnd) {
      // Cross map unit
      Bits = BitsToUnitEnd;
    } else {
      Bits = NumberOfPages;
    }

    SetBits (Address, Bits, BitMap);

    NumberOfPages -= Bits;
    Address       += EFI_PAGES_TO_SIZE (Bits);
  }
}

/**
  Clear corresponding bits in bitmap table according to given memory range.

  @param[in]  Address        Memory address to unset from.
  @param[in]  NumberOfPages  Number of pages to unset guard.

  @return VOID.
**/
VOID
EFIAPI
ClearGuardedMemoryBits (
  IN EFI_PHYSICAL_ADDRESS  Address,
  IN UINTN                 NumberOfPages
  )
{
  UINT64  *BitMap;
  UINTN   Bits;
  UINTN   BitsToUnitEnd;

  while (NumberOfPages > 0) {
    BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);
    ASSERT (BitMap != NULL);

    if (NumberOfPages > BitsToUnitEnd) {
      // Cross map unit
      Bits = BitsToUnitEnd;
    } else {
      Bits = NumberOfPages;
    }

    ClearBits (Address, Bits, BitMap);

    NumberOfPages -= Bits;
    Address       += EFI_PAGES_TO_SIZE (Bits);
  }
}

/**
  Retrieve corresponding bits in bitmap table according to given memory range.

  @param[in]  Address        Memory address to retrieve from.
  @param[in]  NumberOfPages  Number of pages to retrieve.

  @return An integer containing the guarded memory bitmap.
**/
UINT64
GetGuardedMemoryBits (
  IN EFI_PHYSICAL_ADDRESS  Address,
  IN UINTN                 NumberOfPages
  )
{
  UINT64  *BitMap;
  UINTN   Bits;
  UINT64  Result;
  UINTN   Shift;
  UINTN   BitsToUnitEnd;

  ASSERT (NumberOfPages <= GUARDED_HEAP_MAP_ENTRY_BITS);

  Result = 0;
  Shift  = 0;
  while (NumberOfPages > 0) {
    BitsToUnitEnd = FindGuardedMemoryMap (Address, FALSE, &BitMap);

    if (NumberOfPages > BitsToUnitEnd) {
      // Cross map unit
      Bits = BitsToUnitEnd;
    } else {
      Bits = NumberOfPages;
    }

    if (BitMap != NULL) {
      Result |= LShiftU64 (GetBits (Address, Bits, BitMap), Shift);
    }

    Shift         += Bits;
    NumberOfPages -= Bits;
    Address       += EFI_PAGES_TO_SIZE (Bits);
  }

  return Result;
}

/**
  Get bit value in bitmap table for the given address.

  @param[in]  Address  The address to retrieve for.

  @return 1 or 0.
**/
UINTN
EFIAPI
GetGuardMapBit (
  IN EFI_PHYSICAL_ADDRESS  Address
  )
{
  UINT64  *GuardMap;

  FindGuardedMemoryMap (Address, FALSE, &GuardMap);
  if (GuardMap != NULL) {
    if (RShiftU64 (*GuardMap,
                   GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address)) & 1) {
      return 1;
    }
  }

  return 0;
}

/**
  Check to see if the page at the given address is a Guard page or not.

  @param[in]  Address  The address to check for.

  @return TRUE   The page at Address is a Guard page.
  @return FALSE  The page at Address is not a Guard page.
**/
BOOLEAN
EFIAPI
IsGuardPage (
  IN EFI_PHYSICAL_ADDRESS  Address
  )
{
  UINT64  BitMap;

  //
  // There must be at least one guarded page before and/or after given
  // address if it's a Guard page. The bitmap pattern should be one of
  // 001, 100 and 101.
  //
  BitMap = GetGuardedMemoryBits (Address - EFI_PAGE_SIZE, 3);
  return ((BitMap == BIT0) || (BitMap == BIT2) || (BitMap == (BIT2 | BIT0)));
}

/**
  Check to see if the page at the given address is guarded or not.

  @param[in]  Address  The address to check for.

  @return TRUE   The page at Address is guarded.
  @return FALSE  The page at Address is not guarded.
**/
BOOLEAN
EFIAPI
IsMemoryGuarded (
  IN EFI_PHYSICAL_ADDRESS  Address
  )
{
  return (GetGuardMapBit (Address) == 1);
}

/**
  Set the page at the given address to be a Guard page.

  This is done by changing the page table attribute to be NOT PRESENT.

  @param[in]  BaseAddress  Page address to Guard at.

  @return VOID.
**/
VOID
EFIAPI
SetGuardPage (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress
  )
{
  EFI_STATUS  Status;

  if (gCpu == NULL) {
    return;
  }

  //
  // Set flag to make sure allocating memory without GUARD for page table
  // operation; otherwise infinite loops could be caused.
  //
  mOnGuarding = TRUE;
  //
  // Note: This might overwrite other attributes needed by other features,
  // such as NX memory protection.
  //
  Status = gCpu->SetMemoryAttributes (gCpu, BaseAddress, EFI_PAGE_SIZE, EFI_MEMORY_RP);
  ASSERT_EFI_ERROR (Status);
  mOnGuarding = FALSE;
}

/**
  Unset the Guard page at the given address, restoring it to normal memory.

  This is done by changing the page table attribute to be PRESENT.

  @param[in]  BaseAddress  Page address to unset the Guard at.

  @return VOID.
**/
VOID
EFIAPI
UnsetGuardPage (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress
  )
{
  UINT64      Attributes;
  EFI_STATUS  Status;

  if (gCpu == NULL) {
    return;
  }

  //
  // Once the Guard page is unset, it will be freed back to memory pool. NX
  // memory protection must be restored for this page if NX is enabled for
  // free memory.
  //
  Attributes = 0;
  if ((PcdGet64 (PcdDxeNxMemoryProtectionPolicy) & (1 << EfiConventionalMemory)) != 0) {
    Attributes |= EFI_MEMORY_XP;
  }

  //
  // Set flag to make sure allocating memory without GUARD for page table
  // operation; otherwise infinite loops could be caused.
  //
  mOnGuarding = TRUE;
  //
  // Note: This might overwrite other attributes needed by other features,
  // such as memory protection (NX). Please make sure they are not enabled
  // at the same time.
  //
  Status = gCpu->SetMemoryAttributes (gCpu, BaseAddress, EFI_PAGE_SIZE, Attributes);
  ASSERT_EFI_ERROR (Status);
  mOnGuarding = FALSE;
}

/**
  Check to see if the memory at the given address should be guarded or not.

  @param[in]  MemoryType    Memory type to check.
  @param[in]  AllocateType  Allocation type to check.
  @param[in]  PageOrPool    Indicate a page allocation or pool allocation.

  @return TRUE   The given type of memory should be guarded.
  @return FALSE  The given type of memory should not be guarded.
**/
BOOLEAN
IsMemoryTypeToGuard (
  IN EFI_MEMORY_TYPE    MemoryType,
  IN EFI_ALLOCATE_TYPE  AllocateType,
  IN UINT8              PageOrPool
  )
{
  UINT64  TestBit;
  UINT64  ConfigBit;

  if (AllocateType == AllocateAddress) {
    return FALSE;
  }

  if ((PcdGet8 (PcdHeapGuardPropertyMask) & PageOrPool) == 0) {
    return FALSE;
  }

  if (PageOrPool == GUARD_HEAP_TYPE_POOL) {
    ConfigBit = PcdGet64 (PcdHeapGuardPoolType);
  } else if (PageOrPool == GUARD_HEAP_TYPE_PAGE) {
    ConfigBit = PcdGet64 (PcdHeapGuardPageType);
  } else {
    ConfigBit = (UINT64)-1;
  }

  if ((UINT32)MemoryType >= MEMORY_TYPE_OS_RESERVED_MIN) {
    TestBit = BIT63;
  } else if ((UINT32)MemoryType >= MEMORY_TYPE_OEM_RESERVED_MIN) {
    TestBit = BIT62;
  } else if (MemoryType < EfiMaxMemoryType) {
    TestBit = LShiftU64 (1, MemoryType);
  } else if (MemoryType == EfiMaxMemoryType) {
    TestBit = (UINT64)-1;
  } else {
    TestBit = 0;
  }

  return ((ConfigBit & TestBit) != 0);
}
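
//
// Editor's note: a configuration sketch, not part of the original source.
// For example, to guard only page allocations of EfiBootServicesData
// (memory type 4), a platform DSC would set something like:
//
//   gEfiMdeModulePkgTokenSpaceGuid.PcdHeapGuardPropertyMask|0x01  # BIT0: page guard
//   gEfiMdeModulePkgTokenSpaceGuid.PcdHeapGuardPageType|0x10      # BIT4: EfiBootServicesData
//
// With that setting, IsMemoryTypeToGuard (EfiBootServicesData,
// AllocateAnyPages, GUARD_HEAP_TYPE_PAGE) returns TRUE and all other
// combinations return FALSE.
//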

/**
  Check to see if the pool at the given address should be guarded or not.

  @param[in]  MemoryType  Pool type to check.

  @return TRUE   The given type of pool should be guarded.
  @return FALSE  The given type of pool should not be guarded.
**/
BOOLEAN
IsPoolTypeToGuard (
  IN EFI_MEMORY_TYPE  MemoryType
  )
{
  return IsMemoryTypeToGuard (MemoryType, AllocateAnyPages,
                              GUARD_HEAP_TYPE_POOL);
}

/**
  Check to see if the page at the given address should be guarded or not.

  @param[in]  MemoryType    Page type to check.
  @param[in]  AllocateType  Allocation type to check.

  @return TRUE   The given type of page should be guarded.
  @return FALSE  The given type of page should not be guarded.
**/
BOOLEAN
IsPageTypeToGuard (
  IN EFI_MEMORY_TYPE    MemoryType,
  IN EFI_ALLOCATE_TYPE  AllocateType
  )
{
  return IsMemoryTypeToGuard (MemoryType, AllocateType, GUARD_HEAP_TYPE_PAGE);
}

/**
  Check to see if the heap guard is enabled for page and/or pool allocation.

  @param[in]  GuardType  Specify the sub-type(s) of Heap Guard.

  @return TRUE/FALSE.
**/
BOOLEAN
IsHeapGuardEnabled (
  UINT8  GuardType
  )
{
  return IsMemoryTypeToGuard (EfiMaxMemoryType, AllocateAnyPages, GuardType);
}

/**
  Set head Guard and tail Guard for the given memory range.

  @param[in]  Memory         Base address of memory to set guard for.
  @param[in]  NumberOfPages  Memory size in pages.

  @return VOID.
**/
VOID
SetGuardForMemory (
  IN EFI_PHYSICAL_ADDRESS  Memory,
  IN UINTN                 NumberOfPages
  )
{
  EFI_PHYSICAL_ADDRESS  GuardPage;

  //
  // Set tail Guard
  //
  GuardPage = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);
  if (!IsGuardPage (GuardPage)) {
    SetGuardPage (GuardPage);
  }

  //
  // Set head Guard
  //
  GuardPage = Memory - EFI_PAGES_TO_SIZE (1);
  if (!IsGuardPage (GuardPage)) {
    SetGuardPage (GuardPage);
  }

  //
  // Mark the memory range as Guarded
  //
  SetGuardedMemoryBits (Memory, NumberOfPages);
}
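
//
// Editor's note: an illustration added for clarity, not part of the original
// source. After SetGuardForMemory (Memory, N) the physical layout is:
//
//    Memory - 1 page      Memory                      Memory + N pages
//   +--------------+----------------------------+--------------+
//   |  head Guard  |  N usable pages            |  tail Guard  |
//   |  not-present |  bits set to 1 in bitmap   |  not-present |
//   +--------------+----------------------------+--------------+
//
// The Guard pages themselves stay 0 in the bitmap; IsGuardPage() recognizes
// them from the 001/100/101 patterns of their neighbors, which is also how
// adjacent allocations can share a single Guard page.
//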

/**
  Unset head Guard and tail Guard for the given memory range.

  @param[in]  Memory         Base address of memory to unset guard for.
  @param[in]  NumberOfPages  Memory size in pages.

  @return VOID.
**/
VOID
UnsetGuardForMemory (
  IN EFI_PHYSICAL_ADDRESS  Memory,
  IN UINTN                 NumberOfPages
  )
{
  EFI_PHYSICAL_ADDRESS  GuardPage;
  UINT64                GuardBitmap;

  if (NumberOfPages == 0) {
    return;
  }

  //
  // Head Guard must be one page before, if any.
  //
  //          MSB-> 1     0 <-LSB
  //          -------------------
  //   Head Guard -> 0     1 -> Don't free Head Guard  (shared Guard)
  //   Head Guard -> 0     0 -> OK to free Head Guard  (not shared Guard)
  //                 1     X -> Don't free first page  (need a new Guard)
  //                            (it'll be turned into a Guard page later)
  //          -------------------
  //      Start -> -1    -2
  //
  GuardPage   = Memory - EFI_PAGES_TO_SIZE (1);
  GuardBitmap = GetGuardedMemoryBits (Memory - EFI_PAGES_TO_SIZE (2), 2);
  if ((GuardBitmap & BIT1) == 0) {
    //
    // Head Guard exists.
    //
    if ((GuardBitmap & BIT0) == 0) {
      //
      // If the head Guard is not a tail Guard of adjacent memory block,
      // unset it.
      //
      UnsetGuardPage (GuardPage);
    }
  } else {
    //
    // Pages before memory to free are still in Guard. It's a partial free
    // case. Turn the first page of the memory block to free into a new
    // Guard.
    //
    SetGuardPage (Memory);
  }

  //
  // Tail Guard must be the page after this memory block to free, if any.
  //
  //   MSB-> 1     0 <-LSB
  //  --------------------
  //         1     0 <- Tail Guard -> Don't free Tail Guard  (shared Guard)
  //         0     0 <- Tail Guard -> OK to free Tail Guard  (not shared Guard)
  //         X     1               -> Don't free last page   (need a new Guard)
  //                                  (it'll be turned into a Guard page later)
  //  --------------------
  //        +1    +0 <- End
  //
  GuardPage   = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);
  GuardBitmap = GetGuardedMemoryBits (GuardPage, 2);
  if ((GuardBitmap & BIT0) == 0) {
    //
    // Tail Guard exists.
    //
    if ((GuardBitmap & BIT1) == 0) {
      //
      // If the tail Guard is not a head Guard of adjacent memory block,
      // free it; otherwise, keep it.
      //
      UnsetGuardPage (GuardPage);
    }
  } else {
    //
    // Pages after memory to free are still in Guard. It's a partial free
    // case. We need to keep one page to be a head Guard.
    //
    SetGuardPage (GuardPage - EFI_PAGES_TO_SIZE (1));
  }

  //
  // No matter what, we just clear the mark of the Guarded memory.
  //
  ClearGuardedMemoryBits (Memory, NumberOfPages);
}

/**
  Adjust address of free memory according to existing and/or required Guard.

  This function will check if there are existing Guard pages of adjacent
  memory blocks, and try to use them as the Guard pages of the memory to be
  allocated.

  @param[in]  Start          Start address of free memory block.
  @param[in]  Size           Size of free memory block.
  @param[in]  SizeRequested  Size of memory to allocate.

  @return The end address of memory block found.
  @return 0 if there is not enough space for the required size of memory and
          its Guard.
**/
UINT64
AdjustMemoryS (
  IN UINT64  Start,
  IN UINT64  Size,
  IN UINT64  SizeRequested
  )
{
  UINT64  Target;

  //
  // UEFI spec requires that allocated pool must be 8-byte aligned. If it's
  // indicated to put the pool near the Tail Guard, we need extra bytes to
  // make sure alignment of the returned pool address.
  //
  if ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) == 0) {
    SizeRequested = ALIGN_VALUE (SizeRequested, 8);
  }

  Target = Start + Size - SizeRequested;
  ASSERT (Target >= Start);
  if (Target == 0) {
    return 0;
  }

  if (!IsGuardPage (Start + Size)) {
    // No Guard at tail to share. One more page is needed.
    Target -= EFI_PAGES_TO_SIZE (1);
  }

  // Out of range?
  if (Target < Start) {
    return 0;
  }

  // At the edge?
  if (Target == Start) {
    if (!IsGuardPage (Target - EFI_PAGES_TO_SIZE (1))) {
      // Not enough space for a new head Guard if there's no Guard at the
      // head to share.
      return 0;
    }
  }

  // OK, we have enough pages for memory and its Guards. Return the End of
  // the free space.
  return Target + SizeRequested - 1;
}
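
//
// Editor's note: a worked example, not part of the original source. Suppose
// a free block at Start with Size = EFI_PAGES_TO_SIZE (5) and a request of
// SizeRequested = EFI_PAGES_TO_SIZE (2), with no Guard page after the block:
//
//   Target  = Start + 5 pages - 2 pages;   // try the top of the block
//   Target -= EFI_PAGES_TO_SIZE (1);       // reserve room for a new tail
//                                          // Guard; Target > Start, it fits
//   return Target + SizeRequested - 1;     // end address for the allocator
//
// Had the block been only 2 pages, reserving the tail Guard would have
// pushed Target below Start and the function would have returned 0.
//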

/**
  Adjust the start address and number of pages to free according to Guard.

  The purpose of this function is to keep the Guard page shared with an
  adjacent memory block if that block is still guarded, or to free it if it
  is no longer shared. Another purpose is to reserve pages as Guard pages
  when only part of a block is freed.

  @param[in,out]  Memory         Base address of memory to free.
  @param[in,out]  NumberOfPages  Size of memory to free.

  @return VOID.
**/
VOID
AdjustMemoryF (
  IN OUT EFI_PHYSICAL_ADDRESS  *Memory,
  IN OUT UINTN                 *NumberOfPages
  )
{
  EFI_PHYSICAL_ADDRESS  Start;
  EFI_PHYSICAL_ADDRESS  MemoryToTest;
  UINTN                 PagesToFree;
  UINT64                GuardBitmap;

  if (Memory == NULL || NumberOfPages == NULL || *NumberOfPages == 0) {
    return;
  }

  Start       = *Memory;
  PagesToFree = *NumberOfPages;

  //
  // Head Guard must be one page before, if any.
  //
  //          MSB-> 1     0 <-LSB
  //          -------------------
  //   Head Guard -> 0     1 -> Don't free Head Guard  (shared Guard)
  //   Head Guard -> 0     0 -> OK to free Head Guard  (not shared Guard)
  //                 1     X -> Don't free first page  (need a new Guard)
  //                            (it'll be turned into a Guard page later)
  //          -------------------
  //      Start -> -1    -2
  //
  MemoryToTest = Start - EFI_PAGES_TO_SIZE (2);
  GuardBitmap  = GetGuardedMemoryBits (MemoryToTest, 2);
  if ((GuardBitmap & BIT1) == 0) {
    //
    // Head Guard exists.
    //
    if ((GuardBitmap & BIT0) == 0) {
      //
      // If the head Guard is not a tail Guard of adjacent memory block,
      // free it; otherwise, keep it.
      //
      Start       -= EFI_PAGES_TO_SIZE (1);
      PagesToFree += 1;
    }
  } else {
    //
    // No Head Guard, and pages before memory to free are still in Guard.
    // It's a partial free case. We need to keep one page to be a tail Guard.
    //
    Start       += EFI_PAGES_TO_SIZE (1);
    PagesToFree -= 1;
  }

  //
  // Tail Guard must be the page after this memory block to free, if any.
  //
  //   MSB-> 1     0 <-LSB
  //  --------------------
  //         1     0 <- Tail Guard -> Don't free Tail Guard  (shared Guard)
  //         0     0 <- Tail Guard -> OK to free Tail Guard  (not shared Guard)
  //         X     1               -> Don't free last page   (need a new Guard)
  //                                  (it'll be turned into a Guard page later)
  //  --------------------
  //        +1    +0 <- End
  //
  MemoryToTest = Start + EFI_PAGES_TO_SIZE (PagesToFree);
  GuardBitmap  = GetGuardedMemoryBits (MemoryToTest, 2);
  if ((GuardBitmap & BIT0) == 0) {
    //
    // Tail Guard exists.
    //
    if ((GuardBitmap & BIT1) == 0) {
      //
      // If the tail Guard is not a head Guard of adjacent memory block,
      // free it; otherwise, keep it.
      //
      PagesToFree += 1;
    }
  } else if (PagesToFree > 0) {
    //
    // No Tail Guard, and pages after memory to free are still in Guard.
    // It's a partial free case. We need to keep one page to be a head Guard.
    //
    PagesToFree -= 1;
  }

  *Memory        = Start;
  *NumberOfPages = PagesToFree;
}

/**
  Adjust the base and number of pages to really allocate according to Guard.

  @param[in,out]  Memory         Base address of free memory.
  @param[in,out]  NumberOfPages  Size of memory to allocate.

  @return VOID.
**/
VOID
AdjustMemoryA (
  IN OUT EFI_PHYSICAL_ADDRESS  *Memory,
  IN OUT UINTN                 *NumberOfPages
  )
{
  //
  // FindFreePages() has already taken the Guard into account. It's safe to
  // adjust the start address and/or number of pages here, to make sure that
  // the Guards are also "allocated".
  //
  if (!IsGuardPage (*Memory + EFI_PAGES_TO_SIZE (*NumberOfPages))) {
    // No tail Guard, add one.
    *NumberOfPages += 1;
  }

  if (!IsGuardPage (*Memory - EFI_PAGE_SIZE)) {
    // No head Guard, add one.
    *Memory        -= EFI_PAGE_SIZE;
    *NumberOfPages += 1;
  }
}

/**
  Adjust the pool head position to make sure the Guard page is adjacent to
  pool tail or pool head.

  @param[in]  Memory   Base address of memory allocated.
  @param[in]  NoPages  Number of pages actually allocated.
  @param[in]  Size     Size of memory requested.
                       (plus pool head/tail overhead)

  @return Address of pool head.
**/
VOID *
AdjustPoolHeadA (
  IN EFI_PHYSICAL_ADDRESS  Memory,
  IN UINTN                 NoPages,
  IN UINTN                 Size
  )
{
  if (Memory == 0 || (PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0) {
    //
    // Pool head is put near the head Guard
    //
    return (VOID *)(UINTN)Memory;
  }

  //
  // Pool head is put near the tail Guard
  //
  Size = ALIGN_VALUE (Size, 8);
  return (VOID *)(UINTN)(Memory + EFI_PAGES_TO_SIZE (NoPages) - Size);
}
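
//
// Editor's note: an illustration added for clarity, not part of the original
// source. With BIT7 of PcdHeapGuardPropertyMask clear, the pool is packed
// against the tail Guard so that an overflow past the buffer faults at once:
//
//   [ head Guard ][ unused gap | pool head + data ][ tail Guard ]
//                              ^
//                              +-- AdjustPoolHeadA() result, 8-byte aligned
//
// AdjustPoolHeadF() below is the inverse used at free time: it masks the
// pool head address back down to the page base so the whole page run can be
// freed.
//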

/**
  Get the page base address according to pool head address.

  @param[in]  Memory  Head address of pool to free.

  @return Base address of the page which holds the pool head.
**/
VOID *
AdjustPoolHeadF (
  IN EFI_PHYSICAL_ADDRESS  Memory
  )
{
  if (Memory == 0 || (PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0) {
    //
    // Pool head is put near the head Guard
    //
    return (VOID *)(UINTN)Memory;
  }

  //
  // Pool head is put near the tail Guard
  //
  return (VOID *)(UINTN)(Memory & ~EFI_PAGE_MASK);
}

/**
  Allocate or free guarded memory.

  @param[in]  Start          Start address of memory to allocate or free.
  @param[in]  NumberOfPages  Memory size in pages.
  @param[in]  NewType        Memory type to convert to.

  @return The status returned by CoreConvertPages().
**/
EFI_STATUS
CoreConvertPagesWithGuard (
  IN UINT64           Start,
  IN UINTN            NumberOfPages,
  IN EFI_MEMORY_TYPE  NewType
  )
{
  UINT64  OldStart;
  UINTN   OldPages;

  if (NewType == EfiConventionalMemory) {
    OldStart = Start;
    OldPages = NumberOfPages;

    AdjustMemoryF (&Start, &NumberOfPages);
    //
    // It's safe to unset the Guard page inside the memory lock because no
    // memory allocation should occur while updating the memory page
    // attributes at this point. Unsetting the Guard page before the free
    // also prevents a Guard page just freed back to the pool from being
    // allocated again before it is marked usable (switched from not-present
    // to present).
    //
    UnsetGuardForMemory (OldStart, OldPages);
    if (NumberOfPages == 0) {
      return EFI_SUCCESS;
    }
  } else {
    AdjustMemoryA (&Start, &NumberOfPages);
  }

  return CoreConvertPages (Start, NumberOfPages, NewType);
}

/**
  Set all Guard pages which cannot be set before CPU Arch Protocol installed.
**/
VOID
SetAllGuardPages (
  VOID
  )
{
  UINTN    Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN    Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN    Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64   Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64   Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64   TableEntry;
  UINT64   Address;
  UINT64   GuardPage;
  INTN     Level;
  UINTN    Index;
  BOOLEAN  OnGuarding;

  if (mGuardedMemoryMap == 0 ||
      mMapLevel == 0 ||
      mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH) {
    return;
  }

  CopyMem (Entries, mLevelMask, sizeof (Entries));
  CopyMem (Shifts, mLevelShift, sizeof (Shifts));

  SetMem (Tables, sizeof (Tables), 0);
  SetMem (Addresses, sizeof (Addresses), 0);
  SetMem (Indices, sizeof (Indices), 0);

  Level         = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
  Tables[Level] = mGuardedMemoryMap;
  Address       = 0;
  OnGuarding    = FALSE;

  DEBUG_CODE (
    DumpGuardedMemoryBitmap ();
  );

  while (TRUE) {
    if (Indices[Level] > Entries[Level]) {
      Tables[Level] = 0;
      Level        -= 1;
    } else {
      TableEntry = ((UINT64 *)(UINTN)(Tables[Level]))[Indices[Level]];
      Address    = Addresses[Level];

      if (TableEntry == 0) {
        OnGuarding = FALSE;
      } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
        Level           += 1;
        Tables[Level]    = TableEntry;
        Addresses[Level] = Address;
        Indices[Level]   = 0;

        continue;
      } else {
        Index = 0;
        while (Index < GUARDED_HEAP_MAP_ENTRY_BITS) {
          if ((TableEntry & 1) == 1) {
            if (OnGuarding) {
              GuardPage = 0;
            } else {
              GuardPage = Address - EFI_PAGE_SIZE;
            }
            OnGuarding = TRUE;
          } else {
            if (OnGuarding) {
              GuardPage = Address;
            } else {
              GuardPage = 0;
            }
            OnGuarding = FALSE;
          }

          if (GuardPage != 0) {
            SetGuardPage (GuardPage);
          }

          if (TableEntry == 0) {
            break;
          }

          TableEntry = RShiftU64 (TableEntry, 1);
          Address   += EFI_PAGE_SIZE;
          Index     += 1;
        }
      }
    }

    if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
      break;
    }

    Indices[Level]  += 1;
    Address          = (Level == 0) ? 0 : Addresses[Level - 1];
    Addresses[Level] = Address | LShiftU64 (Indices[Level], Shifts[Level]);
  }
}

/**
  Find the address of top-most guarded free page.

  @param[out]  Address  Start address of top-most guarded free page.

  @return VOID.
**/
VOID
GetLastGuardedFreePageAddress (
  OUT EFI_PHYSICAL_ADDRESS  *Address
  )
{
  EFI_PHYSICAL_ADDRESS  AddressGranularity;
  EFI_PHYSICAL_ADDRESS  BaseAddress;
  UINTN                 Level;
  UINT64                Map;
  INTN                  Index;

  ASSERT (mMapLevel >= 1);

  BaseAddress = 0;
  Map         = mGuardedMemoryMap;
  for (Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
       Level < GUARDED_HEAP_MAP_TABLE_DEPTH;
       ++Level) {
    AddressGranularity = LShiftU64 (1, mLevelShift[Level]);

    //
    // Find the non-NULL entry at largest index.
    //
    for (Index = (INTN)mLevelMask[Level]; Index >= 0; --Index) {
      if (((UINT64 *)(UINTN)Map)[Index] != 0) {
        BaseAddress += MultU64x32 (AddressGranularity, (UINT32)Index);
        Map          = ((UINT64 *)(UINTN)Map)[Index];
        break;
      }
    }
  }

  //
  // Find the non-zero MSB then get the page address.
  //
  while (Map != 0) {
    Map          = RShiftU64 (Map, 1);
    BaseAddress += EFI_PAGES_TO_SIZE (1);
  }

  *Address = BaseAddress;
}

/**
  Record freed pages.

  @param[in]  BaseAddress  Base address of just freed pages.
  @param[in]  Pages        Number of freed pages.

  @return VOID.
**/
VOID
MarkFreedPages (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINTN                 Pages
  )
{
  SetGuardedMemoryBits (BaseAddress, Pages);
}

/**
  Record freed pages as well as mark them as not-present.

  @param[in]  BaseAddress  Base address of just freed pages.
  @param[in]  Pages        Number of freed pages.

  @return VOID.
**/
VOID
EFIAPI
GuardFreedPages (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINTN                 Pages
  )
{
  EFI_STATUS  Status;

  //
  // Legacy memory lower than 1MB might be accessed with no allocation. Leave
  // it alone.
  //
  if (BaseAddress < BASE_1MB) {
    return;
  }

  MarkFreedPages (BaseAddress, Pages);
  if (gCpu != NULL) {
    //
    // Set flag to make sure allocating memory without GUARD for page table
    // operation; otherwise infinite loops could be caused.
    //
    mOnGuarding = TRUE;
    //
    // Note: This might overwrite other attributes needed by other features,
    // such as NX memory protection.
    //
    Status = gCpu->SetMemoryAttributes (
                     gCpu,
                     BaseAddress,
                     EFI_PAGES_TO_SIZE (Pages),
                     EFI_MEMORY_RP
                     );
    //
    // Normally we should ASSERT the returned Status. But SetMemoryAttributes()
    // may itself allocate or free memory, which can make this call fail. It's
    // a rare case, so it's acceptable to leave a few tiny holes unguarded.
    //
    if (EFI_ERROR (Status)) {
      DEBUG ((DEBUG_WARN, "Failed to guard freed pages: %p (%lu)\n", BaseAddress, (UINT64)Pages));
    }
    mOnGuarding = FALSE;
  }
}

/**
  Record freed pages as well as mark them as not-present, if enabled.

  @param[in]  BaseAddress  Base address of just freed pages.
  @param[in]  Pages        Number of freed pages.

  @return VOID.
**/
VOID
EFIAPI
GuardFreedPagesChecked (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINTN                 Pages
  )
{
  if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED)) {
    GuardFreedPages (BaseAddress, Pages);
  }
}

/**
  Mark all pages freed before CPU Arch Protocol as not-present.
**/
VOID
GuardAllFreedPages (
  VOID
  )
{
  UINTN   Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN   Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN   Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64  Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64  Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64  TableEntry;
  UINT64  Address;
  UINT64  GuardPage;
  INTN    Level;
  UINT64  BitIndex;
  UINTN   GuardPageNumber;

  if (mGuardedMemoryMap == 0 ||
      mMapLevel == 0 ||
      mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH) {
    return;
  }

  CopyMem (Entries, mLevelMask, sizeof (Entries));
  CopyMem (Shifts, mLevelShift, sizeof (Shifts));

  SetMem (Tables, sizeof (Tables), 0);
  SetMem (Addresses, sizeof (Addresses), 0);
  SetMem (Indices, sizeof (Indices), 0);

  Level           = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
  Tables[Level]   = mGuardedMemoryMap;
  Address         = 0;
  GuardPage       = (UINT64)-1;
  GuardPageNumber = 0;

  while (TRUE) {
    if (Indices[Level] > Entries[Level]) {
      Tables[Level] = 0;
      Level        -= 1;
    } else {
      TableEntry = ((UINT64 *)(UINTN)(Tables[Level]))[Indices[Level]];
      Address    = Addresses[Level];

      if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
        Level           += 1;
        Tables[Level]    = TableEntry;
        Addresses[Level] = Address;
        Indices[Level]   = 0;

        continue;
      } else {
        BitIndex = 1;
        while (BitIndex != 0) {
          if ((TableEntry & BitIndex) != 0) {
            if (GuardPage == (UINT64)-1) {
              GuardPage = Address;
            }
            ++GuardPageNumber;
          } else if (GuardPageNumber > 0) {
            GuardFreedPages (GuardPage, GuardPageNumber);
            GuardPageNumber = 0;
            GuardPage       = (UINT64)-1;
          }

          if (TableEntry == 0) {
            break;
          }

          Address += EFI_PAGES_TO_SIZE (1);
          BitIndex = LShiftU64 (BitIndex, 1);
        }
      }
    }

    if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
      break;
    }

    Indices[Level]  += 1;
    Address          = (Level == 0) ? 0 : Addresses[Level - 1];
    Addresses[Level] = Address | LShiftU64 (Indices[Level], Shifts[Level]);
  }

  //
  // Update the maximum address of freed page which can be used for memory
  // promotion upon out-of-memory-space.
  //
  GetLastGuardedFreePageAddress (&Address);
  if (Address != 0) {
    mLastPromotedPage = Address;
  }
}

/**
  This function checks to see if the given memory map descriptor in a memory
  map can be merged with any guarded free pages.

  @param  MemoryMapEntry  A pointer to a descriptor in MemoryMap.
  @param  MaxAddress      Maximum address to stop the merge.

  @return VOID.
**/
VOID
MergeGuardPages (
  IN EFI_MEMORY_DESCRIPTOR  *MemoryMapEntry,
  IN EFI_PHYSICAL_ADDRESS   MaxAddress
  )
{
  EFI_PHYSICAL_ADDRESS  EndAddress;
  UINT64                Bitmap;
  INTN                  Pages;

  if (!IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED) ||
      MemoryMapEntry->Type >= EfiMemoryMappedIO) {
    return;
  }

  Bitmap = 0;
  Pages  = EFI_SIZE_TO_PAGES ((UINTN)(MaxAddress - MemoryMapEntry->PhysicalStart));
  Pages -= (INTN)MemoryMapEntry->NumberOfPages;
  while (Pages > 0) {
    if (Bitmap == 0) {
      EndAddress = MemoryMapEntry->PhysicalStart +
                   EFI_PAGES_TO_SIZE ((UINTN)MemoryMapEntry->NumberOfPages);
      Bitmap = GetGuardedMemoryBits (EndAddress, GUARDED_HEAP_MAP_ENTRY_BITS);
    }

    if ((Bitmap & 1) == 0) {
      break;
    }

    Pages--;
    MemoryMapEntry->NumberOfPages++;
    Bitmap = RShiftU64 (Bitmap, 1);
  }
}
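
//
// Editor's note: an illustrative example, not part of the original source.
// If a free descriptor covers pages [100, 104) and the freed-pages bitmap
// marks pages 104..106 as guarded-free, the loop above extends NumberOfPages
// by 3, so the OS sees one contiguous free range [100, 107) in the final
// memory map instead of several fragments split by guards.
//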

/**
  Put part (at most 64 pages a time) of guarded free pages back into the free
  page pool.

  The freed-memory guard is used to detect Use-After-Free (UAF) issues. It
  works by throwing freed memory away: the pages are marked as not-present,
  so that any later access to them is caught by a page-fault exception.

  The problem is that this consumes a lot of memory space. Once no memory
  is left in the pool to allocate, part of the freed pages must be restored
  to their normal function. Otherwise the whole system stops functioning.

  @param  StartAddress  Start address of promoted memory.
  @param  EndAddress    End address of promoted memory.

  @return TRUE   Succeeded to promote memory.
  @return FALSE  No free memory found.
**/
BOOLEAN
PromoteGuardedFreePages (
  OUT EFI_PHYSICAL_ADDRESS  *StartAddress,
  OUT EFI_PHYSICAL_ADDRESS  *EndAddress
  )
{
  EFI_STATUS            Status;
  UINTN                 AvailablePages;
  UINT64                Bitmap;
  EFI_PHYSICAL_ADDRESS  Start;

  if (!IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED)) {
    return FALSE;
  }

  //
  // Similar to the memory allocation service, always search the freed pages
  // in descending direction.
  //
  Start          = mLastPromotedPage;
  AvailablePages = 0;
  while (AvailablePages == 0) {
    Start -= EFI_PAGES_TO_SIZE (GUARDED_HEAP_MAP_ENTRY_BITS);
    //
    // If the address wraps around, restart from the highest freed page.
    //
    if (Start > mLastPromotedPage) {
      GetLastGuardedFreePageAddress (&Start);
      ASSERT (Start != 0);
      Start -= EFI_PAGES_TO_SIZE (GUARDED_HEAP_MAP_ENTRY_BITS);
    }

    Bitmap = GetGuardedMemoryBits (Start, GUARDED_HEAP_MAP_ENTRY_BITS);
    while (Bitmap > 0) {
      if ((Bitmap & 1) != 0) {
        ++AvailablePages;
      } else if (AvailablePages == 0) {
        Start += EFI_PAGES_TO_SIZE (1);
      } else {
        break;
      }

      Bitmap = RShiftU64 (Bitmap, 1);
    }
  }

  if (AvailablePages != 0) {
    DEBUG ((DEBUG_INFO, "Promoted pages: %lX (%lx)\r\n", Start, (UINT64)AvailablePages));
    ClearGuardedMemoryBits (Start, AvailablePages);

    if (gCpu != NULL) {
      //
      // Set flag to make sure allocating memory without GUARD for page table
      // operation; otherwise infinite loops could be caused.
      //
      mOnGuarding = TRUE;
      Status      = gCpu->SetMemoryAttributes (gCpu, Start, EFI_PAGES_TO_SIZE (AvailablePages), 0);
      ASSERT_EFI_ERROR (Status);
      mOnGuarding = FALSE;
    }

    mLastPromotedPage = Start;
    *StartAddress     = Start;
    *EndAddress       = Start + EFI_PAGES_TO_SIZE (AvailablePages) - 1;
    return TRUE;
  }

  return FALSE;
}
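
//
// Editor's note: a usage sketch, not part of the original source. The
// out-of-memory path in the page allocator can recycle freed-but-guarded
// pages roughly like this (CoreAddRange() is assumed here to be the DXE core
// helper that puts a range back on the free list):
//
//   EFI_PHYSICAL_ADDRESS  Start;
//   EFI_PHYSICAL_ADDRESS  End;
//
//   if (PromoteGuardedFreePages (&Start, &End)) {
//     //
//     // [Start, End] is present again and cleared in the guard bitmap.
//     //
//     CoreAddRange (EfiConventionalMemory, Start, End, 0);
//   }
//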

/**
  Notify function used to set all Guard pages that could not be set before
  the CPU Arch Protocol was installed.
**/
VOID
HeapGuardCpuArchProtocolNotify (
  VOID
  )
{
  ASSERT (gCpu != NULL);

  if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_PAGE|GUARD_HEAP_TYPE_POOL) &&
      IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED)) {
    DEBUG ((DEBUG_ERROR, "Heap guard and freed memory guard cannot be enabled at the same time.\n"));
    CpuDeadLoop ();
  }

  if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_PAGE|GUARD_HEAP_TYPE_POOL)) {
    SetAllGuardPages ();
  }

  if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED)) {
    GuardAllFreedPages ();
  }
}

/**
  Helper function to convert a UINT64 value in binary to a string.

  @param[in]   Value      Value of a UINT64 integer.
  @param[out]  BinString  String buffer to contain the conversion result.
                          The buffer must hold at least 65 CHAR8s (64 binary
                          digits plus the terminating null).

  @return VOID.
**/
VOID
Uint64ToBinString (
  IN  UINT64  Value,
  OUT CHAR8   *BinString
  )
{
  UINTN  Index;

  if (BinString == NULL) {
    return;
  }

  for (Index = 64; Index > 0; --Index) {
    BinString[Index - 1] = '0' + (Value & 1);
    Value = RShiftU64 (Value, 1);
  }
  BinString[64] = '\0';
}
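
//
// Editor's note: a usage sketch, not part of the original source. The
// caller owns the buffer, which must hold 64 digits plus the terminator:
//
//   CHAR8  String[GUARDED_HEAP_MAP_ENTRY_BITS + 1];
//
//   Uint64ToBinString (0xA5, String);
//   // String is 64 characters, MSB first, ending in "...10100101".
//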

/**
  Dump the guarded memory bit map.
**/
VOID
EFIAPI
DumpGuardedMemoryBitmap (
  VOID
  )
{
  UINTN   Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN   Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN   Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64  Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64  Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64  TableEntry;
  UINT64  Address;
  INTN    Level;
  UINTN   RepeatZero;
  CHAR8   String[GUARDED_HEAP_MAP_ENTRY_BITS + 1];
  CHAR8   *Ruler1;
  CHAR8   *Ruler2;

  if (!IsHeapGuardEnabled (GUARD_HEAP_TYPE_ALL)) {
    return;
  }

  if (mGuardedMemoryMap == 0 ||
      mMapLevel == 0 ||
      mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH) {
    return;
  }

  Ruler1 = "               3               2               1               0";
  Ruler2 = "FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210";

  DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "============================="
                                  " Guarded Memory Bitmap "
                                  "==============================\r\n"));
  DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "                  %a\r\n", Ruler1));
  DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "                  %a\r\n", Ruler2));

  CopyMem (Entries, mLevelMask, sizeof (Entries));
  CopyMem (Shifts, mLevelShift, sizeof (Shifts));

  SetMem (Indices, sizeof (Indices), 0);
  SetMem (Tables, sizeof (Tables), 0);
  SetMem (Addresses, sizeof (Addresses), 0);

  Level         = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
  Tables[Level] = mGuardedMemoryMap;
  Address       = 0;
  RepeatZero    = 0;

  while (TRUE) {
    if (Indices[Level] > Entries[Level]) {
      Tables[Level] = 0;
      Level        -= 1;
      RepeatZero    = 0;

      DEBUG ((
        HEAP_GUARD_DEBUG_LEVEL,
        "========================================="
        "=========================================\r\n"
        ));
    } else {
      TableEntry = ((UINT64 *)(UINTN)Tables[Level])[Indices[Level]];
      Address    = Addresses[Level];

      if (TableEntry == 0) {
        if (Level == GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
          if (RepeatZero == 0) {
            Uint64ToBinString (TableEntry, String);
            DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));
          } else if (RepeatZero == 1) {
            DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "...             : ...\r\n"));
          }
          RepeatZero += 1;
        }
      } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
        Level           += 1;
        Tables[Level]    = TableEntry;
        Addresses[Level] = Address;
        Indices[Level]   = 0;
        RepeatZero       = 0;

        continue;
      } else {
        RepeatZero = 0;
        Uint64ToBinString (TableEntry, String);
        DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));
      }
    }

    if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
      break;
    }

    Indices[Level]  += 1;
    Address          = (Level == 0) ? 0 : Addresses[Level - 1];
    Addresses[Level] = Address | LShiftU64 (Indices[Level], Shifts[Level]);
  }
}