]> git.proxmox.com Git - mirror_edk2.git/blob - MdeModulePkg/Core/Dxe/Mem/HeapGuard.c
MdeModulePkg/Mem: Initialize the variable MapMemory
[mirror_edk2.git] / MdeModulePkg / Core / Dxe / Mem / HeapGuard.c
1 /** @file
2 UEFI Heap Guard functions.
3
4 Copyright (c) 2017-2018, Intel Corporation. All rights reserved.<BR>
5 SPDX-License-Identifier: BSD-2-Clause-Patent
6
7 **/
8
9 #include "DxeMain.h"
10 #include "Imem.h"
11 #include "HeapGuard.h"
12
//
// Global to avoid infinite reentrance of memory allocation when updating
// page table attributes, which may need allocate pages for new PDE/PTE.
// Checked by the allocator so Guard processing is skipped while TRUE.
//
GLOBAL_REMOVE_IF_UNREFERENCED BOOLEAN mOnGuarding = FALSE;

//
// Pointer to (root of) the table tracking Guarded memory with a bitmap, in
// which '1' is used to indicate memory guarded. '0' might be free memory or
// a Guard page itself, depending on status of memory adjacent to it.
//
GLOBAL_REMOVE_IF_UNREFERENCED UINT64 mGuardedMemoryMap = 0;

//
// Current depth level of map table pointed by mGuardedMemoryMap.
// mMapLevel must be initialized at least by 1. It will be automatically
// updated according to the address of memory just tracked.
//
GLOBAL_REMOVE_IF_UNREFERENCED UINTN mMapLevel = 1;

//
// Shift and mask for each level of map table (index 0 = top-most level).
//
GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH]
= GUARDED_HEAP_MAP_TABLE_DEPTH_SHIFTS;
GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH]
= GUARDED_HEAP_MAP_TABLE_DEPTH_MASKS;

//
// Used for promoting freed but not used pages. Starts at the 4GB boundary.
//
GLOBAL_REMOVE_IF_UNREFERENCED EFI_PHYSICAL_ADDRESS mLastPromotedPage = BASE_4GB;
/**
  Set corresponding bits in bitmap table to 1 according to the address.

  The bit range may span multiple 64-bit map words: Msbs counts the bits in
  the first (possibly partial) word, Qwords the number of whole words, and
  Lsbs the bits spilling into the last (partial) word.

  @param[in]  Address     Start address to set for.
  @param[in]  BitNumber   Number of bits to set.
  @param[in]  BitMap      Pointer to bitmap which covers the Address.

  @return VOID.
**/
STATIC
VOID
SetBits (
  IN EFI_PHYSICAL_ADDRESS Address,
  IN UINTN BitNumber,
  IN UINT64 *BitMap
  )
{
  UINTN Lsbs;
  UINTN Qwords;
  UINTN Msbs;
  UINTN StartBit;
  UINTN EndBit;

  StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
  EndBit = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;

  if ((StartBit + BitNumber) >= GUARDED_HEAP_MAP_ENTRY_BITS) {
    //
    // Range reaches or crosses the end of the first map word. The '%' makes
    // Msbs 0 when StartBit is 0, so whole words go through the Qwords path.
    //
    Msbs = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %
           GUARDED_HEAP_MAP_ENTRY_BITS;
    Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
    Qwords = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;
  } else {
    //
    // Range fits entirely inside the first map word.
    //
    Msbs = BitNumber;
    Lsbs = 0;
    Qwords = 0;
  }

  if (Msbs > 0) {
    //
    // Set the leading bits in the first (partial) word.
    //
    *BitMap |= LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);
    BitMap += 1;
  }

  if (Qwords > 0) {
    //
    // Fill all whole 64-bit words with ones in one operation.
    //
    SetMem64 ((VOID *)BitMap, Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES,
      (UINT64)-1);
    BitMap += Qwords;
  }

  if (Lsbs > 0) {
    //
    // Set the trailing bits in the last (partial) word.
    //
    *BitMap |= (LShiftU64 (1, Lsbs) - 1);
  }
}
98
/**
  Set corresponding bits in bitmap table to 0 according to the address.

  Mirror of SetBits(): Msbs covers the first (partial) word, Qwords the
  whole words, Lsbs the trailing (partial) word.

  @param[in]  Address     Start address to set for.
  @param[in]  BitNumber   Number of bits to set.
  @param[in]  BitMap      Pointer to bitmap which covers the Address.

  @return VOID.
**/
STATIC
VOID
ClearBits (
  IN EFI_PHYSICAL_ADDRESS Address,
  IN UINTN BitNumber,
  IN UINT64 *BitMap
  )
{
  UINTN Lsbs;
  UINTN Qwords;
  UINTN Msbs;
  UINTN StartBit;
  UINTN EndBit;

  StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
  EndBit = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;

  if ((StartBit + BitNumber) >= GUARDED_HEAP_MAP_ENTRY_BITS) {
    //
    // Range reaches or crosses the end of the first map word. The '%' makes
    // Msbs 0 when StartBit is 0, so whole words go through the Qwords path.
    //
    Msbs = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %
           GUARDED_HEAP_MAP_ENTRY_BITS;
    Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
    Qwords = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;
  } else {
    //
    // Range fits entirely inside the first map word.
    //
    Msbs = BitNumber;
    Lsbs = 0;
    Qwords = 0;
  }

  if (Msbs > 0) {
    //
    // Clear the leading bits in the first (partial) word.
    //
    *BitMap &= ~LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);
    BitMap += 1;
  }

  if (Qwords > 0) {
    //
    // Zero all whole 64-bit words in one operation.
    //
    SetMem64 ((VOID *)BitMap, Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES, 0);
    BitMap += Qwords;
  }

  if (Lsbs > 0) {
    //
    // Clear the trailing bits in the last (partial) word.
    //
    *BitMap &= ~(LShiftU64 (1, Lsbs) - 1);
  }
}
150
/**
  Get corresponding bits in bitmap table according to the address.

  The value of bit 0 corresponds to the status of memory at given Address.
  No more than 64 bits can be retrieved in one call.

  Note the boundary check uses '>' (not '>=' as in SetBits/ClearBits): a
  request that exactly fills the first word is read whole via the fast path.

  @param[in]  Address     Start address to retrieve bits for.
  @param[in]  BitNumber   Number of bits to get.
  @param[in]  BitMap      Pointer to bitmap which covers the Address.

  @return  An integer containing the bits information.
**/
STATIC
UINT64
GetBits (
  IN EFI_PHYSICAL_ADDRESS Address,
  IN UINTN BitNumber,
  IN UINT64 *BitMap
  )
{
  UINTN StartBit;
  UINTN EndBit;
  UINTN Lsbs;
  UINTN Msbs;
  UINT64 Result;

  ASSERT (BitNumber <= GUARDED_HEAP_MAP_ENTRY_BITS);

  StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
  EndBit = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;

  if ((StartBit + BitNumber) > GUARDED_HEAP_MAP_ENTRY_BITS) {
    //
    // Range crosses into the next map word: Msbs bits come from the first
    // word, Lsbs bits from the second.
    //
    Msbs = GUARDED_HEAP_MAP_ENTRY_BITS - StartBit;
    Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
  } else {
    Msbs = BitNumber;
    Lsbs = 0;
  }

  if (StartBit == 0 && BitNumber == GUARDED_HEAP_MAP_ENTRY_BITS) {
    //
    // Whole-word read; avoids LShiftU64(1, 64) which would be undefined.
    //
    Result = *BitMap;
  } else {
    Result    = RShiftU64((*BitMap), StartBit) & (LShiftU64(1, Msbs) - 1);
    if (Lsbs > 0) {
      //
      // Splice in the low bits of the following word above the Msbs bits.
      //
      BitMap  += 1;
      Result |= LShiftU64 ((*BitMap) & (LShiftU64 (1, Lsbs) - 1), Msbs);
    }
  }

  return Result;
}
202
/**
  Locate the pointer of bitmap from the guarded memory bitmap tables, which
  covers the given Address.

  The map is a multi-level radix table rooted at mGuardedMemoryMap. If the
  Address lies beyond the range the current depth can describe and
  AllocMapUnit is TRUE, extra top levels are grown first; missing interior
  tables along the walk are then allocated on demand.

  @param[in]  Address       Start address to search the bitmap for.
  @param[in]  AllocMapUnit  Flag to indicate memory allocation for the table.
  @param[out] BitMap        Pointer to bitmap which covers the Address.

  @return The bit number from given Address to the end of current map table.
**/
UINTN
FindGuardedMemoryMap (
  IN EFI_PHYSICAL_ADDRESS Address,
  IN BOOLEAN AllocMapUnit,
  OUT UINT64 **BitMap
  )
{
  UINTN Level;
  UINT64 *GuardMap;
  UINT64 MapMemory;
  UINTN Index;
  UINTN Size;
  UINTN BitsToUnitEnd;
  EFI_STATUS Status;

  // Defensive init: MapMemory is only written by allocation below.
  MapMemory = 0;

  //
  // Adjust current map table depth according to the address to access
  //
  while (AllocMapUnit &&
         mMapLevel < GUARDED_HEAP_MAP_TABLE_DEPTH &&
         RShiftU64 (
           Address,
           mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1]
           ) != 0) {

    if (mGuardedMemoryMap != 0) {
      //
      // Grow a new root level and make the old root its first entry.
      // Allocation goes through CoreInternalAllocatePages with
      // NeedGuard=FALSE to avoid recursing into Guard handling.
      //
      Size = (mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1] + 1)
             * GUARDED_HEAP_MAP_ENTRY_BYTES;
      Status = CoreInternalAllocatePages (
                 AllocateAnyPages,
                 EfiBootServicesData,
                 EFI_SIZE_TO_PAGES (Size),
                 &MapMemory,
                 FALSE
                 );
      ASSERT_EFI_ERROR (Status);
      ASSERT (MapMemory != 0);

      SetMem ((VOID *)(UINTN)MapMemory, Size, 0);

      *(UINT64 *)(UINTN)MapMemory = mGuardedMemoryMap;
      mGuardedMemoryMap = MapMemory;
    }

    mMapLevel++;

  }

  //
  // Walk down from the root, allocating missing tables if permitted.
  //
  GuardMap = &mGuardedMemoryMap;
  for (Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
       Level < GUARDED_HEAP_MAP_TABLE_DEPTH;
       ++Level) {

    if (*GuardMap == 0) {
      if (!AllocMapUnit) {
        // Read-only lookup: report "not mapped" via NULL.
        GuardMap = NULL;
        break;
      }

      Size = (mLevelMask[Level] + 1) * GUARDED_HEAP_MAP_ENTRY_BYTES;
      Status = CoreInternalAllocatePages (
                 AllocateAnyPages,
                 EfiBootServicesData,
                 EFI_SIZE_TO_PAGES (Size),
                 &MapMemory,
                 FALSE
                 );
      ASSERT_EFI_ERROR (Status);
      ASSERT (MapMemory != 0);

      SetMem ((VOID *)(UINTN)MapMemory, Size, 0);
      *GuardMap = MapMemory;
    }

    //
    // Index into this level's table by the Address bits for this level.
    //
    Index = (UINTN)RShiftU64 (Address, mLevelShift[Level]);
    Index &= mLevelMask[Level];
    GuardMap = (UINT64 *)(UINTN)((*GuardMap) + Index * sizeof (UINT64));

  }

  BitsToUnitEnd = GUARDED_HEAP_MAP_BITS - GUARDED_HEAP_MAP_BIT_INDEX (Address);
  *BitMap = GuardMap;

  return BitsToUnitEnd;
}
300
301 /**
302 Set corresponding bits in bitmap table to 1 according to given memory range.
303
304 @param[in] Address Memory address to guard from.
305 @param[in] NumberOfPages Number of pages to guard.
306
307 @return VOID.
308 **/
309 VOID
310 EFIAPI
311 SetGuardedMemoryBits (
312 IN EFI_PHYSICAL_ADDRESS Address,
313 IN UINTN NumberOfPages
314 )
315 {
316 UINT64 *BitMap;
317 UINTN Bits;
318 UINTN BitsToUnitEnd;
319
320 while (NumberOfPages > 0) {
321 BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);
322 ASSERT (BitMap != NULL);
323
324 if (NumberOfPages > BitsToUnitEnd) {
325 // Cross map unit
326 Bits = BitsToUnitEnd;
327 } else {
328 Bits = NumberOfPages;
329 }
330
331 SetBits (Address, Bits, BitMap);
332
333 NumberOfPages -= Bits;
334 Address += EFI_PAGES_TO_SIZE (Bits);
335 }
336 }
337
338 /**
339 Clear corresponding bits in bitmap table according to given memory range.
340
341 @param[in] Address Memory address to unset from.
342 @param[in] NumberOfPages Number of pages to unset guard.
343
344 @return VOID.
345 **/
346 VOID
347 EFIAPI
348 ClearGuardedMemoryBits (
349 IN EFI_PHYSICAL_ADDRESS Address,
350 IN UINTN NumberOfPages
351 )
352 {
353 UINT64 *BitMap;
354 UINTN Bits;
355 UINTN BitsToUnitEnd;
356
357 while (NumberOfPages > 0) {
358 BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);
359 ASSERT (BitMap != NULL);
360
361 if (NumberOfPages > BitsToUnitEnd) {
362 // Cross map unit
363 Bits = BitsToUnitEnd;
364 } else {
365 Bits = NumberOfPages;
366 }
367
368 ClearBits (Address, Bits, BitMap);
369
370 NumberOfPages -= Bits;
371 Address += EFI_PAGES_TO_SIZE (Bits);
372 }
373 }
374
/**
  Retrieve corresponding bits in bitmap table according to given memory range.

  Bit 0 of the result corresponds to the page at Address; at most 64 pages
  can be queried per call. Unmapped regions read as 0.

  @param[in]  Address        Memory address to retrieve from.
  @param[in]  NumberOfPages  Number of pages to retrieve.

  @return An integer containing the guarded memory bitmap.
**/
UINT64
GetGuardedMemoryBits (
  IN EFI_PHYSICAL_ADDRESS Address,
  IN UINTN NumberOfPages
  )
{
  UINT64 *BitMap;
  UINTN Bits;
  UINT64 Result;
  UINTN Shift;
  UINTN BitsToUnitEnd;

  ASSERT (NumberOfPages <= GUARDED_HEAP_MAP_ENTRY_BITS);

  Result = 0;
  Shift = 0;
  while (NumberOfPages > 0) {
    // Read-only lookup; do not grow the map.
    BitsToUnitEnd = FindGuardedMemoryMap (Address, FALSE, &BitMap);

    if (NumberOfPages > BitsToUnitEnd) {
      // Cross map unit
      Bits = BitsToUnitEnd;
    } else {
      Bits = NumberOfPages;
    }

    if (BitMap != NULL) {
      // Accumulate this unit's bits at their position in the result.
      Result |= LShiftU64 (GetBits (Address, Bits, BitMap), Shift);
    }

    Shift += Bits;
    NumberOfPages -= Bits;
    Address += EFI_PAGES_TO_SIZE (Bits);
  }

  return Result;
}
420
421 /**
422 Get bit value in bitmap table for the given address.
423
424 @param[in] Address The address to retrieve for.
425
426 @return 1 or 0.
427 **/
428 UINTN
429 EFIAPI
430 GetGuardMapBit (
431 IN EFI_PHYSICAL_ADDRESS Address
432 )
433 {
434 UINT64 *GuardMap;
435
436 FindGuardedMemoryMap (Address, FALSE, &GuardMap);
437 if (GuardMap != NULL) {
438 if (RShiftU64 (*GuardMap,
439 GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address)) & 1) {
440 return 1;
441 }
442 }
443
444 return 0;
445 }
446
447
/**
  Check to see if the page at the given address is a Guard page or not.

  @param[in]  Address  The address to check for.

  @return TRUE   The page at Address is a Guard page.
  @return FALSE  The page at Address is not a Guard page.
**/
BOOLEAN
EFIAPI
IsGuardPage (
  IN EFI_PHYSICAL_ADDRESS Address
  )
{
  UINT64 BitMap;

  //
  // There must be at least one guarded page before and/or after given
  // address if it's a Guard page. The bitmap pattern should be one of
  // 001, 100 and 101 (bit0 = page before Address, bit1 = Address itself,
  // bit2 = page after; a Guard page itself is never marked guarded, so
  // bit1 must be 0).
  //
  BitMap = GetGuardedMemoryBits (Address - EFI_PAGE_SIZE, 3);
  return ((BitMap == BIT0) || (BitMap == BIT2) || (BitMap == (BIT2 | BIT0)));
}
472
473
474 /**
475 Check to see if the page at the given address is guarded or not.
476
477 @param[in] Address The address to check for.
478
479 @return TRUE The page at Address is guarded.
480 @return FALSE The page at Address is not guarded.
481 **/
482 BOOLEAN
483 EFIAPI
484 IsMemoryGuarded (
485 IN EFI_PHYSICAL_ADDRESS Address
486 )
487 {
488 return (GetGuardMapBit (Address) == 1);
489 }
490
/**
  Set the page at the given address to be a Guard page.

  This is done by changing the page table attribute to be NOT PRESENT.
  No-op before the CPU Arch Protocol is available (gCpu == NULL); such
  pages are handled later by SetAllGuardPages().

  @param[in]  BaseAddress  Page address to Guard at.

  @return VOID.
**/
VOID
EFIAPI
SetGuardPage (
  IN EFI_PHYSICAL_ADDRESS BaseAddress
  )
{
  EFI_STATUS Status;

  if (gCpu == NULL) {
    return;
  }

  //
  // Set flag to make sure allocating memory without GUARD for page table
  // operation; otherwise infinite loops could be caused.
  //
  mOnGuarding = TRUE;
  //
  // Note: This might overwrite other attributes needed by other features,
  // such as NX memory protection.
  //
  Status = gCpu->SetMemoryAttributes (gCpu, BaseAddress, EFI_PAGE_SIZE, EFI_MEMORY_RP);
  ASSERT_EFI_ERROR (Status);
  mOnGuarding = FALSE;
}
525
/**
  Unset the Guard page at the given address to the normal memory.

  This is done by changing the page table attribute to be PRESENT.
  No-op before the CPU Arch Protocol is available (gCpu == NULL).

  @param[in]  BaseAddress  Page address to Guard at.

  @return VOID.
**/
VOID
EFIAPI
UnsetGuardPage (
  IN EFI_PHYSICAL_ADDRESS BaseAddress
  )
{
  UINT64 Attributes;
  EFI_STATUS Status;

  if (gCpu == NULL) {
    return;
  }

  //
  // Once the Guard page is unset, it will be freed back to memory pool. NX
  // memory protection must be restored for this page if NX is enabled for free
  // memory.
  //
  Attributes = 0;
  if ((PcdGet64 (PcdDxeNxMemoryProtectionPolicy) & (1 << EfiConventionalMemory)) != 0) {
    Attributes |= EFI_MEMORY_XP;
  }

  //
  // Set flag to make sure allocating memory without GUARD for page table
  // operation; otherwise infinite loops could be caused.
  //
  mOnGuarding = TRUE;
  //
  // Note: This might overwrite other attributes needed by other features,
  // such as memory protection (NX). Please make sure they are not enabled
  // at the same time.
  //
  Status = gCpu->SetMemoryAttributes (gCpu, BaseAddress, EFI_PAGE_SIZE, Attributes);
  ASSERT_EFI_ERROR (Status);
  mOnGuarding = FALSE;
}
572
/**
  Check to see if the memory at the given address should be guarded or not.

  The decision combines two PCDs: PcdHeapGuardPropertyMask (which Guard
  feature bits are globally enabled) and a per-memory-type bitmask PCD
  (pool or page) whose bit N corresponds to EFI memory type N.

  @param[in]  MemoryType    Memory type to check.
  @param[in]  AllocateType  Allocation type to check.
  @param[in]  PageOrPool    Indicate a page allocation or pool allocation.

  @return TRUE   The given type of memory should be guarded.
  @return FALSE  The given type of memory should not be guarded.
**/
BOOLEAN
IsMemoryTypeToGuard (
  IN EFI_MEMORY_TYPE AllocateType,
  IN EFI_ALLOCATE_TYPE AllocateType,
  IN UINT8 PageOrPool
  )
{
  UINT64 TestBit;
  UINT64 ConfigBit;

  // Guarding fixed-address allocations is not supported.
  if (AllocateType == AllocateAddress) {
    return FALSE;
  }

  // Requested Guard feature(s) must be globally enabled.
  if ((PcdGet8 (PcdHeapGuardPropertyMask) & PageOrPool) == 0) {
    return FALSE;
  }

  if (PageOrPool == GUARD_HEAP_TYPE_POOL) {
    ConfigBit = PcdGet64 (PcdHeapGuardPoolType);
  } else if (PageOrPool == GUARD_HEAP_TYPE_PAGE) {
    ConfigBit = PcdGet64 (PcdHeapGuardPageType);
  } else {
    // Combined query (e.g. from IsHeapGuardEnabled): match any type.
    ConfigBit = (UINT64)-1;
  }

  //
  // Map MemoryType onto its bit in ConfigBit. OS/OEM reserved ranges use
  // the two top bits; EfiMaxMemoryType acts as a wildcard.
  //
  if ((UINT32)MemoryType >= MEMORY_TYPE_OS_RESERVED_MIN) {
    TestBit = BIT63;
  } else if ((UINT32) MemoryType >= MEMORY_TYPE_OEM_RESERVED_MIN) {
    TestBit = BIT62;
  } else if (MemoryType < EfiMaxMemoryType) {
    TestBit = LShiftU64 (1, MemoryType);
  } else if (MemoryType == EfiMaxMemoryType) {
    TestBit = (UINT64)-1;
  } else {
    TestBit = 0;
  }

  return ((ConfigBit & TestBit) != 0);
}
624
625 /**
626 Check to see if the pool at the given address should be guarded or not.
627
628 @param[in] MemoryType Pool type to check.
629
630
631 @return TRUE The given type of pool should be guarded.
632 @return FALSE The given type of pool should not be guarded.
633 **/
634 BOOLEAN
635 IsPoolTypeToGuard (
636 IN EFI_MEMORY_TYPE MemoryType
637 )
638 {
639 return IsMemoryTypeToGuard (MemoryType, AllocateAnyPages,
640 GUARD_HEAP_TYPE_POOL);
641 }
642
643 /**
644 Check to see if the page at the given address should be guarded or not.
645
646 @param[in] MemoryType Page type to check.
647 @param[in] AllocateType Allocation type to check.
648
649 @return TRUE The given type of page should be guarded.
650 @return FALSE The given type of page should not be guarded.
651 **/
652 BOOLEAN
653 IsPageTypeToGuard (
654 IN EFI_MEMORY_TYPE MemoryType,
655 IN EFI_ALLOCATE_TYPE AllocateType
656 )
657 {
658 return IsMemoryTypeToGuard (MemoryType, AllocateType, GUARD_HEAP_TYPE_PAGE);
659 }
660
661 /**
662 Check to see if the heap guard is enabled for page and/or pool allocation.
663
664 @param[in] GuardType Specify the sub-type(s) of Heap Guard.
665
666 @return TRUE/FALSE.
667 **/
668 BOOLEAN
669 IsHeapGuardEnabled (
670 UINT8 GuardType
671 )
672 {
673 return IsMemoryTypeToGuard (EfiMaxMemoryType, AllocateAnyPages, GuardType);
674 }
675
/**
  Set head Guard and tail Guard for the given memory range.

  Existing Guard pages shared with adjacent blocks are reused rather than
  re-created. The range itself is then recorded in the guarded-memory bitmap.

  @param[in]  Memory         Base address of memory to set guard for.
  @param[in]  NumberOfPages  Memory size in pages.

  @return VOID.
**/
VOID
SetGuardForMemory (
  IN EFI_PHYSICAL_ADDRESS Memory,
  IN UINTN NumberOfPages
  )
{
  EFI_PHYSICAL_ADDRESS GuardPage;

  //
  // Set tail Guard
  //
  GuardPage = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);
  if (!IsGuardPage (GuardPage)) {
    // Not already a Guard shared with the following block.
    SetGuardPage (GuardPage);
  }

  // Set head Guard
  GuardPage = Memory - EFI_PAGES_TO_SIZE (1);
  if (!IsGuardPage (GuardPage)) {
    // Not already a Guard shared with the preceding block.
    SetGuardPage (GuardPage);
  }

  //
  // Mark the memory range as Guarded
  //
  SetGuardedMemoryBits (Memory, NumberOfPages);
}
711
/**
  Unset head Guard and tail Guard for the given memory range.

  Guard pages shared with adjacent guarded blocks are kept; in partial-free
  situations a boundary page of the freed range is converted into a new
  Guard instead.

  @param[in]  Memory         Base address of memory to unset guard for.
  @param[in]  NumberOfPages  Memory size in pages.

  @return VOID.
**/
VOID
UnsetGuardForMemory (
  IN EFI_PHYSICAL_ADDRESS Memory,
  IN UINTN NumberOfPages
  )
{
  EFI_PHYSICAL_ADDRESS GuardPage;
  UINT64 GuardBitmap;

  if (NumberOfPages == 0) {
    return;
  }

  //
  // Head Guard must be one page before, if any.
  //
  //          MSB-> 1     0 <-LSB
  //          -------------------
  //  Head Guard -> 0     1 -> Don't free Head Guard  (shared Guard)
  //  Head Guard -> 0     0 -> Free Head Guard either (not shared Guard)
  //                1     X -> Don't free first page  (need a new Guard)
  //                           (it'll be turned into a Guard page later)
  //          -------------------
  //       Start ->-1    -2
  //
  GuardPage = Memory - EFI_PAGES_TO_SIZE (1);
  GuardBitmap = GetGuardedMemoryBits (Memory - EFI_PAGES_TO_SIZE (2), 2);
  if ((GuardBitmap & BIT1) == 0) {
    //
    // Head Guard exists.
    //
    if ((GuardBitmap & BIT0) == 0) {
      //
      // If the head Guard is not a tail Guard of adjacent memory block,
      // unset it.
      //
      UnsetGuardPage (GuardPage);
    }
  } else {
    //
    // Pages before memory to free are still in Guard. It's a partial free
    // case. Turn first page of memory block to free into a new Guard.
    //
    SetGuardPage (Memory);
  }

  //
  // Tail Guard must be the page after this memory block to free, if any.
  //
  //   MSB-> 1     0 <-LSB
  //  --------------------
  //         1     0 <- Tail Guard -> Don't free Tail Guard  (shared Guard)
  //         0     0 <- Tail Guard -> Free Tail Guard either (not shared Guard)
  //         X     1               -> Don't free last page   (need a new Guard)
  //                                  (it'll be turned into a Guard page later)
  //  --------------------
  //        +1    +0 <- End
  //
  GuardPage = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);
  GuardBitmap = GetGuardedMemoryBits (GuardPage, 2);
  if ((GuardBitmap & BIT0) == 0) {
    //
    // Tail Guard exists.
    //
    if ((GuardBitmap & BIT1) == 0) {
      //
      // If the tail Guard is not a head Guard of adjacent memory block,
      // free it; otherwise, keep it.
      //
      UnsetGuardPage (GuardPage);
    }
  } else {
    //
    // Pages after memory to free are still in Guard. It's a partial free
    // case. We need to keep one page to be a head Guard.
    //
    SetGuardPage (GuardPage - EFI_PAGES_TO_SIZE (1));
  }

  //
  // No matter what, we just clear the mark of the Guarded memory.
  //
  ClearGuardedMemoryBits(Memory, NumberOfPages);
}
804
/**
  Adjust address of free memory according to existing and/or required Guard.

  This function will check if there're existing Guard pages of adjacent
  memory blocks, and try to use it as the Guard page of the memory to be
  allocated.

  @param[in]  Start          Start address of free memory block.
  @param[in]  Size           Size of free memory block.
  @param[in]  SizeRequested  Size of memory to allocate.

  @return The end address of memory block found.
  @return 0 if no enough space for the required size of memory and its Guard.
**/
UINT64
AdjustMemoryS (
  IN UINT64 Start,
  IN UINT64 Size,
  IN UINT64 SizeRequested
  )
{
  UINT64 Target;

  //
  // UEFI spec requires that allocated pool must be 8-byte aligned. If it's
  // indicated to put the pool near the Tail Guard, we need extra bytes to
  // make sure alignment of the returned pool address.
  //
  if ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) == 0) {
    SizeRequested = ALIGN_VALUE(SizeRequested, 8);
  }

  // Place the allocation at the top of the free block (tail-adjacent).
  Target = Start + Size - SizeRequested;
  ASSERT (Target >= Start);
  if (Target == 0) {
    return 0;
  }

  if (!IsGuardPage (Start + Size)) {
    // No Guard at tail to share. One more page is needed.
    Target -= EFI_PAGES_TO_SIZE (1);
  }

  // Out of range?
  if (Target < Start) {
    return 0;
  }

  // At the edge?
  if (Target == Start) {
    if (!IsGuardPage (Target - EFI_PAGES_TO_SIZE (1))) {
      // No enough space for a new head Guard if no Guard at head to share.
      return 0;
    }
  }

  // OK, we have enough pages for memory and its Guards. Return the End of the
  // free space.
  return Target + SizeRequested - 1;
}
865
/**
  Adjust the start address and number of pages to free according to Guard.

  The purpose of this function is to keep the shared Guard page with adjacent
  memory block if it's still in guard, or free it if no more sharing. Another
  is to reserve pages as Guard pages in partial page free situation.

  @param[in,out]  Memory         Base address of memory to free.
  @param[in,out]  NumberOfPages  Size of memory to free.

  @return VOID.
**/
VOID
AdjustMemoryF (
  IN OUT EFI_PHYSICAL_ADDRESS *Memory,
  IN OUT UINTN *NumberOfPages
  )
{
  EFI_PHYSICAL_ADDRESS Start;
  EFI_PHYSICAL_ADDRESS MemoryToTest;
  UINTN PagesToFree;
  UINT64 GuardBitmap;

  if (Memory == NULL || NumberOfPages == NULL || *NumberOfPages == 0) {
    return;
  }

  Start = *Memory;
  PagesToFree = *NumberOfPages;

  //
  // Head Guard must be one page before, if any.
  //
  //          MSB-> 1     0 <-LSB
  //          -------------------
  //  Head Guard -> 0     1 -> Don't free Head Guard  (shared Guard)
  //  Head Guard -> 0     0 -> Free Head Guard either (not shared Guard)
  //                1     X -> Don't free first page  (need a new Guard)
  //                           (it'll be turned into a Guard page later)
  //          -------------------
  //       Start ->-1    -2
  //
  MemoryToTest = Start - EFI_PAGES_TO_SIZE (2);
  GuardBitmap = GetGuardedMemoryBits (MemoryToTest, 2);
  if ((GuardBitmap & BIT1) == 0) {
    //
    // Head Guard exists.
    //
    if ((GuardBitmap & BIT0) == 0) {
      //
      // If the head Guard is not a tail Guard of adjacent memory block,
      // free it; otherwise, keep it.
      //
      Start -= EFI_PAGES_TO_SIZE (1);
      PagesToFree += 1;
    }
  } else {
    //
    // No Head Guard, and pages before memory to free are still in Guard. It's a
    // partial free case. We need to keep one page to be a tail Guard.
    //
    Start += EFI_PAGES_TO_SIZE (1);
    PagesToFree -= 1;
  }

  //
  // Tail Guard must be the page after this memory block to free, if any.
  //
  //   MSB-> 1     0 <-LSB
  //  --------------------
  //         1     0 <- Tail Guard -> Don't free Tail Guard  (shared Guard)
  //         0     0 <- Tail Guard -> Free Tail Guard either (not shared Guard)
  //         X     1               -> Don't free last page   (need a new Guard)
  //                                  (it'll be turned into a Guard page later)
  //  --------------------
  //        +1    +0 <- End
  //
  MemoryToTest = Start + EFI_PAGES_TO_SIZE (PagesToFree);
  GuardBitmap = GetGuardedMemoryBits (MemoryToTest, 2);
  if ((GuardBitmap & BIT0) == 0) {
    //
    // Tail Guard exists.
    //
    if ((GuardBitmap & BIT1) == 0) {
      //
      // If the tail Guard is not a head Guard of adjacent memory block,
      // free it; otherwise, keep it.
      //
      PagesToFree += 1;
    }
  } else if (PagesToFree > 0) {
    //
    // No Tail Guard, and pages after memory to free are still in Guard. It's a
    // partial free case. We need to keep one page to be a head Guard.
    //
    PagesToFree -= 1;
  }

  *Memory = Start;
  *NumberOfPages = PagesToFree;
}
967
/**
  Adjust the base and number of pages to really allocate according to Guard.

  Expands the range to include the head and/or tail Guard pages when they
  are not already shared with adjacent guarded blocks.

  @param[in,out]  Memory         Base address of free memory.
  @param[in,out]  NumberOfPages  Size of memory to allocate.

  @return VOID.
**/
VOID
AdjustMemoryA (
  IN OUT EFI_PHYSICAL_ADDRESS *Memory,
  IN OUT UINTN *NumberOfPages
  )
{
  //
  // FindFreePages() has already taken the Guard into account. It's safe to
  // adjust the start address and/or number of pages here, to make sure that
  // the Guards are also "allocated".
  //
  if (!IsGuardPage (*Memory + EFI_PAGES_TO_SIZE (*NumberOfPages))) {
    // No tail Guard, add one.
    *NumberOfPages += 1;
  }

  if (!IsGuardPage (*Memory - EFI_PAGE_SIZE)) {
    // No head Guard, add one.
    *Memory -= EFI_PAGE_SIZE;
    *NumberOfPages += 1;
  }
}
998
999 /**
1000 Adjust the pool head position to make sure the Guard page is adjavent to
1001 pool tail or pool head.
1002
1003 @param[in] Memory Base address of memory allocated.
1004 @param[in] NoPages Number of pages actually allocated.
1005 @param[in] Size Size of memory requested.
1006 (plus pool head/tail overhead)
1007
1008 @return Address of pool head.
1009 **/
1010 VOID *
1011 AdjustPoolHeadA (
1012 IN EFI_PHYSICAL_ADDRESS Memory,
1013 IN UINTN NoPages,
1014 IN UINTN Size
1015 )
1016 {
1017 if (Memory == 0 || (PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0) {
1018 //
1019 // Pool head is put near the head Guard
1020 //
1021 return (VOID *)(UINTN)Memory;
1022 }
1023
1024 //
1025 // Pool head is put near the tail Guard
1026 //
1027 Size = ALIGN_VALUE (Size, 8);
1028 return (VOID *)(UINTN)(Memory + EFI_PAGES_TO_SIZE (NoPages) - Size);
1029 }
1030
1031 /**
1032 Get the page base address according to pool head address.
1033
1034 @param[in] Memory Head address of pool to free.
1035
1036 @return Address of pool head.
1037 **/
1038 VOID *
1039 AdjustPoolHeadF (
1040 IN EFI_PHYSICAL_ADDRESS Memory
1041 )
1042 {
1043 if (Memory == 0 || (PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0) {
1044 //
1045 // Pool head is put near the head Guard
1046 //
1047 return (VOID *)(UINTN)Memory;
1048 }
1049
1050 //
1051 // Pool head is put near the tail Guard
1052 //
1053 return (VOID *)(UINTN)(Memory & ~EFI_PAGE_MASK);
1054 }
1055
/**
  Allocate or free guarded memory.

  @param[in]  Start          Start address of memory to allocate or free.
  @param[in]  NumberOfPages  Memory size in pages.
  @param[in]  NewType        Memory type to convert to.

  @return Status of the conversion done by CoreConvertPages(), or
          EFI_SUCCESS if a free shrank to zero pages after Guard adjustment.
**/
EFI_STATUS
CoreConvertPagesWithGuard (
  IN UINT64 Start,
  IN UINTN NumberOfPages,
  IN EFI_MEMORY_TYPE NewType
  )
{
  UINT64 OldStart;
  UINTN OldPages;

  if (NewType == EfiConventionalMemory) {
    //
    // Freeing: remember the caller's range, then trim it so that shared
    // Guard pages and partial-free boundary pages stay out of the conversion.
    //
    OldStart = Start;
    OldPages = NumberOfPages;

    AdjustMemoryF (&Start, &NumberOfPages);
    //
    // It's safe to unset Guard page inside memory lock because there should
    // be no memory allocation occurred in updating memory page attribute at
    // this point. And unsetting Guard page before free will prevent Guard
    // page just freed back to pool from being allocated right away before
    // marking it usable (from non-present to present).
    //
    UnsetGuardForMemory (OldStart, OldPages);
    if (NumberOfPages == 0) {
      // The whole range was consumed by Guard adjustment.
      return EFI_SUCCESS;
    }
  } else {
    // Allocating: expand the range to cover head/tail Guard pages.
    AdjustMemoryA (&Start, &NumberOfPages);
  }

  return CoreConvertPages (Start, NumberOfPages, NewType);
}
1097
/**
  Set all Guard pages which cannot be set before CPU Arch Protocol installed.

  Performs a depth-first walk of the whole guarded-memory bitmap tree and,
  using the transitions between guarded (1) and unguarded (0) pages, marks
  the page immediately before each guarded run and the page immediately
  after it as Guard pages.
**/
VOID
SetAllGuardPages (
  VOID
  )
{
  UINTN Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64 Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64 Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64 TableEntry;
  UINT64 Address;
  UINT64 GuardPage;
  INTN Level;
  UINTN Index;
  BOOLEAN OnGuarding;

  // Nothing to do if no memory has been tracked yet.
  if (mGuardedMemoryMap == 0 ||
      mMapLevel == 0 ||
      mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH) {
    return;
  }

  CopyMem (Entries, mLevelMask, sizeof (Entries));
  CopyMem (Shifts, mLevelShift, sizeof (Shifts));

  SetMem (Tables, sizeof(Tables), 0);
  SetMem (Addresses, sizeof(Addresses), 0);
  SetMem (Indices, sizeof(Indices), 0);

  // Start at the tree's actual root level (depth may be shallower than max).
  Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
  Tables[Level] = mGuardedMemoryMap;
  Address = 0;
  OnGuarding = FALSE;

  DEBUG_CODE (
    DumpGuardedMemoryBitmap ();
  );

  //
  // Iterative depth-first traversal; Indices[] is the cursor at each level.
  //
  while (TRUE) {
    if (Indices[Level] > Entries[Level]) {
      // Exhausted this table; pop back up one level.
      Tables[Level] = 0;
      Level -= 1;
    } else {

      TableEntry = ((UINT64 *)(UINTN)(Tables[Level]))[Indices[Level]];
      Address = Addresses[Level];

      if (TableEntry == 0) {

        // Empty subtree: no guarded pages here, guard run (if any) ended.
        OnGuarding = FALSE;

      } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {

        // Interior entry: descend into the child table.
        Level += 1;
        Tables[Level] = TableEntry;
        Addresses[Level] = Address;
        Indices[Level] = 0;

        continue;

      } else {

        //
        // Leaf bitmap word: scan bit by bit; a 0->1 transition means the
        // previous page is a Guard, a 1->0 transition means this page is.
        //
        Index = 0;
        while (Index < GUARDED_HEAP_MAP_ENTRY_BITS) {
          if ((TableEntry & 1) == 1) {
            if (OnGuarding) {
              GuardPage = 0;
            } else {
              // First page of a guarded run: page before it is the Guard.
              GuardPage = Address - EFI_PAGE_SIZE;
            }
            OnGuarding = TRUE;
          } else {
            if (OnGuarding) {
              // First page after a guarded run: it is the Guard.
              GuardPage = Address;
            } else {
              GuardPage = 0;
            }
            OnGuarding = FALSE;
          }

          if (GuardPage != 0) {
            SetGuardPage (GuardPage);
          }

          if (TableEntry == 0) {
            // Remaining bits are all zero; skip ahead.
            break;
          }

          TableEntry = RShiftU64 (TableEntry, 1);
          Address += EFI_PAGE_SIZE;
          Index += 1;
        }
      }
    }

    // Popped above the root: traversal is complete.
    if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
      break;
    }

    // Advance to the next entry at this level and recompute its address.
    Indices[Level] += 1;
    Address = (Level == 0) ? 0 : Addresses[Level - 1];
    Addresses[Level] = Address | LShiftU64(Indices[Level], Shifts[Level]);

  }
}
1207
/**
  Find the address of top-most guarded free page.

  Walks the bitmap tree, at each level following the highest-index non-NULL
  entry, then scans the final bitmap word for its most significant set bit.

  @param[out]  Address  Start address of top-most guarded free page.

  @return VOID.
**/
VOID
GetLastGuardedFreePageAddress (
  OUT EFI_PHYSICAL_ADDRESS *Address
  )
{
  EFI_PHYSICAL_ADDRESS AddressGranularity;
  EFI_PHYSICAL_ADDRESS BaseAddress;
  UINTN Level;
  UINT64 Map;
  INTN Index;

  ASSERT (mMapLevel >= 1);

  BaseAddress = 0;
  Map = mGuardedMemoryMap;
  for (Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
       Level < GUARDED_HEAP_MAP_TABLE_DEPTH;
       ++Level) {
    // Span of addresses covered by one entry at this level.
    AddressGranularity = LShiftU64 (1, mLevelShift[Level]);

    //
    // Find the non-NULL entry at largest index.
    //
    for (Index = (INTN)mLevelMask[Level]; Index >= 0 ; --Index) {
      if (((UINT64 *)(UINTN)Map)[Index] != 0) {
        BaseAddress += MultU64x32 (AddressGranularity, (UINT32)Index);
        Map = ((UINT64 *)(UINTN)Map)[Index];
        break;
      }
    }
  }

  //
  // Find the non-zero MSB then get the page address.
  //
  while (Map != 0) {
    Map = RShiftU64 (Map, 1);
    BaseAddress += EFI_PAGES_TO_SIZE (1);
  }

  *Address = BaseAddress;
}
1257
1258 /**
1259 Record freed pages.
1260
1261 @param[in] BaseAddress Base address of just freed pages.
1262 @param[in] Pages Number of freed pages.
1263
1264 @return VOID.
1265 **/
1266 VOID
1267 MarkFreedPages (
1268 IN EFI_PHYSICAL_ADDRESS BaseAddress,
1269 IN UINTN Pages
1270 )
1271 {
1272 SetGuardedMemoryBits (BaseAddress, Pages);
1273 }
1274
1275 /**
1276 Record freed pages as well as mark them as not-present.
1277
1278 @param[in] BaseAddress Base address of just freed pages.
1279 @param[in] Pages Number of freed pages.
1280
1281 @return VOID.
1282 **/
1283 VOID
1284 EFIAPI
1285 GuardFreedPages (
1286 IN EFI_PHYSICAL_ADDRESS BaseAddress,
1287 IN UINTN Pages
1288 )
1289 {
1290 EFI_STATUS Status;
1291
1292 //
1293 // Legacy memory lower than 1MB might be accessed with no allocation. Leave
1294 // them alone.
1295 //
1296 if (BaseAddress < BASE_1MB) {
1297 return;
1298 }
1299
1300 MarkFreedPages (BaseAddress, Pages);
1301 if (gCpu != NULL) {
1302 //
1303 // Set flag to make sure allocating memory without GUARD for page table
1304 // operation; otherwise infinite loops could be caused.
1305 //
1306 mOnGuarding = TRUE;
1307 //
1308 // Note: This might overwrite other attributes needed by other features,
1309 // such as NX memory protection.
1310 //
1311 Status = gCpu->SetMemoryAttributes (
1312 gCpu,
1313 BaseAddress,
1314 EFI_PAGES_TO_SIZE (Pages),
1315 EFI_MEMORY_RP
1316 );
1317 //
1318 // Normally we should ASSERT the returned Status. But there might be memory
1319 // alloc/free involved in SetMemoryAttributes(), which might fail this
1320 // calling. It's rare case so it's OK to let a few tiny holes be not-guarded.
1321 //
1322 if (EFI_ERROR (Status)) {
1323 DEBUG ((DEBUG_WARN, "Failed to guard freed pages: %p (%lu)\n", BaseAddress, (UINT64)Pages));
1324 }
1325 mOnGuarding = FALSE;
1326 }
1327 }
1328
1329 /**
1330 Record freed pages as well as mark them as not-present, if enabled.
1331
1332 @param[in] BaseAddress Base address of just freed pages.
1333 @param[in] Pages Number of freed pages.
1334
1335 @return VOID.
1336 **/
1337 VOID
1338 EFIAPI
1339 GuardFreedPagesChecked (
1340 IN EFI_PHYSICAL_ADDRESS BaseAddress,
1341 IN UINTN Pages
1342 )
1343 {
1344 if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED)) {
1345 GuardFreedPages (BaseAddress, Pages);
1346 }
1347 }
1348
/**
  Mark all pages freed before CPU Arch Protocol as not-present.

  Performs a non-recursive walk over the multi-level guarded-memory bitmap
  (mGuardedMemoryMap), coalescing each run of consecutive set bits in the
  leaf bitmaps into a single GuardFreedPages() call. Afterwards it refreshes
  mLastPromotedPage from the top-most freed page so that later promotion
  (see PromoteGuardedFreePages) starts searching from there.

**/
VOID
GuardAllFreedPages (
  VOID
  )
{
  UINTN     Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];    // Max slot index per level
  UINTN     Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];     // Address shift per level
  UINTN     Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];    // Current slot per level
  UINT64    Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];     // Table pointer per level
  UINT64    Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];  // Base address per level
  UINT64    TableEntry;
  UINT64    Address;
  UINT64    GuardPage;          // Start of the current run; (UINT64)-1 = no run open
  INTN      Level;
  UINT64    BitIndex;
  UINTN     GuardPageNumber;    // Length (in pages) of the current run

  //
  // Nothing to do if the bitmap was never populated or the recorded depth
  // is out of range.
  //
  if (mGuardedMemoryMap == 0 ||
      mMapLevel == 0 ||
      mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH) {
    return;
  }

  CopyMem (Entries, mLevelMask, sizeof (Entries));
  CopyMem (Shifts, mLevelShift, sizeof (Shifts));

  SetMem (Tables, sizeof(Tables), 0);
  SetMem (Addresses, sizeof(Addresses), 0);
  SetMem (Indices, sizeof(Indices), 0);

  //
  // Start at the shallowest populated level; deeper levels are entered as
  // non-zero entries are found (explicit stack in the per-level arrays).
  //
  Level           = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
  Tables[Level]   = mGuardedMemoryMap;
  Address         = 0;
  GuardPage       = (UINT64)-1;
  GuardPageNumber = 0;

  while (TRUE) {
    if (Indices[Level] > Entries[Level]) {
      //
      // Current level exhausted: pop back up one level.
      //
      Tables[Level] = 0;
      Level        -= 1;
    } else {
      TableEntry = ((UINT64 *)(UINTN)(Tables[Level]))[Indices[Level]];
      Address    = Addresses[Level];

      if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
        //
        // Interior level: descend into this entry (even if zero; the leaf
        // scan below handles an all-zero bitmap via the early break).
        //
        Level            += 1;
        Tables[Level]     = TableEntry;
        Addresses[Level]  = Address;
        Indices[Level]    = 0;

        continue;
      } else {
        //
        // Leaf level: scan the 64-bit bitmap one page-bit at a time,
        // extending the current run on set bits and flushing it to
        // GuardFreedPages() when a clear bit ends the run.
        //
        BitIndex = 1;
        while (BitIndex != 0) {
          if ((TableEntry & BitIndex) != 0) {
            if (GuardPage == (UINT64)-1) {
              GuardPage = Address;
            }
            ++GuardPageNumber;
          } else if (GuardPageNumber > 0) {
            GuardFreedPages (GuardPage, GuardPageNumber);
            GuardPageNumber = 0;
            GuardPage       = (UINT64)-1;
          }

          //
          // NOTE(review): an open run is NOT flushed here when the rest of
          // the bitmap is zero; it is carried over and flushed by a later
          // clear bit — presumably intentional so runs can span adjacent
          // bitmap words.
          //
          if (TableEntry == 0) {
            break;
          }

          Address += EFI_PAGES_TO_SIZE (1);
          BitIndex = LShiftU64 (BitIndex, 1);
        }
      }
    }

    //
    // Walked above the shallowest populated level: traversal complete.
    //
    if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
      break;
    }

    //
    // Advance to the next slot of the current level and recompute the base
    // address it covers.
    //
    Indices[Level] += 1;
    Address         = (Level == 0) ? 0 : Addresses[Level - 1];
    Addresses[Level] = Address | LShiftU64 (Indices[Level], Shifts[Level]);

  }

  //
  // Update the maximum address of freed page which can be used for memory
  // promotion upon out-of-memory-space.
  //
  GetLastGuardedFreePageAddress (&Address);
  if (Address != 0) {
    mLastPromotedPage = Address;
  }
}
1447
1448 /**
1449 This function checks to see if the given memory map descriptor in a memory map
1450 can be merged with any guarded free pages.
1451
1452 @param MemoryMapEntry A pointer to a descriptor in MemoryMap.
1453 @param MaxAddress Maximum address to stop the merge.
1454
1455 @return VOID
1456
1457 **/
1458 VOID
1459 MergeGuardPages (
1460 IN EFI_MEMORY_DESCRIPTOR *MemoryMapEntry,
1461 IN EFI_PHYSICAL_ADDRESS MaxAddress
1462 )
1463 {
1464 EFI_PHYSICAL_ADDRESS EndAddress;
1465 UINT64 Bitmap;
1466 INTN Pages;
1467
1468 if (!IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED) ||
1469 MemoryMapEntry->Type >= EfiMemoryMappedIO) {
1470 return;
1471 }
1472
1473 Bitmap = 0;
1474 Pages = EFI_SIZE_TO_PAGES ((UINTN)(MaxAddress - MemoryMapEntry->PhysicalStart));
1475 Pages -= (INTN)MemoryMapEntry->NumberOfPages;
1476 while (Pages > 0) {
1477 if (Bitmap == 0) {
1478 EndAddress = MemoryMapEntry->PhysicalStart +
1479 EFI_PAGES_TO_SIZE ((UINTN)MemoryMapEntry->NumberOfPages);
1480 Bitmap = GetGuardedMemoryBits (EndAddress, GUARDED_HEAP_MAP_ENTRY_BITS);
1481 }
1482
1483 if ((Bitmap & 1) == 0) {
1484 break;
1485 }
1486
1487 Pages--;
1488 MemoryMapEntry->NumberOfPages++;
1489 Bitmap = RShiftU64 (Bitmap, 1);
1490 }
1491 }
1492
/**
  Put part (at most 64 pages a time) guarded free pages back to free page pool.

  Freed memory guard is used to detect Use-After-Free (UAF) memory issue, which
  makes use of 'Used then throw away' way to detect any illegal access to freed
  memory. The thrown-away memory will be marked as not-present so that any access
  to those memory (after free) will be caught by page-fault exception.

  The problem is that this will consume lots of memory space. Once no memory
  left in pool to allocate, we have to restore part of the freed pages to their
  normal function. Otherwise the whole system will stop functioning.

  @param StartAddress    Start address of promoted memory.
  @param EndAddress      End address of promoted memory.

  @return TRUE    Succeeded to promote memory.
  @return FALSE   No free memory found.

**/
BOOLEAN
PromoteGuardedFreePages (
  OUT EFI_PHYSICAL_ADDRESS      *StartAddress,
  OUT EFI_PHYSICAL_ADDRESS      *EndAddress
  )
{
  EFI_STATUS              Status;
  UINTN                   AvailablePages;   // Length of the run of freed pages found
  UINT64                  Bitmap;           // Guard bits for the current 64-page window
  EFI_PHYSICAL_ADDRESS    Start;            // Candidate start of the promoted run

  if (!IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED)) {
    return FALSE;
  }

  //
  // Similar to memory allocation service, always search the freed pages in
  // descending direction.
  //
  Start = mLastPromotedPage;
  AvailablePages = 0;
  while (AvailablePages == 0) {
    //
    // Step one 64-page window down from the last promoted position.
    //
    Start -= EFI_PAGES_TO_SIZE (GUARDED_HEAP_MAP_ENTRY_BITS);
    //
    // If the address wraps around, try the really freed pages at top.
    //
    if (Start > mLastPromotedPage) {
      GetLastGuardedFreePageAddress (&Start);
      ASSERT (Start != 0);
      Start -= EFI_PAGES_TO_SIZE (GUARDED_HEAP_MAP_ENTRY_BITS);
    }

    //
    // Collect the leading run of set (freed) bits in this window. Clear
    // bits before any run just advance Start; a clear bit after the run
    // terminates it.
    //
    // NOTE(review): this outer loop has no explicit give-up condition;
    // presumably a freed run always exists when this is called — verify
    // against callers.
    //
    Bitmap = GetGuardedMemoryBits (Start, GUARDED_HEAP_MAP_ENTRY_BITS);
    while (Bitmap > 0) {
      if ((Bitmap & 1) != 0) {
        ++AvailablePages;
      } else if (AvailablePages == 0) {
        Start += EFI_PAGES_TO_SIZE (1);
      } else {
        break;
      }

      Bitmap = RShiftU64 (Bitmap, 1);
    }
  }

  if (AvailablePages != 0) {
    DEBUG ((DEBUG_INFO, "Promoted pages: %lX (%lx)\r\n", Start, (UINT64)AvailablePages));
    //
    // The promoted pages are no longer "freed but guarded": drop their bits.
    //
    ClearGuardedMemoryBits (Start, AvailablePages);

    if (gCpu != NULL) {
      //
      // Set flag to make sure allocating memory without GUARD for page table
      // operation; otherwise infinite loops could be caused.
      //
      mOnGuarding = TRUE;
      //
      // Restore normal (present) attributes so the pages are usable again.
      //
      Status = gCpu->SetMemoryAttributes (gCpu, Start, EFI_PAGES_TO_SIZE(AvailablePages), 0);
      ASSERT_EFI_ERROR (Status);
      mOnGuarding = FALSE;
    }

    mLastPromotedPage = Start;
    *StartAddress = Start;
    *EndAddress = Start + EFI_PAGES_TO_SIZE (AvailablePages) - 1;
    return TRUE;
  }

  return FALSE;
}
1581
1582 /**
1583 Notify function used to set all Guard pages before CPU Arch Protocol installed.
1584 **/
1585 VOID
1586 HeapGuardCpuArchProtocolNotify (
1587 VOID
1588 )
1589 {
1590 ASSERT (gCpu != NULL);
1591
1592 if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_PAGE|GUARD_HEAP_TYPE_POOL) &&
1593 IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED)) {
1594 DEBUG ((DEBUG_ERROR, "Heap guard and freed memory guard cannot be enabled at the same time.\n"));
1595 CpuDeadLoop ();
1596 }
1597
1598 if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_PAGE|GUARD_HEAP_TYPE_POOL)) {
1599 SetAllGuardPages ();
1600 }
1601
1602 if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED)) {
1603 GuardAllFreedPages ();
1604 }
1605 }
1606
1607 /**
1608 Helper function to convert a UINT64 value in binary to a string.
1609
1610 @param[in] Value Value of a UINT64 integer.
1611 @param[out] BinString String buffer to contain the conversion result.
1612
1613 @return VOID.
1614 **/
1615 VOID
1616 Uint64ToBinString (
1617 IN UINT64 Value,
1618 OUT CHAR8 *BinString
1619 )
1620 {
1621 UINTN Index;
1622
1623 if (BinString == NULL) {
1624 return;
1625 }
1626
1627 for (Index = 64; Index > 0; --Index) {
1628 BinString[Index - 1] = '0' + (Value & 1);
1629 Value = RShiftU64 (Value, 1);
1630 }
1631 BinString[64] = '\0';
1632 }
1633
/**
  Dump the guarded memory bit map.

  Walks the multi-level guarded-memory bitmap and prints each leaf-level
  64-bit bitmap as a binary string, one row per 64 pages, at
  HEAP_GUARD_DEBUG_LEVEL. Consecutive all-zero rows are compressed into a
  single "..." line. Does nothing when no heap guard type is enabled or the
  bitmap was never populated.
**/
VOID
EFIAPI
DumpGuardedMemoryBitmap (
  VOID
  )
{
  UINTN     Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];    // Max slot index per level
  UINTN     Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];     // Address shift per level
  UINTN     Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];    // Current slot per level
  UINT64    Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];     // Table pointer per level
  UINT64    Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];  // Base address per level
  UINT64    TableEntry;
  UINT64    Address;
  INTN      Level;
  UINTN     RepeatZero;     // Count of consecutive all-zero leaf rows seen
  CHAR8     String[GUARDED_HEAP_MAP_ENTRY_BITS + 1];
  CHAR8     *Ruler1;
  CHAR8     *Ruler2;

  if (!IsHeapGuardEnabled (GUARD_HEAP_TYPE_ALL)) {
    return;
  }

  if (mGuardedMemoryMap == 0 ||
      mMapLevel == 0 ||
      mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH) {
    return;
  }

  //
  // Column rulers: Ruler1 marks 16-digit groups, Ruler2 numbers each bit
  // position within a row.
  //
  Ruler1 = "               3               2               1               0";
  Ruler2 = "FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210";

  DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "============================="
                                  " Guarded Memory Bitmap "
                                  "==============================\r\n"));
  DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "                  %a\r\n", Ruler1));
  DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "                  %a\r\n", Ruler2));

  CopyMem (Entries, mLevelMask, sizeof (Entries));
  CopyMem (Shifts, mLevelShift, sizeof (Shifts));

  SetMem (Indices, sizeof(Indices), 0);
  SetMem (Tables, sizeof(Tables), 0);
  SetMem (Addresses, sizeof(Addresses), 0);

  //
  // Non-recursive walk: start at the shallowest populated level and descend
  // into non-zero entries (explicit stack in the per-level arrays).
  //
  Level         = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
  Tables[Level] = mGuardedMemoryMap;
  Address       = 0;
  RepeatZero    = 0;

  while (TRUE) {
    if (Indices[Level] > Entries[Level]) {

      //
      // Current level exhausted: pop up a level and print a separator.
      //
      Tables[Level] = 0;
      Level        -= 1;
      RepeatZero    = 0;

      DEBUG ((
        HEAP_GUARD_DEBUG_LEVEL,
        "========================================="
        "=========================================\r\n"
        ));

    } else {

      TableEntry = ((UINT64 *)(UINTN)Tables[Level])[Indices[Level]];
      Address    = Addresses[Level];

      if (TableEntry == 0) {

        //
        // All-zero leaf row: print the first one, print "..." for the
        // second, and suppress any further repeats.
        //
        if (Level == GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
          if (RepeatZero == 0) {
            Uint64ToBinString(TableEntry, String);
            DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));
          } else if (RepeatZero == 1) {
            DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "...             : ...\r\n"));
          }
          RepeatZero += 1;
        }

      } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {

        //
        // Interior level with a non-zero entry: descend into it.
        //
        Level            += 1;
        Tables[Level]     = TableEntry;
        Addresses[Level]  = Address;
        Indices[Level]    = 0;
        RepeatZero        = 0;

        continue;

      } else {

        //
        // Non-zero leaf row: print the bitmap as 64 binary digits.
        //
        RepeatZero = 0;
        Uint64ToBinString(TableEntry, String);
        DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));

      }
    }

    //
    // Walked above the shallowest populated level: traversal complete.
    //
    if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
      break;
    }

    //
    // Advance to the next slot of the current level and recompute the base
    // address it covers.
    //
    Indices[Level] += 1;
    Address         = (Level == 0) ? 0 : Addresses[Level - 1];
    Addresses[Level] = Address | LShiftU64(Indices[Level], Shifts[Level]);

  }
}
1746